author     phk <phk@FreeBSD.org>  1994-08-02 20:15:59 +0000
committer  phk <phk@FreeBSD.org>  1994-08-02 20:15:59 +0000
commit     5ba9f35203b5fe0daba8f1ded6df2bcb38880681 (patch)
tree       5a7e9c8d4cf5d1bd3b4c8577610d406f89f85545
parent     ceb5ca06c6ed8827fe5069db7033fc696325b681 (diff)
Here comes the right import of gcc-2.6.0.
-rw-r--r-- gnu/usr.bin/cc/Makefile 8
-rw-r--r-- gnu/usr.bin/cc/Makefile.inc 10
-rw-r--r-- gnu/usr.bin/cc/c++/Makefile 13
-rw-r--r-- gnu/usr.bin/cc/c++/g++.c 535
-rw-r--r-- gnu/usr.bin/cc/cc/Makefile 18
-rw-r--r-- gnu/usr.bin/cc/cc/cc.1 4111
-rw-r--r-- gnu/usr.bin/cc/cc/gcc.c 4896
-rw-r--r-- gnu/usr.bin/cc/cc1/Makefile 13
-rw-r--r-- gnu/usr.bin/cc/cc1/c-aux-info.c 639
-rw-r--r-- gnu/usr.bin/cc/cc1/c-convert.c 95
-rw-r--r-- gnu/usr.bin/cc/cc1/c-decl.c 6797
-rw-r--r-- gnu/usr.bin/cc/cc1/c-iterate.c 595
-rw-r--r-- gnu/usr.bin/cc/cc1/c-lang.c 129
-rw-r--r-- gnu/usr.bin/cc/cc1/c-lex.c 1983
-rw-r--r-- gnu/usr.bin/cc/cc1/c-parse.c 3530
-rw-r--r-- gnu/usr.bin/cc/cc1/c-pragma.c 188
-rw-r--r-- gnu/usr.bin/cc/cc1/c-typeck.c 6384
-rw-r--r-- gnu/usr.bin/cc/cc1plus/Makefile 13
-rw-r--r-- gnu/usr.bin/cc/cc1plus/call.c 2909
-rw-r--r-- gnu/usr.bin/cc/cc1plus/class.c 4940
-rw-r--r-- gnu/usr.bin/cc/cc1plus/class.h 116
-rw-r--r-- gnu/usr.bin/cc/cc1plus/cp-tree.h 2373
-rw-r--r-- gnu/usr.bin/cc/cc1plus/cvt.c 2044
-rw-r--r-- gnu/usr.bin/cc/cc1plus/decl.c 12030
-rw-r--r-- gnu/usr.bin/cc/cc1plus/decl.h 54
-rw-r--r-- gnu/usr.bin/cc/cc1plus/decl2.c 3102
-rw-r--r-- gnu/usr.bin/cc/cc1plus/edsel.c 927
-rw-r--r-- gnu/usr.bin/cc/cc1plus/errfn.c 217
-rw-r--r-- gnu/usr.bin/cc/cc1plus/error.c 1404
-rw-r--r-- gnu/usr.bin/cc/cc1plus/except.c 1481
-rw-r--r-- gnu/usr.bin/cc/cc1plus/expr.c 275
-rw-r--r-- gnu/usr.bin/cc/cc1plus/gc.c 988
-rw-r--r-- gnu/usr.bin/cc/cc1plus/hash.h 197
-rw-r--r-- gnu/usr.bin/cc/cc1plus/init.c 4077
-rw-r--r-- gnu/usr.bin/cc/cc1plus/input.c 184
-rw-r--r-- gnu/usr.bin/cc/cc1plus/lex.c 4818
-rw-r--r-- gnu/usr.bin/cc/cc1plus/lex.h 130
-rw-r--r-- gnu/usr.bin/cc/cc1plus/method.c 1948
-rw-r--r-- gnu/usr.bin/cc/cc1plus/parse.c 7604
-rw-r--r-- gnu/usr.bin/cc/cc1plus/parse.h 84
-rw-r--r-- gnu/usr.bin/cc/cc1plus/pt.c 2465
-rw-r--r-- gnu/usr.bin/cc/cc1plus/ptree.c 167
-rw-r--r-- gnu/usr.bin/cc/cc1plus/search.c 3199
-rw-r--r-- gnu/usr.bin/cc/cc1plus/sig.c 1023
-rw-r--r-- gnu/usr.bin/cc/cc1plus/spew.c 436
-rw-r--r-- gnu/usr.bin/cc/cc1plus/tree.c 1763
-rw-r--r-- gnu/usr.bin/cc/cc1plus/tree.def 103
-rw-r--r-- gnu/usr.bin/cc/cc1plus/typeck.c 7233
-rw-r--r-- gnu/usr.bin/cc/cc1plus/typeck2.c 1607
-rw-r--r-- gnu/usr.bin/cc/cc1plus/xref.c 839
-rw-r--r-- gnu/usr.bin/cc/cc_int/Makefile 12
-rw-r--r-- gnu/usr.bin/cc/cc_int/aux-output.c 2138
-rw-r--r-- gnu/usr.bin/cc/cc_int/bc-emit.c 991
-rw-r--r-- gnu/usr.bin/cc/cc_int/bc-optab.c 788
-rw-r--r-- gnu/usr.bin/cc/cc_int/c-common.c 1997
-rw-r--r-- gnu/usr.bin/cc/cc_int/caller-save.c 762
-rw-r--r-- gnu/usr.bin/cc/cc_int/calls.c 3061
-rw-r--r-- gnu/usr.bin/cc/cc_int/combine.c 10790
-rw-r--r-- gnu/usr.bin/cc/cc_int/convert.c 460
-rw-r--r-- gnu/usr.bin/cc/cc_int/cse.c 8546
-rw-r--r-- gnu/usr.bin/cc/cc_int/dbxout.c 2585
-rw-r--r-- gnu/usr.bin/cc/cc_int/dwarfout.c 5667
-rw-r--r-- gnu/usr.bin/cc/cc_int/emit-rtl.c 3359
-rw-r--r-- gnu/usr.bin/cc/cc_int/explow.c 1152
-rw-r--r-- gnu/usr.bin/cc/cc_int/expmed.c 3957
-rw-r--r-- gnu/usr.bin/cc/cc_int/expr.c 10192
-rw-r--r-- gnu/usr.bin/cc/cc_int/final.c 3069
-rw-r--r-- gnu/usr.bin/cc/cc_int/flow.c 2793
-rw-r--r-- gnu/usr.bin/cc/cc_int/fold-const.c 4889
-rw-r--r-- gnu/usr.bin/cc/cc_int/function.c 5496
-rw-r--r-- gnu/usr.bin/cc/cc_int/getpwd.c 94
-rw-r--r-- gnu/usr.bin/cc/cc_int/global.c 1680
-rw-r--r-- gnu/usr.bin/cc/cc_int/insn-attrtab.c 14
-rw-r--r-- gnu/usr.bin/cc/cc_int/insn-emit.c 3973
-rw-r--r-- gnu/usr.bin/cc/cc_int/insn-extract.c 533
-rw-r--r-- gnu/usr.bin/cc/cc_int/insn-opinit.c 216
-rw-r--r-- gnu/usr.bin/cc/cc_int/insn-output.c 6865
-rw-r--r-- gnu/usr.bin/cc/cc_int/insn-peep.c 28
-rw-r--r-- gnu/usr.bin/cc/cc_int/insn-recog.c 7138
-rw-r--r-- gnu/usr.bin/cc/cc_int/integrate.c 3035
-rw-r--r-- gnu/usr.bin/cc/cc_int/jump.c 4395
-rw-r--r-- gnu/usr.bin/cc/cc_int/local-alloc.c 2355
-rw-r--r-- gnu/usr.bin/cc/cc_int/loop.c 6587
-rw-r--r-- gnu/usr.bin/cc/cc_int/obstack.c 485
-rw-r--r-- gnu/usr.bin/cc/cc_int/optabs.c 4100
-rw-r--r-- gnu/usr.bin/cc/cc_int/print-rtl.c 328
-rw-r--r-- gnu/usr.bin/cc/cc_int/print-tree.c 642
-rw-r--r-- gnu/usr.bin/cc/cc_int/real.c 5969
-rw-r--r-- gnu/usr.bin/cc/cc_int/recog.c 1970
-rw-r--r-- gnu/usr.bin/cc/cc_int/reg-stack.c 3008
-rw-r--r-- gnu/usr.bin/cc/cc_int/regclass.c 1856
-rw-r--r-- gnu/usr.bin/cc/cc_int/reload.c 5650
-rw-r--r-- gnu/usr.bin/cc/cc_int/reload1.c 7122
-rw-r--r-- gnu/usr.bin/cc/cc_int/reorg.c 4281
-rw-r--r-- gnu/usr.bin/cc/cc_int/rtl.c 850
-rw-r--r-- gnu/usr.bin/cc/cc_int/rtlanal.c 1835
-rw-r--r-- gnu/usr.bin/cc/cc_int/sched.c 4884
-rw-r--r-- gnu/usr.bin/cc/cc_int/sdbout.c 1530
-rw-r--r-- gnu/usr.bin/cc/cc_int/stmt.c 5431
-rw-r--r-- gnu/usr.bin/cc/cc_int/stor-layout.c 1176
-rw-r--r-- gnu/usr.bin/cc/cc_int/stupid.c 518
-rw-r--r-- gnu/usr.bin/cc/cc_int/toplev.c 4061
-rw-r--r-- gnu/usr.bin/cc/cc_int/tree.c 3996
-rw-r--r-- gnu/usr.bin/cc/cc_int/unroll.c 3345
-rw-r--r-- gnu/usr.bin/cc/cc_int/varasm.c 3883
-rw-r--r-- gnu/usr.bin/cc/cc_int/version.c 1
-rw-r--r-- gnu/usr.bin/cc/cc_int/xcoffout.c 536
-rw-r--r-- gnu/usr.bin/cc/cpp/Makefile 12
-rw-r--r-- gnu/usr.bin/cc/cpp/cccp.c 9804
-rw-r--r-- gnu/usr.bin/cc/cpp/cexp.c 1926
-rw-r--r-- gnu/usr.bin/cc/cpp/cpp.1 1
-rw-r--r-- gnu/usr.bin/cc/include/basic-block.h 68
-rw-r--r-- gnu/usr.bin/cc/include/bc-arity.h 232
-rw-r--r-- gnu/usr.bin/cc/include/bc-emit.h 133
-rw-r--r-- gnu/usr.bin/cc/include/bc-opcode.h 238
-rw-r--r-- gnu/usr.bin/cc/include/bc-optab.h 74
-rw-r--r-- gnu/usr.bin/cc/include/bc-typecd.def 21
-rw-r--r-- gnu/usr.bin/cc/include/bc-typecd.h 53
-rw-r--r-- gnu/usr.bin/cc/include/bi-run.h 165
-rw-r--r-- gnu/usr.bin/cc/include/bytecode.h 91
-rw-r--r-- gnu/usr.bin/cc/include/bytetypes.h 35
-rw-r--r-- gnu/usr.bin/cc/include/c-gperf.h 184
-rw-r--r-- gnu/usr.bin/cc/include/c-lex.h 79
-rw-r--r-- gnu/usr.bin/cc/include/c-parse.h 65
-rw-r--r-- gnu/usr.bin/cc/include/c-tree.h 483
-rw-r--r-- gnu/usr.bin/cc/include/conditions.h 115
-rw-r--r-- gnu/usr.bin/cc/include/config.h 42
-rw-r--r-- gnu/usr.bin/cc/include/convert.h 23
-rw-r--r-- gnu/usr.bin/cc/include/defaults.h 133
-rw-r--r-- gnu/usr.bin/cc/include/expr.h 834
-rw-r--r-- gnu/usr.bin/cc/include/flags.h 359
-rw-r--r-- gnu/usr.bin/cc/include/function.h 216
-rw-r--r-- gnu/usr.bin/cc/include/gbl-ctors.h 80
-rw-r--r-- gnu/usr.bin/cc/include/glimits.h 93
-rw-r--r-- gnu/usr.bin/cc/include/hard-reg-set.h 270
-rw-r--r-- gnu/usr.bin/cc/include/i386/bsd.h 129
-rw-r--r-- gnu/usr.bin/cc/include/i386/gas.h 154
-rw-r--r-- gnu/usr.bin/cc/include/i386/gstabs.h 9
-rw-r--r-- gnu/usr.bin/cc/include/i386/i386.h 1665
-rw-r--r-- gnu/usr.bin/cc/include/i386/perform.h 97
-rw-r--r-- gnu/usr.bin/cc/include/i386/unix.h 145
-rw-r--r-- gnu/usr.bin/cc/include/input.h 46
-rw-r--r-- gnu/usr.bin/cc/include/insn-attr.h 19
-rw-r--r-- gnu/usr.bin/cc/include/insn-codes.h 201
-rw-r--r-- gnu/usr.bin/cc/include/insn-config.h 12
-rw-r--r-- gnu/usr.bin/cc/include/insn-flags.h 598
-rw-r--r-- gnu/usr.bin/cc/include/integrate.h 125
-rw-r--r-- gnu/usr.bin/cc/include/longlong.h 1185
-rw-r--r-- gnu/usr.bin/cc/include/loop.h 169
-rw-r--r-- gnu/usr.bin/cc/include/machmode.def 118
-rw-r--r-- gnu/usr.bin/cc/include/machmode.h 169
-rw-r--r-- gnu/usr.bin/cc/include/modemap.def 30
-rw-r--r-- gnu/usr.bin/cc/include/multilib.h 3
-rw-r--r-- gnu/usr.bin/cc/include/obstack.h 513
-rw-r--r-- gnu/usr.bin/cc/include/output.h 241
-rw-r--r-- gnu/usr.bin/cc/include/pcp.h 100
-rw-r--r-- gnu/usr.bin/cc/include/real.h 437
-rw-r--r-- gnu/usr.bin/cc/include/recog.h 120
-rw-r--r-- gnu/usr.bin/cc/include/regs.h 168
-rw-r--r-- gnu/usr.bin/cc/include/reload.h 235
-rw-r--r-- gnu/usr.bin/cc/include/rtl.def 764
-rw-r--r-- gnu/usr.bin/cc/include/rtl.h 957
-rw-r--r-- gnu/usr.bin/cc/include/stack.h 41
-rw-r--r-- gnu/usr.bin/cc/include/tconfig.h 42
-rw-r--r-- gnu/usr.bin/cc/include/tm.h 327
-rw-r--r-- gnu/usr.bin/cc/include/tree.def 695
-rw-r--r-- gnu/usr.bin/cc/include/tree.h 1638
-rw-r--r-- gnu/usr.bin/cc/include/typeclass.h 14
-rw-r--r-- gnu/usr.bin/cc/libgcc/Makefile 46
-rw-r--r-- gnu/usr.bin/cc/libgcc/libgcc1.c 608
-rw-r--r-- gnu/usr.bin/cc/libgcc/libgcc2.c 2151
171 files changed, 307568 insertions, 0 deletions
diff --git a/gnu/usr.bin/cc/Makefile b/gnu/usr.bin/cc/Makefile
new file mode 100644
index 0000000..74e88b8
--- /dev/null
+++ b/gnu/usr.bin/cc/Makefile
@@ -0,0 +1,8 @@
+#
+# $FreeBSD$
+#
+
+PGMDIR= cc_int cpp cc1 cc cc1plus c++ libgcc
+SUBDIR= $(PGMDIR)
+
+.include <bsd.subdir.mk>
diff --git a/gnu/usr.bin/cc/Makefile.inc b/gnu/usr.bin/cc/Makefile.inc
new file mode 100644
index 0000000..21d3580
--- /dev/null
+++ b/gnu/usr.bin/cc/Makefile.inc
@@ -0,0 +1,10 @@
+#
+# $FreeBSD$
+#
+
+CFLAGS+= -I${.CURDIR} -I${.CURDIR}/../include
+CFLAGS+= -DGCC_INCLUDE_DIR=\"FOO\"
+CFLAGS+= -DDEFAULT_TARGET_VERSION=\"2.6.0\"
+CFLAGS+= -DDEFAULT_TARGET_MACHINE=\"i386-unknown-freebsd\"
+CFLAGS+= -DMD_EXEC_PREFIX=\"/usr/libexec/\"
+CFLAGS+= -DSTANDARD_STARTFILE_PREFIX=\"/usr/lib\"
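These CFLAGS bake the target identity and search prefixes into the driver at build time, so the imported GCC sources need no local patching. A minimal sketch of the pattern (hypothetical, not part of this commit; the actual consumers of these macros are in gcc.c and cccp.c):

/* Hypothetical illustration: code built with the -D flags above sees
   the quoted strings as ordinary preprocessor macros.  Fallbacks are
   provided here only so the sketch compiles on its own.  */
#include <stdio.h>

#ifndef DEFAULT_TARGET_VERSION
#define DEFAULT_TARGET_VERSION "2.6.0"
#endif
#ifndef DEFAULT_TARGET_MACHINE
#define DEFAULT_TARGET_MACHINE "i386-unknown-freebsd"
#endif
#ifndef MD_EXEC_PREFIX
#define MD_EXEC_PREFIX "/usr/libexec/"
#endif

int
main ()
{
  printf ("gcc %s for %s, exec prefix %s\n",
          DEFAULT_TARGET_VERSION, DEFAULT_TARGET_MACHINE, MD_EXEC_PREFIX);
  return 0;
}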
diff --git a/gnu/usr.bin/cc/c++/Makefile b/gnu/usr.bin/cc/c++/Makefile
new file mode 100644
index 0000000..71fb59e
--- /dev/null
+++ b/gnu/usr.bin/cc/c++/Makefile
@@ -0,0 +1,13 @@
+#
+# $FreeBSD$
+#
+
+PROG = c++
+SRCS = g++.c
+BINDIR= /usr/bin
+NOMAN= 1
+LDDESTDIR+= -L${.CURDIR}/../cc_int/obj
+LDDESTDIR+= -L${.CURDIR}/../cc_int
+LDADD+= -lcc_int
+
+.include <bsd.prog.mk>
diff --git a/gnu/usr.bin/cc/c++/g++.c b/gnu/usr.bin/cc/c++/g++.c
new file mode 100644
index 0000000..fcd1029
--- /dev/null
+++ b/gnu/usr.bin/cc/c++/g++.c
@@ -0,0 +1,535 @@
+/* G++ preliminary semantic processing for the compiler driver.
+ Copyright (C) 1993, 1994 Free Software Foundation, Inc.
+ Contributed by Brendan Kehoe (brendan@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* This program is a wrapper to the main `gcc' driver. For GNU C++,
+ we need to do two special things: a) append `-lg++' in situations
+ where it's appropriate, to link in libg++, and b) add `-xc++'..`-xnone'
+ around file arguments named `foo.c' or `foo.i'. So, we do all of
+ this semantic processing then just exec gcc with the new argument
+ list.
+
+ We used to do all of this in a small shell script, but many users
+ found the performance of this as a shell script to be unacceptable.
+ In situations where your PATH has a lot of NFS-mounted directories,
+ using a script that runs sed and other things would be a nasty
+ performance hit. With this program, we never search the PATH at all. */
+
+#include "config.h"
+#ifdef __STDC__
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/file.h> /* May get R_OK, etc. on some systems. */
+
+/* Defined to the name of the compiler; if using a cross compiler, the
+ Makefile should compile this file with the proper name
+ (e.g., "i386-aout-gcc"). */
+#ifndef GCC_NAME
+#define GCC_NAME "gcc"
+#endif
+
+/* This bit is set if we saw a `-xfoo' language specification. */
+#define LANGSPEC (1<<1)
+/* This bit is set if they did `-lm' or `-lmath'. */
+#define MATHLIB (1<<2)
+
+/* On MSDOS, write temp files in current dir
+ because there's no place else we can expect to use. */
+#ifdef __MSDOS__
+#ifndef P_tmpdir
+#define P_tmpdir "."
+#endif
+#ifndef R_OK
+#define R_OK 4
+#define W_OK 2
+#define X_OK 1
+#endif
+#endif
+
+#ifndef VPROTO
+#ifdef __STDC__
+#define PVPROTO(ARGS) ARGS
+#define VPROTO(ARGS) ARGS
+#define VA_START(va_list,var) va_start(va_list,var)
+#else
+#define PVPROTO(ARGS) ()
+#define VPROTO(ARGS) (va_alist) va_dcl
+#define VA_START(va_list,var) va_start(va_list)
+#endif
+#endif
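/* Annotation (editor's sketch, not part of the imported file): for a
   function written as `fatal VPROTO((char *format, ...))', the macros
   above expand two ways:

     with __STDC__:     fatal (char *format, ...)
                        VA_START (ap, format)  ->  va_start (ap, format)

     without __STDC__:  fatal (va_alist) va_dcl
                        VA_START (ap, format)  ->  va_start (ap)

   In the K&R case `format' is not a real parameter, which is why
   fatal() and error() below re-declare it and fetch it with
   `format = va_arg (ap, char *);' when __STDC__ is not defined.  */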
+
+extern int errno, sys_nerr;
+#if defined(bsd4_4) || defined(__NetBSD__)
+extern const char *const sys_errlist[];
+#else
+extern char *sys_errlist[];
+#endif
+
+/* Name with which this program was invoked. */
+static char *programname;
+
+#ifdef HAVE_VPRINTF
+/* Output an error message and exit */
+
+static void
+fatal VPROTO((char *format, ...))
+{
+#ifndef __STDC__
+ char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef __STDC__
+ format = va_arg (ap, char*);
+#endif
+
+ fprintf (stderr, "%s: ", programname);
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+#if 0
+ /* XXX Not needed for g++ driver. */
+ delete_temp_files ();
+#endif
+ exit (1);
+}
+
+static void
+error VPROTO((char *format, ...))
+{
+#ifndef __STDC__
+ char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef __STDC__
+ format = va_arg (ap, char*);
+#endif
+
+ fprintf (stderr, "%s: ", programname);
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+
+ fprintf (stderr, "\n");
+}
+
+#else /* not HAVE_VPRINTF */
+
+static void
+error (msg, arg1, arg2)
+ char *msg, *arg1, *arg2;
+{
+ fprintf (stderr, "%s: ", programname);
+ fprintf (stderr, msg, arg1, arg2);
+ fprintf (stderr, "\n");
+}
+
+static void
+fatal (msg, arg1, arg2)
+ char *msg, *arg1, *arg2;
+{
+ error (msg, arg1, arg2);
+#if 0
+ /* XXX Not needed for g++ driver. */
+ delete_temp_files ();
+#endif
+ exit (1);
+}
+
+#endif /* not HAVE_VPRINTF */
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal g++ abort.");
+}
+
+char *
+xmalloc (size)
+ unsigned size;
+{
+ register char *value = (char *) malloc (size);
+ if (value == 0)
+ fatal ("virtual memory exhausted");
+ return value;
+}
+
+/* Return a newly-allocated string whose contents concatenate those
+ of s1, s2, s3. */
+static char *
+concat (s1, s2, s3)
+ char *s1, *s2, *s3;
+{
+ int len1 = strlen (s1), len2 = strlen (s2), len3 = strlen (s3);
+ char *result = xmalloc (len1 + len2 + len3 + 1);
+
+ strcpy (result, s1);
+ strcpy (result + len1, s2);
+ strcpy (result + len1 + len2, s3);
+ *(result + len1 + len2 + len3) = 0;
+
+ return result;
+}
+
+static void
+pfatal_with_name (name)
+ char *name;
+{
+ char *s;
+
+ if (errno < sys_nerr)
+ s = concat ("%s: ", sys_errlist[errno], "");
+ else
+ s = "cannot open %s";
+ fatal (s, name);
+}
+
+#ifdef __MSDOS__
+/* This is the common prefix we use to make temp file names. */
+char *temp_filename;
+
+/* Length of the prefix. */
+int temp_filename_length;
+
+/* Compute a string to use as the base of all temporary file names. */
+static char *
+choose_temp_base_try (try, base)
+char *try;
+char *base;
+{
+ char *rv;
+ if (base)
+ rv = base;
+ else if (try == (char *)0)
+ rv = 0;
+ else if (access (try, R_OK | W_OK) != 0)
+ rv = 0;
+ else
+ rv = try;
+ return rv;
+}
+
+static void
+choose_temp_base ()
+{
+ char *base = 0;
+ int len;
+
+ base = choose_temp_base_try (getenv ("TMPDIR"), base);
+ base = choose_temp_base_try (getenv ("TMP"), base);
+ base = choose_temp_base_try (getenv ("TEMP"), base);
+
+#ifdef P_tmpdir
+ base = choose_temp_base_try (P_tmpdir, base);
+#endif
+
+ base = choose_temp_base_try ("/usr/tmp", base);
+ base = choose_temp_base_try ("/tmp", base);
+
+ /* If all else fails, use the current directory! */
+ if (base == (char *)0)
+ base = "./";
+
+ len = strlen (base);
+ temp_filename = xmalloc (len + sizeof("/ccXXXXXX"));
+ strcpy (temp_filename, base);
+ if (len > 0 && temp_filename[len-1] != '/')
+ temp_filename[len++] = '/';
+ strcpy (temp_filename + len, "ccXXXXXX");
+
+ mktemp (temp_filename);
+ temp_filename_length = strlen (temp_filename);
+ if (temp_filename_length == 0)
+ abort ();
+}
+
+static void
+perror_exec (name)
+ char *name;
+{
+ char *s;
+
+ if (errno < sys_nerr)
+ s = concat ("installation problem, cannot exec %s: ",
+ sys_errlist[errno], "");
+ else
+ s = "installation problem, cannot exec %s";
+ error (s, name);
+}
+
+/* This is almost exactly what's in gcc.c:pexecute for MSDOS. */
+void
+run_dos (program, argv)
+ char *program;
+ char *argv[];
+{
+ char *scmd, *rf;
+ FILE *argfile;
+ int i;
+
+ choose_temp_base (); /* not in gcc.c */
+
+ scmd = (char *) malloc (strlen (program) + strlen (temp_filename) + 10);
+ rf = scmd + strlen (program) + 6;
+ sprintf (scmd, "%s.exe @%s.gp", program, temp_filename);
+
+ argfile = fopen (rf, "w");
+ if (argfile == 0)
+ pfatal_with_name (rf);
+
+ for (i=1; argv[i]; i++)
+ {
+ char *cp;
+ for (cp = argv[i]; *cp; cp++)
+ {
+ if (*cp == '"' || *cp == '\'' || *cp == '\\' || isspace (*cp))
+ fputc ('\\', argfile);
+ fputc (*cp, argfile);
+ }
+ fputc ('\n', argfile);
+ }
+ fclose (argfile);
+
+ i = system (scmd);
+
+ remove (rf);
+
+ if (i == -1)
+ perror_exec (program);
+}
+#endif /* __MSDOS__ */
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ register int i, j = 0;
+ register char *p;
+ int verbose = 0;
+
+ /* This will be NULL if we encounter a situation where we should not
+ link in libg++. */
+ char *library = "-lg++";
+
+ /* Used to track options that take arguments, so we don't go wrapping
+ those with -xc++/-xnone. */
+ char *quote = NULL;
+
+ /* The new argument list will be contained in this. */
+ char **arglist;
+
+ /* The name of the compiler we will want to run---by default, it
+ will be the definition of `GCC_NAME', e.g., `gcc'. */
+ char *gcc = GCC_NAME;
+
+ /* Non-zero if we saw a `-xfoo' language specification on the
+ command line. Used to avoid adding our own -xc++ if the user
+ already gave a language for the file. */
+ int saw_speclang = 0;
+
+ /* Non-zero if we saw `-lm' or `-lmath' on the command line. */
+ int saw_math = 0;
+
+ /* The number of arguments being added to what's in argv. By
+ default it's one new argument (adding `-lg++'). We use this
+ to track the number of times we've inserted -xc++/-xnone as well. */
+ int added = 1;
+
+ /* An array used to flag each argument that needs a bit set for
+ LANGSPEC or MATHLIB. */
+ int *args;
+
+ p = argv[0] + strlen (argv[0]);
+ while (p != argv[0] && p[-1] != '/')
+ --p;
+ programname = p;
+
+ if (argc == 1)
+ fatal ("No input files specified.\n");
+
+#ifndef __MSDOS__
+ /* We do a little magic to find out where the main gcc executable
+ is. If they ran us as /usr/local/bin/g++, then we will look
+ for /usr/local/bin/gcc; similarly, if they just ran us as `g++',
+ we'll just look for `gcc'. */
+ if (p != argv[0])
+ {
+ *--p = '\0';
+ gcc = (char *) malloc ((strlen (argv[0]) + 1 + strlen (GCC_NAME) + 1)
+ * sizeof (char));
+ sprintf (gcc, "%s/%s", argv[0], GCC_NAME);
+ }
+#endif
+
+ args = (int *) malloc (argc * sizeof (int));
+ bzero (args, argc * sizeof (int));
+
+ for (i = 1; i < argc; i++)
+ {
+ /* If the previous option took an argument, we swallow it here. */
+ if (quote)
+ {
+ quote = NULL;
+ continue;
+ }
+
+ if (argv[i][0] == '\0' || argv[i][1] == '\0')
+ continue;
+
+ if (argv[i][0] == '-')
+ {
+ if (strcmp (argv[i], "-nostdlib") == 0)
+ {
+ added--;
+ library = NULL;
+ }
+ else if (strcmp (argv[i], "-lm") == 0
+ || strcmp (argv[i], "-lmath") == 0)
+ args[i] |= MATHLIB;
+ else if (strcmp (argv[i], "-v") == 0)
+ {
+ verbose = 1;
+ if (argc == 2)
+ {
+ /* If they only gave us `-v', don't try to link
+ in libg++. */
+ added--;
+ library = NULL;
+ }
+ }
+ else if (strncmp (argv[i], "-x", 2) == 0)
+ saw_speclang = 1;
+ else if (((argv[i][2] == '\0'
+ && (char *)strchr ("bBVDUoeTuIYmLiA", argv[i][1]) != NULL)
+ || strcmp (argv[i], "-Tdata") == 0))
+ quote = argv[i];
+ else if (((argv[i][2] == '\0'
+ && (char *) strchr ("cSEM", argv[i][1]) != NULL)
+ || strcmp (argv[i], "-MM") == 0))
+ {
+ /* Don't specify libraries if we won't link, since that would
+ cause a warning. */
+ added--;
+ library = NULL;
+ }
+ else
+ /* Pass other options through. */
+ continue;
+ }
+ else
+ {
+ int len;
+
+ if (saw_speclang)
+ continue;
+
+ /* If the filename ends in .c or .i, put options around it.
+ But not if a specified -x option is currently active. */
+ len = strlen (argv[i]);
+ if (len > 2
+ && (argv[i][len - 1] == 'c' || argv[i][len - 1] == 'i')
+ && argv[i][len - 2] == '.')
+ {
+ args[i] |= LANGSPEC;
+ added += 2;
+ }
+ }
+ }
+
+ if (quote)
+ fatal ("argument to `%s' missing\n", quote);
+
+ if (added)
+ {
+ arglist = (char **) malloc ((argc + added + 1) * sizeof (char *));
+
+ for (i = 1, j = 1; i < argc; i++, j++)
+ {
+ arglist[j] = argv[i];
+
+ /* Make sure -lg++ is before the math library, since libg++
+ itself uses those math routines. */
+ if (!saw_math && (args[i] & MATHLIB) && library)
+ {
+ saw_math = 1;
+ arglist[j] = library;
+ arglist[++j] = argv[i];
+ }
+
+ /* Wrap foo.c and foo.i files in a language specification to
+ force the gcc compiler driver to run cc1plus on them. */
+ if (args[i] & LANGSPEC)
+ {
+ int len = strlen (argv[i]);
+ if (argv[i][len - 1] == 'i')
+ arglist[j++] = "-xc++-cpp-output";
+ else
+ arglist[j++] = "-xc++";
+ arglist[j++] = argv[i];
+ arglist[j] = "-xnone";
+ }
+ }
+
+ /* Add `-lg++' if we haven't already done so. */
+ if (library && !saw_math)
+ arglist[j++] = library;
+
+ arglist[j] = NULL;
+ }
+ else
+ /* No need to copy 'em all. */
+ arglist = argv;
+
+ arglist[0] = gcc;
+
+ if (verbose)
+ {
+ if (j == 0)
+ j = argc;
+
+ for (i = 0; i < j; i++)
+ fprintf (stderr, " %s", arglist[i]);
+ fprintf (stderr, "\n");
+ }
+#ifndef OS2
+#ifdef __MSDOS__
+ run_dos (gcc, arglist);
+#else /* !__MSDOS__ */
+ if (execvp (gcc, arglist) < 0)
+ pfatal_with_name (gcc);
+#endif /* __MSDOS__ */
+#else /* OS2 */
+ if (spawnvp (gcc, arglist) < 0)
+ pfatal_with_name (gcc);
+#endif
+
+ return 0;
+}
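To make the rewriting concrete: an invocation such as `g++ -O foo.c -lm bar.o' is execed as `gcc -O -xc++ foo.c -xnone -lg++ -lm bar.o'; the wrapper tags foo.c as C++ source and slips -lg++ in ahead of -lm because libg++ itself calls the math routines. A distilled version of the suffix test that decides which bare arguments get wrapped (an illustrative sketch, not part of the commit):

/* Sketch: mirrors the length/suffix check in main() above.  A name
   ending in `.c' is wrapped as -xc++ ... -xnone; one ending in `.i'
   gets -xc++-cpp-output ... -xnone instead.  */
#include <string.h>

static int
needs_lang_wrap (name)
     char *name;
{
  int len = strlen (name);

  return len > 2
         && (name[len - 1] == 'c' || name[len - 1] == 'i')
         && name[len - 2] == '.';
}

int
main ()
{
  /* foo.c and foo.i are wrapped; foo.cc, foo.o and foo.s are not.  */
  return !(needs_lang_wrap ("foo.c")
           && needs_lang_wrap ("foo.i")
           && !needs_lang_wrap ("foo.cc")
           && !needs_lang_wrap ("foo.o"));
}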
diff --git a/gnu/usr.bin/cc/cc/Makefile b/gnu/usr.bin/cc/cc/Makefile
new file mode 100644
index 0000000..c6f0112
--- /dev/null
+++ b/gnu/usr.bin/cc/cc/Makefile
@@ -0,0 +1,18 @@
+#
+# $FreeBSD$
+#
+
+PROG = cc
+SRCS = gcc.c
+BINDIR= /usr/bin
+MLINKS+=cc.1 gcc.1
+MLINKS+=cc.1 c++.1
+MLINKS+=cc.1 g++.1
+LDDESTDIR+= -L${.CURDIR}/../cc_int/obj
+LDDESTDIR+= -L${.CURDIR}/../cc_int
+LDADD+= -lcc_int
+
+afterinstall:
+ cd $(DESTDIR)$(BINDIR) ; rm gcc ; ln -s cc gcc
+
+.include <bsd.prog.mk>
diff --git a/gnu/usr.bin/cc/cc/cc.1 b/gnu/usr.bin/cc/cc/cc.1
new file mode 100644
index 0000000..223115c
--- /dev/null
+++ b/gnu/usr.bin/cc/cc/cc.1
@@ -0,0 +1,4111 @@
+.\" Copyright (c) 1991, 1992, 1993, 1994 Free Software Foundation -*-Text-*-
+.\" See section COPYING for conditions for redistribution
+.\"
+.\" Set up \*(lq, \*(rq if -man hasn't already set it up.
+.if @@\*(lq@ \{\
+. ds lq "
+. if t .ds lq ``
+. if !@@\(lq@ .ds lq "\(lq
+.\}
+.if @@\*(rq@ \{\
+. ds rq "
+. if t .ds rq ''
+. if !@@\(rq@ .ds rq "\(rq
+.\}
+.de Id
+.ds Rv \\$3
+.ds Dt \\$4
+..
+.de Sp
+.if n .sp
+.if t .sp 0.4
+..
+.Id $Id: gcc.1,v 1.4 1993/10/13 23:19:12 pesch Exp $
+.TH GCC 1 "\*(Dt" "GNU Tools" "GNU Tools"
+.SH NAME
+gcc, g++ \- GNU project C and C++ Compiler (v2.6)
+.SH SYNOPSIS
+.B gcc
+.RI "[ " option " | " filename " ].\|.\|."
+.br
+.B g++
+.RI "[ " option " | " filename " ].\|.\|."
+.SH WARNING
+The information in this man page is an extract from the full
+documentation of the GNU C compiler, and is limited to the meaning of
+the options.
+.PP
+This man page is not kept up to date except when volunteers want to
+maintain it. If you find a discrepancy between the man page and the
+software, please check the Info file, which is the authoritative
+documentation.
+.PP
+If we find that the things in this man page that are out of date cause
+significant confusion or complaints, we will stop distributing the man
+page. The alternative, updating the man page when we update the Info
+file, is impossible because the rest of the work of maintaining GNU CC
+leaves us no time for that. The GNU project regards man pages as
+obsolete and should not let them take time away from other things.
+.PP
+For complete and current documentation, refer to the Info file `\|\c
+.B gcc\c
+\&\|' or the manual
+.I
+Using and Porting GNU CC (for version 2.0)\c
+\&. Both are made from the Texinfo source file
+.BR gcc.texinfo .
+.SH DESCRIPTION
+The C and C++ compilers are integrated. Both process input files
+through one or more of four stages: preprocessing, compilation,
+assembly, and linking. Source filename suffixes identify the source
+language, but which name you use for the compiler governs default
+assumptions:
+.TP
+.B gcc
+assumes preprocessed (\c
+.B .i\c
+\&) files are C and assumes C style linking.
+.TP
+.B g++
+assumes preprocessed (\c
+.B .i\c
+\&) files are C++ and assumes C++ style linking.
+.PP
+Suffixes of source file names indicate the language and kind of
+processing to be done:
+.Sp
+.nf
+.ta \w'\fB.cxx\fP 'u
+\&\fB.c\fP C source; preprocess, compile, assemble
+\&\fB.C\fP C++ source; preprocess, compile, assemble
+\&\fB.cc\fP C++ source; preprocess, compile, assemble
+\&\fB.cxx\fP C++ source; preprocess, compile, assemble
+\&\fB.m\fP Objective-C source; preprocess, compile, assemble
+\&\fB.i\fP preprocessed C; compile, assemble
+\&\fB.ii\fP preprocessed C++; compile, assemble
+\&\fB.s\fP Assembler source; assemble
+\&\fB.S\fP Assembler source; preprocess, assemble
+\&\fB.h\fP Preprocessor file; not usually named on command line
+.Sp
+.fi
+Files with other suffixes are passed to the linker. Common cases include:
+.Sp
+.nf
+\&\fB.o\fP Object file
+\&\fB.a\fP Archive file
+.br
+.fi
+.Sp
+Linking is always the last stage unless you use one of the
+.BR \-c ,
+.BR \-S ,
+or
+.B \-E
+options to avoid it (or unless compilation errors stop the whole
+process). For the link stage, all
+.B .o
+files corresponding to source files,
+.B \-l
+libraries, unrecognized filenames (including named
+.B .o
+object files and
+.B .a
+archives)
+are passed to the linker in command-line order.
+.SH OPTIONS
+Options must be separate: `\|\c
+.B \-dr\c
+\&\|' is quite different from `\|\c
+.B \-d \-r
+\&\|'.
+.PP
+Most `\|\c
+.B \-f\c
+\&\|' and `\|\c
+.B \-W\c
+\&\|' options have two contrary forms:
+.BI \-f name
+and
+.BI \-fno\- name\c
+\& (or
+.BI \-W name
+and
+.BI \-Wno\- name\c
+\&). Only the non-default forms are shown here.
+.PP
+Here is a summary of all the options, grouped by type. Explanations are
+in the following sections.
+.hy 0
+.na
+.TP
+.B Overall Options
+.br
+\-c
+\-S
+\-E
+.RI "\-o " file
+\-pipe
+\-v
+.RI "\-x " language
+.TP
+.B Language Options
+\-ansi
+\-fall\-virtual
+\-fcond\-mismatch
+\-fdollars\-in\-identifiers
+\-fenum\-int\-equiv
+\-fexternal\-templates
+\-fno\-asm
+\-fno\-builtin
+\-fno\-strict\-prototype
+\-fsigned\-bitfields
+\-fsigned\-char
+\-fthis\-is\-variable
+\-funsigned\-bitfields
+\-funsigned\-char
+\-fwritable\-strings
+\-traditional
+\-traditional\-cpp
+\-trigraphs
+.TP
+.B Warning Options
+\-fsyntax\-only
+\-pedantic
+\-pedantic\-errors
+\-w
+\-W
+\-Wall
+\-Waggregate\-return
+\-Wcast\-align
+\-Wcast\-qual
+\-Wchar\-subscripts
+\-Wcomment
+\-Wconversion
+\-Wenum\-clash
+\-Werror
+\-Wformat
+.RI \-Wid\-clash\- len
+\-Wimplicit
+\-Winline
+\-Wmissing\-prototypes
+\-Wmissing\-declarations
+\-Wnested\-externs
+\-Wno\-import
+\-Wparentheses
+\-Wpointer\-arith
+\-Wredundant\-decls
+\-Wreturn\-type
+\-Wshadow
+\-Wstrict\-prototypes
+\-Wswitch
+\-Wtemplate\-debugging
+\-Wtraditional
+\-Wtrigraphs
+\-Wuninitialized
+\-Wunused
+\-Wwrite\-strings
+.TP
+.B Debugging Options
+\-a
+.RI \-d letters
+\-fpretend\-float
+\-g
+.RI \-g level
+\-gcoff
+\-gxcoff
+\-gxcoff+
+\-gdwarf
+\-gdwarf+
+\-gstabs
+\-gstabs+
+\-ggdb
+\-p
+\-pg
+\-save\-temps
+.RI \-print\-file\-name= library
+\-print\-libgcc\-file\-name
+.RI \-print\-prog\-name= program
+.TP
+.B Optimization Options
+\-fcaller\-saves
+\-fcse\-follow\-jumps
+\-fcse\-skip\-blocks
+\-fdelayed\-branch
+\-felide\-constructors
+\-fexpensive\-optimizations
+\-ffast\-math
+\-ffloat\-store
+\-fforce\-addr
+\-fforce\-mem
+\-finline\-functions
+\-fkeep\-inline\-functions
+\-fmemoize\-lookups
+\-fno\-default\-inline
+\-fno\-defer\-pop
+\-fno\-function\-cse
+\-fno\-inline
+\-fno\-peephole
+\-fomit\-frame\-pointer
+\-frerun\-cse\-after\-loop
+\-fschedule\-insns
+\-fschedule\-insns2
+\-fstrength\-reduce
+\-fthread\-jumps
+\-funroll\-all\-loops
+\-funroll\-loops
+\-O
+\-O2
+.TP
+.B Preprocessor Options
+.RI \-A assertion
+\-C
+\-dD
+\-dM
+\-dN
+.RI \-D macro [\|= defn \|]
+\-E
+\-H
+.RI "\-idirafter " dir
+.RI "\-include " file
+.RI "\-imacros " file
+.RI "\-iprefix " file
+.RI "\-iwithprefix " dir
+\-M
+\-MD
+\-MM
+\-MMD
+\-nostdinc
+\-P
+.RI \-U macro
+\-undef
+.TP
+.B Assembler Option
+.RI \-Wa, option
+.TP
+.B Linker Options
+.RI \-l library
+\-nostartfiles
+\-nostdlib
+\-static
+\-shared
+\-symbolic
+.RI "\-Xlinker\ " option
+.RI \-Wl, option
+.RI "\-u " symbol
+.TP
+.B Directory Options
+.RI \-B prefix
+.RI \-I dir
+\-I\-
+.RI \-L dir
+.TP
+.B Target Options
+.RI "\-b " machine
+.RI "\-V " version
+.TP
+.B Configuration Dependent Options
+.I M680x0\ Options
+.br
+\-m68000
+\-m68020
+\-m68020\-40
+\-m68030
+\-m68040
+\-m68881
+\-mbitfield
+\-mc68000
+\-mc68020
+\-mfpa
+\-mnobitfield
+\-mrtd
+\-mshort
+\-msoft\-float
+.Sp
+.I VAX Options
+.br
+\-mg
+\-mgnu
+\-munix
+.Sp
+.I SPARC Options
+.br
+\-mepilogue
+\-mfpu
+\-mhard\-float
+\-mno\-fpu
+\-mno\-epilogue
+\-msoft\-float
+\-msparclite
+\-mv8
+\-msupersparc
+\-mcypress
+.Sp
+.I Convex Options
+.br
+\-margcount
+\-mc1
+\-mc2
+\-mnoargcount
+.Sp
+.I AMD29K Options
+.br
+\-m29000
+\-m29050
+\-mbw
+\-mdw
+\-mkernel\-registers
+\-mlarge
+\-mnbw
+\-mnodw
+\-msmall
+\-mstack\-check
+\-muser\-registers
+.Sp
+.I M88K Options
+.br
+\-m88000
+\-m88100
+\-m88110
+\-mbig\-pic
+\-mcheck\-zero\-division
+\-mhandle\-large\-shift
+\-midentify\-revision
+\-mno\-check\-zero\-division
+\-mno\-ocs\-debug\-info
+\-mno\-ocs\-frame\-position
+\-mno\-optimize\-arg\-area
+\-mno\-serialize\-volatile
+\-mno\-underscores
+\-mocs\-debug\-info
+\-mocs\-frame\-position
+\-moptimize\-arg\-area
+\-mserialize\-volatile
+.RI \-mshort\-data\- num
+\-msvr3
+\-msvr4
+\-mtrap\-large\-shift
+\-muse\-div\-instruction
+\-mversion\-03.00
+\-mwarn\-passed\-structs
+.Sp
+.I RS6000 Options
+.br
+\-mfp\-in\-toc
+\-mno\-fp\-in\-toc
+.Sp
+.I RT Options
+.br
+\-mcall\-lib\-mul
+\-mfp\-arg\-in\-fpregs
+\-mfp\-arg\-in\-gregs
+\-mfull\-fp\-blocks
+\-mhc\-struct\-return
+\-min\-line\-mul
+\-mminimum\-fp\-blocks
+\-mnohc\-struct\-return
+.Sp
+.I MIPS Options
+.br
+\-mcpu=\fIcpu type\fP
+\-mips2
+\-mips3
+\-mint64
+\-mlong64
+\-mlonglong128
+\-mmips\-as
+\-mgas
+\-mrnames
+\-mno\-rnames
+\-mgpopt
+\-mno\-gpopt
+\-mstats
+\-mno\-stats
+\-mmemcpy
+\-mno\-memcpy
+\-mno\-mips\-tfile
+\-mmips\-tfile
+\-msoft\-float
+\-mhard\-float
+\-mabicalls
+\-mno\-abicalls
+\-mhalf\-pic
+\-mno\-half\-pic
+\-G \fInum\fP
+\-nocpp
+.Sp
+.I i386 Options
+.br
+\-m486
+\-mno\-486
+\-msoft\-float
+\-mno\-fp\-ret\-in\-387
+.Sp
+.I HPPA Options
+.br
+\-mpa\-risc\-1\-0
+\-mpa\-risc\-1\-1
+\-mkernel
+\-mshared\-libs
+\-mno\-shared\-libs
+\-mlong\-calls
+\-mdisable\-fpregs
+\-mdisable\-indexing
+\-mtrailing\-colon
+.Sp
+.I i960 Options
+.br
+\-m\fIcpu-type\fP
+\-mnumerics
+\-msoft\-float
+\-mleaf\-procedures
+\-mno\-leaf\-procedures
+\-mtail\-call
+\-mno\-tail\-call
+\-mcomplex\-addr
+\-mno\-complex\-addr
+\-mcode\-align
+\-mno\-code\-align
+\-mic\-compat
+\-mic2.0\-compat
+\-mic3.0\-compat
+\-masm\-compat
+\-mintel\-asm
+\-mstrict\-align
+\-mno\-strict\-align
+\-mold\-align
+\-mno\-old\-align
+.Sp
+.I DEC Alpha Options
+.br
+\-mfp\-regs
+\-mno\-fp\-regs
+\-mno\-soft\-float
+\-msoft\-float
+.Sp
+.I System V Options
+.br
+\-G
+\-Qy
+\-Qn
+.RI \-YP, paths
+.RI \-Ym, dir
+.TP
+.B Code Generation Options
+.RI \-fcall\-saved\- reg
+.RI \-fcall\-used\- reg
+.RI \-ffixed\- reg
+\-finhibit\-size\-directive
+\-fnonnull\-objects
+\-fno\-common
+\-fno\-ident
+\-fno\-gnu\-linker
+\-fpcc\-struct\-return
+\-fpic
+\-fPIC
+\-freg\-struct\-return
+\-fshared\-data
+\-fshort\-enums
+\-fshort\-double
+\-fvolatile
+\-fvolatile\-global
+\-fverbose\-asm
+.ad b
+.hy 1
+.SH OVERALL OPTIONS
+.TP
+.BI "\-x " "language"
+Specify explicitly the
+.I language\c
+\& for the following input files (rather than choosing a default based
+on the file name suffix). This option applies to all following input
+files until the next `\|\c
+.B \-x\c
+\&\|' option. Possible values of \c
+.I language\c
+\& are
+`\|\c
+.B c\c
+\&\|', `\|\c
+.B objective\-c\c
+\&\|', `\|\c
+.B c\-header\c
+\&\|', `\|\c
+.B c++\c
+\&\|',
+`\|\c
+.B cpp\-output\c
+\&\|', `\|\c
+.B assembler\c
+\&\|', and `\|\c
+.B assembler\-with\-cpp\c
+\&\|'.
+.TP
+.B \-x none
+Turn off any specification of a language, so that subsequent files are
+handled according to their file name suffixes (as they are if `\|\c
+.B \-x\c
+\&\|'
+has not been used at all).
+.PP
+If you want only some of the four stages (preprocess, compile,
+assemble, link), you can use
+`\|\c
+.B \-x\c
+\&\|' (or filename suffixes) to tell \c
+.B gcc\c
+\& where to start, and
+one of the options `\|\c
+.B \-c\c
+\&\|', `\|\c
+.B \-S\c
+\&\|', or `\|\c
+.B \-E\c
+\&\|' to say where
+.B gcc\c
+\& is to stop. Note that some combinations (for example,
+`\|\c
+.B \-x cpp\-output \-E\c
+\&\|') instruct \c
+.B gcc\c
+\& to do nothing at all.
+.TP
+.B \-c
+Compile or assemble the source files, but do not link. The compiler
+output is an object file corresponding to each source file.
+.Sp
+By default, GCC makes the object file name for a source file by replacing
+the suffix `\|\c
+.B .c\c
+\&\|', `\|\c
+.B .i\c
+\&\|', `\|\c
+.B .s\c
+\&\|', etc., with `\|\c
+.B .o\c
+\&\|'. Use
+.B \-o\c
+\& to select another name.
+.Sp
+GCC ignores any unrecognized input files (those that do not require
+compilation or assembly) with the
+.B \-c
+option.
+.TP
+.B \-S
+Stop after the stage of compilation proper; do not assemble. The output
+is an assembler code file for each non-assembler input
+file specified.
+.Sp
+By default, GCC makes the assembler file name for a source file by
+replacing the suffix `\|\c
+.B .c\c
+\&\|', `\|\c
+.B .i\c
+\&\|', etc., with `\|\c
+.B .s\c
+\&\|'. Use
+.B \-o\c
+\& to select another name.
+.Sp
+GCC ignores any input files that don't require compilation.
+.TP
+.B \-E
+Stop after the preprocessing stage; do not run the compiler proper. The
+output is preprocessed source code, which is sent to the
+standard output.
+.Sp
+GCC ignores input files which don't require preprocessing.
+.TP
+.BI "\-o " file
+Place output in file \c
+.I file\c
+\&. This applies regardless of what
+sort of output GCC is producing, whether it be an executable file,
+an object file, an assembler file or preprocessed C code.
+.Sp
+Since only one output file can be specified, it does not make sense to
+use `\|\c
+.B \-o\c
+\&\|' when compiling more than one input file, unless you are
+producing an executable file as output.
+.Sp
+If you do not specify `\|\c
+.B \-o\c
+\&\|', the default is to put an executable file
+in `\|\c
+.B a.out\c
+\&\|', the object file for `\|\c
+.I source\c
+.B \&.\c
+.I suffix\c
+\&\c
+\&\|' in
+`\|\c
+.I source\c
+.B \&.o\c
+\&\|', its assembler file in `\|\c
+.I source\c
+.B \&.s\c
+\&\|', and
+all preprocessed C source on standard output.
+.TP
+.B \-v
+Print (on standard error output) the commands executed to run the stages
+of compilation. Also print the version number of the compiler driver
+program and of the preprocessor and the compiler proper.
+.TP
+.B \-pipe
+Use pipes rather than temporary files for communication between the
+various stages of compilation. This fails to work on some systems where
+the assembler cannot read from a pipe; but the GNU assembler has
+no trouble.
+.PP
+.SH LANGUAGE OPTIONS
+The following options control the dialect of C that the compiler
+accepts:
+.TP
+.B \-ansi
+Support all ANSI standard C programs.
+.Sp
+This turns off certain features of GNU C that are incompatible with
+ANSI C, such as the \c
+.B asm\c
+\&, \c
+.B inline\c
+\& and \c
+.B typeof
+keywords, and predefined macros such as \c
+.B unix\c
+\& and \c
+.B vax
+that identify the type of system you are using. It also enables the
+undesirable and rarely used ANSI trigraph feature, and disallows `\|\c
+.B $\c
+\&\|' as part of identifiers.
+.Sp
+The alternate keywords \c
+.B _\|_asm_\|_\c
+\&, \c
+.B _\|_extension_\|_\c
+\&,
+.B _\|_inline_\|_\c
+\& and \c
+.B _\|_typeof_\|_\c
+\& continue to work despite
+`\|\c
+.B \-ansi\c
+\&\|'. You would not want to use them in an ANSI C program, of
+course, but it is useful to put them in header files that might be included
+in compilations done with `\|\c
+.B \-ansi\c
+\&\|'. Alternate predefined macros
+such as \c
+.B _\|_unix_\|_\c
+\& and \c
+.B _\|_vax_\|_\c
+\& are also available, with or
+without `\|\c
+.B \-ansi\c
+\&\|'.
+.Sp
+The `\|\c
+.B \-ansi\c
+\&\|' option does not cause non-ANSI programs to be
+rejected gratuitously. For that, `\|\c
+.B \-pedantic\c
+\&\|' is required in
+addition to `\|\c
+.B \-ansi\c
+\&\|'.
+.Sp
+The preprocessor predefines a macro \c
+.B _\|_STRICT_ANSI_\|_\c
+\& when you use the `\|\c
+.B \-ansi\c
+\&\|'
+option. Some header files may notice this macro and refrain
+from declaring certain functions or defining certain macros that the
+ANSI standard doesn't call for; this is to avoid interfering with any
+programs that might use these names for other things.
+.TP
+.B \-fno\-asm
+Do not recognize \c
+.B asm\c
+\&, \c
+.B inline\c
+\& or \c
+.B typeof\c
+\& as a
+keyword. These words may then be used as identifiers. You can
+use \c
+.B _\|_asm_\|_\c
+\&, \c
+.B _\|_inline_\|_\c
+\& and \c
+.B _\|_typeof_\|_\c
+\& instead.
+`\|\c
+.B \-ansi\c
+\&\|' implies `\|\c
+.B \-fno\-asm\c
+\&\|'.
+.TP
+.B \-fno\-builtin
+Don't recognize built-in functions that do not begin with two leading
+underscores. Currently, the functions affected include \c
+.B _exit\c
+\&,
+.B abort\c
+\&, \c
+.B abs\c
+\&, \c
+.B alloca\c
+\&, \c
+.B cos\c
+\&, \c
+.B exit\c
+\&,
+.B fabs\c
+\&, \c
+.B labs\c
+\&, \c
+.B memcmp\c
+\&, \c
+.B memcpy\c
+\&, \c
+.B sin\c
+\&,
+.B sqrt\c
+\&, \c
+.B strcmp\c
+\&, \c
+.B strcpy\c
+\&, and \c
+.B strlen\c
+\&.
+.Sp
+The `\|\c
+.B \-ansi\c
+\&\|' option prevents \c
+.B alloca\c
+\& and \c
+.B _exit\c
+\& from
+being builtin functions.
+.TP
+.B \-fno\-strict\-prototype
+Treat a function declaration with no arguments, such as `\|\c
+.B int foo
+();\c
+\&\|', as C would treat it\(em\&as saying nothing about the number of
+arguments or their types (C++ only). Normally, such a declaration in
+C++ means that the function \c
+.B foo\c
+\& takes no arguments.
+.TP
+.B \-trigraphs
+Support ANSI C trigraphs. The `\|\c
+.B \-ansi\c
+\&\|' option implies `\|\c
+.B \-trigraphs\c
+\&\|'.
+.TP
+.B \-traditional
+Attempt to support some aspects of traditional C compilers.
+For details, see the GNU C Manual; the duplicate list here
+has been deleted so that we won't get complaints when it
+is out of date.
+.Sp
+But one note about C++ programs only (not C). `\|\c
+.B \-traditional\c
+\&\|' has one additional effect for C++: assignment to
+.B this
+is permitted. This is the same as the effect of `\|\c
+.B \-fthis\-is\-variable\c
+\&\|'.
+.TP
+.B \-traditional\-cpp
+Attempt to support some aspects of traditional C preprocessors.
+This includes the items that specifically mention the preprocessor above,
+but none of the other effects of `\|\c
+.B \-traditional\c
+\&\|'.
+.TP
+.B \-fdollars\-in\-identifiers
+Permit the use of `\|\c
+.B $\c
+\&\|' in identifiers (C++ only). You can also use
+`\|\c
+.B \-fno\-dollars\-in\-identifiers\c
+\&\|' to explicitly prohibit use of
+`\|\c
+.B $\c
+\&\|'. (GNU C++ allows `\|\c
+.B $\c
+\&\|' by default on some target systems
+but not others.)
+.TP
+.B \-fenum\-int\-equiv
+Permit implicit conversion of \c
+.B int\c
+\& to enumeration types (C++
+only). Normally GNU C++ allows conversion of \c
+.B enum\c
+\& to \c
+.B int\c
+\&,
+but not the other way around.
+.TP
+.B \-fexternal\-templates
+Produce smaller code for template declarations, by generating only a
+single copy of each template function where it is defined (C++ only).
+To use this option successfully, you must also mark all files that
+use templates with either `\|\c
+.B #pragma implementation\c
+\&\|' (the definition) or
+`\|\c
+.B #pragma interface\c
+\&\|' (declarations).
+
+When your code is compiled with `\|\c
+.B \-fexternal\-templates\c
+\&\|', all
+template instantiations are external. You must arrange for all
+necessary instantiations to appear in the implementation file; you can
+do this with a \c
+.B typedef\c
+\& that references each instantiation needed.
+Conversely, when you compile using the default option
+`\|\c
+.B \-fno\-external\-templates\c
+\&\|', all template instantiations are
+explicitly internal.
+.TP
+.B \-fall\-virtual
+Treat all possible member functions as virtual, implicitly. All
+member functions (except for constructor functions and
+.B new
+or
+.B delete
+member operators) are treated as virtual functions of the class where
+they appear.
+.Sp
+This does not mean that all calls to these member functions will be
+made through the internal table of virtual functions. Under some
+circumstances, the compiler can determine that a call to a given
+virtual function can be made directly; in these cases the calls are
+direct in any case.
+.TP
+.B \-fcond\-mismatch
+Allow conditional expressions with mismatched types in the second and
+third arguments. The value of such an expression is void.
+.TP
+.B \-fthis\-is\-variable
+Permit assignment to \c
+.B this\c
+\& (C++ only). The incorporation of
+user-defined free store management into C++ has made assignment to
+`\|\c
+.B this\c
+\&\|' an anachronism. Therefore, by default it is invalid to
+assign to \c
+.B this\c
+\& within a class member function. However, for
+backwards compatibility, you can make it valid with
+`\|\c
+.B \-fthis-is-variable\c
+\&\|'.
+.TP
+.B \-funsigned\-char
+Let the type \c
+.B char\c
+\& be unsigned, like \c
+.B unsigned char\c
+\&.
+.Sp
+Each kind of machine has a default for what \c
+.B char\c
+\& should
+be. It is either like \c
+.B unsigned char\c
+\& by default or like
+.B signed char\c
+\& by default.
+.Sp
+Ideally, a portable program should always use \c
+.B signed char\c
+\& or
+.B unsigned char\c
+\& when it depends on the signedness of an object.
+But many programs have been written to use plain \c
+.B char\c
+\& and
+expect it to be signed, or expect it to be unsigned, depending on the
+machines they were written for. This option, and its inverse, let you
+make such a program work with the opposite default.
+.Sp
+The type \c
+.B char\c
+\& is always a distinct type from each of
+.B signed char\c
+\& and \c
+.B unsigned char\c
+\&, even though its behavior
+is always just like one of those two.
+.TP
+.B \-fsigned\-char
+Let the type \c
+.B char\c
+\& be signed, like \c
+.B signed char\c
+\&.
+.Sp
+Note that this is equivalent to `\|\c
+.B \-fno\-unsigned\-char\c
+\&\|', which is
+the negative form of `\|\c
+.B \-funsigned\-char\c
+\&\|'. Likewise,
+`\|\c
+.B \-fno\-signed\-char\c
+\&\|' is equivalent to `\|\c
+.B \-funsigned\-char\c
+\&\|'.
+.TP
+.B \-fsigned\-bitfields
+.TP
+.B \-funsigned\-bitfields
+.TP
+.B \-fno\-signed\-bitfields
+.TP
+.B \-fno\-unsigned\-bitfields
+These options control whether a bitfield is
+signed or unsigned, when declared with no explicit `\|\c
+.B signed\c
+\&\|' or `\|\c
+.B unsigned\c
+\&\|' qualifier. By default, such a bitfield is
+signed, because this is consistent: the basic integer types such as
+.B int\c
+\& are signed types.
+.Sp
+However, when you specify `\|\c
+.B \-traditional\c
+\&\|', bitfields are all unsigned
+no matter what.
+.TP
+.B \-fwritable\-strings
+Store string constants in the writable data segment and don't uniquize
+them. This is for compatibility with old programs which assume they
+can write into string constants. `\|\c
+.B \-traditional\c
+\&\|' also has this
+effect.
+.Sp
+Writing into string constants is a very bad idea; \*(lqconstants\*(rq should
+be constant.
+.SH PREPROCESSOR OPTIONS
+These options control the C preprocessor, which is run on each C source
+file before actual compilation.
+.PP
+If you use the `\|\c
+.B \-E\c
+\&\|' option, GCC does nothing except preprocessing.
+Some of these options make sense only together with `\|\c
+.B \-E\c
+\&\|' because
+they cause the preprocessor output to be unsuitable for actual
+compilation.
+.TP
+.BI "\-include " "file"
+Process \c
+.I file\c
+\& as input before processing the regular input file.
+In effect, the contents of \c
+.I file\c
+\& are compiled first. Any `\|\c
+.B \-D\c
+\&\|'
+and `\|\c
+.B \-U\c
+\&\|' options on the command line are always processed before
+`\|\c
+.B \-include \c
+.I file\c
+\&\c
+\&\|', regardless of the order in which they are
+written. All the `\|\c
+.B \-include\c
+\&\|' and `\|\c
+.B \-imacros\c
+\&\|' options are
+processed in the order in which they are written.
+.TP
+.BI "\-imacros " file
+Process \c
+.I file\c
+\& as input, discarding the resulting output, before
+processing the regular input file. Because the output generated from
+.I file\c
+\& is discarded, the only effect of `\|\c
+.B \-imacros \c
+.I file\c
+\&\c
+\&\|' is to
+make the macros defined in \c
+.I file\c
+\& available for use in the main
+input. The preprocessor evaluates any `\|\c
+.B \-D\c
+\&\|' and `\|\c
+.B \-U\c
+\&\|' options
+on the command line before processing `\|\c
+.B \-imacros\c
+.I file\c
+\&\|', regardless of the order in
+which they are written. All the `\|\c
+.B \-include\c
+\&\|' and `\|\c
+.B \-imacros\c
+\&\|'
+options are processed in the order in which they are written.
+.TP
+.BI "\-idirafter " "dir"
+Add the directory \c
+.I dir\c
+\& to the second include path. The directories
+on the second include path are searched when a header file is not found
+in any of the directories in the main include path (the one that
+`\|\c
+.B \-I\c
+\&\|' adds to).
+.TP
+.BI "\-iprefix " "prefix"
+Specify \c
+.I prefix\c
+\& as the prefix for subsequent `\|\c
+.B \-iwithprefix\c
+\&\|'
+options.
+.TP
+.BI "\-iwithprefix " "dir"
+Add a directory to the second include path. The directory's name is
+made by concatenating \c
+.I prefix\c
+\& and \c
+.I dir\c
+\&, where \c
+.I prefix
+was specified previously with `\|\c
+.B \-iprefix\c
+\&\|'.
+.TP
+.B \-nostdinc
+Do not search the standard system directories for header files. Only
+the directories you have specified with `\|\c
+.B \-I\c
+\&\|' options (and the
+current directory, if appropriate) are searched.
+.Sp
+By using both `\|\c
+.B \-nostdinc\c
+\&\|' and `\|\c
+.B \-I\-\c
+\&\|', you can limit the include-file search path to only those
+directories you specify explicitly.
+.TP
+.B \-nostdinc++
+Do not search for header files in the C++\-specific standard directories,
+but do still search the other standard directories.
+(This option is used when building `\|\c
+.B libg++\c
+\&\|'.)
+.TP
+.B \-undef
+Do not predefine any nonstandard macros (including architecture flags).
+.TP
+.B \-E
+Run only the C preprocessor. Preprocess all the C source files
+specified and output the results to standard output or to the
+specified output file.
+.TP
+.B \-C
+Tell the preprocessor not to discard comments. Used with the
+`\|\c
+.B \-E\c
+\&\|' option.
+.TP
+.B \-P
+Tell the preprocessor not to generate `\|\c
+.B #line\c
+\&\|' commands.
+Used with the `\|\c
+.B \-E\c
+\&\|' option.
+.TP
+.B \-M\ [ \-MG ]
+Tell the preprocessor to output a rule suitable for \c
+.B make
+describing the dependencies of each object file. For each source file,
+the preprocessor outputs one \c
+.B make\c
+\&-rule whose target is the object
+file name for that source file and whose dependencies are all the files
+`\|\c
+.B #include\c
+\&\|'d in it. This rule may be a single line or may be
+continued with `\|\c
+.B \e\c
+\&\|'-newline if it is long. The list of rules is
+printed on standard output instead of the preprocessed C program.
+.Sp
+`\|\c
+.B \-M\c
+\&\|' implies `\|\c
+.B \-E\c
+\&\|'.
+.Sp
+`\|\c
+.B \-MG\c
+\&\|' says to treat missing header files as generated files and assume \c
+they live in the same directory as the source file. It must be specified \c
+in addition to `\|\c
+.B \-M\c
+\&\|'.
+.TP
+.B \-MM\ [ \-MG ]
+Like `\|\c
+.B \-M\c
+\&\|' but the output mentions only the user header files
+included with `\|\c
+.B #include "\c
+.I file\c
+\&"\c
+\&\|'. System header files
+included with `\|\c
+.B #include <\c
+.I file\c
+\&>\c
+\&\|' are omitted.
+.TP
+.B \-MD
+Like `\|\c
+.B \-M\c
+\&\|' but the dependency information is written to files with
+names made by replacing `\|\c
+.B .o\c
+\&\|' with `\|\c
+.B .d\c
+\&\|' at the end of the
+output file names. This is in addition to compiling the file as
+specified\(em\&`\|\c
+.B \-MD\c
+\&\|' does not inhibit ordinary compilation the way
+`\|\c
+.B \-M\c
+\&\|' does.
+.Sp
+The Mach utility `\|\c
+.B md\c
+\&\|' can be used to merge the `\|\c
+.B .d\c
+\&\|' files
+into a single dependency file suitable for using with the `\|\c
+.B make\c
+\&\|'
+command.
+.TP
+.B \-MMD
+Like `\|\c
+.B \-MD\c
+\&\|' except mention only user header files, not system
+header files.
+.TP
+.B \-H
+Print the name of each header file used, in addition to other normal
+activities.
+.TP
+.BI "\-A" "question" ( answer )
+Assert the answer
+.I answer
+for
+.I question\c
+\&, in case it is tested
+with a preprocessor conditional such as `\|\c
+.BI "#if #" question ( answer )\c
+\&\|'. `\|\c
+.B \-A\-\c
+\&\|' disables the standard
+assertions that normally describe the target machine.
+.TP
+.BI \-D macro
+Define macro \c
+.I macro\c
+\& with the string `\|\c
+.B 1\c
+\&\|' as its definition.
+.TP
+.BI \-D macro = defn
+Define macro \c
+.I macro\c
+\& as \c
+.I defn\c
+\&. All instances of `\|\c
+.B \-D\c
+\&\|' on
+the command line are processed before any `\|\c
+.B \-U\c
+\&\|' options.
+.TP
+.BI \-U macro
+Undefine macro \c
+.I macro\c
+\&. `\|\c
+.B \-U\c
+\&\|' options are evaluated after all `\|\c
+.B \-D\c
+\&\|' options, but before any `\|\c
+.B \-include\c
+\&\|' and `\|\c
+.B \-imacros\c
+\&\|' options.
+.TP
+.B \-dM
+Tell the preprocessor to output only a list of the macro definitions
+that are in effect at the end of preprocessing. Used with the `\|\c
+.B \-E\c
+\&\|'
+option.
+.TP
+.B \-dD
+Tell the preprocessor to pass all macro definitions into the output, in
+their proper sequence in the rest of the output.
+.TP
+.B \-dN
+Like `\|\c
+.B \-dD\c
+\&\|' except that the macro arguments and contents are omitted.
+Only `\|\c
+.B #define \c
+.I name\c
+\&\c
+\&\|' is included in the output.
+.SH ASSEMBLER OPTION
+.TP
+.BI "\-Wa," "option"
+Pass \c
+.I option\c
+\& as an option to the assembler. If \c
+.I option
+contains commas, it is split into multiple options at the commas.
+.SH LINKER OPTIONS
+These options come into play when the compiler links object files into
+an executable output file. They are meaningless if the compiler is
+not doing a link step.
+.TP
+.I object-file-name
+A file name that does not end in a special recognized suffix is
+considered to name an object file or library. (Object files are
+distinguished from libraries by the linker according to the file
+contents.) If GCC does a link step, these object files are used as input
+to the linker.
+.TP
+.BI \-l library
+Use the library named \c
+.I library\c
+\& when linking.
+.Sp
+The linker searches a standard list of directories for the library,
+which is actually a file named `\|\c
+.B lib\c
+.I library\c
+\&.a\c
+\&\|'. The linker
+then uses this file as if it had been specified precisely by name.
+.Sp
+The directories searched include several standard system directories
+plus any that you specify with `\|\c
+.B \-L\c
+\&\|'.
+.Sp
+Normally the files found this way are library files\(em\&archive files
+whose members are object files. The linker handles an archive file by
+scanning through it for members which define symbols that have so far
+been referenced but not defined. However, if the linker finds an
+ordinary object file rather than a library, the object file is linked
+in the usual fashion. The only difference between using an `\|\c
+.B \-l\c
+\&\|' option and specifying a file
+name is that `\|\c
+.B \-l\c
+\&\|' surrounds
+.I library
+with `\|\c
+.B lib\c
+\&\|' and `\|\c
+.B .a\c
+\&\|' and searches several directories.
+.TP
+.B \-lobjc
+You need this special case of the
+.B \-l
+option in order to link an Objective C program.
+.TP
+.B \-nostartfiles
+Do not use the standard system startup files when linking.
+The standard libraries are used normally.
+.TP
+.B \-nostdlib
+Don't use the standard system libraries and startup files when linking.
+Only the files you specify will be passed to the linker.
+.TP
+.B \-static
+On systems that support dynamic linking, this prevents linking with the shared
+libraries. On other systems, this option has no effect.
+.TP
+.B \-shared
+Produce a shared object which can then be linked with other objects to
+form an executable. Only a few systems support this option.
+.TP
+.B \-symbolic
+Bind references to global symbols when building a shared object. Warn
+about any unresolved references (unless overridden by the link editor
+option `\|\c
+.B
+\-Xlinker \-z \-Xlinker defs\c
+\&\|'). Only a few systems support
+this option.
+.TP
+.BI "\-Xlinker " "option"
+Pass \c
+.I option
+as an option to the linker. You can use this to
+supply system-specific linker options which GNU CC does not know how to
+recognize.
+.Sp
+If you want to pass an option that takes an argument, you must use
+`\|\c
+.B \-Xlinker\c
+\&\|' twice, once for the option and once for the argument.
+For example, to pass `\|\c
+.B
+\-assert definitions\c
+\&\|', you must write
+`\|\c
+.B
+\-Xlinker \-assert \-Xlinker definitions\c
+\&\|'. It does not work to write
+`\|\c
+.B
+\-Xlinker "\-assert definitions"\c
+\&\|', because this passes the entire
+string as a single argument, which is not what the linker expects.
+.TP
+.BI "\-Wl," "option"
+Pass \c
+.I option\c
+\& as an option to the linker. If \c
+.I option\c
+\& contains
+commas, it is split into multiple options at the commas.
+.TP
+.BI "\-u " "symbol"
+Pretend the symbol
+.I symbol
+is undefined, to force linking of
+library modules to define it. You can use `\|\c
+.B \-u\c
+\&\|' multiple times with
+different symbols to force loading of additional library modules.
+.SH DIRECTORY OPTIONS
+These options specify directories to search for header files, for
+libraries and for parts of the compiler:
+.TP
+.BI "\-I" "dir"
+Append directory \c
+.I dir\c
+\& to the list of directories searched for include files.
+.TP
+.B \-I\-
+Any directories you specify with `\|\c
+.B \-I\c
+\&\|' options before the `\|\c
+.B \-I\-\c
+\&\|'
+option are searched only for the case of `\|\c
+.B
+#include "\c
+.I file\c
+.B
+\&"\c
+\&\|';
+they are not searched for `\|\c
+.B #include <\c
+.I file\c
+\&>\c
+\&\|'.
+.Sp
+If additional directories are specified with `\|\c
+.B \-I\c
+\&\|' options after
+the `\|\c
+.B \-I\-\c
+\&\|', these directories are searched for all `\|\c
+.B #include\c
+\&\|'
+directives. (Ordinarily \c
+.I all\c
+\& `\|\c
+.B \-I\c
+\&\|' directories are used
+this way.)
+.Sp
+In addition, the `\|\c
+.B \-I\-\c
+\&\|' option inhibits the use of the current
+directory (where the current input file came from) as the first search
+directory for `\|\c
+.B
+#include "\c
+.I file\c
+.B
+\&"\c
+\&\|'. There is no way to
+override this effect of `\|\c
+.B \-I\-\c
+\&\|'. With `\|\c
+.B \-I.\c
+\&\|' you can specify
+searching the directory which was current when the compiler was
+invoked. That is not exactly the same as what the preprocessor does
+by default, but it is often satisfactory.
+.Sp
+`\|\c
+.B \-I\-\c
+\&\|' does not inhibit the use of the standard system directories
+for header files. Thus, `\|\c
+.B \-I\-\c
+\&\|' and `\|\c
+.B \-nostdinc\c
+\&\|' are
+independent.
+.TP
+.BI "\-L" "dir"
+Add directory \c
+.I dir\c
+\& to the list of directories to be searched
+for `\|\c
+.B \-l\c
+\&\|'.
+.TP
+.BI "\-B" "prefix"
+This option specifies where to find the executables, libraries and
+data files of the compiler itself.
+.Sp
+The compiler driver program runs one or more of the subprograms
+`\|\c
+.B cpp\c
+\&\|', `\|\c
+.B cc1\c
+\&\|' (or, for C++, `\|\c
+.B cc1plus\c
+\&\|'), `\|\c
+.B as\c
+\&\|' and `\|\c
+.B ld\c
+\&\|'. It tries
+.I prefix\c
+\& as a prefix for each program it tries to run, both with and
+without `\|\c
+.I machine\c
+.B /\c
+.I version\c
+.B /\c
+\&\|'.
+.Sp
+For each subprogram to be run, the compiler driver first tries the
+`\|\c
+.B \-B\c
+\&\|' prefix, if any. If that name is not found, or if `\|\c
+.B \-B\c
+\&\|'
+was not specified, the driver tries two standard prefixes, which are
+`\|\c
+.B /usr/lib/gcc/\c
+\&\|' and `\|\c
+.B /usr/local/lib/gcc-lib/\c
+\&\|'. If neither of
+those results in a file name that is found, the compiler driver
+searches for the unmodified program
+name, using the directories specified in your
+`\|\c
+.B PATH\c
+\&\|' environment variable.
+.Sp
+The run-time support file `\|\c
+.B libgcc.a\c
+\&\|' is also searched for using the
+`\|\c
+.B \-B\c
+\&\|' prefix, if needed. If it is not found there, the two
+standard prefixes above are tried, and that is all. The file is left
+out of the link if it is not found by those means. Most of the time,
+on most machines, `\|\c
+.B libgcc.a\c
+\&\|' is not actually necessary.
+.Sp
+You can get a similar result from the environment variable
+.B GCC_EXEC_PREFIX\c
+\&; if it is defined, its value is used as a prefix
+in the same way. If both the `\|\c
+.B \-B\c
+\&\|' option and the
+.B GCC_EXEC_PREFIX\c
+\& variable are present, the `\|\c
+.B \-B\c
+\&\|' option is
+used first and the environment variable value second.
+.SH WARNING OPTIONS
+Warnings are diagnostic messages that report constructions which
+are not inherently erroneous but which are risky or suggest there
+may have been an error.
+.Sp
+These options control the amount and kinds of warnings produced by GNU
+CC:
+.TP
+.B \-fsyntax\-only
+Check the code for syntax errors, but don't emit any output.
+.TP
+.B \-w
+Inhibit all warning messages.
+.TP
+.B \-Wno\-import
+Inhibit warning messages about the use of
+.BR #import .
+.TP
+.B \-pedantic
+Issue all the warnings demanded by strict ANSI standard C; reject
+all programs that use forbidden extensions.
+.Sp
+Valid ANSI standard C programs should compile properly with or without
+this option (though a rare few will require `\|\c
+.B \-ansi\c
+\&\|'). However,
+without this option, certain GNU extensions and traditional C features
+are supported as well. With this option, they are rejected. There is
+no reason to \c
+.I use\c
+\& this option; it exists only to satisfy pedants.
+.Sp
+`\|\c
+.B \-pedantic\c
+\&\|' does not cause warning messages for use of the
+alternate keywords whose names begin and end with `\|\c
+.B _\|_\c
+\&\|'. Pedantic
+warnings are also disabled in the expression that follows
+.B _\|_extension_\|_\c
+\&. However, only system header files should use
+these escape routes; application programs should avoid them.
+.TP
+.B \-pedantic\-errors
+Like `\|\c
+.B \-pedantic\c
+\&\|', except that errors are produced rather than
+warnings.
+.TP
+.B \-W
+Print extra warning messages for these events:
+.TP
+\ \ \ \(bu
+A nonvolatile automatic variable might be changed by a call to
+.B longjmp\c
+\&. These warnings are possible only in
+optimizing compilation.
+.Sp
+The compiler sees only the calls to \c
+.B setjmp\c
+\&. It cannot know
+where \c
+.B longjmp\c
+\& will be called; in fact, a signal handler could
+call it at any point in the code. As a result, you may get a warning
+even when there is in fact no problem because \c
+.B longjmp\c
+\& cannot
+in fact be called at the place which would cause a problem.
+.TP
+\ \ \ \(bu
+A function can return either with or without a value. (Falling
+off the end of the function body is considered returning without
+a value.) For example, this function would evoke such a
+warning:
+.Sp
+.nf
+foo (a)
+{
+ if (a > 0)
+ return a;
+}
+.Sp
+.fi
+Spurious warnings can occur because GNU CC does not realize that
+certain functions (including \c
+.B abort\c
+\& and \c
+.B longjmp\c
+\&)
+will never return.
+.TP
+\ \ \ \(bu
+An expression-statement contains no side effects.
+.TP
+\ \ \ \(bu
+An unsigned value is compared against zero with `\|\c
+.B >\c
+\&\|' or `\|\c
+.B <=\c
+\&\|'.
+.PP
+.TP
+.B \-Wimplicit
+Warn whenever a function or parameter is implicitly declared.
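+.Sp
+A minimal fragment that draws this warning, assuming no declaration
+of \c
+.B bar\c
+\& is in scope:
+.Sp
+.nf
+int foo (int x)
+{
+  return bar (x);   /* warning: implicit declaration of bar */
+}
+.Sp
+.fi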
+.TP
+.B \-Wreturn\-type
+Warn whenever a function is defined with a return-type that defaults
+to \c
+.B int\c
+\&. Also warn about any \c
+.B return\c
+\& statement with no
+return-value in a function whose return-type is not \c
+.B void\c
+\&.
+.TP
+.B \-Wunused
+Warn whenever a local variable is unused aside from its declaration,
+whenever a function is declared static but never defined, and whenever
+a statement computes a result that is explicitly not used.
+.TP
+.B \-Wswitch
+Warn whenever a \c
+.B switch\c
+\& statement has an index of enumeral type
+and lacks a \c
+.B case\c
+\& for one or more of the named codes of that
+enumeration. (The presence of a \c
+.B default\c
+\& label prevents this
+warning.) \c
+.B case\c
+\& labels outside the enumeration range also
+provoke warnings when this option is used.
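+.Sp
+For example, this sketch draws the warning, since there is no
+.B case\c
+\& for `\|\c
+.B blue\c
+\&\|' and no \c
+.B default\c
+\& label:
+.Sp
+.nf
+enum color { red, green, blue };
+
+int f (enum color c)
+{
+  switch (c)
+    {                   /* no case for blue: warning */
+    case red:   return 0;
+    case green: return 1;
+    }
+  return -1;
+}
+.Sp
+.fi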
+.TP
+.B \-Wcomment
+Warn whenever a comment-start sequence `\|\c
+.B /\(**\c
+\&\|' appears in a comment.
+.TP
+.B \-Wtrigraphs
+Warn if any trigraphs are encountered (assuming they are enabled).
+.TP
+.B \-Wformat
+Check calls to \c
+.B printf\c
+\& and \c
+.B scanf\c
+\&, etc., to make sure that
+the arguments supplied have types appropriate to the format string
+specified.
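+.Sp
+For instance, this call is diagnosed, because `\|\c
+.B %d\c
+\&\|' expects an \c
+.B int\c
+\& but a \c
+.B double\c
+\& is supplied:
+.Sp
+.nf
+printf ("%d\en", 3.14);   /* warning: format/argument mismatch */
+.Sp
+.fi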
+.TP
+.B \-Wchar\-subscripts
+Warn if an array subscript has type
+.BR char .
+This is a common cause of error, as programmers often forget that this
+type is signed on some machines.
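+.Sp
+A minimal example; if \c
+.B c\c
+\& is negative on a machine where \c
+.B char\c
+\& is signed, the subscript is out of range:
+.Sp
+.nf
+int count[256];
+
+void tally (char c)
+{
+  count[c]++;   /* warning: array subscript has type char */
+}
+.Sp
+.fi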
+.TP
+.B \-Wuninitialized
+An automatic variable is used without first being initialized.
+.Sp
+These warnings are possible only in optimizing compilation,
+because they require data flow information that is computed only
+when optimizing. If you don't specify `\|\c
+.B \-O\c
+\&\|', you simply won't
+get these warnings.
+.Sp
+These warnings occur only for variables that are candidates for
+register allocation. Therefore, they do not occur for a variable that
+is declared \c
+.B volatile\c
+\&, or whose address is taken, or whose size
+is other than 1, 2, 4 or 8 bytes. Also, they do not occur for
+structures, unions or arrays, even when they are in registers.
+.Sp
+Note that there may be no warning about a variable that is used only
+to compute a value that itself is never used, because such
+computations may be deleted by data flow analysis before the warnings
+are printed.
+.Sp
+These warnings are made optional because GNU CC is not smart
+enough to see all the reasons why the code might be correct
+despite appearing to have an error. Here is one example of how
+this can happen:
+.Sp
+.nf
+{
+ int x;
+ switch (y)
+ {
+ case 1: x = 1;
+ break;
+ case 2: x = 4;
+ break;
+ case 3: x = 5;
+ }
+ foo (x);
+}
+.Sp
+.fi
+If the value of \c
+.B y\c
+\& is always 1, 2 or 3, then \c
+.B x\c
+\& is
+always initialized, but GNU CC doesn't know this. Here is
+another common case:
+.Sp
+.nf
+{
+ int save_y;
+ if (change_y) save_y = y, y = new_y;
+ .\|.\|.
+ if (change_y) y = save_y;
+}
+.Sp
+.fi
+This has no bug because \c
+.B save_y\c
+\& is used only if it is set.
+.Sp
+Some spurious warnings can be avoided if you declare as
+.B volatile\c
+\& all the functions you use that never return.
+.TP
+.B \-Wparentheses
+Warn if parentheses are omitted in certain contexts.
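+.Sp
+One such context (a sketch, not a complete list) is an assignment
+used where a truth value is expected; writing an extra pair of
+parentheses around the assignment makes the intent explicit:
+.Sp
+.nf
+while (c = getchar ())   /* warning: suggest parentheses around
+                            assignment used as truth value */
+  process (c);
+.Sp
+.fi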
+.TP
+.B \-Wtemplate\-debugging
+When using templates in a C++ program, warn if debugging is not yet
+fully available (C++ only).
+.TP
+.B \-Wall
+All of the above `\|\c
+.B \-W\c
+\&\|' options combined. These are all the
+options which pertain to usage that we recommend avoiding and that we
+believe is easy to avoid, even in conjunction with macros.
+.PP
+The remaining `\|\c
+.B \-W.\|.\|.\c
+\&\|' options are not implied by `\|\c
+.B \-Wall\c
+\&\|'
+because they warn about constructions that we consider reasonable to
+use, on occasion, in clean programs.
+.TP
+.B \-Wtraditional
+Warn about certain constructs that behave differently in traditional and
+ANSI C.
+.TP
+\ \ \ \(bu
+Macro arguments occurring within string constants in the macro body.
+These would substitute the argument in traditional C, but are part of
+the constant in ANSI C.
+.TP
+\ \ \ \(bu
+A function declared external in one block and then used after the end of
+the block.
+.TP
+\ \ \ \(bu
+A \c
+.B switch\c
+\& statement has an operand of type \c
+.B long\c
+\&.
+.PP
+.TP
+.B \-Wshadow
+Warn whenever a local variable shadows another local variable.
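+.Sp
+For example:
+.Sp
+.nf
+int f (int x)
+{
+  int sum = 0;
+  if (x > 0)
+    {
+      int sum = x * x;   /* warning: sum shadows the outer sum */
+      return sum;
+    }
+  return sum;
+}
+.Sp
+.fi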
+.TP
+.BI "\-Wid\-clash\-" "len"
+Warn whenever two distinct identifiers match in the first \c
+.I len
+characters. This may help you prepare a program that will compile
+with certain obsolete, brain-damaged compilers.
+.TP
+.B \-Wpointer\-arith
+Warn about anything that depends on the \*(lqsize of\*(rq a function type or
+of \c
+.B void\c
+\&. GNU C assigns these types a size of 1, for
+convenience in calculations with \c
+.B void \(**\c
+\& pointers and pointers
+to functions.
+.TP
+.B \-Wcast\-qual
+Warn whenever a pointer is cast so as to remove a type qualifier from
+the target type. For example, warn if a \c
+.B const char \(**\c
+\& is cast
+to an ordinary \c
+.B char \(**\c
+\&.
+.TP
+.B \-Wcast\-align
+Warn whenever a pointer is cast such that the required alignment of the
+target is increased. For example, warn if a \c
+.B char \(**\c
+\& is cast to
+an \c
+.B int \(**\c
+\& on machines where integers can only be accessed at
+two- or four-byte boundaries.
+.TP
+.B \-Wwrite\-strings
+Give string constants the type \c
+.B const char[\c
+.I length\c
+.B ]\c
+\& so that
+copying the address of one into a non-\c
+.B const\c
+\& \c
+.B char \(**
+pointer will get a warning. These warnings will help you find at
+compile time code that can try to write into a string constant, but
+only if you have been very careful about using \c
+.B const\c
+\& in
+declarations and prototypes. Otherwise, it will just be a nuisance;
+this is why we did not make `\|\c
+.B \-Wall\c
+\&\|' request these warnings.
+.TP
+.B \-Wconversion
+Warn if a prototype causes a type conversion that is different from what
+would happen to the same argument in the absence of a prototype. This
+includes conversions of fixed point to floating and vice versa, and
+conversions changing the width or signedness of a fixed point argument
+except when the same as the default promotion.
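+.Sp
+For instance, given the prototype below, the constant is converted
+to \c
+.B float\c
+\&, whereas without a prototype it would be promoted to \c
+.B double\c
+\&; that difference is what this option reports:
+.Sp
+.nf
+void scale (float factor);
+
+void g ()
+{
+  scale (2.5);   /* warning: 2.5 passed as float rather than double */
+}
+.Sp
+.fi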
+.TP
+.B \-Waggregate\-return
+Warn if any functions that return structures or unions are defined or
+called. (In languages where you can return an array, this also elicits
+a warning.)
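+.Sp
+For example, both the definition and any call of this function are
+diagnosed:
+.Sp
+.nf
+struct point { int x, y; };
+
+struct point origin ()   /* warning: function returns an aggregate */
+{
+  struct point p = { 0, 0 };
+  return p;
+}
+.Sp
+.fi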
+.TP
+.B \-Wstrict\-prototypes
+Warn if a function is declared or defined without specifying the
+argument types. (An old-style function definition is permitted without
+a warning if preceded by a declaration which specifies the argument
+types.)
+.TP
+.B \-Wmissing\-prototypes
+Warn if a global function is defined without a previous prototype
+declaration. This warning is issued even if the definition itself
+provides a prototype. The aim is to detect global functions that fail
+to be declared in header files.
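+.Sp
+A sketch of the case this option catches; the definition below is
+diagnosed unless a header or an earlier line supplies a prototype:
+.Sp
+.nf
+/* no previous `int frob (int);' in scope */
+int frob (int n)   /* warning: no previous prototype for frob */
+{
+  return 2 * n;
+}
+.Sp
+.fi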
+.TP
+.B \-Wmissing\-declarations
+Warn if a global function is defined without a previous declaration.
+Do so even if the definition itself provides a prototype.
+Use this option to detect global functions that are not declared in
+header files.
+.TP
+.B \-Wredundant-decls
+Warn if anything is declared more than once in the same scope, even in
+cases where multiple declaration is valid and changes nothing.
+.TP
+.B \-Wnested-externs
+Warn if an \c
+.B extern\c
+\& declaration is encountered within a function.
+.TP
+.B \-Wenum\-clash
+Warn about conversion between different enumeration types (C++ only).
+.TP
+.B \-Woverloaded\-virtual
+(C++ only.)
+In a derived class, the definitions of virtual functions must match
+the type signature of a virtual function declared in the base class.
+Use this option to request warnings when a derived class declares a
+function that may be an erroneous attempt to define a virtual
+function: that is, warn when a function is declared with the same
+name as a virtual function in the base class but with a type
+signature that matches no virtual function of the base class.
+.TP
+.B \-Winline
+Warn if a function can not be inlined, and either it was declared as inline,
+or else the
+.B \-finline\-functions
+option was given.
+.TP
+.B \-Werror
+Treat warnings as errors; abort compilation after any warning.
+.SH DEBUGGING OPTIONS
+GNU CC has various special options that are used for debugging
+either your program or GCC:
+.TP
+.B \-g
+Produce debugging information in the operating system's native format
+(stabs, COFF, XCOFF, or DWARF). GDB can work with this debugging
+information.
+.Sp
+On most systems that use stabs format, `\|\c
+.B \-g\c
+\&\|' enables use of extra
+debugging information that only GDB can use; this extra information
+makes debugging work better in GDB but will probably make other debuggers
+crash or
+refuse to read the program. If you want to control for certain whether
+to generate the extra information, use `\|\c
+.B \-gstabs+\c
+\&\|', `\|\c
+.B \-gstabs\c
+\&\|',
+`\|\c
+.B \-gxcoff+\c
+\&\|', `\|\c
+.B \-gxcoff\c
+\&\|', `\|\c
+.B \-gdwarf+\c
+\&\|', or `\|\c
+.B \-gdwarf\c
+\&\|'
+(see below).
+.Sp
+Unlike most other C compilers, GNU CC allows you to use `\|\c
+.B \-g\c
+\&\|' with
+`\|\c
+.B \-O\c
+\&\|'. The shortcuts taken by optimized code may occasionally
+produce surprising results: some variables you declared may not exist
+at all; flow of control may briefly move where you did not expect it;
+some statements may not be executed because they compute constant
+results or their values were already at hand; some statements may
+execute in different places because they were moved out of loops.
+.Sp
+Nevertheless it proves possible to debug optimized output. This makes
+it reasonable to use the optimizer for programs that might have bugs.
+.PP
+The following options are useful when GNU CC is generated with the
+capability for more than one debugging format.
+.TP
+.B \-ggdb
+Produce debugging information in the native format (if that is supported),
+including GDB extensions if at all possible.
+.TP
+.B \-gstabs
+Produce debugging information in stabs format (if that is supported),
+without GDB extensions. This is the format used by DBX on most BSD
+systems.
+.TP
+.B \-gstabs+
+Produce debugging information in stabs format (if that is supported),
+using GNU extensions understood only by the GNU debugger (GDB). The
+use of these extensions is likely to make other debuggers crash or
+refuse to read the program.
+.TP
+.B \-gcoff
+Produce debugging information in COFF format (if that is supported).
+This is the format used by SDB on most System V systems prior to
+System V Release 4.
+.TP
+.B \-gxcoff
+Produce debugging information in XCOFF format (if that is supported).
+This is the format used by the DBX debugger on IBM RS/6000 systems.
+.TP
+.B \-gxcoff+
+Produce debugging information in XCOFF format (if that is supported),
+using GNU extensions understood only by the GNU debugger (GDB). The
+use of these extensions is likely to make other debuggers crash or
+refuse to read the program.
+.TP
+.B \-gdwarf
+Produce debugging information in DWARF format (if that is supported).
+This is the format used by SDB on most System V Release 4 systems.
+.TP
+.B \-gdwarf+
+Produce debugging information in DWARF format (if that is supported),
+using GNU extensions understood only by the GNU debugger (GDB). The
+use of these extensions is likely to make other debuggers crash or
+refuse to read the program.
+.PP
+.BI "\-g" "level"
+.br
+.BI "\-ggdb" "level"
+.br
+.BI "\-gstabs" "level"
+.br
+.BI "\-gcoff" "level"
+.BI "\-gxcoff" "level"
+.TP
+.BI "\-gdwarf" "level"
+Request debugging information and also use \c
+.I level\c
+\& to specify how
+much information. The default level is 2.
+.Sp
+Level 1 produces minimal information, enough for making backtraces in
+parts of the program that you don't plan to debug. This includes
+descriptions of functions and external variables, but no information
+about local variables and no line numbers.
+.Sp
+Level 3 includes extra information, such as all the macro definitions
+present in the program. Some debuggers support macro expansion when
+you use `\|\c
+.B \-g3\c
+\&\|'.
+.TP
+.B \-p
+Generate extra code to write profile information suitable for the
+analysis program \c
+.B prof\c
+\&.
+.TP
+.B \-pg
+Generate extra code to write profile information suitable for the
+analysis program \c
+.B gprof\c
+\&.
+.TP
+.B \-a
+Generate extra code to write profile information for basic blocks,
+which will record the number of times each basic block is executed.
+This data could be analyzed by a program like \c
+.B tcov\c
+\&. Note,
+however, that the format of the data is not what \c
+.B tcov\c
+\& expects.
+Eventually GNU \c
+.B gprof\c
+\& should be extended to process this data.
+.TP
+.BI "\-d" "letters"
+Says to make debugging dumps during compilation at times specified by
+.I letters\c
+\&. This is used for debugging the compiler. The file names
+for most of the dumps are made by appending a word to the source file
+name (e.g. `\|\c
+.B foo.c.rtl\c
+\&\|' or `\|\c
+.B foo.c.jump\c
+\&\|').
+.TP
+.B \-dM
+Dump all macro definitions, at the end of preprocessing, and write no
+output.
+.TP
+.B \-dN
+Dump all macro names, at the end of preprocessing.
+.TP
+.B \-dD
+Dump all macro definitions, at the end of preprocessing, in addition to
+normal output.
+.TP
+.B \-dy
+Dump debugging information during parsing, to standard error.
+.TP
+.B \-dr
+Dump after RTL generation, to `\|\c
+.I file\c
+.B \&.rtl\c
+\&\|'.
+.TP
+.B \-dx
+Just generate RTL for a function instead of compiling it. Usually used
+with `\|\c
+.B r\c
+\&\|'.
+.TP
+.B \-dj
+Dump after first jump optimization, to `\|\c
+.I file\c
+.B \&.jump\c
+\&\|'.
+.TP
+.B \-ds
+Dump after CSE (including the jump optimization that sometimes
+follows CSE), to `\|\c
+.I file\c
+.B \&.cse\c
+\&\|'.
+.TP
+.B \-dL
+Dump after loop optimization, to `\|\c
+.I file\c
+.B \&.loop\c
+\&\|'.
+.TP
+.B \-dt
+Dump after the second CSE pass (including the jump optimization that
+sometimes follows CSE), to `\|\c
+.I file\c
+.B \&.cse2\c
+\&\|'.
+.TP
+.B \-df
+Dump after flow analysis, to `\|\c
+.I file\c
+.B \&.flow\c
+\&\|'.
+.TP
+.B \-dc
+Dump after instruction combination, to `\|\c
+.I file\c
+.B \&.combine\c
+\&\|'.
+.TP
+.B \-dS
+Dump after the first instruction scheduling pass, to
+`\|\c
+.I file\c
+.B \&.sched\c
+\&\|'.
+.TP
+.B \-dl
+Dump after local register allocation, to `\|\c
+.I file\c
+.B \&.lreg\c
+\&\|'.
+.TP
+.B \-dg
+Dump after global register allocation, to `\|\c
+.I file\c
+.B \&.greg\c
+\&\|'.
+.TP
+.B \-dR
+Dump after the second instruction scheduling pass, to
+`\|\c
+.I file\c
+.B \&.sched2\c
+\&\|'.
+.TP
+.B \-dJ
+Dump after last jump optimization, to `\|\c
+.I file\c
+.B \&.jump2\c
+\&\|'.
+.TP
+.B \-dd
+Dump after delayed branch scheduling, to `\|\c
+.I file\c
+.B \&.dbr\c
+\&\|'.
+.TP
+.B \-dk
+Dump after conversion from registers to stack, to `\|\c
+.I file\c
+.B \&.stack\c
+\&\|'.
+.TP
+.B \-da
+Produce all the dumps listed above.
+.TP
+.B \-dm
+Print statistics on memory usage, at the end of the run, to
+standard error.
+.TP
+.B \-dp
+Annotate the assembler output with a comment indicating which
+pattern and alternative was used.
+.TP
+.B \-fpretend\-float
+When running a cross-compiler, pretend that the target machine uses the
+same floating point format as the host machine. The floating
+constants emitted will be incorrect, but the instruction sequence
+will probably be the same as GNU CC would make when running on
+the target machine.
+.TP
+.B \-save\-temps
+Store the usual \*(lqtemporary\*(rq intermediate files permanently; place them
+in the current directory and name them based on the source file. Thus,
+compiling `\|\c
+.B foo.c\c
+\&\|' with `\|\c
+.B \-c \-save\-temps\c
+\&\|' would produce files
+`\|\c
+.B foo.cpp\c
+\&\|' and `\|\c
+.B foo.s\c
+\&\|', as well as `\|\c
+.B foo.o\c
+\&\|'.
+.TP
+.BI "\-print\-file\-name=" "library"
+Print the full absolute name of the library file \|\c
+.nh
+.I library
+.hy
+\&\| that
+would be used when linking\(em\&and do not do anything else. With this
+option, GNU CC does not compile or link anything; it just prints the
+file name.
+.TP
+.B \-print\-libgcc\-file\-name
+Same as `\|\c
+.B \-print\-file\-name=libgcc.a\c
+\&\|'.
+.TP
+.BI "\-print\-prog\-name=" "program"
+Like `\|\c
+.B \-print\-file\-name\c
+\&\|', but searches for a program such as `\|\c
+.B cpp\c
+\&\|'.
+.SH OPTIMIZATION OPTIONS
+These options control various sorts of optimizations:
+.TP
+.B \-O
+.TP
+.B \-O1
+Optimize. Optimizing compilation takes somewhat more time, and a lot
+more memory for a large function.
+.Sp
+Without `\|\c
+.B \-O\c
+\&\|', the compiler's goal is to reduce the cost of
+compilation and to make debugging produce the expected results.
+Statements are independent: if you stop the program with a breakpoint
+between statements, you can then assign a new value to any variable or
+change the program counter to any other statement in the function and
+get exactly the results you would expect from the source code.
+.Sp
+Without `\|\c
+.B \-O\c
+\&\|', only variables declared \c
+.B register\c
+\& are
+allocated in registers. The resulting compiled code is a little worse
+than produced by PCC without `\|\c
+.B \-O\c
+\&\|'.
+.Sp
+With `\|\c
+.B \-O\c
+\&\|', the compiler tries to reduce code size and execution
+time.
+.Sp
+When you specify `\|\c
+.B \-O\c
+\&\|', the two options `\|\c
+.B \-fthread\-jumps\c
+\&\|' and `\|\c
+.B \-fdefer\-pop\c
+\&\|' are turned on. On machines that have delay slots, the `\|\c
+.B \-fdelayed\-branch\c
+\&\|' option is turned on. For those machines that can support debugging even
+without a frame pointer, the `\|\c
+.B \-fomit\-frame\-pointer\c
+\&\|' option is turned on. On some machines other flags may also be turned on.
+.TP
+.B \-O2
+Optimize even more. Nearly all supported optimizations that do not
+involve a space-speed tradeoff are performed. Loop unrolling and function
+inlining are not done, for example. As compared to
+.B \-O\c
+\&,
+this option increases both compilation time and the performance of the
+generated code.
+.TP
+.B \-O3
+Optimize yet more. This turns on everything
+.B \-O2
+does, and also turns on
+.BR \-finline\-functions .
+.TP
+.B \-O0
+Do not optimize.
+.Sp
+If you use multiple
+.B \-O
+options, with or without level numbers, the last such option is the
+one that is effective.
+.PP
+Options of the form `\|\c
+.B \-f\c
+.I flag\c
+\&\c
+\&\|' specify machine-independent
+flags. Most flags have both positive and negative forms; the negative
+form of `\|\c
+.B \-ffoo\c
+\&\|' would be `\|\c
+.B \-fno\-foo\c
+\&\|'. The following list shows
+only one form\(em\&the one which is not the default.
+You can figure out the other form by either removing `\|\c
+.B no\-\c
+\&\|' or
+adding it.
+.TP
+.B \-ffloat\-store
+Do not store floating point variables in registers. This
+prevents undesirable excess precision on machines such as the
+68000 where the floating registers (of the 68881) keep more
+precision than a \c
+.B double\c
+\& is supposed to have.
+.Sp
+For most programs, the excess precision does only good, but a few
+programs rely on the precise definition of IEEE floating point.
+Use `\|\c
+.B \-ffloat\-store\c
+\&\|' for such programs.
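+.Sp
+A sketch of the kind of code that is sensitive to excess precision
+(the exact behavior depends on the machine and the surrounding code):
+.Sp
+.nf
+int consistent (double a, double b)
+{
+  double x = a + b;
+  return x == a + b;   /* may be false if a + b is still in an
+                          80-bit register when compared */
+}
+.Sp
+.fi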
+.TP
+.B \-fmemoize\-lookups
+.TP
+.B \-fsave\-memoized
+Use heuristics to compile faster (C++ only). These heuristics are not
+enabled by default, since they are only effective for certain input
+files. Other input files compile more slowly.
+.Sp
+The first time the compiler must build a call to a member function (or
+reference to a data member), it must (1) determine whether the class
+implements member functions of that name; (2) resolve which member
+function to call (which involves figuring out what sorts of type
+conversions need to be made); and (3) check the visibility of the member
+function to the caller. All of this adds up to slower compilation.
+Normally, the second time a call is made to that member function (or
+reference to that data member), it must go through the same lengthy
+process again. This means that code like this
+.Sp
+\& cout << "This " << p << " has " << n << " legs.\en";
+.Sp
+makes six passes through all three steps. By using a software cache,
+a \*(lqhit\*(rq significantly reduces this cost. Unfortunately, using the
+cache introduces another layer of mechanisms which must be implemented,
+and so incurs its own overhead. `\|\c
+.B \-fmemoize\-lookups\c
+\&\|' enables
+the software cache.
+.Sp
+Because access privileges (visibility) to members and member functions
+may differ from one function context to the next,
+.B g++
+may need to flush the cache. With the `\|\c
+.B \-fmemoize\-lookups\c
+\&\|' flag, the cache is flushed after every
+function that is compiled. The `\|\c
+.B \-fsave\-memoized\c
+\&\|' flag enables the same software cache, but when the compiler
+determines that the context of the last function compiled would yield
+the same access privileges of the next function to compile, it
+preserves the cache.
+This is most helpful when defining many member functions for the same
+class: with the exception of member functions which are friends of
+other classes, each member function has exactly the same access
+privileges as every other, and the cache need not be flushed.
+.TP
+.B \-fno\-default\-inline
+Don't make member functions inline by default merely because they are
+defined inside the class scope (C++ only).
+.TP
+.B \-fno\-defer\-pop
+Always pop the arguments to each function call as soon as that
+function returns. For machines which must pop arguments after a
+function call, the compiler normally lets arguments accumulate on the
+stack for several function calls and pops them all at once.
+.TP
+.B \-fforce\-mem
+Force memory operands to be copied into registers before doing
+arithmetic on them. This may produce better code by making all
+memory references potential common subexpressions. When they are
+not common subexpressions, instruction combination should
+eliminate the separate register-load. I am interested in hearing
+about the difference this makes.
+.TP
+.B \-fforce\-addr
+Force memory address constants to be copied into registers before
+doing arithmetic on them. This may produce better code just as
+`\|\c
+.B \-fforce\-mem\c
+\&\|' may. I am interested in hearing about the
+difference this makes.
+.TP
+.B \-fomit\-frame\-pointer
+Don't keep the frame pointer in a register for functions that
+don't need one. This avoids the instructions to save, set up and
+restore frame pointers; it also makes an extra register available
+in many functions. \c
+.I It also makes debugging impossible on
+most machines.
+.Sp
+On some machines, such as the Vax, this flag has no effect, because
+the standard calling sequence automatically handles the frame pointer
+and nothing is saved by pretending it doesn't exist. The
+machine-description macro \c
+.B FRAME_POINTER_REQUIRED\c
+\& controls
+whether a target machine supports this flag.
+.TP
+.B \-finline\-functions
+Integrate all simple functions into their callers. The compiler
+heuristically decides which functions are simple enough to be worth
+integrating in this way.
+.Sp
+If all calls to a given function are integrated, and the function is
+declared \c
+.B static\c
+\&, then GCC normally does not output the function as
+assembler code in its own right.
+.TP
+.B \-fcaller\-saves
+Enable values to be allocated in registers that will be clobbered by
+function calls, by emitting extra instructions to save and restore the
+registers around such calls. Such allocation is done only when it
+seems to result in better code than would otherwise be produced.
+.Sp
+This option is enabled by default on certain machines, usually those
+which have no call-preserved registers to use instead.
+.TP
+.B \-fkeep\-inline\-functions
+Even if all calls to a given function are integrated, and the function
+is declared \c
+.B static\c
+\&, nevertheless output a separate run-time
+callable version of the function.
+.TP
+.B \-fno\-function\-cse
+Do not put function addresses in registers; make each instruction that
+calls a constant function contain the function's address explicitly.
+.Sp
+This option results in less efficient code, but some strange hacks
+that alter the assembler output may be confused by the optimizations
+performed when this option is not used.
+.TP
+.B \-fno\-peephole
+Disable any machine-specific peephole optimizations.
+.TP
+.B \-ffast-math
+This option allows GCC to violate some ANSI or IEEE rules/specifications
+in the interest of optimizing code for speed. For example, it allows
+the compiler to assume arguments to the \c
+.B sqrt\c
+\& function are
+non-negative numbers.
+.Sp
+This option should never be turned on by any `\|\c
+.B \-O\c
+\&\|' option since
+it can result in incorrect output for programs which depend on
+an exact implementation of IEEE or ANSI rules/specifications for
+math functions.
+.PP
+The following options control specific optimizations. The `\|\c
+.B \-O2\c
+\&\|'
+option turns on all of these optimizations except `\|\c
+.B \-funroll\-loops\c
+\&\|'
+and `\|\c
+.B \-funroll\-all\-loops\c
+\&\|'.
+.PP
+The `\|\c
+.B \-O\c
+\&\|' option usually turns on
+the `\|\c
+.B \-fthread\-jumps\c
+\&\|' and `\|\c
+.B \-fdelayed\-branch\c
+\&\|' options, but
+specific machines may change the default optimizations.
+.PP
+You can use the following flags in the rare cases when \*(lqfine-tuning\*(rq
+of optimizations to be performed is desired.
+.TP
+.B \-fstrength\-reduce
+Perform the optimizations of loop strength reduction and
+elimination of iteration variables.
+.TP
+.B \-fthread\-jumps
+Perform optimizations where we check to see if a jump branches to a
+location where another comparison subsumed by the first is found. If
+so, the first branch is redirected to either the destination of the
+second branch or a point immediately following it, depending on whether
+the condition is known to be true or false.
+.TP
+.B \-funroll\-loops
+Perform the optimization of loop unrolling. This is only done for loops
+whose number of iterations can be determined at compile time or run time.
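+.Sp
+Conceptually (the compiler's actual transformation may differ),
+unrolling rewrites a loop such as
+.Sp
+.nf
+for (i = 0; i < 100; i++)
+  a[i] = 0;
+.Sp
+.fi
+into a form that does several iterations of work per pass:
+.Sp
+.nf
+for (i = 0; i < 100; i += 4)
+  {
+    a[i] = 0;
+    a[i + 1] = 0;
+    a[i + 2] = 0;
+    a[i + 3] = 0;
+  }
+.Sp
+.fi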
+.TP
+.B \-funroll\-all\-loops
+Perform the optimization of loop unrolling. This is done for all loops.
+This usually makes programs run more slowly.
+.TP
+.B \-fcse\-follow\-jumps
+In common subexpression elimination, scan through jump instructions
+when the target of the jump is not reached by any other path. For
+example, when CSE encounters an \c
+.B if\c
+\& statement with an
+.B else\c
+\& clause, CSE will follow the jump when the condition
+tested is false.
+.TP
+.B \-fcse\-skip\-blocks
+This is similar to `\|\c
+.B \-fcse\-follow\-jumps\c
+\&\|', but causes CSE to
+follow jumps which conditionally skip over blocks. When CSE
+encounters a simple \c
+.B if\c
+\& statement with no else clause,
+`\|\c
+.B \-fcse\-skip\-blocks\c
+\&\|' causes CSE to follow the jump around the
+body of the \c
+.B if\c
+\&.
+.TP
+.B \-frerun\-cse\-after\-loop
+Re-run common subexpression elimination after loop optimization has
+been performed.
+.TP
+.B \-felide\-constructors
+Elide constructors when this seems plausible (C++ only). With this
+flag, GNU C++ initializes \c
+.B y\c
+\& directly from the call to \c
+.B foo
+without going through a temporary in the following code:
+.Sp
+.nf
+A foo ();
+A y = foo ();
+.Sp
+.fi
+Without this option, GNU C++ first initializes \c
+.B y\c
+\& by calling the
+appropriate constructor for type \c
+.B A\c
+\&; then assigns the result of
+.B foo\c
+\& to a temporary; and, finally, replaces the initial value of
+`\|\c
+.B y\c
+\&\|' with the temporary.
+.Sp
+The default behavior (`\|\c
+.B \-fno\-elide\-constructors\c
+\&\|') is specified by
+the draft ANSI C++ standard. If your program's constructors have side
+effects, using `\|\c
+.B \-felide-constructors\c
+\&\|' can make your program act
+differently, since some constructor calls may be omitted.
+.TP
+.B \-fexpensive\-optimizations
+Perform a number of minor optimizations that are relatively expensive.
+.TP
+.B \-fdelayed\-branch
+If supported for the target machine, attempt to reorder instructions
+to exploit instruction slots available after delayed branch
+instructions.
+.TP
+.B \-fschedule\-insns
+If supported for the target machine, attempt to reorder instructions to
+eliminate execution stalls due to required data being unavailable. This
+helps machines that have slow floating point or memory load instructions
+by allowing other instructions to be issued until the result of the load
+or floating point instruction is required.
+.TP
+.B \-fschedule\-insns2
+Similar to `\|\c
+.B \-fschedule\-insns\c
+\&\|', but requests an additional pass of
+instruction scheduling after register allocation has been done. This is
+especially useful on machines with a relatively small number of
+registers and where memory load instructions take more than one cycle.
+.SH TARGET OPTIONS
+By default, GNU CC compiles code for the same type of machine that you
+are using. However, it can also be installed as a cross-compiler, to
+compile for some other type of machine. In fact, several different
+configurations of GNU CC, for different target machines, can be
+installed side by side. Then you specify which one to use with the
+`\|\c
+.B \-b\c
+\&\|' option.
+.PP
+In addition, older and newer versions of GNU CC can be installed side
+by side. One of them (probably the newest) will be the default, but
+you may sometimes wish to use another.
+.TP
+.BI "\-b " "machine"
+The argument \c
+.I machine\c
+\& specifies the target machine for compilation.
+This is useful when you have installed GNU CC as a cross-compiler.
+.Sp
+The value to use for \c
+.I machine\c
+\& is the same as was specified as the
+machine type when configuring GNU CC as a cross-compiler. For
+example, if a cross-compiler was configured with `\|\c
+.B configure
+i386v\c
+\&\|', meaning to compile for an 80386 running System V, then you
+would specify `\|\c
+.B \-b i386v\c
+\&\|' to run that cross compiler.
+.Sp
+When you do not specify `\|\c
+.B \-b\c
+\&\|', it normally means to compile for
+the same type of machine that you are using.
+.TP
+.BI "\-V " "version"
+The argument \c
+.I version\c
+\& specifies which version of GNU CC to run.
+This is useful when multiple versions are installed. For example,
+.I version\c
+\& might be `\|\c
+.B 2.0\c
+\&\|', meaning to run GNU CC version 2.0.
+.Sp
+The default version, when you do not specify `\|\c
+.B \-V\c
+\&\|', is controlled
+by the way GNU CC is installed. Normally, it will be a version that
+is recommended for general use.
+.SH MACHINE DEPENDENT OPTIONS
+Each of the target machine types can have its own special options,
+starting with `\|\c
+.B \-m\c
+\&\|', to choose among various hardware models or
+configurations\(em\&for example, 68010 vs 68020, floating coprocessor or
+none. A single installed version of the compiler can compile for any
+model or configuration, according to the options specified.
+.PP
+Some configurations of the compiler also support additional special
+options, usually for command-line compatibility with other compilers on
+the same platform.
+.PP
+These are the `\|\c
+.B \-m\c
+\&\|' options defined for the 68000 series:
+.TP
+.B \-m68000
+.TP
+.B \-mc68000
+Generate output for a 68000. This is the default when the compiler is
+configured for 68000-based systems.
+.TP
+.B \-m68020
+.TP
+.B \-mc68020
+Generate output for a 68020 (rather than a 68000). This is the
+default when the compiler is configured for 68020-based systems.
+.TP
+.B \-m68881
+Generate output containing 68881 instructions for floating point.
+This is the default for most 68020-based systems unless
+.B \-nfp
+was specified when the compiler was configured.
+.TP
+.B \-m68030
+Generate output for a 68030. This is the default when the compiler is
+configured for 68030-based systems.
+.TP
+.B \-m68040
+Generate output for a 68040. This is the default when the compiler is
+configured for 68040-based systems.
+.TP
+.B \-m68020\-40
+Generate output for a 68040, without using any of the new instructions.
+This results in code which can run relatively efficiently on either a
+68020/68881 or a 68030 or a 68040.
+.TP
+.B \-mfpa
+Generate output containing Sun FPA instructions for floating point.
+.TP
+.B \-msoft\-float
+Generate output containing library calls for floating point.
+.I
+WARNING:
+the requisite libraries are not part of GNU CC. Normally the
+facilities of the machine's usual C compiler are used, but this can't
+be done directly in cross-compilation. You must make your own
+arrangements to provide suitable library functions for cross-compilation.
+.TP
+.B \-mshort
+Consider type \c
+.B int\c
+\& to be 16 bits wide, like \c
+.B short int\c
+\&.
+.TP
+.B \-mnobitfield
+Do not use the bit-field instructions. `\|\c
+.B \-m68000\c
+\&\|' implies
+`\|\c
+.B \-mnobitfield\c
+\&\|'.
+.TP
+.B \-mbitfield
+Do use the bit-field instructions. `\|\c
+.B \-m68020\c
+\&\|' implies
+`\|\c
+.B \-mbitfield\c
+\&\|'. This is the default if you use the unmodified
+sources.
+.TP
+.B \-mrtd
+Use a different function-calling convention, in which functions
+that take a fixed number of arguments return with the \c
+.B rtd
+instruction, which pops their arguments while returning. This
+saves one instruction in the caller since there is no need to pop
+the arguments there.
+.Sp
+This calling convention is incompatible with the one normally
+used on Unix, so you cannot use it if you need to call libraries
+compiled with the Unix compiler.
+.Sp
+Also, you must provide function prototypes for all functions that
+take variable numbers of arguments (including \c
+.B printf\c
+\&);
+otherwise incorrect code will be generated for calls to those
+functions.
+.Sp
+In addition, seriously incorrect code will result if you call a
+function with too many arguments. (Normally, extra arguments are
+harmlessly ignored.)
+.Sp
+The \c
+.B rtd\c
+\& instruction is supported by the 68010 and 68020
+processors, but not by the 68000.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the Vax:
+.TP
+.B \-munix
+Do not output certain jump instructions (\c
+.B aobleq\c
+\& and so on)
+that the Unix assembler for the Vax cannot handle across long
+ranges.
+.TP
+.B \-mgnu
+Do output those jump instructions, on the assumption that you
+will assemble with the GNU assembler.
+.TP
+.B \-mg
+Output code for g-format floating point numbers instead of d-format.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' switches are supported on the SPARC:
+.PP
+.B \-mfpu
+.TP
+.B \-mhard\-float
+Generate output containing floating point instructions. This is the
+default.
+.PP
+.B \-mno\-fpu
+.TP
+.B \-msoft\-float
+Generate output containing library calls for floating point.
+.I Warning:
+there is no GNU floating-point library for SPARC.
+Normally the facilities of the machine's usual C compiler are used, but
+this cannot be done directly in cross-compilation. You must make your
+own arrangements to provide suitable library functions for
+cross-compilation.
+.Sp
+.B \-msoft\-float
+changes the calling convention in the output file;
+therefore, it is only useful if you compile
+.I all
+of a program with this option.
+.PP
+.B \-mno\-epilogue
+.TP
+.B \-mepilogue
+With
+.B \-mepilogue
+(the default), the compiler always emits code for
+function exit at the end of each function. Any function exit in
+the middle of the function (such as a return statement in C) will
+generate a jump to the exit code at the end of the function.
+.Sp
+With
+.BR \-mno\-epilogue ,
+the compiler tries to emit exit code inline at every function exit.
+.PP
+.B \-mno\-v8
+.TP
+.B \-mv8
+.TP
+.B \-msparclite
+These three options select variations on the SPARC architecture.
+.Sp
+By default (unless specifically configured for the Fujitsu SPARClite),
+GCC generates code for the v7 variant of the SPARC architecture.
+.Sp
+.B \-mv8
+will give you SPARC v8 code. The only difference from v7
+code is that the compiler emits the integer multiply and integer
+divide instructions which exist in SPARC v8 but not in SPARC v7.
+.Sp
+.B \-msparclite
+will give you SPARClite code. This adds the integer
+multiply, integer divide step and scan (ffs) instructions which
+exist in SPARClite but not in SPARC v7.
+.PP
+.B \-mcypress
+.TP
+.B \-msupersparc
+These two options select the processor for which the code is optimized.
+.Sp
+With
+.B \-mcypress
+(the default), the compiler optimizes code for the Cypress CY7C602 chip, as
+used in the SparcStation/SparcServer 3xx series. This is also appropriate for
+the older SparcStation 1, 2, IPX, etc.
+.Sp
+With
+.B \-msupersparc
+the compiler optimizes code for the SuperSparc CPU, as used in the SparcStation
+10, 1000 and 2000 series. This flag also enables use of the full SPARC v8
+instruction set.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the Convex:
+.TP
+.B \-mc1
+Generate output for a C1. This is the default when the compiler is
+configured for a C1.
+.TP
+.B \-mc2
+Generate output for a C2. This is the default when the compiler is
+configured for a C2.
+.TP
+.B \-margcount
+Generate code which puts an argument count in the word preceding each
+argument list. Some nonportable Convex and Vax programs need this word.
+(Debuggers don't, except for functions with variable-length argument
+lists; this info is in the symbol table.)
+.TP
+.B \-mnoargcount
+Omit the argument count word. This is the default if you use the
+unmodified sources.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the AMD Am29000:
+.TP
+.B \-mdw
+Generate code that assumes the DW bit is set, i.e., that byte and
+halfword operations are directly supported by the hardware. This is the
+default.
+.TP
+.B \-mnodw
+Generate code that assumes the DW bit is not set.
+.TP
+.B \-mbw
+Generate code that assumes the system supports byte and halfword write
+operations. This is the default.
+.TP
+.B \-mnbw
+Generate code that assumes the system does not support byte and
+halfword write operations. This implies `\|\c
+.B \-mnodw\c
+\&\|'.
+.TP
+.B \-msmall
+Use a small memory model that assumes that all function addresses are
+either within a single 256 KB segment or at an absolute address of less
+than 256K. This allows the \c
+.B call\c
+\& instruction to be used instead
+of a \c
+.B const\c
+\&, \c
+.B consth\c
+\&, \c
+.B calli\c
+\& sequence.
+.TP
+.B \-mlarge
+Do not assume that the \c
+.B call\c
+\& instruction can be used; this is the
+default.
+.TP
+.B \-m29050
+Generate code for the Am29050.
+.TP
+.B \-m29000
+Generate code for the Am29000. This is the default.
+.TP
+.B \-mkernel\-registers
+Generate references to registers \c
+.B gr64-gr95\c
+\& instead of
+.B gr96-gr127\c
+\&. This option can be used when compiling kernel code
+that wants a set of global registers disjoint from that used by
+user-mode code.
+.Sp
+Note that when this option is used, register names in `\|\c
+.B \-f\c
+\&\|' flags
+must use the normal, user-mode, names.
+.TP
+.B \-muser\-registers
+Use the normal set of global registers, \c
+.B gr96-gr127\c
+\&. This is the
+default.
+.TP
+.B \-mstack\-check
+Insert a call to \c
+.B _\|_msp_check\c
+\& after each stack adjustment. This
+is often used for kernel code.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for Motorola 88K architectures:
+.TP
+.B \-m88000
+Generate code that works well on both the m88100 and the
+m88110.
+.TP
+.B \-m88100
+Generate code that works best for the m88100, but that also
+runs on the m88110.
+.TP
+.B \-m88110
+Generate code that works best for the m88110, and may not run
+on the m88100.
+.TP
+.B \-midentify\-revision
+Include an \c
+.B ident\c
+\& directive in the assembler output recording the
+source file name, compiler name and version, timestamp, and compilation
+flags used.
+.TP
+.B \-mno\-underscores
+In assembler output, emit symbol names without adding an underscore
+character at the beginning of each name. The default is to use an
+underscore as prefix on each name.
+.TP
+.B \-mno\-check\-zero\-division
+.TP
+.B \-mcheck\-zero\-division
+Early models of the 88K architecture had problems with division by zero;
+in particular, many of them didn't trap. Use these options to avoid
+including (or to include explicitly) additional code to detect division
+by zero and signal an exception. All GCC configurations for the 88K use
+`\|\c
+.B \-mcheck\-zero\-division\c
+\&\|' by default.
+.TP
+.B \-mocs\-debug\-info
+.TP
+.B \-mno\-ocs\-debug\-info
+Include (or omit) additional debugging information (about
+registers used in each stack frame) as specified in the 88Open Object
+Compatibility Standard, \*(lqOCS\*(rq. This extra information is not needed
+by GDB. The default for DG/UX, SVr4, and Delta 88 SVr3.2 is to
+include this information; other 88k configurations omit this information
+by default.
+.TP
+.B \-mocs\-frame\-position
+.TP
+.B \-mno\-ocs\-frame\-position
+Force (or do not require) register values to be stored in a particular
+place in stack frames, as specified in OCS. The DG/UX, Delta88 SVr3.2,
+and BCS configurations use `\|\c
+.B \-mocs\-frame\-position\c
+\&\|'; other 88k
+configurations have the default `\|\c
+.B \-mno\-ocs\-frame\-position\c
+\&\|'.
+.TP
+.B \-moptimize\-arg\-area
+.TP
+.B \-mno\-optimize\-arg\-area
+Control how to store function arguments in stack frames.
+`\|\c
+.B \-moptimize\-arg\-area\c
+\&\|' saves space, but may break some
+debuggers (not GDB). `\|\c
+.B \-mno\-optimize\-arg\-area\c
+\&\|' conforms better to
+standards. By default GCC does not optimize the argument area.
+.TP
+.BI "\-mshort\-data\-" "num"
+Generate smaller data references by making them relative to \c
+.B r0\c
+\&,
+which allows loading a value using a single instruction (rather than the
+usual two). You control which data references are affected by
+specifying \c
+.I num\c
+\& with this option. For example, if you specify
+`\|\c
+.B \-mshort\-data\-512\c
+\&\|', then the data references affected are those
+involving displacements of less than 512 bytes.
+`\|\c
+.B \-mshort\-data\-\c
+.I num\c
+\&\c
+\&\|' is not effective for \c
+.I num\c
+\& greater
+than 64K.
+.PP
+.B \-mserialize-volatile
+.TP
+.B \-mno-serialize-volatile
+Do, or do not, generate code to guarantee sequential consistency of
+volatile memory references.
+.Sp
+GNU CC always guarantees consistency by default, for the preferred
+processor submodel. How this is done depends on the submodel.
+.Sp
+The m88100 processor does not reorder memory references and so always
+provides sequential consistency. If you use `\|\c
+.B \-m88100\c
+\&\|', GNU CC does
+not generate any special instructions for sequential consistency.
+.Sp
+The order of memory references made by the m88110 processor does not
+always match the order of the instructions requesting those references.
+In particular, a load instruction may execute before a preceding store
+instruction. Such reordering violates sequential consistency of
+volatile memory references, when there are multiple processors. When
+you use `\|\c
+.B \-m88000\c
+\&\|' or `\|\c
+.B \-m88110\c
+\&\|', GNU CC generates special
+instructions when appropriate, to force execution in the proper order.
+.Sp
+The extra code generated to guarantee consistency may affect the
+performance of your application. If you know that you can safely forgo
+this guarantee, you may use the option `\|\c
+.B \-mno-serialize-volatile\c
+\&\|'.
+.Sp
+If you use the `\|\c
+.B \-m88100\c
+\&\|' option but require sequential consistency
+when running on the m88110 processor, you should use
+`\|\c
+.B \-mserialize-volatile\c
+\&\|'.
+.PP
+.B \-msvr4
+.TP
+.B \-msvr3
+Turn on (`\|\c
+.B \-msvr4\c
+\&\|') or off (`\|\c
+.B \-msvr3\c
+\&\|') compiler extensions
+related to System V release 4 (SVr4). This controls the following:
+.TP
+\ \ \ \(bu
+Which variant of the assembler syntax to emit (which you can select
+independently using `\|\c
+.B \-mversion\-03.00\c
+\&\|').
+.TP
+\ \ \ \(bu
+`\|\c
+.B \-msvr4\c
+\&\|' makes the C preprocessor recognize `\|\c
+.B #pragma weak\c
+\&\|'.
+.TP
+\ \ \ \(bu
+`\|\c
+.B \-msvr4\c
+\&\|' makes GCC issue additional declaration directives used in
+SVr4.
+.PP
+`\|\c
+.B \-msvr3\c
+\&\|' is the default for all m88K configurations except
+the SVr4 configuration.
+.TP
+.B \-mtrap\-large\-shift
+.TP
+.B \-mhandle\-large\-shift
+Include code to detect bit-shifts of more than 31 bits; respectively,
+trap such shifts or emit code to handle them properly. By default GCC
+makes no special provision for large bit shifts.
+.TP
+.B \-muse\-div\-instruction
+Very early models of the 88K architecture didn't have a divide
+instruction, so GCC avoids that instruction by default. Use this option
+to specify that it's safe to use the divide instruction.
+.TP
+.B \-mversion\-03.00
+In the DG/UX configuration, there are two flavors of SVr4. This option
+modifies
+.B \-msvr4
+to select whether the hybrid-COFF or real-ELF
+flavor is used. All other configurations ignore this option.
+.TP
+.B \-mwarn\-passed\-structs
+Warn when a function passes a struct as an argument or result.
+Structure-passing conventions have changed during the evolution of the C
+language, and are often the source of portability problems. By default,
+GCC issues no such warning.
+.PP
+These options are defined for the IBM RS6000:
+.PP
+.B \-mfp\-in\-toc
+.TP
+.B \-mno\-fp\-in\-toc
+Control whether or not floating-point constants go in the Table of
+Contents (TOC), a table of all global variable and function addresses. By
+default GCC puts floating-point constants there; if the TOC overflows,
+`\|\c
+.B \-mno\-fp\-in\-toc\c
+\&\|' will reduce the size of the TOC, which may avoid
+the overflow.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the IBM RT PC:
+.TP
+.B \-min\-line\-mul
+Use an in-line code sequence for integer multiplies. This is the
+default.
+.TP
+.B \-mcall\-lib\-mul
+Call \c
+.B lmul$$\c
+\& for integer multiplies.
+.TP
+.B \-mfull\-fp\-blocks
+Generate full-size floating point data blocks, including the minimum
+amount of scratch space recommended by IBM. This is the default.
+.TP
+.B \-mminimum\-fp\-blocks
+Do not include extra scratch space in floating point data blocks. This
+results in smaller code, but slower execution, since scratch space must
+be allocated dynamically.
+.TP
+.B \-mfp\-arg\-in\-fpregs
+Use a calling sequence incompatible with the IBM calling convention in
+which floating point arguments are passed in floating point registers.
+Note that \c
+.B varargs.h\c
+\& and \c
+.B stdarg.h\c
+\& will not work with
+floating point operands if this option is specified.
+.TP
+.B \-mfp\-arg\-in\-gregs
+Use the normal calling convention for floating point arguments. This is
+the default.
+.TP
+.B \-mhc\-struct\-return
+Return structures of more than one word in memory, rather than in a
+register. This provides compatibility with the MetaWare HighC (hc)
+compiler. Use `\|\c
+.B \-fpcc\-struct\-return\c
+\&\|' for compatibility with the
+Portable C Compiler (pcc).
+.TP
+.B \-mnohc\-struct\-return
+Return some structures of more than one word in registers, when
+convenient. This is the default. For compatibility with the
+IBM-supplied compilers, use either `\|\c
+.B \-fpcc\-struct\-return\c
+\&\|' or
+`\|\c
+.B \-mhc\-struct\-return\c
+\&\|'.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the MIPS family of computers:
+.TP
+.BI "\-mcpu=" "cpu-type"
+Assume the defaults for the machine type
+.I cpu-type
+when
+scheduling instructions. The default
+.I cpu-type
+is
+.BR default ,
+which picks the longest cycle times of any of the machines, so
+that the code runs at reasonable rates on all MIPS CPUs. Other
+choices for
+.I cpu-type
+are
+.BR r2000 ,
+.BR r3000 ,
+.BR r4000 ,
+and
+.BR r6000 .
+While picking a specific
+.I cpu-type
+will schedule things appropriately for that particular chip, the
+compiler will not generate any code that does not meet level 1 of the
+MIPS ISA (instruction set architecture) without the
+.B \-mips2
+or
+.B \-mips3
+switches being used.
+.TP
+.B \-mips2
+Issue instructions from level 2 of the MIPS ISA (branch likely, square
+root instructions). The
+.B \-mcpu=r4000
+or
+.B \-mcpu=r6000
+switch must be used in conjunction with
+.BR \-mips2 .
+.TP
+.B \-mips3
+Issue instructions from level 3 of the MIPS ISA (64 bit instructions).
+The
+.B \-mcpu=r4000
+switch must be used in conjunction with
+.BR \-mips3 .
+.TP
+.B \-mint64
+.TP
+.B \-mlong64
+.TP
+.B \-mlonglong128
+These options don't work at present.
+.TP
+.B \-mmips\-as
+Generate code for the MIPS assembler, and invoke
+.B mips\-tfile
+to add normal debug information. This is the default for all
+platforms except for the OSF/1 reference platform, using the OSF/rose
+object format. If any of the
+.BR \-ggdb ,
+.BR \-gstabs ,
+or
+.B \-gstabs+
+switches are used, the
+.B mips\-tfile
+program will encapsulate the stabs within MIPS ECOFF.
+.TP
+.B \-mgas
+Generate code for the GNU assembler. This is the default on the OSF/1
+reference platform, using the OSF/rose object format.
+.TP
+.B \-mrnames
+.TP
+.B \-mno\-rnames
+The
+.B \-mrnames
+switch says to output code using the MIPS software names for the
+registers, instead of the hardware names (i.e.,
+.B a0
+instead of
+.BR $4 ).
+The GNU assembler does not support the
+.B \-mrnames
+switch, and the MIPS assembler will be instructed to run the MIPS C
+preprocessor over the source file. The
+.B \-mno\-rnames
+switch is the default.
+.TP
+.B \-mgpopt
+.TP
+.B \-mno\-gpopt
+The
+.B \-mgpopt
+switch says to write all of the data declarations before the
+instructions in the text section, to allow the MIPS assembler to
+generate one word memory references instead of using two words for
+short global or static data items. This is on by default if
+optimization is selected.
+.TP
+.B \-mstats
+.TP
+.B \-mno\-stats
+For each non-inline function processed, the
+.B \-mstats
+switch causes the compiler to emit one line to the standard error file
+to print statistics about the program (number of registers saved,
+stack size, etc.).
+.TP
+.B \-mmemcpy
+.TP
+.B \-mno\-memcpy
+The
+.B \-mmemcpy
+switch makes all block moves call the appropriate string function
+.RB ( memcpy
+or
+.BR bcopy )
+instead of possibly generating inline code.
+.TP
+.B \-mmips\-tfile
+.TP
+.B \-mno\-mips\-tfile
+The
+.B \-mno\-mips\-tfile
+switch causes the compiler not to postprocess the object file with the
+.B mips\-tfile
+program after the MIPS assembler has generated it, to add debug
+support. If
+.B mips\-tfile
+is not run, then no local variables will be available to the debugger.
+In addition,
+.B stage2
+and
+.B stage3
+objects will have the temporary file names passed to the assembler
+embedded in the object file, which means the objects will not compare
+the same.
+.TP
+.B \-msoft\-float
+Generate output containing library calls for floating point.
+.I
+WARNING:
+the requisite libraries are not part of GNU CC. Normally the
+facilities of the machine's usual C compiler are used, but this can't
+be done directly in cross-compilation. You must make your own
+arrangements to provide suitable library functions for cross-compilation.
+.TP
+.B \-mhard\-float
+Generate output containing floating point instructions. This is the
+default if you use the unmodified sources.
+.TP
+.B \-mfp64
+Assume that the
+.B FR
+bit in the status word is on, and that there are 32 64-bit floating
+point registers, instead of 32 32-bit floating point registers. You
+must also specify the
+.B \-mcpu=r4000
+and
+.B \-mips3
+switches.
+.TP
+.B \-mfp32
+Assume that there are 32 32-bit floating point registers. This is the
+default.
+.PP
+.B \-mabicalls
+.TP
+.B \-mno\-abicalls
+Emit (or do not emit) the
+.BR \&.abicalls ,
+.BR \&.cpload ,
+and
+.B \&.cprestore
+pseudo operations that some System V.4 ports use for position
+independent code.
+.TP
+.B \-mhalf\-pic
+.TP
+.B \-mno\-half\-pic
+The
+.B \-mhalf\-pic
+switch says to put pointers to extern references into the data section
+and load them up, rather than put the references in the text section.
+This option does not work at present.
+.TP
+.BI "\-G " "num"
+Put global and static items less than or equal to
+.I num
+bytes into the small data or bss sections instead of the normal data
+or bss section. This allows the assembler to emit one word memory
+reference instructions based on the global pointer
+.RB ( gp
+or
+.BR $28 ),
+instead of the normal two words used. By default,
+.I num
+is 8 when the MIPS assembler is used, and 0 when the GNU
+assembler is used. The
+.BI \-G num
+switch is also passed to the assembler and linker. All modules should
+be compiled with the same
+.BI \-G num
+value.
+.TP
+.B \-nocpp
+Tell the MIPS assembler not to run its preprocessor over user
+assembler files (with a `\|\c
+.B .s\c
+\&\|' suffix) when assembling them.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the Intel 80386 family of computers:
+.TP
+.B \-m486
+.TP
+.B \-mno\-486
+Control whether or not code is optimized for a 486 instead of a
+386. Code generated for a 486 will run on a 386 and vice versa.
+.TP
+.B \-msoft\-float
+Generate output containing library calls for floating point.
+.I Warning:
+the requisite libraries are not part of GNU CC.
+Normally the facilities of the machine's usual C compiler are used, but
+this can't be done directly in cross-compilation. You must make your
+own arrangements to provide suitable library functions for
+cross-compilation.
+.Sp
+On machines where a function returns floating point results in the 80387
+register stack, some floating point opcodes may be emitted even if
+`\|\c
+.B \-msoft-float\c
+\&\|' is used.
+.TP
+.B \-mno-fp-ret-in-387
+Do not use the FPU registers for return values of functions.
+.Sp
+The usual calling convention has functions return values of types
+.B float\c
+\& and \c
+.B double\c
+\& in an FPU register, even if there
+is no FPU. The idea is that the operating system should emulate
+an FPU.
+.Sp
+The option `\|\c
+.B \-mno-fp-ret-in-387\c
+\&\|' causes such values to be returned
+in ordinary CPU registers instead.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the HPPA family of computers:
+.TP
+.B \-mpa-risc-1-0
+Generate code for a PA 1.0 processor.
+.TP
+.B \-mpa-risc-1-1
+Generate code for a PA 1.1 processor.
+.TP
+.B \-mkernel
+Generate code which is suitable for use in kernels. Specifically, avoid
+.B add\c
+\& instructions in which one of the arguments is the DP register;
+generate \c
+.B addil\c
+\& instructions instead. This avoids a rather serious
+bug in the HP-UX linker.
+.TP
+.B \-mshared-libs
+Generate code that can be linked against HP-UX shared libraries. This option
+is not fully functional yet, and is not on by default for any PA target. Using
+this option can cause incorrect code to be generated by the compiler.
+.TP
+.B \-mno-shared-libs
+Don't generate code that will be linked against shared libraries. This is
+the default for all PA targets.
+.TP
+.B \-mlong-calls
+Generate code which allows calls to functions greater than 256K away from
+the caller when the caller and callee are in the same source file. Do
+not turn this option on unless code refuses to link with \*(lqbranch out of
+range errors\*(rq from the linker.
+.TP
+.B \-mdisable-fpregs
+Prevent floating point registers from being used in any manner. This is
+necessary for compiling kernels which perform lazy context switching of
+floating point registers. If you use this option and attempt to perform
+floating point operations, the compiler will abort.
+.TP
+.B \-mdisable-indexing
+Prevent the compiler from using indexing address modes. This avoids some
+rather obscure problems when compiling MIG generated code under MACH.
+.TP
+.B \-mtrailing-colon
+Add a colon to the end of label definitions (for ELF assemblers).
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the Intel 80960 family of computers:
+.TP
+.BI "\-m" "cpu-type"
+Assume the defaults for the machine type
+.I cpu-type
+for instruction and addressing-mode availability and alignment.
+The default
+.I cpu-type
+is
+.BR kb ;
+other choices are
+.BR ka ,
+.BR mc ,
+.BR ca ,
+.BR cf ,
+.BR sa ,
+and
+.BR sb .
+.TP
+.B \-mnumerics
+.TP
+.B \-msoft\-float
+The
+.B \-mnumerics
+option indicates that the processor does support
+floating-point instructions. The
+.B \-msoft\-float
+option indicates
+that floating-point support should not be assumed.
+.TP
+.B \-mleaf\-procedures
+.TP
+.B \-mno\-leaf\-procedures
+Do (or do not) attempt to alter leaf procedures to be callable with the
+.I bal
+instruction as well as
+.IR call .
+This will result in more
+efficient code for explicit calls when the
+.I bal
+instruction can be
+substituted by the assembler or linker, but less efficient code in other
+cases, such as calls via function pointers, or using a linker that doesn't
+support this optimization.
+.TP
+.B \-mtail\-call
+.TP
+.B \-mno\-tail\-call
+Do (or do not) make additional attempts (beyond those of the
+machine-independent portions of the compiler) to optimize tail-recursive
+calls into branches. You may not want to do this because the detection of
+cases where this is not valid is not totally complete. The default is
+.BR \-mno\-tail\-call .
+.TP
+.B \-mcomplex\-addr
+.TP
+.B \-mno\-complex\-addr
+Assume (or do not assume) that the use of a complex addressing mode is a
+win on this implementation of the i960. Complex addressing modes may not
+be worthwhile on the K-series, but they definitely are on the C-series.
+The default is currently
+.B \-mcomplex\-addr
+for all processors except
+the CB and CC.
+.TP
+.B \-mcode\-align
+.TP
+.B \-mno\-code\-align
+Align code to 8-byte boundaries for faster fetching (or don't bother).
+Currently turned on by default for C-series implementations only.
+.TP
+.B \-mic\-compat
+.TP
+.B \-mic2.0\-compat
+.TP
+.B \-mic3.0\-compat
+Enable compatibility with iC960 v2.0 or v3.0.
+.TP
+.B \-masm\-compat
+.TP
+.B \-mintel\-asm
+Enable compatibility with the iC960 assembler.
+.TP
+.B \-mstrict\-align
+.TP
+.B \-mno\-strict\-align
+Do not permit (do permit) unaligned accesses.
+.TP
+.B \-mold\-align
+Enable structure-alignment compatibility with Intel's gcc release version
+1.3 (based on gcc 1.37). Currently this is buggy in that
+.B #pragma align 1
+is always assumed as well, and cannot be turned off.
+.PP
+These `\|\c
+.B \-m\c
+\&\|' options are defined for the DEC Alpha implementations:
+.TP
+.B \-mno-soft-float
+.TP
+.B \-msoft-float
+Use (do not use) the hardware floating-point instructions for
+floating-point operations. When \c
+.B \-msoft-float\c
+\& is specified,
+functions in `\|\c
+.B libgcc1.c\c
+\&\|' will be used to perform floating-point
+operations. Unless they are replaced by routines that emulate the
+floating-point operations, or compiled in such a way as to call such
+emulation routines, these routines will issue floating-point
+operations. If you are compiling for an Alpha without floating-point
+operations, you must ensure that the library is built so as not to call
+them.
+.Sp
+Note that Alpha implementations without floating-point operations are
+required to have floating-point registers.
+.TP
+.B \-mfp-regs
+.TP
+.B \-mno-fp-regs
+Generate code that uses (does not use) the floating-point register set.
+.B \-mno-fp-regs\c
+\& implies \c
+.B \-msoft-float\c
+\&. If the floating-point
+register set is not used, floating point operands are passed in integer
+registers as if they were integers and floating-point results are passed
+in $0 instead of $f0. This is a non-standard calling sequence, so any
+function with a floating-point argument or return value called by code
+compiled with \c
+.B \-mno-fp-regs\c
+\& must also be compiled with that
+option.
+.Sp
+A typical use of this option is building a kernel that does not use,
+and hence need not save and restore, any floating-point registers.
+.PP
+These additional options are available on System V Release 4 for
+compatibility with other compilers on those systems:
+.TP
+.B \-G
+On SVr4 systems, \c
+.B gcc\c
+\& accepts the option `\|\c
+.B \-G\c
+\&\|' (and passes
+it to the system linker), for compatibility with other compilers.
+However, we suggest you use `\|\c
+.B \-symbolic\c
+\&\|' or `\|\c
+.B \-shared\c
+\&\|' as
+appropriate, instead of supplying linker options on the \c
+.B gcc
+command line.
+.TP
+.B \-Qy
+Identify the versions of each tool used by the compiler, in a
+.B .ident\c
+\& assembler directive in the output.
+.TP
+.B \-Qn
+Refrain from adding \c
+.B .ident\c
+\& directives to the output file (this is
+the default).
+.TP
+.BI "\-YP," "dirs"
+Search the directories \c
+.I dirs\c
+\&, and no others, for libraries
+specified with `\|\c
+.B \-l\c
+\&\|'. You can separate directory entries in
+.I dirs\c
+\& from one another with colons.
+.TP
+.BI "\-Ym," "dir"
+Look in the directory \c
+.I dir\c
+\& to find the M4 preprocessor.
+The assembler uses this option.
+.SH CODE GENERATION OPTIONS
+These machine-independent options control the interface conventions
+used in code generation.
+.PP
+Most of them begin with `\|\c
+.B \-f\c
+\&\|'. These options have both positive and negative forms; the negative form
+of `\|\c
+.B \-ffoo\c
+\&\|' would be `\|\c
+.B \-fno\-foo\c
+\&\|'. In the table below, only
+one of the forms is listed\(em\&the one which is not the default. You
+can figure out the other form by either removing `\|\c
+.B no\-\c
+\&\|' or adding
+it.
+.TP
+.B \-fnonnull\-objects
+Assume that objects reached through references are not null
+(C++ only).
+.Sp
+Normally, GNU C++ makes conservative assumptions about objects reached
+through references. For example, the compiler must check that \c
+.B a
+is not null in code like the following:
+.Sp
+obj &a = g ();
+.br
+a.f (2);
+.Sp
+Checking that references of this sort have non-null values requires
+extra code, however, and it is unnecessary for many programs. You can
+use `\|\c
+.B \-fnonnull-objects\c
+\&\|' to omit the checks for null, if your
+program doesn't require checking.
+.TP
+.B \-fpcc\-struct\-return
+Use the same convention for returning \c
+.B struct\c
+\& and \c
+.B union
+values that is used by the usual C compiler on your system. This
+convention is less efficient for small structures, and on many
+machines it fails to be reentrant; but it has the advantage of
+allowing intercallability between GCC-compiled code and PCC-compiled
+code.
+.TP
+.B \-freg\-struct\-return
+Use the convention that
+.B struct
+and
+.B union
+values are returned in registers when possible. This is more
+efficient for small structures than
+.BR \-fpcc\-struct\-return .
+.Sp
+If you specify neither
+.B \-fpcc\-struct\-return
+nor
+.BR \-freg\-struct\-return ,
+GNU CC defaults to whichever convention is standard for the target.
+If there is no standard convention, GNU CC defaults to
+.BR \-fpcc\-struct\-return .
+.TP
+.B \-fshort\-enums
+Allocate to an \c
+.B enum\c
+\& type only as many bytes as it needs for the
+declared range of possible values. Specifically, the \c
+.B enum\c
+\& type
+will be equivalent to the smallest integer type which has enough room.
+.TP
+.B \-fshort\-double
+Use the same size for
+.B double
+as for
+.B float
+\&.
+.TP
+.B \-fshared\-data
+Requests that the data and non-\c
+.B const\c
+\& variables of this
+compilation be shared data rather than private data. The distinction
+makes sense only on certain operating systems, where shared data is
+shared between processes running the same program, while private data
+exists in one copy per process.
+.TP
+.B \-fno\-common
+Allocate even uninitialized global variables in the bss section of the
+object file, rather than generating them as common blocks. This has the
+effect that if the same variable is declared (without \c
+.B extern\c
+\&) in
+two different compilations, you will get an error when you link them.
+The only reason this might be useful is if you wish to verify that the
+program will work on other systems which always work this way.
+.TP
+.B \-fno\-ident
+Ignore the `\|\c
+.B #ident\c
+\&\|' directive.
+.TP
+.B \-fno\-gnu\-linker
+Do not output global initializations (such as C++ constructors and
+destructors) in the form used by the GNU linker (on systems where the GNU
+linker is the standard method of handling them). Use this option when
+you want to use a non-GNU linker, which also requires using the
+.B collect2\c
+\& program to make sure the system linker includes
+constructors and destructors. (\c
+.B collect2\c
+\& is included in the GNU CC
+distribution.) For systems which \c
+.I must\c
+\& use \c
+.B collect2\c
+\&, the
+compiler driver \c
+.B gcc\c
+\& is configured to do this automatically.
+.TP
+.B \-finhibit-size-directive
+Don't output a \c
+.B .size\c
+\& assembler directive, or anything else that
+would cause trouble if the function is split in the middle, and the
+two halves are placed at locations far apart in memory. This option is
+used when compiling `\|\c
+.B crtstuff.c\c
+\&\|'; you should not need to use it
+for anything else.
+.TP
+.B \-fverbose-asm
+Put extra commentary information in the generated assembly code to
+make it more readable. This option is generally only of use to those
+who actually need to read the generated assembly code (perhaps while
+debugging the compiler itself).
+.TP
+.B \-fvolatile
+Consider all memory references through pointers to be volatile.
+.TP
+.B \-fvolatile\-global
+Consider all memory references to extern and global data items to
+be volatile.
+.TP
+.B \-fpic
+If supported for the target machine, generate position-independent code,
+suitable for use in a shared library.
+.TP
+.B \-fPIC
+If supported for the target machine, emit position-independent code,
+suitable for dynamic linking, even if branches need large displacements.
+.TP
+.BI "\-ffixed\-" "reg"
+Treat the register named \c
+.I reg\c
+\& as a fixed register; generated code
+should never refer to it (except perhaps as a stack pointer, frame
+pointer or in some other fixed role).
+.Sp
+.I reg\c
+\& must be the name of a register. The register names accepted
+are machine-specific and are defined in the \c
+.B REGISTER_NAMES
+macro in the machine description macro file.
+.Sp
+This flag does not have a negative form, because it specifies a
+three-way choice.
+.TP
+.BI "\-fcall\-used\-" "reg"
+Treat the register named \c
+.I reg\c
+\& as an allocatable register that is
+clobbered by function calls. It may be allocated for temporaries or
+variables that do not live across a call. Functions compiled this way
+will not save and restore the register \c
+.I reg\c
+\&.
+.Sp
+Use of this flag for a register that has a fixed pervasive role in the
+machine's execution model, such as the stack pointer or frame pointer,
+will produce disastrous results.
+.Sp
+This flag does not have a negative form, because it specifies a
+three-way choice.
+.TP
+.BI "\-fcall\-saved\-" "reg"
+Treat the register named \c
+.I reg\c
+\& as an allocatable register saved by
+functions. It may be allocated even for temporaries or variables that
+live across a call. Functions compiled this way will save and restore
+the register \c
+.I reg\c
+\& if they use it.
+.Sp
+Use of this flag for a register that has a fixed pervasive role in the
+machine's execution model, such as the stack pointer or frame pointer,
+will produce disastrous results.
+.Sp
+A different sort of disaster will result from the use of this flag for
+a register in which function values may be returned.
+.Sp
+This flag does not have a negative form, because it specifies a
+three-way choice.
+.SH PRAGMAS
+Two `\|\c
+.B #pragma\c
+\&\|' directives are supported for GNU C++, to permit using the same
+header file for two purposes: as a definition of interfaces to a given
+object class, and as the full definition of the contents of that object class.
+.TP
+.B #pragma interface
+(C++ only.)
+Use this directive in header files that define object classes, to save
+space in most of the object files that use those classes. Normally,
+local copies of certain information (backup copies of inline member
+functions, debugging information, and the internal tables that
+implement virtual functions) must be kept in each object file that
+includes class definitions. You can use this pragma to avoid such
+duplication. When a header file containing `\|\c
+.B #pragma interface\c
+\&\|' is included in a compilation, this auxiliary information
+will not be generated (unless the main input source file itself uses
+`\|\c
+.B #pragma implementation\c
+\&\|'). Instead, the object files will contain references to be
+resolved at link time.
+.TP
+.B #pragma implementation
+.TP
+\fB#pragma implementation "\fP\fIobjects\fP\fB.h"\fP
+(C++ only.)
+Use this pragma in a main input file, when you want full output from
+included header files to be generated (and made globally visible).
+The included header file, in turn, should use `\|\c
+.B #pragma interface\c
+\&\|'.
+Backup copies of inline member functions, debugging information, and
+the internal tables used to implement virtual functions are all
+generated in implementation files.
+.Sp
+If you use `\|\c
+.B #pragma implementation\c
+\&\|' with no argument, it applies to an include file with the same
+basename as your source file; for example, in `\|\c
+.B allclass.cc\c
+\&\|', `\|\c
+.B #pragma implementation\c
+\&\|' by itself is equivalent to `\|\c
+.B
+#pragma implementation "allclass.h"\c
+\&\|'. Use the string argument if you want a single implementation
+file to include code from multiple header files.
+.Sp
+There is no way to split up the contents of a single header file into
+multiple implementation files.
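+.Sp
+As an illustration (the class and file names here are only an
+example), a header `\|\c
+.B allclass.h\c
+\&\|' might contain
+.Sp
+.nf
+#pragma interface
+class allclass { public: int f (int); };
+.fi
+.Sp
+while the main input file `\|\c
+.B allclass.cc\c
+\&\|' would contain
+.Sp
+.nf
+#pragma implementation
+#include "allclass.h"
+int allclass::f (int i) { return i; }
+.fi
+.Sp
+so that the auxiliary information for the class is emitted only when
+`\|\c
+.B allclass.cc\c
+\&\|' is compiled.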
+.SH FILES
+.nf
+.ta \w'LIBDIR/g++\-include 'u
+file.c C source file
+file.h C header (preprocessor) file
+file.i preprocessed C source file
+file.C C++ source file
+file.cc C++ source file
+file.cxx C++ source file
+file.m Objective-C source file
+file.s assembly language file
+file.o object file
+a.out link edited output
+\fITMPDIR\fR/cc\(** temporary files
+\fILIBDIR\fR/cpp preprocessor
+\fILIBDIR\fR/cc1 compiler for C
+\fILIBDIR\fR/cc1plus compiler for C++
+\fILIBDIR\fR/collect linker front end needed on some machines
+\fILIBDIR\fR/libgcc.a GCC subroutine library
+/lib/crt[01n].o start-up routine
+\fILIBDIR\fR/ccrt0 additional start-up routine for C++
+/lib/libc.a standard C library, see
+.IR intro (3)
+/usr/include standard directory for \fB#include\fP files
+\fILIBDIR\fR/include standard gcc directory for \fB#include\fP files
+\fILIBDIR\fR/g++\-include additional g++ directory for \fB#include\fP
+.Sp
+.fi
+.I LIBDIR
+is usually
+.B /usr/local/lib/\c
+.IR machine / version .
+.br
+.I TMPDIR
+comes from the environment variable
+.B TMPDIR
+(default
+.B /usr/tmp
+if available, else
+.B /tmp\c
+\&).
+.SH "SEE ALSO"
+cpp(1), as(1), ld(1), gdb(1), adb(1), dbx(1), sdb(1).
+.br
+.RB "`\|" gcc "\|', `\|" cpp \|',
+.RB "`\|" as "\|', `\|" ld \|',
+and
+.RB `\| gdb \|'
+entries in
+.B info\c
+\&.
+.br
+.I
+Using and Porting GNU CC (for version 2.0)\c
+, Richard M. Stallman;
+.I
+The C Preprocessor\c
+, Richard M. Stallman;
+.I
+Debugging with GDB: the GNU Source-Level Debugger\c
+, Richard M. Stallman and Roland H. Pesch;
+.I
+Using as: the GNU Assembler\c
+, Dean Elsner, Jay Fenlason & friends;
+.I
+ld: the GNU linker\c
+, Steve Chamberlain and Roland Pesch.
+.SH BUGS
+For instructions on reporting bugs, see the GCC manual.
+.SH COPYING
+Copyright
+.if t \(co
+1991, 1992, 1993 Free Software Foundation, Inc.
+.PP
+Permission is granted to make and distribute verbatim copies of
+this manual provided the copyright notice and this permission notice
+are preserved on all copies.
+.PP
+Permission is granted to copy and distribute modified versions of this
+manual under the conditions for verbatim copying, provided that the
+entire resulting derived work is distributed under the terms of a
+permission notice identical to this one.
+.PP
+Permission is granted to copy and distribute translations of this
+manual into another language, under the above conditions for modified
+versions, except that this permission notice may be included in
+translations approved by the Free Software Foundation instead of in
+the original English.
+.SH AUTHORS
+See the GNU CC Manual for the contributors to GNU CC.
diff --git a/gnu/usr.bin/cc/cc/gcc.c b/gnu/usr.bin/cc/cc/gcc.c
new file mode 100644
index 0000000..4f877ec
--- /dev/null
+++ b/gnu/usr.bin/cc/cc/gcc.c
@@ -0,0 +1,4896 @@
+/* Compiler driver program that can handle many languages.
+ Copyright (C) 1987, 1989, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+This paragraph is here to try to keep Sun CC from dying.
+The number of chars here seems crucial!!!! */
+
+/* This program is the user interface to the C compiler and possibly to
+other compilers. It is used because compilation is a complicated procedure
+which involves running several programs and passing temporary files between
+them, forwarding the user's switches to those programs selectively,
+and deleting the temporary files at the end.
+
+CC recognizes how to compile each input file by suffixes in the file names.
+Once it knows which kind of compilation to perform, the procedure for
+compilation is specified by a string called a "spec". */
+
+#include <sys/types.h>
+#include <ctype.h>
+#include <signal.h>
+#include <sys/stat.h>
+#include <sys/file.h> /* May get R_OK, etc. on some systems. */
+
+#include "config.h"
+#include "obstack.h"
+#ifdef __STDC__
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include <stdio.h>
+
+/* Include multi-lib information. */
+#include "multilib.h"
+
+#ifndef R_OK
+#define R_OK 4
+#define W_OK 2
+#define X_OK 1
+#endif
+
+/* Add prototype support. */
+#ifndef PROTO
+#if defined (USE_PROTOTYPES) ? USE_PROTOTYPES : defined (__STDC__)
+#define PROTO(ARGS) ARGS
+#else
+#define PROTO(ARGS) ()
+#endif
+#endif
+
+#ifndef VPROTO
+#ifdef __STDC__
+#define PVPROTO(ARGS) ARGS
+#define VPROTO(ARGS) ARGS
+#define VA_START(va_list,var) va_start(va_list,var)
+#else
+#define PVPROTO(ARGS) ()
+#define VPROTO(ARGS) (va_alist) va_dcl
+#define VA_START(va_list,var) va_start(va_list)
+#endif
+#endif
+
+/* Define a generic NULL if one hasn't already been defined. */
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef GENERIC_PTR
+#if defined (USE_PROTOTYPES) ? USE_PROTOTYPES : defined (__STDC__)
+#define GENERIC_PTR void *
+#else
+#define GENERIC_PTR char *
+#endif
+#endif
+
+#ifndef NULL_PTR
+#define NULL_PTR ((GENERIC_PTR)0)
+#endif
+
+#ifdef USG
+#define vfork fork
+#endif /* USG */
+
+/* On MSDOS, write temp files in current dir
+ because there's no place else we can expect to use. */
+#ifdef __MSDOS__
+#ifndef P_tmpdir
+#define P_tmpdir "."
+#endif
+#endif
+
+/* Test if something is a normal file. */
+#ifndef S_ISREG
+#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
+#endif
+
+/* Test if something is a directory. */
+#ifndef S_ISDIR
+#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
+#endif
+
+/* By default there is no special suffix for executables. */
+#ifndef EXECUTABLE_SUFFIX
+#define EXECUTABLE_SUFFIX ""
+#endif
+
+/* By default, colon separates directories in a path. */
+#ifndef PATH_SEPARATOR
+#define PATH_SEPARATOR ':'
+#endif
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+extern void free ();
+extern char *getenv ();
+
+extern int errno, sys_nerr;
+#if defined(bsd4_4) || defined(__NetBSD__)
+extern const char *const sys_errlist[];
+#else
+extern char *sys_errlist[];
+#endif
+
+extern int execv (), execvp ();
+
+/* If a stage of compilation returns an exit status >= 1,
+ compilation of that file ceases. */
+
+#define MIN_FATAL_STATUS 1
+
+/* Flag saying to print the full filename of this file
+ as found through our usual search mechanism. */
+
+static char *print_file_name = NULL;
+
+/* As print_file_name, but search for executable file. */
+
+static char *print_prog_name = NULL;
+
+/* Flag saying to print the relative path we'd use to
+ find libgcc.a given the current compiler flags. */
+
+static int print_multi_directory;
+
+/* Flag saying to print the list of subdirectories and
+ compiler flags used to select them in a standard form. */
+
+static int print_multi_lib;
+
+/* Flag indicating whether we should print the command and arguments */
+
+static int verbose_flag;
+
+/* Nonzero means write "temp" files in source directory
+ and use the source file's name in them, and don't delete them. */
+
+static int save_temps_flag;
+
+/* The compiler version. */
+
+static char *compiler_version;
+
+/* The target version specified with -V */
+
+static char *spec_version = DEFAULT_TARGET_VERSION;
+
+/* The target machine specified with -b. */
+
+static char *spec_machine = DEFAULT_TARGET_MACHINE;
+
+/* Nonzero if cross-compiling.
+ When -b is used, the value comes from the `specs' file. */
+
+#ifdef CROSS_COMPILE
+static int cross_compile = 1;
+#else
+static int cross_compile = 0;
+#endif
+
+/* The number of errors that have occurred; the link phase will not be
+ run if this is non-zero. */
+static int error_count = 0;
+
+/* This is the obstack which we use to allocate many strings. */
+
+static struct obstack obstack;
+
+/* This is the obstack to build an environment variable to pass to
+ collect2 that describes all of the relevant switches of what to
+ pass the compiler in building the list of pointers to constructors
+ and destructors. */
+
+static struct obstack collect_obstack;
+
+extern char *version_string;
+
+/* Forward declaration for prototypes. */
+struct path_prefix;
+
+static void set_spec PROTO((char *, char *));
+static struct compiler *lookup_compiler PROTO((char *, int, char *));
+static char *find_a_file PROTO((struct path_prefix *, char *, int));
+static void add_prefix PROTO((struct path_prefix *, char *, int, int, int *));
+static char *skip_whitespace PROTO((char *));
+static void record_temp_file PROTO((char *, int, int));
+static void delete_if_ordinary PROTO((char *));
+static void delete_temp_files PROTO((void));
+static void delete_failure_queue PROTO((void));
+static void clear_failure_queue PROTO((void));
+static char *choose_temp_base_try PROTO((char *, char *));
+static void choose_temp_base PROTO((void));
+static int check_live_switch PROTO((int, int));
+static char *handle_braces PROTO((char *));
+static char *save_string PROTO((char *, int));
+static char *concat PROTO((char *, char *, char *));
+static int do_spec PROTO((char *));
+static int do_spec_1 PROTO((char *, int, char *));
+static char *find_file PROTO((char *));
+static int is_directory PROTO((char *, char *, int));
+static void validate_switches PROTO((char *));
+static void validate_all_switches PROTO((void));
+static void give_switch PROTO((int, int));
+static int used_arg PROTO((char *, int));
+static void set_multilib_dir PROTO((void));
+static void print_multilib_info PROTO((void));
+static void pfatal_with_name PROTO((char *));
+static void perror_with_name PROTO((char *));
+static void perror_exec PROTO((char *));
+#ifdef HAVE_VPRINTF
+static void fatal PVPROTO((char *, ...));
+static void error PVPROTO((char *, ...));
+#else
+/* We must not provide any prototype here, even if ANSI C. */
+static void fatal PROTO(());
+static void error PROTO(());
+#endif
+
+void fancy_abort ();
+char *xmalloc ();
+char *xrealloc ();
+
+/* Specs are strings containing lines, each of which (if not blank)
+is made up of a program name, and arguments separated by spaces.
+The program name must be exact and start from root, since no path
+is searched and it is unreliable to depend on the current working directory.
+Redirection of input or output is not supported; the subprograms must
+accept filenames saying what files to read and write.
+
+In addition, the specs can contain %-sequences to substitute variable text
+or for conditional text. Here is a table of all defined %-sequences.
+Note that spaces are not generated automatically around the results of
+expanding these sequences; therefore, you can concatenate them together
+or with constant text in a single argument.
+
+ %% substitute one % into the program name or argument.
+ %i substitute the name of the input file being processed.
+ %b substitute the basename of the input file being processed.
+ This is the substring up to (and not including) the last period
+ and not including the directory.
+ %g substitute the temporary-file-name-base. This is a string chosen
+ once per compilation. Different temporary file names are made by
+ concatenation of constant strings on the end, as in `%g.s'.
+	%g also has the same effect as %d.
+ %u like %g, but make the temporary file name unique.
+ %U returns the last file name generated with %u.
+ %d marks the argument containing or following the %d as a
+ temporary file name, so that that file will be deleted if CC exits
+ successfully. Unlike %g, this contributes no text to the argument.
+ %w marks the argument containing or following the %w as the
+ "output file" of this compilation. This puts the argument
+ into the sequence of arguments that %o will substitute later.
+ %W{...}
+ like %{...} but mark last argument supplied within
+ as a file to be deleted on failure.
+ %o substitutes the names of all the output files, with spaces
+ automatically placed around them. You should write spaces
+ around the %o as well or the results are undefined.
+ %o is for use in the specs for running the linker.
+ Input files whose names have no recognized suffix are not compiled
+ at all, but they are included among the output files, so they will
+ be linked.
+ %p substitutes the standard macro predefinitions for the
+ current target machine. Use this when running cpp.
+ %P like %p, but puts `__' before and after the name of each macro.
+ (Except macros that already have __.)
+ This is for ANSI C.
+ %I Substitute a -iprefix option made from GCC_EXEC_PREFIX.
+ %s current argument is the name of a library or startup file of some sort.
+ Search for that file in a standard list of directories
+ and substitute the full name found.
+ %eSTR Print STR as an error message. STR is terminated by a newline.
+ Use this when inconsistent options are detected.
+ %x{OPTION} Accumulate an option for %X.
+ %X Output the accumulated linker options specified by compilations.
+ %Y Output the accumulated assembler options specified by compilations.
+ %v1 Substitute the major version number of GCC.
+ (For version 2.5.n, this is 2.)
+ %v2 Substitute the minor version number of GCC.
+ (For version 2.5.n, this is 5.)
+ %a process ASM_SPEC as a spec.
+ This allows config.h to specify part of the spec for running as.
+ %A process ASM_FINAL_SPEC as a spec. A capital A is actually
+ used here. This can be used to run a post-processor after the
+	assembler has done its job.
+ %D Dump out a -L option for each directory in startfile_prefix.
+ If multilib_dir is set, extra entries are generated with it affixed.
+ %l process LINK_SPEC as a spec.
+ %L process LIB_SPEC as a spec.
+ %S process STARTFILE_SPEC as a spec. A capital S is actually used here.
+ %E process ENDFILE_SPEC as a spec. A capital E is actually used here.
+ %c process SIGNED_CHAR_SPEC as a spec.
+ %C process CPP_SPEC as a spec. A capital C is actually used here.
+ %1 process CC1_SPEC as a spec.
+ %2 process CC1PLUS_SPEC as a spec.
+ %| output "-" if the input for the current command is coming from a pipe.
+ %* substitute the variable part of a matched option. (See below.)
+ Note that each comma in the substituted string is replaced by
+ a single space.
+ %{S} substitutes the -S switch, if that switch was given to CC.
+ If that switch was not specified, this substitutes nothing.
+ Here S is a metasyntactic variable.
+ %{S*} substitutes all the switches specified to CC whose names start
+	with -S.  This is used for -o, -D, -I, etc.; switches that take
+ arguments. CC considers `-o foo' as being one switch whose
+ name starts with `o'. %{o*} would substitute this text,
+ including the space; thus, two arguments would be generated.
+ %{S*:X} substitutes X if one or more switches whose names start with -S are
+ specified to CC. Note that the tail part of the -S option
+ (i.e. the part matched by the `*') will be substituted for each
+ occurrence of %* within X.
+ %{S:X} substitutes X, but only if the -S switch was given to CC.
+ %{!S:X} substitutes X, but only if the -S switch was NOT given to CC.
+ %{|S:X} like %{S:X}, but if no S switch, substitute `-'.
+ %{|!S:X} like %{!S:X}, but if there is an S switch, substitute `-'.
+ %{.S:X} substitutes X, but only if processing a file with suffix S.
+ %{!.S:X} substitutes X, but only if NOT processing a file with suffix S.
+ %(Spec) processes a specification defined in a specs file as *Spec:
+ %[Spec] as above, but put __ around -D arguments
+
+The conditional text X in a %{S:X} or %{!S:X} construct may contain
+other nested % constructs or spaces, or even newlines. They are
+processed as usual, as described above.
+
+The -O, -f, -m, and -W switches are handled specifically in these
+constructs. If another value of -O or the negated form of a -f, -m, or
+-W switch is found later in the command line, the earlier switch
+value is ignored, except with {S*} where S is just one letter; this
+passes all matching options.
+
+The character | is used to indicate that a command should be piped to
+the following command, but only if -pipe is specified.
+
+Note that it is built into CC which switches take arguments and which
+do not. You might think it would be useful to generalize this to
+allow each compiler's spec to say which switches take arguments. But
+this cannot be done in a consistent fashion. CC cannot even decide
+which input files have been specified without knowing which switches
+take arguments, and it must know which input files to compile in order
+to tell which compilers to run.
+
+CC also knows implicitly that arguments starting in `-l' are to be
+treated as compiler output files, and passed to the linker in their
+proper position among the other output files. */
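+
+/* As a hypothetical illustration of the constructs above, a spec
+   fragment such as
+
+	as %{v} %{o*} %i
+
+   would run `as', pass along a -v switch if one was given, pass along
+   -o and its argument if they were given, and substitute the name of
+   the input file for %i.  */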
+
+/* Define the macros used for specs %a, %l, %L, %S, %c, %C, %1. */
+
+/* config.h can define ASM_SPEC to provide extra args to the assembler
+ or extra switch-translations. */
+#ifndef ASM_SPEC
+#define ASM_SPEC ""
+#endif
+
+/* config.h can define ASM_FINAL_SPEC to run a post processor after
+ the assembler has run. */
+#ifndef ASM_FINAL_SPEC
+#define ASM_FINAL_SPEC ""
+#endif
+
+/* config.h can define CPP_SPEC to provide extra args to the C preprocessor
+ or extra switch-translations. */
+#ifndef CPP_SPEC
+#define CPP_SPEC ""
+#endif
+
+/* config.h can define CC1_SPEC to provide extra args to cc1 and cc1plus
+ or extra switch-translations. */
+#ifndef CC1_SPEC
+#define CC1_SPEC ""
+#endif
+
+/* config.h can define CC1PLUS_SPEC to provide extra args to cc1plus
+ or extra switch-translations. */
+#ifndef CC1PLUS_SPEC
+#define CC1PLUS_SPEC ""
+#endif
+
+/* config.h can define LINK_SPEC to provide extra args to the linker
+ or extra switch-translations. */
+#ifndef LINK_SPEC
+#define LINK_SPEC ""
+#endif
+
+/* config.h can define LIB_SPEC to override the default libraries. */
+#ifndef LIB_SPEC
+#define LIB_SPEC "%{g*:-lg} %{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}"
+#endif
+
+/* config.h can define STARTFILE_SPEC to override the default crt0 files. */
+#ifndef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{pg:gcrt0.o%s}%{!pg:%{p:mcrt0.o%s}%{!p:crt0.o%s}}"
+#endif
+
+/* config.h can define SWITCHES_NEED_SPACES to control passing -o and -L.
+ Make the string nonempty to require spaces there. */
+#ifndef SWITCHES_NEED_SPACES
+#define SWITCHES_NEED_SPACES ""
+#endif
+
+/* config.h can define ENDFILE_SPEC to override the default crtn files. */
+#ifndef ENDFILE_SPEC
+#define ENDFILE_SPEC ""
+#endif
+
+/* This spec is used for telling cpp whether char is signed or not. */
+#ifndef SIGNED_CHAR_SPEC
+/* Use #if rather than ?:
+ because MIPS C compiler rejects like ?: in initializers. */
+#if DEFAULT_SIGNED_CHAR
+#define SIGNED_CHAR_SPEC "%{funsigned-char:-D__CHAR_UNSIGNED__}"
+#else
+#define SIGNED_CHAR_SPEC "%{!fsigned-char:-D__CHAR_UNSIGNED__}"
+#endif
+#endif
+
+/* MULTILIB_SELECT comes from multilib.h. It gives a
+ string interpreted by set_multilib_dir to select a library
+ subdirectory based on the compiler options. */
+#ifndef MULTILIB_SELECT
+#define MULTILIB_SELECT ". ;"
+#endif
+
+static char *cpp_spec = CPP_SPEC;
+static char *cpp_predefines = CPP_PREDEFINES;
+static char *cc1_spec = CC1_SPEC;
+static char *cc1plus_spec = CC1PLUS_SPEC;
+static char *signed_char_spec = SIGNED_CHAR_SPEC;
+static char *asm_spec = ASM_SPEC;
+static char *asm_final_spec = ASM_FINAL_SPEC;
+static char *link_spec = LINK_SPEC;
+static char *lib_spec = LIB_SPEC;
+static char *endfile_spec = ENDFILE_SPEC;
+static char *startfile_spec = STARTFILE_SPEC;
+static char *switches_need_spaces = SWITCHES_NEED_SPACES;
+static char *multilib_select = MULTILIB_SELECT;
+
+/* This defines which switch letters take arguments. */
+
+#ifndef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ ((CHAR) == 'D' || (CHAR) == 'U' || (CHAR) == 'o' \
+ || (CHAR) == 'e' || (CHAR) == 'T' || (CHAR) == 'u' \
+ || (CHAR) == 'I' || (CHAR) == 'm' \
+ || (CHAR) == 'L' || (CHAR) == 'A')
+#endif
+
+/* This defines which multi-letter switches take arguments. */
+
+#define DEFAULT_WORD_SWITCH_TAKES_ARG(STR) \
+ (!strcmp (STR, "Tdata") || !strcmp (STR, "Ttext") \
+ || !strcmp (STR, "Tbss") || !strcmp (STR, "include") \
+ || !strcmp (STR, "imacros") || !strcmp (STR, "aux-info") \
+ || !strcmp (STR, "idirafter") || !strcmp (STR, "iprefix") \
+ || !strcmp (STR, "iwithprefix") || !strcmp (STR, "iwithprefixbefore") \
+ || !strcmp (STR, "isystem"))
+
+#ifndef WORD_SWITCH_TAKES_ARG
+#define WORD_SWITCH_TAKES_ARG(STR) DEFAULT_WORD_SWITCH_TAKES_ARG (STR)
+#endif
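+
+/* Thus, for example, `-Tdata 0x400000' on the command line (the
+   address is only an illustration) is treated as a single switch with
+   one argument, rather than as a switch followed by an input file.  */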
+
+/* Record the mapping from file suffixes for compilation specs. */
+
+struct compiler
+{
+ char *suffix; /* Use this compiler for input files
+ whose names end in this suffix. */
+
+ char *spec[4]; /* To use this compiler, concatenate these
+ specs and pass to do_spec. */
+};
+
+/* Pointer to a vector of `struct compiler' that gives the spec for
+ compiling a file, based on its suffix.
+ A file that does not end in any of these suffixes will be passed
+ unchanged to the loader and nothing else will be done to it.
+
+ An entry containing two 0s is used to terminate the vector.
+
+ If multiple entries match a file, the last matching one is used. */
+
+static struct compiler *compilers;
+
+/* Number of entries in `compilers', not counting the null terminator. */
+
+static int n_compilers;
+
+/* The default list of file name suffixes and their compilation specs. */
+
+static struct compiler default_compilers[] =
+{
+ {".c", "@c"},
+ {"@c",
+ "cpp -lang-c %{nostdinc*} %{C} %{v} %{A*} %{I*} %{P} %I\
+ %{C:%{!E:%eGNU C does not support -C without using -E}}\
+ %{M} %{MM} %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG}\
+ -undef -D__GNUC__=%v1 -D__GNUC_MINOR__=%v2\
+ %{ansi:-trigraphs -$ -D__STRICT_ANSI__}\
+ %{!undef:%{!ansi:%p} %P} %{trigraphs} \
+ %c %{O*:%{!O0:-D__OPTIMIZE__}} %{traditional} %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional}\
+ %{g*} %{W*} %{w} %{pedantic*} %{H} %{d*} %C %{D*} %{U*} %{i*}\
+ %i %{!M:%{!MM:%{!E:%{!pipe:%g.i}}}}%{E:%W{o*}}%{M:%W{o*}}%{MM:%W{o*}} |\n",
+ "%{!M:%{!MM:%{!E:cc1 %{!pipe:%g.i} %1 \
+ %{!Q:-quiet} -dumpbase %b.c %{d*} %{m*} %{a}\
+ %{g*} %{O*} %{W*} %{w} %{pedantic*} %{ansi} \
+ %{traditional} %{v:-version} %{pg:-p} %{p} %{f*}\
+ %{aux-info*}\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}} |\n\
+ %{!S:as %{R} %{j} %{J} %{h} %{d2} %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b.o}}%{!c:-o %d%w%u.o}\
+ %{!pipe:%g.s} %A\n }}}}"},
+ {"-",
+ "%{E:cpp -lang-c %{nostdinc*} %{C} %{v} %{A*} %{I*} %{P} %I\
+ %{C:%{!E:%eGNU C does not support -C without using -E}}\
+ %{M} %{MM} %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG}\
+ -undef -D__GNUC__=%v1 -D__GNUC_MINOR__=%v2\
+ %{ansi:-trigraphs -$ -D__STRICT_ANSI__}\
+ %{!undef:%{!ansi:%p} %P} %{trigraphs}\
+ %c %{O*:%{!O0:-D__OPTIMIZE__}} %{traditional} %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional}\
+ %{g*} %{W*} %{w} %{pedantic*} %{H} %{d*} %C %{D*} %{U*} %{i*}\
+ %i %W{o*}}\
+ %{!E:%e-E required when input is from standard input}"},
+ {".m", "@objective-c"},
+ {"@objective-c",
+ "cpp -lang-objc %{nostdinc*} %{C} %{v} %{A*} %{I*} %{P} %I\
+ %{C:%{!E:%eGNU C does not support -C without using -E}}\
+ %{M} %{MM} %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG}\
+ -undef -D__OBJC__ -D__GNUC__=%v1 -D__GNUC_MINOR__=%v2\
+ %{ansi:-trigraphs -$ -D__STRICT_ANSI__}\
+ %{!undef:%{!ansi:%p} %P} %{trigraphs}\
+ %c %{O*:%{!O0:-D__OPTIMIZE__}} %{traditional} %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional}\
+ %{g*} %{W*} %{w} %{pedantic*} %{H} %{d*} %C %{D*} %{U*} %{i*}\
+ %i %{!M:%{!MM:%{!E:%{!pipe:%g.i}}}}%{E:%W{o*}}%{M:%W{o*}}%{MM:%W{o*}} |\n",
+ "%{!M:%{!MM:%{!E:cc1obj %{!pipe:%g.i} %1 \
+ %{!Q:-quiet} -dumpbase %b.m %{d*} %{m*} %{a}\
+ %{g*} %{O*} %{W*} %{w} %{pedantic*} %{ansi} \
+ %{traditional} %{v:-version} %{pg:-p} %{p} %{f*} \
+ -lang-objc %{gen-decls} \
+ %{aux-info*}\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}} |\n\
+ %{!S:as %{R} %{j} %{J} %{h} %{d2} %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b.o}}%{!c:-o %d%w%u.o}\
+ %{!pipe:%g.s} %A\n }}}}"},
+ {".h", "@c-header"},
+ {"@c-header",
+ "%{!E:%eCompilation of header file requested} \
+ cpp %{nostdinc*} %{C} %{v} %{A*} %{I*} %{P} %I\
+ %{C:%{!E:%eGNU C does not support -C without using -E}}\
+ %{M} %{MM} %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG}\
+ -undef -D__GNUC__=%v1 -D__GNUC_MINOR__=%v2\
+ %{ansi:-trigraphs -$ -D__STRICT_ANSI__}\
+ %{!undef:%{!ansi:%p} %P} %{trigraphs}\
+ %c %{O*:%{!O0:-D__OPTIMIZE__}} %{traditional} %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional}\
+ %{g*} %{W*} %{w} %{pedantic*} %{H} %{d*} %C %{D*} %{U*} %{i*}\
+ %i %W{o*}"},
+ {".cc", "@c++"},
+ {".cxx", "@c++"},
+ {".cpp", "@c++"},
+ {".C", "@c++"},
+ {"@c++",
+ "cpp -lang-c++ %{nostdinc*} %{C} %{v} %{A*} %{I*} %{P} %I\
+ %{C:%{!E:%eGNU C++ does not support -C without using -E}}\
+ %{M} %{MM} %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG}\
+ -undef -D__GNUC__=%v1 -D__GNUG__=%v1 -D__cplusplus -D__GNUC_MINOR__=%v2\
+ %{ansi:-trigraphs -$ -D__STRICT_ANSI__} %{!undef:%{!ansi:%p} %P}\
+ %c %{O*:%{!O0:-D__OPTIMIZE__}} %{traditional} %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional} %{trigraphs}\
+ %{g*} %{W*} %{w} %{pedantic*} %{H} %{d*} %C %{D*} %{U*} %{i*}\
+ %i %{!M:%{!MM:%{!E:%{!pipe:%g.ii}}}}%{E:%W{o*}}%{M:%W{o*}}%{MM:%W{o*}} |\n",
+ "%{!M:%{!MM:%{!E:cc1plus %{!pipe:%g.ii} %1 %2\
+ %{!Q:-quiet} -dumpbase %b.cc %{d*} %{m*} %{a}\
+ %{g*} %{O*} %{W*} %{w} %{pedantic*} %{ansi}\
+ %{traditional} %{v:-version} %{pg:-p} %{p}\
+ %{f*} %{+e*} %{aux-info*}\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}}|\n\
+ %{!S:as %{R} %{j} %{J} %{h} %{d2} %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b.o}}%{!c:-o %d%w%u.o}\
+ %{!pipe:%g.s} %A\n }}}}"},
+ {".i", "@cpp-output"},
+ {"@cpp-output",
+ "%{!M:%{!MM:%{!E:cc1 %i %1 %{!Q:-quiet} %{d*} %{m*} %{a}\
+ %{g*} %{O*} %{W*} %{w} %{pedantic*} %{ansi}\
+ %{traditional} %{v:-version} %{pg:-p} %{p} %{f*}\
+ %{aux-info*}\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}} |\n\
+ %{!S:as %{R} %{j} %{J} %{h} %{d2} %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b.o}}%{!c:-o %d%w%u.o}\
+ %{!pipe:%g.s} %A\n }}}}"},
+ {".ii", "@c++-cpp-output"},
+ {"@c++-cpp-output",
+ "%{!M:%{!MM:%{!E:cc1plus %i %1 %2 %{!Q:-quiet} %{d*} %{m*} %{a}\
+ %{g*} %{O*} %{W*} %{w} %{pedantic*} %{ansi}\
+ %{traditional} %{v:-version} %{pg:-p} %{p}\
+ %{f*} %{+e*} %{aux-info*}\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}} |\n\
+ %{!S:as %{R} %{j} %{J} %{h} %{d2} %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b.o}}%{!c:-o %d%w%u.o}\
+ %{!pipe:%g.s} %A\n }}}}"},
+ {".s", "@assembler"},
+ {"@assembler",
+ "%{!M:%{!MM:%{!E:%{!S:as %{R} %{j} %{J} %{h} %{d2} %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b.o}}%{!c:-o %d%w%u.o}\
+ %i %A\n }}}}"},
+ {".S", "@assembler-with-cpp"},
+ {"@assembler-with-cpp",
+ "cpp -lang-asm %{nostdinc*} %{C} %{v} %{A*} %{I*} %{P} %I\
+ %{C:%{!E:%eGNU C does not support -C without using -E}}\
+ %{M} %{MM} %{MD:-MD %b.d} %{MMD:-MMD %b.d} %{MG} %{trigraphs}\
+ -undef -$ %{!undef:%p %P} -D__ASSEMBLER__ \
+ %c %{O*:%{!O0:-D__OPTIMIZE__}} %{traditional} %{ftraditional:-traditional}\
+ %{traditional-cpp:-traditional}\
+ %{g*} %{W*} %{w} %{pedantic*} %{H} %{d*} %C %{D*} %{U*} %{i*}\
+ %i %{!M:%{!MM:%{!E:%{!pipe:%g.s}}}}%{E:%W{o*}}%{M:%W{o*}}%{MM:%W{o*}} |\n",
+ "%{!M:%{!MM:%{!E:%{!S:as %{R} %{j} %{J} %{h} %{d2} %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b.o}}%{!c:-o %d%w%u.o}\
+ %{!pipe:%g.s} %A\n }}}}"},
+ {".ads", "@ada"},
+ {".adb", "@ada"},
+ {".ada", "@ada"},
+ {"@ada",
+ "%{!M:%{!MM:%{!E:gnat1 %{k8:-gnatk8} %{w:-gnatws} %{!Q:-quiet}\
+ -dumpbase %b.ada %{g*} %{O*} %{p} %{pg:-p} %{f*}\
+ %{d*}\
+ %{pg:%{fomit-frame-pointer:%e-pg and -fomit-frame-pointer are incompatible}}\
+ %i %{S:%W{o*}%{!o*:-o %b.s}}%{!S:-o %{|!pipe:%g.s}} |\n\
+ %{!S:%{!gnatc:%{!gnats:as %{R} %{j} %{J} %{h} %{d2} %a %Y\
+ %{c:%W{o*}%{!o*:-o %w%b.o}}\
+ %{!c:-o %d%w%u.o} %{!pipe:%g.s} %A\n}}}}}} "},
+ /* Mark end of table */
+ {0, 0}
+};
+
+/* Number of elements in default_compilers, not counting the terminator. */
+
+static int n_default_compilers
+ = (sizeof default_compilers / sizeof (struct compiler)) - 1;
+
+/* Here is the spec for running the linker, after compiling all files. */
+
+/* -u* was put back because both BSD and SysV seem to support it. */
+/* %{static:} simply prevents an error message if the target machine
+ doesn't handle -static. */
+/* We want %{T*} after %{L*} and %D so that it can be used to specify linker
+ scripts which exist in user specified directories, or in standard
+ directories. */
+#ifdef LINK_LIBGCC_SPECIAL_1
+/* Have gcc do the search for libgcc.a, but generate -L options as usual. */
+static char *link_command_spec = "\
+%{!fsyntax-only: \
+ %{!c:%{!M:%{!MM:%{!E:%{!S:ld %l %X %{o*} %{A} %{d} %{e*} %{m} %{N} %{n} \
+ %{r} %{s} %{t} %{u*} %{x} %{z} %{Z}\
+ %{!A:%{!nostartfiles:%{!nostdlib:%S}}} %{static:}\
+ %{L*} %D %{T*} %o %{!nostdlib:libgcc.a%s %L libgcc.a%s %{!A:%E}}\n }}}}}}";
+#else
+#ifdef LINK_LIBGCC_SPECIAL
+/* Have gcc do the search for libgcc.a, and don't generate -L options. */
+static char *link_command_spec = "\
+%{!fsyntax-only: \
+ %{!c:%{!M:%{!MM:%{!E:%{!S:ld %l %X %{o*} %{A} %{d} %{e*} %{m} %{N} %{n} \
+ %{r} %{s} %{t} %{u*} %{x} %{z} %{Z}\
+ %{!A:%{!nostartfiles:%{!nostdlib:%S}}} %{static:}\
+ %{L*} %{T*} %o %{!nostdlib:libgcc.a%s %L libgcc.a%s %{!A:%E}}\n }}}}}}";
+#else
+/* Use -L and have the linker do the search for -lgcc. */
+static char *link_command_spec = "\
+%{!fsyntax-only: \
+ %{!c:%{!M:%{!MM:%{!E:%{!S:ld %l %X %{o*} %{A} %{d} %{e*} %{m} %{N} %{n} \
+ %{r} %{s} %{t} %{u*} %{x} %{z} %{Z}\
+ %{!A:%{!nostartfiles:%{!nostdlib:%S}}} %{static:}\
+ %{L*} %D %{T*} %o %{!nostdlib:-lgcc %L -lgcc %{!A:%E}}\n }}}}}}";
+#endif
+#endif
+
+/* A vector of options to give to the linker.
+ These options are accumulated by -Xlinker and -Wl,
+ and substituted into the linker command with %X. */
+static int n_linker_options;
+static char **linker_options;
+
+/* A vector of options to give to the assembler.
+ These options are accumulated by -Wa,
+   and substituted into the assembler command with %Y.  */
+static int n_assembler_options;
+static char **assembler_options;
+
+/* Define how to map long options into short ones. */
+
+/* This structure describes one mapping. */
+struct option_map
+{
+ /* The long option's name. */
+ char *name;
+ /* The equivalent short option. */
+ char *equivalent;
+ /* Argument info. A string of flag chars; NULL equals no options.
+ a => argument required.
+ o => argument optional.
+ j => join argument to equivalent, making one word.
+ * => allow other text after NAME as an argument. */
+ char *arg_info;
+};
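+
+/* For example, the entry {"--machine", "-m", "aj"} in the table below
+   means that --machine requires an argument (`a') which is joined to
+   the short equivalent (`j'), so `--machine 68000' is translated into
+   `-m68000'.  */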
+
+/* This is the table of mappings. Mappings are tried sequentially
+ for each option encountered; the first one that matches, wins. */
+
+struct option_map option_map[] =
+ {
+ {"--profile-blocks", "-a", 0},
+ {"--target", "-b", "a"},
+ {"--compile", "-c", 0},
+ {"--dump", "-d", "a"},
+ {"--entry", "-e", 0},
+ {"--debug", "-g", "oj"},
+ {"--include", "-include", "a"},
+ {"--imacros", "-imacros", "a"},
+ {"--include-prefix", "-iprefix", "a"},
+ {"--include-directory-after", "-idirafter", "a"},
+ {"--include-with-prefix", "-iwithprefix", "a"},
+ {"--include-with-prefix-before", "-iwithprefixbefore", "a"},
+ {"--include-with-prefix-after", "-iwithprefix", "a"},
+ {"--machine-", "-m", "*j"},
+ {"--machine", "-m", "aj"},
+ {"--no-standard-includes", "-nostdinc", 0},
+ {"--no-standard-libraries", "-nostdlib", 0},
+ {"--no-precompiled-includes", "-noprecomp", 0},
+ {"--output", "-o", "a"},
+ {"--profile", "-p", 0},
+ {"--quiet", "-q", 0},
+ {"--silent", "-q", 0},
+ {"--force-link", "-u", "a"},
+ {"--verbose", "-v", 0},
+ {"--version", "-dumpversion", 0},
+ {"--no-warnings", "-w", 0},
+ {"--language", "-x", "a"},
+
+ {"--assert", "-A", "a"},
+ {"--prefix", "-B", "a"},
+ {"--comments", "-C", 0},
+ {"--define-macro", "-D", "a"},
+ {"--preprocess", "-E", 0},
+ {"--trace-includes", "-H", 0},
+ {"--include-directory", "-I", "a"},
+ {"--include-barrier", "-I-", 0},
+ {"--library-directory", "-L", "a"},
+ {"--dependencies", "-M", 0},
+ {"--user-dependencies", "-MM", 0},
+ {"--write-dependencies", "-MD", 0},
+ {"--write-user-dependencies", "-MMD", 0},
+ {"--print-missing-file-dependencies", "-MG", 0},
+ {"--optimize", "-O", "oj"},
+ {"--no-line-commands", "-P", 0},
+ {"--assemble", "-S", 0},
+ {"--undefine-macro", "-U", "a"},
+ {"--use-version", "-V", "a"},
+ {"--for-assembler", "-Wa", "a"},
+ {"--extra-warnings", "-W", 0},
+ {"--all-warnings", "-Wall", 0},
+ {"--warn-", "-W", "*j"},
+ {"--for-linker", "-Xlinker", "a"},
+
+ {"--ansi", "-ansi", 0},
+ {"--traditional", "-traditional", 0},
+ {"--traditional-cpp", "-traditional-cpp", 0},
+ {"--trigraphs", "-trigraphs", 0},
+ {"--pipe", "-pipe", 0},
+ {"--dumpbase", "-dumpbase", "a"},
+ {"--pedantic", "-pedantic", 0},
+ {"--pedantic-errors", "-pedantic-errors", 0},
+ {"--save-temps", "-save-temps", 0},
+ {"--print-libgcc-file-name", "-print-libgcc-file-name", 0},
+ {"--print-file-name", "-print-file-name=", "aj"},
+ {"--print-prog-name", "-print-prog-name=", "aj"},
+ {"--print-multi-lib", "-print-multi-lib", 0},
+ {"--print-multi-directory", "-print-multi-directory", 0},
+ {"--static", "-static", 0},
+ {"--shared", "-shared", 0},
+ {"--symbolic", "-symbolic", 0},
+ {"--", "-f", "*j"}
+ };
+
+/* Translate the options described by *ARGCP and *ARGVP.
+ Make a new vector and store it back in *ARGVP,
+   and store its length in *ARGCP.  */
+
+static void
+translate_options (argcp, argvp)
+ int *argcp;
+ char ***argvp;
+{
+ int i, j;
+ int argc = *argcp;
+ char **argv = *argvp;
+ char **newv = (char **) xmalloc ((argc + 2) * 2 * sizeof (char *));
+ int newindex = 0;
+
+ i = 0;
+ newv[newindex++] = argv[i++];
+
+ while (i < argc)
+ {
+ /* Translate -- options. */
+ if (argv[i][0] == '-' && argv[i][1] == '-')
+ {
+ /* Find a mapping that applies to this option. */
+ for (j = 0; j < sizeof (option_map) / sizeof (option_map[0]); j++)
+ {
+ int optlen = strlen (option_map[j].name);
+ int complen = strlen (argv[i]);
+ char *arginfo = option_map[j].arg_info;
+
+ if (arginfo == 0)
+ arginfo = "";
+ if (complen > optlen)
+ complen = optlen;
+ if (!strncmp (argv[i], option_map[j].name, complen))
+ {
+ int extra = strlen (argv[i]) > optlen;
+ char *arg = 0;
+
+ if (extra)
+ {
+ /* If the option has an argument, accept that. */
+ if (argv[i][optlen] == '=')
+ arg = argv[i] + optlen + 1;
+ /* If this mapping allows extra text at end of name,
+ accept that as "argument". */
+ else if (index (arginfo, '*') != 0)
+ arg = argv[i] + optlen;
+ /* Otherwise, extra text at end means mismatch.
+ Try other mappings. */
+ else
+ continue;
+ }
+ else if (index (arginfo, '*') != 0)
+ error ("Incomplete `%s' option", option_map[j].name);
+
+ /* Handle arguments. */
+		  if (index (arginfo, 'a') != 0)
+ {
+ if (arg == 0)
+ {
+ if (i + 1 == argc)
+ error ("Missing argument to `%s' option",
+ option_map[j].name);
+ arg = argv[++i];
+ }
+ }
+ else if (index (arginfo, '*') != 0)
+ ;
+		  else if (index (arginfo, 'o') == 0)
+ {
+ if (arg != 0)
+ error ("Extraneous argument to `%s' option",
+ option_map[j].name);
+ arg = 0;
+ }
+
+ /* Store the translation as one argv elt or as two. */
+ if (arg != 0 && index (arginfo, 'j') != 0)
+ newv[newindex++] = concat (option_map[j].equivalent,
+ arg, "");
+ else if (arg != 0)
+ {
+ newv[newindex++] = option_map[j].equivalent;
+ newv[newindex++] = arg;
+ }
+ else
+ newv[newindex++] = option_map[j].equivalent;
+
+ break;
+ }
+ }
+ i++;
+ }
+ /* Handle old-fashioned options--just copy them through,
+ with their arguments. */
+ else if (argv[i][0] == '-')
+ {
+ char *p = argv[i] + 1;
+ int c = *p;
+ int nskip = 1;
+
+ if (SWITCH_TAKES_ARG (c) > (p[1] != 0))
+ nskip += SWITCH_TAKES_ARG (c) - (p[1] != 0);
+ else if (WORD_SWITCH_TAKES_ARG (p))
+ nskip += WORD_SWITCH_TAKES_ARG (p);
+ else if ((c == 'B' || c == 'b' || c == 'V' || c == 'x')
+ && p[1] == 0)
+ nskip += 1;
+ else if (! strcmp (p, "Xlinker"))
+ nskip += 1;
+
+ /* Watch out for an option at the end of the command line that
+ is missing arguments, and avoid skipping past the end of the
+ command line. */
+ if (nskip + i > argc)
+ nskip = argc - i;
+
+ while (nskip > 0)
+ {
+ newv[newindex++] = argv[i++];
+ nskip--;
+ }
+ }
+ else
+ /* Ordinary operands, or +e options. */
+ newv[newindex++] = argv[i++];
+ }
+
+ newv[newindex] = 0;
+
+ *argvp = newv;
+ *argcp = newindex;
+}
+
+/* Read compilation specs from a file named FILENAME,
+ replacing the default ones.
+
+ A suffix which starts with `*' is a definition for
+ one of the machine-specific sub-specs. The "suffix" should be
+ *asm, *cc1, *cpp, *link, *startfile, *signed_char, etc.
+ The corresponding spec is stored in asm_spec, etc.,
+ rather than in the `compilers' vector.
+
+ Anything invalid in the file is a fatal error. */
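+
+/* For instance (a made-up fragment), a specs file containing
+
+	*asm:
+	%{v}
+
+   followed by a blank line would redefine the spec substituted for %a,
+   so that only a -v switch is passed to the assembler.  */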
+
+static void
+read_specs (filename)
+ char *filename;
+{
+ int desc;
+ struct stat statbuf;
+ char *buffer;
+ register char *p;
+
+ if (verbose_flag)
+ fprintf (stderr, "Reading specs from %s\n", filename);
+
+ /* Open and stat the file. */
+ desc = open (filename, 0, 0);
+ if (desc < 0)
+ pfatal_with_name (filename);
+ if (stat (filename, &statbuf) < 0)
+ pfatal_with_name (filename);
+
+ /* Read contents of file into BUFFER. */
+ buffer = xmalloc ((unsigned) statbuf.st_size + 1);
+ read (desc, buffer, (unsigned) statbuf.st_size);
+ buffer[statbuf.st_size] = 0;
+ close (desc);
+
+ /* Scan BUFFER for specs, putting them in the vector. */
+ p = buffer;
+ while (1)
+ {
+ char *suffix;
+ char *spec;
+ char *in, *out, *p1, *p2;
+
+ /* Advance P in BUFFER to the next nonblank nocomment line. */
+ p = skip_whitespace (p);
+ if (*p == 0)
+ break;
+
+ /* Find the colon that should end the suffix. */
+ p1 = p;
+ while (*p1 && *p1 != ':' && *p1 != '\n') p1++;
+ /* The colon shouldn't be missing. */
+ if (*p1 != ':')
+ fatal ("specs file malformed after %d characters", p1 - buffer);
+ /* Skip back over trailing whitespace. */
+ p2 = p1;
+ while (p2 > buffer && (p2[-1] == ' ' || p2[-1] == '\t')) p2--;
+ /* Copy the suffix to a string. */
+ suffix = save_string (p, p2 - p);
+ /* Find the next line. */
+ p = skip_whitespace (p1 + 1);
+ if (p[1] == 0)
+ fatal ("specs file malformed after %d characters", p - buffer);
+ p1 = p;
+ /* Find next blank line. */
+ while (*p1 && !(*p1 == '\n' && p1[1] == '\n')) p1++;
+ /* Specs end at the blank line and do not include the newline. */
+ spec = save_string (p, p1 - p);
+ p = p1;
+
+ /* Delete backslash-newline sequences from the spec. */
+ in = spec;
+ out = spec;
+ while (*in != 0)
+ {
+ if (in[0] == '\\' && in[1] == '\n')
+ in += 2;
+ else if (in[0] == '#')
+ {
+ while (*in && *in != '\n') in++;
+ }
+ else
+ *out++ = *in++;
+ }
+ *out = 0;
+
+ if (suffix[0] == '*')
+ {
+ if (! strcmp (suffix, "*link_command"))
+ link_command_spec = spec;
+ else
+ set_spec (suffix + 1, spec);
+ }
+ else
+ {
+ /* Add this pair to the vector. */
+ compilers
+ = ((struct compiler *)
+ xrealloc (compilers, (n_compilers + 2) * sizeof (struct compiler)));
+ compilers[n_compilers].suffix = suffix;
+ bzero ((char *) compilers[n_compilers].spec,
+ sizeof compilers[n_compilers].spec);
+ compilers[n_compilers].spec[0] = spec;
+ n_compilers++;
+ bzero ((char *) &compilers[n_compilers],
+ sizeof compilers[n_compilers]);
+ }
+
+ if (*suffix == 0)
+ link_command_spec = spec;
+ }
+
+ if (link_command_spec == 0)
+ fatal ("spec file has no spec for linking");
+}
+
+static char *
+skip_whitespace (p)
+ char *p;
+{
+ while (1)
+ {
+ /* A fully-blank line is a delimiter in the SPEC file and shouldn't
+ be considered whitespace. */
+ if (p[0] == '\n' && p[1] == '\n' && p[2] == '\n')
+ return p + 1;
+ else if (*p == '\n' || *p == ' ' || *p == '\t')
+ p++;
+ else if (*p == '#')
+ {
+ while (*p != '\n') p++;
+ p++;
+ }
+ else
+ break;
+ }
+
+ return p;
+}
+
+/* Structure to keep track of the specs that have been defined so far. These
+ are accessed using %(specname) or %[specname] in a compiler or link spec. */
+
+struct spec_list
+{
+ char *name; /* Name of the spec. */
+ char *spec; /* The spec itself. */
+ struct spec_list *next; /* Next spec in linked list. */
+};
+
+/* List of specs that have been defined so far. */
+
+static struct spec_list *specs = (struct spec_list *) 0;
+
+/* Change the value of spec NAME to SPEC. If SPEC is empty, then the spec is
+ removed; If the spec starts with a + then SPEC is added to the end of the
+ current spec. */
+
+static void
+set_spec (name, spec)
+ char *name;
+ char *spec;
+{
+ struct spec_list *sl;
+ char *old_spec;
+
+ /* See if the spec already exists */
+ for (sl = specs; sl; sl = sl->next)
+ if (strcmp (sl->name, name) == 0)
+ break;
+
+ if (!sl)
+ {
+ /* Not found - make it */
+ sl = (struct spec_list *) xmalloc (sizeof (struct spec_list));
+ sl->name = save_string (name, strlen (name));
+ sl->spec = save_string ("", 0);
+ sl->next = specs;
+ specs = sl;
+ }
+
+ old_spec = sl->spec;
+ if (name && spec[0] == '+' && isspace (spec[1]))
+ sl->spec = concat (old_spec, spec + 1, "");
+ else
+ sl->spec = save_string (spec, strlen (spec));
+
+ if (! strcmp (name, "asm"))
+ asm_spec = sl->spec;
+ else if (! strcmp (name, "asm_final"))
+ asm_final_spec = sl->spec;
+ else if (! strcmp (name, "cc1"))
+ cc1_spec = sl->spec;
+ else if (! strcmp (name, "cc1plus"))
+ cc1plus_spec = sl->spec;
+ else if (! strcmp (name, "cpp"))
+ cpp_spec = sl->spec;
+ else if (! strcmp (name, "endfile"))
+ endfile_spec = sl->spec;
+ else if (! strcmp (name, "lib"))
+ lib_spec = sl->spec;
+ else if (! strcmp (name, "link"))
+ link_spec = sl->spec;
+ else if (! strcmp (name, "predefines"))
+ cpp_predefines = sl->spec;
+ else if (! strcmp (name, "signed_char"))
+ signed_char_spec = sl->spec;
+ else if (! strcmp (name, "startfile"))
+ startfile_spec = sl->spec;
+ else if (! strcmp (name, "switches_need_spaces"))
+ switches_need_spaces = sl->spec;
+ else if (! strcmp (name, "cross_compile"))
+ cross_compile = atoi (sl->spec);
+ else if (! strcmp (name, "multilib"))
+ multilib_select = sl->spec;
+ /* Free the old spec */
+ if (old_spec)
+ free (old_spec);
+}
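+
+/* Append sketch (hypothetical values): after
+
+ set_spec ("lib", "-lc");
+ set_spec ("lib", "+ -lm");
+
+ the `lib' spec reads "-lc -lm" -- a new value beginning with `+' and
+ whitespace is concatenated onto the old one instead of replacing it,
+ and lib_spec above is repointed at the result. */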
+
+/* Accumulate a command (program name and args), and run it. */
+
+/* Vector of pointers to arguments in the current line of specifications. */
+
+static char **argbuf;
+
+/* Number of elements allocated in argbuf. */
+
+static int argbuf_length;
+
+/* Number of elements in argbuf currently in use (containing args). */
+
+static int argbuf_index;
+
+/* This is the list of suffixes and codes (%g/%u/%U) and the associated
+ temp file. Used only if MKTEMP_EACH_FILE. */
+
+static struct temp_name {
+ char *suffix; /* suffix associated with the code. */
+ int length; /* strlen (suffix). */
+ int unique; /* Indicates whether %g or %u/%U was used. */
+ char *filename; /* associated filename. */
+ int filename_length; /* strlen (filename). */
+ struct temp_name *next;
+} *temp_names;
+
+/* Number of commands executed so far. */
+
+static int execution_count;
+
+/* Number of commands that exited with a signal. */
+
+static int signal_count;
+
+/* Name with which this program was invoked. */
+
+static char *programname;
+
+/* Structures to keep track of prefixes to try when looking for files. */
+
+struct prefix_list
+{
+ char *prefix; /* String to prepend to the path. */
+ struct prefix_list *next; /* Next in linked list. */
+ int require_machine_suffix; /* Don't use without machine_suffix. */
+ /* 2 means try both machine_suffix and just_machine_suffix. */
+ int *used_flag_ptr; /* 1 if a file was found with this prefix. */
+};
+
+struct path_prefix
+{
+ struct prefix_list *plist; /* List of prefixes to try */
+ int max_len; /* Max length of a prefix in PLIST */
+ char *name; /* Name of this list (used in config stuff) */
+};
+
+/* List of prefixes to try when looking for executables. */
+
+static struct path_prefix exec_prefix = { 0, 0, "exec" };
+
+/* List of prefixes to try when looking for startup (crt0) files. */
+
+static struct path_prefix startfile_prefix = { 0, 0, "startfile" };
+
+/* List of prefixes to try when looking for include files. */
+
+static struct path_prefix include_prefix = { 0, 0, "include" };
+
+/* Suffix to attach to directories searched for commands.
+ This looks like `MACHINE/VERSION/'. */
+
+static char *machine_suffix = 0;
+
+/* Suffix to attach to directories searched for commands.
+ This is just `MACHINE/'. */
+
+static char *just_machine_suffix = 0;
+
+/* Adjusted value of GCC_EXEC_PREFIX envvar. */
+
+static char *gcc_exec_prefix;
+
+/* Default prefixes to attach to command names. */
+
+#ifdef CROSS_COMPILE /* Don't use these prefixes for a cross compiler. */
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+#undef MD_STARTFILE_PREFIX_1
+#endif
+
+#ifndef STANDARD_EXEC_PREFIX
+#define STANDARD_EXEC_PREFIX "/usr/local/lib/gcc-lib/"
+#endif /* !defined STANDARD_EXEC_PREFIX */
+
+static char *standard_exec_prefix = STANDARD_EXEC_PREFIX;
+static char *standard_exec_prefix_1 = "/usr/lib/gcc/";
+#ifdef MD_EXEC_PREFIX
+static char *md_exec_prefix = MD_EXEC_PREFIX;
+#endif
+
+#ifndef STANDARD_STARTFILE_PREFIX
+#define STANDARD_STARTFILE_PREFIX "/usr/local/lib/"
+#endif /* !defined STANDARD_STARTFILE_PREFIX */
+
+#ifdef MD_STARTFILE_PREFIX
+static char *md_startfile_prefix = MD_STARTFILE_PREFIX;
+#endif
+#ifdef MD_STARTFILE_PREFIX_1
+static char *md_startfile_prefix_1 = MD_STARTFILE_PREFIX_1;
+#endif
+static char *standard_startfile_prefix = STANDARD_STARTFILE_PREFIX;
+static char *standard_startfile_prefix_1 = "/lib/";
+static char *standard_startfile_prefix_2 = "/usr/lib/";
+
+#ifndef TOOLDIR_BASE_PREFIX
+#define TOOLDIR_BASE_PREFIX "/usr/local/"
+#endif
+static char *tooldir_base_prefix = TOOLDIR_BASE_PREFIX;
+static char *tooldir_prefix;
+
+/* Subdirectory to use for locating libraries. Set by
+ set_multilib_dir based on the compilation options. */
+
+static char *multilib_dir;
+
+/* Clear out the vector of arguments (after a command is executed). */
+
+static void
+clear_args ()
+{
+ argbuf_index = 0;
+}
+
+/* Add one argument to the vector at the end.
+ This is done when a space is seen or at the end of the line.
+ If DELETE_ALWAYS is nonzero, the arg is a filename
+ and the file should be deleted eventually.
+ If DELETE_FAILURE is nonzero, the arg is a filename
+ and the file should be deleted if this compilation fails. */
+
+static void
+store_arg (arg, delete_always, delete_failure)
+ char *arg;
+ int delete_always, delete_failure;
+{
+ if (argbuf_index + 1 == argbuf_length)
+ {
+ argbuf = (char **) xrealloc (argbuf, (argbuf_length *= 2) * sizeof (char *));
+ }
+
+ argbuf[argbuf_index++] = arg;
+ argbuf[argbuf_index] = 0;
+
+ if (delete_always || delete_failure)
+ record_temp_file (arg, delete_always, delete_failure);
+}
+
+/* Record the names of temporary files we tell compilers to write,
+ and delete them at the end of the run. */
+
+/* This is the common prefix we use to make temp file names.
+ It is chosen once for each run of this program.
+ It is substituted into a spec by %g.
+ Thus, all temp file names contain this prefix.
+ In practice, all temp file names start with this prefix.
+
+ This prefix comes from the envvar TMPDIR if it is defined;
+ otherwise, from the P_tmpdir macro if that is defined;
+ otherwise, in /usr/tmp or /tmp. */
+
+static char *temp_filename;
+
+/* Length of the prefix. */
+
+static int temp_filename_length;
+
+/* Define the list of temporary files to delete. */
+
+struct temp_file
+{
+ char *name;
+ struct temp_file *next;
+};
+
+/* Queue of files to delete on success or failure of compilation. */
+static struct temp_file *always_delete_queue;
+/* Queue of files to delete on failure of compilation. */
+static struct temp_file *failure_delete_queue;
+
+/* Record FILENAME as a file to be deleted automatically.
+ ALWAYS_DELETE nonzero means put it on always_delete_queue,
+ to be deleted at the end of the run whether or not
+ compilation succeeds.
+ FAIL_DELETE nonzero means put it on failure_delete_queue,
+ to be deleted only if a compilation step fails. */
+
+static void
+record_temp_file (filename, always_delete, fail_delete)
+ char *filename;
+ int always_delete;
+ int fail_delete;
+{
+ register char *name;
+ name = xmalloc (strlen (filename) + 1);
+ strcpy (name, filename);
+
+ if (always_delete)
+ {
+ register struct temp_file *temp;
+ for (temp = always_delete_queue; temp; temp = temp->next)
+ if (! strcmp (name, temp->name))
+ goto already1;
+ temp = (struct temp_file *) xmalloc (sizeof (struct temp_file));
+ temp->next = always_delete_queue;
+ temp->name = name;
+ always_delete_queue = temp;
+ already1:;
+ }
+
+ if (fail_delete)
+ {
+ register struct temp_file *temp;
+ for (temp = failure_delete_queue; temp; temp = temp->next)
+ if (! strcmp (name, temp->name))
+ goto already2;
+ temp = (struct temp_file *) xmalloc (sizeof (struct temp_file));
+ temp->next = failure_delete_queue;
+ temp->name = name;
+ failure_delete_queue = temp;
+ already2:;
+ }
+}
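+
+/* Usage sketch (hypothetical name): record_temp_file ("/tmp/cca01234.s",
+ 1, 0) queues the file on always_delete_queue, so it is removed when
+ the run finishes; record_temp_file (name, 0, 1) queues it only on
+ failure_delete_queue. The strcmp scans above keep each queue free of
+ duplicates. */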
+
+/* Delete all the temporary files whose names we previously recorded. */
+
+static void
+delete_if_ordinary (name)
+ char *name;
+{
+ struct stat st;
+#ifdef DEBUG
+ int i, c;
+
+ printf ("Delete %s? (y or n) ", name);
+ fflush (stdout);
+ i = getchar ();
+ if (i != '\n')
+ while ((c = getchar ()) != '\n' && c != EOF) ;
+ if (i == 'y' || i == 'Y')
+#endif /* DEBUG */
+ if (stat (name, &st) >= 0 && S_ISREG (st.st_mode))
+ if (unlink (name) < 0)
+ if (verbose_flag)
+ perror_with_name (name);
+}
+
+static void
+delete_temp_files ()
+{
+ register struct temp_file *temp;
+
+ for (temp = always_delete_queue; temp; temp = temp->next)
+ delete_if_ordinary (temp->name);
+ always_delete_queue = 0;
+}
+
+/* Delete all the files to be deleted on error. */
+
+static void
+delete_failure_queue ()
+{
+ register struct temp_file *temp;
+
+ for (temp = failure_delete_queue; temp; temp = temp->next)
+ delete_if_ordinary (temp->name);
+}
+
+static void
+clear_failure_queue ()
+{
+ failure_delete_queue = 0;
+}
+
+/* Compute a string to use as the base of all temporary file names.
+ It is substituted for %g. */
+
+static char *
+choose_temp_base_try (try, base)
+ char *try;
+ char *base;
+{
+ char *rv;
+ if (base)
+ rv = base;
+ else if (try == (char *)0)
+ rv = 0;
+ else if (access (try, R_OK | W_OK) != 0)
+ rv = 0;
+ else
+ rv = try;
+ return rv;
+}
+
+static void
+choose_temp_base ()
+{
+ char *base = 0;
+ int len;
+
+ base = choose_temp_base_try (getenv ("TMPDIR"), base);
+ base = choose_temp_base_try (getenv ("TMP"), base);
+ base = choose_temp_base_try (getenv ("TEMP"), base);
+
+#ifdef P_tmpdir
+ base = choose_temp_base_try (P_tmpdir, base);
+#endif
+
+ base = choose_temp_base_try ("/usr/tmp", base);
+ base = choose_temp_base_try ("/tmp", base);
+
+ /* If all else fails, use the current directory! */
+ if (base == (char *)0)
+ base = "./";
+
+ len = strlen (base);
+ temp_filename = xmalloc (len + sizeof("/ccXXXXXX") + 1);
+ strcpy (temp_filename, base);
+ if (len > 0 && temp_filename[len-1] != '/')
+ temp_filename[len++] = '/';
+ strcpy (temp_filename + len, "ccXXXXXX");
+
+ mktemp (temp_filename);
+ temp_filename_length = strlen (temp_filename);
+ if (temp_filename_length == 0)
+ abort ();
+}
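+
+/* Expansion sketch (values hypothetical): with TMPDIR=/var/tmp the
+ code above produces temp_filename "/var/tmp/ccXXXXXX", which mktemp
+ rewrites in place to something like "/var/tmp/cca01234". A spec
+ fragment "%g.s" then names the temporary "/var/tmp/cca01234.s". */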
+
+
+/* Routine to add variables to the environment. We do this to pass
+ the pathname of the gcc driver, and the directories searched, to the
+ collect2 program, which is being run as ld. This way, we can be
+ sure of executing the right compiler when collect2 wants to build
+ constructors and destructors. Since the environment variables we
+ use come from an obstack, we don't have to worry about allocating
+ space for them. */
+
+#ifndef HAVE_PUTENV
+
+void
+putenv (str)
+ char *str;
+{
+#ifndef VMS /* nor about VMS */
+
+ extern char **environ;
+ char **old_environ = environ;
+ char **envp;
+ int num_envs = 0;
+ int name_len = 1;
+ int str_len = strlen (str);
+ char *p = str;
+ int ch;
+
+ while ((ch = *p++) != '\0' && ch != '=')
+ name_len++;
+
+ if (!ch)
+ abort ();
+
+ /* Search for an existing environment variable to replace, and
+ count the total number of environment variables. */
+ for (envp = old_environ; *envp; envp++)
+ {
+ num_envs++;
+ if (!strncmp (str, *envp, name_len))
+ {
+ *envp = str;
+ return;
+ }
+ }
+
+ /* Add a new environment variable */
+ environ = (char **) xmalloc (sizeof (char *) * (num_envs+2));
+ *environ = str;
+ bcopy ((char *) old_environ, (char *) (environ + 1),
+ sizeof (char *) * (num_envs+1));
+
+#endif /* VMS */
+}
+
+#endif /* HAVE_PUTENV */
+
+
+/* Rebuild the COMPILER_PATH and LIBRARY_PATH environment variables for collect. */
+
+static void
+putenv_from_prefixes (paths, env_var)
+ struct path_prefix *paths;
+ char *env_var;
+{
+ int suffix_len = (machine_suffix) ? strlen (machine_suffix) : 0;
+ int just_suffix_len
+ = (just_machine_suffix) ? strlen (just_machine_suffix) : 0;
+ int first_time = TRUE;
+ struct prefix_list *pprefix;
+
+ obstack_grow (&collect_obstack, env_var, strlen (env_var));
+
+ for (pprefix = paths->plist; pprefix != 0; pprefix = pprefix->next)
+ {
+ int len = strlen (pprefix->prefix);
+
+ if (machine_suffix
+ && is_directory (pprefix->prefix, machine_suffix, 0))
+ {
+ if (!first_time)
+ obstack_1grow (&collect_obstack, PATH_SEPARATOR);
+
+ first_time = FALSE;
+ obstack_grow (&collect_obstack, pprefix->prefix, len);
+ obstack_grow (&collect_obstack, machine_suffix, suffix_len);
+ }
+
+ if (just_machine_suffix
+ && pprefix->require_machine_suffix == 2
+ && is_directory (pprefix->prefix, just_machine_suffix, 0))
+ {
+ if (!first_time)
+ obstack_1grow (&collect_obstack, PATH_SEPARATOR);
+
+ first_time = FALSE;
+ obstack_grow (&collect_obstack, pprefix->prefix, len);
+ obstack_grow (&collect_obstack, just_machine_suffix,
+ just_suffix_len);
+ }
+
+ if (!pprefix->require_machine_suffix)
+ {
+ if (!first_time)
+ obstack_1grow (&collect_obstack, PATH_SEPARATOR);
+
+ first_time = FALSE;
+ obstack_grow (&collect_obstack, pprefix->prefix, len);
+ }
+ }
+ obstack_1grow (&collect_obstack, '\0');
+ putenv (obstack_finish (&collect_obstack));
+}
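+
+/* Result sketch (paths hypothetical): with machine_suffix
+ "i386-unknown-freebsd/2.6.0/" and a startfile_prefix list holding
+ "/usr/local/lib/gcc-lib/" and "/usr/lib/",
+
+ putenv_from_prefixes (&startfile_prefix, "LIBRARY_PATH=");
+
+ exports something like
+ LIBRARY_PATH=/usr/local/lib/gcc-lib/i386-unknown-freebsd/2.6.0/:/usr/lib/
+ with PATH_SEPARATOR between the entries, for collect2 to read back. */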
+
+
+/* Search for NAME using the prefix list PPREFIX. MODE is passed to
+ access to check permissions.
+ Return 0 if not found, otherwise return its name, allocated with malloc. */
+
+static char *
+find_a_file (pprefix, name, mode)
+ struct path_prefix *pprefix;
+ char *name;
+ int mode;
+{
+ char *temp;
+ char *file_suffix = ((mode & X_OK) != 0 ? EXECUTABLE_SUFFIX : "");
+ struct prefix_list *pl;
+ int len = pprefix->max_len + strlen (name) + strlen (file_suffix) + 1;
+
+ if (machine_suffix)
+ len += strlen (machine_suffix);
+
+ temp = xmalloc (len);
+
+ /* Determine the filename to execute (special case for absolute paths). */
+
+ if (*name == '/')
+ {
+ if (access (name, mode) == 0)
+ {
+ strcpy (temp, name);
+ return temp;
+ }
+ }
+ else
+ for (pl = pprefix->plist; pl; pl = pl->next)
+ {
+ if (machine_suffix)
+ {
+ /* Some systems have a suffix for executable files.
+ So try appending that first. */
+ if (file_suffix[0] != 0)
+ {
+ strcpy (temp, pl->prefix);
+ strcat (temp, machine_suffix);
+ strcat (temp, name);
+ strcat (temp, file_suffix);
+ if (access (temp, mode) == 0)
+ {
+ if (pl->used_flag_ptr != 0)
+ *pl->used_flag_ptr = 1;
+ return temp;
+ }
+ }
+
+ /* Now try just the name. */
+ strcpy (temp, pl->prefix);
+ strcat (temp, machine_suffix);
+ strcat (temp, name);
+ if (access (temp, mode) == 0)
+ {
+ if (pl->used_flag_ptr != 0)
+ *pl->used_flag_ptr = 1;
+ return temp;
+ }
+ }
+
+ /* Certain prefixes are tried with just the machine type,
+ not the version. This is used for finding as, ld, etc. */
+ if (just_machine_suffix && pl->require_machine_suffix == 2)
+ {
+ /* Some systems have a suffix for executable files.
+ So try appending that first. */
+ if (file_suffix[0] != 0)
+ {
+ strcpy (temp, pl->prefix);
+ strcat (temp, just_machine_suffix);
+ strcat (temp, name);
+ strcat (temp, file_suffix);
+ if (access (temp, mode) == 0)
+ {
+ if (pl->used_flag_ptr != 0)
+ *pl->used_flag_ptr = 1;
+ return temp;
+ }
+ }
+
+ strcpy (temp, pl->prefix);
+ strcat (temp, just_machine_suffix);
+ strcat (temp, name);
+ if (access (temp, mode) == 0)
+ {
+ if (pl->used_flag_ptr != 0)
+ *pl->used_flag_ptr = 1;
+ return temp;
+ }
+ }
+
+ /* Certain prefixes can't be used without the machine suffix
+ when the machine or version is explicitly specified. */
+ if (!pl->require_machine_suffix)
+ {
+ /* Some systems have a suffix for executable files.
+ So try appending that first. */
+ if (file_suffix[0] != 0)
+ {
+ strcpy (temp, pl->prefix);
+ strcat (temp, name);
+ strcat (temp, file_suffix);
+ if (access (temp, mode) == 0)
+ {
+ if (pl->used_flag_ptr != 0)
+ *pl->used_flag_ptr = 1;
+ return temp;
+ }
+ }
+
+ strcpy (temp, pl->prefix);
+ strcat (temp, name);
+ if (access (temp, mode) == 0)
+ {
+ if (pl->used_flag_ptr != 0)
+ *pl->used_flag_ptr = 1;
+ return temp;
+ }
+ }
+ }
+
+ free (temp);
+ return 0;
+}
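+
+/* Probe-order sketch: for name "as" and a hypothetical machine_suffix
+ "i386-unknown-freebsd/2.6.0/", each prefix pl is tried with access()
+ in roughly this order before moving on to the next prefix:
+
+ pl->prefix + machine_suffix + "as" + EXECUTABLE_SUFFIX
+ pl->prefix + machine_suffix + "as"
+ pl->prefix + just_machine_suffix + "as" (require_machine_suffix == 2)
+ pl->prefix + "as" (require_machine_suffix == 0)
+
+ the EXECUTABLE_SUFFIX forms being attempted first only when that
+ suffix is non-empty. */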
+
+/* Add an entry for PREFIX in PLIST. If FIRST is set, it goes
+ at the start of the list, otherwise it goes at the end.
+
+ If WARN is nonzero, we will warn if no file is found
+ through this prefix. WARN should point to an int
+ which will be set to 1 if this entry is used.
+
+ REQUIRE_MACHINE_SUFFIX is 1 if this prefix can't be used without
+ the complete value of machine_suffix.
+ 2 means try both machine_suffix and just_machine_suffix. */
+
+static void
+add_prefix (pprefix, prefix, first, require_machine_suffix, warn)
+ struct path_prefix *pprefix;
+ char *prefix;
+ int first;
+ int require_machine_suffix;
+ int *warn;
+{
+ struct prefix_list *pl, **prev;
+ int len;
+
+ if (!first && pprefix->plist)
+ {
+ for (pl = pprefix->plist; pl->next; pl = pl->next)
+ ;
+ prev = &pl->next;
+ }
+ else
+ prev = &pprefix->plist;
+
+ /* Keep track of the longest prefix */
+
+ len = strlen (prefix);
+ if (len > pprefix->max_len)
+ pprefix->max_len = len;
+
+ pl = (struct prefix_list *) xmalloc (sizeof (struct prefix_list));
+ pl->prefix = save_string (prefix, len);
+ pl->require_machine_suffix = require_machine_suffix;
+ pl->used_flag_ptr = warn;
+ if (warn)
+ *warn = 0;
+
+ if (*prev)
+ pl->next = *prev;
+ else
+ pl->next = (struct prefix_list *) 0;
+ *prev = pl;
+}
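+
+/* Usage sketch: the call in process_command below,
+
+ add_prefix (&exec_prefix, standard_exec_prefix, 0, 2, NULL_PTR);
+
+ appends "/usr/local/lib/gcc-lib/" to the executable search list with
+ require_machine_suffix == 2, so find_a_file will try it both with
+ machine_suffix and with just_machine_suffix. */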
+
+/* Print warnings for any prefixes in the list PPREFIX that were not used. */
+
+static void
+unused_prefix_warnings (pprefix)
+ struct path_prefix *pprefix;
+{
+ struct prefix_list *pl = pprefix->plist;
+
+ while (pl)
+ {
+ if (pl->used_flag_ptr != 0 && !*pl->used_flag_ptr)
+ {
+ error ("file path prefix `%s' never used",
+ pl->prefix);
+ /* Prevent duplicate warnings. */
+ *pl->used_flag_ptr = 1;
+ }
+ pl = pl->next;
+ }
+}
+
+/* Get rid of all prefixes built up so far in *PLISTP. */
+
+static void
+free_path_prefix (pprefix)
+ struct path_prefix *pprefix;
+{
+ struct prefix_list *pl = pprefix->plist;
+ struct prefix_list *temp;
+
+ while (pl)
+ {
+ temp = pl;
+ pl = pl->next;
+ free (temp->prefix);
+ free ((char *) temp);
+ }
+ pprefix->plist = (struct prefix_list *) 0;
+}
+
+/* stdin file number. */
+#define STDIN_FILE_NO 0
+
+/* stdout file number. */
+#define STDOUT_FILE_NO 1
+
+/* value of `pipe': port index for reading. */
+#define READ_PORT 0
+
+/* value of `pipe': port index for writing. */
+#define WRITE_PORT 1
+
+/* Pipe waiting from last process, to be used as input for the next one.
+ Value is STDIN_FILE_NO if no pipe is waiting
+ (i.e. the next command is the first of a group). */
+
+static int last_pipe_input;
+
+/* Fork one piped subcommand. FUNC is the system call to use
+ (either execv or execvp). ARGV is the arg vector to use.
+ NOT_LAST is nonzero if this is not the last subcommand
+ (i.e. its output should be piped to the next one.) */
+
+#ifndef OS2
+#ifdef __MSDOS__
+
+/* Declare these to avoid compilation error. They won't be called. */
+int execv(const char *a, const char **b){}
+int execvp(const char *a, const char **b){}
+
+static int
+pexecute (search_flag, program, argv, not_last)
+ int search_flag;
+ char *program;
+ char *argv[];
+ int not_last;
+{
+ char *scmd, *rf;
+ FILE *argfile;
+ int i, el = search_flag ? 0 : 4;
+
+ scmd = (char *)malloc (strlen (program) + strlen (temp_filename) + 6 + el);
+ rf = scmd + strlen(program) + 2 + el;
+ sprintf (scmd, "%s%s @%s.gp", program,
+ (search_flag ? "" : ".exe"), temp_filename);
+ argfile = fopen (rf, "w");
+ if (argfile == 0)
+ pfatal_with_name (rf);
+
+ for (i=1; argv[i]; i++)
+ {
+ char *cp;
+ for (cp = argv[i]; *cp; cp++)
+ {
+ if (*cp == '"' || *cp == '\'' || *cp == '\\' || isspace (*cp))
+ fputc ('\\', argfile);
+ fputc (*cp, argfile);
+ }
+ fputc ('\n', argfile);
+ }
+ fclose (argfile);
+
+ i = system (scmd);
+
+ remove (rf);
+
+ if (i == -1)
+ {
+ perror_exec (program);
+ return MIN_FATAL_STATUS << 8;
+ }
+
+ return i << 8;
+}
+
+#else /* not __MSDOS__ */
+
+static int
+pexecute (search_flag, program, argv, not_last)
+ int search_flag;
+ char *program;
+ char *argv[];
+ int not_last;
+{
+ int (*func)() = (search_flag ? execv : execvp);
+ int pid;
+ int pdes[2];
+ int input_desc = last_pipe_input;
+ int output_desc = STDOUT_FILE_NO;
+ int retries, sleep_interval;
+
+ /* If this isn't the last process, make a pipe for its output,
+ and record it as waiting to be the input to the next process. */
+
+ if (not_last)
+ {
+ if (pipe (pdes) < 0)
+ pfatal_with_name ("pipe");
+ output_desc = pdes[WRITE_PORT];
+ last_pipe_input = pdes[READ_PORT];
+ }
+ else
+ last_pipe_input = STDIN_FILE_NO;
+
+ /* Fork a subprocess; wait and retry if it fails. */
+ sleep_interval = 1;
+ for (retries = 0; retries < 4; retries++)
+ {
+ pid = vfork ();
+ if (pid >= 0)
+ break;
+ sleep (sleep_interval);
+ sleep_interval *= 2;
+ }
+
+ switch (pid)
+ {
+ case -1:
+#ifdef vfork
+ pfatal_with_name ("fork");
+#else
+ pfatal_with_name ("vfork");
+#endif
+ /* NOTREACHED */
+ return 0;
+
+ case 0: /* child */
+ /* Move the input and output pipes into place, if necessary. */
+ if (input_desc != STDIN_FILE_NO)
+ {
+ close (STDIN_FILE_NO);
+ dup (input_desc);
+ close (input_desc);
+ }
+ if (output_desc != STDOUT_FILE_NO)
+ {
+ close (STDOUT_FILE_NO);
+ dup (output_desc);
+ close (output_desc);
+ }
+
+ /* Close the parent's descs that aren't wanted here. */
+ if (last_pipe_input != STDIN_FILE_NO)
+ close (last_pipe_input);
+
+ /* Exec the program. */
+ (*func) (program, argv);
+ perror_exec (program);
+ exit (-1);
+ /* NOTREACHED */
+ return 0;
+
+ default:
+ /* In the parent, after forking.
+ Close the descriptors that we made for this child. */
+ if (input_desc != STDIN_FILE_NO)
+ close (input_desc);
+ if (output_desc != STDOUT_FILE_NO)
+ close (output_desc);
+
+ /* Return child's process number. */
+ return pid;
+ }
+}
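+
+/* Plumbing sketch for `cpp | cc1' under -pipe: the first pexecute call
+ (not_last != 0) makes a pipe, hands pdes[WRITE_PORT] to the child as
+ stdout, and parks pdes[READ_PORT] in last_pipe_input; the second call
+ (not_last == 0) dups that descriptor onto the child's stdin and
+ resets last_pipe_input to STDIN_FILE_NO for the next command group. */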
+
+#endif /* not __MSDOS__ */
+#else /* not OS2 */
+
+static int
+pexecute (search_flag, program, argv, not_last)
+ int search_flag;
+ char *program;
+ char *argv[];
+ int not_last;
+{
+ return (search_flag ? spawnv : spawnvp) (1, program, argv);
+}
+#endif /* not OS2 */
+
+/* Execute the command specified by the arguments on the current line of spec.
+ When using pipes, this includes several piped-together commands
+ with `|' between them.
+
+ Return 0 if successful, -1 if failed. */
+
+static int
+execute ()
+{
+ int i;
+ int n_commands; /* # of commands. */
+ char *string;
+ struct command
+ {
+ char *prog; /* program name. */
+ char **argv; /* vector of args. */
+ int pid; /* pid of process for this command. */
+ };
+
+ struct command *commands; /* each command buffer with above info. */
+
+ /* Count # of piped commands. */
+ for (n_commands = 1, i = 0; i < argbuf_index; i++)
+ if (strcmp (argbuf[i], "|") == 0)
+ n_commands++;
+
+ /* Get storage for each command. */
+ commands
+ = (struct command *) alloca (n_commands * sizeof (struct command));
+
+ /* Split argbuf into its separate piped processes,
+ and record info about each one.
+ Also search for the programs that are to be run. */
+
+ commands[0].prog = argbuf[0]; /* first command. */
+ commands[0].argv = &argbuf[0];
+ string = find_a_file (&exec_prefix, commands[0].prog, X_OK);
+ if (string)
+ commands[0].argv[0] = string;
+
+ for (n_commands = 1, i = 0; i < argbuf_index; i++)
+ if (strcmp (argbuf[i], "|") == 0)
+ { /* each command. */
+#ifdef __MSDOS__
+ fatal ("-pipe not supported under MS-DOS");
+#endif
+ argbuf[i] = 0; /* termination of command args. */
+ commands[n_commands].prog = argbuf[i + 1];
+ commands[n_commands].argv = &argbuf[i + 1];
+ string = find_a_file (&exec_prefix, commands[n_commands].prog, X_OK);
+ if (string)
+ commands[n_commands].argv[0] = string;
+ n_commands++;
+ }
+
+ argbuf[argbuf_index] = 0;
+
+ /* If -v, print what we are about to do, and maybe query. */
+
+ if (verbose_flag)
+ {
+ /* Print each piped command as a separate line. */
+ for (i = 0; i < n_commands ; i++)
+ {
+ char **j;
+
+ for (j = commands[i].argv; *j; j++)
+ fprintf (stderr, " %s", *j);
+
+ /* Print a pipe symbol after all but the last command. */
+ if (i + 1 != n_commands)
+ fprintf (stderr, " |");
+ fprintf (stderr, "\n");
+ }
+ fflush (stderr);
+#ifdef DEBUG
+ fprintf (stderr, "\nGo ahead? (y or n) ");
+ fflush (stderr);
+ i = getchar ();
+ if (i != '\n')
+ while (getchar () != '\n') ;
+ if (i != 'y' && i != 'Y')
+ return 0;
+#endif /* DEBUG */
+ }
+
+ /* Run each piped subprocess. */
+
+ last_pipe_input = STDIN_FILE_NO;
+ for (i = 0; i < n_commands; i++)
+ {
+ char *string = commands[i].argv[0];
+
+ commands[i].pid = pexecute (string != commands[i].prog,
+ string, commands[i].argv,
+ i + 1 < n_commands);
+
+ if (string != commands[i].prog)
+ free (string);
+ }
+
+ execution_count++;
+
+ /* Wait for all the subprocesses to finish.
+ We don't care what order they finish in;
+ we know that N_COMMANDS waits will get them all. */
+
+ {
+ int ret_code = 0;
+
+ for (i = 0; i < n_commands; i++)
+ {
+ int status;
+ int pid;
+ char *prog = "unknown";
+
+#ifdef __MSDOS__
+ status = pid = commands[i].pid;
+#else
+ pid = wait (&status);
+#endif
+ if (pid < 0)
+ abort ();
+
+ if (status != 0)
+ {
+ int j;
+ for (j = 0; j < n_commands; j++)
+ if (commands[j].pid == pid)
+ prog = commands[j].prog;
+
+ if ((status & 0x7F) != 0)
+ {
+ /* Count the signal before reporting it, since fatal does not return. */
+ signal_count++;
+ fatal ("Internal compiler error: program %s got fatal signal %d",
+ prog, (status & 0x7F));
+ }
+ if (((status & 0xFF00) >> 8) >= MIN_FATAL_STATUS)
+ ret_code = -1;
+ }
+ }
+ return ret_code;
+ }
+}
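+
+/* Status-decoding sketch for the wait loop above: (status & 0x7F) is
+ the terminating signal number, so a nonzero value means the
+ subprocess died on a signal; ((status & 0xFF00) >> 8) is the exit
+ code, and any exit code >= MIN_FATAL_STATUS makes execute return -1
+ so the driver stops processing this file. */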
+
+/* Find all the switches given to us
+ and make a vector describing them.
+ The elements of the vector are strings, one per switch given.
+ If a switch uses following arguments, then the `part1' field
+ is the switch itself and the `args' field
+ is a null-terminated vector containing the following arguments.
+ The `live_cond' field is 1 if the switch is true in a conditional spec,
+ -1 if false (overridden by a later switch), and is initialized to zero.
+ The `valid' field is nonzero if any spec has looked at this switch;
+ if it remains zero at the end of the run, it must be meaningless. */
+
+struct switchstr
+{
+ char *part1;
+ char **args;
+ int live_cond;
+ int valid;
+};
+
+static struct switchstr *switches;
+
+static int n_switches;
+
+struct infile
+{
+ char *name;
+ char *language;
+};
+
+/* Also a vector of input files specified. */
+
+static struct infile *infiles;
+
+static int n_infiles;
+
+/* And a vector of corresponding output files is made up later. */
+
+static char **outfiles;
+
+/* Create the vector `switches' and its contents.
+ Store its length in `n_switches'. */
+
+static void
+process_command (argc, argv)
+ int argc;
+ char **argv;
+{
+ register int i;
+ char *temp;
+ char *spec_lang = 0;
+ int last_language_n_infiles;
+
+ gcc_exec_prefix = getenv ("GCC_EXEC_PREFIX");
+
+ n_switches = 0;
+ n_infiles = 0;
+
+ /* Figure compiler version from version string. */
+
+ compiler_version = save_string (version_string, strlen (version_string));
+ for (temp = compiler_version; *temp; ++temp)
+ {
+ if (*temp == ' ')
+ {
+ *temp = '\0';
+ break;
+ }
+ }
+
+ /* Set up the default search paths. */
+
+ if (gcc_exec_prefix)
+ {
+ add_prefix (&exec_prefix, gcc_exec_prefix, 0, 0, NULL_PTR);
+ add_prefix (&startfile_prefix, gcc_exec_prefix, 0, 0, NULL_PTR);
+ }
+
+ /* COMPILER_PATH and LIBRARY_PATH have values
+ that are lists of directory names with colons. */
+
+ temp = getenv ("COMPILER_PATH");
+ if (temp)
+ {
+ char *startp, *endp;
+ char *nstore = (char *) alloca (strlen (temp) + 3);
+
+ startp = endp = temp;
+ while (1)
+ {
+ if (*endp == PATH_SEPARATOR || *endp == 0)
+ {
+ strncpy (nstore, startp, endp-startp);
+ if (endp == startp)
+ {
+ strcpy (nstore, "./");
+ }
+ else if (endp[-1] != '/')
+ {
+ nstore[endp-startp] = '/';
+ nstore[endp-startp+1] = 0;
+ }
+ else
+ nstore[endp-startp] = 0;
+ add_prefix (&exec_prefix, nstore, 0, 0, NULL_PTR);
+ if (*endp == 0)
+ break;
+ endp = startp = endp + 1;
+ }
+ else
+ endp++;
+ }
+ }
+
+ temp = getenv ("LIBRARY_PATH");
+ if (temp)
+ {
+ char *startp, *endp;
+ char *nstore = (char *) alloca (strlen (temp) + 3);
+
+ startp = endp = temp;
+ while (1)
+ {
+ if (*endp == PATH_SEPARATOR || *endp == 0)
+ {
+ strncpy (nstore, startp, endp-startp);
+ if (endp == startp)
+ {
+ strcpy (nstore, "./");
+ }
+ else if (endp[-1] != '/')
+ {
+ nstore[endp-startp] = '/';
+ nstore[endp-startp+1] = 0;
+ }
+ else
+ nstore[endp-startp] = 0;
+ add_prefix (&startfile_prefix, nstore, 0, 0, NULL_PTR);
+ if (*endp == 0)
+ break;
+ endp = startp = endp + 1;
+ }
+ else
+ endp++;
+ }
+ }
+
+ /* Use LPATH like LIBRARY_PATH (for the CMU build program). */
+ temp = getenv ("LPATH");
+ if (temp)
+ {
+ char *startp, *endp;
+ char *nstore = (char *) alloca (strlen (temp) + 3);
+
+ startp = endp = temp;
+ while (1)
+ {
+ if (*endp == PATH_SEPARATOR || *endp == 0)
+ {
+ strncpy (nstore, startp, endp-startp);
+ if (endp == startp)
+ {
+ strcpy (nstore, "./");
+ }
+ else if (endp[-1] != '/')
+ {
+ nstore[endp-startp] = '/';
+ nstore[endp-startp+1] = 0;
+ }
+ else
+ nstore[endp-startp] = 0;
+ add_prefix (&startfile_prefix, nstore, 0, 0, NULL_PTR);
+ if (*endp == 0)
+ break;
+ endp = startp = endp + 1;
+ }
+ else
+ endp++;
+ }
+ }
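+
+ /* Splitting sketch (value hypothetical): COMPILER_PATH="/a:/b/:" is
+ broken by the loops above into the prefixes "/a/", "/b/" and "./" --
+ a trailing slash is added when missing, and an empty component
+ stands for the current directory. */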
+
+ /* Convert new-style -- options to old-style. */
+ translate_options (&argc, &argv);
+
+ /* Scan argv twice. Here, the first time, just count how many switches
+ there will be in their vector, and how many input files in theirs.
+ Here we also parse the switches that cc itself uses (e.g. -v). */
+
+ for (i = 1; i < argc; i++)
+ {
+ if (! strcmp (argv[i], "-dumpspecs"))
+ {
+ printf ("*asm:\n%s\n\n", asm_spec);
+ printf ("*asm_final:\n%s\n\n", asm_final_spec);
+ printf ("*cpp:\n%s\n\n", cpp_spec);
+ printf ("*cc1:\n%s\n\n", cc1_spec);
+ printf ("*cc1plus:\n%s\n\n", cc1plus_spec);
+ printf ("*endfile:\n%s\n\n", endfile_spec);
+ printf ("*link:\n%s\n\n", link_spec);
+ printf ("*lib:\n%s\n\n", lib_spec);
+ printf ("*startfile:\n%s\n\n", startfile_spec);
+ printf ("*switches_need_spaces:\n%s\n\n", switches_need_spaces);
+ printf ("*signed_char:\n%s\n\n", signed_char_spec);
+ printf ("*predefines:\n%s\n\n", cpp_predefines);
+ printf ("*cross_compile:\n%d\n\n", cross_compile);
+ printf ("*multilib:\n%s\n\n", multilib_select);
+
+ exit (0);
+ }
+ else if (! strcmp (argv[i], "-dumpversion"))
+ {
+ printf ("%s\n", version_string);
+ exit (0);
+ }
+ else if (! strcmp (argv[i], "-print-libgcc-file-name"))
+ print_file_name = "libgcc.a";
+ else if (! strncmp (argv[i], "-print-file-name=", 17))
+ print_file_name = argv[i] + 17;
+ else if (! strncmp (argv[i], "-print-prog-name=", 17))
+ print_prog_name = argv[i] + 17;
+ else if (! strcmp (argv[i], "-print-multi-lib"))
+ print_multi_lib = 1;
+ else if (! strcmp (argv[i], "-print-multi-directory"))
+ print_multi_directory = 1;
+ else if (! strcmp (argv[i], "-Xlinker"))
+ {
+ /* Pass the argument of this option to the linker when we link. */
+
+ if (i + 1 == argc)
+ fatal ("argument to `-Xlinker' is missing");
+
+ n_linker_options++;
+ if (!linker_options)
+ linker_options
+ = (char **) xmalloc (n_linker_options * sizeof (char **));
+ else
+ linker_options
+ = (char **) xrealloc (linker_options,
+ n_linker_options * sizeof (char **));
+
+ linker_options[n_linker_options - 1] = argv[++i];
+ }
+ else if (! strncmp (argv[i], "-Wl,", 4))
+ {
+ int prev, j;
+ /* Pass the rest of this option to the linker when we link. */
+
+ n_linker_options++;
+ if (!linker_options)
+ linker_options
+ = (char **) xmalloc (n_linker_options * sizeof (char **));
+ else
+ linker_options
+ = (char **) xrealloc (linker_options,
+ n_linker_options * sizeof (char **));
+
+ /* Split the argument at commas. */
+ prev = 4;
+ for (j = 4; argv[i][j]; j++)
+ if (argv[i][j] == ',')
+ {
+ linker_options[n_linker_options - 1]
+ = save_string (argv[i] + prev, j - prev);
+ n_linker_options++;
+ linker_options
+ = (char **) xrealloc (linker_options,
+ n_linker_options * sizeof (char **));
+ prev = j + 1;
+ }
+ /* Record the part after the last comma. */
+ linker_options[n_linker_options - 1] = argv[i] + prev;
+ }
+ else if (! strncmp (argv[i], "-Wa,", 4))
+ {
+ int prev, j;
+ /* Pass the rest of this option to the assembler. */
+
+ n_assembler_options++;
+ if (!assembler_options)
+ assembler_options
+ = (char **) xmalloc (n_assembler_options * sizeof (char **));
+ else
+ assembler_options
+ = (char **) xrealloc (assembler_options,
+ n_assembler_options * sizeof (char **));
+
+ /* Split the argument at commas. */
+ prev = 4;
+ for (j = 4; argv[i][j]; j++)
+ if (argv[i][j] == ',')
+ {
+ assembler_options[n_assembler_options - 1]
+ = save_string (argv[i] + prev, j - prev);
+ n_assembler_options++;
+ assembler_options
+ = (char **) xrealloc (assembler_options,
+ n_assembler_options * sizeof (char **));
+ prev = j + 1;
+ }
+ /* Record the part after the last comma. */
+ assembler_options[n_assembler_options - 1] = argv[i] + prev;
+ }
+ else if (argv[i][0] == '+' && argv[i][1] == 'e')
+ /* The +e options to the C++ front-end. */
+ n_switches++;
+ else if (argv[i][0] == '-' && argv[i][1] != 0 && argv[i][1] != 'l')
+ {
+ register char *p = &argv[i][1];
+ register int c = *p;
+
+ switch (c)
+ {
+ case 'b':
+ if (p[1] == 0 && i + 1 == argc)
+ fatal ("argument to `-b' is missing");
+ if (p[1] == 0)
+ spec_machine = argv[++i];
+ else
+ spec_machine = p + 1;
+ break;
+
+ case 'B':
+ {
+ int *temp = (int *) xmalloc (sizeof (int));
+ char *value;
+ if (p[1] == 0 && i + 1 == argc)
+ fatal ("argument to `-B' is missing");
+ if (p[1] == 0)
+ value = argv[++i];
+ else
+ value = p + 1;
+ add_prefix (&exec_prefix, value, 1, 0, temp);
+ add_prefix (&startfile_prefix, value, 1, 0, temp);
+ add_prefix (&include_prefix, concat (value, "include", ""),
+ 1, 0, 0);
+
+ /* As a kludge, if the arg is "[foo/]stageN/", just add
+ "[foo/]stageN/../include" to the include prefix. */
+ {
+ int len = strlen (value);
+ if ((len == 7 || (len > 7 && value[len - 8] == '/'))
+ && strncmp (value + len - 7, "stage", 5) == 0
+ && isdigit (value[len - 2])
+ && value[len - 1] == '/')
+ add_prefix (&include_prefix,
+ concat (value, "../include", ""), 1, 0, 0);
+ }
+ }
+ break;
+
+ case 'v': /* Print our subcommands and print versions. */
+ n_switches++;
+ /* If they do anything other than exactly `-v', don't set
+ verbose_flag; rather, continue on to give the error. */
+ if (p[1] != 0)
+ break;
+ verbose_flag++;
+ break;
+
+ case 'V':
+ if (p[1] == 0 && i + 1 == argc)
+ fatal ("argument to `-V' is missing");
+ if (p[1] == 0)
+ spec_version = argv[++i];
+ else
+ spec_version = p + 1;
+ compiler_version = spec_version;
+ break;
+
+ case 's':
+ if (!strcmp (p, "save-temps"))
+ {
+ save_temps_flag = 1;
+ n_switches++;
+ break;
+ }
+ default:
+ n_switches++;
+
+ if (SWITCH_TAKES_ARG (c) > (p[1] != 0))
+ i += SWITCH_TAKES_ARG (c) - (p[1] != 0);
+ else if (WORD_SWITCH_TAKES_ARG (p))
+ i += WORD_SWITCH_TAKES_ARG (p);
+ }
+ }
+ else
+ n_infiles++;
+ }
+
+ /* Set up the search paths before we go looking for config files. */
+
+ /* These come before the md prefixes so that we will find gcc's subcommands
+ (such as cpp) rather than those of the host system. */
+ /* Use 2 as fourth arg meaning try just the machine as a suffix,
+ as well as trying the machine and the version. */
+ add_prefix (&exec_prefix, standard_exec_prefix, 0, 2, NULL_PTR);
+ add_prefix (&exec_prefix, standard_exec_prefix_1, 0, 2, NULL_PTR);
+
+ add_prefix (&startfile_prefix, standard_exec_prefix, 0, 1, NULL_PTR);
+ add_prefix (&startfile_prefix, standard_exec_prefix_1, 0, 1, NULL_PTR);
+
+ tooldir_prefix = concat (tooldir_base_prefix, spec_machine, "/");
+
+ /* If tooldir is relative, base it on exec_prefix. A relative
+ tooldir lets us move the installed tree as a unit.
+
+ If GCC_EXEC_PREFIX is defined, then we want to add two relative
+ directories, so that we can search both the user specified directory
+ and the standard place. */
+
+ if (*tooldir_prefix != '/')
+ {
+ if (gcc_exec_prefix)
+ {
+ char *gcc_exec_tooldir_prefix
+ = concat (concat (gcc_exec_prefix, spec_machine, "/"),
+ concat (spec_version, "/", tooldir_prefix),
+ "");
+
+ add_prefix (&exec_prefix, concat (gcc_exec_tooldir_prefix, "bin", "/"),
+ 0, 0, NULL_PTR);
+ add_prefix (&startfile_prefix, concat (gcc_exec_tooldir_prefix, "lib", "/"),
+ 0, 0, NULL_PTR);
+ }
+
+ tooldir_prefix = concat (concat (standard_exec_prefix, spec_machine, "/"),
+ concat (spec_version, "/", tooldir_prefix),
+ "");
+ }
+
+ add_prefix (&exec_prefix, concat (tooldir_prefix, "bin", "/"),
+ 0, 0, NULL_PTR);
+ add_prefix (&startfile_prefix, concat (tooldir_prefix, "lib", "/"),
+ 0, 0, NULL_PTR);
+
+ /* More prefixes are enabled in main, after we read the specs file
+ and determine whether this is cross-compilation or not. */
+
+
+ /* Then create the space for the vectors and scan again. */
+
+ switches = ((struct switchstr *)
+ xmalloc ((n_switches + 1) * sizeof (struct switchstr)));
+ infiles = (struct infile *) xmalloc ((n_infiles + 1) * sizeof (struct infile));
+ n_switches = 0;
+ n_infiles = 0;
+ last_language_n_infiles = -1;
+
+ /* This time, copy the text of each switch and store a pointer
+ to the copy in the vector of switches.
+ Store all the infiles in their vector. */
+
+ for (i = 1; i < argc; i++)
+ {
+ /* Just skip the switches that were handled by the preceding loop. */
+ if (!strcmp (argv[i], "-Xlinker"))
+ i++;
+ else if (! strncmp (argv[i], "-Wl,", 4))
+ ;
+ else if (! strncmp (argv[i], "-Wa,", 4))
+ ;
+ else if (! strcmp (argv[i], "-print-libgcc-file-name"))
+ ;
+ else if (! strncmp (argv[i], "-print-file-name=", 17))
+ ;
+ else if (! strncmp (argv[i], "-print-prog-name=", 17))
+ ;
+ else if (! strcmp (argv[i], "-print-multi-lib"))
+ ;
+ else if (! strcmp (argv[i], "-print-multi-directory"))
+ ;
+ else if (argv[i][0] == '+' && argv[i][1] == 'e')
+ {
+ /* Compensate for the +e options to the C++ front-end;
+ they're there simply for cfront call-compatibility. We do
+ some magic in default_compilers to pass them down properly.
+ Note we deliberately start at the `+' here, to avoid passing
+ -e0 or -e1 down into the linker. */
+ switches[n_switches].part1 = &argv[i][0];
+ switches[n_switches].args = 0;
+ switches[n_switches].live_cond = 0;
+ switches[n_switches].valid = 0;
+ n_switches++;
+ }
+ else if (argv[i][0] == '-' && argv[i][1] != 0 && argv[i][1] != 'l')
+ {
+ register char *p = &argv[i][1];
+ register int c = *p;
+
+ if (c == 'B' || c == 'b' || c == 'V')
+ {
+ /* Skip a separate arg, if any. */
+ if (p[1] == 0)
+ i++;
+ continue;
+ }
+ if (c == 'x')
+ {
+ if (p[1] == 0 && i + 1 == argc)
+ fatal ("argument to `-x' is missing");
+ if (p[1] == 0)
+ spec_lang = argv[++i];
+ else
+ spec_lang = p + 1;
+ if (! strcmp (spec_lang, "none"))
+ /* Suppress the warning if -xnone comes after the last input file,
+ because alternate command interfaces like g++ might find it
+ useful to place -xnone after each input file. */
+ spec_lang = 0;
+ else
+ last_language_n_infiles = n_infiles;
+ continue;
+ }
+ switches[n_switches].part1 = p;
+ /* Deal with option arguments in separate argv elements. */
+ if ((SWITCH_TAKES_ARG (c) > (p[1] != 0))
+ || WORD_SWITCH_TAKES_ARG (p))
+ {
+ int j = 0;
+ int n_args = WORD_SWITCH_TAKES_ARG (p);
+
+ if (n_args == 0)
+ {
+ /* Count only the option arguments in separate argv elements. */
+ n_args = SWITCH_TAKES_ARG (c) - (p[1] != 0);
+ }
+ if (i + n_args >= argc)
+ fatal ("argument to `-%s' is missing", p);
+ switches[n_switches].args
+ = (char **) xmalloc ((n_args + 1) * sizeof (char *));
+ while (j < n_args)
+ switches[n_switches].args[j++] = argv[++i];
+ /* Null-terminate the vector. */
+ switches[n_switches].args[j] = 0;
+ }
+ else if (*switches_need_spaces != 0 && (c == 'o' || c == 'L'))
+ {
+ /* On some systems, ld cannot handle -o or -L without space.
+ So split the -o or -L from its argument. */
+ switches[n_switches].part1 = (c == 'o' ? "o" : "L");
+ switches[n_switches].args = (char **) xmalloc (2 * sizeof (char *));
+ switches[n_switches].args[0] = xmalloc (strlen (p));
+ strcpy (switches[n_switches].args[0], &p[1]);
+ switches[n_switches].args[1] = 0;
+ }
+ else
+ switches[n_switches].args = 0;
+
+ switches[n_switches].live_cond = 0;
+ switches[n_switches].valid = 0;
+ /* This is always valid, since gcc.c itself understands it. */
+ if (!strcmp (p, "save-temps"))
+ switches[n_switches].valid = 1;
+ n_switches++;
+ }
+ else
+ {
+ if ((argv[i][0] != '-' || argv[i][1] != 'l')
+ && strcmp (argv[i], "-")
+ && access (argv[i], R_OK) < 0)
+ {
+ perror_with_name (argv[i]);
+ error_count++;
+ }
+ else
+ {
+ infiles[n_infiles].language = spec_lang;
+ infiles[n_infiles++].name = argv[i];
+ }
+ }
+ }
+
+ if (n_infiles == last_language_n_infiles && spec_lang != 0)
+ error ("Warning: `-x %s' after last input file has no effect", spec_lang);
+
+ switches[n_switches].part1 = 0;
+ infiles[n_infiles].name = 0;
+
+ /* If we have a GCC_EXEC_PREFIX envvar, modify it for cpp's sake. */
+ if (gcc_exec_prefix)
+ {
+ temp = (char *) xmalloc (strlen (gcc_exec_prefix) + strlen (spec_version)
+ + strlen (spec_machine) + 3);
+ strcpy (temp, gcc_exec_prefix);
+ strcat (temp, spec_machine);
+ strcat (temp, "/");
+ strcat (temp, spec_version);
+ strcat (temp, "/");
+ gcc_exec_prefix = temp;
+ }
+}
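+
+/* Resulting vectors, sketched for the hypothetical command line
+ `cc -O -o prog main.c -lm': switches[] holds part1 "O" with no args
+ and part1 "o" with args {"prog", 0}; infiles[] holds "main.c" and
+ "-lm" -- the argv[i][1] != 'l' tests above deliberately route -l
+ options into infiles[] so their position among the files is kept. */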
+
+/* Process a spec string, accumulating and running commands. */
+
+/* These variables describe the input file name.
+ input_file_number is the index on outfiles of this file,
+ so that the output file name can be stored for later use by %o.
+ input_basename is the start of the part of the input file
+ sans all directory names, and basename_length is the number
+ of characters starting there excluding the suffix .c or whatever. */
+
+static char *input_filename;
+static int input_file_number;
+static int input_filename_length;
+static int basename_length;
+static char *input_basename;
+static char *input_suffix;
+
+/* These are variables used within do_spec and do_spec_1. */
+
+/* Nonzero if an arg has been started and not yet terminated
+ (with space, tab or newline). */
+static int arg_going;
+
+/* Nonzero means %d or %g has been seen; the next arg to be terminated
+ is a temporary file name. */
+static int delete_this_arg;
+
+/* Nonzero means %w has been seen; the next arg to be terminated
+ is the output file name of this compilation. */
+static int this_is_output_file;
+
+/* Nonzero means %s has been seen; the next arg to be terminated
+ is the name of a library file and we should try the standard
+ search dirs for it. */
+static int this_is_library_file;
+
+/* Nonzero means that the input of this command is coming from a pipe. */
+static int input_from_pipe;
+
+/* Process the spec SPEC and run the commands specified therein.
+ Returns 0 if the spec is successfully processed; -1 if failed. */
+
+static int
+do_spec (spec)
+ char *spec;
+{
+ int value;
+
+ clear_args ();
+ arg_going = 0;
+ delete_this_arg = 0;
+ this_is_output_file = 0;
+ this_is_library_file = 0;
+ input_from_pipe = 0;
+
+ value = do_spec_1 (spec, 0, NULL_PTR);
+
+ /* Force out any unfinished command.
+ If -pipe, this forces out the last command if it ended in `|'. */
+ if (value == 0)
+ {
+ if (argbuf_index > 0 && !strcmp (argbuf[argbuf_index - 1], "|"))
+ argbuf_index--;
+
+ if (argbuf_index > 0)
+ value = execute ();
+ }
+
+ return value;
+}
+
+/* Process the sub-spec SPEC as a portion of a larger spec.
+ This is like processing a whole spec except that we do
+ not initialize at the beginning and we do not supply a
+ newline by default at the end.
+ INSWITCH nonzero means don't process %-sequences in SPEC;
+ in this case, % is treated as an ordinary character.
+ This is used while substituting switches.
+ INSWITCH nonzero also causes SPC not to terminate an argument.
+
+ Value is zero unless a line was finished
+ and the command on that line reported an error. */
+
+static int
+do_spec_1 (spec, inswitch, soft_matched_part)
+ char *spec;
+ int inswitch;
+ char *soft_matched_part;
+{
+ register char *p = spec;
+ register int c;
+ int i;
+ char *string;
+ int value;
+
+ while (c = *p++)
+ /* If substituting a switch, treat all chars like letters.
+ Otherwise, NL, SPC, TAB and % are special. */
+ switch (inswitch ? 'a' : c)
+ {
+ case '\n':
+ /* End of line: finish any pending argument,
+ then run the pending command if one has been started. */
+ if (arg_going)
+ {
+ obstack_1grow (&obstack, 0);
+ string = obstack_finish (&obstack);
+ if (this_is_library_file)
+ string = find_file (string);
+ store_arg (string, delete_this_arg, this_is_output_file);
+ if (this_is_output_file)
+ outfiles[input_file_number] = string;
+ }
+ arg_going = 0;
+
+ if (argbuf_index > 0 && !strcmp (argbuf[argbuf_index - 1], "|"))
+ {
+ int i;
+ for (i = 0; i < n_switches; i++)
+ if (!strcmp (switches[i].part1, "pipe"))
+ break;
+
+ /* A `|' before the newline means use a pipe here,
+ but only if -pipe was specified.
+ Otherwise, execute now and don't pass the `|' as an arg. */
+ if (i < n_switches)
+ {
+ input_from_pipe = 1;
+ switches[i].valid = 1;
+ break;
+ }
+ else
+ argbuf_index--;
+ }
+
+ if (argbuf_index > 0)
+ {
+ value = execute ();
+ if (value)
+ return value;
+ }
+ /* Reinitialize for a new command, and for a new argument. */
+ clear_args ();
+ arg_going = 0;
+ delete_this_arg = 0;
+ this_is_output_file = 0;
+ this_is_library_file = 0;
+ input_from_pipe = 0;
+ break;
+
+ case '|':
+ /* End any pending argument. */
+ if (arg_going)
+ {
+ obstack_1grow (&obstack, 0);
+ string = obstack_finish (&obstack);
+ if (this_is_library_file)
+ string = find_file (string);
+ store_arg (string, delete_this_arg, this_is_output_file);
+ if (this_is_output_file)
+ outfiles[input_file_number] = string;
+ }
+
+ /* Use pipe */
+ obstack_1grow (&obstack, c);
+ arg_going = 1;
+ break;
+
+ case '\t':
+ case ' ':
+ /* Space or tab ends an argument if one is pending. */
+ if (arg_going)
+ {
+ obstack_1grow (&obstack, 0);
+ string = obstack_finish (&obstack);
+ if (this_is_library_file)
+ string = find_file (string);
+ store_arg (string, delete_this_arg, this_is_output_file);
+ if (this_is_output_file)
+ outfiles[input_file_number] = string;
+ }
+ /* Reinitialize for a new argument. */
+ arg_going = 0;
+ delete_this_arg = 0;
+ this_is_output_file = 0;
+ this_is_library_file = 0;
+ break;
+
+ case '%':
+ switch (c = *p++)
+ {
+ case 0:
+ fatal ("Invalid specification! Bug in cc.");
+
+ case 'b':
+ obstack_grow (&obstack, input_basename, basename_length);
+ arg_going = 1;
+ break;
+
+ case 'd':
+ delete_this_arg = 2;
+ break;
+
+ /* Dump out the directories specified with LIBRARY_PATH,
+ followed by the absolute directories
+ that we search for startfiles. */
+ case 'D':
+ {
+ struct prefix_list *pl = startfile_prefix.plist;
+ int bufsize = 100;
+ char *buffer = (char *) xmalloc (bufsize);
+ int idx;
+
+ for (; pl; pl = pl->next)
+ {
+#ifdef RELATIVE_PREFIX_NOT_LINKDIR
+ /* Used on systems which record the specified -L dirs
+ and use them to search for dynamic linking. */
+ /* Relative directories always come from -B,
+ and it is better not to use them for searching
+ at run time. In particular, stage1 loses */
+ if (pl->prefix[0] != '/')
+ continue;
+#endif
+ /* Try subdirectory if there is one. */
+ if (multilib_dir != NULL)
+ {
+ if (machine_suffix)
+ {
+ if (strlen (pl->prefix) + strlen (machine_suffix)
+ >= bufsize)
+ bufsize = (strlen (pl->prefix)
+ + strlen (machine_suffix)) * 2 + 1;
+ buffer = (char *) xrealloc (buffer, bufsize);
+ strcpy (buffer, pl->prefix);
+ strcat (buffer, machine_suffix);
+ if (is_directory (buffer, multilib_dir, 1))
+ {
+ do_spec_1 ("-L", 0, NULL_PTR);
+#ifdef SPACE_AFTER_L_OPTION
+ do_spec_1 (" ", 0, NULL_PTR);
+#endif
+ do_spec_1 (buffer, 1, NULL_PTR);
+ do_spec_1 (multilib_dir, 1, NULL_PTR);
+ /* Make this a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ }
+ if (!pl->require_machine_suffix)
+ {
+ if (is_directory (pl->prefix, multilib_dir, 1))
+ {
+ do_spec_1 ("-L", 0, NULL_PTR);
+#ifdef SPACE_AFTER_L_OPTION
+ do_spec_1 (" ", 0, NULL_PTR);
+#endif
+ do_spec_1 (pl->prefix, 1, NULL_PTR);
+ do_spec_1 (multilib_dir, 1, NULL_PTR);
+ /* Make this a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ }
+ }
+ if (machine_suffix)
+ {
+ if (is_directory (pl->prefix, machine_suffix, 1))
+ {
+ do_spec_1 ("-L", 0, NULL_PTR);
+#ifdef SPACE_AFTER_L_OPTION
+ do_spec_1 (" ", 0, NULL_PTR);
+#endif
+ do_spec_1 (pl->prefix, 1, NULL_PTR);
+ /* Remove slash from machine_suffix. */
+ if (strlen (machine_suffix) >= bufsize)
+ bufsize = strlen (machine_suffix) * 2 + 1;
+ buffer = (char *) xrealloc (buffer, bufsize);
+ strcpy (buffer, machine_suffix);
+ idx = strlen (buffer);
+ if (buffer[idx - 1] == '/')
+ buffer[idx - 1] = 0;
+ do_spec_1 (buffer, 1, NULL_PTR);
+ /* Make this a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ }
+ if (!pl->require_machine_suffix)
+ {
+ if (is_directory (pl->prefix, "", 1))
+ {
+ do_spec_1 ("-L", 0, NULL_PTR);
+#ifdef SPACE_AFTER_L_OPTION
+ do_spec_1 (" ", 0, NULL_PTR);
+#endif
+ /* Remove slash from pl->prefix. */
+ if (strlen (pl->prefix) >= bufsize)
+ bufsize = strlen (pl->prefix) * 2 + 1;
+ buffer = (char *) xrealloc (buffer, bufsize);
+ strcpy (buffer, pl->prefix);
+ idx = strlen (buffer);
+ if (buffer[idx - 1] == '/')
+ buffer[idx - 1] = 0;
+ do_spec_1 (buffer, 1, NULL_PTR);
+ /* Make this a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ }
+ }
+ free (buffer);
+ }
+ break;
+
+ case 'e':
+ /* {...:%efoo} means report an error with `foo' as error message
+ and don't execute any more commands for this file. */
+ {
+ char *q = p;
+ char *buf;
+ while (*p != 0 && *p != '\n') p++;
+ buf = (char *) alloca (p - q + 1);
+ strncpy (buf, q, p - q);
+ buf[p - q] = 0;
+ error ("%s", buf);
+ return -1;
+ }
+ break;
+
+ case 'g':
+ case 'u':
+ case 'U':
+ if (save_temps_flag)
+ {
+ obstack_grow (&obstack, input_basename, basename_length);
+ delete_this_arg = 0;
+ }
+ else
+ {
+#ifdef MKTEMP_EACH_FILE
+ /* ??? This has a problem: the total number of
+ values mktemp can return is limited.
+ That matters for the names of object files.
+ In 2.4, do something about that. */
+ struct temp_name *t;
+ char *suffix = p;
+ while (*p == '.' || isalpha (*p))
+ p++;
+
+ /* See if we already have an association of %g/%u/%U and
+ suffix. */
+ for (t = temp_names; t; t = t->next)
+ if (t->length == p - suffix
+ && strncmp (t->suffix, suffix, p - suffix) == 0
+ && t->unique == (c != 'g'))
+ break;
+
+ /* Make a new association if needed. %u requires one. */
+ if (t == 0 || c == 'u')
+ {
+ if (t == 0)
+ {
+ t = (struct temp_name *) xmalloc (sizeof (struct temp_name));
+ t->next = temp_names;
+ temp_names = t;
+ }
+ t->length = p - suffix;
+ t->suffix = save_string (suffix, p - suffix);
+ t->unique = (c != 'g');
+ choose_temp_base ();
+ t->filename = temp_filename;
+ t->filename_length = temp_filename_length;
+ }
+
+ obstack_grow (&obstack, t->filename, t->filename_length);
+ delete_this_arg = 1;
+#else
+ obstack_grow (&obstack, temp_filename, temp_filename_length);
+ if (c == 'u' || c == 'U')
+ {
+ static int unique;
+ char buff[9];
+ if (c == 'u')
+ unique++;
+ sprintf (buff, "%d", unique);
+ obstack_grow (&obstack, buff, strlen (buff));
+ }
+#endif
+ delete_this_arg = 1;
+ }
+ arg_going = 1;
+ break;
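+
+ /* MKTEMP_EACH_FILE sketch: for a spec fragment "%g.s" the scan
+ above consumes ".s" and uses it only as a key -- each distinct
+ suffix (and every %u) is paired with its own choose_temp_base
+ result, so "%g.s" and "%g.o" name different temp files while a
+ repeated "%g.s" reuses the same one. */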
+
+ case 'i':
+ obstack_grow (&obstack, input_filename, input_filename_length);
+ arg_going = 1;
+ break;
+
+ case 'I':
+ {
+ struct prefix_list *pl = include_prefix.plist;
+
+ if (gcc_exec_prefix)
+ {
+ do_spec_1 ("-iprefix", 1, NULL_PTR);
+ /* Make this a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ do_spec_1 (gcc_exec_prefix, 1, NULL_PTR);
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+
+ for (; pl; pl = pl->next)
+ {
+ do_spec_1 ("-isystem", 1, NULL_PTR);
+ /* Make this a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ do_spec_1 (pl->prefix, 1, NULL_PTR);
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ }
+ break;
+
+ case 'o':
+ {
+ register int f;
+ for (f = 0; f < n_infiles; f++)
+ store_arg (outfiles[f], 0, 0);
+ }
+ break;
+
+ case 's':
+ this_is_library_file = 1;
+ break;
+
+ case 'w':
+ this_is_output_file = 1;
+ break;
+
+ case 'W':
+ {
+ int index = argbuf_index;
+ /* Handle the {...} following the %W. */
+ if (*p != '{')
+ abort ();
+ p = handle_braces (p + 1);
+ if (p == 0)
+ return -1;
+ /* If any args were output, mark the last one for deletion
+ on failure. */
+ if (argbuf_index != index)
+ record_temp_file (argbuf[argbuf_index - 1], 0, 1);
+ break;
+ }
+
+ /* %x{OPTION} records OPTION for %X to output. */
+ case 'x':
+ {
+ char *p1 = p;
+ char *string;
+
+ /* Skip past the option value and make a copy. */
+ if (*p != '{')
+ abort ();
+ while (*p++ != '}')
+ ;
+ string = save_string (p1 + 1, p - p1 - 2);
+
+ /* See if we already recorded this option. */
+ for (i = 0; i < n_linker_options; i++)
+ if (! strcmp (string, linker_options[i]))
+ {
+ free (string);
+ return 0;
+ }
+
+ /* This option is new; add it. */
+ n_linker_options++;
+ if (!linker_options)
+ linker_options
+ = (char **) xmalloc (n_linker_options * sizeof (char **));
+ else
+ linker_options
+ = (char **) xrealloc (linker_options,
+ n_linker_options * sizeof (char **));
+
+ linker_options[n_linker_options - 1] = string;
+ }
+ break;
+
+ /* Dump out the options accumulated previously using %x,
+ -Xlinker and -Wl,. */
+ case 'X':
+ for (i = 0; i < n_linker_options; i++)
+ {
+ do_spec_1 (linker_options[i], 1, NULL_PTR);
+ /* Make each accumulated option a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ break;
+
+ /* Dump out the options accumulated previously using -Wa,. */
+ case 'Y':
+ for (i = 0; i < n_assembler_options; i++)
+ {
+ do_spec_1 (assembler_options[i], 1, NULL_PTR);
+ /* Make each accumulated option a separate argument. */
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ break;
+
+ /* Here are digits and numbers that just process
+ a certain constant string as a spec. */
+
+ case '1':
+ value = do_spec_1 (cc1_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case '2':
+ value = do_spec_1 (cc1plus_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'a':
+ value = do_spec_1 (asm_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'A':
+ value = do_spec_1 (asm_final_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'c':
+ value = do_spec_1 (signed_char_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'C':
+ value = do_spec_1 (cpp_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'E':
+ value = do_spec_1 (endfile_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'l':
+ value = do_spec_1 (link_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'L':
+ value = do_spec_1 (lib_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ case 'p':
+ {
+ char *x = (char *) alloca (strlen (cpp_predefines) + 1);
+ char *buf = x;
+ char *y;
+
+ /* Copy all of the -D options in CPP_PREDEFINES into BUF. */
+ y = cpp_predefines;
+ while (*y != 0)
+ {
+ if (! strncmp (y, "-D", 2))
+ /* Copy the whole option. */
+ while (*y && *y != ' ' && *y != '\t')
+ *x++ = *y++;
+ else if (*y == ' ' || *y == '\t')
+ /* Copy whitespace to the result. */
+ *x++ = *y++;
+ /* Don't copy other options. */
+ else
+ y++;
+ }
+
+ *x = 0;
+
+ value = do_spec_1 (buf, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ }
+ break;
+
+ case 'P':
+ {
+ char *x = (char *) alloca (strlen (cpp_predefines) * 4 + 1);
+ char *buf = x;
+ char *y;
+
+ /* Copy all of CPP_PREDEFINES into BUF,
+ but put __ after every -D and at the end of each arg. */
+ y = cpp_predefines;
+ while (*y != 0)
+ {
+ if (! strncmp (y, "-D", 2))
+ {
+ int flag = 0;
+
+ *x++ = *y++;
+ *x++ = *y++;
+
+ if (*y != '_'
+ || (*(y+1) != '_' && ! isupper (*(y+1))))
+ {
+ /* Stick __ at front of macro name. */
+ *x++ = '_';
+ *x++ = '_';
+ /* Arrange to stick __ at the end as well. */
+ flag = 1;
+ }
+
+ /* Copy the macro name. */
+ while (*y && *y != '=' && *y != ' ' && *y != '\t')
+ *x++ = *y++;
+
+ if (flag)
+ {
+ *x++ = '_';
+ *x++ = '_';
+ }
+
+ /* Copy the value given, if any. */
+ while (*y && *y != ' ' && *y != '\t')
+ *x++ = *y++;
+ }
+ else if (*y == ' ' || *y == '\t')
+ /* Copy whitespace to the result. */
+ *x++ = *y++;
+ /* Don't copy -A options */
+ else
+ y++;
+ }
+ *x++ = ' ';
+
+ /* Copy all of CPP_PREDEFINES into BUF,
+ but put __ after every -D. */
+ y = cpp_predefines;
+ while (*y != 0)
+ {
+ if (! strncmp (y, "-D", 2))
+ {
+ y += 2;
+
+ if (*y != '_'
+ || (*(y+1) != '_' && ! isupper (*(y+1))))
+ {
+ /* Stick -D__ at front of macro name. */
+ *x++ = '-';
+ *x++ = 'D';
+ *x++ = '_';
+ *x++ = '_';
+
+ /* Copy the macro name. */
+ while (*y && *y != '=' && *y != ' ' && *y != '\t')
+ *x++ = *y++;
+
+ /* Copy the value given, if any. */
+ while (*y && *y != ' ' && *y != '\t')
+ *x++ = *y++;
+ }
+ else
+ {
+ /* Do not copy this macro - we have just done it before */
+ while (*y && *y != ' ' && *y != '\t')
+ y++;
+ }
+ }
+ else if (*y == ' ' || *y == '\t')
+ /* Copy whitespace to the result. */
+ *x++ = *y++;
+ /* Don't copy -A options */
+ else
+ y++;
+ }
+ *x++ = ' ';
+
+ /* Copy all of the -A options in CPP_PREDEFINES into BUF. */
+ y = cpp_predefines;
+ while (*y != 0)
+ {
+ if (! strncmp (y, "-A", 2))
+ /* Copy the whole option. */
+ while (*y && *y != ' ' && *y != '\t')
+ *x++ = *y++;
+ else if (*y == ' ' || *y == '\t')
+ /* Copy whitespace to the result. */
+ *x++ = *y++;
+ /* Don't copy other options. */
+ else
+ y++;
+ }
+
+ *x = 0;
+
+ value = do_spec_1 (buf, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ }
+ break;
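+
+ /* Illustration, assuming CPP_PREDEFINES is "-Dunix -Asystem(unix)":
+ %p substitutes just the -D options, i.e. "-Dunix", while %P
+ substitutes, in effect, "-D__unix__ -D__unix -Asystem(unix)":
+ each -D macro once in underscore-protected form, once with only
+ the __ prefix, and the -A assertions passed through unchanged. */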
+
+ case 'S':
+ value = do_spec_1 (startfile_spec, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ break;
+
+ /* Here we define characters other than letters and digits. */
+
+ case '{':
+ p = handle_braces (p);
+ if (p == 0)
+ return -1;
+ break;
+
+ case '%':
+ obstack_1grow (&obstack, '%');
+ break;
+
+ case '*':
+ do_spec_1 (soft_matched_part, 1, NULL_PTR);
+ do_spec_1 (" ", 0, NULL_PTR);
+ break;
+
+ /* Process a string found as the value of a spec given by name.
+ This feature allows individual machine descriptions
+ to add and use their own specs.
+ %[...] modifies -D options the way %P does;
+ %(...) uses the spec unmodified. */
+ case '(':
+ case '[':
+ {
+ char *name = p;
+ struct spec_list *sl;
+ int len;
+
+ /* The string after the %( or %[ is the name of a spec that is to be
+ processed. */
+ while (*p && *p != ')' && *p != ']')
+ p++;
+
+ /* See if it's in the list */
+ for (len = p - name, sl = specs; sl; sl = sl->next)
+ if (strncmp (sl->name, name, len) == 0 && !sl->name[len])
+ {
+ name = sl->spec;
+ break;
+ }
+
+ if (sl)
+ {
+ if (c == '(')
+ {
+ value = do_spec_1 (name, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ }
+ else
+ {
+ char *x = (char *) alloca (strlen (name) * 3 + 3);
+ char *buf = x;
+ char *y = name;
+
+ /* Copy all of NAME into BUF, but put __ after
+ every -D and at the end of each arg. */
+ while (1)
+ {
+ if (! strncmp (y, "-D", 2))
+ {
+ *x++ = '-';
+ *x++ = 'D';
+ *x++ = '_';
+ *x++ = '_';
+ y += 2;
+ }
+ else if (*y == ' ' || *y == 0)
+ {
+ *x++ = '_';
+ *x++ = '_';
+ if (*y == 0)
+ break;
+ else
+ *x++ = *y++;
+ }
+ else
+ *x++ = *y++;
+ }
+ *x = 0;
+
+ value = do_spec_1 (buf, 0, NULL_PTR);
+ if (value != 0)
+ return value;
+ }
+ }
+
+ /* Discard the closing paren or bracket. */
+ if (*p)
+ p++;
+ }
+ break;
+
+ case 'v':
+ {
+ int c1 = *p++; /* Select first or second version number. */
+ char *v = compiler_version;
+ char *q;
+ /* If desired, advance to second version number. */
+ if (c1 == '2')
+ {
+ /* Advance V past the first period. */
+ while (*v != 0 && *v != ' ' && *v != '.')
+ v++;
+ if (*v == '.')
+ v++;
+ }
+ /* Set Q at the next period or at the end. */
+ q = v;
+ while (*q != 0 && *q != ' ' && *q != '.')
+ q++;
+ /* Empty string means zero. */
+ if (v == q)
+ {
+ v = "0";
+ q = v + 1;
+ }
+ /* Put that part into the command. */
+ obstack_grow (&obstack, v, q - v);
+ arg_going = 1;
+ }
+ break;
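+
+ /* For example, with a compiler_version of "2.6.0", %v1 substitutes
+ "2" and %v2 substitutes "6". */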
+
+ case '|':
+ if (input_from_pipe)
+ do_spec_1 ("-", 0, NULL_PTR);
+ break;
+
+ default:
+ abort ();
+ }
+ break;
+
+ case '\\':
+ /* Backslash: treat next character as ordinary. */
+ c = *p++;
+
+ /* fall through */
+ default:
+ /* Ordinary character: put it into the current argument. */
+ obstack_1grow (&obstack, c);
+ arg_going = 1;
+ }
+
+ return 0; /* End of string */
+}
+
+/* Process the sub-spec that P points to, just past the open brace of
+ a %{...} construct. Return a pointer just past the matching close
+ brace, or 0 if we call do_spec_1 and that returns -1. */
+
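+/* For example (illustrative): "%{v:-verbose}" substitutes "-verbose"
+ when -v was given; "%{!v:-quiet}" substitutes "-quiet" when it was
+ not; "%{.c:X}" substitutes X when the input file's suffix is "c";
+ and "%{o*}" substitutes every -o switch together with its argument. */
+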
+static char *
+handle_braces (p)
+ register char *p;
+{
+ register char *q;
+ char *filter;
+ int pipe = 0;
+ int negate = 0;
+ int suffix = 0;
+
+ if (*p == '|')
+ /* A `|' after the open-brace means,
+ if the test fails, output a single minus sign rather than nothing.
+ This is used in %{|!pipe:...}. */
+ pipe = 1, ++p;
+
+ if (*p == '!')
+ /* A `!' after the open-brace negates the condition:
+ succeed if the specified switch is not present. */
+ negate = 1, ++p;
+
+ if (*p == '.')
+ /* A `.' after the open-brace means test against the current suffix. */
+ {
+ if (pipe)
+ abort ();
+
+ suffix = 1;
+ ++p;
+ }
+
+ filter = p;
+ while (*p != ':' && *p != '}') p++;
+ if (*p != '}')
+ {
+ register int count = 1;
+ q = p + 1;
+ while (count > 0)
+ {
+ if (*q == '{')
+ count++;
+ else if (*q == '}')
+ count--;
+ else if (*q == 0)
+ abort ();
+ q++;
+ }
+ }
+ else
+ q = p + 1;
+
+ if (suffix)
+ {
+ int found = (input_suffix != 0
+ && strlen (input_suffix) == p - filter
+ && strncmp (input_suffix, filter, p - filter) == 0);
+
+ if (p[0] == '}')
+ abort ();
+
+ if (negate != found
+ && do_spec_1 (save_string (p + 1, q - p - 2), 0, NULL_PTR) < 0)
+ return 0;
+
+ return q;
+ }
+ else if (p[-1] == '*' && p[0] == '}')
+ {
+ /* Substitute all matching switches as separate args. */
+ register int i;
+ --p;
+ for (i = 0; i < n_switches; i++)
+ if (!strncmp (switches[i].part1, filter, p - filter)
+ && check_live_switch (i, p - filter))
+ give_switch (i, 0);
+ }
+ else
+ {
+ /* Test for presence of the specified switch. */
+ register int i;
+ int present = 0;
+
+ /* If name specified ends in *, as in {x*:...},
+ check for %* and handle that case. */
+ if (p[-1] == '*' && !negate)
+ {
+ int substitution;
+ char *r = p;
+
+ /* First see whether we have %*. */
+ substitution = 0;
+ while (r < q)
+ {
+ if (*r == '%' && r[1] == '*')
+ substitution = 1;
+ r++;
+ }
+ /* If we do, handle that case. */
+ if (substitution)
+ {
+ /* Substitute all matching switches as separate args.
+ But do this by substituting for %*
+ in the text that follows the colon. */
+
+ unsigned hard_match_len = p - filter - 1;
+ char *string = save_string (p + 1, q - p - 2);
+
+ for (i = 0; i < n_switches; i++)
+ if (!strncmp (switches[i].part1, filter, hard_match_len)
+ && check_live_switch (i, -1))
+ {
+ do_spec_1 (string, 0, &switches[i].part1[hard_match_len]);
+ /* Pass any arguments this switch has. */
+ give_switch (i, 1);
+ }
+
+ return q;
+ }
+ }
+
+ /* If name specified ends in *, as in {x*:...},
+ check for presence of any switch name starting with x. */
+ if (p[-1] == '*')
+ {
+ for (i = 0; i < n_switches; i++)
+ {
+ unsigned hard_match_len = p - filter - 1;
+
+ if (!strncmp (switches[i].part1, filter, hard_match_len)
+ && check_live_switch (i, hard_match_len))
+ {
+ present = 1;
+ }
+ }
+ }
+ /* Otherwise, check for presence of exact name specified. */
+ else
+ {
+ for (i = 0; i < n_switches; i++)
+ {
+ if (!strncmp (switches[i].part1, filter, p - filter)
+ && switches[i].part1[p - filter] == 0
+ && check_live_switch (i, -1))
+ {
+ present = 1;
+ break;
+ }
+ }
+ }
+
+ /* If it is as desired (present for %{s...}, absent for %{-s...})
+ then substitute either the switch or the specified
+ conditional text. */
+ if (present != negate)
+ {
+ if (*p == '}')
+ {
+ give_switch (i, 0);
+ }
+ else
+ {
+ if (do_spec_1 (save_string (p + 1, q - p - 2), 0, NULL_PTR) < 0)
+ return 0;
+ }
+ }
+ else if (pipe)
+ {
+ /* Here if a %{|...} conditional fails: output a minus sign,
+ which means "standard output" or "standard input". */
+ do_spec_1 ("-", 0, NULL_PTR);
+ }
+ }
+
+ return q;
+}
+
+/* Return 0 iff switch number SWITCHNUM is obsoleted by a later switch
+ on the command line. PREFIX_LENGTH is the length of XXX in an {XXX*}
+ spec, or -1 if either exact match or %* is used.
+
+ A -O switch is obsoleted by a later -O switch. A -f, -m, or -W
+ switch whose name does not begin with "no-" is obsoleted by a later
+ switch naming the same feature with a "no-" prefix, and vice versa. */
+
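+/* For example, in `gcc -O -fstrength-reduce -O2 -fno-strength-reduce'
+ both -O and -fstrength-reduce are dead: -O is obsoleted by the
+ later -O2, and -fstrength-reduce by the later -fno-strength-reduce. */
+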
+static int
+check_live_switch (switchnum, prefix_length)
+ int switchnum;
+ int prefix_length;
+{
+ char *name = switches[switchnum].part1;
+ int i;
+
+ /* In the common case of {<at-most-one-letter>*}, a negating
+ switch would always match, so ignore that case. We will just
+ send the conflicting switches to the compiler phase. */
+ if (prefix_length >= 0 && prefix_length <= 1)
+ return 1;
+
+ /* If we already processed this switch and determined if it was
+ live or not, return our past determination. */
+ if (switches[switchnum].live_cond != 0)
+ return switches[switchnum].live_cond > 0;
+
+ /* Now search for duplicate in a manner that depends on the name. */
+ switch (*name)
+ {
+ case 'O':
+ for (i = switchnum + 1; i < n_switches; i++)
+ if (switches[i].part1[0] == 'O')
+ {
+ switches[switchnum].valid = 1;
+ switches[switchnum].live_cond = -1;
+ return 0;
+ }
+ break;
+
+ case 'W': case 'f': case 'm':
+ if (! strncmp (name + 1, "no-", 3))
+ {
+ /* We have Xno-YYY, search for XYYY. */
+ for (i = switchnum + 1; i < n_switches; i++)
+ if (switches[i].part1[0] == name[0]
+ && ! strcmp (&switches[i].part1[1], &name[4]))
+ {
+ switches[switchnum].valid = 1;
+ switches[switchnum].live_cond = -1;
+ return 0;
+ }
+ }
+ else
+ {
+ /* We have XYYY, search for Xno-YYY. */
+ for (i = switchnum + 1; i < n_switches; i++)
+ if (switches[i].part1[0] == name[0]
+ && switches[i].part1[1] == 'n'
+ && switches[i].part1[2] == 'o'
+ && switches[i].part1[3] == '-'
+ && !strcmp (&switches[i].part1[4], &name[1]))
+ {
+ switches[switchnum].valid = 1;
+ switches[switchnum].live_cond = -1;
+ return 0;
+ }
+ }
+ break;
+ }
+
+ /* Otherwise the switch is live. */
+ switches[switchnum].live_cond = 1;
+ return 1;
+}
+
+/* Pass a switch to the current accumulating command
+ in the same form that we received it.
+ SWITCHNUM identifies the switch; it is an index into
+ the vector of switches gcc received, which is `switches'.
+ This cannot fail since it never finishes a command line.
+
+ If OMIT_FIRST_WORD is nonzero, then we omit .part1 of the argument. */
+
+static void
+give_switch (switchnum, omit_first_word)
+ int switchnum;
+ int omit_first_word;
+{
+ if (!omit_first_word)
+ {
+ do_spec_1 ("-", 0, NULL_PTR);
+ do_spec_1 (switches[switchnum].part1, 1, NULL_PTR);
+ }
+ do_spec_1 (" ", 0, NULL_PTR);
+ if (switches[switchnum].args != 0)
+ {
+ char **p;
+ for (p = switches[switchnum].args; *p; p++)
+ {
+ do_spec_1 (*p, 1, NULL_PTR);
+ do_spec_1 (" ", 0, NULL_PTR);
+ }
+ }
+ switches[switchnum].valid = 1;
+}
+
+/* Search for a file named NAME trying various prefixes including the
+ user's -B prefix and some standard ones.
+ Return the absolute file name found. If nothing is found, return NAME. */
+
+static char *
+find_file (name)
+ char *name;
+{
+ char *newname;
+
+ /* Try multilib_dir if it is defined. */
+ if (multilib_dir != NULL)
+ {
+ char *try;
+
+ try = (char *) alloca (strlen (multilib_dir) + strlen (name) + 2);
+ strcpy (try, multilib_dir);
+ strcat (try, "/");
+ strcat (try, name);
+
+ newname = find_a_file (&startfile_prefix, try, R_OK);
+
+ /* If we don't find it in the multi library dir, then fall
+ through and look for it in the normal places. */
+ if (newname != NULL)
+ return newname;
+ }
+
+ newname = find_a_file (&startfile_prefix, name, R_OK);
+ return newname ? newname : name;
+}
+
+/* Determine whether a directory exists. If LINKER, return 0 for
+ certain fixed names not needed by the linker. If not LINKER, it is
+ only important to return 0 if the host machine has a small ARG_MAX
+ limit. */
+
+static int
+is_directory (path1, path2, linker)
+ char *path1;
+ char *path2;
+ int linker;
+{
+ int len1 = strlen (path1);
+ int len2 = strlen (path2);
+ char *path = (char *) alloca (3 + len1 + len2);
+ char *cp;
+ struct stat st;
+
+#ifndef SMALL_ARG_MAX
+ if (! linker)
+ return 1;
+#endif
+
+ /* Construct the path from the two parts. Ensure the string ends with "/.".
+ The resulting path will be a directory even if the given path is a
+ symbolic link. */
+ bcopy (path1, path, len1);
+ bcopy (path2, path + len1, len2);
+ cp = path + len1 + len2;
+ if (cp[-1] != '/')
+ *cp++ = '/';
+ *cp++ = '.';
+ *cp = '\0';
+
+ /* Exclude directories that the linker is known to search. */
+ if (linker
+ && ((cp - path == 6 && strcmp (path, "/lib/.") == 0)
+ || (cp - path == 10 && strcmp (path, "/usr/lib/.") == 0)))
+ return 0;
+
+ return (stat (path, &st) >= 0 && S_ISDIR (st.st_mode));
+}
+
+/* On fatal signals, delete all the temporary files. */
+
+static void
+fatal_error (signum)
+ int signum;
+{
+ signal (signum, SIG_DFL);
+ delete_failure_queue ();
+ delete_temp_files ();
+ /* Get the same signal again, this time not handled,
+ so its normal effect occurs. */
+ kill (getpid (), signum);
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ register int i;
+ int j;
+ int value;
+ int linker_was_run = 0;
+ char *explicit_link_files;
+ char *specs_file;
+ char *p;
+
+ p = argv[0] + strlen (argv[0]);
+ while (p != argv[0] && p[-1] != '/') --p;
+ programname = p;
+
+ if (signal (SIGINT, SIG_IGN) != SIG_IGN)
+ signal (SIGINT, fatal_error);
+#ifdef SIGHUP
+ if (signal (SIGHUP, SIG_IGN) != SIG_IGN)
+ signal (SIGHUP, fatal_error);
+#endif
+ if (signal (SIGTERM, SIG_IGN) != SIG_IGN)
+ signal (SIGTERM, fatal_error);
+#ifdef SIGPIPE
+ if (signal (SIGPIPE, SIG_IGN) != SIG_IGN)
+ signal (SIGPIPE, fatal_error);
+#endif
+
+ argbuf_length = 10;
+ argbuf = (char **) xmalloc (argbuf_length * sizeof (char *));
+
+ obstack_init (&obstack);
+
+ /* Set up to remember the pathname of gcc and any options
+ needed for collect. We use argv[0] instead of programname because
+ we need the complete pathname. */
+ obstack_init (&collect_obstack);
+ obstack_grow (&collect_obstack, "COLLECT_GCC=", sizeof ("COLLECT_GCC=")-1);
+ obstack_grow (&collect_obstack, argv[0], strlen (argv[0])+1);
+ putenv (obstack_finish (&collect_obstack));
+
+ /* Choose directory for temp files. */
+
+ choose_temp_base ();
+
+ /* Make a table of what switches there are (switches, n_switches).
+ Make a table of specified input files (infiles, n_infiles).
+ Decode switches that are handled locally. */
+
+ process_command (argc, argv);
+
+ /* Initialize the vector of specs to just the default.
+ This means one element containing 0s, as a terminator. */
+
+ compilers = (struct compiler *) xmalloc (sizeof default_compilers);
+ bcopy ((char *) default_compilers, (char *) compilers,
+ sizeof default_compilers);
+ n_compilers = n_default_compilers;
+
+ /* Read specs from a file if there is one. */
+
+ machine_suffix = concat (spec_machine, "/", concat (spec_version, "/", ""));
+ just_machine_suffix = concat (spec_machine, "/", "");
+
+ specs_file = find_a_file (&startfile_prefix, "specs", R_OK);
+ /* Read the specs file unless it is a default one. */
+ if (specs_file != 0 && strcmp (specs_file, "specs"))
+ read_specs (specs_file);
+
+ /* If not cross-compiling, look for startfiles in the standard places. */
+ /* The fact that these are done here, after reading the specs file,
+ means that the specs file cannot be found in these directories.
+ But that's okay. It should never be there anyway. */
+ if (!cross_compile)
+ {
+#ifdef MD_EXEC_PREFIX
+ add_prefix (&exec_prefix, md_exec_prefix, 0, 0, NULL_PTR);
+ add_prefix (&startfile_prefix, md_exec_prefix, 0, 0, NULL_PTR);
+#endif
+
+#ifdef MD_STARTFILE_PREFIX
+ add_prefix (&startfile_prefix, md_startfile_prefix, 0, 0, NULL_PTR);
+#endif
+
+#ifdef MD_STARTFILE_PREFIX_1
+ add_prefix (&startfile_prefix, md_startfile_prefix_1, 0, 0, NULL_PTR);
+#endif
+
+ /* If standard_startfile_prefix is relative, base it on
+ standard_exec_prefix. This lets us move the installed tree
+ as a unit. If GCC_EXEC_PREFIX is defined, base
+ standard_startfile_prefix on that as well. */
+ if (*standard_startfile_prefix == '/')
+ add_prefix (&startfile_prefix, standard_startfile_prefix, 0, 0,
+ NULL_PTR);
+ else
+ {
+ if (gcc_exec_prefix)
+ add_prefix (&startfile_prefix,
+ concat (gcc_exec_prefix,
+ standard_startfile_prefix,
+ ""),
+ 0, 0, NULL_PTR);
+ add_prefix (&startfile_prefix,
+ concat (standard_exec_prefix,
+ machine_suffix,
+ standard_startfile_prefix),
+ 0, 0, NULL_PTR);
+ }
+
+ add_prefix (&startfile_prefix, standard_startfile_prefix_1, 0, 0,
+ NULL_PTR);
+ add_prefix (&startfile_prefix, standard_startfile_prefix_2, 0, 0,
+ NULL_PTR);
+#if 0 /* Can cause surprises, and one can use -B./ instead. */
+ add_prefix (&startfile_prefix, "./", 0, 1, NULL_PTR);
+#endif
+ }
+
+ /* Now we have the specs.
+ Set the `valid' bits for switches that match anything in any spec. */
+
+ validate_all_switches ();
+
+ /* Now that we have the switches and the specs, set
+ the subdirectory based on the options. */
+ set_multilib_dir ();
+
+ /* Warn about any switches that no pass was interested in. */
+
+ for (i = 0; i < n_switches; i++)
+ if (! switches[i].valid)
+ error ("unrecognized option `-%s'", switches[i].part1);
+
+ /* Obey some of the options. */
+
+ if (print_file_name)
+ {
+ printf ("%s\n", find_file (print_file_name));
+ exit (0);
+ }
+
+ if (print_prog_name)
+ {
+ char *newname = find_a_file (&exec_prefix, print_prog_name, X_OK);
+ printf ("%s\n", (newname ? newname : print_prog_name));
+ exit (0);
+ }
+
+ if (print_multi_lib)
+ {
+ print_multilib_info ();
+ exit (0);
+ }
+
+ if (print_multi_directory)
+ {
+ if (multilib_dir == NULL)
+ printf (".\n");
+ else
+ printf ("%s\n", multilib_dir);
+ exit (0);
+ }
+
+ if (verbose_flag)
+ {
+ fprintf (stderr, "gcc version %s\n", version_string);
+ if (n_infiles == 0)
+ exit (0);
+ }
+
+ if (n_infiles == 0)
+ fatal ("No input files");
+
+ /* Make a place to record the compiler output file names
+ that correspond to the input files. */
+
+ outfiles = (char **) xmalloc (n_infiles * sizeof (char *));
+ bzero ((char *) outfiles, n_infiles * sizeof (char *));
+
+ /* Record which files were specified explicitly as link input. */
+
+ explicit_link_files = xmalloc (n_infiles);
+ bzero (explicit_link_files, n_infiles);
+
+ for (i = 0; i < n_infiles; i++)
+ {
+ register struct compiler *cp = 0;
+ int this_file_error = 0;
+
+ /* Tell do_spec what to substitute for %i. */
+
+ input_filename = infiles[i].name;
+ input_filename_length = strlen (input_filename);
+ input_file_number = i;
+
+ /* Use the same thing in %o, unless cp->spec says otherwise. */
+
+ outfiles[i] = input_filename;
+
+ /* Figure out which compiler from the file's suffix. */
+
+ cp = lookup_compiler (infiles[i].name, input_filename_length,
+ infiles[i].language);
+
+ if (cp)
+ {
+ /* Ok, we found an applicable compiler. Run its spec. */
+ /* First say how much of input_filename to substitute for %b */
+ register char *p;
+ int len;
+
+ input_basename = input_filename;
+ for (p = input_filename; *p; p++)
+ if (*p == '/')
+ input_basename = p + 1;
+
+ /* Find a suffix starting with the last period,
+ and set basename_length to exclude that suffix. */
+ basename_length = strlen (input_basename);
+ p = input_basename + basename_length;
+ while (p != input_basename && *p != '.') --p;
+ if (*p == '.' && p != input_basename)
+ {
+ basename_length = p - input_basename;
+ input_suffix = p + 1;
+ }
+ else
+ input_suffix = "";
+
+ len = 0;
+ for (j = 0; j < sizeof cp->spec / sizeof cp->spec[0]; j++)
+ if (cp->spec[j])
+ len += strlen (cp->spec[j]);
+
+ p = (char *) xmalloc (len + 1);
+
+ len = 0;
+ for (j = 0; j < sizeof cp->spec / sizeof cp->spec[0]; j++)
+ if (cp->spec[j])
+ {
+ strcpy (p + len, cp->spec[j]);
+ len += strlen (cp->spec[j]);
+ }
+
+ value = do_spec (p);
+ free (p);
+ if (value < 0)
+ this_file_error = 1;
+ }
+
+ /* If this file's name does not contain a recognized suffix,
+ record it as explicit linker input. */
+
+ else
+ explicit_link_files[i] = 1;
+
+ /* Clear the delete-on-failure queue, deleting the files in it
+ if this compilation failed. */
+
+ if (this_file_error)
+ {
+ delete_failure_queue ();
+ error_count++;
+ }
+ /* If this compilation succeeded, don't delete those files later. */
+ clear_failure_queue ();
+ }
+
+ /* Run ld to link all the compiler output files. */
+
+ if (error_count == 0)
+ {
+ int tmp = execution_count;
+ int i;
+ int first_time;
+
+ /* Rebuild the COMPILER_PATH and LIBRARY_PATH environment variables
+ for collect. */
+ putenv_from_prefixes (&exec_prefix, "COMPILER_PATH=");
+ putenv_from_prefixes (&startfile_prefix, "LIBRARY_PATH=");
+
+ /* Build COLLECT_GCC_OPTIONS to have all of the options specified to
+ the compiler. */
+ obstack_grow (&collect_obstack, "COLLECT_GCC_OPTIONS=",
+ sizeof ("COLLECT_GCC_OPTIONS=")-1);
+
+ first_time = TRUE;
+ for (i = 0; i < n_switches; i++)
+ {
+ char **args;
+ if (!first_time)
+ obstack_grow (&collect_obstack, " ", 1);
+
+ first_time = FALSE;
+ obstack_grow (&collect_obstack, "-", 1);
+ obstack_grow (&collect_obstack, switches[i].part1,
+ strlen (switches[i].part1));
+
+ for (args = switches[i].args; args && *args; args++)
+ {
+ obstack_grow (&collect_obstack, " ", 1);
+ obstack_grow (&collect_obstack, *args, strlen (*args));
+ }
+ }
+ obstack_grow (&collect_obstack, "\0", 1);
+ putenv (obstack_finish (&collect_obstack));
+
+ value = do_spec (link_command_spec);
+ if (value < 0)
+ error_count = 1;
+ linker_was_run = (tmp != execution_count);
+ }
+
+ /* Warn if a -B option was specified but the prefix was never used. */
+ unused_prefix_warnings (&exec_prefix);
+ unused_prefix_warnings (&startfile_prefix);
+
+ /* If options said don't run linker,
+ complain about input files to be given to the linker. */
+
+ if (! linker_was_run && error_count == 0)
+ for (i = 0; i < n_infiles; i++)
+ if (explicit_link_files[i])
+ error ("%s: linker input file unused since linking not done",
+ outfiles[i]);
+
+ /* Delete some or all of the temporary files we made. */
+
+ if (error_count)
+ delete_failure_queue ();
+ delete_temp_files ();
+
+ exit (error_count > 0 ? (signal_count ? 2 : 1) : 0);
+ /* NOTREACHED */
+ return 0;
+}
+
+/* Find the proper compilation spec for the file name NAME,
+ whose length is LENGTH. LANGUAGE is the specified language,
+ or 0 if none specified. */
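+
+/* For example, the default table maps the ".cc" suffix to the alias
+ entry "@c++"; looking up a ".cc" file finds that alias, which sends
+ us back through this routine to find the real "@c++" entry, whose
+ spec is then copied into a new entry for the ".cc" suffix. */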
+
+static struct compiler *
+lookup_compiler (name, length, language)
+ char *name;
+ int length;
+ char *language;
+{
+ struct compiler *cp;
+
+ /* Look for the language, if one is spec'd. */
+ if (language != 0)
+ {
+ for (cp = compilers + n_compilers - 1; cp >= compilers; cp--)
+ if (cp->suffix[0] == '@'
+ && !strcmp (cp->suffix + 1, language))
+ return cp;
+ error ("language %s not recognized", language);
+ }
+
+ /* Look for a suffix. */
+ for (cp = compilers + n_compilers - 1; cp >= compilers; cp--)
+ {
+ if (/* The suffix `-' matches only the file name `-'. */
+ (!strcmp (cp->suffix, "-") && !strcmp (name, "-"))
+ ||
+ (strlen (cp->suffix) < length
+ /* See if the suffix matches the end of NAME. */
+ && !strcmp (cp->suffix,
+ name + length - strlen (cp->suffix))))
+ {
+ if (cp->spec[0][0] == '@')
+ {
+ struct compiler *new;
+ /* An alias entry maps a suffix to a language.
+ Search for the language; pass 0 for NAME and LENGTH
+ to avoid infinite recursion if language not found.
+ Construct the new compiler spec. */
+ language = cp->spec[0] + 1;
+ new = (struct compiler *) xmalloc (sizeof (struct compiler));
+ new->suffix = cp->suffix;
+ bcopy ((char *) lookup_compiler (NULL_PTR, 0, language)->spec,
+ (char *) new->spec, sizeof new->spec);
+ return new;
+ }
+ /* A non-alias entry: return it. */
+ return cp;
+ }
+ }
+
+ return 0;
+}
+
+char *
+xmalloc (size)
+ unsigned size;
+{
+ register char *value = (char *) malloc (size);
+ if (value == 0)
+ fatal ("virtual memory exhausted");
+ return value;
+}
+
+char *
+xrealloc (ptr, size)
+ char *ptr;
+ unsigned size;
+{
+ register char *value = (char *) realloc (ptr, size);
+ if (value == 0)
+ fatal ("virtual memory exhausted");
+ return value;
+}
+
+/* Return a newly-allocated string whose contents concatenate those of s1, s2, s3. */
+
+static char *
+concat (s1, s2, s3)
+ char *s1, *s2, *s3;
+{
+ int len1 = strlen (s1), len2 = strlen (s2), len3 = strlen (s3);
+ char *result = xmalloc (len1 + len2 + len3 + 1);
+
+ strcpy (result, s1);
+ strcpy (result + len1, s2);
+ strcpy (result + len1 + len2, s3);
+ *(result + len1 + len2 + len3) = 0;
+
+ return result;
+}
+
+static char *
+save_string (s, len)
+ char *s;
+ int len;
+{
+ register char *result = xmalloc (len + 1);
+
+ bcopy (s, result, len);
+ result[len] = 0;
+ return result;
+}
+
+static void
+pfatal_with_name (name)
+ char *name;
+{
+ char *s;
+
+ if (errno < sys_nerr)
+ s = concat ("%s: ", sys_errlist[errno], "");
+ else
+ s = "cannot open %s";
+ fatal (s, name);
+}
+
+static void
+perror_with_name (name)
+ char *name;
+{
+ char *s;
+
+ if (errno < sys_nerr)
+ s = concat ("%s: ", sys_errlist[errno], "");
+ else
+ s = "cannot open %s";
+ error (s, name);
+}
+
+static void
+perror_exec (name)
+ char *name;
+{
+ char *s;
+
+ if (errno < sys_nerr)
+ s = concat ("installation problem, cannot exec %s: ",
+ sys_errlist[errno], "");
+ else
+ s = "installation problem, cannot exec %s";
+ error (s, name);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+#ifdef HAVE_VPRINTF
+
+/* Output an error message and exit. */
+
+static void
+fatal VPROTO((char *format, ...))
+{
+#ifndef __STDC__
+ char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef __STDC__
+ format = va_arg (ap, char*);
+#endif
+
+ fprintf (stderr, "%s: ", programname);
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ delete_temp_files ();
+ exit (1);
+}
+
+static void
+error VPROTO((char *format, ...))
+{
+#ifndef __STDC__
+ char *format;
+#endif
+ va_list ap;
+
+ VA_START (ap, format);
+
+#ifndef __STDC__
+ format = va_arg (ap, char*);
+#endif
+
+ fprintf (stderr, "%s: ", programname);
+ vfprintf (stderr, format, ap);
+ va_end (ap);
+
+ fprintf (stderr, "\n");
+}
+
+#else /* not HAVE_VPRINTF */
+
+static void
+fatal (msg, arg1, arg2)
+ char *msg, *arg1, *arg2;
+{
+ error (msg, arg1, arg2);
+ delete_temp_files ();
+ exit (1);
+}
+
+static void
+error (msg, arg1, arg2)
+ char *msg, *arg1, *arg2;
+{
+ fprintf (stderr, "%s: ", programname);
+ fprintf (stderr, msg, arg1, arg2);
+ fprintf (stderr, "\n");
+}
+
+#endif /* not HAVE_VPRINTF */
+
+
+static void
+validate_all_switches ()
+{
+ struct compiler *comp;
+ register char *p;
+ register char c;
+ struct spec_list *spec;
+
+ for (comp = compilers; comp->spec[0]; comp++)
+ {
+ int i;
+ for (i = 0; i < sizeof comp->spec / sizeof comp->spec[0] && comp->spec[i]; i++)
+ {
+ p = comp->spec[i];
+ while (c = *p++)
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+ }
+ }
+
+ /* Look through the linked list of extra specs read from the specs file. */
+ for (spec = specs; spec ; spec = spec->next)
+ {
+ p = spec->spec;
+ while (c = *p++)
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+ }
+
+ p = link_command_spec;
+ while (c = *p++)
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+
+ /* Now notice switches mentioned in the machine-specific specs. */
+
+ p = asm_spec;
+ while (c = *p++)
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+
+ p = asm_final_spec;
+ while (c = *p++)
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+
+ p = cpp_spec;
+ while (c = *p++)
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+
+ p = signed_char_spec;
+ while (c = *p++)
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+
+ p = cc1_spec;
+ while (c = *p++)
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+
+ p = cc1plus_spec;
+ while (c = *p++)
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+
+ p = link_spec;
+ while (c = *p++)
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+
+ p = lib_spec;
+ while (c = *p++)
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+
+ p = startfile_spec;
+ while (c = *p++)
+ if (c == '%' && *p == '{')
+ /* We have a switch spec. */
+ validate_switches (p + 1);
+}
+
+/* Look at the switch-name that comes after START
+ and mark as valid all supplied switches that match it. */
+
+static void
+validate_switches (start)
+ char *start;
+{
+ register char *p = start;
+ char *filter;
+ register int i;
+ int suffix = 0;
+
+ if (*p == '|')
+ ++p;
+
+ if (*p == '!')
+ ++p;
+
+ if (*p == '.')
+ suffix = 1, ++p;
+
+ filter = p;
+ while (*p != ':' && *p != '}') p++;
+
+ if (suffix)
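+ /* A suffix test names no switch, so there is nothing to validate. */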
+ ;
+ else if (p[-1] == '*')
+ {
+ /* Mark all matching switches as valid. */
+ --p;
+ for (i = 0; i < n_switches; i++)
+ if (!strncmp (switches[i].part1, filter, p - filter))
+ switches[i].valid = 1;
+ }
+ else
+ {
+ /* Mark an exact matching switch as valid. */
+ for (i = 0; i < n_switches; i++)
+ {
+ if (!strncmp (switches[i].part1, filter, p - filter)
+ && switches[i].part1[p - filter] == 0)
+ switches[i].valid = 1;
+ }
+ }
+}
+
+/* Check whether a particular argument was used. */
+
+static int
+used_arg (p, len)
+ char *p;
+ int len;
+{
+ int i;
+
+ for (i = 0; i < n_switches; i++)
+ if (! strncmp (switches[i].part1, p, len)
+ && strlen (switches[i].part1) == len)
+ return 1;
+ return 0;
+}
+
+/* Work out the subdirectory to use based on the options. The format
+ of multilib_select is a list of elements. Each element is a
+ subdirectory name followed by a list of options followed by a
+ semicolon. GCC considers each element in turn. If none of the
+ options beginning with an exclamation point are present, and all of
+ the other options are present, that subdirectory is used. */
+
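+/* For instance, with a hypothetical multilib_select of
+ "msoft-float msoft-float;\n. ;\n": if -msoft-float was given, the
+ first element matches and multilib_dir becomes "msoft-float";
+ otherwise the "." element matches and, being ".", leaves
+ multilib_dir unset. */
+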
+static void
+set_multilib_dir ()
+{
+ char *p = multilib_select;
+ int this_path_len;
+ char *this_path, *this_arg;
+ int failed;
+
+ while (*p != '\0')
+ {
+ /* Ignore newlines. */
+ if (*p == '\n')
+ {
+ ++p;
+ continue;
+ }
+
+ /* Get the initial path. */
+ this_path = p;
+ while (*p != ' ')
+ {
+ if (*p == '\0')
+ abort ();
+ ++p;
+ }
+ this_path_len = p - this_path;
+
+ /* Check the arguments. */
+ failed = 0;
+ ++p;
+ while (*p != ';')
+ {
+ if (*p == '\0')
+ abort ();
+
+ if (failed)
+ {
+ ++p;
+ continue;
+ }
+
+ this_arg = p;
+ while (*p != ' ' && *p != ';')
+ {
+ if (*p == '\0')
+ abort ();
+ ++p;
+ }
+
+ if (*this_arg == '!')
+ failed = used_arg (this_arg + 1, p - (this_arg + 1));
+ else
+ failed = ! used_arg (this_arg, p - this_arg);
+
+ if (*p == ' ')
+ ++p;
+ }
+
+ if (! failed)
+ {
+ if (this_path_len != 1
+ || this_path[0] != '.')
+ {
+ multilib_dir = xmalloc (this_path_len + 1);
+ strncpy (multilib_dir, this_path, this_path_len);
+ multilib_dir[this_path_len] = '\0';
+ }
+ break;
+ }
+
+ ++p;
+ }
+}
+
+/* Print out the multiple library subdirectory selection
+ information. This prints out a series of lines. Each line looks
+ like SUBDIRECTORY;@OPTION@OPTION, with as many options as are
+ required. Only the desired options are printed out; the negative
+ matches (those beginning with an exclamation point) are omitted.
+ The options are printed without a leading dash. There are no
+ spaces, to make it easy to use the information in the shell.
+ Each subdirectory is printed only once. This assumes the ordering
+ generated by the genmultilib script. */
+
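+/* For instance, given the elements "msoft-float msoft-float;" and
+ ". ;" (hypothetical), the output would be the two lines
+ "msoft-float;@msoft-float" and ".;". */
+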
+static void
+print_multilib_info ()
+{
+ char *p = multilib_select;
+ char *last_path = 0, *this_path;
+ int last_path_len = 0, skip;
+
+ while (*p != '\0')
+ {
+ /* Ignore newlines. */
+ if (*p == '\n')
+ {
+ ++p;
+ continue;
+ }
+
+ /* Get the initial path. */
+ this_path = p;
+ while (*p != ' ')
+ {
+ if (*p == '\0')
+ abort ();
+ ++p;
+ }
+
+ /* If this is a duplicate, skip it. */
+ skip = (p - this_path == last_path_len
+ && ! strncmp (last_path, this_path, last_path_len));
+
+ last_path = this_path;
+ last_path_len = p - this_path;
+
+ if (! skip)
+ {
+ char *p1;
+
+ for (p1 = last_path; p1 < p; p1++)
+ putchar (*p1);
+ putchar (';');
+ }
+
+ ++p;
+ while (*p != ';')
+ {
+ int use_arg;
+
+ if (*p == '\0')
+ abort ();
+
+ if (skip)
+ {
+ ++p;
+ continue;
+ }
+
+ use_arg = *p != '!';
+
+ if (use_arg)
+ putchar ('@');
+
+ while (*p != ' ' && *p != ';')
+ {
+ if (*p == '\0')
+ abort ();
+ if (use_arg)
+ putchar (*p);
+ ++p;
+ }
+
+ if (*p == ' ')
+ ++p;
+ }
+
+ if (! skip)
+ putchar ('\n');
+
+ ++p;
+ }
+}
diff --git a/gnu/usr.bin/cc/cc1/Makefile b/gnu/usr.bin/cc/cc1/Makefile
new file mode 100644
index 0000000..667e454
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1/Makefile
@@ -0,0 +1,13 @@
+#
+# $FreeBSD$
+#
+
+PROG = cc1
+SRCS = c-aux-info.c c-convert.c c-decl.c c-iterate.c c-lang.c c-lex.c c-parse.c c-pragma.c c-typeck.c
+BINDIR= /usr/libexec
+NOMAN= 1
+LDDESTDIR+= -L${.CURDIR}/../cc_int/obj
+LDDESTDIR+= -L${.CURDIR}/../cc_int
+LDADD+= -lcc_int
+
+.include <bsd.prog.mk>
diff --git a/gnu/usr.bin/cc/cc1/c-aux-info.c b/gnu/usr.bin/cc/cc1/c-aux-info.c
new file mode 100644
index 0000000..0e7df9b
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1/c-aux-info.c
@@ -0,0 +1,639 @@
+/* Generate information regarding function declarations and definitions based
+ on information stored in GCC's tree structure. This code implements the
+ -aux-info option.
+ Copyright (C) 1989, 1991, 1994 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@netcom.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <stdio.h>
+#include "config.h"
+#include "flags.h"
+#include "tree.h"
+#include "c-tree.h"
+
+extern char* xmalloc ();
+
+enum formals_style_enum {
+ ansi,
+ k_and_r_names,
+ k_and_r_decls
+};
+typedef enum formals_style_enum formals_style;
+
+
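+/* Set as a side effect of gen_type to the source form of the "basic"
+ data type of the entity most recently processed; affix_data_type
+ glues it onto the front of the generated declarator string. */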
+static char* data_type;
+
+static char * concat ();
+static char * concat3 ();
+static char * gen_formal_list_for_type ();
+static int deserves_ellipsis ();
+static char * gen_formal_list_for_func_def ();
+static char * gen_type ();
+static char * gen_decl ();
+void gen_aux_info_record ();
+
+/* Take two strings and mash them together into a newly allocated area. */
+
+static char*
+concat (s1, s2)
+ char* s1;
+ char* s2;
+{
+ int size1, size2;
+ char* ret_val;
+
+ if (!s1)
+ s1 = "";
+ if (!s2)
+ s2 = "";
+
+ size1 = strlen (s1);
+ size2 = strlen (s2);
+ ret_val = xmalloc (size1 + size2 + 1);
+ strcpy (ret_val, s1);
+ strcpy (&ret_val[size1], s2);
+ return ret_val;
+}
+
+/* Take three strings and mash them together into a newly allocated area. */
+
+static char*
+concat3 (s1, s2, s3)
+ char* s1;
+ char* s2;
+ char* s3;
+{
+ int size1, size2, size3;
+ char* ret_val;
+
+ if (!s1)
+ s1 = "";
+ if (!s2)
+ s2 = "";
+ if (!s3)
+ s3 = "";
+
+ size1 = strlen (s1);
+ size2 = strlen (s2);
+ size3 = strlen (s3);
+ ret_val = xmalloc (size1 + size2 + size3 + 1);
+ strcpy (ret_val, s1);
+ strcpy (&ret_val[size1], s2);
+ strcpy (&ret_val[size1+size2], s3);
+ return ret_val;
+}
+
+/* Given a string representing an entire type or an entire declaration
+ which only lacks the actual "data-type" specifier (at its left end),
+ affix the data-type specifier to the left end of the given type
+ specification or object declaration.
+
+ Because of C language weirdness, the data-type specifier (which normally
+ goes in at the very left end) may have to be slipped in just to the
+ right of any leading "const" or "volatile" qualifiers (there may be more
+ than one). Actually this may not be strictly necessary because it seems
+ that GCC (at least) accepts `<data-type> const foo;' and treats it the
+ same as `const <data-type> foo;' but people are accustomed to seeing
+ `const char *foo;' and *not* `char const *foo;' so we try to create types
+ that look as expected. */
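+
+/* For example, given the string "const *foo" and a data_type of
+ "char", the result is "const char *foo", not "char const *foo". */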
+
+static char*
+affix_data_type (type_or_decl)
+ char *type_or_decl;
+{
+ char *p = type_or_decl;
+ char *qualifiers_then_data_type;
+ char saved;
+
+ /* Skip as many leading const's or volatile's as there are. */
+
+ for (;;)
+ {
+ if (!strncmp (p, "volatile ", 9))
+ {
+ p += 9;
+ continue;
+ }
+ if (!strncmp (p, "const ", 6))
+ {
+ p += 6;
+ continue;
+ }
+ break;
+ }
+
+ /* p now points to the place where we can insert the data type. We have to
+ add a blank after the data-type of course. */
+
+ if (p == type_or_decl)
+ return concat3 (data_type, " ", type_or_decl);
+
+ saved = *p;
+ *p = '\0';
+ qualifiers_then_data_type = concat (type_or_decl, data_type);
+ *p = saved;
+ return concat3 (qualifiers_then_data_type, " ", p);
+}
+
+/* Given a tree node which represents some "function type", generate the
+ source code version of a formal parameter list (of some given style) for
+ this function type. Return the whole formal parameter list (including
+ a pair of surrounding parens) as a string. Note that if the style
+ we are currently aiming for is non-ansi, then we just return a pair
+ of empty parens here. */
+
+static char*
+gen_formal_list_for_type (fntype, style)
+ tree fntype;
+ formals_style style;
+{
+ char* formal_list = "";
+ tree formal_type;
+
+ if (style != ansi)
+ return "()";
+
+ formal_type = TYPE_ARG_TYPES (fntype);
+ while (formal_type && TREE_VALUE (formal_type) != void_type_node)
+ {
+ char* this_type;
+
+ if (*formal_list)
+ formal_list = concat (formal_list, ", ");
+
+ this_type = gen_type ("", TREE_VALUE (formal_type), ansi);
+ formal_list =
+ (strlen (this_type))
+ ? concat (formal_list, affix_data_type (this_type))
+ : concat (formal_list, data_type);
+
+ formal_type = TREE_CHAIN (formal_type);
+ }
+
+ /* If we got to here, then we are trying to generate an ANSI style formal
+ parameters list.
+
+ New style prototyped ANSI formal parameter lists should in theory always
+ contain some stuff between the opening and closing parens, even if it is
+ only "void".
+
+ The brutal truth though is that there is lots of old K&R code out there
+ which contains declarations of "pointer-to-function" parameters and
+ these almost never have fully specified formal parameter lists associated
+ with them. That is, the pointer-to-function parameters are declared
+ with just empty parameter lists.
+
+ In cases such as these, protoize should really insert *something* into
+ the vacant parameter lists, but what? It has no basis on which to insert
+ anything in particular.
+
+ Here, we make life easy for protoize by trying to distinguish between
+ K&R empty parameter lists and new-style prototyped parameter lists
+ that actually contain "void". In the latter case we (obviously) want
+ to output the "void" verbatim, and that is what we do. In the former case,
+ we do our best to give protoize something nice to insert.
+
+ This "something nice" should be something that is still legal (when
+ re-compiled) but something that can clearly indicate to the user that
+ more typing information (for the parameter list) should be added (by
+ hand) at some convenient moment.
+
+ The string chosen here is a comment with question marks in it. */
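+
+ /* E.g., a list that really was "(void)" is regenerated as "void",
+ while the empty parens of a K&R pointer-to-function parameter such
+ as `int (*pf) ()' get the question-mark comment as a visible
+ placeholder for protoize to fill in. */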
+
+ if (!*formal_list)
+ {
+ if (TYPE_ARG_TYPES (fntype))
+ /* assert (TREE_VALUE (TYPE_ARG_TYPES (fntype)) == void_type_node); */
+ formal_list = "void";
+ else
+ formal_list = "/* ??? */";
+ }
+ else
+ {
+ /* If there were at least some parameters, and if the formals-types-list
+ petered out to a NULL (i.e. without being terminated by a
+ void_type_node) then we need to tack on an ellipsis. */
+ if (!formal_type)
+ formal_list = concat (formal_list, ", ...");
+ }
+
+ return concat3 (" (", formal_list, ")");
+}
+
+/* For the generation of an ANSI prototype for a function definition, we have
+ to look at the formal parameter list of the function's own "type" to
+ determine if the function's formal parameter list should end with an
+ ellipsis. Given a tree node, the following function will return non-zero
+ if the "function type" parameter list should end with an ellipsis. */
+
+static int
+deserves_ellipsis (fntype)
+ tree fntype;
+{
+ tree formal_type;
+
+ formal_type = TYPE_ARG_TYPES (fntype);
+ while (formal_type && TREE_VALUE (formal_type) != void_type_node)
+ formal_type = TREE_CHAIN (formal_type);
+
+ /* If there were at least some parameters, and if the formals-types-list
+ petered out to a NULL (i.e. without being terminated by a void_type_node)
+ then we need to tack on an ellipsis. */
+
+ return (!formal_type && TYPE_ARG_TYPES (fntype));
+}
+
+/* Generate a parameter list for a function definition (in some given style).
+
+ Note that this routine has to be separate (and different) from the code that
+ generates the prototype parameter lists for function declarations, because
+ in the case of a function declaration, all we have to go on is a tree node
+ representing the function's own "function type". This can tell us the types
+ of all of the formal parameters for the function, but it cannot tell us the
+ actual *names* of each of the formal parameters. We need to output those
+ parameter names for each function definition.
+
+ This routine gets a pointer to a tree node which represents the actual
+ declaration of the given function, and this DECL node has a list of formal
+ parameter (variable) declarations attached to it. These formal parameter
+ (variable) declaration nodes give us the actual names of the formal
+ parameters for the given function definition.
+
+ This routine returns a string which is the source form for the entire
+ function formal parameter list. */
+
+static char*
+gen_formal_list_for_func_def (fndecl, style)
+ tree fndecl;
+ formals_style style;
+{
+ char* formal_list = "";
+ tree formal_decl;
+
+ formal_decl = DECL_ARGUMENTS (fndecl);
+ while (formal_decl)
+ {
+ char *this_formal;
+
+ if (*formal_list && ((style == ansi) || (style == k_and_r_names)))
+ formal_list = concat (formal_list, ", ");
+ this_formal = gen_decl (formal_decl, 0, style);
+ if (style == k_and_r_decls)
+ formal_list = concat3 (formal_list, this_formal, "; ");
+ else
+ formal_list = concat (formal_list, this_formal);
+ formal_decl = TREE_CHAIN (formal_decl);
+ }
+ if (style == ansi)
+ {
+ if (!DECL_ARGUMENTS (fndecl))
+ formal_list = concat (formal_list, "void");
+ if (deserves_ellipsis (TREE_TYPE (fndecl)))
+ formal_list = concat (formal_list, ", ...");
+ }
+ if ((style == ansi) || (style == k_and_r_names))
+ formal_list = concat3 (" (", formal_list, ")");
+ return formal_list;
+}
+
+/* Generate a string which is the source code form for a given type (t). This
+ routine is ugly and complex because the C syntax for declarations is ugly
+ and complex. This routine is straightforward so long as *no* pointer types,
+ array types, or function types are involved.
+
+ In the simple cases, this routine will return the (string) value which was
+ passed in as the "ret_val" argument. Usually, this starts out either as an
+ empty string, or as the name of the declared item (i.e. the formal function
+ parameter variable).
+
+ This routine will also return with the global variable "data_type" set to
+ some string value which is the "basic" data-type of the given complete type.
+ This "data_type" string can be concatenated onto the front of the returned
+ string after this routine returns to its caller.
+
+ In complicated cases involving pointer types, array types, or function
+ types, the C declaration syntax requires an "inside out" approach, i.e. if
+ you have a type which is a "pointer-to-function" type, you need to handle
+ the "pointer" part first, but it also has to be "innermost" (relative to
+ the declaration stuff for the "function" type). Thus, in this case, you
+ must prepend a "(*" and append a ")" to the name of the item (i.e. formal
+ variable). Then you must append and prepend the other info for the
+ "function type" part of the overall type.
+
+ To handle the "innermost precedence" rules of complicated C declarators, we
+ do the following (in this routine). The input parameter called "ret_val"
+ is treated as a "seed". Each time gen_type is called (perhaps recursively)
+ some additional strings may be appended or prepended (or both) to the "seed"
+ string. If yet another (lower) level of the GCC tree exists for the given
+ type (as in the case of a pointer type, an array type, or a function type)
+ then the (wrapped) seed is passed to a (recursive) invocation of gen_type();
+ this recursive invocation may again "wrap" the (new) seed with yet more
+ declarator stuff, by appending, prepending (or both). By the time the
+ recursion bottoms out, the "seed value" at that point will have a value
+ which is (almost) the complete source version of the declarator (except
+ for the data_type info). Thus, this deepest "seed" value is simply passed
+ back up through all of the recursive calls until it is given (as the return
+ value) to the initial caller of the gen_type() routine. All that remains
+ to do at this point is for the initial caller to prepend the "data_type"
+ string onto the returned "seed". */
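+
+/* For example, for a formal parameter `a' declared as `int *a[5]'
+ (array of five pointers to int), the seed "a" is wrapped to "a[5]"
+ at the array level, then to "*a[5]" at the pointer level, and
+ data_type is set to "int"; affixing the data type then yields
+ "int *a[5]". A pointer-to-function formal `f' instead wraps as
+ "f" -> "*f" -> "(*f)" -> "(*f) (...)". */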
+
+static char*
+gen_type (ret_val, t, style)
+ char* ret_val;
+ tree t;
+ formals_style style;
+{
+ tree chain_p;
+
+ if (TYPE_NAME (t) && DECL_NAME (TYPE_NAME (t)))
+ data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t)));
+ else
+ {
+ switch (TREE_CODE (t))
+ {
+ case POINTER_TYPE:
+ if (TYPE_READONLY (t))
+ ret_val = concat ("const ", ret_val);
+ if (TYPE_VOLATILE (t))
+ ret_val = concat ("volatile ", ret_val);
+
+ ret_val = concat ("*", ret_val);
+
+ if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE || TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE)
+ ret_val = concat3 ("(", ret_val, ")");
+
+ ret_val = gen_type (ret_val, TREE_TYPE (t), style);
+
+ return ret_val;
+
+ case ARRAY_TYPE:
+ if (TYPE_SIZE (t) == 0 || TREE_CODE (TYPE_SIZE (t)) != INTEGER_CST)
+ ret_val = gen_type (concat (ret_val, "[]"), TREE_TYPE (t), style);
+ else if (int_size_in_bytes (t) == 0)
+ ret_val = gen_type (concat (ret_val, "[0]"), TREE_TYPE (t), style);
+ else
+ {
+ int size = (int_size_in_bytes (t) / int_size_in_bytes (TREE_TYPE (t)));
+ char buff[16]; /* Big enough for "[%d]" with any int. */
+ sprintf (buff, "[%d]", size);
+ ret_val = gen_type (concat (ret_val, buff),
+ TREE_TYPE (t), style);
+ }
+ break;
+
+ case FUNCTION_TYPE:
+ ret_val = gen_type (concat (ret_val, gen_formal_list_for_type (t, style)), TREE_TYPE (t), style);
+ break;
+
+ case IDENTIFIER_NODE:
+ data_type = IDENTIFIER_POINTER (t);
+ break;
+
+ /* The following three cases are complicated by the fact that a
+ user may do something really stupid, like creating a brand new
+ "anonymous" type specification in a formal argument list (or as
+ part of a function return type specification). For example:
+
+ int f (enum { red, green, blue } color);
+
+ In such cases, we have no name that we can put into the prototype
+ to represent the (anonymous) type. Thus, we have to generate the
+ whole darn type specification. Yuck! */
+
+ case RECORD_TYPE:
+ if (TYPE_NAME (t))
+ data_type = IDENTIFIER_POINTER (TYPE_NAME (t));
+ else
+ {
+ data_type = "";
+ chain_p = TYPE_FIELDS (t);
+ while (chain_p)
+ {
+ data_type = concat (data_type, gen_decl (chain_p, 0, ansi));
+ chain_p = TREE_CHAIN (chain_p);
+ data_type = concat (data_type, "; ");
+ }
+ data_type = concat3 ("{ ", data_type, "}");
+ }
+ data_type = concat ("struct ", data_type);
+ break;
+
+ case UNION_TYPE:
+ if (TYPE_NAME (t))
+ data_type = IDENTIFIER_POINTER (TYPE_NAME (t));
+ else
+ {
+ data_type = "";
+ chain_p = TYPE_FIELDS (t);
+ while (chain_p)
+ {
+ data_type = concat (data_type, gen_decl (chain_p, 0, ansi));
+ chain_p = TREE_CHAIN (chain_p);
+ data_type = concat (data_type, "; ");
+ }
+ data_type = concat3 ("{ ", data_type, "}");
+ }
+ data_type = concat ("union ", data_type);
+ break;
+
+ case ENUMERAL_TYPE:
+ if (TYPE_NAME (t))
+ data_type = IDENTIFIER_POINTER (TYPE_NAME (t));
+ else
+ {
+ data_type = "";
+ chain_p = TYPE_VALUES (t);
+ while (chain_p)
+ {
+ data_type = concat (data_type,
+ IDENTIFIER_POINTER (TREE_PURPOSE (chain_p)));
+ chain_p = TREE_CHAIN (chain_p);
+ if (chain_p)
+ data_type = concat (data_type, ", ");
+ }
+ data_type = concat3 ("{ ", data_type, " }");
+ }
+ data_type = concat ("enum ", data_type);
+ break;
+
+ case TYPE_DECL:
+ data_type = IDENTIFIER_POINTER (DECL_NAME (t));
+ break;
+
+ case INTEGER_TYPE:
+ data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t)));
+ /* Normally, `unsigned' is part of the deal. Not so if it comes
+ with `const' or `volatile'. */
+ if (TREE_UNSIGNED (t) && (TYPE_READONLY (t) || TYPE_VOLATILE (t)))
+ data_type = concat ("unsigned ", data_type);
+ break;
+
+ case REAL_TYPE:
+ data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t)));
+ break;
+
+ case VOID_TYPE:
+ data_type = "void";
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ if (TYPE_READONLY (t))
+ ret_val = concat ("const ", ret_val);
+ if (TYPE_VOLATILE (t))
+ ret_val = concat ("volatile ", ret_val);
+ return ret_val;
+}
+
+/* Generate a string (source) representation of an entire entity declaration
+ (using some particular style for function types).
+
+ The given entity may be either a variable or a function.
+
+ If the "is_func_definition" parameter is non-zero, assume that the thing
+ we are generating a declaration for is a FUNCTION_DECL node which is
+ associated with a function definition. In this case, we can assume that
+ an attached list of DECL nodes for function formal arguments is present. */
+
+static char*
+gen_decl (decl, is_func_definition, style)
+ tree decl;
+ int is_func_definition;
+ formals_style style;
+{
+ char* ret_val;
+
+ if (DECL_NAME (decl))
+ ret_val = IDENTIFIER_POINTER (DECL_NAME (decl));
+ else
+ ret_val = "";
+
+ /* If we are just generating a list of names of formal parameters, we can
+ simply return the formal parameter name (with no typing information
+ attached to it) now. */
+
+ if (style == k_and_r_names)
+ return ret_val;
+
+ /* Note that for the declaration of some entity (either a function or a
+ data object, like for instance a parameter) if the entity itself was
+ declared as either const or volatile, then const and volatile properties
+ are associated with just the declaration of the entity, and *not* with
+ the `type' of the entity. Thus, for such declared entities, we have to
+ generate the qualifiers here. */
+
+ if (TREE_THIS_VOLATILE (decl))
+ ret_val = concat ("volatile ", ret_val);
+ if (TREE_READONLY (decl))
+ ret_val = concat ("const ", ret_val);
+
+ data_type = "";
+
+ /* For FUNCTION_DECL nodes, there are two possible cases here. First, if
+ this FUNCTION_DECL node was generated from a function "definition", then
+ we will have a list of DECL_NODE's, one for each of the function's formal
+ parameters. In this case, we can print out not only the types of each
+ formal, but also each formal's name. In the second case, this
+ FUNCTION_DECL node came from an actual function declaration (and *not*
+ a definition). In this case, we do nothing here because the formal
+ argument type-list will be output later, when the "type" of the function
+ is added to the string we are building. Note that the ANSI-style formal
+ parameter list is considered to be a (suffix) part of the "type" of the
+ function. */
+
+ if (TREE_CODE (decl) == FUNCTION_DECL && is_func_definition)
+ {
+ ret_val = concat (ret_val, gen_formal_list_for_func_def (decl, ansi));
+
+ /* Since we have already added in the formals list stuff, here we don't
+ add the whole "type" of the function we are considering (which
+ would include its parameter-list info), rather, we only add in
+ the "type" of the "type" of the function, which is really just
+ the return-type of the function (and does not include the parameter
+ list info). */
+
+ ret_val = gen_type (ret_val, TREE_TYPE (TREE_TYPE (decl)), style);
+ }
+ else
+ ret_val = gen_type (ret_val, TREE_TYPE (decl), style);
+
+ ret_val = affix_data_type (ret_val);
+
+ if (DECL_REGISTER (decl))
+ ret_val = concat ("register ", ret_val);
+ if (TREE_PUBLIC (decl))
+ ret_val = concat ("extern ", ret_val);
+ if (TREE_CODE (decl) == FUNCTION_DECL && !TREE_PUBLIC (decl))
+ ret_val = concat ("static ", ret_val);
+
+ return ret_val;
+}
+
+extern FILE* aux_info_file;
+
+/* Generate and write a new line of info to the aux-info (.X) file. This
+ routine is called once for each function declaration, and once for each
+ function definition (even the implicit ones). */
+
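+/* For instance, a prototyped declaration of `int f (int)' at line 12
+ of foo.c produces a record whose comment part reads "foo.c:12:NC",
+ followed by a regenerated ANSI declaration such as
+ `extern int f (int);'. */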
+void
+gen_aux_info_record (fndecl, is_definition, is_implicit, is_prototyped)
+ tree fndecl;
+ int is_definition;
+ int is_implicit;
+ int is_prototyped;
+{
+ if (flag_gen_aux_info)
+ {
+ static int compiled_from_record = 0;
+
+ /* Each output .X file must have a header line. Write one now if we
+ have not yet done so. */
+
+ if (! compiled_from_record++)
+ {
+ /* The first line tells which directory file names are relative to.
+ Currently, -aux-info works only for files in the working
+ directory, so just use a `.' as a placeholder for now. */
+ fprintf (aux_info_file, "/* compiled from: . */\n");
+ }
+
+ /* Write the actual line of auxiliary info. */
+
+ fprintf (aux_info_file, "/* %s:%d:%c%c */ %s;",
+ DECL_SOURCE_FILE (fndecl),
+ DECL_SOURCE_LINE (fndecl),
+ (is_implicit) ? 'I' : (is_prototyped) ? 'N' : 'O',
+ (is_definition) ? 'F' : 'C',
+ gen_decl (fndecl, is_definition, ansi));
+
+ /* If this is a function definition, we need to also write
+ out an old-style (i.e. K&R) function header, just in case the user
+ wants to run unprotoize. */
+
+ if (is_definition)
+ {
+ fprintf (aux_info_file, " /*%s %s*/",
+ gen_formal_list_for_func_def (fndecl, k_and_r_names),
+ gen_formal_list_for_func_def (fndecl, k_and_r_decls));
+ }
+
+ fprintf (aux_info_file, "\n");
+ }
+}
diff --git a/gnu/usr.bin/cc/cc1/c-convert.c b/gnu/usr.bin/cc/cc1/c-convert.c
new file mode 100644
index 0000000..cfa590c
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1/c-convert.c
@@ -0,0 +1,95 @@
+/* Language-level data type conversion for GNU C.
+ Copyright (C) 1987, 1988, 1991 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file contains the functions for converting C expressions
+ to different data types. The only entry point is `convert'.
+ Every language front end must have a `convert' function
+ but what kind of conversions it does will depend on the language. */
+
+#include "config.h"
+#include "tree.h"
+#include "flags.h"
+#include "convert.h"
+
+/* Change of width--truncation and extension of integers or reals--
+ is represented with NOP_EXPR. Proper functioning of many things
+ assumes that no other conversions can be NOP_EXPRs.
+
+ Conversion between integer and pointer is represented with CONVERT_EXPR.
+ Converting integer to real uses FLOAT_EXPR
+ and real to integer uses FIX_TRUNC_EXPR.
+
+ Here is a list of all the functions that assume that widening and
+ narrowing is always done with a NOP_EXPR:
+ In convert.c, convert_to_integer.
+ In c-typeck.c, build_binary_op (boolean ops), and truthvalue_conversion.
+ In expr.c: expand_expr, for operands of a MULT_EXPR.
+ In fold-const.c: fold.
+ In tree.c: get_narrower and get_unwidened. */
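+
+/* Thus, for example, converting an `int' to a `long' yields a
+ NOP_EXPR, an `int' to a pointer type yields a CONVERT_EXPR, an
+ `int' to a `double' yields a FLOAT_EXPR, and a `double' to an
+ `int' yields a FIX_TRUNC_EXPR. */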
+
+/* Subroutines of `convert'. */
+
+
+
+/* Create an expression whose value is that of EXPR,
+ converted to type TYPE. The TREE_TYPE of the value
+ is always TYPE. This function implements all reasonable
+ conversions; callers should filter out those that are
+ not permitted by the language being compiled. */
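+
+/* Illustrative sketch (an assumption, not part of the original file):
+ for `d' of type double, `convert (integer_type_node, d)' dispatches to
+ convert_to_integer and yields a FIX_TRUNC_EXPR, while converting an
+ int `i' with `convert (double_type_node, i)' dispatches to
+ convert_to_real and yields a FLOAT_EXPR, as described above. */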
+
+tree
+convert (type, expr)
+ tree type, expr;
+{
+ register tree e = expr;
+ register enum tree_code code = TREE_CODE (type);
+
+ if (type == TREE_TYPE (expr)
+ || TREE_CODE (expr) == ERROR_MARK)
+ return expr;
+ if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr)))
+ return fold (build1 (NOP_EXPR, type, expr));
+ if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK)
+ return error_mark_node;
+ if (TREE_CODE (TREE_TYPE (expr)) == VOID_TYPE)
+ {
+ error ("void value not ignored as it ought to be");
+ return error_mark_node;
+ }
+ if (code == VOID_TYPE)
+ return build1 (CONVERT_EXPR, type, e);
+#if 0
+ /* This is incorrect. A truncation can't be stripped this way.
+ Extensions will be stripped by the use of get_unwidened. */
+ if (TREE_CODE (expr) == NOP_EXPR)
+ return convert (type, TREE_OPERAND (expr, 0));
+#endif
+ if (code == INTEGER_TYPE || code == ENUMERAL_TYPE)
+ return fold (convert_to_integer (type, e));
+ if (code == POINTER_TYPE)
+ return fold (convert_to_pointer (type, e));
+ if (code == REAL_TYPE)
+ return fold (convert_to_real (type, e));
+ if (code == COMPLEX_TYPE)
+ return fold (convert_to_complex (type, e));
+
+ error ("conversion to non-scalar type requested");
+ return error_mark_node;
+}
diff --git a/gnu/usr.bin/cc/cc1/c-decl.c b/gnu/usr.bin/cc/cc1/c-decl.c
new file mode 100644
index 0000000..c1a8dc9
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1/c-decl.c
@@ -0,0 +1,6797 @@
+/* Process declarations and variables for C compiler.
+ Copyright (C) 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Process declarations and symbol lookup for C front end.
+ Also constructs types; the standard scalar types at initialization,
+ and structure, union, array and enum types when they are declared. */
+
+/* ??? not all decl nodes are given the most useful possible
+ line numbers. For example, the CONST_DECLs for enum values. */
+
+#include "config.h"
+#include "tree.h"
+#include "flags.h"
+#include "c-tree.h"
+#include "c-lex.h"
+#include <stdio.h>
+
+/* In grokdeclarator, distinguish syntactic contexts of declarators. */
+enum decl_context
+{ NORMAL, /* Ordinary declaration */
+ FUNCDEF, /* Function definition */
+ PARM, /* Declaration of parm before function body */
+ FIELD, /* Declaration inside struct or union */
+ BITFIELD, /* Likewise but with specified width */
+ TYPENAME}; /* Typename (inside cast or sizeof) */
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef SHORT_TYPE_SIZE
+#define SHORT_TYPE_SIZE (BITS_PER_UNIT * MIN ((UNITS_PER_WORD + 1) / 2, 2))
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_LONG_TYPE_SIZE
+#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef WCHAR_UNSIGNED
+#define WCHAR_UNSIGNED 0
+#endif
+
+#ifndef FLOAT_TYPE_SIZE
+#define FLOAT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef DOUBLE_TYPE_SIZE
+#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+/* We let tm.h override the types used here, to handle trivial differences
+ such as the choice of unsigned int or long unsigned int for size_t.
+ When machines start needing nontrivial differences in the size type,
+ it would be best to do something here to figure out automatically
+ from other information what type to use. */
+
+#ifndef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+#endif
+
+#ifndef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+#endif
+
+#ifndef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+#endif
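+
+/* For example (a hypothetical target, shown only for illustration),
+ a tm.h wanting a 32-bit size_t on a machine whose `long' is 64 bits
+ could define:
+ #define SIZE_TYPE "unsigned int"
+ #define PTRDIFF_TYPE "int" */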
+
+/* a node which has tree code ERROR_MARK, and whose type is itself.
+ All erroneous expressions are replaced with this node. All functions
+ that accept nodes as arguments should avoid generating error messages
+ if this node is one of the arguments, since it is undesirable to get
+ multiple error messages from one error in the input. */
+
+tree error_mark_node;
+
+/* INTEGER_TYPE and REAL_TYPE nodes for the standard data types */
+
+tree short_integer_type_node;
+tree integer_type_node;
+tree long_integer_type_node;
+tree long_long_integer_type_node;
+
+tree short_unsigned_type_node;
+tree unsigned_type_node;
+tree long_unsigned_type_node;
+tree long_long_unsigned_type_node;
+
+tree ptrdiff_type_node;
+
+tree unsigned_char_type_node;
+tree signed_char_type_node;
+tree char_type_node;
+tree wchar_type_node;
+tree signed_wchar_type_node;
+tree unsigned_wchar_type_node;
+
+tree float_type_node;
+tree double_type_node;
+tree long_double_type_node;
+
+tree complex_integer_type_node;
+tree complex_float_type_node;
+tree complex_double_type_node;
+tree complex_long_double_type_node;
+
+tree intQI_type_node;
+tree intHI_type_node;
+tree intSI_type_node;
+tree intDI_type_node;
+
+tree unsigned_intQI_type_node;
+tree unsigned_intHI_type_node;
+tree unsigned_intSI_type_node;
+tree unsigned_intDI_type_node;
+
+/* a VOID_TYPE node. */
+
+tree void_type_node;
+
+/* Nodes for types `void *' and `const void *'. */
+
+tree ptr_type_node, const_ptr_type_node;
+
+/* Nodes for types `char *' and `const char *'. */
+
+tree string_type_node, const_string_type_node;
+
+/* Type `char[SOMENUMBER]'.
+ Used when an array of char is needed and the size is irrelevant. */
+
+tree char_array_type_node;
+
+/* Type `int[SOMENUMBER]' or something like it.
+ Used when an array of int is needed and the size is irrelevant. */
+
+tree int_array_type_node;
+
+/* Type `wchar_t[SOMENUMBER]' or something like it.
+ Used when a wide string literal is created. */
+
+tree wchar_array_type_node;
+
+/* type `int ()' -- used for implicit declaration of functions. */
+
+tree default_function_type;
+
+/* function types `double (double)' and `double (double, double)', etc. */
+
+tree double_ftype_double, double_ftype_double_double;
+tree int_ftype_int, long_ftype_long;
+
+/* Function type `void (void *, void *, int)' and similar ones */
+
+tree void_ftype_ptr_ptr_int, int_ftype_ptr_ptr_int, void_ftype_ptr_int_int;
+
+/* Function type `char *(char *, char *)' and similar ones */
+tree string_ftype_ptr_ptr, int_ftype_string_string;
+
+/* Function type `int (const void *, const void *, size_t)' */
+tree int_ftype_cptr_cptr_sizet;
+
+/* Two expressions that are constants with value zero.
+ The first is of type `int', the second of type `void *'. */
+
+tree integer_zero_node;
+tree null_pointer_node;
+
+/* A node for the integer constant 1. */
+
+tree integer_one_node;
+
+/* Nonzero if we have seen an invalid cross reference
+ to a struct, union, or enum, but not yet printed the message. */
+
+tree pending_invalid_xref;
+/* File and line to appear in the eventual error message. */
+char *pending_invalid_xref_file;
+int pending_invalid_xref_line;
+
+/* While defining an enum type, this is 1 plus the last enumerator
+ constant value. Note that we do not have to save this or `enum_overflow'
+ around a nested function definition, since such a definition could only
+ occur in an enum value expression and we don't use these variables in
+ that case. */
+
+static tree enum_next_value;
+
+/* Nonzero means that there was overflow computing enum_next_value. */
+
+static int enum_overflow;
+
+/* Parsing a function declarator leaves a list of parameter names
+ or a chain of parameter decls here. */
+
+static tree last_function_parms;
+
+/* Parsing a function declarator leaves here a chain of structure
+ and enum types declared in the parmlist. */
+
+static tree last_function_parm_tags;
+
+/* After parsing the declarator that starts a function definition,
+ `start_function' puts here the list of parameter names or chain of decls.
+ `store_parm_decls' finds it here. */
+
+static tree current_function_parms;
+
+/* Similar, for last_function_parm_tags. */
+static tree current_function_parm_tags;
+
+/* Similar, for the file and line that the prototype came from if this is
+ an old-style definition. */
+static char *current_function_prototype_file;
+static int current_function_prototype_line;
+
+/* A list (chain of TREE_LIST nodes) of all LABEL_DECLs in the function
+ that have names. Here so we can clear out their names' definitions
+ at the end of the function. */
+
+static tree named_labels;
+
+/* A list of LABEL_DECLs from outer contexts that are currently shadowed. */
+
+static tree shadowed_labels;
+
+/* Nonzero when store_parm_decls is called indicates a varargs function.
+ Value not meaningful after store_parm_decls. */
+
+static int c_function_varargs;
+
+/* The FUNCTION_DECL for the function currently being compiled,
+ or 0 if between functions. */
+tree current_function_decl;
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement that specifies a return value is seen. */
+
+int current_function_returns_value;
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement with no argument is seen. */
+
+int current_function_returns_null;
+
+/* Set to nonzero by `grokdeclarator' for a function
+ whose return type is defaulted, if warnings for this are desired. */
+
+static int warn_about_return_type;
+
+/* Nonzero when starting a function declared `extern inline'. */
+
+static int current_extern_inline;
+
+/* For each binding contour we allocate a binding_level structure
+ * which records the names defined in that contour.
+ * Contours include:
+ * 0) the global one
+ * 1) one for each function definition,
+ * where internal declarations of the parameters appear.
+ * 2) one for each compound statement,
+ * to record its declarations.
+ *
+ * The current meaning of a name can be found by searching the levels from
+ * the current one out to the global one.
+ */
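+
+/* As an illustration (not from the original sources), compiling
+ int f (x) int x; { int y; { int z; } }
+ involves four contours: the global one (holding `f'), the parameter
+ level (holding `x'), the function-body level (holding `y'), and one
+ for the inner compound statement (holding `z'). */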
+
+/* Note that the information in the `names' component of the global contour
+ is duplicated in the IDENTIFIER_GLOBAL_VALUEs of all identifiers. */
+
+struct binding_level
+ {
+ /* A chain of _DECL nodes for all variables, constants, functions,
+ and typedef types. These are in the reverse of the order supplied.
+ */
+ tree names;
+
+ /* A list of structure, union and enum definitions,
+ * for looking up tag names.
+ * It is a chain of TREE_LIST nodes, each of whose TREE_PURPOSE is a name,
+ * or NULL_TREE; and whose TREE_VALUE is a RECORD_TYPE, UNION_TYPE,
+ * or ENUMERAL_TYPE node.
+ */
+ tree tags;
+
+ /* For each level, a list of shadowed outer-level local definitions
+ to be restored when this level is popped.
+ Each link is a TREE_LIST whose TREE_PURPOSE is an identifier and
+ whose TREE_VALUE is its old definition (a kind of ..._DECL node). */
+ tree shadowed;
+
+ /* For each level (except not the global one),
+ a chain of BLOCK nodes for all the levels
+ that were entered and exited one level down. */
+ tree blocks;
+
+ /* The BLOCK node for this level, if one has been preallocated.
+ If 0, the BLOCK is allocated (if needed) when the level is popped. */
+ tree this_block;
+
+ /* The binding level which this one is contained in (inherits from). */
+ struct binding_level *level_chain;
+
+ /* Nonzero for the level that holds the parameters of a function. */
+ char parm_flag;
+
+ /* Nonzero if this level "doesn't exist" for tags. */
+ char tag_transparent;
+
+ /* Nonzero if sublevels of this level "don't exist" for tags.
+ This is set in the parm level of a function definition
+ while reading the function body, so that the outermost block
+ of the function body will be tag-transparent. */
+ char subblocks_tag_transparent;
+
+ /* Nonzero means make a BLOCK for this level regardless of all else. */
+ char keep;
+
+ /* Nonzero means make a BLOCK if this level has any subblocks. */
+ char keep_if_subblocks;
+
+ /* Number of decls in `names' that have incomplete
+ structure or union types. */
+ int n_incomplete;
+
+ /* A list of decls giving the (reversed) specified order of parms,
+ not including any forward-decls in the parmlist.
+ This is so we can put the parms in proper order for assign_parms. */
+ tree parm_order;
+ };
+
+#define NULL_BINDING_LEVEL (struct binding_level *) NULL
+
+/* The binding level currently in effect. */
+
+static struct binding_level *current_binding_level;
+
+/* A chain of binding_level structures awaiting reuse. */
+
+static struct binding_level *free_binding_level;
+
+/* The outermost binding level, for names of file scope.
+ This is created when the compiler is started and exists
+ through the entire run. */
+
+static struct binding_level *global_binding_level;
+
+/* Binding level structures are initialized by copying this one. */
+
+static struct binding_level clear_binding_level
+ = {NULL, NULL, NULL, NULL, NULL, NULL_BINDING_LEVEL, 0, 0, 0, 0, 0, 0,
+ NULL};
+
+/* Nonzero means unconditionally make a BLOCK for the next level pushed. */
+
+static int keep_next_level_flag;
+
+/* Nonzero means make a BLOCK for the next level pushed
+ if it has subblocks. */
+
+static int keep_next_if_subblocks;
+
+/* The chain of outer levels of label scopes.
+ This uses the same data structure used for binding levels,
+ but it works differently: each link in the chain records
+ saved values of named_labels and shadowed_labels for
+ a label binding level outside the current one. */
+
+static struct binding_level *label_level_chain;
+
+/* Forward declarations. */
+
+static tree grokparms (), grokdeclarator ();
+tree pushdecl ();
+tree builtin_function ();
+void shadow_tag_warned ();
+
+static tree lookup_tag ();
+static tree lookup_tag_reverse ();
+tree lookup_name_current_level ();
+static char *redeclaration_error_message ();
+static void layout_array_type ();
+
+/* C-specific option variables. */
+
+/* Nonzero means allow type mismatches in conditional expressions;
+ just make their values `void'. */
+
+int flag_cond_mismatch;
+
+/* Nonzero means give `double' the same size as `float'. */
+
+int flag_short_double;
+
+/* Nonzero means don't recognize the keyword `asm'. */
+
+int flag_no_asm;
+
+/* Nonzero means don't recognize any builtin functions. */
+
+int flag_no_builtin;
+
+/* Nonzero means don't recognize the non-ANSI builtin functions.
+ -ansi sets this. */
+
+int flag_no_nonansi_builtin;
+
+/* Nonzero means do some things the same way PCC does. */
+
+int flag_traditional;
+
+/* Nonzero means to allow single precision math even if we're generally
+ being traditional. */
+int flag_allow_single_precision = 0;
+
+/* Nonzero means to treat bitfields as signed unless they say `unsigned'. */
+
+int flag_signed_bitfields = 1;
+int explicit_flag_signed_bitfields = 0;
+
+/* Nonzero means handle `#ident' directives. 0 means ignore them. */
+
+int flag_no_ident = 0;
+
+/* Nonzero means warn about implicit declarations. */
+
+int warn_implicit;
+
+/* Nonzero means give string constants the type `const char *'
+ to get extra warnings from them. These warnings will be too numerous
+ to be useful, except in thoroughly ANSIfied programs. */
+
+int warn_write_strings;
+
+/* Nonzero means warn about pointer casts that can drop a type qualifier
+ from the pointer target type. */
+
+int warn_cast_qual;
+
+/* Nonzero means warn when casting a function call to a type that does
+ not match the return type (e.g. (float)sqrt() or (anything*)malloc()
+ when there is no previous declaration of sqrt or malloc). */
+
+int warn_bad_function_cast;
+
+/* Warn about traditional constructs whose meanings changed in ANSI C. */
+
+int warn_traditional;
+
+/* Nonzero means warn about sizeof(function) or addition/subtraction
+ of function pointers. */
+
+int warn_pointer_arith;
+
+/* Nonzero means warn for non-prototype function decls
+ or non-prototyped defs without previous prototype. */
+
+int warn_strict_prototypes;
+
+/* Nonzero means warn for any global function def
+ without separate previous prototype decl. */
+
+int warn_missing_prototypes;
+
+/* Nonzero means warn for any global function def
+ without separate previous decl. */
+
+int warn_missing_declarations;
+
+/* Nonzero means warn about multiple (redundant) decls for the same single
+ variable or function. */
+
+int warn_redundant_decls = 0;
+
+/* Nonzero means warn about extern declarations of objects not at
+ file-scope level and about *all* declarations of functions (whether
+ extern or static) not at file-scope level. Note that we exclude
+ implicit function declarations. To get warnings about those, use
+ -Wimplicit. */
+
+int warn_nested_externs = 0;
+
+/* Warn about *printf or *scanf format/argument anomalies. */
+
+int warn_format;
+
+/* Warn about a subscript that has type char. */
+
+int warn_char_subscripts = 0;
+
+/* Warn if a type conversion is done that might have confusing results. */
+
+int warn_conversion;
+
+/* Warn if adding () is suggested. */
+
+int warn_parentheses;
+
+/* Warn if initializer is not completely bracketed. */
+
+int warn_missing_braces;
+
+/* Nonzero means `$' can be in an identifier.
+ See cccp.c for reasons why this breaks some obscure ANSI C programs. */
+
+#ifndef DOLLARS_IN_IDENTIFIERS
+#define DOLLARS_IN_IDENTIFIERS 1
+#endif
+int dollars_in_ident = DOLLARS_IN_IDENTIFIERS > 1;
+
+/* Decode the string P as a language-specific option for C.
+ Return 1 if it is recognized (and handle it);
+ return 0 if not recognized. */
+
+int
+c_decode_option (p)
+ char *p;
+{
+ if (!strcmp (p, "-ftraditional") || !strcmp (p, "-traditional"))
+ {
+ flag_traditional = 1;
+ flag_writable_strings = 1;
+#if DOLLARS_IN_IDENTIFIERS > 0
+ dollars_in_ident = 1;
+#endif
+ }
+ else if (!strcmp (p, "-fallow-single-precision"))
+ flag_allow_single_precision = 1;
+ else if (!strcmp (p, "-fnotraditional") || !strcmp (p, "-fno-traditional"))
+ {
+ flag_traditional = 0;
+ flag_writable_strings = 0;
+ dollars_in_ident = DOLLARS_IN_IDENTIFIERS > 1;
+ }
+ else if (!strcmp (p, "-fdollars-in-identifiers"))
+ {
+#if DOLLARS_IN_IDENTIFIERS > 0
+ dollars_in_ident = 1;
+#endif
+ }
+ else if (!strcmp (p, "-fno-dollars-in-identifiers"))
+ dollars_in_ident = 0;
+ else if (!strcmp (p, "-fsigned-char"))
+ flag_signed_char = 1;
+ else if (!strcmp (p, "-funsigned-char"))
+ flag_signed_char = 0;
+ else if (!strcmp (p, "-fno-signed-char"))
+ flag_signed_char = 0;
+ else if (!strcmp (p, "-fno-unsigned-char"))
+ flag_signed_char = 1;
+ else if (!strcmp (p, "-fsigned-bitfields")
+ || !strcmp (p, "-fno-unsigned-bitfields"))
+ {
+ flag_signed_bitfields = 1;
+ explicit_flag_signed_bitfields = 1;
+ }
+ else if (!strcmp (p, "-funsigned-bitfields")
+ || !strcmp (p, "-fno-signed-bitfields"))
+ {
+ flag_signed_bitfields = 0;
+ explicit_flag_signed_bitfields = 1;
+ }
+ else if (!strcmp (p, "-fshort-enums"))
+ flag_short_enums = 1;
+ else if (!strcmp (p, "-fno-short-enums"))
+ flag_short_enums = 0;
+ else if (!strcmp (p, "-fcond-mismatch"))
+ flag_cond_mismatch = 1;
+ else if (!strcmp (p, "-fno-cond-mismatch"))
+ flag_cond_mismatch = 0;
+ else if (!strcmp (p, "-fshort-double"))
+ flag_short_double = 1;
+ else if (!strcmp (p, "-fno-short-double"))
+ flag_short_double = 0;
+ else if (!strcmp (p, "-fasm"))
+ flag_no_asm = 0;
+ else if (!strcmp (p, "-fno-asm"))
+ flag_no_asm = 1;
+ else if (!strcmp (p, "-fbuiltin"))
+ flag_no_builtin = 0;
+ else if (!strcmp (p, "-fno-builtin"))
+ flag_no_builtin = 1;
+ else if (!strcmp (p, "-fno-ident"))
+ flag_no_ident = 1;
+ else if (!strcmp (p, "-fident"))
+ flag_no_ident = 0;
+ else if (!strcmp (p, "-ansi"))
+ flag_no_asm = 1, flag_no_nonansi_builtin = 1, dollars_in_ident = 0;
+ else if (!strcmp (p, "-Wimplicit"))
+ warn_implicit = 1;
+ else if (!strcmp (p, "-Wno-implicit"))
+ warn_implicit = 0;
+ else if (!strcmp (p, "-Wwrite-strings"))
+ warn_write_strings = 1;
+ else if (!strcmp (p, "-Wno-write-strings"))
+ warn_write_strings = 0;
+ else if (!strcmp (p, "-Wcast-qual"))
+ warn_cast_qual = 1;
+ else if (!strcmp (p, "-Wno-cast-qual"))
+ warn_cast_qual = 0;
+ else if (!strcmp (p, "-Wbad-function-cast"))
+ warn_bad_function_cast = 1;
+ else if (!strcmp (p, "-Wno-bad-function-cast"))
+ warn_bad_function_cast = 0;
+ else if (!strcmp (p, "-Wpointer-arith"))
+ warn_pointer_arith = 1;
+ else if (!strcmp (p, "-Wno-pointer-arith"))
+ warn_pointer_arith = 0;
+ else if (!strcmp (p, "-Wstrict-prototypes"))
+ warn_strict_prototypes = 1;
+ else if (!strcmp (p, "-Wno-strict-prototypes"))
+ warn_strict_prototypes = 0;
+ else if (!strcmp (p, "-Wmissing-prototypes"))
+ warn_missing_prototypes = 1;
+ else if (!strcmp (p, "-Wno-missing-prototypes"))
+ warn_missing_prototypes = 0;
+ else if (!strcmp (p, "-Wmissing-declarations"))
+ warn_missing_declarations = 1;
+ else if (!strcmp (p, "-Wno-missing-declarations"))
+ warn_missing_declarations = 0;
+ else if (!strcmp (p, "-Wredundant-decls"))
+ warn_redundant_decls = 1;
+ else if (!strcmp (p, "-Wno-redundant-decls"))
+ warn_redundant_decls = 0;
+ else if (!strcmp (p, "-Wnested-externs"))
+ warn_nested_externs = 1;
+ else if (!strcmp (p, "-Wno-nested-externs"))
+ warn_nested_externs = 0;
+ else if (!strcmp (p, "-Wtraditional"))
+ warn_traditional = 1;
+ else if (!strcmp (p, "-Wno-traditional"))
+ warn_traditional = 0;
+ else if (!strcmp (p, "-Wformat"))
+ warn_format = 1;
+ else if (!strcmp (p, "-Wno-format"))
+ warn_format = 0;
+ else if (!strcmp (p, "-Wchar-subscripts"))
+ warn_char_subscripts = 1;
+ else if (!strcmp (p, "-Wno-char-subscripts"))
+ warn_char_subscripts = 0;
+ else if (!strcmp (p, "-Wconversion"))
+ warn_conversion = 1;
+ else if (!strcmp (p, "-Wno-conversion"))
+ warn_conversion = 0;
+ else if (!strcmp (p, "-Wparentheses"))
+ warn_parentheses = 1;
+ else if (!strcmp (p, "-Wno-parentheses"))
+ warn_parentheses = 0;
+ else if (!strcmp (p, "-Wreturn-type"))
+ warn_return_type = 1;
+ else if (!strcmp (p, "-Wno-return-type"))
+ warn_return_type = 0;
+ else if (!strcmp (p, "-Wcomment"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wno-comment"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wcomments"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wno-comments"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wtrigraphs"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wno-trigraphs"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wimport"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wno-import"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "-Wmissing-braces"))
+ warn_missing_braces = 1;
+ else if (!strcmp (p, "-Wno-missing-braces"))
+ warn_missing_braces = 0;
+ else if (!strcmp (p, "-Wall"))
+ {
+ extra_warnings = 1;
+ /* We save the value of warn_uninitialized, since if they put
+ -Wuninitialized on the command line, we need to generate a
+ warning about not using it without also specifying -O. */
+ if (warn_uninitialized != 1)
+ warn_uninitialized = 2;
+ warn_implicit = 1;
+ warn_return_type = 1;
+ warn_unused = 1;
+ warn_switch = 1;
+ warn_format = 1;
+ warn_char_subscripts = 1;
+ warn_parentheses = 1;
+ warn_missing_braces = 1;
+ }
+ else
+ return 0;
+
+ return 1;
+}
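+
+/* Usage sketch (illustrative only): the driver hands each command-line
+ string to this function in turn; `c_decode_option ("-Wformat")'
+ returns 1 after setting warn_format, while an unrecognized string
+ such as `-Wbogus' makes it return 0 so the caller can complain. */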
+
+/* Hooks for print_node. */
+
+void
+print_lang_decl (file, node, indent)
+ FILE *file;
+ tree node;
+ int indent;
+{
+}
+
+void
+print_lang_type (file, node, indent)
+ FILE *file;
+ tree node;
+ int indent;
+{
+}
+
+void
+print_lang_identifier (file, node, indent)
+ FILE *file;
+ tree node;
+ int indent;
+{
+ print_node (file, "global", IDENTIFIER_GLOBAL_VALUE (node), indent + 4);
+ print_node (file, "local", IDENTIFIER_LOCAL_VALUE (node), indent + 4);
+ print_node (file, "label", IDENTIFIER_LABEL_VALUE (node), indent + 4);
+ print_node (file, "implicit", IDENTIFIER_IMPLICIT_DECL (node), indent + 4);
+ print_node (file, "error locus", IDENTIFIER_ERROR_LOCUS (node), indent + 4);
+ print_node (file, "limbo value", IDENTIFIER_LIMBO_VALUE (node), indent + 4);
+}
+
+/* Hook called at end of compilation to assume 1 elt
+ for a top-level array decl that wasn't complete before. */
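+
+/* For example (an illustration, not in the original file): a file-scope
+ `int arr[];' that is never completed is treated here as if it had been
+ `int arr[1];'; complete_array_type fills in the missing domain and
+ layout_decl then lays the variable out. */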
+
+void
+finish_incomplete_decl (decl)
+ tree decl;
+{
+ if (TREE_CODE (decl) == VAR_DECL && TREE_TYPE (decl) != error_mark_node)
+ {
+ tree type = TREE_TYPE (decl);
+ if (TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_DOMAIN (type) == 0
+ && TREE_CODE (decl) != TYPE_DECL)
+ {
+ complete_array_type (type, NULL_TREE, 1);
+
+ layout_decl (decl, 0);
+ }
+ }
+}
+
+/* Create a new `struct binding_level'. */
+
+static
+struct binding_level *
+make_binding_level ()
+{
+ /* NOSTRICT */
+ return (struct binding_level *) xmalloc (sizeof (struct binding_level));
+}
+
+/* Nonzero if we are currently in the global binding level. */
+
+int
+global_bindings_p ()
+{
+ return current_binding_level == global_binding_level;
+}
+
+void
+keep_next_level ()
+{
+ keep_next_level_flag = 1;
+}
+
+/* Nonzero if the current level needs to have a BLOCK made. */
+
+int
+kept_level_p ()
+{
+ return ((current_binding_level->keep_if_subblocks
+ && current_binding_level->blocks != 0)
+ || current_binding_level->keep
+ || current_binding_level->names != 0
+ || (current_binding_level->tags != 0
+ && !current_binding_level->tag_transparent));
+}
+
+/* Identify this binding level as a level of parameters.
+ DEFINITION_FLAG is 1 for a definition, 0 for a declaration.
+ But it turns out there is no way to pass the right value for
+ DEFINITION_FLAG, so we ignore it. */
+
+void
+declare_parm_level (definition_flag)
+ int definition_flag;
+{
+ current_binding_level->parm_flag = 1;
+}
+
+/* Nonzero if currently making parm declarations. */
+
+int
+in_parm_level_p ()
+{
+ return current_binding_level->parm_flag;
+}
+
+/* Enter a new binding level.
+ If TAG_TRANSPARENT is nonzero, do so only for the name space of variables,
+ not for that of tags. */
+
+void
+pushlevel (tag_transparent)
+ int tag_transparent;
+{
+ register struct binding_level *newlevel = NULL_BINDING_LEVEL;
+
+ /* If this is the top level of a function,
+ just make sure that NAMED_LABELS is 0. */
+
+ if (current_binding_level == global_binding_level)
+ {
+ named_labels = 0;
+ }
+
+ /* Reuse or create a struct for this binding level. */
+
+ if (free_binding_level)
+ {
+ newlevel = free_binding_level;
+ free_binding_level = free_binding_level->level_chain;
+ }
+ else
+ {
+ newlevel = make_binding_level ();
+ }
+
+ /* Add this level to the front of the chain (stack) of levels that
+ are active. */
+
+ *newlevel = clear_binding_level;
+ newlevel->tag_transparent
+ = (tag_transparent
+ || (current_binding_level
+ ? current_binding_level->subblocks_tag_transparent
+ : 0));
+ newlevel->level_chain = current_binding_level;
+ current_binding_level = newlevel;
+ newlevel->keep = keep_next_level_flag;
+ keep_next_level_flag = 0;
+ newlevel->keep_if_subblocks = keep_next_if_subblocks;
+ keep_next_if_subblocks = 0;
+}
+
+/* Exit a binding level.
+ Pop the level off, and restore the state of the identifier-decl mappings
+ that were in effect when this level was entered.
+
+ If KEEP is nonzero, this level had explicit declarations, so
+ create a "block" (a BLOCK node) for the level
+ to record its declarations and subblocks for symbol table output.
+
+ If FUNCTIONBODY is nonzero, this level is the body of a function,
+ so create a block as if KEEP were set and also clear out all
+ label names.
+
+ If REVERSE is nonzero, reverse the order of decls before putting
+ them into the BLOCK. */
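+
+/* Sketch of the pairing (illustrative; the call sites are elsewhere in
+ this file): the parser calls pushlevel (0) at each `{' and poplevel at
+ the matching `}'; a function body is finished with something like
+ poplevel (1, 0, 1), which builds the BLOCK and clears out the label
+ bindings as described above. */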
+
+tree
+poplevel (keep, reverse, functionbody)
+ int keep;
+ int reverse;
+ int functionbody;
+{
+ register tree link;
+ /* The chain of decls was accumulated in reverse order.
+ Put it into forward order, just for cleanliness. */
+ tree decls;
+ tree tags = current_binding_level->tags;
+ tree subblocks = current_binding_level->blocks;
+ tree block = 0;
+ tree decl;
+ int block_previously_created;
+
+ keep |= current_binding_level->keep;
+
+ /* This warning is turned off because it causes warnings for
+ declarations like `extern struct foo *x'. */
+#if 0
+ /* Warn about incomplete structure types in this level. */
+ for (link = tags; link; link = TREE_CHAIN (link))
+ if (TYPE_SIZE (TREE_VALUE (link)) == 0)
+ {
+ tree type = TREE_VALUE (link);
+ char *errmsg;
+ switch (TREE_CODE (type))
+ {
+ case RECORD_TYPE:
+ errmsg = "`struct %s' incomplete in scope ending here";
+ break;
+ case UNION_TYPE:
+ errmsg = "`union %s' incomplete in scope ending here";
+ break;
+ case ENUMERAL_TYPE:
+ errmsg = "`enum %s' incomplete in scope ending here";
+ break;
+ }
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ error (errmsg, IDENTIFIER_POINTER (TYPE_NAME (type)));
+ else
+ /* If this type has a typedef-name, the TYPE_NAME is a TYPE_DECL. */
+ error (errmsg, IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))));
+ }
+#endif /* 0 */
+
+ /* Get the decls in the order they were written.
+ Usually current_binding_level->names is in reverse order.
+ But parameter decls were previously put in forward order. */
+
+ if (reverse)
+ current_binding_level->names
+ = decls = nreverse (current_binding_level->names);
+ else
+ decls = current_binding_level->names;
+
+ /* Output any nested inline functions within this block
+ if they weren't already output. */
+
+ for (decl = decls; decl; decl = TREE_CHAIN (decl))
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && ! TREE_ASM_WRITTEN (decl)
+ && DECL_INITIAL (decl) != 0
+ && TREE_ADDRESSABLE (decl))
+ {
+ /* If this decl was copied from a file-scope decl
+ on account of a block-scope extern decl,
+ propagate TREE_ADDRESSABLE to the file-scope decl. */
+ if (DECL_ABSTRACT_ORIGIN (decl) != 0)
+ TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (decl)) = 1;
+ else
+ {
+ push_function_context ();
+ output_inline_function (decl);
+ pop_function_context ();
+ }
+ }
+
+ /* If there were any declarations or structure tags in that level,
+ or if this level is a function body,
+ create a BLOCK to record them for the life of this function. */
+
+ block = 0;
+ block_previously_created = (current_binding_level->this_block != 0);
+ if (block_previously_created)
+ block = current_binding_level->this_block;
+ else if (keep || functionbody
+ || (current_binding_level->keep_if_subblocks && subblocks != 0))
+ block = make_node (BLOCK);
+ if (block != 0)
+ {
+ BLOCK_VARS (block) = decls;
+ BLOCK_TYPE_TAGS (block) = tags;
+ BLOCK_SUBBLOCKS (block) = subblocks;
+ remember_end_note (block);
+ }
+
+ /* In each subblock, record that this is its superior. */
+
+ for (link = subblocks; link; link = TREE_CHAIN (link))
+ BLOCK_SUPERCONTEXT (link) = block;
+
+ /* Clear out the meanings of the local variables of this level. */
+
+ for (link = decls; link; link = TREE_CHAIN (link))
+ {
+ if (DECL_NAME (link) != 0)
+ {
+ /* If the ident. was used or addressed via a local extern decl,
+ don't forget that fact. */
+ if (DECL_EXTERNAL (link))
+ {
+ if (TREE_USED (link))
+ TREE_USED (DECL_NAME (link)) = 1;
+ if (TREE_ADDRESSABLE (link))
+ TREE_ADDRESSABLE (DECL_ASSEMBLER_NAME (link)) = 1;
+ }
+ IDENTIFIER_LOCAL_VALUE (DECL_NAME (link)) = 0;
+ }
+ }
+
+ /* Restore all name-meanings of the outer levels
+ that were shadowed by this level. */
+
+ for (link = current_binding_level->shadowed; link; link = TREE_CHAIN (link))
+ IDENTIFIER_LOCAL_VALUE (TREE_PURPOSE (link)) = TREE_VALUE (link);
+
+ /* If the level being exited is the top level of a function,
+ check over all the labels, and clear out the current
+ (function local) meanings of their names. */
+
+ if (functionbody)
+ {
+ /* If this is the top level block of a function,
+ the vars are the function's parameters.
+ Don't leave them in the BLOCK because they are
+ found in the FUNCTION_DECL instead. */
+
+ BLOCK_VARS (block) = 0;
+
+ /* Clear out the definitions of all label names,
+ since their scopes end here,
+ and add them to BLOCK_VARS. */
+
+ for (link = named_labels; link; link = TREE_CHAIN (link))
+ {
+ register tree label = TREE_VALUE (link);
+
+ if (DECL_INITIAL (label) == 0)
+ {
+ error_with_decl (label, "label `%s' used but not defined");
+ /* Avoid crashing later. */
+ define_label (input_filename, lineno,
+ DECL_NAME (label));
+ }
+ else if (warn_unused && !TREE_USED (label))
+ warning_with_decl (label, "label `%s' defined but not used");
+ IDENTIFIER_LABEL_VALUE (DECL_NAME (label)) = 0;
+
+ /* Put the labels into the "variables" of the
+ top-level block, so debugger can see them. */
+ TREE_CHAIN (label) = BLOCK_VARS (block);
+ BLOCK_VARS (block) = label;
+ }
+ }
+
+ /* Pop the current level, and free the structure for reuse. */
+
+ {
+ register struct binding_level *level = current_binding_level;
+ current_binding_level = current_binding_level->level_chain;
+
+ level->level_chain = free_binding_level;
+ free_binding_level = level;
+ }
+
+ /* Dispose of the block that we just made inside some higher level. */
+ if (functionbody)
+ DECL_INITIAL (current_function_decl) = block;
+ else if (block)
+ {
+ if (!block_previously_created)
+ current_binding_level->blocks
+ = chainon (current_binding_level->blocks, block);
+ }
+ /* If we did not make a block for the level just exited,
+ any blocks made for inner levels
+ (since they cannot be recorded as subblocks in that level)
+ must be carried forward so they will later become subblocks
+ of something else. */
+ else if (subblocks)
+ current_binding_level->blocks
+ = chainon (current_binding_level->blocks, subblocks);
+
+ /* Set the TYPE_CONTEXTs for all of the tagged types belonging to this
+ binding contour so that they point to the appropriate construct, i.e.
+ either to the current FUNCTION_DECL node, or else to the BLOCK node
+ we just constructed.
+
+ Note that for tagged types whose scope is just the formal parameter
+ list for some function type specification, we can't properly set
+ their TYPE_CONTEXTs here, because we don't have a pointer to the
+ appropriate FUNCTION_TYPE node readily available to us. For those
+ cases, the TYPE_CONTEXTs of the relevant tagged type nodes get set
+ in `grokdeclarator' as soon as we have created the FUNCTION_TYPE
+ node which will represent the "scope" for these "parameter list local"
+ tagged types.
+ */
+
+ if (functionbody)
+ for (link = tags; link; link = TREE_CHAIN (link))
+ TYPE_CONTEXT (TREE_VALUE (link)) = current_function_decl;
+ else if (block)
+ for (link = tags; link; link = TREE_CHAIN (link))
+ TYPE_CONTEXT (TREE_VALUE (link)) = block;
+
+ if (block)
+ TREE_USED (block) = 1;
+ return block;
+}
+
+/* Delete the node BLOCK from the current binding level.
+ This is used for the block inside a stmt expr ({...})
+ so that the block can be reinserted where appropriate. */
+
+void
+delete_block (block)
+ tree block;
+{
+ tree t;
+ if (current_binding_level->blocks == block)
+ current_binding_level->blocks = TREE_CHAIN (block);
+ for (t = current_binding_level->blocks; t;)
+ {
+ if (TREE_CHAIN (t) == block)
+ TREE_CHAIN (t) = TREE_CHAIN (block);
+ else
+ t = TREE_CHAIN (t);
+ }
+ TREE_CHAIN (block) = NULL;
+ /* Clear TREE_USED which is always set by poplevel.
+ The flag is set again if insert_block is called. */
+ TREE_USED (block) = 0;
+}
+
+/* Insert BLOCK at the end of the list of subblocks of the
+ current binding level. This is used when a BIND_EXPR is expanded,
+ to handle the BLOCK node inside the BIND_EXPR. */
+
+void
+insert_block (block)
+ tree block;
+{
+ TREE_USED (block) = 1;
+ current_binding_level->blocks
+ = chainon (current_binding_level->blocks, block);
+}
+
+/* Set the BLOCK node for the innermost scope
+ (the one we are currently in). */
+
+void
+set_block (block)
+ register tree block;
+{
+ current_binding_level->this_block = block;
+}
+
+void
+push_label_level ()
+{
+ register struct binding_level *newlevel;
+
+ /* Reuse or create a struct for this binding level. */
+
+ if (free_binding_level)
+ {
+ newlevel = free_binding_level;
+ free_binding_level = free_binding_level->level_chain;
+ }
+ else
+ {
+ newlevel = make_binding_level ();
+ }
+
+ /* Add this level to the front of the chain (stack) of label levels. */
+
+ newlevel->level_chain = label_level_chain;
+ label_level_chain = newlevel;
+
+ newlevel->names = named_labels;
+ newlevel->shadowed = shadowed_labels;
+ named_labels = 0;
+ shadowed_labels = 0;
+}
+
+void
+pop_label_level ()
+{
+ register struct binding_level *level = label_level_chain;
+ tree link, prev;
+
+ /* Clear out the definitions of the declared labels in this level.
+ Leave in the list any ordinary, non-declared labels. */
+ for (link = named_labels, prev = 0; link;)
+ {
+ if (C_DECLARED_LABEL_FLAG (TREE_VALUE (link)))
+ {
+ if (DECL_SOURCE_LINE (TREE_VALUE (link)) == 0)
+ {
+ error_with_decl (TREE_VALUE (link),
+ "label `%s' used but not defined");
+ /* Avoid crashing later. */
+ define_label (input_filename, lineno,
+ DECL_NAME (TREE_VALUE (link)));
+ }
+ else if (warn_unused && !TREE_USED (TREE_VALUE (link)))
+ warning_with_decl (TREE_VALUE (link),
+ "label `%s' defined but not used");
+ IDENTIFIER_LABEL_VALUE (DECL_NAME (TREE_VALUE (link))) = 0;
+
+ /* Delete this element from the list. */
+ link = TREE_CHAIN (link);
+ if (prev)
+ TREE_CHAIN (prev) = link;
+ else
+ named_labels = link;
+ }
+ else
+ {
+ prev = link;
+ link = TREE_CHAIN (link);
+ }
+ }
+
+ /* Bring back all the labels that were shadowed. */
+ for (link = shadowed_labels; link; link = TREE_CHAIN (link))
+ if (DECL_NAME (TREE_VALUE (link)) != 0)
+ IDENTIFIER_LABEL_VALUE (DECL_NAME (TREE_VALUE (link)))
+ = TREE_VALUE (link);
+
+ named_labels = chainon (named_labels, level->names);
+ shadowed_labels = level->shadowed;
+
+ /* Pop the current level, and free the structure for reuse. */
+ label_level_chain = label_level_chain->level_chain;
+ level->level_chain = free_binding_level;
+ free_binding_level = level;
+}
+
+/* Push a definition or a declaration of struct, union or enum tag "name".
+ "type" should be the type node.
+ We assume that the tag "name" is not already defined.
+
+ Note that the definition may really be just a forward reference.
+ In that case, the TYPE_SIZE will be zero. */
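+
+/* For example (illustration only): on seeing `struct foo;' the parser
+ ends up calling pushtag with the identifier `foo' and a RECORD_TYPE
+ whose TYPE_SIZE is still zero; a later `struct foo { int a; };' then
+ completes that same type node instead of pushing a new tag. */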
+
+void
+pushtag (name, type)
+ tree name, type;
+{
+ register struct binding_level *b;
+
+ /* Find the proper binding level for this type tag. */
+
+ for (b = current_binding_level; b->tag_transparent; b = b->level_chain)
+ continue;
+
+ if (name)
+ {
+ /* Record the identifier as the type's name if it has none. */
+
+ if (TYPE_NAME (type) == 0)
+ TYPE_NAME (type) = name;
+ }
+
+ if (b == global_binding_level)
+ b->tags = perm_tree_cons (name, type, b->tags);
+ else
+ b->tags = saveable_tree_cons (name, type, b->tags);
+
+ /* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the
+ tagged type we just added to the current binding level. This fake
+ NULL-named TYPE_DECL node helps dwarfout.c to know when it needs
+ to output a representation of a tagged type, and it also gives
+ us a convenient place to record the "scope start" address for the
+ tagged type. */
+
+ TYPE_STUB_DECL (type) = pushdecl (build_decl (TYPE_DECL, NULL_TREE, type));
+}
+
+/* Handle when a new declaration NEWDECL
+ has the same name as an old one OLDDECL
+ in the same binding contour.
+ Prints an error message if appropriate.
+
+ If safely possible, alter OLDDECL to look like NEWDECL, and return 1.
+ Otherwise, return 0. */
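+
+/* Illustrative case (not from the original sources): given
+ extern int nread ();
+ int nread (fd) int fd; { ... }
+ the second decl reaches duplicate_decls with the first as OLDDECL;
+ the types match, OLDDECL is updated to look like the definition, and
+ 1 is returned so the caller keeps using the old node. */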
+
+static int
+duplicate_decls (newdecl, olddecl)
+ register tree newdecl, olddecl;
+{
+ int types_match = comptypes (TREE_TYPE (newdecl), TREE_TYPE (olddecl));
+ int new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL
+ && DECL_INITIAL (newdecl) != 0);
+ tree oldtype = TREE_TYPE (olddecl);
+ tree newtype = TREE_TYPE (newdecl);
+
+ if (TREE_CODE (newtype) == ERROR_MARK
+ || TREE_CODE (oldtype) == ERROR_MARK)
+ types_match = 0;
+
+ /* New decl is completely inconsistent with the old one =>
+ tell caller to replace the old one.
+ This is always an error except in the case of shadowing a builtin. */
+ if (TREE_CODE (olddecl) != TREE_CODE (newdecl))
+ {
+ if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && (DECL_BUILT_IN (olddecl)
+ || DECL_BUILT_IN_NONANSI (olddecl)))
+ {
+ /* If you declare a built-in or predefined function name as static,
+ the old definition is overridden,
+ but optionally warn this was a bad choice of name. */
+ if (!TREE_PUBLIC (newdecl))
+ {
+ if (!warn_shadow)
+ ;
+ else if (DECL_BUILT_IN (olddecl))
+ warning_with_decl (newdecl, "shadowing built-in function `%s'");
+ else
+ warning_with_decl (newdecl, "shadowing library function `%s'");
+ }
+ /* Likewise, if the built-in is not ansi, then programs can
+ override it even globally without an error. */
+ else if (! DECL_BUILT_IN (olddecl))
+ warning_with_decl (newdecl,
+ "library function `%s' declared as non-function");
+
+ else if (DECL_BUILT_IN_NONANSI (olddecl))
+ warning_with_decl (newdecl,
+ "built-in function `%s' declared as non-function");
+ else
+ warning_with_decl (newdecl,
+ "built-in function `%s' declared as non-function");
+ }
+ else
+ {
+ error_with_decl (newdecl, "`%s' redeclared as different kind of symbol");
+ error_with_decl (olddecl, "previous declaration of `%s'");
+ }
+
+ return 0;
+ }
+
+ /* For real parm decl following a forward decl,
+ return 1 so old decl will be reused. */
+ if (types_match && TREE_CODE (newdecl) == PARM_DECL
+ && TREE_ASM_WRITTEN (olddecl) && ! TREE_ASM_WRITTEN (newdecl))
+ return 1;
+
+ /* The new declaration is the same kind of object as the old one.
+ The declarations may partially match. Print warnings if they don't
+ match enough. Ultimately, copy most of the information from the new
+ decl to the old one, and keep using the old one. */
+
+ if (flag_traditional && TREE_CODE (newdecl) == FUNCTION_DECL
+ && IDENTIFIER_IMPLICIT_DECL (DECL_NAME (newdecl)) == olddecl
+ && DECL_INITIAL (olddecl) == 0)
+ /* If -traditional, avoid error for redeclaring fcn
+ after implicit decl. */
+ ;
+ else if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && DECL_BUILT_IN (olddecl))
+ {
+ /* A function declaration for a built-in function. */
+ if (!TREE_PUBLIC (newdecl))
+ {
+ /* If you declare a built-in function name as static, the
+ built-in definition is overridden,
+ but optionally warn this was a bad choice of name. */
+ if (warn_shadow)
+ warning_with_decl (newdecl, "shadowing built-in function `%s'");
+ /* Discard the old built-in function. */
+ return 0;
+ }
+ else if (!types_match)
+ {
+ /* Accept the return type of the new declaration if same modes. */
+ tree oldreturntype = TREE_TYPE (TREE_TYPE (olddecl));
+ tree newreturntype = TREE_TYPE (TREE_TYPE (newdecl));
+ if (TYPE_MODE (oldreturntype) == TYPE_MODE (newreturntype))
+ {
+ /* Function types may be shared, so we can't just modify
+ the return type of olddecl's function type. */
+ tree newtype
+ = build_function_type (newreturntype,
+ TYPE_ARG_TYPES (TREE_TYPE (olddecl)));
+
+ types_match = comptypes (TREE_TYPE (newdecl), newtype);
+ if (types_match)
+ TREE_TYPE (olddecl) = newtype;
+ }
+ /* Accept harmless mismatch in first argument type also.
+ This is for ffs. */
+ if (TYPE_ARG_TYPES (TREE_TYPE (newdecl)) != 0
+ && TYPE_ARG_TYPES (TREE_TYPE (olddecl)) != 0
+ && TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (newdecl))) != 0
+ && TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (olddecl))) != 0
+ && (TYPE_MODE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (newdecl))))
+ ==
+ TYPE_MODE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (olddecl))))))
+ {
+ /* Function types may be shared, so we can't just modify
+ the return type of olddecl's function type. */
+ tree newtype
+ = build_function_type (TREE_TYPE (TREE_TYPE (olddecl)),
+ tree_cons (NULL_TREE,
+ TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (newdecl))),
+ TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (olddecl)))));
+
+ types_match = comptypes (TREE_TYPE (newdecl), newtype);
+ if (types_match)
+ TREE_TYPE (olddecl) = newtype;
+ }
+ }
+ if (!types_match)
+ {
+ /* If types don't match for a built-in, throw away the built-in. */
+ warning_with_decl (newdecl, "conflicting types for built-in function `%s'");
+ return 0;
+ }
+ }
+ else if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && DECL_SOURCE_LINE (olddecl) == 0)
+ {
+ /* A function declaration for a predeclared function
+ that isn't actually built in. */
+ if (!TREE_PUBLIC (newdecl))
+ {
+ /* If you declare it as static, the
+ default definition is overridden. */
+ return 0;
+ }
+ else if (!types_match)
+ {
+ /* If the types don't match, preserve volatility indication.
+ Later on, we will discard everything else about the
+ default declaration. */
+ TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
+ }
+ }
+ /* Permit char *foo () to match void *foo (...) if not pedantic,
+ if one of them came from a system header file. */
+ else if (!types_match
+ && TREE_CODE (olddecl) == FUNCTION_DECL
+ && TREE_CODE (newdecl) == FUNCTION_DECL
+ && TREE_CODE (TREE_TYPE (oldtype)) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (newtype)) == POINTER_TYPE
+ && (DECL_IN_SYSTEM_HEADER (olddecl)
+ || DECL_IN_SYSTEM_HEADER (newdecl))
+ && ((TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (newtype))) == void_type_node
+ && TYPE_ARG_TYPES (oldtype) == 0
+ && self_promoting_args_p (TYPE_ARG_TYPES (newtype))
+ && TREE_TYPE (TREE_TYPE (oldtype)) == char_type_node)
+ ||
+ (TREE_TYPE (TREE_TYPE (newtype)) == char_type_node
+ && TYPE_ARG_TYPES (newtype) == 0
+ && self_promoting_args_p (TYPE_ARG_TYPES (oldtype))
+ && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (oldtype))) == void_type_node)))
+ {
+ if (pedantic)
+ pedwarn_with_decl (newdecl, "conflicting types for `%s'");
+ /* Make sure we keep void * as ret type, not char *. */
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (oldtype))) == void_type_node)
+ TREE_TYPE (newdecl) = newtype = oldtype;
+
+ /* Set DECL_IN_SYSTEM_HEADER, so that if we see another declaration
+ we will come back here again. */
+ DECL_IN_SYSTEM_HEADER (newdecl) = 1;
+ }
+ else if (!types_match
+ /* Permit char *foo (int, ...); followed by char *foo ();
+ if not pedantic. */
+ && ! (TREE_CODE (olddecl) == FUNCTION_DECL
+ && ! pedantic
+ /* Return types must still match. */
+ && comptypes (TREE_TYPE (oldtype),
+ TREE_TYPE (newtype))
+ && TYPE_ARG_TYPES (newtype) == 0))
+ {
+ error_with_decl (newdecl, "conflicting types for `%s'");
+ /* Check for function type mismatch
+ involving an empty arglist vs a nonempty one. */
+ if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && comptypes (TREE_TYPE (oldtype),
+ TREE_TYPE (newtype))
+ && ((TYPE_ARG_TYPES (oldtype) == 0
+ && DECL_INITIAL (olddecl) == 0)
+ ||
+ (TYPE_ARG_TYPES (newtype) == 0
+ && DECL_INITIAL (newdecl) == 0)))
+ {
+ /* Classify the problem further. */
+ register tree t = TYPE_ARG_TYPES (oldtype);
+ if (t == 0)
+ t = TYPE_ARG_TYPES (newtype);
+ for (; t; t = TREE_CHAIN (t))
+ {
+ register tree type = TREE_VALUE (t);
+
+ if (TREE_CHAIN (t) == 0
+ && TYPE_MAIN_VARIANT (type) != void_type_node)
+ {
+ error ("A parameter list with an ellipsis can't match");
+ error ("an empty parameter name list declaration.");
+ break;
+ }
+
+ if (TYPE_MAIN_VARIANT (type) == float_type_node
+ || C_PROMOTING_INTEGER_TYPE_P (type))
+ {
+ error ("An argument type that has a default promotion");
+ error ("can't match an empty parameter name list declaration.");
+ break;
+ }
+ }
+ }
+ error_with_decl (olddecl, "previous declaration of `%s'");
+ }
+ else
+ {
+ char *errmsg = redeclaration_error_message (newdecl, olddecl);
+ if (errmsg)
+ {
+ error_with_decl (newdecl, errmsg);
+ error_with_decl (olddecl,
+ ((DECL_INITIAL (olddecl)
+ && current_binding_level == global_binding_level)
+ ? "`%s' previously defined here"
+ : "`%s' previously declared here"));
+ }
+ else if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && DECL_INITIAL (olddecl) != 0
+ && TYPE_ARG_TYPES (oldtype) == 0
+ && TYPE_ARG_TYPES (newtype) != 0)
+ {
+ register tree type, parm;
+ register int nargs;
+ /* Prototype decl follows defn w/o prototype. */
+
+ for (parm = TYPE_ACTUAL_ARG_TYPES (oldtype),
+ type = TYPE_ARG_TYPES (newtype),
+ nargs = 1;
+ (TYPE_MAIN_VARIANT (TREE_VALUE (parm)) != void_type_node
+ || TYPE_MAIN_VARIANT (TREE_VALUE (type)) != void_type_node);
+ parm = TREE_CHAIN (parm), type = TREE_CHAIN (type), nargs++)
+ {
+ if (TYPE_MAIN_VARIANT (TREE_VALUE (parm)) == void_type_node
+ || TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node)
+ {
+ errmsg = "prototype for `%s' follows and number of arguments";
+ break;
+ }
+ /* Type for passing arg must be consistent
+ with that declared for the arg. */
+ if (! comptypes (TREE_VALUE (parm), TREE_VALUE (type))
+ /* If -traditional, allow `unsigned int' instead of `int'
+ in the prototype. */
+ && (! (flag_traditional
+ && TYPE_MAIN_VARIANT (TREE_VALUE (parm)) == integer_type_node
+ && TYPE_MAIN_VARIANT (TREE_VALUE (type)) == unsigned_type_node)))
+ {
+ errmsg = "prototype for `%s' follows and argument %d";
+ break;
+ }
+ }
+ if (errmsg)
+ {
+ error_with_decl (newdecl, errmsg, nargs);
+ error_with_decl (olddecl,
+ "doesn't match non-prototype definition here");
+ }
+ else
+ {
+ warning_with_decl (newdecl, "prototype for `%s' follows");
+ warning_with_decl (olddecl, "non-prototype definition here");
+ }
+ }
+ /* Warn about mismatches in various flags. */
+ else
+ {
+ /* Warn if function is now inline
+ but was previously declared not inline and has been called. */
+ if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && ! DECL_INLINE (olddecl) && DECL_INLINE (newdecl)
+ && TREE_USED (olddecl))
+ warning_with_decl (newdecl,
+ "`%s' declared inline after being called");
+ if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && ! DECL_INLINE (olddecl) && DECL_INLINE (newdecl)
+ && DECL_INITIAL (olddecl) != 0)
+ warning_with_decl (newdecl,
+ "`%s' declared inline after its definition");
+
+ /* If pedantic, warn when static declaration follows a non-static
+ declaration. Otherwise, do so only for functions. */
+ if ((pedantic || TREE_CODE (olddecl) == FUNCTION_DECL)
+ && TREE_PUBLIC (olddecl)
+ && !TREE_PUBLIC (newdecl))
+ warning_with_decl (newdecl, "static declaration for `%s' follows non-static");
+
+ /* Warn when const declaration follows a non-const
+ declaration, but not for functions. */
+ if (TREE_CODE (olddecl) != FUNCTION_DECL
+ && !TREE_READONLY (olddecl)
+ && TREE_READONLY (newdecl))
+ warning_with_decl (newdecl, "const declaration for `%s' follows non-const");
+ /* These bits are logically part of the type, for variables.
+ But not for functions
+ (where qualifiers are not valid ANSI anyway). */
+ else if (pedantic && TREE_CODE (olddecl) != FUNCTION_DECL
+ && (TREE_READONLY (newdecl) != TREE_READONLY (olddecl)
+ || TREE_THIS_VOLATILE (newdecl) != TREE_THIS_VOLATILE (olddecl)))
+ pedwarn_with_decl (newdecl, "type qualifiers for `%s' conflict with previous decl");
+ }
+ }
+
+ /* Optionally warn about more than one declaration for the same name. */
+ if (warn_redundant_decls && DECL_SOURCE_LINE (olddecl) != 0
+ /* Don't warn about a function declaration
+ followed by a definition. */
+ && !(TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) != 0
+ && DECL_INITIAL (olddecl) == 0)
+ /* Don't warn about extern decl followed by (tentative) definition. */
+ && !(DECL_EXTERNAL (olddecl) && ! DECL_EXTERNAL (newdecl)))
+ {
+ warning_with_decl (newdecl, "redundant redeclaration of `%s' in same scope");
+ warning_with_decl (olddecl, "previous declaration of `%s'");
+ }
+
+ /* Copy all the DECL_... slots specified in the new decl
+ except for any that we copy here from the old type.
+
+ Past this point, we don't change OLDTYPE and NEWTYPE
+ even if we change the types of NEWDECL and OLDDECL. */
+
+ if (types_match)
+ {
+ /* Make sure we put the new type in the same obstack as the old ones.
+ If the old types are not both in the same obstack, use the permanent
+ one. */
+ if (TYPE_OBSTACK (oldtype) == TYPE_OBSTACK (newtype))
+ push_obstacks (TYPE_OBSTACK (oldtype), TYPE_OBSTACK (oldtype));
+ else
+ {
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ }
+
+ /* Merge the data types specified in the two decls. */
+ if (TREE_CODE (newdecl) != FUNCTION_DECL || !DECL_BUILT_IN (olddecl))
+ TREE_TYPE (newdecl)
+ = TREE_TYPE (olddecl)
+ = common_type (newtype, oldtype);
+
+ /* Lay the type out, unless already done. */
+ if (oldtype != TREE_TYPE (newdecl))
+ {
+ if (TREE_TYPE (newdecl) != error_mark_node)
+ layout_type (TREE_TYPE (newdecl));
+ if (TREE_CODE (newdecl) != FUNCTION_DECL
+ && TREE_CODE (newdecl) != TYPE_DECL
+ && TREE_CODE (newdecl) != CONST_DECL)
+ layout_decl (newdecl, 0);
+ }
+ else
+ {
+ /* Since the type is OLDDECL's, make OLDDECL's size go with. */
+ DECL_SIZE (newdecl) = DECL_SIZE (olddecl);
+ if (TREE_CODE (olddecl) != FUNCTION_DECL)
+ if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl))
+ DECL_ALIGN (newdecl) = DECL_ALIGN (olddecl);
+ }
+
+ /* Keep the old rtl since we can safely use it. */
+ DECL_RTL (newdecl) = DECL_RTL (olddecl);
+
+ /* Merge the type qualifiers. */
+ if (DECL_BUILT_IN_NONANSI (olddecl) && TREE_THIS_VOLATILE (olddecl)
+ && !TREE_THIS_VOLATILE (newdecl))
+ TREE_THIS_VOLATILE (olddecl) = 0;
+ if (TREE_READONLY (newdecl))
+ TREE_READONLY (olddecl) = 1;
+ if (TREE_THIS_VOLATILE (newdecl))
+ {
+ TREE_THIS_VOLATILE (olddecl) = 1;
+ if (TREE_CODE (newdecl) == VAR_DECL)
+ make_var_volatile (newdecl);
+ }
+
+ /* Keep source location of definition rather than declaration. */
+ if (DECL_INITIAL (newdecl) == 0 && DECL_INITIAL (olddecl) != 0)
+ {
+ DECL_SOURCE_LINE (newdecl) = DECL_SOURCE_LINE (olddecl);
+ DECL_SOURCE_FILE (newdecl) = DECL_SOURCE_FILE (olddecl);
+ }
+
+ /* Merge the unused-warning information. */
+ if (DECL_IN_SYSTEM_HEADER (olddecl))
+ DECL_IN_SYSTEM_HEADER (newdecl) = 1;
+ else if (DECL_IN_SYSTEM_HEADER (newdecl))
+ DECL_IN_SYSTEM_HEADER (olddecl) = 1;
+
+ /* Merge the initialization information. */
+ if (DECL_INITIAL (newdecl) == 0)
+ DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
+
+ /* Merge the section attribute.
+ We want to issue an error if the sections conflict but that must be
+ done later in decl_attributes since we are called before attributes
+ are assigned. */
+ if (DECL_SECTION_NAME (newdecl) == NULL_TREE)
+ DECL_SECTION_NAME (newdecl) = DECL_SECTION_NAME (olddecl);
+
+ pop_obstacks ();
+ }
+ /* If cannot merge, then use the new type and qualifiers,
+ and don't preserve the old rtl. */
+ else
+ {
+ TREE_TYPE (olddecl) = TREE_TYPE (newdecl);
+ TREE_READONLY (olddecl) = TREE_READONLY (newdecl);
+ TREE_THIS_VOLATILE (olddecl) = TREE_THIS_VOLATILE (newdecl);
+ TREE_SIDE_EFFECTS (olddecl) = TREE_SIDE_EFFECTS (newdecl);
+ }
+
+ /* Merge the storage class information. */
+ /* For functions, static overrides non-static. */
+ if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ {
+ TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl);
+ /* This is because we don't automatically
+ copy the attributes of NEWDECL into OLDDECL. */
+ TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
+ /* If this clears `static', clear it in the identifier too. */
+ if (! TREE_PUBLIC (olddecl))
+ TREE_PUBLIC (DECL_NAME (olddecl)) = 0;
+ }
+ if (DECL_EXTERNAL (newdecl))
+ {
+ TREE_STATIC (newdecl) = TREE_STATIC (olddecl);
+ DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl);
+ /* An extern decl does not override previous storage class. */
+ TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl);
+ }
+ else
+ {
+ TREE_STATIC (olddecl) = TREE_STATIC (newdecl);
+ TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
+ }
+
+ /* If either decl says `inline', this fn is inline,
+ unless its definition was passed already. */
+ if (DECL_INLINE (newdecl) && DECL_INITIAL (olddecl) == 0)
+ DECL_INLINE (olddecl) = 1;
+ DECL_INLINE (newdecl) = DECL_INLINE (olddecl);
+
+ /* Get rid of any built-in function if new arg types don't match it
+ or if we have a function definition. */
+ if (TREE_CODE (newdecl) == FUNCTION_DECL
+ && DECL_BUILT_IN (olddecl)
+ && (!types_match || new_is_definition))
+ {
+ TREE_TYPE (olddecl) = TREE_TYPE (newdecl);
+ DECL_BUILT_IN (olddecl) = 0;
+ }
+
+ /* If redeclaring a builtin function, and not a definition,
+ it stays built in.
+ Also preserve various other info from the definition. */
+ if (TREE_CODE (newdecl) == FUNCTION_DECL && !new_is_definition)
+ {
+ if (DECL_BUILT_IN (olddecl))
+ {
+ DECL_BUILT_IN (newdecl) = 1;
+ DECL_FUNCTION_CODE (newdecl) = DECL_FUNCTION_CODE (olddecl);
+ }
+ else
+ DECL_FRAME_SIZE (newdecl) = DECL_FRAME_SIZE (olddecl);
+
+ DECL_RESULT (newdecl) = DECL_RESULT (olddecl);
+ DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
+ DECL_SAVED_INSNS (newdecl) = DECL_SAVED_INSNS (olddecl);
+ DECL_ARGUMENTS (newdecl) = DECL_ARGUMENTS (olddecl);
+ }
+
+ /* Copy most of the decl-specific fields of NEWDECL into OLDDECL.
+ But preserve OLDDECL's DECL_UID. */
+ {
+ register unsigned olddecl_uid = DECL_UID (olddecl);
+
+ bcopy ((char *) newdecl + sizeof (struct tree_common),
+ (char *) olddecl + sizeof (struct tree_common),
+ sizeof (struct tree_decl) - sizeof (struct tree_common));
+ DECL_UID (olddecl) = olddecl_uid;
+ }
+
+ return 1;
+}
+
+/* Record a decl-node X as belonging to the current lexical scope.
+ Check for errors (such as an incompatible declaration for the same
+ name already seen in the same scope).
+
+ Returns either X or an old decl for the same name.
+ If an old decl is returned, it may have been smashed
+ to agree with what X says. */
+
+tree
+pushdecl (x)
+ tree x;
+{
+ register tree t;
+ register tree name = DECL_NAME (x);
+ register struct binding_level *b = current_binding_level;
+
+ DECL_CONTEXT (x) = current_function_decl;
+ /* A local extern declaration for a function doesn't constitute nesting.
+ A local auto declaration does, since it's a forward decl
+ for a nested function coming later. */
+ if (TREE_CODE (x) == FUNCTION_DECL && DECL_INITIAL (x) == 0
+ && DECL_EXTERNAL (x))
+ DECL_CONTEXT (x) = 0;
+
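+ /* For example, when warn_nested_externs is set, a block-scope
+ declaration such as `void f () { extern int g (); }' draws the
+ nested-extern warning below for `g'. */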
+ if (warn_nested_externs && DECL_EXTERNAL (x) && b != global_binding_level
+ && x != IDENTIFIER_IMPLICIT_DECL (name)
+ /* Don't print error messages for __FUNCTION__ and __PRETTY_FUNCTION__ */
+ && !DECL_IN_SYSTEM_HEADER (x))
+ warning ("nested extern declaration of `%s'", IDENTIFIER_POINTER (name));
+
+ if (name)
+ {
+ char *file;
+ int line;
+
+ /* Don't type check externs here when -traditional. This is so that
+ code with conflicting declarations inside blocks will get warnings
+ not errors. X11 for instance depends on this. */
+ if (DECL_EXTERNAL (x) && TREE_PUBLIC (x) && ! flag_traditional)
+ t = lookup_name_current_level_global (name);
+ else
+ t = lookup_name_current_level (name);
+ if (t != 0 && t == error_mark_node)
+ /* error_mark_node is 0 for a while during initialization! */
+ {
+ t = 0;
+ error_with_decl (x, "`%s' used prior to declaration");
+ }
+
+ if (t != 0)
+ {
+ file = DECL_SOURCE_FILE (t);
+ line = DECL_SOURCE_LINE (t);
+ }
+
+ if (t != 0 && duplicate_decls (x, t))
+ {
+ if (TREE_CODE (t) == PARM_DECL)
+ {
+ /* Don't allow more than one "real" duplicate
+ of a forward parm decl. */
+ TREE_ASM_WRITTEN (t) = TREE_ASM_WRITTEN (x);
+ return t;
+ }
+ /* If this decl is `static' and an implicit decl was seen previously,
+ warn. But don't complain if -traditional,
+ since traditional compilers don't complain. */
+ if (!flag_traditional && TREE_PUBLIC (name)
+ && ! TREE_PUBLIC (x) && ! DECL_EXTERNAL (x)
+ /* We used to warn also for explicit extern followed by static,
+ but sometimes you need to do it that way. */
+ && IDENTIFIER_IMPLICIT_DECL (name) != 0)
+ {
+ pedwarn ("`%s' was declared implicitly `extern' and later `static'",
+ IDENTIFIER_POINTER (name));
+ pedwarn_with_file_and_line (file, line,
+ "previous declaration of `%s'",
+ IDENTIFIER_POINTER (name));
+ }
+
+ /* If this is a global decl, and there exists a conflicting local
+ decl in a parent block, then we can't return as yet, because we
+ need to register this decl in the current binding block. */
+ if (! DECL_EXTERNAL (x) || ! TREE_PUBLIC (x)
+ || lookup_name (name) == t)
+ return t;
+ }
+
+ /* If we are processing a typedef statement, generate a whole new
+ ..._TYPE node (which will be just a variant of the existing
+ ..._TYPE node with identical properties) and then install the
+ TYPE_DECL node generated to represent the typedef name as the
+ TYPE_NAME of this brand new (duplicate) ..._TYPE node.
+
+ The whole point here is to end up with a situation where each
+ and every ..._TYPE node the compiler creates will be uniquely
+ associated with AT MOST one node representing a typedef name.
+ This way, even though the compiler substitutes corresponding
+ ..._TYPE nodes for TYPE_DECL (i.e. "typedef name") nodes very
+ early on, later parts of the compiler can always do the reverse
+ translation and get back the corresponding typedef name. For
+ example, given:
+
+ typedef struct S MY_TYPE;
+ MY_TYPE object;
+
+ Later parts of the compiler might only know that `object' was of
+ type `struct S' if it were not for the code just below. With this
+ code however, later parts of the compiler see something like:
+
+ struct S' == struct S
+ typedef struct S' MY_TYPE;
+ struct S' object;
+
+ And they can then deduce (from the node for type struct S') that
+ the original object declaration was:
+
+ MY_TYPE object;
+
+ Being able to do this is important for proper support of protoize,
+ and also for generating precise symbolic debugging information
+ which takes full account of the programmer's (typedef) vocabulary.
+
+ Obviously, we don't want to generate a duplicate ..._TYPE node if
+ the TYPE_DECL node that we are now processing really represents a
+ standard built-in type.
+
+ Since all standard types are effectively declared at line zero
+ in the source file, we can easily check to see if we are working
+ on a standard type by checking the current value of lineno. */
+
+ if (TREE_CODE (x) == TYPE_DECL)
+ {
+ if (DECL_SOURCE_LINE (x) == 0)
+ {
+ if (TYPE_NAME (TREE_TYPE (x)) == 0)
+ TYPE_NAME (TREE_TYPE (x)) = x;
+ }
+ else if (TREE_TYPE (x) != error_mark_node)
+ {
+ tree tt = TREE_TYPE (x);
+
+ tt = build_type_copy (tt);
+ TYPE_NAME (tt) = x;
+ TREE_TYPE (x) = tt;
+ }
+ }
+
+ /* Multiple external decls of the same identifier ought to match.
+ Check against both global declarations (when traditional) and out of
+ scope (limbo) block level declarations.
+
+ We get warnings about inline functions where they are defined.
+ Avoid duplicate warnings where they are used. */
+ if (TREE_PUBLIC (x) && ! DECL_INLINE (x))
+ {
+ tree decl;
+
+ if (flag_traditional && IDENTIFIER_GLOBAL_VALUE (name) != 0
+ && (DECL_EXTERNAL (IDENTIFIER_GLOBAL_VALUE (name))
+ || TREE_PUBLIC (IDENTIFIER_GLOBAL_VALUE (name))))
+ decl = IDENTIFIER_GLOBAL_VALUE (name);
+ else if (IDENTIFIER_LIMBO_VALUE (name) != 0)
+ /* Decls in limbo are always extern, so no need to check that. */
+ decl = IDENTIFIER_LIMBO_VALUE (name);
+ else
+ decl = 0;
+
+ if (decl && ! comptypes (TREE_TYPE (x), TREE_TYPE (decl))
+ /* If old decl is built-in, we already warned if we should. */
+ && !DECL_BUILT_IN (decl))
+ {
+ pedwarn_with_decl (x,
+ "type mismatch with previous external decl");
+ pedwarn_with_decl (decl, "previous external decl of `%s'");
+ }
+ }
+
+ /* If a function has had an implicit declaration, and then is defined,
+ make sure they are compatible. */
+
+ if (IDENTIFIER_IMPLICIT_DECL (name) != 0
+ && IDENTIFIER_GLOBAL_VALUE (name) == 0
+ && TREE_CODE (x) == FUNCTION_DECL
+ && ! comptypes (TREE_TYPE (x),
+ TREE_TYPE (IDENTIFIER_IMPLICIT_DECL (name))))
+ {
+ warning_with_decl (x, "type mismatch with previous implicit declaration");
+ warning_with_decl (IDENTIFIER_IMPLICIT_DECL (name),
+ "previous implicit declaration of `%s'");
+ }
+
+ /* In PCC-compatibility mode, extern decls of vars with no current decl
+ take effect at top level no matter where they are. */
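+ /* For example, under -traditional a block-scope `extern int z;'
+ leaves `z' visible at file scope after the block ends, as it
+ would be under PCC. */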
+ if (flag_traditional && DECL_EXTERNAL (x)
+ && lookup_name (name) == 0)
+ {
+ tree type = TREE_TYPE (x);
+
+ /* But don't do this if the type contains temporary nodes. */
+ while (type)
+ {
+ if (type == error_mark_node)
+ break;
+ if (! TREE_PERMANENT (type))
+ {
+ warning_with_decl (x, "type of external `%s' is not global");
+ /* By exiting the loop early, we leave TYPE nonzero,
+ and thus prevent globalization of the decl. */
+ break;
+ }
+ else if (TREE_CODE (type) == FUNCTION_TYPE
+ && TYPE_ARG_TYPES (type) != 0)
+ /* The types might not be truly local,
+ but the list of arg types certainly is temporary.
+ Since prototypes are nontraditional,
+ it is ok not to do the traditional thing. */
+ break;
+ type = TREE_TYPE (type);
+ }
+
+ if (type == 0)
+ b = global_binding_level;
+ }
+
+ /* This name is new in its binding level.
+ Install the new declaration and return it. */
+ if (b == global_binding_level)
+ {
+ /* Install a global value. */
+
+ /* If the first global decl has external linkage,
+ warn if we later see static one. */
+ if (IDENTIFIER_GLOBAL_VALUE (name) == 0 && TREE_PUBLIC (x))
+ TREE_PUBLIC (name) = 1;
+
+ IDENTIFIER_GLOBAL_VALUE (name) = x;
+
+ /* We no longer care about any previous block level declarations. */
+ IDENTIFIER_LIMBO_VALUE (name) = 0;
+
+ /* Don't forget if the function was used via an implicit decl. */
+ if (IDENTIFIER_IMPLICIT_DECL (name)
+ && TREE_USED (IDENTIFIER_IMPLICIT_DECL (name)))
+ TREE_USED (x) = 1, TREE_USED (name) = 1;
+
+ /* Don't forget if its address was taken in that way. */
+ if (IDENTIFIER_IMPLICIT_DECL (name)
+ && TREE_ADDRESSABLE (IDENTIFIER_IMPLICIT_DECL (name)))
+ TREE_ADDRESSABLE (x) = 1;
+
+ /* Warn about mismatches against previous implicit decl. */
+ if (IDENTIFIER_IMPLICIT_DECL (name) != 0
+ /* If this real decl matches the implicit, don't complain. */
+ && ! (TREE_CODE (x) == FUNCTION_DECL
+ && (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (x)))
+ == integer_type_node)))
+ pedwarn ("`%s' was previously implicitly declared to return `int'",
+ IDENTIFIER_POINTER (name));
+
+ /* If this decl is `static' and an `extern' was seen previously,
+ that is erroneous. */
+ if (TREE_PUBLIC (name)
+ && ! TREE_PUBLIC (x) && ! DECL_EXTERNAL (x))
+ {
+ /* Okay to redeclare an ANSI built-in as static. */
+ if (t != 0 && DECL_BUILT_IN (t))
+ ;
+ /* Okay to declare a non-ANSI built-in as anything. */
+ else if (t != 0 && DECL_BUILT_IN_NONANSI (t))
+ ;
+ else if (IDENTIFIER_IMPLICIT_DECL (name))
+ pedwarn ("`%s' was declared implicitly `extern' and later `static'",
+ IDENTIFIER_POINTER (name));
+ else
+ pedwarn ("`%s' was declared `extern' and later `static'",
+ IDENTIFIER_POINTER (name));
+ }
+ }
+ else
+ {
+ /* Here to install a non-global value. */
+ tree oldlocal = IDENTIFIER_LOCAL_VALUE (name);
+ tree oldglobal = IDENTIFIER_GLOBAL_VALUE (name);
+ IDENTIFIER_LOCAL_VALUE (name) = x;
+
+ /* If this is an extern function declaration, see if we
+ have a global definition or declaration for the function. */
+ if (oldlocal == 0
+ && DECL_EXTERNAL (x) && !DECL_INLINE (x)
+ && oldglobal != 0
+ && TREE_CODE (x) == FUNCTION_DECL
+ && TREE_CODE (oldglobal) == FUNCTION_DECL)
+ {
+ /* We have one. Their types must agree. */
+ if (! comptypes (TREE_TYPE (x),
+ TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (name))))
+ pedwarn_with_decl (x, "extern declaration of `%s' doesn't match global one");
+ else
+ {
+ /* Inner extern decl is inline if global one is.
+ Copy enough to really inline it. */
+ if (DECL_INLINE (oldglobal))
+ {
+ DECL_INLINE (x) = DECL_INLINE (oldglobal);
+ DECL_INITIAL (x) = (current_function_decl == oldglobal
+ ? 0 : DECL_INITIAL (oldglobal));
+ DECL_SAVED_INSNS (x) = DECL_SAVED_INSNS (oldglobal);
+ DECL_FRAME_SIZE (x) = DECL_FRAME_SIZE (oldglobal);
+ DECL_ARGUMENTS (x) = DECL_ARGUMENTS (oldglobal);
+ DECL_RESULT (x) = DECL_RESULT (oldglobal);
+ TREE_ASM_WRITTEN (x) = TREE_ASM_WRITTEN (oldglobal);
+ DECL_ABSTRACT_ORIGIN (x) = oldglobal;
+ }
+ /* Inner extern decl is built-in if global one is. */
+ if (DECL_BUILT_IN (oldglobal))
+ {
+ DECL_BUILT_IN (x) = DECL_BUILT_IN (oldglobal);
+ DECL_FUNCTION_CODE (x) = DECL_FUNCTION_CODE (oldglobal);
+ }
+ /* Keep the arg types from a file-scope fcn defn. */
+ if (TYPE_ARG_TYPES (TREE_TYPE (oldglobal)) != 0
+ && DECL_INITIAL (oldglobal)
+ && TYPE_ARG_TYPES (TREE_TYPE (x)) == 0)
+ TREE_TYPE (x) = TREE_TYPE (oldglobal);
+ }
+ }
+
+#if 0 /* This case is probably sometimes the right thing to do. */
+ /* If we have a local external declaration,
+ then any file-scope declaration should not
+ have been static. */
+ if (oldlocal == 0 && oldglobal != 0
+ && !TREE_PUBLIC (oldglobal)
+ && DECL_EXTERNAL (x) && TREE_PUBLIC (x))
+ warning ("`%s' locally external but globally static",
+ IDENTIFIER_POINTER (name));
+#endif
+
+ /* If we have a local external declaration,
+ and no file-scope declaration has yet been seen,
+ then if we later have a file-scope decl it must not be static. */
+ if (oldlocal == 0
+ && oldglobal == 0
+ && DECL_EXTERNAL (x)
+ && TREE_PUBLIC (x))
+ {
+ TREE_PUBLIC (name) = 1;
+
+ /* Save this decl, so that we can do type checking against
+ other decls after it falls out of scope.
+
+ Only save it once. This prevents temporary decls created in
+ expand_inline_function from being used here, since this
+ will have been set when the inline function was parsed.
+ It also helps give slightly better warnings. */
+ if (IDENTIFIER_LIMBO_VALUE (name) == 0)
+ IDENTIFIER_LIMBO_VALUE (name) = x;
+ }
+
+ /* Warn if shadowing an argument at the top level of the body. */
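+ /* E.g. in `int f (x) int x; { int x; ... }' the inner `x' shadows
+ the parameter and gets the pedwarn below. */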
+ if (oldlocal != 0 && !DECL_EXTERNAL (x)
+ /* This warning doesn't apply to the parms of a nested fcn. */
+ && ! current_binding_level->parm_flag
+ /* Check that this is one level down from the parms. */
+ && current_binding_level->level_chain->parm_flag
+ /* Check that the decl being shadowed
+ comes from the parm level, one level up. */
+ && chain_member (oldlocal, current_binding_level->level_chain->names))
+ {
+ if (TREE_CODE (oldlocal) == PARM_DECL)
+ pedwarn ("declaration of `%s' shadows a parameter",
+ IDENTIFIER_POINTER (name));
+ else
+ pedwarn ("declaration of `%s' shadows a symbol from the parameter list",
+ IDENTIFIER_POINTER (name));
+ }
+
+ /* Maybe warn if shadowing something else. */
+ else if (warn_shadow && !DECL_EXTERNAL (x)
+ /* No shadow warnings for internally generated vars. */
+ && DECL_SOURCE_LINE (x) != 0
+ /* No shadow warnings for vars made for inlining. */
+ && ! DECL_FROM_INLINE (x))
+ {
+ char *warnstring = 0;
+
+ if (TREE_CODE (x) == PARM_DECL
+ && current_binding_level->level_chain->parm_flag)
+ /* Don't warn about the parm names in a function declarator
+ within a function declarator.
+ It would be nice to avoid warning in any function
+ declarator in a declaration, as opposed to a definition,
+ but there is no way to tell that it's not a definition. */
+ ;
+ else if (oldlocal != 0 && TREE_CODE (oldlocal) == PARM_DECL)
+ warnstring = "declaration of `%s' shadows a parameter";
+ else if (oldlocal != 0)
+ warnstring = "declaration of `%s' shadows previous local";
+ else if (IDENTIFIER_GLOBAL_VALUE (name) != 0
+ && IDENTIFIER_GLOBAL_VALUE (name) != error_mark_node)
+ warnstring = "declaration of `%s' shadows global declaration";
+
+ if (warnstring)
+ warning (warnstring, IDENTIFIER_POINTER (name));
+ }
+
+ /* If storing a local value, there may already be one (inherited).
+ If so, record it for restoration when this binding level ends. */
+ if (oldlocal != 0)
+ b->shadowed = tree_cons (name, oldlocal, b->shadowed);
+ }
+
+ /* Keep count of variables in this level with incomplete type. */
+ if (TYPE_SIZE (TREE_TYPE (x)) == 0)
+ ++b->n_incomplete;
+ }
+
+ /* Put decls on list in reverse order.
+ We will reverse them later if necessary. */
+ TREE_CHAIN (x) = b->names;
+ b->names = x;
+
+ return x;
+}
+
+/* Like pushdecl, only it places X in GLOBAL_BINDING_LEVEL, if appropriate. */
+
+tree
+pushdecl_top_level (x)
+ tree x;
+{
+ register tree t;
+ register struct binding_level *b = current_binding_level;
+
+ current_binding_level = global_binding_level;
+ t = pushdecl (x);
+ current_binding_level = b;
+ return t;
+}
+
+/* Generate an implicit declaration for identifier FUNCTIONID
+ as a function of type int (). Print a warning if appropriate. */
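+/* For example, a call `foo (1);' with no declaration of `foo' in
+ scope is treated as if `extern int foo ();' had appeared in the
+ innermost block. */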
+
+tree
+implicitly_declare (functionid)
+ tree functionid;
+{
+ register tree decl;
+ int traditional_warning = 0;
+ /* Only one "implicit declaration" warning per identifier. */
+ int implicit_warning;
+
+ /* Save the decl permanently so we can warn if definition follows. */
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+
+ /* We used to reuse an old implicit decl here,
+ but this loses with inline functions because it can clobber
+ the saved decl chains. */
+/* if (IDENTIFIER_IMPLICIT_DECL (functionid) != 0)
+ decl = IDENTIFIER_IMPLICIT_DECL (functionid);
+ else */
+ decl = build_decl (FUNCTION_DECL, functionid, default_function_type);
+
+ /* Warn of implicit decl following explicit local extern decl.
+ This is probably a program designed for traditional C. */
+ if (TREE_PUBLIC (functionid) && IDENTIFIER_GLOBAL_VALUE (functionid) == 0)
+ traditional_warning = 1;
+
+ /* Warn once of an implicit declaration. */
+ implicit_warning = (IDENTIFIER_IMPLICIT_DECL (functionid) == 0);
+
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+
+ /* Record that we have an implicit decl and this is it. */
+ IDENTIFIER_IMPLICIT_DECL (functionid) = decl;
+
+ /* ANSI standard says implicit declarations are in the innermost block.
+ So we record the decl in the standard fashion.
+ If flag_traditional is set, pushdecl does it top-level. */
+ pushdecl (decl);
+
+ /* This is a no-op in c-lang.c or something real in objc-actions.c. */
+ maybe_objc_check_decl (decl);
+
+ rest_of_decl_compilation (decl, NULL_PTR, 0, 0);
+
+ if (warn_implicit && implicit_warning)
+ warning ("implicit declaration of function `%s'",
+ IDENTIFIER_POINTER (functionid));
+ else if (warn_traditional && traditional_warning)
+ warning ("function `%s' was previously declared within a block",
+ IDENTIFIER_POINTER (functionid));
+
+ /* Write a record describing this implicit function declaration to the
+ prototypes file (if requested). */
+
+ gen_aux_info_record (decl, 0, 1, 0);
+
+ pop_obstacks ();
+
+ return decl;
+}
+
+/* Return zero if the declaration NEWDECL is valid
+ when the declaration OLDDECL (assumed to be for the same name)
+ has already been seen.
+ Otherwise return an error message format string with a %s
+ where the identifier should go. */
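+/* For example, two file-scope definitions `int x = 1; int x = 2;'
+ produce "redefinition of `%s'", while two tentative definitions
+ `int x; int x;' are accepted. */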
+
+static char *
+redeclaration_error_message (newdecl, olddecl)
+ tree newdecl, olddecl;
+{
+ if (TREE_CODE (newdecl) == TYPE_DECL)
+ {
+ if (flag_traditional && TREE_TYPE (newdecl) == TREE_TYPE (olddecl))
+ return 0;
+ return "redefinition of `%s'";
+ }
+ else if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ {
+ /* Declarations of functions can insist on internal linkage
+ but they can't be inconsistent with internal linkage,
+ so there can be no error on that account.
+ However defining the same name twice is no good. */
+ if (DECL_INITIAL (olddecl) != 0 && DECL_INITIAL (newdecl) != 0
+ /* However, defining once as extern inline and a second
+ time in another way is ok. */
+ && !(DECL_INLINE (olddecl) && DECL_EXTERNAL (olddecl)
+ && !(DECL_INLINE (newdecl) && DECL_EXTERNAL (newdecl))))
+ return "redefinition of `%s'";
+ return 0;
+ }
+ else if (current_binding_level == global_binding_level)
+ {
+ /* Objects declared at top level: */
+ /* If at least one is a reference, it's ok. */
+ if (DECL_EXTERNAL (newdecl) || DECL_EXTERNAL (olddecl))
+ return 0;
+ /* Reject two definitions. */
+ if (DECL_INITIAL (olddecl) != 0 && DECL_INITIAL (newdecl) != 0)
+ return "redefinition of `%s'";
+ /* Now we have two tentative defs, or one tentative and one real def. */
+ /* Insist that the linkage match. */
+ if (TREE_PUBLIC (olddecl) != TREE_PUBLIC (newdecl))
+ return "conflicting declarations of `%s'";
+ return 0;
+ }
+ else if (current_binding_level->parm_flag
+ && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
+ return 0;
+ else
+ {
+ /* Newdecl has block scope. If olddecl has block scope also, then
+ reject two definitions, and reject a definition together with an
+ external reference. Otherwise, it is OK, because newdecl must
+ be an extern reference to olddecl. */
+ if (!(DECL_EXTERNAL (newdecl) && DECL_EXTERNAL (olddecl))
+ && DECL_CONTEXT (newdecl) == DECL_CONTEXT (olddecl))
+ return "redeclaration of `%s'";
+ return 0;
+ }
+}
+
+/* Get the LABEL_DECL corresponding to identifier ID as a label.
+ Create one if none exists so far for the current function.
+ This function is called for both label definitions and label references. */
+
+tree
+lookup_label (id)
+ tree id;
+{
+ register tree decl = IDENTIFIER_LABEL_VALUE (id);
+
+ if (current_function_decl == 0)
+ {
+ error ("label %s referenced outside of any function",
+ IDENTIFIER_POINTER (id));
+ return 0;
+ }
+
+ /* Use a label already defined or ref'd with this name. */
+ if (decl != 0)
+ {
+ /* But not if it is inherited and wasn't declared to be inheritable. */
+ if (DECL_CONTEXT (decl) != current_function_decl
+ && ! C_DECLARED_LABEL_FLAG (decl))
+ return shadow_label (id);
+ return decl;
+ }
+
+ decl = build_decl (LABEL_DECL, id, void_type_node);
+
+ /* Make sure every label has an rtx. */
+ label_rtx (decl);
+
+ /* A label not explicitly declared must be local to where it's ref'd. */
+ DECL_CONTEXT (decl) = current_function_decl;
+
+ DECL_MODE (decl) = VOIDmode;
+
+ /* Say where one reference is to the label,
+ for the sake of the error if it is not defined. */
+ DECL_SOURCE_LINE (decl) = lineno;
+ DECL_SOURCE_FILE (decl) = input_filename;
+
+ IDENTIFIER_LABEL_VALUE (id) = decl;
+
+ named_labels = tree_cons (NULL_TREE, decl, named_labels);
+
+ return decl;
+}
+
+/* Make a label named NAME in the current function,
+ shadowing silently any that may be inherited from containing functions
+ or containing scopes.
+
+ Note that if the label being shadowed comes from another scope
+ in the same function, valid use requires calling
+ declare_nonlocal_label right away. */
+
+tree
+shadow_label (name)
+ tree name;
+{
+ register tree decl = IDENTIFIER_LABEL_VALUE (name);
+
+ if (decl != 0)
+ {
+ shadowed_labels = tree_cons (NULL_TREE, decl, shadowed_labels);
+ IDENTIFIER_LABEL_VALUE (name) = decl = 0;
+ }
+
+ return lookup_label (name);
+}
+
+/* Define a label, specifying the location in the source file.
+ Return the LABEL_DECL node for the label, if the definition is valid.
+ Otherwise return 0. */
+
+tree
+define_label (filename, line, name)
+ char *filename;
+ int line;
+ tree name;
+{
+ tree decl = lookup_label (name);
+
+ /* If label with this name is known from an outer context, shadow it. */
+ if (decl != 0 && DECL_CONTEXT (decl) != current_function_decl)
+ {
+ shadowed_labels = tree_cons (NULL_TREE, decl, shadowed_labels);
+ IDENTIFIER_LABEL_VALUE (name) = 0;
+ decl = lookup_label (name);
+ }
+
+ if (DECL_INITIAL (decl) != 0)
+ {
+ error ("duplicate label `%s'", IDENTIFIER_POINTER (name));
+ return 0;
+ }
+ else
+ {
+ /* Mark label as having been defined. */
+ DECL_INITIAL (decl) = error_mark_node;
+ /* Say where in the source. */
+ DECL_SOURCE_FILE (decl) = filename;
+ DECL_SOURCE_LINE (decl) = line;
+ return decl;
+ }
+}
+
+/* Return the list of declarations of the current level.
+ Note that this list is in reverse order unless/until
+ you nreverse it; and when you do nreverse it, you must
+ store the result back using `storedecls' or you will lose. */
+
+tree
+getdecls ()
+{
+ return current_binding_level->names;
+}
+
+/* Return the list of type-tags (for structs, etc) of the current level. */
+
+tree
+gettags ()
+{
+ return current_binding_level->tags;
+}
+
+/* Store the list of declarations of the current level.
+ This is done for the parameter declarations of a function being defined,
+ after they are modified in the light of any missing parameters. */
+
+static void
+storedecls (decls)
+ tree decls;
+{
+ current_binding_level->names = decls;
+}
+
+/* Similarly, store the list of tags of the current level. */
+
+static void
+storetags (tags)
+ tree tags;
+{
+ current_binding_level->tags = tags;
+}
+
+/* Given NAME, an IDENTIFIER_NODE,
+ return the structure (or union or enum) definition for that name.
+ Searches binding levels from BINDING_LEVEL up to the global level.
+ If THISLEVEL_ONLY is nonzero, searches only the specified context
+ (but skips any tag-transparent contexts to find one that is
+ meaningful for tags).
+ CODE says which kind of type the caller wants;
+ it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE.
+ If the wrong kind of type is found, an error is reported. */
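+/* For example, looking up `foo' with CODE == UNION_TYPE after
+ `struct foo { int i; };' still returns the RECORD_TYPE, but records
+ a pending invalid cross reference for pending_xref_error. */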
+
+static tree
+lookup_tag (code, name, binding_level, thislevel_only)
+ enum tree_code code;
+ struct binding_level *binding_level;
+ tree name;
+ int thislevel_only;
+{
+ register struct binding_level *level;
+
+ for (level = binding_level; level; level = level->level_chain)
+ {
+ register tree tail;
+ for (tail = level->tags; tail; tail = TREE_CHAIN (tail))
+ {
+ if (TREE_PURPOSE (tail) == name)
+ {
+ if (TREE_CODE (TREE_VALUE (tail)) != code)
+ {
+ /* Definition isn't the kind we were looking for. */
+ pending_invalid_xref = name;
+ pending_invalid_xref_file = input_filename;
+ pending_invalid_xref_line = lineno;
+ }
+ return TREE_VALUE (tail);
+ }
+ }
+ if (thislevel_only && ! level->tag_transparent)
+ return NULL_TREE;
+ }
+ return NULL_TREE;
+}
+
+/* Print an error message now
+ for a recent invalid struct, union or enum cross reference.
+ We don't print them immediately because they are not invalid
+ when used in the `struct foo;' construct for shadowing. */
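+/* For example, after an outer `union foo { int u; };' an inner
+ `struct foo;' momentarily looks like a wrong-kind reference, but it
+ is a valid shadowing declaration, so shadow_tag_warned clears the
+ pending error rather than letting it be issued. */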
+
+void
+pending_xref_error ()
+{
+ if (pending_invalid_xref != 0)
+ error_with_file_and_line (pending_invalid_xref_file,
+ pending_invalid_xref_line,
+ "`%s' defined as wrong kind of tag",
+ IDENTIFIER_POINTER (pending_invalid_xref));
+ pending_invalid_xref = 0;
+}
+
+/* Given a type, find the tag that was defined for it and return the tag name.
+ Otherwise return 0. */
+
+static tree
+lookup_tag_reverse (type)
+ tree type;
+{
+ register struct binding_level *level;
+
+ for (level = current_binding_level; level; level = level->level_chain)
+ {
+ register tree tail;
+ for (tail = level->tags; tail; tail = TREE_CHAIN (tail))
+ {
+ if (TREE_VALUE (tail) == type)
+ return TREE_PURPOSE (tail);
+ }
+ }
+ return NULL_TREE;
+}
+
+/* Look up NAME in the current binding level and its superiors
+ in the namespace of variables, functions and typedefs.
+ Return a ..._DECL node of some kind representing its definition,
+ or return 0 if it is undefined. */
+
+tree
+lookup_name (name)
+ tree name;
+{
+ register tree val;
+ if (current_binding_level != global_binding_level
+ && IDENTIFIER_LOCAL_VALUE (name))
+ val = IDENTIFIER_LOCAL_VALUE (name);
+ else
+ val = IDENTIFIER_GLOBAL_VALUE (name);
+ return val;
+}
+
+/* Similar to `lookup_name' but look only at current binding level. */
+
+tree
+lookup_name_current_level (name)
+ tree name;
+{
+ register tree t;
+
+ if (current_binding_level == global_binding_level)
+ return IDENTIFIER_GLOBAL_VALUE (name);
+
+ if (IDENTIFIER_LOCAL_VALUE (name) == 0)
+ return 0;
+
+ for (t = current_binding_level->names; t; t = TREE_CHAIN (t))
+ if (DECL_NAME (t) == name)
+ break;
+
+ return t;
+}
+
+/* Similar to `lookup_name_current_level' but also look at the global binding
+ level. */
+
+tree
+lookup_name_current_level_global (name)
+ tree name;
+{
+ register tree t = 0;
+
+ if (current_binding_level == global_binding_level)
+ return IDENTIFIER_GLOBAL_VALUE (name);
+
+ if (IDENTIFIER_LOCAL_VALUE (name) != 0)
+ for (t = current_binding_level->names; t; t = TREE_CHAIN (t))
+ if (DECL_NAME (t) == name)
+ break;
+
+ if (t == 0)
+ t = IDENTIFIER_GLOBAL_VALUE (name);
+
+ return t;
+}
+
+/* Create the predefined scalar types of C,
+ and some nodes representing standard constants (0, 1, (void *)0).
+ Initialize the global binding level.
+ Make definitions for built-in primitive functions. */
+
+void
+init_decl_processing ()
+{
+ register tree endlink;
+ /* Either char* or void*. */
+ tree traditional_ptr_type_node;
+ /* Data types of memcpy and strlen. */
+ tree memcpy_ftype, strlen_ftype;
+ tree void_ftype_any;
+ int wchar_type_size;
+ tree temp;
+ tree array_domain_type;
+
+ current_function_decl = NULL;
+ named_labels = NULL;
+ current_binding_level = NULL_BINDING_LEVEL;
+ free_binding_level = NULL_BINDING_LEVEL;
+ pushlevel (0); /* make the binding_level structure for global names */
+ global_binding_level = current_binding_level;
+
+ /* Define `int' and `char' first so that dbx will output them first. */
+
+ integer_type_node = make_signed_type (INT_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, ridpointers[(int) RID_INT],
+ integer_type_node));
+
+ /* Define `char', which is like either `signed char' or `unsigned char'
+ but not the same as either. */
+
+ char_type_node
+ = (flag_signed_char
+ ? make_signed_type (CHAR_TYPE_SIZE)
+ : make_unsigned_type (CHAR_TYPE_SIZE));
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("char"),
+ char_type_node));
+
+ long_integer_type_node = make_signed_type (LONG_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("long int"),
+ long_integer_type_node));
+
+ unsigned_type_node = make_unsigned_type (INT_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("unsigned int"),
+ unsigned_type_node));
+
+ long_unsigned_type_node = make_unsigned_type (LONG_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("long unsigned int"),
+ long_unsigned_type_node));
+
+ long_long_integer_type_node = make_signed_type (LONG_LONG_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("long long int"),
+ long_long_integer_type_node));
+
+ long_long_unsigned_type_node = make_unsigned_type (LONG_LONG_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("long long unsigned int"),
+ long_long_unsigned_type_node));
+
+ /* `unsigned long' is the standard type for sizeof.
+ Traditionally, use a signed type.
+ Note that stddef.h uses `unsigned long',
+ and this must agree, even if long and int are the same size. */
+ sizetype
+ = TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (get_identifier (SIZE_TYPE)));
+ if (flag_traditional && TREE_UNSIGNED (sizetype))
+ sizetype = signed_type (sizetype);
+
+ ptrdiff_type_node
+ = TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (get_identifier (PTRDIFF_TYPE)));
+
+ TREE_TYPE (TYPE_SIZE (integer_type_node)) = sizetype;
+ TREE_TYPE (TYPE_SIZE (char_type_node)) = sizetype;
+ TREE_TYPE (TYPE_SIZE (unsigned_type_node)) = sizetype;
+ TREE_TYPE (TYPE_SIZE (long_unsigned_type_node)) = sizetype;
+ TREE_TYPE (TYPE_SIZE (long_integer_type_node)) = sizetype;
+ TREE_TYPE (TYPE_SIZE (long_long_integer_type_node)) = sizetype;
+ TREE_TYPE (TYPE_SIZE (long_long_unsigned_type_node)) = sizetype;
+
+ error_mark_node = make_node (ERROR_MARK);
+ TREE_TYPE (error_mark_node) = error_mark_node;
+
+ short_integer_type_node = make_signed_type (SHORT_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("short int"),
+ short_integer_type_node));
+
+ short_unsigned_type_node = make_unsigned_type (SHORT_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("short unsigned int"),
+ short_unsigned_type_node));
+
+ /* Define both `signed char' and `unsigned char'. */
+ signed_char_type_node = make_signed_type (CHAR_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("signed char"),
+ signed_char_type_node));
+
+ unsigned_char_type_node = make_unsigned_type (CHAR_TYPE_SIZE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("unsigned char"),
+ unsigned_char_type_node));
+
+ intQI_type_node = make_signed_type (GET_MODE_BITSIZE (QImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, intQI_type_node));
+
+ intHI_type_node = make_signed_type (GET_MODE_BITSIZE (HImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, intHI_type_node));
+
+ intSI_type_node = make_signed_type (GET_MODE_BITSIZE (SImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, intSI_type_node));
+
+ intDI_type_node = make_signed_type (GET_MODE_BITSIZE (DImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, intDI_type_node));
+
+ unsigned_intQI_type_node = make_unsigned_type (GET_MODE_BITSIZE (QImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intQI_type_node));
+
+ unsigned_intHI_type_node = make_unsigned_type (GET_MODE_BITSIZE (HImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intHI_type_node));
+
+ unsigned_intSI_type_node = make_unsigned_type (GET_MODE_BITSIZE (SImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intSI_type_node));
+
+ unsigned_intDI_type_node = make_unsigned_type (GET_MODE_BITSIZE (DImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intDI_type_node));
+
+ float_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (float_type_node) = FLOAT_TYPE_SIZE;
+ pushdecl (build_decl (TYPE_DECL, ridpointers[(int) RID_FLOAT],
+ float_type_node));
+ layout_type (float_type_node);
+
+ double_type_node = make_node (REAL_TYPE);
+ if (flag_short_double)
+ TYPE_PRECISION (double_type_node) = FLOAT_TYPE_SIZE;
+ else
+ TYPE_PRECISION (double_type_node) = DOUBLE_TYPE_SIZE;
+ pushdecl (build_decl (TYPE_DECL, ridpointers[(int) RID_DOUBLE],
+ double_type_node));
+ layout_type (double_type_node);
+
+ long_double_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (long_double_type_node) = LONG_DOUBLE_TYPE_SIZE;
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("long double"),
+ long_double_type_node));
+ layout_type (long_double_type_node);
+
+ complex_integer_type_node = make_node (COMPLEX_TYPE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("complex int"),
+ complex_integer_type_node));
+ TREE_TYPE (complex_integer_type_node) = integer_type_node;
+ layout_type (complex_integer_type_node);
+
+ complex_float_type_node = make_node (COMPLEX_TYPE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("complex float"),
+ complex_float_type_node));
+ TREE_TYPE (complex_float_type_node) = float_type_node;
+ layout_type (complex_float_type_node);
+
+ complex_double_type_node = make_node (COMPLEX_TYPE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("complex double"),
+ complex_double_type_node));
+ TREE_TYPE (complex_double_type_node) = double_type_node;
+ layout_type (complex_double_type_node);
+
+ complex_long_double_type_node = make_node (COMPLEX_TYPE);
+ pushdecl (build_decl (TYPE_DECL, get_identifier ("complex long double"),
+ complex_long_double_type_node));
+ TREE_TYPE (complex_long_double_type_node) = long_double_type_node;
+ layout_type (complex_long_double_type_node);
+
+ wchar_type_node
+ = TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (get_identifier (WCHAR_TYPE)));
+ wchar_type_size = TYPE_PRECISION (wchar_type_node);
+ signed_wchar_type_node = signed_type (wchar_type_node);
+ unsigned_wchar_type_node = unsigned_type (wchar_type_node);
+
+ integer_zero_node = build_int_2 (0, 0);
+ TREE_TYPE (integer_zero_node) = integer_type_node;
+ integer_one_node = build_int_2 (1, 0);
+ TREE_TYPE (integer_one_node) = integer_type_node;
+
+ size_zero_node = build_int_2 (0, 0);
+ TREE_TYPE (size_zero_node) = sizetype;
+ size_one_node = build_int_2 (1, 0);
+ TREE_TYPE (size_one_node) = sizetype;
+
+ void_type_node = make_node (VOID_TYPE);
+ pushdecl (build_decl (TYPE_DECL,
+ ridpointers[(int) RID_VOID], void_type_node));
+ layout_type (void_type_node); /* Uses integer_zero_node */
+ /* We are not going to have real types in C with less than byte alignment,
+ so we might as well not have any types that claim to have it. */
+ TYPE_ALIGN (void_type_node) = BITS_PER_UNIT;
+
+ null_pointer_node = build_int_2 (0, 0);
+ TREE_TYPE (null_pointer_node) = build_pointer_type (void_type_node);
+ layout_type (TREE_TYPE (null_pointer_node));
+
+ string_type_node = build_pointer_type (char_type_node);
+ const_string_type_node
+ = build_pointer_type (build_type_variant (char_type_node, 1, 0));
+
+ /* Make a type to be the domain of a few array types
+ whose domains don't really matter.
+ 200 is small enough that it always fits in size_t
+ and large enough that it can hold most function names for the
+ initializations of __FUNCTION__ and __PRETTY_FUNCTION__. */
+ array_domain_type = build_index_type (build_int_2 (200, 0));
+
+ /* Make a type for arrays of characters.
+ With luck nothing will ever really depend on the length of this
+ array type. */
+ char_array_type_node
+ = build_array_type (char_type_node, array_domain_type);
+ /* Likewise for arrays of ints. */
+ int_array_type_node
+ = build_array_type (integer_type_node, array_domain_type);
+ /* This is for wide string constants. */
+ wchar_array_type_node
+ = build_array_type (wchar_type_node, array_domain_type);
+
+ default_function_type
+ = build_function_type (integer_type_node, NULL_TREE);
+
+ ptr_type_node = build_pointer_type (void_type_node);
+ const_ptr_type_node
+ = build_pointer_type (build_type_variant (void_type_node, 1, 0));
+
+ endlink = tree_cons (NULL_TREE, void_type_node, NULL_TREE);
+
+ void_ftype_any
+ = build_function_type (void_type_node, NULL_TREE);
+
+ double_ftype_double
+ = build_function_type (double_type_node,
+ tree_cons (NULL_TREE, double_type_node, endlink));
+
+ double_ftype_double_double
+ = build_function_type (double_type_node,
+ tree_cons (NULL_TREE, double_type_node,
+ tree_cons (NULL_TREE,
+ double_type_node, endlink)));
+
+ int_ftype_int
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node, endlink));
+
+ long_ftype_long
+ = build_function_type (long_integer_type_node,
+ tree_cons (NULL_TREE,
+ long_integer_type_node, endlink));
+
+ void_ftype_ptr_ptr_int
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))));
+
+ int_ftype_cptr_cptr_sizet
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, const_ptr_type_node,
+ tree_cons (NULL_TREE, const_ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink))));
+
+ void_ftype_ptr_int_int
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))));
+
+ string_ftype_ptr_ptr /* strcpy prototype */
+ = build_function_type (string_type_node,
+ tree_cons (NULL_TREE, string_type_node,
+ tree_cons (NULL_TREE,
+ const_string_type_node,
+ endlink)));
+
+ int_ftype_string_string /* strcmp prototype */
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, const_string_type_node,
+ tree_cons (NULL_TREE,
+ const_string_type_node,
+ endlink)));
+
+ strlen_ftype /* strlen prototype */
+ = build_function_type (flag_traditional ? integer_type_node : sizetype,
+ tree_cons (NULL_TREE, const_string_type_node,
+ endlink));
+
+ traditional_ptr_type_node
+ = (flag_traditional ? string_type_node : ptr_type_node);
+
+ memcpy_ftype /* memcpy prototype */
+ = build_function_type (traditional_ptr_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ tree_cons (NULL_TREE, const_ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink))));
+
+ builtin_function ("__builtin_constant_p", int_ftype_int,
+ BUILT_IN_CONSTANT_P, NULL_PTR);
+
+ builtin_function ("__builtin_return_address",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ unsigned_type_node,
+ endlink)),
+ BUILT_IN_RETURN_ADDRESS, NULL_PTR);
+
+ builtin_function ("__builtin_frame_address",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ unsigned_type_node,
+ endlink)),
+ BUILT_IN_FRAME_ADDRESS, NULL_PTR);
+
+ builtin_function ("__builtin_alloca",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink)),
+ BUILT_IN_ALLOCA, "alloca");
+ builtin_function ("__builtin_ffs", int_ftype_int, BUILT_IN_FFS, NULL_PTR);
+ /* Define alloca, ffs as builtins.
+ Declare _exit just to mark it as volatile. */
+ if (! flag_no_builtin && !flag_no_nonansi_builtin)
+ {
+ temp = builtin_function ("alloca",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink)),
+ BUILT_IN_ALLOCA, NULL_PTR);
+ /* Suppress error if redefined as a non-function. */
+ DECL_BUILT_IN_NONANSI (temp) = 1;
+ temp = builtin_function ("ffs", int_ftype_int, BUILT_IN_FFS, NULL_PTR);
+ /* Suppress error if redefined as a non-function. */
+ DECL_BUILT_IN_NONANSI (temp) = 1;
+ temp = builtin_function ("_exit", void_ftype_any, NOT_BUILT_IN,
+ NULL_PTR);
+ TREE_THIS_VOLATILE (temp) = 1;
+ TREE_SIDE_EFFECTS (temp) = 1;
+ /* Suppress error if redefined as a non-function. */
+ DECL_BUILT_IN_NONANSI (temp) = 1;
+ }
+
+ builtin_function ("__builtin_abs", int_ftype_int, BUILT_IN_ABS, NULL_PTR);
+ builtin_function ("__builtin_fabs", double_ftype_double, BUILT_IN_FABS,
+ NULL_PTR);
+ builtin_function ("__builtin_labs", long_ftype_long, BUILT_IN_LABS,
+ NULL_PTR);
+ builtin_function ("__builtin_saveregs",
+ build_function_type (ptr_type_node, NULL_TREE),
+ BUILT_IN_SAVEREGS, NULL_PTR);
+/* EXPAND_BUILTIN_VARARGS is obsolete. */
+#if 0
+ builtin_function ("__builtin_varargs",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink)),
+ BUILT_IN_VARARGS, NULL_PTR);
+#endif
+ builtin_function ("__builtin_classify_type", default_function_type,
+ BUILT_IN_CLASSIFY_TYPE, NULL_PTR);
+ builtin_function ("__builtin_next_arg",
+ build_function_type (ptr_type_node, NULL_TREE),
+ BUILT_IN_NEXT_ARG, NULL_PTR);
+ builtin_function ("__builtin_args_info",
+ build_function_type (integer_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink)),
+ BUILT_IN_ARGS_INFO, NULL_PTR);
+
+ /* Untyped call and return. */
+ builtin_function ("__builtin_apply_args",
+ build_function_type (ptr_type_node, NULL_TREE),
+ BUILT_IN_APPLY_ARGS, NULL_PTR);
+
+ temp = tree_cons (NULL_TREE,
+ build_pointer_type (build_function_type (void_type_node,
+ NULL_TREE)),
+ tree_cons (NULL_TREE,
+ ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink)));
+ builtin_function ("__builtin_apply",
+ build_function_type (ptr_type_node, temp),
+ BUILT_IN_APPLY, NULL_PTR);
+ builtin_function ("__builtin_return",
+ build_function_type (void_type_node,
+ tree_cons (NULL_TREE,
+ ptr_type_node,
+ endlink)),
+ BUILT_IN_RETURN, NULL_PTR);
+
+ /* Currently under experimentation. */
+ builtin_function ("__builtin_memcpy", memcpy_ftype,
+ BUILT_IN_MEMCPY, "memcpy");
+ builtin_function ("__builtin_memcmp", int_ftype_cptr_cptr_sizet,
+ BUILT_IN_MEMCMP, "memcmp");
+ builtin_function ("__builtin_strcmp", int_ftype_string_string,
+ BUILT_IN_STRCMP, "strcmp");
+ builtin_function ("__builtin_strcpy", string_ftype_ptr_ptr,
+ BUILT_IN_STRCPY, "strcpy");
+ builtin_function ("__builtin_strlen", strlen_ftype,
+ BUILT_IN_STRLEN, "strlen");
+ builtin_function ("__builtin_fsqrt", double_ftype_double,
+ BUILT_IN_FSQRT, "sqrt");
+ builtin_function ("__builtin_sin", double_ftype_double,
+ BUILT_IN_SIN, "sin");
+ builtin_function ("__builtin_cos", double_ftype_double,
+ BUILT_IN_COS, "cos");
+
+ /* In an ANSI C program, it is okay to supply built-in meanings
+ for these functions, since applications cannot validly use them
+ with any other meaning.
+ However, honor the -fno-builtin option. */
+ if (!flag_no_builtin)
+ {
+ builtin_function ("abs", int_ftype_int, BUILT_IN_ABS, NULL_PTR);
+ builtin_function ("fabs", double_ftype_double, BUILT_IN_FABS, NULL_PTR);
+ builtin_function ("labs", long_ftype_long, BUILT_IN_LABS, NULL_PTR);
+ builtin_function ("memcpy", memcpy_ftype, BUILT_IN_MEMCPY, NULL_PTR);
+ builtin_function ("memcmp", int_ftype_cptr_cptr_sizet, BUILT_IN_MEMCMP,
+ NULL_PTR);
+ builtin_function ("strcmp", int_ftype_string_string, BUILT_IN_STRCMP,
+ NULL_PTR);
+ builtin_function ("strcpy", string_ftype_ptr_ptr, BUILT_IN_STRCPY,
+ NULL_PTR);
+ builtin_function ("strlen", strlen_ftype, BUILT_IN_STRLEN, NULL_PTR);
+ builtin_function ("sqrt", double_ftype_double, BUILT_IN_FSQRT, NULL_PTR);
+ builtin_function ("sin", double_ftype_double, BUILT_IN_SIN, NULL_PTR);
+ builtin_function ("cos", double_ftype_double, BUILT_IN_COS, NULL_PTR);
+
+ /* Declare these functions volatile
+ to avoid spurious "control drops through" warnings. */
+ /* Don't specify the argument types, to avoid errors
+ from certain code which isn't valid in ANSI but which exists. */
+ temp = builtin_function ("abort", void_ftype_any, NOT_BUILT_IN,
+ NULL_PTR);
+ TREE_THIS_VOLATILE (temp) = 1;
+ TREE_SIDE_EFFECTS (temp) = 1;
+ temp = builtin_function ("exit", void_ftype_any, NOT_BUILT_IN, NULL_PTR);
+ TREE_THIS_VOLATILE (temp) = 1;
+ TREE_SIDE_EFFECTS (temp) = 1;
+ }
+
+#if 0
+ /* Support for these has not been written in either expand_builtin
+ or build_function_call. */
+ builtin_function ("__builtin_div", default_ftype, BUILT_IN_DIV, NULL_PTR);
+ builtin_function ("__builtin_ldiv", default_ftype, BUILT_IN_LDIV, NULL_PTR);
+ builtin_function ("__builtin_ffloor", double_ftype_double, BUILT_IN_FFLOOR,
+ NULL_PTR);
+ builtin_function ("__builtin_fceil", double_ftype_double, BUILT_IN_FCEIL,
+ NULL_PTR);
+ builtin_function ("__builtin_fmod", double_ftype_double_double,
+ BUILT_IN_FMOD, NULL_PTR);
+ builtin_function ("__builtin_frem", double_ftype_double_double,
+ BUILT_IN_FREM, NULL_PTR);
+ builtin_function ("__builtin_memset", ptr_ftype_ptr_int_int,
+ BUILT_IN_MEMSET, NULL_PTR);
+ builtin_function ("__builtin_getexp", double_ftype_double, BUILT_IN_GETEXP,
+ NULL_PTR);
+ builtin_function ("__builtin_getman", double_ftype_double, BUILT_IN_GETMAN,
+ NULL_PTR);
+#endif
+
+ /* Create the global bindings for __FUNCTION__ and __PRETTY_FUNCTION__. */
+ declare_function_name ();
+
+ start_identifier_warnings ();
+
+ /* Prepare to check format strings against argument lists. */
+ init_function_format_info ();
+
+ init_iterators ();
+
+ incomplete_decl_finalize_hook = finish_incomplete_decl;
+}
+
+/* Return a definition for a builtin function named NAME and whose data type
+ is TYPE. TYPE should be a function type with argument types.
+ FUNCTION_CODE tells later passes how to compile calls to this function.
+ See tree.h for its possible values.
+
+ If LIBRARY_NAME is nonzero, use that for DECL_ASSEMBLER_NAME,
+ the name to be called if we can't opencode the function. */
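+/* A typical use, taken from init_decl_processing above:
+
+ builtin_function ("__builtin_alloca",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE, sizetype, endlink)),
+ BUILT_IN_ALLOCA, "alloca");
+
+ Here "alloca" is the library fallback used when the call cannot be
+ open-coded. */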
+
+tree
+builtin_function (name, type, function_code, library_name)
+ char *name;
+ tree type;
+ enum built_in_function function_code;
+ char *library_name;
+{
+ tree decl = build_decl (FUNCTION_DECL, get_identifier (name), type);
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+ /* If -traditional, permit redefining a builtin function any way you like.
+ (Though really, if the program redefines these functions,
+ it probably won't work right unless compiled with -fno-builtin.) */
+ if (flag_traditional && name[0] != '_')
+ DECL_BUILT_IN_NONANSI (decl) = 1;
+ if (library_name)
+ DECL_ASSEMBLER_NAME (decl) = get_identifier (library_name);
+ make_decl_rtl (decl, NULL_PTR, 1);
+ pushdecl (decl);
+ if (function_code != NOT_BUILT_IN)
+ {
+ DECL_BUILT_IN (decl) = 1;
+ DECL_FUNCTION_CODE (decl) = function_code;
+ }
+ /* Warn if a function in the namespace for users
+ is used without an occasion to consider it declared. */
+ if (name[0] != '_' || name[1] != '_')
+ C_DECL_ANTICIPATED (decl) = 1;
+
+ return decl;
+}
+
+/* Called when a declaration is seen that contains no names to declare.
+ If its type is a reference to a structure, union or enum inherited
+ from a containing scope, shadow that tag name for the current scope
+ with a forward reference.
+ If its type defines a new named structure or union
+ or defines an enum, it is valid but we need not do anything here.
+ Otherwise, it is an error. */
+
+void
+shadow_tag (declspecs)
+ tree declspecs;
+{
+ shadow_tag_warned (declspecs, 0);
+}
+
+void
+shadow_tag_warned (declspecs, warned)
+ tree declspecs;
+ int warned;
+ /* 1 => we have done a pedwarn. 2 => we have done a warning, but
+ no pedwarn. */
+{
+ int found_tag = 0;
+ register tree link;
+
+ pending_invalid_xref = 0;
+
+ for (link = declspecs; link; link = TREE_CHAIN (link))
+ {
+ register tree value = TREE_VALUE (link);
+ register enum tree_code code = TREE_CODE (value);
+
+ if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE)
+ /* Used to test also that TYPE_SIZE (value) != 0.
+ That caused a warning for `struct foo;' at top level in the file. */
+ {
+ register tree name = lookup_tag_reverse (value);
+ register tree t;
+
+ found_tag++;
+
+ if (name == 0)
+ {
+ if (warned != 1 && code != ENUMERAL_TYPE)
+ /* Empty unnamed enum OK */
+ {
+ pedwarn ("unnamed struct/union that defines no instances");
+ warned = 1;
+ }
+ }
+ else
+ {
+ t = lookup_tag (code, name, current_binding_level, 1);
+
+ if (t == 0)
+ {
+ t = make_node (code);
+ pushtag (name, t);
+ }
+ }
+ }
+ else
+ {
+ if (!warned)
+ {
+ warning ("useless keyword or type name in empty declaration");
+ warned = 2;
+ }
+ }
+ }
+
+ if (found_tag > 1)
+ error ("two types specified in one empty declaration");
+
+ if (warned != 1)
+ {
+ if (found_tag == 0)
+ pedwarn ("empty declaration");
+ }
+}
+
+/* Decode a "typename", such as "int **", returning a ..._TYPE node. */
+
+tree
+groktypename (typename)
+ tree typename;
+{
+ if (TREE_CODE (typename) != TREE_LIST)
+ return typename;
+ return grokdeclarator (TREE_VALUE (typename),
+ TREE_PURPOSE (typename),
+ TYPENAME, 0);
+}
+
+/* Return a PARM_DECL node for a given pair of specs and declarator. */
+
+tree
+groktypename_in_parm_context (typename)
+ tree typename;
+{
+ if (TREE_CODE (typename) != TREE_LIST)
+ return typename;
+ return grokdeclarator (TREE_VALUE (typename),
+ TREE_PURPOSE (typename),
+ PARM, 0);
+}
+
+/* Decode a declarator in an ordinary declaration or data definition.
+ This is called as soon as the type information and variable name
+ have been parsed, before parsing the initializer if any.
+ Here we create the ..._DECL node, fill in its type,
+ and put it on the list of decls for the current context.
+ The ..._DECL node is returned as the value.
+
+ Exception: for arrays where the length is not specified,
+ the type is left null, to be filled in by `finish_decl'.
+
+ Function definitions do not come here; they go to start_function
+ instead. However, external and forward declarations of functions
+ do go through here. Structure field declarations are done by
+ grokfield and not through here. */
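+/* For example, `extern int f ();' and `int x = 3;' both come through
+ here, while `int f () { ... }' goes to start_function. */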
+
+/* Set this to zero to debug parsing of initializers
+ without using the temporary obstack. */
+int debug_temp_inits = 1;
+
+tree
+start_decl (declarator, declspecs, initialized)
+ tree declarator, declspecs;
+ int initialized;
+{
+ register tree decl = grokdeclarator (declarator, declspecs,
+ NORMAL, initialized);
+ register tree tem;
+ int init_written = initialized;
+
+ /* The corresponding pop_obstacks is in finish_decl. */
+ push_obstacks_nochange ();
+
+ if (initialized)
+ /* Is it valid for this decl to have an initializer at all?
+ If not, set INITIALIZED to zero, which will indirectly
+ tell `finish_decl' to ignore the initializer once it is parsed. */
+ switch (TREE_CODE (decl))
+ {
+ case TYPE_DECL:
+ /* typedef foo = bar means give foo the same type as bar.
+ We haven't parsed bar yet, so `finish_decl' will fix that up.
+ Any other case of an initialization in a TYPE_DECL is an error. */
+ if (pedantic || list_length (declspecs) > 1)
+ {
+ error ("typedef `%s' is initialized",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ initialized = 0;
+ }
+ break;
+
+ case FUNCTION_DECL:
+ error ("function `%s' is initialized like a variable",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ initialized = 0;
+ break;
+
+ case PARM_DECL:
+ /* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE. */
+ error ("parameter `%s' is initialized",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ initialized = 0;
+ break;
+
+ default:
+ /* Don't allow initializations for incomplete types
+ except for arrays which might be completed by the initialization. */
+ if (TYPE_SIZE (TREE_TYPE (decl)) != 0)
+ {
+ /* A complete type is ok if size is fixed. */
+
+ if (TREE_CODE (TYPE_SIZE (TREE_TYPE (decl))) != INTEGER_CST
+ || C_DECL_VARIABLE_SIZE (decl))
+ {
+ error ("variable-sized object may not be initialized");
+ initialized = 0;
+ }
+ }
+ else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
+ {
+ error ("variable `%s' has initializer but incomplete type",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ initialized = 0;
+ }
+ else if (TYPE_SIZE (TREE_TYPE (TREE_TYPE (decl))) == 0)
+ {
+ error ("elements of array `%s' have incomplete type",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ initialized = 0;
+ }
+ }
+
+ if (initialized)
+ {
+#if 0 /* Seems redundant with grokdeclarator. */
+ if (current_binding_level != global_binding_level
+ && DECL_EXTERNAL (decl)
+ && TREE_CODE (decl) != FUNCTION_DECL)
+ warning ("declaration of `%s' has `extern' and is initialized",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+#endif
+ DECL_EXTERNAL (decl) = 0;
+ if (current_binding_level == global_binding_level)
+ TREE_STATIC (decl) = 1;
+
+ /* Tell `pushdecl' this is an initialized decl
+ even though we don't yet have the initializer expression.
+ Also tell `finish_decl' it may store the real initializer. */
+ DECL_INITIAL (decl) = error_mark_node;
+ }
+
+ /* If this is a function declaration, write a record describing it to the
+ prototypes file (if requested). */
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ gen_aux_info_record (decl, 0, 0, TYPE_ARG_TYPES (TREE_TYPE (decl)) != 0);
+
+ /* Add this decl to the current binding level.
+ TEM may equal DECL or it may be a previous decl of the same name. */
+ tem = pushdecl (decl);
+
+ /* For C and Objective-C, we put things in .common by default
+ when possible. */
+ DECL_COMMON (tem) = 1;
+
+ /* For a local variable, define the RTL now. */
+ if (current_binding_level != global_binding_level
+ /* But not if this is a duplicate decl
+ and we preserved the rtl from the previous one
+ (which may or may not happen). */
+ && DECL_RTL (tem) == 0)
+ {
+ if (TYPE_SIZE (TREE_TYPE (tem)) != 0)
+ expand_decl (tem);
+ else if (TREE_CODE (TREE_TYPE (tem)) == ARRAY_TYPE
+ && DECL_INITIAL (tem) != 0)
+ expand_decl (tem);
+ }
+
+ if (init_written)
+ {
+ /* When parsing and digesting the initializer,
+ use temporary storage. Do this even if we will ignore the value. */
+ if (current_binding_level == global_binding_level && debug_temp_inits)
+ temporary_allocation ();
+ }
+
+ return tem;
+}
+
+/* Finish processing of a declaration;
+ install its initial value.
+ If the length of an array type is not known before,
+ it must be determined now, from the initial value, or it is an error. */
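+/* For example, given `int a[] = { 1, 2, 3 };' the domain of the array
+ type is completed here from the initializer, yielding bounds 0..2. */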
+
+void
+finish_decl (decl, init, asmspec_tree)
+ tree decl, init;
+ tree asmspec_tree;
+{
+ register tree type = TREE_TYPE (decl);
+ int was_incomplete = (DECL_SIZE (decl) == 0);
+ int temporary = allocation_temporary_p ();
+ char *asmspec = 0;
+
+ /* If a name was specified, get the string. */
+ if (asmspec_tree)
+ asmspec = TREE_STRING_POINTER (asmspec_tree);
+
+ /* If `start_decl' didn't like having an initialization, ignore it now. */
+
+ if (init != 0 && DECL_INITIAL (decl) == 0)
+ init = 0;
+ /* Don't crash if parm is initialized. */
+ if (TREE_CODE (decl) == PARM_DECL)
+ init = 0;
+
+ if (ITERATOR_P (decl))
+ {
+ if (init == 0)
+ error_with_decl (decl, "iterator has no initial value");
+ else
+ init = save_expr (init);
+ }
+
+ if (init)
+ {
+ if (TREE_CODE (decl) != TYPE_DECL)
+ store_init_value (decl, init);
+ else
+ {
+ /* typedef foo = bar; store the type of bar as the type of foo. */
+ TREE_TYPE (decl) = TREE_TYPE (init);
+ DECL_INITIAL (decl) = init = 0;
+ }
+ }
+
+ /* Pop back to the obstack that is current for this binding level.
+ This is because MAXINDEX, rtl, etc. to be made below
+ must go in the permanent obstack. But don't discard the
+ temporary data yet. */
+ pop_obstacks ();
+#if 0 /* pop_obstacks was near the end; this is what was here. */
+ if (current_binding_level == global_binding_level && temporary)
+ end_temporary_allocation ();
+#endif
+
+ /* Deduce size of array from initialization, if not already known. */
+
+ if (TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_DOMAIN (type) == 0
+ && TREE_CODE (decl) != TYPE_DECL)
+ {
+ int do_default
+ = (TREE_STATIC (decl)
+ /* Even if pedantic, an external linkage array
+ may have incomplete type at first. */
+ ? pedantic && !TREE_PUBLIC (decl)
+ : !DECL_EXTERNAL (decl));
+ int failure
+ = complete_array_type (type, DECL_INITIAL (decl), do_default);
+
+ /* Get the completed type made by complete_array_type. */
+ type = TREE_TYPE (decl);
+
+ if (failure == 1)
+ error_with_decl (decl, "initializer fails to determine size of `%s'");
+
+ if (failure == 2)
+ {
+ if (do_default)
+ error_with_decl (decl, "array size missing in `%s'");
+ /* If a `static' var's size isn't known,
+ make it extern as well as static, so it does not get
+ allocated.
+ If it is not `static', then do not mark extern;
+ finish_incomplete_decl will give it a default size
+ and it will get allocated. */
+ else if (!pedantic && TREE_STATIC (decl) && ! TREE_PUBLIC (decl))
+ DECL_EXTERNAL (decl) = 1;
+ }
+
+ /* TYPE_MAX_VALUE is always one less than the number of elements
+ in the array, because we start counting at zero. Therefore,
+ warn only if the value is less than zero. */
+ if (pedantic && TYPE_DOMAIN (type) != 0
+ && tree_int_cst_sgn (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) < 0)
+ error_with_decl (decl, "zero or negative size array `%s'");
+
+ layout_decl (decl, 0);
+ }
+
+ if (TREE_CODE (decl) == VAR_DECL)
+ {
+ if (DECL_SIZE (decl) == 0
+ && TYPE_SIZE (TREE_TYPE (decl)) != 0)
+ layout_decl (decl, 0);
+
+ if (DECL_SIZE (decl) == 0
+ && (TREE_STATIC (decl)
+ ?
+ /* A static variable with an incomplete type
+ is an error if it is initialized.
+ Also if it is not file scope.
+ Otherwise, let it through, but if it is not `extern'
+ then it may cause an error message later. */
+ (DECL_INITIAL (decl) != 0
+ || current_binding_level != global_binding_level)
+ :
+ /* An automatic variable with an incomplete type
+ is an error. */
+ !DECL_EXTERNAL (decl)))
+ {
+ error_with_decl (decl, "storage size of `%s' isn't known");
+ TREE_TYPE (decl) = error_mark_node;
+ }
+
+ if ((DECL_EXTERNAL (decl) || TREE_STATIC (decl))
+ && DECL_SIZE (decl) != 0)
+ {
+ if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
+ constant_expression_warning (DECL_SIZE (decl));
+ else
+ error_with_decl (decl, "storage size of `%s' isn't constant");
+ }
+ }
+
+ /* If this is a function and an assembler name is specified, it isn't
+ a builtin any more. Also reset DECL_RTL so we can give it its new
+ name. */
+ if (TREE_CODE (decl) == FUNCTION_DECL && asmspec)
+ {
+ DECL_BUILT_IN (decl) = 0;
+ DECL_RTL (decl) = 0;
+ }
+
+ /* Output the assembler code and/or RTL code for variables and functions,
+ unless the type is an undefined structure or union.
+ If not, it will get done when the type is completed. */
+
+ if (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ if ((flag_traditional || TREE_PERMANENT (decl))
+ && allocation_temporary_p ())
+ {
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ /* This is a no-op in c-lang.c or something real in objc-actions.c. */
+ maybe_objc_check_decl (decl);
+ rest_of_decl_compilation (decl, asmspec,
+ current_binding_level == global_binding_level,
+ 0);
+ pop_obstacks ();
+ }
+ else
+ {
+ /* This is a no-op in c-lang.c or something real in objc-actions.c. */
+ maybe_objc_check_decl (decl);
+ rest_of_decl_compilation (decl, asmspec,
+ current_binding_level == global_binding_level,
+ 0);
+ }
+ if (current_binding_level != global_binding_level)
+ {
+ /* Recompute the RTL of a local array now
+ if it used to be an incomplete type. */
+ if (was_incomplete
+ && ! TREE_STATIC (decl) && ! DECL_EXTERNAL (decl))
+ {
+ /* If we used it already as memory, it must stay in memory. */
+ TREE_ADDRESSABLE (decl) = TREE_USED (decl);
+ /* If it's still incomplete now, no init will save it. */
+ if (DECL_SIZE (decl) == 0)
+ DECL_INITIAL (decl) = 0;
+ expand_decl (decl);
+ }
+ /* Compute and store the initial value. */
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ expand_decl_init (decl);
+ }
+ }
+
+ if (TREE_CODE (decl) == TYPE_DECL)
+ {
+ /* This is a no-op in c-lang.c or something real in objc-actions.c. */
+ maybe_objc_check_decl (decl);
+ rest_of_decl_compilation (decl, NULL_PTR,
+ current_binding_level == global_binding_level,
+ 0);
+ }
+
+ /* ??? After 2.3, test (init != 0) instead of TREE_CODE. */
+ /* This test used to include TREE_PERMANENT, however, we have the same
+ problem with initializers at the function level. Such initializers get
+ saved until the end of the function on the momentary_obstack. */
+ if (!(TREE_CODE (decl) == FUNCTION_DECL && DECL_INLINE (decl))
+ && temporary
+ /* DECL_INITIAL is not defined in PARM_DECLs, since it shares
+ space with DECL_ARG_TYPE. */
+ && TREE_CODE (decl) != PARM_DECL)
+ {
+ /* We need to remember that this array HAD an initialization,
+ but discard the actual temporary nodes,
+ since we can't have a permanent node keep pointing to them. */
+ /* We make an exception for inline functions, since it's
+ normal for a local extern redeclaration of an inline function
+ to have a copy of the top-level decl's DECL_INLINE. */
+ if (DECL_INITIAL (decl) != 0 && DECL_INITIAL (decl) != error_mark_node)
+ {
+ /* If this is a const variable, then preserve the
+ initializer instead of discarding it so that we can optimize
+ references to it. */
+ /* This test used to include TREE_STATIC, but this won't be set
+ for function level initializers. */
+ if (TREE_READONLY (decl))
+ {
+ preserve_initializer ();
+ /* Hack? Set the permanent bit for something that is permanent,
+ but not on the permanent obstack, so as to convince
+ output_constant_def to make its rtl on the permanent
+ obstack. */
+ TREE_PERMANENT (DECL_INITIAL (decl)) = 1;
+
+ /* The initializer and DECL must have the same (or equivalent)
+ types, but if the initializer is a STRING_CST, its type
+ might not be on the right obstack, so copy the type
+ of DECL. */
+ TREE_TYPE (DECL_INITIAL (decl)) = type;
+ }
+ else
+ DECL_INITIAL (decl) = error_mark_node;
+ }
+ }
+
+ /* If requested, warn about definitions of large data objects. */
+
+ if (warn_larger_than
+ && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == PARM_DECL)
+ && !DECL_EXTERNAL (decl))
+ {
+ register tree decl_size = DECL_SIZE (decl);
+
+ if (decl_size && TREE_CODE (decl_size) == INTEGER_CST)
+ {
+ unsigned units = TREE_INT_CST_LOW (decl_size) / BITS_PER_UNIT;
+
+ if (units > larger_than_size)
+ warning_with_decl (decl, "size of `%s' is %u bytes", units);
+ }
+ }
+
+#if 0
+ /* Resume permanent allocation, if not within a function. */
+ /* The corresponding push_obstacks_nochange is in start_decl,
+ and in push_parm_decl and in grokfield. */
+ pop_obstacks ();
+#endif
+
+ /* If we have gone back from temporary to permanent allocation,
+ actually free the temporary space that we no longer need. */
+ if (temporary && !allocation_temporary_p ())
+ permanent_allocation (0);
+
+ /* At the end of a declaration, throw away any variable type sizes
+ of types defined inside that declaration. There is no use
+ computing them in the following function definition. */
+ if (current_binding_level == global_binding_level)
+ get_pending_sizes ();
+}
+
+/* If DECL has a cleanup, build and return that cleanup here.
+ This is a callback called by expand_expr. */
+
+tree
+maybe_build_cleanup (decl)
+ tree decl;
+{
+ /* There are no cleanups in C. */
+ return NULL_TREE;
+}
+
+/* Given a parsed parameter declaration,
+ decode it into a PARM_DECL and push that on the current binding level.
+ Also, for the sake of forward parm decls,
+ record the given order of parms in `parm_order'. */
+
+void
+push_parm_decl (parm)
+ tree parm;
+{
+ tree decl;
+ int old_immediate_size_expand = immediate_size_expand;
+ /* Don't try computing parm sizes now -- wait till fn is called. */
+ immediate_size_expand = 0;
+
+ /* The corresponding pop_obstacks is in finish_decl. */
+ push_obstacks_nochange ();
+
+ decl = grokdeclarator (TREE_VALUE (parm), TREE_PURPOSE (parm), PARM, 0);
+
+#if 0
+ if (DECL_NAME (decl))
+ {
+ tree olddecl;
+ olddecl = lookup_name (DECL_NAME (decl));
+ if (pedantic && olddecl != 0 && TREE_CODE (olddecl) == TYPE_DECL)
+ pedwarn_with_decl (decl, "ANSI C forbids parameter `%s' shadowing typedef");
+ }
+#endif
+
+ decl = pushdecl (decl);
+
+ immediate_size_expand = old_immediate_size_expand;
+
+ current_binding_level->parm_order
+ = tree_cons (NULL_TREE, decl, current_binding_level->parm_order);
+
+ /* Add this decl to the current binding level. */
+ finish_decl (decl, NULL_TREE, NULL_TREE);
+}
+
+/* Clear the given order of parms in `parm_order'.
+ Used at start of parm list,
+ and also at semicolon terminating forward decls. */
+
+void
+clear_parm_order ()
+{
+ current_binding_level->parm_order = NULL_TREE;
+}
+
+/* Make TYPE a complete type based on INITIAL_VALUE.
+ Return 0 if successful, 1 if INITIAL_VALUE can't be deciphered,
+ 2 if there was no information (in which case assume 1 if DO_DEFAULT). */
+
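+/* Worked example (illustrative editor's sketch): for `char s[] = "abc";'
+ the initializer is a STRING_CST whose TREE_STRING_LENGTH is 4 (the
+ terminating null is counted) and whose element size is 1 byte, so
+ MAXINDEX becomes 4/1 - 1 = 3 and the completed array type gets the
+ domain [0..3]. */
+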
+int
+complete_array_type (type, initial_value, do_default)
+ tree type;
+ tree initial_value;
+ int do_default;
+{
+ register tree maxindex = NULL_TREE;
+ int value = 0;
+
+ if (initial_value)
+ {
+ /* Note MAXINDEX is really the maximum index,
+ one less than the size. */
+ if (TREE_CODE (initial_value) == STRING_CST)
+ {
+ int eltsize
+ = int_size_in_bytes (TREE_TYPE (TREE_TYPE (initial_value)));
+ maxindex = build_int_2 ((TREE_STRING_LENGTH (initial_value)
+ / eltsize) - 1, 0);
+ }
+ else if (TREE_CODE (initial_value) == CONSTRUCTOR)
+ {
+ tree elts = CONSTRUCTOR_ELTS (initial_value);
+ maxindex = size_binop (MINUS_EXPR, integer_zero_node, size_one_node);
+ for (; elts; elts = TREE_CHAIN (elts))
+ {
+ if (TREE_PURPOSE (elts))
+ maxindex = TREE_PURPOSE (elts);
+ else
+ maxindex = size_binop (PLUS_EXPR, maxindex, size_one_node);
+ }
+ maxindex = copy_node (maxindex);
+ }
+ else
+ {
+ /* Make an error message unless that happened already. */
+ if (initial_value != error_mark_node)
+ value = 1;
+
+ /* Prevent further error messages. */
+ maxindex = build_int_2 (0, 0);
+ }
+ }
+
+ if (!maxindex)
+ {
+ if (do_default)
+ maxindex = build_int_2 (0, 0);
+ value = 2;
+ }
+
+ if (maxindex)
+ {
+ TYPE_DOMAIN (type) = build_index_type (maxindex);
+ if (!TREE_TYPE (maxindex))
+ TREE_TYPE (maxindex) = TYPE_DOMAIN (type);
+#if 0 /* I took out this change
+ together with the change in build_array_type. --rms */
+ change_main_variant (type,
+ build_array_type (TREE_TYPE (type),
+ TYPE_DOMAIN (type)));
+#endif
+ }
+
+ /* Lay out the type now that we can get the real answer. */
+
+ layout_type (type);
+
+ return value;
+}
+
+/* Given declspecs and a declarator,
+ determine the name and type of the object declared
+ and construct a ..._DECL node for it.
+ (In one case we can return a ..._TYPE node instead.
+ For invalid input we sometimes return 0.)
+
+ DECLSPECS is a chain of tree_list nodes whose value fields
+ are the storage classes and type specifiers.
+
+ DECL_CONTEXT says which syntactic context this declaration is in:
+ NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL.
+ FUNCDEF for a function definition. Like NORMAL but with a few
+ different error messages in each case. The return value may be zero,
+ meaning this definition is too screwy to try to parse.
+ PARM for a parameter declaration (either within a function prototype
+ or before a function body). Make a PARM_DECL, or return void_type_node.
+ TYPENAME for a typename (in a cast or sizeof).
+ Don't make a DECL node; just return the ..._TYPE node.
+ FIELD for a struct or union field; make a FIELD_DECL.
+ BITFIELD for a field with specified width.
+ INITIALIZED is 1 if the decl has an initializer.
+
+ In the TYPENAME case, DECLARATOR is really an absolute declarator.
+ It may also be so in the PARM case, for a prototype where the
+ argument type is specified but not the name.
+
+ This function is where the complicated C meanings of `static'
+ and `extern' are interpreted. */
+
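+/* Illustrative example (editor's sketch): for `static int *x[10];'
+ DECLSPECS lists `static' and `int', and DECLARATOR is
+ INDIRECT_REF (ARRAY_REF (IDENTIFIER_NODE x, 10))
+ since [] binds tighter than * in a declarator. The loop below peels
+ the INDIRECT_REF first (TYPE becomes pointer-to-int), then the
+ ARRAY_REF (TYPE becomes array[10] of pointer-to-int). */
+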
+static tree
+grokdeclarator (declarator, declspecs, decl_context, initialized)
+ tree declspecs;
+ tree declarator;
+ enum decl_context decl_context;
+ int initialized;
+{
+ int specbits = 0;
+ tree spec;
+ tree type = NULL_TREE;
+ int longlong = 0;
+ int constp;
+ int volatilep;
+ int inlinep;
+ int explicit_int = 0;
+ int explicit_char = 0;
+ int defaulted_int = 0;
+ tree typedef_decl = 0;
+ char *name;
+ tree typedef_type = 0;
+ int funcdef_flag = 0;
+ enum tree_code innermost_code = ERROR_MARK;
+ int bitfield = 0;
+ int size_varies = 0;
+
+ if (decl_context == BITFIELD)
+ bitfield = 1, decl_context = FIELD;
+
+ if (decl_context == FUNCDEF)
+ funcdef_flag = 1, decl_context = NORMAL;
+
+ push_obstacks_nochange ();
+
+ if (flag_traditional && allocation_temporary_p ())
+ end_temporary_allocation ();
+
+ /* Look inside a declarator for the name being declared
+ and get it as a string, for an error message. */
+ {
+ register tree decl = declarator;
+ name = 0;
+
+ while (decl)
+ switch (TREE_CODE (decl))
+ {
+ case ARRAY_REF:
+ case INDIRECT_REF:
+ case CALL_EXPR:
+ innermost_code = TREE_CODE (decl);
+ decl = TREE_OPERAND (decl, 0);
+ break;
+
+ case IDENTIFIER_NODE:
+ name = IDENTIFIER_POINTER (decl);
+ decl = 0;
+ break;
+
+ default:
+ abort ();
+ }
+ if (name == 0)
+ name = "type name";
+ }
+
+ /* A function definition's declarator must have the form of
+ a function declarator. */
+
+ if (funcdef_flag && innermost_code != CALL_EXPR)
+ return 0;
+
+ /* Anything declared one level down from the top level
+ must be one of the parameters of a function
+ (because the body is at least two levels down). */
+
+ /* If this looks like a function definition, make it one,
+ even if it occurs where parms are expected.
+ Then store_parm_decls will reject it and not use it as a parm. */
+ if (decl_context == NORMAL && !funcdef_flag
+ && current_binding_level->level_chain == global_binding_level)
+ decl_context = PARM;
+
+ /* Look through the decl specs and record which ones appear.
+ Some typespecs are defined as built-in typenames.
+ Others, the ones that are modifiers of other types,
+ are represented by bits in SPECBITS: set the bits for
+ the modifiers that appear. Storage class keywords are also in SPECBITS.
+
+ If there is a typedef name or a type, store the type in TYPE.
+ This includes builtin typedefs such as `int'.
+
+ Set EXPLICIT_INT or EXPLICIT_CHAR if the type is `int' or `char'
+ and did not come from a user typedef.
+
+ Set LONGLONG if `long' is mentioned twice. */
+
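+ /* Illustrative example: for `unsigned long long int x;' this loop
+ sets the RID_UNSIGNED and RID_LONG bits in SPECBITS, sets LONGLONG
+ on the second `long' (with a pedwarn if -pedantic), and sets
+ EXPLICIT_INT with TYPE the builtin `int' type. */
+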
+ for (spec = declspecs; spec; spec = TREE_CHAIN (spec))
+ {
+ register int i;
+ register tree id = TREE_VALUE (spec);
+
+ if (id == ridpointers[(int) RID_INT])
+ explicit_int = 1;
+ if (id == ridpointers[(int) RID_CHAR])
+ explicit_char = 1;
+
+ if (TREE_CODE (id) == IDENTIFIER_NODE)
+ for (i = (int) RID_FIRST_MODIFIER; i < (int) RID_MAX; i++)
+ {
+ if (ridpointers[i] == id)
+ {
+ if (i == (int) RID_LONG && specbits & (1 << i))
+ {
+ if (longlong)
+ error ("`long long long' is too long for GCC");
+ else
+ {
+ if (pedantic && ! in_system_header)
+ pedwarn ("ANSI C does not support `long long'");
+ longlong = 1;
+ }
+ }
+ else if (specbits & (1 << i))
+ pedwarn ("duplicate `%s'", IDENTIFIER_POINTER (id));
+ specbits |= 1 << i;
+ goto found;
+ }
+ }
+ if (type)
+ error ("two or more data types in declaration of `%s'", name);
+ /* Actual typedefs come to us as TYPE_DECL nodes. */
+ else if (TREE_CODE (id) == TYPE_DECL)
+ {
+ type = TREE_TYPE (id);
+ typedef_decl = id;
+ }
+ /* Built-in types come as identifiers. */
+ else if (TREE_CODE (id) == IDENTIFIER_NODE)
+ {
+ register tree t = lookup_name (id);
+ if (TREE_TYPE (t) == error_mark_node)
+ ;
+ else if (!t || TREE_CODE (t) != TYPE_DECL)
+ error ("`%s' fails to be a typedef or built in type",
+ IDENTIFIER_POINTER (id));
+ else
+ {
+ type = TREE_TYPE (t);
+ typedef_decl = t;
+ }
+ }
+ else if (TREE_CODE (id) != ERROR_MARK)
+ type = id;
+
+ found: {}
+ }
+
+ typedef_type = type;
+ if (type)
+ size_varies = C_TYPE_VARIABLE_SIZE (type);
+
+ /* No type at all: default to `int', and set DEFAULTED_INT
+ because it was not a user-defined typedef. */
+
+ if (type == 0)
+ {
+ if (funcdef_flag && warn_return_type
+ && ! (specbits & ((1 << (int) RID_LONG) | (1 << (int) RID_SHORT)
+ | (1 << (int) RID_SIGNED) | (1 << (int) RID_UNSIGNED))))
+ warn_about_return_type = 1;
+ defaulted_int = 1;
+ type = integer_type_node;
+ }
+
+ /* Now process the modifiers that were specified
+ and check for invalid combinations. */
+
+ /* Long double is a special combination. */
+
+ if ((specbits & 1 << (int) RID_LONG)
+ && TYPE_MAIN_VARIANT (type) == double_type_node)
+ {
+ specbits &= ~ (1 << (int) RID_LONG);
+ type = long_double_type_node;
+ }
+
+ /* Check all other uses of type modifiers. */
+
+ if (specbits & ((1 << (int) RID_LONG) | (1 << (int) RID_SHORT)
+ | (1 << (int) RID_UNSIGNED) | (1 << (int) RID_SIGNED)))
+ {
+ int ok = 0;
+
+ if (TREE_CODE (type) != INTEGER_TYPE)
+ error ("long, short, signed or unsigned invalid for `%s'", name);
+ else if ((specbits & 1 << (int) RID_LONG)
+ && (specbits & 1 << (int) RID_SHORT))
+ error ("long and short specified together for `%s'", name);
+ else if (((specbits & 1 << (int) RID_LONG)
+ || (specbits & 1 << (int) RID_SHORT))
+ && explicit_char)
+ error ("long or short specified with char for `%s'", name);
+ else if (((specbits & 1 << (int) RID_LONG)
+ || (specbits & 1 << (int) RID_SHORT))
+ && TREE_CODE (type) == REAL_TYPE)
+ error ("long or short specified with floating type for `%s'", name);
+ else if ((specbits & 1 << (int) RID_SIGNED)
+ && (specbits & 1 << (int) RID_UNSIGNED))
+ error ("signed and unsigned given together for `%s'", name);
+ else
+ {
+ ok = 1;
+ if (!explicit_int && !defaulted_int && !explicit_char && pedantic)
+ {
+ pedwarn ("long, short, signed or unsigned used invalidly for `%s'",
+ name);
+ if (flag_pedantic_errors)
+ ok = 0;
+ }
+ }
+
+ /* Discard the type modifiers if they are invalid. */
+ if (! ok)
+ {
+ specbits &= ~((1 << (int) RID_LONG) | (1 << (int) RID_SHORT)
+ | (1 << (int) RID_UNSIGNED) | (1 << (int) RID_SIGNED));
+ longlong = 0;
+ }
+ }
+
+ if ((specbits & (1 << (int) RID_COMPLEX))
+ && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
+ {
+ error ("complex invalid for `%s'", name);
+ specbits &= ~ (1 << (int) RID_COMPLEX);
+ }
+
+ /* Decide whether an integer type is signed or not.
+ Optionally treat bitfields as signed by default. */
+ if (specbits & 1 << (int) RID_UNSIGNED
+ /* Traditionally, all bitfields are unsigned. */
+ || (bitfield && flag_traditional
+ && (! explicit_flag_signed_bitfields || !flag_signed_bitfields))
+ || (bitfield && ! flag_signed_bitfields
+ && (explicit_int || defaulted_int || explicit_char
+ /* A typedef for plain `int' without `signed'
+ can be controlled just like plain `int'. */
+ || ! (typedef_decl != 0
+ && C_TYPEDEF_EXPLICITLY_SIGNED (typedef_decl)))
+ && TREE_CODE (type) != ENUMERAL_TYPE
+ && !(specbits & 1 << (int) RID_SIGNED)))
+ {
+ if (longlong)
+ type = long_long_unsigned_type_node;
+ else if (specbits & 1 << (int) RID_LONG)
+ type = long_unsigned_type_node;
+ else if (specbits & 1 << (int) RID_SHORT)
+ type = short_unsigned_type_node;
+ else if (type == char_type_node)
+ type = unsigned_char_type_node;
+ else if (typedef_decl)
+ type = unsigned_type (type);
+ else
+ type = unsigned_type_node;
+ }
+ else if ((specbits & 1 << (int) RID_SIGNED)
+ && type == char_type_node)
+ type = signed_char_type_node;
+ else if (longlong)
+ type = long_long_integer_type_node;
+ else if (specbits & 1 << (int) RID_LONG)
+ type = long_integer_type_node;
+ else if (specbits & 1 << (int) RID_SHORT)
+ type = short_integer_type_node;
+
+ if (specbits & 1 << (int) RID_COMPLEX)
+ {
+ /* If we just have "complex", it is equivalent to
+ "complex double", but if any modifiers at all are specified it is
+ the complex form of TYPE. E.g., "complex short" is
+ "complex short int". */
+
+ if (defaulted_int && ! longlong
+ && ! (specbits & ((1 << (int) RID_LONG) | (1 << (int) RID_SHORT)
+ | (1 << (int) RID_SIGNED)
+ | (1 << (int) RID_UNSIGNED))))
+ type = complex_double_type_node;
+ else if (type == integer_type_node)
+ type = complex_integer_type_node;
+ else if (type == float_type_node)
+ type = complex_float_type_node;
+ else if (type == double_type_node)
+ type = complex_double_type_node;
+ else if (type == long_double_type_node)
+ type = complex_long_double_type_node;
+ else
+ type = build_complex_type (type);
+ }
+
+ /* Set CONSTP if this declaration is `const', whether by
+ explicit specification or via a typedef.
+ Likewise for VOLATILEP. */
+
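+ /* Illustrative example: given `typedef const int cint; const cint x;'
+ CONSTP is 1 from SPECBITS plus 1 from TYPE_READONLY, so the
+ duplicate-`const' pedwarn below fires. */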
+ constp = !! (specbits & 1 << (int) RID_CONST) + TYPE_READONLY (type);
+ volatilep = !! (specbits & 1 << (int) RID_VOLATILE) + TYPE_VOLATILE (type);
+ inlinep = !! (specbits & (1 << (int) RID_INLINE));
+ if (constp > 1)
+ pedwarn ("duplicate `const'");
+ if (volatilep > 1)
+ pedwarn ("duplicate `volatile'");
+ if (! flag_gen_aux_info && (TYPE_READONLY (type) || TYPE_VOLATILE (type)))
+ type = TYPE_MAIN_VARIANT (type);
+
+ /* Warn if two storage classes are given. Default to `auto'. */
+
+ {
+ int nclasses = 0;
+
+ if (specbits & 1 << (int) RID_AUTO) nclasses++;
+ if (specbits & 1 << (int) RID_STATIC) nclasses++;
+ if (specbits & 1 << (int) RID_EXTERN) nclasses++;
+ if (specbits & 1 << (int) RID_REGISTER) nclasses++;
+ if (specbits & 1 << (int) RID_TYPEDEF) nclasses++;
+ if (specbits & 1 << (int) RID_ITERATOR) nclasses++;
+
+ /* Warn about storage classes that are invalid for certain
+ kinds of declarations (parameters, typenames, etc.). */
+
+ if (nclasses > 1)
+ error ("multiple storage classes in declaration of `%s'", name);
+ else if (funcdef_flag
+ && (specbits
+ & ((1 << (int) RID_REGISTER)
+ | (1 << (int) RID_AUTO)
+ | (1 << (int) RID_TYPEDEF))))
+ {
+ if (specbits & 1 << (int) RID_AUTO
+ && (pedantic || current_binding_level == global_binding_level))
+ pedwarn ("function definition declared `auto'");
+ if (specbits & 1 << (int) RID_REGISTER)
+ error ("function definition declared `register'");
+ if (specbits & 1 << (int) RID_TYPEDEF)
+ error ("function definition declared `typedef'");
+ specbits &= ~ ((1 << (int) RID_TYPEDEF) | (1 << (int) RID_REGISTER)
+ | (1 << (int) RID_AUTO));
+ }
+ else if (decl_context != NORMAL && nclasses > 0)
+ {
+ if (decl_context == PARM && specbits & 1 << (int) RID_REGISTER)
+ ;
+ else
+ {
+ error ((decl_context == FIELD
+ ? "storage class specified for structure field `%s'"
+ : (decl_context == PARM
+ ? "storage class specified for parameter `%s'"
+ : "storage class specified for typename")),
+ name);
+ specbits &= ~ ((1 << (int) RID_TYPEDEF) | (1 << (int) RID_REGISTER)
+ | (1 << (int) RID_AUTO) | (1 << (int) RID_STATIC)
+ | (1 << (int) RID_EXTERN));
+ }
+ }
+ else if (specbits & 1 << (int) RID_EXTERN && initialized && ! funcdef_flag)
+ {
+ /* `extern' with initialization is invalid if not at top level. */
+ if (current_binding_level == global_binding_level)
+ warning ("`%s' initialized and declared `extern'", name);
+ else
+ error ("`%s' has both `extern' and initializer", name);
+ }
+ else if (specbits & 1 << (int) RID_EXTERN && funcdef_flag
+ && current_binding_level != global_binding_level)
+ error ("nested function `%s' declared `extern'", name);
+ else if (current_binding_level == global_binding_level
+ && specbits & (1 << (int) RID_AUTO))
+ error ("top-level declaration of `%s' specifies `auto'", name);
+ else if ((specbits & 1 << (int) RID_ITERATOR)
+ && TREE_CODE (declarator) != IDENTIFIER_NODE)
+ {
+ error ("iterator `%s' has derived type", name);
+ type = error_mark_node;
+ }
+ else if ((specbits & 1 << (int) RID_ITERATOR)
+ && TREE_CODE (type) != INTEGER_TYPE)
+ {
+ error ("iterator `%s' has noninteger type", name);
+ type = error_mark_node;
+ }
+ }
+
+ /* Now figure out the structure of the declarator proper.
+ Descend through it, creating more complex types, until we reach
+ the declared identifier (or NULL_TREE, in an absolute declarator). */
+
+ while (declarator && TREE_CODE (declarator) != IDENTIFIER_NODE)
+ {
+ if (type == error_mark_node)
+ {
+ declarator = TREE_OPERAND (declarator, 0);
+ continue;
+ }
+
+ /* Each level of DECLARATOR is either an ARRAY_REF (for ...[..]),
+ an INDIRECT_REF (for *...),
+ a CALL_EXPR (for ...(...)),
+ an identifier (for the name being declared)
+ or a null pointer (for the place in an absolute declarator
+ where the name was omitted).
+ For the last two cases, we have just exited the loop.
+
+ At this point, TYPE is the type of elements of an array,
+ or for a function to return, or for a pointer to point to.
+ After this sequence of ifs, TYPE is the type of the
+ array or function or pointer, and DECLARATOR has had its
+ outermost layer removed. */
+
+ if (TREE_CODE (declarator) == ARRAY_REF)
+ {
+ register tree itype = NULL_TREE;
+ register tree size = TREE_OPERAND (declarator, 1);
+ /* An uninitialized decl with `extern' is a reference. */
+ int extern_ref = !initialized && (specbits & (1 << (int) RID_EXTERN));
+ /* The index is a signed object `sizetype' bits wide. */
+ tree index_type = signed_type (sizetype);
+
+ declarator = TREE_OPERAND (declarator, 0);
+
+ /* Check for some types that there cannot be arrays of. */
+
+ if (TYPE_MAIN_VARIANT (type) == void_type_node)
+ {
+ error ("declaration of `%s' as array of voids", name);
+ type = error_mark_node;
+ }
+
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ error ("declaration of `%s' as array of functions", name);
+ type = error_mark_node;
+ }
+
+ if (size == error_mark_node)
+ type = error_mark_node;
+
+ if (type == error_mark_node)
+ continue;
+
+ /* If this is a block level extern, it must live past the end
+ of the function so that we can check it against other extern
+ declarations (IDENTIFIER_LIMBO_VALUE). */
+ if (extern_ref && allocation_temporary_p ())
+ end_temporary_allocation ();
+
+ /* If size was specified, set ITYPE to a range-type for that size.
+ Otherwise, ITYPE remains null. finish_decl may figure it out
+ from an initial value. */
+
+ if (size)
+ {
+ /* Strip NON_LVALUE_EXPRs since we aren't using it as an lvalue. */
+ STRIP_TYPE_NOPS (size);
+
+ if (TREE_CODE (TREE_TYPE (size)) != INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (size)) != ENUMERAL_TYPE)
+ {
+ error ("size of array `%s' has non-integer type", name);
+ size = integer_one_node;
+ }
+
+ if (pedantic && integer_zerop (size))
+ pedwarn ("ANSI C forbids zero-size array `%s'", name);
+
+ if (TREE_CODE (size) == INTEGER_CST)
+ {
+ constant_expression_warning (size);
+ if (tree_int_cst_sgn (size) < 0)
+ {
+ error ("size of array `%s' is negative", name);
+ size = integer_one_node;
+ }
+ }
+ else
+ {
+ /* Make sure the array size remains visibly nonconstant
+ even if it is (e.g.) a const variable with known value. */
+ size_varies = 1;
+
+ if (pedantic)
+ {
+ if (TREE_CONSTANT (size))
+ pedwarn ("ANSI C forbids array `%s' whose size can't be evaluated", name);
+ else
+ pedwarn ("ANSI C forbids variable-size array `%s'", name);
+ }
+ }
+
+ /* Convert size to index_type, so that if it is a variable
+ the computations will be done in the proper mode. */
+ itype = fold (build (MINUS_EXPR, index_type,
+ convert (index_type, size),
+ convert (index_type, size_one_node)));
+
+ if (size_varies)
+ itype = variable_size (itype);
+ itype = build_index_type (itype);
+ }
+
+#if 0 /* This had bad results for pointers to arrays, as in
+ union incomplete (*foo)[4]; */
+ /* Complain about arrays of incomplete types, except in typedefs. */
+
+ if (TYPE_SIZE (type) == 0
+ /* Avoid multiple warnings for nested array types. */
+ && TREE_CODE (type) != ARRAY_TYPE
+ && !(specbits & (1 << (int) RID_TYPEDEF))
+ && !C_TYPE_BEING_DEFINED (type))
+ warning ("array type has incomplete element type");
+#endif
+
+#if 0 /* We shouldn't have a function type here at all!
+ Functions aren't allowed as array elements. */
+ if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
+ && (constp || volatilep))
+ pedwarn ("ANSI C forbids const or volatile function types");
+#endif
+
+ /* Build the array type itself, then merge any constancy or
+ volatility into the target type. We must do it in this order
+ to ensure that the TYPE_MAIN_VARIANT field of the array type
+ is set correctly. */
+
+ type = build_array_type (type, itype);
+ if (constp || volatilep)
+ type = c_build_type_variant (type, constp, volatilep);
+
+#if 0 /* don't clear these; leave them set so that the array type
+ or the variable is itself const or volatile. */
+ constp = 0;
+ volatilep = 0;
+#endif
+
+ if (size_varies)
+ C_TYPE_VARIABLE_SIZE (type) = 1;
+ }
+ else if (TREE_CODE (declarator) == CALL_EXPR)
+ {
+ int extern_ref = (!(specbits & (1 << (int) RID_AUTO))
+ || current_binding_level == global_binding_level);
+ tree arg_types;
+
+ /* Declaring a function type.
+ Make sure we have a valid type for the function to return. */
+ if (type == error_mark_node)
+ continue;
+
+ size_varies = 0;
+
+ /* Warn about some types functions can't return. */
+
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ error ("`%s' declared as function returning a function", name);
+ type = integer_type_node;
+ }
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ error ("`%s' declared as function returning an array", name);
+ type = integer_type_node;
+ }
+
+#ifndef TRADITIONAL_RETURN_FLOAT
+ /* Traditionally, declaring return type float means double. */
+
+ if (flag_traditional && TYPE_MAIN_VARIANT (type) == float_type_node)
+ type = double_type_node;
+#endif /* TRADITIONAL_RETURN_FLOAT */
+
+ /* If this is a block level extern, it must live past the end
+ of the function so that we can check it against other extern
+ declarations (IDENTIFIER_LIMBO_VALUE). */
+ if (extern_ref && allocation_temporary_p ())
+ end_temporary_allocation ();
+
+ /* Construct the function type and go to the next
+ inner layer of declarator. */
+
+ arg_types = grokparms (TREE_OPERAND (declarator, 1),
+ funcdef_flag
+ /* Say it's a definition
+ only for the CALL_EXPR
+ closest to the identifier. */
+ && TREE_CODE (TREE_OPERAND (declarator, 0)) == IDENTIFIER_NODE);
+#if 0 /* This seems to be false. We turn off temporary allocation
+ above in this function if -traditional.
+ And this code caused inconsistent results with prototypes:
+ callers would ignore them, and pass arguments wrong. */
+
+ /* Omit the arg types if -traditional, since the arg types
+ and the list links might not be permanent. */
+ type = build_function_type (type,
+ flag_traditional
+ ? NULL_TREE : arg_types);
+#endif
+ /* ANSI seems to say that `const int foo ();'
+ does not make the function foo const. */
+ if (constp || volatilep)
+ type = c_build_type_variant (type, constp, volatilep);
+ constp = 0;
+ volatilep = 0;
+
+ type = build_function_type (type, arg_types);
+ declarator = TREE_OPERAND (declarator, 0);
+
+ /* Set the TYPE_CONTEXTs for each tagged type which is local to
+ the formal parameter list of this FUNCTION_TYPE to point to
+ the FUNCTION_TYPE node itself. */
+
+ {
+ register tree link;
+
+ for (link = current_function_parm_tags;
+ link;
+ link = TREE_CHAIN (link))
+ TYPE_CONTEXT (TREE_VALUE (link)) = type;
+ }
+ }
+ else if (TREE_CODE (declarator) == INDIRECT_REF)
+ {
+ /* Merge any constancy or volatility into the target type
+ for the pointer. */
+
+ if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
+ && (constp || volatilep))
+ pedwarn ("ANSI C forbids const or volatile function types");
+ if (constp || volatilep)
+ type = c_build_type_variant (type, constp, volatilep);
+ constp = 0;
+ volatilep = 0;
+ size_varies = 0;
+
+ type = build_pointer_type (type);
+
+ /* Process a list of type modifier keywords
+ (such as const or volatile) that were given inside the `*'. */
+
+ if (TREE_TYPE (declarator))
+ {
+ register tree typemodlist;
+ int erred = 0;
+ for (typemodlist = TREE_TYPE (declarator); typemodlist;
+ typemodlist = TREE_CHAIN (typemodlist))
+ {
+ if (TREE_VALUE (typemodlist) == ridpointers[(int) RID_CONST])
+ constp++;
+ else if (TREE_VALUE (typemodlist) == ridpointers[(int) RID_VOLATILE])
+ volatilep++;
+ else if (!erred)
+ {
+ erred = 1;
+ error ("invalid type modifier within pointer declarator");
+ }
+ }
+ if (constp > 1)
+ pedwarn ("duplicate `const'");
+ if (volatilep > 1)
+ pedwarn ("duplicate `volatile'");
+ }
+
+ declarator = TREE_OPERAND (declarator, 0);
+ }
+ else
+ abort ();
+
+ }
+
+ /* Now TYPE has the actual type. */
+
+ /* If this is declaring a typedef name, return a TYPE_DECL. */
+
+ if (specbits & (1 << (int) RID_TYPEDEF))
+ {
+ tree decl;
+ /* Note that the grammar rejects storage classes
+ in typenames, fields or parameters */
+ if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
+ && (constp || volatilep))
+ pedwarn ("ANSI C forbids const or volatile function types");
+ if (constp || volatilep)
+ type = c_build_type_variant (type, constp, volatilep);
+ pop_obstacks ();
+ decl = build_decl (TYPE_DECL, declarator, type);
+ if ((specbits & (1 << (int) RID_SIGNED))
+ || (typedef_decl && C_TYPEDEF_EXPLICITLY_SIGNED (typedef_decl)))
+ C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1;
+ return decl;
+ }
+
+ /* Detect the case of an array type of unspecified size
+ which came, as such, directly from a typedef name.
+ We must copy the type, so that each identifier gets
+ a distinct type, whose size can be controlled separately
+ by its own initializer. */
+
+ if (type != 0 && typedef_type != 0
+ && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (typedef_type)
+ && TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == 0)
+ {
+ type = build_array_type (TREE_TYPE (type), 0);
+ if (size_varies)
+ C_TYPE_VARIABLE_SIZE (type) = 1;
+ }
+
+ /* If this is a type name (such as, in a cast or sizeof),
+ compute the type and return it now. */
+
+ if (decl_context == TYPENAME)
+ {
+ /* Note that the grammar rejects storage classes
+ in typenames, fields or parameters */
+ if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
+ && (constp || volatilep))
+ pedwarn ("ANSI C forbids const or volatile function types");
+ if (constp || volatilep)
+ type = c_build_type_variant (type, constp, volatilep);
+ pop_obstacks ();
+ return type;
+ }
+
+ /* Aside from typedefs and type names (handled above),
+ `void' at top level (not within pointer)
+ is allowed only in public variables.
+ We don't complain about parms either, but that is because
+ a better error message can be made later. */
+
+ if (TYPE_MAIN_VARIANT (type) == void_type_node && decl_context != PARM
+ && ! ((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE)
+ && ((specbits & (1 << (int) RID_EXTERN))
+ || (current_binding_level == global_binding_level
+ && !(specbits
+ & ((1 << (int) RID_STATIC) | (1 << (int) RID_REGISTER)))))))
+ {
+ error ("variable or field `%s' declared void",
+ IDENTIFIER_POINTER (declarator));
+ type = integer_type_node;
+ }
+
+ /* Now create the decl, which may be a VAR_DECL, a PARM_DECL
+ or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */
+
+ {
+ register tree decl;
+
+ if (decl_context == PARM)
+ {
+ tree type_as_written = type;
+ tree main_type;
+
+ /* A parameter declared as an array of T is really a pointer to T.
+ One declared as a function is really a pointer to a function. */
+
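+ /* Illustrative example: a parm written `int a[10]' gets type
+ `int *' below, and one written `int g (void)' gets type
+ `int (*) (void)'. */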
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ /* Transfer const-ness of array into that of type pointed to. */
+ type = TREE_TYPE (type);
+ if (constp || volatilep)
+ type = c_build_type_variant (type, constp, volatilep);
+ type = build_pointer_type (type);
+ volatilep = constp = 0;
+ size_varies = 0;
+ }
+ else if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ if (pedantic && (constp || volatilep))
+ pedwarn ("ANSI C forbids const or volatile function types");
+ if (constp || volatilep)
+ type = c_build_type_variant (type, constp, volatilep);
+ type = build_pointer_type (type);
+ volatilep = constp = 0;
+ }
+
+ decl = build_decl (PARM_DECL, declarator, type);
+ if (size_varies)
+ C_DECL_VARIABLE_SIZE (decl) = 1;
+
+ /* Compute the type actually passed in the parmlist,
+ for the case where there is no prototype.
+ (For example, shorts and chars are passed as ints.)
+ When there is a prototype, this is overridden later. */
+
+ DECL_ARG_TYPE (decl) = type;
+ main_type = (type == error_mark_node
+ ? error_mark_node
+ : TYPE_MAIN_VARIANT (type));
+ if (main_type == float_type_node)
+ DECL_ARG_TYPE (decl) = double_type_node;
+ /* Don't use TYPE_PRECISION to decide whether to promote,
+ because we should convert short if it's the same size as int,
+ but we should not convert long if it's the same size as int. */
+ else if (TREE_CODE (main_type) != ERROR_MARK
+ && C_PROMOTING_INTEGER_TYPE_P (main_type))
+ {
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)
+ && TREE_UNSIGNED (type))
+ DECL_ARG_TYPE (decl) = unsigned_type_node;
+ else
+ DECL_ARG_TYPE (decl) = integer_type_node;
+ }
+
+ DECL_ARG_TYPE_AS_WRITTEN (decl) = type_as_written;
+ }
+ else if (decl_context == FIELD)
+ {
+ /* Structure field. It may not be a function. */
+
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ error ("field `%s' declared as a function",
+ IDENTIFIER_POINTER (declarator));
+ type = build_pointer_type (type);
+ }
+ else if (TREE_CODE (type) != ERROR_MARK && TYPE_SIZE (type) == 0)
+ {
+ error ("field `%s' has incomplete type",
+ IDENTIFIER_POINTER (declarator));
+ type = error_mark_node;
+ }
+ /* Move type qualifiers down to element of an array. */
+ if (TREE_CODE (type) == ARRAY_TYPE && (constp || volatilep))
+ {
+ type = build_array_type (c_build_type_variant (TREE_TYPE (type),
+ constp, volatilep),
+ TYPE_DOMAIN (type));
+#if 0 /* Leave the field const or volatile as well. */
+ constp = volatilep = 0;
+#endif
+ }
+ decl = build_decl (FIELD_DECL, declarator, type);
+ if (size_varies)
+ C_DECL_VARIABLE_SIZE (decl) = 1;
+ }
+ else if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ /* Every function declaration is "external"
+ except for those which are inside a function body
+ in which `auto' is used.
+ That is a case not specified by ANSI C,
+ and we use it for forward declarations for nested functions. */
+ int extern_ref = (!(specbits & (1 << (int) RID_AUTO))
+ || current_binding_level == global_binding_level);
+
+ if (specbits & (1 << (int) RID_AUTO)
+ && (pedantic || current_binding_level == global_binding_level))
+ pedwarn ("invalid storage class for function `%s'",
+ IDENTIFIER_POINTER (declarator));
+ if (specbits & (1 << (int) RID_REGISTER))
+ error ("invalid storage class for function `%s'",
+ IDENTIFIER_POINTER (declarator));
+ /* Function declaration not at top level.
+ Storage classes other than `extern' are not allowed
+ and `extern' makes no difference. */
+ if (current_binding_level != global_binding_level
+ && (specbits & ((1 << (int) RID_STATIC) | (1 << (int) RID_INLINE)))
+ && pedantic)
+ pedwarn ("invalid storage class for function `%s'",
+ IDENTIFIER_POINTER (declarator));
+
+ /* If this is a block level extern, it must live past the end
+ of the function so that we can check it against other
+ extern declarations (IDENTIFIER_LIMBO_VALUE). */
+ if (extern_ref && allocation_temporary_p ())
+ end_temporary_allocation ();
+
+ decl = build_decl (FUNCTION_DECL, declarator, type);
+
+ if (pedantic && (constp || volatilep)
+ && ! DECL_IN_SYSTEM_HEADER (decl))
+ pedwarn ("ANSI C forbids const or volatile functions");
+
+ if (volatilep
+ && TREE_TYPE (TREE_TYPE (decl)) != void_type_node)
+ warning ("`noreturn' function returns non-void value");
+
+ if (extern_ref)
+ DECL_EXTERNAL (decl) = 1;
+ /* Record absence of global scope for `static' or `auto'. */
+ TREE_PUBLIC (decl)
+ = !(specbits & ((1 << (int) RID_STATIC) | (1 << (int) RID_AUTO)));
+ /* Record presence of `inline', if it is reasonable. */
+ if (inlinep)
+ {
+ tree last = tree_last (TYPE_ARG_TYPES (type));
+
+ if (! strcmp (IDENTIFIER_POINTER (declarator), "main"))
+ warning ("cannot inline function `main'");
+ else if (last && (TYPE_MAIN_VARIANT (TREE_VALUE (last))
+ != void_type_node))
+ warning ("inline declaration ignored for function with `...'");
+ else
+ /* Assume that otherwise the function can be inlined. */
+ DECL_INLINE (decl) = 1;
+
+ if (specbits & (1 << (int) RID_EXTERN))
+ current_extern_inline = 1;
+ }
+ }
+ else
+ {
+ /* It's a variable. */
+ /* An uninitialized decl with `extern' is a reference. */
+ int extern_ref = !initialized && (specbits & (1 << (int) RID_EXTERN));
+
+ /* Move type qualifiers down to element of an array. */
+ if (TREE_CODE (type) == ARRAY_TYPE && (constp || volatilep))
+ {
+ type = build_array_type (c_build_type_variant (TREE_TYPE (type),
+ constp, volatilep),
+ TYPE_DOMAIN (type));
+#if 0 /* Leave the variable const or volatile as well. */
+ constp = volatilep = 0;
+#endif
+ }
+
+ /* If this is a block level extern, it must live past the end
+ of the function so that we can check it against other
+ extern declarations (IDENTIFIER_LIMBO_VALUE). */
+ if (extern_ref && allocation_temporary_p ())
+ end_temporary_allocation ();
+
+ decl = build_decl (VAR_DECL, declarator, type);
+ if (size_varies)
+ C_DECL_VARIABLE_SIZE (decl) = 1;
+
+ if (inlinep)
+ pedwarn_with_decl (decl, "variable `%s' declared `inline'");
+
+ DECL_EXTERNAL (decl) = extern_ref;
+ /* At top level, the presence of a `static' or `register' storage
+ class specifier, or the absence of all storage class specifiers
+ makes this declaration a definition (perhaps tentative). Also,
+ the absence of both `static' and `register' makes it public. */
+ if (current_binding_level == global_binding_level)
+ {
+ TREE_PUBLIC (decl)
+ = !(specbits
+ & ((1 << (int) RID_STATIC) | (1 << (int) RID_REGISTER)));
+ TREE_STATIC (decl) = ! DECL_EXTERNAL (decl);
+ }
+ /* Not at top level, only `static' makes a static definition. */
+ else
+ {
+ TREE_STATIC (decl) = (specbits & (1 << (int) RID_STATIC)) != 0;
+ TREE_PUBLIC (decl) = DECL_EXTERNAL (decl);
+ }
+
+ if (specbits & 1 << (int) RID_ITERATOR)
+ ITERATOR_P (decl) = 1;
+ }
+
+ /* Record `register' declaration for warnings on &
+ and in case we do stupid register allocation. */
+
+ if (specbits & (1 << (int) RID_REGISTER))
+ DECL_REGISTER (decl) = 1;
+
+ /* Record constancy and volatility. */
+
+ if (constp)
+ TREE_READONLY (decl) = 1;
+ if (volatilep)
+ {
+ TREE_SIDE_EFFECTS (decl) = 1;
+ TREE_THIS_VOLATILE (decl) = 1;
+ }
+ /* If a type has volatile components, it should be stored in memory.
+ Otherwise, the fact that those components are volatile
+ would be ignored and might even crash the compiler. */
+ if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl)))
+ mark_addressable (decl);
+
+ pop_obstacks ();
+
+ return decl;
+ }
+}
+
+/* Decode the parameter-list info for a function type or function definition.
+ The argument is the value returned by `get_parm_info' (or made in parse.y
+ if there is an identifier list instead of a parameter decl list).
+ These two functions are separate because, when a function returns
+ or receives functions, each is called multiple times and the order
+ of calls differs. The last call to `grokparms' is always the one
+ that contains the formal parameter names of a function definition.
+
+ Store in `last_function_parms' a chain of the decls of parms.
+ Also store in `last_function_parm_tags' a chain of the struct, union,
+ and enum tags declared among the parms.
+
+ Return a list of arg types to use in the FUNCTION_TYPE for this function.
+
+ FUNCDEF_FLAG is nonzero for a function definition, 0 for
+ a mere declaration. A nonempty identifier-list gets an error message
+ when FUNCDEF_FLAG is zero. */
+
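+/* Illustrative example: the old-style declaration `int f (x);' arrives
+ here with an identifier list, draws the pedwarn below, and returns 0,
+ which yields an unprototyped FUNCTION_TYPE in grokdeclarator. */
+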
+static tree
+grokparms (parms_info, funcdef_flag)
+ tree parms_info;
+ int funcdef_flag;
+{
+ tree first_parm = TREE_CHAIN (parms_info);
+
+ last_function_parms = TREE_PURPOSE (parms_info);
+ last_function_parm_tags = TREE_VALUE (parms_info);
+
+ if (warn_strict_prototypes && first_parm == 0 && !funcdef_flag
+ && !in_system_header)
+ warning ("function declaration isn't a prototype");
+
+ if (first_parm != 0
+ && TREE_CODE (TREE_VALUE (first_parm)) == IDENTIFIER_NODE)
+ {
+ if (! funcdef_flag)
+ pedwarn ("parameter names (without types) in function declaration");
+
+ last_function_parms = first_parm;
+ return 0;
+ }
+ else
+ {
+ tree parm;
+ tree typelt;
+ /* We no longer test FUNCDEF_FLAG.
+ If the arg types are incomplete in a declaration,
+ they must include undefined tags.
+ These tags can never be defined in the scope of the declaration,
+ so the types can never be completed,
+ and no call can be compiled successfully. */
+#if 0
+ /* In a fcn definition, arg types must be complete. */
+ if (funcdef_flag)
+#endif
+ for (parm = last_function_parms, typelt = first_parm;
+ parm;
+ parm = TREE_CHAIN (parm))
+ /* Skip over any enumeration constants declared here. */
+ if (TREE_CODE (parm) == PARM_DECL)
+ {
+ /* Barf if the parameter itself has an incomplete type. */
+ tree type = TREE_VALUE (typelt);
+ if (TYPE_SIZE (type) == 0)
+ {
+ if (funcdef_flag && DECL_NAME (parm) != 0)
+ error ("parameter `%s' has incomplete type",
+ IDENTIFIER_POINTER (DECL_NAME (parm)));
+ else
+ warning ("parameter has incomplete type");
+ if (funcdef_flag)
+ {
+ TREE_VALUE (typelt) = error_mark_node;
+ TREE_TYPE (parm) = error_mark_node;
+ }
+ }
+#if 0 /* This has been replaced by parm_tags_warning
+ which uses a more accurate criterion for what to warn about. */
+ else
+ {
+ /* Now warn if is a pointer to an incomplete type. */
+ while (TREE_CODE (type) == POINTER_TYPE
+ || TREE_CODE (type) == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+ type = TYPE_MAIN_VARIANT (type);
+ if (TYPE_SIZE (type) == 0)
+ {
+ if (DECL_NAME (parm) != 0)
+ warning ("parameter `%s' points to incomplete type",
+ IDENTIFIER_POINTER (DECL_NAME (parm)));
+ else
+ warning ("parameter points to incomplete type");
+ }
+ }
+#endif
+ typelt = TREE_CHAIN (typelt);
+ }
+
+ /* Allocate the list of types the way we allocate a type. */
+ if (first_parm && ! TREE_PERMANENT (first_parm))
+ {
+ /* Construct a copy of the list of types
+ on the saveable obstack. */
+ tree result = NULL;
+ for (typelt = first_parm; typelt; typelt = TREE_CHAIN (typelt))
+ result = saveable_tree_cons (NULL_TREE, TREE_VALUE (typelt),
+ result);
+ return nreverse (result);
+ }
+ else
+ /* The list we have is permanent already. */
+ return first_parm;
+ }
+}
+
+
+/* Return a tree_list node with info on a parameter list just parsed.
+ The TREE_PURPOSE is a chain of decls of those parms.
+ The TREE_VALUE is a list of structure, union and enum tags defined.
+ The TREE_CHAIN is a list of argument types to go in the FUNCTION_TYPE.
+ This tree_list node is later fed to `grokparms'.
+
+ VOID_AT_END nonzero means append `void' to the end of the type-list.
+ Zero means the parmlist ended with an ellipsis so don't append `void'. */
+
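+/* Illustrative example: for a parmlist of just `(void)' the code below
+ short-circuits and returns no parm decls, no tags, and the type list
+ (void); that is how `int f (void)' gets a prototype with zero
+ arguments. */
+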
+tree
+get_parm_info (void_at_end)
+ int void_at_end;
+{
+ register tree decl, t;
+ register tree types = 0;
+ int erred = 0;
+ tree tags = gettags ();
+ tree parms = getdecls ();
+ tree new_parms = 0;
+ tree order = current_binding_level->parm_order;
+
+ /* Just `void' (and no ellipsis) is special. There are really no parms. */
+ if (void_at_end && parms != 0
+ && TREE_CHAIN (parms) == 0
+ && TYPE_MAIN_VARIANT (TREE_TYPE (parms)) == void_type_node
+ && DECL_NAME (parms) == 0)
+ {
+ parms = NULL_TREE;
+ storedecls (NULL_TREE);
+ return saveable_tree_cons (NULL_TREE, NULL_TREE,
+ saveable_tree_cons (NULL_TREE, void_type_node, NULL_TREE));
+ }
+
+ /* Extract enumerator values and other non-parms declared with the parms.
+ Likewise any forward parm decls that didn't have real parm decls. */
+ for (decl = parms; decl; )
+ {
+ tree next = TREE_CHAIN (decl);
+
+ if (TREE_CODE (decl) != PARM_DECL)
+ {
+ TREE_CHAIN (decl) = new_parms;
+ new_parms = decl;
+ }
+ else if (TREE_ASM_WRITTEN (decl))
+ {
+ error_with_decl (decl, "parameter `%s' has just a forward declaration");
+ TREE_CHAIN (decl) = new_parms;
+ new_parms = decl;
+ }
+ decl = next;
+ }
+
+ /* Put the parm decls back into the order they had in the parm list. */
+ for (t = order; t; t = TREE_CHAIN (t))
+ {
+ if (TREE_CHAIN (t))
+ TREE_CHAIN (TREE_VALUE (t)) = TREE_VALUE (TREE_CHAIN (t));
+ else
+ TREE_CHAIN (TREE_VALUE (t)) = 0;
+ }
+
+ new_parms = chainon (order ? nreverse (TREE_VALUE (order)) : 0,
+ new_parms);
+
+ /* Store the parmlist in the binding level since the old one
+ is no longer a valid list. (We have changed the chain pointers.) */
+ storedecls (new_parms);
+
+ for (decl = new_parms; decl; decl = TREE_CHAIN (decl))
+ /* There may also be declarations for enumerators if an enumeration
+ type is declared among the parms. Ignore them here. */
+ if (TREE_CODE (decl) == PARM_DECL)
+ {
+ /* Since there is a prototype,
+ args are passed in their declared types. */
+ tree type = TREE_TYPE (decl);
+ DECL_ARG_TYPE (decl) = type;
+#ifdef PROMOTE_PROTOTYPES
+ if ((TREE_CODE (type) == INTEGER_TYPE
+ || TREE_CODE (type) == ENUMERAL_TYPE)
+ && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
+ DECL_ARG_TYPE (decl) = integer_type_node;
+#endif
+
+ types = saveable_tree_cons (NULL_TREE, TREE_TYPE (decl), types);
+ if (TYPE_MAIN_VARIANT (TREE_VALUE (types)) == void_type_node && ! erred
+ && DECL_NAME (decl) == 0)
+ {
+ error ("`void' in parameter list must be the entire list");
+ erred = 1;
+ }
+ }
+
+ if (void_at_end)
+ return saveable_tree_cons (new_parms, tags,
+ nreverse (saveable_tree_cons (NULL_TREE, void_type_node, types)));
+
+ return saveable_tree_cons (new_parms, tags, nreverse (types));
+}
+
+/* At end of parameter list, warn about any struct, union or enum tags
+ defined within. Do so because these types cannot ever become complete. */
+
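+/* Illustrative example: `int f (struct s *p);' declares `struct s'
+ with prototype scope only, so the warnings below fire; no later
+ definition of `struct s' can complete the parameter's type. */
+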
+void
+parmlist_tags_warning ()
+{
+ tree elt;
+ static int already;
+
+ for (elt = current_binding_level->tags; elt; elt = TREE_CHAIN (elt))
+ {
+ enum tree_code code = TREE_CODE (TREE_VALUE (elt));
+ /* An anonymous union parm type is meaningful as a GNU extension.
+ So don't warn for that. */
+ if (code == UNION_TYPE && !pedantic)
+ continue;
+ if (TREE_PURPOSE (elt) != 0)
+ warning ("`%s %s' declared inside parameter list",
+ (code == RECORD_TYPE ? "struct"
+ : code == UNION_TYPE ? "union"
+ : "enum"),
+ IDENTIFIER_POINTER (TREE_PURPOSE (elt)));
+ else
+ warning ("anonymous %s declared inside parameter list",
+ (code == RECORD_TYPE ? "struct"
+ : code == UNION_TYPE ? "union"
+ : "enum"));
+
+ if (! already)
+ {
+ warning ("its scope is only this definition or declaration,");
+ warning ("which is probably not what you want.");
+ already = 1;
+ }
+ }
+}
+
+/* Get the struct, enum or union (CODE says which) with tag NAME.
+ Define the tag as a forward-reference if it is not defined. */
+
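+/* Illustrative example: `struct foo *p;' with no prior definition of
+ `struct foo' lands here; an empty RECORD_TYPE is pushed as a forward
+ reference and is filled in if a definition of `struct foo' appears
+ later. */
+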
+tree
+xref_tag (code, name)
+ enum tree_code code;
+ tree name;
+{
+ int temporary = allocation_temporary_p ();
+
+ /* If a cross reference is requested, look up the type
+ already defined for this tag and return it. */
+
+ register tree ref = lookup_tag (code, name, current_binding_level, 0);
+ /* Even if this is the wrong type of tag, return what we found.
+ There will be an error message anyway, from pending_xref_error.
+ If we create an empty xref just for an invalid use of the type,
+ the main result is to create lots of superfluous error messages. */
+ if (ref)
+ return ref;
+
+ push_obstacks_nochange ();
+
+ if (current_binding_level == global_binding_level && temporary)
+ end_temporary_allocation ();
+
+ /* If no such tag is yet defined, create a forward-reference node
+ and record it as the "definition".
+ When a real declaration of this type is found,
+ the forward-reference will be altered into a real type. */
+
+ ref = make_node (code);
+ if (code == ENUMERAL_TYPE)
+ {
+ /* (In ANSI, enums can be referred to only if already defined.) */
+ if (pedantic)
+ pedwarn ("ANSI C forbids forward references to `enum' types");
+ /* Give the type a default layout like unsigned int
+ to avoid crashing if it does not get defined. */
+ TYPE_MODE (ref) = TYPE_MODE (unsigned_type_node);
+ TYPE_ALIGN (ref) = TYPE_ALIGN (unsigned_type_node);
+ TREE_UNSIGNED (ref) = 1;
+ TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node);
+ TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node);
+ TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node);
+ }
+
+ pushtag (name, ref);
+
+ pop_obstacks ();
+
+ return ref;
+}
+
+/* Make sure that the tag NAME is defined *in the current binding level*
+ at least as a forward reference.
+ CODE says which kind of tag NAME ought to be.
+
+ We also do a push_obstacks_nochange
+ whose matching pop is in finish_struct. */
+
+tree
+start_struct (code, name)
+ enum tree_code code;
+ tree name;
+{
+ /* If there is already a tag defined at this binding level
+ (as a forward reference), just return it. */
+
+ register tree ref = 0;
+
+ push_obstacks_nochange ();
+ if (current_binding_level == global_binding_level)
+ end_temporary_allocation ();
+
+ if (name != 0)
+ ref = lookup_tag (code, name, current_binding_level, 1);
+ if (ref && TREE_CODE (ref) == code)
+ {
+ C_TYPE_BEING_DEFINED (ref) = 1;
+ if (TYPE_FIELDS (ref))
+ error ((code == UNION_TYPE ? "redefinition of `union %s'"
+ : "redefinition of `struct %s'"),
+ IDENTIFIER_POINTER (name));
+
+ return ref;
+ }
+
+ /* Otherwise create a forward-reference just so the tag is in scope. */
+
+ ref = make_node (code);
+ pushtag (name, ref);
+ C_TYPE_BEING_DEFINED (ref) = 1;
+ return ref;
+}
+
+/* Process the specs, declarator (NULL if omitted) and width (NULL if omitted)
+ of a structure component, returning a FIELD_DECL node.
+ WIDTH is non-NULL for bit fields only, and is an INTEGER_CST node.
+
+ This is done during the parsing of the struct declaration.
+ The FIELD_DECL nodes are chained together and the lot of them
+ are ultimately passed to `finish_struct' to make the RECORD_TYPE node. */
+
+tree
+grokfield (filename, line, declarator, declspecs, width)
+ char *filename;
+ int line;
+ tree declarator, declspecs, width;
+{
+ tree value;
+
+ /* The corresponding pop_obstacks is in finish_decl. */
+ push_obstacks_nochange ();
+
+ value = grokdeclarator (declarator, declspecs, width ? BITFIELD : FIELD, 0);
+
+ finish_decl (value, NULL_TREE, NULL_TREE);
+ DECL_INITIAL (value) = width;
+
+ maybe_objc_check_decl (value);
+ return value;
+}
+
+/* Function to help qsort sort FIELD_DECLs by name order. Identifier
+ nodes are shared, so comparing their addresses gives an arbitrary but
+ consistent ordering, which is all the sorted lookup needs. */
+
+static int
+field_decl_cmp (x, y)
+ tree *x, *y;
+{
+ return (long) DECL_NAME (*x) - (long) DECL_NAME (*y);
+}
+
+/* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T.
+ FIELDLIST is a chain of FIELD_DECL nodes for the fields.
+
+ We also do a pop_obstacks to match the push in start_struct. */
+
+tree
+finish_struct (t, fieldlist)
+ register tree t, fieldlist;
+{
+ register tree x;
+ int old_momentary;
+ int toplevel = global_binding_level == current_binding_level;
+
+ /* If this type was previously laid out as a forward reference,
+ make sure we lay it out again. */
+
+ TYPE_SIZE (t) = 0;
+
+ /* Nameless union parm types are useful as a GCC extension. */
+ if (! (TREE_CODE (t) == UNION_TYPE && TYPE_NAME (t) == 0 && !pedantic))
+ /* Otherwise, warn about any struct or union def. in parmlist. */
+ if (in_parm_level_p ())
+ {
+ if (pedantic)
+ pedwarn ((TREE_CODE (t) == UNION_TYPE ? "union defined inside parms"
+ : "structure defined inside parms"));
+ else if (! flag_traditional)
+ warning ((TREE_CODE (t) == UNION_TYPE ? "union defined inside parms"
+ : "structure defined inside parms"));
+ }
+
+ old_momentary = suspend_momentary ();
+
+ if (fieldlist == 0 && pedantic)
+ pedwarn ((TREE_CODE (t) == UNION_TYPE ? "union has no members"
+ : "structure has no members"));
+
+ /* Install struct as DECL_CONTEXT of each field decl.
+ Also process specified field sizes.
+ Set DECL_FIELD_SIZE to the specified size, or 0 if none specified.
+ The specified size is found in the DECL_INITIAL.
+ Store 0 there, except for ": 0" fields (so we can find them
+ and delete them, below). */
+
+ for (x = fieldlist; x; x = TREE_CHAIN (x))
+ {
+ DECL_CONTEXT (x) = t;
+ DECL_FIELD_SIZE (x) = 0;
+
+ /* If any field is const, the structure type is pseudo-const. */
+ if (TREE_READONLY (x))
+ C_TYPE_FIELDS_READONLY (t) = 1;
+ else
+ {
+ /* A field that is pseudo-const makes the structure likewise. */
+ tree t1 = TREE_TYPE (x);
+ while (TREE_CODE (t1) == ARRAY_TYPE)
+ t1 = TREE_TYPE (t1);
+ if ((TREE_CODE (t1) == RECORD_TYPE || TREE_CODE (t1) == UNION_TYPE)
+ && C_TYPE_FIELDS_READONLY (t1))
+ C_TYPE_FIELDS_READONLY (t) = 1;
+ }
+
+ /* Any field that is volatile means variables of this type must be
+ treated in some ways as volatile. */
+ if (TREE_THIS_VOLATILE (x))
+ C_TYPE_FIELDS_VOLATILE (t) = 1;
+
+ /* Any field of nominal variable size implies the structure is too. */
+ if (C_DECL_VARIABLE_SIZE (x))
+ C_TYPE_VARIABLE_SIZE (t) = 1;
+
+ /* Detect invalid nested redefinition. */
+ if (TREE_TYPE (x) == t)
+ error ("nested redefinition of `%s'",
+ IDENTIFIER_POINTER (TYPE_NAME (t)));
+
+ /* Detect invalid bit-field size. */
+ if (DECL_INITIAL (x))
+ STRIP_NOPS (DECL_INITIAL (x));
+ if (DECL_INITIAL (x))
+ {
+ if (TREE_CODE (DECL_INITIAL (x)) == INTEGER_CST)
+ constant_expression_warning (DECL_INITIAL (x));
+ else
+ {
+ error_with_decl (x, "bit-field `%s' width not an integer constant");
+ DECL_INITIAL (x) = NULL;
+ }
+ }
+
+ /* Detect invalid bit-field type. */
+ if (DECL_INITIAL (x)
+ && TREE_CODE (TREE_TYPE (x)) != INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (x)) != ENUMERAL_TYPE)
+ {
+ error_with_decl (x, "bit-field `%s' has invalid type");
+ DECL_INITIAL (x) = NULL;
+ }
+ if (DECL_INITIAL (x) && pedantic
+ && TYPE_MAIN_VARIANT (TREE_TYPE (x)) != integer_type_node
+ && TYPE_MAIN_VARIANT (TREE_TYPE (x)) != unsigned_type_node
+ /* Accept an enum that's equivalent to int or unsigned int. */
+ && !(TREE_CODE (TREE_TYPE (x)) == ENUMERAL_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (x))
+ == TYPE_PRECISION (integer_type_node))))
+ pedwarn_with_decl (x, "bit-field `%s' type invalid in ANSI C");
+
+ /* Detect and ignore out of range field width. */
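+ /* Illustrative example: with 32-bit ints, `int f : 33;' draws the
+ width-exceeds-type pedwarn and the width is dropped; a named
+ `int f : 0;' is an error, since only unnamed `: 0' fields (used
+ to force alignment) are valid. */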
+ if (DECL_INITIAL (x))
+ {
+ unsigned HOST_WIDE_INT width = TREE_INT_CST_LOW (DECL_INITIAL (x));
+
+ if (tree_int_cst_sgn (DECL_INITIAL (x)) < 0)
+ {
+ DECL_INITIAL (x) = NULL;
+ error_with_decl (x, "negative width in bit-field `%s'");
+ }
+ else if (TREE_INT_CST_HIGH (DECL_INITIAL (x)) != 0
+ || width > TYPE_PRECISION (TREE_TYPE (x)))
+ {
+ DECL_INITIAL (x) = NULL;
+ pedwarn_with_decl (x, "width of `%s' exceeds its type");
+ }
+ else if (width == 0 && DECL_NAME (x) != 0)
+ {
+ error_with_decl (x, "zero width for bit-field `%s'");
+ DECL_INITIAL (x) = NULL;
+ }
+ }
+
+ /* Process valid field width. */
+ if (DECL_INITIAL (x))
+ {
+ register int width = TREE_INT_CST_LOW (DECL_INITIAL (x));
+
+ DECL_FIELD_SIZE (x) = width;
+ DECL_BIT_FIELD (x) = 1;
+ DECL_INITIAL (x) = NULL;
+
+ if (width == 0)
+ {
+ /* field size 0 => force desired amount of alignment. */
+#ifdef EMPTY_FIELD_BOUNDARY
+ DECL_ALIGN (x) = MAX (DECL_ALIGN (x), EMPTY_FIELD_BOUNDARY);
+#endif
+#ifdef PCC_BITFIELD_TYPE_MATTERS
+ DECL_ALIGN (x) = MAX (DECL_ALIGN (x),
+ TYPE_ALIGN (TREE_TYPE (x)));
+#endif
+ }
+ }
+ else
+ {
+ int min_align = (DECL_PACKED (x) ? BITS_PER_UNIT
+ : TYPE_ALIGN (TREE_TYPE (x)));
+ /* Non-bit-fields are aligned for their type, except packed
+ fields which require only BITS_PER_UNIT alignment. */
+ DECL_ALIGN (x) = MAX (DECL_ALIGN (x), min_align);
+ }
+ }
+
+ /* Now DECL_INITIAL is null on all members. */
+
+  /* Delete all duplicate fields from the fieldlist.  */
+ for (x = fieldlist; x && TREE_CHAIN (x);)
+ /* Anonymous fields aren't duplicates. */
+ if (DECL_NAME (TREE_CHAIN (x)) == 0)
+ x = TREE_CHAIN (x);
+ else
+ {
+ register tree y = fieldlist;
+
+ while (1)
+ {
+ if (DECL_NAME (y) == DECL_NAME (TREE_CHAIN (x)))
+ break;
+ if (y == x)
+ break;
+ y = TREE_CHAIN (y);
+ }
+ if (DECL_NAME (y) == DECL_NAME (TREE_CHAIN (x)))
+ {
+ error_with_decl (TREE_CHAIN (x), "duplicate member `%s'");
+ TREE_CHAIN (x) = TREE_CHAIN (TREE_CHAIN (x));
+ }
+ else x = TREE_CHAIN (x);
+ }
+
+ /* Now we have the nearly final fieldlist. Record it,
+ then lay out the structure or union (including the fields). */
+
+ TYPE_FIELDS (t) = fieldlist;
+
+ layout_type (t);
+
+  /* Delete all zero-width bit-fields from the front of the fieldlist.  */
+ while (fieldlist
+ && DECL_INITIAL (fieldlist))
+ fieldlist = TREE_CHAIN (fieldlist);
+  /* Delete all such members from the rest of the fieldlist.  */
+ for (x = fieldlist; x;)
+ {
+ if (TREE_CHAIN (x) && DECL_INITIAL (TREE_CHAIN (x)))
+ TREE_CHAIN (x) = TREE_CHAIN (TREE_CHAIN (x));
+ else x = TREE_CHAIN (x);
+ }
+
+ /* Now we have the truly final field list.
+ Store it in this type and in the variants. */
+
+ TYPE_FIELDS (t) = fieldlist;
+
+ /* If there are lots of fields, sort so we can look through them fast.
+ We arbitrarily consider 16 or more elts to be "a lot". */
+ {
+ int len = 0;
+
+ for (x = fieldlist; x; x = TREE_CHAIN (x))
+ {
+ if (len > 15)
+ break;
+ len += 1;
+ }
+ if (len > 15)
+ {
+ tree *field_array;
+ char *space;
+
+ len += list_length (x);
+ /* Use the same allocation policy here that make_node uses, to
+ ensure that this lives as long as the rest of the struct decl.
+ All decls in an inline function need to be saved. */
+ if (allocation_temporary_p ())
+ space = savealloc (sizeof (struct lang_type) + len * sizeof (tree));
+ else
+ space = oballoc (sizeof (struct lang_type) + len * sizeof (tree));
+
+ TYPE_LANG_SPECIFIC (t) = (struct lang_type *) space;
+ TYPE_LANG_SPECIFIC (t)->len = len;
+
+ field_array = &TYPE_LANG_SPECIFIC (t)->elts[0];
+ len = 0;
+ for (x = fieldlist; x; x = TREE_CHAIN (x))
+ field_array[len++] = x;
+
+ qsort (field_array, len, sizeof (tree), field_decl_cmp);
+ }
+ }
+
+ for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x))
+ {
+ TYPE_FIELDS (x) = TYPE_FIELDS (t);
+ TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t);
+ TYPE_ALIGN (x) = TYPE_ALIGN (t);
+ }
+
+ /* Promote each bit-field's type to int if it is narrower than that. */
+ for (x = fieldlist; x; x = TREE_CHAIN (x))
+ if (DECL_BIT_FIELD (x)
+ && (C_PROMOTING_INTEGER_TYPE_P (TREE_TYPE (x))
+ || DECL_FIELD_SIZE (x) < TYPE_PRECISION (integer_type_node)))
+ {
+ tree type = TREE_TYPE (x);
+
+ /* Preserve unsignedness if traditional
+ or if not really getting any wider. */
+ if (TREE_UNSIGNED (type)
+ && (flag_traditional
+ ||
+ (TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)
+ &&
+ DECL_FIELD_SIZE (x) == TYPE_PRECISION (integer_type_node))))
+ TREE_TYPE (x) = unsigned_type_node;
+ else
+ TREE_TYPE (x) = integer_type_node;
+ }
+
+ /* If this structure or union completes the type of any previous
+ variable declaration, lay it out and output its rtl. */
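+  /* For example (illustrative):
+	static struct foo v;	   (type incomplete when declared)
+	struct foo { int x; };	   (now `v' can be laid out)  */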
+
+ if (current_binding_level->n_incomplete != 0)
+ {
+ tree decl;
+ for (decl = current_binding_level->names; decl; decl = TREE_CHAIN (decl))
+ {
+ if (TREE_TYPE (decl) == t
+ && TREE_CODE (decl) != TYPE_DECL)
+ {
+ layout_decl (decl, 0);
+ /* This is a no-op in c-lang.c or something real in objc-actions.c. */
+ maybe_objc_check_decl (decl);
+ rest_of_decl_compilation (decl, NULL_PTR, toplevel, 0);
+ if (! toplevel)
+ expand_decl (decl);
+ --current_binding_level->n_incomplete;
+ }
+ else if (TYPE_SIZE (TREE_TYPE (decl)) == 0
+ && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
+ {
+ tree element = TREE_TYPE (decl);
+ while (TREE_CODE (element) == ARRAY_TYPE)
+ element = TREE_TYPE (element);
+ if (element == t)
+ layout_array_type (TREE_TYPE (decl));
+ }
+ }
+ }
+
+ resume_momentary (old_momentary);
+
+ /* Finish debugging output for this type. */
+ rest_of_type_compilation (t, toplevel);
+
+ /* The matching push is in start_struct. */
+ pop_obstacks ();
+
+ return t;
+}
+
+/* Lay out the type T, and its element type, and so on. */
+
+static void
+layout_array_type (t)
+ tree t;
+{
+ if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
+ layout_array_type (TREE_TYPE (t));
+ layout_type (t);
+}
+
+/* Begin compiling the definition of an enumeration type.
+ NAME is its name (or null if anonymous).
+ Returns the type object, as yet incomplete.
+ Also records info about it so that build_enumerator
+ may be used to declare the individual values as they are read. */
+
+tree
+start_enum (name)
+ tree name;
+{
+ register tree enumtype = 0;
+
+ /* If this is the real definition for a previous forward reference,
+ fill in the contents in the same object that used to be the
+ forward reference. */
+
+ if (name != 0)
+ enumtype = lookup_tag (ENUMERAL_TYPE, name, current_binding_level, 1);
+
+ /* The corresponding pop_obstacks is in finish_enum. */
+ push_obstacks_nochange ();
+ /* If these symbols and types are global, make them permanent. */
+ if (current_binding_level == global_binding_level)
+ end_temporary_allocation ();
+
+ if (enumtype == 0 || TREE_CODE (enumtype) != ENUMERAL_TYPE)
+ {
+ enumtype = make_node (ENUMERAL_TYPE);
+ pushtag (name, enumtype);
+ }
+
+ C_TYPE_BEING_DEFINED (enumtype) = 1;
+
+ if (TYPE_VALUES (enumtype) != 0)
+ {
+ /* This enum is a named one that has been declared already. */
+ error ("redeclaration of `enum %s'", IDENTIFIER_POINTER (name));
+
+ /* Completely replace its old definition.
+ The old enumerators remain defined, however. */
+ TYPE_VALUES (enumtype) = 0;
+ }
+
+ enum_next_value = integer_zero_node;
+ enum_overflow = 0;
+
+ return enumtype;
+}
+
+/* Return the minimum number of bits needed to represent VALUE in a
+ signed or unsigned type, UNSIGNEDP says which. */
+
+static int
+min_precision (value, unsignedp)
+ tree value;
+ int unsignedp;
+{
+ int log;
+
+ /* If the value is negative, compute its negative minus 1. The latter
+ adjustment is because the absolute value of the largest negative value
+ is one larger than the largest positive value. This is equivalent to
+ a bit-wise negation, so use that operation instead. */
+
+ if (tree_int_cst_sgn (value) < 0)
+ value = fold (build1 (BIT_NOT_EXPR, TREE_TYPE (value), value));
+
+ /* Return the number of bits needed, taking into account the fact
+ that we need one more bit for a signed than unsigned type. */
+
+ if (integer_zerop (value))
+ log = 0;
+ else if (TREE_INT_CST_HIGH (value) != 0)
+ log = HOST_BITS_PER_WIDE_INT + floor_log2 (TREE_INT_CST_HIGH (value));
+ else
+ log = floor_log2 (TREE_INT_CST_LOW (value));
+
+ return log + 1 + ! unsignedp;
+}
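+
+/* A worked example of the above (illustrative): for VALUE == -3 with
+   UNSIGNEDP == 0, the bit-wise negation gives ~-3 == 2, floor_log2 (2)
+   is 1, and the result is 1 + 1 + 1 == 3 -- three bits indeed cover
+   the signed range -4 .. 3, which contains -3.  */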
+
+/* After processing and defining all the values of an enumeration type,
+ install their decls in the enumeration type and finish it off.
+ ENUMTYPE is the type object and VALUES a list of decl-value pairs.
+ Returns ENUMTYPE. */
+
+tree
+finish_enum (enumtype, values)
+ register tree enumtype, values;
+{
+ register tree pair, tem;
+ tree minnode = 0, maxnode = 0;
+ int lowprec, highprec, precision;
+ int toplevel = global_binding_level == current_binding_level;
+
+ if (in_parm_level_p ())
+ warning ("enum defined inside parms");
+
+ /* Calculate the maximum value of any enumerator in this type. */
+
+ if (values == error_mark_node)
+ minnode = maxnode = integer_zero_node;
+ else
+ for (pair = values; pair; pair = TREE_CHAIN (pair))
+ {
+ tree value = TREE_VALUE (pair);
+ if (pair == values)
+ minnode = maxnode = TREE_VALUE (pair);
+ else
+ {
+ if (tree_int_cst_lt (maxnode, value))
+ maxnode = value;
+ if (tree_int_cst_lt (value, minnode))
+ minnode = value;
+ }
+ }
+
+ TYPE_MIN_VALUE (enumtype) = minnode;
+ TYPE_MAX_VALUE (enumtype) = maxnode;
+
+ /* An enum can have some negative values; then it is signed. */
+ TREE_UNSIGNED (enumtype) = tree_int_cst_sgn (minnode) >= 0;
+
+ /* Determine the precision this type needs. */
+
+ lowprec = min_precision (minnode, TREE_UNSIGNED (enumtype));
+ highprec = min_precision (maxnode, TREE_UNSIGNED (enumtype));
+ precision = MAX (lowprec, highprec);
+
+ if (flag_short_enums || precision > TYPE_PRECISION (integer_type_node))
+ /* Use the width of the narrowest normal C type which is wide enough. */
+ TYPE_PRECISION (enumtype) = TYPE_PRECISION (type_for_size (precision, 1));
+ else
+ TYPE_PRECISION (enumtype) = TYPE_PRECISION (integer_type_node);
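+
+  /* For example (illustrative): for enum { A = -1, B = 200 } the
+     computation above gives lowprec == 2 and highprec == 9, so the
+     precision needed is 9.  Without -fshort-enums the enum is then
+     laid out as an int; with it, the enum gets the narrowest type of
+     at least 9 bits (16 bits on typical targets).  */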
+
+ TYPE_SIZE (enumtype) = 0;
+ layout_type (enumtype);
+
+ if (values != error_mark_node)
+ {
+ /* Change the type of the enumerators to be the enum type.
+ Formerly this was done only for enums that fit in an int,
+ but the comment said it was done only for enums wider than int.
+ It seems necessary to do this for wide enums,
+ and best not to change what's done for ordinary narrower ones. */
+ for (pair = values; pair; pair = TREE_CHAIN (pair))
+ {
+ TREE_TYPE (TREE_PURPOSE (pair)) = enumtype;
+ DECL_SIZE (TREE_PURPOSE (pair)) = TYPE_SIZE (enumtype);
+ if (TREE_CODE (TREE_PURPOSE (pair)) != FUNCTION_DECL)
+ DECL_ALIGN (TREE_PURPOSE (pair)) = TYPE_ALIGN (enumtype);
+ }
+
+ /* Replace the decl nodes in VALUES with their names. */
+ for (pair = values; pair; pair = TREE_CHAIN (pair))
+ TREE_PURPOSE (pair) = DECL_NAME (TREE_PURPOSE (pair));
+
+ TYPE_VALUES (enumtype) = values;
+ }
+
+ /* Fix up all variant types of this enum type. */
+ for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem))
+ {
+ TYPE_VALUES (tem) = TYPE_VALUES (enumtype);
+ TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype);
+ TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype);
+ TYPE_SIZE (tem) = TYPE_SIZE (enumtype);
+ TYPE_MODE (tem) = TYPE_MODE (enumtype);
+ TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype);
+ TYPE_ALIGN (tem) = TYPE_ALIGN (enumtype);
+ TREE_UNSIGNED (tem) = TREE_UNSIGNED (enumtype);
+ }
+
+ /* Finish debugging output for this type. */
+ rest_of_type_compilation (enumtype, toplevel);
+
+ /* This matches a push in start_enum. */
+ pop_obstacks ();
+
+ return enumtype;
+}
+
+/* Build and install a CONST_DECL for one value of the
+ current enumeration type (one that was begun with start_enum).
+ Return a tree-list containing the CONST_DECL and its value.
+ Assignment of sequential values by default is handled here. */
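+
+/* For example, in `enum e { A, B = 5, C };' the values are 0, 5 and 6:
+   A and C take the running default from enum_next_value, while B
+   resets that default from its explicit value.  */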
+
+tree
+build_enumerator (name, value)
+ tree name, value;
+{
+ register tree decl, type;
+
+ /* Validate and default VALUE. */
+
+ /* Remove no-op casts from the value. */
+ if (value)
+ STRIP_TYPE_NOPS (value);
+
+ if (value != 0)
+ {
+ if (TREE_CODE (value) == INTEGER_CST)
+ {
+ value = default_conversion (value);
+ constant_expression_warning (value);
+ }
+ else
+ {
+ error ("enumerator value for `%s' not integer constant",
+ IDENTIFIER_POINTER (name));
+ value = 0;
+ }
+ }
+
+ /* Default based on previous value. */
+ /* It should no longer be possible to have NON_LVALUE_EXPR
+ in the default. */
+ if (value == 0)
+ {
+ value = enum_next_value;
+ if (enum_overflow)
+ error ("overflow in enumeration values");
+ }
+
+ if (pedantic && ! int_fits_type_p (value, integer_type_node))
+ {
+ pedwarn ("ANSI C restricts enumerator values to range of `int'");
+ value = integer_zero_node;
+ }
+
+ /* Set basis for default for next value. */
+ enum_next_value = build_binary_op (PLUS_EXPR, value, integer_one_node, 0);
+ enum_overflow = tree_int_cst_lt (enum_next_value, value);
+
+ /* Now create a declaration for the enum value name. */
+
+ type = TREE_TYPE (value);
+ type = type_for_size (MAX (TYPE_PRECISION (type),
+ TYPE_PRECISION (integer_type_node)),
+ ((flag_traditional
+ || TYPE_PRECISION (type) >= TYPE_PRECISION (integer_type_node))
+ && TREE_UNSIGNED (type)));
+
+ decl = build_decl (CONST_DECL, name, type);
+ DECL_INITIAL (decl) = value;
+ TREE_TYPE (value) = type;
+ pushdecl (decl);
+
+ return saveable_tree_cons (decl, value, NULL_TREE);
+}
+
+/* Create the FUNCTION_DECL for a function definition.
+ DECLSPECS and DECLARATOR are the parts of the declaration;
+ they describe the function's name and the type it returns,
+ but twisted together in a fashion that parallels the syntax of C.
+
+ This function creates a binding context for the function body
+ as well as setting up the FUNCTION_DECL in current_function_decl.
+
+ Returns 1 on success. If the DECLARATOR is not suitable for a function
+ (it defines a datum instead), we return 0, which tells
+ yyparse to report a parse error.
+
+ NESTED is nonzero for a function nested within another function. */
+
+int
+start_function (declspecs, declarator, nested)
+ tree declarator, declspecs;
+ int nested;
+{
+ tree decl1, old_decl;
+ tree restype;
+
+ current_function_returns_value = 0; /* Assume, until we see it does. */
+ current_function_returns_null = 0;
+ warn_about_return_type = 0;
+ current_extern_inline = 0;
+ c_function_varargs = 0;
+ named_labels = 0;
+ shadowed_labels = 0;
+
+ decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, 1);
+
+ /* If the declarator is not suitable for a function definition,
+ cause a syntax error. */
+ if (decl1 == 0)
+ return 0;
+
+ announce_function (decl1);
+
+ if (TYPE_SIZE (TREE_TYPE (TREE_TYPE (decl1))) == 0)
+ {
+ error ("return-type is an incomplete type");
+ /* Make it return void instead. */
+ TREE_TYPE (decl1)
+ = build_function_type (void_type_node,
+ TYPE_ARG_TYPES (TREE_TYPE (decl1)));
+ }
+
+ if (warn_about_return_type)
+ warning ("return-type defaults to `int'");
+
+ /* Save the parm names or decls from this function's declarator
+ where store_parm_decls will find them. */
+ current_function_parms = last_function_parms;
+ current_function_parm_tags = last_function_parm_tags;
+
+ /* Make the init_value nonzero so pushdecl knows this is not tentative.
+ error_mark_node is replaced below (in poplevel) with the BLOCK. */
+ DECL_INITIAL (decl1) = error_mark_node;
+
+ /* If this definition isn't a prototype and we had a prototype declaration
+ before, copy the arg type info from that prototype.
+ But not if what we had before was a builtin function. */
+ old_decl = lookup_name_current_level (DECL_NAME (decl1));
+ if (old_decl != 0 && TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE
+ && !DECL_BUILT_IN (old_decl)
+ && (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1)))
+ == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (old_decl))))
+ && TYPE_ARG_TYPES (TREE_TYPE (decl1)) == 0)
+ {
+ TREE_TYPE (decl1) = TREE_TYPE (old_decl);
+ current_function_prototype_file = DECL_SOURCE_FILE (old_decl);
+ current_function_prototype_line = DECL_SOURCE_LINE (old_decl);
+ }
+
+ /* Optionally warn of old-fashioned def with no previous prototype. */
+ if (warn_strict_prototypes
+ && TYPE_ARG_TYPES (TREE_TYPE (decl1)) == 0
+ && !(old_decl != 0 && TYPE_ARG_TYPES (TREE_TYPE (old_decl)) != 0))
+ warning ("function declaration isn't a prototype");
+ /* Optionally warn of any global def with no previous prototype. */
+ else if (warn_missing_prototypes
+ && TREE_PUBLIC (decl1)
+ && !(old_decl != 0 && TYPE_ARG_TYPES (TREE_TYPE (old_decl)) != 0)
+ && strcmp ("main", IDENTIFIER_POINTER (DECL_NAME (decl1))))
+ warning_with_decl (decl1, "no previous prototype for `%s'");
+ /* Optionally warn of any def with no previous prototype
+ if the function has already been used. */
+ else if (warn_missing_prototypes
+ && old_decl != 0 && TREE_USED (old_decl)
+ && !(old_decl != 0 && TYPE_ARG_TYPES (TREE_TYPE (old_decl)) != 0))
+ warning_with_decl (decl1,
+ "`%s' was used with no prototype before its definition");
+ /* Optionally warn of any global def with no previous declaration. */
+ else if (warn_missing_declarations
+ && TREE_PUBLIC (decl1)
+ && old_decl == 0
+ && strcmp ("main", IDENTIFIER_POINTER (DECL_NAME (decl1))))
+ warning_with_decl (decl1, "no previous declaration for `%s'");
+ /* Optionally warn of any def with no previous declaration
+ if the function has already been used. */
+ else if (warn_missing_declarations
+ && old_decl != 0 && TREE_USED (old_decl))
+ warning_with_decl (decl1,
+ "`%s' was used with no declaration before its definition");
+
+ /* This is a definition, not a reference.
+ So normally clear DECL_EXTERNAL.
+ However, `extern inline' acts like a declaration
+ except for defining how to inline. So set DECL_EXTERNAL in that case. */
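+  /* For example (illustrative), under these semantics
+	extern inline int sq (int x) { return x * x; }
+     emits no independent object code for `sq'; any call that is not
+     inlined refers to a definition supplied elsewhere.  */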
+ DECL_EXTERNAL (decl1) = current_extern_inline;
+
+ /* This function exists in static storage.
+ (This does not mean `static' in the C sense!) */
+ TREE_STATIC (decl1) = 1;
+
+ /* A nested function is not global. */
+ if (current_function_decl != 0)
+ TREE_PUBLIC (decl1) = 0;
+
+ /* Record the decl so that the function name is defined.
+ If we already have a decl for this name, and it is a FUNCTION_DECL,
+ use the old decl. */
+
+ current_function_decl = pushdecl (decl1);
+
+ pushlevel (0);
+ declare_parm_level (1);
+ current_binding_level->subblocks_tag_transparent = 1;
+
+ make_function_rtl (current_function_decl);
+
+ restype = TREE_TYPE (TREE_TYPE (current_function_decl));
+ /* Promote the value to int before returning it. */
+ if (C_PROMOTING_INTEGER_TYPE_P (restype))
+ {
+ /* It retains unsignedness if traditional
+ or if not really getting wider. */
+ if (TREE_UNSIGNED (restype)
+ && (flag_traditional
+ || (TYPE_PRECISION (restype)
+ == TYPE_PRECISION (integer_type_node))))
+ restype = unsigned_type_node;
+ else
+ restype = integer_type_node;
+ }
+ DECL_RESULT (current_function_decl)
+ = build_decl (RESULT_DECL, NULL_TREE, restype);
+
+ if (!nested)
+ /* Allocate further tree nodes temporarily during compilation
+ of this function only. */
+ temporary_allocation ();
+
+ /* If this fcn was already referenced via a block-scope `extern' decl
+ (or an implicit decl), propagate certain information about the usage. */
+ if (TREE_ADDRESSABLE (DECL_ASSEMBLER_NAME (current_function_decl)))
+ TREE_ADDRESSABLE (current_function_decl) = 1;
+
+ return 1;
+}
+
+/* Record that this function is going to be a varargs function.
+ This is called before store_parm_decls, which is too early
+ to call mark_varargs directly. */
+
+void
+c_mark_varargs ()
+{
+ c_function_varargs = 1;
+}
+
+/* Store the parameter declarations into the current function declaration.
+ This is called after parsing the parameter declarations, before
+ digesting the body of the function.
+
+ For an old-style definition, modify the function's type
+ to specify at least the number of arguments. */
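+
+/* For example, the two definition styles handled here are
+
+	int f (int a, char *b) { ... }		   (ANSI prototype)
+	int f (a, b) int a; char *b; { ... }	   (old style)
+
+   where in the old style the parameter names arrive separately from
+   the declarations that give their types.  */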
+
+void
+store_parm_decls ()
+{
+ register tree fndecl = current_function_decl;
+ register tree parm;
+
+ /* This is either a chain of PARM_DECLs (if a prototype was used)
+ or a list of IDENTIFIER_NODEs (for an old-fashioned C definition). */
+ tree specparms = current_function_parms;
+
+ /* This is a list of types declared among parms in a prototype. */
+ tree parmtags = current_function_parm_tags;
+
+ /* This is a chain of PARM_DECLs from old-style parm declarations. */
+ register tree parmdecls = getdecls ();
+
+ /* This is a chain of any other decls that came in among the parm
+ declarations. If a parm is declared with enum {foo, bar} x;
+ then CONST_DECLs for foo and bar are put here. */
+ tree nonparms = 0;
+
+ /* Nonzero if this definition is written with a prototype. */
+ int prototype = 0;
+
+ if (specparms != 0 && TREE_CODE (specparms) != TREE_LIST)
+ {
+ /* This case is when the function was defined with an ANSI prototype.
+ The parms already have decls, so we need not do anything here
+ except record them as in effect
+ and complain if any redundant old-style parm decls were written. */
+
+ register tree next;
+ tree others = 0;
+
+ prototype = 1;
+
+ if (parmdecls != 0)
+ {
+ tree decl, link;
+
+ error_with_decl (fndecl,
+ "parm types given both in parmlist and separately");
+ /* Get rid of the erroneous decls; don't keep them on
+ the list of parms, since they might not be PARM_DECLs. */
+ for (decl = current_binding_level->names;
+ decl; decl = TREE_CHAIN (decl))
+ if (DECL_NAME (decl))
+ IDENTIFIER_LOCAL_VALUE (DECL_NAME (decl)) = 0;
+ for (link = current_binding_level->shadowed;
+ link; link = TREE_CHAIN (link))
+ IDENTIFIER_LOCAL_VALUE (TREE_PURPOSE (link)) = TREE_VALUE (link);
+ current_binding_level->names = 0;
+ current_binding_level->shadowed = 0;
+ }
+
+ specparms = nreverse (specparms);
+ for (parm = specparms; parm; parm = next)
+ {
+ next = TREE_CHAIN (parm);
+ if (TREE_CODE (parm) == PARM_DECL)
+ {
+ if (DECL_NAME (parm) == 0)
+ error_with_decl (parm, "parameter name omitted");
+ else if (TYPE_MAIN_VARIANT (TREE_TYPE (parm)) == void_type_node)
+ {
+ error_with_decl (parm, "parameter `%s' declared void");
+ /* Change the type to error_mark_node so this parameter
+ will be ignored by assign_parms. */
+ TREE_TYPE (parm) = error_mark_node;
+ }
+ pushdecl (parm);
+ }
+ else
+ {
+ /* If we find an enum constant or a type tag,
+ put it aside for the moment. */
+ TREE_CHAIN (parm) = 0;
+ others = chainon (others, parm);
+ }
+ }
+
+ /* Get the decls in their original chain order
+ and record in the function. */
+ DECL_ARGUMENTS (fndecl) = getdecls ();
+
+#if 0
+ /* If this function takes a variable number of arguments,
+ add a phony parameter to the end of the parm list,
+ to represent the position of the first unnamed argument. */
+ if (TREE_VALUE (tree_last (TYPE_ARG_TYPES (TREE_TYPE (fndecl))))
+ != void_type_node)
+ {
+ tree dummy = build_decl (PARM_DECL, NULL_TREE, void_type_node);
+ /* Let's hope the address of the unnamed parm
+ won't depend on its type. */
+ TREE_TYPE (dummy) = integer_type_node;
+ DECL_ARG_TYPE (dummy) = integer_type_node;
+ DECL_ARGUMENTS (fndecl)
+ = chainon (DECL_ARGUMENTS (fndecl), dummy);
+ }
+#endif
+
+ /* Now pushdecl the enum constants. */
+ for (parm = others; parm; parm = next)
+ {
+ next = TREE_CHAIN (parm);
+ if (DECL_NAME (parm) == 0)
+ ;
+ else if (TYPE_MAIN_VARIANT (TREE_TYPE (parm)) == void_type_node)
+ ;
+ else if (TREE_CODE (parm) != PARM_DECL)
+ pushdecl (parm);
+ }
+
+ storetags (chainon (parmtags, gettags ()));
+ }
+ else
+ {
+ /* SPECPARMS is an identifier list--a chain of TREE_LIST nodes
+ each with a parm name as the TREE_VALUE.
+
+ PARMDECLS is a chain of declarations for parameters.
+ Warning! It can also contain CONST_DECLs which are not parameters
+ but are names of enumerators of any enum types
+ declared among the parameters.
+
+ First match each formal parameter name with its declaration.
+ Associate decls with the names and store the decls
+ into the TREE_PURPOSE slots. */
+
+ for (parm = parmdecls; parm; parm = TREE_CHAIN (parm))
+ DECL_RESULT (parm) = 0;
+
+ for (parm = specparms; parm; parm = TREE_CHAIN (parm))
+ {
+ register tree tail, found = NULL;
+
+ if (TREE_VALUE (parm) == 0)
+ {
+ error_with_decl (fndecl, "parameter name missing from parameter list");
+ TREE_PURPOSE (parm) = 0;
+ continue;
+ }
+
+ /* See if any of the parmdecls specifies this parm by name.
+ Ignore any enumerator decls. */
+ for (tail = parmdecls; tail; tail = TREE_CHAIN (tail))
+ if (DECL_NAME (tail) == TREE_VALUE (parm)
+ && TREE_CODE (tail) == PARM_DECL)
+ {
+ found = tail;
+ break;
+ }
+
+ /* If declaration already marked, we have a duplicate name.
+ Complain, and don't use this decl twice. */
+ if (found && DECL_RESULT (found) != 0)
+ {
+ error_with_decl (found, "multiple parameters named `%s'");
+ found = 0;
+ }
+
+ /* If the declaration says "void", complain and ignore it. */
+ if (found && TYPE_MAIN_VARIANT (TREE_TYPE (found)) == void_type_node)
+ {
+ error_with_decl (found, "parameter `%s' declared void");
+ TREE_TYPE (found) = integer_type_node;
+ DECL_ARG_TYPE (found) = integer_type_node;
+ layout_decl (found, 0);
+ }
+
+ /* Traditionally, a parm declared float is actually a double. */
+ if (found && flag_traditional
+ && TYPE_MAIN_VARIANT (TREE_TYPE (found)) == float_type_node)
+ {
+ TREE_TYPE (found) = double_type_node;
+ DECL_ARG_TYPE (found) = double_type_node;
+ layout_decl (found, 0);
+ }
+
+ /* If no declaration found, default to int. */
+ if (!found)
+ {
+ found = build_decl (PARM_DECL, TREE_VALUE (parm),
+ integer_type_node);
+ DECL_ARG_TYPE (found) = TREE_TYPE (found);
+ DECL_SOURCE_LINE (found) = DECL_SOURCE_LINE (fndecl);
+ DECL_SOURCE_FILE (found) = DECL_SOURCE_FILE (fndecl);
+ if (extra_warnings)
+ warning_with_decl (found, "type of `%s' defaults to `int'");
+ pushdecl (found);
+ }
+
+ TREE_PURPOSE (parm) = found;
+
+ /* Mark this decl as "already found" -- see test, above.
+ It is safe to use DECL_RESULT for this
+ since it is not used in PARM_DECLs or CONST_DECLs. */
+ DECL_RESULT (found) = error_mark_node;
+ }
+
+ /* Put anything which is on the parmdecls chain and which is
+ not a PARM_DECL onto the list NONPARMS. (The types of
+ non-parm things which might appear on the list include
+ enumerators and NULL-named TYPE_DECL nodes.) Complain about
+ any actual PARM_DECLs not matched with any names. */
+
+ nonparms = 0;
+ for (parm = parmdecls; parm; )
+ {
+ tree next = TREE_CHAIN (parm);
+ TREE_CHAIN (parm) = 0;
+
+ if (TREE_CODE (parm) != PARM_DECL)
+ nonparms = chainon (nonparms, parm);
+ else
+ {
+ /* Complain about args with incomplete types. */
+ if (TYPE_SIZE (TREE_TYPE (parm)) == 0)
+ {
+ error_with_decl (parm, "parameter `%s' has incomplete type");
+ TREE_TYPE (parm) = error_mark_node;
+ }
+
+ if (DECL_RESULT (parm) == 0)
+ {
+ error_with_decl (parm,
+ "declaration for parameter `%s' but no such parameter");
+ /* Pretend the parameter was not missing.
+ This gets us to a standard state and minimizes
+ further error messages. */
+ specparms
+ = chainon (specparms,
+ tree_cons (parm, NULL_TREE, NULL_TREE));
+ }
+ }
+
+ parm = next;
+ }
+
+ /* Chain the declarations together in the order of the list of names. */
+ /* Store that chain in the function decl, replacing the list of names. */
+ parm = specparms;
+ DECL_ARGUMENTS (fndecl) = 0;
+ {
+ register tree last;
+ for (last = 0; parm; parm = TREE_CHAIN (parm))
+ if (TREE_PURPOSE (parm))
+ {
+ if (last == 0)
+ DECL_ARGUMENTS (fndecl) = TREE_PURPOSE (parm);
+ else
+ TREE_CHAIN (last) = TREE_PURPOSE (parm);
+ last = TREE_PURPOSE (parm);
+ TREE_CHAIN (last) = 0;
+ }
+ }
+
+ /* If there was a previous prototype,
+ set the DECL_ARG_TYPE of each argument according to
+ the type previously specified, and report any mismatches. */
+
+ if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
+ {
+ register tree type;
+ for (parm = DECL_ARGUMENTS (fndecl),
+ type = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
+ parm || (type && (TYPE_MAIN_VARIANT (TREE_VALUE (type))
+ != void_type_node));
+ parm = TREE_CHAIN (parm), type = TREE_CHAIN (type))
+ {
+ if (parm == 0 || type == 0
+ || TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node)
+ {
+ error ("number of arguments doesn't match prototype");
+ error_with_file_and_line (current_function_prototype_file,
+ current_function_prototype_line,
+ "prototype declaration");
+ break;
+ }
+ /* Type for passing arg must be consistent
+ with that declared for the arg. */
+ if (! comptypes (DECL_ARG_TYPE (parm), TREE_VALUE (type)))
+ {
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (parm))
+ == TYPE_MAIN_VARIANT (TREE_VALUE (type)))
+ {
+ /* Adjust argument to match prototype. E.g. a previous
+ `int foo(float);' prototype causes
+ `int foo(x) float x; {...}' to be treated like
+ `int foo(float x) {...}'. This is particularly
+ useful for argument types like uid_t. */
+ DECL_ARG_TYPE (parm) = TREE_TYPE (parm);
+#ifdef PROMOTE_PROTOTYPES
+ if ((TREE_CODE (TREE_TYPE (parm)) == INTEGER_TYPE
+ || TREE_CODE (TREE_TYPE (parm)) == ENUMERAL_TYPE)
+ && TYPE_PRECISION (TREE_TYPE (parm))
+ < TYPE_PRECISION (integer_type_node))
+ DECL_ARG_TYPE (parm) = integer_type_node;
+#endif
+ if (pedantic)
+ {
+ pedwarn ("promoted argument `%s' doesn't match prototype",
+ IDENTIFIER_POINTER (DECL_NAME (parm)));
+ warning_with_file_and_line
+ (current_function_prototype_file,
+ current_function_prototype_line,
+ "prototype declaration");
+ }
+ }
+ /* If -traditional, allow `int' argument to match
+ `unsigned' prototype. */
+ else if (! (flag_traditional
+ && TYPE_MAIN_VARIANT (TREE_TYPE (parm)) == integer_type_node
+ && TYPE_MAIN_VARIANT (TREE_VALUE (type)) == unsigned_type_node))
+ {
+ error ("argument `%s' doesn't match prototype",
+ IDENTIFIER_POINTER (DECL_NAME (parm)));
+ error_with_file_and_line (current_function_prototype_file,
+ current_function_prototype_line,
+ "prototype declaration");
+ }
+ }
+ }
+ TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = 0;
+ }
+
+ /* Otherwise, create a prototype that would match. */
+
+ else
+ {
+ tree actual = 0, last = 0, type;
+
+ for (parm = DECL_ARGUMENTS (fndecl); parm; parm = TREE_CHAIN (parm))
+ {
+ type = perm_tree_cons (NULL_TREE, DECL_ARG_TYPE (parm),
+ NULL_TREE);
+ if (last)
+ TREE_CHAIN (last) = type;
+ else
+ actual = type;
+ last = type;
+ }
+ type = perm_tree_cons (NULL_TREE, void_type_node, NULL_TREE);
+ if (last)
+ TREE_CHAIN (last) = type;
+ else
+ actual = type;
+
+ /* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES
+ of the type of this function, but we need to avoid having this
+ affect the types of other similarly-typed functions, so we must
+ first force the generation of an identical (but separate) type
+ node for the relevant function type. The new node we create
+ will be a variant of the main variant of the original function
+ type. */
+
+ TREE_TYPE (fndecl) = build_type_copy (TREE_TYPE (fndecl));
+
+ TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual;
+ }
+
+ /* Now store the final chain of decls for the arguments
+ as the decl-chain of the current lexical scope.
+ Put the enumerators in as well, at the front so that
+ DECL_ARGUMENTS is not modified. */
+
+ storedecls (chainon (nonparms, DECL_ARGUMENTS (fndecl)));
+ }
+
+ /* Make sure the binding level for the top of the function body
+ gets a BLOCK if there are any in the function.
+ Otherwise, the dbx output is wrong. */
+
+ keep_next_if_subblocks = 1;
+
+ /* ??? This might be an improvement,
+ but needs to be thought about some more. */
+#if 0
+ keep_next_level_flag = 1;
+#endif
+
+ /* Write a record describing this function definition to the prototypes
+ file (if requested). */
+
+ gen_aux_info_record (fndecl, 1, 0, prototype);
+
+ /* Initialize the RTL code for the function. */
+
+ init_function_start (fndecl, input_filename, lineno);
+
+ /* If this is a varargs function, inform function.c. */
+
+ if (c_function_varargs)
+ mark_varargs ();
+
+ /* Declare __FUNCTION__ and __PRETTY_FUNCTION__ for this function. */
+
+ declare_function_name ();
+
+ /* Set up parameters and prepare for return, for the function. */
+
+ expand_function_start (fndecl, 0);
+
+ /* If this function is `main', emit a call to `__main'
+ to run global initializers, etc. */
+ if (DECL_NAME (fndecl)
+ && strcmp (IDENTIFIER_POINTER (DECL_NAME (fndecl)), "main") == 0
+ && DECL_CONTEXT (fndecl) == NULL_TREE)
+ expand_main_function ();
+}
+
+/* SPECPARMS is an identifier list--a chain of TREE_LIST nodes
+ each with a parm name as the TREE_VALUE. A null pointer as TREE_VALUE
+ stands for an ellipsis in the identifier list.
+
+ PARMLIST is the data returned by get_parm_info for the
+ parmlist that follows the semicolon.
+
+ We return a value of the same sort that get_parm_info returns,
+ except that it describes the combination of identifiers and parmlist. */
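+
+/* Illustratively (the surface syntax here is an assumption, not taken
+   from this file), the construct handled is a GNU extension along the
+   lines of
+
+	int f (a, b; int a, char *b) { ... }
+
+   in which declarations follow the identifier list after a semicolon
+   inside the parentheses.  */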
+
+tree
+combine_parm_decls (specparms, parmlist, void_at_end)
+ tree specparms, parmlist;
+ int void_at_end;
+{
+ register tree fndecl = current_function_decl;
+ register tree parm;
+
+ tree parmdecls = TREE_PURPOSE (parmlist);
+
+ /* This is a chain of any other decls that came in among the parm
+ declarations. They were separated already by get_parm_info,
+ so we just need to keep them separate. */
+ tree nonparms = TREE_VALUE (parmlist);
+
+ tree types = 0;
+
+ for (parm = parmdecls; parm; parm = TREE_CHAIN (parm))
+ DECL_RESULT (parm) = 0;
+
+ for (parm = specparms; parm; parm = TREE_CHAIN (parm))
+ {
+ register tree tail, found = NULL;
+
+ /* See if any of the parmdecls specifies this parm by name. */
+ for (tail = parmdecls; tail; tail = TREE_CHAIN (tail))
+ if (DECL_NAME (tail) == TREE_VALUE (parm))
+ {
+ found = tail;
+ break;
+ }
+
+ /* If declaration already marked, we have a duplicate name.
+ Complain, and don't use this decl twice. */
+ if (found && DECL_RESULT (found) != 0)
+ {
+ error_with_decl (found, "multiple parameters named `%s'");
+ found = 0;
+ }
+
+ /* If the declaration says "void", complain and ignore it. */
+ if (found && TYPE_MAIN_VARIANT (TREE_TYPE (found)) == void_type_node)
+ {
+ error_with_decl (found, "parameter `%s' declared void");
+ TREE_TYPE (found) = integer_type_node;
+ DECL_ARG_TYPE (found) = integer_type_node;
+ layout_decl (found, 0);
+ }
+
+ /* Traditionally, a parm declared float is actually a double. */
+ if (found && flag_traditional
+ && TYPE_MAIN_VARIANT (TREE_TYPE (found)) == float_type_node)
+ {
+ TREE_TYPE (found) = double_type_node;
+ DECL_ARG_TYPE (found) = double_type_node;
+ layout_decl (found, 0);
+ }
+
+ /* If no declaration found, default to int. */
+ if (!found)
+ {
+ found = build_decl (PARM_DECL, TREE_VALUE (parm),
+ integer_type_node);
+ DECL_ARG_TYPE (found) = TREE_TYPE (found);
+ DECL_SOURCE_LINE (found) = DECL_SOURCE_LINE (fndecl);
+ DECL_SOURCE_FILE (found) = DECL_SOURCE_FILE (fndecl);
+ error_with_decl (found, "type of parameter `%s' is not declared");
+ pushdecl (found);
+ }
+
+ TREE_PURPOSE (parm) = found;
+
+ /* Mark this decl as "already found" -- see test, above.
+ It is safe to use DECL_RESULT for this
+ since it is not used in PARM_DECLs or CONST_DECLs. */
+ DECL_RESULT (found) = error_mark_node;
+ }
+
+ /* Complain about any actual PARM_DECLs not matched with any names. */
+
+ for (parm = parmdecls; parm; )
+ {
+ tree next = TREE_CHAIN (parm);
+ TREE_CHAIN (parm) = 0;
+
+ /* Complain about args with incomplete types. */
+ if (TYPE_SIZE (TREE_TYPE (parm)) == 0)
+ {
+ error_with_decl (parm, "parameter `%s' has incomplete type");
+ TREE_TYPE (parm) = error_mark_node;
+ }
+
+ if (DECL_RESULT (parm) == 0)
+ {
+ error_with_decl (parm,
+ "declaration for parameter `%s' but no such parameter");
+ /* Pretend the parameter was not missing.
+ This gets us to a standard state and minimizes
+ further error messages. */
+ specparms
+ = chainon (specparms,
+ tree_cons (parm, NULL_TREE, NULL_TREE));
+ }
+
+ parm = next;
+ }
+
+ /* Chain the declarations together in the order of the list of names.
+ At the same time, build up a list of their types, in reverse order. */
+
+ parm = specparms;
+ parmdecls = 0;
+ {
+ register tree last;
+ for (last = 0; parm; parm = TREE_CHAIN (parm))
+ if (TREE_PURPOSE (parm))
+ {
+ if (last == 0)
+ parmdecls = TREE_PURPOSE (parm);
+ else
+ TREE_CHAIN (last) = TREE_PURPOSE (parm);
+ last = TREE_PURPOSE (parm);
+ TREE_CHAIN (last) = 0;
+
+ types = saveable_tree_cons (NULL_TREE, TREE_TYPE (parm), types);
+ }
+ }
+
+ if (void_at_end)
+ return saveable_tree_cons (parmdecls, nonparms,
+ nreverse (saveable_tree_cons (NULL_TREE, void_type_node, types)));
+
+ return saveable_tree_cons (parmdecls, nonparms, nreverse (types));
+}
+
+/* Finish up a function declaration and compile that function
+   all the way to assembler language output.  Then free the storage
+ for the function definition.
+
+ This is called after parsing the body of the function definition.
+
+ NESTED is nonzero if the function being finished is nested in another. */
+
+void
+finish_function (nested)
+ int nested;
+{
+ register tree fndecl = current_function_decl;
+
+/* TREE_READONLY (fndecl) = 1;
+ This caused &foo to be of type ptr-to-const-function
+ which then got a warning when stored in a ptr-to-function variable. */
+
+ poplevel (1, 0, 1);
+ BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
+
+ /* Must mark the RESULT_DECL as being in this function. */
+
+ DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;
+
+ /* Obey `register' declarations if `setjmp' is called in this fn. */
+ if (flag_traditional && current_function_calls_setjmp)
+ {
+ setjmp_protect (DECL_INITIAL (fndecl));
+ setjmp_protect_args ();
+ }
+
+#ifdef DEFAULT_MAIN_RETURN
+ if (! strcmp (IDENTIFIER_POINTER (DECL_NAME (fndecl)), "main"))
+ {
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl)))
+ != integer_type_node)
+ warning_with_decl (fndecl, "return type of `%s' is not `int'");
+ else
+ {
+ /* Make it so that `main' always returns success by default. */
+ DEFAULT_MAIN_RETURN;
+ }
+ }
+#endif
+
+ /* Generate rtl for function exit. */
+ expand_function_end (input_filename, lineno, 0);
+
+ /* So we can tell if jump_optimize sets it to 1. */
+ can_reach_end = 0;
+
+ /* Run the optimizers and output the assembler code for this function. */
+ rest_of_compilation (fndecl);
+
+ current_function_returns_null |= can_reach_end;
+
+ if (TREE_THIS_VOLATILE (fndecl) && current_function_returns_null)
+ warning ("`noreturn' function does return");
+ else if (warn_return_type && can_reach_end
+ && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl))) != void_type_node)
+ /* If this function returns non-void and control can drop through,
+ complain. */
+ warning ("control reaches end of non-void function");
+ /* With just -W, complain only if function returns both with
+ and without a value. */
+ else if (extra_warnings
+ && current_function_returns_value && current_function_returns_null)
+ warning ("this function may return with or without a value");
+
+ /* If requested, warn about function definitions where the function will
+ return a value (usually of some struct or union type) which itself will
+ take up a lot of stack space. */
+
+ if (warn_larger_than && !DECL_EXTERNAL (fndecl) && TREE_TYPE (fndecl))
+ {
+ register tree ret_type = TREE_TYPE (TREE_TYPE (fndecl));
+
+ if (ret_type)
+ {
+ register tree ret_type_size = TYPE_SIZE (ret_type);
+
+ if (TREE_CODE (ret_type_size) == INTEGER_CST)
+ {
+ unsigned units
+ = TREE_INT_CST_LOW (ret_type_size) / BITS_PER_UNIT;
+
+ if (units > larger_than_size)
+ warning_with_decl (fndecl,
+ "size of return value of `%s' is %u bytes",
+ units);
+ }
+ }
+ }
+
+ /* Free all the tree nodes making up this function. */
+ /* Switch back to allocating nodes permanently
+ until we start another function. */
+ if (! nested)
+ permanent_allocation (1);
+
+ if (DECL_SAVED_INSNS (fndecl) == 0 && ! nested)
+ {
+ /* Stop pointing to the local nodes about to be freed. */
+ /* But DECL_INITIAL must remain nonzero so we know this
+ was an actual function definition. */
+ /* For a nested function, this is done in pop_c_function_context. */
+ /* If rest_of_compilation set this to 0, leave it 0. */
+ if (DECL_INITIAL (fndecl) != 0)
+ DECL_INITIAL (fndecl) = error_mark_node;
+ DECL_ARGUMENTS (fndecl) = 0;
+ }
+
+ if (! nested)
+ {
+ /* Let the error reporting routines know that we're outside a
+ function. For a nested function, this value is used in
+ pop_c_function_context and then reset via pop_function_context. */
+ current_function_decl = NULL;
+ }
+}
+
+/* Save and restore the variables in this file and elsewhere
+ that keep track of the progress of compilation of the current function.
+ Used for nested functions. */
+
+struct c_function
+{
+ struct c_function *next;
+ tree named_labels;
+ tree shadowed_labels;
+ int returns_value;
+ int returns_null;
+ int warn_about_return_type;
+ int extern_inline;
+ struct binding_level *binding_level;
+};
+
+struct c_function *c_function_chain;
+
+/* Save and reinitialize the variables
+ used during compilation of a C function. */
+
+void
+push_c_function_context ()
+{
+ struct c_function *p
+ = (struct c_function *) xmalloc (sizeof (struct c_function));
+
+ if (pedantic)
+ pedwarn ("ANSI C forbids nested functions");
+
+ push_function_context ();
+
+ p->next = c_function_chain;
+ c_function_chain = p;
+
+ p->named_labels = named_labels;
+ p->shadowed_labels = shadowed_labels;
+ p->returns_value = current_function_returns_value;
+ p->returns_null = current_function_returns_null;
+ p->warn_about_return_type = warn_about_return_type;
+ p->extern_inline = current_extern_inline;
+ p->binding_level = current_binding_level;
+}
+
+/* Restore the variables used during compilation of a C function. */
+
+void
+pop_c_function_context ()
+{
+ struct c_function *p = c_function_chain;
+ tree link;
+
+ /* Bring back all the labels that were shadowed. */
+ for (link = shadowed_labels; link; link = TREE_CHAIN (link))
+ if (DECL_NAME (TREE_VALUE (link)) != 0)
+ IDENTIFIER_LABEL_VALUE (DECL_NAME (TREE_VALUE (link)))
+ = TREE_VALUE (link);
+
+ if (DECL_SAVED_INSNS (current_function_decl) == 0)
+ {
+ /* Stop pointing to the local nodes about to be freed. */
+ /* But DECL_INITIAL must remain nonzero so we know this
+ was an actual function definition. */
+ DECL_INITIAL (current_function_decl) = error_mark_node;
+ DECL_ARGUMENTS (current_function_decl) = 0;
+ }
+
+ pop_function_context ();
+
+ c_function_chain = p->next;
+
+ named_labels = p->named_labels;
+ shadowed_labels = p->shadowed_labels;
+ current_function_returns_value = p->returns_value;
+ current_function_returns_null = p->returns_null;
+ warn_about_return_type = p->warn_about_return_type;
+ current_extern_inline = p->extern_inline;
+ current_binding_level = p->binding_level;
+
+ free (p);
+}
+
+/* integrate_decl_tree calls this function, but since we don't use the
+ DECL_LANG_SPECIFIC field, this is a no-op. */
+
+void
+copy_lang_decl (node)
+ tree node;
+{
+}
diff --git a/gnu/usr.bin/cc/cc1/c-iterate.c b/gnu/usr.bin/cc/cc1/c-iterate.c
new file mode 100644
index 0000000..99f9a79
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1/c-iterate.c
@@ -0,0 +1,595 @@
+/* Build expressions with type checking for C compiler.
+ Copyright (C) 1987, 1988, 1989, 1992, 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file is part of the C front end.
+ It is responsible for implementing iterators,
+ both their declarations and the expansion of statements using them. */
+
+#include "config.h"
+#include <stdio.h>
+#include "tree.h"
+#include "c-tree.h"
+#include "flags.h"
+#include "obstack.h"
+#include "rtl.h"
+
+static void expand_stmt_with_iterators_1 ();
+static tree collect_iterators ();
+static void iterator_loop_prologue ();
+static void iterator_loop_epilogue ();
+static void add_ixpansion ();
+static void delete_ixpansion ();
+static int top_level_ixpansion_p ();
+static void istack_sublevel_to_current ();
+
+/* A special obstack, and a pointer to the start of
+ all the data in it (so we can free everything easily). */
+static struct obstack ixp_obstack;
+static char *ixp_firstobj;
+
+/*
+   KEEPING TRACK OF EXPANSIONS
+
+   In order to clean out expansions corresponding to statements inside
+   "({...})" constructs we have to keep track of all expansions.  The
+   cleanup is needed when an automatic, or implicit, expansion on an
+   iterator, say X, happens to a statement which contains a "({...})"
+   form with a statement already expanded on X.  In this case we have
+   to go back and clean up the inner expansion.  This can be further
+   complicated by the fact that "({...})" forms can be nested.
+
+ To make this cleanup possible, we keep lists of all expansions, and
+ to make it work for nested constructs, we keep a stack. The list at
+ the top of the stack (ITER_STACK.CURRENT_LEVEL) corresponds to the
+ currently parsed level. All expansions of the levels below the
+ current one are kept in one list whose head is pointed to by
+ ITER_STACK.SUBLEVEL_FIRST (SUBLEVEL_LAST is there for making merges
+ easy). The process works as follows:
+
+ -- On "({" a new node is added to the stack by PUSH_ITERATOR_STACK.
+ The sublevel list is not changed at this point.
+
+ -- On "})" the list for the current level is appended to the sublevel
+ list.
+
+ -- On ";" sublevel lists are appended to the current level lists.
+ The reason is this: if they have not been superseded by the
+ expansion at the current level, they still might be
+ superseded later by the expansion on the higher level.
+ The levels do not have to distinguish levels below, so we
+ can merge the lists together. */
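+
+/* For example (sketch): in
+
+	a[i] = ({ b[i] = 0; 1; });
+
+   the inner statement is expanded into its own loop over the iterator
+   `i' while the "({...})" is parsed; when the enclosing statement is
+   later expanded on `i' as well, that inner loop's rtl must be found
+   and deleted so that only the single enclosing loop remains.  */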
+
+struct ixpansion
+{
+ tree ixdecl; /* Iterator decl */
+  rtx ixprologue_start;		/* First insn of prologue.  NULL means */
+					/* explicit (FOR) expansion.  */
+ rtx ixprologue_end;
+ rtx ixepilogue_start;
+ rtx ixepilogue_end;
+ struct ixpansion *next; /* Next in the list */
+};
+
+struct iter_stack_node
+{
+ struct ixpansion *first; /* Head of list of ixpansions */
+ struct ixpansion *last; /* Last node in list of ixpansions */
+ struct iter_stack_node *next; /* Next level iterator stack node */
+};
+
+struct iter_stack_node *iter_stack;
+
+struct iter_stack_node sublevel_ixpansions;
+
+/* During collect_iterators, a list of SAVE_EXPRs already scanned. */
+static tree save_exprs;
+
+/* Initialize our obstack once per compilation. */
+
+void
+init_iterators ()
+{
+ gcc_obstack_init (&ixp_obstack);
+ ixp_firstobj = (char *) obstack_alloc (&ixp_obstack, 0);
+}
+
+/* Handle the start of an explicit `for' loop for iterator IDECL. */
+
+void
+iterator_for_loop_start (idecl)
+ tree idecl;
+{
+ ITERATOR_BOUND_P (idecl) = 1;
+ add_ixpansion (idecl, 0, 0, 0, 0);
+ iterator_loop_prologue (idecl, 0, 0);
+}
+
+/* Handle the end of an explicit `for' loop for iterator IDECL. */
+
+void
+iterator_for_loop_end (idecl)
+ tree idecl;
+{
+ iterator_loop_epilogue (idecl, 0, 0);
+ ITERATOR_BOUND_P (idecl) = 0;
+}
+
+/*
+ ITERATOR RTL EXPANSIONS
+
+ Expanding simple statements with iterators is straightforward:
+ collect the list of all free iterators in the statement, and
+ generate a loop for each of them.
+
+ An iterator is "free" if it has not been "bound" by a FOR
+ operator. The DECL_RTL of the iterator is the loop counter. */
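+
+/* Sketch of the effect (the `iterator' declaration syntax is assumed
+   from the old GNU extension, not defined in this file): given roughly
+
+	iterator int i = n;
+	a[i] = 0;
+
+   the simple statement expands much as if it had been written
+
+	for (i = 0; i < n; i++)
+	  a[i] = 0;
+
+   -- see iterator_loop_prologue and iterator_loop_epilogue below,
+   which zero the counter, then increment it and test it against
+   DECL_INITIAL on each trip around the loop.  */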
+
+/* Expand a statement STMT, possibly containing iterator usage, into RTL. */
+
+void
+iterator_expand (stmt)
+ tree stmt;
+{
+ tree iter_list;
+ save_exprs = NULL_TREE;
+ iter_list = collect_iterators (stmt, NULL_TREE);
+ expand_stmt_with_iterators_1 (stmt, iter_list);
+ istack_sublevel_to_current ();
+}
+
+
+static void
+expand_stmt_with_iterators_1 (stmt, iter_list)
+ tree stmt, iter_list;
+{
+ if (iter_list == 0)
+ expand_expr_stmt (stmt);
+ else
+ {
+ tree current_iterator = TREE_VALUE (iter_list);
+ tree iter_list_tail = TREE_CHAIN (iter_list);
+ rtx p_start, p_end, e_start, e_end;
+
+ iterator_loop_prologue (current_iterator, &p_start, &p_end);
+ expand_stmt_with_iterators_1 (stmt, iter_list_tail);
+ iterator_loop_epilogue (current_iterator, &e_start, &e_end);
+
+ /** Delete all inner expansions based on current_iterator **/
+ /** before adding the outer one. **/
+
+ delete_ixpansion (current_iterator);
+ add_ixpansion (current_iterator, p_start, p_end, e_start, e_end);
+ }
+}
+
+
+/* Return a list containing all the free (i.e. not bound by a
+ containing `for' statement) iterators mentioned in EXP, plus those
+ in LIST. Do not add duplicate entries to the list. */
+
+static tree
+collect_iterators (exp, list)
+ tree exp, list;
+{
+ if (exp == 0) return list;
+
+ switch (TREE_CODE (exp))
+ {
+ case VAR_DECL:
+ if (! ITERATOR_P (exp) || ITERATOR_BOUND_P (exp))
+ return list;
+ if (value_member (exp, list))
+ return list;
+ return tree_cons (NULL_TREE, exp, list);
+
+ case TREE_LIST:
+ {
+ tree tail;
+ for (tail = exp; tail; tail = TREE_CHAIN (tail))
+ list = collect_iterators (TREE_VALUE (tail), list);
+ return list;
+ }
+
+ case SAVE_EXPR:
+ /* In each scan, scan a given save_expr only once. */
+ if (value_member (exp, save_exprs))
+ return list;
+
+ save_exprs = tree_cons (NULL_TREE, exp, save_exprs);
+ return collect_iterators (TREE_OPERAND (exp, 0), list);
+
+ /* we do not automatically iterate blocks -- one must */
+ /* use the FOR construct to do that */
+
+ case BLOCK:
+ return list;
+
+ default:
+ switch (TREE_CODE_CLASS (TREE_CODE (exp)))
+ {
+ case '1':
+ return collect_iterators (TREE_OPERAND (exp, 0), list);
+
+ case '2':
+ case '<':
+ return collect_iterators (TREE_OPERAND (exp, 0),
+ collect_iterators (TREE_OPERAND (exp, 1),
+ list));
+
+ case 'e':
+ case 'r':
+ {
+ int num_args = tree_code_length[(int) TREE_CODE (exp)];
+ int i;
+
+ /* Some tree codes have RTL, not trees, as operands. */
+ switch (TREE_CODE (exp))
+ {
+ case CALL_EXPR:
+ num_args = 2;
+ break;
+ case METHOD_CALL_EXPR:
+ num_args = 3;
+ break;
+ case WITH_CLEANUP_EXPR:
+ num_args = 1;
+ break;
+ case RTL_EXPR:
+ return list;
+ }
+
+ for (i = 0; i < num_args; i++)
+ list = collect_iterators (TREE_OPERAND (exp, i), list);
+ return list;
+ }
+ default:
+ return list;
+ }
+ }
+}
+
+/* Emit rtl for the start of a loop for iterator IDECL.
+
+ If necessary, create loop counter rtx and store it as DECL_RTL of IDECL.
+
+ The prologue normally starts and ends with notes, which are returned
+   by this function in *START_NOTE and *END_NOTE.
+   If START_NOTE and END_NOTE are 0, we don't make those notes.  */
+
+static void
+iterator_loop_prologue (idecl, start_note, end_note)
+ tree idecl;
+ rtx *start_note, *end_note;
+{
+ tree expr;
+
+ /* Force the save_expr in DECL_INITIAL to be calculated
+ if it hasn't been calculated yet. */
+ expand_expr (DECL_INITIAL (idecl), const0_rtx, VOIDmode, 0);
+
+ if (DECL_RTL (idecl) == 0)
+ expand_decl (idecl);
+
+ if (start_note)
+ *start_note = emit_note (0, NOTE_INSN_DELETED);
+
+ /* Initialize counter. */
+ expr = build (MODIFY_EXPR, TREE_TYPE (idecl), idecl, integer_zero_node);
+ TREE_SIDE_EFFECTS (expr) = 1;
+ expand_expr (expr, const0_rtx, VOIDmode, 0);
+
+ expand_start_loop_continue_elsewhere (1);
+
+ ITERATOR_BOUND_P (idecl) = 1;
+
+ if (end_note)
+ *end_note = emit_note (0, NOTE_INSN_DELETED);
+}
+
+/* Similar to the previous function, but for the end of the loop.
+
+ DECL_RTL is zeroed unless we are inside "({...})". The reason for that is
+ described below.
+
+ When we create two (or more) loops based on the same IDECL, and
+ both inside the same "({...})" construct, we must be prepared to
+ delete both of the loops and create a single one on the level
+ above, i.e. enclosing the "({...})". The new loop has to use the
+ same counter rtl because the references to the iterator decl
+ (IDECL) have already been expanded as references to the counter
+ rtl.
+
+ It is incorrect to use the same counter reg in different functions,
+ and it is desirable to use different counters in disjoint loops
+ when we know there's no need to combine them (because then they can
+ get allocated separately). */
+
+static void
+iterator_loop_epilogue (idecl, start_note, end_note)
+ tree idecl;
+ rtx *start_note, *end_note;
+{
+ tree test, incr;
+
+ if (start_note)
+ *start_note = emit_note (0, NOTE_INSN_DELETED);
+ expand_loop_continue_here ();
+ incr = build_binary_op (PLUS_EXPR, idecl, integer_one_node, 0);
+ incr = build (MODIFY_EXPR, TREE_TYPE (idecl), idecl, incr);
+ TREE_SIDE_EFFECTS (incr) = 1;
+ expand_expr (incr, const0_rtx, VOIDmode, 0);
+ test = build_binary_op (LT_EXPR, idecl, DECL_INITIAL (idecl), 0);
+ expand_exit_loop_if_false (0, test);
+ expand_end_loop ();
+
+ ITERATOR_BOUND_P (idecl) = 0;
+  /* We can reset the rtl since there is no chance that this expansion */
+  /* would be superseded by a higher level one.  */
+ if (top_level_ixpansion_p ())
+ DECL_RTL (idecl) = 0;
+ if (end_note)
+ *end_note = emit_note (0, NOTE_INSN_DELETED);
+}
+
+/* Return true if we are not currently inside a "({...})" construct. */
+
+static int
+top_level_ixpansion_p ()
+{
+ return iter_stack == 0;
+}
+
+/* Given two chains of iter_stack_nodes,
+ append the nodes in X into Y. */
+
+static void
+isn_append (x, y)
+ struct iter_stack_node *x, *y;
+{
+ if (x->first == 0)
+ return;
+
+ if (y->first == 0)
+ {
+ y->first = x->first;
+ y->last = x->last;
+ }
+ else
+ {
+ y->last->next = x->first;
+ y->last = x->last;
+ }
+}
+
+/** Make X empty **/
+
+#define ISN_ZERO(X) (X).first=(X).last=0
+
+/* Move the ixpansions in sublevel_ixpansions into the current
+ node on the iter_stack, or discard them if the iter_stack is empty.
+ We do this at the end of a statement. */
+
+static void
+istack_sublevel_to_current ()
+{
+ /* At the top level we can throw away sublevel's expansions **/
+ /* because there is nobody above us to ask for a cleanup **/
+ if (iter_stack != 0)
+ /** Merging with empty sublevel list is a no-op **/
+ if (sublevel_ixpansions.last)
+ isn_append (&sublevel_ixpansions, iter_stack);
+
+ if (iter_stack == 0)
+ obstack_free (&ixp_obstack, ixp_firstobj);
+
+ ISN_ZERO (sublevel_ixpansions);
+}
+
+/* Push a new node on the iter_stack, when we enter a ({...}). */
+
+void
+push_iterator_stack ()
+{
+ struct iter_stack_node *new_top
+ = (struct iter_stack_node*)
+ obstack_alloc (&ixp_obstack, sizeof (struct iter_stack_node));
+
+ new_top->first = 0;
+ new_top->last = 0;
+ new_top->next = iter_stack;
+ iter_stack = new_top;
+}
+
+/* Pop iter_stack, moving the ixpansions in the node being popped
+ into sublevel_ixpansions. */
+
+void
+pop_iterator_stack ()
+{
+ if (iter_stack == 0)
+ abort ();
+
+ isn_append (iter_stack, &sublevel_ixpansions);
+ /** Pop current level node: */
+ iter_stack = iter_stack->next;
+}
+
+
+/* Record an iterator expansion ("ixpansion") for IDECL.
+   The remaining parameters are the notes in the loop entry
+ and exit rtl. */
+
+static void
+add_ixpansion (idecl, pro_start, pro_end, epi_start, epi_end)
+ tree idecl;
+ rtx pro_start, pro_end, epi_start, epi_end;
+{
+ struct ixpansion* newix;
+
+ /* Do nothing if we are not inside "({...})",
+ as in that case this expansion can't need subsequent RTL modification. */
+ if (iter_stack == 0)
+ return;
+
+ newix = (struct ixpansion*) obstack_alloc (&ixp_obstack,
+ sizeof (struct ixpansion));
+ newix->ixdecl = idecl;
+ newix->ixprologue_start = pro_start;
+ newix->ixprologue_end = pro_end;
+ newix->ixepilogue_start = epi_start;
+ newix->ixepilogue_end = epi_end;
+
+ newix->next = iter_stack->first;
+ iter_stack->first = newix;
+ if (iter_stack->last == 0)
+ iter_stack->last = newix;
+}
+
+/* Delete the RTL for all ixpansions for iterator IDECL
+ in our sublevels. We do this when we make a larger
+ containing expansion for IDECL. */
+
+static void
+delete_ixpansion (idecl)
+ tree idecl;
+{
+ struct ixpansion* previx = 0, *ix;
+
+ for (ix = sublevel_ixpansions.first; ix; ix = ix->next)
+ if (ix->ixdecl == idecl)
+ {
+ /** zero means that this is a mark for FOR -- **/
+ /** we do not delete anything, just issue an error. **/
+
+ if (ix->ixprologue_start == 0)
+ error_with_decl (idecl,
+ "`for (%s)' appears within implicit iteration");
+ else
+ {
+ rtx insn;
+ /* We delete all insns, including notes because leaving loop */
+ /* notes and barriers produced by iterator expansion would */
+ /* be misleading to other phases */
+
+ for (insn = NEXT_INSN (ix->ixprologue_start);
+ insn != ix->ixprologue_end;
+ insn = NEXT_INSN (insn))
+ delete_insn (insn);
+ for (insn = NEXT_INSN (ix->ixepilogue_start);
+ insn != ix->ixepilogue_end;
+ insn = NEXT_INSN (insn))
+ delete_insn (insn);
+ }
+
+ /* Delete this ixpansion from sublevel_ixpansions. */
+ if (previx)
+ previx->next = ix->next;
+ else
+ sublevel_ixpansions.first = ix->next;
+ if (sublevel_ixpansions.last == ix)
+ sublevel_ixpansions.last = previx;
+ }
+ else
+ previx = ix;
+}
+
+#ifdef DEBUG_ITERATORS
+
+/* The functions below are for use from a source-level debugger.
+   They print short forms of iterator lists and the iterator stack.  */
+
+/* Print the name of the iterator D. */
+
+void
+prdecl (d)
+ tree d;
+{
+ if (d)
+ {
+      if (TREE_CODE (d) == VAR_DECL)
+	{
+	  tree tname = DECL_NAME (d);
+	  char *dname = IDENTIFIER_POINTER (tname);
+	  /* Print via "%s" so a `%' in the name is not taken as a format.  */
+	  fprintf (stderr, "%s", dname);
+	}
+ else
+ fprintf (stderr, "<<Not a Decl!!!>>");
+ }
+ else
+ fprintf (stderr, "<<NULL!!>>");
+}
+
+/* Print Iterator List -- names only */
+
+tree
+pil (head)
+ tree head;
+{
+ tree current, next;
+ for (current = head; current; current = next)
+ {
+ tree node = TREE_VALUE (current);
+ prdecl (node);
+ next = TREE_CHAIN (current);
+ if (next) fprintf (stderr, ",");
+ }
+ fprintf (stderr, "\n");
+  return head;
+}
+
+/* Print IXpansion List */
+
+struct ixpansion *
+pixl (head)
+ struct ixpansion *head;
+{
+ struct ixpansion *current, *next;
+ fprintf (stderr, "> ");
+ if (head == 0)
+ fprintf (stderr, "(empty)");
+
+  for (current = head; current; current = next)
+ {
+ tree node = current->ixdecl;
+ prdecl (node);
+ next = current->next;
+ if (next)
+ fprintf (stderr, ",");
+ }
+ fprintf (stderr, "\n");
+ return head;
+}
+
+/* Print Iterator Stack */
+
+void
+pis ()
+{
+ struct iter_stack_node *stack_node;
+
+ fprintf (stderr, "--SubLevel: ");
+ pixl (sublevel_ixpansions.first);
+ fprintf (stderr, "--Stack:--\n");
+ for (stack_node = iter_stack;
+ stack_node;
+ stack_node = stack_node->next)
+ pixl (stack_node->first);
+}
+
+#endif /* DEBUG_ITERATORS */
diff --git a/gnu/usr.bin/cc/cc1/c-lang.c b/gnu/usr.bin/cc/cc1/c-lang.c
new file mode 100644
index 0000000..8b46b3c
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1/c-lang.c
@@ -0,0 +1,129 @@
+/* Language-specific hook definitions for C front end.
+ Copyright (C) 1991 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include "tree.h"
+#include <stdio.h>
+#include "input.h"
+
+/* Each of the functions defined here
+ is an alternative to a function in objc-actions.c. */
+
+int
+lang_decode_option (p)
+ char *p;
+{
+ return c_decode_option (p);
+}
+
+void
+lang_init ()
+{
+ /* The beginning of the file is a new line; check for #.
+ With luck, we discover the real source file's name from that
+ and put it in input_filename. */
+ ungetc (check_newline (), finput);
+}
+
+void
+lang_finish ()
+{
+}
+
+char *
+lang_identify ()
+{
+ return "c";
+}
+
+void
+print_lang_statistics ()
+{
+}
+
+/* Used by c-lex.c, but only for objc. */
+
+tree
+lookup_interface (arg)
+ tree arg;
+{
+ return 0;
+}
+
+tree
+is_class_name (arg)
+ tree arg;
+{
+ return 0;
+}
+
+void
+maybe_objc_check_decl (decl)
+ tree decl;
+{
+}
+
+int
+maybe_objc_comptypes (lhs, rhs, reflexive)
+ tree lhs, rhs;
+ int reflexive;
+{
+ return -1;
+}
+
+tree
+maybe_objc_method_name (decl)
+ tree decl;
+{
+ return 0;
+}
+
+tree
+maybe_building_objc_message_expr ()
+{
+ return 0;
+}
+
+int
+recognize_objc_keyword ()
+{
+ return 0;
+}
+
+tree
+build_objc_string (len, str)
+ int len;
+ char *str;
+{
+ abort ();
+ return NULL_TREE;
+}
+
+void
+GNU_xref_begin ()
+{
+ fatal ("GCC does not yet support XREF");
+}
+
+void
+GNU_xref_end ()
+{
+ fatal ("GCC does not yet support XREF");
+}
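+
+/* Illustrative sketch, not part of the imported source: the
+ language-independent driver is expected to invoke the hooks above
+ roughly as follows; the option loop shown is an assumption, not the
+ actual toplev.c code. */
+#if 0
+ lang_init (); /* once finput is open */
+ if (! lang_decode_option (argv[i])) /* for each unhandled option */
+ error ("invalid option `%s'", argv[i]);
+ /* ... compile the translation unit ... */
+ lang_finish ();
+ fprintf (stderr, "front end: %s\n", lang_identify ());
+#endif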
diff --git a/gnu/usr.bin/cc/cc1/c-lex.c b/gnu/usr.bin/cc/cc1/c-lex.c
new file mode 100644
index 0000000..17d50be
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1/c-lex.c
@@ -0,0 +1,1983 @@
+/* Lexical analyzer for C and Objective C.
+ Copyright (C) 1987, 1988, 1989, 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include <stdio.h>
+#include <errno.h>
+#include <setjmp.h>
+
+#include "config.h"
+#include "rtl.h"
+#include "tree.h"
+#include "input.h"
+#include "c-lex.h"
+#include "c-tree.h"
+#include "flags.h"
+#include "c-parse.h"
+
+#include <ctype.h>
+
+#ifdef MULTIBYTE_CHARS
+#include <stdlib.h>
+#include <locale.h>
+#endif
+
+#ifndef errno
+extern int errno;
+#endif
+
+/* The elements of `ridpointers' are identifier nodes
+ for the reserved type names and storage classes.
+ It is indexed by a RID_... value. */
+tree ridpointers[(int) RID_MAX];
+
+/* Cause the `yydebug' variable to be defined. */
+#define YYDEBUG 1
+
+/* the declaration found for the last IDENTIFIER token read in.
+ yylex must look this up to detect typedefs, which get token type TYPENAME,
+ so it is left around in case the identifier is not a typedef but is
+ used in a context which makes it a reference to a variable. */
+tree lastiddecl;
+
+/* Nonzero enables objc features. */
+
+int doing_objc_thang;
+
+extern tree is_class_name ();
+
+extern int yydebug;
+
+/* File used for outputting assembler code. */
+extern FILE *asm_out_file;
+
+#ifndef WCHAR_TYPE_SIZE
+#ifdef INT_TYPE_SIZE
+#define WCHAR_TYPE_SIZE INT_TYPE_SIZE
+#else
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+#endif
+#endif
+
+/* Number of bytes in a wide character. */
+#define WCHAR_BYTES (WCHAR_TYPE_SIZE / BITS_PER_UNIT)
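+
+/* For example, with WCHAR_TYPE_SIZE == 32 and BITS_PER_UNIT == 8,
+ WCHAR_BYTES is 4. */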
+
+static int maxtoken; /* Current nominal length of token buffer. */
+char *token_buffer; /* Pointer to token buffer.
+ Actual allocated length is maxtoken + 2.
+ This is not static because objc-parse.y uses it. */
+
+/* Nonzero if end-of-file has been seen on input. */
+static int end_of_file;
+
+/* Buffered-back input character; faster than using ungetc. */
+static int nextchar = -1;
+
+int check_newline ();
+
+/* Do not insert generated code into the source; instead, include it.
+ This allows us to build gcc automatically even for targets that
+ need to add or modify the reserved keyword lists. */
+#include "c-gperf.h"
+
+/* Return something to represent absolute declarators containing a *.
+ TARGET is the absolute declarator that the * contains.
+ TYPE_QUALS is a list of modifiers such as const or volatile
+ to apply to the pointer type, represented as identifiers.
+
+ We return an INDIRECT_REF whose "contents" are TARGET
+ and whose type is the modifier list. */
+
+tree
+make_pointer_declarator (type_quals, target)
+ tree type_quals, target;
+{
+ return build1 (INDIRECT_REF, type_quals, target);
+}
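+
+/* Illustrative example (the declarator and identifier are assumed):
+ for `* const p' the parser would build roughly
+
+ make_pointer_declarator (build_tree_list (NULL_TREE,
+ ridpointers[(int) RID_CONST]),
+ get_identifier ("p"))
+
+ i.e. an INDIRECT_REF whose operand is the identifier `p' and whose
+ type field holds the one-element qualifier list. */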
+
+void
+forget_protocol_qualifiers ()
+{
+ int i, n = sizeof wordlist / sizeof (struct resword);
+
+ for (i = 0; i < n; i++)
+ if ((int) wordlist[i].rid >= (int) RID_IN
+ && (int) wordlist[i].rid <= (int) RID_ONEWAY)
+ wordlist[i].name = "";
+}
+
+void
+remember_protocol_qualifiers ()
+{
+ int i, n = sizeof wordlist / sizeof (struct resword);
+
+ for (i = 0; i < n; i++)
+ if (wordlist[i].rid == RID_IN)
+ wordlist[i].name = "in";
+ else if (wordlist[i].rid == RID_OUT)
+ wordlist[i].name = "out";
+ else if (wordlist[i].rid == RID_INOUT)
+ wordlist[i].name = "inout";
+ else if (wordlist[i].rid == RID_BYCOPY)
+ wordlist[i].name = "bycopy";
+ else if (wordlist[i].rid == RID_ONEWAY)
+ wordlist[i].name = "oneway";
+}
+
+void
+init_lex ()
+{
+ /* Make identifier nodes long enough for the language-specific slots. */
+ set_identifier_size (sizeof (struct lang_identifier));
+
+ /* Start it at 0, because check_newline is called at the very beginning
+ and will increment it to 1. */
+ lineno = 0;
+
+#ifdef MULTIBYTE_CHARS
+ /* Change to the native locale for multibyte conversions. */
+ setlocale (LC_CTYPE, "");
+#endif
+
+ maxtoken = 40;
+ token_buffer = (char *) xmalloc (maxtoken + 2);
+
+ ridpointers[(int) RID_INT] = get_identifier ("int");
+ ridpointers[(int) RID_CHAR] = get_identifier ("char");
+ ridpointers[(int) RID_VOID] = get_identifier ("void");
+ ridpointers[(int) RID_FLOAT] = get_identifier ("float");
+ ridpointers[(int) RID_DOUBLE] = get_identifier ("double");
+ ridpointers[(int) RID_SHORT] = get_identifier ("short");
+ ridpointers[(int) RID_LONG] = get_identifier ("long");
+ ridpointers[(int) RID_UNSIGNED] = get_identifier ("unsigned");
+ ridpointers[(int) RID_SIGNED] = get_identifier ("signed");
+ ridpointers[(int) RID_INLINE] = get_identifier ("inline");
+ ridpointers[(int) RID_CONST] = get_identifier ("const");
+ ridpointers[(int) RID_VOLATILE] = get_identifier ("volatile");
+ ridpointers[(int) RID_AUTO] = get_identifier ("auto");
+ ridpointers[(int) RID_STATIC] = get_identifier ("static");
+ ridpointers[(int) RID_EXTERN] = get_identifier ("extern");
+ ridpointers[(int) RID_TYPEDEF] = get_identifier ("typedef");
+ ridpointers[(int) RID_REGISTER] = get_identifier ("register");
+ ridpointers[(int) RID_ITERATOR] = get_identifier ("iterator");
+ ridpointers[(int) RID_COMPLEX] = get_identifier ("complex");
+ ridpointers[(int) RID_ID] = get_identifier ("id");
+ ridpointers[(int) RID_IN] = get_identifier ("in");
+ ridpointers[(int) RID_OUT] = get_identifier ("out");
+ ridpointers[(int) RID_INOUT] = get_identifier ("inout");
+ ridpointers[(int) RID_BYCOPY] = get_identifier ("bycopy");
+ ridpointers[(int) RID_ONEWAY] = get_identifier ("oneway");
+ forget_protocol_qualifiers();
+
+ /* Some options inhibit certain reserved words.
+ Clear those words out of the hash table so they won't be recognized. */
+#define UNSET_RESERVED_WORD(STRING) \
+ do { struct resword *s = is_reserved_word (STRING, sizeof (STRING) - 1); \
+ if (s) s->name = ""; } while (0)
+
+ if (! doing_objc_thang)
+ UNSET_RESERVED_WORD ("id");
+
+ if (flag_traditional)
+ {
+ UNSET_RESERVED_WORD ("const");
+ UNSET_RESERVED_WORD ("volatile");
+ UNSET_RESERVED_WORD ("typeof");
+ UNSET_RESERVED_WORD ("signed");
+ UNSET_RESERVED_WORD ("inline");
+ UNSET_RESERVED_WORD ("iterator");
+ UNSET_RESERVED_WORD ("complex");
+ }
+ if (flag_no_asm)
+ {
+ UNSET_RESERVED_WORD ("asm");
+ UNSET_RESERVED_WORD ("typeof");
+ UNSET_RESERVED_WORD ("inline");
+ UNSET_RESERVED_WORD ("iterator");
+ UNSET_RESERVED_WORD ("complex");
+ }
+}
+
+void
+reinit_parse_for_function ()
+{
+}
+
+/* Function used when yydebug is set, to print a token in more detail. */
+
+void
+yyprint (file, yychar, yylval)
+ FILE *file;
+ int yychar;
+ YYSTYPE yylval;
+{
+ tree t;
+ switch (yychar)
+ {
+ case IDENTIFIER:
+ case TYPENAME:
+ case OBJECTNAME:
+ t = yylval.ttype;
+ if (IDENTIFIER_POINTER (t))
+ fprintf (file, " `%s'", IDENTIFIER_POINTER (t));
+ break;
+
+ case CONSTANT:
+ t = yylval.ttype;
+ if (TREE_CODE (t) == INTEGER_CST)
+ fprintf (file,
+#if HOST_BITS_PER_WIDE_INT == 64
+#if HOST_BITS_PER_WIDE_INT != HOST_BITS_PER_INT
+ " 0x%lx%016lx",
+#else
+ " 0x%x%016x",
+#endif
+#else
+#if HOST_BITS_PER_WIDE_INT != HOST_BITS_PER_INT
+ " 0x%lx%08lx",
+#else
+ " 0x%x%08x",
+#endif
+#endif
+ TREE_INT_CST_HIGH (t), TREE_INT_CST_LOW (t));
+ break;
+ }
+}
+
+
+/* If C is not whitespace, return C.
+ Otherwise skip whitespace and return first nonwhite char read. */
+
+static int
+skip_white_space (c)
+ register int c;
+{
+ static int newline_warning = 0;
+
+ for (;;)
+ {
+ switch (c)
+ {
+ /* We don't recognize comments here, because
+ cpp output can include / and * consecutively as operators.
+ Also, there's no need, since cpp removes all comments. */
+
+ case '\n':
+ c = check_newline ();
+ break;
+
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ case '\b':
+ c = getc (finput);
+ break;
+
+ case '\r':
+ /* ANSI C says the effects of a carriage return in a source file
+ are undefined. */
+ if (pedantic && !newline_warning)
+ {
+ warning ("carriage return in source file");
+ warning ("(we only warn about the first carriage return)");
+ newline_warning = 1;
+ }
+ c = getc (finput);
+ break;
+
+ case '\\':
+ c = getc (finput);
+ if (c == '\n')
+ lineno++;
+ else
+ error ("stray '\\' in program");
+ c = getc (finput);
+ break;
+
+ default:
+ return (c);
+ }
+ }
+}
+
+/* Skips all of the white space at the current location in the input file.
+ Must use and reset nextchar if it has the next character. */
+
+void
+position_after_white_space ()
+{
+ register int c;
+
+ if (nextchar != -1)
+ c = nextchar, nextchar = -1;
+ else
+ c = getc (finput);
+
+ ungetc (skip_white_space (c), finput);
+}
+
+/* Make the token buffer longer, preserving the data in it.
+ P should point to just beyond the last valid character in the old buffer.
+ The value we return is a pointer to the new buffer
+ at a place corresponding to P. */
+
+static char *
+extend_token_buffer (p)
+ char *p;
+{
+ int offset = p - token_buffer;
+
+ maxtoken = maxtoken * 2 + 10;
+ token_buffer = (char *) xrealloc (token_buffer, maxtoken + 2);
+
+ return token_buffer + offset;
+}
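+
+/* Illustrative usage, not from the original tree: callers grow the
+ buffer on demand while copying, so maxtoken grows 40, 90, 190, ...
+ and reallocation quickly becomes rare. */
+#if 0
+ while ((c = getc (finput)) != EOF)
+ {
+ if (p >= token_buffer + maxtoken)
+ p = extend_token_buffer (p); /* p keeps its offset */
+ *p++ = c;
+ }
+#endif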
+
+/* At the beginning of a line, increment the line number
+ and process any #-directive on this line.
+ If the line is a #-directive, read the entire line and return a newline.
+ Otherwise, return the line's first non-whitespace character. */
+
+int
+check_newline ()
+{
+ register int c;
+ register int token;
+
+ lineno++;
+
+ /* Read first nonwhite char on the line. */
+
+ c = getc (finput);
+ while (c == ' ' || c == '\t')
+ c = getc (finput);
+
+ if (c != '#')
+ {
+ /* If not #, return it so caller will use it. */
+ return c;
+ }
+
+ /* Read first nonwhite char after the `#'. */
+
+ c = getc (finput);
+ while (c == ' ' || c == '\t')
+ c = getc (finput);
+
+ /* If a letter follows, then if the word here is `line', skip
+ the word itself and process the rest as a line number; otherwise,
+ handle and skip the line if the word is `pragma', `ident',
+ `define', or `undef', and report an error for anything else. */
+
+ if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'))
+ {
+ if (c == 'p')
+ {
+ if (getc (finput) == 'r'
+ && getc (finput) == 'a'
+ && getc (finput) == 'g'
+ && getc (finput) == 'm'
+ && getc (finput) == 'a'
+ && ((c = getc (finput)) == ' ' || c == '\t' || c == '\n'))
+ {
+#ifdef HANDLE_SYSV_PRAGMA
+ return handle_sysv_pragma (finput, c);
+#else /* !HANDLE_SYSV_PRAGMA */
+#ifdef HANDLE_PRAGMA
+ HANDLE_PRAGMA (finput);
+#endif /* HANDLE_PRAGMA */
+ goto skipline;
+#endif /* !HANDLE_SYSV_PRAGMA */
+ }
+ }
+
+ else if (c == 'd')
+ {
+ if (getc (finput) == 'e'
+ && getc (finput) == 'f'
+ && getc (finput) == 'i'
+ && getc (finput) == 'n'
+ && getc (finput) == 'e'
+ && ((c = getc (finput)) == ' ' || c == '\t' || c == '\n'))
+ {
+#ifdef DWARF_DEBUGGING_INFO
+ if ((debug_info_level == DINFO_LEVEL_VERBOSE)
+ && (write_symbols == DWARF_DEBUG))
+ dwarfout_define (lineno, get_directive_line (finput));
+#endif /* DWARF_DEBUGGING_INFO */
+ goto skipline;
+ }
+ }
+ else if (c == 'u')
+ {
+ if (getc (finput) == 'n'
+ && getc (finput) == 'd'
+ && getc (finput) == 'e'
+ && getc (finput) == 'f'
+ && ((c = getc (finput)) == ' ' || c == '\t' || c == '\n'))
+ {
+#ifdef DWARF_DEBUGGING_INFO
+ if ((debug_info_level == DINFO_LEVEL_VERBOSE)
+ && (write_symbols == DWARF_DEBUG))
+ dwarfout_undef (lineno, get_directive_line (finput));
+#endif /* DWARF_DEBUGGING_INFO */
+ goto skipline;
+ }
+ }
+ else if (c == 'l')
+ {
+ if (getc (finput) == 'i'
+ && getc (finput) == 'n'
+ && getc (finput) == 'e'
+ && ((c = getc (finput)) == ' ' || c == '\t'))
+ goto linenum;
+ }
+ else if (c == 'i')
+ {
+ if (getc (finput) == 'd'
+ && getc (finput) == 'e'
+ && getc (finput) == 'n'
+ && getc (finput) == 't'
+ && ((c = getc (finput)) == ' ' || c == '\t'))
+ {
+ /* #ident. The pedantic warning is now in cccp.c. */
+
+ /* Here we have just seen `#ident '.
+ A string constant should follow. */
+
+ while (c == ' ' || c == '\t')
+ c = getc (finput);
+
+ /* If no argument, ignore the line. */
+ if (c == '\n')
+ return c;
+
+ ungetc (c, finput);
+ token = yylex ();
+ if (token != STRING
+ || TREE_CODE (yylval.ttype) != STRING_CST)
+ {
+ error ("invalid #ident");
+ goto skipline;
+ }
+
+ if (!flag_no_ident)
+ {
+#ifdef ASM_OUTPUT_IDENT
+ ASM_OUTPUT_IDENT (asm_out_file, TREE_STRING_POINTER (yylval.ttype));
+#endif
+ }
+
+ /* Skip the rest of this line. */
+ goto skipline;
+ }
+ }
+
+ error ("undefined or invalid # directive");
+ goto skipline;
+ }
+
+linenum:
+ /* Here we have either `#line' or `# <nonletter>'.
+ In either case, it should be a line number; a digit should follow. */
+
+ while (c == ' ' || c == '\t')
+ c = getc (finput);
+
+ /* If the # is the only nonwhite char on the line,
+ just ignore it; returning the newline makes the caller
+ check the following line. */
+ if (c == '\n')
+ return c;
+
+ /* Something follows the #; read a token. */
+
+ ungetc (c, finput);
+ token = yylex ();
+
+ if (token == CONSTANT
+ && TREE_CODE (yylval.ttype) == INTEGER_CST)
+ {
+ int old_lineno = lineno;
+ int used_up = 0;
+ /* subtract one, because it is the following line that
+ gets the specified number */
+
+ int l = TREE_INT_CST_LOW (yylval.ttype) - 1;
+
+ /* Is this the last nonwhite stuff on the line? */
+ c = getc (finput);
+ while (c == ' ' || c == '\t')
+ c = getc (finput);
+ if (c == '\n')
+ {
+ /* No more: store the line number and check following line. */
+ lineno = l;
+ return c;
+ }
+ ungetc (c, finput);
+
+ /* More follows: it must be a string constant (filename). */
+
+ /* Read the string constant. */
+ token = yylex ();
+
+ if (token != STRING || TREE_CODE (yylval.ttype) != STRING_CST)
+ {
+ error ("invalid #line");
+ goto skipline;
+ }
+
+ input_filename
+ = (char *) permalloc (TREE_STRING_LENGTH (yylval.ttype) + 1);
+ strcpy (input_filename, TREE_STRING_POINTER (yylval.ttype));
+ lineno = l;
+
+ /* Each change of file name
+ reinitializes whether we are now in a system header. */
+ in_system_header = 0;
+
+ if (main_input_filename == 0)
+ main_input_filename = input_filename;
+
+ /* Is this the last nonwhite stuff on the line? */
+ c = getc (finput);
+ while (c == ' ' || c == '\t')
+ c = getc (finput);
+ if (c == '\n')
+ {
+ /* Update the name in the top element of input_file_stack. */
+ if (input_file_stack)
+ input_file_stack->name = input_filename;
+
+ return c;
+ }
+ ungetc (c, finput);
+
+ token = yylex ();
+ used_up = 0;
+
+ /* `1' after file name means entering new file.
+ `2' after file name means just left a file. */
+
+ if (token == CONSTANT
+ && TREE_CODE (yylval.ttype) == INTEGER_CST)
+ {
+ if (TREE_INT_CST_LOW (yylval.ttype) == 1)
+ {
+ /* Pushing to a new file. */
+ struct file_stack *p
+ = (struct file_stack *) xmalloc (sizeof (struct file_stack));
+ input_file_stack->line = old_lineno;
+ p->next = input_file_stack;
+ p->name = input_filename;
+ input_file_stack = p;
+ input_file_stack_tick++;
+#ifdef DWARF_DEBUGGING_INFO
+ if (debug_info_level == DINFO_LEVEL_VERBOSE
+ && write_symbols == DWARF_DEBUG)
+ dwarfout_start_new_source_file (input_filename);
+#endif /* DWARF_DEBUGGING_INFO */
+
+ used_up = 1;
+ }
+ else if (TREE_INT_CST_LOW (yylval.ttype) == 2)
+ {
+ /* Popping out of a file. */
+ if (input_file_stack->next)
+ {
+ struct file_stack *p = input_file_stack;
+ input_file_stack = p->next;
+ free (p);
+ input_file_stack_tick++;
+#ifdef DWARF_DEBUGGING_INFO
+ if (debug_info_level == DINFO_LEVEL_VERBOSE
+ && write_symbols == DWARF_DEBUG)
+ dwarfout_resume_previous_source_file (input_file_stack->line);
+#endif /* DWARF_DEBUGGING_INFO */
+ }
+ else
+ error ("#-lines for entering and leaving files don't match");
+
+ used_up = 1;
+ }
+ }
+
+ /* Now that we've pushed or popped the input stack,
+ update the name in the top element. */
+ if (input_file_stack)
+ input_file_stack->name = input_filename;
+
+ /* If we have handled a `1' or a `2',
+ see if there is another number to read. */
+ if (used_up)
+ {
+ /* Is this the last nonwhite stuff on the line? */
+ c = getc (finput);
+ while (c == ' ' || c == '\t')
+ c = getc (finput);
+ if (c == '\n')
+ return c;
+ ungetc (c, finput);
+
+ token = yylex ();
+ used_up = 0;
+ }
+
+ /* `3' after file name means this is a system header file. */
+
+ if (token == CONSTANT
+ && TREE_CODE (yylval.ttype) == INTEGER_CST
+ && TREE_INT_CST_LOW (yylval.ttype) == 3)
+ in_system_header = 1;
+ }
+ else
+ error ("invalid #-line");
+
+ /* skip the rest of this line. */
+ skipline:
+ if (c == '\n')
+ return c;
+ while ((c = getc (finput)) != EOF && c != '\n');
+ return c;
+}
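+
+/* Illustrative example (hypothetical file names): for cpp output like
+
+ # 1 "foo.c"
+ # 1 "foo.h" 1
+ # 2 "foo.c" 2 3
+
+ the first line sets input_filename and lineno, the second pushes
+ "foo.h" onto input_file_stack (flag 1), and the third pops back to
+ "foo.c" (flag 2) and marks it as a system header (flag 3). */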
+
+#ifdef HANDLE_SYSV_PRAGMA
+
+/* Handle a #pragma directive. INPUT is the current input stream,
+ and C is a character to reread. Processes the entire input line
+ and returns a character for the caller to reread: either \n or EOF. */
+
+/* This function has to be in this file, in order to get at
+ the token types. */
+
+int
+handle_sysv_pragma (input, c)
+ FILE *input;
+ int c;
+{
+ for (;;)
+ {
+ while (c == ' ' || c == '\t')
+ c = getc (input);
+ if (c == '\n' || c == EOF)
+ {
+ handle_pragma_token (0, 0);
+ return c;
+ }
+ ungetc (c, input);
+ switch (yylex ())
+ {
+ case IDENTIFIER:
+ case TYPENAME:
+ case STRING:
+ case CONSTANT:
+ handle_pragma_token (token_buffer, yylval.ttype);
+ break;
+ default:
+ handle_pragma_token (token_buffer, 0);
+ }
+ if (nextchar >= 0)
+ c = nextchar, nextchar = -1;
+ else
+ c = getc (input);
+ }
+}
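+
+/* Illustrative example: for `#pragma pack(2)' the loop above passes
+ handle_pragma_token the pairs ("pack", <identifier>), ("(", 0),
+ ("2", <constant>), (")", 0), and finally (0, 0) at end of line. */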
+
+#endif /* HANDLE_SYSV_PRAGMA */
+
+#define ENDFILE -1 /* token that represents end-of-file */
+
+/* Read an escape sequence, returning its equivalent as a character,
+ or store 1 in *ignore_ptr if it is backslash-newline. */
+
+static int
+readescape (ignore_ptr)
+ int *ignore_ptr;
+{
+ register int c = getc (finput);
+ register int code;
+ register unsigned count;
+ unsigned firstdig = 0;
+ int nonnull;
+
+ switch (c)
+ {
+ case 'x':
+ if (warn_traditional)
+ warning ("the meaning of `\\x' varies with -traditional");
+
+ if (flag_traditional)
+ return c;
+
+ code = 0;
+ count = 0;
+ nonnull = 0;
+ while (1)
+ {
+ c = getc (finput);
+ if (!(c >= 'a' && c <= 'f')
+ && !(c >= 'A' && c <= 'F')
+ && !(c >= '0' && c <= '9'))
+ {
+ ungetc (c, finput);
+ break;
+ }
+ code *= 16;
+ if (c >= 'a' && c <= 'f')
+ code += c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ code += c - 'A' + 10;
+ if (c >= '0' && c <= '9')
+ code += c - '0';
+ if (code != 0 || count != 0)
+ {
+ if (count == 0)
+ firstdig = code;
+ count++;
+ }
+ nonnull = 1;
+ }
+ if (! nonnull)
+ error ("\\x used with no following hex digits");
+ else if (count == 0)
+ /* Digits are all 0's. Ok. */
+ ;
+ else if ((count - 1) * 4 >= TYPE_PRECISION (integer_type_node)
+ || (count > 1
+ && ((1 << (TYPE_PRECISION (integer_type_node) - (count - 1) * 4))
+ <= firstdig)))
+ pedwarn ("hex escape out of range");
+ return code;
+
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7':
+ code = 0;
+ count = 0;
+ while ((c <= '7') && (c >= '0') && (count++ < 3))
+ {
+ code = (code * 8) + (c - '0');
+ c = getc (finput);
+ }
+ ungetc (c, finput);
+ return code;
+
+ case '\\': case '\'': case '"':
+ return c;
+
+ case '\n':
+ lineno++;
+ *ignore_ptr = 1;
+ return 0;
+
+ case 'n':
+ return TARGET_NEWLINE;
+
+ case 't':
+ return TARGET_TAB;
+
+ case 'r':
+ return TARGET_CR;
+
+ case 'f':
+ return TARGET_FF;
+
+ case 'b':
+ return TARGET_BS;
+
+ case 'a':
+ if (warn_traditional)
+ warning ("the meaning of `\\a' varies with -traditional");
+
+ if (flag_traditional)
+ return c;
+ return TARGET_BELL;
+
+ case 'v':
+#if 0 /* Vertical tab is accepted by commonly used compilers. */
+ if (flag_traditional)
+ return c;
+#endif
+ return TARGET_VT;
+
+ case 'e':
+ case 'E':
+ if (pedantic)
+ pedwarn ("non-ANSI-standard escape sequence, `\\%c'", c);
+ return 033;
+
+ case '?':
+ return c;
+
+ /* `\(', etc, are used at beginning of line to avoid confusing Emacs. */
+ case '(':
+ case '{':
+ case '[':
+ /* `\%' is used to prevent SCCS from getting confused. */
+ case '%':
+ if (pedantic)
+ pedwarn ("non-ANSI escape sequence `\\%c'", c);
+ return c;
+ }
+ if (c >= 040 && c < 0177)
+ pedwarn ("unknown escape sequence `\\%c'", c);
+ else
+ pedwarn ("unknown escape sequence: `\\' followed by char code 0x%x", c);
+ return c;
+}
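+
+/* Illustrative examples of the mapping above: `\101' yields octal
+ 0101 (at most three digits are consumed); `\x41' yields 0x41 (any
+ number of hex digits, range-checked against int); `\n' yields
+ TARGET_NEWLINE; and backslash-newline sets *ignore_ptr and
+ returns 0. */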
+
+void
+yyerror (string)
+ char *string;
+{
+ char buf[200];
+
+ strcpy (buf, string);
+
+ /* We can't print string and character constants well
+ because the token_buffer contains the result of processing escapes. */
+ if (end_of_file)
+ strcat (buf, " at end of input");
+ else if (token_buffer[0] == 0)
+ strcat (buf, " at null character");
+ else if (token_buffer[0] == '"')
+ strcat (buf, " before string constant");
+ else if (token_buffer[0] == '\'')
+ strcat (buf, " before character constant");
+ else if (token_buffer[0] < 040 || (unsigned char) token_buffer[0] >= 0177)
+ sprintf (buf + strlen (buf), " before character 0%o",
+ (unsigned char) token_buffer[0]);
+ else
+ strcat (buf, " before `%s'");
+
+ error (buf, token_buffer);
+}
+
+#if 0
+
+struct try_type
+{
+ tree *node_var;
+ char unsigned_flag;
+ char long_flag;
+ char long_long_flag;
+};
+
+struct try_type type_sequence[] =
+{
+ { &integer_type_node, 0, 0, 0},
+ { &unsigned_type_node, 1, 0, 0},
+ { &long_integer_type_node, 0, 1, 0},
+ { &long_unsigned_type_node, 1, 1, 0},
+ { &long_long_integer_type_node, 0, 1, 1},
+ { &long_long_unsigned_type_node, 1, 1, 1}
+};
+#endif /* 0 */
+
+int
+yylex ()
+{
+ register int c;
+ register char *p;
+ register int value;
+ int wide_flag = 0;
+ int objc_flag = 0;
+
+ if (nextchar >= 0)
+ c = nextchar, nextchar = -1;
+ else
+ c = getc (finput);
+
+ /* Effectively do c = skip_white_space (c)
+ but do it faster in the usual cases. */
+ while (1)
+ switch (c)
+ {
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ case '\b':
+ c = getc (finput);
+ break;
+
+ case '\r':
+ /* Call skip_white_space so we can warn if appropriate. */
+
+ case '\n':
+ case '/':
+ case '\\':
+ c = skip_white_space (c);
+ default:
+ goto found_nonwhite;
+ }
+ found_nonwhite:
+
+ token_buffer[0] = c;
+ token_buffer[1] = 0;
+
+/* yylloc.first_line = lineno; */
+
+ switch (c)
+ {
+ case EOF:
+ end_of_file = 1;
+ token_buffer[0] = 0;
+ value = ENDFILE;
+ break;
+
+ case '$':
+ if (dollars_in_ident)
+ goto letter;
+ return '$';
+
+ case 'L':
+ /* Capital L may start a wide-string or wide-character constant. */
+ {
+ register int c = getc (finput);
+ if (c == '\'')
+ {
+ wide_flag = 1;
+ goto char_constant;
+ }
+ if (c == '"')
+ {
+ wide_flag = 1;
+ goto string_constant;
+ }
+ ungetc (c, finput);
+ }
+ goto letter;
+
+ case '@':
+ if (!doing_objc_thang)
+ {
+ value = c;
+ break;
+ }
+ else
+ {
+ /* '@' may start a constant string object. */
+ register int c = getc(finput);
+ if (c == '"')
+ {
+ objc_flag = 1;
+ goto string_constant;
+ }
+ ungetc(c, finput);
+ /* Fall through to treat '@' as the start of an identifier. */
+ }
+
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F': case 'G': case 'H': case 'I': case 'J':
+ case 'K': case 'M': case 'N': case 'O':
+ case 'P': case 'Q': case 'R': case 'S': case 'T':
+ case 'U': case 'V': case 'W': case 'X': case 'Y':
+ case 'Z':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f': case 'g': case 'h': case 'i': case 'j':
+ case 'k': case 'l': case 'm': case 'n': case 'o':
+ case 'p': case 'q': case 'r': case 's': case 't':
+ case 'u': case 'v': case 'w': case 'x': case 'y':
+ case 'z':
+ case '_':
+ letter:
+ p = token_buffer;
+ while (isalnum (c) || c == '_' || c == '$' || c == '@')
+ {
+ /* Make sure this char really belongs in an identifier. */
+ if (c == '@' && ! doing_objc_thang)
+ break;
+ if (c == '$' && ! dollars_in_ident)
+ break;
+
+ if (p >= token_buffer + maxtoken)
+ p = extend_token_buffer (p);
+
+ *p++ = c;
+ c = getc (finput);
+ }
+
+ *p = 0;
+ nextchar = c;
+
+ value = IDENTIFIER;
+ yylval.itype = 0;
+
+ /* Try to recognize a keyword. Uses a minimal perfect hash function. */
+
+ {
+ register struct resword *ptr;
+
+ if (ptr = is_reserved_word (token_buffer, p - token_buffer))
+ {
+ if (ptr->rid)
+ yylval.ttype = ridpointers[(int) ptr->rid];
+ value = (int) ptr->token;
+
+ /* Only return OBJECTNAME if it is a typedef. */
+ if (doing_objc_thang && value == OBJECTNAME)
+ {
+ lastiddecl = lookup_name(yylval.ttype);
+
+ if (lastiddecl == NULL_TREE
+ || TREE_CODE (lastiddecl) != TYPE_DECL)
+ value = IDENTIFIER;
+ }
+
+ /* Even if we decided to recognize asm, still perhaps warn. */
+ if (pedantic
+ && (value == ASM_KEYWORD || value == TYPEOF
+ || ptr->rid == RID_INLINE)
+ && token_buffer[0] != '_')
+ pedwarn ("ANSI does not permit the keyword `%s'",
+ token_buffer);
+ }
+ }
+
+ /* If we did not find a keyword, look for an identifier
+ (or a typename). */
+
+ if (value == IDENTIFIER)
+ {
+ if (token_buffer[0] == '@')
+ error("invalid identifier `%s'", token_buffer);
+
+ yylval.ttype = get_identifier (token_buffer);
+ lastiddecl = lookup_name (yylval.ttype);
+
+ if (lastiddecl != 0 && TREE_CODE (lastiddecl) == TYPE_DECL)
+ value = TYPENAME;
+ /* A user-invisible read-only initialized variable
+ should be replaced by its value.
+ We handle only strings since that's the only case used in C. */
+ else if (lastiddecl != 0 && TREE_CODE (lastiddecl) == VAR_DECL
+ && DECL_IGNORED_P (lastiddecl)
+ && TREE_READONLY (lastiddecl)
+ && DECL_INITIAL (lastiddecl) != 0
+ && TREE_CODE (DECL_INITIAL (lastiddecl)) == STRING_CST)
+ {
+ tree stringval = DECL_INITIAL (lastiddecl);
+
+ /* Copy the string value so that we won't clobber anything
+ if we put something in the TREE_CHAIN of this one. */
+ yylval.ttype = build_string (TREE_STRING_LENGTH (stringval),
+ TREE_STRING_POINTER (stringval));
+ value = STRING;
+ }
+ else if (doing_objc_thang)
+ {
+ tree objc_interface_decl = is_class_name (yylval.ttype);
+
+ if (objc_interface_decl)
+ {
+ value = CLASSNAME;
+ yylval.ttype = objc_interface_decl;
+ }
+ }
+ }
+
+ break;
+
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case '.':
+ {
+ int base = 10;
+ int count = 0;
+ int largest_digit = 0;
+ int numdigits = 0;
+ /* For multi-precision arithmetic,
+ we actually store only HOST_BITS_PER_CHAR bits in each part.
+ The number of parts is chosen so as to be sufficient to hold
+ enough bits to fill the two HOST_WIDE_INTs that contain
+ the integer value (this is always at least as many bits as are
+ in a target `long long' value, but may be wider). */
+#define TOTAL_PARTS ((HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR) * 2 + 2)
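+ /* Worked example: with HOST_BITS_PER_CHAR == 8 each part is one
+ base-256 digit, so after reading "258" in base 10, parts[0] == 2
+ and parts[1] == 1 (258 == 1*256 + 2). */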
+ int parts[TOTAL_PARTS];
+ int overflow = 0;
+
+ enum anon1 { NOT_FLOAT, AFTER_POINT, TOO_MANY_POINTS} floatflag
+ = NOT_FLOAT;
+
+ for (count = 0; count < TOTAL_PARTS; count++)
+ parts[count] = 0;
+
+ p = token_buffer;
+ *p++ = c;
+
+ if (c == '0')
+ {
+ *p++ = (c = getc (finput));
+ if ((c == 'x') || (c == 'X'))
+ {
+ base = 16;
+ *p++ = (c = getc (finput));
+ }
+ /* Leading 0 forces octal unless the 0 is the only digit. */
+ else if (c >= '0' && c <= '9')
+ {
+ base = 8;
+ numdigits++;
+ }
+ else
+ numdigits++;
+ }
+
+ /* Read all the digits-and-decimal-points. */
+
+ while (c == '.'
+ || (isalnum (c) && c != 'l' && c != 'L'
+ && c != 'u' && c != 'U'
+ && c != 'i' && c != 'I' && c != 'j' && c != 'J'
+ && (floatflag == NOT_FLOAT || ((c != 'f') && (c != 'F')))))
+ {
+ if (c == '.')
+ {
+ if (base == 16)
+ error ("floating constant may not be in radix 16");
+ if (floatflag == TOO_MANY_POINTS)
+ /* We have already emitted an error. Don't need another. */
+ ;
+ else if (floatflag == AFTER_POINT)
+ {
+ error ("malformed floating constant");
+ floatflag = TOO_MANY_POINTS;
+ /* Avoid another error from atof by forcing all characters
+ from here on to be ignored. */
+ p[-1] = '\0';
+ }
+ else
+ floatflag = AFTER_POINT;
+
+ base = 10;
+ *p++ = c = getc (finput);
+ /* Accept '.' as the start of a floating-point number
+ only when it is followed by a digit.
+ Otherwise, unread the following non-digit
+ and use the '.' as a structural token. */
+ if (p == token_buffer + 2 && !isdigit (c))
+ {
+ if (c == '.')
+ {
+ c = getc (finput);
+ if (c == '.')
+ {
+ *p++ = c;
+ *p = 0;
+ return ELLIPSIS;
+ }
+ error ("parse error at `..'");
+ }
+ ungetc (c, finput);
+ token_buffer[1] = 0;
+ value = '.';
+ goto done;
+ }
+ }
+ else
+ {
+ /* It is not a decimal point.
+ It should be a digit (perhaps a hex digit). */
+
+ if (isdigit (c))
+ {
+ c = c - '0';
+ }
+ else if (base <= 10)
+ {
+ if (c == 'e' || c == 'E')
+ {
+ base = 10;
+ floatflag = AFTER_POINT;
+ break; /* start of exponent */
+ }
+ error ("nondigits in number and not hexadecimal");
+ c = 0;
+ }
+ else if (c >= 'a')
+ {
+ c = c - 'a' + 10;
+ }
+ else
+ {
+ c = c - 'A' + 10;
+ }
+ if (c >= largest_digit)
+ largest_digit = c;
+ numdigits++;
+
+ for (count = 0; count < TOTAL_PARTS; count++)
+ {
+ parts[count] *= base;
+ if (count)
+ {
+ parts[count]
+ += (parts[count-1] >> HOST_BITS_PER_CHAR);
+ parts[count-1]
+ &= (1 << HOST_BITS_PER_CHAR) - 1;
+ }
+ else
+ parts[0] += c;
+ }
+
+ /* If the extra highest-order part ever gets anything in it,
+ the number is certainly too big. */
+ if (parts[TOTAL_PARTS - 1] != 0)
+ overflow = 1;
+
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = (c = getc (finput));
+ }
+ }
+
+ if (numdigits == 0)
+ error ("numeric constant with no digits");
+
+ if (largest_digit >= base)
+ error ("numeric constant contains digits beyond the radix");
+
+ /* Remove terminating char from the token buffer and delimit the string. */
+ *--p = 0;
+
+ if (floatflag != NOT_FLOAT)
+ {
+ tree type = double_type_node;
+ int garbage_chars = 0, exceeds_double = 0;
+ int imag = 0;
+ REAL_VALUE_TYPE value;
+ jmp_buf handler;
+
+ /* Read explicit exponent if any, and put it in token_buffer. */
+
+ if ((c == 'e') || (c == 'E'))
+ {
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ c = getc (finput);
+ if ((c == '+') || (c == '-'))
+ {
+ *p++ = c;
+ c = getc (finput);
+ }
+ if (! isdigit (c))
+ error ("floating constant exponent has no digits");
+ while (isdigit (c))
+ {
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ c = getc (finput);
+ }
+ }
+
+ *p = 0;
+ errno = 0;
+
+ /* Convert string to a double, checking for overflow. */
+ if (setjmp (handler))
+ {
+ error ("floating constant out of range");
+ value = dconst0;
+ }
+ else
+ {
+ int fflag = 0, lflag = 0;
+ /* Copy token_buffer now, while it has just the number
+ and not the suffixes; once we add `f' or `i',
+ REAL_VALUE_ATOF may not work any more. */
+ char *copy = (char *) alloca (p - token_buffer + 1);
+ bcopy (token_buffer, copy, p - token_buffer + 1);
+
+ set_float_handler (handler);
+
+ while (1)
+ {
+ int lose = 0;
+
+ /* Read the suffixes to choose a data type. */
+ switch (c)
+ {
+ case 'f': case 'F':
+ if (fflag)
+ error ("more than one `f' in numeric constant");
+ fflag = 1;
+ break;
+
+ case 'l': case 'L':
+ if (lflag)
+ error ("more than one `l' in numeric constant");
+ lflag = 1;
+ break;
+
+ case 'i': case 'I':
+ if (imag)
+ error ("more than one `i' or `j' in numeric constant");
+ else if (pedantic)
+ pedwarn ("ANSI C forbids imaginary numeric constants");
+ imag = 1;
+ break;
+
+ default:
+ lose = 1;
+ }
+
+ if (lose)
+ break;
+
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ *p = 0;
+ c = getc (finput);
+ }
+
+ /* The second argument, machine_mode, of REAL_VALUE_ATOF
+ tells the desired precision of the binary result
+ of decimal-to-binary conversion. */
+
+ if (fflag)
+ {
+ if (lflag)
+ error ("both `f' and `l' in floating constant");
+
+ type = float_type_node;
+ value = REAL_VALUE_ATOF (copy, TYPE_MODE (type));
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ && REAL_VALUE_ISINF (value) && pedantic)
+ pedwarn ("floating point number exceeds range of `float'");
+ }
+ else if (lflag)
+ {
+ type = long_double_type_node;
+ value = REAL_VALUE_ATOF (copy, TYPE_MODE (type));
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ && REAL_VALUE_ISINF (value) && pedantic)
+ pedwarn ("floating point number exceeds range of `long double'");
+ }
+ else
+ {
+ value = REAL_VALUE_ATOF (copy, TYPE_MODE (type));
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ && REAL_VALUE_ISINF (value) && pedantic)
+ pedwarn ("floating point number exceeds range of `double'");
+ }
+
+ set_float_handler (NULL_PTR);
+ }
+#ifdef ERANGE
+ if (errno == ERANGE && !flag_traditional && pedantic)
+ {
+ /* ERANGE is also reported for underflow,
+ so test the value to distinguish overflow from that. */
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ && (REAL_VALUES_LESS (dconst1, value)
+ || REAL_VALUES_LESS (value, dconstm1)))
+ {
+ pedwarn ("floating point number exceeds range of `double'");
+ exceeds_double = 1;
+ }
+ }
+#endif
+ garbage_chars = 0;
+ while (isalnum (c) || c == '.' || c == '_'
+ || (!flag_traditional && (c == '+' || c == '-')
+ && (p[-1] == 'e' || p[-1] == 'E')))
+ {
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ c = getc (finput);
+ garbage_chars++;
+ }
+ if (garbage_chars > 0)
+ error ("garbage at end of number");
+
+ /* If the result is not a number, assume it must have been
+ due to some error message above, so silently convert
+ it to a zero. */
+ if (REAL_VALUE_ISNAN (value))
+ value = dconst0;
+
+ /* Create a node with determined type and value. */
+ if (imag)
+ yylval.ttype = build_complex (convert (type, integer_zero_node),
+ build_real (type, value));
+ else
+ yylval.ttype = build_real (type, value);
+
+ ungetc (c, finput);
+ *p = 0;
+ }
+ else
+ {
+ tree traditional_type, ansi_type, type;
+ HOST_WIDE_INT high, low;
+ int spec_unsigned = 0;
+ int spec_long = 0;
+ int spec_long_long = 0;
+ int spec_imag = 0;
+ int bytes, warn, i;
+
+ while (1)
+ {
+ if (c == 'u' || c == 'U')
+ {
+ if (spec_unsigned)
+ error ("two `u's in integer constant");
+ spec_unsigned = 1;
+ }
+ else if (c == 'l' || c == 'L')
+ {
+ if (spec_long)
+ {
+ if (spec_long_long)
+ error ("three `l's in integer constant");
+ else if (pedantic)
+ pedwarn ("ANSI C forbids long long integer constants");
+ spec_long_long = 1;
+ }
+ spec_long = 1;
+ }
+ else if (c == 'i' || c == 'j' || c == 'I' || c == 'J')
+ {
+ if (spec_imag)
+ error ("more than one `i' or `j' in numeric constant");
+ else if (pedantic)
+ pedwarn ("ANSI C forbids imaginary numeric constants");
+ spec_imag = 1;
+ }
+ else
+ {
+ if (isalnum (c) || c == '.' || c == '_'
+ || (!flag_traditional && (c == '+' || c == '-')
+ && (p[-1] == 'e' || p[-1] == 'E')))
+ {
+ error ("garbage at end of number");
+ while (isalnum (c) || c == '.' || c == '_'
+ || (!flag_traditional && (c == '+' || c == '-')
+ && (p[-1] == 'e' || p[-1] == 'E')))
+ {
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ c = getc (finput);
+ }
+ }
+ break;
+ }
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ c = getc (finput);
+ }
+
+ ungetc (c, finput);
+
+ /* If the constant is not long long and it won't fit in an
+ unsigned long, or if the constant is long long and won't fit
+ in an unsigned long long, then warn that the constant is out
+ of range. */
+
+ /* ??? This assumes that long long and long integer types are
+ a multiple of 8 bits. This is better than the original code,
+ which assumed that long was exactly 32 bits and long
+ long was exactly 64 bits. */
+
+ if (spec_long_long)
+ bytes = TYPE_PRECISION (long_long_integer_type_node) / 8;
+ else
+ bytes = TYPE_PRECISION (long_integer_type_node) / 8;
+
+ warn = overflow;
+ for (i = bytes; i < TOTAL_PARTS; i++)
+ if (parts[i])
+ warn = 1;
+ if (warn)
+ pedwarn ("integer constant out of range");
+
+ /* This is simplified by the fact that our constant
+ is always positive. */
+
+ high = low = 0;
+
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR; i++)
+ {
+ high |= ((HOST_WIDE_INT) parts[i + (HOST_BITS_PER_WIDE_INT
+ / HOST_BITS_PER_CHAR)]
+ << (i * HOST_BITS_PER_CHAR));
+ low |= (HOST_WIDE_INT) parts[i] << (i * HOST_BITS_PER_CHAR);
+ }
+
+ yylval.ttype = build_int_2 (low, high);
+ TREE_TYPE (yylval.ttype) = long_long_unsigned_type_node;
+
+ /* If warn_traditional, calculate both the ANSI type and the
+ traditional type, then see if they disagree.
+ Otherwise, calculate only the type for the dialect in use. */
+ if (warn_traditional || flag_traditional)
+ {
+ /* Calculate the traditional type. */
+ /* Traditionally, any constant is signed;
+ but if unsigned is specified explicitly, obey that.
+ Use the smallest size with the right number of bits,
+ except for one special case with decimal constants. */
+ if (! spec_long && base != 10
+ && int_fits_type_p (yylval.ttype, unsigned_type_node))
+ traditional_type = (spec_unsigned ? unsigned_type_node
+ : integer_type_node);
+ /* A decimal constant must be long
+ if it does not fit in type int.
+ I think this is independent of whether
+ the constant is signed. */
+ else if (! spec_long && base == 10
+ && int_fits_type_p (yylval.ttype, integer_type_node))
+ traditional_type = (spec_unsigned ? unsigned_type_node
+ : integer_type_node);
+ else if (! spec_long_long)
+ traditional_type = (spec_unsigned ? long_unsigned_type_node
+ : long_integer_type_node);
+ else
+ traditional_type = (spec_unsigned
+ ? long_long_unsigned_type_node
+ : long_long_integer_type_node);
+ }
+ if (warn_traditional || ! flag_traditional)
+ {
+ /* Calculate the ANSI type. */
+ if (! spec_long && ! spec_unsigned
+ && int_fits_type_p (yylval.ttype, integer_type_node))
+ ansi_type = integer_type_node;
+ else if (! spec_long && (base != 10 || spec_unsigned)
+ && int_fits_type_p (yylval.ttype, unsigned_type_node))
+ ansi_type = unsigned_type_node;
+ else if (! spec_unsigned && !spec_long_long
+ && int_fits_type_p (yylval.ttype, long_integer_type_node))
+ ansi_type = long_integer_type_node;
+ else if (! spec_long_long)
+ ansi_type = long_unsigned_type_node;
+ else if (! spec_unsigned
+ /* Verify value does not overflow into sign bit. */
+ && TREE_INT_CST_HIGH (yylval.ttype) >= 0
+ && int_fits_type_p (yylval.ttype,
+ long_long_integer_type_node))
+ ansi_type = long_long_integer_type_node;
+ else
+ ansi_type = long_long_unsigned_type_node;
+ }
+
+ type = flag_traditional ? traditional_type : ansi_type;
+
+ if (warn_traditional && traditional_type != ansi_type)
+ {
+ if (TYPE_PRECISION (traditional_type)
+ != TYPE_PRECISION (ansi_type))
+ warning ("width of integer constant changes with -traditional");
+ else if (TREE_UNSIGNED (traditional_type)
+ != TREE_UNSIGNED (ansi_type))
+ warning ("integer constant is unsigned in ANSI C, signed with -traditional");
+ else
+ warning ("width of integer constant may change on other systems with -traditional");
+ }
+
+ if (!flag_traditional && !int_fits_type_p (yylval.ttype, type)
+ && !warn)
+ pedwarn ("integer constant out of range");
+
+ if (base == 10 && ! spec_unsigned && TREE_UNSIGNED (type))
+ warning ("decimal constant is so large that it is unsigned");
+
+ if (spec_imag)
+ {
+ if (TYPE_PRECISION (type)
+ <= TYPE_PRECISION (integer_type_node))
+ yylval.ttype
+ = build_complex (integer_zero_node,
+ convert (integer_type_node, yylval.ttype));
+ else
+ error ("complex integer constant is too wide for `complex int'");
+ }
+ else if (flag_traditional && !int_fits_type_p (yylval.ttype, type))
+ /* The traditional constant 0x80000000 is signed
+ but doesn't fit in the range of int.
+ This will change it to -0x80000000, which does fit. */
+ {
+ TREE_TYPE (yylval.ttype) = unsigned_type (type);
+ yylval.ttype = convert (type, yylval.ttype);
+ TREE_OVERFLOW (yylval.ttype)
+ = TREE_CONSTANT_OVERFLOW (yylval.ttype) = 0;
+ }
+ else
+ TREE_TYPE (yylval.ttype) = type;
+
+ *p = 0;
+ }
+
+ value = CONSTANT; break;
+ }
+
+ case '\'':
+ char_constant:
+ {
+ register int result = 0;
+ register int num_chars = 0;
+ unsigned width = TYPE_PRECISION (char_type_node);
+ int max_chars;
+
+ if (wide_flag)
+ {
+ width = WCHAR_TYPE_SIZE;
+#ifdef MULTIBYTE_CHARS
+ max_chars = MB_CUR_MAX;
+#else
+ max_chars = 1;
+#endif
+ }
+ else
+ max_chars = TYPE_PRECISION (integer_type_node) / width;
+
+ while (1)
+ {
+ tryagain:
+
+ c = getc (finput);
+
+ if (c == '\'' || c == EOF)
+ break;
+
+ if (c == '\\')
+ {
+ int ignore = 0;
+ c = readescape (&ignore);
+ if (ignore)
+ goto tryagain;
+ if (width < HOST_BITS_PER_INT
+ && (unsigned) c >= (1 << width))
+ pedwarn ("escape sequence out of range for character");
+#ifdef MAP_CHARACTER
+ if (isprint (c))
+ c = MAP_CHARACTER (c);
+#endif
+ }
+ else if (c == '\n')
+ {
+ if (pedantic)
+ pedwarn ("ANSI C forbids newline in character constant");
+ lineno++;
+ }
+#ifdef MAP_CHARACTER
+ else
+ c = MAP_CHARACTER (c);
+#endif
+
+ num_chars++;
+ if (num_chars > maxtoken - 4)
+ extend_token_buffer (token_buffer);
+
+ token_buffer[num_chars] = c;
+
+ /* Merge character into result; ignore excess chars. */
+ if (num_chars < max_chars + 1)
+ {
+ if (width < HOST_BITS_PER_INT)
+ result = (result << width) | (c & ((1 << width) - 1));
+ else
+ result = c;
+ }
+ }
+
+ token_buffer[num_chars + 1] = '\'';
+ token_buffer[num_chars + 2] = 0;
+
+ if (c != '\'')
+ error ("malformatted character constant");
+ else if (num_chars == 0)
+ error ("empty character constant");
+ else if (num_chars > max_chars)
+ {
+ num_chars = max_chars;
+ error ("character constant too long");
+ }
+ else if (num_chars != 1 && ! flag_traditional)
+ warning ("multi-character character constant");
+
+ /* If char type is signed, sign-extend the constant. */
+ if (! wide_flag)
+ {
+ int num_bits = num_chars * width;
+ if (num_bits == 0)
+ /* We already got an error; avoid invalid shift. */
+ yylval.ttype = build_int_2 (0, 0);
+ else if (TREE_UNSIGNED (char_type_node)
+ || ((result >> (num_bits - 1)) & 1) == 0)
+ yylval.ttype
+ = build_int_2 (result & ((unsigned HOST_WIDE_INT) ~0
+ >> (HOST_BITS_PER_WIDE_INT - num_bits)),
+ 0);
+ else
+ yylval.ttype
+ = build_int_2 (result | ~((unsigned HOST_WIDE_INT) ~0
+ >> (HOST_BITS_PER_WIDE_INT - num_bits)),
+ -1);
+ TREE_TYPE (yylval.ttype) = integer_type_node;
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* Set the initial shift state and convert the next sequence. */
+ result = 0;
+ /* In all locales L'\0' is zero and mbtowc will return zero,
+ so don't use it. */
+ if (num_chars > 1
+ || (num_chars == 1 && token_buffer[1] != '\0'))
+ {
+ wchar_t wc;
+ (void) mbtowc (NULL_PTR, NULL_PTR, 0);
+ if (mbtowc (& wc, token_buffer + 1, num_chars) == num_chars)
+ result = wc;
+ else
+ warning ("Ignoring invalid multibyte character");
+ }
+#endif
+ yylval.ttype = build_int_2 (result, 0);
+ TREE_TYPE (yylval.ttype) = wchar_type_node;
+ }
+
+ value = CONSTANT;
+ break;
+ }
+
+ case '"':
+ string_constant:
+ {
+ c = getc (finput);
+ p = token_buffer + 1;
+
+ while (c != '"' && c >= 0)
+ {
+ if (c == '\\')
+ {
+ int ignore = 0;
+ c = readescape (&ignore);
+ if (ignore)
+ goto skipnewline;
+ if (!wide_flag
+ && TYPE_PRECISION (char_type_node) < HOST_BITS_PER_INT
+ && c >= (1 << TYPE_PRECISION (char_type_node)))
+ pedwarn ("escape sequence out of range for character");
+ }
+ else if (c == '\n')
+ {
+ if (pedantic)
+ pedwarn ("ANSI C forbids newline in string constant");
+ lineno++;
+ }
+
+ if (p == token_buffer + maxtoken)
+ p = extend_token_buffer (p);
+ *p++ = c;
+
+ skipnewline:
+ c = getc (finput);
+ }
+ *p = 0;
+
+ /* We have read the entire constant.
+ Construct a STRING_CST for the result. */
+
+ if (wide_flag)
+ {
+ /* If this is a L"..." wide-string, convert the multibyte string
+ to a wide character string. */
+ char *widep = (char *) alloca ((p - token_buffer) * WCHAR_BYTES);
+ int len;
+
+#ifdef MULTIBYTE_CHARS
+ len = mbstowcs ((wchar_t *) widep, token_buffer + 1, p - token_buffer);
+ if (len < 0 || len >= (p - token_buffer))
+ {
+ warning ("Ignoring invalid multibyte string");
+ len = 0;
+ }
+ bzero (widep + (len * WCHAR_BYTES), WCHAR_BYTES);
+#else
+ {
+ union { long l; char c[sizeof (long)]; } u;
+ int big_endian;
+ char *wp, *cp;
+
+ /* Determine whether host is little or big endian. */
+ u.l = 1;
+ big_endian = u.c[sizeof (long) - 1];
+ wp = widep + (big_endian ? WCHAR_BYTES - 1 : 0);
+
+ bzero (widep, (p - token_buffer) * WCHAR_BYTES);
+ for (cp = token_buffer + 1; cp < p; cp++)
+ *wp = *cp, wp += WCHAR_BYTES;
+ len = p - token_buffer - 1;
+ }
+#endif
+ yylval.ttype = build_string ((len + 1) * WCHAR_BYTES, widep);
+ TREE_TYPE (yylval.ttype) = wchar_array_type_node;
+ value = STRING;
+ }
+ else if (objc_flag)
+ {
+ extern tree build_objc_string();
+ /* Return an Objective-C @"..." constant string object. */
+ yylval.ttype = build_objc_string (p - token_buffer,
+ token_buffer + 1);
+ TREE_TYPE (yylval.ttype) = char_array_type_node;
+ value = OBJC_STRING;
+ }
+ else
+ {
+ yylval.ttype = build_string (p - token_buffer, token_buffer + 1);
+ TREE_TYPE (yylval.ttype) = char_array_type_node;
+ value = STRING;
+ }
+
+ *p++ = '"';
+ *p = 0;
+
+ break;
+ }
+
+ case '+':
+ case '-':
+ case '&':
+ case '|':
+ case '<':
+ case '>':
+ case '*':
+ case '/':
+ case '%':
+ case '^':
+ case '!':
+ case '=':
+ {
+ register int c1;
+
+ combine:
+
+ switch (c)
+ {
+ case '+':
+ yylval.code = PLUS_EXPR; break;
+ case '-':
+ yylval.code = MINUS_EXPR; break;
+ case '&':
+ yylval.code = BIT_AND_EXPR; break;
+ case '|':
+ yylval.code = BIT_IOR_EXPR; break;
+ case '*':
+ yylval.code = MULT_EXPR; break;
+ case '/':
+ yylval.code = TRUNC_DIV_EXPR; break;
+ case '%':
+ yylval.code = TRUNC_MOD_EXPR; break;
+ case '^':
+ yylval.code = BIT_XOR_EXPR; break;
+ case LSHIFT:
+ yylval.code = LSHIFT_EXPR; break;
+ case RSHIFT:
+ yylval.code = RSHIFT_EXPR; break;
+ case '<':
+ yylval.code = LT_EXPR; break;
+ case '>':
+ yylval.code = GT_EXPR; break;
+ }
+
+ token_buffer[1] = c1 = getc (finput);
+ token_buffer[2] = 0;
+
+ if (c1 == '=')
+ {
+ switch (c)
+ {
+ case '<':
+ value = ARITHCOMPARE; yylval.code = LE_EXPR; goto done;
+ case '>':
+ value = ARITHCOMPARE; yylval.code = GE_EXPR; goto done;
+ case '!':
+ value = EQCOMPARE; yylval.code = NE_EXPR; goto done;
+ case '=':
+ value = EQCOMPARE; yylval.code = EQ_EXPR; goto done;
+ }
+ value = ASSIGN; goto done;
+ }
+ else if (c == c1)
+ switch (c)
+ {
+ case '+':
+ value = PLUSPLUS; goto done;
+ case '-':
+ value = MINUSMINUS; goto done;
+ case '&':
+ value = ANDAND; goto done;
+ case '|':
+ value = OROR; goto done;
+ case '<':
+ c = LSHIFT;
+ goto combine;
+ case '>':
+ c = RSHIFT;
+ goto combine;
+ }
+ else if ((c == '-') && (c1 == '>'))
+ { value = POINTSAT; goto done; }
+ ungetc (c1, finput);
+ token_buffer[1] = 0;
+
+ if ((c == '<') || (c == '>'))
+ value = ARITHCOMPARE;
+ else value = c;
+ goto done;
+ }
+
+ case 0:
+ /* Don't make yyparse think this is eof. */
+ value = 1;
+ break;
+
+ default:
+ value = c;
+ }
+
+done:
+/* yylloc.last_line = lineno; */
+
+ return value;
+}
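+
+/* Illustrative trace of the `combine' logic in yylex: for the input
+ `<<=', yylex reads `<', peeks another `<', sets c to LSHIFT and
+ jumps back to `combine'; the next peek sees `=', so it returns
+ ASSIGN with yylval.code == LSHIFT_EXPR. A plain `<<' instead
+ returns LSHIFT once the `=' test fails. */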
+
+/* Sets the value of the 'yydebug' variable to VALUE.
+ This is a function so we don't have to have YYDEBUG defined
+ in order to build the compiler. */
+
+void
+set_yydebug (value)
+ int value;
+{
+#if YYDEBUG != 0
+ yydebug = value;
+#else
+ warning ("YYDEBUG not defined.");
+#endif
+}
diff --git a/gnu/usr.bin/cc/cc1/c-parse.c b/gnu/usr.bin/cc/cc1/c-parse.c
new file mode 100644
index 0000000..5120144
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1/c-parse.c
@@ -0,0 +1,3530 @@
+
+/* A Bison parser, made from c-parse.y with GNU Bison version 1.22
+ */
+
+#define YYBISON 1 /* Identify Bison output. */
+
+#define IDENTIFIER 258
+#define TYPENAME 259
+#define SCSPEC 260
+#define TYPESPEC 261
+#define TYPE_QUAL 262
+#define CONSTANT 263
+#define STRING 264
+#define ELLIPSIS 265
+#define SIZEOF 266
+#define ENUM 267
+#define STRUCT 268
+#define UNION 269
+#define IF 270
+#define ELSE 271
+#define WHILE 272
+#define DO 273
+#define FOR 274
+#define SWITCH 275
+#define CASE 276
+#define DEFAULT 277
+#define BREAK 278
+#define CONTINUE 279
+#define RETURN 280
+#define GOTO 281
+#define ASM_KEYWORD 282
+#define TYPEOF 283
+#define ALIGNOF 284
+#define ALIGN 285
+#define ATTRIBUTE 286
+#define EXTENSION 287
+#define LABEL 288
+#define REALPART 289
+#define IMAGPART 290
+#define ASSIGN 291
+#define OROR 292
+#define ANDAND 293
+#define EQCOMPARE 294
+#define ARITHCOMPARE 295
+#define LSHIFT 296
+#define RSHIFT 297
+#define UNARY 298
+#define PLUSPLUS 299
+#define MINUSMINUS 300
+#define HYPERUNARY 301
+#define POINTSAT 302
+#define INTERFACE 303
+#define IMPLEMENTATION 304
+#define END 305
+#define SELECTOR 306
+#define DEFS 307
+#define ENCODE 308
+#define CLASSNAME 309
+#define PUBLIC 310
+#define PRIVATE 311
+#define PROTECTED 312
+#define PROTOCOL 313
+#define OBJECTNAME 314
+#define CLASS 315
+#define ALIAS 316
+#define OBJC_STRING 317
+
+#line 45 "c-parse.y"
+
+#include <stdio.h>
+#include <errno.h>
+#include <setjmp.h>
+
+#include "config.h"
+#include "tree.h"
+#include "input.h"
+#include "c-lex.h"
+#include "c-tree.h"
+#include "flags.h"
+
+#ifdef MULTIBYTE_CHARS
+#include <stdlib.h>
+#include <locale.h>
+#endif
+
+
+/* Since parsers are distinct for each language, put the language string
+ definition here. */
+char *language_string = "GNU C";
+
+#ifndef errno
+extern int errno;
+#endif
+
+void yyerror ();
+
+/* Like YYERROR but do call yyerror. */
+#define YYERROR1 { yyerror ("syntax error"); YYERROR; }
+
+/* Cause the `yydebug' variable to be defined. */
+#define YYDEBUG 1
+
+#line 82 "c-parse.y"
+typedef union {long itype; tree ttype; enum tree_code code;
+ char *filename; int lineno; } YYSTYPE;
+#line 194 "c-parse.y"
+
+/* Number of statements (loosely speaking) seen so far. */
+static int stmt_count;
+
+/* Input file and line number of the end of the body of last simple_if;
+ used by the stmt-rule immediately after simple_if returns. */
+static char *if_stmt_file;
+static int if_stmt_line;
+
+/* List of types and structure classes of the current declaration. */
+static tree current_declspecs;
+
+/* Stack of saved values of current_declspecs. */
+static tree declspec_stack;
+
+/* 1 if we explained undeclared var errors. */
+static int undeclared_variable_notice;
+
+
+/* Tell yyparse how to print a token's value, if yydebug is set. */
+
+#define YYPRINT(FILE,YYCHAR,YYLVAL) yyprint(FILE,YYCHAR,YYLVAL)
+extern void yyprint ();
+
+#ifndef YYLTYPE
+typedef
+ struct yyltype
+ {
+ int timestamp;
+ int first_line;
+ int first_column;
+ int last_line;
+ int last_column;
+ char *text;
+ }
+ yyltype;
+
+#define YYLTYPE yyltype
+#endif
+
+#include <stdio.h>
+
+#ifndef __cplusplus
+#ifndef __STDC__
+#define const
+#endif
+#endif
+
+
+
+#define YYFINAL 626
+#define YYFLAG -32768
+#define YYNTBASE 85
+
+#define YYTRANSLATE(x) ((unsigned)(x) <= 317 ? yytranslate[x] : 225)
+
+static const char yytranslate[] = { 0,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 81, 2, 2, 2, 53, 44, 2, 60,
+ 77, 51, 49, 82, 50, 59, 52, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 39, 78, 2,
+ 37, 2, 38, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 61, 2, 84, 43, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 83, 42, 79, 80, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 40, 41, 45, 46, 47, 48, 54, 55, 56,
+ 57, 58, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76
+};
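+
+/* Illustrative note: YYTRANSLATE maps lexer token codes onto the
+ grammar's internal symbol numbers, e.g. '(' (ASCII 40) becomes 60
+ and ')' (ASCII 41) becomes 77 -- the codes used for parentheses in
+ yyrhs below -- while any code above 317 becomes the fixed fallback
+ 225. */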
+
+#if YYDEBUG != 0
+static const short yyprhs[] = { 0,
+ 0, 1, 3, 4, 7, 8, 12, 14, 16, 22,
+ 26, 31, 36, 39, 42, 45, 48, 50, 51, 52,
+ 60, 65, 66, 67, 75, 80, 81, 82, 89, 93,
+ 95, 97, 99, 101, 103, 105, 107, 109, 111, 113,
+ 114, 116, 118, 122, 124, 127, 128, 132, 135, 138,
+ 141, 146, 149, 154, 157, 160, 162, 167, 168, 176,
+ 178, 182, 186, 190, 194, 198, 202, 206, 210, 214,
+ 218, 222, 226, 230, 234, 240, 244, 248, 250, 252,
+ 254, 258, 262, 263, 268, 273, 278, 282, 286, 289,
+ 292, 294, 297, 298, 300, 303, 307, 309, 311, 314,
+ 317, 322, 327, 330, 333, 337, 339, 341, 344, 347,
+ 348, 353, 358, 362, 366, 369, 372, 375, 379, 380,
+ 383, 386, 388, 390, 393, 396, 399, 403, 404, 407,
+ 409, 411, 413, 418, 423, 425, 427, 429, 431, 435,
+ 437, 441, 442, 447, 448, 455, 459, 460, 467, 471,
+ 472, 474, 476, 479, 486, 488, 492, 493, 495, 500,
+ 507, 512, 514, 516, 518, 520, 522, 523, 528, 530,
+ 531, 534, 536, 540, 542, 543, 548, 550, 551, 560,
+ 561, 568, 569, 575, 576, 581, 582, 588, 589, 593,
+ 594, 598, 600, 602, 606, 610, 615, 619, 623, 625,
+ 629, 634, 638, 642, 644, 648, 652, 656, 661, 665,
+ 667, 668, 675, 680, 683, 684, 691, 696, 699, 700,
+ 708, 709, 716, 719, 720, 722, 723, 725, 727, 730,
+ 731, 735, 738, 742, 744, 748, 750, 752, 754, 758,
+ 763, 770, 776, 778, 782, 784, 786, 790, 793, 796,
+ 797, 799, 801, 804, 805, 808, 812, 816, 819, 823,
+ 828, 832, 835, 839, 842, 844, 847, 850, 851, 853,
+ 856, 857, 858, 860, 862, 865, 869, 871, 874, 877,
+ 884, 890, 896, 899, 902, 907, 908, 913, 914, 915,
+ 919, 924, 928, 930, 932, 934, 936, 939, 940, 945,
+ 947, 951, 952, 953, 961, 967, 970, 971, 972, 973,
+ 986, 987, 994, 997, 1000, 1003, 1007, 1014, 1023, 1034,
+ 1047, 1051, 1056, 1058, 1060, 1061, 1068, 1072, 1078, 1081,
+ 1084, 1085, 1087, 1088, 1090, 1091, 1093, 1095, 1099, 1104,
+ 1106, 1110, 1111, 1114, 1117, 1118, 1123, 1126, 1127, 1129,
+ 1131, 1135, 1137, 1141, 1144, 1147, 1150, 1153, 1156, 1157,
+ 1160, 1162, 1165, 1167, 1171, 1173
+};
+
+static const short yyrhs[] = { -1,
+ 86, 0, 0, 87, 89, 0, 0, 86, 88, 89,
+ 0, 91, 0, 90, 0, 27, 60, 100, 77, 78,
+ 0, 117, 127, 78, 0, 121, 117, 127, 78, 0,
+ 119, 117, 126, 78, 0, 121, 78, 0, 119, 78,
+ 0, 1, 78, 0, 1, 79, 0, 78, 0, 0,
+ 0, 119, 117, 154, 92, 111, 93, 184, 0, 119,
+ 117, 154, 1, 0, 0, 0, 121, 117, 157, 94,
+ 111, 95, 184, 0, 121, 117, 157, 1, 0, 0,
+ 0, 117, 157, 96, 111, 97, 184, 0, 117, 157,
+ 1, 0, 3, 0, 4, 0, 44, 0, 50, 0,
+ 49, 0, 55, 0, 56, 0, 80, 0, 81, 0,
+ 102, 0, 0, 102, 0, 107, 0, 102, 82, 107,
+ 0, 108, 0, 51, 105, 0, 0, 32, 104, 105,
+ 0, 99, 105, 0, 41, 98, 0, 11, 103, 0,
+ 11, 60, 172, 77, 0, 29, 103, 0, 29, 60,
+ 172, 77, 0, 34, 105, 0, 35, 105, 0, 103,
+ 0, 60, 172, 77, 105, 0, 0, 60, 172, 77,
+ 83, 106, 141, 79, 0, 105, 0, 107, 49, 107,
+ 0, 107, 50, 107, 0, 107, 51, 107, 0, 107,
+ 52, 107, 0, 107, 53, 107, 0, 107, 47, 107,
+ 0, 107, 48, 107, 0, 107, 46, 107, 0, 107,
+ 45, 107, 0, 107, 44, 107, 0, 107, 42, 107,
+ 0, 107, 43, 107, 0, 107, 41, 107, 0, 107,
+ 40, 107, 0, 107, 38, 208, 39, 107, 0, 107,
+ 37, 107, 0, 107, 36, 107, 0, 3, 0, 8,
+ 0, 110, 0, 60, 100, 77, 0, 60, 1, 77,
+ 0, 0, 60, 109, 185, 77, 0, 108, 60, 101,
+ 77, 0, 108, 61, 100, 84, 0, 108, 59, 98,
+ 0, 108, 58, 98, 0, 108, 55, 0, 108, 56,
+ 0, 9, 0, 110, 9, 0, 0, 113, 0, 113,
+ 10, 0, 190, 191, 114, 0, 112, 0, 179, 0,
+ 113, 112, 0, 112, 179, 0, 119, 117, 126, 78,
+ 0, 121, 117, 127, 78, 0, 119, 78, 0, 121,
+ 78, 0, 190, 191, 118, 0, 115, 0, 179, 0,
+ 116, 115, 0, 115, 179, 0, 0, 119, 117, 126,
+ 78, 0, 121, 117, 127, 78, 0, 119, 117, 150,
+ 0, 121, 117, 152, 0, 119, 78, 0, 121, 78,
+ 0, 124, 120, 0, 121, 124, 120, 0, 0, 120,
+ 125, 0, 120, 5, 0, 7, 0, 5, 0, 121,
+ 7, 0, 121, 5, 0, 124, 123, 0, 174, 124,
+ 123, 0, 0, 123, 125, 0, 6, 0, 158, 0,
+ 4, 0, 28, 60, 100, 77, 0, 28, 60, 172,
+ 77, 0, 6, 0, 7, 0, 158, 0, 129, 0,
+ 126, 82, 129, 0, 131, 0, 127, 82, 129, 0,
+ 0, 27, 60, 110, 77, 0, 0, 154, 128, 133,
+ 37, 130, 139, 0, 154, 128, 133, 0, 0, 157,
+ 128, 133, 37, 132, 139, 0, 157, 128, 133, 0,
+ 0, 134, 0, 135, 0, 134, 135, 0, 31, 60,
+ 60, 136, 77, 77, 0, 137, 0, 136, 82, 137,
+ 0, 0, 138, 0, 138, 60, 3, 77, 0, 138,
+ 60, 3, 82, 102, 77, 0, 138, 60, 102, 77,
+ 0, 98, 0, 5, 0, 6, 0, 7, 0, 107,
+ 0, 0, 83, 140, 141, 79, 0, 1, 0, 0,
+ 142, 163, 0, 143, 0, 142, 82, 143, 0, 107,
+ 0, 0, 83, 144, 141, 79, 0, 1, 0, 0,
+ 61, 107, 10, 107, 84, 37, 145, 143, 0, 0,
+ 61, 107, 84, 37, 146, 143, 0, 0, 61, 107,
+ 84, 147, 143, 0, 0, 98, 39, 148, 143, 0,
+ 0, 59, 98, 37, 149, 143, 0, 0, 154, 151,
+ 185, 0, 0, 157, 153, 185, 0, 155, 0, 157,
+ 0, 60, 155, 77, 0, 155, 60, 220, 0, 155,
+ 61, 100, 84, 0, 155, 61, 84, 0, 51, 175,
+ 155, 0, 4, 0, 156, 60, 220, 0, 156, 61,
+ 100, 84, 0, 156, 61, 84, 0, 51, 175, 156,
+ 0, 4, 0, 157, 60, 220, 0, 60, 157, 77,
+ 0, 51, 175, 157, 0, 157, 61, 100, 84, 0,
+ 157, 61, 84, 0, 3, 0, 0, 13, 98, 83,
+ 159, 165, 79, 0, 13, 83, 165, 79, 0, 13,
+ 98, 0, 0, 14, 98, 83, 160, 165, 79, 0,
+ 14, 83, 165, 79, 0, 14, 98, 0, 0, 12,
+ 98, 83, 161, 170, 164, 79, 0, 0, 12, 83,
+ 162, 170, 164, 79, 0, 12, 98, 0, 0, 82,
+ 0, 0, 82, 0, 166, 0, 166, 167, 0, 0,
+ 166, 167, 78, 0, 166, 78, 0, 122, 117, 168,
+ 0, 122, 0, 174, 117, 168, 0, 174, 0, 1,
+ 0, 169, 0, 168, 82, 169, 0, 190, 191, 154,
+ 133, 0, 190, 191, 154, 39, 107, 133, 0, 190,
+ 191, 39, 107, 133, 0, 171, 0, 170, 82, 171,
+ 0, 1, 0, 98, 0, 98, 37, 107, 0, 122,
+ 173, 0, 174, 173, 0, 0, 176, 0, 7, 0,
+ 174, 7, 0, 0, 175, 7, 0, 60, 176, 77,
+ 0, 51, 175, 176, 0, 51, 175, 0, 176, 60,
+ 213, 0, 176, 61, 100, 84, 0, 176, 61, 84,
+ 0, 60, 213, 0, 61, 100, 84, 0, 61, 84,
+ 0, 193, 0, 177, 193, 0, 177, 179, 0, 0,
+ 177, 0, 1, 78, 0, 0, 0, 182, 0, 183,
+ 0, 182, 183, 0, 33, 224, 78, 0, 185, 0,
+ 1, 185, 0, 83, 79, 0, 83, 180, 181, 116,
+ 178, 79, 0, 83, 180, 181, 1, 79, 0, 83,
+ 180, 181, 177, 79, 0, 187, 192, 0, 187, 1,
+ 0, 15, 60, 100, 77, 0, 0, 18, 189, 192,
+ 17, 0, 0, 0, 190, 191, 195, 0, 190, 191,
+ 206, 192, 0, 190, 191, 194, 0, 195, 0, 206,
+ 0, 185, 0, 203, 0, 100, 78, 0, 0, 186,
+ 16, 196, 192, 0, 186, 0, 186, 16, 1, 0,
+ 0, 0, 17, 197, 60, 100, 77, 198, 192, 0,
+ 188, 60, 100, 77, 78, 0, 188, 1, 0, 0,
+ 0, 0, 19, 60, 208, 78, 199, 208, 78, 200,
+ 208, 77, 201, 192, 0, 0, 20, 60, 100, 77,
+ 202, 192, 0, 23, 78, 0, 24, 78, 0, 25,
+ 78, 0, 25, 100, 78, 0, 27, 207, 60, 100,
+ 77, 78, 0, 27, 207, 60, 100, 39, 209, 77,
+ 78, 0, 27, 207, 60, 100, 39, 209, 39, 209,
+ 77, 78, 0, 27, 207, 60, 100, 39, 209, 39,
+ 209, 39, 212, 77, 78, 0, 26, 98, 78, 0,
+ 26, 51, 100, 78, 0, 78, 0, 204, 0, 0,
+ 19, 60, 108, 77, 205, 192, 0, 21, 107, 39,
+ 0, 21, 107, 10, 107, 39, 0, 22, 39, 0,
+ 98, 39, 0, 0, 7, 0, 0, 100, 0, 0,
+ 210, 0, 211, 0, 210, 82, 211, 0, 9, 60,
+ 100, 77, 0, 110, 0, 212, 82, 110, 0, 0,
+ 214, 215, 0, 217, 77, 0, 0, 218, 78, 216,
+ 215, 0, 1, 77, 0, 0, 10, 0, 218, 0,
+ 218, 82, 10, 0, 219, 0, 218, 82, 219, 0,
+ 119, 156, 0, 119, 157, 0, 119, 173, 0, 121,
+ 157, 0, 121, 173, 0, 0, 221, 222, 0, 215,
+ 0, 223, 77, 0, 3, 0, 223, 82, 3, 0,
+ 98, 0, 224, 82, 98, 0
+};
+
+#endif
+
+#if YYDEBUG != 0
+static const short yyrline[] = { 0,
+ 220, 224, 237, 239, 239, 240, 242, 244, 245, 255,
+ 261, 263, 265, 267, 269, 270, 271, 276, 282, 284,
+ 285, 287, 292, 294, 295, 297, 302, 304, 305, 309,
+ 311, 314, 316, 318, 320, 322, 324, 326, 330, 334,
+ 337, 340, 343, 347, 349, 352, 355, 358, 362, 388,
+ 393, 395, 397, 399, 401, 405, 407, 410, 414, 441,
+ 443, 445, 447, 449, 451, 453, 455, 457, 459, 461,
+ 463, 465, 467, 469, 471, 473, 476, 482, 581, 582,
+ 584, 590, 592, 606, 629, 631, 633, 637, 643, 645,
+ 650, 652, 657, 659, 660, 670, 675, 677, 678, 679,
+ 682, 687, 691, 694, 702, 707, 709, 710, 711, 718,
+ 726, 731, 735, 739, 743, 745, 753, 756, 760, 762,
+ 764, 775, 779, 781, 784, 797, 800, 804, 806, 814,
+ 815, 816, 820, 822, 828, 829, 830, 833, 835, 838,
+ 840, 843, 846, 852, 859, 862, 868, 875, 878, 885,
+ 888, 892, 895, 899, 904, 907, 911, 914, 916, 919,
+ 922, 929, 931, 932, 933, 938, 940, 945, 953, 958,
+ 962, 965, 967, 972, 975, 977, 979, 983, 986, 986,
+ 989, 989, 992, 992, 995, 995, 998, 1000, 1017, 1021,
+ 1038, 1045, 1047, 1052, 1055, 1060, 1062, 1064, 1066, 1074,
+ 1080, 1082, 1084, 1086, 1092, 1098, 1100, 1102, 1104, 1106,
+ 1109, 1114, 1118, 1121, 1123, 1125, 1127, 1130, 1132, 1135,
+ 1138, 1141, 1144, 1148, 1150, 1153, 1155, 1159, 1162, 1167,
+ 1169, 1171, 1185, 1191, 1196, 1201, 1206, 1210, 1212, 1216,
+ 1220, 1224, 1234, 1236, 1238, 1243, 1246, 1250, 1253, 1257,
+ 1260, 1263, 1266, 1270, 1273, 1277, 1281, 1283, 1285, 1287,
+ 1289, 1291, 1293, 1295, 1303, 1305, 1306, 1309, 1311, 1314,
+ 1317, 1328, 1330, 1335, 1337, 1340, 1354, 1357, 1360, 1362,
+ 1370, 1378, 1389, 1394, 1397, 1410, 1418, 1422, 1426, 1430,
+ 1436, 1440, 1445, 1447, 1458, 1461, 1462, 1479, 1484, 1487,
+ 1499, 1501, 1511, 1521, 1522, 1530, 1533, 1545, 1549, 1566,
+ 1576, 1585, 1590, 1595, 1600, 1604, 1608, 1619, 1626, 1633,
+ 1640, 1651, 1655, 1658, 1663, 1686, 1720, 1745, 1774, 1789,
+ 1800, 1804, 1808, 1811, 1816, 1818, 1821, 1823, 1827, 1832,
+ 1835, 1841, 1846, 1851, 1853, 1862, 1863, 1869, 1871, 1876,
+ 1878, 1882, 1885, 1891, 1894, 1896, 1898, 1900, 1907, 1912,
+ 1917, 1919, 1928, 1931, 1936, 1939
+};
+
+static const char * const yytname[] = { "$","error","$illegal.","IDENTIFIER",
+"TYPENAME","SCSPEC","TYPESPEC","TYPE_QUAL","CONSTANT","STRING","ELLIPSIS","SIZEOF",
+"ENUM","STRUCT","UNION","IF","ELSE","WHILE","DO","FOR","SWITCH","CASE","DEFAULT",
+"BREAK","CONTINUE","RETURN","GOTO","ASM_KEYWORD","TYPEOF","ALIGNOF","ALIGN",
+"ATTRIBUTE","EXTENSION","LABEL","REALPART","IMAGPART","ASSIGN","'='","'?'","':'",
+"OROR","ANDAND","'|'","'^'","'&'","EQCOMPARE","ARITHCOMPARE","LSHIFT","RSHIFT",
+"'+'","'-'","'*'","'/'","'%'","UNARY","PLUSPLUS","MINUSMINUS","HYPERUNARY","POINTSAT",
+"'.'","'('","'['","INTERFACE","IMPLEMENTATION","END","SELECTOR","DEFS","ENCODE",
+"CLASSNAME","PUBLIC","PRIVATE","PROTECTED","PROTOCOL","OBJECTNAME","CLASS","ALIAS",
+"OBJC_STRING","')'","';'","'}'","'~'","'!'","','","'{'","']'","program","extdefs",
+"@1","@2","extdef","datadef","fndef","@3","@4","@5","@6","@7","@8","identifier",
+"unop","expr","exprlist","nonnull_exprlist","unary_expr","@9","cast_expr","@10",
+"expr_no_commas","primary","@11","string","xdecls","lineno_datadecl","datadecls",
+"datadecl","lineno_decl","decls","setspecs","decl","typed_declspecs","reserved_declspecs",
+"declmods","typed_typespecs","reserved_typespecquals","typespec","typespecqual_reserved",
+"initdecls","notype_initdecls","maybeasm","initdcl","@12","notype_initdcl","@13",
+"maybe_attribute","attributes","attribute","attribute_list","attrib","any_word",
+"init","@14","initlist_maybe_comma","initlist1","initelt","@15","@16","@17",
+"@18","@19","@20","nested_function","@21","notype_nested_function","@22","declarator",
+"after_type_declarator","parm_declarator","notype_declarator","structsp","@23",
+"@24","@25","@26","maybecomma","maybecomma_warn","component_decl_list","component_decl_list2",
+"component_decl","components","component_declarator","enumlist","enumerator",
+"typename","absdcl","nonempty_type_quals","type_quals","absdcl1","stmts","xstmts",
+"errstmt","pushlevel","maybe_label_decls","label_decls","label_decl","compstmt_or_error",
+"compstmt","simple_if","if_prefix","do_stmt_start","@27","save_filename","save_lineno",
+"lineno_labeled_stmt","lineno_stmt_or_label","stmt_or_label","stmt","@28","@29",
+"@30","@31","@32","@33","@34","all_iter_stmt","all_iter_stmt_simple","@35","label",
+"maybe_type_qual","xexpr","asm_operands","nonnull_asm_operands","asm_operand",
+"asm_clobbers","parmlist","@36","parmlist_1","@37","parmlist_2","parms","parm",
+"parmlist_or_identifiers","@38","parmlist_or_identifiers_1","identifiers","identifiers_or_typenames",
+""
+};
+#endif
+
+static const short yyr1[] = { 0,
+ 85, 85, 87, 86, 88, 86, 89, 89, 89, 90,
+ 90, 90, 90, 90, 90, 90, 90, 92, 93, 91,
+ 91, 94, 95, 91, 91, 96, 97, 91, 91, 98,
+ 98, 99, 99, 99, 99, 99, 99, 99, 100, 101,
+ 101, 102, 102, 103, 103, 104, 103, 103, 103, 103,
+ 103, 103, 103, 103, 103, 105, 105, 106, 105, 107,
+ 107, 107, 107, 107, 107, 107, 107, 107, 107, 107,
+ 107, 107, 107, 107, 107, 107, 107, 108, 108, 108,
+ 108, 108, 109, 108, 108, 108, 108, 108, 108, 108,
+ 110, 110, 111, 111, 111, 112, 113, 113, 113, 113,
+ 114, 114, 114, 114, 115, 116, 116, 116, 116, 117,
+ 118, 118, 118, 118, 118, 118, 119, 119, 120, 120,
+ 120, 121, 121, 121, 121, 122, 122, 123, 123, 124,
+ 124, 124, 124, 124, 125, 125, 125, 126, 126, 127,
+ 127, 128, 128, 130, 129, 129, 132, 131, 131, 133,
+ 133, 134, 134, 135, 136, 136, 137, 137, 137, 137,
+ 137, 138, 138, 138, 138, 139, 140, 139, 139, 141,
+ 141, 142, 142, 143, 144, 143, 143, 145, 143, 146,
+ 143, 147, 143, 148, 143, 149, 143, 151, 150, 153,
+ 152, 154, 154, 155, 155, 155, 155, 155, 155, 156,
+ 156, 156, 156, 156, 157, 157, 157, 157, 157, 157,
+ 159, 158, 158, 158, 160, 158, 158, 158, 161, 158,
+ 162, 158, 158, 163, 163, 164, 164, 165, 165, 166,
+ 166, 166, 167, 167, 167, 167, 167, 168, 168, 169,
+ 169, 169, 170, 170, 170, 171, 171, 172, 172, 173,
+ 173, 174, 174, 175, 175, 176, 176, 176, 176, 176,
+ 176, 176, 176, 176, 177, 177, 177, 178, 178, 179,
+ 180, 181, 181, 182, 182, 183, 184, 184, 185, 185,
+ 185, 185, 186, 186, 187, 189, 188, 190, 191, 192,
+ 192, 193, 194, 194, 195, 195, 195, 196, 195, 195,
+ 195, 197, 198, 195, 195, 195, 199, 200, 201, 195,
+ 202, 195, 195, 195, 195, 195, 195, 195, 195, 195,
+ 195, 195, 195, 203, 205, 204, 206, 206, 206, 206,
+ 207, 207, 208, 208, 209, 209, 210, 210, 211, 212,
+ 212, 214, 213, 215, 216, 215, 215, 217, 217, 217,
+ 217, 218, 218, 219, 219, 219, 219, 219, 221, 220,
+ 222, 222, 223, 223, 224, 224
+};
+
+static const short yyr2[] = { 0,
+ 0, 1, 0, 2, 0, 3, 1, 1, 5, 3,
+ 4, 4, 2, 2, 2, 2, 1, 0, 0, 7,
+ 4, 0, 0, 7, 4, 0, 0, 6, 3, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
+ 1, 1, 3, 1, 2, 0, 3, 2, 2, 2,
+ 4, 2, 4, 2, 2, 1, 4, 0, 7, 1,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 5, 3, 3, 1, 1, 1,
+ 3, 3, 0, 4, 4, 4, 3, 3, 2, 2,
+ 1, 2, 0, 1, 2, 3, 1, 1, 2, 2,
+ 4, 4, 2, 2, 3, 1, 1, 2, 2, 0,
+ 4, 4, 3, 3, 2, 2, 2, 3, 0, 2,
+ 2, 1, 1, 2, 2, 2, 3, 0, 2, 1,
+ 1, 1, 4, 4, 1, 1, 1, 1, 3, 1,
+ 3, 0, 4, 0, 6, 3, 0, 6, 3, 0,
+ 1, 1, 2, 6, 1, 3, 0, 1, 4, 6,
+ 4, 1, 1, 1, 1, 1, 0, 4, 1, 0,
+ 2, 1, 3, 1, 0, 4, 1, 0, 8, 0,
+ 6, 0, 5, 0, 4, 0, 5, 0, 3, 0,
+ 3, 1, 1, 3, 3, 4, 3, 3, 1, 3,
+ 4, 3, 3, 1, 3, 3, 3, 4, 3, 1,
+ 0, 6, 4, 2, 0, 6, 4, 2, 0, 7,
+ 0, 6, 2, 0, 1, 0, 1, 1, 2, 0,
+ 3, 2, 3, 1, 3, 1, 1, 1, 3, 4,
+ 6, 5, 1, 3, 1, 1, 3, 2, 2, 0,
+ 1, 1, 2, 0, 2, 3, 3, 2, 3, 4,
+ 3, 2, 3, 2, 1, 2, 2, 0, 1, 2,
+ 0, 0, 1, 1, 2, 3, 1, 2, 2, 6,
+ 5, 5, 2, 2, 4, 0, 4, 0, 0, 3,
+ 4, 3, 1, 1, 1, 1, 2, 0, 4, 1,
+ 3, 0, 0, 7, 5, 2, 0, 0, 0, 12,
+ 0, 6, 2, 2, 2, 3, 6, 8, 10, 12,
+ 3, 4, 1, 1, 0, 6, 3, 5, 2, 2,
+ 0, 1, 0, 1, 0, 1, 1, 3, 4, 1,
+ 3, 0, 2, 2, 0, 4, 2, 0, 1, 1,
+ 3, 1, 3, 2, 2, 2, 2, 2, 0, 2,
+ 1, 2, 1, 3, 1, 3
+};
+
+static const short yydefact[] = { 3,
+ 5, 0, 0, 0, 132, 123, 130, 122, 0, 0,
+ 0, 0, 0, 17, 4, 8, 7, 0, 110, 110,
+ 119, 131, 6, 15, 16, 30, 31, 221, 223, 230,
+ 214, 230, 218, 0, 0, 210, 254, 0, 0, 140,
+ 0, 14, 0, 125, 124, 13, 0, 119, 117, 0,
+ 219, 0, 0, 211, 0, 215, 78, 79, 91, 0,
+ 0, 46, 0, 0, 0, 32, 34, 33, 0, 35,
+ 36, 0, 37, 38, 0, 0, 39, 56, 60, 42,
+ 44, 80, 252, 0, 250, 128, 0, 250, 0, 0,
+ 10, 0, 29, 0, 359, 0, 0, 150, 199, 254,
+ 0, 0, 138, 0, 192, 193, 0, 0, 118, 121,
+ 135, 136, 120, 137, 245, 246, 226, 243, 0, 213,
+ 237, 232, 110, 229, 110, 230, 217, 230, 0, 50,
+ 0, 52, 0, 54, 55, 49, 45, 0, 0, 0,
+ 0, 48, 0, 0, 0, 0, 333, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 89, 90, 0, 0, 40, 0, 92, 133, 254,
+ 342, 0, 248, 251, 126, 134, 253, 128, 249, 255,
+ 207, 206, 141, 142, 0, 205, 0, 209, 0, 0,
+ 27, 0, 288, 98, 289, 0, 149, 151, 152, 0,
+ 0, 12, 0, 21, 0, 150, 359, 0, 11, 25,
+ 0, 0, 227, 0, 226, 288, 231, 288, 0, 0,
+ 0, 0, 47, 82, 81, 271, 0, 0, 9, 43,
+ 77, 76, 334, 0, 74, 73, 71, 72, 70, 69,
+ 68, 66, 67, 61, 62, 63, 64, 65, 88, 87,
+ 0, 41, 0, 258, 0, 262, 0, 264, 0, 342,
+ 0, 129, 127, 0, 0, 363, 349, 250, 250, 361,
+ 0, 350, 352, 360, 0, 208, 270, 0, 100, 95,
+ 99, 0, 0, 147, 153, 198, 194, 139, 19, 146,
+ 195, 197, 0, 23, 247, 244, 222, 0, 233, 238,
+ 289, 235, 212, 216, 51, 53, 279, 272, 84, 58,
+ 57, 0, 85, 86, 257, 256, 343, 263, 259, 261,
+ 0, 143, 347, 204, 254, 342, 354, 355, 356, 254,
+ 357, 358, 344, 345, 0, 362, 0, 0, 28, 277,
+ 96, 110, 110, 157, 0, 0, 144, 196, 0, 220,
+ 288, 0, 0, 0, 273, 274, 0, 75, 260, 258,
+ 359, 0, 258, 0, 351, 353, 364, 278, 103, 0,
+ 104, 0, 163, 164, 165, 162, 0, 155, 158, 169,
+ 167, 166, 148, 20, 0, 24, 239, 0, 150, 365,
+ 0, 0, 0, 288, 0, 107, 289, 265, 275, 177,
+ 78, 0, 0, 175, 0, 174, 0, 224, 172, 203,
+ 200, 202, 0, 346, 0, 0, 142, 0, 157, 0,
+ 0, 145, 150, 0, 240, 276, 0, 281, 109, 108,
+ 0, 0, 282, 267, 289, 266, 0, 0, 0, 0,
+ 184, 59, 0, 171, 201, 101, 102, 154, 156, 78,
+ 0, 0, 242, 150, 366, 280, 0, 132, 0, 302,
+ 286, 0, 0, 0, 0, 0, 0, 0, 0, 331,
+ 323, 0, 0, 105, 110, 110, 295, 300, 0, 0,
+ 292, 293, 296, 324, 294, 186, 0, 182, 0, 0,
+ 173, 159, 0, 161, 168, 241, 0, 0, 288, 333,
+ 0, 0, 329, 313, 314, 315, 0, 0, 0, 332,
+ 0, 330, 297, 115, 0, 116, 0, 0, 284, 289,
+ 283, 306, 0, 0, 0, 180, 0, 176, 185, 0,
+ 0, 0, 0, 44, 0, 0, 0, 327, 316, 0,
+ 321, 0, 0, 113, 142, 0, 114, 142, 301, 288,
+ 0, 0, 187, 0, 0, 183, 160, 285, 0, 287,
+ 325, 307, 311, 0, 322, 0, 111, 0, 112, 0,
+ 299, 290, 288, 0, 178, 181, 303, 288, 333, 288,
+ 328, 335, 0, 189, 191, 291, 305, 0, 288, 326,
+ 0, 312, 0, 0, 336, 337, 317, 179, 304, 308,
+ 0, 335, 0, 0, 333, 0, 0, 318, 338, 0,
+ 339, 0, 0, 309, 340, 0, 319, 288, 0, 0,
+ 310, 320, 341, 0, 0, 0
+};
+
+static const short yydefgoto[] = { 624,
+ 1, 2, 3, 15, 16, 17, 205, 346, 211, 349,
+ 97, 278, 405, 75, 233, 251, 77, 78, 133, 79,
+ 357, 80, 81, 140, 82, 191, 192, 193, 341, 393,
+ 394, 18, 474, 268, 49, 269, 85, 175, 21, 113,
+ 102, 39, 98, 103, 385, 40, 345, 197, 198, 199,
+ 377, 378, 379, 383, 421, 407, 408, 409, 440, 588,
+ 555, 527, 490, 524, 544, 568, 547, 570, 184, 105,
+ 327, 106, 22, 126, 128, 119, 50, 444, 214, 52,
+ 53, 124, 299, 300, 117, 118, 87, 173, 88, 89,
+ 174, 395, 432, 194, 308, 354, 355, 356, 339, 340,
+ 478, 479, 480, 499, 520, 282, 521, 398, 481, 482,
+ 550, 498, 589, 579, 605, 618, 580, 483, 484, 578,
+ 485, 511, 234, 594, 595, 596, 616, 256, 257, 270,
+ 364, 271, 272, 273, 186, 187, 274, 275, 391
+};
+
+static const short yypact[] = { 61,
+ 79, 401, 401, 192,-32768,-32768,-32768,-32768, 56, 63,
+ 67, 3, 36,-32768,-32768,-32768,-32768, 96, 20, 488,
+-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768, 40,-32768,
+ 66,-32768, 76, 1775, 1691,-32768,-32768, 96, 156,-32768,
+ 762,-32768, 69,-32768,-32768,-32768, 96,-32768, 404, 393,
+-32768, 109, 293,-32768, 111,-32768,-32768,-32768,-32768, 1788,
+ 1838,-32768, 1775, 1775, 330,-32768,-32768,-32768, 1775,-32768,
+-32768, 528,-32768,-32768, 1775, 98, 118,-32768,-32768, 1983,
+ 451, 193,-32768, 134, 172,-32768, 141, 1050, 258, 4,
+-32768, 69,-32768, 165,-32768, 1317, 236, 204,-32768,-32768,
+ 69, 191,-32768, 362, 331, 361, 232, 924, 404,-32768,
+-32768,-32768,-32768,-32768,-32768, 210, 162,-32768, 393,-32768,
+-32768,-32768, 360, 182, 1005,-32768,-32768,-32768, 528,-32768,
+ 528,-32768, 1775,-32768,-32768,-32768,-32768, 180, 200, 219,
+ 216,-32768, 233, 1775, 1775, 1775, 1775, 1775, 1775, 1775,
+ 1775, 1775, 1775, 1775, 1775, 1775, 1775, 1775, 1775, 1775,
+ 1775,-32768,-32768, 330, 330, 1775, 1775,-32768,-32768,-32768,
+ 172, 1330,-32768, 388, 419,-32768,-32768,-32768,-32768,-32768,
+ 361,-32768,-32768, 314, 311,-32768, 586,-32768, 264, 292,
+-32768, 179, 44,-32768,-32768, 322, 363, 204,-32768, 194,
+ 33,-32768, 69,-32768, 236, 204,-32768, 1384,-32768,-32768,
+ 236, 1775, 330, 319, 162,-32768,-32768,-32768, 340, 345,
+ 350, 359,-32768,-32768,-32768, 367, 364, 1633,-32768, 1983,
+ 1983, 1983,-32768, 416, 2012, 2024, 1877, 475, 2033, 1423,
+ 433, 496, 496, 406, 406,-32768,-32768,-32768,-32768,-32768,
+ 383, 118, 380, 265, 235,-32768, 846,-32768, 386,-32768,
+ 1397,-32768, 419, 31, 399,-32768,-32768, 148, 684,-32768,
+ 411, 249,-32768,-32768, 83,-32768,-32768, 48,-32768,-32768,
+-32768, 1484, 436,-32768,-32768, 331,-32768,-32768,-32768, 460,
+-32768,-32768, 415,-32768, 1983,-32768,-32768, 424, 423,-32768,
+-32768, 423,-32768,-32768,-32768,-32768,-32768, 480,-32768,-32768,
+-32768, 1775,-32768,-32768, 388,-32768,-32768,-32768,-32768,-32768,
+ 446,-32768,-32768,-32768,-32768, 161, 405, 361,-32768,-32768,
+ 361,-32768,-32768,-32768, 373,-32768, 511, 219,-32768,-32768,
+-32768, 465, 1278, 598, 1267, 48,-32768,-32768, 48,-32768,
+-32768, 53, 330, 702, 480,-32768, 1084, 1999,-32768, 102,
+-32768, 1451, 207, 846,-32768,-32768,-32768,-32768,-32768, 69,
+-32768, 96,-32768,-32768,-32768,-32768, 149,-32768, 478,-32768,
+-32768, 1983,-32768,-32768, 1267,-32768,-32768, 1775, 138,-32768,
+ 271, 390, 621, 471, 783,-32768,-32768,-32768,-32768,-32768,
+ 513, 330, 1775,-32768, 514, 1983, 476, 477,-32768, 405,
+-32768,-32768, 474,-32768, 280, 282, 26, 484, 598, 1851,
+ 1084,-32768, 1943, 1775,-32768,-32768, 330,-32768,-32768,-32768,
+ 864, 485,-32768,-32768,-32768,-32768, 1533, 531, 1898, 1084,
+-32768,-32768, 1145,-32768,-32768,-32768,-32768,-32768,-32768, 226,
+ 240, 486,-32768, 1943,-32768,-32768, 1583, 532, 515,-32768,
+-32768, 516, 520, 1775, 535, 503, 504, 1725, 168, 578,
+-32768, 547, 517,-32768, 519, 1643,-32768, 590, 945, 51,
+-32768,-32768,-32768,-32768,-32768,-32768, 1775, 570, 533, 1206,
+-32768,-32768, 1775,-32768,-32768,-32768, 1775, 550,-32768, 1775,
+ 1775, 1477,-32768,-32768,-32768,-32768, 537, 1775, 538,-32768,
+ 553,-32768,-32768,-32768, 69,-32768, 96, 1026,-32768,-32768,
+-32768,-32768, 1775, 1206, 1916,-32768, 1206,-32768,-32768, 247,
+ 541, 1775, 602, 296, 543, 546, 1775,-32768,-32768, 559,
+-32768, 1775, 283,-32768, 41, 306,-32768, 169,-32768,-32768,
+ 1583, 554,-32768, 614, 1206,-32768,-32768,-32768, 575,-32768,
+-32768,-32768,-32768, 1965,-32768, 38,-32768, 219,-32768, 219,
+-32768,-32768,-32768, 580,-32768,-32768,-32768,-32768, 1775,-32768,
+-32768, 650, 582,-32768,-32768,-32768,-32768, 1206,-32768,-32768,
+ 583,-32768, 604, 128, 584,-32768,-32768,-32768,-32768,-32768,
+ 1775, 650, 591, 650, 1775, 596, 143,-32768,-32768, 597,
+-32768, 311, 600,-32768, 193, 253,-32768,-32768, 601, 311,
+-32768,-32768, 193, 668, 675,-32768
+};
+
+static const short yypgoto[] = {-32768,
+-32768,-32768,-32768, 677,-32768,-32768,-32768,-32768,-32768,-32768,
+-32768,-32768, -7,-32768, -34,-32768, -157, 430,-32768, -33,
+-32768, 130, 183,-32768, -177, -122, 489,-32768,-32768, 290,
+-32768, -4,-32768, 10, 638, 16, 639, 555, -3, -129,
+ -342, -40, -94, -63,-32768,-32768,-32768, -195,-32768, 495,
+-32768, 275,-32768, 310,-32768, -366,-32768, -410,-32768,-32768,
+-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768, -37, -64,
+ 372, -13, -27,-32768,-32768,-32768,-32768,-32768, 523, 9,
+-32768,-32768, 521, 389, 622, 529, -28, -65, 694, -79,
+ -147, 354,-32768, -178,-32768,-32768,-32768, 394, -271, -114,
+-32768,-32768,-32768,-32768, -50, -281, -448, -347,-32768, 199,
+-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,
+ 203,-32768, -461, 153,-32768, 152,-32768, 499,-32768, -222,
+-32768,-32768,-32768, 425, -180,-32768,-32768,-32768,-32768
+};
+
+
+#define YYLAST 2086
+
+
+static const short yytable[] = { 76,
+ 84, 29, 31, 33, 41, 104, 107, 264, 252, 206,
+ 290, 19, 19, 279, 43, 47, 48, 20, 20, 352,
+ 200, 114, 179, 255, 90, 227, 291, 415, 183, 134,
+ 135, 86, 491, 108, 317, 137, 201, 139, 535, 168,
+ 55, 142, 116, 141, -94, 262, 195, 436, 338, 86,
+ 533, 522, 94, 280, 452, 36, 99, 136, 26, 27,
+ -1, 189, 34, 95, 96, 26, 27, 94, 86, 26,
+ 27, 36, 99, 489, 384, 181, 582, 386, -2, 529,
+ 182, 114, 289, 436, 178, 95, 96, 90, 294, 206,
+ 254, 388, 207, 208, 139, 35, 139, 42, 36, 223,
+ 221, 571, 222, 100, 36, 324, 315, 322, 180, 287,
+ 523, 116, 101, 553, 583, 437, 556, 591, 216, 100,
+ 218, 178, 51, -188, 586, 86, -94, 86, 101, 590,
+ 226, 592, 253, 262, 219, 286, 220, 259, 28, 288,
+ 599, 414, 195, 610, 576, 30, 37, 114, 54, 32,
+ 36, 324, 325, 457, 195, 38, 249, 250, 56, 336,
+ 195, 326, 172, 36, 337, 301, 602, 301, 196, 621,
+ 26, 27, 543, 293, 143, 396, 424, 598, 255, 190,
+ 411, 612, -97, -97, -97, -97, 181, 120, -97, 127,
+ -97, -97, -97, 425, 311, 94, 36, 99, 325, 144,
+ 180, 168, 329, 332, 603, 116, -97, 326, 172, 36,
+ 169, 330, 315, 180, 429, 315, 434, 176, 508, 613,
+ 326, 172, 170, 368, 185, 418, 321, 453, 95, 96,
+ 419, 171, 172, 91, 196, 114, 190, 92, 551, -288,
+ -288, -288, -288, 213, 100, 360, 212, -288, -288, -288,
+ 363, -190, 434, 101, 328, 331, 224, 330, 496, 217,
+ 36, -97, 451, -288, 180, 48, 326, 172, 202, 24,
+ 25, 180, 203, 230, 231, 232, 225, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 342, 228, 121, 260, 261, 5, 343, 7, 83,
+ 301, 226, 492, 397, 9, 10, 11, 493, 37, 209,
+ 229, 316, 90, 92, 389, 170, 494, 38, -93, 59,
+ 13, 144, 477, 557, 171, 172, 334, 413, 144, 619,
+ 335, 416, 26, 27, 620, 530, 376, 370, 372, 48,
+ 94, 295, 477, 397, 435, 390, 181, 276, 426, 181,
+ 162, 163, 427, 164, 165, 166, 167, 446, 417, 447,
+ 567, 203, 204, 92, 203, -18, -18, -18, -18, 277,
+ 122, -228, 561, -18, -18, -18, 5, 6, 7, 8,
+ 435, 283, 365, 569, 9, 10, 11, 92, 94, -18,
+ 207, 208, -142, 115, 438, 26, 27, 297, -142, 284,
+ 13, 4, 473, -110, 5, 6, 7, 8, 110, 111,
+ 112, 376, 9, 10, 11, 9, 10, 11, 303, 455,
+ 95, 96, 473, 304, 111, 112, 305, 12, 13, 472,
+ 9, 10, 11, 507, 615, 306, 477, -234, -234, -142,
+ 309, 358, 623, -142, -18, 307, 475, 260, 261, 472,
+ 206, -110, 476, 584, 312, 585, 159, 160, 161, 313,
+ -110, 509, 531, 314, 361, 362, 536, 277, 428, 318,
+ 515, 517, 48, 540, 382, 323, 546, 545, 14, 155,
+ 156, 157, 158, 159, 160, 161, 406, 333, 552, 130,
+ 132, 5, 44, 7, 45, 344, 347, 559, 348, 9,
+ 10, 11, 350, 548, 351, 162, 163, 566, 164, 165,
+ 166, 167, 353, 367, 382, 13, 473, 423, 152, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161, 138, 359,
+ 57, 5, 439, 7, 83, 58, 59, 420, 60, 9,
+ 10, 11, 369, 472, 157, 158, 159, 160, 161, -268,
+ 406, -30, 441, 454, 442, 13, 61, 445, 443, 62,
+ 448, 63, 64, 456, 495, 46, 606, 486, 65, 406,
+ -31, 66, 406, 503, 497, 500, 67, 68, 69, 501,
+ 504, 505, 70, 71, 510, 512, 265, 72, 266, 5,
+ 6, 7, 8, 502, 513, 267, 514, 9, 10, 11,
+ 26, 27, 373, 374, 375, 518, 526, 73, 74, 532,
+ -83, 528, 542, 13, 539, 541, 525, 558, 560, 406,
+ 562, 190, 563, -106, -106, -106, -106, -106, -106, -106,
+ 574, -106, -106, -106, -106, -106, 565, -106, -106, -106,
+ -106, -106, -106, -106, -106, -106, -106, -106, -106, -106,
+ 575, 577, -106, 406, -106, -106, 406, 587, 593, 597,
+ 600, -106, -348, 601, -106, 604, 564, 625, 608, -106,
+ -106, -106, 611, 614, 626, -106, -106, 617, 622, 23,
+ -106, 281, 534, 430, 406, 109, 36, 5, 44, 7,
+ 45, 123, 285, 449, 422, 9, 10, 11, -106, -106,
+ -106, -106, 392, -106, -288, -288, -288, -288, -288, -288,
+ -288, 13, -288, -288, -288, -288, -288, 406, -288, -288,
+ -288, -288, -288, -288, -288, -288, -288, -288, -288, -288,
+ -288, 410, 263, -288, 330, -288, -288, 298, 302, 387,
+ 215, 296, -288, 326, 172, -288, 125, 431, 399, 572,
+ -288, -288, -288, 573, 607, 609, -288, -288, 319, 366,
+ 0, -288, 93, 0, 0, -26, -26, -26, -26, 0,
+ 0, 0, 0, -26, -26, -26, 0, 0, 0, -288,
+ 0, -288, -288, 190, -288, -288, -288, 0, 94, -26,
+ -288, -288, -142, -288, 0, 0, 0, -288, -142, -288,
+ -288, -288, -288, -288, -288, -288, -288, -288, -288, -288,
+ 0, -288, 0, 0, -288, 0, -288, -288, 0, 0,
+ 0, 95, 96, -288, 0, 0, -288, 0, 0, 0,
+ 0, -288, -288, -288, 0, 0, 0, -288, -288, -142,
+ 0, 0, -288, -142, -26, 0, 265, 0, 0, 5,
+ 6, 7, 8, 0, 0, 267, 0, 9, 10, 11,
+ -288, 433, -288, -288, 190, -288, -288, -288, 0, 0,
+ 0, -288, -288, 13, -288, 0, 0, 0, -288, 0,
+ -288, -288, -288, -288, -288, -288, -288, -288, -288, -288,
+ -288, 0, -288, 0, 0, -288, 0, -288, -288, 0,
+ 0, 0, 0, 0, -288, 0, 0, -288, 0, 0,
+ 0, 0, -288, -288, -288, 0, 0, 0, -288, -288,
+ 0, 0, -348, -288, 210, 0, 0, -22, -22, -22,
+ -22, 0, 0, 0, 0, -22, -22, -22, 0, 0,
+ 0, -288, -269, -288, -288, 519, -288, -288, -288, 0,
+ 94, -22, -288, -288, -142, -288, 0, 0, 0, -288,
+ -142, -288, -288, -288, -288, -288, -288, -288, -288, -288,
+ -288, -288, 0, -288, 0, 0, -288, 0, -288, -288,
+ 0, 0, 0, 95, 96, -288, 0, 0, -288, 0,
+ 0, 0, 0, -288, -288, -288, 0, 0, 0, -288,
+ -288, -142, 0, 0, -288, -142, -22, 0, 5, 0,
+ 7, 177, 0, 0, 0, 0, 9, 10, 11, 0,
+ 0, 0, -288, 0, -288, -288, 549, -288, -298, -298,
+ 0, 0, 13, -298, -298, 0, -298, 0, 0, 0,
+ -298, 0, -298, -298, -298, -298, -298, -298, -298, -298,
+ -298, -298, -298, 5, -298, 7, 177, -298, 0, -298,
+ -298, 9, 10, 11, 0, 0, -298, 0, 0, -298,
+ 0, 0, 0, 0, -298, -298, -298, 13, 0, 0,
+ -298, -298, -236, -236, 400, -298, 401, 27, 0, 0,
+ 0, 58, 59, 0, 60, 0, 0, 0, 0, 0,
+ 170, 0, 0, -298, 0, -298, -298, 0, -298, 171,
+ 172, 0, 61, 0, 0, 62, 0, 63, 64, 0,
+ 0, 0, 0, 0, 65, 0, 0, 66, 0, 0,
+ 0, 0, 67, 68, 69, 0, 0, 0, 70, 71,
+ 0, 0, 402, 72, 403, 400, 0, 401, 27, 0,
+ 0, 0, 58, 59, 0, 60, 0, 0, 0, 0,
+ 0, 0, -170, 73, 74, 0, 404, 0, 0, 0,
+ 0, 0, 0, 61, 0, 0, 62, 0, 63, 64,
+ 0, 0, 0, 0, 0, 65, 0, 0, 66, 0,
+ 0, 0, 0, 67, 68, 69, 0, 0, 0, 70,
+ 71, 0, 0, 402, 72, 403, 400, 0, 401, 27,
+ 0, 0, 0, 58, 59, 0, 60, 0, 0, 0,
+ 0, 0, 0, -225, 73, 74, 0, 404, 0, 0,
+ 0, 0, 0, 0, 61, 0, 0, 62, 0, 63,
+ 64, 0, 0, 0, 0, 0, 65, 0, 0, 66,
+ 0, 0, 0, 0, 67, 68, 69, 0, 0, 0,
+ 70, 71, 0, 0, 402, 72, 403, 380, 0, 57,
+ 0, 0, 0, 0, 58, 59, 0, 60, 0, 0,
+ 0, 5, 44, 7, 45, 73, 74, 0, 404, 9,
+ 10, 11, 0, 0, 0, 61, 0, 0, 62, 0,
+ 63, 64, 0, 0, 0, 13, 0, 65, 0, 0,
+ 66, 0, 0, 0, 0, 67, 68, 69, 0, 57,
+ 0, 70, 71, 0, 58, 59, 72, 60, 0, 0,
+ 0, 0, 57, 0, 0, 0, 0, 58, 59, 0,
+ 60, 0, 0, 0, 0, 61, 73, 74, 62, 381,
+ 63, 64, 0, 0, 0, 371, 0, 65, 61, 0,
+ 66, 62, 0, 63, 64, 67, 68, 69, 0, 0,
+ 65, 70, 71, 66, 0, 0, 72, 0, 67, 68,
+ 69, 0, 0, 0, 70, 71, 57, 0, 0, 72,
+ 0, 58, 59, 0, 60, 0, 73, 74, 0, 57,
+ 188, 0, 0, 0, 58, 59, 0, 60, 0, 73,
+ 74, 0, 61, 258, 0, 62, 0, 63, 64, 0,
+ 0, 0, 0, 0, 65, 61, 0, 66, 62, 0,
+ 63, 64, 67, 68, 69, 0, 0, 65, 70, 71,
+ 66, 0, 0, 72, 0, 67, 68, 69, 0, 0,
+ 0, 70, 71, 57, 0, 0, 72, 0, 58, 59,
+ 0, 60, 0, 73, 74, 0, 0, 292, 154, 155,
+ 156, 157, 158, 159, 160, 161, 73, 74, 0, 61,
+ 320, 0, 62, 0, 63, 64, 537, 5, 6, 7,
+ 8, 65, 0, 0, 66, 9, 10, 11, 0, 67,
+ 68, 69, 0, 0, 0, 70, 71, 0, 0, 0,
+ 72, 13, 145, 146, 147, 538, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159, 160, 161,
+ 73, 74, 0, 0, 412, 401, 458, 6, 7, 8,
+ 58, 59, 0, 60, 9, 10, 11, 459, 0, 460,
+ 461, 462, 463, 464, 465, 466, 467, 468, 469, 470,
+ 13, 61, 0, 0, 62, 0, 63, 64, 0, 0,
+ 0, 0, 0, 65, 0, 0, 66, 0, 0, 0,
+ 0, 67, 68, 69, 0, 401, 27, 70, 71, 0,
+ 58, 59, 72, 60, 0, 0, 0, 459, 0, 460,
+ 461, 462, 463, 464, 465, 466, 467, 468, 469, 470,
+ 471, 61, 73, 74, 62, 226, 63, 64, 0, 0,
+ 0, 0, 0, 65, 0, 0, 66, 0, 0, 0,
+ 0, 67, 68, 69, 0, 57, 0, 70, 71, 0,
+ 58, 59, 72, 60, 0, 0, 5, 44, 7, 45,
+ 0, 0, 0, 0, 9, 10, 11, 0, 0, 0,
+ 471, 61, 73, 74, 62, 226, 63, 64, 0, 0,
+ 13, 0, 0, 65, 0, 0, 66, 0, 0, 0,
+ 0, 67, 68, 69, 0, 0, 0, 70, 71, 0,
+ 0, 0, 72, 57, 5, 0, 7, 83, 58, 59,
+ 0, 60, 9, 10, 11, 0, 0, 0, 0, 0,
+ 0, 0, 73, 74, 0, 310, 0, 0, 13, 61,
+ 516, 0, 62, 0, 63, 64, 0, 57, 0, 0,
+ 0, 65, 58, 59, 66, 60, 0, 0, 0, 67,
+ 68, 69, 0, 0, 0, 70, 71, 0, 0, 0,
+ 72, 0, 0, 61, 0, 0, 62, 0, 63, 64,
+ 0, 0, 0, 0, 0, 65, 0, 0, 66, 0,
+ 73, 74, 0, 67, 68, 69, 0, 57, 0, 70,
+ 71, 0, 58, 59, 72, 60, 0, 0, 0, 0,
+ 57, 0, 0, 0, 0, 58, 59, 0, 60, 0,
+ 0, 0, 506, 61, 73, 74, 62, 0, 63, 64,
+ 0, 0, 0, 0, 0, 65, 61, 0, 66, 62,
+ 0, 63, 64, 67, 68, 69, 0, 0, 65, 70,
+ 71, 66, 0, 0, 72, 0, 67, 68, 69, 0,
+ 57, 0, 70, 71, 0, 58, 59, 129, 60, 0,
+ 0, 0, 0, 450, 73, 74, 0, 0, 58, 59,
+ 0, 60, 0, 0, 0, 0, 61, 73, 74, 62,
+ 0, 63, 64, 0, 0, 0, 0, 0, 65, 61,
+ 0, 66, 62, 0, 63, 64, 67, 68, 69, 0,
+ 0, 65, 70, 71, 66, 0, 0, 131, 0, 67,
+ 68, 69, 0, 0, 0, 70, 71, 487, 0, 0,
+ 72, 0, 0, 0, 0, 0, 0, 73, 74, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159, 160, 161,
+ 73, 74, 0, 145, 146, 147, 0, 148, 149, 150,
+ 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
+ 161, 145, 146, 147, 0, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 0,
+ 0, 0, 0, 196, 0, 0, 0, 0, 145, 146,
+ 147, 488, 148, 149, 150, 151, 152, 153, 154, 155,
+ 156, 157, 158, 159, 160, 161, 0, 0, 0, 554,
+ 145, 146, 147, 581, 148, 149, 150, 151, 152, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161, 145, 146,
+ 147, 0, 148, 149, 150, 151, 152, 153, 154, 155,
+ 156, 157, 158, 159, 160, 161, 147, 0, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 149, 150, 151, 152, 153, 154, 155, 156,
+ 157, 158, 159, 160, 161, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 153, 154, 155,
+ 156, 157, 158, 159, 160, 161
+};
+
+static const short yycheck[] = { 34,
+ 35, 9, 10, 11, 18, 43, 47, 185, 166, 104,
+ 206, 2, 3, 192, 19, 20, 20, 2, 3, 301,
+ 100, 49, 88, 171, 38, 140, 207, 370, 92, 63,
+ 64, 35, 443, 47, 257, 69, 101, 72, 500, 9,
+ 32, 75, 50, 72, 1, 175, 97, 395, 1, 53,
+ 499, 1, 27, 10, 421, 3, 4, 65, 3, 4,
+ 0, 96, 60, 60, 61, 3, 4, 27, 72, 3,
+ 4, 3, 4, 440, 346, 89, 39, 349, 0, 490,
+ 77, 109, 205, 431, 88, 60, 61, 101, 211, 184,
+ 170, 39, 60, 61, 129, 60, 131, 78, 3, 133,
+ 129, 550, 131, 51, 3, 4, 254, 77, 7, 77,
+ 60, 119, 60, 524, 77, 397, 527, 579, 123, 51,
+ 125, 125, 83, 83, 573, 129, 83, 131, 60, 578,
+ 83, 580, 167, 263, 126, 200, 128, 172, 83, 203,
+ 589, 364, 193, 605, 555, 83, 51, 175, 83, 83,
+ 3, 4, 51, 435, 205, 60, 164, 165, 83, 77,
+ 211, 60, 61, 3, 82, 216, 39, 218, 31, 618,
+ 3, 4, 515, 208, 77, 354, 39, 588, 326, 1,
+ 361, 39, 4, 5, 6, 7, 200, 79, 10, 79,
+ 12, 13, 14, 389, 228, 27, 3, 4, 51, 82,
+ 7, 9, 268, 269, 77, 213, 28, 60, 61, 3,
+ 77, 51, 360, 7, 393, 363, 395, 77, 51, 77,
+ 60, 61, 51, 338, 60, 77, 261, 423, 60, 61,
+ 82, 60, 61, 78, 31, 263, 1, 82, 520, 4,
+ 5, 6, 7, 82, 51, 325, 37, 12, 13, 14,
+ 330, 83, 431, 60, 268, 269, 77, 51, 454, 78,
+ 3, 83, 420, 28, 7, 269, 60, 61, 78, 78,
+ 79, 7, 82, 144, 145, 146, 77, 148, 149, 150,
+ 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
+ 161, 282, 77, 1, 60, 61, 4, 282, 6, 7,
+ 351, 83, 77, 354, 12, 13, 14, 82, 51, 78,
+ 78, 77, 326, 82, 352, 51, 77, 60, 83, 9,
+ 28, 82, 437, 77, 60, 61, 78, 362, 82, 77,
+ 82, 372, 3, 4, 82, 493, 344, 342, 343, 343,
+ 27, 212, 457, 394, 395, 353, 360, 84, 78, 363,
+ 55, 56, 82, 58, 59, 60, 61, 78, 372, 78,
+ 78, 82, 1, 82, 82, 4, 5, 6, 7, 78,
+ 78, 79, 77, 12, 13, 14, 4, 5, 6, 7,
+ 431, 60, 10, 78, 12, 13, 14, 82, 27, 28,
+ 60, 61, 31, 1, 402, 3, 4, 79, 37, 37,
+ 28, 1, 437, 3, 4, 5, 6, 7, 5, 6,
+ 7, 419, 12, 13, 14, 12, 13, 14, 79, 427,
+ 60, 61, 457, 79, 6, 7, 77, 27, 28, 437,
+ 12, 13, 14, 468, 612, 77, 551, 78, 79, 78,
+ 77, 312, 620, 82, 83, 79, 437, 60, 61, 457,
+ 545, 51, 437, 568, 39, 570, 51, 52, 53, 77,
+ 60, 469, 497, 84, 60, 61, 501, 78, 79, 84,
+ 475, 476, 476, 508, 345, 77, 517, 515, 78, 47,
+ 48, 49, 50, 51, 52, 53, 357, 77, 523, 60,
+ 61, 4, 5, 6, 7, 60, 37, 532, 84, 12,
+ 13, 14, 79, 517, 82, 55, 56, 542, 58, 59,
+ 60, 61, 33, 3, 385, 28, 551, 388, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53, 1, 84,
+ 3, 4, 403, 6, 7, 8, 9, 60, 11, 12,
+ 13, 14, 78, 551, 49, 50, 51, 52, 53, 79,
+ 421, 39, 39, 424, 79, 28, 29, 84, 82, 32,
+ 77, 34, 35, 79, 79, 78, 601, 37, 41, 440,
+ 39, 44, 443, 39, 60, 60, 49, 50, 51, 60,
+ 78, 78, 55, 56, 7, 39, 1, 60, 3, 4,
+ 5, 6, 7, 464, 78, 10, 78, 12, 13, 14,
+ 3, 4, 5, 6, 7, 16, 37, 80, 81, 60,
+ 83, 79, 60, 28, 78, 78, 487, 77, 17, 490,
+ 78, 1, 77, 3, 4, 5, 6, 7, 8, 9,
+ 77, 11, 12, 13, 14, 15, 78, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 37, 77, 32, 524, 34, 35, 527, 78, 9, 78,
+ 78, 41, 77, 60, 44, 82, 537, 0, 78, 49,
+ 50, 51, 77, 77, 0, 55, 56, 78, 78, 3,
+ 60, 193, 500, 394, 555, 48, 3, 4, 5, 6,
+ 7, 53, 198, 419, 385, 12, 13, 14, 78, 79,
+ 80, 81, 1, 83, 3, 4, 5, 6, 7, 8,
+ 9, 28, 11, 12, 13, 14, 15, 588, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 360, 178, 32, 51, 34, 35, 215, 218, 351,
+ 119, 213, 41, 60, 61, 44, 53, 394, 355, 551,
+ 49, 50, 51, 551, 602, 604, 55, 56, 260, 335,
+ -1, 60, 1, -1, -1, 4, 5, 6, 7, -1,
+ -1, -1, -1, 12, 13, 14, -1, -1, -1, 78,
+ -1, 80, 81, 1, 83, 3, 4, -1, 27, 28,
+ 8, 9, 31, 11, -1, -1, -1, 15, 37, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ -1, 29, -1, -1, 32, -1, 34, 35, -1, -1,
+ -1, 60, 61, 41, -1, -1, 44, -1, -1, -1,
+ -1, 49, 50, 51, -1, -1, -1, 55, 56, 78,
+ -1, -1, 60, 82, 83, -1, 1, -1, -1, 4,
+ 5, 6, 7, -1, -1, 10, -1, 12, 13, 14,
+ 78, 79, 80, 81, 1, 83, 3, 4, -1, -1,
+ -1, 8, 9, 28, 11, -1, -1, -1, 15, -1,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, -1, 29, -1, -1, 32, -1, 34, 35, -1,
+ -1, -1, -1, -1, 41, -1, -1, 44, -1, -1,
+ -1, -1, 49, 50, 51, -1, -1, -1, 55, 56,
+ -1, -1, 77, 60, 1, -1, -1, 4, 5, 6,
+ 7, -1, -1, -1, -1, 12, 13, 14, -1, -1,
+ -1, 78, 79, 80, 81, 1, 83, 3, 4, -1,
+ 27, 28, 8, 9, 31, 11, -1, -1, -1, 15,
+ 37, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, -1, 29, -1, -1, 32, -1, 34, 35,
+ -1, -1, -1, 60, 61, 41, -1, -1, 44, -1,
+ -1, -1, -1, 49, 50, 51, -1, -1, -1, 55,
+ 56, 78, -1, -1, 60, 82, 83, -1, 4, -1,
+ 6, 7, -1, -1, -1, -1, 12, 13, 14, -1,
+ -1, -1, 78, -1, 80, 81, 1, 83, 3, 4,
+ -1, -1, 28, 8, 9, -1, 11, -1, -1, -1,
+ 15, -1, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 4, 29, 6, 7, 32, -1, 34,
+ 35, 12, 13, 14, -1, -1, 41, -1, -1, 44,
+ -1, -1, -1, -1, 49, 50, 51, 28, -1, -1,
+ 55, 56, 78, 79, 1, 60, 3, 4, -1, -1,
+ -1, 8, 9, -1, 11, -1, -1, -1, -1, -1,
+ 51, -1, -1, 78, -1, 80, 81, -1, 83, 60,
+ 61, -1, 29, -1, -1, 32, -1, 34, 35, -1,
+ -1, -1, -1, -1, 41, -1, -1, 44, -1, -1,
+ -1, -1, 49, 50, 51, -1, -1, -1, 55, 56,
+ -1, -1, 59, 60, 61, 1, -1, 3, 4, -1,
+ -1, -1, 8, 9, -1, 11, -1, -1, -1, -1,
+ -1, -1, 79, 80, 81, -1, 83, -1, -1, -1,
+ -1, -1, -1, 29, -1, -1, 32, -1, 34, 35,
+ -1, -1, -1, -1, -1, 41, -1, -1, 44, -1,
+ -1, -1, -1, 49, 50, 51, -1, -1, -1, 55,
+ 56, -1, -1, 59, 60, 61, 1, -1, 3, 4,
+ -1, -1, -1, 8, 9, -1, 11, -1, -1, -1,
+ -1, -1, -1, 79, 80, 81, -1, 83, -1, -1,
+ -1, -1, -1, -1, 29, -1, -1, 32, -1, 34,
+ 35, -1, -1, -1, -1, -1, 41, -1, -1, 44,
+ -1, -1, -1, -1, 49, 50, 51, -1, -1, -1,
+ 55, 56, -1, -1, 59, 60, 61, 1, -1, 3,
+ -1, -1, -1, -1, 8, 9, -1, 11, -1, -1,
+ -1, 4, 5, 6, 7, 80, 81, -1, 83, 12,
+ 13, 14, -1, -1, -1, 29, -1, -1, 32, -1,
+ 34, 35, -1, -1, -1, 28, -1, 41, -1, -1,
+ 44, -1, -1, -1, -1, 49, 50, 51, -1, 3,
+ -1, 55, 56, -1, 8, 9, 60, 11, -1, -1,
+ -1, -1, 3, -1, -1, -1, -1, 8, 9, -1,
+ 11, -1, -1, -1, -1, 29, 80, 81, 32, 83,
+ 34, 35, -1, -1, -1, 78, -1, 41, 29, -1,
+ 44, 32, -1, 34, 35, 49, 50, 51, -1, -1,
+ 41, 55, 56, 44, -1, -1, 60, -1, 49, 50,
+ 51, -1, -1, -1, 55, 56, 3, -1, -1, 60,
+ -1, 8, 9, -1, 11, -1, 80, 81, -1, 3,
+ 84, -1, -1, -1, 8, 9, -1, 11, -1, 80,
+ 81, -1, 29, 84, -1, 32, -1, 34, 35, -1,
+ -1, -1, -1, -1, 41, 29, -1, 44, 32, -1,
+ 34, 35, 49, 50, 51, -1, -1, 41, 55, 56,
+ 44, -1, -1, 60, -1, 49, 50, 51, -1, -1,
+ -1, 55, 56, 3, -1, -1, 60, -1, 8, 9,
+ -1, 11, -1, 80, 81, -1, -1, 84, 46, 47,
+ 48, 49, 50, 51, 52, 53, 80, 81, -1, 29,
+ 84, -1, 32, -1, 34, 35, 10, 4, 5, 6,
+ 7, 41, -1, -1, 44, 12, 13, 14, -1, 49,
+ 50, 51, -1, -1, -1, 55, 56, -1, -1, -1,
+ 60, 28, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 80, 81, -1, -1, 84, 3, 4, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 14, 15, -1, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, -1, -1, 32, -1, 34, 35, -1, -1,
+ -1, -1, -1, 41, -1, -1, 44, -1, -1, -1,
+ -1, 49, 50, 51, -1, 3, 4, 55, 56, -1,
+ 8, 9, 60, 11, -1, -1, -1, 15, -1, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 78, 29, 80, 81, 32, 83, 34, 35, -1, -1,
+ -1, -1, -1, 41, -1, -1, 44, -1, -1, -1,
+ -1, 49, 50, 51, -1, 3, -1, 55, 56, -1,
+ 8, 9, 60, 11, -1, -1, 4, 5, 6, 7,
+ -1, -1, -1, -1, 12, 13, 14, -1, -1, -1,
+ 78, 29, 80, 81, 32, 83, 34, 35, -1, -1,
+ 28, -1, -1, 41, -1, -1, 44, -1, -1, -1,
+ -1, 49, 50, 51, -1, -1, -1, 55, 56, -1,
+ -1, -1, 60, 3, 4, -1, 6, 7, 8, 9,
+ -1, 11, 12, 13, 14, -1, -1, -1, -1, -1,
+ -1, -1, 80, 81, -1, 83, -1, -1, 28, 29,
+ 78, -1, 32, -1, 34, 35, -1, 3, -1, -1,
+ -1, 41, 8, 9, 44, 11, -1, -1, -1, 49,
+ 50, 51, -1, -1, -1, 55, 56, -1, -1, -1,
+ 60, -1, -1, 29, -1, -1, 32, -1, 34, 35,
+ -1, -1, -1, -1, -1, 41, -1, -1, 44, -1,
+ 80, 81, -1, 49, 50, 51, -1, 3, -1, 55,
+ 56, -1, 8, 9, 60, 11, -1, -1, -1, -1,
+ 3, -1, -1, -1, -1, 8, 9, -1, 11, -1,
+ -1, -1, 78, 29, 80, 81, 32, -1, 34, 35,
+ -1, -1, -1, -1, -1, 41, 29, -1, 44, 32,
+ -1, 34, 35, 49, 50, 51, -1, -1, 41, 55,
+ 56, 44, -1, -1, 60, -1, 49, 50, 51, -1,
+ 3, -1, 55, 56, -1, 8, 9, 60, 11, -1,
+ -1, -1, -1, 3, 80, 81, -1, -1, 8, 9,
+ -1, 11, -1, -1, -1, -1, 29, 80, 81, 32,
+ -1, 34, 35, -1, -1, -1, -1, -1, 41, 29,
+ -1, 44, 32, -1, 34, 35, 49, 50, 51, -1,
+ -1, 41, 55, 56, 44, -1, -1, 60, -1, 49,
+ 50, 51, -1, -1, -1, 55, 56, 10, -1, -1,
+ 60, -1, -1, -1, -1, -1, -1, 80, 81, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 80, 81, -1, 36, 37, 38, -1, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 36, 37, 38, -1, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, -1,
+ -1, -1, -1, 31, -1, -1, -1, -1, 36, 37,
+ 38, 84, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, -1, -1, -1, 84,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53, 36, 37,
+ 38, -1, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 38, -1, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53
+};
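+
+/* How the tables above cooperate (see the dispatch code in yyparse):
+   for state S and translated token T the parser computes I = yypact[S] + T.
+   If I is in range and yycheck[I] == T, then yytable[I] gives the action:
+   a positive value shifts to that state, a negative value reduces by
+   rule -yytable[I], and 0 or YYFLAG signals a syntax error.  Any miss
+   falls back on the default reduction in yydefact[S].  */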
+/* -*-C-*- Note some compilers choke on comments on `#line' lines. */
+#line 3 "/usr/local/lib/bison.simple"
+
+/* Skeleton output parser for bison,
+ Copyright (C) 1984, 1989, 1990 Bob Corbett and Richard Stallman
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 1, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#ifndef alloca
+#ifdef __GNUC__
+#define alloca __builtin_alloca
+#else /* not GNU C. */
+#if (!defined (__STDC__) && defined (sparc)) || defined (__sparc__) || defined (__sparc) || defined (__sgi)
+#include <alloca.h>
+#else /* not sparc */
+#if defined (MSDOS) && !defined (__TURBOC__)
+#include <malloc.h>
+#else /* not MSDOS, or __TURBOC__ */
+#if defined(_AIX)
+#include <malloc.h>
+ #pragma alloca
+#else /* not MSDOS, __TURBOC__, or _AIX */
+#ifdef __hpux
+#ifdef __cplusplus
+extern "C" {
+void *alloca (unsigned int);
+};
+#else /* not __cplusplus */
+void *alloca ();
+#endif /* not __cplusplus */
+#endif /* __hpux */
+#endif /* not _AIX */
+#endif /* not MSDOS, or __TURBOC__ */
+#endif /* not sparc. */
+#endif /* not GNU C. */
+#endif /* alloca not defined. */
+
+/* This is the parser code that is written into each bison parser
+ when the %semantic_parser declaration is not specified in the grammar.
+ It was written by Richard Stallman by simplifying the hairy parser
+ used when %semantic_parser is specified. */
+
+/* Note: there must be only one dollar sign in this file.
+ It is replaced by the list of actions, each action
+ as one case of the switch. */
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY -2
+#define YYEOF 0
+#define YYACCEPT return(0)
+#define YYABORT return(1)
+#define YYERROR goto yyerrlab1
+/* Like YYERROR except do call yyerror.
+ This remains here temporarily to ease the
+ transition to the new meaning of YYERROR, for GCC.
+ Once GCC version 2 has supplanted version 1, this can go. */
+#define YYFAIL goto yyerrlab
+#define YYRECOVERING() (!!yyerrstatus)
+#define YYBACKUP(token, value) \
+do \
+ if (yychar == YYEMPTY && yylen == 1) \
+ { yychar = (token), yylval = (value); \
+ yychar1 = YYTRANSLATE (yychar); \
+ YYPOPSTACK; \
+ goto yybackup; \
+ } \
+ else \
+ { yyerror ("syntax error: cannot back up"); YYERROR; } \
+while (0)
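+
+/* Note that YYBACKUP can only be used from the action of a rule whose
+   length is 1 and before any lookahead has been read: the one shifted
+   entry is popped and parsing resumes at yybackup with the substituted
+   token.  */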
+
+#define YYTERROR 1
+#define YYERRCODE 256
+
+#ifndef YYPURE
+#define YYLEX yylex()
+#endif
+
+#ifdef YYPURE
+#ifdef YYLSP_NEEDED
+#define YYLEX yylex(&yylval, &yylloc)
+#else
+#define YYLEX yylex(&yylval)
+#endif
+#endif
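+
+/* With YYPURE, yychar, yylval and yynerrs are instead declared as
+   locals of yyparse (see below), and yylval (plus yylloc when the
+   location stack is in use) is passed to yylex by address, which
+   makes the parser reentrant.  */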
+
+/* If nonreentrant, generate the variables here */
+
+#ifndef YYPURE
+
+int yychar; /* the lookahead symbol */
+YYSTYPE yylval;		/* the semantic value of the lookahead symbol */
+
+#ifdef YYLSP_NEEDED
+YYLTYPE yylloc;		/* location data for the lookahead symbol */
+#endif
+
+int yynerrs; /* number of parse errors so far */
+#endif /* not YYPURE */
+
+#if YYDEBUG != 0
+int yydebug; /* nonzero means print parse trace */
+/* Since this is uninitialized, it does not stop multiple parsers
+ from coexisting. */
+#endif
+
+/* YYINITDEPTH indicates the initial size of the parser's stacks */
+
+#ifndef YYINITDEPTH
+#define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH is the maximum size the stacks can grow to
+ (effective only if the built-in stack extension method is used). */
+
+#if YYMAXDEPTH == 0
+#undef YYMAXDEPTH
+#endif
+
+#ifndef YYMAXDEPTH
+#define YYMAXDEPTH 10000
+#endif
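+
+/* When the stacks fill, yyparse either invokes the user's yyoverflow
+   hook or doubles yystacksize itself with alloca, clamping the result
+   to YYMAXDEPTH; see the stack-extension code in yyparse below.  */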
+
+/* Prevent warning if -Wstrict-prototypes. */
+#ifdef __GNUC__
+int yyparse (void);
+#endif
+
+#if __GNUC__ > 1 /* GNU C and GNU C++ define this. */
+#define __yy_bcopy(FROM,TO,COUNT) __builtin_memcpy(TO,FROM,COUNT)
+#else /* not GNU C or C++ */
+#ifndef __cplusplus
+
+/* This is the most reliable way to avoid incompatibilities
+ in available built-in functions on various systems. */
+static void
+__yy_bcopy (from, to, count)
+ char *from;
+ char *to;
+ int count;
+{
+ register char *f = from;
+ register char *t = to;
+ register int i = count;
+
+ while (i-- > 0)
+ *t++ = *f++;
+}
+
+#else /* __cplusplus */
+
+/* This is the most reliable way to avoid incompatibilities
+ in available built-in functions on various systems. */
+static void
+__yy_bcopy (char *from, char *to, int count)
+{
+ register char *f = from;
+ register char *t = to;
+ register int i = count;
+
+ while (i-- > 0)
+ *t++ = *f++;
+}
+
+#endif
+#endif
+
+#line 184 "/usr/local/lib/bison.simple"
+
+/* The user can define YYPARSE_PARAM as the name of an argument to be passed
+ into yyparse. The argument should have type void *.
+ It should actually point to an object.
+ Grammar actions can access the variable by casting it
+ to the proper pointer type. */
+
+#ifdef YYPARSE_PARAM
+#define YYPARSE_PARAM_DECL void *YYPARSE_PARAM;
+#else
+#define YYPARSE_PARAM
+#define YYPARSE_PARAM_DECL
+#endif
+
+int
+yyparse(YYPARSE_PARAM)
+ YYPARSE_PARAM_DECL
+{
+ register int yystate;
+ register int yyn;
+ register short *yyssp;
+ register YYSTYPE *yyvsp;
+ int yyerrstatus; /* number of tokens to shift before error messages enabled */
+ int yychar1 = 0; /* lookahead token as an internal (translated) token number */
+
+ short yyssa[YYINITDEPTH]; /* the state stack */
+ YYSTYPE yyvsa[YYINITDEPTH]; /* the semantic value stack */
+
+ short *yyss = yyssa; /* refer to the stacks thru separate pointers */
+ YYSTYPE *yyvs = yyvsa; /* to allow yyoverflow to reallocate them elsewhere */
+
+#ifdef YYLSP_NEEDED
+ YYLTYPE yylsa[YYINITDEPTH]; /* the location stack */
+ YYLTYPE *yyls = yylsa;
+ YYLTYPE *yylsp;
+
+#define YYPOPSTACK (yyvsp--, yyssp--, yylsp--)
+#else
+#define YYPOPSTACK (yyvsp--, yyssp--)
+#endif
+
+ int yystacksize = YYINITDEPTH;
+
+#ifdef YYPURE
+ int yychar;
+ YYSTYPE yylval;
+ int yynerrs;
+#ifdef YYLSP_NEEDED
+ YYLTYPE yylloc;
+#endif
+#endif
+
+  YYSTYPE yyval;	/* the variable used to return semantic values from the action routines */
+
+ int yylen;
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Starting parse\n");
+#endif
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+
+ /* Initialize stack pointers.
+ Waste one element of value and location stack
+ so that they stay on the same level as the state stack.
+ The wasted elements are never initialized. */
+
+ yyssp = yyss - 1;
+ yyvsp = yyvs;
+#ifdef YYLSP_NEEDED
+ yylsp = yyls;
+#endif
+
+/* Push a new state, which is found in yystate. */
+/* In all cases, when you get here, the value and location stacks
+   have just been pushed, so pushing a state here evens the stacks. */
+yynewstate:
+
+ *++yyssp = yystate;
+
+ if (yyssp >= yyss + yystacksize - 1)
+ {
+ /* Give user a chance to reallocate the stack */
+ /* Use copies of these so that the &'s don't force the real ones into memory. */
+ YYSTYPE *yyvs1 = yyvs;
+ short *yyss1 = yyss;
+#ifdef YYLSP_NEEDED
+ YYLTYPE *yyls1 = yyls;
+#endif
+
+ /* Get the current used size of the three stacks, in elements. */
+ int size = yyssp - yyss + 1;
+
+#ifdef yyoverflow
+ /* Each stack pointer address is followed by the size of
+ the data in use in that stack, in bytes. */
+#ifdef YYLSP_NEEDED
+ /* This used to be a conditional around just the two extra args,
+ but that might be undefined if yyoverflow is a macro. */
+ yyoverflow("parser stack overflow",
+ &yyss1, size * sizeof (*yyssp),
+ &yyvs1, size * sizeof (*yyvsp),
+ &yyls1, size * sizeof (*yylsp),
+ &yystacksize);
+#else
+ yyoverflow("parser stack overflow",
+ &yyss1, size * sizeof (*yyssp),
+ &yyvs1, size * sizeof (*yyvsp),
+ &yystacksize);
+#endif
+
+ yyss = yyss1; yyvs = yyvs1;
+#ifdef YYLSP_NEEDED
+ yyls = yyls1;
+#endif
+#else /* no yyoverflow */
+ /* Extend the stack our own way. */
+ if (yystacksize >= YYMAXDEPTH)
+ {
+ yyerror("parser stack overflow");
+ return 2;
+ }
+ yystacksize *= 2;
+ if (yystacksize > YYMAXDEPTH)
+ yystacksize = YYMAXDEPTH;
+ yyss = (short *) alloca (yystacksize * sizeof (*yyssp));
+ __yy_bcopy ((char *)yyss1, (char *)yyss, size * sizeof (*yyssp));
+ yyvs = (YYSTYPE *) alloca (yystacksize * sizeof (*yyvsp));
+ __yy_bcopy ((char *)yyvs1, (char *)yyvs, size * sizeof (*yyvsp));
+#ifdef YYLSP_NEEDED
+ yyls = (YYLTYPE *) alloca (yystacksize * sizeof (*yylsp));
+ __yy_bcopy ((char *)yyls1, (char *)yyls, size * sizeof (*yylsp));
+#endif
+#endif /* no yyoverflow */
+
+ yyssp = yyss + size - 1;
+ yyvsp = yyvs + size - 1;
+#ifdef YYLSP_NEEDED
+ yylsp = yyls + size - 1;
+#endif
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Stack size increased to %d\n", yystacksize);
+#endif
+
+ if (yyssp >= yyss + yystacksize - 1)
+ YYABORT;
+ }
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Entering state %d\n", yystate);
+#endif
+
+ goto yybackup;
+ yybackup:
+
+/* Do appropriate processing given the current state. */
+/* Read a lookahead token if we need one and don't already have one. */
+/* yyresume: */
+
+ /* First try to decide what to do without reference to lookahead token. */
+
+ yyn = yypact[yystate];
+ if (yyn == YYFLAG)
+ goto yydefault;
+
+  /* Not known => get a lookahead token if we don't already have one.  */
+
+ /* yychar is either YYEMPTY or YYEOF
+ or a valid token in external form. */
+
+ if (yychar == YYEMPTY)
+ {
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Reading a token: ");
+#endif
+ yychar = YYLEX;
+ }
+
+  /* Convert the token to internal form (in yychar1) for use in indexing tables.  */
+
+ if (yychar <= 0) /* This means end of input. */
+ {
+ yychar1 = 0;
+ yychar = YYEOF; /* Don't call YYLEX any more */
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Now at end of input.\n");
+#endif
+ }
+ else
+ {
+ yychar1 = YYTRANSLATE(yychar);
+
+#if YYDEBUG != 0
+ if (yydebug)
+ {
+ fprintf (stderr, "Next token is %d (%s", yychar, yytname[yychar1]);
+ /* Give the individual parser a way to print the precise meaning
+ of a token, for further debugging info. */
+#ifdef YYPRINT
+ YYPRINT (stderr, yychar, yylval);
+#endif
+ fprintf (stderr, ")\n");
+ }
+#endif
+ }
+
+ yyn += yychar1;
+ if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != yychar1)
+ goto yydefault;
+
+ yyn = yytable[yyn];
+
+ /* yyn is what to do for this token type in this state.
+ Negative => reduce, -yyn is rule number.
+ Positive => shift, yyn is new state.
+ New state is final state => don't bother to shift,
+ just return success.
+ 0, or most negative number => error. */
+
+ if (yyn < 0)
+ {
+ if (yyn == YYFLAG)
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+ else if (yyn == 0)
+ goto yyerrlab;
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+ /* Shift the lookahead token. */
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Shifting token %d (%s), ", yychar, yytname[yychar1]);
+#endif
+
+ /* Discard the token being shifted unless it is eof. */
+ if (yychar != YYEOF)
+ yychar = YYEMPTY;
+
+ *++yyvsp = yylval;
+#ifdef YYLSP_NEEDED
+ *++yylsp = yylloc;
+#endif
+
+  /* Count tokens shifted since error; after three, turn off error status.  */
+ if (yyerrstatus) yyerrstatus--;
+
+ yystate = yyn;
+ goto yynewstate;
+
+/* Do the default action for the current state. */
+yydefault:
+
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+
+/* Do a reduction. yyn is the number of a rule to reduce with. */
+yyreduce:
+ yylen = yyr2[yyn];
+ if (yylen > 0)
+ yyval = yyvsp[1-yylen]; /* implement default value of the action */
+
+#if YYDEBUG != 0
+ if (yydebug)
+ {
+ int i;
+
+ fprintf (stderr, "Reducing via rule %d (line %d), ",
+ yyn, yyrline[yyn]);
+
+ /* Print the symbols being reduced, and their result. */
+ for (i = yyprhs[yyn]; yyrhs[i] > 0; i++)
+ fprintf (stderr, "%s ", yytname[yyrhs[i]]);
+ fprintf (stderr, " -> %s\n", yytname[yyr1[yyn]]);
+ }
+#endif
+
+
+ switch (yyn) {
+
+case 1:
+#line 221 "c-parse.y"
+{ if (pedantic)
+ pedwarn ("ANSI C forbids an empty source file");
+ ;
+ break;}
+case 2:
+#line 225 "c-parse.y"
+{
+ /* In case there were missing closebraces,
+ get us back to the global binding level. */
+ while (! global_bindings_p ())
+ poplevel (0, 0, 0);
+ ;
+ break;}
+case 3:
+#line 238 "c-parse.y"
+{yyval.ttype = NULL_TREE; ;
+ break;}
+case 5:
+#line 239 "c-parse.y"
+{yyval.ttype = NULL_TREE; ;
+ break;}
+case 9:
+#line 246 "c-parse.y"
+{ STRIP_NOPS (yyvsp[-2].ttype);
+ if ((TREE_CODE (yyvsp[-2].ttype) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (yyvsp[-2].ttype, 0)) == STRING_CST)
+ || TREE_CODE (yyvsp[-2].ttype) == STRING_CST)
+ assemble_asm (yyvsp[-2].ttype);
+ else
+ error ("argument of `asm' is not a constant string"); ;
+ break;}
+case 10:
+#line 257 "c-parse.y"
+{ if (pedantic)
+ error ("ANSI C forbids data definition with no type or storage class");
+ else if (!flag_traditional)
+ warning ("data definition has no type or storage class"); ;
+ break;}
+case 11:
+#line 262 "c-parse.y"
+{;
+ break;}
+case 12:
+#line 264 "c-parse.y"
+{;
+ break;}
+case 13:
+#line 266 "c-parse.y"
+{ pedwarn ("empty declaration"); ;
+ break;}
+case 14:
+#line 268 "c-parse.y"
+{ shadow_tag (yyvsp[-1].ttype); ;
+ break;}
+case 17:
+#line 272 "c-parse.y"
+{ if (pedantic)
+ pedwarn ("ANSI C does not allow extra `;' outside of a function"); ;
+ break;}
+case 18:
+#line 278 "c-parse.y"
+{ if (! start_function (yyvsp[-2].ttype, yyvsp[0].ttype, 0))
+ YYERROR1;
+ reinit_parse_for_function (); ;
+ break;}
+case 19:
+#line 282 "c-parse.y"
+{ store_parm_decls (); ;
+ break;}
+case 20:
+#line 284 "c-parse.y"
+{ finish_function (0); ;
+ break;}
+case 21:
+#line 286 "c-parse.y"
+{ ;
+ break;}
+case 22:
+#line 288 "c-parse.y"
+{ if (! start_function (yyvsp[-2].ttype, yyvsp[0].ttype, 0))
+ YYERROR1;
+ reinit_parse_for_function (); ;
+ break;}
+case 23:
+#line 292 "c-parse.y"
+{ store_parm_decls (); ;
+ break;}
+case 24:
+#line 294 "c-parse.y"
+{ finish_function (0); ;
+ break;}
+case 25:
+#line 296 "c-parse.y"
+{ ;
+ break;}
+case 26:
+#line 298 "c-parse.y"
+{ if (! start_function (NULL_TREE, yyvsp[0].ttype, 0))
+ YYERROR1;
+ reinit_parse_for_function (); ;
+ break;}
+case 27:
+#line 302 "c-parse.y"
+{ store_parm_decls (); ;
+ break;}
+case 28:
+#line 304 "c-parse.y"
+{ finish_function (0); ;
+ break;}
+case 29:
+#line 306 "c-parse.y"
+{ ;
+ break;}
+case 32:
+#line 315 "c-parse.y"
+{ yyval.code = ADDR_EXPR; ;
+ break;}
+case 33:
+#line 317 "c-parse.y"
+{ yyval.code = NEGATE_EXPR; ;
+ break;}
+case 34:
+#line 319 "c-parse.y"
+{ yyval.code = CONVERT_EXPR; ;
+ break;}
+case 35:
+#line 321 "c-parse.y"
+{ yyval.code = PREINCREMENT_EXPR; ;
+ break;}
+case 36:
+#line 323 "c-parse.y"
+{ yyval.code = PREDECREMENT_EXPR; ;
+ break;}
+case 37:
+#line 325 "c-parse.y"
+{ yyval.code = BIT_NOT_EXPR; ;
+ break;}
+case 38:
+#line 327 "c-parse.y"
+{ yyval.code = TRUTH_NOT_EXPR; ;
+ break;}
+case 39:
+#line 331 "c-parse.y"
+{ yyval.ttype = build_compound_expr (yyvsp[0].ttype); ;
+ break;}
+case 40:
+#line 336 "c-parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 42:
+#line 342 "c-parse.y"
+{ yyval.ttype = build_tree_list (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 43:
+#line 344 "c-parse.y"
+{ chainon (yyvsp[-2].ttype, build_tree_list (NULL_TREE, yyvsp[0].ttype)); ;
+ break;}
+case 45:
+#line 350 "c-parse.y"
+{ yyval.ttype = build_indirect_ref (yyvsp[0].ttype, "unary *"); ;
+ break;}
+case 46:
+#line 353 "c-parse.y"
+{ yyvsp[0].itype = pedantic;
+ pedantic = 0; ;
+ break;}
+case 47:
+#line 356 "c-parse.y"
+{ yyval.ttype = yyvsp[0].ttype;
+ pedantic = yyvsp[-2].itype; ;
+ break;}
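+/* The paired actions in cases 46 and 47 implement GNU `__extension__':
+   the first saves the global `pedantic' flag and clears it, the second
+   restores it once the operand has been parsed, so pedantic warnings
+   are suppressed inside the extension.  */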
+case 48:
+#line 359 "c-parse.y"
+{ yyval.ttype = build_unary_op (yyvsp[-1].code, yyvsp[0].ttype, 0);
+ overflow_warning (yyval.ttype); ;
+ break;}
+case 49:
+#line 363 "c-parse.y"
+{ tree label = lookup_label (yyvsp[0].ttype);
+ if (label == 0)
+ yyval.ttype = null_pointer_node;
+ else
+ {
+ TREE_USED (label) = 1;
+ yyval.ttype = build1 (ADDR_EXPR, ptr_type_node, label);
+ TREE_CONSTANT (yyval.ttype) = 1;
+ }
+ ;
+ break;}
+case 50:
+#line 389 "c-parse.y"
+{ if (TREE_CODE (yyvsp[0].ttype) == COMPONENT_REF
+ && DECL_BIT_FIELD (TREE_OPERAND (yyvsp[0].ttype, 1)))
+ error ("`sizeof' applied to a bit-field");
+ yyval.ttype = c_sizeof (TREE_TYPE (yyvsp[0].ttype)); ;
+ break;}
+case 51:
+#line 394 "c-parse.y"
+{ yyval.ttype = c_sizeof (groktypename (yyvsp[-1].ttype)); ;
+ break;}
+case 52:
+#line 396 "c-parse.y"
+{ yyval.ttype = c_alignof_expr (yyvsp[0].ttype); ;
+ break;}
+case 53:
+#line 398 "c-parse.y"
+{ yyval.ttype = c_alignof (groktypename (yyvsp[-1].ttype)); ;
+ break;}
+case 54:
+#line 400 "c-parse.y"
+{ yyval.ttype = build_unary_op (REALPART_EXPR, yyvsp[0].ttype, 0); ;
+ break;}
+case 55:
+#line 402 "c-parse.y"
+{ yyval.ttype = build_unary_op (IMAGPART_EXPR, yyvsp[0].ttype, 0); ;
+ break;}
+case 57:
+#line 408 "c-parse.y"
+{ tree type = groktypename (yyvsp[-2].ttype);
+ yyval.ttype = build_c_cast (type, yyvsp[0].ttype); ;
+ break;}
+case 58:
+#line 411 "c-parse.y"
+{ start_init (NULL_TREE, NULL, 0);
+ yyvsp[-2].ttype = groktypename (yyvsp[-2].ttype);
+ really_start_incremental_init (yyvsp[-2].ttype); ;
+ break;}
+case 59:
+#line 415 "c-parse.y"
+{ char *name;
+ tree result = pop_init_level (0);
+ tree type = yyvsp[-5].ttype;
+ finish_init ();
+
+ if (pedantic)
+ pedwarn ("ANSI C forbids constructor expressions");
+ if (TYPE_NAME (type) != 0)
+ {
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ name = IDENTIFIER_POINTER (TYPE_NAME (type));
+ else
+ name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));
+ }
+ else
+ name = "";
+ yyval.ttype = result;
+ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_SIZE (type) == 0)
+ {
+ int failure = complete_array_type (type, yyval.ttype, 1);
+ if (failure)
+ abort ();
+ }
+ ;
+ break;}
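+/* The two actions above parse GNU constructor expressions, a cast-like
+   initializer form; a hypothetical use:
+
+       int *p = (int []) {1, 2, 3};
+
+   pedantic mode warns that ANSI C forbids them, and an array type of
+   unspecified size is completed from the initializer.  */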
+case 61:
+#line 444 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 62:
+#line 446 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 63:
+#line 448 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 64:
+#line 450 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 65:
+#line 452 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 66:
+#line 454 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 67:
+#line 456 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 68:
+#line 458 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 69:
+#line 460 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 70:
+#line 462 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 71:
+#line 464 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 72:
+#line 466 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 73:
+#line 468 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (TRUTH_ANDIF_EXPR, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 74:
+#line 470 "c-parse.y"
+{ yyval.ttype = parser_build_binary_op (TRUTH_ORIF_EXPR, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 75:
+#line 472 "c-parse.y"
+{ yyval.ttype = build_conditional_expr (yyvsp[-4].ttype, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 76:
+#line 474 "c-parse.y"
+{ yyval.ttype = build_modify_expr (yyvsp[-2].ttype, NOP_EXPR, yyvsp[0].ttype);
+ C_SET_EXP_ORIGINAL_CODE (yyval.ttype, MODIFY_EXPR); ;
+ break;}
+case 77:
+#line 477 "c-parse.y"
+{ yyval.ttype = build_modify_expr (yyvsp[-2].ttype, yyvsp[-1].code, yyvsp[0].ttype);
+ /* This inhibits warnings in truthvalue_conversion. */
+ C_SET_EXP_ORIGINAL_CODE (yyval.ttype, ERROR_MARK); ;
+ break;}
+case 78:
+#line 484 "c-parse.y"
+{
+ yyval.ttype = lastiddecl;
+ if (!yyval.ttype || yyval.ttype == error_mark_node)
+ {
+ if (yychar == YYEMPTY)
+ yychar = YYLEX;
+ if (yychar == '(')
+ {
+ {
+ /* Ordinary implicit function declaration. */
+ yyval.ttype = implicitly_declare (yyvsp[0].ttype);
+ assemble_external (yyval.ttype);
+ TREE_USED (yyval.ttype) = 1;
+ }
+ }
+ else if (current_function_decl == 0)
+ {
+ error ("`%s' undeclared here (not in a function)",
+ IDENTIFIER_POINTER (yyvsp[0].ttype));
+ yyval.ttype = error_mark_node;
+ }
+ else
+ {
+ {
+ if (IDENTIFIER_GLOBAL_VALUE (yyvsp[0].ttype) != error_mark_node
+ || IDENTIFIER_ERROR_LOCUS (yyvsp[0].ttype) != current_function_decl)
+ {
+ error ("`%s' undeclared (first use this function)",
+ IDENTIFIER_POINTER (yyvsp[0].ttype));
+
+ if (! undeclared_variable_notice)
+ {
+ error ("(Each undeclared identifier is reported only once");
+ error ("for each function it appears in.)");
+ undeclared_variable_notice = 1;
+ }
+ }
+ yyval.ttype = error_mark_node;
+ /* Prevent repeated error messages. */
+ IDENTIFIER_GLOBAL_VALUE (yyvsp[0].ttype) = error_mark_node;
+ IDENTIFIER_ERROR_LOCUS (yyvsp[0].ttype) = current_function_decl;
+ }
+ }
+ }
+ else if (TREE_TYPE (yyval.ttype) == error_mark_node)
+ yyval.ttype = error_mark_node;
+ else if (C_DECL_ANTICIPATED (yyval.ttype))
+ {
+	    /* This is the first use of a built-in function
+	       that has not been declared explicitly.  */
+ C_DECL_ANTICIPATED (yyval.ttype) = 0;
+ if (yychar == YYEMPTY)
+ yychar = YYLEX;
+ if (yychar == '(')
+ {
+		      /* Omit the implicit declaration we
+			 would ordinarily do, so we don't lose
+			 the actual built-in type.
+			 But print a diagnostic for the mismatch.  */
+ if (TREE_CODE (yyval.ttype) != FUNCTION_DECL)
+ error ("`%s' implicitly declared as function",
+ IDENTIFIER_POINTER (DECL_NAME (yyval.ttype)));
+ else if ((TYPE_MODE (TREE_TYPE (TREE_TYPE (yyval.ttype)))
+ != TYPE_MODE (integer_type_node))
+ && (TREE_TYPE (TREE_TYPE (yyval.ttype))
+ != void_type_node))
+ pedwarn ("type mismatch in implicit declaration for built-in function `%s'",
+ IDENTIFIER_POINTER (DECL_NAME (yyval.ttype)));
+ /* If it really returns void, change that to int. */
+ if (TREE_TYPE (TREE_TYPE (yyval.ttype)) == void_type_node)
+ TREE_TYPE (yyval.ttype)
+ = build_function_type (integer_type_node,
+ TYPE_ARG_TYPES (TREE_TYPE (yyval.ttype)));
+ }
+ else
+ pedwarn ("built-in function `%s' used without declaration",
+ IDENTIFIER_POINTER (DECL_NAME (yyval.ttype)));
+
+	    /* Do what we would ordinarily do when a function is used.  */
+ assemble_external (yyval.ttype);
+ TREE_USED (yyval.ttype) = 1;
+ }
+ else
+ {
+ assemble_external (yyval.ttype);
+ TREE_USED (yyval.ttype) = 1;
+ }
+
+ if (TREE_CODE (yyval.ttype) == CONST_DECL)
+ {
+ yyval.ttype = DECL_INITIAL (yyval.ttype);
+ /* This is to prevent an enum whose value is 0
+ from being considered a null pointer constant. */
+ yyval.ttype = build1 (NOP_EXPR, TREE_TYPE (yyval.ttype), yyval.ttype);
+ TREE_CONSTANT (yyval.ttype) = 1;
+ }
+ ;
+ break;}
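+/* For reference, the action above is what makes a hypothetical
+
+       int f () { return x + x; }
+
+   report `x' undeclared only once: the identifier's global value and
+   error locus are set to error_mark_node, so later uses in the same
+   function stay quiet, and the two-line once-per-function notice is
+   printed only on the first undeclared identifier.  */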
+case 80:
+#line 583 "c-parse.y"
+{ yyval.ttype = combine_strings (yyvsp[0].ttype); ;
+ break;}
+case 81:
+#line 585 "c-parse.y"
+{ char class = TREE_CODE_CLASS (TREE_CODE (yyvsp[-1].ttype));
+ if (class == 'e' || class == '1'
+ || class == '2' || class == '<')
+ C_SET_EXP_ORIGINAL_CODE (yyvsp[-1].ttype, ERROR_MARK);
+ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 82:
+#line 591 "c-parse.y"
+{ yyval.ttype = error_mark_node; ;
+ break;}
+case 83:
+#line 593 "c-parse.y"
+{ if (current_function_decl == 0)
+ {
+ error ("braced-group within expression allowed only inside a function");
+ YYERROR;
+ }
+ /* We must force a BLOCK for this level
+ so that, if it is not expanded later,
+ there is a way to turn off the entire subtree of blocks
+ that are contained in it. */
+ keep_next_level ();
+ push_iterator_stack ();
+ push_label_level ();
+ yyval.ttype = expand_start_stmt_expr (); ;
+ break;}
+case 84:
+#line 607 "c-parse.y"
+{ tree rtl_exp;
+ if (pedantic)
+ pedwarn ("ANSI C forbids braced-groups within expressions");
+ pop_iterator_stack ();
+ pop_label_level ();
+ rtl_exp = expand_end_stmt_expr (yyvsp[-2].ttype);
+ /* The statements have side effects, so the group does. */
+ TREE_SIDE_EFFECTS (rtl_exp) = 1;
+
+ if (TREE_CODE (yyvsp[-1].ttype) == BLOCK)
+ {
+ /* Make a BIND_EXPR for the BLOCK already made. */
+ yyval.ttype = build (BIND_EXPR, TREE_TYPE (rtl_exp),
+ NULL_TREE, rtl_exp, yyvsp[-1].ttype);
+ /* Remove the block from the tree at this point.
+ It gets put back at the proper place
+ when the BIND_EXPR is expanded. */
+ delete_block (yyvsp[-1].ttype);
+ }
+ else
+ yyval.ttype = yyvsp[-1].ttype;
+ ;
+ break;}
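+/* The two actions above implement the GNU statement-expression
+   extension; a hypothetical use:
+
+       int y = ({ int t = f (); t + 1; });
+
+   The opening action rejects the construct outside a function and
+   starts the statement expression; the closing one emits the pedantic
+   warning and wraps the resulting BLOCK in a BIND_EXPR.  */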
+case 85:
+#line 630 "c-parse.y"
+{ yyval.ttype = build_function_call (yyvsp[-3].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 86:
+#line 632 "c-parse.y"
+{ yyval.ttype = build_array_ref (yyvsp[-3].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 87:
+#line 634 "c-parse.y"
+{
+ yyval.ttype = build_component_ref (yyvsp[-2].ttype, yyvsp[0].ttype);
+ ;
+ break;}
+case 88:
+#line 638 "c-parse.y"
+{
+ tree expr = build_indirect_ref (yyvsp[-2].ttype, "->");
+
+ yyval.ttype = build_component_ref (expr, yyvsp[0].ttype);
+ ;
+ break;}
+case 89:
+#line 644 "c-parse.y"
+{ yyval.ttype = build_unary_op (POSTINCREMENT_EXPR, yyvsp[-1].ttype, 0); ;
+ break;}
+case 90:
+#line 646 "c-parse.y"
+{ yyval.ttype = build_unary_op (POSTDECREMENT_EXPR, yyvsp[-1].ttype, 0); ;
+ break;}
+case 92:
+#line 653 "c-parse.y"
+{ yyval.ttype = chainon (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 95:
+#line 662 "c-parse.y"
+{ c_mark_varargs ();
+ if (pedantic)
+ pedwarn ("ANSI C does not permit use of `varargs.h'"); ;
+ break;}
+case 96:
+#line 672 "c-parse.y"
+{ ;
+ break;}
+case 101:
+#line 684 "c-parse.y"
+{ current_declspecs = TREE_VALUE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary (yyvsp[-2].itype); ;
+ break;}
+case 102:
+#line 688 "c-parse.y"
+{ current_declspecs = TREE_VALUE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary (yyvsp[-2].itype); ;
+ break;}
+case 103:
+#line 692 "c-parse.y"
+{ shadow_tag_warned (yyvsp[-1].ttype, 1);
+ pedwarn ("empty declaration"); ;
+ break;}
+case 104:
+#line 695 "c-parse.y"
+{ pedwarn ("empty declaration"); ;
+ break;}
+case 105:
+#line 704 "c-parse.y"
+{ ;
+ break;}
+case 110:
+#line 719 "c-parse.y"
+{ yyval.itype = suspend_momentary ();
+ pending_xref_error ();
+ declspec_stack = tree_cons (NULL_TREE, current_declspecs,
+ declspec_stack);
+ current_declspecs = yyvsp[0].ttype; ;
+ break;}
+case 111:
+#line 728 "c-parse.y"
+{ current_declspecs = TREE_VALUE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary (yyvsp[-2].itype); ;
+ break;}
+case 112:
+#line 732 "c-parse.y"
+{ current_declspecs = TREE_VALUE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary (yyvsp[-2].itype); ;
+ break;}
+case 113:
+#line 736 "c-parse.y"
+{ current_declspecs = TREE_VALUE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary (yyvsp[-1].itype); ;
+ break;}
+case 114:
+#line 740 "c-parse.y"
+{ current_declspecs = TREE_VALUE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary (yyvsp[-1].itype); ;
+ break;}
+case 115:
+#line 744 "c-parse.y"
+{ shadow_tag (yyvsp[-1].ttype); ;
+ break;}
+case 116:
+#line 746 "c-parse.y"
+{ pedwarn ("empty declaration"); ;
+ break;}
+case 117:
+#line 755 "c-parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 118:
+#line 757 "c-parse.y"
+{ yyval.ttype = chainon (yyvsp[0].ttype, tree_cons (NULL_TREE, yyvsp[-1].ttype, yyvsp[-2].ttype)); ;
+ break;}
+case 119:
+#line 761 "c-parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 120:
+#line 763 "c-parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 121:
+#line 765 "c-parse.y"
+{ if (extra_warnings)
+ warning ("`%s' is not at beginning of declaration",
+ IDENTIFIER_POINTER (yyvsp[0].ttype));
+ yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 122:
+#line 777 "c-parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, NULL_TREE);
+ TREE_STATIC (yyval.ttype) = 1; ;
+ break;}
+case 123:
+#line 780 "c-parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 124:
+#line 782 "c-parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype);
+ TREE_STATIC (yyval.ttype) = 1; ;
+ break;}
+case 125:
+#line 785 "c-parse.y"
+{ if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype))
+ warning ("`%s' is not at beginning of declaration",
+ IDENTIFIER_POINTER (yyvsp[0].ttype));
+ yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype);
+ TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ;
+ break;}
+case 126:
+#line 799 "c-parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 127:
+#line 801 "c-parse.y"
+{ yyval.ttype = chainon (yyvsp[0].ttype, tree_cons (NULL_TREE, yyvsp[-1].ttype, yyvsp[-2].ttype)); ;
+ break;}
+case 128:
+#line 805 "c-parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 129:
+#line 807 "c-parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 132:
+#line 817 "c-parse.y"
+{ /* For a typedef name, record the meaning, not the name.
+ In case of `foo foo, bar;'. */
+ yyval.ttype = lookup_name (yyvsp[0].ttype); ;
+ break;}
+case 133:
+#line 821 "c-parse.y"
+{ yyval.ttype = TREE_TYPE (yyvsp[-1].ttype); ;
+ break;}
+case 134:
+#line 823 "c-parse.y"
+{ yyval.ttype = groktypename (yyvsp[-1].ttype); ;
+ break;}
+case 142:
+#line 845 "c-parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 143:
+#line 847 "c-parse.y"
+{ if (TREE_CHAIN (yyvsp[-1].ttype)) yyvsp[-1].ttype = combine_strings (yyvsp[-1].ttype);
+ yyval.ttype = yyvsp[-1].ttype;
+ ;
+ break;}
+case 144:
+#line 854 "c-parse.y"
+{ yyval.ttype = start_decl (yyvsp[-3].ttype, current_declspecs, 1);
+ decl_attributes (yyval.ttype, yyvsp[-1].ttype);
+ start_init (yyval.ttype, yyvsp[-2].ttype, global_bindings_p ()); ;
+ break;}
+case 145:
+#line 859 "c-parse.y"
+{ finish_init ();
+ decl_attributes (yyvsp[-1].ttype, yyvsp[-3].ttype);
+ finish_decl (yyvsp[-1].ttype, yyvsp[0].ttype, yyvsp[-4].ttype); ;
+ break;}
+case 146:
+#line 863 "c-parse.y"
+{ tree d = start_decl (yyvsp[-2].ttype, current_declspecs, 0);
+ decl_attributes (d, yyvsp[0].ttype);
+ finish_decl (d, NULL_TREE, yyvsp[-1].ttype); ;
+ break;}
+case 147:
+#line 870 "c-parse.y"
+{ yyval.ttype = start_decl (yyvsp[-3].ttype, current_declspecs, 1);
+ decl_attributes (yyval.ttype, yyvsp[-1].ttype);
+ start_init (yyval.ttype, yyvsp[-2].ttype, global_bindings_p ()); ;
+ break;}
+case 148:
+#line 875 "c-parse.y"
+{ finish_init ();
+ decl_attributes (yyvsp[-1].ttype, yyvsp[-3].ttype);
+ finish_decl (yyvsp[-1].ttype, yyvsp[0].ttype, yyvsp[-4].ttype); ;
+ break;}
+case 149:
+#line 879 "c-parse.y"
+{ tree d = start_decl (yyvsp[-2].ttype, current_declspecs, 0);
+ decl_attributes (d, yyvsp[0].ttype);
+ finish_decl (d, NULL_TREE, yyvsp[-1].ttype); ;
+ break;}
+case 150:
+#line 887 "c-parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 151:
+#line 889 "c-parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 152:
+#line 894 "c-parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 153:
+#line 896 "c-parse.y"
+{ yyval.ttype = chainon (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 154:
+#line 901 "c-parse.y"
+{ yyval.ttype = yyvsp[-2].ttype; ;
+ break;}
+case 155:
+#line 906 "c-parse.y"
+{ yyval.ttype = build_tree_list (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 156:
+#line 908 "c-parse.y"
+{ yyval.ttype = chainon (yyvsp[-2].ttype, build_tree_list (NULL_TREE, yyvsp[0].ttype)); ;
+ break;}
+case 157:
+#line 913 "c-parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 158:
+#line 915 "c-parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 159:
+#line 917 "c-parse.y"
+{ yyval.ttype = tree_cons (yyvsp[-3].ttype, NULL_TREE,
+ build_tree_list (NULL_TREE, yyvsp[-1].ttype)); ;
+ break;}
+case 160:
+#line 920 "c-parse.y"
+{ yyval.ttype = tree_cons (yyvsp[-5].ttype, NULL_TREE,
+ tree_cons (NULL_TREE, yyvsp[-3].ttype, yyvsp[-1].ttype)); ;
+ break;}
+case 161:
+#line 923 "c-parse.y"
+{ yyval.ttype = tree_cons (yyvsp[-3].ttype, NULL_TREE, yyvsp[-1].ttype); ;
+ break;}
+case 167:
+#line 941 "c-parse.y"
+{ really_start_incremental_init (NULL_TREE);
+ /* Note that the call to clear_momentary
+ is in process_init_element. */
+ push_momentary (); ;
+ break;}
+case 168:
+#line 946 "c-parse.y"
+{ yyval.ttype = pop_init_level (0);
+ if (yyval.ttype == error_mark_node
+ && ! (yychar == STRING || yychar == CONSTANT))
+ pop_momentary ();
+ else
+ pop_momentary_nofree (); ;
+ break;}
+case 169:
+#line 954 "c-parse.y"
+{ yyval.ttype = error_mark_node; ;
+ break;}
+case 170:
+#line 960 "c-parse.y"
+{ if (pedantic)
+ pedwarn ("ANSI C forbids empty initializer braces"); ;
+ break;}
+case 174:
+#line 974 "c-parse.y"
+{ process_init_element (yyvsp[0].ttype); ;
+ break;}
+case 175:
+#line 976 "c-parse.y"
+{ push_init_level (0); ;
+ break;}
+case 176:
+#line 978 "c-parse.y"
+{ process_init_element (pop_init_level (0)); ;
+ break;}
+case 178:
+#line 984 "c-parse.y"
+{ set_init_index (yyvsp[-4].ttype, yyvsp[-2].ttype); ;
+ break;}
+case 180:
+#line 987 "c-parse.y"
+{ set_init_index (yyvsp[-2].ttype, NULL_TREE); ;
+ break;}
+case 182:
+#line 990 "c-parse.y"
+{ set_init_index (yyvsp[-1].ttype, NULL_TREE); ;
+ break;}
+case 184:
+#line 993 "c-parse.y"
+{ set_init_label (yyvsp[-1].ttype); ;
+ break;}
+case 186:
+#line 996 "c-parse.y"
+{ set_init_label (yyvsp[-1].ttype); ;
+ break;}
+case 188:
+#line 1002 "c-parse.y"
+{ push_c_function_context ();
+ if (! start_function (current_declspecs, yyvsp[0].ttype, 1))
+ {
+ pop_c_function_context ();
+ YYERROR1;
+ }
+ reinit_parse_for_function ();
+ store_parm_decls (); ;
+ break;}
+case 189:
+#line 1017 "c-parse.y"
+{ finish_function (1);
+ pop_c_function_context (); ;
+ break;}
+case 190:
+#line 1023 "c-parse.y"
+{ push_c_function_context ();
+ if (! start_function (current_declspecs, yyvsp[0].ttype, 1))
+ {
+ pop_c_function_context ();
+ YYERROR1;
+ }
+ reinit_parse_for_function ();
+ store_parm_decls (); ;
+ break;}
+case 191:
+#line 1038 "c-parse.y"
+{ finish_function (1);
+ pop_c_function_context (); ;
+ break;}
+case 194:
+#line 1054 "c-parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 195:
+#line 1056 "c-parse.y"
+{ yyval.ttype = build_nt (CALL_EXPR, yyvsp[-2].ttype, yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 196:
+#line 1061 "c-parse.y"
+{ yyval.ttype = build_nt (ARRAY_REF, yyvsp[-3].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 197:
+#line 1063 "c-parse.y"
+{ yyval.ttype = build_nt (ARRAY_REF, yyvsp[-2].ttype, NULL_TREE); ;
+ break;}
+case 198:
+#line 1065 "c-parse.y"
+{ yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 200:
+#line 1076 "c-parse.y"
+{ yyval.ttype = build_nt (CALL_EXPR, yyvsp[-2].ttype, yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 201:
+#line 1081 "c-parse.y"
+{ yyval.ttype = build_nt (ARRAY_REF, yyvsp[-3].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 202:
+#line 1083 "c-parse.y"
+{ yyval.ttype = build_nt (ARRAY_REF, yyvsp[-2].ttype, NULL_TREE); ;
+ break;}
+case 203:
+#line 1085 "c-parse.y"
+{ yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 205:
+#line 1094 "c-parse.y"
+{ yyval.ttype = build_nt (CALL_EXPR, yyvsp[-2].ttype, yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 206:
+#line 1099 "c-parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 207:
+#line 1101 "c-parse.y"
+{ yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 208:
+#line 1103 "c-parse.y"
+{ yyval.ttype = build_nt (ARRAY_REF, yyvsp[-3].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 209:
+#line 1105 "c-parse.y"
+{ yyval.ttype = build_nt (ARRAY_REF, yyvsp[-2].ttype, NULL_TREE); ;
+ break;}
+case 211:
+#line 1111 "c-parse.y"
+{ yyval.ttype = start_struct (RECORD_TYPE, yyvsp[-1].ttype);
+ /* Start scope of tag before parsing components. */
+ ;
+ break;}
+case 212:
+#line 1115 "c-parse.y"
+{ yyval.ttype = finish_struct (yyvsp[-2].ttype, yyvsp[-1].ttype);
+ /* Really define the structure. */
+ ;
+ break;}
+case 213:
+#line 1119 "c-parse.y"
+{ yyval.ttype = finish_struct (start_struct (RECORD_TYPE, NULL_TREE),
+ yyvsp[-1].ttype); ;
+ break;}
+case 214:
+#line 1122 "c-parse.y"
+{ yyval.ttype = xref_tag (RECORD_TYPE, yyvsp[0].ttype); ;
+ break;}
+case 215:
+#line 1124 "c-parse.y"
+{ yyval.ttype = start_struct (UNION_TYPE, yyvsp[-1].ttype); ;
+ break;}
+case 216:
+#line 1126 "c-parse.y"
+{ yyval.ttype = finish_struct (yyvsp[-2].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 217:
+#line 1128 "c-parse.y"
+{ yyval.ttype = finish_struct (start_struct (UNION_TYPE, NULL_TREE),
+ yyvsp[-1].ttype); ;
+ break;}
+case 218:
+#line 1131 "c-parse.y"
+{ yyval.ttype = xref_tag (UNION_TYPE, yyvsp[0].ttype); ;
+ break;}
+case 219:
+#line 1133 "c-parse.y"
+{ yyvsp[0].itype = suspend_momentary ();
+ yyval.ttype = start_enum (yyvsp[-1].ttype); ;
+ break;}
+case 220:
+#line 1136 "c-parse.y"
+{ yyval.ttype = finish_enum (yyvsp[-3].ttype, nreverse (yyvsp[-2].ttype));
+ resume_momentary (yyvsp[-4].itype); ;
+ break;}
+case 221:
+#line 1139 "c-parse.y"
+{ yyvsp[0].itype = suspend_momentary ();
+ yyval.ttype = start_enum (NULL_TREE); ;
+ break;}
+case 222:
+#line 1142 "c-parse.y"
+{ yyval.ttype = finish_enum (yyvsp[-3].ttype, nreverse (yyvsp[-2].ttype));
+ resume_momentary (yyvsp[-4].itype); ;
+ break;}
+case 223:
+#line 1145 "c-parse.y"
+{ yyval.ttype = xref_tag (ENUMERAL_TYPE, yyvsp[0].ttype); ;
+ break;}
+case 227:
+#line 1156 "c-parse.y"
+{ if (pedantic) pedwarn ("comma at end of enumerator list"); ;
+ break;}
+case 228:
+#line 1161 "c-parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 229:
+#line 1163 "c-parse.y"
+{ yyval.ttype = chainon (yyvsp[-1].ttype, yyvsp[0].ttype);
+ pedwarn ("no semicolon at end of struct or union"); ;
+ break;}
+case 230:
+#line 1168 "c-parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 231:
+#line 1170 "c-parse.y"
+{ yyval.ttype = chainon (yyvsp[-2].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 232:
+#line 1172 "c-parse.y"
+{ if (pedantic)
+ pedwarn ("extra semicolon in struct or union specified"); ;
+ break;}
+case 233:
+#line 1187 "c-parse.y"
+{ yyval.ttype = yyvsp[0].ttype;
+ current_declspecs = TREE_VALUE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary (yyvsp[-1].itype); ;
+ break;}
+case 234:
+#line 1192 "c-parse.y"
+{ if (pedantic)
+ pedwarn ("ANSI C forbids member declarations with no members");
+ shadow_tag(yyvsp[0].ttype);
+ yyval.ttype = NULL_TREE; ;
+ break;}
+case 235:
+#line 1197 "c-parse.y"
+{ yyval.ttype = yyvsp[0].ttype;
+ current_declspecs = TREE_VALUE (declspec_stack);
+ declspec_stack = TREE_CHAIN (declspec_stack);
+ resume_momentary (yyvsp[-1].itype); ;
+ break;}
+case 236:
+#line 1202 "c-parse.y"
+{ if (pedantic)
+ pedwarn ("ANSI C forbids member declarations with no members");
+ shadow_tag(yyvsp[0].ttype);
+ yyval.ttype = NULL_TREE; ;
+ break;}
+case 237:
+#line 1207 "c-parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 239:
+#line 1213 "c-parse.y"
+{ yyval.ttype = chainon (yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 240:
+#line 1218 "c-parse.y"
+{ yyval.ttype = grokfield (yyvsp[-3].filename, yyvsp[-2].lineno, yyvsp[-1].ttype, current_declspecs, NULL_TREE);
+ decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 241:
+#line 1222 "c-parse.y"
+{ yyval.ttype = grokfield (yyvsp[-5].filename, yyvsp[-4].lineno, yyvsp[-3].ttype, current_declspecs, yyvsp[-1].ttype);
+ decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 242:
+#line 1225 "c-parse.y"
+{ yyval.ttype = grokfield (yyvsp[-4].filename, yyvsp[-3].lineno, NULL_TREE, current_declspecs, yyvsp[-1].ttype);
+ decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 244:
+#line 1237 "c-parse.y"
+{ yyval.ttype = chainon (yyvsp[0].ttype, yyvsp[-2].ttype); ;
+ break;}
+case 245:
+#line 1239 "c-parse.y"
+{ yyval.ttype = error_mark_node; ;
+ break;}
+case 246:
+#line 1245 "c-parse.y"
+{ yyval.ttype = build_enumerator (yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 247:
+#line 1247 "c-parse.y"
+{ yyval.ttype = build_enumerator (yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 248:
+#line 1252 "c-parse.y"
+{ yyval.ttype = build_tree_list (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 249:
+#line 1254 "c-parse.y"
+{ yyval.ttype = build_tree_list (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 250:
+#line 1259 "c-parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 252:
+#line 1265 "c-parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 253:
+#line 1267 "c-parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 254:
+#line 1272 "c-parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 255:
+#line 1274 "c-parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 256:
+#line 1279 "c-parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 257:
+#line 1282 "c-parse.y"
+{ yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 258:
+#line 1284 "c-parse.y"
+{ yyval.ttype = make_pointer_declarator (yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 259:
+#line 1286 "c-parse.y"
+{ yyval.ttype = build_nt (CALL_EXPR, yyvsp[-2].ttype, yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 260:
+#line 1288 "c-parse.y"
+{ yyval.ttype = build_nt (ARRAY_REF, yyvsp[-3].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 261:
+#line 1290 "c-parse.y"
+{ yyval.ttype = build_nt (ARRAY_REF, yyvsp[-2].ttype, NULL_TREE); ;
+ break;}
+case 262:
+#line 1292 "c-parse.y"
+{ yyval.ttype = build_nt (CALL_EXPR, NULL_TREE, yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 263:
+#line 1294 "c-parse.y"
+{ yyval.ttype = build_nt (ARRAY_REF, NULL_TREE, yyvsp[-1].ttype); ;
+ break;}
+case 264:
+#line 1296 "c-parse.y"
+{ yyval.ttype = build_nt (ARRAY_REF, NULL_TREE, NULL_TREE); ;
+ break;}
+case 271:
+#line 1318 "c-parse.y"
+{ emit_line_note (input_filename, lineno);
+ pushlevel (0);
+ clear_last_expr ();
+ push_momentary ();
+ expand_start_bindings (0);
+ ;
+ break;}
+case 273:
+#line 1331 "c-parse.y"
+{ if (pedantic)
+ pedwarn ("ANSI C forbids label declarations"); ;
+ break;}
+case 276:
+#line 1342 "c-parse.y"
+{ tree link;
+ for (link = yyvsp[-1].ttype; link; link = TREE_CHAIN (link))
+ {
+ tree label = shadow_label (TREE_VALUE (link));
+ C_DECLARED_LABEL_FLAG (label) = 1;
+ declare_nonlocal_label (label);
+ }
+ ;
+ break;}
+case 277:
+#line 1356 "c-parse.y"
+{;
+ break;}
+case 279:
+#line 1361 "c-parse.y"
+{ yyval.ttype = convert (void_type_node, integer_zero_node); ;
+ break;}
+case 280:
+#line 1363 "c-parse.y"
+{ emit_line_note (input_filename, lineno);
+ expand_end_bindings (getdecls (), 1, 0);
+ yyval.ttype = poplevel (1, 1, 0);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary (); ;
+ break;}
+case 281:
+#line 1371 "c-parse.y"
+{ emit_line_note (input_filename, lineno);
+ expand_end_bindings (getdecls (), kept_level_p (), 0);
+ yyval.ttype = poplevel (kept_level_p (), 0, 0);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary (); ;
+ break;}
+case 282:
+#line 1379 "c-parse.y"
+{ emit_line_note (input_filename, lineno);
+ expand_end_bindings (getdecls (), kept_level_p (), 0);
+ yyval.ttype = poplevel (kept_level_p (), 0, 0);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary (); ;
+ break;}
+case 285:
+#line 1399 "c-parse.y"
+{ emit_line_note (yyvsp[-5].filename, yyvsp[-4].lineno);
+ expand_start_cond (truthvalue_conversion (yyvsp[-1].ttype), 0);
+ yyval.itype = stmt_count;
+ if_stmt_file = yyvsp[-5].filename;
+ if_stmt_line = yyvsp[-4].lineno;
+ position_after_white_space (); ;
+ break;}
+case 286:
+#line 1412 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-2].filename, yyvsp[-1].lineno);
+ /* See comment in `while' alternative, above. */
+ emit_nop ();
+ expand_start_loop_continue_elsewhere (1);
+ position_after_white_space (); ;
+ break;}
+case 287:
+#line 1419 "c-parse.y"
+{ expand_loop_continue_here (); ;
+ break;}
+case 288:
+#line 1423 "c-parse.y"
+{ yyval.filename = input_filename; ;
+ break;}
+case 289:
+#line 1427 "c-parse.y"
+{ yyval.lineno = lineno; ;
+ break;}
+case 290:
+#line 1432 "c-parse.y"
+{ ;
+ break;}
+case 291:
+#line 1437 "c-parse.y"
+{ ;
+ break;}
+case 292:
+#line 1442 "c-parse.y"
+{ ;
+ break;}
+case 294:
+#line 1448 "c-parse.y"
+{ int next;
+ position_after_white_space ();
+ next = getc (finput);
+ ungetc (next, finput);
+ if (pedantic && next == '}')
+ pedwarn ("ANSI C forbids label at end of compound statement");
+ ;
+ break;}
+case 295:
+#line 1460 "c-parse.y"
+{ stmt_count++; ;
+ break;}
+case 297:
+#line 1463 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-3].filename, yyvsp[-2].lineno);
+/* It appears that this should not be done--that a non-lvalue array
+ shouldn't get an error if the value isn't used.
+ Section 3.2.2.1 says that an array lvalue gets converted to a pointer
+ if it appears as a top-level expression,
+ but says nothing about non-lvalue arrays. */
+#if 0
+ /* Call default_conversion to get an error
+ on referring to a register array if pedantic. */
+ if (TREE_CODE (TREE_TYPE (yyvsp[-1].ttype)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (yyvsp[-1].ttype)) == FUNCTION_TYPE)
+ yyvsp[-1].ttype = default_conversion (yyvsp[-1].ttype);
+#endif
+ iterator_expand (yyvsp[-1].ttype);
+ clear_momentary (); ;
+ break;}
+case 298:
+#line 1480 "c-parse.y"
+{ expand_start_else ();
+ yyvsp[-1].itype = stmt_count;
+ position_after_white_space (); ;
+ break;}
+case 299:
+#line 1484 "c-parse.y"
+{ expand_end_cond ();
+ if (extra_warnings && stmt_count == yyvsp[-3].itype)
+ warning ("empty body in an else-statement"); ;
+ break;}
+case 300:
+#line 1488 "c-parse.y"
+{ expand_end_cond ();
+ /* This warning is here instead of in simple_if, because we
+ do not want a warning if an empty if is followed by an
+ else statement. Increment stmt_count so we don't
+ give a second error if this is a nested `if'. */
+ if (extra_warnings && stmt_count++ == yyvsp[0].itype)
+ warning_with_file_and_line (if_stmt_file, if_stmt_line,
+ "empty body in an if-statement"); ;
+ break;}
+case 301:
+#line 1500 "c-parse.y"
+{ expand_end_cond (); ;
+ break;}
+case 302:
+#line 1502 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-2].filename, yyvsp[-1].lineno);
+ /* The emit_nop used to come before emit_line_note,
+ but that made the nop seem like part of the preceding line.
+ And that was confusing when the preceding line was
+ inside of an if statement and was not really executed.
+ I think it ought to work to put the nop after the line number.
+ We will see. --rms, July 15, 1991. */
+ emit_nop (); ;
+ break;}
+case 303:
+#line 1512 "c-parse.y"
+{ /* Don't start the loop till we have succeeded
+ in parsing the end test. This is to make sure
+ that we end every loop we start. */
+ expand_start_loop (1);
+ emit_line_note (input_filename, lineno);
+ expand_exit_loop_if_false (NULL_PTR,
+ truthvalue_conversion (yyvsp[-1].ttype));
+ position_after_white_space (); ;
+ break;}
+case 304:
+#line 1521 "c-parse.y"
+{ expand_end_loop (); ;
+ break;}
+case 305:
+#line 1524 "c-parse.y"
+{ emit_line_note (input_filename, lineno);
+ expand_exit_loop_if_false (NULL_PTR,
+ truthvalue_conversion (yyvsp[-2].ttype));
+ expand_end_loop ();
+ clear_momentary (); ;
+ break;}
+case 306:
+#line 1531 "c-parse.y"
+{ expand_end_loop ();
+ clear_momentary (); ;
+ break;}
+case 307:
+#line 1535 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-5].filename, yyvsp[-4].lineno);
+ /* See comment in `while' alternative, above. */
+ emit_nop ();
+ if (yyvsp[-1].ttype) c_expand_expr_stmt (yyvsp[-1].ttype);
+ /* Next step is to call expand_start_loop_continue_elsewhere,
+ but wait till after we parse the entire for (...).
+ Otherwise, invalid input might cause us to call that
+ fn without calling expand_end_loop. */
+ ;
+ break;}
+case 308:
+#line 1547 "c-parse.y"
+{ yyvsp[0].lineno = lineno;
+ yyval.filename = input_filename; ;
+ break;}
+case 309:
+#line 1550 "c-parse.y"
+{
+ /* Start the loop. Doing this after parsing
+ all the expressions ensures we will end the loop. */
+ expand_start_loop_continue_elsewhere (1);
+ /* Emit the end-test, with a line number. */
+ emit_line_note (yyvsp[-2].filename, yyvsp[-3].lineno);
+ if (yyvsp[-4].ttype)
+ expand_exit_loop_if_false (NULL_PTR,
+ truthvalue_conversion (yyvsp[-4].ttype));
+ /* Don't let the tree nodes for $9 be discarded by
+ clear_momentary during the parsing of the next stmt. */
+ push_momentary ();
+ yyvsp[-3].lineno = lineno;
+ yyvsp[-2].filename = input_filename;
+ position_after_white_space (); ;
+ break;}
+case 310:
+#line 1566 "c-parse.y"
+{ /* Emit the increment expression, with a line number. */
+ emit_line_note (yyvsp[-4].filename, yyvsp[-5].lineno);
+ expand_loop_continue_here ();
+ if (yyvsp[-3].ttype)
+ c_expand_expr_stmt (yyvsp[-3].ttype);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary ();
+ expand_end_loop (); ;
+ break;}
+case 311:
+#line 1577 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-5].filename, yyvsp[-4].lineno);
+ c_expand_start_case (yyvsp[-1].ttype);
+ /* Don't let the tree nodes for $3 be discarded by
+ clear_momentary during the parsing of the next stmt. */
+ push_momentary ();
+ position_after_white_space (); ;
+ break;}
+case 312:
+#line 1585 "c-parse.y"
+{ expand_end_case (yyvsp[-3].ttype);
+ if (yychar == CONSTANT || yychar == STRING)
+ pop_momentary_nofree ();
+ else
+ pop_momentary (); ;
+ break;}
+case 313:
+#line 1591 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-3].filename, yyvsp[-2].lineno);
+ if ( ! expand_exit_something ())
+ error ("break statement not within loop or switch"); ;
+ break;}
+case 314:
+#line 1596 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-3].filename, yyvsp[-2].lineno);
+ if (! expand_continue_loop (NULL_PTR))
+ error ("continue statement not within a loop"); ;
+ break;}
+case 315:
+#line 1601 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-3].filename, yyvsp[-2].lineno);
+ c_expand_return (NULL_TREE); ;
+ break;}
+case 316:
+#line 1605 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-4].filename, yyvsp[-3].lineno);
+ c_expand_return (yyvsp[-1].ttype); ;
+ break;}
+case 317:
+#line 1609 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-7].filename, yyvsp[-6].lineno);
+ STRIP_NOPS (yyvsp[-2].ttype);
+ if ((TREE_CODE (yyvsp[-2].ttype) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (yyvsp[-2].ttype, 0)) == STRING_CST)
+ || TREE_CODE (yyvsp[-2].ttype) == STRING_CST)
+ expand_asm (yyvsp[-2].ttype);
+ else
+ error ("argument of `asm' is not a constant string"); ;
+ break;}
+case 318:
+#line 1620 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-9].filename, yyvsp[-8].lineno);
+ c_expand_asm_operands (yyvsp[-4].ttype, yyvsp[-2].ttype, NULL_TREE, NULL_TREE,
+ yyvsp[-6].ttype == ridpointers[(int)RID_VOLATILE],
+ input_filename, lineno); ;
+ break;}
+case 319:
+#line 1627 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-11].filename, yyvsp[-10].lineno);
+ c_expand_asm_operands (yyvsp[-6].ttype, yyvsp[-4].ttype, yyvsp[-2].ttype, NULL_TREE,
+ yyvsp[-8].ttype == ridpointers[(int)RID_VOLATILE],
+ input_filename, lineno); ;
+ break;}
+case 320:
+#line 1635 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-13].filename, yyvsp[-12].lineno);
+ c_expand_asm_operands (yyvsp[-8].ttype, yyvsp[-6].ttype, yyvsp[-4].ttype, yyvsp[-2].ttype,
+ yyvsp[-10].ttype == ridpointers[(int)RID_VOLATILE],
+ input_filename, lineno); ;
+ break;}
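+/* The four actions above cover the plain and extended GNU asm forms;
+   hypothetical uses:
+
+       asm ("nop");
+       asm volatile ("addl %1,%0" : "=r" (out) : "r" (in) : "cc");
+
+   Output operands, input operands and clobbers are passed through to
+   c_expand_asm_operands, with `volatile' detected via RID_VOLATILE.  */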
+case 321:
+#line 1641 "c-parse.y"
+{ tree decl;
+ stmt_count++;
+ emit_line_note (yyvsp[-4].filename, yyvsp[-3].lineno);
+ decl = lookup_label (yyvsp[-1].ttype);
+ if (decl != 0)
+ {
+ TREE_USED (decl) = 1;
+ expand_goto (decl);
+ }
+ ;
+ break;}
+case 322:
+#line 1652 "c-parse.y"
+{ stmt_count++;
+ emit_line_note (yyvsp[-5].filename, yyvsp[-4].lineno);
+ expand_computed_goto (convert (ptr_type_node, yyvsp[-1].ttype)); ;
+ break;}
+case 325:
+#line 1665 "c-parse.y"
+{
+		  /* The value returned by this action is
+		     1 if everything is OK,
+		     0 in case of error or an already-bound iterator.  */
+
+ yyval.itype = 0;
+ if (TREE_CODE (yyvsp[-1].ttype) != VAR_DECL)
+ error ("invalid `for (ITERATOR)' syntax");
+ else if (! ITERATOR_P (yyvsp[-1].ttype))
+ error ("`%s' is not an iterator",
+ IDENTIFIER_POINTER (DECL_NAME (yyvsp[-1].ttype)));
+ else if (ITERATOR_BOUND_P (yyvsp[-1].ttype))
+ error ("`for (%s)' inside expansion of same iterator",
+ IDENTIFIER_POINTER (DECL_NAME (yyvsp[-1].ttype)));
+ else
+ {
+ yyval.itype = 1;
+ iterator_for_loop_start (yyvsp[-1].ttype);
+ }
+ ;
+ break;}
+case 326:
+#line 1686 "c-parse.y"
+{
+ if (yyvsp[-1].itype)
+ iterator_for_loop_end (yyvsp[-3].ttype);
+ ;
+ break;}
+case 327:
+#line 1721 "c-parse.y"
+{ register tree value = check_case_value (yyvsp[-1].ttype);
+ register tree label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ stmt_count++;
+
+ if (value != error_mark_node)
+ {
+ tree duplicate;
+ int success = pushcase (value, convert_and_check,
+ label, &duplicate);
+ if (success == 1)
+ error ("case label not within a switch statement");
+ else if (success == 2)
+ {
+ error ("duplicate case value");
+ error_with_decl (duplicate, "this is the first entry for that value");
+ }
+ else if (success == 3)
+ warning ("case value out of range");
+ else if (success == 5)
+ error ("case label within scope of cleanup or variable array");
+ }
+ position_after_white_space (); ;
+ break;}
+case 328:
+#line 1746 "c-parse.y"
+{ register tree value1 = check_case_value (yyvsp[-3].ttype);
+ register tree value2 = check_case_value (yyvsp[-1].ttype);
+ register tree label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ stmt_count++;
+
+ if (value1 != error_mark_node && value2 != error_mark_node)
+ {
+ tree duplicate;
+ int success = pushcase_range (value1, value2,
+ convert_and_check, label,
+ &duplicate);
+ if (success == 1)
+ error ("case label not within a switch statement");
+ else if (success == 2)
+ {
+ error ("duplicate case value");
+ error_with_decl (duplicate, "this is the first entry for that value");
+ }
+ else if (success == 3)
+ warning ("case value out of range");
+ else if (success == 4)
+ warning ("empty case range");
+ else if (success == 5)
+ error ("case label within scope of cleanup or variable array");
+ }
+ position_after_white_space (); ;
+ break;}
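+/* The second action above implements the GNU case-range extension; a
+   hypothetical use:
+
+       case 1 ... 5:
+
+   Both bounds go through check_case_value, and pushcase_range reports
+   the same duplicate and out-of-range conditions as a plain case
+   label, plus a warning for an empty range.  */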
+case 329:
+#line 1775 "c-parse.y"
+{
+ tree duplicate;
+ register tree label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+ int success = pushcase (NULL_TREE, 0, label, &duplicate);
+ stmt_count++;
+ if (success == 1)
+ error ("default label not within a switch statement");
+ else if (success == 2)
+ {
+ error ("multiple default labels in one switch");
+ error_with_decl (duplicate, "this is the first default label");
+ }
+ position_after_white_space (); ;
+ break;}
+case 330:
+#line 1790 "c-parse.y"
+{ tree label = define_label (input_filename, lineno, yyvsp[-1].ttype);
+ stmt_count++;
+ emit_nop ();
+ if (label)
+ expand_label (label);
+ position_after_white_space (); ;
+ break;}
+case 331:
+#line 1802 "c-parse.y"
+{ emit_line_note (input_filename, lineno);
+ yyval.ttype = NULL_TREE; ;
+ break;}
+case 332:
+#line 1805 "c-parse.y"
+{ emit_line_note (input_filename, lineno); ;
+ break;}
+case 333:
+#line 1810 "c-parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 335:
+#line 1817 "c-parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 338:
+#line 1824 "c-parse.y"
+{ yyval.ttype = chainon (yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 339:
+#line 1829 "c-parse.y"
+{ yyval.ttype = build_tree_list (yyvsp[-3].ttype, yyvsp[-1].ttype); ;
+ break;}
+case 340:
+#line 1834 "c-parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, combine_strings (yyvsp[0].ttype), NULL_TREE); ;
+ break;}
+case 341:
+#line 1836 "c-parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, combine_strings (yyvsp[0].ttype), yyvsp[-2].ttype); ;
+ break;}
+case 342:
+#line 1842 "c-parse.y"
+{ pushlevel (0);
+ clear_parm_order ();
+ declare_parm_level (0); ;
+ break;}
+case 343:
+#line 1846 "c-parse.y"
+{ yyval.ttype = yyvsp[0].ttype;
+ parmlist_tags_warning ();
+ poplevel (0, 0, 0); ;
+ break;}
+case 345:
+#line 1854 "c-parse.y"
+{ tree parm;
+ if (pedantic)
+ pedwarn ("ANSI C forbids forward parameter declarations");
+ /* Mark the forward decls as such. */
+ for (parm = getdecls (); parm; parm = TREE_CHAIN (parm))
+ TREE_ASM_WRITTEN (parm) = 1;
+ clear_parm_order (); ;
+ break;}
+case 346:
+#line 1862 "c-parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 347:
+#line 1864 "c-parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, NULL_TREE, NULL_TREE); ;
+ break;}
+case 348:
+#line 1870 "c-parse.y"
+{ yyval.ttype = get_parm_info (0); ;
+ break;}
+case 349:
+#line 1872 "c-parse.y"
+{ yyval.ttype = get_parm_info (0);
+ if (pedantic)
+ pedwarn ("ANSI C requires a named argument before `...'");
+ ;
+ break;}
+case 350:
+#line 1877 "c-parse.y"
+{ yyval.ttype = get_parm_info (1); ;
+ break;}
+case 351:
+#line 1879 "c-parse.y"
+{ yyval.ttype = get_parm_info (0); ;
+ break;}
+case 352:
+#line 1884 "c-parse.y"
+{ push_parm_decl (yyvsp[0].ttype); ;
+ break;}
+case 353:
+#line 1886 "c-parse.y"
+{ push_parm_decl (yyvsp[0].ttype); ;
+ break;}
+case 354:
+#line 1893 "c-parse.y"
+{ yyval.ttype = build_tree_list (yyvsp[-1].ttype, yyvsp[0].ttype) ; ;
+ break;}
+case 355:
+#line 1895 "c-parse.y"
+{ yyval.ttype = build_tree_list (yyvsp[-1].ttype, yyvsp[0].ttype) ; ;
+ break;}
+case 356:
+#line 1897 "c-parse.y"
+{ yyval.ttype = build_tree_list (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 357:
+#line 1899 "c-parse.y"
+{ yyval.ttype = build_tree_list (yyvsp[-1].ttype, yyvsp[0].ttype) ; ;
+ break;}
+case 358:
+#line 1901 "c-parse.y"
+{ yyval.ttype = build_tree_list (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 359:
+#line 1908 "c-parse.y"
+{ pushlevel (0);
+ clear_parm_order ();
+ declare_parm_level (1); ;
+ break;}
+case 360:
+#line 1912 "c-parse.y"
+{ yyval.ttype = yyvsp[0].ttype;
+ parmlist_tags_warning ();
+ poplevel (0, 0, 0); ;
+ break;}
+case 362:
+#line 1920 "c-parse.y"
+{ tree t;
+ for (t = yyvsp[-1].ttype; t; t = TREE_CHAIN (t))
+ if (TREE_VALUE (t) == NULL_TREE)
+ error ("`...' in old-style identifier list");
+ yyval.ttype = tree_cons (NULL_TREE, NULL_TREE, yyvsp[-1].ttype); ;
+ break;}
+case 363:
+#line 1930 "c-parse.y"
+{ yyval.ttype = build_tree_list (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 364:
+#line 1932 "c-parse.y"
+{ yyval.ttype = chainon (yyvsp[-2].ttype, build_tree_list (NULL_TREE, yyvsp[0].ttype)); ;
+ break;}
+case 365:
+#line 1938 "c-parse.y"
+{ yyval.ttype = build_tree_list (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 366:
+#line 1940 "c-parse.y"
+{ yyval.ttype = chainon (yyvsp[-2].ttype, build_tree_list (NULL_TREE, yyvsp[0].ttype)); ;
+ break;}
+}
+  /* The action file gets copied in, in place of this dollarsign.  */
+#line 480 "/usr/local/lib/bison.simple"
+
+ yyvsp -= yylen;
+ yyssp -= yylen;
+#ifdef YYLSP_NEEDED
+ yylsp -= yylen;
+#endif
+
+#if YYDEBUG != 0
+ if (yydebug)
+ {
+ short *ssp1 = yyss - 1;
+ fprintf (stderr, "state stack now");
+ while (ssp1 != yyssp)
+ fprintf (stderr, " %d", *++ssp1);
+ fprintf (stderr, "\n");
+ }
+#endif
+
+ *++yyvsp = yyval;
+
+#ifdef YYLSP_NEEDED
+ yylsp++;
+ if (yylen == 0)
+ {
+ yylsp->first_line = yylloc.first_line;
+ yylsp->first_column = yylloc.first_column;
+ yylsp->last_line = (yylsp-1)->last_line;
+ yylsp->last_column = (yylsp-1)->last_column;
+ yylsp->text = 0;
+ }
+ else
+ {
+ yylsp->last_line = (yylsp+yylen-1)->last_line;
+ yylsp->last_column = (yylsp+yylen-1)->last_column;
+ }
+#endif
+
+ /* Now "shift" the result of the reduction.
+ Determine what state that goes to,
+ based on the state we popped back to
+ and the rule number reduced by. */
+
+ yyn = yyr1[yyn];
+
+ yystate = yypgoto[yyn - YYNTBASE] + *yyssp;
+ if (yystate >= 0 && yystate <= YYLAST && yycheck[yystate] == *yyssp)
+ yystate = yytable[yystate];
+ else
+ yystate = yydefgoto[yyn - YYNTBASE];
+
+ goto yynewstate;
+
+yyerrlab: /* here on detecting error */
+
+ if (! yyerrstatus)
+ /* If not already recovering from an error, report this error. */
+ {
+ ++yynerrs;
+
+#ifdef YYERROR_VERBOSE
+ yyn = yypact[yystate];
+
+ if (yyn > YYFLAG && yyn < YYLAST)
+ {
+ int size = 0;
+ char *msg;
+ int x, count;
+
+ count = 0;
+	  /* Start X at -yyn if necessary to avoid negative indexes in yycheck.  */
+ for (x = (yyn < 0 ? -yyn : 0);
+ x < (sizeof(yytname) / sizeof(char *)); x++)
+ if (yycheck[x + yyn] == x)
+ size += strlen(yytname[x]) + 15, count++;
+ msg = (char *) malloc(size + 15);
+ if (msg != 0)
+ {
+ strcpy(msg, "parse error");
+
+ if (count < 5)
+ {
+ count = 0;
+ for (x = (yyn < 0 ? -yyn : 0);
+ x < (sizeof(yytname) / sizeof(char *)); x++)
+ if (yycheck[x + yyn] == x)
+ {
+ strcat(msg, count == 0 ? ", expecting `" : " or `");
+ strcat(msg, yytname[x]);
+ strcat(msg, "'");
+ count++;
+ }
+ }
+ yyerror(msg);
+ free(msg);
+ }
+ else
+ yyerror ("parse error; also virtual memory exceeded");
+ }
+ else
+#endif /* YYERROR_VERBOSE */
+ yyerror("parse error");
+ }
+
+ goto yyerrlab1;
+yyerrlab1: /* here on error raised explicitly by an action */
+
+ if (yyerrstatus == 3)
+ {
+ /* if just tried and failed to reuse lookahead token after an error, discard it. */
+
+ /* return failure if at end of input */
+ if (yychar == YYEOF)
+ YYABORT;
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Discarding token %d (%s).\n", yychar, yytname[yychar1]);
+#endif
+
+ yychar = YYEMPTY;
+ }
+
+ /* Else will try to reuse lookahead token
+ after shifting the error token. */
+
+ yyerrstatus = 3; /* Each real token shifted decrements this */
+
+ goto yyerrhandle;
+
+yyerrdefault: /* current state does not do anything special for the error token. */
+
+#if 0
+ /* This is wrong; only states that explicitly want error tokens
+ should shift them. */
+ yyn = yydefact[yystate]; /* If its default is to accept any token, ok. Otherwise pop it.*/
+ if (yyn) goto yydefault;
+#endif
+
+yyerrpop: /* pop the current state because it cannot handle the error token */
+
+ if (yyssp == yyss) YYABORT;
+ yyvsp--;
+ yystate = *--yyssp;
+#ifdef YYLSP_NEEDED
+ yylsp--;
+#endif
+
+#if YYDEBUG != 0
+ if (yydebug)
+ {
+ short *ssp1 = yyss - 1;
+ fprintf (stderr, "Error: state stack now");
+ while (ssp1 != yyssp)
+ fprintf (stderr, " %d", *++ssp1);
+ fprintf (stderr, "\n");
+ }
+#endif
+
+yyerrhandle:
+
+ yyn = yypact[yystate];
+ if (yyn == YYFLAG)
+ goto yyerrdefault;
+
+ yyn += YYTERROR;
+ if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != YYTERROR)
+ goto yyerrdefault;
+
+ yyn = yytable[yyn];
+ if (yyn < 0)
+ {
+ if (yyn == YYFLAG)
+ goto yyerrpop;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+ else if (yyn == 0)
+ goto yyerrpop;
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Shifting error token, ");
+#endif
+
+ *++yyvsp = yylval;
+#ifdef YYLSP_NEEDED
+ *++yylsp = yylloc;
+#endif
+
+ yystate = yyn;
+ goto yynewstate;
+}
+#line 1943 "c-parse.y"
+
diff --git a/gnu/usr.bin/cc/cc1/c-pragma.c b/gnu/usr.bin/cc/cc1/c-pragma.c
new file mode 100644
index 0000000..cdade3e
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1/c-pragma.c
@@ -0,0 +1,188 @@
+/* Handle #pragma, system V.4 style. Supports #pragma weak and #pragma pack.
+ Copyright (C) 1992 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <stdio.h>
+#include "config.h"
+#include "tree.h"
+#include "function.h"
+#include "defaults.h"
+
+#ifdef HANDLE_SYSV_PRAGMA
+
+/* Support #pragma weak by default if WEAK_ASM_OP and ASM_OUTPUT_DEF
+ are defined. */
+#if !defined (HANDLE_PRAGMA_WEAK) && defined (WEAK_ASM_OP) && defined (ASM_OUTPUT_DEF)
+#define HANDLE_PRAGMA_WEAK 1
+#endif
+
+/* See varasm.c for an identical definition. */
+enum pragma_state
+{
+ ps_start,
+ ps_done,
+ ps_bad,
+ ps_weak,
+ ps_name,
+ ps_equals,
+ ps_value,
+ ps_pack,
+ ps_left,
+ ps_align,
+ ps_right
+};
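+/* The token sequences this state machine accepts look like the
+   following (a hypothetical sketch; for pack, the accepted alignments
+   are 1, 2 and 4 bytes, and `align * 8' becomes the new
+   maximum_field_alignment in bits, 0 meaning packing is off):
+
+       #pragma pack(2)            pack -> ( -> 2 -> )
+       #pragma pack()             pack -> ( -> )
+       #pragma weak sym           weak -> sym
+       #pragma weak sym = alias   weak -> sym -> = -> alias
+
+   Any other sequence ends in ps_bad and the directive is ignored.  */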
+
+/* When structure field packing is in effect, this variable is the
+ number of bits to use as the maximum alignment. When packing is not
+ in effect, this is zero. */
+
+extern int maximum_field_alignment;
+
+/* File used for outputting assembler code. */
+extern FILE *asm_out_file;
+
+/* Handle one token of a pragma directive. TOKEN is the
+ current token, and STRING is its printable form. */
+
+void
+handle_pragma_token (string, token)
+ char *string;
+ tree token;
+{
+ static enum pragma_state state = ps_start, type;
+ static char *name;
+ static char *value;
+ static int align;
+
+ if (string == 0)
+ {
+ if (type == ps_pack)
+ {
+ if (state == ps_right)
+ maximum_field_alignment = align * 8;
+ else
+ warning ("malformed `#pragma pack'");
+ }
+ else if (type == ps_weak)
+ {
+#ifdef HANDLE_PRAGMA_WEAK
+ if (HANDLE_PRAGMA_WEAK)
+ handle_pragma_weak (state, asm_out_file, name, value);
+
+#endif /* HANDLE_PRAGMA_WEAK */
+ }
+
+ type = state = ps_start;
+ return;
+ }
+
+ switch (state)
+ {
+ case ps_start:
+ if (token && TREE_CODE (token) == IDENTIFIER_NODE)
+ {
+ if (strcmp (IDENTIFIER_POINTER (token), "pack") == 0)
+ type = state = ps_pack;
+ else if (strcmp (IDENTIFIER_POINTER (token), "weak") == 0)
+ type = state = ps_weak;
+ else
+ type = state = ps_done;
+ }
+ else
+ type = state = ps_done;
+ break;
+
+ case ps_weak:
+ if (token && TREE_CODE (token) == IDENTIFIER_NODE)
+ {
+ name = IDENTIFIER_POINTER (token);
+ state = ps_name;
+ }
+ else
+ state = ps_bad;
+ break;
+
+ case ps_name:
+ state = (strcmp (string, "=") ? ps_bad : ps_equals);
+ break;
+
+ case ps_equals:
+ if (token && TREE_CODE (token) == IDENTIFIER_NODE)
+ {
+ value = IDENTIFIER_POINTER (token);
+ state = ps_value;
+ }
+ else
+ state = ps_bad;
+ break;
+
+ case ps_value:
+ state = ps_bad;
+ break;
+
+ case ps_pack:
+ if (strcmp (string, "(") == 0)
+ state = ps_left;
+ else
+ state = ps_bad;
+ break;
+
+ case ps_left:
+ if (token && TREE_CODE (token) == INTEGER_CST
+ && TREE_INT_CST_HIGH (token) == 0)
+ switch (TREE_INT_CST_LOW (token))
+ {
+ case 1:
+ case 2:
+ case 4:
+ align = TREE_INT_CST_LOW (token);
+ state = ps_align;
+ break;
+
+ default:
+ state = ps_bad;
+ }
+ else if (! token && strcmp (string, ")") == 0)
+ {
+ align = 0;
+ state = ps_right;
+ }
+ else
+ state = ps_bad;
+ break;
+
+ case ps_align:
+ if (strcmp (string, ")") == 0)
+ state = ps_right;
+ else
+ state = ps_bad;
+ break;
+
+ case ps_right:
+ state = ps_bad;
+ break;
+
+ case ps_bad:
+ case ps_done:
+ break;
+
+ default:
+ abort ();
+ }
+}
+#endif /* HANDLE_SYSV_PRAGMA */
diff --git a/gnu/usr.bin/cc/cc1/c-typeck.c b/gnu/usr.bin/cc/cc1/c-typeck.c
new file mode 100644
index 0000000..d5283c6
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1/c-typeck.c
@@ -0,0 +1,6384 @@
+/* Build expressions with type checking for C compiler.
+ Copyright (C) 1987, 88, 91, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file is part of the C front end.
+ It contains routines to build C expressions given their operands,
+ including computing the types of the result, C-specific error checks,
+ and some optimization.
+
+ There are also routines to build RETURN_STMT nodes and CASE_STMT nodes,
+ and to process initializations in declarations (since they work
+ like a strange sort of assignment). */
+
+#include "config.h"
+#include <stdio.h>
+#include "tree.h"
+#include "c-tree.h"
+#include "flags.h"
+
+/* Nonzero if we've already printed a "missing braces around initializer"
+ message within this initializer. */
+static int missing_braces_mentioned;
+
+extern char *index ();
+extern char *rindex ();
+
+static tree qualify_type		PROTO((tree, tree));
+static int comp_target_types PROTO((tree, tree));
+static int function_types_compatible_p PROTO((tree, tree));
+static int type_lists_compatible_p PROTO((tree, tree));
+static int self_promoting_type_p PROTO((tree));
+static tree decl_constant_value PROTO((tree));
+static tree lookup_field PROTO((tree, tree, tree *));
+static tree convert_arguments PROTO((tree, tree, tree, tree));
+static tree pointer_int_sum PROTO((enum tree_code, tree, tree));
+static tree pointer_diff PROTO((tree, tree));
+static tree unary_complex_lvalue PROTO((enum tree_code, tree));
+static void pedantic_lvalue_warning PROTO((enum tree_code));
+static tree internal_build_compound_expr PROTO((tree, int));
+static tree convert_for_assignment PROTO((tree, tree, char *, tree,
+ tree, int));
+static void warn_for_assignment PROTO((char *, char *, tree, int));
+static tree valid_compound_expr_initializer PROTO((tree, tree));
+static void push_string PROTO((char *));
+static void push_member_name PROTO((tree));
+static void push_array_bounds PROTO((int));
+static int spelling_length PROTO((void));
+static char *print_spelling PROTO((char *));
+static char *get_spelling PROTO((char *));
+static void warning_init PROTO((char *, char *,
+ char *));
+static tree digest_init PROTO((tree, tree, int, int));
+static void check_init_type_bitfields PROTO((tree));
+static void output_init_element PROTO((tree, tree, tree, int));
+static void output_pending_init_elements PROTO((int));
+
+/* Do `exp = require_complete_type (exp);' to make sure exp
+ does not have an incomplete type. (That includes void types.) */
+
+tree
+require_complete_type (value)
+ tree value;
+{
+ tree type = TREE_TYPE (value);
+
+ /* First, detect a valid value with a complete type. */
+ if (TYPE_SIZE (type) != 0
+ && type != void_type_node)
+ return value;
+
+ incomplete_type_error (value, type);
+ return error_mark_node;
+}
+
+/* Print an error message for invalid use of an incomplete type.
+ VALUE is the expression that was used (or 0 if that isn't known)
+ and TYPE is the type that was invalid. */
+
+void
+incomplete_type_error (value, type)
+ tree value;
+ tree type;
+{
+ char *errmsg;
+
+ /* Avoid duplicate error message. */
+ if (TREE_CODE (type) == ERROR_MARK)
+ return;
+
+ if (value != 0 && (TREE_CODE (value) == VAR_DECL
+ || TREE_CODE (value) == PARM_DECL))
+ error ("`%s' has an incomplete type",
+ IDENTIFIER_POINTER (DECL_NAME (value)));
+ else
+ {
+ retry:
+ /* We must print an error message. Be clever about what it says. */
+
+ switch (TREE_CODE (type))
+ {
+ case RECORD_TYPE:
+ errmsg = "invalid use of undefined type `struct %s'";
+ break;
+
+ case UNION_TYPE:
+ errmsg = "invalid use of undefined type `union %s'";
+ break;
+
+ case ENUMERAL_TYPE:
+ errmsg = "invalid use of undefined type `enum %s'";
+ break;
+
+ case VOID_TYPE:
+ error ("invalid use of void expression");
+ return;
+
+ case ARRAY_TYPE:
+ if (TYPE_DOMAIN (type))
+ {
+ type = TREE_TYPE (type);
+ goto retry;
+ }
+ error ("invalid use of array with unspecified bounds");
+ return;
+
+ default:
+ abort ();
+ }
+
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ error (errmsg, IDENTIFIER_POINTER (TYPE_NAME (type)));
+ else
+ /* If this type has a typedef-name, the TYPE_NAME is a TYPE_DECL. */
+ error ("invalid use of incomplete typedef `%s'",
+ IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))));
+ }
+}
+
+/* Return a variant of TYPE which has all the type qualifiers of LIKE
+ as well as those of TYPE. */
+
+static tree
+qualify_type (type, like)
+ tree type, like;
+{
+ int constflag = TYPE_READONLY (type) || TYPE_READONLY (like);
+ int volflag = TYPE_VOLATILE (type) || TYPE_VOLATILE (like);
+ return c_build_type_variant (type, constflag, volflag);
+}
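+/* For example (a sketch): qualify_type (int, const char) yields
+   `const int'; the base type comes from TYPE, while the const and
+   volatile qualifiers of both TYPE and LIKE are merged.  */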
+
+/* Return the common type of two types.
+ We assume that comptypes has already been done and returned 1;
+ if that isn't so, this may crash. In particular, we assume that qualifiers
+ match.
+
+ This is the type for the result of most arithmetic operations
+ if the operands have the given two types. */
+
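+/* A few illustrative results, following the rules coded below (a
+   sketch, assuming a target where int and long have the same width):
+
+       common_type (int, long)            => long
+       common_type (unsigned int, long)   => unsigned long
+       common_type (float, int)           => float
+
+   Enumeral operands are first replaced by the unsigned integer type
+   of the same width.  */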
+tree
+common_type (t1, t2)
+ tree t1, t2;
+{
+ register enum tree_code code1;
+ register enum tree_code code2;
+ tree attributes;
+
+ /* Save time if the two types are the same. */
+
+ if (t1 == t2) return t1;
+
+ /* If one type is nonsense, use the other. */
+ if (t1 == error_mark_node)
+ return t2;
+ if (t2 == error_mark_node)
+ return t1;
+
+ /* Merge the attributes */
+
+ { register tree a1, a2;
+ a1 = TYPE_ATTRIBUTES (t1);
+ a2 = TYPE_ATTRIBUTES (t2);
+
+ /* Either one unset? Take the set one. */
+
+ if (!(attributes = a1))
+ attributes = a2;
+
+ /* One that completely contains the other? Take it. */
+
+ else if (a2 && !attribute_list_contained (a1, a2))
+ if (attribute_list_contained (a2, a1))
+ attributes = a2;
+ else
+ {
+ /* Pick the longest list, and hang on the other
+ list. */
+
+ if (list_length (a1) < list_length (a2))
+ attributes = a2, a2 = a1;
+
+ for (; a2; a2 = TREE_CHAIN (a2))
+	      if (!value_member (TREE_VALUE (a2), attributes))
+ {
+ a1 = copy_node (a2);
+ TREE_CHAIN (a1) = attributes;
+ attributes = a1;
+ }
+ }
+ }
+
+ /* Treat an enum type as the unsigned integer type of the same width. */
+
+ if (TREE_CODE (t1) == ENUMERAL_TYPE)
+ t1 = type_for_size (TYPE_PRECISION (t1), 1);
+ if (TREE_CODE (t2) == ENUMERAL_TYPE)
+ t2 = type_for_size (TYPE_PRECISION (t2), 1);
+
+ code1 = TREE_CODE (t1);
+ code2 = TREE_CODE (t2);
+
+ /* If one type is complex, form the common type of the non-complex
+ components, then make that complex. Use T1 or T2 if it is the
+ required type. */
+ if (code1 == COMPLEX_TYPE || code2 == COMPLEX_TYPE)
+ {
+ tree subtype1 = code1 == COMPLEX_TYPE ? TREE_TYPE (t1) : t1;
+ tree subtype2 = code2 == COMPLEX_TYPE ? TREE_TYPE (t2) : t2;
+ tree subtype = common_type (subtype1, subtype2);
+
+ if (code1 == COMPLEX_TYPE && TREE_TYPE (t1) == subtype)
+ return build_type_attribute_variant (t1, attributes);
+ else if (code2 == COMPLEX_TYPE && TREE_TYPE (t2) == subtype)
+ return build_type_attribute_variant (t2, attributes);
+ else
+ return build_type_attribute_variant (build_complex_type (subtype),
+ attributes);
+ }
+
+ switch (code1)
+ {
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ /* If only one is real, use it as the result. */
+
+ if (code1 == REAL_TYPE && code2 != REAL_TYPE)
+ return build_type_attribute_variant (t1, attributes);
+
+ if (code2 == REAL_TYPE && code1 != REAL_TYPE)
+ return build_type_attribute_variant (t2, attributes);
+
+ /* Both real or both integers; use the one with greater precision. */
+
+ if (TYPE_PRECISION (t1) > TYPE_PRECISION (t2))
+ return build_type_attribute_variant (t1, attributes);
+ else if (TYPE_PRECISION (t2) > TYPE_PRECISION (t1))
+ return build_type_attribute_variant (t2, attributes);
+
+ /* Same precision. Prefer longs to ints even when same size. */
+
+ if (TYPE_MAIN_VARIANT (t1) == long_unsigned_type_node
+ || TYPE_MAIN_VARIANT (t2) == long_unsigned_type_node)
+ return build_type_attribute_variant (long_unsigned_type_node,
+ attributes);
+
+ if (TYPE_MAIN_VARIANT (t1) == long_integer_type_node
+ || TYPE_MAIN_VARIANT (t2) == long_integer_type_node)
+ {
+ /* But preserve unsignedness from the other type,
+ since long cannot hold all the values of an unsigned int. */
+ if (TREE_UNSIGNED (t1) || TREE_UNSIGNED (t2))
+ t1 = long_unsigned_type_node;
+ else
+ t1 = long_integer_type_node;
+ return build_type_attribute_variant (t1, attributes);
+ }
+
+ /* Otherwise prefer the unsigned one. */
+
+ if (TREE_UNSIGNED (t1))
+ return build_type_attribute_variant (t1, attributes);
+ else
+ return build_type_attribute_variant (t2, attributes);
+
+ case POINTER_TYPE:
+ /* For two pointers, do this recursively on the target type,
+ and combine the qualifiers of the two types' targets. */
+ /* This code was turned off; I don't know why.
+ But ANSI C specifies doing this with the qualifiers.
+ So I turned it on again. */
+ {
+ tree target = common_type (TYPE_MAIN_VARIANT (TREE_TYPE (t1)),
+ TYPE_MAIN_VARIANT (TREE_TYPE (t2)));
+ int constp
+ = TYPE_READONLY (TREE_TYPE (t1)) || TYPE_READONLY (TREE_TYPE (t2));
+ int volatilep
+ = TYPE_VOLATILE (TREE_TYPE (t1)) || TYPE_VOLATILE (TREE_TYPE (t2));
+ t1 = build_pointer_type (c_build_type_variant (target, constp,
+ volatilep));
+ return build_type_attribute_variant (t1, attributes);
+ }
+#if 0
+ t1 = build_pointer_type (common_type (TREE_TYPE (t1), TREE_TYPE (t2)));
+ return build_type_attribute_variant (t1, attributes);
+#endif
+
+ case ARRAY_TYPE:
+ {
+ tree elt = common_type (TREE_TYPE (t1), TREE_TYPE (t2));
+ /* Save space: see if the result is identical to one of the args. */
+ if (elt == TREE_TYPE (t1) && TYPE_DOMAIN (t1))
+ return build_type_attribute_variant (t1, attributes);
+ if (elt == TREE_TYPE (t2) && TYPE_DOMAIN (t2))
+ return build_type_attribute_variant (t2, attributes);
+ /* Merge the element types, and have a size if either arg has one. */
+ t1 = build_array_type (elt, TYPE_DOMAIN (TYPE_DOMAIN (t1) ? t1 : t2));
+ return build_type_attribute_variant (t1, attributes);
+ }
+
+ case FUNCTION_TYPE:
+ /* Function types: prefer the one that specified arg types.
+ If both do, merge the arg types. Also merge the return types. */
+ {
+ tree valtype = common_type (TREE_TYPE (t1), TREE_TYPE (t2));
+ tree p1 = TYPE_ARG_TYPES (t1);
+ tree p2 = TYPE_ARG_TYPES (t2);
+ int len;
+ tree newargs, n;
+ int i;
+
+ /* Save space: see if the result is identical to one of the args. */
+ if (valtype == TREE_TYPE (t1) && ! TYPE_ARG_TYPES (t2))
+ return build_type_attribute_variant (t1, attributes);
+ if (valtype == TREE_TYPE (t2) && ! TYPE_ARG_TYPES (t1))
+ return build_type_attribute_variant (t2, attributes);
+
+ /* Simple way if one arg fails to specify argument types. */
+ if (TYPE_ARG_TYPES (t1) == 0)
+ {
+ t1 = build_function_type (valtype, TYPE_ARG_TYPES (t2));
+ return build_type_attribute_variant (t1, attributes);
+ }
+ if (TYPE_ARG_TYPES (t2) == 0)
+ {
+ t1 = build_function_type (valtype, TYPE_ARG_TYPES (t1));
+ return build_type_attribute_variant (t1, attributes);
+ }
+
+ /* If both args specify argument types, we must merge the two
+ lists, argument by argument. */
+
+ len = list_length (p1);
+ newargs = 0;
+
+ for (i = 0; i < len; i++)
+ newargs = tree_cons (NULL_TREE, NULL_TREE, newargs);
+
+ n = newargs;
+
+ for (; p1;
+ p1 = TREE_CHAIN (p1), p2 = TREE_CHAIN (p2), n = TREE_CHAIN (n))
+ {
+ /* A null type means arg type is not specified.
+ Take whatever the other function type has. */
+ if (TREE_VALUE (p1) == 0)
+ {
+ TREE_VALUE (n) = TREE_VALUE (p2);
+ goto parm_done;
+ }
+ if (TREE_VALUE (p2) == 0)
+ {
+ TREE_VALUE (n) = TREE_VALUE (p1);
+ goto parm_done;
+ }
+
+ /* Given wait (union {union wait *u; int *i} *)
+ and wait (union wait *),
+ prefer union wait * as type of parm. */
+ if (TREE_CODE (TREE_VALUE (p1)) == UNION_TYPE
+ && TREE_VALUE (p1) != TREE_VALUE (p2))
+ {
+ tree memb;
+ for (memb = TYPE_FIELDS (TREE_VALUE (p1));
+ memb; memb = TREE_CHAIN (memb))
+ if (comptypes (TREE_TYPE (memb), TREE_VALUE (p2)))
+ {
+ TREE_VALUE (n) = TREE_VALUE (p2);
+ if (pedantic)
+ pedwarn ("function types not truly compatible in ANSI C");
+ goto parm_done;
+ }
+ }
+ if (TREE_CODE (TREE_VALUE (p2)) == UNION_TYPE
+ && TREE_VALUE (p2) != TREE_VALUE (p1))
+ {
+ tree memb;
+ for (memb = TYPE_FIELDS (TREE_VALUE (p2));
+ memb; memb = TREE_CHAIN (memb))
+ if (comptypes (TREE_TYPE (memb), TREE_VALUE (p1)))
+ {
+ TREE_VALUE (n) = TREE_VALUE (p1);
+ if (pedantic)
+ pedwarn ("function types not truly compatible in ANSI C");
+ goto parm_done;
+ }
+ }
+ TREE_VALUE (n) = common_type (TREE_VALUE (p1), TREE_VALUE (p2));
+ parm_done: ;
+ }
+
+ t1 = build_function_type (valtype, newargs);
+ /* ... falls through ... */
+ }
+
+ default:
+ return build_type_attribute_variant (t1, attributes);
+ }
+
+}
+
+/* Return 1 if TYPE1 and TYPE2 are compatible types for assignment
+ or various other operations. Return 2 if they are compatible
+ but a warning may be needed if you use them together. */
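+
+/* A sketch of the convention (hypothetical declarations): given
+   `const int c; int i;', the qualifier check below makes comptypes
+   return 0 for their types, while an enum whose precision and
+   signedness match `int' compares against `int' as 1.  The value 2
+   arises only from the attribute check or from old-style
+   function-argument mismatches handled further on.  */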
+
+int
+comptypes (type1, type2)
+ tree type1, type2;
+{
+ register tree t1 = type1;
+ register tree t2 = type2;
+ int attrval, val;
+
+ /* Suppress errors caused by previously reported errors. */
+
+ if (t1 == t2 || TREE_CODE (t1) == ERROR_MARK || TREE_CODE (t2) == ERROR_MARK)
+ return 1;
+
+ /* Treat an enum type as the integer type of the same width and
+ signedness. */
+
+ if (TREE_CODE (t1) == ENUMERAL_TYPE)
+ t1 = type_for_size (TYPE_PRECISION (t1), TREE_UNSIGNED (t1));
+ if (TREE_CODE (t2) == ENUMERAL_TYPE)
+ t2 = type_for_size (TYPE_PRECISION (t2), TREE_UNSIGNED (t2));
+
+ if (t1 == t2)
+ return 1;
+
+ /* Different classes of types can't be compatible. */
+
+ if (TREE_CODE (t1) != TREE_CODE (t2)) return 0;
+
+ /* Qualifiers must match. */
+
+ if (TYPE_READONLY (t1) != TYPE_READONLY (t2))
+ return 0;
+ if (TYPE_VOLATILE (t1) != TYPE_VOLATILE (t2))
+ return 0;
+
+  /* Allow for two different type nodes which have essentially the same
+     definition.  Note that we already checked for equality of the type
+     qualifiers (just above).  */
+
+ if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
+ return 1;
+
+#ifndef COMP_TYPE_ATTRIBUTES
+#define COMP_TYPE_ATTRIBUTES(t1,t2) 1
+#endif
+
+ /* 1 if no need for warning yet, 2 if warning cause has been seen. */
+ if (! (attrval = COMP_TYPE_ATTRIBUTES (t1, t2)))
+ return 0;
+
+ /* 1 if no need for warning yet, 2 if warning cause has been seen. */
+ val = 0;
+
+ switch (TREE_CODE (t1))
+ {
+ case POINTER_TYPE:
+ val = (TREE_TYPE (t1) == TREE_TYPE (t2)
+ ? 1 : comptypes (TREE_TYPE (t1), TREE_TYPE (t2)));
+ break;
+
+ case FUNCTION_TYPE:
+ val = function_types_compatible_p (t1, t2);
+ break;
+
+ case ARRAY_TYPE:
+ {
+ tree d1 = TYPE_DOMAIN (t1);
+ tree d2 = TYPE_DOMAIN (t2);
+ val = 1;
+
+ /* Target types must match incl. qualifiers. */
+ if (TREE_TYPE (t1) != TREE_TYPE (t2)
+ && 0 == (val = comptypes (TREE_TYPE (t1), TREE_TYPE (t2))))
+ return 0;
+
+ /* Sizes must match unless one is missing or variable. */
+ if (d1 == 0 || d2 == 0 || d1 == d2
+ || TREE_CODE (TYPE_MIN_VALUE (d1)) != INTEGER_CST
+ || TREE_CODE (TYPE_MIN_VALUE (d2)) != INTEGER_CST
+ || TREE_CODE (TYPE_MAX_VALUE (d1)) != INTEGER_CST
+ || TREE_CODE (TYPE_MAX_VALUE (d2)) != INTEGER_CST)
+ break;
+
+ if (! ((TREE_INT_CST_LOW (TYPE_MIN_VALUE (d1))
+ == TREE_INT_CST_LOW (TYPE_MIN_VALUE (d2)))
+ && (TREE_INT_CST_HIGH (TYPE_MIN_VALUE (d1))
+ == TREE_INT_CST_HIGH (TYPE_MIN_VALUE (d2)))
+ && (TREE_INT_CST_LOW (TYPE_MAX_VALUE (d1))
+ == TREE_INT_CST_LOW (TYPE_MAX_VALUE (d2)))
+ && (TREE_INT_CST_HIGH (TYPE_MAX_VALUE (d1))
+ == TREE_INT_CST_HIGH (TYPE_MAX_VALUE (d2)))))
+ val = 0;
+ break;
+ }
+
+ case RECORD_TYPE:
+ if (maybe_objc_comptypes (t1, t2, 0) == 1)
+ val = 1;
+ break;
+ }
+ return attrval == 2 && val == 1 ? 2 : val;
+}
+
+/* Return 1 if TTL and TTR are pointers to types that are equivalent,
+ ignoring their qualifiers. */
+
+static int
+comp_target_types (ttl, ttr)
+ tree ttl, ttr;
+{
+ int val;
+
+ /* Give maybe_objc_comptypes a crack at letting these types through. */
+  if ((val = maybe_objc_comptypes (ttl, ttr, 1)) >= 0)
+ return val;
+
+ val = comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (ttl)),
+ TYPE_MAIN_VARIANT (TREE_TYPE (ttr)));
+
+ if (val == 2 && pedantic)
+ pedwarn ("types are not quite compatible");
+ return val;
+}
+
+/* Subroutines of `comptypes'. */
+
+/* Return 1 if two function types F1 and F2 are compatible.
+ If either type specifies no argument types,
+ the other must specify a fixed number of self-promoting arg types.
+ Otherwise, if one type specifies only the number of arguments,
+ the other must specify that number of self-promoting arg types.
+ Otherwise, the argument types must match. */
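+
+/* For example, given the hypothetical declarations
+
+	int f ();
+	int g (int, long);
+	int h (char);
+
+   the type of f is compatible with that of g, whose argument types
+   are self-promoting, but not with that of h, since `char' undergoes
+   default promotion to `int'.  */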
+
+static int
+function_types_compatible_p (f1, f2)
+ tree f1, f2;
+{
+ tree args1, args2;
+ /* 1 if no need for warning yet, 2 if warning cause has been seen. */
+ int val = 1;
+ int val1;
+
+ if (!(TREE_TYPE (f1) == TREE_TYPE (f2)
+ || (val = comptypes (TREE_TYPE (f1), TREE_TYPE (f2)))))
+ return 0;
+
+ args1 = TYPE_ARG_TYPES (f1);
+ args2 = TYPE_ARG_TYPES (f2);
+
+ /* An unspecified parmlist matches any specified parmlist
+ whose argument types don't need default promotions. */
+
+ if (args1 == 0)
+ {
+ if (!self_promoting_args_p (args2))
+ return 0;
+ /* If one of these types comes from a non-prototype fn definition,
+ compare that with the other type's arglist.
+ If they don't match, ask for a warning (but no error). */
+ if (TYPE_ACTUAL_ARG_TYPES (f1)
+ && 1 != type_lists_compatible_p (args2, TYPE_ACTUAL_ARG_TYPES (f1)))
+ val = 2;
+ return val;
+ }
+ if (args2 == 0)
+ {
+ if (!self_promoting_args_p (args1))
+ return 0;
+ if (TYPE_ACTUAL_ARG_TYPES (f2)
+ && 1 != type_lists_compatible_p (args1, TYPE_ACTUAL_ARG_TYPES (f2)))
+ val = 2;
+ return val;
+ }
+
+ /* Both types have argument lists: compare them and propagate results. */
+ val1 = type_lists_compatible_p (args1, args2);
+ return val1 != 1 ? val1 : val;
+}
+
+/* Check two lists of types for compatibility,
+ returning 0 for incompatible, 1 for compatible,
+ or 2 for compatible with warning. */
+
+static int
+type_lists_compatible_p (args1, args2)
+ tree args1, args2;
+{
+ /* 1 if no need for warning yet, 2 if warning cause has been seen. */
+ int val = 1;
+ int newval = 0;
+
+ while (1)
+ {
+ if (args1 == 0 && args2 == 0)
+ return val;
+ /* If one list is shorter than the other,
+ they fail to match. */
+ if (args1 == 0 || args2 == 0)
+ return 0;
+ /* A null pointer instead of a type
+ means there is supposed to be an argument
+ but nothing is specified about what type it has.
+ So match anything that self-promotes. */
+ if (TREE_VALUE (args1) == 0)
+ {
+ if (! self_promoting_type_p (TREE_VALUE (args2)))
+ return 0;
+ }
+ else if (TREE_VALUE (args2) == 0)
+ {
+ if (! self_promoting_type_p (TREE_VALUE (args1)))
+ return 0;
+ }
+ else if (! (newval = comptypes (TREE_VALUE (args1), TREE_VALUE (args2))))
+ {
+ /* Allow wait (union {union wait *u; int *i} *)
+ and wait (union wait *) to be compatible. */
+ if (TREE_CODE (TREE_VALUE (args1)) == UNION_TYPE
+ && TYPE_NAME (TREE_VALUE (args1)) == 0
+ && TREE_CODE (TYPE_SIZE (TREE_VALUE (args1))) == INTEGER_CST
+ && tree_int_cst_equal (TYPE_SIZE (TREE_VALUE (args1)),
+ TYPE_SIZE (TREE_VALUE (args2))))
+ {
+ tree memb;
+ for (memb = TYPE_FIELDS (TREE_VALUE (args1));
+ memb; memb = TREE_CHAIN (memb))
+ if (comptypes (TREE_TYPE (memb), TREE_VALUE (args2)))
+ break;
+ if (memb == 0)
+ return 0;
+ }
+ else if (TREE_CODE (TREE_VALUE (args2)) == UNION_TYPE
+ && TYPE_NAME (TREE_VALUE (args2)) == 0
+ && TREE_CODE (TYPE_SIZE (TREE_VALUE (args2))) == INTEGER_CST
+ && tree_int_cst_equal (TYPE_SIZE (TREE_VALUE (args2)),
+ TYPE_SIZE (TREE_VALUE (args1))))
+ {
+ tree memb;
+ for (memb = TYPE_FIELDS (TREE_VALUE (args2));
+ memb; memb = TREE_CHAIN (memb))
+ if (comptypes (TREE_TYPE (memb), TREE_VALUE (args1)))
+ break;
+ if (memb == 0)
+ return 0;
+ }
+ else
+ return 0;
+ }
+
+ /* comptypes said ok, but record if it said to warn. */
+ if (newval > val)
+ val = newval;
+
+ args1 = TREE_CHAIN (args1);
+ args2 = TREE_CHAIN (args2);
+ }
+}
+
+/* Return 1 if PARMS specifies a fixed number of parameters
+ and none of their types is affected by default promotions. */
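+
+/* E.g. the parm list of `int g (int, long)' above qualifies; that of
+   `int h (char)' does not, nor does any list lacking a terminating
+   void entry (i.e. one that ends in `...').  */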
+
+int
+self_promoting_args_p (parms)
+ tree parms;
+{
+ register tree t;
+ for (t = parms; t; t = TREE_CHAIN (t))
+ {
+ register tree type = TREE_VALUE (t);
+
+ if (TREE_CHAIN (t) == 0 && type != void_type_node)
+ return 0;
+
+ if (type == 0)
+ return 0;
+
+ if (TYPE_MAIN_VARIANT (type) == float_type_node)
+ return 0;
+
+ if (C_PROMOTING_INTEGER_TYPE_P (type))
+ return 0;
+ }
+ return 1;
+}
+
+/* Return 1 if TYPE is not affected by default promotions. */
+
+static int
+self_promoting_type_p (type)
+ tree type;
+{
+ if (TYPE_MAIN_VARIANT (type) == float_type_node)
+ return 0;
+
+ if (C_PROMOTING_INTEGER_TYPE_P (type))
+ return 0;
+
+ return 1;
+}
+
+/* Return an unsigned type the same as TYPE in other respects. */
+
+tree
+unsigned_type (type)
+ tree type;
+{
+ tree type1 = TYPE_MAIN_VARIANT (type);
+ if (type1 == signed_char_type_node || type1 == char_type_node)
+ return unsigned_char_type_node;
+ if (type1 == integer_type_node)
+ return unsigned_type_node;
+ if (type1 == short_integer_type_node)
+ return short_unsigned_type_node;
+ if (type1 == long_integer_type_node)
+ return long_unsigned_type_node;
+ if (type1 == long_long_integer_type_node)
+ return long_long_unsigned_type_node;
+ return type;
+}
+
+/* Return a signed type the same as TYPE in other respects. */
+
+tree
+signed_type (type)
+ tree type;
+{
+ tree type1 = TYPE_MAIN_VARIANT (type);
+ if (type1 == unsigned_char_type_node || type1 == char_type_node)
+ return signed_char_type_node;
+ if (type1 == unsigned_type_node)
+ return integer_type_node;
+ if (type1 == short_unsigned_type_node)
+ return short_integer_type_node;
+ if (type1 == long_unsigned_type_node)
+ return long_integer_type_node;
+ if (type1 == long_long_unsigned_type_node)
+ return long_long_integer_type_node;
+ return type;
+}
+
+/* Return a type the same as TYPE except unsigned or
+ signed according to UNSIGNEDP. */
+
+tree
+signed_or_unsigned_type (unsignedp, type)
+ int unsignedp;
+ tree type;
+{
+ if (! INTEGRAL_TYPE_P (type))
+ return type;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (signed_char_type_node))
+ return unsignedp ? unsigned_char_type_node : signed_char_type_node;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node))
+ return unsignedp ? unsigned_type_node : integer_type_node;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (short_integer_type_node))
+ return unsignedp ? short_unsigned_type_node : short_integer_type_node;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (long_integer_type_node))
+ return unsignedp ? long_unsigned_type_node : long_integer_type_node;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (long_long_integer_type_node))
+ return (unsignedp ? long_long_unsigned_type_node
+ : long_long_integer_type_node);
+ return type;
+}
+
+/* Compute the value of the `sizeof' operator. */
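+/* (Illustration: with 8-bit chars, a type whose TYPE_SIZE is the
+   constant 96 folds through the CEIL_DIV_EXPR below to 12, since
+   sizeof counts units of `char', not bits.)  */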
+
+tree
+c_sizeof (type)
+ tree type;
+{
+ enum tree_code code = TREE_CODE (type);
+ tree t;
+
+ if (code == FUNCTION_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("sizeof applied to a function type");
+ return size_int (1);
+ }
+ if (code == VOID_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("sizeof applied to a void type");
+ return size_int (1);
+ }
+ if (code == ERROR_MARK)
+ return size_int (1);
+ if (TYPE_SIZE (type) == 0)
+ {
+ error ("sizeof applied to an incomplete type");
+ return size_int (0);
+ }
+
+ /* Convert in case a char is more than one unit. */
+ t = size_binop (CEIL_DIV_EXPR, TYPE_SIZE (type),
+ size_int (TYPE_PRECISION (char_type_node)));
+ /* size_binop does not put the constant in range, so do it now. */
+ if (TREE_CODE (t) == INTEGER_CST && force_fit_type (t, 0))
+ TREE_CONSTANT_OVERFLOW (t) = TREE_OVERFLOW (t) = 1;
+ return t;
+}
+
+tree
+c_sizeof_nowarn (type)
+ tree type;
+{
+ enum tree_code code = TREE_CODE (type);
+ tree t;
+
+ if (code == FUNCTION_TYPE
+ || code == VOID_TYPE
+ || code == ERROR_MARK)
+ return size_int (1);
+ if (TYPE_SIZE (type) == 0)
+ return size_int (0);
+
+ /* Convert in case a char is more than one unit. */
+ t = size_binop (CEIL_DIV_EXPR, TYPE_SIZE (type),
+ size_int (TYPE_PRECISION (char_type_node)));
+ force_fit_type (t, 0);
+ return t;
+}
+
+/* Compute the size to increment a pointer by. */
+
+tree
+c_size_in_bytes (type)
+ tree type;
+{
+ enum tree_code code = TREE_CODE (type);
+ tree t;
+
+ if (code == FUNCTION_TYPE)
+ return size_int (1);
+ if (code == VOID_TYPE)
+ return size_int (1);
+ if (code == ERROR_MARK)
+ return size_int (1);
+ if (TYPE_SIZE (type) == 0)
+ {
+ error ("arithmetic on pointer to an incomplete type");
+ return size_int (1);
+ }
+
+ /* Convert in case a char is more than one unit. */
+ t = size_binop (CEIL_DIV_EXPR, TYPE_SIZE (type),
+ size_int (BITS_PER_UNIT));
+ force_fit_type (t, 0);
+ return t;
+}
+
+/* Implement the __alignof keyword: Return the minimum required
+ alignment of TYPE, measured in bytes. */
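+
+/* (E.g. with TYPE_ALIGN (type) == 32 and BITS_PER_UNIT == 8 the
+   result is the integer constant 4.)  */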
+
+tree
+c_alignof (type)
+ tree type;
+{
+ enum tree_code code = TREE_CODE (type);
+
+ if (code == FUNCTION_TYPE)
+ return size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
+
+ if (code == VOID_TYPE || code == ERROR_MARK)
+ return size_int (1);
+
+ return size_int (TYPE_ALIGN (type) / BITS_PER_UNIT);
+}
+
+/* Implement the __alignof keyword: Return the minimum required
+ alignment of EXPR, measured in bytes. For VAR_DECL's and
+ FIELD_DECL's return DECL_ALIGN (which can be set from an
+ "aligned" __attribute__ specification). */
+
+tree
+c_alignof_expr (expr)
+ tree expr;
+{
+ if (TREE_CODE (expr) == VAR_DECL)
+ return size_int (DECL_ALIGN (expr) / BITS_PER_UNIT);
+
+ if (TREE_CODE (expr) == COMPONENT_REF
+ && DECL_BIT_FIELD (TREE_OPERAND (expr, 1)))
+ {
+ error ("`__alignof' applied to a bit-field");
+ return size_int (1);
+ }
+ else if (TREE_CODE (expr) == COMPONENT_REF
+ && TREE_CODE (TREE_OPERAND (expr, 1)) == FIELD_DECL)
+ return size_int (DECL_ALIGN (TREE_OPERAND (expr, 1)) / BITS_PER_UNIT);
+
+ if (TREE_CODE (expr) == INDIRECT_REF)
+ {
+ tree t = TREE_OPERAND (expr, 0);
+ tree best = t;
+ int bestalign = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (t)));
+
+ while (TREE_CODE (t) == NOP_EXPR
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == POINTER_TYPE)
+ {
+ int thisalign;
+
+ t = TREE_OPERAND (t, 0);
+ thisalign = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (t)));
+ if (thisalign > bestalign)
+ best = t, bestalign = thisalign;
+ }
+ return c_alignof (TREE_TYPE (TREE_TYPE (best)));
+ }
+ else
+ return c_alignof (TREE_TYPE (expr));
+}
+/* Return either DECL or its known constant value (if it has one). */
+
+static tree
+decl_constant_value (decl)
+ tree decl;
+{
+ if (! TREE_PUBLIC (decl)
+ /* Don't change a variable array bound or initial value to a constant
+ in a place where a variable is invalid. */
+ && current_function_decl != 0
+ && ! pedantic
+ && ! TREE_THIS_VOLATILE (decl)
+ && TREE_READONLY (decl) && ! ITERATOR_P (decl)
+ && DECL_INITIAL (decl) != 0
+ && TREE_CODE (DECL_INITIAL (decl)) != ERROR_MARK
+ /* This is invalid if initial value is not constant.
+ If it has either a function call, a memory reference,
+ or a variable, then re-evaluating it could give different results. */
+ && TREE_CONSTANT (DECL_INITIAL (decl))
+ /* Check for cases where this is sub-optimal, even though valid. */
+ && TREE_CODE (DECL_INITIAL (decl)) != CONSTRUCTOR
+ && DECL_MODE (decl) != BLKmode)
+ return DECL_INITIAL (decl);
+ return decl;
+}
+
+/* Perform default promotions for C data used in expressions.
+   Arrays and functions are converted to pointers;
+   enumeral types or short or char are converted to int.
+   In addition, symbols for manifest constants are replaced by their values.  */
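+
+/* Illustratively (hypothetical declarations, 32-bit `int'):
+
+	char c;  short s;  int v[10];  int f ();
+
+   in an expression, `c' and `s' promote to `int', `v' decays to
+   `int *', and a bare `f' becomes a pointer to function.  */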
+
+tree
+default_conversion (exp)
+ tree exp;
+{
+ register tree type = TREE_TYPE (exp);
+ register enum tree_code code = TREE_CODE (type);
+
+ /* Constants can be used directly unless they're not loadable. */
+ if (TREE_CODE (exp) == CONST_DECL)
+ exp = DECL_INITIAL (exp);
+ /* Replace a nonvolatile const static variable with its value. */
+ else if (optimize && TREE_CODE (exp) == VAR_DECL)
+ {
+ exp = decl_constant_value (exp);
+ type = TREE_TYPE (exp);
+ }
+
+  /* Strip NON_LVALUE_EXPRs and no-op conversions, since we aren't using
+     the expression as an lvalue.  */
+ /* Do not use STRIP_NOPS here! It will remove conversions from pointer
+ to integer and cause infinite recursion. */
+ while (TREE_CODE (exp) == NON_LVALUE_EXPR
+ || (TREE_CODE (exp) == NOP_EXPR
+ && TREE_TYPE (TREE_OPERAND (exp, 0)) == TREE_TYPE (exp)))
+ exp = TREE_OPERAND (exp, 0);
+
+ /* Normally convert enums to int,
+ but convert wide enums to something wider. */
+ if (code == ENUMERAL_TYPE)
+ {
+ type = type_for_size (MAX (TYPE_PRECISION (type),
+ TYPE_PRECISION (integer_type_node)),
+ ((flag_traditional
+ || TYPE_PRECISION (type) >= TYPE_PRECISION (integer_type_node))
+ && TREE_UNSIGNED (type)));
+ return convert (type, exp);
+ }
+
+ if (C_PROMOTING_INTEGER_TYPE_P (type))
+ {
+ /* Traditionally, unsignedness is preserved in default promotions.
+ Also preserve unsignedness if not really getting any wider. */
+ if (TREE_UNSIGNED (type)
+ && (flag_traditional
+ || TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)))
+ return convert (unsigned_type_node, exp);
+ return convert (integer_type_node, exp);
+ }
+ if (flag_traditional && !flag_allow_single_precision
+ && TYPE_MAIN_VARIANT (type) == float_type_node)
+ return convert (double_type_node, exp);
+ if (code == VOID_TYPE)
+ {
+ error ("void value not ignored as it ought to be");
+ return error_mark_node;
+ }
+ if (code == FUNCTION_TYPE)
+ {
+ return build_unary_op (ADDR_EXPR, exp, 0);
+ }
+ if (code == ARRAY_TYPE)
+ {
+ register tree adr;
+ tree restype = TREE_TYPE (type);
+ tree ptrtype;
+ int constp = 0;
+ int volatilep = 0;
+
+ if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'r'
+ || TREE_CODE_CLASS (TREE_CODE (exp)) == 'd')
+ {
+ constp = TREE_READONLY (exp);
+ volatilep = TREE_THIS_VOLATILE (exp);
+ }
+
+ if (TYPE_READONLY (type) || TYPE_VOLATILE (type)
+ || constp || volatilep)
+ restype = c_build_type_variant (restype,
+ TYPE_READONLY (type) || constp,
+ TYPE_VOLATILE (type) || volatilep);
+
+ if (TREE_CODE (exp) == INDIRECT_REF)
+ return convert (TYPE_POINTER_TO (restype),
+ TREE_OPERAND (exp, 0));
+
+ if (TREE_CODE (exp) == COMPOUND_EXPR)
+ {
+ tree op1 = default_conversion (TREE_OPERAND (exp, 1));
+ return build (COMPOUND_EXPR, TREE_TYPE (op1),
+ TREE_OPERAND (exp, 0), op1);
+ }
+
+ if (!lvalue_p (exp)
+ && ! (TREE_CODE (exp) == CONSTRUCTOR && TREE_STATIC (exp)))
+ {
+ error ("invalid use of non-lvalue array");
+ return error_mark_node;
+ }
+
+ ptrtype = build_pointer_type (restype);
+
+ if (TREE_CODE (exp) == VAR_DECL)
+ {
+ /* ??? This is not really quite correct
+ in that the type of the operand of ADDR_EXPR
+ is not the target type of the type of the ADDR_EXPR itself.
+ Question is, can this lossage be avoided? */
+ adr = build1 (ADDR_EXPR, ptrtype, exp);
+ if (mark_addressable (exp) == 0)
+ return error_mark_node;
+ TREE_CONSTANT (adr) = staticp (exp);
+	  TREE_SIDE_EFFECTS (adr) = 0;	/* The default would be the same as EXP's.  */
+ return adr;
+ }
+ /* This way is better for a COMPONENT_REF since it can
+ simplify the offset for a component. */
+ adr = build_unary_op (ADDR_EXPR, exp, 1);
+ return convert (ptrtype, adr);
+ }
+ return exp;
+}
+
+/* Look up component name in the structure type definition.
+
+ If this component name is found indirectly within an anonymous union,
+ store in *INDIRECT the component which directly contains
+ that anonymous union. Otherwise, set *INDIRECT to 0. */
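+
+/* E.g. for the hypothetical
+
+	struct S { union { int i; float f; }; int j; } x;
+
+   looking up `i' yields the FIELD_DECL for `i' and stores the
+   anonymous union's own FIELD_DECL in *INDIRECT, while looking up
+   `j' stores 0 there.  */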
+
+static tree
+lookup_field (type, component, indirect)
+ tree type, component;
+ tree *indirect;
+{
+ tree field;
+
+ /* If TYPE_LANG_SPECIFIC is set, then it is a sorted array of pointers
+ to the field elements. Use a binary search on this array to quickly
+ find the element. Otherwise, do a linear search. TYPE_LANG_SPECIFIC
+ will always be set for structures which have many elements. */
+
+ if (TYPE_LANG_SPECIFIC (type))
+ {
+ int bot, top, half;
+ tree *field_array = &TYPE_LANG_SPECIFIC (type)->elts[0];
+
+ field = TYPE_FIELDS (type);
+ bot = 0;
+ top = TYPE_LANG_SPECIFIC (type)->len;
+ while (top - bot > 1)
+ {
+ HOST_WIDE_INT cmp;
+
+ half = (top - bot + 1) >> 1;
+ field = field_array[bot+half];
+
+ if (DECL_NAME (field) == NULL_TREE)
+ {
+ /* Step through all anon unions in linear fashion. */
+ while (DECL_NAME (field_array[bot]) == NULL_TREE)
+ {
+ tree anon, junk;
+
+ field = field_array[bot++];
+ anon = lookup_field (TREE_TYPE (field), component, &junk);
+ if (anon != NULL_TREE)
+ {
+ *indirect = field;
+ return anon;
+ }
+ }
+
+ /* Entire record is only anon unions. */
+ if (bot > top)
+ return NULL_TREE;
+
+ /* Restart the binary search, with new lower bound. */
+ continue;
+ }
+
+ cmp = (HOST_WIDE_INT) DECL_NAME (field) - (HOST_WIDE_INT) component;
+ if (cmp == 0)
+ break;
+ if (cmp < 0)
+ bot += half;
+ else
+ top = bot + half;
+ }
+
+ if (DECL_NAME (field_array[bot]) == component)
+ field = field_array[bot];
+ else if (DECL_NAME (field) != component)
+ field = 0;
+ }
+ else
+ {
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (DECL_NAME (field) == NULL_TREE)
+ {
+ tree junk;
+ tree anon = lookup_field (TREE_TYPE (field), component, &junk);
+ if (anon != NULL_TREE)
+ {
+ *indirect = field;
+ return anon;
+ }
+ }
+
+ if (DECL_NAME (field) == component)
+ break;
+ }
+ }
+
+ *indirect = NULL_TREE;
+ return field;
+}
+
+/* Make an expression to refer to the COMPONENT field of
+ structure or union value DATUM. COMPONENT is an IDENTIFIER_NODE. */
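+
+/* Sketch: `(f (), x).m' is rewritten below as `(f (), x.m)', and
+   `(p ? a : b).m' as `p ? a.m : b.m', before any field lookup.  */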
+
+tree
+build_component_ref (datum, component)
+ tree datum, component;
+{
+ register tree type = TREE_TYPE (datum);
+ register enum tree_code code = TREE_CODE (type);
+ register tree field = NULL;
+ register tree ref;
+
+  /* If DATUM is a COMPOUND_EXPR or COND_EXPR, move our reference inside it,
+     unless we are restricted to strictly ANSI constructs.  */
+ switch (TREE_CODE (datum))
+ {
+ case COMPOUND_EXPR:
+ {
+ tree value = build_component_ref (TREE_OPERAND (datum, 1), component);
+ return build (COMPOUND_EXPR, TREE_TYPE (value),
+ TREE_OPERAND (datum, 0), value);
+ }
+ case COND_EXPR:
+ return build_conditional_expr
+ (TREE_OPERAND (datum, 0),
+ build_component_ref (TREE_OPERAND (datum, 1), component),
+ build_component_ref (TREE_OPERAND (datum, 2), component));
+ }
+
+ /* See if there is a field or component with name COMPONENT. */
+
+ if (code == RECORD_TYPE || code == UNION_TYPE)
+ {
+ tree indirect = 0;
+
+ if (TYPE_SIZE (type) == 0)
+ {
+ incomplete_type_error (NULL_TREE, type);
+ return error_mark_node;
+ }
+
+ field = lookup_field (type, component, &indirect);
+
+ if (!field)
+ {
+ error (code == RECORD_TYPE
+ ? "structure has no member named `%s'"
+ : "union has no member named `%s'",
+ IDENTIFIER_POINTER (component));
+ return error_mark_node;
+ }
+ if (TREE_TYPE (field) == error_mark_node)
+ return error_mark_node;
+
+ /* If FIELD was found buried within an anonymous union,
+ make one COMPONENT_REF to get that anonymous union,
+ then fall thru to make a second COMPONENT_REF to get FIELD. */
+ if (indirect != 0)
+ {
+ ref = build (COMPONENT_REF, TREE_TYPE (indirect), datum, indirect);
+ if (TREE_READONLY (datum) || TREE_READONLY (indirect))
+ TREE_READONLY (ref) = 1;
+ if (TREE_THIS_VOLATILE (datum) || TREE_THIS_VOLATILE (indirect))
+ TREE_THIS_VOLATILE (ref) = 1;
+ datum = ref;
+ }
+
+ ref = build (COMPONENT_REF, TREE_TYPE (field), datum, field);
+
+ if (TREE_READONLY (datum) || TREE_READONLY (field))
+ TREE_READONLY (ref) = 1;
+ if (TREE_THIS_VOLATILE (datum) || TREE_THIS_VOLATILE (field))
+ TREE_THIS_VOLATILE (ref) = 1;
+
+ return ref;
+ }
+ else if (code != ERROR_MARK)
+ error ("request for member `%s' in something not a structure or union",
+ IDENTIFIER_POINTER (component));
+
+ return error_mark_node;
+}
+
+/* Given an expression PTR for a pointer, return an expression
+ for the value pointed to.
+ ERRORSTRING is the name of the operator to appear in error messages. */
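+
+/* E.g. `*&x' collapses to plain `x' via the ADDR_EXPR shortcut below
+   (unless -fvolatile); otherwise an INDIRECT_REF is built whose
+   readonly/volatile bits mirror the pointed-to type.  */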
+
+tree
+build_indirect_ref (ptr, errorstring)
+ tree ptr;
+ char *errorstring;
+{
+ register tree pointer = default_conversion (ptr);
+ register tree type = TREE_TYPE (pointer);
+
+ if (TREE_CODE (type) == POINTER_TYPE)
+ {
+ if (TREE_CODE (pointer) == ADDR_EXPR
+ && !flag_volatile
+ && (TREE_TYPE (TREE_OPERAND (pointer, 0))
+ == TREE_TYPE (type)))
+ return TREE_OPERAND (pointer, 0);
+ else
+ {
+ tree t = TREE_TYPE (type);
+ register tree ref = build1 (INDIRECT_REF,
+ TYPE_MAIN_VARIANT (t), pointer);
+
+ if (TYPE_SIZE (t) == 0 && TREE_CODE (t) != ARRAY_TYPE)
+ {
+ error ("dereferencing pointer to incomplete type");
+ return error_mark_node;
+ }
+ if (TREE_CODE (t) == VOID_TYPE)
+ warning ("dereferencing `void *' pointer");
+
+	  /* We *must* set TREE_READONLY when dereferencing a pointer to const,
+	     so that we get the proper error message if the result is used
+	     to assign to.  Also, &* is supposed to be a no-op.
+	     And ANSI C seems to specify that the type of the result
+	     should be the const type.  (The object itself is not thereby
+	     constant; it remains valid to change it via some other,
+	     non-const pointer.)  */
+ TREE_READONLY (ref) = TYPE_READONLY (t);
+ TREE_SIDE_EFFECTS (ref)
+ = TYPE_VOLATILE (t) || TREE_SIDE_EFFECTS (pointer) || flag_volatile;
+ TREE_THIS_VOLATILE (ref) = TYPE_VOLATILE (t);
+ return ref;
+ }
+ }
+ else if (TREE_CODE (pointer) != ERROR_MARK)
+ error ("invalid type argument of `%s'", errorstring);
+ return error_mark_node;
+}
+
+/* This handles expressions of the form "a[i]", which denotes
+ an array reference.
+
+ This is logically equivalent in C to *(a+i), but we may do it differently.
+ If A is a variable or a member, we generate a primitive ARRAY_REF.
+ This avoids forcing the array out of registers, and can work on
+ arrays that are not lvalues (for example, members of structures returned
+ by functions). */
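+
+/* Thus, given `int a[10];', `a[3]' takes the ARRAY_REF path below,
+   while `3[a]' (legal, since *(a+i) commutes) falls through to the
+   pointer case, where the integer operand is moved into IND.  */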
+
+tree
+build_array_ref (array, index)
+ tree array, index;
+{
+ if (index == 0)
+ {
+ error ("subscript missing in array reference");
+ return error_mark_node;
+ }
+
+ if (TREE_TYPE (array) == error_mark_node
+ || TREE_TYPE (index) == error_mark_node)
+ return error_mark_node;
+
+ if (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE
+ && TREE_CODE (array) != INDIRECT_REF)
+ {
+ tree rval, type;
+
+ /* Subscripting with type char is likely to lose
+ on a machine where chars are signed.
+ So warn on any machine, but optionally.
+ Don't warn for unsigned char since that type is safe.
+ Don't warn for signed char because anyone who uses that
+ must have done so deliberately. */
+ if (warn_char_subscripts
+ && TYPE_MAIN_VARIANT (TREE_TYPE (index)) == char_type_node)
+ warning ("array subscript has type `char'");
+
+ /* Apply default promotions *after* noticing character types. */
+ index = default_conversion (index);
+
+ /* Require integer *after* promotion, for sake of enums. */
+ if (TREE_CODE (TREE_TYPE (index)) != INTEGER_TYPE)
+ {
+ error ("array subscript is not an integer");
+ return error_mark_node;
+ }
+
+ /* An array that is indexed by a non-constant
+ cannot be stored in a register; we must be able to do
+ address arithmetic on its address.
+ Likewise an array of elements of variable size. */
+ if (TREE_CODE (index) != INTEGER_CST
+ || (TYPE_SIZE (TREE_TYPE (TREE_TYPE (array))) != 0
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (TREE_TYPE (array)))) != INTEGER_CST))
+ {
+ if (mark_addressable (array) == 0)
+ return error_mark_node;
+ }
+      /* An array that is indexed by a constant value which is not within
+	 the array bounds cannot be stored in a register either, because we
+	 would get a crash in store_bit_field/extract_bit_field when trying
+	 to access a non-existent part of the register.  */
+ if (TREE_CODE (index) == INTEGER_CST
+ && TYPE_VALUES (TREE_TYPE (array))
+ && ! int_fits_type_p (index, TYPE_VALUES (TREE_TYPE (array))))
+ {
+ if (mark_addressable (array) == 0)
+ return error_mark_node;
+ }
+
+ if (pedantic && !lvalue_p (array))
+ {
+ if (DECL_REGISTER (array))
+ pedwarn ("ANSI C forbids subscripting `register' array");
+ else
+ pedwarn ("ANSI C forbids subscripting non-lvalue array");
+ }
+
+ if (pedantic)
+ {
+ tree foo = array;
+ while (TREE_CODE (foo) == COMPONENT_REF)
+ foo = TREE_OPERAND (foo, 0);
+ if (TREE_CODE (foo) == VAR_DECL && DECL_REGISTER (foo))
+ pedwarn ("ANSI C forbids subscripting non-lvalue array");
+ }
+
+ type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (array)));
+ rval = build (ARRAY_REF, type, array, index);
+ /* Array ref is const/volatile if the array elements are
+ or if the array is. */
+ TREE_READONLY (rval)
+ |= (TYPE_READONLY (TREE_TYPE (TREE_TYPE (array)))
+ | TREE_READONLY (array));
+ TREE_SIDE_EFFECTS (rval)
+ |= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array)))
+ | TREE_SIDE_EFFECTS (array));
+ TREE_THIS_VOLATILE (rval)
+ |= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array)))
+ /* This was added by rms on 16 Nov 91.
+ It fixes vol struct foo *a; a->elts[1]
+ in an inline function.
+ Hope it doesn't break something else. */
+ | TREE_THIS_VOLATILE (array));
+ return require_complete_type (fold (rval));
+ }
+
+ {
+ tree ar = default_conversion (array);
+ tree ind = default_conversion (index);
+
+ /* Put the integer in IND to simplify error checking. */
+ if (TREE_CODE (TREE_TYPE (ar)) == INTEGER_TYPE)
+ {
+ tree temp = ar;
+ ar = ind;
+ ind = temp;
+ }
+
+ if (ar == error_mark_node)
+ return ar;
+
+ if (TREE_CODE (TREE_TYPE (ar)) != POINTER_TYPE)
+ {
+ error ("subscripted value is neither array nor pointer");
+ return error_mark_node;
+ }
+ if (TREE_CODE (TREE_TYPE (ind)) != INTEGER_TYPE)
+ {
+ error ("array subscript is not an integer");
+ return error_mark_node;
+ }
+
+ return build_indirect_ref (build_binary_op (PLUS_EXPR, ar, ind, 0),
+ "array indexing");
+ }
+}
+
+/* Build a function call to function FUNCTION with parameters PARAMS.
+ PARAMS is a list--a chain of TREE_LIST nodes--in which the
+ TREE_VALUE of each node is a parameter-expression.
+ FUNCTION's data type may be a function type or a pointer-to-function. */
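+
+/* E.g. a call to the built-in `abs' is turned below into an ABS_EXPR
+   rather than a CALL_EXPR, so fold-const.c can evaluate `abs (-3)'
+   at compile time.  */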
+
+tree
+build_function_call (function, params)
+ tree function, params;
+{
+ register tree fntype, fundecl = 0;
+ register tree coerced_params;
+ tree name = NULL_TREE, assembler_name = NULL_TREE;
+
+  /* Strip NON_LVALUE_EXPRs, etc., since we aren't using the expression
+     as an lvalue.  */
+ STRIP_TYPE_NOPS (function);
+
+ /* Convert anything with function type to a pointer-to-function. */
+ if (TREE_CODE (function) == FUNCTION_DECL)
+ {
+ name = DECL_NAME (function);
+ assembler_name = DECL_ASSEMBLER_NAME (function);
+
+ /* Differs from default_conversion by not setting TREE_ADDRESSABLE
+ (because calling an inline function does not mean the function
+ needs to be separately compiled). */
+ fntype = build_type_variant (TREE_TYPE (function),
+ TREE_READONLY (function),
+ TREE_THIS_VOLATILE (function));
+ fundecl = function;
+ function = build1 (ADDR_EXPR, build_pointer_type (fntype), function);
+ }
+ else
+ function = default_conversion (function);
+
+ fntype = TREE_TYPE (function);
+
+ if (TREE_CODE (fntype) == ERROR_MARK)
+ return error_mark_node;
+
+ if (!(TREE_CODE (fntype) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (fntype)) == FUNCTION_TYPE))
+ {
+ error ("called object is not a function");
+ return error_mark_node;
+ }
+
+ /* fntype now gets the type of function pointed to. */
+ fntype = TREE_TYPE (fntype);
+
+ /* Convert the parameters to the types declared in the
+ function prototype, or apply default promotions. */
+
+ coerced_params
+ = convert_arguments (TYPE_ARG_TYPES (fntype), params, name, fundecl);
+
+ /* Check for errors in format strings. */
+
+ if (warn_format && (name || assembler_name))
+ check_function_format (name, assembler_name, coerced_params);
+
+ /* Recognize certain built-in functions so we can make tree-codes
+ other than CALL_EXPR. We do this when it enables fold-const.c
+ to do something useful. */
+
+ if (TREE_CODE (function) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL
+ && DECL_BUILT_IN (TREE_OPERAND (function, 0)))
+ switch (DECL_FUNCTION_CODE (TREE_OPERAND (function, 0)))
+ {
+ case BUILT_IN_ABS:
+ case BUILT_IN_LABS:
+ case BUILT_IN_FABS:
+ if (coerced_params == 0)
+ return integer_zero_node;
+ return build_unary_op (ABS_EXPR, TREE_VALUE (coerced_params), 0);
+ }
+
+ {
+ register tree result
+ = build (CALL_EXPR, TREE_TYPE (fntype),
+ function, coerced_params, NULL_TREE);
+
+ TREE_SIDE_EFFECTS (result) = 1;
+ if (TREE_TYPE (result) == void_type_node)
+ return result;
+ return require_complete_type (result);
+ }
+}
+
+/* Convert the argument expressions in the list VALUES
+ to the types in the list TYPELIST. The result is a list of converted
+ argument expressions.
+
+ If TYPELIST is exhausted, or when an element has NULL as its type,
+ perform the default conversions.
+
+ PARMLIST is the chain of parm decls for the function being called.
+ It may be 0, if that info is not available.
+ It is used only for generating error messages.
+
+ NAME is an IDENTIFIER_NODE or 0. It is used only for error messages.
+
+ This is also where warnings about wrong number of args are generated.
+
+ Both VALUES and the returned value are chains of TREE_LIST nodes
+ with the elements of the list in the TREE_VALUE slots of those nodes. */
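+
+/* Sketch: for `f (c, fl)' with `char c; float fl;' and no prototype
+   in scope, TYPELIST is 0, so the arms at the bottom of the loop
+   widen `c' to `int' and `fl' to `double'.  */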
+
+static tree
+convert_arguments (typelist, values, name, fundecl)
+ tree typelist, values, name, fundecl;
+{
+ register tree typetail, valtail;
+ register tree result = NULL;
+ int parmnum;
+
+ /* Scan the given expressions and types, producing individual
+ converted arguments and pushing them on RESULT in reverse order. */
+
+ for (valtail = values, typetail = typelist, parmnum = 0;
+ valtail;
+ valtail = TREE_CHAIN (valtail), parmnum++)
+ {
+ register tree type = typetail ? TREE_VALUE (typetail) : 0;
+ register tree val = TREE_VALUE (valtail);
+
+ if (type == void_type_node)
+ {
+ if (name)
+ error ("too many arguments to function `%s'",
+ IDENTIFIER_POINTER (name));
+ else
+ error ("too many arguments to function");
+ break;
+ }
+
+      /* Strip NON_LVALUE_EXPRs, since we aren't using the value as an lvalue.  */
+ /* Do not use STRIP_NOPS here! We do not want an enumerator with value 0
+ to convert automatically to a pointer. */
+ if (TREE_CODE (val) == NON_LVALUE_EXPR)
+ val = TREE_OPERAND (val, 0);
+
+ if (TREE_CODE (TREE_TYPE (val)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (val)) == FUNCTION_TYPE)
+ val = default_conversion (val);
+
+ val = require_complete_type (val);
+
+ if (type != 0)
+ {
+ /* Formal parm type is specified by a function prototype. */
+ tree parmval;
+
+ if (TYPE_SIZE (type) == 0)
+ {
+ error ("type of formal parameter %d is incomplete", parmnum + 1);
+ parmval = val;
+ }
+ else
+ {
+#if 0 /* This turns out not to win--there's no way to write a prototype
+ for a function whose arg type is a union with no tag. */
+ /* Nameless union automatically casts the types it contains. */
+ if (TREE_CODE (type) == UNION_TYPE && TYPE_NAME (type) == 0)
+ {
+ tree field;
+
+ for (field = TYPE_FIELDS (type); field;
+ field = TREE_CHAIN (field))
+ if (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (field)),
+ TYPE_MAIN_VARIANT (TREE_TYPE (val))))
+ break;
+
+ if (field)
+ val = build1 (CONVERT_EXPR, type, val);
+ }
+#endif
+
+ /* Optionally warn about conversions that
+ differ from the default conversions. */
+ if (warn_conversion)
+ {
+ int formal_prec = TYPE_PRECISION (type);
+
+ if (TREE_CODE (type) != REAL_TYPE
+ && TREE_CODE (TREE_TYPE (val)) == REAL_TYPE)
+ warn_for_assignment ("%s as integer rather than floating due to prototype", (char *) 0, name, parmnum + 1);
+ else if (TREE_CODE (type) == REAL_TYPE
+ && TREE_CODE (TREE_TYPE (val)) != REAL_TYPE)
+ warn_for_assignment ("%s as floating rather than integer due to prototype", (char *) 0, name, parmnum + 1);
+ else if (TREE_CODE (type) == REAL_TYPE
+ && TREE_CODE (TREE_TYPE (val)) == REAL_TYPE)
+ {
+ /* Warn if any argument is passed as `float',
+ since without a prototype it would be `double'. */
+ if (formal_prec == TYPE_PRECISION (float_type_node))
+ warn_for_assignment ("%s as `float' rather than `double' due to prototype", (char *) 0, name, parmnum + 1);
+ }
+ /* Detect integer changing in width or signedness. */
+ else if ((TREE_CODE (type) == INTEGER_TYPE
+ || TREE_CODE (type) == ENUMERAL_TYPE)
+ && (TREE_CODE (TREE_TYPE (val)) == INTEGER_TYPE
+ || TREE_CODE (TREE_TYPE (val)) == ENUMERAL_TYPE))
+ {
+ tree would_have_been = default_conversion (val);
+ tree type1 = TREE_TYPE (would_have_been);
+
+ if (TREE_CODE (type) == ENUMERAL_TYPE
+ && type == TREE_TYPE (val))
+ /* No warning if function asks for enum
+ and the actual arg is that enum type. */
+ ;
+ else if (formal_prec != TYPE_PRECISION (type1))
+ warn_for_assignment ("%s with different width due to prototype", (char *) 0, name, parmnum + 1);
+ else if (TREE_UNSIGNED (type) == TREE_UNSIGNED (type1))
+ ;
+ /* Don't complain if the formal parameter type
+ is an enum, because we can't tell now whether
+ the value was an enum--even the same enum. */
+ else if (TREE_CODE (type) == ENUMERAL_TYPE)
+ ;
+ else if (TREE_CODE (val) == INTEGER_CST
+ && int_fits_type_p (val, type))
+ /* Change in signedness doesn't matter
+ if a constant value is unaffected. */
+ ;
+ /* Likewise for a constant in a NOP_EXPR. */
+ else if (TREE_CODE (val) == NOP_EXPR
+ && TREE_CODE (TREE_OPERAND (val, 0)) == INTEGER_CST
+ && int_fits_type_p (TREE_OPERAND (val, 0), type))
+ ;
+#if 0 /* We never get such tree structure here. */
+ else if (TREE_CODE (TREE_TYPE (val)) == ENUMERAL_TYPE
+ && int_fits_type_p (TYPE_MIN_VALUE (TREE_TYPE (val)), type)
+ && int_fits_type_p (TYPE_MAX_VALUE (TREE_TYPE (val)), type))
+ /* Change in signedness doesn't matter
+ if an enum value is unaffected. */
+ ;
+#endif
+ /* If the value is extended from a narrower
+ unsigned type, it doesn't matter whether we
+ pass it as signed or unsigned; the value
+ certainly is the same either way. */
+ else if (TYPE_PRECISION (TREE_TYPE (val)) < TYPE_PRECISION (type)
+ && TREE_UNSIGNED (TREE_TYPE (val)))
+ ;
+ else if (TREE_UNSIGNED (type))
+ warn_for_assignment ("%s as unsigned due to prototype", (char *) 0, name, parmnum + 1);
+ else
+ warn_for_assignment ("%s as signed due to prototype", (char *) 0, name, parmnum + 1);
+ }
+ }
+
+ parmval = convert_for_assignment (type, val,
+ (char *)0, /* arg passing */
+ fundecl, name, parmnum + 1);
+
+#ifdef PROMOTE_PROTOTYPES
+ if ((TREE_CODE (type) == INTEGER_TYPE
+ || TREE_CODE (type) == ENUMERAL_TYPE)
+ && (TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)))
+ parmval = default_conversion (parmval);
+#endif
+ }
+ result = tree_cons (NULL_TREE, parmval, result);
+ }
+ else if (TREE_CODE (TREE_TYPE (val)) == REAL_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (val))
+ < TYPE_PRECISION (double_type_node)))
+ /* Convert `float' to `double'. */
+ result = tree_cons (NULL_TREE, convert (double_type_node, val), result);
+ else
+ /* Convert `short' and `char' to full-size `int'. */
+ result = tree_cons (NULL_TREE, default_conversion (val), result);
+
+ if (typetail)
+ typetail = TREE_CHAIN (typetail);
+ }
+
+ if (typetail != 0 && TREE_VALUE (typetail) != void_type_node)
+ {
+ if (name)
+ error ("too few arguments to function `%s'",
+ IDENTIFIER_POINTER (name));
+ else
+ error ("too few arguments to function");
+ }
+
+ return nreverse (result);
+}
+
+/* This is the entry point used by the parser
+ for binary operators in the input.
+ In addition to constructing the expression,
+ we check for operands that were written with other binary operators
+ in a way that is likely to confuse the user. */
+
+tree
+parser_build_binary_op (code, arg1, arg2)
+ enum tree_code code;
+ tree arg1, arg2;
+{
+ tree result = build_binary_op (code, arg1, arg2, 1);
+
+ char class;
+ char class1 = TREE_CODE_CLASS (TREE_CODE (arg1));
+ char class2 = TREE_CODE_CLASS (TREE_CODE (arg2));
+ enum tree_code code1 = ERROR_MARK;
+ enum tree_code code2 = ERROR_MARK;
+
+ if (class1 == 'e' || class1 == '1'
+ || class1 == '2' || class1 == '<')
+ code1 = C_EXP_ORIGINAL_CODE (arg1);
+ if (class2 == 'e' || class2 == '1'
+ || class2 == '2' || class2 == '<')
+ code2 = C_EXP_ORIGINAL_CODE (arg2);
+
+ /* Check for cases such as x+y<<z which users are likely
+ to misinterpret. If parens are used, C_EXP_ORIGINAL_CODE
+ is cleared to prevent these warnings. */
+ if (warn_parentheses)
+ {
+ if (code == LSHIFT_EXPR || code == RSHIFT_EXPR)
+ {
+ if (code1 == PLUS_EXPR || code1 == MINUS_EXPR
+ || code2 == PLUS_EXPR || code2 == MINUS_EXPR)
+ warning ("suggest parentheses around + or - inside shift");
+ }
+
+ if (code == TRUTH_ORIF_EXPR)
+ {
+ if (code1 == TRUTH_ANDIF_EXPR
+ || code2 == TRUTH_ANDIF_EXPR)
+ warning ("suggest parentheses around && within ||");
+ }
+
+ if (code == BIT_IOR_EXPR)
+ {
+ if (code1 == BIT_AND_EXPR || code1 == BIT_XOR_EXPR
+ || code1 == PLUS_EXPR || code1 == MINUS_EXPR
+ || code2 == BIT_AND_EXPR || code2 == BIT_XOR_EXPR
+ || code2 == PLUS_EXPR || code2 == MINUS_EXPR)
+ warning ("suggest parentheses around arithmetic in operand of |");
+ }
+
+ if (code == BIT_XOR_EXPR)
+ {
+ if (code1 == BIT_AND_EXPR
+ || code1 == PLUS_EXPR || code1 == MINUS_EXPR
+ || code2 == BIT_AND_EXPR
+ || code2 == PLUS_EXPR || code2 == MINUS_EXPR)
+ warning ("suggest parentheses around arithmetic in operand of ^");
+ }
+
+ if (code == BIT_AND_EXPR)
+ {
+ if (code1 == PLUS_EXPR || code1 == MINUS_EXPR
+ || code2 == PLUS_EXPR || code2 == MINUS_EXPR)
+ warning ("suggest parentheses around + or - in operand of &");
+ }
+ }
+
+ /* Similarly, check for cases like 1<=i<=10 that are probably errors. */
+ if (TREE_CODE_CLASS (code) == '<' && extra_warnings
+ && (TREE_CODE_CLASS (code1) == '<' || TREE_CODE_CLASS (code2) == '<'))
+ warning ("comparisons like X<=Y<=Z do not have their mathematical meaning");
+
+ unsigned_conversion_warning (result, arg1);
+ unsigned_conversion_warning (result, arg2);
+ overflow_warning (result);
+
+ class = TREE_CODE_CLASS (TREE_CODE (result));
+
+ /* Record the code that was specified in the source,
+ for the sake of warnings about confusing nesting. */
+ if (class == 'e' || class == '1'
+ || class == '2' || class == '<')
+ C_SET_EXP_ORIGINAL_CODE (result, code);
+ else
+ {
+ int flag = TREE_CONSTANT (result);
+ /* We used to use NOP_EXPR rather than NON_LVALUE_EXPR
+ so that convert_for_assignment wouldn't strip it.
+ That way, we got warnings for things like p = (1 - 1).
+ But it turns out we should not get those warnings. */
+ result = build1 (NON_LVALUE_EXPR, TREE_TYPE (result), result);
+ C_SET_EXP_ORIGINAL_CODE (result, code);
+ TREE_CONSTANT (result) = flag;
+ }
+
+ return result;
+}
+
+/* Build a binary-operation expression without default conversions.
+ CODE is the kind of expression to build.
+ This function differs from `build' in several ways:
+ the data type of the result is computed and recorded in it,
+ warnings are generated if arg data types are invalid,
+ special handling for addition and subtraction of pointers is known,
+ and some optimization is done (operations on narrow ints
+ are done in the narrower type when that gives the same result).
+ Constant folding is also done before the result is returned.
+
+ Note that the operands will never have enumeral types, or function
+ or array types, because either they will have the default conversions
+ performed or they have both just been converted to some other type in which
+ the arithmetic is to be done. */
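+
+/* For instance, `sp1 - sp2' with `short *sp1, *sp2' dispatches below
+   to pointer_diff, while for `us + 1' with `unsigned short us' both
+   operands arrive already promoted to `int' and the PLUS_EXPR case
+   merely requests the common type.  */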
+
+tree
+build_binary_op (code, orig_op0, orig_op1, convert_p)
+ enum tree_code code;
+ tree orig_op0, orig_op1;
+ int convert_p;
+{
+ tree type0, type1;
+ register enum tree_code code0, code1;
+ tree op0, op1;
+
+ /* Expression code to give to the expression when it is built.
+ Normally this is CODE, which is what the caller asked for,
+ but in some special cases we change it. */
+ register enum tree_code resultcode = code;
+
+ /* Data type in which the computation is to be performed.
+ In the simplest cases this is the common type of the arguments. */
+ register tree result_type = NULL;
+
+ /* Nonzero means operands have already been type-converted
+ in whatever way is necessary.
+ Zero means they need to be converted to RESULT_TYPE. */
+ int converted = 0;
+
+ /* Nonzero means after finally constructing the expression
+ give it this type. Otherwise, give it type RESULT_TYPE. */
+ tree final_type = 0;
+
+ /* Nonzero if this is an operation like MIN or MAX which can
+ safely be computed in short if both args are promoted shorts.
+ Also implies COMMON.
+ -1 indicates a bitwise operation; this makes a difference
+ in the exact conditions for when it is safe to do the operation
+ in a narrower mode. */
+ int shorten = 0;
+
+ /* Nonzero if this is a comparison operation;
+ if both args are promoted shorts, compare the original shorts.
+ Also implies COMMON. */
+ int short_compare = 0;
+
+ /* Nonzero if this is a right-shift operation, which can be computed on the
+ original short and then promoted if the operand is a promoted short. */
+ int short_shift = 0;
+
+ /* Nonzero means set RESULT_TYPE to the common type of the args. */
+ int common = 0;
+
+ if (convert_p)
+ {
+ op0 = default_conversion (orig_op0);
+ op1 = default_conversion (orig_op1);
+ }
+ else
+ {
+ op0 = orig_op0;
+ op1 = orig_op1;
+ }
+
+ type0 = TREE_TYPE (op0);
+ type1 = TREE_TYPE (op1);
+
+ /* The expression codes of the data types of the arguments tell us
+ whether the arguments are integers, floating, pointers, etc. */
+ code0 = TREE_CODE (type0);
+ code1 = TREE_CODE (type1);
+
+  /* Strip NON_LVALUE_EXPRs, etc., since we aren't using the operands
+     as lvalues.  */
+ STRIP_TYPE_NOPS (op0);
+ STRIP_TYPE_NOPS (op1);
+
+ /* If an error was already reported for one of the arguments,
+ avoid reporting another error. */
+
+ if (code0 == ERROR_MARK || code1 == ERROR_MARK)
+ return error_mark_node;
+
+ switch (code)
+ {
+ case PLUS_EXPR:
+ /* Handle the pointer + int case. */
+ if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ return pointer_int_sum (PLUS_EXPR, op0, op1);
+ else if (code1 == POINTER_TYPE && code0 == INTEGER_TYPE)
+ return pointer_int_sum (PLUS_EXPR, op1, op0);
+ else
+ common = 1;
+ break;
+
+ case MINUS_EXPR:
+ /* Subtraction of two similar pointers.
+ We must subtract them as integers, then divide by object size. */
+ if (code0 == POINTER_TYPE && code1 == POINTER_TYPE
+ && comp_target_types (type0, type1))
+ return pointer_diff (op0, op1);
+ /* Handle pointer minus int. Just like pointer plus int. */
+ else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ return pointer_int_sum (MINUS_EXPR, op0, op1);
+ else
+ common = 1;
+ break;
+
+ case MULT_EXPR:
+ common = 1;
+ break;
+
+ case TRUNC_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
+ || code0 == COMPLEX_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == REAL_TYPE
+ || code1 == COMPLEX_TYPE))
+ {
+ if (!(code0 == INTEGER_TYPE && code1 == INTEGER_TYPE))
+ resultcode = RDIV_EXPR;
+ else
+ {
+	      /* Although it would be tempting to shorten always here, that
+		 loses on some targets, since the division instruction is
+		 undefined if the quotient can't be represented in the
+		 computation mode.  We shorten only if unsigned or if
+		 dividing by something we know != -1.  */
+ shorten = (TREE_UNSIGNED (TREE_TYPE (orig_op0))
+ || (TREE_CODE (op1) == INTEGER_CST
+ && (TREE_INT_CST_LOW (op1) != -1
+ || TREE_INT_CST_HIGH (op1) != -1)));
+ }
+ common = 1;
+ }
+ break;
+
+ case BIT_AND_EXPR:
+ case BIT_ANDTC_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ shorten = -1;
+ /* If one operand is a constant, and the other is a short type
+ that has been converted to an int,
+ really do the work in the short type and then convert the
+ result to int. If we are lucky, the constant will be 0 or 1
+ in the short type, making the entire operation go away. */
+ if (TREE_CODE (op0) == INTEGER_CST
+ && TREE_CODE (op1) == NOP_EXPR
+ && TYPE_PRECISION (type1) > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op1, 0)))
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op1, 0))))
+ {
+ final_type = result_type;
+ op1 = TREE_OPERAND (op1, 0);
+ result_type = TREE_TYPE (op1);
+ }
+ if (TREE_CODE (op1) == INTEGER_CST
+ && TREE_CODE (op0) == NOP_EXPR
+ && TYPE_PRECISION (type0) > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op0, 0)))
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0))))
+ {
+ final_type = result_type;
+ op0 = TREE_OPERAND (op0, 0);
+ result_type = TREE_TYPE (op0);
+ }
+ break;
+
+ case TRUNC_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ {
+ /* Although it would be tempting to shorten always here, that loses
+ on some targets, since the modulo instruction is undefined if the
+ quotient can't be represented in the computation mode. We shorten
+ only if unsigned or if dividing by something we know != -1. */
+ shorten = (TREE_UNSIGNED (TREE_TYPE (orig_op0))
+ || (TREE_CODE (op1) == INTEGER_CST
+ && (TREE_INT_CST_LOW (op1) != -1
+ || TREE_INT_CST_HIGH (op1) != -1)));
+ common = 1;
+ }
+ break;
+
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ case TRUTH_AND_EXPR:
+ case TRUTH_OR_EXPR:
+ case TRUTH_XOR_EXPR:
+ if ((code0 == INTEGER_TYPE || code0 == POINTER_TYPE
+ || code0 == REAL_TYPE || code0 == COMPLEX_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == POINTER_TYPE
+ || code1 == REAL_TYPE || code1 == COMPLEX_TYPE))
+ {
+ /* Result of these operations is always an int,
+ but that does not mean the operands should be
+ converted to ints! */
+ result_type = integer_type_node;
+ op0 = truthvalue_conversion (op0);
+ op1 = truthvalue_conversion (op1);
+ converted = 1;
+ }
+ break;
+
+ /* Shift operations: result has same type as first operand;
+ always convert second operand to int.
+ Also set SHORT_SHIFT if shifting rightward. */
+
+ case RSHIFT_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ {
+ if (TREE_CODE (op1) == INTEGER_CST)
+ {
+ if (tree_int_cst_sgn (op1) < 0)
+ warning ("right shift count is negative");
+ else
+ {
+ if (TREE_INT_CST_LOW (op1) | TREE_INT_CST_HIGH (op1))
+ short_shift = 1;
+ if (TREE_INT_CST_HIGH (op1) != 0
+ || ((unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (op1)
+ >= TYPE_PRECISION (type0)))
+ warning ("right shift count >= width of type");
+ }
+ }
+ /* Use the type of the value to be shifted.
+ This is what most traditional C compilers do. */
+ result_type = type0;
+ /* Unless traditional, convert the shift-count to an integer,
+ regardless of size of value being shifted. */
+ if (! flag_traditional)
+ {
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
+ op1 = convert (integer_type_node, op1);
+ /* Avoid converting op1 to result_type later. */
+ converted = 1;
+ }
+ }
+ break;
+
+ case LSHIFT_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ {
+ if (TREE_CODE (op1) == INTEGER_CST)
+ {
+ if (tree_int_cst_sgn (op1) < 0)
+ warning ("left shift count is negative");
+ else if (TREE_INT_CST_HIGH (op1) != 0
+ || ((unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (op1)
+ >= TYPE_PRECISION (type0)))
+ warning ("left shift count >= width of type");
+ }
+ /* Use the type of the value to be shifted.
+ This is what most traditional C compilers do. */
+ result_type = type0;
+ /* Unless traditional, convert the shift-count to an integer,
+ regardless of size of value being shifted. */
+ if (! flag_traditional)
+ {
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
+ op1 = convert (integer_type_node, op1);
+ /* Avoid converting op1 to result_type later. */
+ converted = 1;
+ }
+ }
+ break;
+
+ case RROTATE_EXPR:
+ case LROTATE_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ {
+ if (TREE_CODE (op1) == INTEGER_CST)
+ {
+ if (tree_int_cst_sgn (op1) < 0)
+ warning ("shift count is negative");
+ else if (TREE_INT_CST_HIGH (op1) != 0
+ || ((unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (op1)
+ >= TYPE_PRECISION (type0)))
+ warning ("shift count >= width of type");
+ }
+ /* Use the type of the value to be shifted.
+ This is what most traditional C compilers do. */
+ result_type = type0;
+ /* Unless traditional, convert the shift-count to an integer,
+ regardless of size of value being shifted. */
+ if (! flag_traditional)
+ {
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
+ op1 = convert (integer_type_node, op1);
+ /* Avoid converting op1 to result_type later. */
+ converted = 1;
+ }
+ }
+ break;
+
+ case EQ_EXPR:
+ case NE_EXPR:
+ /* Result of comparison is always int,
+ but don't convert the args to int! */
+ result_type = integer_type_node;
+ converted = 1;
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
+ || code0 == COMPLEX_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == REAL_TYPE
+ || code1 == COMPLEX_TYPE))
+ short_compare = 1;
+ else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
+ {
+ register tree tt0 = TREE_TYPE (type0);
+ register tree tt1 = TREE_TYPE (type1);
+ /* Anything compares with void *. void * compares with anything.
+ Otherwise, the targets must be compatible
+ and both must be object or both incomplete. */
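+ /* For example, comparing an `int *' with a `void *' is always
+ accepted, while comparing an `int *' with a `double *' draws the
+ pedwarn below unless a cast is written. */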
+ if (comp_target_types (type0, type1))
+ ;
+ else if (TYPE_MAIN_VARIANT (tt0) == void_type_node)
+ {
+ /* op0 != orig_op0 detects the case of something
+ whose value is 0 but which isn't a valid null ptr const. */
+ if (pedantic && (!integer_zerop (op0) || op0 != orig_op0)
+ && TREE_CODE (tt1) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids comparison of `void *' with function pointer");
+ }
+ else if (TYPE_MAIN_VARIANT (tt1) == void_type_node)
+ {
+ if (pedantic && (!integer_zerop (op1) || op1 != orig_op1)
+ && TREE_CODE (tt0) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids comparison of `void *' with function pointer");
+ }
+ else
+ pedwarn ("comparison of distinct pointer types lacks a cast");
+ }
+ else if (code0 == POINTER_TYPE && TREE_CODE (op1) == INTEGER_CST
+ && integer_zerop (op1))
+ op1 = null_pointer_node;
+ else if (code1 == POINTER_TYPE && TREE_CODE (op0) == INTEGER_CST
+ && integer_zerop (op0))
+ op0 = null_pointer_node;
+ else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ {
+ if (! flag_traditional)
+ pedwarn ("comparison between pointer and integer");
+ op1 = convert (TREE_TYPE (op0), op1);
+ }
+ else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE)
+ {
+ if (! flag_traditional)
+ pedwarn ("comparison between pointer and integer");
+ op0 = convert (TREE_TYPE (op1), op0);
+ }
+ else
+ /* If args are not valid, clear out RESULT_TYPE
+ to cause an error message later. */
+ result_type = 0;
+ break;
+
+ case MAX_EXPR:
+ case MIN_EXPR:
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == REAL_TYPE))
+ shorten = 1;
+ else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
+ {
+ if (! comp_target_types (type0, type1))
+ pedwarn ("comparison of distinct pointer types lacks a cast");
+ else if (pedantic
+ && TREE_CODE (TREE_TYPE (type0)) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids ordered comparisons of pointers to functions");
+ result_type = common_type (type0, type1);
+ }
+ break;
+
+ case LE_EXPR:
+ case GE_EXPR:
+ case LT_EXPR:
+ case GT_EXPR:
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == REAL_TYPE))
+ short_compare = 1;
+ else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
+ {
+ if (! comp_target_types (type0, type1))
+ pedwarn ("comparison of distinct pointer types lacks a cast");
+ else if ((TYPE_SIZE (TREE_TYPE (type0)) != 0)
+ != (TYPE_SIZE (TREE_TYPE (type1)) != 0))
+ pedwarn ("comparison of complete and incomplete pointers");
+ else if (pedantic
+ && TREE_CODE (TREE_TYPE (type0)) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids ordered comparisons of pointers to functions");
+ result_type = integer_type_node;
+ }
+ else if (code0 == POINTER_TYPE && TREE_CODE (op1) == INTEGER_CST
+ && integer_zerop (op1))
+ {
+ result_type = integer_type_node;
+ op1 = null_pointer_node;
+ if (pedantic)
+ pedwarn ("ordered comparison of pointer with integer zero");
+ }
+ else if (code1 == POINTER_TYPE && TREE_CODE (op0) == INTEGER_CST
+ && integer_zerop (op0))
+ {
+ result_type = integer_type_node;
+ op0 = null_pointer_node;
+ if (pedantic)
+ pedwarn ("ordered comparison of pointer with integer zero");
+ }
+ else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ {
+ result_type = integer_type_node;
+ if (! flag_traditional)
+ pedwarn ("comparison between pointer and integer");
+ op1 = convert (TREE_TYPE (op0), op1);
+ }
+ else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE)
+ {
+ result_type = integer_type_node;
+ if (! flag_traditional)
+ pedwarn ("comparison between pointer and integer");
+ op0 = convert (TREE_TYPE (op1), op0);
+ }
+ converted = 1;
+ break;
+ }
+
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == COMPLEX_TYPE)
+ &&
+ (code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE))
+ {
+ int none_complex = (code0 != COMPLEX_TYPE && code1 != COMPLEX_TYPE);
+
+ if (shorten || common || short_compare)
+ result_type = common_type (type0, type1);
+
+ /* For certain operations (which identify themselves by shorten != 0)
+ if both args were extended from the same smaller type,
+ do the arithmetic in that type and then extend.
+
+ shorten != 0 and != 1 indicates a bitwise operation.
+ For them, this optimization is safe only if
+ both args are zero-extended or both are sign-extended.
+ Otherwise, we might change the result.
+ E.g., (short)-1 | (unsigned short)-1 is (int)-1
+ but calculated in (unsigned short) it would be (unsigned short)-1. */
+
+ if (shorten && none_complex)
+ {
+ int unsigned0, unsigned1;
+ tree arg0 = get_narrower (op0, &unsigned0);
+ tree arg1 = get_narrower (op1, &unsigned1);
+ /* UNS is 1 if the operation to be done is an unsigned one. */
+ int uns = TREE_UNSIGNED (result_type);
+ tree type;
+
+ final_type = result_type;
+
+ /* Handle the case that OP0 (or OP1) does not *contain* a conversion
+ but it *requires* conversion to FINAL_TYPE. */
+
+ if ((TYPE_PRECISION (TREE_TYPE (op0))
+ == TYPE_PRECISION (TREE_TYPE (arg0)))
+ && TREE_TYPE (op0) != final_type)
+ unsigned0 = TREE_UNSIGNED (TREE_TYPE (op0));
+ if ((TYPE_PRECISION (TREE_TYPE (op1))
+ == TYPE_PRECISION (TREE_TYPE (arg1)))
+ && TREE_TYPE (op1) != final_type)
+ unsigned1 = TREE_UNSIGNED (TREE_TYPE (op1));
+
+ /* Now UNSIGNED0 is 1 if ARG0 zero-extends to FINAL_TYPE. */
+
+ /* For bitwise operations, signedness of nominal type
+ does not matter. Consider only how operands were extended. */
+ if (shorten == -1)
+ uns = unsigned0;
+
+ /* Note that in all three cases below we refrain from optimizing
+ an unsigned operation on sign-extended args.
+ That would not be valid. */
+
+ /* Both args variable: if both extended in same way
+ from same width, do it in that width.
+ Do it unsigned if args were zero-extended. */
+ if ((TYPE_PRECISION (TREE_TYPE (arg0))
+ < TYPE_PRECISION (result_type))
+ && (TYPE_PRECISION (TREE_TYPE (arg1))
+ == TYPE_PRECISION (TREE_TYPE (arg0)))
+ && unsigned0 == unsigned1
+ && (unsigned0 || !uns))
+ result_type
+ = signed_or_unsigned_type (unsigned0,
+ common_type (TREE_TYPE (arg0), TREE_TYPE (arg1)));
+ else if (TREE_CODE (arg0) == INTEGER_CST
+ && (unsigned1 || !uns)
+ && (TYPE_PRECISION (TREE_TYPE (arg1))
+ < TYPE_PRECISION (result_type))
+ && (type = signed_or_unsigned_type (unsigned1,
+ TREE_TYPE (arg1)),
+ int_fits_type_p (arg0, type)))
+ result_type = type;
+ else if (TREE_CODE (arg1) == INTEGER_CST
+ && (unsigned0 || !uns)
+ && (TYPE_PRECISION (TREE_TYPE (arg0))
+ < TYPE_PRECISION (result_type))
+ && (type = signed_or_unsigned_type (unsigned0,
+ TREE_TYPE (arg0)),
+ int_fits_type_p (arg1, type)))
+ result_type = type;
+ }
+
+ /* Shifts can be shortened if shifting right. */
+
+ if (short_shift)
+ {
+ int unsigned_arg;
+ tree arg0 = get_narrower (op0, &unsigned_arg);
+
+ final_type = result_type;
+
+ if (arg0 == op0 && final_type == TREE_TYPE (op0))
+ unsigned_arg = TREE_UNSIGNED (TREE_TYPE (op0));
+
+ if (TYPE_PRECISION (TREE_TYPE (arg0)) < TYPE_PRECISION (result_type)
+ /* If arg is sign-extended and then unsigned-shifted,
+ we can simulate this with a signed shift in arg's type
+ only if the extended result is at least twice as wide
+ as the arg. Otherwise, the shift could use up all the
+ ones made by sign-extension and bring in zeros.
+ We can't optimize that case at all, but in most machines
+ it never happens because available widths are 2**N. */
+ && (!TREE_UNSIGNED (final_type)
+ || unsigned_arg
+ || 2 * TYPE_PRECISION (TREE_TYPE (arg0)) <= TYPE_PRECISION (result_type)))
+ {
+ /* Do an unsigned shift if the operand was zero-extended. */
+ result_type
+ = signed_or_unsigned_type (unsigned_arg,
+ TREE_TYPE (arg0));
+ /* Convert value-to-be-shifted to that type. */
+ if (TREE_TYPE (op0) != result_type)
+ op0 = convert (result_type, op0);
+ converted = 1;
+ }
+ }
+
+ /* Comparison operations are shortened too but differently.
+ They identify themselves by setting short_compare = 1. */
+
+ if (short_compare)
+ {
+ /* Don't write &op0, etc., because that would prevent op0
+ from being kept in a register.
+ Instead, make copies of our local variables and
+ pass the copies by reference, then copy them back afterward. */
+ tree xop0 = op0, xop1 = op1, xresult_type = result_type;
+ enum tree_code xresultcode = resultcode;
+ tree val
+ = shorten_compare (&xop0, &xop1, &xresult_type, &xresultcode);
+ if (val != 0)
+ return val;
+ op0 = xop0, op1 = xop1, result_type = xresult_type;
+ resultcode = xresultcode;
+
+ if (extra_warnings)
+ {
+ tree op0_type = TREE_TYPE (orig_op0);
+ tree op1_type = TREE_TYPE (orig_op1);
+ int op0_unsigned = TREE_UNSIGNED (op0_type);
+ int op1_unsigned = TREE_UNSIGNED (op1_type);
+
+ /* Give warnings for comparisons between signed and unsigned
+ quantities that will fail. Do not warn if the signed quantity
+ is an unsuffixed integer literal (or some static constant
+ expression involving such literals) and it is positive.
+ Do not warn if the width of the unsigned quantity is less
+ than that of the signed quantity, since in this case all
+ values of the unsigned quantity fit in the signed quantity.
+ Do not warn if the signed type is the same size as the
+ result_type since sign extension does not cause trouble in
+ this case. */
+ /* Do the checking based on the original operand trees, so that
+ casts will be considered, but default promotions won't be. */
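+ /* For example, assuming 16-bit shorts and 32-bit ints: comparing
+ an `unsigned short' against a `short' that may be negative can
+ end up as an unsigned 16-bit comparison, where a negative value
+ turns into a large positive one; such comparisons are what is
+ warned about here. */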
+ if (op0_unsigned != op1_unsigned
+ && ((op0_unsigned
+ && TYPE_PRECISION (op0_type) >= TYPE_PRECISION (op1_type)
+ && TYPE_PRECISION (op0_type) < TYPE_PRECISION (result_type)
+ && (TREE_CODE (op1) != INTEGER_CST
+ || (TREE_CODE (op1) == INTEGER_CST
+ && INT_CST_LT (op1, integer_zero_node))))
+ ||
+ (op1_unsigned
+ && TYPE_PRECISION (op1_type) >= TYPE_PRECISION (op0_type)
+ && TYPE_PRECISION (op1_type) < TYPE_PRECISION (result_type)
+ && (TREE_CODE (op0) != INTEGER_CST
+ || (TREE_CODE (op0) == INTEGER_CST
+ && INT_CST_LT (op0, integer_zero_node))))))
+ warning ("comparison between signed and unsigned");
+ }
+ }
+ }
+
+ /* At this point, RESULT_TYPE must be nonzero to avoid an error message.
+ If CONVERTED is zero, both args will be converted to type RESULT_TYPE.
+ Then the expression will be built.
+ It will be given type FINAL_TYPE if that is nonzero;
+ otherwise, it will be given type RESULT_TYPE. */
+
+ if (!result_type)
+ {
+ binary_op_error (code);
+ return error_mark_node;
+ }
+
+ if (! converted)
+ {
+ if (TREE_TYPE (op0) != result_type)
+ op0 = convert (result_type, op0);
+ if (TREE_TYPE (op1) != result_type)
+ op1 = convert (result_type, op1);
+ }
+
+ {
+ register tree result = build (resultcode, result_type, op0, op1);
+ register tree folded;
+
+ folded = fold (result);
+ if (folded == result)
+ TREE_CONSTANT (folded) = TREE_CONSTANT (op0) & TREE_CONSTANT (op1);
+ if (final_type != 0)
+ return convert (final_type, folded);
+ return folded;
+ }
+}
+
+/* Return a tree for the sum or difference (RESULTCODE says which)
+ of pointer PTROP and integer INTOP. */
+
+static tree
+pointer_int_sum (resultcode, ptrop, intop)
+ enum tree_code resultcode;
+ register tree ptrop, intop;
+{
+ tree size_exp;
+
+ register tree result;
+ register tree folded;
+
+ /* The result is a pointer of the same type that is being added. */
+
+ register tree result_type = TREE_TYPE (ptrop);
+
+ if (TREE_CODE (TREE_TYPE (result_type)) == VOID_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("pointer of type `void *' used in arithmetic");
+ size_exp = integer_one_node;
+ }
+ else if (TREE_CODE (TREE_TYPE (result_type)) == FUNCTION_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("pointer to a function used in arithmetic");
+ size_exp = integer_one_node;
+ }
+ else
+ size_exp = c_size_in_bytes (TREE_TYPE (result_type));
+
+ /* If what we are about to multiply by the size of the elements
+ contains a constant term, apply distributive law
+ and multiply that constant term separately.
+ This helps produce common subexpressions. */
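+ /* For example, for `int *p', `p + (i + 1)' is rearranged here as
+ `(p + 1) + i', so the constant part `p + 1' can be shared as a
+ common subexpression with other references such as `p[1]'. */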
+
+ if ((TREE_CODE (intop) == PLUS_EXPR || TREE_CODE (intop) == MINUS_EXPR)
+ && ! TREE_CONSTANT (intop)
+ && TREE_CONSTANT (TREE_OPERAND (intop, 1))
+ && TREE_CONSTANT (size_exp)
+ /* If the constant comes from pointer subtraction,
+ skip this optimization--it would cause an error. */
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (intop, 0))) == INTEGER_TYPE)
+ {
+ enum tree_code subcode = resultcode;
+ tree int_type = TREE_TYPE (intop);
+ if (TREE_CODE (intop) == MINUS_EXPR)
+ subcode = (subcode == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR);
+ /* Convert both subexpression types to the type of intop,
+ because weird cases involving pointer arithmetic
+ can result in a sum or difference with different type args. */
+ ptrop = build_binary_op (subcode, ptrop,
+ convert (int_type, TREE_OPERAND (intop, 1)), 1);
+ intop = convert (int_type, TREE_OPERAND (intop, 0));
+ }
+
+ /* Convert the integer argument to a type the same size as a pointer
+ so the multiply won't overflow spuriously. */
+
+ if (TYPE_PRECISION (TREE_TYPE (intop)) != POINTER_SIZE)
+ intop = convert (type_for_size (POINTER_SIZE, 0), intop);
+
+ /* Replace the integer argument with a suitable product by the object size.
+ Do this multiplication as signed, then convert to the appropriate
+ pointer type (actually unsigned integral). */
+
+ intop = convert (result_type,
+ build_binary_op (MULT_EXPR, intop,
+ convert (TREE_TYPE (intop), size_exp), 1));
+
+ /* Create the sum or difference. */
+
+ result = build (resultcode, result_type, ptrop, intop);
+
+ folded = fold (result);
+ if (folded == result)
+ TREE_CONSTANT (folded) = TREE_CONSTANT (ptrop) & TREE_CONSTANT (intop);
+ return folded;
+}
+
+ /* Return a tree for the difference of pointers OP0 and OP1.
+ The resulting tree has type ptrdiff_t (ptrdiff_type_node). */
+
+static tree
+pointer_diff (op0, op1)
+ register tree op0, op1;
+{
+ register tree result, folded;
+ tree restype = ptrdiff_type_node;
+
+ tree target_type = TREE_TYPE (TREE_TYPE (op0));
+
+ if (pedantic || warn_pointer_arith)
+ {
+ if (TREE_CODE (target_type) == VOID_TYPE)
+ pedwarn ("pointer of type `void *' used in subtraction");
+ if (TREE_CODE (target_type) == FUNCTION_TYPE)
+ pedwarn ("pointer to a function used in subtraction");
+ }
+
+ /* First do the subtraction as integers;
+ then drop through to build the divide operator. */
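+ /* For example, for `int *p, *q' with 4-byte ints, `p - q' becomes
+ ((ptrdiff_t) p - (ptrdiff_t) q) / 4; EXACT_DIV_EXPR records that
+ the byte difference is known to be a multiple of the size. */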
+
+ op0 = build_binary_op (MINUS_EXPR, convert (restype, op0),
+ convert (restype, op1), 1);
+ /* This generates an error if op1 is pointer to incomplete type. */
+ if (TYPE_SIZE (TREE_TYPE (TREE_TYPE (op1))) == 0)
+ error ("arithmetic on pointer to an incomplete type");
+
+ /* This generates an error if op0 is pointer to incomplete type. */
+ op1 = c_size_in_bytes (target_type);
+
+ /* Divide by the size, in easiest possible way. */
+
+ result = build (EXACT_DIV_EXPR, restype, op0, convert (restype, op1));
+
+ folded = fold (result);
+ if (folded == result)
+ TREE_CONSTANT (folded) = TREE_CONSTANT (op0) & TREE_CONSTANT (op1);
+ return folded;
+}
+
+/* Construct and perhaps optimize a tree representation
+ for a unary operation. CODE, a tree_code, specifies the operation
+ and XARG is the operand. NOCONVERT nonzero suppresses
+ the default promotions (such as from short to int). */
+
+tree
+build_unary_op (code, xarg, noconvert)
+ enum tree_code code;
+ tree xarg;
+ int noconvert;
+{
+ /* No default_conversion here. It causes trouble for ADDR_EXPR. */
+ register tree arg = xarg;
+ register tree argtype = 0;
+ register enum tree_code typecode = TREE_CODE (TREE_TYPE (arg));
+ char *errstring = NULL;
+ tree val;
+
+ if (typecode == ERROR_MARK)
+ return error_mark_node;
+ if (typecode == ENUMERAL_TYPE)
+ typecode = INTEGER_TYPE;
+
+ switch (code)
+ {
+ case CONVERT_EXPR:
+ /* This is used for unary plus, because a CONVERT_EXPR
+ is enough to prevent anybody from looking inside for
+ associativity, but won't generate any code. */
+ if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
+ || typecode == COMPLEX_TYPE))
+ errstring = "wrong type argument to unary plus";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case NEGATE_EXPR:
+ if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
+ || typecode == COMPLEX_TYPE))
+ errstring = "wrong type argument to unary minus";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case BIT_NOT_EXPR:
+ if (typecode == COMPLEX_TYPE)
+ {
+ code = CONJ_EXPR;
+ if (!noconvert)
+ arg = default_conversion (arg);
+ }
+ else if (typecode != INTEGER_TYPE)
+ errstring = "wrong type argument to bit-complement";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case ABS_EXPR:
+ if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
+ || typecode == COMPLEX_TYPE))
+ errstring = "wrong type argument to abs";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case CONJ_EXPR:
+ /* Conjugating a real value is a no-op, but allow it anyway. */
+ if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
+ || typecode == COMPLEX_TYPE))
+ errstring = "wrong type argument to conjugation";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case TRUTH_NOT_EXPR:
+ if (typecode != INTEGER_TYPE
+ && typecode != REAL_TYPE && typecode != POINTER_TYPE
+ && typecode != COMPLEX_TYPE
+ /* These will convert to a pointer. */
+ && typecode != ARRAY_TYPE && typecode != FUNCTION_TYPE)
+ {
+ errstring = "wrong type argument to unary exclamation mark";
+ break;
+ }
+ arg = truthvalue_conversion (arg);
+ return invert_truthvalue (arg);
+
+ case NOP_EXPR:
+ break;
+
+ case REALPART_EXPR:
+ if (TREE_CODE (arg) == COMPLEX_CST)
+ return TREE_REALPART (arg);
+ else if (TREE_CODE (TREE_TYPE (arg)) == COMPLEX_TYPE)
+ return fold (build1 (REALPART_EXPR, TREE_TYPE (TREE_TYPE (arg)), arg));
+ else
+ return arg;
+
+ case IMAGPART_EXPR:
+ if (TREE_CODE (arg) == COMPLEX_CST)
+ return TREE_IMAGPART (arg);
+ else if (TREE_CODE (TREE_TYPE (arg)) == COMPLEX_TYPE)
+ return fold (build1 (IMAGPART_EXPR, TREE_TYPE (TREE_TYPE (arg)), arg));
+ else
+ return convert (TREE_TYPE (arg), integer_zero_node);
+
+ case PREINCREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ /* Handle complex lvalues (when permitted)
+ by reduction to simpler cases. */
+
+ val = unary_complex_lvalue (code, arg);
+ if (val != 0)
+ return val;
+
+ /* Increment or decrement the real part of the value,
+ and don't change the imaginary part. */
+ if (typecode == COMPLEX_TYPE)
+ {
+ tree real, imag;
+
+ arg = stabilize_reference (arg);
+ real = build_unary_op (REALPART_EXPR, arg, 1);
+ imag = build_unary_op (IMAGPART_EXPR, arg, 1);
+ return build (COMPLEX_EXPR, TREE_TYPE (arg),
+ build_unary_op (code, real, 1), imag);
+ }
+
+ /* Report invalid types. */
+
+ if (typecode != POINTER_TYPE
+ && typecode != INTEGER_TYPE && typecode != REAL_TYPE)
+ {
+ if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
+ errstring ="wrong type argument to increment";
+ else
+ errstring ="wrong type argument to decrement";
+ break;
+ }
+
+ {
+ register tree inc;
+ tree result_type = TREE_TYPE (arg);
+
+ arg = get_unwidened (arg, 0);
+ argtype = TREE_TYPE (arg);
+
+ /* Compute the increment. */
+
+ if (typecode == POINTER_TYPE)
+ {
+ /* If pointer target is an undefined struct,
+ we just cannot know how to do the arithmetic. */
+ if (TYPE_SIZE (TREE_TYPE (result_type)) == 0)
+ error ("%s of pointer to unknown structure",
+ ((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? "increment" : "decrement"));
+ else if ((pedantic || warn_pointer_arith)
+ && (TREE_CODE (TREE_TYPE (result_type)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (result_type)) == VOID_TYPE))
+ pedwarn ("wrong type argument to %s",
+ ((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? "increment" : "decrement"));
+ inc = c_sizeof_nowarn (TREE_TYPE (result_type));
+ }
+ else
+ inc = integer_one_node;
+
+ inc = convert (argtype, inc);
+
+ /* Handle incrementing a cast-expression. */
+
+ while (1)
+ switch (TREE_CODE (arg))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ pedantic_lvalue_warning (CONVERT_EXPR);
+ /* If the real type has the same machine representation
+ as the type it is cast to, we can make better output
+ by adding directly to the inside of the cast. */
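+ /* For example, on a machine where `long' and `int' have the same
+ mode, `((long) i)++' for an int `i' can increment `i' directly;
+ otherwise the else branch expands an explicit read-modify-write
+ sequence. */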
+ if ((TREE_CODE (TREE_TYPE (arg))
+ == TREE_CODE (TREE_TYPE (TREE_OPERAND (arg, 0))))
+ && (TYPE_MODE (TREE_TYPE (arg))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (arg, 0)))))
+ arg = TREE_OPERAND (arg, 0);
+ else
+ {
+ tree incremented, modify, value;
+ arg = stabilize_reference (arg);
+ if (code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR)
+ value = arg;
+ else
+ value = save_expr (arg);
+ incremented = build (((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? PLUS_EXPR : MINUS_EXPR),
+ argtype, value, inc);
+ TREE_SIDE_EFFECTS (incremented) = 1;
+ modify = build_modify_expr (arg, NOP_EXPR, incremented);
+ value = build (COMPOUND_EXPR, TREE_TYPE (arg), modify, value);
+ TREE_USED (value) = 1;
+ return value;
+ }
+ break;
+
+ default:
+ goto give_up;
+ }
+ give_up:
+
+ /* Complain about anything else that is not a true lvalue. */
+ if (!lvalue_or_else (arg, ((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? "increment" : "decrement")))
+ return error_mark_node;
+
+ /* Report a read-only lvalue. */
+ if (TREE_READONLY (arg))
+ readonly_warning (arg,
+ ((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? "increment" : "decrement"));
+
+ val = build (code, TREE_TYPE (arg), arg, inc);
+ TREE_SIDE_EFFECTS (val) = 1;
+ val = convert (result_type, val);
+ if (TREE_CODE (val) != code)
+ TREE_NO_UNUSED_WARNING (val) = 1;
+ return val;
+ }
+
+ case ADDR_EXPR:
+ /* Note that this operation never does default_conversion
+ regardless of NOCONVERT. */
+
+ /* Let &* cancel out to simplify resulting code. */
+ if (TREE_CODE (arg) == INDIRECT_REF)
+ {
+ /* Don't let this be an lvalue. */
+ if (lvalue_p (TREE_OPERAND (arg, 0)))
+ return non_lvalue (TREE_OPERAND (arg, 0));
+ return TREE_OPERAND (arg, 0);
+ }
+
+ /* For &x[y], return x+y */
+ if (TREE_CODE (arg) == ARRAY_REF)
+ {
+ if (mark_addressable (TREE_OPERAND (arg, 0)) == 0)
+ return error_mark_node;
+ return build_binary_op (PLUS_EXPR, TREE_OPERAND (arg, 0),
+ TREE_OPERAND (arg, 1), 1);
+ }
+
+ /* Handle complex lvalues (when permitted)
+ by reduction to simpler cases. */
+ val = unary_complex_lvalue (code, arg);
+ if (val != 0)
+ return val;
+
+#if 0 /* Turned off because inconsistent;
+ float f; *&(int)f = 3.4 stores in int format
+ whereas (int)f = 3.4 stores in float format. */
+ /* Address of a cast is just a cast of the address
+ of the operand of the cast. */
+ switch (TREE_CODE (arg))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ if (pedantic)
+ pedwarn ("ANSI C forbids the address of a cast expression");
+ return convert (build_pointer_type (TREE_TYPE (arg)),
+ build_unary_op (ADDR_EXPR, TREE_OPERAND (arg, 0),
+ 0));
+ }
+#endif
+
+ /* Allow the address of a constructor if all the elements
+ are constant. */
+ if (TREE_CODE (arg) == CONSTRUCTOR && TREE_CONSTANT (arg))
+ ;
+ /* Anything not already handled and not a true memory reference
+ is an error. */
+ else if (typecode != FUNCTION_TYPE && !lvalue_or_else (arg, "unary `&'"))
+ return error_mark_node;
+
+ /* Ordinary case; arg is a COMPONENT_REF or a decl. */
+ argtype = TREE_TYPE (arg);
+ /* If the lvalue is const or volatile,
+ merge that into the type that the address will point to. */
+ if (TREE_CODE_CLASS (TREE_CODE (arg)) == 'd'
+ || TREE_CODE_CLASS (TREE_CODE (arg)) == 'r')
+ {
+ if (TREE_READONLY (arg) || TREE_THIS_VOLATILE (arg))
+ argtype = c_build_type_variant (argtype,
+ TREE_READONLY (arg),
+ TREE_THIS_VOLATILE (arg));
+ }
+
+ argtype = build_pointer_type (argtype);
+
+ if (mark_addressable (arg) == 0)
+ return error_mark_node;
+
+ {
+ tree addr;
+
+ if (TREE_CODE (arg) == COMPONENT_REF)
+ {
+ tree field = TREE_OPERAND (arg, 1);
+
+ addr = build_unary_op (ADDR_EXPR, TREE_OPERAND (arg, 0), 0);
+
+ if (DECL_BIT_FIELD (field))
+ {
+ error ("attempt to take address of bit-field structure member `%s'",
+ IDENTIFIER_POINTER (DECL_NAME (field)));
+ return error_mark_node;
+ }
+
+ addr = convert (argtype, addr);
+
+ if (! integer_zerop (DECL_FIELD_BITPOS (field)))
+ {
+ tree offset
+ = size_binop (EASY_DIV_EXPR, DECL_FIELD_BITPOS (field),
+ size_int (BITS_PER_UNIT));
+ int flag = TREE_CONSTANT (addr);
+ addr = fold (build (PLUS_EXPR, argtype,
+ addr, convert (argtype, offset)));
+ TREE_CONSTANT (addr) = flag;
+ }
+ }
+ else
+ addr = build1 (code, argtype, arg);
+
+ /* Address of a static or external variable or
+ file-scope function counts as a constant. */
+ if (staticp (arg)
+ && ! (TREE_CODE (arg) == FUNCTION_DECL
+ && DECL_CONTEXT (arg) != 0))
+ TREE_CONSTANT (addr) = 1;
+ return addr;
+ }
+ }
+
+ if (!errstring)
+ {
+ if (argtype == 0)
+ argtype = TREE_TYPE (arg);
+ return fold (build1 (code, argtype, arg));
+ }
+
+ error (errstring);
+ return error_mark_node;
+}
+
+#if 0
+/* If CONVERSIONS is a conversion expression or a nested sequence of such,
+ convert ARG with the same conversions in the same order
+ and return the result. */
+
+static tree
+convert_sequence (conversions, arg)
+ tree conversions;
+ tree arg;
+{
+ switch (TREE_CODE (conversions))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ return convert (TREE_TYPE (conversions),
+ convert_sequence (TREE_OPERAND (conversions, 0),
+ arg));
+
+ default:
+ return arg;
+ }
+}
+#endif /* 0 */
+
+/* Return nonzero if REF is an lvalue valid for this language.
+ Lvalues can be assigned, unless their type has TYPE_READONLY.
+ Lvalues can have their address taken, unless they have DECL_REGISTER. */
+
+int
+lvalue_p (ref)
+ tree ref;
+{
+ register enum tree_code code = TREE_CODE (ref);
+
+ switch (code)
+ {
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ case COMPONENT_REF:
+ return lvalue_p (TREE_OPERAND (ref, 0));
+
+ case STRING_CST:
+ return 1;
+
+ case INDIRECT_REF:
+ case ARRAY_REF:
+ case VAR_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
+ case ERROR_MARK:
+ if (TREE_CODE (TREE_TYPE (ref)) != FUNCTION_TYPE
+ && TREE_CODE (TREE_TYPE (ref)) != METHOD_TYPE)
+ return 1;
+ break;
+ }
+ return 0;
+}
+
+/* Return nonzero if REF is an lvalue valid for this language;
+ otherwise, print an error message and return zero. */
+
+int
+lvalue_or_else (ref, string)
+ tree ref;
+ char *string;
+{
+ int win = lvalue_p (ref);
+ if (! win)
+ error ("invalid lvalue in %s", string);
+ return win;
+}
+
+/* Apply unary lvalue-demanding operator CODE to the expression ARG
+ for certain kinds of expressions which are not really lvalues
+ but which we can accept as lvalues.
+
+ If ARG is not a kind of expression we can handle, return zero. */
+
+static tree
+unary_complex_lvalue (code, arg)
+ enum tree_code code;
+ tree arg;
+{
+ /* Handle (a, b) used as an "lvalue". */
+ if (TREE_CODE (arg) == COMPOUND_EXPR)
+ {
+ tree real_result = build_unary_op (code, TREE_OPERAND (arg, 1), 0);
+ pedantic_lvalue_warning (COMPOUND_EXPR);
+ return build (COMPOUND_EXPR, TREE_TYPE (real_result),
+ TREE_OPERAND (arg, 0), real_result);
+ }
+
+ /* Handle (a ? b : c) used as an "lvalue". */
+ if (TREE_CODE (arg) == COND_EXPR)
+ {
+ pedantic_lvalue_warning (COND_EXPR);
+ return (build_conditional_expr
+ (TREE_OPERAND (arg, 0),
+ build_unary_op (code, TREE_OPERAND (arg, 1), 0),
+ build_unary_op (code, TREE_OPERAND (arg, 2), 0)));
+ }
+
+ return 0;
+}
+
+ /* If pedantic, warn about improper lvalue. CODE is either COND_EXPR,
+ COMPOUND_EXPR, or CONVERT_EXPR (for casts). */
+
+static void
+pedantic_lvalue_warning (code)
+ enum tree_code code;
+{
+ if (pedantic)
+ pedwarn ("ANSI C forbids use of %s expressions as lvalues",
+ code == COND_EXPR ? "conditional"
+ : code == COMPOUND_EXPR ? "compound" : "cast");
+}
+
+/* Warn about storing in something that is `const'. */
+
+void
+readonly_warning (arg, string)
+ tree arg;
+ char *string;
+{
+ char buf[80];
+ strcpy (buf, string);
+
+ /* Forbid assignments to iterators. */
+ if (TREE_CODE (arg) == VAR_DECL && ITERATOR_P (arg))
+ {
+ strcat (buf, " of iterator `%s'");
+ pedwarn (buf, IDENTIFIER_POINTER (DECL_NAME (arg)));
+ /* Return now; falling through would append to BUF a second
+ time in the VAR_DECL case below. */
+ return;
+ }
+
+ if (TREE_CODE (arg) == COMPONENT_REF)
+ {
+ if (TYPE_READONLY (TREE_TYPE (TREE_OPERAND (arg, 0))))
+ readonly_warning (TREE_OPERAND (arg, 0), string);
+ else
+ {
+ strcat (buf, " of read-only member `%s'");
+ pedwarn (buf, IDENTIFIER_POINTER (DECL_NAME (TREE_OPERAND (arg, 1))));
+ }
+ }
+ else if (TREE_CODE (arg) == VAR_DECL)
+ {
+ strcat (buf, " of read-only variable `%s'");
+ pedwarn (buf, IDENTIFIER_POINTER (DECL_NAME (arg)));
+ }
+ else
+ {
+ pedwarn ("%s of read-only location", buf);
+ }
+}
+
+/* Mark EXP saying that we need to be able to take the
+ address of it; it should not be allocated in a register.
+ Value is 1 if successful. */
+
+int
+mark_addressable (exp)
+ tree exp;
+{
+ register tree x = exp;
+ while (1)
+ switch (TREE_CODE (x))
+ {
+ case ADDR_EXPR:
+ case COMPONENT_REF:
+ case ARRAY_REF:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ x = TREE_OPERAND (x, 0);
+ break;
+
+ case CONSTRUCTOR:
+ TREE_ADDRESSABLE (x) = 1;
+ return 1;
+
+ case VAR_DECL:
+ case CONST_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
+ if (DECL_REGISTER (x) && !TREE_ADDRESSABLE (x)
+ && DECL_NONLOCAL (x))
+ {
+ if (TREE_PUBLIC (x))
+ {
+ error ("global register variable `%s' used in nested function",
+ IDENTIFIER_POINTER (DECL_NAME (x)));
+ return 0;
+ }
+ pedwarn ("register variable `%s' used in nested function",
+ IDENTIFIER_POINTER (DECL_NAME (x)));
+ }
+ else if (DECL_REGISTER (x) && !TREE_ADDRESSABLE (x))
+ {
+ if (TREE_PUBLIC (x))
+ {
+ error ("address of global register variable `%s' requested",
+ IDENTIFIER_POINTER (DECL_NAME (x)));
+ return 0;
+ }
+ pedwarn ("address of register variable `%s' requested",
+ IDENTIFIER_POINTER (DECL_NAME (x)));
+ }
+ put_var_into_stack (x);
+
+ /* Falls through. */
+ case FUNCTION_DECL:
+ TREE_ADDRESSABLE (x) = 1;
+#if 0 /* poplevel deals with this now. */
+ if (DECL_CONTEXT (x) == 0)
+ TREE_ADDRESSABLE (DECL_ASSEMBLER_NAME (x)) = 1;
+#endif
+
+ default:
+ return 1;
+ }
+}
+
+/* Build and return a conditional expression IFEXP ? OP1 : OP2. */
+
+tree
+build_conditional_expr (ifexp, op1, op2)
+ tree ifexp, op1, op2;
+{
+ register tree type1;
+ register tree type2;
+ register enum tree_code code1;
+ register enum tree_code code2;
+ register tree result_type = NULL;
+ tree orig_op1 = op1, orig_op2 = op2;
+
+ /* If second operand is omitted, it is the same as the first one;
+ make sure it is calculated only once. */
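+ /* For example, the GNU extension `x ? : y' stands for `x ? x : y',
+ except that `x' is evaluated only once; the save_expr below is
+ what guarantees the single evaluation. */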
+ if (op1 == 0)
+ {
+ if (pedantic)
+ pedwarn ("ANSI C forbids omitting the middle term of a ?: expression");
+ ifexp = op1 = save_expr (ifexp);
+ }
+
+ ifexp = truthvalue_conversion (default_conversion (ifexp));
+
+#if 0 /* Produces wrong result if within sizeof. */
+ /* Don't promote the operands separately if they promote
+ the same way. Return the unpromoted type and let the combined
+ value get promoted if necessary. */
+
+ if (TREE_TYPE (op1) == TREE_TYPE (op2)
+ && TREE_CODE (TREE_TYPE (op1)) != ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (op1)) != ENUMERAL_TYPE
+ && TREE_CODE (TREE_TYPE (op1)) != FUNCTION_TYPE)
+ {
+ if (TREE_CODE (ifexp) == INTEGER_CST)
+ return pedantic_non_lvalue (integer_zerop (ifexp) ? op2 : op1);
+
+ return fold (build (COND_EXPR, TREE_TYPE (op1), ifexp, op1, op2));
+ }
+#endif
+
+ /* Promote both alternatives. */
+
+ if (TREE_CODE (TREE_TYPE (op1)) != VOID_TYPE)
+ op1 = default_conversion (op1);
+ if (TREE_CODE (TREE_TYPE (op2)) != VOID_TYPE)
+ op2 = default_conversion (op2);
+
+ if (TREE_CODE (ifexp) == ERROR_MARK
+ || TREE_CODE (TREE_TYPE (op1)) == ERROR_MARK
+ || TREE_CODE (TREE_TYPE (op2)) == ERROR_MARK)
+ return error_mark_node;
+
+ type1 = TREE_TYPE (op1);
+ code1 = TREE_CODE (type1);
+ type2 = TREE_TYPE (op2);
+ code2 = TREE_CODE (type2);
+
+ /* Quickly detect the usual case where op1 and op2 have the same type
+ after promotion. */
+ if (TYPE_MAIN_VARIANT (type1) == TYPE_MAIN_VARIANT (type2))
+ {
+ if (type1 == type2)
+ result_type = type1;
+ else
+ result_type = TYPE_MAIN_VARIANT (type1);
+ }
+ else if ((code1 == INTEGER_TYPE || code1 == REAL_TYPE)
+ && (code2 == INTEGER_TYPE || code2 == REAL_TYPE))
+ {
+ result_type = common_type (type1, type2);
+ }
+ else if (code1 == VOID_TYPE || code2 == VOID_TYPE)
+ {
+ if (pedantic && (code1 != VOID_TYPE || code2 != VOID_TYPE))
+ pedwarn ("ANSI C forbids conditional expr with only one void side");
+ result_type = void_type_node;
+ }
+ else if (code1 == POINTER_TYPE && code2 == POINTER_TYPE)
+ {
+ if (comp_target_types (type1, type2))
+ result_type = common_type (type1, type2);
+ else if (integer_zerop (op1) && TREE_TYPE (type1) == void_type_node
+ && TREE_CODE (orig_op1) != NOP_EXPR)
+ result_type = qualify_type (type2, type1);
+ else if (integer_zerop (op2) && TREE_TYPE (type2) == void_type_node
+ && TREE_CODE (orig_op2) != NOP_EXPR)
+ result_type = qualify_type (type1, type2);
+ else if (TYPE_MAIN_VARIANT (TREE_TYPE (type1)) == void_type_node)
+ {
+ if (pedantic && TREE_CODE (TREE_TYPE (type2)) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids conditional expr between `void *' and function pointer");
+ result_type = qualify_type (type1, type2);
+ }
+ else if (TYPE_MAIN_VARIANT (TREE_TYPE (type2)) == void_type_node)
+ {
+ if (pedantic && TREE_CODE (TREE_TYPE (type1)) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids conditional expr between `void *' and function pointer");
+ result_type = qualify_type (type2, type1);
+ }
+ else
+ {
+ pedwarn ("pointer type mismatch in conditional expression");
+ result_type = build_pointer_type (void_type_node);
+ }
+ }
+ else if (code1 == POINTER_TYPE && code2 == INTEGER_TYPE)
+ {
+ if (! integer_zerop (op2))
+ pedwarn ("pointer/integer type mismatch in conditional expression");
+ else
+ {
+ op2 = null_pointer_node;
+#if 0 /* The spec seems to say this is permitted. */
+ if (pedantic && TREE_CODE (type1) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids conditional expr between 0 and function pointer");
+#endif
+ }
+ result_type = type1;
+ }
+ else if (code2 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ {
+ if (!integer_zerop (op1))
+ pedwarn ("pointer/integer type mismatch in conditional expression");
+ else
+ {
+ op1 = null_pointer_node;
+#if 0 /* The spec seems to say this is permitted. */
+ if (pedantic && TREE_CODE (type2) == FUNCTION_TYPE)
+ pedwarn ("ANSI C forbids conditional expr between 0 and function pointer");
+#endif
+ }
+ result_type = type2;
+ }
+
+ if (!result_type)
+ {
+ if (flag_cond_mismatch)
+ result_type = void_type_node;
+ else
+ {
+ error ("type mismatch in conditional expression");
+ return error_mark_node;
+ }
+ }
+
+ /* Merge const and volatile flags of the incoming types. */
+ result_type
+ = build_type_variant (result_type,
+ TREE_READONLY (op1) || TREE_READONLY (op2),
+ TREE_THIS_VOLATILE (op1) || TREE_THIS_VOLATILE (op2));
+
+ if (result_type != TREE_TYPE (op1))
+ op1 = convert_and_check (result_type, op1);
+ if (result_type != TREE_TYPE (op2))
+ op2 = convert_and_check (result_type, op2);
+
+#if 0
+ if (code1 == RECORD_TYPE || code1 == UNION_TYPE)
+ {
+ result_type = TREE_TYPE (op1);
+ if (TREE_CONSTANT (ifexp))
+ return pedantic_non_lvalue (integer_zerop (ifexp) ? op2 : op1);
+
+ if (TYPE_MODE (result_type) == BLKmode)
+ {
+ register tree tempvar
+ = build_decl (VAR_DECL, NULL_TREE, result_type);
+ register tree xop1 = build_modify_expr (tempvar, op1);
+ register tree xop2 = build_modify_expr (tempvar, op2);
+ register tree result = fold (build (COND_EXPR, result_type,
+ ifexp, xop1, xop2));
+
+ layout_decl (tempvar, TYPE_ALIGN (result_type));
+ /* No way to handle variable-sized objects here.
+ I fear that the entire handling of BLKmode conditional exprs
+ needs to be redone. */
+ if (TREE_CODE (DECL_SIZE (tempvar)) != INTEGER_CST)
+ abort ();
+ DECL_RTL (tempvar)
+ = assign_stack_local (DECL_MODE (tempvar),
+ (TREE_INT_CST_LOW (DECL_SIZE (tempvar))
+ + BITS_PER_UNIT - 1)
+ / BITS_PER_UNIT,
+ 0);
+
+ TREE_SIDE_EFFECTS (result)
+ = TREE_SIDE_EFFECTS (ifexp) | TREE_SIDE_EFFECTS (op1)
+ | TREE_SIDE_EFFECTS (op2);
+ return build (COMPOUND_EXPR, result_type, result, tempvar);
+ }
+ }
+#endif /* 0 */
+
+ if (TREE_CODE (ifexp) == INTEGER_CST)
+ return pedantic_non_lvalue (integer_zerop (ifexp) ? op2 : op1);
+
+ return fold (build (COND_EXPR, result_type, ifexp, op1, op2));
+}
+
+/* Given a list of expressions, return a compound expression
+ that performs them all and returns the value of the last of them. */
+
+tree
+build_compound_expr (list)
+ tree list;
+{
+ return internal_build_compound_expr (list, TRUE);
+}
+
+static tree
+internal_build_compound_expr (list, first_p)
+ tree list;
+ int first_p;
+{
+ register tree rest;
+
+ if (TREE_CHAIN (list) == 0)
+ {
+#if 0 /* If something inside inhibited lvalueness, we should not override. */
+ /* Consider (x, y+0), which is not an lvalue since y+0 is not. */
+
+ /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */
+ if (TREE_CODE (list) == NON_LVALUE_EXPR)
+ list = TREE_OPERAND (list, 0);
+#endif
+
+ /* Don't let (0, 0) be a null pointer constant. */
+ if (!first_p && integer_zerop (TREE_VALUE (list)))
+ return non_lvalue (TREE_VALUE (list));
+ return TREE_VALUE (list);
+ }
+
+ if (TREE_CHAIN (list) != 0 && TREE_CHAIN (TREE_CHAIN (list)) == 0)
+ {
+ /* Convert arrays to pointers when there really is a comma operator. */
+ if (TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (list)))) == ARRAY_TYPE)
+ TREE_VALUE (TREE_CHAIN (list))
+ = default_conversion (TREE_VALUE (TREE_CHAIN (list)));
+ }
+
+ rest = internal_build_compound_expr (TREE_CHAIN (list), FALSE);
+
+ /* When pedantic, a compound expression can be neither an lvalue
+ nor an integer constant expression. */
+ if (! TREE_SIDE_EFFECTS (TREE_VALUE (list)) && ! pedantic)
+ return rest;
+
+ return build (COMPOUND_EXPR, TREE_TYPE (rest), TREE_VALUE (list), rest);
+}
+
+/* Build an expression representing a cast to type TYPE of expression EXPR. */
+
+tree
+build_c_cast (type, expr)
+ register tree type;
+ tree expr;
+{
+ register tree value = expr;
+
+ if (type == error_mark_node || expr == error_mark_node)
+ return error_mark_node;
+ type = TYPE_MAIN_VARIANT (type);
+
+#if 0
+ /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */
+ if (TREE_CODE (value) == NON_LVALUE_EXPR)
+ value = TREE_OPERAND (value, 0);
+#endif
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ error ("cast specifies array type");
+ return error_mark_node;
+ }
+
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ error ("cast specifies function type");
+ return error_mark_node;
+ }
+
+ if (type == TREE_TYPE (value))
+ {
+ if (pedantic)
+ {
+ if (TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE)
+ pedwarn ("ANSI C forbids casting nonscalar to the same type");
+ }
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+ if (TREE_CODE (TREE_TYPE (value)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (value)) == FUNCTION_TYPE)
+ value = default_conversion (value);
+
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ if (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (field)),
+ TYPE_MAIN_VARIANT (TREE_TYPE (value))))
+ break;
+
+ if (field)
+ {
+ char *name;
+ tree t;
+
+ if (pedantic)
+ pedwarn ("ANSI C forbids casts to union type");
+ if (TYPE_NAME (type) != 0)
+ {
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ name = IDENTIFIER_POINTER (TYPE_NAME (type));
+ else
+ name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));
+ }
+ else
+ name = "";
+ t = digest_init (type, build (CONSTRUCTOR, type, NULL_TREE,
+ build_tree_list (field, value)),
+ 0, 0);
+ TREE_CONSTANT (t) = TREE_CONSTANT (value);
+ return t;
+ }
+ error ("cast to union type from type not present in union");
+ return error_mark_node;
+ }
+ else
+ {
+ tree otype, ovalue;
+
+ /* If casting to void, avoid the error that would come
+ from default_conversion in the case of a non-lvalue array. */
+ if (type == void_type_node)
+ return build1 (CONVERT_EXPR, type, value);
+
+ /* Convert functions and arrays to pointers,
+ but don't convert any other types. */
+ if (TREE_CODE (TREE_TYPE (value)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (value)) == ARRAY_TYPE)
+ value = default_conversion (value);
+ otype = TREE_TYPE (value);
+
+ /* Optionally warn about potentially worrisome casts. */
+
+ if (warn_cast_qual
+ && TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (otype) == POINTER_TYPE)
+ {
+ if (TYPE_VOLATILE (TREE_TYPE (otype))
+ && ! TYPE_VOLATILE (TREE_TYPE (type)))
+ pedwarn ("cast discards `volatile' from pointer target type");
+ if (TYPE_READONLY (TREE_TYPE (otype))
+ && ! TYPE_READONLY (TREE_TYPE (type)))
+ pedwarn ("cast discards `const' from pointer target type");
+ }
+
+ /* Warn about possible alignment problems. */
+ if (STRICT_ALIGNMENT && warn_cast_align
+ && TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (otype) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (otype)) != VOID_TYPE
+ && TREE_CODE (TREE_TYPE (otype)) != FUNCTION_TYPE
+ && TYPE_ALIGN (TREE_TYPE (type)) > TYPE_ALIGN (TREE_TYPE (otype)))
+ warning ("cast increases required alignment of target type");
+
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && TREE_CODE (otype) == POINTER_TYPE
+ && TYPE_PRECISION (type) != TYPE_PRECISION (otype)
+ && !TREE_CONSTANT (value))
+ warning ("cast from pointer to integer of different size");
+
+ if (warn_bad_function_cast
+ && TREE_CODE (value) == CALL_EXPR
+ && TREE_CODE (type) != TREE_CODE (otype))
+ warning ("cast does not match function type");
+
+ if (TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (otype) == INTEGER_TYPE
+ && TYPE_PRECISION (type) != TYPE_PRECISION (otype)
+#if 0
+ /* Don't warn about converting 0 to pointer,
+ provided the 0 was explicit--not cast or made by folding. */
+ && !(TREE_CODE (value) == INTEGER_CST && integer_zerop (value))
+#endif
+ /* Don't warn about converting any constant. */
+ && !TREE_CONSTANT (value))
+ warning ("cast to pointer from integer of different size");
+
+ ovalue = value;
+ value = convert (type, value);
+
+ /* Ignore any integer overflow caused by the cast. */
+ if (TREE_CODE (value) == INTEGER_CST)
+ {
+ TREE_OVERFLOW (value) = TREE_OVERFLOW (ovalue);
+ TREE_CONSTANT_OVERFLOW (value) = TREE_CONSTANT_OVERFLOW (ovalue);
+ }
+ }
+
+ /* Pedantically, don't let (void *) (FOO *) 0 be a null pointer constant. */
+ if (pedantic && TREE_CODE (value) == INTEGER_CST
+ && TREE_CODE (expr) == INTEGER_CST
+ && TREE_CODE (TREE_TYPE (expr)) != INTEGER_TYPE)
+ value = non_lvalue (value);
+
+ /* If pedantic, don't let a cast be an lvalue. */
+ if (value == expr && pedantic)
+ value = non_lvalue (value);
+
+ return value;
+}
+
+/* Build an assignment expression of lvalue LHS from value RHS.
+ MODIFYCODE is the code for a binary operator that we use
+ to combine the old value of LHS with RHS to get the new value.
+ Or else MODIFYCODE is NOP_EXPR meaning do a simple assignment. */
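+
+ /* For example, `a += b' arrives here with MODIFYCODE == PLUS_EXPR:
+ the old value of `a' is combined with `b' by build_binary_op below,
+ with `a' stabilized so that it is evaluated only once. */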
+
+tree
+build_modify_expr (lhs, modifycode, rhs)
+ tree lhs, rhs;
+ enum tree_code modifycode;
+{
+ register tree result;
+ tree newrhs;
+ tree lhstype = TREE_TYPE (lhs);
+ tree olhstype = lhstype;
+
+ /* Types that aren't fully specified cannot be used in assignments. */
+ lhs = require_complete_type (lhs);
+
+ /* Avoid duplicate error messages from operands that had errors. */
+ if (TREE_CODE (lhs) == ERROR_MARK || TREE_CODE (rhs) == ERROR_MARK)
+ return error_mark_node;
+
+ /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */
+ /* Do not use STRIP_NOPS here. We do not want an enumerator
+ whose value is 0 to count as a null pointer constant. */
+ if (TREE_CODE (rhs) == NON_LVALUE_EXPR)
+ rhs = TREE_OPERAND (rhs, 0);
+
+ newrhs = rhs;
+
+ /* Handle control structure constructs used as "lvalues". */
+
+ switch (TREE_CODE (lhs))
+ {
+ /* Handle (a, b) used as an "lvalue". */
+ case COMPOUND_EXPR:
+ pedantic_lvalue_warning (COMPOUND_EXPR);
+ newrhs = build_modify_expr (TREE_OPERAND (lhs, 1),
+ modifycode, rhs);
+ if (TREE_CODE (newrhs) == ERROR_MARK)
+ return error_mark_node;
+ return build (COMPOUND_EXPR, lhstype,
+ TREE_OPERAND (lhs, 0), newrhs);
+
+ /* Handle (a ? b : c) used as an "lvalue". */
+ case COND_EXPR:
+ pedantic_lvalue_warning (COND_EXPR);
+ rhs = save_expr (rhs);
+ {
+ /* Produce (a ? (b = rhs) : (c = rhs))
+ except that the RHS goes through a save-expr
+ so the code to compute it is only emitted once. */
+ tree cond
+ = build_conditional_expr (TREE_OPERAND (lhs, 0),
+ build_modify_expr (TREE_OPERAND (lhs, 1),
+ modifycode, rhs),
+ build_modify_expr (TREE_OPERAND (lhs, 2),
+ modifycode, rhs));
+ if (TREE_CODE (cond) == ERROR_MARK)
+ return cond;
+ /* Make sure the code to compute the rhs comes out
+ before the split. */
+ return build (COMPOUND_EXPR, TREE_TYPE (lhs),
+ /* But cast it to void to avoid an "unused" error. */
+ convert (void_type_node, rhs), cond);
+ }
+ }
+
+ /* If a binary op has been requested, combine the old LHS value with the RHS
+ producing the value we should actually store into the LHS. */
+
+ if (modifycode != NOP_EXPR)
+ {
+ lhs = stabilize_reference (lhs);
+ newrhs = build_binary_op (modifycode, lhs, rhs, 1);
+ }
+
+ /* Handle a cast used as an "lvalue".
+ We have already performed any binary operator using the value as cast.
+ Now convert the result to the cast type of the lhs,
+ and then to the true type of the lhs, and store it there;
+ then convert the result back to the cast type
+ to be the value of the assignment. */
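+
+ /* For example, given `int c', the (non-ANSI) assignment
+ `(char) c = 'x'' converts 'x' to char, widens that back to int
+ to store in `c', and yields the stored value reconverted to char. */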
+
+ switch (TREE_CODE (lhs))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ if (TREE_CODE (TREE_TYPE (newrhs)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (newrhs)) == FUNCTION_TYPE)
+ newrhs = default_conversion (newrhs);
+ {
+ tree inner_lhs = TREE_OPERAND (lhs, 0);
+ tree result;
+ result = build_modify_expr (inner_lhs, NOP_EXPR,
+ convert (TREE_TYPE (inner_lhs),
+ convert (lhstype, newrhs)));
+ if (TREE_CODE (result) == ERROR_MARK)
+ return result;
+ pedantic_lvalue_warning (CONVERT_EXPR);
+ return convert (TREE_TYPE (lhs), result);
+ }
+ }
+
+ /* Now we have handled acceptable kinds of LHS that are not truly lvalues.
+ Reject anything strange now. */
+
+ if (!lvalue_or_else (lhs, "assignment"))
+ return error_mark_node;
+
+ /* Warn about storing in something that is `const'. */
+
+ if (TREE_READONLY (lhs) || TYPE_READONLY (lhstype)
+ || ((TREE_CODE (lhstype) == RECORD_TYPE
+ || TREE_CODE (lhstype) == UNION_TYPE)
+ && C_TYPE_FIELDS_READONLY (lhstype)))
+ readonly_warning (lhs, "assignment");
+
+ /* If storing into a structure or union member,
+ it has probably been given type `int'.
+ Compute the type that would go with
+ the actual amount of storage the member occupies. */
+
+ if (TREE_CODE (lhs) == COMPONENT_REF
+ && (TREE_CODE (lhstype) == INTEGER_TYPE
+ || TREE_CODE (lhstype) == REAL_TYPE
+ || TREE_CODE (lhstype) == ENUMERAL_TYPE))
+ lhstype = TREE_TYPE (get_unwidened (lhs, 0));
+
+ /* If storing in a field whose actual type is a short or something
+ narrower still, we must store in the field in its actual type. */
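+ /* For example, an assignment to a 3-bit bit-field fetched as an int
+ is performed in the 3-bit type computed here, not in int. */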
+
+ if (lhstype != TREE_TYPE (lhs))
+ {
+ lhs = copy_node (lhs);
+ TREE_TYPE (lhs) = lhstype;
+ }
+
+ /* Convert new value to destination type. */
+
+ newrhs = convert_for_assignment (lhstype, newrhs, "assignment",
+ NULL_TREE, NULL_TREE, 0);
+ if (TREE_CODE (newrhs) == ERROR_MARK)
+ return error_mark_node;
+
+ result = build (MODIFY_EXPR, lhstype, lhs, newrhs);
+ TREE_SIDE_EFFECTS (result) = 1;
+
+ /* If we got the LHS in a different type for storing in,
+ convert the result back to the nominal type of LHS
+ so that the value we return always has the same type
+ as the LHS argument. */
+
+ if (olhstype == TREE_TYPE (result))
+ return result;
+ return convert_for_assignment (olhstype, result, "assignment",
+ NULL_TREE, NULL_TREE, 0);
+}
+
+/* Convert value RHS to type TYPE as preparation for an assignment
+ to an lvalue of type TYPE.
+ The real work of conversion is done by `convert'.
+ The purpose of this function is to generate error messages
+ for assignments that are not allowed in C.
+ ERRTYPE is a string to use in error messages:
+ "assignment", "return", etc. If it is null, this is parameter passing
+ for a function call (and different error messages are output). Otherwise,
+ it may be a name stored in the spelling stack and interpreted by
+ get_spelling.
+
+ FUNNAME is the name of the function being called,
+ as an IDENTIFIER_NODE, or null.
+ PARMNUM is the number of the argument, for printing in error messages. */
+
+static tree
+convert_for_assignment (type, rhs, errtype, fundecl, funname, parmnum)
+ tree type, rhs;
+ char *errtype;
+ tree fundecl, funname;
+ int parmnum;
+{
+ register enum tree_code codel = TREE_CODE (type);
+ register tree rhstype;
+ register enum tree_code coder;
+
+ /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */
+ /* Do not use STRIP_NOPS here. We do not want an enumerator
+ whose value is 0 to count as a null pointer constant. */
+ if (TREE_CODE (rhs) == NON_LVALUE_EXPR)
+ rhs = TREE_OPERAND (rhs, 0);
+
+ if (TREE_CODE (TREE_TYPE (rhs)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (rhs)) == FUNCTION_TYPE)
+ rhs = default_conversion (rhs);
+ else if (optimize && TREE_CODE (rhs) == VAR_DECL)
+ rhs = decl_constant_value (rhs);
+
+ rhstype = TREE_TYPE (rhs);
+ coder = TREE_CODE (rhstype);
+
+ if (coder == ERROR_MARK)
+ return error_mark_node;
+
+ if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (rhstype))
+ {
+ overflow_warning (rhs);
+ /* Check for Objective-C protocols. This will issue a warning if
+ there are protocol violations. No need to use the return value. */
+ maybe_objc_comptypes (type, rhstype, 0);
+ return rhs;
+ }
+
+ if (coder == VOID_TYPE)
+ {
+ error ("void value not ignored as it ought to be");
+ return error_mark_node;
+ }
+ /* Arithmetic types all interconvert, and enum is treated like int. */
+ if ((codel == INTEGER_TYPE || codel == REAL_TYPE || codel == ENUMERAL_TYPE
+ || codel == COMPLEX_TYPE)
+ &&
+ (coder == INTEGER_TYPE || coder == REAL_TYPE || coder == ENUMERAL_TYPE
+ || coder == COMPLEX_TYPE))
+ return convert_and_check (type, rhs);
+ /* Conversion to a union from its member types. */
+ else if (codel == UNION_TYPE)
+ {
+ tree memb_types;
+ for (memb_types = TYPE_FIELDS (type); memb_types;
+ memb_types = TREE_CHAIN (memb_types))
+ {
+ if (comptypes (TREE_TYPE (memb_types), TREE_TYPE (rhs)))
+ {
+ if (pedantic
+ && !(fundecl != 0 && DECL_IN_SYSTEM_HEADER (fundecl)))
+ pedwarn ("ANSI C prohibits argument conversion to union type");
+ return build1 (NOP_EXPR, type, rhs);
+ }
+ else if (coder == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (memb_types)) == POINTER_TYPE)
+ {
+ tree memb_type = TREE_TYPE (memb_types);
+ register tree ttl = TREE_TYPE (memb_type);
+ register tree ttr = TREE_TYPE (rhstype);
+
+ /* Any non-function converts to a [const][volatile] void *
+ and vice versa; otherwise, targets must be the same.
+ Meanwhile, the lhs target must have all the qualifiers of the rhs. */
+ if (TYPE_MAIN_VARIANT (ttl) == void_type_node
+ || TYPE_MAIN_VARIANT (ttr) == void_type_node
+ || comp_target_types (memb_type, rhstype))
+ {
+ /* Const and volatile mean something different for function types,
+ so the usual warnings are not appropriate. */
+ if (TREE_CODE (ttr) != FUNCTION_TYPE
+ || TREE_CODE (ttl) != FUNCTION_TYPE)
+ {
+ if (! TYPE_READONLY (ttl) && TYPE_READONLY (ttr))
+ warn_for_assignment ("%s discards `const' from pointer target type",
+ get_spelling (errtype), funname, parmnum);
+ if (! TYPE_VOLATILE (ttl) && TYPE_VOLATILE (ttr))
+ warn_for_assignment ("%s discards `volatile' from pointer target type",
+ get_spelling (errtype), funname, parmnum);
+ }
+ else
+ {
+ /* Because const and volatile on functions are restrictions
+ that say the function will not do certain things,
+ it is okay to use a const or volatile function
+ where an ordinary one is wanted, but not vice-versa. */
+ if (TYPE_READONLY (ttl) && ! TYPE_READONLY (ttr))
+ warn_for_assignment ("%s makes `const *' function pointer from non-const",
+ get_spelling (errtype), funname, parmnum);
+ if (TYPE_VOLATILE (ttl) && ! TYPE_VOLATILE (ttr))
+ warn_for_assignment ("%s makes `volatile *' function pointer from non-volatile",
+ get_spelling (errtype), funname, parmnum);
+ }
+ if (pedantic
+ && !(fundecl != 0 && DECL_IN_SYSTEM_HEADER (fundecl)))
+ pedwarn ("ANSI C prohibits argument conversion to union type");
+ return build1 (NOP_EXPR, type, rhs);
+ }
+ }
+ }
+ }
+ /* Conversions among pointers */
+ else if (codel == POINTER_TYPE && coder == POINTER_TYPE)
+ {
+ register tree ttl = TREE_TYPE (type);
+ register tree ttr = TREE_TYPE (rhstype);
+
+ /* Any non-function converts to a [const][volatile] void *
+ and vice versa; otherwise, targets must be the same.
+ Meanwhile, the lhs target must have all the qualifiers of the rhs. */
+ if (TYPE_MAIN_VARIANT (ttl) == void_type_node
+ || TYPE_MAIN_VARIANT (ttr) == void_type_node
+ || comp_target_types (type, rhstype)
+ || (unsigned_type (TYPE_MAIN_VARIANT (ttl))
+ == unsigned_type (TYPE_MAIN_VARIANT (ttr))))
+ {
+ if (pedantic
+ && ((TYPE_MAIN_VARIANT (ttl) == void_type_node
+ && TREE_CODE (ttr) == FUNCTION_TYPE)
+ ||
+ (TYPE_MAIN_VARIANT (ttr) == void_type_node
+ /* Check TREE_CODE to catch cases like (void *) (char *) 0
+ which are not ANSI null ptr constants. */
+ && (!integer_zerop (rhs) || TREE_CODE (rhs) == NOP_EXPR)
+ && TREE_CODE (ttl) == FUNCTION_TYPE)))
+ warn_for_assignment ("ANSI forbids %s between function pointer and `void *'",
+ get_spelling (errtype), funname, parmnum);
+ /* Const and volatile mean something different for function types,
+ so the usual warnings are not appropriate. */
+ else if (TREE_CODE (ttr) != FUNCTION_TYPE
+ || TREE_CODE (ttl) != FUNCTION_TYPE)
+ {
+ if (! TYPE_READONLY (ttl) && TYPE_READONLY (ttr))
+ warn_for_assignment ("%s discards `const' from pointer target type",
+ get_spelling (errtype), funname, parmnum);
+ else if (! TYPE_VOLATILE (ttl) && TYPE_VOLATILE (ttr))
+ warn_for_assignment ("%s discards `volatile' from pointer target type",
+ get_spelling (errtype), funname, parmnum);
+ /* If this is not a case of ignoring a mismatch in signedness,
+ no warning. */
+ else if (TYPE_MAIN_VARIANT (ttl) == void_type_node
+ || TYPE_MAIN_VARIANT (ttr) == void_type_node
+ || comp_target_types (type, rhstype))
+ ;
+ /* If there is a mismatch, do warn. */
+ else if (pedantic)
+ warn_for_assignment ("pointer targets in %s differ in signedness",
+ get_spelling (errtype), funname, parmnum);
+ }
+ else
+ {
+ /* Because const and volatile on functions are restrictions
+ that say the function will not do certain things,
+ it is okay to use a const or volatile function
+ where an ordinary one is wanted, but not vice-versa. */
+ if (TYPE_READONLY (ttl) && ! TYPE_READONLY (ttr))
+ warn_for_assignment ("%s makes `const *' function pointer from non-const",
+ get_spelling (errtype), funname, parmnum);
+ if (TYPE_VOLATILE (ttl) && ! TYPE_VOLATILE (ttr))
+ warn_for_assignment ("%s makes `volatile *' function pointer from non-volatile",
+ get_spelling (errtype), funname, parmnum);
+ }
+ }
+ else
+ warn_for_assignment ("%s from incompatible pointer type",
+ get_spelling (errtype), funname, parmnum);
+ return convert (type, rhs);
+ }
+ else if (codel == POINTER_TYPE && coder == INTEGER_TYPE)
+ {
+      /* An explicit constant 0, even one that results from arithmetic
+         (including a cast to integer type), can convert to a pointer.  */
+ if (! (TREE_CODE (rhs) == INTEGER_CST && integer_zerop (rhs))
+ &&
+ ! (TREE_CODE (rhs) == NOP_EXPR
+ && TREE_CODE (TREE_TYPE (rhs)) == INTEGER_TYPE
+ && TREE_CODE (TREE_OPERAND (rhs, 0)) == INTEGER_CST
+ && integer_zerop (TREE_OPERAND (rhs, 0))))
+ {
+ warn_for_assignment ("%s makes pointer from integer without a cast",
+ get_spelling (errtype), funname, parmnum);
+ return convert (type, rhs);
+ }
+ return null_pointer_node;
+ }
+ else if (codel == INTEGER_TYPE && coder == POINTER_TYPE)
+ {
+ warn_for_assignment ("%s makes integer from pointer without a cast",
+ get_spelling (errtype), funname, parmnum);
+ return convert (type, rhs);
+ }
+
+ if (!errtype)
+ {
+ if (funname)
+ {
+ tree selector = maybe_building_objc_message_expr ();
+
+ if (selector && parmnum > 2)
+ error ("incompatible type for argument %d of `%s'",
+ parmnum - 2, IDENTIFIER_POINTER (selector));
+ else
+ error ("incompatible type for argument %d of `%s'",
+ parmnum, IDENTIFIER_POINTER (funname));
+ }
+ else
+ error ("incompatible type for argument %d of indirect function call",
+ parmnum);
+ }
+ else
+ error ("incompatible types in %s", get_spelling (errtype));
+
+ return error_mark_node;
+}
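+
+/* Illustrative examples of the conversions above: given
+   `char *p; int i;', the assignment `p = i' draws
+   "assignment makes pointer from integer without a cast" and
+   `i = p' draws "assignment makes integer from pointer without a
+   cast", while `p = 0' converts silently because an explicit
+   constant zero is a valid null pointer constant.  */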
+
+/* Print a warning using MSG.
+ It gets OPNAME as its one parameter.
+ If OPNAME is null, it is replaced by "passing arg ARGNUM of `FUNCTION'".
+ FUNCTION and ARGNUM are handled specially if we are building an
+ Objective-C selector. */
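+
+/* For example, MSG "%s discards `const' from pointer target type"
+   with OPNAME null, FUNCTION `f' and ARGNUM 1 is printed as
+   "passing arg 1 of `f' discards `const' from pointer target
+   type".  */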
+
+static void
+warn_for_assignment (msg, opname, function, argnum)
+ char *msg;
+ char *opname;
+ tree function;
+ int argnum;
+{
+ static char argstring[] = "passing arg %d of `%s'";
+ static char argnofun[] = "passing arg %d";
+
+ if (opname == 0)
+ {
+ tree selector = maybe_building_objc_message_expr ();
+
+ if (selector && argnum > 2)
+ {
+ function = selector;
+ argnum -= 2;
+ }
+ if (function)
+ {
+ /* Function name is known; supply it. */
+ opname = (char *) alloca (IDENTIFIER_LENGTH (function)
+ + sizeof (argstring) + 25 /*%d*/ + 1);
+ sprintf (opname, argstring, argnum, IDENTIFIER_POINTER (function));
+ }
+ else
+ {
+ /* Function name unknown (call through ptr); just give arg number. */
+ opname = (char *) alloca (sizeof (argnofun) + 25 /*%d*/ + 1);
+ sprintf (opname, argnofun, argnum);
+ }
+ }
+ pedwarn (msg, opname);
+}
+
+/* Return nonzero if VALUE is a valid constant-valued expression
+ for use in initializing a static variable; one that can be an
+ element of a "constant" initializer.
+
+ Return null_pointer_node if the value is absolute;
+ if it is relocatable, return the variable that determines the relocation.
+ We assume that VALUE has been folded as much as possible;
+ therefore, we do not need to check for such things as
+ arithmetic-combinations of integers. */
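+
+/* Some illustrative cases: `17' and `"abc"' are absolute, so the
+   result is null_pointer_node; `&x + 4' relocates with `x', so the
+   result is the VAR_DECL for `x'; `&x - &y' with distinct variables
+   yields 0 (not valid), while the difference of two addresses with
+   the same relocation is absolute.  */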
+
+tree
+initializer_constant_valid_p (value, endtype)
+ tree value;
+ tree endtype;
+{
+ switch (TREE_CODE (value))
+ {
+ case CONSTRUCTOR:
+ if (TREE_CODE (TREE_TYPE (value)) == UNION_TYPE
+ && TREE_CONSTANT (value))
+ return
+ initializer_constant_valid_p (TREE_VALUE (CONSTRUCTOR_ELTS (value)),
+ endtype);
+
+ return TREE_STATIC (value) ? null_pointer_node : 0;
+
+ case INTEGER_CST:
+ case REAL_CST:
+ case STRING_CST:
+ case COMPLEX_CST:
+ return null_pointer_node;
+
+ case ADDR_EXPR:
+ return TREE_OPERAND (value, 0);
+
+ case NON_LVALUE_EXPR:
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype);
+
+ case CONVERT_EXPR:
+ case NOP_EXPR:
+ /* Allow conversions between pointer types. */
+ if (TREE_CODE (TREE_TYPE (value)) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))) == POINTER_TYPE)
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype);
+
+ /* Allow conversions between real types. */
+ if (TREE_CODE (TREE_TYPE (value)) == REAL_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))) == REAL_TYPE)
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype);
+
+ /* Allow length-preserving conversions between integer types. */
+ if (TREE_CODE (TREE_TYPE (value)) == INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))) == INTEGER_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (value))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (value, 0)))))
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype);
+
+ /* Allow conversions between other integer types only if
+ explicit value. */
+ if (TREE_CODE (TREE_TYPE (value)) == INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))) == INTEGER_TYPE)
+ {
+ tree inner = initializer_constant_valid_p (TREE_OPERAND (value, 0),
+ endtype);
+ if (inner == null_pointer_node)
+ return null_pointer_node;
+ return 0;
+ }
+
+ /* Allow (int) &foo provided int is as wide as a pointer. */
+ if (TREE_CODE (TREE_TYPE (value)) == INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))) == POINTER_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (value))
+ >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (value, 0)))))
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0),
+ endtype);
+
+ /* Likewise conversions from int to pointers. */
+ if (TREE_CODE (TREE_TYPE (value)) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))) == INTEGER_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (value))
+ <= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (value, 0)))))
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0),
+ endtype);
+
+ /* Allow conversions to union types if the value inside is okay. */
+ if (TREE_CODE (TREE_TYPE (value)) == UNION_TYPE)
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0),
+ endtype);
+ return 0;
+
+ case PLUS_EXPR:
+ if (TREE_CODE (endtype) == INTEGER_TYPE
+ && TYPE_PRECISION (endtype) < POINTER_SIZE)
+ return 0;
+ {
+ tree valid0 = initializer_constant_valid_p (TREE_OPERAND (value, 0),
+ endtype);
+ tree valid1 = initializer_constant_valid_p (TREE_OPERAND (value, 1),
+ endtype);
+        /* If either term is absolute, use the other term's relocation.  */
+ if (valid0 == null_pointer_node)
+ return valid1;
+ if (valid1 == null_pointer_node)
+ return valid0;
+ return 0;
+ }
+
+ case MINUS_EXPR:
+ if (TREE_CODE (endtype) == INTEGER_TYPE
+ && TYPE_PRECISION (endtype) < POINTER_SIZE)
+ return 0;
+ {
+ tree valid0 = initializer_constant_valid_p (TREE_OPERAND (value, 0),
+ endtype);
+ tree valid1 = initializer_constant_valid_p (TREE_OPERAND (value, 1),
+ endtype);
+ /* Win if second argument is absolute. */
+ if (valid1 == null_pointer_node)
+ return valid0;
+ /* Win if both arguments have the same relocation.
+ Then the value is absolute. */
+ if (valid0 == valid1)
+ return null_pointer_node;
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/* If VALUE is a compound expr all of whose expressions are constant, then
+ return its value. Otherwise, return error_mark_node.
+
+ This is for handling COMPOUND_EXPRs as initializer elements
+ which is allowed with a warning when -pedantic is specified. */
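+
+/* For example, under -pedantic the file-scope declaration
+   `int i = (1, 2);' gets here; both operands of the COMPOUND_EXPR
+   are constant, so the value 2 is accepted (the caller still issues
+   a pedwarn).  */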
+
+static tree
+valid_compound_expr_initializer (value, endtype)
+ tree value;
+ tree endtype;
+{
+ if (TREE_CODE (value) == COMPOUND_EXPR)
+ {
+ if (valid_compound_expr_initializer (TREE_OPERAND (value, 0), endtype)
+ == error_mark_node)
+ return error_mark_node;
+ return valid_compound_expr_initializer (TREE_OPERAND (value, 1),
+ endtype);
+ }
+ else if (! TREE_CONSTANT (value)
+ && ! initializer_constant_valid_p (value, endtype))
+ return error_mark_node;
+ else
+ return value;
+}
+
+/* Perform appropriate conversions on the initial value of a variable,
+ store it in the declaration DECL,
+ and print any error messages that are appropriate.
+ If the init is invalid, store an ERROR_MARK. */
+
+void
+store_init_value (decl, init)
+ tree decl, init;
+{
+ register tree value, type;
+
+ /* If variable's type was invalidly declared, just ignore it. */
+
+ type = TREE_TYPE (decl);
+ if (TREE_CODE (type) == ERROR_MARK)
+ return;
+
+ /* Digest the specified initializer into an expression. */
+
+ value = digest_init (type, init, TREE_STATIC (decl),
+ TREE_STATIC (decl) || pedantic);
+
+ /* Store the expression if valid; else report error. */
+
+#if 0
+ /* Note that this is the only place we can detect the error
+ in a case such as struct foo bar = (struct foo) { x, y };
+ where there is one initial value which is a constructor expression. */
+ if (value == error_mark_node)
+ ;
+ else if (TREE_STATIC (decl) && ! TREE_CONSTANT (value))
+ {
+ error ("initializer for static variable is not constant");
+ value = error_mark_node;
+ }
+ else if (TREE_STATIC (decl)
+ && initializer_constant_valid_p (value, TREE_TYPE (value)) == 0)
+ {
+ error ("initializer for static variable uses complicated arithmetic");
+ value = error_mark_node;
+ }
+ else
+ {
+ if (pedantic && TREE_CODE (value) == CONSTRUCTOR)
+ {
+ if (! TREE_CONSTANT (value))
+ pedwarn ("aggregate initializer is not constant");
+ else if (! TREE_STATIC (value))
+ pedwarn ("aggregate initializer uses complicated arithmetic");
+ }
+ }
+#endif
+
+ DECL_INITIAL (decl) = value;
+
+ /* ANSI wants warnings about out-of-range constant initializers. */
+ STRIP_TYPE_NOPS (value);
+ constant_expression_warning (value);
+}
+
+/* Methods for storing and printing names for error messages. */
+
+/* Implement a spelling stack that allows components of a name to be pushed
+ and popped. Each element on the stack is this structure. */
+
+struct spelling
+{
+ int kind;
+ union
+ {
+ int i;
+ char *s;
+ } u;
+};
+
+#define SPELLING_STRING 1
+#define SPELLING_MEMBER 2
+#define SPELLING_BOUNDS 3
+
+static struct spelling *spelling;       /* Next stack slot (first unused).  */
+static struct spelling *spelling_base; /* Spelling stack base. */
+static int spelling_size; /* Size of the spelling stack. */
+
+/* Macros to save and restore the spelling stack around push_... functions.
+   Alternative to SAVE_SPELLING_DEPTH.  */
+
+#define SPELLING_DEPTH() (spelling - spelling_base)
+#define RESTORE_SPELLING_DEPTH(depth) (spelling = spelling_base + depth)
+
+/* Save and restore the spelling stack around arbitrary C code. */
+
+#define SAVE_SPELLING_DEPTH(code) \
+{ \
+ int __depth = SPELLING_DEPTH (); \
+ code; \
+ RESTORE_SPELLING_DEPTH (__depth); \
+}
+
+/* Push an element on the spelling stack with type KIND and assign VALUE
+ to MEMBER. */
+
+#define PUSH_SPELLING(KIND, VALUE, MEMBER) \
+{ \
+ int depth = SPELLING_DEPTH (); \
+ \
+ if (depth >= spelling_size) \
+ { \
+ spelling_size += 10; \
+ if (spelling_base == 0) \
+ spelling_base \
+ = (struct spelling *) xmalloc (spelling_size * sizeof (struct spelling)); \
+ else \
+ spelling_base \
+ = (struct spelling *) xrealloc (spelling_base, \
+ spelling_size * sizeof (struct spelling)); \
+ RESTORE_SPELLING_DEPTH (depth); \
+ } \
+ \
+ spelling->kind = (KIND); \
+ spelling->MEMBER = (VALUE); \
+ spelling++; \
+}
+
+/* Push STRING on the stack. Printed literally. */
+
+static void
+push_string (string)
+ char *string;
+{
+ PUSH_SPELLING (SPELLING_STRING, string, u.s);
+}
+
+/* Push a member name on the stack. Printed as '.' STRING. */
+
+static void
+push_member_name (decl)
+ tree decl;
+{
+ char *string
+ = DECL_NAME (decl) ? IDENTIFIER_POINTER (DECL_NAME (decl)) : "<anonymous>";
+ PUSH_SPELLING (SPELLING_MEMBER, string, u.s);
+}
+
+/* Push an array bound on the stack.  Printed as [BOUNDS].  */
+
+static void
+push_array_bounds (bounds)
+ int bounds;
+{
+ PUSH_SPELLING (SPELLING_BOUNDS, bounds, u.i);
+}
+
+/* Compute the maximum size in bytes of the printed spelling. */
+
+static int
+spelling_length ()
+{
+ register int size = 0;
+ register struct spelling *p;
+
+ for (p = spelling_base; p < spelling; p++)
+ {
+ if (p->kind == SPELLING_BOUNDS)
+ size += 25;
+ else
+ size += strlen (p->u.s) + 1;
+ }
+
+ return size;
+}
+
+/* Print the spelling to BUFFER and return it. */
+
+static char *
+print_spelling (buffer)
+ register char *buffer;
+{
+ register char *d = buffer;
+ register char *s;
+ register struct spelling *p;
+
+ for (p = spelling_base; p < spelling; p++)
+ if (p->kind == SPELLING_BOUNDS)
+ {
+ sprintf (d, "[%d]", p->u.i);
+ d += strlen (d);
+ }
+ else
+ {
+ if (p->kind == SPELLING_MEMBER)
+ *d++ = '.';
+ for (s = p->u.s; *d = *s++; d++)
+ ;
+ }
+ *d++ = '\0';
+ return buffer;
+}
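+
+/* For example, after push_string ("x"), push_member_name for a
+   member `a' and push_array_bounds (3), print_spelling yields
+   "x.a[3]".  */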
+
+/* Provide a means to pass component names derived from the spelling stack. */
+
+char initialization_message;
+
+/* Interpret the spelling of the given ERRTYPE message. */
+
+static char *
+get_spelling (errtype)
+ char *errtype;
+{
+ static char *buffer;
+ static int size = -1;
+
+ if (errtype == &initialization_message)
+ {
+ /* Avoid counting chars */
+ static char message[] = "initialization of `%s'";
+ register int needed = sizeof (message) + spelling_length () + 1;
+ char *temp;
+
+ if (size < 0)
+ buffer = (char *) xmalloc (size = needed);
+ if (needed > size)
+ buffer = (char *) xrealloc (buffer, size = needed);
+
+ temp = (char *) alloca (needed);
+ sprintf (buffer, message, print_spelling (temp));
+ return buffer;
+ }
+
+ return errtype;
+}
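+
+/* Thus with the spelling stack spelling "x.a[3]", passing
+   &initialization_message here yields "initialization of
+   `x.a[3]'"; any other ERRTYPE is returned unchanged.  */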
+
+/* Issue an error message for a bad initializer component.
+ FORMAT describes the message. OFWHAT is the name for the component.
+ LOCAL is a format string for formatting the insertion of the name
+ into the message.
+
+ If OFWHAT is null, the component name is stored on the spelling stack.
+ If the component name is a null string, then LOCAL is omitted entirely. */
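+
+/* For example, error_init ("invalid initializer%s", " for `%s'",
+   NULL) with the stack spelling "x.a[3]" reports
+   "invalid initializer for `x.a[3]'".  */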
+
+void
+error_init (format, local, ofwhat)
+ char *format, *local, *ofwhat;
+{
+ char *buffer;
+
+ if (ofwhat == 0)
+ ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
+ buffer = (char *) alloca (strlen (local) + strlen (ofwhat) + 2);
+
+ if (*ofwhat)
+ sprintf (buffer, local, ofwhat);
+ else
+ buffer[0] = 0;
+
+ error (format, buffer);
+}
+
+/* Issue a pedantic warning for a bad initializer component.
+ FORMAT describes the message. OFWHAT is the name for the component.
+ LOCAL is a format string for formatting the insertion of the name
+ into the message.
+
+ If OFWHAT is null, the component name is stored on the spelling stack.
+ If the component name is a null string, then LOCAL is omitted entirely. */
+
+void
+pedwarn_init (format, local, ofwhat)
+ char *format, *local, *ofwhat;
+{
+ char *buffer;
+
+ if (ofwhat == 0)
+ ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
+ buffer = (char *) alloca (strlen (local) + strlen (ofwhat) + 2);
+
+ if (*ofwhat)
+ sprintf (buffer, local, ofwhat);
+ else
+ buffer[0] = 0;
+
+ pedwarn (format, buffer);
+}
+
+/* Issue a warning for a bad initializer component.
+ FORMAT describes the message. OFWHAT is the name for the component.
+ LOCAL is a format string for formatting the insertion of the name
+ into the message.
+
+ If OFWHAT is null, the component name is stored on the spelling stack.
+ If the component name is a null string, then LOCAL is omitted entirely. */
+
+static void
+warning_init (format, local, ofwhat)
+ char *format, *local, *ofwhat;
+{
+ char *buffer;
+
+ if (ofwhat == 0)
+ ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
+ buffer = (char *) alloca (strlen (local) + strlen (ofwhat) + 2);
+
+ if (*ofwhat)
+ sprintf (buffer, local, ofwhat);
+ else
+ buffer[0] = 0;
+
+ warning (format, buffer);
+}
+
+/* Digest the parser output INIT as an initializer for type TYPE.
+ Return a C expression of type TYPE to represent the initial value.
+
+ The arguments REQUIRE_CONSTANT and CONSTRUCTOR_CONSTANT request errors
+ if non-constant initializers or elements are seen. CONSTRUCTOR_CONSTANT
+ applies only to elements of constructors. */
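+
+/* For instance, `char s[3] = "abc";' arrives with a STRING_CST whose
+   TREE_STRING_LENGTH is 4 (it counts the terminating null); the size
+   check below permits dropping that null, so only `char s[2] = "abc";'
+   draws the "too long" pedwarn.  */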
+
+static tree
+digest_init (type, init, require_constant, constructor_constant)
+ tree type, init;
+ int require_constant, constructor_constant;
+{
+ enum tree_code code = TREE_CODE (type);
+ tree inside_init = init;
+
+ if (init == error_mark_node)
+ return init;
+
+  /* Strip NON_LVALUE_EXPRs since we aren't using this as an lvalue.  */
+ /* Do not use STRIP_NOPS here. We do not want an enumerator
+ whose value is 0 to count as a null pointer constant. */
+ if (TREE_CODE (init) == NON_LVALUE_EXPR)
+ inside_init = TREE_OPERAND (init, 0);
+
+ /* Initialization of an array of chars from a string constant
+ optionally enclosed in braces. */
+
+ if (code == ARRAY_TYPE)
+ {
+ tree typ1 = TYPE_MAIN_VARIANT (TREE_TYPE (type));
+ if ((typ1 == char_type_node
+ || typ1 == signed_char_type_node
+ || typ1 == unsigned_char_type_node
+ || typ1 == unsigned_wchar_type_node
+ || typ1 == signed_wchar_type_node)
+      && inside_init && TREE_CODE (inside_init) == STRING_CST)
+ {
+ if (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
+ TYPE_MAIN_VARIANT (type)))
+ return inside_init;
+
+ if ((TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (inside_init)))
+ != char_type_node)
+ && TYPE_PRECISION (typ1) == TYPE_PRECISION (char_type_node))
+ {
+ error_init ("char-array%s initialized from wide string",
+ " `%s'", NULL);
+ return error_mark_node;
+ }
+ if ((TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (inside_init)))
+ == char_type_node)
+ && TYPE_PRECISION (typ1) != TYPE_PRECISION (char_type_node))
+ {
+ error_init ("int-array%s initialized from non-wide string",
+ " `%s'", NULL);
+ return error_mark_node;
+ }
+
+ TREE_TYPE (inside_init) = type;
+ if (TYPE_DOMAIN (type) != 0
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
+ {
+ register int size = TREE_INT_CST_LOW (TYPE_SIZE (type));
+ size = (size + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
+ /* Subtract 1 (or sizeof (wchar_t))
+ because it's ok to ignore the terminating null char
+ that is counted in the length of the constant. */
+ if (size < TREE_STRING_LENGTH (inside_init)
+ - (TYPE_PRECISION (typ1) != TYPE_PRECISION (char_type_node)
+ ? TYPE_PRECISION (wchar_type_node) / BITS_PER_UNIT
+ : 1))
+ pedwarn_init (
+ "initializer-string for array of chars%s is too long",
+ " `%s'", NULL);
+ }
+ return inside_init;
+ }
+ }
+
+ /* Any type can be initialized
+ from an expression of the same type, optionally with braces. */
+
+ if (inside_init && TREE_TYPE (inside_init) != 0
+ && (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
+ TYPE_MAIN_VARIANT (type))
+ || (code == ARRAY_TYPE
+ && comptypes (TREE_TYPE (inside_init), type))
+ || (code == POINTER_TYPE
+ && (TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (inside_init)) == FUNCTION_TYPE)
+ && comptypes (TREE_TYPE (TREE_TYPE (inside_init)),
+ TREE_TYPE (type)))))
+ {
+ if (code == POINTER_TYPE
+ && (TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (inside_init)) == FUNCTION_TYPE))
+ inside_init = default_conversion (inside_init);
+ else if (code == ARRAY_TYPE && TREE_CODE (inside_init) != STRING_CST
+ && TREE_CODE (inside_init) != CONSTRUCTOR)
+ {
+ error_init ("array%s initialized from non-constant array expression",
+ " `%s'", NULL);
+ return error_mark_node;
+ }
+
+ if (optimize && TREE_CODE (inside_init) == VAR_DECL)
+ inside_init = decl_constant_value (inside_init);
+
+ /* Compound expressions can only occur here if -pedantic or
+         -pedantic-errors is specified.  In the latter case, we always want
+ an error. In the former case, we simply want a warning. */
+ if (require_constant && pedantic
+ && TREE_CODE (inside_init) == COMPOUND_EXPR)
+ {
+ inside_init
+ = valid_compound_expr_initializer (inside_init,
+ TREE_TYPE (inside_init));
+ if (inside_init == error_mark_node)
+ error_init ("initializer element%s is not constant",
+ " for `%s'", NULL);
+ else
+ pedwarn_init ("initializer element%s is not constant",
+ " for `%s'", NULL);
+ if (flag_pedantic_errors)
+ inside_init = error_mark_node;
+ }
+ else if (require_constant && ! TREE_CONSTANT (inside_init))
+ {
+ error_init ("initializer element%s is not constant",
+ " for `%s'", NULL);
+ inside_init = error_mark_node;
+ }
+ else if (require_constant
+ && initializer_constant_valid_p (inside_init, TREE_TYPE (inside_init)) == 0)
+ {
+ error_init ("initializer element%s is not computable at load time",
+ " for `%s'", NULL);
+ inside_init = error_mark_node;
+ }
+
+ return inside_init;
+ }
+
+ /* Handle scalar types, including conversions. */
+
+ if (code == INTEGER_TYPE || code == REAL_TYPE || code == POINTER_TYPE
+ || code == ENUMERAL_TYPE || code == COMPLEX_TYPE)
+ {
+ /* Note that convert_for_assignment calls default_conversion
+ for arrays and functions. We must not call it in the
+ case where inside_init is a null pointer constant. */
+ inside_init
+ = convert_for_assignment (type, init, "initialization",
+ NULL_TREE, NULL_TREE, 0);
+
+ if (require_constant && ! TREE_CONSTANT (inside_init))
+ {
+ error_init ("initializer element%s is not constant",
+ " for `%s'", NULL);
+ inside_init = error_mark_node;
+ }
+ else if (require_constant
+ && initializer_constant_valid_p (inside_init, TREE_TYPE (inside_init)) == 0)
+ {
+ error_init ("initializer element%s is not computable at load time",
+ " for `%s'", NULL);
+ inside_init = error_mark_node;
+ }
+
+ return inside_init;
+ }
+
+ /* Come here only for records and arrays. */
+
+ if (TYPE_SIZE (type) && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ {
+ error_init ("variable-sized object%s may not be initialized",
+ " `%s'", NULL);
+ return error_mark_node;
+ }
+
+ /* Traditionally, you can write struct foo x = 0;
+ and it initializes the first element of x to 0. */
+ if (flag_traditional)
+ {
+ tree top = 0, prev = 0;
+ while (TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == ARRAY_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE
+ || TREE_CODE (type) == UNION_TYPE)
+ {
+ tree temp = build (CONSTRUCTOR, type, NULL_TREE, NULL_TREE);
+ if (prev == 0)
+ top = temp;
+ else
+ TREE_OPERAND (prev, 1) = build_tree_list (NULL_TREE, temp);
+ prev = temp;
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
+ else if (TYPE_FIELDS (type))
+ type = TREE_TYPE (TYPE_FIELDS (type));
+ else
+ {
+ error_init ("invalid initializer%s", " for `%s'", NULL);
+ return error_mark_node;
+ }
+ }
+ TREE_OPERAND (prev, 1)
+ = build_tree_list (NULL_TREE,
+ digest_init (type, init, require_constant,
+ constructor_constant));
+ return top;
+ }
+ error_init ("invalid initializer%s", " for `%s'", NULL);
+ return error_mark_node;
+}
+
+/* Handle initializers that use braces. */
+
+/* Type of object we are accumulating a constructor for.
+ This type is always a RECORD_TYPE, UNION_TYPE or ARRAY_TYPE. */
+static tree constructor_type;
+
+/* For a RECORD_TYPE or UNION_TYPE, this is the chain of fields
+ left to fill. */
+static tree constructor_fields;
+
+/* For an ARRAY_TYPE, this is the specified index
+ at which to store the next element we get.
+ This is a special INTEGER_CST node that we modify in place. */
+static tree constructor_index;
+
+/* For an ARRAY_TYPE, this is the end index of the range
+   to initialize with the next element, or NULL in the ordinary case
+ where the element is used just once. */
+static tree constructor_range_end;
+
+/* For an ARRAY_TYPE, this is the maximum index. */
+static tree constructor_max_index;
+
+/* For a RECORD_TYPE, this is the first field not yet written out. */
+static tree constructor_unfilled_fields;
+
+/* For an ARRAY_TYPE, this is the index of the first element
+ not yet written out.
+ This is a special INTEGER_CST node that we modify in place. */
+static tree constructor_unfilled_index;
+
+/* In a RECORD_TYPE, the byte index of the next consecutive field.
+ This is so we can generate gaps between fields, when appropriate.
+ This is a special INTEGER_CST node that we modify in place. */
+static tree constructor_bit_index;
+
+/* If we are saving up the elements rather than allocating them,
+ this is the list of elements so far (in reverse order,
+ most recent first). */
+static tree constructor_elements;
+
+/* 1 if so far this constructor's elements are all compile-time constants. */
+static int constructor_constant;
+
+/* 1 if so far this constructor's elements are all valid address constants. */
+static int constructor_simple;
+
+/* 1 if this constructor is erroneous so far. */
+static int constructor_erroneous;
+
+/* 1 if we have called defer_addressed_constants.  */
+static int constructor_subconstants_deferred;
+
+/* List of pending elements at this constructor level.
+ These are elements encountered out of order
+ which belong at places we haven't reached yet in actually
+ writing the output. */
+static tree constructor_pending_elts;
+
+/* The SPELLING_DEPTH of this constructor. */
+static int constructor_depth;
+
+/* 0 if implicitly pushing constructor levels is allowed. */
+int constructor_no_implicit = 0; /* 0 for C; 1 for some other languages. */
+
+/* 1 if this constructor level was entered implicitly. */
+static int constructor_implicit;
+
+static int require_constant_value;
+static int require_constant_elements;
+
+/* 1 if it is ok to output this constructor as we read it.
+ 0 means must accumulate a CONSTRUCTOR expression. */
+static int constructor_incremental;
+
+/* DECL node for which an initializer is being read.
+ 0 means we are reading a constructor expression
+ such as (struct foo) {...}. */
+static tree constructor_decl;
+
+/* start_init saves the ASMSPEC arg here for really_start_incremental_init. */
+static char *constructor_asmspec;
+
+/* Nonzero if this is an initializer for a top-level decl. */
+static int constructor_top_level;
+
+/* When we finish reading a constructor expression
+ (constructor_decl is 0), the CONSTRUCTOR goes here. */
+static tree constructor_result;
+
+/* This stack has a level for each implicit or explicit level of
+ structuring in the initializer, including the outermost one. It
+ saves the values of most of the variables above. */
+
+struct constructor_stack
+{
+ struct constructor_stack *next;
+ tree type;
+ tree fields;
+ tree index;
+ tree range_end;
+ tree max_index;
+ tree unfilled_index;
+ tree unfilled_fields;
+ tree bit_index;
+ tree elements;
+ int offset;
+ tree pending_elts;
+ int depth;
+ /* If nonzero, this value should replace the entire
+ constructor at this level. */
+ tree replacement_value;
+ char constant;
+ char simple;
+ char implicit;
+ char incremental;
+ char erroneous;
+ char outer;
+};
+
+struct constructor_stack *constructor_stack;
+
+/* This stack records separate initializers that are nested.
+ Nested initializers can't happen in ANSI C, but GNU C allows them
+ in cases like { ... (struct foo) { ... } ... }. */
+
+struct initializer_stack
+{
+ struct initializer_stack *next;
+ tree decl;
+ char *asmspec;
+ struct constructor_stack *constructor_stack;
+ tree elements;
+ struct spelling *spelling;
+ struct spelling *spelling_base;
+ int spelling_size;
+ char top_level;
+ char incremental;
+ char require_constant_value;
+ char require_constant_elements;
+ char deferred;
+};
+
+struct initializer_stack *initializer_stack;
+
+/* Prepare to parse and output the initializer for variable DECL. */
+
+void
+start_init (decl, asmspec_tree, top_level)
+ tree decl;
+ tree asmspec_tree;
+ int top_level;
+{
+ char *locus;
+ struct initializer_stack *p
+ = (struct initializer_stack *) xmalloc (sizeof (struct initializer_stack));
+ char *asmspec = 0;
+
+ if (asmspec_tree)
+ asmspec = TREE_STRING_POINTER (asmspec_tree);
+
+ p->decl = constructor_decl;
+ p->asmspec = constructor_asmspec;
+ p->incremental = constructor_incremental;
+ p->require_constant_value = require_constant_value;
+ p->require_constant_elements = require_constant_elements;
+ p->constructor_stack = constructor_stack;
+ p->elements = constructor_elements;
+ p->spelling = spelling;
+ p->spelling_base = spelling_base;
+ p->spelling_size = spelling_size;
+ p->deferred = constructor_subconstants_deferred;
+ p->top_level = constructor_top_level;
+ p->next = initializer_stack;
+ initializer_stack = p;
+
+ constructor_decl = decl;
+ constructor_incremental = top_level;
+ constructor_asmspec = asmspec;
+ constructor_subconstants_deferred = 0;
+ constructor_top_level = top_level;
+
+ if (decl != 0)
+ {
+ require_constant_value = TREE_STATIC (decl);
+ require_constant_elements
+ = ((TREE_STATIC (decl) || pedantic)
+ /* For a scalar, you can always use any value to initialize,
+ even within braces. */
+ && (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE
+ || TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE
+ || TREE_CODE (TREE_TYPE (decl)) == QUAL_UNION_TYPE));
+ locus = IDENTIFIER_POINTER (DECL_NAME (decl));
+ constructor_incremental |= TREE_STATIC (decl);
+ }
+ else
+ {
+ require_constant_value = 0;
+ require_constant_elements = 0;
+ locus = "(anonymous)";
+ }
+
+ constructor_stack = 0;
+
+ missing_braces_mentioned = 0;
+
+ spelling_base = 0;
+ spelling_size = 0;
+ RESTORE_SPELLING_DEPTH (0);
+
+ if (locus)
+ push_string (locus);
+}
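+
+/* So for `static int a[1] = { f () };' both require_constant_value
+   and require_constant_elements are set and the call to f draws an
+   error, while for an automatic array the elements need to be
+   constant only under -pedantic.  */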
+
+void
+finish_init ()
+{
+ struct initializer_stack *p = initializer_stack;
+
+ /* Output subconstants (string constants, usually)
+ that were referenced within this initializer and saved up.
+ Must do this if and only if we called defer_addressed_constants. */
+ if (constructor_subconstants_deferred)
+ output_deferred_addressed_constants ();
+
+ /* Free the whole constructor stack of this initializer. */
+ while (constructor_stack)
+ {
+ struct constructor_stack *q = constructor_stack;
+ constructor_stack = q->next;
+ free (q);
+ }
+
+ /* Pop back to the data of the outer initializer (if any). */
+ constructor_decl = p->decl;
+ constructor_asmspec = p->asmspec;
+ constructor_incremental = p->incremental;
+ require_constant_value = p->require_constant_value;
+ require_constant_elements = p->require_constant_elements;
+ constructor_stack = p->constructor_stack;
+ constructor_elements = p->elements;
+ spelling = p->spelling;
+ spelling_base = p->spelling_base;
+ spelling_size = p->spelling_size;
+ constructor_subconstants_deferred = p->deferred;
+ constructor_top_level = p->top_level;
+ initializer_stack = p->next;
+ free (p);
+}
+
+/* Call here when we see the initializer is surrounded by braces.
+ This is instead of a call to push_init_level;
+ it is matched by a call to pop_init_level.
+
+ TYPE is the type to initialize, for a constructor expression.
+ For an initializer for a decl, TYPE is zero. */
+
+void
+really_start_incremental_init (type)
+ tree type;
+{
+ struct constructor_stack *p
+ = (struct constructor_stack *) xmalloc (sizeof (struct constructor_stack));
+
+ if (type == 0)
+ type = TREE_TYPE (constructor_decl);
+
+ /* Turn off constructor_incremental if type is a struct with bitfields.
+ Do this before the first push, so that the corrected value
+ is available in finish_init. */
+ check_init_type_bitfields (type);
+
+ p->type = constructor_type;
+ p->fields = constructor_fields;
+ p->index = constructor_index;
+ p->range_end = constructor_range_end;
+ p->max_index = constructor_max_index;
+ p->unfilled_index = constructor_unfilled_index;
+ p->unfilled_fields = constructor_unfilled_fields;
+ p->bit_index = constructor_bit_index;
+ p->elements = constructor_elements;
+ p->constant = constructor_constant;
+ p->simple = constructor_simple;
+ p->erroneous = constructor_erroneous;
+ p->pending_elts = constructor_pending_elts;
+ p->depth = constructor_depth;
+ p->replacement_value = 0;
+ p->implicit = 0;
+ p->incremental = constructor_incremental;
+ p->outer = 0;
+ p->next = 0;
+ constructor_stack = p;
+
+ constructor_constant = 1;
+ constructor_simple = 1;
+ constructor_depth = SPELLING_DEPTH ();
+ constructor_elements = 0;
+ constructor_pending_elts = 0;
+ constructor_type = type;
+
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ constructor_fields = TYPE_FIELDS (constructor_type);
+      /* Skip any nameless bit fields at the beginning.  */
+ while (constructor_fields != 0 && DECL_BIT_FIELD (constructor_fields)
+ && DECL_NAME (constructor_fields) == 0)
+ constructor_fields = TREE_CHAIN (constructor_fields);
+ constructor_unfilled_fields = constructor_fields;
+ constructor_bit_index = copy_node (integer_zero_node);
+ }
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ constructor_range_end = 0;
+ if (TYPE_DOMAIN (constructor_type))
+ {
+ constructor_max_index
+ = TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type));
+ constructor_index
+ = copy_node (TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type)));
+ }
+ else
+ constructor_index = copy_node (integer_zero_node);
+ constructor_unfilled_index = copy_node (constructor_index);
+ }
+ else
+ {
+ /* Handle the case of int x = {5}; */
+ constructor_fields = constructor_type;
+ constructor_unfilled_fields = constructor_type;
+ }
+
+ if (constructor_incremental)
+ {
+ int momentary = suspend_momentary ();
+ push_obstacks_nochange ();
+ if (TREE_PERMANENT (constructor_decl))
+ end_temporary_allocation ();
+ make_decl_rtl (constructor_decl, constructor_asmspec,
+ constructor_top_level);
+ assemble_variable (constructor_decl, constructor_top_level, 0, 1);
+ pop_obstacks ();
+ resume_momentary (momentary);
+ }
+
+ if (constructor_incremental)
+ {
+ defer_addressed_constants ();
+ constructor_subconstants_deferred = 1;
+ }
+}
+
+/* Push down into a subobject, for initialization.
+ If this is for an explicit set of braces, IMPLICIT is 0.
+ If it is because the next element belongs at a lower level,
+ IMPLICIT is 1. */
+
+void
+push_init_level (implicit)
+ int implicit;
+{
+ struct constructor_stack *p;
+
+ /* If we've exhausted any levels that didn't have braces,
+ pop them now. */
+ while (constructor_stack->implicit)
+ {
+ if ((TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ && constructor_fields == 0)
+ process_init_element (pop_init_level (1));
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE
+ && tree_int_cst_lt (constructor_max_index, constructor_index))
+ process_init_element (pop_init_level (1));
+ else
+ break;
+ }
+
+ /* Structure elements may require alignment. Do this now
+ if necessary for the subaggregate. */
+ if (constructor_incremental && TREE_CODE (constructor_type) == RECORD_TYPE
+ && constructor_fields)
+ {
+ /* Advance to offset of this element. */
+ if (! tree_int_cst_equal (constructor_bit_index,
+ DECL_FIELD_BITPOS (constructor_fields)))
+ {
+ int next = (TREE_INT_CST_LOW
+ (DECL_FIELD_BITPOS (constructor_fields))
+ / BITS_PER_UNIT);
+ int here = (TREE_INT_CST_LOW (constructor_bit_index)
+ / BITS_PER_UNIT);
+
+ assemble_zeros (next - here);
+ }
+ }
+
+ p = (struct constructor_stack *) xmalloc (sizeof (struct constructor_stack));
+ p->type = constructor_type;
+ p->fields = constructor_fields;
+ p->index = constructor_index;
+ p->range_end = constructor_range_end;
+ p->max_index = constructor_max_index;
+ p->unfilled_index = constructor_unfilled_index;
+ p->unfilled_fields = constructor_unfilled_fields;
+ p->bit_index = constructor_bit_index;
+ p->elements = constructor_elements;
+ p->constant = constructor_constant;
+ p->simple = constructor_simple;
+ p->erroneous = constructor_erroneous;
+ p->pending_elts = constructor_pending_elts;
+ p->depth = constructor_depth;
+ p->replacement_value = 0;
+ p->implicit = implicit;
+ p->incremental = constructor_incremental;
+ p->outer = 0;
+ p->next = constructor_stack;
+ constructor_stack = p;
+
+ constructor_constant = 1;
+ constructor_simple = 1;
+ constructor_depth = SPELLING_DEPTH ();
+ constructor_elements = 0;
+ constructor_pending_elts = 0;
+
+ /* Don't die if an entire brace-pair level is superfluous
+ in the containing level. */
+ if (constructor_type == 0)
+ ;
+ else if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ /* Don't die if there are extra init elts at the end. */
+ if (constructor_fields == 0)
+ constructor_type = 0;
+ else
+ {
+ constructor_type = TREE_TYPE (constructor_fields);
+ push_member_name (constructor_fields);
+ if (constructor_fields != constructor_unfilled_fields)
+ constructor_incremental = 0;
+ }
+ }
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ constructor_type = TREE_TYPE (constructor_type);
+ push_array_bounds (TREE_INT_CST_LOW (constructor_index));
+ if (! tree_int_cst_equal (constructor_index, constructor_unfilled_index)
+ || constructor_range_end != 0)
+ constructor_incremental = 0;
+ }
+
+ if (constructor_type == 0)
+ {
+ error_init ("extra brace group at end of initializer%s",
+ " for `%s'", NULL);
+ constructor_fields = 0;
+ constructor_unfilled_fields = 0;
+ return;
+ }
+
+ /* Turn off constructor_incremental if type is a struct with bitfields. */
+ check_init_type_bitfields (constructor_type);
+
+ if (implicit && warn_missing_braces && !missing_braces_mentioned)
+ {
+ missing_braces_mentioned = 1;
+ warning_init ("missing braces around initializer%s", " for `%s'", NULL);
+ }
+
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ constructor_fields = TYPE_FIELDS (constructor_type);
+      /* Skip any nameless bit fields at the beginning.  */
+ while (constructor_fields != 0 && DECL_BIT_FIELD (constructor_fields)
+ && DECL_NAME (constructor_fields) == 0)
+ constructor_fields = TREE_CHAIN (constructor_fields);
+ constructor_unfilled_fields = constructor_fields;
+ constructor_bit_index = copy_node (integer_zero_node);
+ }
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ constructor_range_end = 0;
+ if (TYPE_DOMAIN (constructor_type))
+ {
+ constructor_max_index
+ = TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type));
+ constructor_index
+ = copy_node (TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type)));
+ }
+ else
+ constructor_index = copy_node (integer_zero_node);
+ constructor_unfilled_index = copy_node (constructor_index);
+ }
+ else
+ {
+ warning_init ("braces around scalar initializer%s", " for `%s'", NULL);
+ constructor_fields = constructor_type;
+ constructor_unfilled_fields = constructor_type;
+ }
+}
+
+/* Don't read a struct incrementally if it has any bitfields,
+ because the incremental reading code doesn't know how to
+ handle bitfields yet. */
+
+static void
+check_init_type_bitfields (type)
+ tree type;
+{
+ if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree tail;
+ for (tail = TYPE_FIELDS (type); tail;
+ tail = TREE_CHAIN (tail))
+ {
+ if (DECL_BIT_FIELD (tail)
+ /* This catches cases like `int foo : 8;'. */
+ || DECL_MODE (tail) != TYPE_MODE (TREE_TYPE (tail)))
+ {
+ constructor_incremental = 0;
+ break;
+ }
+
+ check_init_type_bitfields (TREE_TYPE (tail));
+ }
+ }
+
+ else if (TREE_CODE (type) == ARRAY_TYPE)
+ check_init_type_bitfields (TREE_TYPE (type));
+}
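+
+/* So, for example, `struct { int f : 8; } v' is never written out
+   incrementally, because the bitfield turns constructor_incremental
+   off here.  */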
+
+/* At the end of an implicit or explicit brace level,
+ finish up that level of constructor.
+ If we were outputting the elements as they are read, return 0
+ from inner levels (process_init_element ignores that),
+ but return error_mark_node from the outermost level
+ (that's what we want to put in DECL_INITIAL).
+ Otherwise, return a CONSTRUCTOR expression. */
+
+tree
+pop_init_level (implicit)
+ int implicit;
+{
+ struct constructor_stack *p;
+ int size = 0;
+ tree constructor = 0;
+
+ if (implicit == 0)
+ {
+ /* When we come to an explicit close brace,
+ pop any inner levels that didn't have explicit braces. */
+ while (constructor_stack->implicit)
+ process_init_element (pop_init_level (1));
+ }
+
+ p = constructor_stack;
+
+ if (constructor_type != 0)
+ size = int_size_in_bytes (constructor_type);
+
+ /* Now output all pending elements. */
+ output_pending_init_elements (1);
+
+#if 0 /* c-parse.in warns about {}. */
+ /* In ANSI, each brace level must have at least one element. */
+ if (! implicit && pedantic
+ && (TREE_CODE (constructor_type) == ARRAY_TYPE
+ ? integer_zerop (constructor_unfilled_index)
+ : constructor_unfilled_fields == TYPE_FIELDS (constructor_type)))
+ pedwarn_init ("empty braces in initializer%s", " for `%s'", NULL);
+#endif
+
+ /* Pad out the end of the structure. */
+
+ if (p->replacement_value)
+ {
+ /* If this closes a superfluous brace pair,
+ just pass out the element between them. */
+ constructor = p->replacement_value;
+ /* If this is the top level thing within the initializer,
+ and it's for a variable, then since we already called
+ assemble_variable, we must output the value now. */
+ if (p->next == 0 && constructor_decl != 0
+ && constructor_incremental)
+ {
+ constructor = digest_init (constructor_type, constructor,
+ 0, 0);
+
+ /* If initializing an array of unknown size,
+ determine the size now. */
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE
+ && TYPE_DOMAIN (constructor_type) == 0)
+ {
+ int failure;
+ int momentary_p;
+
+ push_obstacks_nochange ();
+ if (TREE_PERMANENT (constructor_type))
+ end_temporary_allocation ();
+
+ momentary_p = suspend_momentary ();
+
+ /* We shouldn't have an incomplete array type within
+ some other type. */
+ if (constructor_stack->next)
+ abort ();
+
+ failure
+ = complete_array_type (constructor_type,
+ constructor, 0);
+ if (failure)
+ abort ();
+
+ size = int_size_in_bytes (constructor_type);
+ resume_momentary (momentary_p);
+ pop_obstacks ();
+ }
+
+ output_constant (constructor, size);
+ }
+ }
+ else if (constructor_type == 0)
+ ;
+ else if (TREE_CODE (constructor_type) != RECORD_TYPE
+ && TREE_CODE (constructor_type) != UNION_TYPE
+ && TREE_CODE (constructor_type) != ARRAY_TYPE
+ && ! constructor_incremental)
+ {
+ /* A nonincremental scalar initializer--just return
+ the element, after verifying there is just one. */
+ if (constructor_elements == 0)
+ {
+ error_init ("empty scalar initializer%s",
+ " for `%s'", NULL);
+ constructor = error_mark_node;
+ }
+ else if (TREE_CHAIN (constructor_elements) != 0)
+ {
+ error_init ("extra elements in scalar initializer%s",
+ " for `%s'", NULL);
+ constructor = TREE_VALUE (constructor_elements);
+ }
+ else
+ constructor = TREE_VALUE (constructor_elements);
+ }
+ else if (! constructor_incremental)
+ {
+ if (constructor_erroneous)
+ constructor = error_mark_node;
+ else
+ {
+ int momentary = suspend_momentary ();
+
+ constructor = build (CONSTRUCTOR, constructor_type, NULL_TREE,
+ nreverse (constructor_elements));
+ if (constructor_constant)
+ TREE_CONSTANT (constructor) = 1;
+ if (constructor_constant && constructor_simple)
+ TREE_STATIC (constructor) = 1;
+
+ resume_momentary (momentary);
+ }
+ }
+ else
+ {
+ tree filled;
+ int momentary = suspend_momentary ();
+
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ /* Find the offset of the end of that field. */
+ filled = size_binop (CEIL_DIV_EXPR,
+ constructor_bit_index,
+ size_int (BITS_PER_UNIT));
+ }
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ /* If initializing an array of unknown size,
+ determine the size now. */
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE
+ && TYPE_DOMAIN (constructor_type) == 0)
+ {
+ tree maxindex
+ = size_binop (MINUS_EXPR,
+ constructor_unfilled_index,
+ integer_one_node);
+
+ push_obstacks_nochange ();
+ if (TREE_PERMANENT (constructor_type))
+ end_temporary_allocation ();
+ maxindex = copy_node (maxindex);
+ TYPE_DOMAIN (constructor_type) = build_index_type (maxindex);
+ TREE_TYPE (maxindex) = TYPE_DOMAIN (constructor_type);
+
+ /* TYPE_MAX_VALUE is always one less than the number of elements
+ in the array, because we start counting at zero. Therefore,
+ warn only if the value is less than zero. */
+ if (pedantic
+ && (tree_int_cst_sgn (TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type)))
+ < 0))
+ error_with_decl (constructor_decl,
+ "zero or negative array size `%s'");
+ layout_type (constructor_type);
+ size = int_size_in_bytes (constructor_type);
+ pop_obstacks ();
+ }
+
+ filled = size_binop (MULT_EXPR, constructor_unfilled_index,
+ size_in_bytes (TREE_TYPE (constructor_type)));
+ }
+ else
+ filled = 0;
+
+ if (filled != 0)
+ assemble_zeros (size - TREE_INT_CST_LOW (filled));
+
+ resume_momentary (momentary);
+ }
+
+ constructor_type = p->type;
+ constructor_fields = p->fields;
+ constructor_index = p->index;
+ constructor_range_end = p->range_end;
+ constructor_max_index = p->max_index;
+ constructor_unfilled_index = p->unfilled_index;
+ constructor_unfilled_fields = p->unfilled_fields;
+ constructor_bit_index = p->bit_index;
+ constructor_elements = p->elements;
+ constructor_constant = p->constant;
+ constructor_simple = p->simple;
+ constructor_erroneous = p->erroneous;
+ constructor_pending_elts = p->pending_elts;
+ constructor_depth = p->depth;
+ constructor_incremental = p->incremental;
+ RESTORE_SPELLING_DEPTH (constructor_depth);
+
+ constructor_stack = p->next;
+ free (p);
+
+ if (constructor == 0)
+ {
+ if (constructor_stack == 0)
+ return error_mark_node;
+ return NULL_TREE;
+ }
+ return constructor;
+}
+
+/* Within an array initializer, specify the next index to be initialized.
+ FIRST is that index. If LAST is nonzero, then initialize a range
+ of indices, running from FIRST through LAST. */
+
+void
+set_init_index (first, last)
+ tree first, last;
+{
+ while ((TREE_CODE (first) == NOP_EXPR
+ || TREE_CODE (first) == CONVERT_EXPR
+ || TREE_CODE (first) == NON_LVALUE_EXPR)
+ && (TYPE_MODE (TREE_TYPE (first))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (first, 0)))))
+    first = TREE_OPERAND (first, 0);
+ if (last)
+ while ((TREE_CODE (last) == NOP_EXPR
+ || TREE_CODE (last) == CONVERT_EXPR
+ || TREE_CODE (last) == NON_LVALUE_EXPR)
+ && (TYPE_MODE (TREE_TYPE (last))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (last, 0)))))
+      last = TREE_OPERAND (last, 0);
+
+ if (TREE_CODE (first) != INTEGER_CST)
+ error_init ("nonconstant array index in initializer%s", " for `%s'", NULL);
+ else if (last != 0 && TREE_CODE (last) != INTEGER_CST)
+ error_init ("nonconstant array index in initializer%s", " for `%s'", NULL);
+ else if (tree_int_cst_lt (first, constructor_unfilled_index))
+ error_init ("duplicate array index in initializer%s", " for `%s'", NULL);
+ else
+ {
+ TREE_INT_CST_LOW (constructor_index)
+ = TREE_INT_CST_LOW (first);
+ TREE_INT_CST_HIGH (constructor_index)
+ = TREE_INT_CST_HIGH (first);
+
+ if (last != 0 && tree_int_cst_lt (last, first))
+ error_init ("empty index range in initializer%s", " for `%s'", NULL);
+ else
+ {
+ if (pedantic)
+ pedwarn ("ANSI C forbids specifying element to initialize");
+ constructor_range_end = last;
+ }
+ }
+}
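+
+/* This implements GNU C's designated array initializers, written in
+   this era's syntax as `int a[6] = { [2] 4 };' or, with a range,
+   `{ [0 ... 3] 1 };' -- the extension the ANSI pedwarn above refers
+   to.  */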
+
+/* Within a struct initializer, specify the next field to be initialized. */
+
+void
+set_init_label (fieldname)
+ tree fieldname;
+{
+ tree tail;
+ int passed = 0;
+
+ for (tail = TYPE_FIELDS (constructor_type); tail;
+ tail = TREE_CHAIN (tail))
+ {
+ if (tail == constructor_unfilled_fields)
+ passed = 1;
+ if (DECL_NAME (tail) == fieldname)
+ break;
+ }
+
+ if (tail == 0)
+ error ("unknown field `%s' specified in initializer",
+ IDENTIFIER_POINTER (fieldname));
+ else if (!passed)
+ error ("field `%s' already initialized",
+ IDENTIFIER_POINTER (fieldname));
+ else
+ {
+ constructor_fields = tail;
+ if (pedantic)
+ pedwarn ("ANSI C forbids specifying structure member to initialize");
+ }
+}
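+
+/* Likewise for structures: the GNU extension `struct S s = { b: 2 };'
+   names member `b' here and draws the pedwarn under -pedantic.  */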
+
+/* "Output" the next constructor element.
+ At top level, really output it to assembler code now.
+ Otherwise, collect it in a list from which we will make a CONSTRUCTOR.
+ TYPE is the data type that the containing data type wants here.
+ FIELD is the field (a FIELD_DECL) or the index that this element fills.
+
+   PENDING if nonzero means output pending elements that belong
+ right after this element. (PENDING is normally 1;
+ it is 0 while outputting pending elements, to avoid recursion.) */
+
+static void
+output_init_element (value, type, field, pending)
+ tree value, type, field;
+ int pending;
+{
+ int duplicate = 0;
+
+ if (TREE_CODE (TREE_TYPE (value)) == FUNCTION_TYPE
+ || (TREE_CODE (TREE_TYPE (value)) == ARRAY_TYPE
+ && !(TREE_CODE (value) == STRING_CST
+ && TREE_CODE (type) == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE)
+ && !comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (value)),
+ TYPE_MAIN_VARIANT (type))))
+ value = default_conversion (value);
+
+ if (value == error_mark_node)
+ constructor_erroneous = 1;
+ else if (!TREE_CONSTANT (value))
+ constructor_constant = 0;
+ else if (initializer_constant_valid_p (value, TREE_TYPE (value)) == 0)
+ constructor_simple = 0;
+
+ if (require_constant_value && ! TREE_CONSTANT (value))
+ {
+ error_init ("initializer element%s is not constant",
+ " for `%s'", NULL);
+ value = error_mark_node;
+ }
+ else if (require_constant_elements
+ && initializer_constant_valid_p (value, TREE_TYPE (value)) == 0)
+ {
+ error_init ("initializer element%s is not computable at load time",
+ " for `%s'", NULL);
+ value = error_mark_node;
+ }
+
+ /* If this element duplicates one on constructor_pending_elts,
+ print a message and ignore it. Don't do this when we're
+ processing elements taken off constructor_pending_elts,
+ because we'd always get spurious errors. */
+ if (pending)
+ {
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ if (purpose_member (field, constructor_pending_elts))
+ {
+ error_init ("duplicate initializer%s", " for `%s'", NULL);
+ duplicate = 1;
+ }
+ }
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ tree tail;
+ for (tail = constructor_pending_elts; tail;
+ tail = TREE_CHAIN (tail))
+ if (TREE_PURPOSE (tail) != 0
+ && TREE_CODE (TREE_PURPOSE (tail)) == INTEGER_CST
+ && tree_int_cst_equal (TREE_PURPOSE (tail), constructor_index))
+ break;
+
+ if (tail != 0)
+ {
+ error_init ("duplicate initializer%s", " for `%s'", NULL);
+ duplicate = 1;
+ }
+ }
+ }
+
+ /* If this element doesn't come next in sequence,
+ put it on constructor_pending_elts. */
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE
+ && !tree_int_cst_equal (field, constructor_unfilled_index))
+ {
+ if (! duplicate)
+ /* The copy_node is needed in case field is actually
+ constructor_index, which is modified in place. */
+ constructor_pending_elts
+ = tree_cons (copy_node (field),
+ digest_init (type, value, 0, 0),
+ constructor_pending_elts);
+ }
+ else if (TREE_CODE (constructor_type) == RECORD_TYPE
+ && field != constructor_unfilled_fields)
+ {
+ /* We do this for records but not for unions. In a union,
+ no matter which field is specified, it can be initialized
+ right away since it starts at the beginning of the union. */
+ if (!duplicate)
+ constructor_pending_elts
+ = tree_cons (field,
+ digest_init (type, value, 0, 0),
+ constructor_pending_elts);
+ }
+ else
+ {
+ /* Otherwise, output this element either to
+ constructor_elements or to the assembler file. */
+
+ if (!duplicate)
+ {
+ if (! constructor_incremental)
+ {
+ if (field && TREE_CODE (field) == INTEGER_CST)
+ field = copy_node (field);
+ constructor_elements
+ = tree_cons (field, digest_init (type, value, 0, 0),
+ constructor_elements);
+ }
+ else
+ {
+ /* Structure elements may require alignment.
+ Do this, if necessary. */
+ if (TREE_CODE (constructor_type) == RECORD_TYPE)
+ {
+ /* Advance to offset of this element. */
+ if (! tree_int_cst_equal (constructor_bit_index,
+ DECL_FIELD_BITPOS (field)))
+ {
+ int next = (TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field))
+ / BITS_PER_UNIT);
+ int here = (TREE_INT_CST_LOW (constructor_bit_index)
+ / BITS_PER_UNIT);
+
+ assemble_zeros (next - here);
+ }
+ }
+ output_constant (digest_init (type, value, 0, 0),
+ int_size_in_bytes (type));
+
+ /* For a record or union,
+ keep track of end position of last field. */
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ tree temp = size_binop (PLUS_EXPR, DECL_FIELD_BITPOS (field),
+ DECL_SIZE (field));
+ TREE_INT_CST_LOW (constructor_bit_index)
+ = TREE_INT_CST_LOW (temp);
+ TREE_INT_CST_HIGH (constructor_bit_index)
+ = TREE_INT_CST_HIGH (temp);
+ }
+ }
+ }
+
+ /* Advance the variable that indicates sequential elements output. */
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ tree tem = size_binop (PLUS_EXPR, constructor_unfilled_index,
+ integer_one_node);
+ TREE_INT_CST_LOW (constructor_unfilled_index)
+ = TREE_INT_CST_LOW (tem);
+ TREE_INT_CST_HIGH (constructor_unfilled_index)
+ = TREE_INT_CST_HIGH (tem);
+ }
+ else if (TREE_CODE (constructor_type) == RECORD_TYPE)
+ constructor_unfilled_fields = TREE_CHAIN (constructor_unfilled_fields);
+ else if (TREE_CODE (constructor_type) == UNION_TYPE)
+ constructor_unfilled_fields = 0;
+
+ /* Now output any pending elements which have become next. */
+ if (pending)
+ output_pending_init_elements (0);
+ }
+}
+
+/* Output any pending elements which have become next.
+ As we output elements, constructor_unfilled_{fields,index}
+ advances, which may cause other elements to become next;
+ if so, they too are output.
+
+ If ALL is 0, we return when there are
+ no more pending elements to output now.
+
+ If ALL is 1, we output space as necessary so that
+ we can output all the pending elements. */
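+
+/* E.g. for `int a[4] = { [3] 9, [1] 5 };' both elements first land on
+   constructor_pending_elts; when output becomes possible, 5 is
+   emitted at index 1 and 9 at index 3, with the gaps (here indices
+   0 and 2, when writing to the assembler file) filled with zeros.  */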
+
+static void
+output_pending_init_elements (all)
+ int all;
+{
+ tree tail;
+ tree next;
+
+ retry:
+
+  /* Look through the whole pending list.
+ If we find an element that should be output now,
+ output it. Otherwise, set NEXT to the element
+ that comes first among those still pending. */
+
+ next = 0;
+ for (tail = constructor_pending_elts; tail;
+ tail = TREE_CHAIN (tail))
+ {
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ if (tree_int_cst_equal (TREE_PURPOSE (tail),
+ constructor_unfilled_index))
+ {
+ output_init_element (TREE_VALUE (tail),
+ TREE_TYPE (constructor_type),
+ constructor_unfilled_index, 0);
+ goto retry;
+ }
+ else if (tree_int_cst_lt (TREE_PURPOSE (tail),
+ constructor_unfilled_index))
+ ;
+ else if (next == 0
+ || tree_int_cst_lt (TREE_PURPOSE (tail), next))
+ next = TREE_PURPOSE (tail);
+ }
+ else if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ if (TREE_PURPOSE (tail) == constructor_unfilled_fields)
+ {
+ output_init_element (TREE_VALUE (tail),
+ TREE_TYPE (constructor_unfilled_fields),
+ constructor_unfilled_fields,
+ 0);
+ goto retry;
+ }
+ else if (constructor_unfilled_fields == 0
+ || tree_int_cst_lt (DECL_FIELD_BITPOS (TREE_PURPOSE (tail)),
+ DECL_FIELD_BITPOS (constructor_unfilled_fields)))
+ ;
+ else if (next == 0
+ || tree_int_cst_lt (DECL_FIELD_BITPOS (TREE_PURPOSE (tail)),
+ DECL_FIELD_BITPOS (next)))
+ next = TREE_PURPOSE (tail);
+ }
+ }
+
+ /* Ordinarily return, but not if we want to output all
+ and there are elements left. */
+ if (! (all && next != 0))
+ return;
+
+ /* Generate space up to the position of NEXT. */
+ if (constructor_incremental)
+ {
+ tree filled;
+ tree nextpos_tree = size_int (0);
+
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ /* Find the last field written out, if any. */
+ for (tail = TYPE_FIELDS (constructor_type); tail;
+ tail = TREE_CHAIN (tail))
+ if (TREE_CHAIN (tail) == constructor_unfilled_fields)
+ break;
+
+ if (tail)
+ /* Find the offset of the end of that field. */
+ filled = size_binop (CEIL_DIV_EXPR,
+ size_binop (PLUS_EXPR,
+ DECL_FIELD_BITPOS (tail),
+ DECL_SIZE (tail)),
+ size_int (BITS_PER_UNIT));
+ else
+ filled = size_int (0);
+
+ nextpos_tree = size_binop (CEIL_DIV_EXPR,
+ DECL_FIELD_BITPOS (next),
+ size_int (BITS_PER_UNIT));
+
+ TREE_INT_CST_HIGH (constructor_bit_index)
+ = TREE_INT_CST_HIGH (DECL_FIELD_BITPOS (next));
+ TREE_INT_CST_LOW (constructor_bit_index)
+ = TREE_INT_CST_LOW (DECL_FIELD_BITPOS (next));
+ constructor_unfilled_fields = next;
+ }
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ filled = size_binop (MULT_EXPR, constructor_unfilled_index,
+ size_in_bytes (TREE_TYPE (constructor_type)));
+ nextpos_tree
+ = size_binop (MULT_EXPR, next,
+ size_in_bytes (TREE_TYPE (constructor_type)));
+ TREE_INT_CST_LOW (constructor_unfilled_index)
+ = TREE_INT_CST_LOW (next);
+ TREE_INT_CST_HIGH (constructor_unfilled_index)
+ = TREE_INT_CST_HIGH (next);
+ }
+ else
+ filled = 0;
+
+ if (filled)
+ {
+ int nextpos = TREE_INT_CST_LOW (nextpos_tree);
+
+ assemble_zeros (nextpos - TREE_INT_CST_LOW (filled));
+ }
+ }
+ else
+ {
+ /* If it's not incremental, just skip over the gap,
+ so that after jumping to retry we will output the next
+ successive element. */
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ constructor_unfilled_fields = next;
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ TREE_INT_CST_LOW (constructor_unfilled_index)
+ = TREE_INT_CST_LOW (next);
+ TREE_INT_CST_HIGH (constructor_unfilled_index)
+ = TREE_INT_CST_HIGH (next);
+ }
+ }
+
+ goto retry;
+}
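+
+/* Illustration (editor's sketch, not part of the GCC sources): with the
+ GNU labeled-element extension, initializer elements may arrive out of
+ order; they are held on constructor_pending_elts and emitted by the
+ function above, with zero fill for any gaps, once their position is
+ reached. The example is compiled out with `#if 0'. */
+#if 0
+/* 15 and 29 are queued, then written out in index order with zeros between. */
+int a[6] = { [4] = 29, [1] = 15 };
+#endif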
+
+/* Add one non-braced element to the current constructor level.
+ This adjusts the current position within the constructor's type.
+ This may also start or terminate implicit levels
+ to handle a partly-braced initializer.
+
+ Once this has found the correct level for the new element,
+ it calls output_init_element.
+
+ Note: if we are incrementally outputting this constructor,
+ this function may be called with a null argument
+ representing a sub-constructor that was already incrementally output.
+ When that happens, we output nothing, but we do the bookkeeping
+ to skip past that element of the current constructor. */
+
+void
+process_init_element (value)
+ tree value;
+{
+ tree orig_value = value;
+ int string_flag = value != 0 && TREE_CODE (value) == STRING_CST;
+
+ /* Handle superfluous braces around string cst as in
+ char x[] = {"foo"}; */
+ if (string_flag
+ && constructor_type
+ && TREE_CODE (constructor_type) == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (constructor_type)) == INTEGER_TYPE
+ && integer_zerop (constructor_unfilled_index))
+ {
+ constructor_stack->replacement_value = value;
+ return;
+ }
+
+ if (constructor_stack->replacement_value != 0)
+ {
+ error_init ("excess elements in struct initializer%s",
+ " after `%s'", NULL_PTR);
+ return;
+ }
+
+ /* Ignore elements of a brace group if it is entirely superfluous
+ and has already been diagnosed. */
+ if (constructor_type == 0)
+ return;
+
+ /* If we've exhausted any levels that didn't have braces,
+ pop them now. */
+ while (constructor_stack->implicit)
+ {
+ if ((TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+ && constructor_fields == 0)
+ process_init_element (pop_init_level (1));
+ else if (TREE_CODE (constructor_type) == ARRAY_TYPE
+ && tree_int_cst_lt (constructor_max_index, constructor_index))
+ process_init_element (pop_init_level (1));
+ else
+ break;
+ }
+
+ while (1)
+ {
+ if (TREE_CODE (constructor_type) == RECORD_TYPE)
+ {
+ tree fieldtype;
+ enum tree_code fieldcode;
+
+ if (constructor_fields == 0)
+ {
+ pedwarn_init ("excess elements in struct initializer%s",
+ " after `%s'", NULL_PTR);
+ break;
+ }
+
+ fieldtype = TREE_TYPE (constructor_fields);
+ if (fieldtype != error_mark_node)
+ fieldtype = TYPE_MAIN_VARIANT (fieldtype);
+ fieldcode = TREE_CODE (fieldtype);
+
+ /* Accept a string constant to initialize a subarray. */
+ if (value != 0
+ && fieldcode == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (fieldtype)) == INTEGER_TYPE
+ && string_flag)
+ value = orig_value;
+ /* Otherwise, if we have come to a subaggregate,
+ and we don't have an element of its type, push into it. */
+ else if (value != 0 && !constructor_no_implicit
+ && TYPE_MAIN_VARIANT (TREE_TYPE (value)) != fieldtype
+ && (fieldcode == RECORD_TYPE || fieldcode == ARRAY_TYPE
+ || fieldcode == UNION_TYPE))
+ {
+ push_init_level (1);
+ continue;
+ }
+
+ if (value)
+ {
+ push_member_name (constructor_fields);
+ output_init_element (value, fieldtype, constructor_fields, 1);
+ RESTORE_SPELLING_DEPTH (constructor_depth);
+ }
+ else
+ /* Do the bookkeeping for an element that was
+ directly output as a constructor. */
+ {
+ /* For a record, keep track of end position of last field. */
+ tree temp = size_binop (PLUS_EXPR,
+ DECL_FIELD_BITPOS (constructor_fields),
+ DECL_SIZE (constructor_fields));
+ TREE_INT_CST_LOW (constructor_bit_index)
+ = TREE_INT_CST_LOW (temp);
+ TREE_INT_CST_HIGH (constructor_bit_index)
+ = TREE_INT_CST_HIGH (temp);
+
+ constructor_unfilled_fields = TREE_CHAIN (constructor_fields);
+ }
+
+ constructor_fields = TREE_CHAIN (constructor_fields);
+ /* Skip any nameless bit fields at the beginning. */
+ while (constructor_fields != 0 && DECL_BIT_FIELD (constructor_fields)
+ && DECL_NAME (constructor_fields) == 0)
+ constructor_fields = TREE_CHAIN (constructor_fields);
+ break;
+ }
+ if (TREE_CODE (constructor_type) == UNION_TYPE)
+ {
+ tree fieldtype;
+ enum tree_code fieldcode;
+
+ if (constructor_fields == 0)
+ {
+ pedwarn_init ("excess elements in union initializer%s",
+ " after `%s'", NULL_PTR);
+ break;
+ }
+
+ fieldtype = TREE_TYPE (constructor_fields);
+ if (fieldtype != error_mark_node)
+ fieldtype = TYPE_MAIN_VARIANT (fieldtype);
+ fieldcode = TREE_CODE (fieldtype);
+
+ /* Accept a string constant to initialize a subarray. */
+ if (value != 0
+ && fieldcode == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (fieldtype)) == INTEGER_TYPE
+ && string_flag)
+ value = orig_value;
+ /* Otherwise, if we have come to a subaggregate,
+ and we don't have an element of its type, push into it. */
+ else if (value != 0 && !constructor_no_implicit
+ && TYPE_MAIN_VARIANT (TREE_TYPE (value)) != fieldtype
+ && (fieldcode == RECORD_TYPE || fieldcode == ARRAY_TYPE
+ || fieldcode == UNION_TYPE))
+ {
+ push_init_level (1);
+ continue;
+ }
+
+ if (value)
+ {
+ push_member_name (constructor_fields);
+ output_init_element (value, fieldtype, constructor_fields, 1);
+ RESTORE_SPELLING_DEPTH (constructor_depth);
+ }
+ else
+ /* Do the bookkeeping for an element that was
+ directly output as a constructor. */
+ {
+ TREE_INT_CST_LOW (constructor_bit_index)
+ = TREE_INT_CST_LOW (DECL_SIZE (constructor_fields));
+ TREE_INT_CST_HIGH (constructor_bit_index)
+ = TREE_INT_CST_HIGH (DECL_SIZE (constructor_fields));
+
+ constructor_unfilled_fields = TREE_CHAIN (constructor_fields);
+ }
+
+ constructor_fields = 0;
+ break;
+ }
+ if (TREE_CODE (constructor_type) == ARRAY_TYPE)
+ {
+ tree elttype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type));
+ enum tree_code eltcode = TREE_CODE (elttype);
+
+ /* Accept a string constant to initialize a subarray. */
+ if (value != 0
+ && eltcode == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (elttype)) == INTEGER_TYPE
+ && string_flag)
+ value = orig_value;
+ /* Otherwise, if we have come to a subaggregate,
+ and we don't have an element of its type, push into it. */
+ else if (value != 0 && !constructor_no_implicit
+ && TYPE_MAIN_VARIANT (TREE_TYPE (value)) != elttype
+ && (eltcode == RECORD_TYPE || eltcode == ARRAY_TYPE
+ || eltcode == UNION_TYPE))
+ {
+ push_init_level (1);
+ continue;
+ }
+
+ if (constructor_max_index != 0
+ && tree_int_cst_lt (constructor_max_index, constructor_index))
+ {
+ pedwarn_init ("excess elements in array initializer%s",
+ " after `%s'", NULL_PTR);
+ break;
+ }
+
+ /* Now output the actual element.
+ Ordinarily, output once.
+ If there is a range, repeat it till we advance past the range. */
+ do
+ {
+ tree tem;
+
+ if (value)
+ {
+ push_array_bounds (TREE_INT_CST_LOW (constructor_index));
+ output_init_element (value, elttype, constructor_index, 1);
+ RESTORE_SPELLING_DEPTH (constructor_depth);
+ }
+
+ tem = size_binop (PLUS_EXPR, constructor_index,
+ integer_one_node);
+ TREE_INT_CST_LOW (constructor_index)
+ = TREE_INT_CST_LOW (tem);
+ TREE_INT_CST_HIGH (constructor_index)
+ = TREE_INT_CST_HIGH (tem);
+
+ if (!value)
+ /* If we are doing the bookkeeping for an element that was
+ directly output as a constructor,
+ we must update constructor_unfilled_index. */
+ {
+ TREE_INT_CST_LOW (constructor_unfilled_index)
+ = TREE_INT_CST_LOW (constructor_index);
+ TREE_INT_CST_HIGH (constructor_unfilled_index)
+ = TREE_INT_CST_HIGH (constructor_index);
+ }
+ }
+ while (! (constructor_range_end == 0
+ || tree_int_cst_lt (constructor_range_end,
+ constructor_index)));
+
+ break;
+ }
+
+ /* Handle the sole element allowed in a braced initializer
+ for a scalar variable. */
+ if (constructor_fields == 0)
+ {
+ pedwarn_init ("excess elements in scalar initializer%s",
+ " after `%s'", NULL_PTR);
+ break;
+ }
+
+ if (value)
+ output_init_element (value, constructor_type, NULL_TREE, 1);
+ constructor_fields = 0;
+ break;
+ }
+
+ /* If the (lexically) previous elements are not now saved,
+ we can discard the storage for them. */
+ if (constructor_incremental && constructor_pending_elts == 0 && value != 0)
+ clear_momentary ();
+}
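+
+/* Illustration (editor's sketch, not part of the GCC sources) of the
+ partly-braced initializers the function above must walk; compiled out
+ with `#if 0'. */
+#if 0
+struct s { char tag[4]; int n; };
+struct s v1 = { "abc", 1 }; /* string constant initializes the subarray */
+int m[2][2] = { 1, 2, 3, 4 }; /* implicit levels are pushed for each row */
+#endif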
+
+/* Expand an ASM statement with operands, handling output operands
+ that are not variables or INDIRECT_REFS by transforming such
+ cases into cases that expand_asm_operands can handle.
+
+ Arguments are the same as for expand_asm_operands. */
+
+void
+c_expand_asm_operands (string, outputs, inputs, clobbers, vol, filename, line)
+ tree string, outputs, inputs, clobbers;
+ int vol;
+ char *filename;
+ int line;
+{
+ int noutputs = list_length (outputs);
+ register int i;
+ /* o[I] is the place that output number I should be written. */
+ register tree *o = (tree *) alloca (noutputs * sizeof (tree));
+ register tree tail;
+
+ if (TREE_CODE (string) == ADDR_EXPR)
+ string = TREE_OPERAND (string, 0);
+ if (TREE_CODE (string) != STRING_CST)
+ {
+ error ("asm template is not a string constant");
+ return;
+ }
+
+ /* Record the contents of OUTPUTS before it is modified. */
+ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++)
+ o[i] = TREE_VALUE (tail);
+
+ /* Perform default conversions on array and function inputs. */
+ /* Don't do this for other types--
+ it would screw up operands expected to be in memory. */
+ for (i = 0, tail = inputs; tail; tail = TREE_CHAIN (tail), i++)
+ if (TREE_CODE (TREE_TYPE (TREE_VALUE (tail))) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (tail))) == FUNCTION_TYPE)
+ TREE_VALUE (tail) = default_conversion (TREE_VALUE (tail));
+
+ /* Generate the ASM_OPERANDS insn;
+ store into the TREE_VALUEs of OUTPUTS some trees for
+ where the values were actually stored. */
+ expand_asm_operands (string, outputs, inputs, clobbers, vol, filename, line);
+
+ /* Copy all the intermediate outputs into the specified outputs. */
+ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++)
+ {
+ if (o[i] != TREE_VALUE (tail))
+ {
+ expand_expr (build_modify_expr (o[i], NOP_EXPR, TREE_VALUE (tail)),
+ 0, VOIDmode, 0);
+ free_temp_slots ();
+ }
+ /* Detect modification of read-only values.
+ (Otherwise done by build_modify_expr.) */
+ else
+ {
+ tree type = TREE_TYPE (o[i]);
+ if (TYPE_READONLY (type)
+ || ((TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE)
+ && C_TYPE_FIELDS_READONLY (type)))
+ readonly_warning (o[i], "modification by `asm'");
+ }
+ }
+
+ /* Those MODIFY_EXPRs could do autoincrements. */
+ emit_queue ();
+}
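+
+/* Illustration (editor's sketch, not part of the GCC sources): an output
+ operand that is neither a variable nor an INDIRECT_REF, such as the
+ COMPONENT_REF below, may be written through an intermediate and then
+ copied back by the loop above. Compiled out with `#if 0'. */
+#if 0
+struct pkt { int len; } *p;
+void f () { asm ("" : "=r" (p->len)); }
+#endif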
+
+/* Expand a C `return' statement.
+ RETVAL is the expression for what to return,
+ or a null pointer for `return;' with no value. */
+
+void
+c_expand_return (retval)
+ tree retval;
+{
+ tree valtype = TREE_TYPE (TREE_TYPE (current_function_decl));
+
+ if (TREE_THIS_VOLATILE (current_function_decl))
+ warning ("function declared `noreturn' has a `return' statement");
+
+ if (!retval)
+ {
+ current_function_returns_null = 1;
+ if (warn_return_type && valtype != 0 && TREE_CODE (valtype) != VOID_TYPE)
+ warning ("`return' with no value, in function returning non-void");
+ expand_null_return ();
+ }
+ else if (valtype == 0 || TREE_CODE (valtype) == VOID_TYPE)
+ {
+ current_function_returns_null = 1;
+ if (pedantic || TREE_CODE (TREE_TYPE (retval)) != VOID_TYPE)
+ pedwarn ("`return' with a value, in function returning void");
+ expand_return (retval);
+ }
+ else
+ {
+ tree t = convert_for_assignment (valtype, retval, "return",
+ NULL_TREE, NULL_TREE, 0);
+ tree res = DECL_RESULT (current_function_decl);
+ tree inner;
+
+ if (t == error_mark_node)
+ return;
+
+ inner = t = convert (TREE_TYPE (res), t);
+
+ /* Strip any conversions, additions, and subtractions, and see if
+ we are returning the address of a local variable. Warn if so. */
+ while (TREE_CODE (inner) == NOP_EXPR
+ || TREE_CODE (inner) == NON_LVALUE_EXPR
+ || TREE_CODE (inner) == CONVERT_EXPR
+ || TREE_CODE (inner) == PLUS_EXPR
+ || TREE_CODE (inner) == MINUS_EXPR)
+ inner = TREE_OPERAND (inner, 0);
+
+ if (TREE_CODE (inner) == ADDR_EXPR)
+ {
+ inner = TREE_OPERAND (inner, 0);
+
+ while (TREE_CODE_CLASS (TREE_CODE (inner)) == 'r')
+ inner = TREE_OPERAND (inner, 0);
+
+ if (TREE_CODE (inner) == VAR_DECL
+ && ! DECL_EXTERNAL (inner)
+ && ! TREE_STATIC (inner)
+ && DECL_CONTEXT (inner) == current_function_decl)
+ warning ("function returns address of local variable");
+ }
+
+ t = build (MODIFY_EXPR, TREE_TYPE (res), res, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_return (t);
+ current_function_returns_value = 1;
+ }
+}
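+
+/* Illustration (editor's sketch, not part of the GCC sources): the
+ stripping of conversions and PLUS_EXPR/MINUS_EXPR above lets code such
+ as the following draw the local-address warning even though the address
+ is hidden behind an addition. Compiled out with `#if 0'. */
+#if 0
+int *f ()
+{
+ int local = 0;
+ return &local + 1; /* warning: function returns address of local variable */
+}
+#endif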
+
+/* Start a C switch statement, testing expression EXP.
+ Return EXP if it is valid, an error node otherwise. */
+
+tree
+c_expand_start_case (exp)
+ tree exp;
+{
+ register enum tree_code code = TREE_CODE (TREE_TYPE (exp));
+ tree type = TREE_TYPE (exp);
+
+ if (code != INTEGER_TYPE && code != ENUMERAL_TYPE && code != ERROR_MARK)
+ {
+ error ("switch quantity not an integer");
+ exp = error_mark_node;
+ }
+ else
+ {
+ tree index;
+ type = TYPE_MAIN_VARIANT (TREE_TYPE (exp));
+
+ if (warn_traditional
+ && (type == long_integer_type_node
+ || type == long_unsigned_type_node))
+ pedwarn ("`long' switch expression not converted to `int' in ANSI C");
+
+ exp = default_conversion (exp);
+ type = TREE_TYPE (exp);
+ index = get_unwidened (exp, NULL_TREE);
+ /* We can't strip a conversion from a signed type to an unsigned,
+ because if we did, int_fits_type_p would do the wrong thing
+ when checking case values for being in range,
+ and it's too hard to do the right thing. */
+ if (TREE_UNSIGNED (TREE_TYPE (exp))
+ == TREE_UNSIGNED (TREE_TYPE (index)))
+ exp = index;
+ }
+
+ expand_start_case (1, exp, type, "switch statement");
+
+ return exp;
+}
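+
+/* Illustration (editor's sketch, not part of the GCC sources) of the
+ checks made above; compiled out with `#if 0'. */
+#if 0
+void g (double d, long l)
+{
+ switch (d) { } /* error: switch quantity not an integer */
+ switch (l) { } /* with -Wtraditional: `long' switch expression
+ not converted to `int' in ANSI C */
+}
+#endif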
diff --git a/gnu/usr.bin/cc/cc1plus/Makefile b/gnu/usr.bin/cc/cc1plus/Makefile
new file mode 100644
index 0000000..a03330d
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/Makefile
@@ -0,0 +1,13 @@
+#
+# $FreeBSD$
+#
+
+PROG = cc1plus
+SRCS = call.c class.c cvt.c decl.c decl2.c edsel.c errfn.c error.c except.c expr.c gc.c init.c lex.c method.c parse.c pt.c ptree.c search.c sig.c spew.c tree.c typeck.c typeck2.c xref.c
+BINDIR= /usr/libexec
+NOMAN= 1
+LDDESTDIR+= -L${.CURDIR}/../cc_int/obj
+LDDESTDIR+= -L${.CURDIR}/../cc_int
+LDADD+= -lcc_int
+
+.include <bsd.prog.mk>
diff --git a/gnu/usr.bin/cc/cc1plus/call.c b/gnu/usr.bin/cc/cc1plus/call.c
new file mode 100644
index 0000000..3392a7a
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/call.c
@@ -0,0 +1,2909 @@
+/* Functions related to invoking methods and overloaded functions.
+ Copyright (C) 1987, 1992, 1993 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com) and
+ hacked by Brendan Kehoe (brendan@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* High-level class interface. */
+
+#include "config.h"
+#include "tree.h"
+#include <stdio.h>
+#include "cp-tree.h"
+#include "class.h"
+#include "flags.h"
+
+#include "obstack.h"
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+extern void sorry ();
+
+extern int inhibit_warnings;
+extern int flag_assume_nonnull_objects;
+extern tree ctor_label, dtor_label;
+
+/* From typeck.c: */
+extern tree unary_complex_lvalue ();
+
+/* Compute the ease with which a conversion can be performed
+ between an expected type and the given type. */
+static struct harshness_code convert_harshness ();
+
+#define EVIL_RETURN(ARG) ((ARG).code = EVIL_CODE, (ARG))
+#define QUAL_RETURN(ARG) ((ARG).code = QUAL_CODE, (ARG))
+#define TRIVIAL_RETURN(ARG) ((ARG).code = TRIVIAL_CODE, (ARG))
+#define ZERO_RETURN(ARG) ((ARG).code = 0, (ARG))
+
+/* Ordering function for overload resolution. Compare two candidates
+ by gross quality. */
+int
+rank_for_overload (x, y)
+ struct candidate *x, *y;
+{
+ if (y->h.code & (EVIL_CODE|ELLIPSIS_CODE|USER_CODE))
+ return y->h.code - x->h.code;
+ if (x->h.code & (EVIL_CODE|ELLIPSIS_CODE|USER_CODE))
+ return -1;
+
+ /* This is set by compute_conversion_costs, for calling a non-const
+ member function from a const member function. */
+ if ((y->harshness[0].code & CONST_CODE) ^ (x->harshness[0].code & CONST_CODE))
+ return y->harshness[0].code - x->harshness[0].code;
+
+ if (y->h.code & STD_CODE)
+ {
+ if (x->h.code & STD_CODE)
+ return y->h.distance - x->h.distance;
+ return 1;
+ }
+ if (x->h.code & STD_CODE)
+ return -1;
+
+ return y->h.code - x->h.code;
+}
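+
+/* Editor's note, inferred from the qsort calls later in this file: this
+ comparator sorts better candidates toward the end of the array, so
+ ideal_candidate can take the best match from the tail. */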
+
+/* Compare two candidates, argument by argument. */
+int
+rank_for_ideal (x, y)
+ struct candidate *x, *y;
+{
+ int i;
+
+ if (x->h_len != y->h_len)
+ abort ();
+
+ for (i = 0; i < x->h_len; i++)
+ {
+ if (y->harshness[i].code - x->harshness[i].code)
+ return y->harshness[i].code - x->harshness[i].code;
+ if ((y->harshness[i].code & STD_CODE)
+ && (y->harshness[i].distance - x->harshness[i].distance))
+ return y->harshness[i].distance - x->harshness[i].distance;
+
+ /* They're both the same code. Now see if we're dealing with an
+ integral promotion that needs a finer grain of accuracy. */
+ if (y->harshness[0].code & PROMO_CODE
+ && (y->harshness[i].int_penalty ^ x->harshness[i].int_penalty))
+ return y->harshness[i].int_penalty - x->harshness[i].int_penalty;
+ }
+ return 0;
+}
+
+/* TYPE is the type we wish to convert to. PARM is the parameter
+ we have to work with. We use a somewhat arbitrary cost function
+ to measure this conversion. */
+static struct harshness_code
+convert_harshness (type, parmtype, parm)
+ register tree type, parmtype;
+ tree parm;
+{
+ struct harshness_code h;
+ register enum tree_code codel;
+ register enum tree_code coder;
+
+ h.code = 0;
+ h.distance = 0;
+ h.int_penalty = 0;
+
+#ifdef GATHER_STATISTICS
+ n_convert_harshness++;
+#endif
+
+ if (TYPE_PTRMEMFUNC_P (type))
+ type = TYPE_PTRMEMFUNC_FN_TYPE (type);
+ if (TYPE_PTRMEMFUNC_P (parmtype))
+ parmtype = TYPE_PTRMEMFUNC_FN_TYPE (parmtype);
+
+ if (TREE_CODE (parmtype) == REFERENCE_TYPE)
+ {
+ if (parm)
+ parm = convert_from_reference (parm);
+ parmtype = TREE_TYPE (parmtype);
+ }
+
+ codel = TREE_CODE (type);
+ coder = TREE_CODE (parmtype);
+
+ if (TYPE_MAIN_VARIANT (parmtype) == TYPE_MAIN_VARIANT (type))
+ return ZERO_RETURN (h);
+
+ if (coder == ERROR_MARK)
+ return EVIL_RETURN (h);
+
+ if (codel == POINTER_TYPE && fntype_p (parmtype))
+ {
+ tree p1, p2;
+ struct harshness_code h1, h2;
+
+ /* Get to the METHOD_TYPE or FUNCTION_TYPE that this might be. */
+ type = TREE_TYPE (type);
+
+ if (coder == POINTER_TYPE)
+ {
+ parmtype = TREE_TYPE (parmtype);
+ coder = TREE_CODE (parmtype);
+ }
+
+ if (coder != TREE_CODE (type))
+ return EVIL_RETURN (h);
+
+ /* We allow the default conversion between function type
+ and pointer-to-function type for free. */
+ if (type == parmtype)
+ return ZERO_RETURN (h);
+
+ /* Compare return types. */
+ p1 = TREE_TYPE (type);
+ p2 = TREE_TYPE (parmtype);
+ h2 = convert_harshness (p1, p2, NULL_TREE);
+ if (h2.code & EVIL_CODE)
+ return h2;
+
+ h1.code = TRIVIAL_CODE;
+ h1.distance = 0;
+
+ if (h2.distance != 0)
+ {
+ tree binfo;
+
+ /* This only works for pointers. */
+ if (TREE_CODE (p1) != POINTER_TYPE
+ && TREE_CODE (p1) != REFERENCE_TYPE)
+ return EVIL_RETURN (h);
+
+ p1 = TREE_TYPE (p1);
+ p2 = TREE_TYPE (p2);
+ /* Don't die if we happen to be dealing with void*. */
+ if (!IS_AGGR_TYPE (p1) || !IS_AGGR_TYPE (p2))
+ return EVIL_RETURN (h);
+ if (h2.distance < 0)
+ binfo = get_binfo (p2, p1, 0);
+ else
+ binfo = get_binfo (p1, p2, 0);
+
+ if (! BINFO_OFFSET_ZEROP (binfo))
+ {
+ static int explained = 0;
+ if (h2.distance < 0)
+ message_2_types (sorry, "cannot cast `%d' to `%d' at function call site", p2, p1);
+ else
+ message_2_types (sorry, "cannot cast `%d' to `%d' at function call site", p1, p2);
+
+ if (! explained++)
+ sorry ("(because pointer values change during conversion)");
+ return EVIL_RETURN (h);
+ }
+ }
+
+ h1.code |= h2.code;
+ if (h2.distance > h1.distance)
+ h1.distance = h2.distance;
+
+ p1 = TYPE_ARG_TYPES (type);
+ p2 = TYPE_ARG_TYPES (parmtype);
+ while (p1 && TREE_VALUE (p1) != void_type_node
+ && p2 && TREE_VALUE (p2) != void_type_node)
+ {
+ h2 = convert_harshness (TREE_VALUE (p1), TREE_VALUE (p2),
+ NULL_TREE);
+ if (h2.code & EVIL_CODE)
+ return h2;
+
+ if (h2.distance)
+ {
+ /* This only works for pointers and references. */
+ if (TREE_CODE (TREE_VALUE (p1)) != POINTER_TYPE
+ && TREE_CODE (TREE_VALUE (p1)) != REFERENCE_TYPE)
+ return EVIL_RETURN (h);
+ h2.distance = - h2.distance;
+ }
+
+ h1.code |= h2.code;
+ if (h2.distance > h1.distance)
+ h1.distance = h2.distance;
+ p1 = TREE_CHAIN (p1);
+ p2 = TREE_CHAIN (p2);
+ }
+ if (p1 == p2)
+ return h1;
+ if (p2)
+ {
+ if (p1)
+ return EVIL_RETURN (h);
+ h1.code |= ELLIPSIS_CODE;
+ return h1;
+ }
+ if (p1)
+ {
+ if (TREE_PURPOSE (p1) == NULL_TREE)
+ h1.code |= EVIL_CODE;
+ return h1;
+ }
+ }
+ else if (codel == POINTER_TYPE && coder == OFFSET_TYPE)
+ {
+ /* Get to the OFFSET_TYPE that this might be. */
+ type = TREE_TYPE (type);
+
+ if (coder != TREE_CODE (type))
+ return EVIL_RETURN (h);
+
+ if (TYPE_OFFSET_BASETYPE (type) == TYPE_OFFSET_BASETYPE (parmtype))
+ h.code = 0;
+ else if (UNIQUELY_DERIVED_FROM_P (TYPE_OFFSET_BASETYPE (type),
+ TYPE_OFFSET_BASETYPE (parmtype)))
+ {
+ h.code = STD_CODE;
+ h.distance = 1;
+ }
+ else if (UNIQUELY_DERIVED_FROM_P (TYPE_OFFSET_BASETYPE (parmtype),
+ TYPE_OFFSET_BASETYPE (type)))
+ {
+ h.code = STD_CODE;
+ h.distance = -1;
+ }
+ else
+ return EVIL_RETURN (h);
+ /* Now test the OFFSET_TYPE's target compatibility. */
+ type = TREE_TYPE (type);
+ parmtype = TREE_TYPE (parmtype);
+ }
+
+ if (coder == UNKNOWN_TYPE)
+ {
+ if (codel == FUNCTION_TYPE
+ || codel == METHOD_TYPE
+ || (codel == POINTER_TYPE
+ && (TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (type)) == METHOD_TYPE)))
+ return TRIVIAL_RETURN (h);
+ return EVIL_RETURN (h);
+ }
+
+ if (coder == VOID_TYPE)
+ return EVIL_RETURN (h);
+
+ if (INTEGRAL_CODE_P (codel))
+ {
+ /* Control equivalence of ints and enums. */
+
+ if (codel == ENUMERAL_TYPE
+ && flag_int_enum_equivalence == 0)
+ {
+ /* Enums can be converted to ints, but not vice-versa. */
+ if (coder != ENUMERAL_TYPE
+ || TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (parmtype))
+ return EVIL_RETURN (h);
+ }
+
+ /* else enums and ints (almost) freely interconvert. */
+
+ if (INTEGRAL_CODE_P (coder))
+ {
+ if (TYPE_MAIN_VARIANT (type)
+ == TYPE_MAIN_VARIANT (type_promotes_to (parmtype)))
+ {
+ h.code = PROMO_CODE;
+#if 0 /* What purpose does this serve? -jason */
+ /* A char, short, wchar_t, etc., should promote to an int if
+ it can handle it, otherwise to an unsigned. So we'll make
+ an unsigned. */
+ if (type != integer_type_node)
+ h.int_penalty = 1;
+#endif
+ }
+ else
+ h.code = STD_CODE;
+
+ return h;
+ }
+ else if (coder == REAL_TYPE)
+ {
+ h.code = STD_CODE;
+ h.distance = 0;
+ return h;
+ }
+ }
+
+ if (codel == REAL_TYPE)
+ {
+ if (coder == REAL_TYPE)
+ {
+ if (TYPE_MAIN_VARIANT (type)
+ == TYPE_MAIN_VARIANT (type_promotes_to (parmtype)))
+ h.code = PROMO_CODE;
+ else
+ h.code = STD_CODE;
+
+ return h;
+ }
+ else if (INTEGRAL_CODE_P (coder))
+ {
+ h.code = STD_CODE;
+ h.distance = 0;
+ return h;
+ }
+ }
+
+ /* Convert arrays which have not previously been converted. */
+ if (codel == ARRAY_TYPE)
+ codel = POINTER_TYPE;
+ if (coder == ARRAY_TYPE)
+ coder = POINTER_TYPE;
+
+ /* Conversions among pointers */
+ if (codel == POINTER_TYPE && coder == POINTER_TYPE)
+ {
+ register tree ttl = TYPE_MAIN_VARIANT (TREE_TYPE (type));
+ register tree ttr = TYPE_MAIN_VARIANT (TREE_TYPE (parmtype));
+ int penalty = 4 * (ttl != ttr);
+
+ /* Anything converts to void *. void * converts to anything.
+ Since these may be `const void *' (etc.) use VOID_TYPE
+ instead of void_type_node. Otherwise, the targets must be the same,
+ except that we do allow (at some cost) conversion between signed and
+ unsigned pointer types. */
+
+ if ((TREE_CODE (ttl) == METHOD_TYPE
+ || TREE_CODE (ttl) == FUNCTION_TYPE)
+ && TREE_CODE (ttl) == TREE_CODE (ttr))
+ {
+ if (comptypes (ttl, ttr, -1))
+ {
+ h.code = penalty ? STD_CODE : 0;
+ h.distance = 0;
+ }
+ else
+ h.code = EVIL_CODE;
+ return h;
+ }
+
+#if 1
+ if (TREE_CODE (ttl) != VOID_TYPE && TREE_CODE (ttr) != VOID_TYPE)
+ {
+ if (TREE_UNSIGNED (ttl) != TREE_UNSIGNED (ttr))
+ {
+ ttl = unsigned_type (ttl);
+ ttr = unsigned_type (ttr);
+ penalty = 10;
+ }
+ if (! comp_target_types (ttl, ttr, 0))
+ return EVIL_RETURN (h);
+ }
+#else
+ if (!(TREE_CODE (ttl) == VOID_TYPE
+ || TREE_CODE (ttr) == VOID_TYPE
+ || (TREE_UNSIGNED (ttl) ^ TREE_UNSIGNED (ttr)
+ && (ttl = unsigned_type (ttl),
+ ttr = unsigned_type (ttr),
+ penalty = 10, 0))
+ || (comp_target_types (ttl, ttr, 0))))
+ return EVIL_RETURN (h);
+#endif
+
+ if (penalty == 10 || ttr == ttl)
+ {
+ tree tmp1 = TREE_TYPE (type), tmp2 = TREE_TYPE (parmtype);
+
+ /* If one was unsigned but the other wasn't, then we need to
+ do a standard conversion from T to unsigned T. */
+ if (penalty == 10)
+ h.code = PROMO_CODE; /* was STD_CODE */
+ else
+ h.code = 0;
+
+ /* Note conversion from `T*' to `const T*',
+ or `T*' to `volatile T*'. */
+ if (ttl == ttr
+ && ((TYPE_READONLY (tmp1) != TREE_READONLY (tmp2))
+ || (TYPE_VOLATILE (tmp1) != TYPE_VOLATILE (tmp2))))
+ h.code |= QUAL_CODE;
+
+ h.distance = 0;
+ return h;
+ }
+
+
+ if (TREE_CODE (ttl) == RECORD_TYPE && TREE_CODE (ttr) == RECORD_TYPE)
+ {
+ int b_or_d = get_base_distance (ttl, ttr, 0, 0);
+ if (b_or_d < 0)
+ {
+ b_or_d = get_base_distance (ttr, ttl, 0, 0);
+ if (b_or_d < 0)
+ return EVIL_RETURN (h);
+ h.distance = -b_or_d;
+ }
+ else
+ h.distance = b_or_d;
+ h.code = STD_CODE;
+ return h;
+ }
+
+ /* If converting from a `class*' to a `void*', make it
+ less favorable than any inheritance relationship. */
+ if (TREE_CODE (ttl) == VOID_TYPE && IS_AGGR_TYPE (ttr))
+ {
+ h.code = STD_CODE;
+ h.distance = CLASSTYPE_MAX_DEPTH (ttr)+1;
+ return h;
+ }
+ h.code = penalty ? STD_CODE : PROMO_CODE;
+ return h;
+ }
+
+ if (codel == POINTER_TYPE && coder == INTEGER_TYPE)
+ {
+ /* This is not a bad match, but don't let it beat
+ integer-enum combinations. */
+ if (parm && integer_zerop (parm))
+ {
+ h.code = STD_CODE;
+ h.distance = 0;
+ return h;
+ }
+ }
+
+ /* C++: Since the `this' parameter of a signature member function
+ is represented as a signature pointer to handle default implementations
+ correctly, we can have the case that `type' is a signature pointer
+ while `parmtype' is a pointer to a signature table. We don't really
+ do any conversions in this case, so just return 0. */
+
+ if (codel == RECORD_TYPE && coder == POINTER_TYPE
+ && IS_SIGNATURE_POINTER (type) && IS_SIGNATURE (TREE_TYPE (parmtype)))
+ return ZERO_RETURN (h);
+
+ if (codel == REFERENCE_TYPE)
+ {
+ tree ttl, ttr;
+ int constp = parm ? TREE_READONLY (parm) : TYPE_READONLY (parmtype);
+ int volatilep = (parm ? TREE_THIS_VOLATILE (parm)
+ : TYPE_VOLATILE (parmtype));
+ register tree intype = TYPE_MAIN_VARIANT (parmtype);
+ register enum tree_code form = TREE_CODE (intype);
+ int penalty = 0;
+
+ ttl = TREE_TYPE (type);
+
+ /* When passing a non-const argument into a const reference (or vice
+ versa), dig it a little, so a non-const reference is preferred
+ over this one. (mrs) */
+ if (TYPE_READONLY (ttl) != constp
+ || TYPE_VOLATILE (ttl) != volatilep)
+ penalty = 2;
+ else
+ penalty = 0;
+
+ ttl = TYPE_MAIN_VARIANT (ttl);
+
+ if (form == OFFSET_TYPE)
+ {
+ intype = TREE_TYPE (intype);
+ form = TREE_CODE (intype);
+ }
+
+ if (ttl == intype && penalty == 0)
+ return ZERO_RETURN (h);
+ else
+ penalty = 2;
+
+ if (TREE_UNSIGNED (ttl) ^ TREE_UNSIGNED (intype))
+ {
+ ttl = unsigned_type (ttl);
+ intype = unsigned_type (intype);
+ penalty += 2;
+ }
+
+ ttr = intype;
+
+ /* If the initializer is not an lvalue, then it does not
+ matter if we make life easier for the programmer
+ by creating a temporary variable in which to
+ hold the result. */
+ if (parm && (INTEGRAL_CODE_P (coder)
+ || coder == REAL_TYPE)
+ && ! lvalue_p (parm))
+ {
+ h = convert_harshness (ttl, ttr, NULL_TREE);
+ if (penalty > 2 || h.code != 0)
+ h.code |= STD_CODE;
+ else
+ h.code |= TRIVIAL_CODE;
+ h.distance = 0;
+ return h;
+ }
+
+ if (ttl == ttr)
+ {
+ if (penalty > 2)
+ {
+ h.code = STD_CODE;
+ h.distance = 0;
+ }
+ else
+ {
+ h.code = TRIVIAL_CODE;
+ /* We set this here so that build_overload_call_real will be
+ able to see the penalty we found, rather than just looking
+ at a TRIVIAL_CODE with no other information. */
+ h.int_penalty = penalty;
+ }
+ return h;
+ }
+
+ /* A pointer to void converts to or from any other pointer type.
+ But make such matches less natural than more specific ones. */
+ if (TREE_CODE (ttl) == POINTER_TYPE && TREE_CODE (ttr) == POINTER_TYPE)
+ {
+ if (TREE_TYPE (ttl) == void_type_node
+ || TREE_TYPE (ttr) == void_type_node)
+ {
+ h.code = STD_CODE;
+ h.distance = 0;
+ return h;
+ }
+ }
+
+ /* Here it does matter. If this conversion is from derived to base,
+ allow it. Otherwise, types must be compatible in the strong sense. */
+ if (TREE_CODE (ttl) == RECORD_TYPE && TREE_CODE (ttr) == RECORD_TYPE)
+ {
+ int b_or_d = get_base_distance (ttl, ttr, 0, 0);
+ if (b_or_d < 0)
+ {
+ b_or_d = get_base_distance (ttr, ttl, 0, 0);
+ if (b_or_d < 0)
+ return EVIL_RETURN (h);
+ h.distance = -b_or_d;
+ }
+ /* Say that this conversion is relatively painless.
+ If it turns out that there is a user-defined X(X&)
+ constructor, then that will be invoked, but that's
+ preferable to dealing with other user-defined conversions
+ that may produce surprising results. */
+ else
+ h.distance = b_or_d;
+ h.code = STD_CODE;
+ return h;
+ }
+
+ if (comp_target_types (ttl, intype, 1))
+ {
+ if (penalty)
+ h.code = STD_CODE;
+ h.distance = 0;
+ return h;
+ }
+ }
+ if (codel == RECORD_TYPE && coder == RECORD_TYPE)
+ {
+ int b_or_d = get_base_distance (type, parmtype, 0, 0);
+ if (b_or_d < 0)
+ {
+ b_or_d = get_base_distance (parmtype, type, 0, 0);
+ if (b_or_d < 0)
+ return EVIL_RETURN (h);
+ h.distance = -b_or_d;
+ }
+ else
+ h.distance = b_or_d;
+ h.code = STD_CODE;
+ return h;
+ }
+ return EVIL_RETURN (h);
+}
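+
+/* Illustration (editor's sketch, not part of the GCC sources): for the
+ call below, the `int' candidate rates PROMO_CODE (an integral
+ promotion) while the `double' candidate rates STD_CODE, so overload
+ resolution prefers f(int). Compiled out with `#if 0'. */
+#if 0
+void f (int);
+void f (double);
+void g () { f ('a'); } /* char promotes to int */
+#endif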
+
+#ifdef DEBUG_MATCHING
+static char *
+print_harshness (h)
+ struct harshness_code *h;
+{
+ static char buf[1024];
+ char tmp[1024];
+
+ bzero (buf, 1024 * sizeof (char));
+ strcat (buf, "codes=[");
+ if (h->code & EVIL_CODE)
+ strcat (buf, "EVIL");
+ if (h->code & CONST_CODE)
+ strcat (buf, " CONST");
+ if (h->code & ELLIPSIS_CODE)
+ strcat (buf, " ELLIPSIS");
+ if (h->code & USER_CODE)
+ strcat (buf, " USER");
+ if (h->code & STD_CODE)
+ strcat (buf, " STD");
+ if (h->code & PROMO_CODE)
+ strcat (buf, " PROMO");
+ if (h->code & QUAL_CODE)
+ strcat (buf, " QUAL");
+ if (h->code & TRIVIAL_CODE)
+ strcat (buf, " TRIVIAL");
+ if (buf[0] == '\0')
+ strcat (buf, "0");
+
+ sprintf (tmp, "] distance=%d int_penalty=%d", h->distance, h->int_penalty);
+
+ strcat (buf, tmp);
+
+ return buf;
+}
+#endif
+
+/* Algorithm: For each argument, calculate how difficult it is to
+ make FUNCTION accept that argument. If we can easily tell that
+ FUNCTION won't be acceptable to one of the arguments, then we
+ don't need to compute the ease of converting the other arguments,
+ since it will never show up in the intersection of all arguments'
+ favorite functions.
+
+ Conversions between builtin and user-defined types are allowed, but
+ no function involving such a conversion is preferred to one which
+ does not require such a conversion. Furthermore, such conversions
+ must be unique. */
+
+void
+compute_conversion_costs (function, tta_in, cp, arglen)
+ tree function;
+ tree tta_in;
+ struct candidate *cp;
+ int arglen;
+{
+ tree ttf_in = TYPE_ARG_TYPES (TREE_TYPE (function));
+ tree ttf = ttf_in;
+ tree tta = tta_in;
+
+ /* Start out with no strikes against. */
+ int evil_strikes = 0;
+ int ellipsis_strikes = 0;
+ int user_strikes = 0;
+ int b_or_d_strikes = 0;
+ int easy_strikes = 0;
+
+ int strike_index = 0, win;
+ struct harshness_code lose;
+
+#ifdef GATHER_STATISTICS
+ n_compute_conversion_costs++;
+#endif
+
+ cp->function = function;
+ cp->arg = tta ? TREE_VALUE (tta) : NULL_TREE;
+ cp->u.bad_arg = 0; /* optimistic! */
+
+ cp->h.code = 0;
+ cp->h.distance = 0;
+ cp->h.int_penalty = 0;
+ bzero (cp->harshness,
+ (cp->h_len + 1) * sizeof (struct harshness_code));
+
+ while (ttf && tta)
+ {
+ struct harshness_code h;
+
+ if (ttf == void_list_node)
+ break;
+
+ if (type_unknown_p (TREE_VALUE (tta)))
+ {
+ /* Must perform some instantiation here. */
+ tree rhs = TREE_VALUE (tta);
+ tree lhstype = TREE_VALUE (ttf);
+
+ /* Keep quiet about possible contravariance violations. */
+ int old_inhibit_warnings = inhibit_warnings;
+ inhibit_warnings = 1;
+
+ /* @@ This is to undo what `grokdeclarator' does to
+ parameter types. It really should go through
+ something more general. */
+
+ TREE_TYPE (tta) = unknown_type_node;
+ rhs = instantiate_type (lhstype, rhs, 0);
+ inhibit_warnings = old_inhibit_warnings;
+
+ if (TREE_CODE (rhs) == ERROR_MARK)
+ h.code = EVIL_CODE;
+ else
+ h = convert_harshness (lhstype, TREE_TYPE (rhs), rhs);
+ }
+ else
+ {
+#ifdef DEBUG_MATCHING
+ static tree old_function = NULL_TREE;
+
+ if (!old_function || function != old_function)
+ {
+ cp_error ("trying %D", function);
+ old_function = function;
+ }
+
+ cp_error (" doing (%T) %E against arg %T",
+ TREE_TYPE (TREE_VALUE (tta)), TREE_VALUE (tta),
+ TREE_VALUE (ttf));
+#endif
+
+ h = convert_harshness (TREE_VALUE (ttf),
+ TREE_TYPE (TREE_VALUE (tta)),
+ TREE_VALUE (tta));
+
+#ifdef DEBUG_MATCHING
+ cp_error (" evaluated %s", print_harshness (&h));
+#endif
+ }
+
+ cp->harshness[strike_index] = h;
+ if ((h.code & EVIL_CODE)
+ || ((h.code & STD_CODE) && h.distance < 0))
+ {
+ cp->u.bad_arg = strike_index;
+ evil_strikes = 1;
+ }
+ else if (h.code & ELLIPSIS_CODE)
+ ellipsis_strikes += 1;
+#if 0
+ /* This is never set by `convert_harshness'. */
+ else if (h.code & USER_CODE)
+ {
+ user_strikes += 1;
+ }
+#endif
+ else
+ {
+ if ((h.code & STD_CODE) && h.distance)
+ {
+ if (h.distance > b_or_d_strikes)
+ b_or_d_strikes = h.distance;
+ }
+ else
+ easy_strikes += (h.code & (STD_CODE|PROMO_CODE|TRIVIAL_CODE));
+ cp->h.code |= h.code;
+ /* Make sure we communicate this. */
+ cp->h.int_penalty += h.int_penalty;
+ }
+
+ ttf = TREE_CHAIN (ttf);
+ tta = TREE_CHAIN (tta);
+ strike_index += 1;
+ }
+
+ if (tta)
+ {
+ /* ran out of formals, and parmlist is fixed size. */
+ if (ttf /* == void_type_node */)
+ {
+ cp->h.code = EVIL_CODE;
+ cp->u.bad_arg = -1;
+ return;
+ }
+ else
+ {
+ struct harshness_code h;
+ int l = list_length (tta);
+ ellipsis_strikes += l;
+ h.code = ELLIPSIS_CODE;
+ h.distance = 0;
+ h.int_penalty = 0;
+ for (; l; --l)
+ cp->harshness[strike_index++] = h;
+ }
+ }
+ else if (ttf && ttf != void_list_node)
+ {
+ /* ran out of actuals, and no defaults. */
+ if (TREE_PURPOSE (ttf) == NULL_TREE)
+ {
+ cp->h.code = EVIL_CODE;
+ cp->u.bad_arg = -2;
+ return;
+ }
+ /* Store index of first default. */
+ cp->harshness[arglen].distance = strike_index+1;
+ }
+ else
+ cp->harshness[arglen].distance = 0;
+
+ /* Argument list lengths work out, so don't need to check them again. */
+ if (evil_strikes)
+ {
+ /* We do not check for derived->base conversions here, since in
+ no case would they give evil strike counts, unless such conversions
+ are somehow ambiguous. */
+
+ /* See if any user-defined conversions apply.
+ But make sure that we do not loop. */
+ static int dont_convert_types = 0;
+
+ if (dont_convert_types)
+ {
+ cp->h.code = EVIL_CODE;
+ return;
+ }
+
+ win = 0; /* Only get one chance to win. */
+ ttf = TYPE_ARG_TYPES (TREE_TYPE (function));
+ tta = tta_in;
+ strike_index = 0;
+ evil_strikes = 0;
+
+ while (ttf && tta)
+ {
+ if (ttf == void_list_node)
+ break;
+
+ lose = cp->harshness[strike_index];
+ if ((lose.code & EVIL_CODE)
+ || ((lose.code & STD_CODE) && lose.distance < 0))
+ {
+ tree actual_type = TREE_TYPE (TREE_VALUE (tta));
+ tree formal_type = TREE_VALUE (ttf);
+ int extra_conversions = 0;
+
+ dont_convert_types = 1;
+
+ if (TREE_CODE (formal_type) == REFERENCE_TYPE)
+ formal_type = TREE_TYPE (formal_type);
+ if (TREE_CODE (actual_type) == REFERENCE_TYPE)
+ actual_type = TREE_TYPE (actual_type);
+
+ if (formal_type != error_mark_node
+ && actual_type != error_mark_node)
+ {
+ formal_type = TYPE_MAIN_VARIANT (formal_type);
+ actual_type = TYPE_MAIN_VARIANT (actual_type);
+
+ if (TYPE_HAS_CONSTRUCTOR (formal_type))
+ {
+ /* If it has a constructor for this type,
+ try to use it. */
+ /* @@ There is no way to save this result yet, so
+ success is a NULL_TREE for now. */
+ if (convert_to_aggr (formal_type, TREE_VALUE (tta), 0, 1)
+ != error_mark_node)
+ win++;
+ }
+ if (TYPE_LANG_SPECIFIC (actual_type)
+ && TYPE_HAS_CONVERSION (actual_type))
+ {
+ tree conv;
+ /* Don't issue warnings since we're only groping
+ around for the right answer; we haven't yet
+ committed to going with this solution. */
+ int old_inhibit_warnings = inhibit_warnings;
+
+ inhibit_warnings = 1;
+ conv = build_type_conversion
+ (CALL_EXPR, TREE_VALUE (ttf), TREE_VALUE (tta), 0);
+ inhibit_warnings = old_inhibit_warnings;
+
+ if (conv)
+ {
+ if (conv == error_mark_node)
+ win += 2;
+ else
+ {
+ win++;
+ if (TREE_CODE (conv) != CALL_EXPR)
+ extra_conversions = 1;
+ }
+ }
+ else if (TREE_CODE (TREE_VALUE (ttf)) == REFERENCE_TYPE)
+ {
+ conv = build_type_conversion (CALL_EXPR, formal_type,
+ TREE_VALUE (tta), 0);
+ if (conv)
+ {
+ if (conv == error_mark_node)
+ win += 2;
+ else
+ {
+ win++;
+ if (TREE_CODE (conv) != CALL_EXPR)
+ extra_conversions = 1;
+ }
+ }
+ }
+ }
+ }
+ dont_convert_types = 0;
+
+ if (win == 1)
+ {
+ user_strikes += 1;
+ cp->harshness[strike_index].code
+ = USER_CODE | (extra_conversions ? STD_CODE : 0);
+ win = 0;
+ }
+ else
+ {
+ if (cp->u.bad_arg > strike_index)
+ cp->u.bad_arg = strike_index;
+
+ evil_strikes = win ? 2 : 1;
+ break;
+ }
+ }
+
+ ttf = TREE_CHAIN (ttf);
+ tta = TREE_CHAIN (tta);
+ strike_index += 1;
+ }
+ }
+
+ /* Const member functions get a small penalty because defaulting
+ to const is less useful than defaulting to non-const. */
+ /* This is bogus; it does not correspond to anything in the ARM.
+ This code will be fixed when this entire section is rewritten
+ to conform to the ARM. (mrs) */
+ if (TREE_CODE (TREE_TYPE (function)) == METHOD_TYPE)
+ {
+ tree this_parm = TREE_VALUE (ttf_in);
+
+ if (TREE_CODE (this_parm) == RECORD_TYPE /* Is `this' a sig ptr? */
+ ? TYPE_READONLY (TREE_TYPE (TREE_TYPE (TYPE_FIELDS (this_parm))))
+ : TYPE_READONLY (TREE_TYPE (this_parm)))
+ {
+ cp->harshness[0].code |= TRIVIAL_CODE;
+ ++easy_strikes;
+ }
+ else
+ {
+ /* Calling a non-const member function from a const member function
+ is probably invalid, but for now we let it only draw a warning.
+ We indicate that such a mismatch has occurred by setting the
+ harshness to a maximum value. */
+ if (TREE_CODE (TREE_TYPE (TREE_VALUE (tta_in))) == POINTER_TYPE
+ && (TYPE_READONLY (TREE_TYPE (TREE_TYPE (TREE_VALUE (tta_in))))))
+ cp->harshness[0].code |= CONST_CODE;
+ }
+ }
+
+ if (evil_strikes)
+ cp->h.code = EVIL_CODE;
+ if (ellipsis_strikes)
+ cp->h.code |= ELLIPSIS_CODE;
+ if (user_strikes)
+ cp->h.code |= USER_CODE;
+#ifdef DEBUG_MATCHING
+ cp_error ("final eval %s", print_harshness (&cp->h));
+#endif
+}
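+
+/* Illustration (editor's sketch, not part of the GCC sources) of the
+ const-member penalty applied above; compiled out with `#if 0'. */
+#if 0
+struct A
+{
+ void f ();
+ void g () const { f (); } /* non-const f on a const `this': the
+ candidate is marked CONST_CODE and draws a warning, not an error */
+};
+#endif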
+
+/* Subroutine of ideal_candidate. See if X or Y is a better match
+ than the other. */
+static int
+strictly_better (x, y)
+ unsigned short x, y;
+{
+ unsigned short xor;
+
+ if (x == y)
+ return 0;
+
+ xor = x ^ y;
+ if (xor >= x || xor >= y)
+ return 1;
+ return 0;
+}
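+
+/* Worked example (editor's note): with x = 8|2 = 10 and y = 8 the
+ candidates share the high bit, so x ^ y = 2 is below both and neither
+ is strictly better (return 0). With x = 2 and y = 8, x ^ y = 10 >= y,
+ so one candidate is categorically worse (return 1). */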
+
+/* When one of several possible overloaded functions and/or methods
+ can be called, choose the best candidate for overloading.
+
+ BASETYPE is the context from which we start method resolution
+ or NULL if we are comparing overloaded functions.
+ CANDIDATES is the array of candidates we have to choose from.
+ N_CANDIDATES is the length of CANDIDATES.
+ PARMS is a TREE_LIST of parameters to the function we'll ultimately
+ choose. It is modified in place when resolving methods. It is not
+ modified in place when resolving overloaded functions.
+ LEN is the length of the parameter list. */
+
+static struct candidate *
+ideal_candidate (basetype, candidates, n_candidates, parms, len)
+ tree basetype;
+ struct candidate *candidates;
+ int n_candidates;
+ tree parms;
+ int len;
+{
+ struct candidate *cp = candidates+n_candidates;
+ int i, j = -1, best_code;
+
+ /* For each argument, sort the functions from best to worst for the arg.
+ For each function that's not best for this arg, set its overall
+ harshness to EVIL so that other args won't like it. The candidate
+ list for the last argument is the intersection of all the best-liked
+ functions. */
+
+#if 0
+ for (i = 0; i < len; i++)
+ {
+ qsort (candidates, n_candidates, sizeof (struct candidate),
+ rank_for_overload);
+ best_code = cp[-1].h.code;
+
+ /* To find out functions that are worse than that represented
+ by BEST_CODE, we can't just do a comparison like h.code>best_code.
+ The total harshness for the "best" fn may be 8|8 for two args, and
+ the harshness for the next-best may be 8|2. If we just compared,
+ that would be checking 8>10, which would lead to the next-best
+ being disqualified. What we actually want to do is get rid
+ of functions that are definitely worse than that represented
+ by best_code, i.e. those which have bits set higher than the
+ highest in best_code. Sooooo, what we do is clear out everything
+ represented by best_code, and see if we still come up with something
+ higher. If so (e.g., 8|8 vs 8|16), it'll disqualify it properly. */
+ for (j = n_candidates-2; j >= 0; j--)
+ if ((candidates[j].h.code & ~best_code) > best_code)
+ candidates[j].h.code = EVIL_CODE;
+ }
+
+ if (cp[-1].h.code & EVIL_CODE)
+ return NULL;
+#else
+ qsort (candidates, n_candidates, sizeof (struct candidate),
+ rank_for_overload);
+ best_code = cp[-1].h.code;
+#endif
+
+ /* If they're at least as good as each other, do an arg-by-arg check. */
+ if (! strictly_better (cp[-1].h.code, cp[-2].h.code))
+ {
+ int better = 0;
+ int worse = 0;
+
+ for (j = 0; j < n_candidates; j++)
+ if (! strictly_better (candidates[j].h.code, best_code))
+ break;
+
+ qsort (candidates+j, n_candidates-j, sizeof (struct candidate),
+ rank_for_ideal);
+ for (i = 0; i < len; i++)
+ {
+ if (cp[-1].harshness[i].code < cp[-2].harshness[i].code)
+ better = 1;
+ else if (cp[-1].harshness[i].code > cp[-2].harshness[i].code)
+ worse = 1;
+ else if (cp[-1].harshness[i].code & STD_CODE)
+ {
+ /* If it involves a standard conversion, let the
+ inheritance lattice be the final arbiter. */
+ if (cp[-1].harshness[i].distance > cp[-2].harshness[i].distance)
+ worse = 1;
+ else if (cp[-1].harshness[i].distance < cp[-2].harshness[i].distance)
+ better = 1;
+ }
+ else if (cp[-1].harshness[i].code & PROMO_CODE)
+ {
+ /* For integral promotions, take into account a finer
+ granularity for determining which types should be favored
+ over others in such promotions. */
+ if (cp[-1].harshness[i].int_penalty > cp[-2].harshness[i].int_penalty)
+ worse = 1;
+ else if (cp[-1].harshness[i].int_penalty < cp[-2].harshness[i].int_penalty)
+ better = 1;
+ }
+ }
+
+ if (! better || worse)
+ return NULL;
+ }
+ return cp-1;
+}
+
+/* Assume that if the class referred to is not in the
+ current class hierarchy, it may be remote.
+ PARENT is assumed to be of aggregate type here. */
+static int
+may_be_remote (parent)
+ tree parent;
+{
+ if (TYPE_OVERLOADS_METHOD_CALL_EXPR (parent) == 0)
+ return 0;
+
+ if (current_class_type == NULL_TREE)
+ return 0;
+
+ if (parent == current_class_type)
+ return 0;
+
+ if (UNIQUELY_DERIVED_FROM_P (parent, current_class_type))
+ return 0;
+ return 1;
+}
+
+tree
+build_vfield_ref (datum, type)
+ tree datum, type;
+{
+ tree rval;
+ int old_assume_nonnull_objects = flag_assume_nonnull_objects;
+
+ if (datum == error_mark_node)
+ return error_mark_node;
+
+ /* Vtable references are always made from non-null objects. */
+ flag_assume_nonnull_objects = 1;
+ if (TREE_CODE (TREE_TYPE (datum)) == REFERENCE_TYPE)
+ datum = convert_from_reference (datum);
+
+ if (! TYPE_USES_COMPLEX_INHERITANCE (type))
+ rval = build (COMPONENT_REF, TREE_TYPE (CLASSTYPE_VFIELD (type)),
+ datum, CLASSTYPE_VFIELD (type));
+ else
+ rval = build_component_ref (datum, DECL_NAME (CLASSTYPE_VFIELD (type)), 0, 0);
+ flag_assume_nonnull_objects = old_assume_nonnull_objects;
+
+ return rval;
+}
+
+/* Build a call to a member of an object. I.e., one that overloads
+ operator ()(), or is a pointer-to-function or pointer-to-method. */
+static tree
+build_field_call (basetype_path, instance_ptr, name, parms)
+ tree basetype_path, instance_ptr, name, parms;
+{
+ tree field, instance;
+
+ if (instance_ptr == current_class_decl)
+ {
+ /* Check to see if we really have a reference to an instance variable
+ with `operator()()' overloaded. */
+ field = IDENTIFIER_CLASS_VALUE (name);
+
+ if (field == NULL_TREE)
+ {
+ cp_error ("`this' has no member named `%D'", name);
+ return error_mark_node;
+ }
+
+ if (TREE_CODE (field) == FIELD_DECL)
+ {
+ /* If it's a field, try overloading operator (),
+ or calling if the field is a pointer-to-function. */
+ instance = build_component_ref_1 (C_C_D, field, 0);
+ if (instance == error_mark_node)
+ return error_mark_node;
+
+ if (TYPE_LANG_SPECIFIC (TREE_TYPE (instance))
+ && TYPE_OVERLOADS_CALL_EXPR (TREE_TYPE (instance)))
+ return build_opfncall (CALL_EXPR, LOOKUP_NORMAL, instance, parms, NULL_TREE);
+
+ if (TREE_CODE (TREE_TYPE (instance)) == POINTER_TYPE)
+ {
+ if (TREE_CODE (TREE_TYPE (TREE_TYPE (instance))) == FUNCTION_TYPE)
+ return build_function_call (instance, parms);
+ else if (TREE_CODE (TREE_TYPE (TREE_TYPE (instance))) == METHOD_TYPE)
+ return build_function_call (instance, tree_cons (NULL_TREE, current_class_decl, parms));
+ }
+ }
+ return NULL_TREE;
+ }
+
+ /* Check to see if this is not really a reference to an instance variable
+ with `operator()()' overloaded. */
+ field = lookup_field (basetype_path, name, 1, 0);
+
+ /* This can happen if the reference was ambiguous or for access
+ violations. */
+ if (field == error_mark_node)
+ return error_mark_node;
+
+ if (field)
+ {
+ tree basetype;
+ tree ftype = TREE_TYPE (field);
+
+ if (TREE_CODE (ftype) == REFERENCE_TYPE)
+ ftype = TREE_TYPE (ftype);
+
+ if (TYPE_LANG_SPECIFIC (ftype) && TYPE_OVERLOADS_CALL_EXPR (ftype))
+ {
+ /* Make the next search for this field very short. */
+ basetype = DECL_FIELD_CONTEXT (field);
+ instance_ptr = convert_pointer_to (basetype, instance_ptr);
+
+ instance = build_indirect_ref (instance_ptr, NULL_PTR);
+ return build_opfncall (CALL_EXPR, LOOKUP_NORMAL,
+ build_component_ref_1 (instance, field, 0),
+ parms, NULL_TREE);
+ }
+ if (TREE_CODE (ftype) == POINTER_TYPE)
+ {
+ if (TREE_CODE (TREE_TYPE (ftype)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (ftype)) == METHOD_TYPE)
+ {
+ /* This is a member which is a pointer to function. */
+ tree ref
+ = build_component_ref_1 (build_indirect_ref (instance_ptr,
+ NULL_PTR),
+ field, LOOKUP_COMPLAIN);
+ if (ref == error_mark_node)
+ return error_mark_node;
+ return build_function_call (ref, parms);
+ }
+ }
+ else if (TREE_CODE (ftype) == METHOD_TYPE)
+ {
+ error ("invalid call via pointer-to-member function");
+ return error_mark_node;
+ }
+ else
+ return NULL_TREE;
+ }
+ return NULL_TREE;
+}
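+
+/* Illustration (editor's sketch, not part of the GCC sources) of the
+ member calls handled above; compiled out with `#if 0'. */
+#if 0
+struct F
+{
+ int (*fp) (int); /* pointer-to-function member */
+ void operator() (int); /* overloaded call operator */
+};
+void g (F *p) { p->fp (1); (*p) (2); }
+#endif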
+
+tree
+find_scoped_type (type, inner_name, inner_types)
+ tree type, inner_name, inner_types;
+{
+ tree tags = CLASSTYPE_TAGS (type);
+
+ while (tags)
+ {
+ /* The TREE_PURPOSE of an enum tag (which becomes a member of the
+ enclosing class) is set to the name for the enum type. So, if
+ inner_name is `bar', and we strike `baz' for `enum bar { baz }',
+ then this test will be true. */
+ if (TREE_PURPOSE (tags) == inner_name)
+ {
+ if (inner_types == NULL_TREE)
+ return DECL_NESTED_TYPENAME (TYPE_NAME (TREE_VALUE (tags)));
+ return resolve_scope_to_name (TREE_VALUE (tags), inner_types);
+ }
+ tags = TREE_CHAIN (tags);
+ }
+
+#if 0
+ /* XXX This needs to be fixed better. */
+ if (TREE_CODE (type) == UNINSTANTIATED_P_TYPE)
+ {
+ sorry ("nested class lookup in template type");
+ return NULL_TREE;
+ }
+#endif
+
+ /* Look for a TYPE_DECL. */
+ for (tags = TYPE_FIELDS (type); tags; tags = TREE_CHAIN (tags))
+ if (TREE_CODE (tags) == TYPE_DECL && DECL_NAME (tags) == inner_name)
+ {
+ /* Code by raeburn. */
+ if (inner_types == NULL_TREE)
+ return DECL_NESTED_TYPENAME (tags);
+ return resolve_scope_to_name (TREE_TYPE (tags), inner_types);
+ }
+
+ return NULL_TREE;
+}
+
+/* Resolve an expression NAME1::NAME2::...::NAMEn to
+ the name that names the above nested type. INNER_TYPES
+ is a chain of nested type names (held together by SCOPE_REFs);
+ OUTER_TYPE is the type we know to enclose INNER_TYPES.
+ Returns NULL_TREE if there is an error. */
+tree
+resolve_scope_to_name (outer_type, inner_stuff)
+ tree outer_type, inner_stuff;
+{
+ register tree tmp;
+ tree inner_name, inner_type;
+
+ if (outer_type == NULL_TREE && current_class_type != NULL_TREE)
+ {
+ /* We first try to look for a nesting in our current class context,
+ then try any enclosing classes. */
+ tree type = current_class_type;
+
+ while (type && (TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE))
+ {
+ tree rval = resolve_scope_to_name (type, inner_stuff);
+
+ if (rval != NULL_TREE)
+ return rval;
+ type = DECL_CONTEXT (TYPE_NAME (type));
+ }
+ }
+
+ if (TREE_CODE (inner_stuff) == SCOPE_REF)
+ {
+ inner_name = TREE_OPERAND (inner_stuff, 0);
+ inner_type = TREE_OPERAND (inner_stuff, 1);
+ }
+ else
+ {
+ inner_name = inner_stuff;
+ inner_type = NULL_TREE;
+ }
+
+ if (outer_type == NULL_TREE)
+ {
+ /* If we have something that's already a type by itself,
+ use that. */
+ if (IDENTIFIER_HAS_TYPE_VALUE (inner_name))
+ {
+ if (inner_type)
+ return resolve_scope_to_name (IDENTIFIER_TYPE_VALUE (inner_name),
+ inner_type);
+ return inner_name;
+ }
+ return NULL_TREE;
+ }
+
+ if (! IS_AGGR_TYPE (outer_type))
+ return NULL_TREE;
+
+ /* Look for member classes or enums. */
+ tmp = find_scoped_type (outer_type, inner_name, inner_type);
+
+ /* If it's not a type in this class, then go down into the
+ base classes and search there. */
+ if (! tmp && TYPE_BINFO (outer_type))
+ {
+ tree binfos = TYPE_BINFO_BASETYPES (outer_type);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ tmp = resolve_scope_to_name (BINFO_TYPE (base_binfo), inner_stuff);
+ if (tmp)
+ return tmp;
+ }
+ tmp = NULL_TREE;
+ }
+
+ return tmp;
+}
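+
+/* Illustration (editor's sketch, not part of the GCC sources): for a
+ qualified name such as Outer::Inner::e below, resolve_scope_to_name is
+ entered with `Outer' and a SCOPE_REF chaining `Inner' to `e', and
+ find_scoped_type locates `Inner' among Outer's tags before recursing.
+ Compiled out with `#if 0'. */
+#if 0
+struct Outer
+{
+ struct Inner { enum E { e }; };
+};
+int v = Outer::Inner::e;
+#endif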
+
+/* Build a method call of the form `EXP->SCOPES::NAME (PARMS)'.
+ This is how virtual function calls are avoided. */
+tree
+build_scoped_method_call (exp, scopes, name, parms)
+ tree exp, scopes, name, parms;
+{
+ /* Because this syntactic form does not allow
+ a pointer to a base class to be `stolen',
+ we need not protect the derived->base conversion
+ that happens here.
+
+ @@ But we do have to check access privileges later. */
+ tree basename = resolve_scope_to_name (NULL_TREE, scopes);
+ tree basetype, binfo, decl;
+ tree type = TREE_TYPE (exp);
+
+ if (type == error_mark_node
+ || basename == NULL_TREE)
+ return error_mark_node;
+
+ basetype = IDENTIFIER_TYPE_VALUE (basename);
+
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+
+ /* Destructors can be "called" for simple types; see 5.2.4 and 12.4. Note
+ that explicit ~int is caught in the parser; this deals with typedefs
+ and template parms. */
+ if (TREE_CODE (name) == BIT_NOT_EXPR && ! is_aggr_typedef (basename, 0))
+ {
+ if (type != basetype)
+ cp_error ("type of `%E' does not match destructor type `%T' (type was `%T')",
+ exp, basetype, type);
+ name = IDENTIFIER_TYPE_VALUE (TREE_OPERAND (name, 0));
+ if (basetype != name)
+ cp_error ("qualified type `%T' does not match destructor type `%T'",
+ basetype, name);
+ return void_zero_node;
+ }
+
+ if (! is_aggr_typedef (basename, 1))
+ return error_mark_node;
+
+ if (! IS_AGGR_TYPE (type))
+ {
+ cp_error ("base object `%E' of scoped method call is of non-aggregate type `%T'",
+ exp, type);
+ return error_mark_node;
+ }
+
+ if ((binfo = binfo_or_else (basetype, type)))
+ {
+ if (binfo == error_mark_node)
+ return error_mark_node;
+ if (TREE_CODE (exp) == INDIRECT_REF)
+ decl = build_indirect_ref (convert_pointer_to (binfo,
+ build_unary_op (ADDR_EXPR, exp, 0)), NULL_PTR);
+ else
+ decl = build_scoped_ref (exp, scopes);
+
+ /* Call to a destructor. */
+ if (TREE_CODE (name) == BIT_NOT_EXPR)
+ {
+ /* Explicit call to destructor. */
+ name = TREE_OPERAND (name, 0);
+ if (name != constructor_name (TREE_TYPE (decl)))
+ {
+ cp_error
+ ("qualified type `%T' does not match destructor type `%T'",
+ TREE_TYPE (decl), name);
+ return error_mark_node;
+ }
+ if (! TYPE_HAS_DESTRUCTOR (TREE_TYPE (decl)))
+ return void_zero_node;
+
+ return build_delete (TREE_TYPE (decl), decl, integer_two_node,
+ LOOKUP_NORMAL|LOOKUP_NONVIRTUAL|LOOKUP_DESTRUCTOR,
+ 0);
+ }
+
+ /* Call to a method. */
+ return build_method_call (decl, name, parms, binfo,
+ LOOKUP_NORMAL|LOOKUP_NONVIRTUAL);
+ }
+ return error_mark_node;
+}
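+
+/* Illustration (editor's sketch, not part of the GCC sources): the
+ qualified call below is dispatched by the function above without a
+ vtable lookup; compiled out with `#if 0'. */
+#if 0
+struct B { virtual void f (); };
+struct D : public B { void f (); };
+void g (D *p) { p->B::f (); } /* calls B::f directly */
+#endif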
+
+static void
+print_candidates (candidates)
+ tree candidates;
+{
+ cp_error_at ("candidates are: %D", TREE_VALUE (candidates));
+ candidates = TREE_CHAIN (candidates);
+
+ while (candidates)
+ {
+ cp_error_at (" %D", TREE_VALUE (candidates));
+ candidates = TREE_CHAIN (candidates);
+ }
+}
+
+static void
+print_n_candidates (candidates, n)
+ struct candidate *candidates;
+ int n;
+{
+ int i;
+
+ cp_error_at ("candidates are: %D", candidates[0].function);
+ for (i = 1; i < n; i++)
+ cp_error_at (" %D", candidates[i].function);
+}
+
+/* Build something of the form ptr->method (args)
+ or object.method (args). This can also build
+ calls to constructors, and find friends.
+
+ Member functions always take their class variable
+ as a pointer.
+
+ INSTANCE is a class instance.
+
+ NAME is the name of the method desired, usually an IDENTIFIER_NODE.
+
+ PARMS help to figure out what that NAME really refers to.
+
+ BASETYPE_PATH, if non-NULL, contains a chain from the type of INSTANCE
+ down to the real instance type to use for access checking. We need this
+ information to get protected accesses correct. This parameter is used
+ by build_member_call.
+
+ FLAGS is the logical disjunction of zero or more LOOKUP_
+ flags. See cp-tree.h for more info.
+
+ If this is all OK, calls build_function_call with the resolved
+ member function.
+
+ This function must also handle being called to perform
+ initialization, promotion/coercion of arguments, and
+ instantiation of default parameters.
+
+ Note that NAME may refer to an instance variable name. If
+ `operator()()' is defined for the type of that field, then we return
+ that result. */
+tree
+build_method_call (instance, name, parms, basetype_path, flags)
+ tree instance, name, parms, basetype_path;
+ int flags;
+{
+ register tree function, fntype, value_type;
+ register tree basetype, save_basetype;
+ register tree baselink, result, method_name, parmtypes, parm;
+ tree last;
+ int pass;
+ enum access_type access = access_public;
+
+ /* Range of cases for vtable optimization. */
+ enum vtable_needs { not_needed, maybe_needed, unneeded, needed };
+ enum vtable_needs need_vtbl = not_needed;
+
+ char *name_kind;
+ int ever_seen = 0;
+ tree instance_ptr = NULL_TREE;
+ int all_virtual = flag_all_virtual;
+ int static_call_context = 0;
+ tree found_fns = NULL_TREE;
+
+ /* Keep track of `const' and `volatile' objects. */
+ int constp, volatilep;
+
+#ifdef GATHER_STATISTICS
+ n_build_method_call++;
+#endif
+
+ if (instance == error_mark_node
+ || name == error_mark_node
+ || parms == error_mark_node
+ || (instance != NULL_TREE && TREE_TYPE (instance) == error_mark_node))
+ return error_mark_node;
+
+ /* This is the logic that magically deletes the second argument to
+ operator delete, if it is not needed. */
+ if (name == ansi_opname[(int) DELETE_EXPR] && list_length (parms)==2)
+ {
+ tree save_last = TREE_CHAIN (parms);
+ tree result;
+ /* Get rid of the unneeded argument. */
+ TREE_CHAIN (parms) = NULL_TREE;
+ result = build_method_call (instance, name, parms, basetype_path,
+ (LOOKUP_SPECULATIVELY|flags)
+ &~LOOKUP_COMPLAIN);
+ /* If it works, return it. */
+ if (result && result != error_mark_node)
+ return build_method_call (instance, name, parms, basetype_path, flags);
+ /* If it doesn't work, the two-argument delete must work. */
+ TREE_CHAIN (parms) = save_last;
+ }
+ /* We already know whether it's needed or not for vec delete. */
+ else if (name == ansi_opname[(int) VEC_DELETE_EXPR]
+ && ! TYPE_VEC_DELETE_TAKES_SIZE (TREE_TYPE (instance)))
+ TREE_CHAIN (parms) = NULL_TREE;
+
+ if (TREE_CODE (name) == BIT_NOT_EXPR)
+ {
+ flags |= LOOKUP_DESTRUCTOR;
+ name = TREE_OPERAND (name, 0);
+ if (parms)
+ error ("destructors take no parameters");
+ basetype = TREE_TYPE (instance);
+ if (IS_AGGR_TYPE (basetype))
+ {
+ if (name == constructor_name (basetype))
+ goto huzzah;
+ }
+ else
+ {
+ if (basetype == get_type_value (name))
+ goto huzzah;
+ }
+ cp_error ("destructor name `~%D' does not match type `%T' of expression",
+ name, basetype);
+ return void_zero_node;
+
+ huzzah:
+ if (! TYPE_HAS_DESTRUCTOR (basetype))
+ return void_zero_node;
+ instance = default_conversion (instance);
+ instance_ptr = build_unary_op (ADDR_EXPR, instance, 0);
+ return build_delete (build_pointer_type (basetype),
+ instance_ptr, integer_two_node,
+ LOOKUP_NORMAL|LOOKUP_DESTRUCTOR, 0);
+ }
+
+ {
+ char *xref_name;
+
+ /* Initialize name for error reporting. */
+ if (IDENTIFIER_OPNAME_P (name) && ! IDENTIFIER_TYPENAME_P (name))
+ {
+ char *p = operator_name_string (name);
+ xref_name = (char *)alloca (strlen (p) + 10);
+ sprintf (xref_name, "operator %s", p);
+ }
+ else if (TREE_CODE (name) == SCOPE_REF)
+ xref_name = IDENTIFIER_POINTER (TREE_OPERAND (name, 1));
+ else
+ xref_name = IDENTIFIER_POINTER (name);
+
+ GNU_xref_call (current_function_decl, xref_name);
+ }
+
+ if (instance == NULL_TREE)
+ {
+ basetype = NULL_TREE;
+ /* Check cases where this is really a call to raise
+ an exception. */
+ if (current_class_type && TREE_CODE (name) == IDENTIFIER_NODE)
+ {
+ basetype = purpose_member (name, CLASSTYPE_TAGS (current_class_type));
+ if (basetype)
+ basetype = TREE_VALUE (basetype);
+ }
+ else if (TREE_CODE (name) == SCOPE_REF
+ && TREE_CODE (TREE_OPERAND (name, 0)) == IDENTIFIER_NODE)
+ {
+ if (! is_aggr_typedef (TREE_OPERAND (name, 0), 1))
+ return error_mark_node;
+ basetype = purpose_member (TREE_OPERAND (name, 1),
+ CLASSTYPE_TAGS (IDENTIFIER_TYPE_VALUE (TREE_OPERAND (name, 0))));
+ if (basetype)
+ basetype = TREE_VALUE (basetype);
+ }
+
+ if (basetype != NULL_TREE)
+ ;
+ /* call to a constructor... */
+ else if (basetype_path)
+ basetype = BINFO_TYPE (basetype_path);
+ else if (IDENTIFIER_HAS_TYPE_VALUE (name))
+ {
+ basetype = IDENTIFIER_TYPE_VALUE (name);
+ name = constructor_name_full (basetype);
+ }
+ else
+ {
+ tree typedef_name = lookup_name (name, 1);
+ if (typedef_name && TREE_CODE (typedef_name) == TYPE_DECL)
+ {
+ /* Canonicalize the typedef name. */
+ basetype = TREE_TYPE (typedef_name);
+ name = TYPE_IDENTIFIER (basetype);
+ }
+ else
+ {
+ cp_error ("no constructor named `%T' in scope",
+ name);
+ return error_mark_node;
+ }
+ }
+
+ if (! IS_AGGR_TYPE (basetype))
+ {
+ non_aggr_error:
+ if ((flags & LOOKUP_COMPLAIN) && TREE_CODE (basetype) != ERROR_MARK)
+ cp_error ("request for member `%D' in `%E', which is of non-aggregate type `%T'",
+ name, instance, basetype);
+
+ return error_mark_node;
+ }
+ }
+ else if (instance == C_C_D || instance == current_class_decl)
+ {
+ /* When doing initialization, we side-effect the TREE_TYPE of
+ C_C_D, hence we cannot set up BASETYPE from CURRENT_CLASS_TYPE. */
+ basetype = TREE_TYPE (C_C_D);
+
+ /* Anything manifestly `this' in constructors and destructors
+ has a known type, so virtual function tables are not needed. */
+ if (TYPE_VIRTUAL_P (basetype)
+ && !(flags & LOOKUP_NONVIRTUAL))
+ need_vtbl = (dtor_label || ctor_label)
+ ? unneeded : maybe_needed;
+
+ instance = C_C_D;
+ instance_ptr = current_class_decl;
+ result = build_field_call (TYPE_BINFO (current_class_type),
+ instance_ptr, name, parms);
+
+ if (result)
+ return result;
+ }
+ else if (TREE_CODE (instance) == RESULT_DECL)
+ {
+ basetype = TREE_TYPE (instance);
+ /* Should we ever have to make a virtual function reference
+ from a RESULT_DECL, know that it must be of fixed type
+ within the scope of this function. */
+ if (!(flags & LOOKUP_NONVIRTUAL) && TYPE_VIRTUAL_P (basetype))
+ need_vtbl = maybe_needed;
+ instance_ptr = build1 (ADDR_EXPR, TYPE_POINTER_TO (basetype), instance);
+ }
+ else
+ {
+ /* The MAIN_VARIANT of the type that `instance_ptr' winds up being. */
+ tree inst_ptr_basetype;
+
+ static_call_context =
+ (TREE_CODE (instance) == INDIRECT_REF
+ && TREE_CODE (TREE_OPERAND (instance, 0)) == NOP_EXPR
+ && TREE_OPERAND (TREE_OPERAND (instance, 0), 0) == error_mark_node);
+
+ if (TREE_CODE (instance) == OFFSET_REF)
+ instance = resolve_offset_ref (instance);
+
+      /* The base type of an instance variable is a pointer to its class.  */
+ basetype = TREE_TYPE (instance);
+
+ if (TREE_CODE (basetype) == REFERENCE_TYPE)
+ {
+ basetype = TREE_TYPE (basetype);
+ if (! IS_AGGR_TYPE (basetype))
+ goto non_aggr_error;
+ /* Call to convert not needed because we are remaining
+ within the same type. */
+ instance_ptr = build1 (NOP_EXPR, build_pointer_type (basetype),
+ instance);
+ inst_ptr_basetype = TYPE_MAIN_VARIANT (basetype);
+ }
+ else
+ {
+ if (! IS_AGGR_TYPE (basetype))
+ goto non_aggr_error;
+
+ /* If `instance' is a signature pointer/reference and `name' is
+ not a constructor, we are calling a signature member function.
+ In that case set the `basetype' to the signature type. */
+ if ((IS_SIGNATURE_POINTER (basetype)
+ || IS_SIGNATURE_REFERENCE (basetype))
+ && TYPE_IDENTIFIER (basetype) != name)
+ basetype = SIGNATURE_TYPE (basetype);
+
+ if ((IS_SIGNATURE (basetype)
+ && (instance_ptr = build_optr_ref (instance)))
+ || (lvalue_p (instance)
+ && (instance_ptr = build_unary_op (ADDR_EXPR, instance, 0)))
+ || (instance_ptr = unary_complex_lvalue (ADDR_EXPR, instance)))
+ {
+ if (instance_ptr == error_mark_node)
+ return error_mark_node;
+ }
+ else if (TREE_CODE (instance) == NOP_EXPR
+ || TREE_CODE (instance) == CONSTRUCTOR)
+ {
+ /* A cast is not an lvalue. Initialize a fresh temp
+ with the value we are casting from, and proceed with
+ that temporary. We can't cast to a reference type,
+ so that simplifies the initialization to something
+ we can manage. */
+ tree temp = get_temp_name (TREE_TYPE (instance), 0);
+ if (IS_AGGR_TYPE (TREE_TYPE (instance)))
+ expand_aggr_init (temp, instance, 0);
+ else
+ {
+ store_init_value (temp, instance);
+ expand_decl_init (temp);
+ }
+ instance = temp;
+ instance_ptr = build_unary_op (ADDR_EXPR, instance, 0);
+ }
+ else
+ {
+ if (TREE_CODE (instance) != CALL_EXPR)
+ my_friendly_abort (125);
+ if (TYPE_NEEDS_CONSTRUCTING (basetype))
+ instance = build_cplus_new (basetype, instance, 0);
+ else
+ {
+ instance = get_temp_name (basetype, 0);
+ TREE_ADDRESSABLE (instance) = 1;
+ }
+ instance_ptr = build_unary_op (ADDR_EXPR, instance, 0);
+ }
+ /* @@ Should we call comp_target_types here? */
+ inst_ptr_basetype = TREE_TYPE (TREE_TYPE (instance_ptr));
+ if (TYPE_MAIN_VARIANT (basetype) == TYPE_MAIN_VARIANT (inst_ptr_basetype))
+ basetype = inst_ptr_basetype;
+ else
+ {
+ instance_ptr = convert (TYPE_POINTER_TO (basetype), instance_ptr);
+ if (instance_ptr == error_mark_node)
+ return error_mark_node;
+ }
+ }
+
+ /* After converting `instance_ptr' above, `inst_ptr_basetype' was
+ not updated, so we use `basetype' instead. */
+ if (basetype_path == NULL_TREE
+ && IS_SIGNATURE (basetype))
+ basetype_path = TYPE_BINFO (basetype);
+ else if (basetype_path == NULL_TREE ||
+ BINFO_TYPE (basetype_path) != TYPE_MAIN_VARIANT (inst_ptr_basetype))
+ basetype_path = TYPE_BINFO (inst_ptr_basetype);
+
+ result = build_field_call (basetype_path, instance_ptr, name, parms);
+ if (result)
+ return result;
+
+ if (!(flags & LOOKUP_NONVIRTUAL) && TYPE_VIRTUAL_P (basetype))
+ {
+ if (TREE_SIDE_EFFECTS (instance_ptr))
+ {
+	      /* This is needed because the instance provides the base
+		 of the virtual function table.  Without a SAVE_EXPR,
+		 the function we are building may be called twice, or
+		 side effects on the instance variable (such as a
+		 post-increment) may happen twice.  */
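+	      /* For illustration: in `(p++)->f ()', with `f' virtual,
+		 the post-increment must happen exactly once, even
+		 though the pointer is used both to fetch the vtable
+		 and as the `this' argument.  */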
+ instance_ptr = save_expr (instance_ptr);
+ instance = build_indirect_ref (instance_ptr, NULL_PTR);
+ }
+ else if (TREE_CODE (TREE_TYPE (instance)) == POINTER_TYPE)
+ {
+ /* This happens when called for operator new (). */
+ instance = build_indirect_ref (instance, NULL_PTR);
+ }
+
+ need_vtbl = maybe_needed;
+ }
+ }
+
+ if (TYPE_SIZE (basetype) == 0)
+ {
+ /* This is worth complaining about, I think. */
+ cp_error ("cannot lookup method in incomplete type `%T'", basetype);
+ return error_mark_node;
+ }
+
+ save_basetype = TYPE_MAIN_VARIANT (basetype);
+
+#if 0
+ if (all_virtual == 1
+ && (! strncmp (IDENTIFIER_POINTER (name), OPERATOR_METHOD_FORMAT,
+ OPERATOR_METHOD_LENGTH)
+ || instance_ptr == NULL_TREE
+ || (TYPE_OVERLOADS_METHOD_CALL_EXPR (basetype) == 0)))
+ all_virtual = 0;
+#endif
+
+ last = NULL_TREE;
+ for (parmtypes = NULL_TREE, parm = parms; parm; parm = TREE_CHAIN (parm))
+ {
+ tree t = TREE_TYPE (TREE_VALUE (parm));
+ if (TREE_CODE (t) == OFFSET_TYPE)
+ {
+ /* Convert OFFSET_TYPE entities to their normal selves. */
+ TREE_VALUE (parm) = resolve_offset_ref (TREE_VALUE (parm));
+ t = TREE_TYPE (TREE_VALUE (parm));
+ }
+ if (TREE_CODE (TREE_VALUE (parm)) == OFFSET_REF
+ && TREE_CODE (t) == METHOD_TYPE)
+ {
+ TREE_VALUE (parm) = build_unary_op (ADDR_EXPR, TREE_VALUE (parm), 0);
+ }
+ if (TREE_CODE (t) == ARRAY_TYPE)
+ {
+ /* Perform the conversion from ARRAY_TYPE to POINTER_TYPE in place.
+ This eliminates needless calls to `compute_conversion_costs'. */
+ TREE_VALUE (parm) = default_conversion (TREE_VALUE (parm));
+ t = TREE_TYPE (TREE_VALUE (parm));
+ }
+ if (t == error_mark_node)
+ return error_mark_node;
+ last = build_tree_list (NULL_TREE, t);
+ parmtypes = chainon (parmtypes, last);
+ }
+
+ if (instance)
+ {
+ /* TREE_READONLY (instance) fails for references. */
+ constp = TYPE_READONLY (TREE_TYPE (TREE_TYPE (instance_ptr)));
+ volatilep = TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (instance_ptr)));
+ parms = tree_cons (NULL_TREE, instance_ptr, parms);
+ }
+ else
+ {
+ /* Raw constructors are always in charge. */
+ if (TYPE_USES_VIRTUAL_BASECLASSES (basetype)
+ && ! (flags & LOOKUP_HAS_IN_CHARGE))
+ {
+ flags |= LOOKUP_HAS_IN_CHARGE;
+ parms = tree_cons (NULL_TREE, integer_one_node, parms);
+ parmtypes = tree_cons (NULL_TREE, integer_type_node, parmtypes);
+ }
+
+ if (flag_this_is_variable > 0)
+ {
+ constp = 0;
+ volatilep = 0;
+ parms = tree_cons (NULL_TREE, build1 (NOP_EXPR, TYPE_POINTER_TO (basetype), integer_zero_node), parms);
+ }
+ else
+ {
+ constp = 0;
+ volatilep = 0;
+ instance_ptr = build_new (NULL_TREE, basetype, void_type_node, 0);
+ if (instance_ptr == error_mark_node)
+ return error_mark_node;
+ instance_ptr = save_expr (instance_ptr);
+ TREE_CALLS_NEW (instance_ptr) = 1;
+ instance = build_indirect_ref (instance_ptr, NULL_PTR);
+
+ /* If it's a default argument initialized from a ctor, what we get
+ from instance_ptr will match the arglist for the FUNCTION_DECL
+ of the constructor. */
+ if (parms && TREE_CODE (TREE_VALUE (parms)) == CALL_EXPR
+ && TREE_OPERAND (TREE_VALUE (parms), 1)
+ && TREE_CALLS_NEW (TREE_VALUE (TREE_OPERAND (TREE_VALUE (parms), 1))))
+ parms = build_tree_list (NULL_TREE, instance_ptr);
+ else
+ parms = tree_cons (NULL_TREE, instance_ptr, parms);
+ }
+ }
+
+ parmtypes = tree_cons (NULL_TREE, TREE_TYPE (instance_ptr), parmtypes);
+
+ if (last == NULL_TREE)
+ last = parmtypes;
+
+ /* Look up function name in the structure type definition. */
+
+ if ((IDENTIFIER_HAS_TYPE_VALUE (name)
+ && ! IDENTIFIER_OPNAME_P (name)
+ && IS_AGGR_TYPE (IDENTIFIER_TYPE_VALUE (name))
+ && TREE_CODE (IDENTIFIER_TYPE_VALUE (name)) != UNINSTANTIATED_P_TYPE)
+ || name == constructor_name (basetype))
+ {
+ tree tmp = NULL_TREE;
+ if (IDENTIFIER_TYPE_VALUE (name) == basetype
+ || name == constructor_name (basetype))
+ tmp = TYPE_BINFO (basetype);
+ else
+ tmp = get_binfo (IDENTIFIER_TYPE_VALUE (name), basetype, 0);
+
+ if (tmp != NULL_TREE)
+ {
+ name_kind = "constructor";
+
+ if (TYPE_USES_VIRTUAL_BASECLASSES (basetype)
+ && ! (flags & LOOKUP_HAS_IN_CHARGE))
+ {
+ /* Constructors called for initialization
+ only are never in charge. */
+ tree tmplist;
+
+ flags |= LOOKUP_HAS_IN_CHARGE;
+ tmplist = tree_cons (NULL_TREE, integer_zero_node,
+ TREE_CHAIN (parms));
+ TREE_CHAIN (parms) = tmplist;
+ tmplist = tree_cons (NULL_TREE, integer_type_node, TREE_CHAIN (parmtypes));
+ TREE_CHAIN (parmtypes) = tmplist;
+ }
+ basetype = BINFO_TYPE (tmp);
+ }
+ else
+ name_kind = "method";
+ }
+ else
+ name_kind = "method";
+
+ if (basetype_path == NULL_TREE
+ || BINFO_TYPE (basetype_path) != TYPE_MAIN_VARIANT (basetype))
+ basetype_path = TYPE_BINFO (basetype);
+ result = lookup_fnfields (basetype_path, name,
+ (flags & LOOKUP_COMPLAIN));
+ if (result == error_mark_node)
+ return error_mark_node;
+
+
+ /* Now, go look for this method name. We do not find destructors here.
+
+ Putting `void_list_node' on the end of the parmtypes
+ fakes out `build_decl_overload' into doing the right thing. */
+ TREE_CHAIN (last) = void_list_node;
+ method_name = build_decl_overload (name, parmtypes,
+ 1 + (name == constructor_name (save_basetype)
+ || name == constructor_name_full (save_basetype)));
+ TREE_CHAIN (last) = NULL_TREE;
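+  /* For illustration: for `a.f (1)' on a `struct A', the name built
+     above is the mangled form of `A::f (int)' -- something like
+     `f__1Ai' in the traditional g++ encoding -- which pass 0 below
+     compares against each candidate's DECL_ASSEMBLER_NAME.  */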
+
+ for (pass = 0; pass < 2; pass++)
+ {
+ struct candidate *candidates;
+ struct candidate *cp;
+ int len;
+ unsigned best = 1;
+
+ /* This increments every time we go up the type hierarchy.
+ The idea is to prefer a function of the derived class if possible. */
+ int b_or_d = 0;
+
+ baselink = result;
+
+ if (pass > 0)
+ {
+ candidates
+ = (struct candidate *) alloca ((ever_seen+1)
+ * sizeof (struct candidate));
+ bzero (candidates, (ever_seen + 1) * sizeof (struct candidate));
+ cp = candidates;
+ len = list_length (parms);
+ ever_seen = 0;
+
+ /* First see if a global function has a shot at it. */
+ if (flags & LOOKUP_GLOBAL)
+ {
+ tree friend_parms;
+ tree parm = instance_ptr;
+
+ if (TREE_CODE (TREE_TYPE (parm)) == REFERENCE_TYPE)
+ {
+ /* TREE_VALUE (parms) may have been modified by now;
+ restore it to its original value. */
+ TREE_VALUE (parms) = parm;
+ friend_parms = parms;
+ }
+ else if (TREE_CODE (TREE_TYPE (parm)) == POINTER_TYPE)
+ {
+ tree new_type;
+ parm = build_indirect_ref (parm, "friendifying parms (compiler error)");
+ new_type = c_build_type_variant (TREE_TYPE (parm), constp,
+ volatilep);
+ new_type = build_reference_type (new_type);
+ parm = convert (new_type, parm);
+ friend_parms = tree_cons (NULL_TREE, parm, TREE_CHAIN (parms));
+ }
+ else
+ my_friendly_abort (167);
+
+ cp->h_len = len;
+ cp->harshness = (struct harshness_code *)
+ alloca ((len + 1) * sizeof (struct harshness_code));
+
+ result = build_overload_call (name, friend_parms, 0, cp);
+ /* If it turns out to be the one we were actually looking for
+	     (it was probably a friend function), then return the
+ good result. */
+ if (TREE_CODE (result) == CALL_EXPR)
+ return result;
+
+ while ((cp->h.code & EVIL_CODE) == 0)
+ {
+ /* non-standard uses: set the field to 0 to indicate
+ we are using a non-member function. */
+ cp->u.field = 0;
+ if (cp->harshness[len].distance == 0
+ && cp->h.code < best)
+ best = cp->h.code;
+ cp += 1;
+ }
+ }
+ }
+
+ while (baselink)
+ {
+ /* We have a hit (of sorts). If the parameter list is
+ "error_mark_node", or some variant thereof, it won't
+	     match any methods.  Since we have verified that there is
+ some method vaguely matching this one (in name at least),
+ silently return.
+
+ Don't stop for friends, however. */
+ basetype_path = TREE_PURPOSE (baselink);
+
+ function = TREE_VALUE (baselink);
+ if (TREE_CODE (basetype_path) == TREE_LIST)
+ basetype_path = TREE_VALUE (basetype_path);
+ basetype = BINFO_TYPE (basetype_path);
+
+ /* Cast the instance variable if necessary. */
+ if (basetype != TYPE_MAIN_VARIANT
+ (TREE_TYPE (TREE_TYPE (TREE_VALUE (parms)))))
+ {
+ if (basetype == save_basetype)
+ TREE_VALUE (parms) = instance_ptr;
+ else
+ {
+ tree type = build_pointer_type
+ (build_type_variant (basetype, constp, volatilep));
+ TREE_VALUE (parms) = convert_force (type, instance_ptr);
+ }
+ }
+
+ /* FIXME: this is the wrong place to get an error. Hopefully
+ the access-control rewrite will make this change more cleanly. */
+ if (TREE_VALUE (parms) == error_mark_node)
+ return error_mark_node;
+
+ if (DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (function)))
+ function = DECL_CHAIN (function);
+
+ for (; function; function = DECL_CHAIN (function))
+ {
+#ifdef GATHER_STATISTICS
+ n_inner_fields_searched++;
+#endif
+ ever_seen++;
+ if (pass > 0)
+ found_fns = tree_cons (NULL_TREE, function, found_fns);
+
+ /* Not looking for friends here. */
+ if (TREE_CODE (TREE_TYPE (function)) == FUNCTION_TYPE
+ && ! DECL_STATIC_FUNCTION_P (function))
+ continue;
+
+ if (pass == 0
+ && DECL_ASSEMBLER_NAME (function) == method_name)
+ goto found;
+
+ if (pass > 0)
+ {
+ tree these_parms = parms;
+
+#ifdef GATHER_STATISTICS
+ n_inner_fields_searched++;
+#endif
+ cp->h_len = len;
+ cp->harshness = (struct harshness_code *)
+ alloca ((len + 1) * sizeof (struct harshness_code));
+
+ if (DECL_STATIC_FUNCTION_P (function))
+ these_parms = TREE_CHAIN (these_parms);
+ compute_conversion_costs (function, these_parms, cp, len);
+
+ if ((cp->h.code & EVIL_CODE) == 0)
+ {
+ cp->u.field = function;
+ cp->function = function;
+ cp->basetypes = basetype_path;
+
+ /* No "two-level" conversions. */
+ if (flags & LOOKUP_NO_CONVERSION
+ && (cp->h.code & USER_CODE))
+ continue;
+
+ /* If we used default parameters, we must
+ check to see whether anyone else might
+ use them also, and report a possible
+ ambiguity. */
+ if (! TYPE_USES_MULTIPLE_INHERITANCE (save_basetype)
+ && cp->harshness[len].distance == 0
+ && cp->h.code < best)
+ {
+ if (! DECL_STATIC_FUNCTION_P (function))
+ TREE_VALUE (parms) = cp->arg;
+ if (best == 1)
+ goto found_and_maybe_warn;
+ }
+ cp++;
+ }
+ }
+ }
+ /* Now we have run through one link's member functions.
+	     Arrange to head-insert this link's links.  */
+ baselink = next_baselink (baselink);
+ b_or_d += 1;
+ /* Don't grab functions from base classes. lookup_fnfield will
+ do the work to get us down into the right place. */
+ baselink = NULL_TREE;
+ }
+ if (pass == 0)
+ {
+ tree igv = lookup_name_nonclass (name);
+
+	  /* No exact match could be found.  Now try to find a match
+ using default conversions. */
+ if ((flags & LOOKUP_GLOBAL) && igv)
+ {
+ if (TREE_CODE (igv) == FUNCTION_DECL)
+ ever_seen += 1;
+ else if (TREE_CODE (igv) == TREE_LIST)
+ ever_seen += count_functions (igv);
+ }
+
+ if (ever_seen == 0)
+ {
+ if ((flags & (LOOKUP_SPECULATIVELY|LOOKUP_COMPLAIN))
+ == LOOKUP_SPECULATIVELY)
+ return NULL_TREE;
+
+ TREE_CHAIN (last) = void_list_node;
+ if (flags & LOOKUP_GLOBAL)
+ cp_error ("no global or member function `%D(%A)' defined",
+ name, parmtypes);
+ else
+ cp_error ("no member function `%T::%D(%A)' defined",
+ save_basetype, name, TREE_CHAIN (parmtypes));
+ return error_mark_node;
+ }
+ continue;
+ }
+
+ if (cp - candidates != 0)
+ {
+	  /* Rank from worst to best.  Then CP will point to the best one.
+ Private fields have their bits flipped. For unsigned
+ numbers, this should make them look very large.
+ If the best alternate has a (signed) negative value,
+ then all we ever saw were private members. */
+ if (cp - candidates > 1)
+ {
+ int n_candidates = cp - candidates;
+ TREE_VALUE (parms) = instance_ptr;
+ cp = ideal_candidate (save_basetype, candidates,
+ n_candidates, parms, len);
+ if (cp == (struct candidate *)0)
+ {
+ if (flags & LOOKUP_COMPLAIN)
+ {
+ cp_error ("call of overloaded %s `%D' is ambiguous",
+ name_kind, name);
+ print_n_candidates (candidates, n_candidates);
+ }
+ return error_mark_node;
+ }
+ if (cp->h.code & EVIL_CODE)
+ return error_mark_node;
+ }
+ else if (cp[-1].h.code & EVIL_CODE)
+ {
+ if (flags & LOOKUP_COMPLAIN)
+ cp_error ("ambiguous type conversion requested for %s `%D'",
+ name_kind, name);
+ return error_mark_node;
+ }
+ else
+ cp--;
+
+ /* The global function was the best, so use it. */
+ if (cp->u.field == 0)
+ {
+ /* We must convert the instance pointer into a reference type.
+		 Global overloaded functions can take only aggregate
+		 objects (which come for free from references) or
+		 reference data types anyway.  */
+ TREE_VALUE (parms) = copy_node (instance_ptr);
+ TREE_TYPE (TREE_VALUE (parms)) = build_reference_type (TREE_TYPE (TREE_TYPE (instance_ptr)));
+ return build_function_call (cp->function, parms);
+ }
+
+ function = cp->function;
+ basetype_path = cp->basetypes;
+ if (! DECL_STATIC_FUNCTION_P (function))
+ TREE_VALUE (parms) = cp->arg;
+ goto found_and_maybe_warn;
+ }
+
+ if (flags & (LOOKUP_COMPLAIN|LOOKUP_SPECULATIVELY))
+ {
+ if ((flags & (LOOKUP_SPECULATIVELY|LOOKUP_COMPLAIN))
+ == LOOKUP_SPECULATIVELY)
+ return NULL_TREE;
+
+ if (DECL_STATIC_FUNCTION_P (cp->function))
+ parms = TREE_CHAIN (parms);
+ if (ever_seen)
+ {
+ if (flags & LOOKUP_SPECULATIVELY)
+ return NULL_TREE;
+ if (static_call_context
+ && TREE_CODE (TREE_TYPE (cp->function)) == METHOD_TYPE)
+ cp_error ("object missing in call to `%D'", cp->function);
+ else if (ever_seen > 1)
+ {
+ TREE_CHAIN (last) = void_list_node;
+ cp_error ("no matching function for call to `%T::%D (%A)'",
+ TREE_TYPE (TREE_TYPE (instance_ptr)),
+ name, TREE_CHAIN (parmtypes));
+ TREE_CHAIN (last) = NULL_TREE;
+ print_candidates (found_fns);
+ }
+ else
+ report_type_mismatch (cp, parms, name_kind);
+ return error_mark_node;
+ }
+
+ if ((flags & (LOOKUP_SPECULATIVELY|LOOKUP_COMPLAIN))
+ == LOOKUP_COMPLAIN)
+ {
+ cp_error ("%T has no method named %D", save_basetype, name);
+ return error_mark_node;
+ }
+ return NULL_TREE;
+ }
+ continue;
+
+ found_and_maybe_warn:
+ if ((cp->harshness[0].code & CONST_CODE)
+ /* 12.1p2: Constructors can be called for const objects. */
+ && ! DECL_CONSTRUCTOR_P (cp->function))
+ {
+ if (flags & LOOKUP_COMPLAIN)
+ {
+ cp_error_at ("non-const member function `%D'", cp->function);
+ error ("called for const object at this point in file");
+ }
+ /* Not good enough for a match. */
+ else
+ return error_mark_node;
+ }
+ goto found;
+ }
+ /* Silently return error_mark_node. */
+ return error_mark_node;
+
+ found:
+ if (flags & LOOKUP_PROTECT)
+ access = compute_access (basetype_path, function);
+
+ if (access == access_private)
+ {
+ if (flags & LOOKUP_COMPLAIN)
+ {
+ cp_error_at ("%s `%+#D' is %s", name_kind, function,
+ TREE_PRIVATE (function) ? "private"
+ : "from private base class");
+ error ("within this context");
+ }
+ return error_mark_node;
+ }
+ else if (access == access_protected)
+ {
+ if (flags & LOOKUP_COMPLAIN)
+ {
+ cp_error_at ("%s `%+#D' %s", name_kind, function,
+ TREE_PROTECTED (function) ? "is protected"
+ : "has protected accessibility");
+ error ("within this context");
+ }
+ return error_mark_node;
+ }
+
+ /* From here on down, BASETYPE is the type that INSTANCE_PTR's
+ type (if it exists) is a pointer to. */
+
+ if (DECL_ABSTRACT_VIRTUAL_P (function)
+ && instance == C_C_D
+ && DECL_CONSTRUCTOR_P (current_function_decl)
+ && ! (flags & LOOKUP_NONVIRTUAL)
+ && value_member (function, get_abstract_virtuals (basetype)))
+ cp_error ("abstract virtual `%#D' called from constructor", function);
+
+ if (IS_SIGNATURE (basetype) && static_call_context)
+ {
+ cp_error ("cannot call signature member function `%T::%D' without signature pointer/reference",
+ basetype, name);
+ return error_mark_node;
+ }
+ else if (IS_SIGNATURE (basetype))
+ return build_signature_method_call (basetype, instance, function, parms);
+
+ function = DECL_MAIN_VARIANT (function);
+ /* Declare external function if necessary. */
+ assemble_external (function);
+
+ fntype = TREE_TYPE (function);
+ if (TREE_CODE (fntype) == POINTER_TYPE)
+ fntype = TREE_TYPE (fntype);
+ basetype = DECL_CLASS_CONTEXT (function);
+
+ /* If we are referencing a virtual function from an object
+ of effectively static type, then there is no need
+ to go through the virtual function table. */
+ if (need_vtbl == maybe_needed)
+ {
+ int fixed_type = resolves_to_fixed_type_p (instance, 0);
+
+ if (all_virtual == 1
+ && DECL_VINDEX (function)
+ && may_be_remote (basetype))
+ need_vtbl = needed;
+ else if (DECL_VINDEX (function))
+ need_vtbl = fixed_type ? unneeded : needed;
+ else
+ need_vtbl = not_needed;
+ }
+
+ if (TREE_CODE (fntype) == METHOD_TYPE && static_call_context
+ && !DECL_CONSTRUCTOR_P (function))
+ {
+ /* Let's be nice to the user for now, and give reasonable
+ default behavior. */
+ instance_ptr = current_class_decl;
+ if (instance_ptr)
+ {
+ if (basetype != current_class_type)
+ {
+ tree binfo = get_binfo (basetype, current_class_type, 1);
+ if (binfo == NULL_TREE)
+ {
+ error_not_base_type (function, current_class_type);
+ return error_mark_node;
+ }
+ else if (basetype == error_mark_node)
+ return error_mark_node;
+ }
+ }
+ /* Only allow a static member function to call another static member
+ function. */
+ else if (DECL_LANG_SPECIFIC (function)
+ && !DECL_STATIC_FUNCTION_P (function))
+ {
+ cp_error ("cannot call member function `%D' without object",
+ function);
+ return error_mark_node;
+ }
+ }
+
+ value_type = TREE_TYPE (fntype) ? TREE_TYPE (fntype) : void_type_node;
+
+ if (TYPE_SIZE (value_type) == 0)
+ {
+ if (flags & LOOKUP_COMPLAIN)
+ incomplete_type_error (0, value_type);
+ return error_mark_node;
+ }
+
+ if (DECL_STATIC_FUNCTION_P (function))
+ parms = convert_arguments (NULL_TREE, TYPE_ARG_TYPES (fntype),
+ TREE_CHAIN (parms), function, LOOKUP_NORMAL);
+ else if (need_vtbl == unneeded)
+ {
+ int sub_flags = DECL_CONSTRUCTOR_P (function) ? flags : LOOKUP_NORMAL;
+ basetype = TREE_TYPE (instance);
+ if (TYPE_METHOD_BASETYPE (TREE_TYPE (function)) != TYPE_MAIN_VARIANT (basetype)
+ && TYPE_USES_COMPLEX_INHERITANCE (basetype))
+ {
+ basetype = DECL_CLASS_CONTEXT (function);
+ instance_ptr = convert_pointer_to (basetype, instance_ptr);
+ instance = build_indirect_ref (instance_ptr, NULL_PTR);
+ }
+ parms = tree_cons (NULL_TREE, instance_ptr,
+ convert_arguments (NULL_TREE, TREE_CHAIN (TYPE_ARG_TYPES (fntype)), TREE_CHAIN (parms), function, sub_flags));
+ }
+ else
+ {
+ if ((flags & LOOKUP_NONVIRTUAL) == 0)
+ basetype = DECL_CONTEXT (function);
+
+ /* First parm could be integer_zerop with casts like
+ ((Object*)0)->Object::IsA() */
+ if (!integer_zerop (TREE_VALUE (parms)))
+ {
+ /* Since we can't have inheritance with a union, doing get_binfo
+ on it won't work. We do all the convert_pointer_to_real
+ stuff to handle MI correctly...for unions, that's not
+ an issue, so we must short-circuit that extra work here. */
+ tree tmp = TREE_TYPE (TREE_TYPE (TREE_VALUE (parms)));
+ if (tmp != NULL_TREE && TREE_CODE (tmp) == UNION_TYPE)
+ instance_ptr = TREE_VALUE (parms);
+ else
+ {
+ tree binfo = get_binfo (basetype,
+ TREE_TYPE (TREE_TYPE (TREE_VALUE (parms))),
+ 0);
+ instance_ptr = convert_pointer_to_real (binfo, TREE_VALUE (parms));
+ }
+ instance_ptr
+ = convert_pointer_to (build_type_variant (basetype,
+ constp, volatilep),
+ instance_ptr);
+
+ if (TREE_CODE (instance_ptr) == COND_EXPR)
+ {
+ instance_ptr = save_expr (instance_ptr);
+ instance = build_indirect_ref (instance_ptr, NULL_PTR);
+ }
+ else if (TREE_CODE (instance_ptr) == NOP_EXPR
+ && TREE_CODE (TREE_OPERAND (instance_ptr, 0)) == ADDR_EXPR
+ && TREE_OPERAND (TREE_OPERAND (instance_ptr, 0), 0) == instance)
+ ;
+ /* The call to `convert_pointer_to' may return error_mark_node. */
+ else if (TREE_CODE (instance_ptr) == ERROR_MARK)
+ return instance_ptr;
+ else if (instance == NULL_TREE
+ || TREE_CODE (instance) != INDIRECT_REF
+ || TREE_OPERAND (instance, 0) != instance_ptr)
+ instance = build_indirect_ref (instance_ptr, NULL_PTR);
+ }
+ parms = tree_cons (NULL_TREE, instance_ptr,
+ convert_arguments (NULL_TREE, TREE_CHAIN (TYPE_ARG_TYPES (fntype)), TREE_CHAIN (parms), function, LOOKUP_NORMAL));
+ }
+
+#if 0
+ /* Constructors do not overload method calls. */
+ else if (TYPE_OVERLOADS_METHOD_CALL_EXPR (basetype)
+ && name != TYPE_IDENTIFIER (basetype)
+ && (TREE_CODE (function) != FUNCTION_DECL
+ || strncmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (function)),
+ OPERATOR_METHOD_FORMAT,
+ OPERATOR_METHOD_LENGTH))
+ && (may_be_remote (basetype) || instance != C_C_D))
+ {
+ tree fn_as_int;
+
+ parms = TREE_CHAIN (parms);
+
+ if (!all_virtual && TREE_CODE (function) == FUNCTION_DECL)
+ fn_as_int = build_unary_op (ADDR_EXPR, function, 0);
+ else
+ fn_as_int = convert (TREE_TYPE (default_conversion (function)), DECL_VINDEX (function));
+ if (all_virtual == 1)
+ fn_as_int = convert (integer_type_node, fn_as_int);
+
+ result = build_opfncall (METHOD_CALL_EXPR, LOOKUP_NORMAL, instance, fn_as_int, parms);
+
+ if (result == NULL_TREE)
+ {
+ compiler_error ("could not overload `operator->()(...)'");
+ return error_mark_node;
+ }
+ else if (result == error_mark_node)
+ return error_mark_node;
+
+#if 0
+ /* Do this if we want the result of operator->() to inherit
+ the type of the function it is subbing for. */
+ TREE_TYPE (result) = value_type;
+#endif
+
+ return result;
+ }
+#endif
+
+ if (need_vtbl == needed)
+ {
+ function = build_vfn_ref (&TREE_VALUE (parms), instance,
+ DECL_VINDEX (function));
+ TREE_TYPE (function) = build_pointer_type (fntype);
+ }
+
+ if (TREE_CODE (function) == FUNCTION_DECL)
+ GNU_xref_call (current_function_decl,
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (function)));
+
+ {
+ int is_constructor;
+
+ if (TREE_CODE (function) == FUNCTION_DECL)
+ {
+ is_constructor = DECL_CONSTRUCTOR_P (function);
+ if (DECL_INLINE (function))
+ function = build1 (ADDR_EXPR, build_pointer_type (fntype), function);
+ else
+ {
+ assemble_external (function);
+ TREE_USED (function) = 1;
+ function = default_conversion (function);
+ }
+ }
+ else
+ {
+ is_constructor = 0;
+ function = default_conversion (function);
+ }
+
+ result = build_nt (CALL_EXPR, function, parms, NULL_TREE);
+
+ TREE_TYPE (result) = value_type;
+ TREE_SIDE_EFFECTS (result) = 1;
+ TREE_RAISES (result)
+ = TYPE_RAISES_EXCEPTIONS (fntype) || (parms && TREE_RAISES (parms));
+ TREE_HAS_CONSTRUCTOR (result) = is_constructor;
+ return result;
+ }
+}
+
+/* Similar to `build_method_call', but for overloaded non-member functions.
+   The name of the function to call comes through FNNAME; exactly which
+   function that name denotes is resolved using PARMS.
+
+ Note that this function must handle simple `C' promotions,
+ as well as variable numbers of arguments (...), and
+ default arguments to boot.
+
+ If the overloading is successful, we return a tree node which
+ contains the call to the function.
+
+ If overloading produces candidates which are probable, but not definite,
+ we hold these candidates. If FINAL_CP is non-zero, then we are free
+ to assume that final_cp points to enough storage for all candidates that
+ this function might generate. The `harshness' array is preallocated for
+ the first candidate, but not for subsequent ones.
+
+ Note that the DECL_RTL of FUNCTION must be made to agree with this
+ function's new name. */
+
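+/* For illustration: given the declarations
+
+     int max (int, int);
+     double max (double, double);
+
+   a call `max (1, 2.0)' arrives here with FNNAME `max'; a conversion
+   cost is computed for each candidate and `ideal_candidate' picks the
+   winner, or the call is diagnosed as ambiguous.  */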
+tree
+build_overload_call_real (fnname, parms, flags, final_cp, buildxxx)
+ tree fnname, parms;
+ int flags;
+ struct candidate *final_cp;
+ int buildxxx;
+{
+ /* must check for overloading here */
+ tree overload_name, functions, function, parm;
+ tree parmtypes = NULL_TREE, last = NULL_TREE;
+ register tree outer;
+ int length;
+ int parmlength = list_length (parms);
+
+ struct candidate *candidates, *cp;
+
+ if (final_cp)
+ {
+ final_cp[0].h.code = 0;
+ final_cp[0].h.distance = 0;
+ final_cp[0].function = 0;
+ /* end marker. */
+ final_cp[1].h.code = EVIL_CODE;
+ }
+
+ for (parm = parms; parm; parm = TREE_CHAIN (parm))
+ {
+ register tree t = TREE_TYPE (TREE_VALUE (parm));
+
+ if (t == error_mark_node)
+ {
+ if (final_cp)
+ final_cp->h.code = EVIL_CODE;
+ return error_mark_node;
+ }
+ if (TREE_CODE (t) == ARRAY_TYPE || TREE_CODE (t) == OFFSET_TYPE)
+ {
+ /* Perform the conversion from ARRAY_TYPE to POINTER_TYPE in place.
+ Also convert OFFSET_TYPE entities to their normal selves.
+ This eliminates needless calls to `compute_conversion_costs'. */
+ TREE_VALUE (parm) = default_conversion (TREE_VALUE (parm));
+ t = TREE_TYPE (TREE_VALUE (parm));
+ }
+ last = build_tree_list (NULL_TREE, t);
+ parmtypes = chainon (parmtypes, last);
+ }
+ if (last)
+ TREE_CHAIN (last) = void_list_node;
+ else
+ parmtypes = void_list_node;
+
+ if (is_overloaded_fn (fnname))
+ {
+ functions = fnname;
+ if (TREE_CODE (fnname) == TREE_LIST)
+ fnname = TREE_PURPOSE (functions);
+ else if (TREE_CODE (fnname) == FUNCTION_DECL)
+ fnname = DECL_NAME (functions);
+ }
+ else
+ functions = lookup_name_nonclass (fnname);
+
+ if (functions == NULL_TREE)
+ {
+ if (flags & LOOKUP_SPECULATIVELY)
+ return NULL_TREE;
+ if (flags & LOOKUP_COMPLAIN)
+ error ("only member functions apply");
+ if (final_cp)
+ final_cp->h.code = EVIL_CODE;
+ return error_mark_node;
+ }
+
+ if (TREE_CODE (functions) == FUNCTION_DECL && ! IDENTIFIER_OPNAME_P (fnname))
+ {
+ functions = DECL_MAIN_VARIANT (functions);
+ if (final_cp)
+ {
+ /* We are just curious whether this is a viable alternative or
+ not. */
+ compute_conversion_costs (functions, parms, final_cp, parmlength);
+ return functions;
+ }
+ else
+ return build_function_call_real (functions, parms, 1, flags);
+ }
+
+ if (TREE_CODE (functions) == TREE_LIST
+ && TREE_VALUE (functions) == NULL_TREE)
+ {
+ if (flags & LOOKUP_SPECULATIVELY)
+ return NULL_TREE;
+
+ if (flags & LOOKUP_COMPLAIN)
+ cp_error ("function `%D' declared overloaded, but no instances of that function declared",
+ TREE_PURPOSE (functions));
+ if (final_cp)
+ final_cp->h.code = EVIL_CODE;
+ return error_mark_node;
+ }
+
+ length = count_functions (functions);
+
+ if (final_cp)
+ candidates = final_cp;
+ else
+ {
+ candidates
+ = (struct candidate *)alloca ((length+1) * sizeof (struct candidate));
+ bzero (candidates, (length + 1) * sizeof (struct candidate));
+ }
+
+ cp = candidates;
+
+ my_friendly_assert (is_overloaded_fn (functions), 169);
+
+ functions = get_first_fn (functions);
+
+ /* OUTER is the list of FUNCTION_DECLS, in a TREE_LIST. */
+ for (outer = functions; outer; outer = DECL_CHAIN (outer))
+ {
+ int template_cost = 0;
+ function = outer;
+ if (TREE_CODE (function) != FUNCTION_DECL
+ && ! (TREE_CODE (function) == TEMPLATE_DECL
+ && ! DECL_TEMPLATE_IS_CLASS (function)
+ && TREE_CODE (DECL_TEMPLATE_RESULT (function)) == FUNCTION_DECL))
+ {
+ enum tree_code code = TREE_CODE (function);
+ if (code == TEMPLATE_DECL)
+ code = TREE_CODE (DECL_TEMPLATE_RESULT (function));
+ if (code == CONST_DECL)
+ cp_error_at
+ ("enumeral value `%D' conflicts with function of same name",
+ function);
+ else if (code == VAR_DECL)
+ {
+ if (TREE_STATIC (function))
+ cp_error_at
+ ("variable `%D' conflicts with function of same name",
+ function);
+ else
+ cp_error_at
+ ("constant field `%D' conflicts with function of same name",
+ function);
+ }
+ else if (code == TYPE_DECL)
+ continue;
+ else
+ my_friendly_abort (2);
+ error ("at this point in file");
+ continue;
+ }
+ if (TREE_CODE (function) == TEMPLATE_DECL)
+ {
+ int ntparms = TREE_VEC_LENGTH (DECL_TEMPLATE_PARMS (function));
+ tree *targs = (tree *) alloca (sizeof (tree) * ntparms);
+ int i;
+
+ i = type_unification (DECL_TEMPLATE_PARMS (function), targs,
+ TYPE_ARG_TYPES (TREE_TYPE (function)),
+ parms, &template_cost, 0);
+ if (i == 0)
+ function = instantiate_template (function, targs);
+ }
+
+ if (TREE_CODE (function) == TEMPLATE_DECL)
+ {
+ /* Unconverted template -- failed match. */
+ cp->function = function;
+ cp->u.bad_arg = -4;
+ cp->h.code = EVIL_CODE;
+ }
+ else
+ {
+ struct candidate *cp2;
+
+ /* Check that this decl is not the same as a function that's in
+ the list due to some template instantiation. */
+ cp2 = candidates;
+ while (cp2 != cp)
+ if (cp2->function == function)
+ break;
+ else
+ cp2 += 1;
+ if (cp2->function == function)
+ continue;
+
+ function = DECL_MAIN_VARIANT (function);
+
+ /* Can't use alloca here, since result might be
+ passed to calling function. */
+ cp->h_len = parmlength;
+ cp->harshness = (struct harshness_code *)
+ oballoc ((parmlength + 1) * sizeof (struct harshness_code));
+
+ compute_conversion_costs (function, parms, cp, parmlength);
+
+ /* Make sure this is clear as well. */
+ cp->h.int_penalty += template_cost;
+
+ if ((cp[0].h.code & EVIL_CODE) == 0)
+ {
+ cp[1].h.code = EVIL_CODE;
+ cp++;
+ }
+ }
+ }
+
+ if (cp - candidates)
+ {
+ tree rval = error_mark_node;
+
+ /* Leave marker. */
+ cp[0].h.code = EVIL_CODE;
+ if (cp - candidates > 1)
+ {
+ struct candidate *best_cp
+ = ideal_candidate (NULL_TREE, candidates,
+ cp - candidates, parms, parmlength);
+ if (best_cp == (struct candidate *)0)
+ {
+ if (flags & LOOKUP_COMPLAIN)
+ {
+ cp_error ("call of overloaded `%D' is ambiguous", fnname);
+ print_n_candidates (candidates, cp - candidates);
+ }
+ return error_mark_node;
+ }
+ else
+ rval = best_cp->function;
+ }
+ else
+ {
+ cp -= 1;
+ if (cp->h.code & EVIL_CODE)
+ {
+ if (flags & LOOKUP_COMPLAIN)
+ error ("type conversion ambiguous");
+ }
+ else
+ rval = cp->function;
+ }
+
+ if (final_cp)
+ return rval;
+
+ return buildxxx ? build_function_call_real (rval, parms, 0, flags)
+ : build_function_call_real (rval, parms, 1, flags);
+ }
+
+ if (flags & LOOKUP_SPECULATIVELY)
+ return NULL_TREE;
+
+ if (flags & LOOKUP_COMPLAIN)
+ report_type_mismatch (cp, parms, "function",
+ decl_as_string (cp->function, 1));
+
+ return error_mark_node;
+}
+
+tree
+build_overload_call (fnname, parms, flags, final_cp)
+ tree fnname, parms;
+ int flags;
+ struct candidate *final_cp;
+{
+ return build_overload_call_real (fnname, parms, flags, final_cp, 0);
+}
+
+tree
+build_overload_call_maybe (fnname, parms, flags, final_cp)
+ tree fnname, parms;
+ int flags;
+ struct candidate *final_cp;
+{
+ return build_overload_call_real (fnname, parms, flags, final_cp, 1);
+}
diff --git a/gnu/usr.bin/cc/cc1plus/class.c b/gnu/usr.bin/cc/cc1plus/class.c
new file mode 100644
index 0000000..e715c89
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/class.c
@@ -0,0 +1,4940 @@
+/* Functions related to building classes and their related objects.
+ Copyright (C) 1987, 1992, 1993, 1994 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* High-level class interface. */
+
+#include "config.h"
+#include "tree.h"
+#include <stdio.h>
+#include "cp-tree.h"
+#include "flags.h"
+
+#include "obstack.h"
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+extern struct obstack permanent_obstack;
+
+/* This is how we tell when two virtual member functions are really the
+ same. */
+#define SAME_FN(FN1DECL, FN2DECL) (DECL_ASSEMBLER_NAME (FN1DECL) == DECL_ASSEMBLER_NAME (FN2DECL))
+
+extern void set_class_shadows PROTO ((tree));
+
+/* Way of stacking class types. */
+static tree *current_class_base, *current_class_stack;
+static int current_class_stacksize;
+int current_class_depth;
+
+struct class_level
+{
+ /* The previous class level. */
+ struct class_level *level_chain;
+
+ /* The class instance variable, as a PARM_DECL. */
+ tree decl;
+ /* The class instance variable, as an object. */
+ tree object;
+ /* The virtual function table pointer
+ for the class instance variable. */
+ tree vtable_decl;
+
+ /* Name of the current class. */
+ tree name;
+ /* Type of the current class. */
+ tree type;
+
+ /* Flags for this class level. */
+ int this_is_variable;
+ int memoized_lookups;
+ int save_memoized;
+ int unused;
+};
+
+tree current_class_decl, C_C_D; /* PARM_DECL: the class instance variable */
+tree current_vtable_decl;
+
+/* The following two can be derived from the previous one */
+tree current_class_name; /* IDENTIFIER_NODE: name of current class */
+tree current_class_type; /* _TYPE: the type of the current class */
+tree previous_class_type; /* _TYPE: the previous type that was a class */
+tree previous_class_values; /* TREE_LIST: copy of the class_shadowed list
+ when leaving an outermost class scope. */
+static tree get_vfield_name PROTO((tree));
+tree the_null_vtable_entry;
+
+/* Way of stacking language names. */
+tree *current_lang_base, *current_lang_stack;
+int current_lang_stacksize;
+
+/* Names of languages we recognize. */
+tree lang_name_c, lang_name_cplusplus;
+tree current_lang_name;
+
+/* When laying out an aggregate type, the size of the
+ basetypes (virtual and non-virtual) is passed to layout_record
+ via this node. */
+static tree base_layout_decl;
+
+/* Variables shared between class.c and call.c. */
+
+int n_vtables = 0;
+int n_vtable_entries = 0;
+int n_vtable_searches = 0;
+int n_vtable_elems = 0;
+int n_convert_harshness = 0;
+int n_compute_conversion_costs = 0;
+int n_build_method_call = 0;
+int n_inner_fields_searched = 0;
+
+/* Virtual baseclass things. */
+tree
+build_vbase_pointer (exp, type)
+ tree exp, type;
+{
+ char *name;
+
+ name = (char *) alloca (TYPE_NAME_LENGTH (type) + sizeof (VBASE_NAME) + 1);
+ sprintf (name, VBASE_NAME_FORMAT, TYPE_NAME_STRING (type));
+ return build_component_ref (exp, get_identifier (name), 0, 0);
+}
+
+/* Is the type of EXPR the complete type of the object?
+   If we might be wrong, we must be conservative and return 0.  */
+int
+complete_type_p (expr)
+ tree expr;
+{
+ tree type = TYPE_MAIN_VARIANT (TREE_TYPE (expr));
+ while (1)
+ {
+ switch (TREE_CODE (expr))
+ {
+ case SAVE_EXPR:
+ case INDIRECT_REF:
+ case ADDR_EXPR:
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ expr = TREE_OPERAND (expr, 0);
+ continue;
+
+ case CALL_EXPR:
+ if (! TREE_HAS_CONSTRUCTOR (expr))
+ break;
+ /* fall through... */
+ case VAR_DECL:
+ case FIELD_DECL:
+ if (TREE_CODE (TREE_TYPE (expr)) == ARRAY_TYPE
+ && IS_AGGR_TYPE (TREE_TYPE (TREE_TYPE (expr)))
+ && TYPE_MAIN_VARIANT (TREE_TYPE (expr)) == type)
+ return 1;
+ /* fall through... */
+ case TARGET_EXPR:
+ case PARM_DECL:
+ if (IS_AGGR_TYPE (TREE_TYPE (expr))
+ && TYPE_MAIN_VARIANT (TREE_TYPE (expr)) == type)
+ return 1;
+ /* fall through... */
+ case PLUS_EXPR:
+ default:
+ break;
+ }
+ break;
+ }
+ return 0;
+}
+
+/* Build multi-level access to EXPR using hierarchy path PATH.
+ CODE is PLUS_EXPR if we are going with the grain,
+ and MINUS_EXPR if we are not (in which case, we cannot traverse
+ virtual baseclass links).
+
+ TYPE is the type we want this path to have on exit.
+
+   ALIAS_THIS is non-zero if EXPR is an expression involving `this'.  */
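+/* For illustration: given `struct A : virtual B', converting an `A *'
+   to a `B *' goes with the grain (CODE == PLUS_EXPR) and may need to
+   load the virtual base pointer; the reverse conversion is rejected
+   below with "cannot cast up from virtual baseclass".  */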
+tree
+build_vbase_path (code, type, expr, path, alias_this)
+ enum tree_code code;
+ tree type, expr, path;
+ int alias_this;
+{
+ register int changed = 0;
+ tree last = NULL_TREE, last_virtual = NULL_TREE;
+ int nonnull = 0;
+ int fixed_type_p = resolves_to_fixed_type_p (expr, &nonnull);
+ tree null_expr = 0, nonnull_expr;
+ tree basetype;
+ tree offset = integer_zero_node;
+
+ /* We need additional logic to convert back to the unconverted type
+ (the static type of the complete object), and then convert back
+ to the type we want. Until that is done, or until we can
+ recognize when that is, we cannot do the short cut logic. (mrs) */
+  /* Do this until we can undo any previous conversions.  See net35.C
+     for a testcase.  */
+ fixed_type_p = complete_type_p (expr);
+
+ if (!fixed_type_p && TREE_SIDE_EFFECTS (expr))
+ expr = save_expr (expr);
+ nonnull_expr = expr;
+
+ if (BINFO_INHERITANCE_CHAIN (path))
+ {
+ tree reverse_path = NULL_TREE;
+
+ while (path)
+ {
+ tree r = copy_node (path);
+ BINFO_INHERITANCE_CHAIN (r) = reverse_path;
+ reverse_path = r;
+ path = BINFO_INHERITANCE_CHAIN (path);
+ }
+ path = reverse_path;
+ }
+
+ basetype = BINFO_TYPE (path);
+
+ while (path)
+ {
+ if (TREE_VIA_VIRTUAL (path))
+ {
+ last_virtual = BINFO_TYPE (path);
+ if (code == PLUS_EXPR)
+ {
+ changed = ! fixed_type_p;
+
+ if (changed)
+ {
+ extern int flag_assume_nonnull_objects;
+ tree ind;
+
+ /* We already check for ambiguous things in the caller, just
+ find a path. */
+ if (last)
+ {
+ tree binfo = get_binfo (last, TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (nonnull_expr))), 0);
+ nonnull_expr = convert_pointer_to_real (binfo, nonnull_expr);
+ }
+ ind = build_indirect_ref (nonnull_expr, NULL_PTR);
+ nonnull_expr = build_vbase_pointer (ind, last_virtual);
+ if (nonnull == 0 && !flag_assume_nonnull_objects
+ && null_expr == NULL_TREE)
+ {
+ null_expr = build1 (NOP_EXPR, TYPE_POINTER_TO (last_virtual), integer_zero_node);
+ expr = build (COND_EXPR, TYPE_POINTER_TO (last_virtual),
+ build (EQ_EXPR, integer_type_node, expr,
+ integer_zero_node),
+ null_expr, nonnull_expr);
+ }
+ }
+ /* else we'll figure out the offset below. */
+
+ /* Happens in the case of parse errors. */
+ if (nonnull_expr == error_mark_node)
+ return error_mark_node;
+ }
+ else
+ {
+ cp_error ("cannot cast up from virtual baseclass `%T'",
+ last_virtual);
+ return error_mark_node;
+ }
+ }
+ last = path;
+ path = BINFO_INHERITANCE_CHAIN (path);
+ }
+ /* LAST is now the last basetype assoc on the path. */
+
+ /* A pointer to a virtual base member of a non-null object
+ is non-null. Therefore, we only need to test for zeroness once.
+ Make EXPR the canonical expression to deal with here. */
+ if (null_expr)
+ {
+ TREE_OPERAND (expr, 2) = nonnull_expr;
+ TREE_TYPE (TREE_OPERAND (expr, 1)) = TREE_TYPE (nonnull_expr);
+ }
+ else
+ expr = nonnull_expr;
+
+ /* If we go through any virtual base pointers, make sure that
+ casts to BASETYPE from the last virtual base class use
+ the right value for BASETYPE. */
+ if (changed)
+ {
+ tree intype = TREE_TYPE (TREE_TYPE (expr));
+ if (TYPE_MAIN_VARIANT (intype) == BINFO_TYPE (last))
+ basetype = intype;
+ else
+ {
+ tree binfo = get_binfo (last, TYPE_MAIN_VARIANT (intype), 0);
+ basetype = last;
+ offset = BINFO_OFFSET (binfo);
+ }
+ }
+ else
+ {
+ if (last_virtual)
+ {
+ offset = BINFO_OFFSET (binfo_member (last_virtual,
+ CLASSTYPE_VBASECLASSES (basetype)));
+ offset = size_binop (PLUS_EXPR, offset, BINFO_OFFSET (last));
+ }
+ else
+ offset = BINFO_OFFSET (last);
+ }
+
+ if (TREE_INT_CST_LOW (offset))
+ {
+ /* For multiple inheritance: if `this' can be set by any
+ function, then it could be 0 on entry to any function.
+ Preserve such zeroness here. Otherwise, only in the
+ case of constructors need we worry, and in those cases,
+ it will be zero, or initialized to some legal value to
+ which we may add. */
+ if (nonnull == 0 && (alias_this == 0 || flag_this_is_variable > 0))
+ {
+ if (null_expr)
+ TREE_TYPE (null_expr) = type;
+ else
+ null_expr = build1 (NOP_EXPR, type, integer_zero_node);
+ if (TREE_SIDE_EFFECTS (expr))
+ expr = save_expr (expr);
+
+ return build (COND_EXPR, type,
+ build (EQ_EXPR, integer_type_node, expr, integer_zero_node),
+ null_expr,
+ build (code, type, expr, offset));
+ }
+ else return build (code, type, expr, offset);
+ }
+
+ /* Cannot change the TREE_TYPE of a NOP_EXPR here, since it may
+ be used multiple times in initialization of multiple inheritance. */
+ if (null_expr)
+ {
+ TREE_TYPE (expr) = type;
+ return expr;
+ }
+ else
+ return build1 (NOP_EXPR, type, expr);
+}
+
+/* Virtual function things. */
+
+/* Virtual functions to be dealt with after laying out our base
+   classes.  We do all overrides after we lay out virtual base classes.  */
+static tree pending_hard_virtuals;
+static int doing_hard_virtuals;
+
+/* Build an entry in the virtual function table.
+ DELTA is the offset for the `this' pointer.
+ PFN is an ADDR_EXPR containing a pointer to the virtual function.
+ Note that the index (DELTA2) in the virtual function table
+ is always 0. */
+tree
+build_vtable_entry (delta, pfn)
+ tree delta, pfn;
+{
+
+ if (flag_vtable_thunks)
+ {
+ HOST_WIDE_INT idelta = TREE_INT_CST_LOW (delta);
+ extern tree make_thunk ();
+ if (idelta)
+ {
+ pfn = build1 (ADDR_EXPR, vtable_entry_type,
+ make_thunk (pfn, idelta));
+ TREE_READONLY (pfn) = 1;
+ TREE_CONSTANT (pfn) = 1;
+ }
+#ifdef GATHER_STATISTICS
+ n_vtable_entries += 1;
+#endif
+ return pfn;
+ }
+ else
+ {
+ extern int flag_huge_objects;
+ tree elems = tree_cons (NULL_TREE, delta,
+ tree_cons (NULL_TREE, integer_zero_node,
+ build_tree_list (NULL_TREE, pfn)));
+ tree entry = build (CONSTRUCTOR, vtable_entry_type, NULL_TREE, elems);
+
+ /* DELTA is constructed by `size_int', which means it may be an
+ unsigned quantity on some platforms. Therefore, we cannot use
+ `int_fits_type_p', because when DELTA is really negative,
+ `force_fit_type' will make it look like a very large number. */
+
+ if ((TREE_INT_CST_LOW (TYPE_MAX_VALUE (delta_type_node))
+ < TREE_INT_CST_LOW (delta))
+ || (TREE_INT_CST_LOW (delta)
+ < TREE_INT_CST_LOW (TYPE_MIN_VALUE (delta_type_node))))
+ if (flag_huge_objects)
+ sorry ("object size exceeds built-in limit for virtual function table implementation");
+ else
+ sorry ("object size exceeds normal limit for virtual function table implementation, recompile all source and use -fhuge-objects");
+
+ TREE_CONSTANT (entry) = 1;
+ TREE_STATIC (entry) = 1;
+ TREE_READONLY (entry) = 1;
+
+#ifdef GATHER_STATISTICS
+ n_vtable_entries += 1;
+#endif
+
+ return entry;
+ }
+}
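+
+/* For illustration: without -fvtable-thunks each vtable slot is the
+   aggregate built above, conceptually
+
+     { delta, 0, &Class::method }	-- delta, index, pfn
+
+   whereas with thunks a slot is a bare function pointer, possibly to a
+   small thunk that adjusts `this' by DELTA before jumping to the real
+   method.  */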
+
+/* Given an object INSTANCE, return an expression which yields the
+   virtual function corresponding to IDX.  There are many special
+ cases for INSTANCE which we take care of here, mainly to avoid
+ creating extra tree nodes when we don't have to. */
+tree
+build_vfn_ref (ptr_to_instptr, instance, idx)
+ tree *ptr_to_instptr, instance;
+ tree idx;
+{
+ extern int building_cleanup;
+ tree vtbl, aref;
+ tree basetype = TREE_TYPE (instance);
+
+ if (TREE_CODE (basetype) == REFERENCE_TYPE)
+ basetype = TREE_TYPE (basetype);
+
+ if (instance == C_C_D)
+ {
+ if (current_vtable_decl == NULL_TREE
+ || current_vtable_decl == error_mark_node
+ || !UNIQUELY_DERIVED_FROM_P (DECL_FCONTEXT (CLASSTYPE_VFIELD (current_class_type)), basetype))
+ vtbl = build_indirect_ref (build_vfield_ref (instance, basetype), NULL_PTR);
+ else
+ vtbl = current_vtable_decl;
+ }
+ else
+ {
+ if (optimize)
+ {
+ /* Try to figure out what a reference refers to, and
+ access its virtual function table directly. */
+ tree ref = NULL_TREE;
+
+ if (TREE_CODE (instance) == INDIRECT_REF
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (instance, 0))) == REFERENCE_TYPE)
+ ref = TREE_OPERAND (instance, 0);
+ else if (TREE_CODE (TREE_TYPE (instance)) == REFERENCE_TYPE)
+ ref = instance;
+
+ if (ref && TREE_CODE (ref) == VAR_DECL
+ && DECL_INITIAL (ref))
+ {
+ tree init = DECL_INITIAL (ref);
+
+ while (TREE_CODE (init) == NOP_EXPR
+ || TREE_CODE (init) == NON_LVALUE_EXPR)
+ init = TREE_OPERAND (init, 0);
+ if (TREE_CODE (init) == ADDR_EXPR)
+ {
+ init = TREE_OPERAND (init, 0);
+ if (IS_AGGR_TYPE (TREE_TYPE (init))
+ && (TREE_CODE (init) == PARM_DECL
+ || TREE_CODE (init) == VAR_DECL))
+ instance = init;
+ }
+ }
+ }
+
+ if (IS_AGGR_TYPE (TREE_TYPE (instance))
+ && !IS_SIGNATURE_POINTER (TREE_TYPE (instance))
+ && !IS_SIGNATURE_REFERENCE (TREE_TYPE (instance))
+ && (TREE_CODE (instance) == RESULT_DECL
+ || TREE_CODE (instance) == PARM_DECL
+ || TREE_CODE (instance) == VAR_DECL))
+ vtbl = TYPE_BINFO_VTABLE (basetype);
+ else
+ vtbl = build_indirect_ref (build_vfield_ref (instance, basetype),
+ NULL_PTR);
+ }
+ if (!flag_vtable_thunks)
+ assemble_external (vtbl);
+ aref = build_array_ref (vtbl, idx);
+
+ /* Save the intermediate result in a SAVE_EXPR so we don't have to
+ compute each component of the virtual function pointer twice. */
+ if (!building_cleanup && TREE_CODE (aref) == INDIRECT_REF)
+ TREE_OPERAND (aref, 0) = save_expr (TREE_OPERAND (aref, 0));
+
+ if (flag_vtable_thunks)
+ return aref;
+ else
+ {
+ *ptr_to_instptr
+ = build (PLUS_EXPR, TREE_TYPE (*ptr_to_instptr),
+ *ptr_to_instptr,
+ convert (ptrdiff_type_node,
+ build_component_ref (aref, delta_identifier, 0, 0)));
+ return build_component_ref (aref, pfn_identifier, 0, 0);
+ }
+}
+
+/* Return the name of the virtual function table (as an IDENTIFIER_NODE)
+ for the given TYPE. */
+static tree
+get_vtable_name (type)
+ tree type;
+{
+ tree type_id = build_typename_overload (type);
+ char *buf = (char *)alloca (strlen (VTABLE_NAME_FORMAT)
+ + IDENTIFIER_LENGTH (type_id) + 2);
+ char *ptr = IDENTIFIER_POINTER (type_id);
+ int i;
+ for (i = 0; ptr[i] == OPERATOR_TYPENAME_FORMAT[i]; i++) ;
+#if 0
+ /* We don't take off the numbers; prepare_fresh_vtable uses the
+ DECL_ASSEMBLER_NAME for the type, which includes the number
+ in `3foo'. If we were to pull them off here, we'd end up with
+ something like `_vt.foo.3bar', instead of a uniform definition. */
+ while (ptr[i] >= '0' && ptr[i] <= '9')
+ i += 1;
+#endif
+ sprintf (buf, VTABLE_NAME_FORMAT, ptr+i);
+ return get_identifier (buf);
+}
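+
+/* For illustration: for `class foo' this yields an identifier along
+   the lines of `_vt.3foo' -- VTABLE_NAME_FORMAT supplies the prefix,
+   and the remainder is the type's encoded assembler name.  */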
+
+/* Build a virtual function table for type TYPE.
+ If BINFO is non-NULL, build the vtable starting with the initial
+ approximation that it is the same as the one which is the head of
+ the association list. */
+static tree
+build_vtable (binfo, type)
+ tree binfo, type;
+{
+ tree name = get_vtable_name (type);
+ tree virtuals, decl;
+
+ if (binfo)
+ {
+ virtuals = copy_list (BINFO_VIRTUALS (binfo));
+ decl = build_decl (VAR_DECL, name, TREE_TYPE (BINFO_VTABLE (binfo)));
+ }
+ else
+ {
+ virtuals = NULL_TREE;
+ decl = build_decl (VAR_DECL, name, void_type_node);
+ }
+
+#ifdef GATHER_STATISTICS
+ n_vtables += 1;
+ n_vtable_elems += list_length (virtuals);
+#endif
+
+ /* Set TREE_PUBLIC and TREE_EXTERN as appropriate. */
+ if (! flag_vtable_thunks)
+ import_export_vtable (decl, type);
+
+ IDENTIFIER_GLOBAL_VALUE (name) = decl = pushdecl_top_level (decl);
+ /* Initialize the association list for this type, based
+ on our first approximation. */
+ TYPE_BINFO_VTABLE (type) = decl;
+ TYPE_BINFO_VIRTUALS (type) = virtuals;
+
+ TREE_STATIC (decl) = 1;
+#ifndef WRITABLE_VTABLES
+ /* Make them READONLY by default. (mrs) */
+ TREE_READONLY (decl) = 1;
+#endif
+ /* At one time the vtable info was grabbed 2 words at a time. This
+ fails on sparc unless you have 8-byte alignment. (tiemann) */
+ DECL_ALIGN (decl) = MAX (TYPE_ALIGN (double_type_node),
+ DECL_ALIGN (decl));
+
+ /* Why is this conditional? (mrs) */
+ if (binfo && write_virtuals >= 0)
+ DECL_VIRTUAL_P (decl) = 1;
+ DECL_CONTEXT (decl) = type;
+
+ binfo = TYPE_BINFO (type);
+ SET_BINFO_NEW_VTABLE_MARKED (binfo);
+ return decl;
+}
+
+/* Given a base type PARENT, and a derived type TYPE, build
+ a name which distinguishes exactly the PARENT member of TYPE's type.
+
+ FORMAT is a string which controls how sprintf formats the name
+ we have generated.
+
+ For example, given
+
+ class A; class B; class C : A, B;
+
+ it is possible to distinguish "A" from "C's A". And given
+
+ class L;
+ class A : L; class B : L; class C : A, B;
+
+ it is possible to distinguish "L" from "A's L", and also from
+ "C's L from A".
+
+ Make sure to use the DECL_ASSEMBLER_NAME of the TYPE_NAME of the
+   type, as templates have DECL_NAMEs like `X<int>', whereas the
+   DECL_ASSEMBLER_NAME is set to be something the assembler can handle.  */
+static tree
+build_type_pathname (format, parent, type)
+ char *format;
+ tree parent, type;
+{
+ extern struct obstack temporary_obstack;
+ char *first, *base, *name;
+ int i;
+ tree id;
+
+ parent = TYPE_MAIN_VARIANT (parent);
+
+ /* Remember where to cut the obstack to. */
+ first = obstack_base (&temporary_obstack);
+
+ /* Put on TYPE+PARENT. */
+ obstack_grow (&temporary_obstack,
+ TYPE_ASSEMBLER_NAME_STRING (type),
+ TYPE_ASSEMBLER_NAME_LENGTH (type));
+#ifdef JOINER
+ obstack_1grow (&temporary_obstack, JOINER);
+#else
+ obstack_1grow (&temporary_obstack, '_');
+#endif
+ obstack_grow0 (&temporary_obstack,
+ TYPE_ASSEMBLER_NAME_STRING (parent),
+ TYPE_ASSEMBLER_NAME_LENGTH (parent));
+ i = obstack_object_size (&temporary_obstack);
+ base = obstack_base (&temporary_obstack);
+ obstack_finish (&temporary_obstack);
+
+ /* Put on FORMAT+TYPE+PARENT. */
+ obstack_blank (&temporary_obstack, strlen (format) + i + 1);
+ name = obstack_base (&temporary_obstack);
+ sprintf (name, format, base);
+ id = get_identifier (name);
+ obstack_free (&temporary_obstack, first);
+
+ return id;
+}
+
+/* Give TYPE a new virtual function table which is initialized
+ with a skeleton-copy of its original initialization. The only
+ entry that changes is the `delta' entry, so we can really
+ share a lot of structure.
+
+ FOR_TYPE is the derived type which caused this table to
+ be needed.
+
+ BINFO is the type association which provided TYPE for FOR_TYPE. */
+static void
+prepare_fresh_vtable (binfo, for_type)
+ tree binfo, for_type;
+{
+ tree basetype = BINFO_TYPE (binfo);
+ tree orig_decl = BINFO_VTABLE (binfo);
+ /* This name is too simplistic. We can have multiple basetypes for
+ for_type, and we really want different names. (mrs) */
+ tree name = build_type_pathname (VTABLE_NAME_FORMAT, basetype, for_type);
+ tree new_decl = build_decl (VAR_DECL, name, TREE_TYPE (orig_decl));
+ tree path;
+ int result;
+
+ /* Remember which class this vtable is really for. */
+ DECL_CONTEXT (new_decl) = for_type;
+
+ TREE_STATIC (new_decl) = 1;
+ BINFO_VTABLE (binfo) = pushdecl_top_level (new_decl);
+ DECL_VIRTUAL_P (new_decl) = 1;
+#ifndef WRITABLE_VTABLES
+ /* Make them READONLY by default. (mrs) */
+ TREE_READONLY (new_decl) = 1;
+#endif
+ DECL_ALIGN (new_decl) = DECL_ALIGN (orig_decl);
+
+ /* Make fresh virtual list, so we can smash it later. */
+ BINFO_VIRTUALS (binfo) = copy_list (BINFO_VIRTUALS (binfo));
+ /* Install the value for `headof' if that's what we're doing. */
+ if (flag_dossier)
+ TREE_VALUE (TREE_CHAIN (BINFO_VIRTUALS (binfo)))
+ = build_vtable_entry (size_binop (MINUS_EXPR, integer_zero_node, BINFO_OFFSET (binfo)),
+ FNADDR_FROM_VTABLE_ENTRY (TREE_VALUE (TREE_CHAIN (BINFO_VIRTUALS (binfo)))));
+
+#ifdef GATHER_STATISTICS
+ n_vtables += 1;
+ n_vtable_elems += list_length (BINFO_VIRTUALS (binfo));
+#endif
+
+ /* Set TREE_PUBLIC and TREE_EXTERN as appropriate. */
+ if (! flag_vtable_thunks)
+ import_export_vtable (new_decl, for_type);
+
+ if (TREE_VIA_VIRTUAL (binfo))
+ my_friendly_assert (binfo == binfo_member (BINFO_TYPE (binfo),
+ CLASSTYPE_VBASECLASSES (current_class_type)),
+ 170);
+ SET_BINFO_NEW_VTABLE_MARKED (binfo);
+}
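+
+/* A sketch of when a fresh vtable is needed (hypothetical classes):
+
+ class A { virtual void f (); };
+ class B { virtual void g (); };
+ class C : public A, public B { void g (); };
+
+ C cannot share B's own vtable for its B subobject, since the
+ `delta' entries must now adjust `this' from the B subobject back
+ to the enclosing C; so the B-in-C binfo gets a skeleton copy. */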
+
+/* Access the virtual function table entry that logically
+ contains BASE_FNDECL. VIRTUALS is the virtual function table's
+ initializer. We can run off the end when dealing with virtual
+ destructors in MI situations; return NULL_TREE in that case. */
+static tree
+get_vtable_entry (virtuals, base_fndecl)
+ tree virtuals, base_fndecl;
+{
+ unsigned HOST_WIDE_INT i = (HOST_BITS_PER_WIDE_INT >= BITS_PER_WORD
+ ? (TREE_INT_CST_LOW (DECL_VINDEX (base_fndecl))
+ & (((unsigned HOST_WIDE_INT)1<<(BITS_PER_WORD-1))-1))
+ : TREE_INT_CST_LOW (DECL_VINDEX (base_fndecl)));
+
+#ifdef GATHER_STATISTICS
+ n_vtable_searches += i;
+#endif
+
+ while (i > 0 && virtuals)
+ {
+ virtuals = TREE_CHAIN (virtuals);
+ i -= 1;
+ }
+ return virtuals;
+}
+
+/* Put new entry ENTRY into virtual function table initializer
+ VIRTUALS.
+
+ Also update DECL_VINDEX (FNDECL). */
+
+static void
+modify_vtable_entry (old_entry_in_list, new_entry, fndecl)
+ tree old_entry_in_list, new_entry, fndecl;
+{
+ tree base_fndecl = TREE_OPERAND (FNADDR_FROM_VTABLE_ENTRY (TREE_VALUE (old_entry_in_list)), 0);
+
+#ifdef NOTQUITE
+ cp_warning ("replaced %D with %D", DECL_ASSEMBLER_NAME (base_fndecl),
+ DECL_ASSEMBLER_NAME (fndecl));
+#endif
+ TREE_VALUE (old_entry_in_list) = new_entry;
+
+ /* Now assign virtual dispatch information, if unset. */
+ /* We can dispatch this, through any overridden base function. */
+ if (TREE_CODE (DECL_VINDEX (fndecl)) != INTEGER_CST)
+ {
+ DECL_VINDEX (fndecl) = DECL_VINDEX (base_fndecl);
+ DECL_CONTEXT (fndecl) = DECL_CONTEXT (base_fndecl);
+ }
+}
+
+/* Access the virtual function table entry i. VIRTUALS is the virtual
+ function table's initializer. */
+static tree
+get_vtable_entry_n (virtuals, i)
+ tree virtuals;
+ unsigned HOST_WIDE_INT i;
+{
+ while (i > 0)
+ {
+ virtuals = TREE_CHAIN (virtuals);
+ i -= 1;
+ }
+ return virtuals;
+}
+
+/* Add a virtual function to all the appropriate vtables for the class
+ T. If DECL_VINDEX (X) is error_mark_node, we allocate a new slot
+ in our table, since we then know that no function from another
+ vtable is overridden by X.
+ HAS_VIRTUAL keeps track of how many virtuals there are in our main
+ vtable for the type, and we build upon the PENDING_VIRTUALS list
+ and return it. */
+static tree
+add_virtual_function (pending_virtuals, has_virtual, fndecl, t)
+ tree pending_virtuals;
+ int *has_virtual;
+ tree fndecl;
+ tree t; /* Structure type. */
+{
+ /* FUNCTION_TYPEs and OFFSET_TYPEs no longer freely
+ convert to void *. Make such a conversion here. */
+ tree vfn = build1 (ADDR_EXPR, vfunc_ptr_type_node, fndecl);
+ TREE_CONSTANT (vfn) = 1;
+
+#ifndef DUMB_USER
+ if (current_class_type == 0)
+ cp_warning ("internal problem, current_class_type is zero when adding `%D', please report",
+ fndecl);
+ if (current_class_type && t != current_class_type)
+ cp_warning ("internal problem, current_class_type differs when adding `%D', please report",
+ fndecl);
+#endif
+
+ if (!flag_vtable_thunks)
+ TREE_ADDRESSABLE (fndecl) = CLASSTYPE_VTABLE_NEEDS_WRITING (t);
+
+ /* If the virtual function is a redefinition of a prior one,
+ figure out in which base class the new definition goes,
+ and if necessary, make a fresh virtual function table
+ to hold that entry. */
+ if (DECL_VINDEX (fndecl) == error_mark_node)
+ {
+ tree entry;
+
+ if (flag_dossier && *has_virtual == 0)
+ {
+ /* CLASSTYPE_DOSSIER is only used as a Boolean (NULL or not). */
+ CLASSTYPE_DOSSIER (t) = integer_one_node;
+ *has_virtual = 1;
+ }
+
+ /* Build a new INT_CST for this DECL_VINDEX. */
+ {
+ static tree index_table[256];
+ tree index;
+ int i = ++(*has_virtual);
+
+ if (i >= 256 || index_table[i] == 0)
+ {
+ index = build_int_2 (i, 0);
+ if (i < 256)
+ index_table[i] = index;
+ }
+ else
+ index = index_table[i];
+
+ /* Now assign virtual dispatch information. */
+ DECL_VINDEX (fndecl) = index;
+ DECL_CONTEXT (fndecl) = t;
+ }
+ entry = build_vtable_entry (integer_zero_node, vfn);
+ pending_virtuals = tree_cons (DECL_VINDEX (fndecl), entry, pending_virtuals);
+ }
+ /* Might already be INTEGER_CST if declared twice in the class. We
+ will give an error later, or we have already given it. */
+ else if (TREE_CODE (DECL_VINDEX (fndecl)) != INTEGER_CST)
+ {
+ /* Need an entry in some other virtual function table.
+ Deal with this after we have laid out our virtual base classes. */
+ pending_hard_virtuals = temp_tree_cons (fndecl, vfn, pending_hard_virtuals);
+ }
+ return pending_virtuals;
+}
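+
+/* For example (a sketch): given
+
+ class A { virtual void f (); };
+ class B : public A { void f (); virtual void g (); };
+
+ B::g arrives with DECL_VINDEX == error_mark_node and is given a
+ fresh slot on PENDING_VIRTUALS here, while the override B::f
+ (whose DECL_VINDEX is not yet an INTEGER_CST) is deferred onto
+ pending_hard_virtuals until the virtual bases are laid out. */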
+
+/* Obstack on which to build the vector of class methods. */
+struct obstack class_obstack;
+extern struct obstack *current_obstack;
+
+/* Add method METHOD to class TYPE. This is used when a method
+ has been defined which did not initially appear in the class definition,
+ and helps cut down on spurious error messages.
+
+ FIELDS is the entry in the METHOD_VEC of the class type where
+ the method should be added. */
+void
+add_method (type, fields, method)
+ tree type, *fields, method;
+{
+ /* We must make a copy of METHOD here, since we must be sure that
+ we have exclusive title to this method's DECL_CHAIN. */
+ tree decl;
+
+ push_obstacks (&permanent_obstack, &permanent_obstack);
+ {
+ decl = copy_node (method);
+ if (DECL_RTL (decl) == 0
+ && (!processing_template_decl
+ || !uses_template_parms (decl)))
+ {
+ make_function_rtl (decl);
+ DECL_RTL (method) = DECL_RTL (decl);
+ }
+ }
+
+ if (fields && *fields)
+ {
+ /* Take care not to hide destructor. */
+ DECL_CHAIN (decl) = DECL_CHAIN (*fields);
+ DECL_CHAIN (*fields) = decl;
+ }
+ else if (CLASSTYPE_METHOD_VEC (type) == 0)
+ {
+ tree method_vec = make_node (TREE_VEC);
+ if (TYPE_IDENTIFIER (type) == DECL_NAME (decl))
+ {
+ TREE_VEC_ELT (method_vec, 0) = decl;
+ TREE_VEC_LENGTH (method_vec) = 1;
+ }
+ else
+ {
+ /* ??? Is it possible for there to have been enough room in the
+ current chunk for the tree_vec structure but not a tree_vec
+ plus a tree*? Will this work in that case? */
+ obstack_free (current_obstack, method_vec);
+ obstack_blank (current_obstack, sizeof (struct tree_vec) + sizeof (tree *));
+ TREE_VEC_ELT (method_vec, 1) = decl;
+ TREE_VEC_LENGTH (method_vec) = 2;
+ obstack_finish (current_obstack);
+ }
+ CLASSTYPE_METHOD_VEC (type) = method_vec;
+ }
+ else
+ {
+ tree method_vec = CLASSTYPE_METHOD_VEC (type);
+ int len = TREE_VEC_LENGTH (method_vec);
+
+ /* Adding a new ctor or dtor. This is easy because our
+ METHOD_VEC always has a slot for such entries. */
+ if (TYPE_IDENTIFIER (type) == DECL_NAME (decl))
+ {
+ /* TREE_VEC_ELT (method_vec, 0) = decl; */
+ if (decl != TREE_VEC_ELT (method_vec, 0))
+ {
+ DECL_CHAIN (decl) = TREE_VEC_ELT (method_vec, 0);
+ TREE_VEC_ELT (method_vec, 0) = decl;
+ }
+ }
+ else
+ {
+ /* This is trickier. We try to extend the TREE_VEC in-place,
+ but if that does not work, we copy all its data to a new
+ TREE_VEC that's large enough. */
+ struct obstack *ob = &class_obstack;
+ tree *end = (tree *)obstack_next_free (ob);
+
+ if (end != TREE_VEC_END (method_vec))
+ {
+ ob = current_obstack;
+ TREE_VEC_LENGTH (method_vec) += 1;
+ TREE_VEC_ELT (method_vec, len) = NULL_TREE;
+ method_vec = copy_node (method_vec);
+ TREE_VEC_LENGTH (method_vec) -= 1;
+ }
+ else
+ {
+ tree tmp_vec = (tree) obstack_base (ob);
+ if (obstack_room (ob) < sizeof (tree))
+ {
+ obstack_blank (ob, sizeof (struct tree_common)
+ + tree_code_length[(int) TREE_VEC]
+ * sizeof (char *)
+ + len * sizeof (tree));
+ tmp_vec = (tree) obstack_base (ob);
+ bcopy (method_vec, tmp_vec,
+ (sizeof (struct tree_common)
+ + tree_code_length[(int) TREE_VEC] * sizeof (char *)
+ + (len-1) * sizeof (tree)));
+ method_vec = tmp_vec;
+ }
+ else
+ obstack_blank (ob, sizeof (tree));
+ }
+
+ obstack_finish (ob);
+ TREE_VEC_ELT (method_vec, len) = decl;
+ TREE_VEC_LENGTH (method_vec) = len + 1;
+ CLASSTYPE_METHOD_VEC (type) = method_vec;
+
+ if (TYPE_BINFO_BASETYPES (type) && CLASSTYPE_BASELINK_VEC (type))
+ {
+ /* ??? May be better to know whether these can be extended? */
+ tree baselink_vec = CLASSTYPE_BASELINK_VEC (type);
+
+ TREE_VEC_LENGTH (baselink_vec) += 1;
+ CLASSTYPE_BASELINK_VEC (type) = copy_node (baselink_vec);
+ TREE_VEC_LENGTH (baselink_vec) -= 1;
+
+ TREE_VEC_ELT (CLASSTYPE_BASELINK_VEC (type), len) = 0;
+ }
+ }
+ }
+ DECL_CONTEXT (decl) = type;
+ DECL_CLASS_CONTEXT (decl) = type;
+
+ pop_obstacks ();
+}
+
+/* Subroutines of finish_struct. */
+
+/* Look through the list of fields for this struct, deleting
+ duplicates as we go. This must be recursive to handle
+ anonymous unions.
+
+ FIELD is the field which may not appear anywhere in FIELDS.
+ FIELD_PTR, if non-null, is the starting point at which
+ chained deletions may take place.
+ The value returned is the first acceptable entry found
+ in FIELDS.
+
+ Note that anonymous fields which are not of UNION_TYPE are
+ not duplicates; they are just anonymous fields. This happens
+ when we have unnamed bitfields, for example. */
+static tree
+delete_duplicate_fields_1 (field, field_ptr, fields)
+ tree field, *field_ptr, fields;
+{
+ tree x;
+ tree prev = field_ptr ? *field_ptr : 0;
+ if (DECL_NAME (field) == 0)
+ {
+ if (TREE_CODE (TREE_TYPE (field)) != UNION_TYPE)
+ return fields;
+
+ for (x = TYPE_FIELDS (TREE_TYPE (field)); x; x = TREE_CHAIN (x))
+ fields = delete_duplicate_fields_1 (x, field_ptr, fields);
+ if (prev)
+ TREE_CHAIN (prev) = fields;
+ return fields;
+ }
+ else
+ {
+ for (x = fields; x; prev = x, x = TREE_CHAIN (x))
+ {
+ if (DECL_NAME (x) == 0)
+ {
+ if (TREE_CODE (TREE_TYPE (x)) != UNION_TYPE)
+ continue;
+ TYPE_FIELDS (TREE_TYPE (x))
+ = delete_duplicate_fields_1 (field, (tree *)0, TYPE_FIELDS (TREE_TYPE (x)));
+ if (TYPE_FIELDS (TREE_TYPE (x)) == 0)
+ {
+ if (prev == 0)
+ fields = TREE_CHAIN (fields);
+ else
+ TREE_CHAIN (prev) = TREE_CHAIN (x);
+ }
+ }
+ else
+ {
+ if (DECL_NAME (field) == DECL_NAME (x))
+ {
+ if (TREE_CODE (field) == CONST_DECL
+ && TREE_CODE (x) == CONST_DECL)
+ cp_error_at ("duplicate enum value `%D'", x);
+ else if (TREE_CODE (field) == CONST_DECL
+ || TREE_CODE (x) == CONST_DECL)
+ cp_error_at ("duplicate field `%D' (as enum and non-enum)",
+ x);
+ else if (TREE_CODE (field) == TYPE_DECL
+ && TREE_CODE (x) == TYPE_DECL)
+ cp_error_at ("duplicate class scope type `%D'", x);
+ else if (TREE_CODE (field) == TYPE_DECL
+ || TREE_CODE (x) == TYPE_DECL)
+ cp_error_at ("duplicate field `%D' (as type and non-type)",
+ x);
+ else
+ cp_error_at ("duplicate member `%D'", x);
+ if (prev == 0)
+ fields = TREE_CHAIN (fields);
+ else
+ TREE_CHAIN (prev) = TREE_CHAIN (x);
+ }
+ }
+ }
+ }
+ return fields;
+}
+
+static void
+delete_duplicate_fields (fields)
+ tree fields;
+{
+ tree x;
+ for (x = fields; x && TREE_CHAIN (x); x = TREE_CHAIN (x))
+ TREE_CHAIN (x) = delete_duplicate_fields_1 (x, &x, TREE_CHAIN (x));
+}
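+
+/* For instance (a sketch), both of these draw the errors above:
+
+ struct S { int i; int i; }; // duplicate member `i'
+ struct T { int j; union { int j; }; }; // duplicate reached through
+ // the anonymous union
+ */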
+
+/* Change the access of FDECL to ACCESS in T.
+ Return 1 if change was legit, otherwise return 0. */
+static int
+alter_access (t, fdecl, access)
+ tree t;
+ tree fdecl;
+ enum access_type access;
+{
+ tree elem = purpose_member (t, DECL_ACCESS (fdecl));
+ if (elem && TREE_VALUE (elem) != (tree)access)
+ {
+ if (TREE_CODE (TREE_TYPE (fdecl)) == FUNCTION_DECL)
+ {
+ cp_error_at ("conflicting access specifications for method `%D', ignored", TREE_TYPE (fdecl));
+ }
+ else
+ error ("conflicting access specifications for field `%s', ignored",
+ IDENTIFIER_POINTER (DECL_NAME (fdecl)));
+ }
+ else if (TREE_PRIVATE (fdecl) && access != access_private)
+ cp_error_at ("cannot make private `%D' non-private", fdecl);
+ else if (TREE_PROTECTED (fdecl))
+ {
+ if (access == access_public)
+ cp_error_at ("cannot make protected `%D' public", fdecl);
+ goto alter;
+ }
+ /* ARM 11.3: an access declaration may not be used to restrict access
+ to a member that is accessible in the base class. */
+ else if (TREE_PUBLIC (fdecl)
+ && (access == access_private
+ || access == access_protected))
+ cp_error_at ("cannot reduce access of public member `%D'", fdecl);
+ else if (elem == NULL_TREE)
+ {
+ alter:
+ DECL_ACCESS (fdecl) = tree_cons (t, (tree)access,
+ DECL_ACCESS (fdecl));
+ return 1;
+ }
+ return 0;
+}
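+
+/* The checks above implement ARM 11.3 for access declarations; a
+ sketch:
+
+ class B { public: int i; int j; };
+ class D : private B {
+ public:
+ B::i; // OK, restores access to `i'
+ private:
+ B::j; // error: cannot reduce access of public member
+ };
+ */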
+
+/* Return the offset to the main vtable for a given base BINFO. */
+tree
+get_vfield_offset (binfo)
+ tree binfo;
+{
+ return size_binop (PLUS_EXPR,
+ size_binop (FLOOR_DIV_EXPR,
+ DECL_FIELD_BITPOS (CLASSTYPE_VFIELD (BINFO_TYPE (binfo))),
+ size_int (BITS_PER_UNIT)),
+ BINFO_OFFSET (binfo));
+}
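+
+/* E.g., if the vfield lies at bit 0 of a base laid out at byte
+ offset 8 in the object, this returns 0/BITS_PER_UNIT + 8 = 8
+ (a byte offset, as a size tree). */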
+
+/* Get the offset to the start of the original binfo that we derived this
+ binfo from. */
+tree get_derived_offset (binfo)
+ tree binfo;
+{
+ tree offset1 = get_vfield_offset (TYPE_BINFO (BINFO_TYPE (binfo)));
+ tree offset2;
+ int i;
+ while (BINFO_BASETYPES (binfo)
+ && (i=CLASSTYPE_VFIELD_PARENT (BINFO_TYPE (binfo))) != -1)
+ {
+ tree binfos = BINFO_BASETYPES (binfo);
+ binfo = TREE_VEC_ELT (binfos, i);
+ }
+ offset2 = get_vfield_offset (TYPE_BINFO (BINFO_TYPE (binfo)));
+ return size_binop (MINUS_EXPR, offset1, offset2);
+}
+
+/* If FOR_TYPE needs to reinitialize virtual function table pointers
+ for TYPE's sub-objects, add such reinitializations to BASE_INIT_LIST.
+ Returns BASE_INIT_LIST appropriately modified. */
+
+static tree
+maybe_fixup_vptrs (for_type, binfo, base_init_list)
+ tree for_type, binfo, base_init_list;
+{
+ /* Now reinitialize any slots that don't fall under our virtual
+ function table pointer. */
+ tree vfields = CLASSTYPE_VFIELDS (BINFO_TYPE (binfo));
+ while (vfields)
+ {
+ tree basetype = VF_NORMAL_VALUE (vfields)
+ ? TYPE_MAIN_VARIANT (VF_NORMAL_VALUE (vfields))
+ : VF_BASETYPE_VALUE (vfields);
+
+ tree base_binfo = get_binfo (basetype, for_type, 0);
+ /* Punt until this is implemented. */
+ if (1 /* BINFO_MODIFIED (base_binfo) */)
+ {
+ tree base_offset = get_vfield_offset (base_binfo);
+ if (! tree_int_cst_equal (base_offset, get_vfield_offset (TYPE_BINFO (for_type)))
+ && ! tree_int_cst_equal (base_offset, get_vfield_offset (binfo)))
+ base_init_list = tree_cons (error_mark_node, base_binfo,
+ base_init_list);
+ }
+ vfields = TREE_CHAIN (vfields);
+ }
+ return base_init_list;
+}
+
+/* If TYPE does not have a constructor, then the compiler must
+ manually deal with all of the initialization this type requires.
+
+ If a base initializer exists only to fill in the virtual function
+ table pointer, then we mark that fact with the TREE_ADDRESSABLE bit.
+ This way, we avoid multiple initializations of the same field by
+ each virtual function table up the class hierarchy.
+
+ Virtual base class pointers are not initialized here. They are
+ initialized only at the "top level" of object creation. If we
+ initialized them here, we would have to skip a lot of work. */
+
+static void
+build_class_init_list (type)
+ tree type;
+{
+ tree base_init_list = NULL_TREE;
+ tree member_init_list = NULL_TREE;
+
+ /* Since we build member_init_list and base_init_list using
+ tree_cons, the lists come out in reverse order; member_init_list
+ is nreversed just below. */
+ tree x;
+ tree binfos = BINFO_BASETYPES (TYPE_BINFO (type));
+ int i, n_baseclasses = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ for (x = TYPE_FIELDS (type); x; x = TREE_CHAIN (x))
+ {
+ if (TREE_CODE (x) != FIELD_DECL)
+ continue;
+
+ if (TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (x))
+ || DECL_INITIAL (x) != NULL_TREE)
+ member_init_list = tree_cons (x, type, member_init_list);
+ }
+ member_init_list = nreverse (member_init_list);
+
+ /* We will end up doing this last. Need special marker
+ to avoid infinite regress. */
+ if (TYPE_VIRTUAL_P (type))
+ {
+ base_init_list = build_tree_list (error_mark_node, TYPE_BINFO (type));
+ if (CLASSTYPE_NEEDS_VIRTUAL_REINIT (type) == 0)
+ TREE_VALUE (base_init_list) = NULL_TREE;
+ TREE_ADDRESSABLE (base_init_list) = 1;
+ }
+
+ /* Each base class which needs to have initialization
+ of some kind gets to make such requests known here. */
+ for (i = n_baseclasses-1; i >= 0; i--)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ tree blist;
+
+ /* Don't initialize virtual baseclasses this way. */
+ if (TREE_VIA_VIRTUAL (base_binfo))
+ continue;
+
+ if (TYPE_HAS_CONSTRUCTOR (BINFO_TYPE (base_binfo)))
+ {
+ /* ...and the last shall come first... */
+ base_init_list = maybe_fixup_vptrs (type, base_binfo, base_init_list);
+ base_init_list = tree_cons (NULL_TREE, base_binfo, base_init_list);
+ continue;
+ }
+
+ if ((blist = CLASSTYPE_BASE_INIT_LIST (BINFO_TYPE (base_binfo))) == NULL_TREE)
+ /* Nothing to initialize. */
+ continue;
+
+ /* ...ditto... */
+ base_init_list = maybe_fixup_vptrs (type, base_binfo, base_init_list);
+
+ /* This is normally true for single inheritance.
+ The win is we can shrink the chain of initializations
+ to be done by only converting to the actual type
+ we are interested in. */
+ if (TREE_VALUE (blist)
+ && TREE_CODE (TREE_VALUE (blist)) == TREE_VEC
+ && tree_int_cst_equal (BINFO_OFFSET (base_binfo),
+ BINFO_OFFSET (TREE_VALUE (blist))))
+ {
+ if (base_init_list)
+ {
+ /* Does it do more than just fill in a
+ virtual function table pointer? */
+ if (! TREE_ADDRESSABLE (blist))
+ base_init_list = build_tree_list (blist, base_init_list);
+ /* Can we get by just with the virtual function table
+ pointer that it fills in? */
+ else if (TREE_ADDRESSABLE (base_init_list)
+ && TREE_VALUE (base_init_list) == 0)
+ base_init_list = blist;
+ /* Maybe, but it is not as obvious as in the previous case. */
+ else if (! CLASSTYPE_NEEDS_VIRTUAL_REINIT (type))
+ {
+ tree last = tree_last (base_init_list);
+ while (TREE_VALUE (last)
+ && TREE_CODE (TREE_VALUE (last)) == TREE_LIST)
+ last = tree_last (TREE_VALUE (last));
+ if (TREE_VALUE (last) == 0)
+ base_init_list = build_tree_list (blist, base_init_list);
+ }
+ }
+ else
+ base_init_list = blist;
+ }
+ else
+ {
+ /* The function expand_aggr_init knows how to do the
+ initialization of `basetype' without getting
+ an explicit `blist'. */
+ if (base_init_list)
+ base_init_list = tree_cons (NULL_TREE, base_binfo, base_init_list);
+ else
+ base_init_list = CLASSTYPE_BINFO_AS_LIST (BINFO_TYPE (base_binfo));
+ }
+ }
+
+ if (base_init_list)
+ if (member_init_list)
+ CLASSTYPE_BASE_INIT_LIST (type) = build_tree_list (base_init_list, member_init_list);
+ else
+ CLASSTYPE_BASE_INIT_LIST (type) = base_init_list;
+ else if (member_init_list)
+ CLASSTYPE_BASE_INIT_LIST (type) = member_init_list;
+}
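+
+/* The simplest case, sketched: for
+
+ struct M { M (); };
+ struct S { M m; int i; }; // no bases, no virtuals
+
+ only `m' lands on member_init_list (it needs constructing, while
+ `i' has no DECL_INITIAL), base_init_list stays empty, and
+ CLASSTYPE_BASE_INIT_LIST (S) becomes just that member list. */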
+
+struct base_info
+{
+ int has_virtual;
+ int max_has_virtual;
+ int n_ancestors;
+ tree vfield;
+ tree vfields;
+ char cant_have_default_ctor;
+ char cant_have_const_ctor;
+ char cant_synth_copy_ctor;
+ char cant_synth_asn_ref;
+ char no_const_asn_ref;
+ char needs_virtual_dtor;
+};
+
+/* Record information about type T derived from its base classes.
+ Store most of that information in T itself, and place the
+ remaining information in the struct BASE_INFO.
+
+ Propagate basetype offsets throughout the lattice. Note that the
+ lattice topped by T is really a pair: it's a DAG that gives the
+ structure of the derivation hierarchy, and it's a list of the
+ virtual baseclasses that appear anywhere in the DAG. When a vbase
+ type appears in the DAG, its offset is 0, and its children start
+ their offsets from that point. When a vbase type appears in the list,
+ its offset is the offset it has in the hierarchy, and its children's
+ offsets include that offset in theirs.
+
+ Returns the index of the first base class to have virtual functions,
+ or -1 if no such base class.
+
+ Note that at this point TYPE_BINFO (t) != t_binfo. */
+
+static int
+finish_base_struct (t, b, t_binfo)
+ tree t;
+ struct base_info *b;
+ tree t_binfo;
+{
+ tree binfos = BINFO_BASETYPES (t_binfo);
+ int i, n_baseclasses = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+ int first_vfn_base_index = -1;
+ bzero (b, sizeof (struct base_info));
+
+ for (i = 0; i < n_baseclasses; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ tree basetype = BINFO_TYPE (base_binfo);
+
+ /* If the type of basetype is incomplete, then
+ we already complained about that fact
+ (and we should have fixed it up as well). */
+ if (TYPE_SIZE (basetype) == 0)
+ {
+ int j;
+ /* The base type is incomplete. It is
+ probably best to pretend that it does not
+ exist. */
+ if (i == n_baseclasses-1)
+ TREE_VEC_ELT (binfos, i) = NULL_TREE;
+ TREE_VEC_LENGTH (binfos) -= 1;
+ n_baseclasses -= 1;
+ for (j = i; j+1 < n_baseclasses; j++)
+ TREE_VEC_ELT (binfos, j) = TREE_VEC_ELT (binfos, j+1);
+ }
+
+ if (TYPE_HAS_INIT_REF (basetype)
+ && !TYPE_HAS_CONST_INIT_REF (basetype))
+ b->cant_have_const_ctor = 1;
+ if (! TYPE_HAS_INIT_REF (basetype)
+ || (TYPE_HAS_NONPUBLIC_CTOR (basetype) == 2
+ && ! is_friend_type (t, basetype)))
+ b->cant_synth_copy_ctor = 1;
+
+ if (TYPE_HAS_CONSTRUCTOR (basetype)
+ && ! TYPE_HAS_DEFAULT_CONSTRUCTOR (basetype))
+ {
+ b->cant_have_default_ctor = 1;
+ if (! TYPE_HAS_CONSTRUCTOR (t))
+ {
+ cp_pedwarn ("base `%T' with only non-default constructor",
+ basetype);
+ cp_pedwarn ("in class without a constructor");
+ }
+ }
+
+ if (TYPE_HAS_ASSIGN_REF (basetype)
+ && !TYPE_HAS_CONST_ASSIGN_REF (basetype))
+ b->no_const_asn_ref = 1;
+ if (! TYPE_HAS_ASSIGN_REF (basetype)
+ || TYPE_HAS_ABSTRACT_ASSIGN_REF (basetype)
+ || (TYPE_HAS_NONPUBLIC_ASSIGN_REF (basetype) == 2
+ && ! is_friend_type (t, basetype)))
+ b->cant_synth_asn_ref = 1;
+
+ b->n_ancestors += CLASSTYPE_N_SUPERCLASSES (basetype);
+ TYPE_NEEDS_CONSTRUCTING (t) |= TYPE_NEEDS_CONSTRUCTING (basetype);
+ TYPE_NEEDS_DESTRUCTOR (t) |= TYPE_NEEDS_DESTRUCTOR (basetype);
+ TYPE_HAS_COMPLEX_ASSIGN_REF (t) |= TYPE_HAS_COMPLEX_ASSIGN_REF (basetype);
+ TYPE_HAS_COMPLEX_INIT_REF (t) |= (TYPE_HAS_COMPLEX_INIT_REF (basetype)
+ || TYPE_NEEDS_CONSTRUCTING (basetype));
+
+ TYPE_OVERLOADS_CALL_EXPR (t) |= TYPE_OVERLOADS_CALL_EXPR (basetype);
+ TYPE_OVERLOADS_ARRAY_REF (t) |= TYPE_OVERLOADS_ARRAY_REF (basetype);
+ TYPE_OVERLOADS_ARROW (t) |= TYPE_OVERLOADS_ARROW (basetype);
+
+ if (! TREE_VIA_VIRTUAL (base_binfo)
+#if 0
+ /* This cannot be done, as prepare_fresh_vtable wants to modify
+ binfos associated with vfields anywhere in the hierarchy, not
+ just immediate base classes. Due to unsharing, the compiler
+ might consume 3% more memory on a real program.
+ */
+ && ! BINFO_OFFSET_ZEROP (base_binfo)
+#endif
+ && BINFO_BASETYPES (base_binfo))
+ {
+ tree base_binfos = BINFO_BASETYPES (base_binfo);
+ tree chain = NULL_TREE;
+ int j;
+
+ /* Now unshare the structure beneath BASE_BINFO. */
+ for (j = TREE_VEC_LENGTH (base_binfos)-1;
+ j >= 0; j--)
+ {
+ tree base_base_binfo = TREE_VEC_ELT (base_binfos, j);
+ if (! TREE_VIA_VIRTUAL (base_base_binfo))
+ TREE_VEC_ELT (base_binfos, j)
+ = make_binfo (BINFO_OFFSET (base_base_binfo),
+ base_base_binfo,
+ BINFO_VTABLE (base_base_binfo),
+ BINFO_VIRTUALS (base_base_binfo),
+ chain);
+ chain = TREE_VEC_ELT (base_binfos, j);
+ TREE_VIA_PUBLIC (chain) = TREE_VIA_PUBLIC (base_base_binfo);
+ TREE_VIA_PROTECTED (chain) = TREE_VIA_PROTECTED (base_base_binfo);
+ }
+
+ /* Completely unshare potentially shared data, and
+ update what is ours. */
+ propagate_binfo_offsets (base_binfo, BINFO_OFFSET (base_binfo));
+ }
+
+ if (! TREE_VIA_VIRTUAL (base_binfo))
+ CLASSTYPE_N_SUPERCLASSES (t) += 1;
+
+ if (TYPE_VIRTUAL_P (basetype))
+ {
+ /* If there's going to be a destructor needed, make
+ sure it will be virtual. */
+ b->needs_virtual_dtor = 1;
+
+ /* Don't borrow virtuals from virtual baseclasses. */
+ if (TREE_VIA_VIRTUAL (base_binfo))
+ continue;
+
+ if (first_vfn_base_index < 0)
+ {
+ tree vfields;
+ first_vfn_base_index = i;
+
+ /* Update these two, now that we know what vtable we are
+ going to extend. This is so that we can add virtual
+ functions, and override them properly. */
+ BINFO_VTABLE (t_binfo) = TYPE_BINFO_VTABLE (basetype);
+ BINFO_VIRTUALS (t_binfo) = TYPE_BINFO_VIRTUALS (basetype);
+ b->has_virtual = CLASSTYPE_VSIZE (basetype);
+ b->vfield = CLASSTYPE_VFIELD (basetype);
+ b->vfields = copy_list (CLASSTYPE_VFIELDS (basetype));
+ vfields = b->vfields;
+ while (vfields)
+ {
+ if (VF_BINFO_VALUE (vfields) == NULL_TREE
+ || ! TREE_VIA_VIRTUAL (VF_BINFO_VALUE (vfields)))
+ {
+ tree value = VF_BASETYPE_VALUE (vfields);
+ if (DECL_NAME (CLASSTYPE_VFIELD (value))
+ == DECL_NAME (CLASSTYPE_VFIELD (basetype)))
+ VF_NORMAL_VALUE (b->vfields) = basetype;
+ else
+ VF_NORMAL_VALUE (b->vfields) = VF_NORMAL_VALUE (vfields);
+ }
+ vfields = TREE_CHAIN (vfields);
+ }
+ CLASSTYPE_VFIELD (t) = b->vfield;
+ }
+ else
+ {
+ /* Only add unique vfields, and flatten them out as we go. */
+ tree vfields = CLASSTYPE_VFIELDS (basetype);
+ while (vfields)
+ {
+ if (VF_BINFO_VALUE (vfields) == NULL_TREE
+ || ! TREE_VIA_VIRTUAL (VF_BINFO_VALUE (vfields)))
+ {
+ tree value = VF_BASETYPE_VALUE (vfields);
+ b->vfields = tree_cons (base_binfo, value, b->vfields);
+ if (DECL_NAME (CLASSTYPE_VFIELD (value))
+ == DECL_NAME (CLASSTYPE_VFIELD (basetype)))
+ VF_NORMAL_VALUE (b->vfields) = basetype;
+ else
+ VF_NORMAL_VALUE (b->vfields) = VF_NORMAL_VALUE (vfields);
+ }
+ vfields = TREE_CHAIN (vfields);
+ }
+
+ if (b->has_virtual == 0)
+ {
+ first_vfn_base_index = i;
+
+ /* Update these two, now that we know what vtable we are
+ going to extend. This is so that we can add virtual
+ functions, and override them properly. */
+ BINFO_VTABLE (t_binfo) = TYPE_BINFO_VTABLE (basetype);
+ BINFO_VIRTUALS (t_binfo) = TYPE_BINFO_VIRTUALS (basetype);
+ b->has_virtual = CLASSTYPE_VSIZE (basetype);
+ b->vfield = CLASSTYPE_VFIELD (basetype);
+ CLASSTYPE_VFIELD (t) = b->vfield;
+ /* When we install the first one, set the VF_NORMAL_VALUE
+ to be the current class, as it is the most derived
+ class. Hopefully, this is not set to something else
+ later. (mrs) */
+ vfields = b->vfields;
+ while (vfields)
+ {
+ if (DECL_NAME (CLASSTYPE_VFIELD (t))
+ == DECL_NAME (CLASSTYPE_VFIELD (basetype)))
+ {
+ VF_NORMAL_VALUE (vfields) = t;
+ /* There should only be one of them! And it should
+ always be found if we get here. (mrs) */
+ break;
+ }
+ vfields = TREE_CHAIN (vfields);
+ }
+ }
+ }
+ }
+ }
+
+ /* Must come after offsets are fixed for all bases. */
+ for (i = 0; i < n_baseclasses; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ tree basetype = BINFO_TYPE (base_binfo);
+
+ if (get_base_distance (basetype, t_binfo, 0, (tree*)0) == -2)
+ {
+ cp_warning ("direct base `%T' inaccessible in `%T' due to ambiguity",
+ basetype, t);
+ b->cant_synth_asn_ref = 1;
+ b->cant_synth_copy_ctor = 1;
+ }
+ }
+ {
+ tree v = get_vbase_types (t_binfo);
+
+ for (; v; v = TREE_CHAIN (v))
+ {
+ tree basetype = BINFO_TYPE (v);
+ if (get_base_distance (basetype, t_binfo, 0, (tree*)0) == -2)
+ {
+ if (extra_warnings)
+ cp_warning ("virtual base `%T' inaccessible in `%T' due to ambiguity",
+ basetype, t);
+ b->cant_synth_asn_ref = 1;
+ b->cant_synth_copy_ctor = 1;
+ }
+ }
+ }
+
+ {
+ tree vfields;
+ /* Find the base class with the largest number of virtual functions. */
+ for (vfields = b->vfields; vfields; vfields = TREE_CHAIN (vfields))
+ {
+ if (CLASSTYPE_VSIZE (VF_BASETYPE_VALUE (vfields)) > b->max_has_virtual)
+ b->max_has_virtual = CLASSTYPE_VSIZE (VF_BASETYPE_VALUE (vfields));
+ if (VF_DERIVED_VALUE (vfields)
+ && CLASSTYPE_VSIZE (VF_DERIVED_VALUE (vfields)) > b->max_has_virtual)
+ b->max_has_virtual = CLASSTYPE_VSIZE (VF_DERIVED_VALUE (vfields));
+ }
+ }
+
+ if (b->vfield == 0)
+ /* If all virtual functions come only from virtual baseclasses. */
+ return -1;
+ return first_vfn_base_index;
+}
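+
+/* The lattice described above, sketched for
+
+ class L {};
+ class A : virtual public L {};
+ class B : virtual public L {};
+ class C : public A, public B {};
+
+ The DAG under C shares a single L node whose offset is 0, while
+ the vbase list entry for L carries L's real offset within a C
+ object; children of each appearance measure from that origin. */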
+
+static int
+typecode_p (type, code)
+ tree type;
+ enum tree_code code;
+{
+ return (TREE_CODE (type) == code
+ || (TREE_CODE (type) == REFERENCE_TYPE
+ && TREE_CODE (TREE_TYPE (type)) == code));
+}
+
+/* Set memoizing fields and bits of T (and its variants) for later use.
+ MAX_HAS_VIRTUAL is the largest size of any T's virtual function tables. */
+static void
+finish_struct_bits (t, max_has_virtual)
+ tree t;
+ int max_has_virtual;
+{
+ int i, n_baseclasses = CLASSTYPE_N_BASECLASSES (t);
+ tree method_vec = CLASSTYPE_METHOD_VEC (t);
+
+ /* Fix up variants (if any). */
+ tree variants = TYPE_NEXT_VARIANT (t);
+ while (variants)
+ {
+ /* These fields are in the _TYPE part of the node, not in
+ the TYPE_LANG_SPECIFIC component, so they are not shared. */
+ TYPE_HAS_CONSTRUCTOR (variants) = TYPE_HAS_CONSTRUCTOR (t);
+ TYPE_HAS_DESTRUCTOR (variants) = TYPE_HAS_DESTRUCTOR (t);
+ TYPE_NEEDS_CONSTRUCTING (variants) = TYPE_NEEDS_CONSTRUCTING (t);
+ TYPE_NEEDS_DESTRUCTOR (variants) = TYPE_NEEDS_DESTRUCTOR (t);
+
+ TYPE_USES_COMPLEX_INHERITANCE (variants) = TYPE_USES_COMPLEX_INHERITANCE (t);
+ TYPE_VIRTUAL_P (variants) = TYPE_VIRTUAL_P (t);
+ TYPE_USES_VIRTUAL_BASECLASSES (variants) = TYPE_USES_VIRTUAL_BASECLASSES (t);
+ /* Copy whatever these are holding today. */
+ TYPE_MIN_VALUE (variants) = TYPE_MIN_VALUE (t);
+ TYPE_MAX_VALUE (variants) = TYPE_MAX_VALUE (t);
+ variants = TYPE_NEXT_VARIANT (variants);
+ }
+
+ if (n_baseclasses && max_has_virtual)
+ {
+ /* Done by `finish_struct' for classes without baseclasses. */
+ int might_have_abstract_virtuals = CLASSTYPE_ABSTRACT_VIRTUALS (t) != 0;
+ tree binfos = TYPE_BINFO_BASETYPES (t);
+ for (i = n_baseclasses-1; i >= 0; i--)
+ {
+ might_have_abstract_virtuals
+ |= (CLASSTYPE_ABSTRACT_VIRTUALS (BINFO_TYPE (TREE_VEC_ELT (binfos, i))) != 0);
+ if (might_have_abstract_virtuals)
+ break;
+ }
+ if (might_have_abstract_virtuals)
+ {
+ /* We use error_mark_node from override_one_vtable to signal
+ an artificial abstract. */
+ if (CLASSTYPE_ABSTRACT_VIRTUALS (t) == error_mark_node)
+ CLASSTYPE_ABSTRACT_VIRTUALS (t) = NULL_TREE;
+ CLASSTYPE_ABSTRACT_VIRTUALS (t) = get_abstract_virtuals (t);
+ }
+ }
+
+ if (n_baseclasses)
+ {
+ /* Notice whether this class has type conversion functions defined. */
+ tree binfo = TYPE_BINFO (t);
+ tree binfos = BINFO_BASETYPES (binfo);
+ tree basetype;
+
+ for (i = n_baseclasses-1; i >= 0; i--)
+ {
+ basetype = BINFO_TYPE (TREE_VEC_ELT (binfos, i));
+
+ if (TYPE_HAS_CONVERSION (basetype))
+ {
+ TYPE_HAS_CONVERSION (t) = 1;
+ TYPE_HAS_INT_CONVERSION (t) |= TYPE_HAS_INT_CONVERSION (basetype);
+ TYPE_HAS_REAL_CONVERSION (t) |= TYPE_HAS_REAL_CONVERSION (basetype);
+ }
+ if (CLASSTYPE_MAX_DEPTH (basetype) >= CLASSTYPE_MAX_DEPTH (t))
+ CLASSTYPE_MAX_DEPTH (t) = CLASSTYPE_MAX_DEPTH (basetype) + 1;
+ }
+ }
+
+ /* Need to test METHOD_VEC here in case all methods
+ (conversions and otherwise) are inherited. */
+ if (TYPE_HAS_CONVERSION (t) && method_vec != NULL_TREE)
+ {
+ tree first_conversions[last_conversion_type];
+ tree last_conversions[last_conversion_type];
+ enum conversion_type conv_index;
+ tree *tmp;
+ int i;
+
+ bzero (first_conversions, sizeof (first_conversions));
+ bzero (last_conversions, sizeof (last_conversions));
+ for (tmp = &TREE_VEC_ELT (method_vec, 1);
+ tmp != TREE_VEC_END (method_vec); tmp += 1)
+ {
+ /* ??? This should compare DECL_NAME (*tmp) == ansi_opname[TYPE_EXPR]. */
+ if (IDENTIFIER_TYPENAME_P (DECL_ASSEMBLER_NAME (*tmp)))
+ {
+ tree fntype = TREE_TYPE (*tmp);
+ tree return_type = TREE_TYPE (fntype);
+ my_friendly_assert (TREE_CODE (fntype) == METHOD_TYPE, 171);
+
+ if (typecode_p (return_type, POINTER_TYPE))
+ {
+ if (TYPE_READONLY (TREE_TYPE (return_type)))
+ conv_index = constptr_conv;
+ else
+ conv_index = ptr_conv;
+ }
+ else if (typecode_p (return_type, INTEGER_TYPE)
+ || typecode_p (return_type, BOOLEAN_TYPE)
+ || typecode_p (return_type, ENUMERAL_TYPE))
+ {
+ TYPE_HAS_INT_CONVERSION (t) = 1;
+ conv_index = int_conv;
+ }
+ else if (typecode_p (return_type, REAL_TYPE))
+ {
+ TYPE_HAS_REAL_CONVERSION (t) = 1;
+ conv_index = real_conv;
+ }
+ else
+ continue;
+
+ if (first_conversions[(int) conv_index] == NULL_TREE)
+ first_conversions[(int) conv_index] = *tmp;
+ last_conversions[(int) conv_index] = *tmp;
+ }
+ }
+
+ for (i = 0; i < (int) last_conversion_type; i++)
+ if (first_conversions[i] != last_conversions[i])
+ CLASSTYPE_CONVERSION (t, i) = error_mark_node;
+ else
+ CLASSTYPE_CONVERSION (t, i) = first_conversions[i];
+ }
+
+ /* If this type has constructors, force its mode to be BLKmode,
+ and force its TREE_ADDRESSABLE bit to be nonzero. */
+ if (TYPE_NEEDS_CONSTRUCTING (t) || TYPE_NEEDS_DESTRUCTOR (t))
+ {
+ tree variants = t;
+
+ if (TREE_CODE (TYPE_NAME (t)) == TYPE_DECL)
+ DECL_MODE (TYPE_NAME (t)) = BLKmode;
+ while (variants)
+ {
+ TYPE_MODE (variants) = BLKmode;
+ TREE_ADDRESSABLE (variants) = 1;
+ variants = TYPE_NEXT_VARIANT (variants);
+ }
+ }
+}
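+
+/* E.g. (a sketch): with
+
+ struct A { operator int (); operator long (); };
+
+ both conversions select int_conv, so first_conversions[int_conv]
+ differs from last_conversions[int_conv] and CLASSTYPE_CONVERSION
+ (A, int_conv) becomes error_mark_node; a use must then name the
+ conversion function explicitly. */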
+
+/* Warn about duplicate methods in fn_fields. Also compact method
+ lists so that lookup can be made faster.
+
+ Algorithm: Outer loop builds lists by method name. Inner loop
+ checks for redundant method names within a list.
+
+ Data Structure: List of method lists. The outer list is a
+ TREE_LIST, whose TREE_PURPOSE field is the field name and the
+ TREE_VALUE is the TREE_CHAIN of the FUNCTION_DECLs. Friends are
+ chained in the same way as member functions, but they live in the
+ TREE_TYPE field of the outer list. That allows them to be quickly
+ deleted, and requires no extra storage.
+
+ If there are any constructors/destructors, they are moved to the
+ front of the list. This makes pushclass more efficient.
+
+ We also link each field which shares a name with its baseclass
+ to the head of the list of fields for that base class. This allows
+ us to reduce search time in places like `build_method_call' to
+ consider only reasonably likely functions. */
+
+static tree
+finish_struct_methods (t, fn_fields, nonprivate_method)
+ tree t;
+ tree fn_fields;
+ int nonprivate_method;
+{
+ tree method_vec;
+ tree name = constructor_name (t);
+ int i, n_baseclasses = CLASSTYPE_N_BASECLASSES (t);
+
+ /* Now prepare to gather fn_fields into vector. */
+ struct obstack *ambient_obstack = current_obstack;
+ current_obstack = &class_obstack;
+ method_vec = make_node (TREE_VEC);
+ /* Room has been saved for constructors and destructors. */
+ current_obstack = ambient_obstack;
+ /* Now make this a live vector. */
+ obstack_free (&class_obstack, method_vec);
+ obstack_blank (&class_obstack, sizeof (struct tree_vec));
+
+ while (fn_fields)
+ {
+ /* NEXT Pointer, TEST Pointer, and BASE Pointer. */
+ tree nextp, *testp;
+ tree fn_name = DECL_NAME (fn_fields);
+ if (fn_name == NULL_TREE)
+ fn_name = name;
+
+ nextp = TREE_CHAIN (fn_fields);
+ TREE_CHAIN (fn_fields) = NULL_TREE;
+
+ /* Clear out this flag.
+
+ @@ Doug may figure out how to break
+ @@ this with nested classes and friends. */
+ DECL_IN_AGGR_P (fn_fields) = 0;
+
+ /* Note here whether a copy ctor is private, so we don't dare generate
+ a default copy constructor for a class that has a member
+ of this type without making sure they have access to it. */
+ if (fn_name == name)
+ {
+ tree parmtypes = FUNCTION_ARG_CHAIN (fn_fields);
+ tree parmtype = parmtypes ? TREE_VALUE (parmtypes) : void_type_node;
+
+ if (TREE_CODE (parmtype) == REFERENCE_TYPE
+ && TYPE_MAIN_VARIANT (TREE_TYPE (parmtype)) == t)
+ {
+ if (TREE_CHAIN (parmtypes) == NULL_TREE
+ || TREE_CHAIN (parmtypes) == void_list_node
+ || TREE_PURPOSE (TREE_CHAIN (parmtypes)))
+ {
+ if (TREE_PROTECTED (fn_fields))
+ TYPE_HAS_NONPUBLIC_CTOR (t) = 1;
+ else if (TREE_PRIVATE (fn_fields))
+ TYPE_HAS_NONPUBLIC_CTOR (t) = 2;
+ }
+ }
+ }
+ else if (fn_name == ansi_opname[(int) MODIFY_EXPR])
+ {
+ tree parmtype = TREE_VALUE (FUNCTION_ARG_CHAIN (fn_fields));
+
+ if (TREE_CODE (parmtype) == REFERENCE_TYPE
+ && TYPE_MAIN_VARIANT (TREE_TYPE (parmtype)) == t)
+ {
+ if (TREE_PROTECTED (fn_fields))
+ TYPE_HAS_NONPUBLIC_ASSIGN_REF (t) = 1;
+ else if (TREE_PRIVATE (fn_fields))
+ TYPE_HAS_NONPUBLIC_ASSIGN_REF (t) = 2;
+ }
+ }
+
+ /* Constructors are handled easily in search routines. */
+ if (fn_name == name)
+ {
+ DECL_CHAIN (fn_fields) = TREE_VEC_ELT (method_vec, 0);
+ TREE_VEC_ELT (method_vec, 0) = fn_fields;
+ }
+ else
+ {
+ testp = &TREE_VEC_ELT (method_vec, 0);
+ if (*testp == NULL_TREE)
+ testp++;
+ while (((HOST_WIDE_INT) testp
+ < (HOST_WIDE_INT) obstack_next_free (&class_obstack))
+ && DECL_NAME (*testp) != fn_name)
+ testp++;
+ if ((HOST_WIDE_INT) testp
+ < (HOST_WIDE_INT) obstack_next_free (&class_obstack))
+ {
+ tree x, prev_x;
+
+ for (x = *testp; x; x = DECL_CHAIN (x))
+ {
+ if (DECL_NAME (fn_fields) == ansi_opname[(int) DELETE_EXPR]
+ || DECL_NAME (fn_fields)
+ == ansi_opname[(int) VEC_DELETE_EXPR])
+ {
+ /* ANSI C++ June 5 1992 WP 12.5.5.1 */
+ cp_error_at ("`%D' overloaded", fn_fields);
+ cp_error_at ("previous declaration as `%D' here", x);
+ }
+ if (DECL_ASSEMBLER_NAME (fn_fields)==DECL_ASSEMBLER_NAME (x))
+ {
+ /* We complain about multiple destructors on sight,
+ so we do not repeat the warning here. Friend-friend
+ ambiguities are warned about outside this loop. */
+ if (!DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (fn_fields)))
+ cp_error_at ("ambiguous method `%#D' in structure",
+ fn_fields);
+ break;
+ }
+ prev_x = x;
+ }
+ if (x == 0)
+ {
+ if (*testp)
+ DECL_CHAIN (prev_x) = fn_fields;
+ else
+ *testp = fn_fields;
+ }
+ }
+ else
+ {
+ obstack_ptr_grow (&class_obstack, fn_fields);
+ method_vec = (tree)obstack_base (&class_obstack);
+ }
+ }
+ fn_fields = nextp;
+ }
+
+ TREE_VEC_LENGTH (method_vec) = (tree *)obstack_next_free (&class_obstack)
+ - (&TREE_VEC_ELT (method_vec, 0));
+ obstack_finish (&class_obstack);
+ CLASSTYPE_METHOD_VEC (t) = method_vec;
+
+ if (nonprivate_method == 0
+ && CLASSTYPE_FRIEND_CLASSES (t) == NULL_TREE
+ && DECL_FRIENDLIST (TYPE_NAME (t)) == NULL_TREE)
+ {
+ tree binfos = BINFO_BASETYPES (TYPE_BINFO (t));
+ for (i = 0; i < n_baseclasses; i++)
+ if (TREE_VIA_PUBLIC (TREE_VEC_ELT (binfos, i))
+ || TREE_VIA_PROTECTED (TREE_VEC_ELT (binfos, i)))
+ {
+ nonprivate_method = 1;
+ break;
+ }
+ if (nonprivate_method == 0)
+ cp_warning ("all member functions in class `%T' are private", t);
+ }
+
+ /* If there are constructors (and destructors), they are at the
+ front. Place destructors at the very front. Also warn if all
+ constructors and/or destructors are private (in which case this
+ class is effectively unusable). */
+ if (TYPE_HAS_DESTRUCTOR (t))
+ {
+ tree dtor, prev;
+
+ for (dtor = TREE_VEC_ELT (method_vec, 0);
+ dtor;
+ prev = dtor, dtor = DECL_CHAIN (dtor))
+ {
+ if (DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (dtor)))
+ {
+ if (TREE_PRIVATE (dtor)
+ && CLASSTYPE_FRIEND_CLASSES (t) == NULL_TREE
+ && DECL_FRIENDLIST (TYPE_NAME (t)) == NULL_TREE
+ && warn_ctor_dtor_privacy)
+ warning ("class `%s' only defines a private destructor and has no friends",
+ TYPE_NAME_STRING (t));
+ break;
+ }
+ }
+
+ /* Wild parse errors can cause this to happen. */
+ if (dtor == NULL_TREE)
+ TYPE_HAS_DESTRUCTOR (t) = 0;
+ else if (dtor != TREE_VEC_ELT (method_vec, 0))
+ {
+ DECL_CHAIN (prev) = DECL_CHAIN (dtor);
+ DECL_CHAIN (dtor) = TREE_VEC_ELT (method_vec, 0);
+ TREE_VEC_ELT (method_vec, 0) = dtor;
+ }
+ }
+
+ /* Now for each member function (except for constructors and
+ destructors), compute where member functions of the same
+ name reside in base classes. */
+ if (n_baseclasses != 0
+ && TREE_VEC_LENGTH (method_vec) > 1)
+ {
+ int len = TREE_VEC_LENGTH (method_vec);
+ tree baselink_vec = make_tree_vec (len);
+ int any_links = 0;
+ tree baselink_binfo = build_tree_list (NULL_TREE, TYPE_BINFO (t));
+
+ for (i = 1; i < len; i++)
+ {
+ TREE_VEC_ELT (baselink_vec, i)
+ = get_baselinks (baselink_binfo, t, DECL_NAME (TREE_VEC_ELT (method_vec, i)));
+ if (TREE_VEC_ELT (baselink_vec, i) != 0)
+ any_links = 1;
+ }
+ if (any_links != 0)
+ CLASSTYPE_BASELINK_VEC (t) = baselink_vec;
+ else
+ obstack_free (current_obstack, baselink_vec);
+ }
+
+ /* Now add the methods to the TYPE_METHODS of T, arranged in a chain. */
+ {
+ tree x, last_x = NULL_TREE;
+ int limit = TREE_VEC_LENGTH (method_vec);
+
+ for (i = 1; i < limit; i++)
+ {
+ for (x = TREE_VEC_ELT (method_vec, i); x; x = DECL_CHAIN (x))
+ {
+ if (last_x != NULL_TREE)
+ TREE_CHAIN (last_x) = x;
+ last_x = x;
+ }
+ }
+
+ /* Put ctors and dtors at the front of the list. */
+ x = TREE_VEC_ELT (method_vec, 0);
+ if (x)
+ {
+ while (DECL_CHAIN (x))
+ {
+ /* Let's avoid being circular about this. */
+ if (x == DECL_CHAIN (x))
+ break;
+ TREE_CHAIN (x) = DECL_CHAIN (x);
+ x = DECL_CHAIN (x);
+ }
+ if (TREE_VEC_LENGTH (method_vec) > 1)
+ TREE_CHAIN (x) = TREE_VEC_ELT (method_vec, 1);
+ else
+ TREE_CHAIN (x) = NULL_TREE;
+ }
+ }
+
+ TYPE_METHODS (t) = method_vec;
+
+ return method_vec;
+}
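+
+/* The 12.5.5.1 check above rejects, for example (a sketch):
+
+ struct S {
+ void operator delete (void *);
+ void operator delete (void *, size_t); // `operator delete' overloaded
+ };
+ */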
+
+/* Emit error when a duplicate definition of a type is seen. Patch up. */
+
+void
+duplicate_tag_error (t)
+ tree t;
+{
+ cp_error ("redefinition of `%#T'", t);
+
+ /* Pretend we haven't defined this type. */
+
+ /* All of the component_decl's were TREE_CHAINed together in the parser.
+ finish_struct_methods walks these chains and assembles all methods with
+ the same base name into DECL_CHAINs. Now we don't need the parser chains
+ anymore, so we unravel them. */
+ /* This used to be in finish_struct, but it turns out that the
+ TREE_CHAIN is used by dbxout_type_methods and perhaps some other
+ things... */
+ if (CLASSTYPE_METHOD_VEC(t))
+ {
+ tree tv = CLASSTYPE_METHOD_VEC(t);
+ int i, len = TREE_VEC_LENGTH (tv);
+ for (i = 0; i < len; i++)
+ {
+ tree unchain = TREE_VEC_ELT (tv, i);
+ while (unchain != NULL_TREE)
+ {
+ TREE_CHAIN (unchain) = NULL_TREE;
+ unchain = DECL_CHAIN(unchain);
+ }
+ }
+ }
+
+ if (TYPE_LANG_SPECIFIC (t))
+ {
+ tree as_list = CLASSTYPE_AS_LIST (t);
+ tree binfo = TYPE_BINFO (t);
+ tree binfo_as_list = CLASSTYPE_BINFO_AS_LIST (t);
+ int interface_only = CLASSTYPE_INTERFACE_ONLY (t);
+ int interface_unknown = CLASSTYPE_INTERFACE_UNKNOWN (t);
+
+ bzero (TYPE_LANG_SPECIFIC (t), sizeof (struct lang_type));
+ BINFO_BASETYPES(binfo) = NULL_TREE;
+
+ CLASSTYPE_AS_LIST (t) = as_list;
+ TYPE_BINFO (t) = binfo;
+ CLASSTYPE_BINFO_AS_LIST (t) = binfo_as_list;
+ CLASSTYPE_INTERFACE_ONLY (t) = interface_only;
+ SET_CLASSTYPE_INTERFACE_UNKNOWN_X (t, interface_unknown);
+ CLASSTYPE_VBASE_SIZE (t) = integer_zero_node;
+ TYPE_REDEFINED (t) = 1;
+ }
+ TYPE_SIZE (t) = NULL_TREE;
+ TYPE_MODE (t) = VOIDmode;
+ TYPE_FIELDS (t) = NULL_TREE;
+ TYPE_METHODS (t) = NULL_TREE;
+ TYPE_VFIELD (t) = NULL_TREE;
+ TYPE_CONTEXT (t) = NULL_TREE;
+}
+
+/* Finish up all new vtables. */
+static void
+finish_vtbls (binfo, do_self, t)
+ tree binfo, t;
+ int do_self;
+{
+ tree binfos = BINFO_BASETYPES (binfo);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ /* Should we use something besides CLASSTYPE_VFIELDS? */
+ if (do_self && CLASSTYPE_VFIELDS (BINFO_TYPE (binfo)))
+ {
+ if (BINFO_NEW_VTABLE_MARKED (binfo))
+ {
+ tree decl, context;
+
+ decl = BINFO_VTABLE (binfo);
+ context = DECL_CONTEXT (decl);
+ DECL_CONTEXT (decl) = 0;
+ if (write_virtuals >= 0
+ && DECL_INITIAL (decl) != BINFO_VIRTUALS (binfo))
+ DECL_INITIAL (decl) = build_nt (CONSTRUCTOR, NULL_TREE,
+ BINFO_VIRTUALS (binfo));
+ finish_decl (decl, DECL_INITIAL (decl), NULL_TREE, 0);
+ DECL_CONTEXT (decl) = context;
+ }
+ CLEAR_BINFO_NEW_VTABLE_MARKED (binfo);
+ }
+
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ int is_not_base_vtable =
+ i != CLASSTYPE_VFIELD_PARENT (BINFO_TYPE (binfo));
+ if (TREE_VIA_VIRTUAL (base_binfo))
+ {
+ base_binfo = binfo_member (BINFO_TYPE (base_binfo), CLASSTYPE_VBASECLASSES (t));
+ }
+ finish_vtbls (base_binfo, is_not_base_vtable, t);
+ }
+}
+
+/* True if we should override the given BASE_FNDECL with the given
+ FNDECL. */
+static int
+overrides (fndecl, base_fndecl)
+ tree fndecl, base_fndecl;
+{
+ /* Destructors have special names. */
+ if (DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (base_fndecl)) &&
+ DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (fndecl)))
+ return 1;
+ if (DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (base_fndecl)) ||
+ DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (fndecl)))
+ return 0;
+ if (DECL_NAME (fndecl) == DECL_NAME (base_fndecl))
+ {
+ tree rettype, base_rettype, types, base_types;
+#if 0
+ retypes = TREE_TYPE (TREE_TYPE (fndecl));
+ base_retypes = TREE_TYPE (TREE_TYPE (base_fndecl));
+#endif
+ types = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
+ base_types = TYPE_ARG_TYPES (TREE_TYPE (base_fndecl));
+ if ((TYPE_READONLY (TREE_TYPE (TREE_VALUE (base_types)))
+ == TYPE_READONLY (TREE_TYPE (TREE_VALUE (types))))
+ && compparms (TREE_CHAIN (base_types), TREE_CHAIN (types), 3))
+ return 1;
+ }
+ return 0;
+}
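+
+/* Note the `this'-constness test above; in a sketch like
+
+ struct A { virtual void f () const; };
+ struct B : public A { void f (); };
+
+ B::f does not override A::f, because the TYPE_READONLY of the
+ class type behind the implicit `this' argument differs. */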
+
+static void
+modify_one_vtable (binfo, t, fndecl, pfn)
+ tree binfo, t, fndecl, pfn;
+{
+ tree virtuals = BINFO_VIRTUALS (binfo);
+ unsigned HOST_WIDE_INT n;
+
+ n = 0;
+ /* Skip initial vtable length field and RTTI fake object. */
+ for (; virtuals && n < 1 + flag_dossier; n++)
+ virtuals = TREE_CHAIN (virtuals);
+ while (virtuals)
+ {
+ tree current_fndecl = TREE_VALUE (virtuals);
+ current_fndecl = FNADDR_FROM_VTABLE_ENTRY (current_fndecl);
+ current_fndecl = TREE_OPERAND (current_fndecl, 0);
+ if (current_fndecl && overrides (fndecl, current_fndecl))
+ {
+ tree base_offset, offset;
+ tree context = DECL_CLASS_CONTEXT (fndecl);
+ tree vfield = CLASSTYPE_VFIELD (t);
+ tree this_offset;
+
+ offset = integer_zero_node;
+ if (context != t && TYPE_USES_COMPLEX_INHERITANCE (t))
+ {
+ offset = virtual_offset (context, CLASSTYPE_VBASECLASSES (t), offset);
+ if (offset == NULL_TREE)
+ {
+ tree binfo = get_binfo (context, t, 0);
+ offset = BINFO_OFFSET (binfo);
+ }
+ }
+
+ /* Find the right offset for the this pointer based on the
+ base class we just found. We have to take into
+ consideration the virtual base class pointers that we
+ stick in before the virtual function table pointer.
+
+ Also, we want just the delta between the most base class
+ that we derived this vfield from and us. */
+ base_offset = size_binop (PLUS_EXPR,
+ get_derived_offset (binfo),
+ BINFO_OFFSET (binfo));
+ this_offset = size_binop (MINUS_EXPR, offset, base_offset);
+
+ /* Make sure we can modify the derived association with immunity. */
+ if (TREE_USED (binfo)) {
+ my_friendly_assert (0, 999);
+#if 0
+ my_friendly_assert (*binfo2_ptr == binfo, 999);
+ *binfo2_ptr = copy_binfo (binfo);
+#endif
+ }
+ if (binfo == TYPE_BINFO (t))
+ {
+ /* In this case, it is *type*'s vtable we are modifying.
+ We start with the approximation that its vtable is that
+ of the immediate base class. */
+ if (! BINFO_NEW_VTABLE_MARKED (binfo))
+ build_vtable (TYPE_BINFO (DECL_CONTEXT (vfield)), t);
+ }
+ else
+ {
+ /* This is our very own copy of `basetype' to play with.
+ Later, we will fill in all the virtual functions
+ that override the virtual functions in these base classes
+ which are not defined by the current type. */
+ if (! BINFO_NEW_VTABLE_MARKED (binfo))
+ prepare_fresh_vtable (binfo, t);
+ }
+
+#ifdef NOTQUITE
+ cp_warning ("in %D", DECL_NAME (BINFO_VTABLE (binfo)));
+#endif
+ modify_vtable_entry (get_vtable_entry_n (BINFO_VIRTUALS (binfo), n),
+ build_vtable_entry (this_offset, pfn),
+ fndecl);
+ }
+ ++n;
+ virtuals = TREE_CHAIN (virtuals);
+ }
+}
+
+/* These are the ones that are not through virtual base classes. */
+static void
+modify_all_direct_vtables (binfo, do_self, t, fndecl, pfn)
+ tree binfo, t, fndecl, pfn;
+ int do_self;
+{
+ tree binfos = BINFO_BASETYPES (binfo);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ /* Should we use something besides CLASSTYPE_VFIELDS? */
+ if (do_self && CLASSTYPE_VFIELDS (BINFO_TYPE (binfo)))
+ {
+ modify_one_vtable (binfo, t, fndecl, pfn);
+ }
+
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ int is_not_base_vtable =
+ i != CLASSTYPE_VFIELD_PARENT (BINFO_TYPE (binfo));
+ if (! TREE_VIA_VIRTUAL (base_binfo))
+ modify_all_direct_vtables (base_binfo, is_not_base_vtable, t, fndecl, pfn);
+ }
+}
+
+/* Fixup all the delta entries in this vtable that need updating.
+ This happens when we have non-overridden virtual functions from a
+ virtual base class that are at a different offset in the new
+ hierarchy, because the layout of the virtual bases has changed. */
+static void
+fixup_vtable_deltas (binfo, t)
+ tree binfo, t;
+{
+ tree virtuals = BINFO_VIRTUALS (binfo);
+ unsigned HOST_WIDE_INT n;
+
+ n = 0;
+ /* Skip initial vtable length field and RTTI fake object. */
+ for (; virtuals && n < 1 + flag_dossier; n++)
+ virtuals = TREE_CHAIN (virtuals);
+ while (virtuals)
+ {
+ tree fndecl = TREE_VALUE (virtuals);
+ tree pfn = FNADDR_FROM_VTABLE_ENTRY (fndecl);
+ tree delta = DELTA_FROM_VTABLE_ENTRY (fndecl);
+ fndecl = TREE_OPERAND (pfn, 0);
+ if (fndecl)
+ {
+ tree base_offset, offset;
+ tree context = DECL_CLASS_CONTEXT (fndecl);
+ tree vfield = CLASSTYPE_VFIELD (t);
+ tree this_offset;
+
+ offset = integer_zero_node;
+ if (context != t && TYPE_USES_COMPLEX_INHERITANCE (t))
+ {
+ offset = virtual_offset (context, CLASSTYPE_VBASECLASSES (t), offset);
+ if (offset == NULL_TREE)
+ {
+ tree binfo = get_binfo (context, t, 0);
+ offset = BINFO_OFFSET (binfo);
+ }
+ }
+
+ /* Find the right offset for the this pointer based on the
+ base class we just found. We have to take into
+ consideration the virtual base class pointers that we
+ stick in before the virtual function table pointer.
+
+ Also, we want just the delta between the most base class
+ that we derived this vfield from and us. */
+ base_offset = size_binop (PLUS_EXPR,
+ get_derived_offset (binfo),
+ BINFO_OFFSET (binfo));
+ this_offset = size_binop (MINUS_EXPR, offset, base_offset);
+
+ if (! tree_int_cst_equal (this_offset, delta))
+ {
+ /* Make sure we can modify the derived association with immunity. */
+ if (TREE_USED (binfo))
+ my_friendly_assert (0, 999);
+
+ if (binfo == TYPE_BINFO (t))
+ {
+ /* In this case, it is *type*'s vtable we are modifying.
+ We start with the approximation that its vtable is that
+ of the immediate base class. */
+ if (! BINFO_NEW_VTABLE_MARKED (binfo))
+ build_vtable (TYPE_BINFO (DECL_CONTEXT (vfield)), t);
+ }
+ else
+ {
+ /* This is our very own copy of `basetype' to play with.
+ Later, we will fill in all the virtual functions
+ that override the virtual functions in these base classes
+ which are not defined by the current type. */
+ if (! BINFO_NEW_VTABLE_MARKED (binfo))
+ prepare_fresh_vtable (binfo, t);
+ }
+
+ modify_vtable_entry (get_vtable_entry_n (BINFO_VIRTUALS (binfo), n),
+ build_vtable_entry (this_offset, pfn),
+ fndecl);
+ }
+ }
+ ++n;
+ virtuals = TREE_CHAIN (virtuals);
+ }
+}
+
+/* These are the ones that are through virtual base classes. */
+static void
+modify_all_indirect_vtables (binfo, do_self, via_virtual, t, fndecl, pfn)
+ tree binfo, t, fndecl, pfn;
+ int do_self, via_virtual;
+{
+ tree binfos = BINFO_BASETYPES (binfo);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ /* Should we use something besides CLASSTYPE_VFIELDS? */
+ if (do_self && via_virtual && CLASSTYPE_VFIELDS (BINFO_TYPE (binfo)))
+ {
+ modify_one_vtable (binfo, t, fndecl, pfn);
+ }
+
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ int is_not_base_vtable =
+ i != CLASSTYPE_VFIELD_PARENT (BINFO_TYPE (binfo));
+ if (TREE_VIA_VIRTUAL (base_binfo))
+ {
+ via_virtual = 1;
+ base_binfo = binfo_member (BINFO_TYPE (base_binfo), CLASSTYPE_VBASECLASSES (t));
+ }
+ modify_all_indirect_vtables (base_binfo, is_not_base_vtable, via_virtual, t, fndecl, pfn);
+ }
+}
+
+static void
+modify_all_vtables (t, fndecl, vfn)
+ tree t, fndecl, vfn;
+{
+ /* Do these first, so that we will make use of any non-virtual class's
+ vtable over a virtual class's vtable. */
+ modify_all_direct_vtables (TYPE_BINFO (t), 1, t, fndecl, vfn);
+ if (TYPE_USES_VIRTUAL_BASECLASSES (t))
+ modify_all_indirect_vtables (TYPE_BINFO (t), 1, 0, t, fndecl, vfn);
+}
+
+/* Here, we already know that they match in every respect.
+ All we have to check is where they had their declarations. */
+static int
+strictly_overrides (fndecl1, fndecl2)
+ tree fndecl1, fndecl2;
+{
+ int distance = get_base_distance (DECL_CLASS_CONTEXT (fndecl2),
+ DECL_CLASS_CONTEXT (fndecl1),
+ 0, (tree *)0);
+ if (distance == -2 || distance > 0)
+ return 1;
+ return 0;
+}
+
+/* Merge overrides for one vtable.
+ If we want to merge in the same function, we are fine.
+ else
+ if one has a DECL_CLASS_CONTEXT that is a parent of the
+ other, then choose the more derived one
+ else
+ potentially ill-formed (see 10.3 [class.virtual]);
+ we have to check later to see if there was an
+ override in this class. If there was, ok; if not,
+ then it is ill-formed. (mrs)
+
+ We take special care to reuse a vtable, if we can. */
+static void
+override_one_vtable (binfo, old, t)
+ tree binfo, old, t;
+{
+ tree virtuals = BINFO_VIRTUALS (binfo);
+ tree old_virtuals = BINFO_VIRTUALS (old);
+ enum { REUSE_NEW, REUSE_OLD, UNDECIDED, NEITHER } choose = UNDECIDED;
+
+ /* If we have already committed to modifying it, then don't try and
+ reuse another vtable. */
+ if (BINFO_NEW_VTABLE_MARKED (binfo))
+ choose = NEITHER;
+
+ /* Skip size entry. */
+ virtuals = TREE_CHAIN (virtuals);
+ /* Skip RTTI fake object. */
+ if (flag_dossier)
+ {
+ virtuals = TREE_CHAIN (virtuals);
+ }
+
+ /* Skip size entry. */
+ old_virtuals = TREE_CHAIN (old_virtuals);
+ /* Skip RTTI fake object. */
+ if (flag_dossier)
+ {
+ old_virtuals = TREE_CHAIN (old_virtuals);
+ }
+
+ while (virtuals)
+ {
+ tree fndecl = TREE_VALUE (virtuals);
+ tree old_fndecl = TREE_VALUE (old_virtuals);
+ fndecl = FNADDR_FROM_VTABLE_ENTRY (fndecl);
+ old_fndecl = FNADDR_FROM_VTABLE_ENTRY (old_fndecl);
+ fndecl = TREE_OPERAND (fndecl, 0);
+ old_fndecl = TREE_OPERAND (old_fndecl, 0);
+ /* First check to see if they are the same. */
+ if (DECL_ASSEMBLER_NAME (fndecl) == DECL_ASSEMBLER_NAME (old_fndecl))
+ {
+ /* No need to do anything. */
+ }
+ else if (strictly_overrides (fndecl, old_fndecl))
+ {
+ if (choose == UNDECIDED)
+ choose = REUSE_NEW;
+ else if (choose == REUSE_OLD)
+ {
+ choose = NEITHER;
+ if (! BINFO_NEW_VTABLE_MARKED (binfo))
+ {
+ prepare_fresh_vtable (binfo, t);
+ override_one_vtable (binfo, old, t);
+ return;
+ }
+ }
+ }
+ else if (strictly_overrides (old_fndecl, fndecl))
+ {
+ if (choose == UNDECIDED)
+ choose = REUSE_OLD;
+ else if (choose == REUSE_NEW)
+ {
+ choose = NEITHER;
+ if (! BINFO_NEW_VTABLE_MARKED (binfo))
+ {
+ prepare_fresh_vtable (binfo, t);
+ override_one_vtable (binfo, old, t);
+ return;
+ }
+ TREE_VALUE (virtuals) = TREE_VALUE (old_virtuals);
+ }
+ else if (choose == NEITHER)
+ {
+ TREE_VALUE (virtuals) = TREE_VALUE (old_virtuals);
+ }
+ }
+ else
+ {
+ choose = NEITHER;
+ if (! BINFO_NEW_VTABLE_MARKED (binfo))
+ {
+ prepare_fresh_vtable (binfo, t);
+ override_one_vtable (binfo, old, t);
+ return;
+ }
+ {
+ /* This MUST be overridden, or the class is ill-formed. */
+ /* For now, we just make it abstract. */
+ tree fndecl = TREE_OPERAND (FNADDR_FROM_VTABLE_ENTRY (TREE_VALUE (virtuals)), 0);
+ tree vfn;
+
+ fndecl = copy_node (fndecl);
+ copy_lang_decl (fndecl);
+ DECL_ABSTRACT_VIRTUAL_P (fndecl) = 1;
+ /* Make sure we search for it later. */
+ if (! CLASSTYPE_ABSTRACT_VIRTUALS (t))
+ CLASSTYPE_ABSTRACT_VIRTUALS (t) = error_mark_node;
+
+ vfn = build1 (ADDR_EXPR, vfunc_ptr_type_node, fndecl);
+ TREE_CONSTANT (vfn) = 1;
+
+ /* We can use integer_zero_node, as we will core dump
+ if this is used anyway. */
+ TREE_VALUE (virtuals) = build_vtable_entry (integer_zero_node, vfn);
+ }
+ }
+ virtuals = TREE_CHAIN (virtuals);
+ old_virtuals = TREE_CHAIN (old_virtuals);
+ }
+
+ /* Let's reuse the old vtable. */
+ if (choose == REUSE_OLD)
+ {
+ BINFO_VTABLE (binfo) = BINFO_VTABLE (old);
+ BINFO_VIRTUALS (binfo) = BINFO_VIRTUALS (old);
+ }
+}
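+
+/* The ill-formed case above, sketched:
+
+ struct V { virtual void f (); };
+ struct A : virtual public V { void f (); };
+ struct B : virtual public V { void f (); };
+ struct C : public A, public B { }; // neither A::f nor B::f
+ // dominates; C must override f
+
+ Unless C declares its own f, the slot is filled with an abstract
+ copy here and CLASSTYPE_ABSTRACT_VIRTUALS (C) is flagged so the
+ class is treated as abstract. */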
+
+/* Merge in overrides for virtual bases.
+ BINFO is the hierarchy we want to modify, and OLD has the potential
+ overrides. */
+static void
+merge_overrides (binfo, old, do_self, t)
+ tree binfo, old, t;
+ int do_self;
+{
+ tree binfos = BINFO_BASETYPES (binfo);
+ tree old_binfos = BINFO_BASETYPES (old);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ /* Should we use something besides CLASSTYPE_VFIELDS? */
+ if (do_self && CLASSTYPE_VFIELDS (BINFO_TYPE (binfo)))
+ {
+ override_one_vtable (binfo, old, t);
+ }
+
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ tree old_base_binfo = TREE_VEC_ELT (old_binfos, i);
+ int is_not_base_vtable =
+ i != CLASSTYPE_VFIELD_PARENT (BINFO_TYPE (binfo));
+ if (! TREE_VIA_VIRTUAL (base_binfo))
+ merge_overrides (base_binfo, old_base_binfo, is_not_base_vtable, t);
+ }
+}
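+
+/* A sketch of the traversal: after layout, finish_struct walks each
+ direct base and, for every virtual base it contains, calls
+ merge_overrides on the derived class's copy of that virtual base.
+ Each call applies override_one_vtable wherever there are vfields
+ and recurses through non-virtual bases only -- TREE_VIA_VIRTUAL
+ bases are skipped above, since the shared virtual bases are merged
+ from the top level. */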
+
+/* Create a RECORD_TYPE or UNION_TYPE node for a C struct or union declaration
+ (or C++ class declaration).
+
+ For C++, we must handle the building of derived classes.
+ Also, C++ allows static class members. The way that this is
+ handled is to keep the field name where it is (as the DECL_NAME
+ of the field), and place the overloaded decl in the DECL_FIELD_BITPOS
+ of the field. layout_record and layout_union will know about this.
+
+ More C++ hair: inline functions have text in their
+ DECL_PENDING_INLINE_INFO nodes which must somehow be parsed into
+ meaningful tree structure. After the struct has been laid out, set
+ things up so that this can happen.
+
+ And still more: virtual functions. In the case of single inheritance,
+ when a new virtual function is seen which redefines a virtual function
+ from the base class, the new virtual function is placed into
+ the virtual function table at exactly the same address that
+ it had in the base class. When this is extended to multiple
+ inheritance, the same thing happens, except that multiple virtual
+ function tables must be maintained. The first virtual function
+ table is treated in exactly the same way as in the case of single
+ inheritance. Additional virtual function tables have different
+ DELTAs, which tell how to adjust `this' to point to the right thing.
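+
+ For instance (hypothetical types), given `struct C : A, B', a call
+ through a `B *' that actually points into a C must first adjust
+ `this' by the offset of the B subobject within C; that offset is
+ the DELTA stored with the vtable entry (cf. build_vtable_entry).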
+
+ LIST_OF_FIELDLISTS is just that. The elements of the list are
+ TREE_LIST elements, whose TREE_PURPOSE field tells what access
+ the list has, and the TREE_VALUE slot gives the actual fields.
+
+ If flag_all_virtual == 1, then we lay all functions into
+ the virtual function table, as though they were declared
+ virtual. Constructors are not placed in the virtual function table.
+
+ If flag_all_virtual == 2, then we lay all functions into
+ the virtual function table, such that virtual functions
+ occupy a space by themselves, and then all functions
+ of the class occupy a space by themselves. This is illustrated
+ in the following diagram:
+
+ class A; class B : A;
+
+ Class A's vtbl: Class B's vtbl:
+ --------------------------------------------------------------------
+ | A's virtual functions| | B's virtual functions |
+ | | | (may inherit some from A). |
+ --------------------------------------------------------------------
+ | All of A's functions | | All of A's functions |
+ | (such as a->A::f). | | (such as b->A::f) |
+ --------------------------------------------------------------------
+ | B's new virtual functions |
+ | (not defined in A.) |
+ -------------------------------
+ | All of B's functions |
+ | (such as b->B::f) |
+ -------------------------------
+
+ This allows the program to make references to any function, virtual
+ or otherwise, in a type-consistent manner. */
+
+tree
+finish_struct (t, list_of_fieldlists, warn_anon)
+ tree t;
+ tree list_of_fieldlists;
+ int warn_anon;
+{
+ extern int interface_only, interface_unknown;
+
+ int old;
+ int round_up_size = 1;
+
+ enum tree_code code = TREE_CODE (t);
+ register tree x, last_x, method_vec;
+ int needs_virtual_dtor;
+ tree name = TYPE_NAME (t), fields, fn_fields, *tail;
+ tree *tail_user_methods = &CLASSTYPE_METHODS (t);
+ enum access_type access;
+ int all_virtual;
+ int has_virtual;
+ int max_has_virtual;
+ tree pending_virtuals = NULL_TREE;
+ tree abstract_virtuals = NULL_TREE;
+ tree vfield;
+ tree vfields;
+ int cant_have_default_ctor;
+ int cant_have_const_ctor;
+ int cant_synth_copy_ctor;
+ int cant_synth_asn_ref;
+ int no_const_asn_ref;
+
+ /* The index of the first base class which has virtual
+ functions. Only applied to non-virtual baseclasses. */
+ int first_vfn_base_index;
+
+ int n_baseclasses;
+ int any_default_members = 0;
+ int const_sans_init = 0;
+ int ref_sans_init = 0;
+ int nonprivate_method = 0;
+ tree t_binfo = TYPE_BINFO (t);
+ tree access_decls = NULL_TREE;
+
+ if (TREE_CODE (name) == TYPE_DECL)
+ {
+#if 0 /* Maybe later. -jason */
+ struct tinst_level *til = tinst_for_decl();
+
+ if (til)
+ {
+ DECL_SOURCE_FILE (name) = til->file;
+ if (DECL_SOURCE_LINE (name))
+ DECL_SOURCE_LINE (name) = til->line;
+ }
+ else
+#endif
+ {
+ extern int lineno;
+
+ DECL_SOURCE_FILE (name) = input_filename;
+ /* For TYPE_DECLs that are not typedefs (those marked with a line
+ number of zero), we don't want to mark them as real typedefs.
+ If this fails, one needs to make sure real typedefs have a
+ previous line number, even if it is wrong; that way the code
+ below will fill in the right line number. (mrs) */
+ if (DECL_SOURCE_LINE (name))
+ DECL_SOURCE_LINE (name) = lineno;
+ }
+ name = DECL_NAME (name);
+ }
+
+ if (warn_anon && code != UNION_TYPE && ANON_AGGRNAME_P (name))
+ pedwarn ("anonymous class type not used to declare any objects");
+
+ if (TYPE_SIZE (t))
+ {
+ if (IS_AGGR_TYPE (t))
+ cp_error ("redefinition of `%#T'", t);
+ else
+ my_friendly_abort (172);
+ popclass (0);
+ return t;
+ }
+
+ /* Append the fields we need for constructing signature tables. */
+ if (IS_SIGNATURE (t))
+ append_signature_fields (list_of_fieldlists);
+
+ GNU_xref_decl (current_function_decl, t);
+
+ /* If this type was previously laid out as a forward reference,
+ make sure we lay it out again. */
+
+ TYPE_SIZE (t) = NULL_TREE;
+ CLASSTYPE_GOT_SEMICOLON (t) = 0;
+
+ /* A signature type will contain the fields of the signature table.
+ Therefore, it's not only an interface. */
+ if (IS_SIGNATURE (t))
+ {
+ CLASSTYPE_INTERFACE_ONLY (t) = 0;
+ SET_CLASSTYPE_INTERFACE_KNOWN (t);
+ }
+ else
+ {
+ CLASSTYPE_INTERFACE_ONLY (t) = interface_only;
+ SET_CLASSTYPE_INTERFACE_UNKNOWN_X (t, interface_unknown);
+ }
+
+ if (flag_dossier)
+ build_t_desc (t, 0);
+
+ TYPE_BINFO (t) = NULL_TREE;
+
+ old = suspend_momentary ();
+
+ /* Install struct as DECL_FIELD_CONTEXT of each field decl.
+ Also process specified field sizes.
+ Set DECL_FIELD_SIZE to the specified size, or 0 if none specified.
+ The specified size is found in the DECL_INITIAL.
+ Store 0 there, except for ": 0" fields (so we can find them
+ and delete them, below). */
+
+ if (t_binfo && BINFO_BASETYPES (t_binfo))
+ n_baseclasses = TREE_VEC_LENGTH (BINFO_BASETYPES (t_binfo));
+ else
+ n_baseclasses = 0;
+
+ if (n_baseclasses > 0)
+ {
+ struct base_info base_info;
+
+ /* If using multiple inheritance, this may cause variants of our
+ basetypes to be used (instead of their canonical forms). */
+ fields = layout_basetypes (t, BINFO_BASETYPES (t_binfo));
+ last_x = tree_last (fields);
+
+ first_vfn_base_index = finish_base_struct (t, &base_info, t_binfo);
+ /* Remember where we got our vfield from */
+ CLASSTYPE_VFIELD_PARENT (t) = first_vfn_base_index;
+ has_virtual = base_info.has_virtual;
+ max_has_virtual = base_info.max_has_virtual;
+ CLASSTYPE_N_SUPERCLASSES (t) += base_info.n_ancestors;
+ vfield = base_info.vfield;
+ vfields = base_info.vfields;
+ cant_have_default_ctor = base_info.cant_have_default_ctor;
+ cant_have_const_ctor = base_info.cant_have_const_ctor;
+ cant_synth_copy_ctor = base_info.cant_synth_copy_ctor;
+ cant_synth_asn_ref = base_info.cant_synth_asn_ref;
+ no_const_asn_ref = base_info.no_const_asn_ref;
+ needs_virtual_dtor = base_info.needs_virtual_dtor;
+ n_baseclasses = TREE_VEC_LENGTH (BINFO_BASETYPES (t_binfo));
+ }
+ else
+ {
+ first_vfn_base_index = -1;
+ has_virtual = 0;
+ max_has_virtual = has_virtual;
+ vfield = NULL_TREE;
+ vfields = NULL_TREE;
+ fields = NULL_TREE;
+ last_x = NULL_TREE;
+ cant_have_default_ctor = 0;
+ cant_have_const_ctor = 0;
+ cant_synth_copy_ctor = 0;
+ cant_synth_asn_ref = 0;
+ no_const_asn_ref = 0;
+ needs_virtual_dtor = 0;
+ }
+
+ if (write_virtuals == 3 && CLASSTYPE_INTERFACE_KNOWN (t)
+ && ! IS_SIGNATURE (t))
+ {
+ CLASSTYPE_INTERFACE_ONLY (t) = interface_only;
+ CLASSTYPE_VTABLE_NEEDS_WRITING (t) = ! interface_only;
+ }
+ else if (IS_SIGNATURE (t))
+ CLASSTYPE_VTABLE_NEEDS_WRITING (t) = 0;
+
+ /* These are approximations which may later be modified. They are
+ needed at this point to make add_virtual_function
+ and modify_vtable_entries work. */
+ TREE_CHAIN (t_binfo) = TYPE_BINFO (t);
+ TYPE_BINFO (t) = t_binfo;
+ CLASSTYPE_VFIELDS (t) = vfields;
+ CLASSTYPE_VFIELD (t) = vfield;
+
+ tail = &fn_fields;
+ if (last_x && list_of_fieldlists)
+ TREE_CHAIN (last_x) = TREE_VALUE (list_of_fieldlists);
+
+ if (IS_SIGNATURE (t))
+ all_virtual = 0;
+ else if (flag_all_virtual == 1 && TYPE_OVERLOADS_METHOD_CALL_EXPR (t))
+ all_virtual = 1;
+ else
+ all_virtual = 0;
+
+ /* For signatures, we made all methods `public' in the parser and
+ reported an error if an access specifier was used. */
+ if (CLASSTYPE_DECLARED_CLASS (t) == 0)
+ {
+ nonprivate_method = 1;
+ if (list_of_fieldlists
+ && TREE_PURPOSE (list_of_fieldlists) == (tree)access_default)
+ TREE_PURPOSE (list_of_fieldlists) = (tree)access_public;
+ }
+ else if (list_of_fieldlists
+ && TREE_PURPOSE (list_of_fieldlists) == (tree)access_default)
+ TREE_PURPOSE (list_of_fieldlists) = (tree)access_private;
+
+ while (list_of_fieldlists)
+ {
+ access = (enum access_type)TREE_PURPOSE (list_of_fieldlists);
+
+ for (x = TREE_VALUE (list_of_fieldlists); x; x = TREE_CHAIN (x))
+ {
+ TREE_PRIVATE (x) = access == access_private;
+ TREE_PROTECTED (x) = access == access_protected;
+ GNU_xref_member (current_class_name, x);
+
+ if (TREE_CODE (x) == TYPE_DECL)
+ {
+ /* Make sure we set this up. In find_scoped_type, it explicitly
+ looks for a TYPE_DECL in the TYPE_FIELDS list. If we don't
+ do this here, we'll miss including this TYPE_DECL in the
+ list. */
+ if (! fields)
+ fields = x;
+ last_x = x;
+ continue;
+ }
+
+ /* Check for inconsistent use of this name in the class body.
+ Enums, types and static vars have already been checked. */
+ if (TREE_CODE (x) != CONST_DECL && TREE_CODE (x) != VAR_DECL)
+ {
+ tree name = DECL_NAME (x);
+ tree icv;
+
+ /* Don't get confused by access decls. */
+ if (name && TREE_CODE (name) == IDENTIFIER_NODE)
+ icv = IDENTIFIER_CLASS_VALUE (name);
+ else
+ icv = NULL_TREE;
+
+ if (icv
+ /* Don't complain about constructors. */
+ && name != constructor_name (current_class_type)
+ /* Or inherited names. */
+ && id_in_current_class (name)
+ /* Or shadowed tags. */
+ && !(TREE_CODE (icv) == TYPE_DECL
+ && DECL_CONTEXT (icv) == t))
+ {
+ cp_error_at ("declaration of identifier `%D' as `%+#D'",
+ name, x);
+ cp_error_at ("conflicts with other use in class as `%#D'",
+ icv);
+ }
+ }
+
+ if (TREE_CODE (x) == FUNCTION_DECL)
+ {
+ nonprivate_method |= ! TREE_PRIVATE (x);
+
+ /* If this was an evil function, don't keep it in class. */
+ if (IDENTIFIER_ERROR_LOCUS (DECL_ASSEMBLER_NAME (x)))
+ continue;
+
+ if (last_x)
+ TREE_CHAIN (last_x) = TREE_CHAIN (x);
+ /* Link x onto end of fn_fields and CLASSTYPE_METHODS. */
+ *tail = x;
+ tail = &TREE_CHAIN (x);
+ *tail_user_methods = x;
+ tail_user_methods = &DECL_NEXT_METHOD (x);
+
+ DECL_CLASS_CONTEXT (x) = t;
+
+ DECL_FIELD_SIZE (x) = 0;
+
+ /* The name of the field is the original field name.
+ Save this in the auxiliary field for later overloading. */
+ if (DECL_VINDEX (x)
+ || (all_virtual == 1 && ! DECL_CONSTRUCTOR_P (x)))
+ {
+ pending_virtuals = add_virtual_function (pending_virtuals,
+ &has_virtual, x, t);
+ if (DECL_ABSTRACT_VIRTUAL_P (x))
+ abstract_virtuals = tree_cons (NULL_TREE, x, abstract_virtuals);
+ }
+ continue;
+ }
+
+ /* Handle access declarations. */
+ if (DECL_NAME (x) && TREE_CODE (DECL_NAME (x)) == SCOPE_REF)
+ {
+ tree fdecl = TREE_OPERAND (DECL_NAME (x), 1);
+
+ if (last_x)
+ TREE_CHAIN (last_x) = TREE_CHAIN (x);
+ access_decls = tree_cons ((tree) access, fdecl, access_decls);
+ continue;
+ }
+
+ /* If we've gotten this far, it's a data member, possibly static,
+ or an enumerator. */
+
+ DECL_FIELD_CONTEXT (x) = t;
+
+ /* ``A local class cannot have static data members.'' ARM 9.4 */
+ if (current_function_decl && TREE_STATIC (x))
+ cp_error_at ("field `%D' in local class cannot be static", x);
+
+ /* Perform error checking that did not get done in
+ grokdeclarator. */
+ if (TREE_CODE (TREE_TYPE (x)) == FUNCTION_TYPE)
+ {
+ cp_error_at ("field `%D' invalidly declared function type",
+ x);
+ TREE_TYPE (x) = build_pointer_type (TREE_TYPE (x));
+ }
+ else if (TREE_CODE (TREE_TYPE (x)) == METHOD_TYPE)
+ {
+ cp_error_at ("field `%D' invalidly declared method type", x);
+ TREE_TYPE (x) = build_pointer_type (TREE_TYPE (x));
+ }
+ else if (TREE_CODE (TREE_TYPE (x)) == OFFSET_TYPE)
+ {
+ cp_error_at ("field `%D' invalidly declared offset type", x);
+ TREE_TYPE (x) = build_pointer_type (TREE_TYPE (x));
+ }
+
+ if (TREE_TYPE (x) == error_mark_node)
+ continue;
+
+ if (! fields)
+ fields = x;
+ last_x = x;
+
+ DECL_FIELD_SIZE (x) = 0;
+
+ /* When this goes into scope, it will be a non-local reference. */
+ DECL_NONLOCAL (x) = 1;
+
+ if (TREE_CODE (x) == CONST_DECL)
+ continue;
+
+ if (TREE_CODE (x) == VAR_DECL)
+ {
+ if (TREE_CODE (t) == UNION_TYPE)
+ /* Unions cannot have static members. */
+ cp_error_at ("field `%D' declared static in union", x);
+
+ continue;
+ }
+
+ /* Now it can only be a FIELD_DECL. */
+
+ /* If this is of reference type, check if it needs an init.
+ Also do a little ANSI jig if necessary. */
+ if (TREE_CODE (TREE_TYPE (x)) == REFERENCE_TYPE)
+ {
+ if (DECL_INITIAL (x) == NULL_TREE)
+ ref_sans_init = 1;
+
+ /* ARM $12.6.2: [A member initializer list] (or, for an
+ aggregate, initialization by a brace-enclosed list) is the
+ only way to initialize nonstatic const and reference
+ members. */
+ cant_synth_asn_ref = 1;
+ cant_have_default_ctor = 1;
+ TYPE_HAS_COMPLEX_INIT_REF (t) = 1;
+
+ if (! TYPE_HAS_CONSTRUCTOR (t) && extra_warnings)
+ {
+ if (DECL_NAME (x))
+ cp_warning_at ("non-static reference `%#D' in class without a constructor", x);
+ else
+ cp_warning_at ("non-static reference in class without a constructor", x);
+ }
+ }
+
+ /* If any field is const, the structure type is pseudo-const. */
+ if (TREE_READONLY (x))
+ {
+ C_TYPE_FIELDS_READONLY (t) = 1;
+ if (DECL_INITIAL (x) == NULL_TREE)
+ const_sans_init = 1;
+
+ /* ARM $12.6.2: [A member initializer list] (or, for an
+ aggregate, initialization by a brace-enclosed list) is the
+ only way to initialize nonstatic const and reference
+ members. */
+ cant_synth_asn_ref = 1;
+ cant_have_default_ctor = 1;
+ TYPE_HAS_COMPLEX_INIT_REF (t) = 1;
+
+ if (! TYPE_HAS_CONSTRUCTOR (t) && !IS_SIGNATURE (t)
+ && extra_warnings)
+ {
+ if (DECL_NAME (x))
+ cp_warning_at ("non-static const member `%#D' in class without a constructor", x);
+ else
+ cp_warning_at ("non-static const member in class without a constructor", x);
+ }
+ }
+ else
+ {
+ /* A field that is pseudo-const makes the structure
+ likewise. */
+ tree t1 = TREE_TYPE (x);
+ while (TREE_CODE (t1) == ARRAY_TYPE)
+ t1 = TREE_TYPE (t1);
+ if (IS_AGGR_TYPE (t1))
+ {
+ if (C_TYPE_FIELDS_READONLY (t1))
+ C_TYPE_FIELDS_READONLY (t) = 1;
+ if (CLASSTYPE_READONLY_FIELDS_NEED_INIT (t1))
+ const_sans_init = 1;
+ }
+ }
+
+ /* We set DECL_BIT_FIELD tentatively in grokbitfield.
+ If the type and width are valid, we'll keep it set.
+ Otherwise, the flag is cleared. */
+ if (DECL_BIT_FIELD (x))
+ {
+ DECL_BIT_FIELD (x) = 0;
+ /* Checking for an invalid bit-field size was done by grokfield. */
+ /* Detect invalid bit-field type. */
+ if (DECL_INITIAL (x)
+ && ! INTEGRAL_TYPE_P (TREE_TYPE (x)))
+ {
+ cp_error_at ("bit-field `%#D' with non-integral type", x);
+ DECL_INITIAL (x) = NULL;
+ }
+
+ /* Detect and ignore out of range field width. */
+ if (DECL_INITIAL (x))
+ {
+ register int width = TREE_INT_CST_LOW (DECL_INITIAL (x));
+
+ if (width < 0)
+ {
+ DECL_INITIAL (x) = NULL;
+ cp_error_at ("negative width in bit-field `%D'", x);
+ }
+ else if (width == 0 && DECL_NAME (x) != 0)
+ {
+ DECL_INITIAL (x) = NULL;
+ cp_error_at ("zero width for bit-field `%D'", x);
+ }
+ else if ((unsigned)width > TYPE_PRECISION (TREE_TYPE (x)))
+ {
+ DECL_INITIAL (x) = NULL;
+ cp_error_at ("width of `%D' exceeds its type", x);
+ }
+ }
+
+ /* Process valid field width. */
+ if (DECL_INITIAL (x))
+ {
+ register int width = TREE_INT_CST_LOW (DECL_INITIAL (x));
+
+ if (width == 0)
+ {
+#ifdef EMPTY_FIELD_BOUNDARY
+ /* field size 0 => mark following field as "aligned" */
+ if (TREE_CHAIN (x))
+ DECL_ALIGN (TREE_CHAIN (x))
+ = MAX (DECL_ALIGN (TREE_CHAIN (x)), EMPTY_FIELD_BOUNDARY);
+ /* field of size 0 at the end => round up the size. */
+ else
+ round_up_size = EMPTY_FIELD_BOUNDARY;
+#endif
+#ifdef PCC_BITFIELD_TYPE_MATTERS
+ DECL_ALIGN (x) = MAX (DECL_ALIGN (x),
+ TYPE_ALIGN (TREE_TYPE (x)));
+#endif
+ }
+ else
+ {
+ DECL_INITIAL (x) = NULL_TREE;
+ DECL_FIELD_SIZE (x) = width;
+ DECL_BIT_FIELD (x) = 1;
+ /* Traditionally a bit field is unsigned
+ even if declared signed. */
+ if (flag_traditional
+ && TREE_CODE (TREE_TYPE (x)) == INTEGER_TYPE)
+ TREE_TYPE (x) = unsigned_type_node;
+ }
+ }
+ else
+ /* Non-bit-fields are aligned for their type. */
+ DECL_ALIGN (x) = MAX (DECL_ALIGN (x), TYPE_ALIGN (TREE_TYPE (x)));
+ }
+ else
+ {
+ tree type = TREE_TYPE (x);
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
+
+ if (TYPE_LANG_SPECIFIC (type) && ! ANON_UNION_P (x)
+ && ! TYPE_PTRMEMFUNC_P (type))
+ {
+ /* Never let anything with uninheritable virtuals
+ make it through without complaint. */
+ if (CLASSTYPE_ABSTRACT_VIRTUALS (type))
+ abstract_virtuals_error (x, type);
+
+ /* Don't let signatures make it through either. */
+ if (IS_SIGNATURE (type))
+ signature_error (x, type);
+
+ if (code == UNION_TYPE)
+ {
+ char *fie = NULL;
+ if (TYPE_NEEDS_CONSTRUCTING (type))
+ fie = "constructor";
+ else if (TYPE_NEEDS_DESTRUCTOR (type))
+ fie = "destructor";
+ else if (TYPE_HAS_REAL_ASSIGNMENT (type))
+ fie = "assignment operator";
+ if (fie)
+ cp_error_at ("member `%#D' with %s not allowed in union", x,
+ fie);
+ }
+ else
+ {
+ TYPE_NEEDS_CONSTRUCTING (t) |= TYPE_NEEDS_CONSTRUCTING (type);
+ TYPE_NEEDS_DESTRUCTOR (t) |= TYPE_NEEDS_DESTRUCTOR (type);
+ TYPE_HAS_COMPLEX_ASSIGN_REF (t) |= TYPE_HAS_COMPLEX_ASSIGN_REF (type);
+ TYPE_HAS_COMPLEX_INIT_REF (t)
+ |= (TYPE_HAS_COMPLEX_INIT_REF (type)
+ || TYPE_NEEDS_CONSTRUCTING (type));
+ }
+
+ if (! TYPE_HAS_INIT_REF (type)
+ || (TYPE_HAS_NONPUBLIC_CTOR (type)
+ && ! is_friend (t, type)))
+ cant_synth_copy_ctor = 1;
+ else if (!TYPE_HAS_CONST_INIT_REF (type))
+ cant_have_const_ctor = 1;
+
+ if (! TYPE_HAS_ASSIGN_REF (type)
+ || (TYPE_HAS_NONPUBLIC_ASSIGN_REF (type)
+ && ! is_friend (t, type)))
+ cant_synth_asn_ref = 1;
+ else if (!TYPE_HAS_CONST_ASSIGN_REF (type))
+ no_const_asn_ref = 1;
+
+ if (TYPE_HAS_CONSTRUCTOR (type)
+ && ! TYPE_HAS_DEFAULT_CONSTRUCTOR (type))
+ {
+ cant_have_default_ctor = 1;
+ if (! TYPE_HAS_CONSTRUCTOR (t))
+ {
+ if (DECL_NAME (x))
+ cp_pedwarn_at ("member `%#D' with only non-default constructor", x);
+ else
+ cp_pedwarn_at ("member with only non-default constructor", x);
+ cp_pedwarn_at ("in class without a constructor",
+ x);
+ }
+ }
+ }
+ if (DECL_INITIAL (x) != NULL_TREE)
+ {
+ /* `build_class_init_list' does not recognize
+ non-FIELD_DECLs. */
+ if (code == UNION_TYPE && any_default_members != 0)
+ cp_error_at ("multiple fields in union `%T' initialized");
+ any_default_members = 1;
+ }
+ }
+ }
+ list_of_fieldlists = TREE_CHAIN (list_of_fieldlists);
+ /* link the tail while we have it! */
+ if (last_x)
+ {
+ TREE_CHAIN (last_x) = NULL_TREE;
+
+ if (list_of_fieldlists
+ && TREE_VALUE (list_of_fieldlists)
+ && TREE_CODE (TREE_VALUE (list_of_fieldlists)) != FUNCTION_DECL)
+ TREE_CHAIN (last_x) = TREE_VALUE (list_of_fieldlists);
+ }
+ }
+
+ /* If this type has any constant members which did not come
+ with their own initialization, mark that fact here. It is
+ not an error here, since such types can be saved either by their
+ constructors, or by fortuitous initialization. */
+ CLASSTYPE_READONLY_FIELDS_NEED_INIT (t) = const_sans_init;
+ CLASSTYPE_REF_FIELDS_NEED_INIT (t) = ref_sans_init;
+ CLASSTYPE_ABSTRACT_VIRTUALS (t) = abstract_virtuals;
+
+ if (TYPE_NEEDS_DESTRUCTOR (t) && !TYPE_HAS_DESTRUCTOR (t)
+ && !IS_SIGNATURE (t))
+ {
+ /* Here we must cons up a destructor on the fly. */
+ tree dtor = cons_up_default_function (t, name, fields,
+ needs_virtual_dtor != 0);
+
+ /* If we couldn't make it work, then pretend we didn't need it. */
+ if (dtor == void_type_node)
+ TYPE_NEEDS_DESTRUCTOR (t) = 0;
+ else
+ {
+ /* Link dtor onto end of fn_fields. */
+ *tail = dtor;
+ tail = &TREE_CHAIN (dtor);
+
+ if (DECL_VINDEX (dtor) == NULL_TREE
+ && ! CLASSTYPE_DECLARED_EXCEPTION (t)
+ && (needs_virtual_dtor
+ || pending_virtuals != NULL_TREE
+ || pending_hard_virtuals != NULL_TREE))
+ DECL_VINDEX (dtor) = error_mark_node;
+ if (DECL_VINDEX (dtor))
+ pending_virtuals = add_virtual_function (pending_virtuals,
+ &has_virtual, dtor, t);
+ nonprivate_method = 1;
+ }
+ }
+
+ *tail = NULL_TREE;
+ *tail_user_methods = NULL_TREE;
+
+ TYPE_NEEDS_DESTRUCTOR (t) |= TYPE_HAS_DESTRUCTOR (t);
+
+ /* Synthesize any needed methods. Note that methods will be synthesized
+ for anonymous unions; grok_x_components undoes that. */
+
+ if (! fn_fields)
+ nonprivate_method = 1;
+
+ TYPE_HAS_COMPLEX_INIT_REF (t)
+ |= (TYPE_HAS_INIT_REF (t) || TYPE_USES_VIRTUAL_BASECLASSES (t)
+ || has_virtual || any_default_members || first_vfn_base_index >= 0);
+ TYPE_NEEDS_CONSTRUCTING (t)
+ |= (TYPE_HAS_CONSTRUCTOR (t) || TYPE_USES_VIRTUAL_BASECLASSES (t)
+ || has_virtual || any_default_members || first_vfn_base_index >= 0);
+
+ /* ARM $12.1: A default constructor will be generated for a class X
+ only if no constructor has been declared for class X. So we
+ check TYPE_HAS_CONSTRUCTOR also, to make sure we don't generate
+ one if they declared a constructor in this class. */
+ if (! TYPE_HAS_CONSTRUCTOR (t) && ! cant_have_default_ctor
+ && ! IS_SIGNATURE (t))
+ {
+ tree default_fn = cons_up_default_function (t, name, fields, 2);
+ TREE_CHAIN (default_fn) = fn_fields;
+ fn_fields = default_fn;
+ }
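+
+ /* For example, a hypothetical `struct S { S (int); };' declares a
+ constructor, so no S::S () is synthesized and `S s;' is
+ ill-formed, while a plain `struct P { int i; };' does get the
+ synthesized default constructor. */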
+
+ /* Create default copy constructor, if needed. */
+ if (! TYPE_HAS_INIT_REF (t) && ! cant_synth_copy_ctor
+ && ! IS_SIGNATURE (t))
+ {
+ /* ARM 12.18: You get either X(X&) or X(const X&), but
+ not both. --Chip */
+ tree default_fn =
+ cons_up_default_function (t, name, fields,
+ cant_have_const_ctor ? 4 : 3);
+ TREE_CHAIN (default_fn) = fn_fields;
+ fn_fields = default_fn;
+ }
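+
+ /* Illustratively: if some member type only declares X (X&), the
+ copy constructor synthesized here takes a non-const reference as
+ well (the cant_have_const_ctor case); otherwise it takes a const
+ reference, matching the ARM 12.18 either-or rule cited above. */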
+
+ TYPE_HAS_REAL_ASSIGNMENT (t) |= TYPE_HAS_ASSIGNMENT (t);
+ TYPE_HAS_REAL_ASSIGN_REF (t) |= TYPE_HAS_ASSIGN_REF (t);
+ TYPE_HAS_COMPLEX_ASSIGN_REF (t)
+ |= (TYPE_HAS_ASSIGN_REF (t) || TYPE_USES_VIRTUAL_BASECLASSES (t)
+ || has_virtual || first_vfn_base_index >= 0);
+
+ if (! TYPE_HAS_ASSIGN_REF (t) && ! cant_synth_asn_ref
+ && ! IS_SIGNATURE (t))
+ {
+ tree default_fn =
+ cons_up_default_function (t, name, fields,
+ no_const_asn_ref ? 6 : 5);
+ TREE_CHAIN (default_fn) = fn_fields;
+ fn_fields = default_fn;
+ }
+
+ if (fn_fields)
+ {
+ method_vec = finish_struct_methods (t, fn_fields, nonprivate_method);
+
+ if (TYPE_HAS_CONSTRUCTOR (t)
+ && ! CLASSTYPE_DECLARED_EXCEPTION (t)
+ && CLASSTYPE_FRIEND_CLASSES (t) == NULL_TREE
+ && DECL_FRIENDLIST (TYPE_NAME (t)) == NULL_TREE)
+ {
+ int nonprivate_ctor = 0;
+ tree ctor;
+
+ for (ctor = TREE_VEC_ELT (method_vec, 0);
+ ctor;
+ ctor = DECL_CHAIN (ctor))
+ if (! TREE_PRIVATE (ctor))
+ {
+ nonprivate_ctor = 1;
+ break;
+ }
+
+ if (nonprivate_ctor == 0 && warn_ctor_dtor_privacy)
+ cp_warning ("`%#T' only defines private constructors and has no friends",
+ t);
+ }
+ }
+ else
+ {
+ method_vec = 0;
+
+ /* Just in case these got accidentally
+ filled in by syntax errors. */
+ TYPE_HAS_CONSTRUCTOR (t) = 0;
+ TYPE_HAS_DESTRUCTOR (t) = 0;
+ }
+
+ {
+ int n_methods = method_vec ? TREE_VEC_LENGTH (method_vec) : 0;
+
+ for (access_decls = nreverse (access_decls); access_decls;
+ access_decls = TREE_CHAIN (access_decls))
+ {
+ tree fdecl = TREE_VALUE (access_decls);
+ tree flist = NULL_TREE;
+ tree name;
+ enum access_type access = (enum access_type)TREE_PURPOSE(access_decls);
+ int i = 0;
+ tree tmp;
+
+ if (TREE_CODE (fdecl) == TREE_LIST)
+ {
+ flist = fdecl;
+ fdecl = TREE_VALUE (flist);
+ }
+
+ name = DECL_NAME (fdecl);
+
+ for (; i < n_methods; i++)
+ if (DECL_NAME (TREE_VEC_ELT (method_vec, i)) == name)
+ {
+ cp_error ("cannot adjust access to `%#D' in `%#T'", fdecl, t);
+ cp_error_at (" because of local method `%#D' with same name",
+ TREE_VEC_ELT (method_vec, i));
+ fdecl = NULL_TREE;
+ break;
+ }
+
+ if (! fdecl)
+ continue;
+
+ for (tmp = fields; tmp; tmp = TREE_CHAIN (tmp))
+ if (DECL_NAME (tmp) == name)
+ {
+ cp_error ("cannot adjust access to `%#D' in `%#T'", fdecl, t);
+ cp_error_at (" because of local field `%#D' with same name", tmp);
+ fdecl = NULL_TREE;
+ break;
+ }
+
+ if (!fdecl)
+ continue;
+
+ /* Make type T see field decl FDECL with access ACCESS. */
+ if (flist)
+ {
+ fdecl = TREE_VALUE (flist);
+ while (fdecl)
+ {
+ if (alter_access (t, fdecl, access) == 0)
+ break;
+ fdecl = DECL_CHAIN (fdecl);
+ }
+ }
+ else
+ alter_access (t, fdecl, access);
+ }
+
+ }
+
+ if (vfield == NULL_TREE && has_virtual)
+ {
+ /* We build this decl with ptr_type_node, and
+ change the type when we know what it should be. */
+ vfield = build_lang_field_decl (FIELD_DECL, get_vfield_name (t),
+ ptr_type_node);
+ /* If you change any of the below, take a look at all the
+ other VFIELD_BASEs and VTABLE_BASEs in the code, and change
+ them too. */
+ DECL_ASSEMBLER_NAME (vfield) = get_identifier (VFIELD_BASE);
+ CLASSTYPE_VFIELD (t) = vfield;
+ DECL_VIRTUAL_P (vfield) = 1;
+ DECL_FIELD_CONTEXT (vfield) = t;
+ DECL_CLASS_CONTEXT (vfield) = t;
+ DECL_FCONTEXT (vfield) = t;
+ DECL_FIELD_SIZE (vfield) = 0;
+ DECL_ALIGN (vfield) = TYPE_ALIGN (ptr_type_node);
+ if (CLASSTYPE_DOSSIER (t))
+ {
+ /* vfield is always first entry in structure. */
+ TREE_CHAIN (vfield) = fields;
+ fields = vfield;
+ }
+ else if (last_x)
+ {
+ my_friendly_assert (TREE_CHAIN (last_x) == NULL_TREE, 175);
+ TREE_CHAIN (last_x) = vfield;
+ last_x = vfield;
+ }
+ else
+ fields = vfield;
+ vfields = chainon (vfields, CLASSTYPE_AS_LIST (t));
+ }
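+
+ /* E.g. for a hypothetical `struct A { virtual void f (); int i; };'
+ the vptr built here is chained after the last data field, giving
+ roughly { i, vptr }, with its source name from get_vfield_name and
+ its assembler name from VFIELD_BASE; when CLASSTYPE_DOSSIER (t) is
+ set it must instead be the first field, { vptr, i }. */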
+
+ /* Now DECL_INITIAL is null on all members except for zero-width bit-fields.
+ And they have already done their work.
+
+ C++: maybe we will support default field initialization some day... */
+
+ /* Delete all zero-width bit-fields from the front of the fieldlist */
+ while (fields && DECL_BIT_FIELD (fields)
+ && DECL_INITIAL (fields))
+ fields = TREE_CHAIN (fields);
+ /* Delete all such fields from the rest of the fields. */
+ for (x = fields; x;)
+ {
+ if (TREE_CHAIN (x) && DECL_BIT_FIELD (TREE_CHAIN (x))
+ && DECL_INITIAL (TREE_CHAIN (x)))
+ TREE_CHAIN (x) = TREE_CHAIN (TREE_CHAIN (x));
+ else
+ x = TREE_CHAIN (x);
+ }
+ /* Delete all duplicate fields from the field list. */
+ delete_duplicate_fields (fields);
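+
+ /* Zero-width bit-fields exist only for their layout side effects
+ (aligning the following field, or rounding the total size via
+ EMPTY_FIELD_BOUNDARY above), so once those effects have been
+ recorded they are removed from the field list entirely. */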
+
+ /* Now we have the final fieldlist for the data fields. Record it,
+ then lay out the structure or union (including the fields). */
+
+ TYPE_FIELDS (t) = fields;
+
+ /* If there's a :0 field at the end, round the size to the
+ EMPTY_FIELD_BOUNDARY. */
+ TYPE_ALIGN (t) = round_up_size;
+
+ /* Pass layout information about base classes to layout_type, if any. */
+
+ {
+ tree field;
+ for (field = TYPE_FIELDS (t); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_STATIC (field))
+ continue;
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ /* If this field is an anonymous union,
+ give each union-member the same position as the union has.
+
+ ??? This is a real kludge because it makes the structure
+ of the types look strange. This feature is only used by
+ C++, which should have build_component_ref build two
+ COMPONENT_REF operations, one for the union and one for
+ the inner field. We set the offset of this field to zero
+ so that either the old or the correct method will work.
+ Setting DECL_FIELD_CONTEXT is wrong unless the inner fields are
+ moved into the type of this field, but nothing seems to break
+ by doing this. */
+
+ if (DECL_NAME (field) == NULL_TREE
+ && TREE_CODE (TREE_TYPE (field)) == UNION_TYPE)
+ {
+ tree uelt = TYPE_FIELDS (TREE_TYPE (field));
+ for (; uelt; uelt = TREE_CHAIN (uelt))
+ {
+ DECL_FIELD_CONTEXT (uelt) = DECL_FIELD_CONTEXT (field);
+ DECL_FIELD_BITPOS (uelt) = DECL_FIELD_BITPOS (field);
+ }
+
+ DECL_FIELD_BITPOS (field) = integer_zero_node;
+ }
+ }
+ }
+
+ if (n_baseclasses)
+ {
+ tree pseudo_basetype = TREE_TYPE (base_layout_decl);
+
+ TREE_CHAIN (base_layout_decl) = TYPE_FIELDS (t);
+ TYPE_FIELDS (t) = base_layout_decl;
+
+ TYPE_SIZE (pseudo_basetype) = CLASSTYPE_SIZE (t);
+ TYPE_MODE (pseudo_basetype) = TYPE_MODE (t);
+ TYPE_ALIGN (pseudo_basetype) = CLASSTYPE_ALIGN (t);
+ DECL_ALIGN (base_layout_decl) = TYPE_ALIGN (pseudo_basetype);
+ /* Don't re-use old size. */
+ DECL_SIZE (base_layout_decl) = NULL_TREE;
+ }
+
+ layout_type (t);
+
+ {
+ tree field;
+ for (field = TYPE_FIELDS (t); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_STATIC (field))
+ continue;
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ /* If this field is an anonymous union,
+ give each union-member the same position as the union has.
+
+ ??? This is a real kludge because it makes the structure
+ of the types look strange. This feature is only used by
+ C++, which should have build_component_ref build two
+ COMPONENT_REF operations, one for the union and one for
+ the inner field. We set the offset of this field to zero
+ so that either the old or the correct method will work.
+ Setting DECL_FIELD_CONTEXT is wrong unless the inner fields are
+ moved into the type of this field, but nothing seems to break
+ by doing this. */
+
+ if (DECL_NAME (field) == NULL_TREE
+ && TREE_CODE (TREE_TYPE (field)) == UNION_TYPE)
+ {
+ tree uelt = TYPE_FIELDS (TREE_TYPE (field));
+ for (; uelt; uelt = TREE_CHAIN (uelt))
+ {
+ DECL_FIELD_CONTEXT (uelt) = DECL_FIELD_CONTEXT (field);
+ DECL_FIELD_BITPOS (uelt) = DECL_FIELD_BITPOS (field);
+ }
+
+ DECL_FIELD_BITPOS (field) = integer_zero_node;
+ }
+ }
+ }
+
+ if (n_baseclasses)
+ TYPE_FIELDS (t) = TREE_CHAIN (TYPE_FIELDS (t));
+
+ /* C++: do not let empty structures exist. */
+ if (integer_zerop (TYPE_SIZE (t)))
+ TYPE_SIZE (t) = TYPE_SIZE (char_type_node);
+
+ /* Set the TYPE_DECL for this type to contain the right
+ value for DECL_OFFSET, so that we can use it as part
+ of a COMPONENT_REF for multiple inheritance. */
+
+ if (TREE_CODE (TYPE_NAME (t)) == TYPE_DECL)
+ layout_decl (TYPE_NAME (t), 0);
+
+ /* Now fix up any virtual base class types that we left lying
+ around. We must get these done before we try to lay out the
+ virtual function table. */
+ doing_hard_virtuals = 1;
+ pending_hard_virtuals = nreverse (pending_hard_virtuals);
+
+ if (TYPE_USES_VIRTUAL_BASECLASSES (t))
+ {
+ tree vbases;
+
+ max_has_virtual = layout_vbasetypes (t, max_has_virtual);
+ vbases = CLASSTYPE_VBASECLASSES (t);
+ CLASSTYPE_N_VBASECLASSES (t) = list_length (vbases);
+
+ while (vbases)
+ {
+ /* The rtti code should do this. (mrs) */
+ /* Update dossier info with offsets for virtual baseclasses. */
+ if (flag_dossier && ! BINFO_NEW_VTABLE_MARKED (vbases))
+ prepare_fresh_vtable (vbases, t);
+ vbases = TREE_CHAIN (vbases);
+ }
+
+ {
+ /* Now fixup overrides of all functions in vtables from all
+ direct or indirect virtual base classes. */
+ tree binfos = BINFO_BASETYPES (TYPE_BINFO (t));
+ int i, n_baseclasses = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ for (i = 0; i < n_baseclasses; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ tree basetype = BINFO_TYPE (base_binfo);
+ tree vbases;
+
+ vbases = CLASSTYPE_VBASECLASSES (basetype);
+ while (vbases)
+ {
+ merge_overrides (binfo_member (BINFO_TYPE (vbases),
+ CLASSTYPE_VBASECLASSES (t)),
+ vbases, 1, t);
+ vbases = TREE_CHAIN (vbases);
+ }
+ }
+ }
+
+ /* Now fixup any virtual function entries from virtual bases
+ that have different deltas. */
+ vbases = CLASSTYPE_VBASECLASSES (t);
+ while (vbases)
+ {
+ /* We might be able to shorten the amount of work we do by
+ only doing this for vtables that come from virtual bases
+ that have differing offsets, but we don't want to miss any
+ entries. */
+ fixup_vtable_deltas (vbases, t);
+ vbases = TREE_CHAIN (vbases);
+ }
+ }
+
+ /* Set up the DECL_FIELD_BITPOS of the vfield if we need to, as we
+ might need to know it for setting up the offsets in the vtable
+ (or in thunks) below. */
+ if (vfield != NULL_TREE
+ && DECL_FIELD_CONTEXT (vfield) != t)
+ {
+ tree binfo = get_binfo (DECL_FIELD_CONTEXT (vfield), t, 0);
+ tree offset = BINFO_OFFSET (binfo);
+
+ vfield = copy_node (vfield);
+ copy_lang_decl (vfield);
+
+ if (! integer_zerop (offset))
+ offset = size_binop (MULT_EXPR, offset, size_int (BITS_PER_UNIT));
+ DECL_FIELD_CONTEXT (vfield) = t;
+ DECL_CLASS_CONTEXT (vfield) = t;
+ DECL_FIELD_BITPOS (vfield)
+ = size_binop (PLUS_EXPR, offset, DECL_FIELD_BITPOS (vfield));
+ CLASSTYPE_VFIELD (t) = vfield;
+ }
+
+#ifdef NOTQUITE
+ cp_warning ("Doing hard virtuals for %T...", t);
+#endif
+ while (pending_hard_virtuals)
+ {
+ modify_all_vtables (t,
+ TREE_PURPOSE (pending_hard_virtuals),
+ TREE_VALUE (pending_hard_virtuals));
+ pending_hard_virtuals = TREE_CHAIN (pending_hard_virtuals);
+ }
+ doing_hard_virtuals = 0;
+
+ /* Under our model of GC, every C++ class gets its own virtual
+ function table, at least virtually. */
+ if (pending_virtuals || CLASSTYPE_DOSSIER (t))
+ {
+ pending_virtuals = nreverse (pending_virtuals);
+ /* We must enter these virtuals into the table. */
+ if (first_vfn_base_index < 0)
+ {
+ if (flag_dossier)
+ pending_virtuals = tree_cons (NULL_TREE,
+ build_vtable_entry (integer_zero_node,
+ build_t_desc (t, 0)),
+ pending_virtuals);
+ pending_virtuals = tree_cons (NULL_TREE, the_null_vtable_entry,
+ pending_virtuals);
+ build_vtable (NULL_TREE, t);
+ }
+ else
+ {
+ /* Here we know enough to change the type of our virtual
+ function table, but we will wait until later in this function. */
+
+ if (! BINFO_NEW_VTABLE_MARKED (TYPE_BINFO (t)))
+ build_vtable (TREE_VEC_ELT (TYPE_BINFO_BASETYPES (t), first_vfn_base_index), t);
+
+ /* Update the dossier pointer for this class. */
+ if (flag_dossier)
+ TREE_VALUE (TREE_CHAIN (TYPE_BINFO_VIRTUALS (t)))
+ = build_vtable_entry (integer_zero_node, build_t_desc (t, 0));
+ }
+
+ /* If this type has basetypes with constructors, then those
+ constructors might clobber the virtual function table. But
+ they don't if the derived class shares the exact vtable of the base
+ class. */
+
+ CLASSTYPE_NEEDS_VIRTUAL_REINIT (t) = 1;
+ }
+ else if (first_vfn_base_index >= 0)
+ {
+ tree binfo = TREE_VEC_ELT (TYPE_BINFO_BASETYPES (t), first_vfn_base_index);
+ /* This class contributes nothing new to the virtual function
+ table. However, it may have declared functions which
+ went into the virtual function table "inherited" from the
+ base class. If so, we grab a copy of those updated functions,
+ and pretend they are ours. */
+
+ /* See if we should steal the virtual info from base class. */
+ if (TYPE_BINFO_VTABLE (t) == NULL_TREE)
+ TYPE_BINFO_VTABLE (t) = BINFO_VTABLE (binfo);
+ if (TYPE_BINFO_VIRTUALS (t) == NULL_TREE)
+ TYPE_BINFO_VIRTUALS (t) = BINFO_VIRTUALS (binfo);
+ if (TYPE_BINFO_VTABLE (t) != BINFO_VTABLE (binfo))
+ CLASSTYPE_NEEDS_VIRTUAL_REINIT (t) = 1;
+ }
+
+ if (has_virtual > max_has_virtual)
+ max_has_virtual = has_virtual;
+ if (max_has_virtual || first_vfn_base_index >= 0)
+ {
+ TYPE_VIRTUAL_P (t) = 1;
+ CLASSTYPE_VSIZE (t) = has_virtual;
+ if (first_vfn_base_index >= 0)
+ {
+ if (pending_virtuals)
+ TYPE_BINFO_VIRTUALS (t) = chainon (TYPE_BINFO_VIRTUALS (t),
+ pending_virtuals);
+ }
+ else if (has_virtual)
+ {
+ TYPE_BINFO_VIRTUALS (t) = pending_virtuals;
+ if (write_virtuals >= 0)
+ DECL_VIRTUAL_P (TYPE_BINFO_VTABLE (t)) = 1;
+ }
+ }
+
+ /* Now lay out the virtual function table. */
+ if (has_virtual)
+ {
+ tree atype, itype;
+
+ if (TREE_TYPE (vfield) == ptr_type_node)
+ {
+ /* We must create a pointer to this table because
+ the one inherited from the base class does not exist.
+ We will fill in the type when we know what it
+ should really be. Use `size_int' so values are memoized
+ in common cases. */
+ itype = build_index_type (size_int (has_virtual));
+ atype = build_array_type (vtable_entry_type, itype);
+ layout_type (atype);
+ TREE_TYPE (vfield) = build_pointer_type (atype);
+ }
+ else
+ {
+ atype = TREE_TYPE (TREE_TYPE (vfield));
+
+ if (has_virtual != TREE_INT_CST_LOW (TYPE_MAX_VALUE (TYPE_DOMAIN (atype))))
+ {
+ /* We must extend (or create) the boundaries on this array,
+ because we picked up virtual functions from multiple
+ base classes. */
+ itype = build_index_type (size_int (has_virtual));
+ atype = build_array_type (vtable_entry_type, itype);
+ layout_type (atype);
+ vfield = copy_node (vfield);
+ TREE_TYPE (vfield) = build_pointer_type (atype);
+ }
+ }
+
+ CLASSTYPE_VFIELD (t) = vfield;
+ if (TREE_TYPE (TYPE_BINFO_VTABLE (t)) != atype)
+ {
+ TREE_TYPE (TYPE_BINFO_VTABLE (t)) = atype;
+ layout_decl (TYPE_BINFO_VTABLE (t), 0);
+ /* At one time the vtable info was grabbed 2 words at a time. This
+ fails on sparc unless you have 8-byte alignment. (tiemann) */
+ DECL_ALIGN (TYPE_BINFO_VTABLE (t))
+ = MAX (TYPE_ALIGN (double_type_node),
+ DECL_ALIGN (TYPE_BINFO_VTABLE (t)));
+ }
+ }
+ else if (first_vfn_base_index >= 0)
+ CLASSTYPE_VFIELD (t) = vfield;
+ CLASSTYPE_VFIELDS (t) = vfields;
+
+ finish_struct_bits (t, max_has_virtual);
+
+ /* Promote each bit-field's type to int if it is narrower than that.
+ There's more: complete the rtl for any static member objects which
+ is of the same type we're working on. */
+ for (x = fields; x; x = TREE_CHAIN (x))
+ {
+ if (DECL_BIT_FIELD (x)
+ && (C_PROMOTING_INTEGER_TYPE_P (TREE_TYPE (x))
+ || DECL_FIELD_SIZE (x) < TYPE_PRECISION (integer_type_node)))
+ {
+ tree type = TREE_TYPE (x);
+
+ /* Preserve unsignedness if traditional or if not really getting
+ any wider. */
+ if (TREE_UNSIGNED (type)
+ && (flag_traditional
+ ||
+ (TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)
+ && DECL_FIELD_SIZE (x) == TYPE_PRECISION (integer_type_node))))
+ TREE_TYPE (x) = unsigned_type_node;
+ else
+ TREE_TYPE (x) = integer_type_node;
+ }
+
+ if (TREE_CODE (x) == VAR_DECL && TREE_STATIC (x)
+ && TREE_TYPE (x) == t)
+ {
+ DECL_MODE (x) = TYPE_MODE (t);
+ make_decl_rtl (x, NULL, 0);
+ }
+ }
+
+ /* Now add the tags, if any, to the list of TYPE_DECLs
+ defined for this type. */
+ if (CLASSTYPE_TAGS (t))
+ {
+ x = CLASSTYPE_TAGS (t);
+ last_x = tree_last (TYPE_FIELDS (t));
+ while (x)
+ {
+#if 0 /* What's wrong with using the decl the type already has? */
+ tree tag = build_decl (TYPE_DECL, TREE_PURPOSE (x), TREE_VALUE (x));
+ DECL_CONTEXT (tag) = t;
+#else
+ tree tag = TYPE_NAME (TREE_VALUE (x));
+#endif
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ {
+ /* Notify dwarfout.c that this TYPE_DECL node represents a
+ gratuitous typedef. */
+ DECL_IGNORED_P (tag) = 1;
+ }
+#endif /* DWARF_DEBUGGING_INFO */
+
+ TREE_NONLOCAL_FLAG (TREE_VALUE (x)) = 0;
+ x = TREE_CHAIN (x);
+ last_x = chainon (last_x, tag);
+ }
+ if (TYPE_FIELDS (t) == NULL_TREE)
+ TYPE_FIELDS (t) = last_x;
+ CLASSTYPE_LOCAL_TYPEDECLS (t) = 1;
+ }
+
+ if (TYPE_HAS_CONSTRUCTOR (t))
+ {
+ tree vfields = CLASSTYPE_VFIELDS (t);
+
+ while (vfields)
+ {
+ /* Mark the fact that constructor for T
+ could affect anybody inheriting from T
+ who wants to initialize vtables for VFIELDS's type. */
+ if (VF_DERIVED_VALUE (vfields))
+ TREE_ADDRESSABLE (vfields) = 1;
+ vfields = TREE_CHAIN (vfields);
+ }
+ if (any_default_members != 0)
+ build_class_init_list (t);
+ }
+ else if (TYPE_NEEDS_CONSTRUCTING (t))
+ build_class_init_list (t);
+
+ if (! CLASSTYPE_DECLARED_EXCEPTION (t) && ! IS_SIGNATURE (t))
+ embrace_waiting_friends (t);
+
+ /* Write out inline function definitions. */
+ do_inline_function_hair (t, CLASSTYPE_INLINE_FRIENDS (t));
+ CLASSTYPE_INLINE_FRIENDS (t) = 0;
+
+ if (CLASSTYPE_VSIZE (t) != 0)
+ {
+ if ((flag_this_is_variable & 1) == 0)
+ {
+ tree vtbl_ptr = build_decl (VAR_DECL, get_identifier (VPTR_NAME),
+ TREE_TYPE (vfield));
+ DECL_REGISTER (vtbl_ptr) = 1;
+ CLASSTYPE_VTBL_PTR (t) = vtbl_ptr;
+ }
+#if 0
+ /* This is now done above. */
+ if (DECL_FIELD_CONTEXT (vfield) != t)
+ {
+ tree binfo = get_binfo (DECL_FIELD_CONTEXT (vfield), t, 0);
+ tree offset = BINFO_OFFSET (binfo);
+
+ vfield = copy_node (vfield);
+ copy_lang_decl (vfield);
+
+ if (! integer_zerop (offset))
+ offset = size_binop (MULT_EXPR, offset, size_int (BITS_PER_UNIT));
+ DECL_FIELD_CONTEXT (vfield) = t;
+ DECL_CLASS_CONTEXT (vfield) = t;
+ DECL_FIELD_BITPOS (vfield)
+ = size_binop (PLUS_EXPR, offset, DECL_FIELD_BITPOS (vfield));
+ CLASSTYPE_VFIELD (t) = vfield;
+ }
+#endif
+
+ /* In addition to this one, all the other vfields should be listed. */
+ /* Before that can be done, we have to have FIELD_DECLs for them, and
+ a place to find them. */
+ TYPE_NONCOPIED_PARTS (t) = build_tree_list (default_conversion (TYPE_BINFO_VTABLE (t)), vfield);
+
+ if (warn_nonvdtor && TYPE_HAS_DESTRUCTOR (t)
+ && DECL_VINDEX (TREE_VEC_ELT (method_vec, 0)) == NULL_TREE)
+ cp_warning ("`%#T' has virtual functions but non-virtual destructor",
+ t);
+ }
+
+ /* Make the rtl for any new vtables we have created, and unmark
+ the base types we marked. */
+ finish_vtbls (TYPE_BINFO (t), 1, t);
+ TYPE_BEING_DEFINED (t) = 0;
+
+ if (flag_dossier && CLASSTYPE_VTABLE_NEEDS_WRITING (t))
+ {
+ tree variants;
+ tree tdecl;
+
+ /* Now instantiate its type descriptors. */
+ tdecl = TREE_OPERAND (build_t_desc (t, 1), 0);
+ variants = TYPE_POINTER_TO (t);
+ build_type_variant (variants, 1, 0);
+ while (variants)
+ {
+ build_t_desc (variants, 1);
+ variants = TYPE_NEXT_VARIANT (variants);
+ }
+ variants = build_reference_type (t);
+ build_type_variant (variants, 1, 0);
+ while (variants)
+ {
+ build_t_desc (variants, 1);
+ variants = TYPE_NEXT_VARIANT (variants);
+ }
+ DECL_CONTEXT (tdecl) = t;
+ }
+ /* Still need to instantiate this C struct's type descriptor. */
+ else if (flag_dossier && ! CLASSTYPE_DOSSIER (t))
+ build_t_desc (t, 1);
+
+#if 0
+ if (TYPE_NAME (t) && TYPE_IDENTIFIER (t))
+ undo_template_name_overload (TYPE_IDENTIFIER (t), 1);
+#endif
+ if (current_class_type)
+ popclass (0);
+ else
+ error ("trying to finish struct, but kicked out due to previous parse errors.");
+
+ hack_incomplete_structures (t);
+
+ resume_momentary (old);
+
+ if (flag_cadillac)
+ cadillac_finish_struct (t);
+
+#if 0
+ /* This has to be done after we have sorted out what to do with
+ the enclosing type. */
+ if (write_symbols != DWARF_DEBUG)
+ {
+ /* Be smarter about nested classes here. If a type is nested,
+ only output it if we would output the enclosing type. */
+ if (DECL_CONTEXT (TYPE_NAME (t))
+ && TREE_CODE_CLASS (TREE_CODE (DECL_CONTEXT (TYPE_NAME (t)))) == 't')
+ DECL_IGNORED_P (TYPE_NAME (t)) = TREE_ASM_WRITTEN (TYPE_NAME (t));
+ }
+#endif
+
+ if (write_symbols != DWARF_DEBUG)
+ {
+ /* If the type has methods, we want to think about cutting down
+ the amount of symbol table stuff we output. The value stored in
+ the TYPE_DECL's DECL_IGNORED_P slot is a first approximation.
+ For example, if a member function is seen and we decide to
+ write out that member function, then we can change the value
+ of the DECL_IGNORED_P slot, and the type will be output when
+ that member function's debug info is written out. */
+ if (CLASSTYPE_METHOD_VEC (t))
+ {
+ extern tree pending_vtables;
+
+ /* Don't output full info about any type
+ which does not have its implementation defined here. */
+ if (TYPE_VIRTUAL_P (t) && write_virtuals == 2)
+ TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (t))
+ = (value_member (TYPE_IDENTIFIER (t), pending_vtables) == 0);
+ else if (CLASSTYPE_INTERFACE_ONLY (t))
+ TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (t)) = 1;
+ else if (CLASSTYPE_INTERFACE_UNKNOWN (t))
+ /* Only a first approximation! */
+ TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (t)) = 1;
+ }
+ else if (CLASSTYPE_INTERFACE_ONLY (t))
+ TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (t)) = 1;
+ }
+
+ /* Finish debugging output for this type. */
+ rest_of_type_compilation (t, global_bindings_p ());
+
+ return t;
+}
+
+/* Return non-zero if the effective type of INSTANCE is static.
+ Used to determine whether the virtual function table is needed
+ or not.
+
+ *NONNULL is set iff INSTANCE can be known to be nonnull, regardless
+ of our knowledge of its type. */
+int
+resolves_to_fixed_type_p (instance, nonnull)
+ tree instance;
+ int *nonnull;
+{
+ switch (TREE_CODE (instance))
+ {
+ case INDIRECT_REF:
+ /* Check that we are not going through a cast of some sort. */
+ if (TREE_TYPE (instance)
+ == TREE_TYPE (TREE_TYPE (TREE_OPERAND (instance, 0))))
+ instance = TREE_OPERAND (instance, 0);
+ /* fall through... */
+ case CALL_EXPR:
+ /* This is a call to a constructor, hence it's never zero. */
+ if (TREE_HAS_CONSTRUCTOR (instance))
+ {
+ if (nonnull)
+ *nonnull = 1;
+ return 1;
+ }
+ return 0;
+
+ case SAVE_EXPR:
+ /* This is a call to a constructor, hence it's never zero. */
+ if (TREE_HAS_CONSTRUCTOR (instance))
+ {
+ if (nonnull)
+ *nonnull = 1;
+ return 1;
+ }
+ return resolves_to_fixed_type_p (TREE_OPERAND (instance, 0), nonnull);
+
+ case RTL_EXPR:
+ /* This is a call to `new', hence it's never zero. */
+ if (TREE_CALLS_NEW (instance))
+ {
+ if (nonnull)
+ *nonnull = 1;
+ return 1;
+ }
+ return 0;
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ if (TREE_CODE (TREE_OPERAND (instance, 1)) == INTEGER_CST)
+ /* Propagate nonnull. */
+ resolves_to_fixed_type_p (TREE_OPERAND (instance, 0), nonnull);
+ if (TREE_CODE (TREE_OPERAND (instance, 0)) == ADDR_EXPR)
+ return resolves_to_fixed_type_p (TREE_OPERAND (instance, 0), nonnull);
+ return 0;
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ return resolves_to_fixed_type_p (TREE_OPERAND (instance, 0), nonnull);
+
+ case ADDR_EXPR:
+ if (nonnull)
+ *nonnull = 1;
+ return resolves_to_fixed_type_p (TREE_OPERAND (instance, 0), nonnull);
+
+ case COMPONENT_REF:
+ return resolves_to_fixed_type_p (TREE_OPERAND (instance, 1), nonnull);
+
+ case WITH_CLEANUP_EXPR:
+ if (TREE_CODE (TREE_OPERAND (instance, 0)) == ADDR_EXPR)
+ return resolves_to_fixed_type_p (TREE_OPERAND (instance, 0), nonnull);
+ /* fall through... */
+ case VAR_DECL:
+ case FIELD_DECL:
+ if (TREE_CODE (TREE_TYPE (instance)) == ARRAY_TYPE
+ && IS_AGGR_TYPE (TREE_TYPE (TREE_TYPE (instance))))
+ {
+ if (nonnull)
+ *nonnull = 1;
+ return 1;
+ }
+ /* fall through... */
+ case TARGET_EXPR:
+ case PARM_DECL:
+ if (IS_AGGR_TYPE (TREE_TYPE (instance)))
+ {
+ if (nonnull)
+ *nonnull = 1;
+ return 1;
+ }
+ else if (nonnull)
+ {
+ if (instance == current_class_decl
+ && flag_this_is_variable <= 0)
+ {
+ /* Some people still use `this = 0' inside destructors. */
+ *nonnull = ! DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (current_function_decl));
+ /* In a constructor, we know our type. */
+ if (flag_this_is_variable < 0)
+ return 1;
+ }
+ else if (TREE_CODE (TREE_TYPE (instance)) == REFERENCE_TYPE)
+ /* Reference variables should be references to objects. */
+ *nonnull = 1;
+ }
+ return 0;
+
+ default:
+ return 0;
+ }
+}
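+
+/* For example (hypothetical code): for `A a; a.f ();' the instance is
+ a VAR_DECL of aggregate type, so the type is fixed and *NONNULL is
+ set -- the virtual call can be made directly. For `ap->f ()' with
+ an `A *ap' parameter, nothing is known, 0 is returned, and the call
+ must go through the vtable. */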
+
+void
+init_class_processing ()
+{
+ current_class_depth = 0;
+ current_class_stacksize = 10;
+ current_class_base = (tree *)xmalloc(current_class_stacksize * sizeof (tree));
+ current_class_stack = current_class_base;
+
+ current_lang_stacksize = 10;
+ current_lang_base = (tree *)xmalloc(current_lang_stacksize * sizeof (tree));
+ current_lang_stack = current_lang_base;
+
+ /* Keep these values lying around. */
+ the_null_vtable_entry = build_vtable_entry (integer_zero_node, integer_zero_node);
+ base_layout_decl = build_lang_field_decl (FIELD_DECL, NULL_TREE, error_mark_node);
+ TREE_TYPE (base_layout_decl) = make_node (RECORD_TYPE);
+
+ gcc_obstack_init (&class_obstack);
+}
+
+/* Set current scope to NAME. CODE tells us if this is a
+ STRUCT, UNION, or ENUM environment.
+
+ NAME may end up being NULL_TREE if this is an anonymous or
+ late-bound struct (as in "struct { ... } foo;") */
+
+/* Set global variables CURRENT_CLASS_NAME and CURRENT_CLASS_TYPE to
+ appropriate values, found by looking up the type definition of
+ NAME (as a CODE).
+
+ If MODIFY is 1, we set IDENTIFIER_CLASS_VALUE's of names
+ which can be seen locally to the class. They are shadowed by
+ any subsequent local declaration (including parameter names).
+
+ If MODIFY is 2, we set IDENTIFIER_CLASS_VALUE's of names
+ which have static meaning (i.e., static members, static
+ member functions, enum declarations, etc).
+
+ If MODIFY is 3, we set IDENTIFIER_CLASS_VALUE of names
+ which can be seen locally to the class (as in 1), but
+ know that we are doing this for declaration purposes
+ (i.e. friend foo::bar (int)).
+
+ So that we may avoid calls to lookup_name, we cache the _TYPE
+ nodes of local TYPE_DECLs in the TREE_TYPE field of the name.
+
+ For multiple inheritance, we perform a two-pass depth-first search
+ of the type lattice. The first pass performs a pre-order search,
+ marking types after the type has had its fields installed in
+ the appropriate IDENTIFIER_CLASS_VALUE slot. The second pass merely
+ unmarks the marked types. If a field or member function name
+ appears in an ambiguous way, the IDENTIFIER_CLASS_VALUE of
+ that name becomes `error_mark_node'. */
+
+void
+pushclass (type, modify)
+ tree type;
+ int modify;
+{
+ push_memoized_context (type, modify);
+
+ current_class_depth++;
+ *current_class_stack++ = current_class_name;
+ *current_class_stack++ = current_class_type;
+ if (current_class_stack >= current_class_base + current_class_stacksize)
+ {
+ current_class_base =
+ (tree *)xrealloc (current_class_base,
+ sizeof (tree) * (current_class_stacksize + 10));
+ current_class_stack = current_class_base + current_class_stacksize;
+ current_class_stacksize += 10;
+ }
+
+ current_class_name = TYPE_NAME (type);
+ if (TREE_CODE (current_class_name) == TYPE_DECL)
+ current_class_name = DECL_NAME (current_class_name);
+ current_class_type = type;
+
+ if (previous_class_type != NULL_TREE
+ && (type != previous_class_type || TYPE_SIZE (previous_class_type) == NULL_TREE)
+ && current_class_depth == 1)
+ {
+ /* Forcibly remove any old class remnants. */
+ popclass (-1);
+ previous_class_type = NULL_TREE;
+ }
+
+ pushlevel_class ();
+
+ if (modify)
+ {
+ tree tags;
+ tree this_fndecl = current_function_decl;
+
+ if (current_function_decl
+ && DECL_CONTEXT (current_function_decl)
+ && TREE_CODE (DECL_CONTEXT (current_function_decl)) == FUNCTION_DECL)
+ current_function_decl = DECL_CONTEXT (current_function_decl);
+ else
+ current_function_decl = NULL_TREE;
+
+ if (TREE_CODE (type) == UNINSTANTIATED_P_TYPE)
+ declare_uninstantiated_type_level ();
+ else if (type != previous_class_type || current_class_depth > 1)
+ {
+ build_mi_matrix (type);
+ push_class_decls (type);
+ free_mi_matrix ();
+ if (current_class_depth == 1)
+ previous_class_type = type;
+ }
+ else
+ {
+ tree item;
+
+ /* Hooray, our caching was successful; let's just install the
+ cached class_shadowed list, and walk through it to get the
+ IDENTIFIER_TYPE_VALUEs correct. */
+ set_class_shadows (previous_class_values);
+ for (item = previous_class_values; item; item = TREE_CHAIN (item))
+ {
+ tree id = TREE_PURPOSE (item);
+ tree decl = IDENTIFIER_CLASS_VALUE (id);
+
+ if (TREE_CODE (decl) == TYPE_DECL)
+ set_identifier_type_value (id, TREE_TYPE (decl));
+ }
+ unuse_fields (type);
+ }
+
+ if (IDENTIFIER_TEMPLATE (TYPE_IDENTIFIER (type)))
+ overload_template_name (current_class_name, 0);
+
+ for (tags = CLASSTYPE_TAGS (type); tags; tags = TREE_CHAIN (tags))
+ {
+ TREE_NONLOCAL_FLAG (TREE_VALUE (tags)) = 1;
+ if (! TREE_PURPOSE (tags))
+ continue;
+ pushtag (TREE_PURPOSE (tags), TREE_VALUE (tags), 0);
+ }
+
+ current_function_decl = this_fndecl;
+ }
+
+ if (flag_cadillac)
+ cadillac_push_class (type);
+}
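+
+/* The previous_class_type cache above pays off for the common pattern
+ of reopening the same class over and over, e.g. a run of
+ out-of-line member definitions (types hypothetical):
+
+ void X::f () { ... } // pushclass (X) builds the bindings
+ void X::g () { ... } // pushclass (X) again: cache hit
+
+ The second push skips build_mi_matrix/push_class_decls and simply
+ reinstalls the saved class_shadowed list. */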
+
+/* Get out of the current class scope. If we were in a class scope
+ previously, that is the one popped to. The flag MODIFY tells whether
+ the current scope declarations needs to be modified as a result of
+ popping to the previous scope. 0 is used for class definitions. */
+void
+popclass (modify)
+ int modify;
+{
+ if (flag_cadillac)
+ cadillac_pop_class ();
+
+ if (modify < 0)
+ {
+ /* Back this old class out completely. */
+ tree tags = CLASSTYPE_TAGS (previous_class_type);
+ tree t;
+
+ /* This code can be seen as a cache miss. When we've cached a
+ class' scope's bindings and we can't use them, we need to reset
+ them. This is it! */
+ for (t = previous_class_values; t; t = TREE_CHAIN (t))
+ IDENTIFIER_CLASS_VALUE (TREE_PURPOSE (t)) = NULL_TREE;
+ while (tags)
+ {
+ TREE_NONLOCAL_FLAG (TREE_VALUE (tags)) = 0;
+ tags = TREE_CHAIN (tags);
+ }
+ goto ret;
+ }
+
+ if (modify)
+ {
+ /* Just remove from this class what didn't make
+ it into IDENTIFIER_CLASS_VALUE. */
+ tree tags = CLASSTYPE_TAGS (current_class_type);
+
+ while (tags)
+ {
+ TREE_NONLOCAL_FLAG (TREE_VALUE (tags)) = 0;
+ tags = TREE_CHAIN (tags);
+ }
+ if (IDENTIFIER_TEMPLATE (TYPE_IDENTIFIER (current_class_type)))
+ undo_template_name_overload (current_class_name, 0);
+ }
+
+ /* Force clearing of IDENTIFIER_CLASS_VALUEs after a class definition,
+ since not all class decls make it there currently. */
+ poplevel_class (! modify);
+
+ /* Since poplevel_class does the popping of class decls nowadays,
+ this really only frees the obstack used for these decls.
+ That's why it had to be moved down here. */
+ if (modify)
+ pop_class_decls (current_class_type);
+
+ current_class_depth--;
+ current_class_type = *--current_class_stack;
+ current_class_name = *--current_class_stack;
+
+ if (current_class_type)
+ {
+ if (CLASSTYPE_VTBL_PTR (current_class_type))
+ {
+ current_vtable_decl
+ = lookup_name (DECL_NAME (CLASSTYPE_VTBL_PTR (current_class_type)),
+ 0);
+ if (current_vtable_decl)
+ current_vtable_decl = build_indirect_ref (current_vtable_decl,
+ NULL_PTR);
+ }
+ current_class_decl = lookup_name (this_identifier, 0);
+ if (current_class_decl)
+ {
+ if (TREE_CODE (TREE_TYPE (current_class_decl)) == POINTER_TYPE)
+ {
+ tree temp;
+ /* Can't call build_indirect_ref here, because it has special
+ logic to return C_C_D given this argument. */
+ C_C_D = build1 (INDIRECT_REF, current_class_type, current_class_decl);
+ temp = TREE_TYPE (TREE_TYPE (current_class_decl));
+ TREE_READONLY (C_C_D) = TYPE_READONLY (temp);
+ TREE_SIDE_EFFECTS (C_C_D) = TYPE_VOLATILE (temp);
+ TREE_THIS_VOLATILE (C_C_D) = TYPE_VOLATILE (temp);
+ }
+ else
+ C_C_D = current_class_decl;
+ }
+ else
+ C_C_D = NULL_TREE;
+ }
+ else
+ {
+ current_class_decl = NULL_TREE;
+ current_vtable_decl = NULL_TREE;
+ C_C_D = NULL_TREE;
+ }
+
+ pop_memoized_context (modify);
+
+ ret:
+ ;
+}
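+
+/* Note the MODIFY values in use: pushclass calls popclass (-1) to
+ flush a stale cache completely, while finish_struct ends a class
+ definition with popclass (0), which forces the clearing of
+ IDENTIFIER_CLASS_VALUEs without undoing tag or template overloads. */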
+
+/* When entering a class scope, all enclosing class scopes' names with
+ static meaning (static variables, static functions, types and enumerators)
+ have to be visible. This recursive function calls pushclass for all
+ enclosing class contexts until global or a local scope is reached.
+ TYPE is the enclosed class and MODIFY is equivalent to the pushclass
+ formal of the same name. */
+
+void
+push_nested_class (type, modify)
+ tree type;
+ int modify;
+{
+ tree context;
+
+ if (type == error_mark_node || ! IS_AGGR_TYPE (type))
+ return;
+
+ context = DECL_CONTEXT (TYPE_NAME (type));
+
+ if (context && TREE_CODE (context) == RECORD_TYPE)
+ push_nested_class (context, 2);
+ pushclass (type, modify);
+}
+
+/* Undoes a push_nested_class call. MODIFY is passed on to popclass. */
+
+void
+pop_nested_class (modify)
+ int modify;
+{
+ tree context = DECL_CONTEXT (TYPE_NAME (current_class_type));
+
+ popclass (modify);
+ if (context && TREE_CODE (context) == RECORD_TYPE)
+ pop_nested_class (modify);
+}
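+
+/* E.g. defining a member of `struct Outer { struct Inner { ... }; };'
+ out of line first pushes Outer with MODIFY == 2 (static meanings
+ only) and then pushes Inner itself with the caller's MODIFY;
+ pop_nested_class unwinds the same chain in reverse. */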
+
+/* Set global variables CURRENT_LANG_NAME to appropriate value
+ so that behavior of name-mangling machinery is correct. */
+
+void
+push_lang_context (name)
+ tree name;
+{
+ *current_lang_stack++ = current_lang_name;
+ if (current_lang_stack >= current_lang_base + current_lang_stacksize)
+ {
+ current_lang_base =
+ (tree *)xrealloc (current_lang_base,
+ sizeof (tree) * (current_lang_stacksize + 10));
+ current_lang_stack = current_lang_base + current_lang_stacksize;
+ current_lang_stacksize += 10;
+ }
+
+ if (name == lang_name_cplusplus)
+ {
+ strict_prototype = strict_prototypes_lang_cplusplus;
+ current_lang_name = name;
+ }
+ else if (name == lang_name_c)
+ {
+ strict_prototype = strict_prototypes_lang_c;
+ current_lang_name = name;
+ }
+ else
+ error ("language string `\"%s\"' not recognized", IDENTIFIER_POINTER (name));
+
+ if (flag_cadillac)
+ cadillac_push_lang (name);
+}
+
+/* Get out of the current language scope. */
+void
+pop_lang_context ()
+{
+ if (flag_cadillac)
+ cadillac_pop_lang ();
+
+ current_lang_name = *--current_lang_stack;
+ if (current_lang_name == lang_name_cplusplus)
+ strict_prototype = strict_prototypes_lang_cplusplus;
+ else if (current_lang_name == lang_name_c)
+ strict_prototype = strict_prototypes_lang_c;
+}
+
+int
+root_lang_context_p ()
+{
+ return current_lang_stack == current_lang_base;
+}
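+
+/* A usage sketch (hypothetical): an `extern "C"' linkage specification
+   would be handled roughly as
+
+	push_lang_context (lang_name_c);
+	... process the contained declarations with C mangling rules ...
+	pop_lang_context ();
+
+   and root_lang_context_p () is true only outside all such regions.  */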
+
+/* Type instantiation routines. */
+
+/* This function will instantiate the type of the expression given
+   in RHS to match the type of LHSTYPE.  If LHSTYPE is NULL_TREE, or
+   other errors are encountered, the TREE_TYPE of RHS will be
+   ERROR_MARK_NODE.
+
+   This function is used in build_modify_expr, convert_arguments,
+   build_c_cast, and compute_conversion_costs.  */
+tree
+instantiate_type (lhstype, rhs, complain)
+ tree lhstype, rhs;
+ int complain;
+{
+ if (TREE_CODE (lhstype) == UNKNOWN_TYPE)
+ {
+ if (complain)
+ error ("not enough type information");
+ return error_mark_node;
+ }
+
+ if (TREE_TYPE (rhs) != NULL_TREE && ! (type_unknown_p (rhs)))
+ return rhs;
+
+  /* This should really only be used when attempting to distinguish
+     what sort of pointer to function we have.  For now, any
+     arithmetic operation which is not supported on pointers
+     is rejected as an error.  */
+
+ switch (TREE_CODE (rhs))
+ {
+ case TYPE_EXPR:
+ case CONVERT_EXPR:
+ case SAVE_EXPR:
+ case CONSTRUCTOR:
+ case BUFFER_REF:
+ my_friendly_abort (177);
+ return error_mark_node;
+
+ case INDIRECT_REF:
+ case ARRAY_REF:
+ TREE_TYPE (rhs) = lhstype;
+ lhstype = build_pointer_type (lhstype);
+ TREE_OPERAND (rhs, 0)
+ = instantiate_type (lhstype, TREE_OPERAND (rhs, 0), complain);
+ if (TREE_OPERAND (rhs, 0) == error_mark_node)
+ return error_mark_node;
+
+ return rhs;
+
+ case NOP_EXPR:
+ rhs = copy_node (TREE_OPERAND (rhs, 0));
+ TREE_TYPE (rhs) = unknown_type_node;
+ return instantiate_type (lhstype, rhs, complain);
+
+ case COMPONENT_REF:
+ {
+ tree field = TREE_OPERAND (rhs, 1);
+ if (TREE_CODE (field) == TREE_LIST)
+ {
+ tree function = instantiate_type (lhstype, field, complain);
+ if (function == error_mark_node)
+ return error_mark_node;
+ my_friendly_assert (TREE_CODE (function) == FUNCTION_DECL, 185);
+ if (DECL_VINDEX (function))
+ {
+ tree base = TREE_OPERAND (rhs, 0);
+ tree base_ptr = build_unary_op (ADDR_EXPR, base, 0);
+ if (base_ptr == error_mark_node)
+ return error_mark_node;
+ base_ptr = convert_pointer_to (DECL_CONTEXT (function), base_ptr);
+ if (base_ptr == error_mark_node)
+ return error_mark_node;
+ return build_vfn_ref (&base_ptr, base, DECL_VINDEX (function));
+ }
+ return function;
+ }
+
+ my_friendly_assert (TREE_CODE (field) == FIELD_DECL, 178);
+ my_friendly_assert (!(TREE_CODE (TREE_TYPE (field)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (field)) == METHOD_TYPE),
+ 179);
+
+ TREE_TYPE (rhs) = lhstype;
+	/* First look for an exact match.  */
+
+ while (field && TREE_TYPE (field) != lhstype)
+ field = TREE_CHAIN (field);
+ if (field)
+ {
+ TREE_OPERAND (rhs, 1) = field;
+ return rhs;
+ }
+
+ /* No exact match found, look for a compatible function. */
+ field = TREE_OPERAND (rhs, 1);
+ while (field && ! comptypes (lhstype, TREE_TYPE (field), 0))
+ field = TREE_CHAIN (field);
+ if (field)
+ {
+ TREE_OPERAND (rhs, 1) = field;
+ field = TREE_CHAIN (field);
+ while (field && ! comptypes (lhstype, TREE_TYPE (field), 0))
+ field = TREE_CHAIN (field);
+ if (field)
+ {
+ if (complain)
+ error ("ambiguous overload for COMPONENT_REF requested");
+ return error_mark_node;
+ }
+ }
+ else
+ {
+ if (complain)
+ error ("no appropriate overload exists for COMPONENT_REF");
+ return error_mark_node;
+ }
+ return rhs;
+ }
+
+ case TREE_LIST:
+ {
+ tree elem, baselink, name;
+ int globals = overloaded_globals_p (rhs);
+
+#if 0 /* obsolete */
+ /* If there's only one function we know about, return that. */
+ if (globals > 0 && TREE_CHAIN (rhs) == NULL_TREE)
+ return TREE_VALUE (rhs);
+#endif
+
+ /* First look for an exact match. Search either overloaded
+ functions or member functions. May have to undo what
+ `default_conversion' might do to lhstype. */
+
+ if (TREE_CODE (lhstype) == POINTER_TYPE)
+ if (TREE_CODE (TREE_TYPE (lhstype)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (lhstype)) == METHOD_TYPE)
+ lhstype = TREE_TYPE (lhstype);
+ else
+ {
+ if (complain)
+ error ("invalid type combination for overload");
+ return error_mark_node;
+ }
+
+ if (TREE_CODE (lhstype) != FUNCTION_TYPE && globals > 0)
+ {
+ if (complain)
+ cp_error ("cannot resolve overloaded function `%D' based on non-function type",
+ TREE_PURPOSE (rhs));
+ return error_mark_node;
+ }
+
+ if (globals > 0)
+ {
+ elem = get_first_fn (rhs);
+ while (elem)
+ if (TREE_TYPE (elem) != lhstype)
+ elem = DECL_CHAIN (elem);
+ else
+ return elem;
+ /* No exact match found, look for a compatible function. */
+ elem = get_first_fn (rhs);
+ while (elem && ! comp_target_types (lhstype, TREE_TYPE (elem), 1))
+ elem = DECL_CHAIN (elem);
+ if (elem)
+ {
+ tree save_elem = elem;
+ elem = DECL_CHAIN (elem);
+ while (elem && ! comp_target_types (lhstype, TREE_TYPE (elem),
+ 0))
+ elem = DECL_CHAIN (elem);
+ if (elem)
+ {
+ if (complain)
+ {
+ cp_error ("cannot resolve overload to target type `%#T'",
+ lhstype);
+ cp_error_at (" ambiguity between `%#D'", save_elem);
+ cp_error_at (" and `%#D', at least", elem);
+ }
+ return error_mark_node;
+ }
+ if (TREE_CODE (save_elem) == TEMPLATE_DECL)
+ {
+ int ntparms = TREE_VEC_LENGTH
+ (DECL_TEMPLATE_PARMS (save_elem));
+ tree *targs = (tree *) alloca (sizeof (tree) * ntparms);
+ int i, dummy;
+ i = type_unification
+ (DECL_TEMPLATE_PARMS (save_elem), targs,
+ TYPE_ARG_TYPES (TREE_TYPE (save_elem)),
+ TYPE_ARG_TYPES (lhstype), &dummy, 0);
+ save_elem = instantiate_template (save_elem, targs);
+ }
+ return save_elem;
+ }
+ if (complain)
+ {
+ cp_error ("cannot resolve overload to target type `%#T'",
+ lhstype);
+ cp_error (" because no suitable overload of function `%D' exists",
+ TREE_PURPOSE (rhs));
+ }
+ return error_mark_node;
+ }
+
+ if (TREE_NONLOCAL_FLAG (rhs))
+ {
+	  /* We have to get it as a baselink.  */
+ rhs = lookup_fnfields (TYPE_BINFO (current_class_type),
+ TREE_PURPOSE (rhs), 0);
+ }
+ else
+ {
+ my_friendly_assert (TREE_CHAIN (rhs) == NULL_TREE, 181);
+ if (TREE_CODE (TREE_VALUE (rhs)) == TREE_LIST)
+ rhs = TREE_VALUE (rhs);
+ my_friendly_assert (TREE_CODE (TREE_VALUE (rhs)) == FUNCTION_DECL,
+ 182);
+ }
+
+ for (baselink = rhs; baselink;
+ baselink = next_baselink (baselink))
+ {
+ elem = TREE_VALUE (baselink);
+ while (elem)
+ if (comptypes (lhstype, TREE_TYPE (elem), 1))
+ return elem;
+ else
+ elem = TREE_CHAIN (elem);
+ }
+
+ /* No exact match found, look for a compatible method. */
+ for (baselink = rhs; baselink;
+ baselink = next_baselink (baselink))
+ {
+ elem = TREE_VALUE (baselink);
+ while (elem && ! comp_target_types (lhstype, TREE_TYPE (elem), 1))
+ elem = TREE_CHAIN (elem);
+ if (elem)
+ {
+ tree save_elem = elem;
+ elem = TREE_CHAIN (elem);
+ while (elem && ! comp_target_types (lhstype, TREE_TYPE (elem), 0))
+ elem = TREE_CHAIN (elem);
+ if (elem)
+ {
+ if (complain)
+ error ("ambiguous overload for overloaded method requested");
+ return error_mark_node;
+ }
+ return save_elem;
+ }
+ name = DECL_NAME (TREE_VALUE (rhs));
+#if 0
+ if (TREE_CODE (lhstype) == FUNCTION_TYPE && globals < 0)
+ {
+ /* Try to instantiate from non-member functions. */
+ rhs = lookup_name_nonclass (name);
+ if (rhs && TREE_CODE (rhs) == TREE_LIST)
+ {
+ /* This code seems to be missing a `return'. */
+ my_friendly_abort (4);
+ instantiate_type (lhstype, rhs, complain);
+ }
+ }
+#endif
+ }
+ if (complain)
+ error ("no static member functions named `%s'",
+ IDENTIFIER_POINTER (name));
+ return error_mark_node;
+ }
+
+ case CALL_EXPR:
+ /* This is too hard for now. */
+ my_friendly_abort (183);
+ return error_mark_node;
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case COMPOUND_EXPR:
+ TREE_OPERAND (rhs, 0)
+ = instantiate_type (lhstype, TREE_OPERAND (rhs, 0), complain);
+ if (TREE_OPERAND (rhs, 0) == error_mark_node)
+ return error_mark_node;
+ TREE_OPERAND (rhs, 1)
+ = instantiate_type (lhstype, TREE_OPERAND (rhs, 1), complain);
+ if (TREE_OPERAND (rhs, 1) == error_mark_node)
+ return error_mark_node;
+
+ TREE_TYPE (rhs) = lhstype;
+ return rhs;
+
+ case MULT_EXPR:
+ case TRUNC_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case RDIV_EXPR:
+ case TRUNC_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ case CEIL_MOD_EXPR:
+ case ROUND_MOD_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_CEIL_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FLOAT_EXPR:
+ case NEGATE_EXPR:
+ case ABS_EXPR:
+ case MAX_EXPR:
+ case MIN_EXPR:
+ case FFS_EXPR:
+
+ case BIT_AND_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+
+ case PREINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ if (complain)
+ error ("illegal operation on uninstantiated type");
+ return error_mark_node;
+
+ case TRUTH_AND_EXPR:
+ case TRUTH_OR_EXPR:
+ case TRUTH_XOR_EXPR:
+ case LT_EXPR:
+ case LE_EXPR:
+ case GT_EXPR:
+ case GE_EXPR:
+ case EQ_EXPR:
+ case NE_EXPR:
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ case TRUTH_NOT_EXPR:
+ if (complain)
+ error ("not enough type information");
+ return error_mark_node;
+
+ case COND_EXPR:
+ if (type_unknown_p (TREE_OPERAND (rhs, 0)))
+ {
+ if (complain)
+ error ("not enough type information");
+ return error_mark_node;
+ }
+ TREE_OPERAND (rhs, 1)
+ = instantiate_type (lhstype, TREE_OPERAND (rhs, 1), complain);
+ if (TREE_OPERAND (rhs, 1) == error_mark_node)
+ return error_mark_node;
+ TREE_OPERAND (rhs, 2)
+ = instantiate_type (lhstype, TREE_OPERAND (rhs, 2), complain);
+ if (TREE_OPERAND (rhs, 2) == error_mark_node)
+ return error_mark_node;
+
+ TREE_TYPE (rhs) = lhstype;
+ return rhs;
+
+ case MODIFY_EXPR:
+ TREE_OPERAND (rhs, 1)
+ = instantiate_type (lhstype, TREE_OPERAND (rhs, 1), complain);
+ if (TREE_OPERAND (rhs, 1) == error_mark_node)
+ return error_mark_node;
+
+ TREE_TYPE (rhs) = lhstype;
+ return rhs;
+
+ case ADDR_EXPR:
+ if (TYPE_PTRMEMFUNC_P (lhstype))
+ lhstype = TYPE_PTRMEMFUNC_FN_TYPE (lhstype);
+ else if (TREE_CODE (lhstype) != POINTER_TYPE)
+ {
+ if (complain)
+ error ("type for resolving address of overloaded function must be pointer type");
+ return error_mark_node;
+ }
+ TREE_TYPE (rhs) = lhstype;
+ lhstype = TREE_TYPE (lhstype);
+ TREE_OPERAND (rhs, 0)
+ = instantiate_type (lhstype, TREE_OPERAND (rhs, 0), complain);
+ if (TREE_OPERAND (rhs, 0) == error_mark_node)
+ return error_mark_node;
+
+ mark_addressable (TREE_OPERAND (rhs, 0));
+ return rhs;
+
+ case ENTRY_VALUE_EXPR:
+ my_friendly_abort (184);
+ return error_mark_node;
+
+ case ERROR_MARK:
+ return error_mark_node;
+
+ default:
+ my_friendly_abort (185);
+ return error_mark_node;
+ }
+}
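+
+/* A minimal illustration (hypothetical C++ input) of what
+   instantiate_type resolves:
+
+	int f (int);
+	int f (double);
+	int (*fp) (double) = &f;
+
+   The initializer `&f' has unknown type until a target type is seen;
+   the ADDR_EXPR case above strips the pointer from LHSTYPE and then
+   selects the overload whose type matches `int (double)'.  */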
+
+/* Return the name of the virtual function pointer field
+   (as an IDENTIFIER_NODE) for the given TYPE.  Note that
+   this may have to look back through base types to find the
+   ultimate field name.  (For single inheritance, these could
+   all be the same name.  Who knows for multiple inheritance?)  */
+static tree
+get_vfield_name (type)
+ tree type;
+{
+ tree binfo = TYPE_BINFO (type);
+ char *buf;
+
+ while (BINFO_BASETYPES (binfo)
+ && TYPE_VIRTUAL_P (BINFO_TYPE (BINFO_BASETYPE (binfo, 0)))
+ && ! TREE_VIA_VIRTUAL (BINFO_BASETYPE (binfo, 0)))
+ binfo = BINFO_BASETYPE (binfo, 0);
+
+ type = BINFO_TYPE (binfo);
+ buf = (char *)alloca (sizeof (VFIELD_NAME_FORMAT)
+ + TYPE_NAME_LENGTH (type) + 2);
+ sprintf (buf, VFIELD_NAME_FORMAT, TYPE_NAME_STRING (type));
+ return get_identifier (buf);
+}
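+
+/* For a type Foo, the identifier built above is presumably something
+   like `_vptr$Foo', though the exact spelling is determined by
+   VFIELD_NAME_FORMAT.  */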
+
+void
+print_class_statistics ()
+{
+#ifdef GATHER_STATISTICS
+ fprintf (stderr, "convert_harshness = %d\n", n_convert_harshness);
+ fprintf (stderr, "compute_conversion_costs = %d\n", n_compute_conversion_costs);
+ fprintf (stderr, "build_method_call = %d (inner = %d)\n",
+ n_build_method_call, n_inner_fields_searched);
+ if (n_vtables)
+ {
+ fprintf (stderr, "vtables = %d; vtable searches = %d\n",
+ n_vtables, n_vtable_searches);
+ fprintf (stderr, "vtable entries = %d; vtable elems = %d\n",
+ n_vtable_entries, n_vtable_elems);
+ }
+#endif
+}
+
+/* Push an obstack which is sufficiently long-lived to hold class
+   decls that may be cached in the previous_class_values list.  For now,
+   let's use the permanent obstack; later we may create a dedicated obstack
+   just for this purpose.  The effect is undone by pop_obstacks.  */
+void
+maybe_push_cache_obstack ()
+{
+ push_obstacks_nochange ();
+ if (current_class_depth == 1)
+ current_obstack = &permanent_obstack;
+}
diff --git a/gnu/usr.bin/cc/cc1plus/class.h b/gnu/usr.bin/cc/cc1plus/class.h
new file mode 100644
index 0000000..6f31e15
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/class.h
@@ -0,0 +1,116 @@
+/* Variables and structures for overloading rules.
+ Copyright (C) 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* The following structure is used when comparing various alternatives
+ for overloading. The unsigned quantity `strikes.i' is used
+ for fast comparison of two possibilities. This number is an
+   aggregate of five constituents:
+
+ EVIL: if this is non-zero, then the candidate should not be considered
+ ELLIPSIS: if this is non-zero, then some actual argument has been matched
+ against an ellipsis
+ USER: if this is non-zero, then a user-defined type conversion is needed
+ B_OR_D: if this is non-zero, then use a base pointer instead of the
+ type of the pointer we started with.
+ EASY: if this is non-zero, then we have a builtin conversion
+ (such as int to long, int to float, etc) to do.
+
+ If two candidates require user-defined type conversions, and the
+ type conversions are not identical, then an ambiguity error
+ is reported.
+
+ If two candidates agree on user-defined type conversions,
+ and one uses pointers of strictly higher type (derived where
+ another uses base), then that alternative is silently chosen.
+
+   Note that this technique really only works for up to 255 arguments.
+   Perhaps this is not enough.  */
+
+/* These macros and harshness_code are used by the NEW METHOD. */
+#define EVIL_CODE (1<<7)
+#define CONST_CODE (1<<6)
+#define ELLIPSIS_CODE (1<<5)
+#define USER_CODE (1<<4)
+#define STD_CODE (1<<3)
+#define PROMO_CODE (1<<2)
+#define QUAL_CODE (1<<1)
+#define TRIVIAL_CODE (1<<0)
+
+struct harshness_code
+{
+ /* What kind of conversion is involved. */
+ unsigned short code;
+
+ /* The inheritance distance. */
+ short distance;
+
+  /* For a PROMO_CODE, any special penalties involved in integral conversions.
+ This exists because $4.1 of the ARM states that something like
+ `short unsigned int' should promote to `int', not `unsigned int'.
+ If, for example, it tries to match two fns, f(int) and f(unsigned),
+ f(int) should be a better match than f(unsigned) by this rule. Without
+ this extra metric, they both only appear as "integral promotions", which
+ will lead to an ambiguity.
+     For a TRIVIAL_CODE, this is also used by build_overload_call_real and
+ convert_harshness to keep track of other information we need. */
+ unsigned short int_penalty;
+};
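+
+#if 0
+/* Illustrative only: the codes above are single bits ordered from
+   worst (EVIL_CODE, bit 7) down to best (TRIVIAL_CODE, bit 0), so two
+   single-argument conversions can be ranked by comparing the raw code
+   values.  A hypothetical helper might read: */
+static int
+worse_conversion_p (a, b)
+     struct harshness_code *a, *b;
+{
+  /* A larger code means a harsher (less desirable) conversion.  */
+  return a->code > b->code;
+}
+#endif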
+
+struct candidate
+{
+ struct harshness_code h; /* Used for single-argument conversions. */
+
+ int h_len; /* The length of the harshness vector. */
+
+ tree function; /* A FUNCTION_DECL */
+ tree basetypes; /* The path to function. */
+ tree arg; /* first parm to function. */
+
+ /* Indexed by argument number, encodes evil, user, d_to_b, and easy
+ strikes for that argument. At end of array, we store the index+1
+ of where we started using default parameters, or 0 if there are
+ none. */
+ struct harshness_code *harshness;
+
+ union
+ {
+ tree field; /* If no evil strikes, the FUNCTION_DECL of
+ the function (if a member function). */
+    int bad_arg;		/* The index of the first bad argument:
+				    0 if no bad arguments,
+				   >0 is the first bad argument,
+				   -1 if extra actual arguments,
+				   -2 if too few actual arguments,
+				   -3 if const/non-const method mismatch,
+				   -4 if type unification failed,
+				   -5 if contravariance violation.  */
+ } u;
+};
+int rank_for_overload ();
+
+/* Variables shared between class.c and call.c. */
+
+extern int n_vtables;
+extern int n_vtable_entries;
+extern int n_vtable_searches;
+extern int n_vtable_elems;
+extern int n_convert_harshness;
+extern int n_compute_conversion_costs;
+extern int n_build_method_call;
+extern int n_inner_fields_searched;
diff --git a/gnu/usr.bin/cc/cc1plus/cp-tree.h b/gnu/usr.bin/cc/cc1plus/cp-tree.h
new file mode 100644
index 0000000..80fd832
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/cp-tree.h
@@ -0,0 +1,2373 @@
+/* Definitions for C++ parsing and type checking.
+ Copyright (C) 1987, 1993 Free Software Foundation, Inc.
+ Hacked by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifndef _CP_TREE_H
+#define _CP_TREE_H
+
+/* Borrow everything that is C from c-tree.h,
+ but do so by copy, not by inclusion, since c-tree.h defines
+ lang_identifier. */
+
+#ifndef STDIO_PROTO
+#ifdef BUFSIZ
+#define STDIO_PROTO(ARGS) PROTO(ARGS)
+#else
+#define STDIO_PROTO(ARGS) ()
+#endif
+#endif
+
+/* Language-dependent contents of an identifier. */
+
+struct lang_identifier
+{
+ struct tree_identifier ignore;
+ tree global_value, local_value;
+ tree class_value;
+ tree class_template_info;
+ struct lang_id2 *x;
+};
+
+struct lang_id2
+{
+ tree label_value, implicit_decl;
+ tree type_desc, as_list, error_locus;
+};
+
+/* To indicate to the debug emitters whether they should pay attention
+   to the flag `-Wtemplate-debugging'.  */
+#define HAVE_TEMPLATES 1
+
+/* Macros for access to language-specific slots in an identifier. */
+
+#define IDENTIFIER_GLOBAL_VALUE(NODE) \
+ (((struct lang_identifier *)(NODE))->global_value)
+#define IDENTIFIER_CLASS_VALUE(NODE) \
+ (((struct lang_identifier *)(NODE))->class_value)
+#define IDENTIFIER_LOCAL_VALUE(NODE) \
+ (((struct lang_identifier *)(NODE))->local_value)
+#define IDENTIFIER_TEMPLATE(NODE) \
+ (((struct lang_identifier *)(NODE))->class_template_info)
+
+#define IDENTIFIER_TYPE_VALUE(NODE) (TREE_TYPE (NODE))
+#define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = TYPE)
+#define IDENTIFIER_HAS_TYPE_VALUE(NODE) (TREE_TYPE (NODE) ? 1 : 0)
+
+#define LANG_ID_FIELD(NAME,NODE) \
+ (((struct lang_identifier *)(NODE))->x \
+ ? ((struct lang_identifier *)(NODE))->x->NAME : 0)
+#define SET_LANG_ID(NODE,VALUE,NAME) \
+ (((struct lang_identifier *)(NODE))->x == 0 \
+ ? ((struct lang_identifier *)(NODE))->x \
+ = (struct lang_id2 *)perm_calloc (1, sizeof (struct lang_id2)) : 0, \
+ ((struct lang_identifier *)(NODE))->x->NAME = (VALUE))
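+
+/* Note that LANG_ID_FIELD reads 0 when no lang_id2 structure has been
+   allocated yet, while SET_LANG_ID allocates one on demand via
+   perm_calloc.  So, for example, SET_IDENTIFIER_LABEL_VALUE below is
+   safe on a fresh identifier, and the corresponding reader never
+   dereferences a null `x' pointer.  */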
+
+#define IDENTIFIER_LABEL_VALUE(NODE) LANG_ID_FIELD(label_value, NODE)
+#define SET_IDENTIFIER_LABEL_VALUE(NODE,VALUE) \
+ SET_LANG_ID(NODE, VALUE, label_value)
+
+#define IDENTIFIER_IMPLICIT_DECL(NODE) LANG_ID_FIELD(implicit_decl, NODE)
+#define SET_IDENTIFIER_IMPLICIT_DECL(NODE,VALUE) \
+ SET_LANG_ID(NODE, VALUE, implicit_decl)
+
+#define IDENTIFIER_AS_DESC(NODE) LANG_ID_FIELD(type_desc, NODE)
+#define SET_IDENTIFIER_AS_DESC(NODE,DESC) \
+ SET_LANG_ID(NODE, DESC, type_desc)
+
+#define IDENTIFIER_AS_LIST(NODE) LANG_ID_FIELD(as_list, NODE)
+#define SET_IDENTIFIER_AS_LIST(NODE,LIST) \
+ SET_LANG_ID(NODE, LIST, as_list)
+
+#define IDENTIFIER_ERROR_LOCUS(NODE) LANG_ID_FIELD(error_locus, NODE)
+#define SET_IDENTIFIER_ERROR_LOCUS(NODE,VALUE) \
+ SET_LANG_ID(NODE, VALUE, error_locus)
+
+
+#define IDENTIFIER_VIRTUAL_P(NODE) TREE_LANG_FLAG_1(NODE)
+
+/* Nonzero if this identifier is the prefix for a mangled C++ operator name. */
+#define IDENTIFIER_OPNAME_P(NODE) TREE_LANG_FLAG_2(NODE)
+
+#define IDENTIFIER_TYPENAME_P(NODE) \
+ (! strncmp (IDENTIFIER_POINTER (NODE), \
+ IDENTIFIER_POINTER (ansi_opname[(int) TYPE_EXPR]), \
+ IDENTIFIER_LENGTH (ansi_opname[(int) TYPE_EXPR])))
+
+/* Nonzero means reject anything that ANSI standard C forbids. */
+extern int pedantic;
+
+/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
+#define C_TYPE_FIELDS_READONLY(type) TYPE_LANG_FLAG_0 (type)
+
+/* If non-zero, a VAR_DECL whose cleanup will cause a throw to the
+ next exception handler. */
+extern tree exception_throw_decl;
+
+extern tree double_type_node, long_double_type_node, float_type_node;
+extern tree char_type_node, unsigned_char_type_node, signed_char_type_node;
+extern tree ptrdiff_type_node;
+
+extern tree short_integer_type_node, short_unsigned_type_node;
+extern tree long_integer_type_node, long_unsigned_type_node;
+extern tree long_long_integer_type_node, long_long_unsigned_type_node;
+extern tree unsigned_type_node;
+extern tree string_type_node, char_array_type_node, int_array_type_node;
+extern tree wchar_array_type_node;
+extern tree wchar_type_node, signed_wchar_type_node, unsigned_wchar_type_node;
+extern tree intQI_type_node, unsigned_intQI_type_node;
+extern tree intHI_type_node, unsigned_intHI_type_node;
+extern tree intSI_type_node, unsigned_intSI_type_node;
+extern tree intDI_type_node, unsigned_intDI_type_node;
+
+extern int current_function_returns_value;
+extern int current_function_returns_null;
+extern tree current_function_return_value;
+
+extern tree ridpointers[];
+extern tree ansi_opname[];
+extern tree ansi_assopname[];
+
+/* Nonzero means `$' can be in an identifier. */
+
+extern int dollars_in_ident;
+
+/* Nonzero means allow type mismatches in conditional expressions;
+ just make their values `void'. */
+
+extern int flag_cond_mismatch;
+
+/* Nonzero means don't recognize the keyword `asm'. */
+
+extern int flag_no_asm;
+
+/* For cross referencing. */
+
+extern int flag_gnu_xref;
+
+/* For environments where you can use GNU binutils (as, ld in particular). */
+
+extern int flag_gnu_binutils;
+
+/* Nonzero means ignore `#ident' directives. */
+
+extern int flag_no_ident;
+
+/* Nonzero means warn about implicit declarations. */
+
+extern int warn_implicit;
+
+/* Nonzero means warn when all ctors or dtors are private, and the class
+ has no friends. */
+
+extern int warn_ctor_dtor_privacy;
+
+/* Nonzero means warn about function definitions that default the return type
+ or that use a null return and have a return-type other than void. */
+
+extern int warn_return_type;
+
+/* Nonzero means give string constants the type `const char *'
+ to get extra warnings from them. These warnings will be too numerous
+ to be useful, except in thoroughly ANSIfied programs. */
+
+extern int warn_write_strings;
+
+/* Nonzero means warn about sizeof(function) or addition/subtraction
+ of function pointers. */
+
+extern int warn_pointer_arith;
+
+/* Nonzero means warn for all old-style non-prototype function decls. */
+
+extern int warn_strict_prototypes;
+
+/* Nonzero means warn about suggesting putting in ()'s. */
+
+extern int warn_parentheses;
+
+/* Nonzero means warn about multiple (redundant) decls for the same single
+ variable or function. */
+
+extern int warn_redundant_decls;
+
+/* Warn if initializer is not completely bracketed. */
+
+extern int warn_missing_braces;
+
+/* Warn about a subscript that has type char. */
+
+extern int warn_char_subscripts;
+
+/* Nonzero means warn about pointer casts that can drop a type qualifier
+ from the pointer target type. */
+
+extern int warn_cast_qual;
+
+/* Warn about traditional constructs whose meanings changed in ANSI C. */
+
+extern int warn_traditional;
+
+/* Warn about *printf or *scanf format/argument anomalies. */
+
+extern int warn_format;
+
+/* Nonzero means warn about non virtual destructors in classes that have
+ virtual functions. */
+
+extern int warn_nonvdtor;
+
+/* Non-zero means warn when a function is declared extern and later inline. */
+extern int warn_extern_inline;
+
+/* Nonzero means do some things the same way PCC does. */
+
+extern int flag_traditional;
+
+/* Nonzero means to treat bitfields as unsigned unless they say `signed'. */
+
+extern int flag_signed_bitfields;
+
+/* 3 means write out only virtual function tables `defined'
+ in this implementation file.
+ 2 means write out only specific virtual function tables
+ and give them (C) public access.
+ 1 means write out virtual function tables and give them
+ (C) public access.
+ 0 means write out virtual function tables and give them
+ (C) static access (default).
+ -1 means declare virtual function tables extern. */
+
+extern int write_virtuals;
+
+/* True for the more efficient but incompatible (and not fully tested)
+   vtable implementation (using thunks).
+   0 is the old behavior; 1 is the new behavior.  */
+extern int flag_vtable_thunks;
+
+/* INTERFACE_ONLY nonzero means that we are in an "interface"
+ section of the compiler. INTERFACE_UNKNOWN nonzero means
+ we cannot trust the value of INTERFACE_ONLY. If INTERFACE_UNKNOWN
+ is zero and INTERFACE_ONLY is zero, it means that we are responsible
+ for exporting definitions that others might need. */
+extern int interface_only, interface_unknown;
+
+/* Nonzero means we should attempt to elide constructors when possible. */
+
+extern int flag_elide_constructors;
+
+/* Nonzero means handle things in ANSI, instead of GNU fashion. */
+
+extern int flag_ansi;
+
+/* Nonzero means recognize and handle ansi-style exception handling
+ constructs. */
+
+extern int flag_handle_exceptions;
+
+/* Nonzero means recognize and handle signature language constructs. */
+
+extern int flag_handle_signatures;
+
+/* Nonzero means that member functions defined in class scope are
+ inline by default. */
+
+extern int flag_default_inline;
+
+/* Nonzero means emit cadillac protocol. */
+
+extern int flag_cadillac;
+
+/* C++ language-specific tree codes. */
+#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) SYM,
+enum cplus_tree_code {
+ __DUMMY = LAST_AND_UNUSED_TREE_CODE,
+#include "tree.def"
+ LAST_CPLUS_TREE_CODE
+};
+#undef DEFTREECODE
+
+/* Override OFFSET_REFs from the back-end, as we want our very own. */
+/* Allow complex pointer to members to work correctly. */
+#define OFFSET_REF CP_OFFSET_REF
+
+enum languages { lang_c, lang_cplusplus };
+
+/* Macros to make error reporting functions' lives easier. */
+#define TYPE_IDENTIFIER(NODE) (DECL_NAME (TYPE_NAME (NODE)))
+#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
+#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))
+
+#define TYPE_ASSEMBLER_NAME_STRING(NODE) (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (TYPE_NAME (NODE))))
+#define TYPE_ASSEMBLER_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (DECL_ASSEMBLER_NAME (TYPE_NAME (NODE))))
+
+/* The _DECL for this _TYPE. */
+#define TYPE_MAIN_DECL(NODE) (TYPE_NAME (NODE))
+
+#define IS_AGGR_TYPE(t) (TYPE_LANG_FLAG_5 (t))
+#define IS_AGGR_TYPE_CODE(t)	((t) == RECORD_TYPE || (t) == UNION_TYPE || (t) == UNINSTANTIATED_P_TYPE)
+#define IS_AGGR_TYPE_2(TYPE1,TYPE2) \
+  (TREE_CODE (TYPE1) == TREE_CODE (TYPE2) \
+   && IS_AGGR_TYPE (TYPE1) & IS_AGGR_TYPE (TYPE2))
+#define IS_OVERLOAD_TYPE_CODE(t) (IS_AGGR_TYPE_CODE (t) || (t) == ENUMERAL_TYPE)
+#define IS_OVERLOAD_TYPE(t) (IS_OVERLOAD_TYPE_CODE (TREE_CODE (t)))
+
+/* In a *_TYPE, nonzero means a built-in type. */
+#define TYPE_BUILT_IN(NODE) TYPE_LANG_FLAG_6(NODE)
+
+/* Macros which might want to be replaced by function calls. */
+
+#define DELTA_FROM_VTABLE_ENTRY(ENTRY) \
+ (!flag_vtable_thunks ? \
+ TREE_VALUE (CONSTRUCTOR_ELTS (ENTRY)) \
+ : TREE_CODE (TREE_OPERAND ((ENTRY), 0)) != THUNK_DECL ? integer_zero_node \
+ : build_int_2 (THUNK_DELTA (TREE_OPERAND ((ENTRY), 0)), 0))
+#if 1
+/* Virtual function addresses can be gotten from a virtual function
+ table entry using this macro. */
+#define FNADDR_FROM_VTABLE_ENTRY(ENTRY) \
+ (!flag_vtable_thunks ? \
+ TREE_VALUE (TREE_CHAIN (TREE_CHAIN (CONSTRUCTOR_ELTS (ENTRY)))) \
+ : TREE_CODE (TREE_OPERAND ((ENTRY), 0)) != THUNK_DECL ? (ENTRY) \
+ : DECL_INITIAL (TREE_OPERAND ((ENTRY), 0)))
+#define SET_FNADDR_FROM_VTABLE_ENTRY(ENTRY,VALUE) \
+ (TREE_VALUE (TREE_CHAIN (TREE_CHAIN (CONSTRUCTOR_ELTS (ENTRY)))) = (VALUE))
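+
+/* In the non-thunk case the two macros above imply that a vtable entry
+   is a CONSTRUCTOR whose element list is apparently <delta, index, pfn>:
+   DELTA_FROM_VTABLE_ENTRY takes the first element and
+   FNADDR_FROM_VTABLE_ENTRY the third.  */
+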
+#define FUNCTION_ARG_CHAIN(NODE) (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE))))
+#define PROMOTES_TO_AGGR_TYPE(NODE,CODE) \
+ (((CODE) == TREE_CODE (NODE) \
+ && IS_AGGR_TYPE (TREE_TYPE (NODE))) \
+ || IS_AGGR_TYPE (NODE))
+
+#else
+#define FNADDR_FROM_VTABLE_ENTRY(ENTRY) (fnaddr_from_vtable_entry (ENTRY))
+#define SET_FNADDR_FROM_VTABLE_ENTRY(ENTRY,VALUE) \
+ (set_fnaddr_from_vtable_entry (ENTRY, VALUE))
+/* #define TYPE_NAME_STRING(NODE) (type_name_string (NODE)) */
+#define FUNCTION_ARG_CHAIN(NODE) (function_arg_chain (NODE))
+#define PROMOTES_TO_AGGR_TYPE(NODE,CODE) (promotes_to_aggr_type (NODE, CODE))
+/* #define IS_AGGR_TYPE_2(TYPE1, TYPE2) (is_aggr_type_2 (TYPE1, TYPE2)) */
+#endif
+/* Nonzero iff TYPE is uniquely derived from PARENT. Under MI, PARENT can
+ be an ambiguous base class of TYPE, and this macro will be false. */
+#define UNIQUELY_DERIVED_FROM_P(PARENT, TYPE) (get_base_distance (PARENT, TYPE, 0, (tree *)0) >= 0)
+#define ACCESSIBLY_DERIVED_FROM_P(PARENT, TYPE) (get_base_distance (PARENT, TYPE, -1, (tree *)0) >= 0)
+#define ACCESSIBLY_UNIQUELY_DERIVED_P(PARENT, TYPE) (get_base_distance (PARENT, TYPE, 1, (tree *)0) >= 0)
+#define DERIVED_FROM_P(PARENT, TYPE) (get_base_distance (PARENT, TYPE, 0, (tree *)0) != -1)
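+
+/* The distinction matters under MI; a hypothetical example:
+
+	class A {};
+	class B : public A {};
+	class C : public A {};
+	class D : public B, public C {};
+
+   Here DERIVED_FROM_P (A, D) holds, since get_base_distance does not
+   return -1, but UNIQUELY_DERIVED_FROM_P (A, D) does not hold, since
+   the path to A is ambiguous and the returned distance is presumably
+   some other negative value.  */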
+
+enum conversion_type { ptr_conv, constptr_conv, int_conv,
+ real_conv, last_conversion_type };
+
+/* Statistics show that while the GNU C++ compiler may generate
+ thousands of different types during a compilation run, it
+   generates relatively few (tens of) classtypes.  Because of this,
+ it is not costly to store a generous amount of information
+ in classtype nodes. This struct must fill out to a multiple of 4 bytes. */
+struct lang_type
+{
+ struct
+ {
+ unsigned has_type_conversion : 1;
+ unsigned has_int_conversion : 1;
+ unsigned has_float_conversion : 1;
+ unsigned has_init_ref : 1;
+ unsigned gets_init_aggr : 1;
+ unsigned has_assignment : 1;
+ unsigned has_default_ctor : 1;
+ unsigned uses_multiple_inheritance : 1;
+
+ unsigned has_nonpublic_ctor : 2;
+ unsigned has_nonpublic_assign_ref : 2;
+ unsigned const_needs_init : 1;
+ unsigned ref_needs_init : 1;
+ unsigned has_const_assign_ref : 1;
+ unsigned vtable_needs_writing : 1;
+
+ unsigned has_assign_ref : 1;
+ unsigned gets_new : 2;
+ unsigned gets_delete : 2;
+ unsigned has_call_overloaded : 1;
+ unsigned has_array_ref_overloaded : 1;
+ unsigned has_arrow_overloaded : 1;
+
+ unsigned local_typedecls : 1;
+ unsigned interface_only : 1;
+ unsigned interface_unknown : 1;
+ unsigned needs_virtual_reinit : 1;
+ unsigned declared_exception : 1;
+ unsigned declared_class : 1;
+ unsigned being_defined : 1;
+ unsigned redefined : 1;
+
+ unsigned no_globalize : 1;
+ unsigned marked : 1;
+ unsigned marked2 : 1;
+ unsigned marked3 : 1;
+ unsigned marked4 : 1;
+ unsigned marked5 : 1;
+ unsigned marked6 : 1;
+
+ unsigned use_template : 2;
+ unsigned debug_requested : 1;
+ unsigned has_method_call_overloaded : 1;
+ unsigned private_attr : 1;
+ unsigned got_semicolon : 1;
+ unsigned ptrmemfunc_flag : 1;
+ unsigned is_signature : 1;
+ unsigned is_signature_pointer : 1;
+
+ unsigned is_signature_reference : 1;
+ unsigned has_default_implementation : 1;
+ unsigned grokking_typedef : 1;
+ unsigned has_opaque_typedecls : 1;
+ unsigned sigtable_has_been_generated : 1;
+ unsigned was_anonymous : 1;
+ unsigned has_real_assignment : 1;
+ unsigned has_real_assign_ref : 1;
+
+ unsigned has_const_init_ref : 1;
+ unsigned has_complex_init_ref : 1;
+ unsigned has_complex_assign_ref : 1;
+ unsigned vec_delete_takes_size : 1;
+ unsigned has_abstract_assign_ref : 1;
+
+ /* The MIPS compiler gets it wrong if this struct also
+ does not fill out to a multiple of 4 bytes. Add a
+ member `dummy' with new bits if you go over the edge. */
+ unsigned dummy : 19;
+
+ unsigned n_vancestors : 16;
+ } type_flags;
+
+ int cid;
+ int n_ancestors;
+ int vsize;
+ int max_depth;
+ int vfield_parent;
+
+ union tree_node *vbinfo[2];
+ union tree_node *baselink_vec;
+ union tree_node *vfields;
+ union tree_node *vbases;
+ union tree_node *vbase_size;
+
+ union tree_node *tags;
+ char *memoized_table_entry;
+
+ char *search_slot;
+
+#ifdef ONLY_INT_FIELDS
+ unsigned int mode : 8;
+#else
+ enum machine_mode mode : 8;
+#endif
+
+ unsigned char size_unit;
+ unsigned char align;
+ unsigned char sep_unit;
+
+ union tree_node *sep;
+ union tree_node *size;
+
+ union tree_node *base_init_list;
+ union tree_node *abstract_virtuals;
+ union tree_node *as_list;
+ union tree_node *id_as_list;
+ union tree_node *binfo_as_list;
+ union tree_node *vtbl_ptr;
+ union tree_node *instance_variable;
+ union tree_node *friend_classes;
+
+ char *mi_matrix;
+ union tree_node *conversions[last_conversion_type];
+
+ union tree_node *dossier;
+
+ union tree_node *methods;
+
+ union tree_node *signature;
+ union tree_node *signature_pointer_to;
+ union tree_node *signature_reference_to;
+};
+
+/* Indicates whether or not (and how) a template was expanded for this class.
+ 0=no information yet/non-template class
+ 1=implicit template instantiation
+ 2=explicit template specialization
+ 3=explicit template instantiation */
+#define CLASSTYPE_USE_TEMPLATE(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.use_template)
+
+/* Fields used for storing information before the class is defined.
+ After the class is defined, these fields hold other information. */
+
+/* List of friends which were defined inline in this class definition. */
+#define CLASSTYPE_INLINE_FRIENDS(NODE) (TYPE_NONCOPIED_PARTS (NODE))
+
+/* Nonzero for _CLASSTYPE means that the _CLASSTYPE either has
+ a special meaning for the assignment operator ("operator="),
+ or one of its fields (or base members) has a special meaning
+ defined. */
+#define TYPE_HAS_ASSIGNMENT(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_assignment)
+#define TYPE_HAS_REAL_ASSIGNMENT(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_real_assignment)
+
+/* Nonzero for _CLASSTYPE means that operator new and delete are defined,
+ respectively. */
+#define TYPE_GETS_NEW(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.gets_new)
+#define TYPE_GETS_DELETE(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.gets_delete)
+#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)
+
+/* Nonzero for _CLASSTYPE means that operator vec delete is defined and
+ takes the optional size_t argument. */
+#define TYPE_VEC_DELETE_TAKES_SIZE(NODE) \
+ (TYPE_LANG_SPECIFIC(NODE)->type_flags.vec_delete_takes_size)
+#define TYPE_VEC_NEW_USES_COOKIE(NODE) \
+ (TYPE_NEEDS_DESTRUCTOR (NODE) \
+ || (TYPE_LANG_SPECIFIC (NODE) && TYPE_VEC_DELETE_TAKES_SIZE (NODE)))
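+
+/* That is, for `new T[n]' the allocation presumably prepends a cookie
+   recording n whenever T needs a destructor, or whenever T's vector
+   operator delete takes the optional size_t argument, so that
+   `delete[] p' can recover the element count later.  */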
+
+/* Nonzero for TREE_LIST or _TYPE node means that this node is class-local. */
+#define TREE_NONLOCAL_FLAG(NODE) (TREE_LANG_FLAG_0 (NODE))
+
+/* Nonzero for a _CLASSTYPE node which we know to be private. */
+#define TYPE_PRIVATE_P(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.private_attr)
+
+/* Nonzero means that this _CLASSTYPE node defines ways of converting
+ itself to other types. */
+#define TYPE_HAS_CONVERSION(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_type_conversion)
+
+/* Nonzero means that this _CLASSTYPE node can convert itself to an
+ INTEGER_TYPE. */
+#define TYPE_HAS_INT_CONVERSION(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_int_conversion)
+
+/* Nonzero means that this _CLASSTYPE node can convert itself to an
+ REAL_TYPE. */
+#define TYPE_HAS_REAL_CONVERSION(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_float_conversion)
+
+/* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */
+#define TYPE_HAS_ASSIGN_REF(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_assign_ref)
+#define TYPE_HAS_CONST_ASSIGN_REF(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_const_assign_ref)
+
+/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */
+#define TYPE_HAS_INIT_REF(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_init_ref)
+#define TYPE_HAS_CONST_INIT_REF(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_const_init_ref)
+
+/* Nonzero means that this _CLASSTYPE node has an X(X ...) constructor.
+ Note that there must be other arguments, or this constructor is flagged
+ as being erroneous. */
+#define TYPE_GETS_INIT_AGGR(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.gets_init_aggr)
+
+/* Nonzero means that this type is being defined. I.e., the left brace
+ starting the definition of this type has been seen. */
+#define TYPE_BEING_DEFINED(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.being_defined)
+/* Nonzero means that this type has been redefined. In this case, if
+ convenient, don't reprocess any methods that appear in its redefinition. */
+#define TYPE_REDEFINED(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.redefined)
+
+/* Nonzero means that this _CLASSTYPE node overloads the method call
+   operator.  In this case, all method calls go through `operator->()(...)'.  */
+#define TYPE_OVERLOADS_METHOD_CALL_EXPR(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_method_call_overloaded)
+
+/* Nonzero means that this type is a signature. */
+# define IS_SIGNATURE(NODE) (TYPE_LANG_SPECIFIC(NODE)?TYPE_LANG_SPECIFIC(NODE)->type_flags.is_signature:0)
+# define SET_SIGNATURE(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.is_signature=1)
+# define CLEAR_SIGNATURE(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.is_signature=0)
+
+/* Nonzero means that this type is a signature pointer type. */
+# define IS_SIGNATURE_POINTER(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.is_signature_pointer)
+
+/* Nonzero means that this type is a signature reference type. */
+# define IS_SIGNATURE_REFERENCE(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.is_signature_reference)
+
+/* Nonzero means that this signature type has a default implementation. */
+# define HAS_DEFAULT_IMPLEMENTATION(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_default_implementation)
+
+/* Nonzero means that grokdeclarator works on a signature-local typedef. */
+#define SIGNATURE_GROKKING_TYPEDEF(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.grokking_typedef)
+
+/* Nonzero means that this signature contains opaque type declarations. */
+#define SIGNATURE_HAS_OPAQUE_TYPEDECLS(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_opaque_typedecls)
+
+/* Nonzero means that a signature table has been generated
+ for this signature. */
+#define SIGTABLE_HAS_BEEN_GENERATED(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.sigtable_has_been_generated)
+
+/* If NODE is a class, this is the signature type that contains NODE's
+ signature after it has been computed using sigof(). */
+#define CLASSTYPE_SIGNATURE(NODE) (TYPE_LANG_SPECIFIC(NODE)->signature)
+
+/* If NODE is a signature pointer or signature reference, this is the
+ signature type the pointer/reference points to. */
+#define SIGNATURE_TYPE(NODE) (TYPE_LANG_SPECIFIC(NODE)->signature)
+
+/* If NODE is a signature, this is a vector of all methods defined
+ in the signature or in its base types together with their default
+ implementations. */
+#define SIGNATURE_METHOD_VEC(NODE) (TYPE_LANG_SPECIFIC(NODE)->signature)
+
+/* If NODE is a signature, this is the _TYPE node that contains NODE's
+ signature pointer type. */
+#define SIGNATURE_POINTER_TO(NODE) (TYPE_LANG_SPECIFIC(NODE)->signature_pointer_to)
+
+/* If NODE is a signature, this is the _TYPE node that contains NODE's
+ signature reference type. */
+#define SIGNATURE_REFERENCE_TO(NODE) (TYPE_LANG_SPECIFIC(NODE)->signature_reference_to)
+
+/* This is the VAR_DECL that contains NODE's dossier.  */
+#define CLASSTYPE_DOSSIER(NODE) (TYPE_LANG_SPECIFIC(NODE)->dossier)
+
+/* List of all explicit methods (chained using DECL_NEXT_METHOD),
+ in order they were parsed. */
+#define CLASSTYPE_METHODS(NODE) (TYPE_LANG_SPECIFIC(NODE)->methods)
+
+/* Nonzero means that this _CLASSTYPE node overloads operator(). */
+#define TYPE_OVERLOADS_CALL_EXPR(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_call_overloaded)
+
+/* Nonzero means that this _CLASSTYPE node overloads operator[]. */
+#define TYPE_OVERLOADS_ARRAY_REF(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_array_ref_overloaded)
+
+/* Nonzero means that this _CLASSTYPE node overloads operator->. */
+#define TYPE_OVERLOADS_ARROW(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_arrow_overloaded)
+
+/* Nonzero means that this _CLASSTYPE (or one of its ancestors) uses
+ multiple inheritance. If this is 0 for the root of a type
+ hierarchy, then we can use more efficient search techniques. */
+#define TYPE_USES_MULTIPLE_INHERITANCE(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.uses_multiple_inheritance)
+
+/* Nonzero means that this _CLASSTYPE (or one of its ancestors) uses
+ virtual base classes. If this is 0 for the root of a type
+ hierarchy, then we can use more efficient search techniques. */
+#define TYPE_USES_VIRTUAL_BASECLASSES(NODE) (TREE_LANG_FLAG_3(NODE))
+
+/* List of lists of member functions defined in this class. */
+#define CLASSTYPE_METHOD_VEC(NODE) TYPE_METHODS(NODE)
+
+/* Pointer from any member function to the head of the list of
+ member functions of the type that member function belongs to. */
+#define CLASSTYPE_BASELINK_VEC(NODE) (TYPE_LANG_SPECIFIC(NODE)->baselink_vec)
+
+/* Mark bits for depth-first and breadth-first searches.  */
+#define CLASSTYPE_MARKED(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.marked)
+#define CLASSTYPE_MARKED2(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.marked2)
+#define CLASSTYPE_MARKED3(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.marked3)
+#define CLASSTYPE_MARKED4(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.marked4)
+#define CLASSTYPE_MARKED5(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.marked5)
+#define CLASSTYPE_MARKED6(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.marked6)
+/* Macros to modify the above flags */
+#define SET_CLASSTYPE_MARKED(NODE) (CLASSTYPE_MARKED(NODE) = 1)
+#define CLEAR_CLASSTYPE_MARKED(NODE) (CLASSTYPE_MARKED(NODE) = 0)
+#define SET_CLASSTYPE_MARKED2(NODE) (CLASSTYPE_MARKED2(NODE) = 1)
+#define CLEAR_CLASSTYPE_MARKED2(NODE) (CLASSTYPE_MARKED2(NODE) = 0)
+#define SET_CLASSTYPE_MARKED3(NODE) (CLASSTYPE_MARKED3(NODE) = 1)
+#define CLEAR_CLASSTYPE_MARKED3(NODE) (CLASSTYPE_MARKED3(NODE) = 0)
+#define SET_CLASSTYPE_MARKED4(NODE) (CLASSTYPE_MARKED4(NODE) = 1)
+#define CLEAR_CLASSTYPE_MARKED4(NODE) (CLASSTYPE_MARKED4(NODE) = 0)
+#define SET_CLASSTYPE_MARKED5(NODE) (CLASSTYPE_MARKED5(NODE) = 1)
+#define CLEAR_CLASSTYPE_MARKED5(NODE) (CLASSTYPE_MARKED5(NODE) = 0)
+#define SET_CLASSTYPE_MARKED6(NODE) (CLASSTYPE_MARKED6(NODE) = 1)
+#define CLEAR_CLASSTYPE_MARKED6(NODE) (CLASSTYPE_MARKED6(NODE) = 0)
+
+#define CLASSTYPE_TAGS(NODE) (TYPE_LANG_SPECIFIC(NODE)->tags)
+
+/* If this class has any bases, this is the index of the base class from
+   which our VFIELD comes, -1 otherwise.  If this class has no base
+   classes, this is not used.
+   For `D : B1, B2', this would be 0 if D's vtable came from B1,
+   and 1 if it came from B2.  */
+#define CLASSTYPE_VFIELD_PARENT(NODE) (TYPE_LANG_SPECIFIC(NODE)->vfield_parent)
+
+/* Remove when done merging. */
+#define CLASSTYPE_VFIELD(NODE) TYPE_VFIELD(NODE)
+
+/* The number of virtual functions defined for this
+ _CLASSTYPE node. */
+#define CLASSTYPE_VSIZE(NODE) (TYPE_LANG_SPECIFIC(NODE)->vsize)
+/* The virtual base classes that this type uses. */
+#define CLASSTYPE_VBASECLASSES(NODE) (TYPE_LANG_SPECIFIC(NODE)->vbases)
+/* The virtual function pointer fields that this type contains. */
+#define CLASSTYPE_VFIELDS(NODE) (TYPE_LANG_SPECIFIC(NODE)->vfields)
+
+/* Number of baseclasses defined for this type.
+ 0 means no base classes. */
+#define CLASSTYPE_N_BASECLASSES(NODE) \
+ (TYPE_BINFO_BASETYPES (NODE) ? TREE_VEC_LENGTH (TYPE_BINFO_BASETYPES(NODE)) : 0)
+
+/* Memoize the number of superclasses (base classes) that this node
+   has.  That way we can know immediately (albeit conservatively) how
+   large a multiple-inheritance matrix we need to build to find
+   derivation information.  */
+#define CLASSTYPE_N_SUPERCLASSES(NODE) (TYPE_LANG_SPECIFIC(NODE)->n_ancestors)
+#define CLASSTYPE_N_VBASECLASSES(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.n_vancestors)
+
+/* Record how deep the inheritance is for this class so `void*' conversions
+ are less favorable than a conversion to the most base type. */
+#define CLASSTYPE_MAX_DEPTH(NODE) (TYPE_LANG_SPECIFIC(NODE)->max_depth)
+
+/* Used for keeping search-specific information. Any search routine
+ which uses this must define what exactly this slot is used for. */
+#define CLASSTYPE_SEARCH_SLOT(NODE) (TYPE_LANG_SPECIFIC(NODE)->search_slot)
+
+/* Entry for keeping memoization tables for this type to
+ hopefully speed up search routines. Since it is a pointer,
+ it can mean almost anything. */
+#define CLASSTYPE_MTABLE_ENTRY(NODE) (TYPE_LANG_SPECIFIC(NODE)->memoized_table_entry)
+
+/* This is the total size of the baseclasses defined for this type.
+   Needed because it is desirable to lay out such information
+   before beginning to process the class itself, and we
+   don't want to compute it a second time when actually laying
+   out the type for real.  */
+#define CLASSTYPE_SIZE(NODE) (TYPE_LANG_SPECIFIC(NODE)->size)
+#define CLASSTYPE_SIZE_UNIT(NODE) (TYPE_LANG_SPECIFIC(NODE)->size_unit)
+#define CLASSTYPE_MODE(NODE) (TYPE_LANG_SPECIFIC(NODE)->mode)
+#define CLASSTYPE_ALIGN(NODE) (TYPE_LANG_SPECIFIC(NODE)->align)
+
+/* This is the space needed for virtual base classes. NULL if
+ there are no virtual basetypes. */
+#define CLASSTYPE_VBASE_SIZE(NODE) (TYPE_LANG_SPECIFIC(NODE)->vbase_size)
+
+/* A cons list of structure elements which either have constructors
+ to be called, or virtual function table pointers which
+ need initializing. Depending on what is being initialized,
+ the TREE_PURPOSE and TREE_VALUE fields have different meanings:
+
+ Member initialization: <FIELD_DECL, TYPE>
+ Base class construction: <NULL_TREE, BASETYPE>
+ Base class initialization: <BASE_INITIALIZATION, THESE_INITIALIZATIONS>
+ Whole type: <MEMBER_INIT, BASE_INIT>. */
+#define CLASSTYPE_BASE_INIT_LIST(NODE) (TYPE_LANG_SPECIFIC(NODE)->base_init_list)
+
+/* A cons list of virtual functions which cannot be inherited by
+ derived classes. When deriving from this type, the derived
+ class must provide its own definition for each of these functions. */
+#define CLASSTYPE_ABSTRACT_VIRTUALS(NODE) (TYPE_LANG_SPECIFIC(NODE)->abstract_virtuals)
+
+/* Nonzero means that this aggr type has been `closed' by a semicolon. */
+#define CLASSTYPE_GOT_SEMICOLON(NODE) (TYPE_LANG_SPECIFIC (NODE)->type_flags.got_semicolon)
+
+/* Nonzero means that the main virtual function table pointer needs to be
+ set because base constructors have placed the wrong value there.
+ If this is zero, it means that they placed the right value there,
+ and there is no need to change it. */
+#define CLASSTYPE_NEEDS_VIRTUAL_REINIT(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.needs_virtual_reinit)
+
+/* Nonzero means that if this type has virtual functions, that
+ the virtual function table will be written out. */
+#define CLASSTYPE_VTABLE_NEEDS_WRITING(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.vtable_needs_writing)
+
+/* Nonzero means that this type defines its own local type declarations. */
+#define CLASSTYPE_LOCAL_TYPEDECLS(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.local_typedecls)
+
+/* Nonzero means that this type has an X() constructor. */
+#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_default_ctor)
+
+/* Nonzero means the type declared a ctor as private or protected. We
+ use this to make sure we don't try to generate a copy ctor for a
+ class that has a member of type NODE. */
+#define TYPE_HAS_NONPUBLIC_CTOR(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_nonpublic_ctor)
+
+/* Ditto, for operator=. */
+#define TYPE_HAS_NONPUBLIC_ASSIGN_REF(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_nonpublic_assign_ref)
+
+/* Many routines need to cons up a list of basetypes for access
+ checking. This field contains a TREE_LIST node whose TREE_VALUE
+ is the main variant of the type, and whose TREE_VIA_PUBLIC
+ and TREE_VIA_VIRTUAL bits are correctly set. */
+#define CLASSTYPE_AS_LIST(NODE) (TYPE_LANG_SPECIFIC(NODE)->as_list)
+/* Same, but cache a list whose value is the name of this type. */
+#define CLASSTYPE_ID_AS_LIST(NODE) (TYPE_LANG_SPECIFIC(NODE)->id_as_list)
+/* Same, but cache a list whose value is the binfo of this type. */
+#define CLASSTYPE_BINFO_AS_LIST(NODE) (TYPE_LANG_SPECIFIC(NODE)->binfo_as_list)
+
+/* Slot in which to cache a copy of the local vtable pointer. */
+#define CLASSTYPE_VTBL_PTR(NODE) (TYPE_LANG_SPECIFIC(NODE)->vtbl_ptr)
+
+/* Hold the instance object associated with this method. */
+#define CLASSTYPE_INST_VAR(NODE) (TYPE_LANG_SPECIFIC(NODE)->instance_variable)
+
+/* A list of class types with which this type is a friend. */
+#define CLASSTYPE_FRIEND_CLASSES(NODE) (TYPE_LANG_SPECIFIC(NODE)->friend_classes)
+
+/* Keep an inheritance lattice around so we can quickly tell whether
+ a type is derived from another or not. */
+#define CLASSTYPE_MI_MATRIX(NODE) (TYPE_LANG_SPECIFIC(NODE)->mi_matrix)
+
+/* If there is exactly one conversion to a non-void, non-const pointer type,
+ remember that here. If there are more than one, put
+ `error_mark_node' here. If there are none, this holds NULL_TREE. */
+#define CLASSTYPE_CONVERSION(NODE,KIND) \
+ (TYPE_LANG_SPECIFIC(NODE)->conversions[(int) KIND])
+
+/* Say whether this node was declared as a "class" or a "struct". */
+#define CLASSTYPE_DECLARED_CLASS(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.declared_class)
+/* Say whether this node was declared as an "exception".  */
+#define CLASSTYPE_DECLARED_EXCEPTION(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.declared_exception)
+/* Nonzero means this type may not be globalized.  */
+#define CLASSTYPE_NO_GLOBALIZE(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.no_globalize)
+
+/* Nonzero if this class has const members which have no specified initialization. */
+#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.const_needs_init)
+
+/* Nonzero if this class has ref members which have no specified initialization. */
+#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.ref_needs_init)
+
+/* Nonzero if this class is included from a header file which employs
+ `#pragma interface', and it is not included in its implementation file. */
+#define CLASSTYPE_INTERFACE_ONLY(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.interface_only)
+
+/* Same as above, but for classes whose purpose we do not know. */
+#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.interface_unknown)
+#define CLASSTYPE_INTERFACE_KNOWN(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.interface_unknown == 0)
+#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) (TYPE_LANG_SPECIFIC(NODE)->type_flags.interface_unknown = !!(X))
+#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.interface_unknown = 1)
+#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.interface_unknown = 0)
+
+/* Nonzero if a _DECL node requires us to output debug info for this class. */
+#define CLASSTYPE_DEBUG_REQUESTED(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.debug_requested)
+
+/* Additional macros for inheritance information. */
+
+#define CLASSTYPE_VBINFO(NODE,VIA_PUBLIC) \
+ (TYPE_LANG_SPECIFIC (NODE)->vbinfo[VIA_PUBLIC])
+
+/* When following a binfo-specific chain, this is the cumulative
+   via-public flag.  */
+#define BINFO_VIA_PUBLIC(NODE) TREE_LANG_FLAG_5 (NODE)
+
+/* When building a matrix to determine by a single lookup
+ whether one class is derived from another or not,
+ this field is the index of the class in the table. */
+#define CLASSTYPE_CID(NODE) (TYPE_LANG_SPECIFIC(NODE)->cid)
+#define BINFO_CID(NODE) CLASSTYPE_CID(BINFO_TYPE(NODE))
+
+/* Nonzero means marked by DFS or BFS search, including searches
+ by `get_binfo' and `get_base_distance'. */
+#define BINFO_MARKED(NODE) (TREE_VIA_VIRTUAL(NODE)?CLASSTYPE_MARKED(BINFO_TYPE(NODE)):TREE_LANG_FLAG_0(NODE))
+/* Macros needed because of C compilers that don't allow conditional
+ expressions to be lvalues. Grr! */
+#define SET_BINFO_MARKED(NODE) (TREE_VIA_VIRTUAL(NODE)?SET_CLASSTYPE_MARKED(BINFO_TYPE(NODE)):(TREE_LANG_FLAG_0(NODE)=1))
+#define CLEAR_BINFO_MARKED(NODE) (TREE_VIA_VIRTUAL(NODE)?CLEAR_CLASSTYPE_MARKED(BINFO_TYPE(NODE)):(TREE_LANG_FLAG_0(NODE)=0))
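+
+/* That is, some compilers reject an assignment such as
+   `(c ? a : b) = 1', which the plain BINFO_MARKED macro would expand
+   to when used as an lvalue; hence the explicit SET_/CLEAR_ variants
+   here and below.  */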
+
+/* Nonzero means marked in building initialization list. */
+#define BINFO_BASEINIT_MARKED(NODE) CLASSTYPE_MARKED2 (BINFO_TYPE (NODE))
+/* Modifier macros */
+#define SET_BINFO_BASEINIT_MARKED(NODE) SET_CLASSTYPE_MARKED2 (BINFO_TYPE (NODE))
+#define CLEAR_BINFO_BASEINIT_MARKED(NODE) CLEAR_CLASSTYPE_MARKED2 (BINFO_TYPE (NODE))
+
+/* Nonzero means marked in search through virtual inheritance hierarchy. */
+#define BINFO_VBASE_MARKED(NODE) CLASSTYPE_MARKED2 (BINFO_TYPE (NODE))
+/* Modifier macros */
+#define SET_BINFO_VBASE_MARKED(NODE) SET_CLASSTYPE_MARKED2 (BINFO_TYPE (NODE))
+#define CLEAR_BINFO_VBASE_MARKED(NODE) CLEAR_CLASSTYPE_MARKED2 (BINFO_TYPE (NODE))
+
+/* Nonzero means marked in search for members or member functions. */
+#define BINFO_FIELDS_MARKED(NODE) \
+ (TREE_VIA_VIRTUAL(NODE)?CLASSTYPE_MARKED2 (BINFO_TYPE (NODE)):TREE_LANG_FLAG_2(NODE))
+#define SET_BINFO_FIELDS_MARKED(NODE) (TREE_VIA_VIRTUAL(NODE)?SET_CLASSTYPE_MARKED2(BINFO_TYPE(NODE)):(TREE_LANG_FLAG_2(NODE)=1))
+#define CLEAR_BINFO_FIELDS_MARKED(NODE) (TREE_VIA_VIRTUAL(NODE)?CLEAR_CLASSTYPE_MARKED2(BINFO_TYPE(NODE)):(TREE_LANG_FLAG_2(NODE)=0))
+
+/* Nonzero means that this class is on a path leading to a new vtable. */
+#define BINFO_VTABLE_PATH_MARKED(NODE) \
+ (TREE_VIA_VIRTUAL(NODE)?CLASSTYPE_MARKED3(BINFO_TYPE(NODE)):TREE_LANG_FLAG_3(NODE))
+#define SET_BINFO_VTABLE_PATH_MARKED(NODE) (TREE_VIA_VIRTUAL(NODE)?SET_CLASSTYPE_MARKED3(BINFO_TYPE(NODE)):(TREE_LANG_FLAG_3(NODE)=1))
+#define CLEAR_BINFO_VTABLE_PATH_MARKED(NODE) (TREE_VIA_VIRTUAL(NODE)?CLEAR_CLASSTYPE_MARKED3(BINFO_TYPE(NODE)):(TREE_LANG_FLAG_3(NODE)=0))
+
+/* Nonzero means that this class has a new vtable. */
+#define BINFO_NEW_VTABLE_MARKED(NODE) \
+ (TREE_VIA_VIRTUAL(NODE)?CLASSTYPE_MARKED4(BINFO_TYPE(NODE)):TREE_LANG_FLAG_4(NODE))
+#define SET_BINFO_NEW_VTABLE_MARKED(NODE) (TREE_VIA_VIRTUAL(NODE)?SET_CLASSTYPE_MARKED4(BINFO_TYPE(NODE)):(TREE_LANG_FLAG_4(NODE)=1))
+#define CLEAR_BINFO_NEW_VTABLE_MARKED(NODE) (TREE_VIA_VIRTUAL(NODE)?CLEAR_CLASSTYPE_MARKED4(BINFO_TYPE(NODE)):(TREE_LANG_FLAG_4(NODE)=0))
+
+/* Nonzero means this class has initialized its virtual baseclasses. */
+#define BINFO_VBASE_INIT_MARKED(NODE) \
+ (TREE_VIA_VIRTUAL(NODE)?CLASSTYPE_MARKED5(BINFO_TYPE(NODE)):TREE_LANG_FLAG_5(NODE))
+#define SET_BINFO_VBASE_INIT_MARKED(NODE) (TREE_VIA_VIRTUAL(NODE)?SET_CLASSTYPE_MARKED5(BINFO_TYPE(NODE)):(TREE_LANG_FLAG_5(NODE)=1))
+#define CLEAR_BINFO_VBASE_INIT_MARKED(NODE) (TREE_VIA_VIRTUAL(NODE)?CLEAR_CLASSTYPE_MARKED5(BINFO_TYPE(NODE)):(TREE_LANG_FLAG_5(NODE)=0))
+
+/* Accessor macros for the vfield slots in structures. */
+
+/* Get the assoc info that caused this vfield to exist. */
+#define VF_BINFO_VALUE(NODE) TREE_PURPOSE (NODE)
+
+/* Get that same information as a _TYPE. */
+#define VF_BASETYPE_VALUE(NODE) TREE_VALUE (NODE)
+
+/* Get the value of the top-most type dominating the non-`normal' vfields. */
+#define VF_DERIVED_VALUE(NODE) (VF_BINFO_VALUE (NODE) ? BINFO_TYPE (VF_BINFO_VALUE (NODE)) : NULL_TREE)
+
+/* Get the value of the top-most type that's `normal' for the vfield. */
+#define VF_NORMAL_VALUE(NODE) TREE_TYPE (NODE)
+
+/* Nonzero for TREE_LIST node means that this list of things
+ is a list of parameters, as opposed to a list of expressions. */
+#define TREE_PARMLIST(NODE) ((NODE)->common.unsigned_flag) /* overloaded! */
+
+/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
+ this type can raise. */
+#define TYPE_RAISES_EXCEPTIONS(NODE) TYPE_NONCOPIED_PARTS (NODE)
+
+struct lang_decl_flags
+{
+#ifdef ONLY_INT_FIELDS
+ int language : 8;
+#else
+ enum languages language : 8;
+#endif
+
+ unsigned operator_attr : 1;
+ unsigned constructor_attr : 1;
+ unsigned returns_first_arg : 1;
+ unsigned preserves_first_arg : 1;
+ unsigned friend_attr : 1;
+ unsigned static_function : 1;
+ unsigned const_memfunc : 1;
+ unsigned volatile_memfunc : 1;
+
+ unsigned abstract_virtual : 1;
+ unsigned permanent_attr : 1;
+ unsigned constructor_for_vbase_attr : 1;
+ unsigned mutable_flag : 1;
+ unsigned is_default_implementation : 1;
+ unsigned saved_inline : 1;
+ unsigned use_template : 2;
+
+ unsigned dummy : 8;
+
+ tree access;
+ tree context;
+ tree memfunc_pointer_to;
+};
+
+struct lang_decl
+{
+ struct lang_decl_flags decl_flags;
+
+ struct template_info *template_info;
+ tree main_decl_variant;
+ struct pending_inline *pending_inline_info;
+ tree next_method;
+ tree chain;
+};
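+
+/* Editor's note: the DECL_* accessors below reach through
+   DECL_LANG_SPECIFIC, so they apply only to decls whose lang_decl has
+   been allocated (as build_lang_decl does).  A guarded test might be
+
+     if (DECL_LANG_SPECIFIC (decl) && DECL_CONSTRUCTOR_P (decl))
+       ...
+*/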
+
+/* Non-zero if NODE is a _DECL with TREE_READONLY set. */
+#define TREE_READONLY_DECL_P(NODE) \
+ (TREE_READONLY (NODE) && TREE_CODE_CLASS (TREE_CODE (NODE)) == 'd')
+
+/* For FUNCTION_DECLs: return the language in which this decl
+ was declared. */
+#define DECL_LANGUAGE(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.language)
+
+/* For FUNCTION_DECLs: nonzero means that this function is a constructor. */
+#define DECL_CONSTRUCTOR_P(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.constructor_attr)
+/* For FUNCTION_DECLs: nonzero means that this function is a constructor
+ for an object with virtual baseclasses. */
+#define DECL_CONSTRUCTOR_FOR_VBASE_P(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.constructor_for_vbase_attr)
+
+/* For FUNCTION_DECLs: nonzero means that this function is a default
+ implementation of a signature method. */
+#define IS_DEFAULT_IMPLEMENTATION(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.is_default_implementation)
+
+/* For FUNCTION_DECLs: nonzero means that the constructor
+ is known to return a non-zero `this' unchanged. */
+#define DECL_RETURNS_FIRST_ARG(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.returns_first_arg)
+
+/* Nonzero for FUNCTION_DECL means that this constructor is known to
+ not make any assignment to `this', and therefore can be trusted
+ to return it unchanged. Otherwise, we must re-assign `current_class_decl'
+ after performing base initializations. */
+#define DECL_PRESERVES_THIS(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.preserves_first_arg)
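+
+/* Illustrative use (editor's sketch, not from the original source):
+
+     if (! DECL_PRESERVES_THIS (current_function_decl))
+       ... reload `current_class_decl' after the base inits ...
+*/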
+
+/* Nonzero for _DECL means that this decl appears (or will appear)
+   as a member in a RECORD_TYPE or UNION_TYPE node.  It is also used
+   for detecting circularity in case members are multiply defined.  In
+   the case of a VAR_DECL, it is also used to determine how program
+   storage should be allocated.  */
+#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3(NODE))
+
+/* Nonzero for FUNCTION_DECL means that this decl is just a
+ friend declaration, and should not be added to the list of
+ member functions for this class. */
+#define DECL_FRIEND_P(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.friend_attr)
+
+/* Nonzero for FUNCTION_DECL means that this decl is a static
+ member function. */
+#define DECL_STATIC_FUNCTION_P(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.static_function)
+
+/* Nonzero for a class member means that it is shared between all objects
+ of that class. */
+#define SHARED_MEMBER_P(NODE) \
+ (TREE_CODE (NODE) == VAR_DECL || TREE_CODE (NODE) == TYPE_DECL \
+ || TREE_CODE (NODE) == CONST_DECL)
+
+/* Nonzero for FUNCTION_DECL means that this decl is a member function
+ (static or non-static). */
+#define DECL_FUNCTION_MEMBER_P(NODE) \
+ (TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE || DECL_STATIC_FUNCTION_P (NODE))
+
+/* Nonzero for FUNCTION_DECL means that this member function
+ has `this' as const X *const. */
+#define DECL_CONST_MEMFUNC_P(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.const_memfunc)
+
+/* Nonzero for FUNCTION_DECL means that this member function
+ has `this' as volatile X *const. */
+#define DECL_VOLATILE_MEMFUNC_P(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.volatile_memfunc)
+
+/* Nonzero for _DECL means that this member object type
+ is mutable. */
+#define DECL_MUTABLE_P(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.mutable_flag)
+
+/* Nonzero for FUNCTION_DECL means that this member function
+ exists as part of an abstract class's interface. */
+#define DECL_ABSTRACT_VIRTUAL_P(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.abstract_virtual)
+
+/* Nonzero if allocated on permanent_obstack. */
+#define LANG_DECL_PERMANENT(LANGDECL) ((LANGDECL)->decl_flags.permanent_attr)
+
+/* The _TYPE context in which this _DECL appears. This field holds the
+ class where a virtual function instance is actually defined, and the
+ lexical scope of a friend function defined in a class body. */
+#define DECL_CLASS_CONTEXT(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.context)
+
+/* For a FUNCTION_DECL: the chain through which the next method
+ in the method chain is found. We now use TREE_CHAIN to
+ link into the FIELD_DECL chain. */
+#if 1
+#define DECL_CHAIN(NODE) (DECL_LANG_SPECIFIC(NODE)->chain)
+#else
+#define DECL_CHAIN(NODE) (TREE_CHAIN (NODE))
+#endif
+
+/* Next method in CLASSTYPE_METHODS list. */
+#define DECL_NEXT_METHOD(NODE) (DECL_LANG_SPECIFIC(NODE)->next_method)
+
+/* Points back to the decl which caused this lang_decl to be allocated. */
+#define DECL_MAIN_VARIANT(NODE) (DECL_LANG_SPECIFIC(NODE)->main_decl_variant)
+
+/* For a FUNCTION_DECL: if this function was declared inline inside of
+ a class declaration, this is where the text for the function is
+ squirreled away. */
+#define DECL_PENDING_INLINE_INFO(NODE) (DECL_LANG_SPECIFIC(NODE)->pending_inline_info)
+
+/* True if on the saved_inlines (see decl2.c) list. */
+#define DECL_SAVED_INLINE(DECL) \
+ (DECL_LANG_SPECIFIC(DECL)->decl_flags.saved_inline)
+
+/* For a FUNCTION_DECL: if this function was declared inside a signature
+ declaration, this is the corresponding member function pointer that was
+ created for it. */
+#define DECL_MEMFUNC_POINTER_TO(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.memfunc_pointer_to)
+
+/* For a FIELD_DECL: this points to the signature member function from
+ which this signature member function pointer was created. */
+#define DECL_MEMFUNC_POINTING_TO(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.memfunc_pointer_to)
+
+/* For a TEMPLATE_DECL: template-specific information. */
+#define DECL_TEMPLATE_INFO(NODE) (DECL_LANG_SPECIFIC(NODE)->template_info)
+
+/* Nonzero in INT_CST means that this int is negative by dint of
+   using a two's-complement negated operand. */
+#define TREE_NEGATED_INT(NODE) (TREE_LANG_FLAG_0 (NODE))
+
+/* Nonzero in any kind of _EXPR or _REF node means that it is a call
+ to a storage allocation routine. If, later, alternate storage
+ is found to hold the object, this call can be ignored. */
+#define TREE_CALLS_NEW(NODE) (TREE_LANG_FLAG_1 (NODE))
+
+/* Nonzero in any kind of _TYPE that uses multiple inheritance
+ or virtual baseclasses. */
+#define TYPE_USES_COMPLEX_INHERITANCE(NODE) (TREE_LANG_FLAG_1 (NODE))
+
+/* Nonzero in IDENTIFIER_NODE means that this name is not the name the user
+   gave; it's a DECL_NESTED_TYPENAME.  Someone may want to set this on
+   mangled function names, too, but that is not done currently. */
+#define TREE_MANGLED(NODE) (TREE_LANG_FLAG_0 (NODE))
+
+#if 0 /* UNUSED */
+/* Nonzero in IDENTIFIER_NODE means that this name is overloaded, and
+ should be looked up in a non-standard way. */
+#define DECL_OVERLOADED(NODE) (DECL_LANG_FLAG_4 (NODE))
+#endif
+
+/* Nonzero if this (non-TYPE)_DECL has its virtual attribute set.
+ For a FUNCTION_DECL, this is when the function is a virtual function.
+ For a VAR_DECL, this is when the variable is a virtual function table.
+ For a FIELD_DECL, when the field is the field for the virtual function table.
+ For an IDENTIFIER_NODE, nonzero if any function with this name
+ has been declared virtual.
+
+ For a _TYPE if it uses virtual functions (or is derived from
+ one that does). */
+#define TYPE_VIRTUAL_P(NODE) (TREE_LANG_FLAG_2 (NODE))
+
+#if 0
+/* Same, but tells if this field is private in current context. */
+#define DECL_PRIVATE(NODE) (DECL_LANG_FLAG_5 (NODE))
+
+/* Same, but tells if this field is protected in current context. */
+#define DECL_PROTECTED(NODE) (DECL_LANG_FLAG_6 (NODE))
+
+#define DECL_PUBLIC(NODE) (DECL_LANG_FLAG_7 (NODE))
+#endif
+
+/* This _DECL represents a compiler-generated entity. */
+#define DECL_ARTIFICIAL(NODE) (DECL_SOURCE_LINE (NODE) == 0)
+#define SET_DECL_ARTIFICIAL(NODE) (DECL_SOURCE_LINE (NODE) = 0)
+
+/* Record whether a typedef for type `int' was actually `signed int'. */
+#define C_TYPEDEF_EXPLICITLY_SIGNED(exp) DECL_LANG_FLAG_1 ((exp))
+
+/* Nonzero if the type T does not promote to itself.
+   ANSI C states explicitly the list of types that promote;
+   in particular, short promotes to int even if they have the same width. */
+#define C_PROMOTING_INTEGER_TYPE_P(t) \
+ (TREE_CODE ((t)) == INTEGER_TYPE \
+ && (TYPE_MAIN_VARIANT (t) == char_type_node \
+ || TYPE_MAIN_VARIANT (t) == signed_char_type_node \
+ || TYPE_MAIN_VARIANT (t) == unsigned_char_type_node \
+ || TYPE_MAIN_VARIANT (t) == short_integer_type_node \
+ || TYPE_MAIN_VARIANT (t) == short_unsigned_type_node))
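+
+/* Worked example (editor's note): C_PROMOTING_INTEGER_TYPE_P is nonzero
+   for `short' even on a target where short and int have the same width,
+   because ANSI C defines the promotions by type, not by size; it is
+   zero for `int', which promotes to itself.  */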
+
+#define INTEGRAL_CODE_P(CODE) \
+ (CODE == INTEGER_TYPE || CODE == ENUMERAL_TYPE || CODE == BOOLEAN_TYPE)
+#define ARITHMETIC_TYPE_P(TYPE) (INTEGRAL_TYPE_P (TYPE) || FLOAT_TYPE_P (TYPE))
+
+/* Mark which labels are explicitly declared.
+ These may be shadowed, and may be referenced from nested functions. */
+#define C_DECLARED_LABEL_FLAG(label) TREE_LANG_FLAG_1 (label)
+
+/* Record whether a type or decl was written with nonconstant size.
+ Note that TYPE_SIZE may have simplified to a constant. */
+#define C_TYPE_VARIABLE_SIZE(type) TREE_LANG_FLAG_4 (type)
+#define C_DECL_VARIABLE_SIZE(type) DECL_LANG_FLAG_8 (type)
+
+/* Nonzero for _TYPE means that the _TYPE defines
+ at least one constructor. */
+#define TYPE_HAS_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1(NODE))
+
+/* When appearing in an INDIRECT_REF, it means that the tree structure
+ underneath is actually a call to a constructor. This is needed
+ when the constructor must initialize local storage (which can
+ be automatically destroyed), rather than allowing it to allocate
+ space from the heap.
+
+ When appearing in a SAVE_EXPR, it means that underneath
+ is a call to a constructor.
+
+ When appearing in a CONSTRUCTOR, it means that it was
+ a GNU C constructor expression.
+
+ When appearing in a FIELD_DECL, it means that this field
+ has been duly initialized in its constructor. */
+#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4(NODE))
+
+#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
+ && CONSTRUCTOR_ELTS (NODE) == NULL_TREE)
+
+/* Indicates that a NON_LVALUE_EXPR came from a C++ reference.
+   Used to generate a more helpful error message in case somebody
+ tries to take its address. */
+#define TREE_REFERENCE_EXPR(NODE) (TREE_LANG_FLAG_3(NODE))
+
+/* Nonzero for _TYPE means that the _TYPE defines a destructor. */
+#define TYPE_HAS_DESTRUCTOR(NODE) (TYPE_LANG_FLAG_2(NODE))
+
+#if 0
+/* Nonzero for _TYPE node means that creating an object of this type
+ will involve a call to a constructor. This can apply to objects
+ of ARRAY_TYPE if the type of the elements needs a constructor. */
+#define TYPE_NEEDS_CONSTRUCTING(NODE) (TYPE_LANG_FLAG_3(NODE))
+#endif
+
+/* Nonzero if there is a user-defined X::op=(x&) for this class. */
+#define TYPE_HAS_REAL_ASSIGN_REF(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_real_assign_ref)
+#define TYPE_HAS_COMPLEX_ASSIGN_REF(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_complex_assign_ref)
+#define TYPE_HAS_ABSTRACT_ASSIGN_REF(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_abstract_assign_ref)
+#define TYPE_HAS_COMPLEX_INIT_REF(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.has_complex_init_ref)
+
+/* Nonzero for _TYPE node means that destroying an object of this type
+ will involve a call to a destructor. This can apply to objects
+   of ARRAY_TYPE if the type of the elements needs a destructor. */
+#define TYPE_NEEDS_DESTRUCTOR(NODE) (TYPE_LANG_FLAG_4(NODE))
+
+/* Nonzero for _TYPE node means that this type is a pointer to member
+ function type. */
+#define TYPE_PTRMEMFUNC_P(NODE) (TREE_CODE(NODE) == RECORD_TYPE && TYPE_LANG_SPECIFIC(NODE)->type_flags.ptrmemfunc_flag)
+#define TYPE_PTRMEMFUNC_FLAG(NODE) (TYPE_LANG_SPECIFIC(NODE)->type_flags.ptrmemfunc_flag)
+/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
+   pointer to member function.  TYPE_PTRMEMFUNC_P _must_ be true
+   before this macro is used. */
+#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (TREE_CHAIN (TREE_CHAIN (TYPE_FIELDS (NODE)))))))
+/* These are used to manipulate the canonical RECORD_TYPE from the
+   hashed POINTER_TYPE, and can only be used on the POINTER_TYPE. */
+#define TYPE_GET_PTRMEMFUNC_TYPE(NODE) ((tree)TYPE_LANG_SPECIFIC(NODE))
+#define TYPE_SET_PTRMEMFUNC_TYPE(NODE, VALUE) (TYPE_LANG_SPECIFIC(NODE) = ((struct lang_type *)(void*)(VALUE)))
+/* These are to get the delta2 and pfn fields from a TYPE_PTRMEMFUNC_P. */
+#define DELTA2_FROM_PTRMEMFUNC(NODE) (build_component_ref (build_component_ref ((NODE), pfn_or_delta2_identifier, 0, 0), delta2_identifier, 0, 0))
+#define PFN_FROM_PTRMEMFUNC(NODE) (build_component_ref (build_component_ref ((NODE), pfn_or_delta2_identifier, 0, 0), pfn_identifier, 0, 0))
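+
+/* Editor's sketch of the layout the macros above assume: a
+   TYPE_PTRMEMFUNC_P record behaves roughly like
+
+     struct ptrmemfunc
+     {
+       short delta;                 (the "__delta" field)
+       short index;                 (the "__index" field)
+       union
+       {
+         void (*pfn) ();            (the "__pfn" field)
+         short delta2;              (the "__delta2" field)
+       } pfn_or_delta2;
+     };
+
+   TYPE_PTRMEMFUNC_FN_TYPE chases two TREE_CHAINs from TYPE_FIELDS to
+   reach the third field's union, then takes the type of its first
+   member, the POINTER_TYPE to the METHOD_TYPE.  */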
+
+/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was
+   specified in its declaration. */
+#define DECL_THIS_EXTERN(NODE) (DECL_LANG_FLAG_2(NODE))
+
+/* Nonzero for SAVE_EXPR if used to initialize a PARM_DECL. */
+#define PARM_DECL_EXPR(NODE) (TREE_LANG_FLAG_2(NODE))
+
+/* Nonzero in FUNCTION_DECL means it is really an operator.
+ Just used to communicate formatting information to dbxout.c. */
+#define DECL_OPERATOR(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.operator_attr)
+
+#define ANON_UNION_P(NODE) (DECL_NAME (NODE) == 0)
+
+#define UNKNOWN_TYPE LANG_TYPE
+
+/* Define fields and accessors for nodes representing declared names. */
+
+#if 0
+/* C++: A derived class may be able to directly use the virtual
+ function table of a base class. When it does so, it may
+ still have a decl node used to access the virtual function
+ table (so that variables of this type can initialize their
+   virtual function table pointers by name).  When such thievery
+   is committed, this records exactly which base class's virtual
+   function table is the one being stolen.  This effectively computes
+   the transitive closure. */
+#define DECL_VPARENT(NODE) ((NODE)->decl.arguments)
+#endif
+
+/* Make a slot so we can implement nested types. This slot holds
+ the IDENTIFIER_NODE that uniquely names the nested type. This
+ is for TYPE_DECLs only. */
+#define DECL_NESTED_TYPENAME(NODE) ((NODE)->decl.arguments)
+#define TYPE_NESTED_NAME(NODE) (DECL_NESTED_TYPENAME (TYPE_NAME (NODE)))
+
+#define TYPE_WAS_ANONYMOUS(NODE) (TYPE_LANG_SPECIFIC (NODE)->type_flags.was_anonymous)
+
+/* C++: all of these are overloaded! These apply only to TYPE_DECLs. */
+#define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE))
+#if 0
+#define DECL_UNDEFINED_FRIENDS(NODE) ((NODE)->decl.result)
+#endif
+#define DECL_WAITING_FRIENDS(NODE) ((tree)(NODE)->decl.rtl)
+#define SET_DECL_WAITING_FRIENDS(NODE,VALUE) \
+ ((NODE)->decl.rtl=(struct rtx_def*)VALUE)
+
+/* The DECL_ACCESS is used to record under which context
+ special access rules apply. */
+#define DECL_ACCESS(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.access)
+
+/* C++: all of these are overloaded!
+ These apply to PARM_DECLs and VAR_DECLs. */
+#define DECL_REFERENCE_SLOT(NODE) ((tree)(NODE)->decl.arguments)
+#define SET_DECL_REFERENCE_SLOT(NODE,VAL) ((NODE)->decl.arguments=VAL)
+
+/* For local VAR_DECLs, holds index into gc-protected obstack. */
+#define DECL_GC_OFFSET(NODE) ((NODE)->decl.result)
+
+/* Accessor macros for C++ template decl nodes. */
+#define DECL_TEMPLATE_IS_CLASS(NODE) (DECL_RESULT(NODE) == NULL_TREE)
+#define DECL_TEMPLATE_PARMS(NODE) DECL_ARGUMENTS(NODE)
+/* For class templates. */
+#define DECL_TEMPLATE_MEMBERS(NODE) DECL_SIZE(NODE)
+/* For function, method, class-data templates. */
+#define DECL_TEMPLATE_RESULT(NODE) DECL_RESULT(NODE)
+#define DECL_TEMPLATE_INSTANTIATIONS(NODE) DECL_VINDEX(NODE)
+
+/* Indicates whether or not (and how) a template was expanded for this
+ FUNCTION_DECL or VAR_DECL.
+ 0=normal declaration, e.g. int min (int, int);
+ 1=implicit template instantiation
+ 2=explicit template specialization, e.g. int min<int> (int, int);
+ 3=explicit template instantiation, e.g. template int min<int> (int, int);
+ */
+#define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC(NODE)->decl_flags.use_template)
+
+#define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1)
+#define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE (NODE) & 1)
+
+#define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2)
+#define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2)
+#define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE (NODE) == 2)
+#define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE (NODE) = 2)
+
+#define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1)
+#define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1)
+#define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE(NODE) == 1)
+#define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE(NODE) = 1)
+
+#define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3)
+#define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3)
+#define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE(NODE) == 3)
+#define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE(NODE) = 3)
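+
+/* Editor's note: because implicit instantiation is 1 and explicit
+   instantiation is 3, the (DECL_USE_TEMPLATE (NODE) & 1) test above
+   matches both kinds of instantiation while excluding explicit
+   specializations, which are 2.  */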
+
+#define THUNK_DELTA(DECL) ((DECL)->decl.frame_size.i)
+
+/* ...and for unexpanded-parameterized-type nodes. */
+#define UPT_TEMPLATE(NODE) TREE_PURPOSE(TYPE_VALUES(NODE))
+#define UPT_PARMS(NODE) TREE_VALUE(TYPE_VALUES(NODE))
+
+/* An enumeration of the kind of tags that C++ accepts. */
+enum tag_types { record_type, class_type, union_type, enum_type,
+ exception_type, signature_type };
+
+/* Zero means prototype weakly, as in ANSI C (no args means nothing).
+ Each language context defines how this variable should be set. */
+extern int strict_prototype;
+extern int strict_prototypes_lang_c, strict_prototypes_lang_cplusplus;
+
+/* Non-zero means that if a label exists, and no other identifier
+ applies, use the value of the label. */
+extern int flag_labels_ok;
+
+/* Non-zero means to collect statistics which might be expensive
+ and to print them when we are done. */
+extern int flag_detailed_statistics;
+
+/* Non-zero means warn if a function declared in a derived class has the
+   same name as a virtual function in the base class, but fails to match
+   the type signature of any virtual function in the base class. */
+extern int warn_overloaded_virtual;
+
+/* in c-common.c */
+extern void declare_function_name PROTO((void));
+extern void decl_attributes PROTO((tree, tree));
+extern void init_function_format_info PROTO((void));
+extern void record_function_format PROTO((tree, tree, int, int, int));
+extern void check_function_format PROTO((tree, tree, tree));
+/* Print an error message for invalid operands to arith operation CODE.
+ NOP_EXPR is used as a special case (see truthvalue_conversion). */
+extern void binary_op_error PROTO((enum tree_code));
+extern tree c_build_type_variant PROTO((tree, int, int));
+extern void c_expand_expr_stmt PROTO((tree));
+/* Validate the expression after `case' and apply default promotions. */
+extern tree check_case_value PROTO((tree));
+/* Concatenate a list of STRING_CST nodes into one STRING_CST. */
+extern tree combine_strings PROTO((tree));
+extern void constant_expression_warning PROTO((tree));
+extern tree convert_and_check PROTO((tree, tree));
+extern void overflow_warning PROTO((tree));
+extern void unsigned_conversion_warning PROTO((tree, tree));
+/* Read the rest of the current #-directive line. */
+extern char *get_directive_line STDIO_PROTO((FILE *));
+/* Subroutine of build_binary_op, used for comparison operations.
+ See if the operands have both been converted from subword integer types
+ and, if so, perhaps change them both back to their original type. */
+extern tree shorten_compare PROTO((tree *, tree *, tree *, enum tree_code *));
+/* Prepare expr to be an argument of a TRUTH_NOT_EXPR,
+ or validate its data type for an `if' or `while' statement or ?..: exp. */
+extern tree truthvalue_conversion PROTO((tree));
+extern tree type_for_mode PROTO((enum machine_mode, int));
+extern tree type_for_size PROTO((unsigned, int));
+
+/* in decl{2}.c */
+extern tree void_list_node;
+extern tree void_zero_node;
+extern tree default_function_type;
+extern tree vtable_entry_type;
+extern tree sigtable_entry_type;
+extern tree __t_desc_type_node, __i_desc_type_node, __m_desc_type_node;
+extern tree Type_info_type_node;
+extern tree class_star_type_node;
+extern tree this_identifier;
+extern tree pfn_identifier;
+extern tree index_identifier;
+extern tree delta_identifier;
+extern tree delta2_identifier;
+extern tree pfn_or_delta2_identifier;
+
+/* A node that is a list (length 1) of error_mark_nodes. */
+extern tree error_mark_list;
+
+extern tree ptr_type_node, const_ptr_type_node;
+extern tree class_type_node, record_type_node, union_type_node, enum_type_node;
+extern tree exception_type_node, unknown_type_node;
+extern tree opaque_type_node, signature_type_node;
+
+/* Node for "pointer to (virtual) function".
+   This may be distinct from ptr_type_node so gdb can distinguish them. */
+#define vfunc_ptr_type_node \
+ (flag_vtable_thunks ? vtable_entry_type : ptr_type_node)
+
+/* Array type `(void *)[]' */
+extern tree vtbl_type_node;
+extern tree delta_type_node;
+
+extern tree long_long_integer_type_node, long_long_unsigned_type_node;
+/* For building calls to `delete'. */
+extern tree integer_two_node, integer_three_node;
+extern tree bool_type_node, true_node, false_node;
+
+/* in except.c */
+extern tree current_exception_type;
+extern tree current_exception_decl;
+extern tree current_exception_object;
+
+/* in pt.c */
+/* PARM_VEC is a vector of template parameters, either IDENTIFIER_NODEs or
+ PARM_DECLs. BINDINGS, if non-null, is a vector of bindings for those
+ parameters. */
+struct template_info {
+ /* Vector of template parameters, either PARM_DECLs or IDENTIFIER_NODEs. */
+ tree parm_vec;
+ /* If non-null, a vector of bindings for the template parms. */
+ tree bindings;
+
+ /* Text of template, and length. */
+ char *text;
+ int length;
+ /* Where it came from. */
+ char *filename;
+ int lineno;
+
+ /* What kind of aggregate -- struct, class, or null. */
+ tree aggr;
+};
+extern int processing_template_decl, processing_template_defn;
+
+/* The template currently being instantiated, and where the instantiation
+ was triggered. */
+struct tinst_level
+{
+ tree classname;
+ int line;
+ char *file;
+ struct tinst_level *next;
+};
+
+extern struct tinst_level *current_tinst_level;
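+
+/* Editor's sketch: diagnostics can walk this chain to report an
+   "instantiated from" trail, e.g.
+
+     struct tinst_level *p;
+     for (p = current_tinst_level; p; p = p->next)
+       ... report p->classname at p->file, line p->line ...
+*/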
+
+/* in class.c */
+extern tree current_class_name;
+extern tree current_class_type;
+extern tree previous_class_type;
+
+extern tree current_lang_name, lang_name_cplusplus, lang_name_c;
+
+/* Points to the name of that function. May not be the DECL_NAME
+ of CURRENT_FUNCTION_DECL due to overloading */
+extern tree original_function_name;
+
+extern tree current_class_name, current_class_type, current_class_decl, C_C_D;
+extern tree current_vtable_decl;
+
+/* in init.c */
+extern tree global_base_init_list;
+extern tree current_base_init_list, current_member_init_list;
+
+extern int current_function_assigns_this;
+extern int current_function_just_assigned_this;
+extern int current_function_parms_stored;
+
+/* Here's where we control how name mangling takes place. */
+
+#define OPERATOR_ASSIGN_FORMAT "__a%s"
+#define OPERATOR_FORMAT "__%s"
+#define OPERATOR_TYPENAME_FORMAT "__op"
+#define OPERATOR_TYPENAME_P(ID_NODE) \
+ (IDENTIFIER_POINTER (ID_NODE)[0] == '_' \
+ && IDENTIFIER_POINTER (ID_NODE)[1] == '_' \
+ && IDENTIFIER_POINTER (ID_NODE)[2] == 'o' \
+ && IDENTIFIER_POINTER (ID_NODE)[3] == 'p')
+
+
+/* Cannot use '$' up front, because this confuses gdb
+ (names beginning with '$' are gdb-local identifiers).
+
+ Note that all forms in which the '$' is significant are long enough
+ for direct indexing (meaning that if we know there is a '$'
+ at a particular location, we can index into the string at
+ any other location that provides distinguishing characters). */
+
+/* Define NO_DOLLAR_IN_LABEL in your favorite tm file if your assembler
+ doesn't allow '$' in symbol names. */
+#ifndef NO_DOLLAR_IN_LABEL
+
+#define JOINER '$'
+
+#define VPTR_NAME "$v"
+#define THROW_NAME "$eh_throw"
+#define DESTRUCTOR_DECL_PREFIX "_$_"
+#define AUTO_VTABLE_NAME "__vtbl$me__"
+#define AUTO_TEMP_NAME "_$tmp_"
+#define AUTO_TEMP_FORMAT "_$tmp_%d"
+#define VTABLE_BASE "$vb"
+#define VTABLE_NAME_FORMAT (flag_vtable_thunks ? "__vt_%s" : "_vt$%s")
+#define VFIELD_BASE "$vf"
+#define VFIELD_NAME "_vptr$"
+#define VFIELD_NAME_FORMAT "_vptr$%s"
+#define VBASE_NAME "_vb$"
+#define VBASE_NAME_FORMAT "_vb$%s"
+#define STATIC_NAME_FORMAT "_%s$%s"
+#define ANON_AGGRNAME_FORMAT "$_%d"
+
+#else /* NO_DOLLAR_IN_LABEL */
+
+#ifndef NO_DOT_IN_LABEL
+
+#define JOINER '.'
+
+#define VPTR_NAME ".v"
+#define THROW_NAME ".eh_throw"
+#define DESTRUCTOR_DECL_PREFIX "_._"
+#define AUTO_VTABLE_NAME "__vtbl.me__"
+#define AUTO_TEMP_NAME "_.tmp_"
+#define AUTO_TEMP_FORMAT "_.tmp_%d"
+#define VTABLE_BASE ".vb"
+#define VTABLE_NAME_FORMAT (flag_vtable_thunks ? "__vt_%s" : "_vt.%s")
+#define VFIELD_BASE ".vf"
+#define VFIELD_NAME "_vptr."
+#define VFIELD_NAME_FORMAT "_vptr.%s"
+#define VBASE_NAME "_vb."
+#define VBASE_NAME_FORMAT "_vb.%s"
+#define STATIC_NAME_FORMAT "_%s.%s"
+
+#define ANON_AGGRNAME_FORMAT "._%d"
+
+#else /* NO_DOT_IN_LABEL */
+
+#define VPTR_NAME "__vptr"
+#define VPTR_NAME_P(ID_NODE) \
+ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VPTR_NAME, sizeof (VPTR_NAME) - 1))
+#define THROW_NAME "__eh_throw"
+#define DESTRUCTOR_DECL_PREFIX "__destr_"
+#define DESTRUCTOR_NAME_P(ID_NODE) \
+ (!strncmp (IDENTIFIER_POINTER (ID_NODE), DESTRUCTOR_DECL_PREFIX, \
+ sizeof (DESTRUCTOR_DECL_PREFIX) - 1))
+#define IN_CHARGE_NAME "__in_chrg"
+#define AUTO_VTABLE_NAME "__vtbl_me__"
+#define AUTO_TEMP_NAME "__tmp_"
+#define TEMP_NAME_P(ID_NODE) \
+ (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
+ sizeof (AUTO_TEMP_NAME) - 1))
+#define AUTO_TEMP_FORMAT "__tmp_%d"
+#define VTABLE_BASE "__vtb"
+#define VTABLE_NAME "__vt_"
+#define VTABLE_NAME_FORMAT (flag_vtable_thunks ? "__vt_%s" : "_vt_%s")
+#define VTABLE_NAME_P(ID_NODE) \
+ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
+ sizeof (VTABLE_NAME) - 1))
+#define VFIELD_BASE "__vfb"
+#define VFIELD_NAME "__vptr_"
+#define VFIELD_NAME_P(ID_NODE) \
+ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
+ sizeof (VFIELD_NAME) - 1))
+#define VFIELD_NAME_FORMAT "_vptr_%s"
+#define VBASE_NAME "__vb_"
+#define VBASE_NAME_P(ID_NODE) \
+ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VBASE_NAME, \
+ sizeof (VBASE_NAME) - 1))
+#define VBASE_NAME_FORMAT "__vb_%s"
+#define STATIC_NAME_FORMAT "__static_%s_%s"
+
+#define ANON_AGGRNAME_PREFIX "__anon_"
+#define ANON_AGGRNAME_P(ID_NODE) \
+ (!strncmp (IDENTIFIER_POINTER (ID_NODE), ANON_AGGRNAME_PREFIX, \
+ sizeof (ANON_AGGRNAME_PREFIX) - 1))
+#define ANON_AGGRNAME_FORMAT "__anon_%d"
+#define ANON_PARMNAME_FORMAT "__%d"
+#define ANON_PARMNAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[0] == '_' \
+ && IDENTIFIER_POINTER (ID_NODE)[1] == '_' \
+ && IDENTIFIER_POINTER (ID_NODE)[2] <= '9')
+
+#endif /* NO_DOT_IN_LABEL */
+#endif /* NO_DOLLAR_IN_LABEL */
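+
+/* Illustration (editor's addition): with '$' as the JOINER, a class
+   Foo gets a vtable named "_vt$Foo" ("__vt_Foo" under -fvtable-thunks),
+   a vfield named "_vptr$Foo", and anonymous aggregates are named
+   "$_0", "$_1", ...; the NO_DOT_IN_LABEL/NO_DOLLAR_IN_LABEL branches
+   above spell the same names with '.' or plain underscores.  */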
+
+#define THIS_NAME "this"
+#define DESTRUCTOR_NAME_FORMAT "~%s"
+#define FILE_FUNCTION_PREFIX_LEN 9
+
+#define IN_CHARGE_NAME "__in_chrg"
+
+#define VTBL_PTR_TYPE "__vtbl_ptr_type"
+#define VTABLE_DELTA_NAME "__delta"
+#define VTABLE_INDEX_NAME "__index"
+#define VTABLE_PFN_NAME "__pfn"
+#define VTABLE_DELTA2_NAME "__delta2"
+
+#define SIGNATURE_FIELD_NAME "__s_"
+#define SIGNATURE_FIELD_NAME_FORMAT "__s_%s"
+#define SIGNATURE_OPTR_NAME "__optr"
+#define SIGNATURE_SPTR_NAME "__sptr"
+#define SIGNATURE_VPTR_NAME "__vptr"
+#define SIGNATURE_POINTER_NAME "__sp_"
+#define SIGNATURE_POINTER_NAME_FORMAT "__%s%ssp_%s"
+#define SIGNATURE_REFERENCE_NAME "__sr_"
+#define SIGNATURE_REFERENCE_NAME_FORMAT "__%s%ssr_%s"
+
+#define SIGTABLE_PTR_TYPE "__sigtbl_ptr_type"
+#define SIGTABLE_NAME_FORMAT "__st_%s_%s"
+#define SIGTABLE_NAME_FORMAT_LONG "__st_%s_%s_%d"
+#define SIGTABLE_CODE_NAME "__code"
+#define SIGTABLE_OFFSET_NAME "__offset"
+#define SIGTABLE_PFN_NAME "__pfn"
+#define EXCEPTION_CLEANUP_NAME "exception cleanup"
+
+#define THIS_NAME_P(ID_NODE) (strcmp(IDENTIFIER_POINTER (ID_NODE), "this") == 0)
+
+#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)
+
+#define VPTR_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[0] == JOINER \
+ && IDENTIFIER_POINTER (ID_NODE)[1] == 'v')
+#define DESTRUCTOR_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == JOINER \
+ && IDENTIFIER_POINTER (ID_NODE)[2] == '_')
+
+#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
+ && IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
+ && IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)
+
+#define VBASE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
+ && IDENTIFIER_POINTER (ID_NODE)[2] == 'b' \
+ && IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)
+
+#define TEMP_NAME_P(ID_NODE) (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
+#define VFIELD_NAME_P(ID_NODE) (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))
+
+/* For anonymous aggregate types, we need some sort of name to
+ hold on to. In practice, this should not appear, but it should
+ not be harmful if it does. */
+#define ANON_AGGRNAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[0] == JOINER \
+ && IDENTIFIER_POINTER (ID_NODE)[1] == '_')
+#define ANON_PARMNAME_FORMAT "_%d"
+#define ANON_PARMNAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[0] == '_' \
+ && IDENTIFIER_POINTER (ID_NODE)[1] <= '9')
+#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */
+
+/* Define the sets of attributes that member functions and baseclasses
+ can have. These are sensible combinations of {public,private,protected}
+ cross {virtual,non-virtual}. */
+
+enum access_type {
+ access_default,
+ access_public,
+ access_protected,
+ access_private,
+ access_default_virtual,
+ access_public_virtual,
+ access_private_virtual
+};
+
+/* in lex.c */
+extern tree current_unit_name, current_unit_language;
+
+/* Things for handling inline functions. */
+
+struct pending_inline
+{
+ struct pending_inline *next; /* pointer to next in chain */
+ int lineno; /* line number we got the text from */
+ char *filename; /* name of file we were processing */
+ tree fndecl; /* FUNCTION_DECL that brought us here */
+ int token; /* token we were scanning */
+ int token_value; /* value of token we were scanning (YYSTYPE) */
+
+ char *buf; /* pointer to character stream */
+ int len; /* length of stream */
+ tree parm_vec, bindings; /* in case this is derived from a template */
+ unsigned int can_free : 1; /* free this after we're done with it? */
+ unsigned int deja_vu : 1; /* set iff we don't want to see it again. */
+ unsigned int interface : 2; /* 0=interface 1=unknown 2=implementation */
+};
+
+/* in method.c */
+extern struct pending_inline *pending_inlines;
+
+/* 1 for -fall-virtual: place every member function (except
+   constructors) in the virtual function table.
+ Calls can then either go through the virtual function table or not,
+ depending on whether we know what function will actually be called. */
+
+extern int flag_all_virtual;
+
+/* Positive values mean that we cannot make optimizing assumptions about
+   `this'.  Negative values mean we know `this' to be of static type. */
+
+extern int flag_this_is_variable;
+
+/* Controls whether enums and ints freely convert.
+ 1 means with complete freedom.
+ 0 means enums can convert to ints, but not vice-versa. */
+
+extern int flag_int_enum_equivalence;
+
+/* Nonzero means layout structures so that we can do garbage collection. */
+
+extern int flag_gc;
+
+/* Nonzero means generate 'dossiers' that give run-time type information. */
+
+extern int flag_dossier;
+
+/* Nonzero means do emit exported implementations of functions even if
+ they can be inlined. */
+
+extern int flag_implement_inlines;
+
+/* Nonzero means templates obey #pragma interface and implementation. */
+
+extern int flag_external_templates;
+
+/* Nonzero means templates are emitted where they are instantiated. */
+
+extern int flag_alt_external_templates;
+
+/* Nonzero means implicit template instantiations are emitted. */
+
+extern int flag_implicit_templates;
+
+/* Current end of entries in the gc obstack for stack pointer variables. */
+
+extern int current_function_obstack_index;
+
+/* Flag saying whether we have used the obstack in this function or not. */
+
+extern int current_function_obstack_usage;
+
+enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, OP_FLAG, TYPENAME_FLAG };
+
+extern tree current_class_decl, C_C_D; /* PARM_DECL: the class instance variable */
+
+/* The following two can be derived from the previous one */
+extern tree current_class_name; /* IDENTIFIER_NODE: name of current class */
+extern tree current_class_type; /* _TYPE: the type of the current class */
+
+/* Some macros for char-based bitfields. */
+#define B_SET(a,x) (a[x>>3] |= (1 << (x&7)))
+#define B_CLR(a,x) (a[x>>3] &= ~(1 << (x&7)))
+#define B_TST(a,x) (a[x>>3] & (1 << (x&7)))
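+
+/* Usage sketch (editor's addition): given `char used[(n + 7) / 8];'
+   cleared to zero, B_SET (used, i) marks bit i, B_TST (used, i)
+   tests it, and B_CLR (used, i) clears it again.  */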
+
+/* These are used as bits in flags passed to build_method_call
+   to control its error reporting behavior.
+
+   LOOKUP_PROTECT means flag access violations.
+   LOOKUP_COMPLAIN means complain if no suitable member function
+   matching the arguments is found.
+   LOOKUP_NORMAL is just a combination of these two.
+   LOOKUP_AGGR requires the instance to be of aggregate type.
+   LOOKUP_NONVIRTUAL means make a direct call to the member function found.
+   LOOKUP_GLOBAL means search through the space of overloaded functions,
+   as well as the space of member functions.
+   LOOKUP_HAS_IN_CHARGE means that the "in charge" variable is already
+   in the parameter list.
+   LOOKUP_NO_CONVERSION means that user-defined conversions are not
+   permitted.  Built-in conversions are permitted.
+   LOOKUP_DESTRUCTOR means explicit call to destructor. */
+
+#define LOOKUP_PROTECT (1)
+#define LOOKUP_COMPLAIN (2)
+#define LOOKUP_NORMAL (3)
+#define LOOKUP_AGGR (4)
+#define LOOKUP_NONVIRTUAL (8)
+#define LOOKUP_GLOBAL (16)
+#define LOOKUP_HAS_IN_CHARGE (32)
+#define LOOKUP_SPECULATIVELY (64)
+/* 128 & 256 are free */
+#define LOOKUP_NO_CONVERSION (512)
+#define LOOKUP_DESTRUCTOR (1024)
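+
+/* Illustrative call (editor's sketch, not from the original source):
+
+     build_method_call (instance, fn_name, parms, NULL_TREE,
+                        LOOKUP_NORMAL | LOOKUP_NONVIRTUAL);
+
+   diagnoses access violations and missing matches, and binds the call
+   directly rather than through the vtable.  */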
+
+/* These flags are used by the conversion code.
+ CONV_IMPLICIT : Perform implicit conversions (standard and user-defined).
+ CONV_STATIC : Perform the explicit conversions for static_cast.
+ CONV_CONST : Perform the explicit conversions for const_cast.
+ CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast.
+ CONV_PRIVATE : Perform upcasts to private bases. */
+
+#define CONV_IMPLICIT 1
+#define CONV_STATIC 2
+#define CONV_CONST 4
+#define CONV_REINTERPRET 8
+#define CONV_PRIVATE 16
+#define CONV_STATIC_CAST (CONV_IMPLICIT | CONV_STATIC)
+#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
+ | CONV_REINTERPRET)
+#define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
+ | CONV_REINTERPRET | CONV_PRIVATE)
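+
+/* Editor's note: CONV_C_CAST sets every bit above, reflecting that an
+   old-style cast may perform anything the new-style casts can, plus
+   upcasts to private bases; CONV_STATIC_CAST adds only CONV_STATIC to
+   the implicit conversions.  */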
+
+/* Anatomy of a DECL_FRIENDLIST (which is a TREE_LIST):
+ purpose = friend name (IDENTIFIER_NODE);
+ value = TREE_LIST of FUNCTION_DECLS;
+ chain, type = EMPTY; */
+#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST))
+#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST))
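+
+/* Editor's sketch of scanning such a list for one friend name,
+   assuming entries are chained with TREE_CHAIN in the usual TREE_LIST
+   way (`typedecl' and `name' are hypothetical locals):
+
+     tree probe;
+     for (probe = DECL_FRIENDLIST (typedecl); probe;
+          probe = TREE_CHAIN (probe))
+       if (FRIEND_NAME (probe) == name)
+         return FRIEND_DECLS (probe);
+*/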
+
+/* These macros are for accessing the fields of TEMPLATE...PARM nodes. */
+#define TEMPLATE_TYPE_TPARMLIST(NODE) TREE_PURPOSE (TYPE_FIELDS (NODE))
+#define TEMPLATE_TYPE_IDX(NODE) TREE_INT_CST_LOW (TREE_VALUE (TYPE_FIELDS (NODE)))
+#define TEMPLATE_TYPE_SET_INFO(NODE,P,I) \
+ (TYPE_FIELDS (NODE) = build_tree_list (P, build_int_2 (I, 0)))
+#define TEMPLATE_CONST_TPARMLIST(NODE) (*(tree*)&TREE_INT_CST_LOW(NODE))
+#define TEMPLATE_CONST_IDX(NODE) (TREE_INT_CST_HIGH(NODE))
+#define TEMPLATE_CONST_SET_INFO(NODE,P,I) \
+ (TEMPLATE_CONST_TPARMLIST (NODE) = saved_parmlist, \
+ TEMPLATE_CONST_IDX (NODE) = I)
+
+/* in lex.c */
+/* Indexed by TREE_CODE, these tables give C-looking names to
+ operators represented by TREE_CODES. For example,
+ opname_tab[(int) MINUS_EXPR] == "-". */
+extern char **opname_tab, **assignop_tab;
+
+/* in call.c */
+extern struct candidate *ansi_c_bullshit;
+
+extern int rank_for_overload PROTO((struct candidate *, struct candidate *));
+extern void compute_conversion_costs PROTO((tree, tree, struct candidate *, int));
+extern int get_arglist_len_in_bytes PROTO((tree));
+extern tree build_vfield_ref PROTO((tree, tree));
+extern tree find_scoped_type PROTO((tree, tree, tree));
+extern tree resolve_scope_to_name PROTO((tree, tree));
+extern tree build_scoped_method_call PROTO((tree, tree, tree, tree));
+extern tree build_method_call PROTO((tree, tree, tree, tree, int));
+extern tree build_overload_call_real PROTO((tree, tree, int, struct candidate *, int));
+extern tree build_overload_call PROTO((tree, tree, int, struct candidate *));
+extern tree build_overload_call_maybe PROTO((tree, tree, int, struct candidate *));
+
+/* in class.c */
+extern tree build_vbase_pointer PROTO((tree, tree));
+extern tree build_vbase_path PROTO((enum tree_code, tree, tree, tree, int));
+extern tree build_vtable_entry PROTO((tree, tree));
+extern tree build_vfn_ref PROTO((tree *, tree, tree));
+extern void add_method PROTO((tree, tree *, tree));
+extern tree get_vfield_offset PROTO((tree));
+extern void duplicate_tag_error PROTO((tree));
+extern tree finish_struct PROTO((tree, tree, int));
+extern int resolves_to_fixed_type_p PROTO((tree, int *));
+extern void init_class_processing PROTO((void));
+extern void pushclass PROTO((tree, int));
+extern void popclass PROTO((int));
+extern void push_nested_class PROTO((tree, int));
+extern void pop_nested_class PROTO((int));
+extern void push_lang_context PROTO((tree));
+extern void pop_lang_context PROTO((void));
+extern int root_lang_context_p PROTO((void));
+extern tree instantiate_type PROTO((tree, tree, int));
+extern void print_class_statistics PROTO((void));
+extern void maybe_push_cache_obstack PROTO((void));
+
+/* in cvt.c */
+extern tree convert_to_reference PROTO((tree, tree, int, int, tree));
+extern tree convert_from_reference PROTO((tree));
+extern tree convert_to_aggr PROTO((tree, tree, char **, int));
+extern tree convert_pointer_to PROTO((tree, tree));
+extern tree convert_pointer_to_real PROTO((tree, tree));
+extern tree convert_pointer_to_vbase PROTO((tree, tree));
+extern tree convert PROTO((tree, tree));
+extern tree convert_force PROTO((tree, tree));
+extern tree build_type_conversion PROTO((enum tree_code, tree, tree, int));
+extern int build_default_binary_type_conversion PROTO((enum tree_code, tree *, tree *));
+extern int build_default_unary_type_conversion PROTO((enum tree_code, tree *));
+extern tree type_promotes_to PROTO((tree));
+
+/* decl.c */
+extern int global_bindings_p PROTO((void));
+extern void keep_next_level PROTO((void));
+extern int kept_level_p PROTO((void));
+extern void declare_parm_level PROTO((void));
+extern void declare_implicit_exception PROTO((void));
+extern int have_exceptions_p PROTO((void));
+extern void declare_uninstantiated_type_level PROTO((void));
+extern int uninstantiated_type_level_p PROTO((void));
+extern void declare_pseudo_global_level PROTO((void));
+extern int pseudo_global_level_p PROTO((void));
+extern void pushlevel PROTO((int));
+extern void pushlevel_temporary PROTO((int));
+extern tree poplevel PROTO((int, int, int));
+extern void delete_block PROTO((tree));
+extern void insert_block PROTO((tree));
+extern void add_block_current_level PROTO((tree));
+extern void set_block PROTO((tree));
+extern void pushlevel_class PROTO((void));
+extern tree poplevel_class PROTO((int));
+/* skip print_other_binding_stack and print_binding_level */
+extern void print_binding_stack PROTO((void));
+extern void push_to_top_level PROTO((void));
+extern void pop_from_top_level PROTO((void));
+extern void set_identifier_type_value PROTO((tree, tree));
+extern void pop_everything PROTO((void));
+extern tree make_type_decl PROTO((tree, tree));
+extern void pushtag PROTO((tree, tree, int));
+extern tree make_anon_name PROTO((void));
+extern void clear_anon_tags PROTO((void));
+extern tree pushdecl PROTO((tree));
+extern tree pushdecl_top_level PROTO((tree));
+extern void push_class_level_binding PROTO((tree, tree));
+extern void push_overloaded_decl_top_level PROTO((tree, int));
+extern tree pushdecl_class_level PROTO((tree));
+extern int overloaded_globals_p PROTO((tree));
+extern tree push_overloaded_decl PROTO((tree, int));
+extern tree implicitly_declare PROTO((tree));
+extern tree lookup_label PROTO((tree));
+extern tree shadow_label PROTO((tree));
+extern tree define_label PROTO((char *, int, tree));
+extern void define_case_label PROTO((tree));
+extern tree getdecls PROTO((void));
+extern tree gettags PROTO((void));
+extern void set_current_level_tags_transparency PROTO((int));
+extern tree typedecl_for_tag PROTO((tree));
+extern tree lookup_name PROTO((tree, int));
+extern tree lookup_name_current_level PROTO((tree));
+extern void init_decl_processing PROTO((void));
+/* skipped define_function */
+extern void shadow_tag PROTO((tree));
+extern int grok_ctor_properties PROTO((tree, tree));
+extern tree groktypename PROTO((tree));
+extern tree start_decl PROTO((tree, tree, int, tree));
+extern void finish_decl PROTO((tree, tree, tree, int));
+extern void expand_static_init PROTO((tree, tree));
+extern int complete_array_type PROTO((tree, tree, int));
+extern tree build_ptrmemfunc_type PROTO((tree));
+extern tree grokdeclarator (); /* PROTO((tree, tree, enum decl_context, int, tree)); */
+extern int parmlist_is_exprlist PROTO((tree));
+extern tree xref_defn_tag PROTO((tree, tree, tree));
+extern tree xref_tag PROTO((tree, tree, tree, int));
+extern tree start_enum PROTO((tree));
+extern tree finish_enum PROTO((tree, tree));
+extern tree build_enumerator PROTO((tree, tree));
+extern tree grok_enum_decls PROTO((tree, tree));
+extern int start_function PROTO((tree, tree, tree, int));
+extern void store_parm_decls PROTO((void));
+extern void store_return_init PROTO((tree, tree));
+extern void finish_function PROTO((int, int));
+extern tree start_method PROTO((tree, tree, tree));
+extern tree finish_method PROTO((tree));
+extern void hack_incomplete_structures PROTO((tree));
+extern tree maybe_build_cleanup PROTO((tree));
+extern void cplus_expand_expr_stmt PROTO((tree));
+extern void finish_stmt PROTO((void));
+extern void pop_implicit_try_blocks PROTO((tree));
+extern void push_exception_cleanup PROTO((tree));
+extern void revert_static_member_fn PROTO((tree *, tree *, tree *));
+
+/* in decl2.c */
+extern int lang_decode_option PROTO((char *));
+extern tree grok_method_quals PROTO((tree, tree, tree));
+extern void grokclassfn PROTO((tree, tree, tree, enum overload_flags, tree));
+extern tree grok_alignof PROTO((tree));
+extern tree grok_array_decl PROTO((tree, tree));
+extern tree delete_sanity PROTO((tree, tree, int, int));
+extern void check_classfn PROTO((tree, tree, tree));
+extern tree grokfield PROTO((tree, tree, tree, tree, tree));
+extern tree grokbitfield PROTO((tree, tree, tree));
+extern tree groktypefield PROTO((tree, tree));
+extern tree grokoptypename PROTO((tree, tree));
+extern tree build_push_scope PROTO((tree, tree));
+extern tree constructor_name_full PROTO((tree));
+extern tree constructor_name PROTO((tree));
+extern void setup_vtbl_ptr PROTO((void));
+extern void mark_inline_for_output PROTO((tree));
+extern void clear_temp_name PROTO((void));
+extern tree get_temp_name PROTO((tree, int));
+extern tree get_temp_regvar PROTO((tree, tree));
+extern void finish_anon_union PROTO((tree));
+extern tree finish_table PROTO((tree, tree, tree, int));
+extern void finish_builtin_type PROTO((tree, char *, tree *, int, tree));
+extern tree coerce_new_type PROTO((tree));
+extern tree coerce_delete_type PROTO((tree));
+extern void walk_vtables PROTO((void (*)(), void (*)()));
+extern void walk_sigtables PROTO((void (*)(), void (*)()));
+extern void finish_file PROTO((void));
+extern void warn_if_unknown_interface PROTO((void));
+extern tree grok_x_components PROTO((tree, tree));
+extern tree reparse_absdcl_as_expr PROTO((tree, tree));
+extern tree reparse_absdcl_as_casts PROTO((tree, tree));
+extern tree reparse_decl_as_expr PROTO((tree, tree));
+extern tree finish_decl_parsing PROTO((tree));
+extern tree lookup_name_nonclass PROTO((tree));
+extern tree check_cp_case_value PROTO((tree));
+
+/* in edsel.c */
+
+/* in except.c */
+
+extern void start_protect PROTO((void));
+extern void end_protect PROTO((tree));
+extern void expand_exception_blocks PROTO((void));
+extern void expand_start_try_stmts PROTO((void));
+extern void expand_end_try_stmts PROTO((void));
+extern void expand_start_all_catch PROTO((void));
+extern void expand_end_all_catch PROTO((void));
+extern void start_catch_block PROTO((tree, tree));
+extern void end_catch_block PROTO((void));
+extern void expand_throw PROTO((tree));
+extern void build_exception_table PROTO((void));
+extern tree build_throw PROTO((tree));
+extern void init_exception_processing PROTO((void));
+
+/* in expr.c */
+/* skip cplus_expand_expr */
+extern void init_cplus_expand PROTO((void));
+extern void fixup_result_decl PROTO((tree, struct rtx_def *));
+extern int decl_in_memory_p PROTO((tree));
+
+/* in gc.c */
+extern int type_needs_gc_entry PROTO((tree));
+extern int value_safe_from_gc PROTO((tree, tree));
+extern void build_static_gc_entry PROTO((tree, tree));
+extern tree protect_value_from_gc PROTO((tree, tree));
+extern tree build_headof PROTO((tree));
+extern tree build_classof PROTO((tree));
+extern tree build_t_desc PROTO((tree, int));
+extern tree build_i_desc PROTO((tree));
+extern tree build_m_desc PROTO((tree));
+extern void expand_gc_prologue_and_epilogue PROTO((void));
+extern void lang_expand_end_bindings PROTO((struct rtx_def *, struct rtx_def *));
+extern void init_gc_processing PROTO((void));
+extern tree build_typeid PROTO((tree));
+extern tree get_typeid PROTO((tree));
+extern tree build_dynamic_cast PROTO((tree, tree));
+
+/* in init.c */
+extern void emit_base_init PROTO((tree, int));
+extern void check_base_init PROTO((tree));
+extern void expand_direct_vtbls_init PROTO((tree, tree, int, int, tree));
+extern void do_member_init PROTO((tree, tree, tree));
+extern void expand_member_init PROTO((tree, tree, tree));
+extern void expand_aggr_init PROTO((tree, tree, int));
+extern int is_aggr_typedef PROTO((tree, int));
+extern tree get_aggr_from_typedef PROTO((tree, int));
+extern tree get_type_value PROTO((tree));
+extern tree build_member_call PROTO((tree, tree, tree));
+extern tree build_offset_ref PROTO((tree, tree));
+extern tree get_member_function PROTO((tree *, tree, tree));
+extern tree get_member_function_from_ptrfunc PROTO((tree *, tree, tree));
+extern tree resolve_offset_ref PROTO((tree));
+extern tree decl_constant_value PROTO((tree));
+extern int is_friend_type PROTO((tree, tree));
+extern int is_friend PROTO((tree, tree));
+extern void make_friend_class PROTO((tree, tree));
+extern tree do_friend PROTO((tree, tree, tree, tree, enum overload_flags, tree));
+extern void embrace_waiting_friends PROTO((tree));
+extern tree build_builtin_call PROTO((tree, tree, tree));
+extern tree build_new PROTO((tree, tree, tree, int));
+extern tree expand_vec_init PROTO((tree, tree, tree, tree, int));
+extern tree build_x_delete PROTO((tree, tree, int, tree));
+extern tree build_delete PROTO((tree, tree, tree, int, int));
+extern tree build_vbase_delete PROTO((tree, tree));
+extern tree build_vec_delete PROTO((tree, tree, tree, tree, tree, int));
+
+/* in input.c */
+
+/* in lex.c */
+extern tree make_pointer_declarator PROTO((tree, tree));
+extern tree make_reference_declarator PROTO((tree, tree));
+extern char *operator_name_string PROTO((tree));
+extern void lang_init PROTO((void));
+extern void lang_finish PROTO((void));
+extern void init_filename_times PROTO((void));
+extern void reinit_lang_specific PROTO((void));
+extern void init_lex PROTO((void));
+extern void reinit_parse_for_function PROTO((void));
+extern int *init_parse PROTO((void));
+extern void print_parse_statistics PROTO((void));
+extern void extract_interface_info PROTO((void));
+extern void set_vardecl_interface_info PROTO((tree, tree));
+extern void do_pending_inlines PROTO((void));
+extern void process_next_inline PROTO((tree));
+/* skip restore_pending_input */
+extern void yyungetc PROTO((int, int));
+extern void reinit_parse_for_method PROTO((int, tree));
+#if 0
+extern void reinit_parse_for_block PROTO((int, struct obstack *, int));
+#endif
+extern tree cons_up_default_function PROTO((tree, tree, tree, int));
+extern void check_for_missing_semicolon PROTO((tree));
+extern void note_got_semicolon PROTO((tree));
+extern void note_list_got_semicolon PROTO((tree));
+extern int check_newline PROTO((void));
+extern void dont_see_typename PROTO((void));
+extern int identifier_type PROTO((tree));
+extern void see_typename PROTO((void));
+extern tree do_identifier PROTO((tree));
+extern tree identifier_typedecl_value PROTO((tree));
+extern int real_yylex PROTO((void));
+extern tree build_lang_decl PROTO((enum tree_code, tree, tree));
+extern tree build_lang_field_decl PROTO((enum tree_code, tree, tree));
+extern void copy_lang_decl PROTO((tree));
+extern tree make_lang_type PROTO((enum tree_code));
+extern void copy_decl_lang_specific PROTO((tree));
+extern void dump_time_statistics PROTO((void));
+/* extern void compiler_error PROTO((char *, HOST_WIDE_INT, HOST_WIDE_INT)); */
+extern void compiler_error_with_decl PROTO((tree, char *));
+extern void yyerror PROTO((char *));
+
+/* in errfn.c */
+extern void cp_error ();
+extern void cp_error_at ();
+extern void cp_warning ();
+extern void cp_warning_at ();
+extern void cp_pedwarn ();
+extern void cp_pedwarn_at ();
+extern void cp_compiler_error ();
+extern void cp_sprintf ();
+
+/* in error.c */
+extern void init_error PROTO((void));
+extern char *fndecl_as_string PROTO((tree, tree, int));
+extern char *type_as_string PROTO((tree, int));
+extern char *args_as_string PROTO((tree, int));
+extern char *decl_as_string PROTO((tree, int));
+extern char *expr_as_string PROTO((tree, int));
+extern char *code_as_string PROTO((enum tree_code, int));
+extern char *language_as_string PROTO((enum languages, int));
+extern char *parm_as_string PROTO((int, int));
+extern char *op_as_string PROTO((enum tree_code, int));
+
+/* in method.c */
+extern void init_method PROTO((void));
+extern tree make_anon_parm_name PROTO((void));
+extern void clear_anon_parm_name PROTO((void));
+extern void do_inline_function_hair PROTO((tree, tree));
+/* skip report_type_mismatch */
+extern char *build_overload_name PROTO((tree, int, int));
+extern tree cplus_exception_name PROTO((tree));
+extern tree build_decl_overload PROTO((tree, tree, int));
+extern tree build_typename_overload PROTO((tree));
+extern tree build_t_desc_overload PROTO((tree));
+extern void declare_overloaded PROTO((tree));
+#ifdef NO_AUTO_OVERLOAD
+extern int is_overloaded PROTO((tree));
+#endif
+extern tree build_opfncall PROTO((enum tree_code, int, tree, tree, tree));
+extern tree hack_identifier PROTO((tree, tree, int));
+extern tree build_component_type_expr PROTO((tree, tree, tree, int));
+
+/* in pt.c */
+extern void begin_template_parm_list PROTO((void));
+extern tree process_template_parm PROTO((tree, tree));
+extern tree end_template_parm_list PROTO((tree));
+extern void end_template_decl PROTO((tree, tree, tree, int));
+extern tree lookup_template_class PROTO((tree, tree, tree));
+extern void push_template_decls PROTO((tree, tree, int));
+extern void pop_template_decls PROTO((tree, tree, int));
+extern int uses_template_parms PROTO((tree));
+extern void instantiate_member_templates PROTO((tree));
+extern tree instantiate_class_template PROTO((tree, int));
+extern tree instantiate_template PROTO((tree, tree *));
+extern void undo_template_name_overload PROTO((tree, int));
+extern void overload_template_name PROTO((tree, int));
+extern void end_template_instantiation PROTO((tree));
+extern void reinit_parse_for_template PROTO((int, tree, tree));
+extern int type_unification PROTO((tree, tree *, tree, tree, int *, int));
+extern int do_pending_expansions PROTO((void));
+extern void do_pending_templates PROTO((void));
+extern struct tinst_level *tinst_for_decl PROTO((void));
+extern void do_function_instantiation PROTO((tree, tree, tree));
+extern void do_type_instantiation PROTO((tree, tree));
+extern tree create_nested_upt PROTO((tree, tree));
+
+/* in search.c */
+extern tree make_memoized_table_entry PROTO((tree, tree, int));
+extern void push_memoized_context PROTO((tree, int));
+extern void pop_memoized_context PROTO((int));
+extern tree get_binfo PROTO((tree, tree, int));
+extern int get_base_distance PROTO((tree, tree, int, tree *));
+extern enum access_type compute_access PROTO((tree, tree));
+extern tree lookup_field PROTO((tree, tree, int, int));
+extern tree lookup_nested_field PROTO((tree, int));
+extern tree lookup_fnfields PROTO((tree, tree, int));
+extern tree lookup_nested_tag PROTO((tree, tree));
+extern HOST_WIDE_INT breadth_first_search PROTO((tree, int (*)(), int (*)()));
+extern int tree_needs_constructor_p PROTO((tree, int));
+extern int tree_has_any_destructor_p PROTO((tree, int));
+extern tree get_matching_virtual PROTO((tree, tree, int));
+extern tree get_abstract_virtuals PROTO((tree));
+extern tree get_baselinks PROTO((tree, tree, tree));
+extern tree next_baselink PROTO((tree));
+extern tree init_vbase_pointers PROTO((tree, tree));
+extern void expand_indirect_vtbls_init PROTO((tree, tree, tree, int));
+extern void clear_search_slots PROTO((tree));
+extern tree get_vbase_types PROTO((tree));
+extern void build_mi_matrix PROTO((tree));
+extern void free_mi_matrix PROTO((void));
+extern void build_mi_virtuals PROTO((int, int));
+extern void add_mi_virtuals PROTO((int, tree));
+extern void report_ambiguous_mi_virtuals PROTO((int, tree));
+extern void note_debug_info_needed PROTO((tree));
+extern void push_class_decls PROTO((tree));
+extern void pop_class_decls PROTO((tree));
+extern void unuse_fields PROTO((tree));
+extern void unmark_finished_struct PROTO((tree));
+extern void print_search_statistics PROTO((void));
+extern void init_search_processing PROTO((void));
+extern void reinit_search_statistics PROTO((void));
+extern tree current_scope PROTO((void));
+
+/* in sig.c */
+extern tree build_signature_pointer_type PROTO((tree, int, int));
+extern tree build_signature_reference_type PROTO((tree, int, int));
+extern tree build_signature_pointer_constructor PROTO((tree, tree));
+extern tree build_signature_method_call PROTO((tree, tree, tree, tree));
+extern tree build_optr_ref PROTO((tree));
+extern tree build_sptr_ref PROTO((tree));
+extern tree build_vptr_ref PROTO((tree));
+
+/* in spew.c */
+extern void init_spew PROTO((void));
+extern int yylex PROTO((void));
+extern tree arbitrate_lookup PROTO((tree, tree, tree));
+
+/* in tree.c */
+extern int lvalue_p PROTO((tree));
+extern int lvalue_or_else PROTO((tree, char *));
+extern tree build_cplus_new PROTO((tree, tree, int));
+extern tree break_out_cleanups PROTO((tree));
+extern tree break_out_calls PROTO((tree));
+extern tree build_cplus_method_type PROTO((tree, tree, tree));
+extern tree build_cplus_staticfn_type PROTO((tree, tree, tree));
+extern tree build_cplus_array_type PROTO((tree, tree));
+extern void propagate_binfo_offsets PROTO((tree, tree));
+extern int layout_vbasetypes PROTO((tree, int));
+extern tree layout_basetypes PROTO((tree, tree));
+extern int list_hash PROTO((tree));
+extern tree list_hash_lookup PROTO((int, tree));
+extern void list_hash_add PROTO((int, tree));
+extern tree list_hash_canon PROTO((int, tree));
+extern tree hash_tree_cons PROTO((int, int, int, tree, tree, tree));
+extern tree hash_tree_chain PROTO((tree, tree));
+extern tree hash_chainon PROTO((tree, tree));
+extern tree get_decl_list PROTO((tree));
+extern tree list_hash_lookup_or_cons PROTO((tree));
+extern tree make_binfo PROTO((tree, tree, tree, tree, tree));
+extern tree copy_binfo PROTO((tree));
+extern tree binfo_value PROTO((tree, tree));
+extern tree reverse_path PROTO((tree));
+extern tree virtual_member PROTO((tree, tree));
+extern tree virtual_offset PROTO((tree, tree, tree));
+extern void debug_binfo PROTO((tree));
+extern int decl_list_length PROTO((tree));
+extern int count_functions PROTO((tree));
+extern tree decl_value_member PROTO((tree, tree));
+extern int is_overloaded_fn PROTO((tree));
+extern tree get_first_fn PROTO((tree));
+extern tree fnaddr_from_vtable_entry PROTO((tree));
+extern void set_fnaddr_from_vtable_entry PROTO((tree, tree));
+extern tree function_arg_chain PROTO((tree));
+extern int promotes_to_aggr_type PROTO((tree, enum tree_code));
+extern int is_aggr_type_2 PROTO((tree, tree));
+extern void message_2_types PROTO((void (*)(), char *, tree, tree));
+extern char *lang_printable_name PROTO((tree));
+extern tree build_exception_variant PROTO((tree, tree, tree));
+extern tree copy_to_permanent PROTO((tree));
+extern void print_lang_statistics PROTO((void));
+/* skip __eprintf */
+extern tree array_type_nelts_total PROTO((tree));
+extern tree array_type_nelts_top PROTO((tree));
+
+/* in typeck.c */
+extern tree bool_truthvalue_conversion PROTO((tree));
+extern tree target_type PROTO((tree));
+extern tree require_complete_type PROTO((tree));
+extern int type_unknown_p PROTO((tree));
+extern int fntype_p PROTO((tree));
+extern tree require_instantiated_type PROTO((tree, tree, tree));
+extern tree commonparms PROTO((tree, tree));
+extern tree common_type PROTO((tree, tree));
+extern int compexcepttypes PROTO((tree, tree, int));
+extern int comptypes PROTO((tree, tree, int));
+extern int comp_target_types PROTO((tree, tree, int));
+extern tree common_base_types PROTO((tree, tree));
+extern int compparms PROTO((tree, tree, int));
+extern int self_promoting_args_p PROTO((tree));
+extern tree unsigned_type PROTO((tree));
+extern tree signed_type PROTO((tree));
+extern tree signed_or_unsigned_type PROTO((int, tree));
+extern tree c_sizeof PROTO((tree));
+extern tree c_sizeof_nowarn PROTO((tree));
+extern tree c_alignof PROTO((tree));
+extern tree default_conversion PROTO((tree));
+extern tree build_object_ref PROTO((tree, tree, tree));
+extern tree build_component_ref_1 PROTO((tree, tree, int));
+extern tree build_component_ref PROTO((tree, tree, tree, int));
+extern tree build_x_indirect_ref PROTO((tree, char *));
+extern tree build_indirect_ref PROTO((tree, char *));
+extern tree build_x_array_ref PROTO((tree, tree));
+extern tree build_array_ref PROTO((tree, tree));
+extern tree build_x_function_call PROTO((tree, tree, tree));
+extern tree build_function_call_real PROTO((tree, tree, int, int));
+extern tree build_function_call PROTO((tree, tree));
+extern tree build_function_call_maybe PROTO((tree, tree));
+extern tree convert_arguments PROTO((tree, tree, tree, tree, int));
+extern tree build_x_binary_op PROTO((enum tree_code, tree, tree));
+extern tree build_binary_op PROTO((enum tree_code, tree, tree, int));
+extern tree build_binary_op_nodefault PROTO((enum tree_code, tree, tree, enum tree_code));
+extern tree build_component_addr PROTO((tree, tree, char *));
+extern tree build_x_unary_op PROTO((enum tree_code, tree));
+extern tree build_unary_op PROTO((enum tree_code, tree, int));
+extern tree unary_complex_lvalue PROTO((enum tree_code, tree));
+extern int mark_addressable PROTO((tree));
+extern tree build_x_conditional_expr PROTO((tree, tree, tree));
+extern tree build_conditional_expr PROTO((tree, tree, tree));
+extern tree build_x_compound_expr PROTO((tree));
+extern tree build_compound_expr PROTO((tree));
+extern tree build_static_cast PROTO((tree, tree));
+extern tree build_reinterpret_cast PROTO((tree, tree));
+extern tree build_const_cast PROTO((tree, tree));
+extern tree build_c_cast PROTO((tree, tree));
+extern tree build_modify_expr PROTO((tree, enum tree_code, tree));
+extern int language_lvalue_valid PROTO((tree));
+extern void warn_for_assignment PROTO((char *, char *, char *, tree, int, int));
+extern tree convert_for_initialization PROTO((tree, tree, tree, int, char *, tree, int));
+extern void c_expand_asm_operands PROTO((tree, tree, tree, tree, int, char *, int));
+extern void c_expand_return PROTO((tree));
+extern tree c_expand_start_case PROTO((tree));
+extern tree build_ptrmemfunc PROTO((tree, tree, int));
+
+/* in typeck2.c */
+extern tree error_not_base_type PROTO((tree, tree));
+extern tree binfo_or_else PROTO((tree, tree));
+extern void error_with_aggr_type (); /* PROTO((tree, char *, HOST_WIDE_INT)); */
+extern void readonly_error PROTO((tree, char *, int));
+extern void abstract_virtuals_error PROTO((tree, tree));
+extern void incomplete_type_error PROTO((tree, tree));
+extern void my_friendly_abort PROTO((int));
+extern void my_friendly_assert PROTO((int, int));
+extern tree store_init_value PROTO((tree, tree));
+extern tree digest_init PROTO((tree, tree, tree *));
+extern tree build_scoped_ref PROTO((tree, tree));
+extern tree build_x_arrow PROTO((tree));
+extern tree build_m_component_ref PROTO((tree, tree));
+extern tree build_functional_cast PROTO((tree, tree));
+extern char *enum_name_string PROTO((tree, tree));
+extern void report_case_error PROTO((int, tree, tree, tree));
+
+/* in xref.c */
+extern void GNU_xref_begin PROTO((char *));
+extern void GNU_xref_end PROTO((int));
+extern void GNU_xref_file PROTO((char *));
+extern void GNU_xref_start_scope PROTO((HOST_WIDE_INT));
+extern void GNU_xref_end_scope PROTO((HOST_WIDE_INT, HOST_WIDE_INT, int, int, int));
+extern void GNU_xref_def PROTO((tree, char *));
+extern void GNU_xref_decl PROTO((tree, tree));
+extern void GNU_xref_call PROTO((tree, char *));
+extern void GNU_xref_function PROTO((tree, tree));
+extern void GNU_xref_assign PROTO((tree));
+extern void GNU_xref_hier PROTO((char *, char *, int, int, int));
+extern void GNU_xref_member PROTO((tree, tree));
+
+/* -- end of C++ */
+
+#endif /* not _CP_TREE_H */
diff --git a/gnu/usr.bin/cc/cc1plus/cvt.c b/gnu/usr.bin/cc/cc1plus/cvt.c
new file mode 100644
index 0000000..cb8d968
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/cvt.c
@@ -0,0 +1,2044 @@
+/* Language-level data type conversion for GNU C++.
+ Copyright (C) 1987, 1988, 1992, 1993 Free Software Foundation, Inc.
+ Hacked by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file contains the functions for converting C expressions
+ to different data types. The only entry point is `convert'.
+ Every language front end must have a `convert' function
+ but what kind of conversions it does will depend on the language. */
+
+#include "config.h"
+#include "tree.h"
+#include "flags.h"
+#include "cp-tree.h"
+#include "class.h"
+#include "convert.h"
+
+#undef NULL
+#define NULL (char *)0
+
+/* Change of width--truncation and extension of integers or reals--
+ is represented with NOP_EXPR. Proper functioning of many things
+ assumes that no other conversions can be NOP_EXPRs.
+
+ Conversion between integer and pointer is represented with CONVERT_EXPR.
+ Converting integer to real uses FLOAT_EXPR
+ and real to integer uses FIX_TRUNC_EXPR.
+
+ Here is a list of all the functions that assume that widening and
+ narrowing is always done with a NOP_EXPR:
+ In convert.c, convert_to_integer.
+ In c-typeck.c, build_binary_op_nodefault (boolean ops),
+ and truthvalue_conversion.
+ In expr.c: expand_expr, for operands of a MULT_EXPR.
+ In fold-const.c: fold.
+ In tree.c: get_narrower and get_unwidened.
+
+ C++: in multiple-inheritance, converting between pointers may involve
+ adjusting them by a delta stored within the class definition. */
+
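+/* An illustrative sketch of that delta adjustment (a hypothetical
+   example, not taken from the sources):
+
+	struct A { int a; };
+	struct B { int b; };
+	struct C : public A, public B { };
+
+   converting a `C*' to a `B*' must add B's offset within C to the
+   pointer value, so the result cannot be a plain NOP_EXPR bit-copy
+   of the pointer.  */
+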
+/* Subroutines of `convert'. */
+
+/* Build a thunk: an entry point which, when called, adjusts the
+   `this' pointer (the first argument) by OFFSET and then jumps to
+   the real function given by REAL_ADDR.  What we return is the
+   address of the thunk.  */
+static tree
+build_thunk (offset, real_addr)
+ tree offset, real_addr;
+{
+ if (TREE_CODE (real_addr) != ADDR_EXPR
+ || TREE_CODE (TREE_OPERAND (real_addr, 0)) != FUNCTION_DECL)
+ {
+ sorry ("MI pointer to member conversion too complex");
+ return error_mark_node;
+ }
+ sorry ("MI pointer to member conversion too complex");
+ return error_mark_node;
+}
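+
+/* Conceptually (a hypothetical sketch -- build_thunk above only
+   issues the sorry () diagnostic), a thunk for delta D would compile
+   to something like
+
+	thunk:	this += D;
+		goto real_function;
+
+   i.e. a stub that fixes up `this' and jumps to the real code.  */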
+
+/* Convert a `pointer to member' (a POINTER_TYPE whose target is a
+   METHOD_TYPE) into another `pointer to method'.  This may involve
+   the creation of a thunk to handle the `this' offset calculation.  */
+static tree
+convert_fn_ptr (type, expr)
+ tree type, expr;
+{
+ tree binfo = get_binfo (TYPE_METHOD_BASETYPE (TREE_TYPE (TREE_TYPE (expr))),
+ TYPE_METHOD_BASETYPE (TREE_TYPE (type)),
+ 1);
+ if (binfo == error_mark_node)
+ {
+ error (" in pointer to member conversion");
+ return error_mark_node;
+ }
+ if (binfo == NULL_TREE)
+ {
+ /* ARM 4.8 restriction. */
+ error ("invalid pointer to member conversion");
+ return error_mark_node;
+ }
+ if (BINFO_OFFSET_ZEROP (binfo))
+ return build1 (NOP_EXPR, type, expr);
+ return build1 (NOP_EXPR, type, build_thunk (BINFO_OFFSET (binfo), expr));
+}
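+
+/* A hypothetical example of the case convert_fn_ptr handles:
+
+	struct A { int f (); };
+	struct B { int g (); };
+	struct C : public A, public B { };
+
+	int (C::*p) () = &B::g;
+
+   When B sits at a non-zero offset within C, converting from
+   `int (B::*)()' to `int (C::*)()' needs a thunk to adjust `this';
+   with a zero offset the NOP_EXPR above suffices.  */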
+
+/* if converting pointer to pointer
+ if dealing with classes, check for derived->base or vice versa
+ else if dealing with method pointers, delegate
+ else convert blindly
+ else if converting class, pass off to build_type_conversion
+ else try C-style pointer conversion */
+static tree
+cp_convert_to_pointer (type, expr)
+ tree type, expr;
+{
+ register tree intype = TREE_TYPE (expr);
+ register enum tree_code form = TREE_CODE (intype);
+
+ if (form == POINTER_TYPE)
+ {
+ intype = TYPE_MAIN_VARIANT (intype);
+
+ if (TYPE_MAIN_VARIANT (type) != intype
+ && TREE_CODE (TREE_TYPE (type)) == RECORD_TYPE
+ && TREE_CODE (TREE_TYPE (intype)) == RECORD_TYPE)
+ {
+ enum tree_code code = PLUS_EXPR;
+ tree binfo = get_binfo (TREE_TYPE (type), TREE_TYPE (intype), 1);
+ if (binfo == error_mark_node)
+ return error_mark_node;
+ if (binfo == NULL_TREE)
+ {
+ binfo = get_binfo (TREE_TYPE (intype), TREE_TYPE (type), 1);
+ if (binfo == error_mark_node)
+ return error_mark_node;
+ code = MINUS_EXPR;
+ }
+ if (binfo)
+ {
+ if (TYPE_USES_VIRTUAL_BASECLASSES (TREE_TYPE (type))
+ || TYPE_USES_VIRTUAL_BASECLASSES (TREE_TYPE (intype))
+ || ! BINFO_OFFSET_ZEROP (binfo))
+ {
+ /* Need to get the path we took. */
+ tree path;
+
+ if (code == PLUS_EXPR)
+ get_base_distance (TREE_TYPE (type), TREE_TYPE (intype), 0, &path);
+ else
+ get_base_distance (TREE_TYPE (intype), TREE_TYPE (type), 0, &path);
+ return build_vbase_path (code, type, expr, path, 0);
+ }
+ }
+ }
+ if (TYPE_MAIN_VARIANT (type) != intype
+ && TREE_CODE (TREE_TYPE (type)) == METHOD_TYPE
+ && TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (intype)) == METHOD_TYPE)
+ return convert_fn_ptr (type, expr);
+
+ return build1 (NOP_EXPR, type, expr);
+ }
+
+ my_friendly_assert (form != OFFSET_TYPE, 186);
+
+ if (TYPE_LANG_SPECIFIC (intype)
+ && (IS_SIGNATURE_POINTER (intype) || IS_SIGNATURE_REFERENCE (intype)))
+ return convert_to_pointer (type, build_optr_ref (expr));
+
+ if (IS_AGGR_TYPE (intype))
+ {
+ tree rval;
+ rval = build_type_conversion (CONVERT_EXPR, type, expr, 1);
+ if (rval)
+ {
+ if (rval == error_mark_node)
+ cp_error ("conversion of `%E' from `%T' to `%T' is ambiguous",
+ expr, intype, type);
+ return rval;
+ }
+ }
+
+ if (integer_zerop (expr))
+ {
+ if (type == TREE_TYPE (null_pointer_node))
+ return null_pointer_node;
+ expr = build_int_2 (0, 0);
+ TREE_TYPE (expr) = type;
+ return expr;
+ }
+
+ if (INTEGRAL_CODE_P (form))
+ {
+ if (type_precision (intype) == POINTER_SIZE)
+ return build1 (CONVERT_EXPR, type, expr);
+ expr = convert (type_for_size (POINTER_SIZE, 0), expr);
+ /* Modes may be different but sizes should be the same. */
+ if (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (expr)))
+ != GET_MODE_SIZE (TYPE_MODE (type)))
+ /* There is supposed to be some integral type
+ that is the same width as a pointer. */
+ abort ();
+ return convert_to_pointer (type, expr);
+ }
+
+ cp_error ("cannot convert `%E' from type `%T' to type `%T'",
+ expr, intype, type);
+ return error_mark_node;
+}
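+
+/* For instance (hypothetical): with `struct B : public A { };' and
+   `B *bp;', the cast `(A *) bp' takes the derived-to-base branch
+   above (code PLUS_EXPR), while `(B *) ap' for an `A *ap' takes the
+   base-to-derived branch (MINUS_EXPR); pointers to unrelated class
+   types fall through to the plain NOP_EXPR case.  */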
+
+/* Like cp_convert_to_pointer, except permit conversions to take place
+   which are not normally allowed due to access restrictions
+   (such as conversion from sub-type to private super-type).  */
+static tree
+convert_to_pointer_force (type, expr)
+ tree type, expr;
+{
+ register tree intype = TREE_TYPE (expr);
+ register enum tree_code form = TREE_CODE (intype);
+
+ if (integer_zerop (expr))
+ {
+ if (type == TREE_TYPE (null_pointer_node))
+ return null_pointer_node;
+ expr = build_int_2 (0, 0);
+ TREE_TYPE (expr) = type;
+ return expr;
+ }
+
+ /* Convert signature pointer/reference to `void *' first. */
+ if (form == RECORD_TYPE
+ && (IS_SIGNATURE_POINTER (intype) || IS_SIGNATURE_REFERENCE (intype)))
+ {
+ expr = build_optr_ref (expr);
+ intype = TREE_TYPE (expr);
+ form = TREE_CODE (intype);
+ }
+
+ if (form == POINTER_TYPE)
+ {
+ intype = TYPE_MAIN_VARIANT (intype);
+
+ if (TYPE_MAIN_VARIANT (type) != intype
+ && TREE_CODE (TREE_TYPE (type)) == RECORD_TYPE
+ && TREE_CODE (TREE_TYPE (intype)) == RECORD_TYPE)
+ {
+ enum tree_code code = PLUS_EXPR;
+ tree path;
+ int distance = get_base_distance (TREE_TYPE (type),
+ TREE_TYPE (intype), 0, &path);
+ if (distance == -2)
+ {
+ ambig:
+ cp_error ("type `%T' is ambiguous baseclass of `%s'", TREE_TYPE (type),
+ TYPE_NAME_STRING (TREE_TYPE (intype)));
+ return error_mark_node;
+ }
+ if (distance == -1)
+ {
+ distance = get_base_distance (TREE_TYPE (intype),
+ TREE_TYPE (type), 0, &path);
+ if (distance == -2)
+ goto ambig;
+ if (distance < 0)
+ /* Doesn't need any special help from us. */
+ return build1 (NOP_EXPR, type, expr);
+
+ code = MINUS_EXPR;
+ }
+ return build_vbase_path (code, type, expr, path, 0);
+ }
+ return build1 (NOP_EXPR, type, expr);
+ }
+
+ return cp_convert_to_pointer (type, expr);
+}
+
+/* We are passing something to a function which requires a reference.
+   The type we are interested in is in TYPE.  The initial
+   value we have to begin with is in ARG.
+
+   FLAGS controls how we manage access checking.
+   CHECKCONST controls whether we report error messages on const subversion.  */
+static tree
+build_up_reference (type, arg, flags, checkconst)
+ tree type, arg;
+ int flags, checkconst;
+{
+ tree rval, targ;
+ int literal_flag = 0;
+ tree argtype = TREE_TYPE (arg);
+ tree target_type = TREE_TYPE (type);
+ tree binfo = NULL_TREE;
+
+ my_friendly_assert (TREE_CODE (type) == REFERENCE_TYPE, 187);
+ if ((flags & LOOKUP_PROTECT)
+ && TYPE_MAIN_VARIANT (argtype) != TYPE_MAIN_VARIANT (target_type)
+ && IS_AGGR_TYPE (argtype)
+ && IS_AGGR_TYPE (target_type))
+ {
+ binfo = get_binfo (target_type, argtype, 1);
+ if (binfo == error_mark_node)
+ return error_mark_node;
+ if (binfo == NULL_TREE)
+ return error_not_base_type (target_type, argtype);
+ }
+
+ /* Pass along const and volatile down into the type. */
+ if (TYPE_READONLY (type) || TYPE_VOLATILE (type))
+ target_type = c_build_type_variant (target_type, TYPE_READONLY (type),
+ TYPE_VOLATILE (type));
+ targ = arg;
+ if (TREE_CODE (targ) == SAVE_EXPR)
+ targ = TREE_OPERAND (targ, 0);
+
+ switch (TREE_CODE (targ))
+ {
+ case INDIRECT_REF:
+ /* This is a call to a constructor which did not know what it was
+ initializing until now: it needs to initialize a temporary. */
+ if (TREE_HAS_CONSTRUCTOR (targ))
+ {
+ tree temp = build_cplus_new (argtype, TREE_OPERAND (targ, 0), 1);
+ TREE_HAS_CONSTRUCTOR (targ) = 0;
+ return build_up_reference (type, temp, flags, 1);
+ }
+ /* Let &* cancel out to simplify resulting code.
+ Also, throw away intervening NOP_EXPRs. */
+ arg = TREE_OPERAND (targ, 0);
+ if (TREE_CODE (arg) == NOP_EXPR || TREE_CODE (arg) == NON_LVALUE_EXPR
+ || (TREE_CODE (arg) == CONVERT_EXPR && TREE_REFERENCE_EXPR (arg)))
+ arg = TREE_OPERAND (arg, 0);
+
+      /* In doing a &*, we have to get rid of the const'ness on the
+	 pointer value.  Volatile hasn't been thought about here;
+	 neither have qualifiers on the pointer itself.  */
+ if (TREE_READONLY (arg))
+ {
+ arg = copy_node (arg);
+ TREE_READONLY (arg) = 0;
+ }
+
+ rval = build1 (CONVERT_EXPR, type, arg);
+ TREE_REFERENCE_EXPR (rval) = 1;
+
+ /* propagate the const flag on something like:
+
+ class Base {
+ public:
+ int foo;
+ };
+
+ class Derived : public Base {
+ public:
+ int bar;
+ };
+
+ void func(Base&);
+
+ void func2(const Derived& d) {
+ func(d);
+ }
+
+ on the d parameter. The below could have been avoided, if the flags
+ were down in the tree, not sure why they are not. (mrs) */
+ /* The below code may have to be propagated to other parts of this
+ switch. */
+ if (TREE_READONLY (targ) && !TREE_READONLY (arg)
+ && (TREE_CODE (arg) == PARM_DECL || TREE_CODE (arg) == VAR_DECL)
+ && TREE_CODE (TREE_TYPE (arg)) == REFERENCE_TYPE
+ && (TYPE_READONLY (target_type) && checkconst))
+ {
+ arg = copy_node (arg);
+ TREE_READONLY (arg) = TREE_READONLY (targ);
+ }
+ literal_flag = TREE_CONSTANT (arg);
+
+ goto done;
+
+    /* Get this out of a register if we happened to be in one by accident.
+       Also, build up references to non-lvalues if we must. */
+ /* For &x[y], return (&) x+y */
+ case ARRAY_REF:
+ if (mark_addressable (TREE_OPERAND (targ, 0)) == 0)
+ return error_mark_node;
+ rval = build_binary_op (PLUS_EXPR, TREE_OPERAND (targ, 0),
+ TREE_OPERAND (targ, 1), 1);
+ TREE_TYPE (rval) = type;
+ if (TREE_CONSTANT (TREE_OPERAND (targ, 1))
+ && staticp (TREE_OPERAND (targ, 0)))
+ TREE_CONSTANT (rval) = 1;
+ goto done;
+
+ case SCOPE_REF:
+ /* Could be a reference to a static member. */
+ {
+ tree field = TREE_OPERAND (targ, 1);
+ if (TREE_STATIC (field))
+ {
+ rval = build1 (ADDR_EXPR, type, field);
+ literal_flag = 1;
+ goto done;
+ }
+ }
+
+ /* We should have farmed out member pointers above. */
+ my_friendly_abort (188);
+
+ case COMPONENT_REF:
+ rval = build_component_addr (targ, build_pointer_type (argtype),
+ "attempt to make a reference to bit-field structure member `%s'");
+ TREE_TYPE (rval) = type;
+ literal_flag = staticp (TREE_OPERAND (targ, 0));
+
+ goto done;
+
+    /* Anything not already handled and not a true memory reference
+       needs to have a reference built up.  Do so silently for
+       things like integers and return values from functions,
+       but complain if we need a reference to something declared
+       as `register'. */
+
+ case RESULT_DECL:
+ if (staticp (targ))
+ literal_flag = 1;
+ TREE_ADDRESSABLE (targ) = 1;
+ put_var_into_stack (targ);
+ break;
+
+ case PARM_DECL:
+ if (targ == current_class_decl)
+ {
+ error ("address of `this' not available");
+#if 0
+	  /* If the code below is used, the following test case makes
+	     the compiler core dump on a sun4.
+
+ class e_decl;
+ class a_decl;
+ typedef a_decl* a_ref;
+
+ class a_s {
+ public:
+ a_s();
+ void* append(a_ref& item);
+ };
+ class a_decl {
+ public:
+ a_decl (e_decl *parent);
+ a_s generic_s;
+ a_s decls;
+ e_decl* parent;
+ };
+
+ class e_decl {
+ public:
+ e_decl();
+ a_s implementations;
+ };
+
+ void foobar(void *);
+
+ a_decl::a_decl(e_decl *parent) {
+ parent->implementations.append(this);
+ }
+ */
+
+ TREE_ADDRESSABLE (targ) = 1; /* so compiler doesn't die later */
+ put_var_into_stack (targ);
+ break;
+#else
+ return error_mark_node;
+#endif
+ }
+ /* Fall through. */
+ case VAR_DECL:
+ case CONST_DECL:
+ if (DECL_REGISTER (targ) && !TREE_ADDRESSABLE (targ))
+ warning ("address needed to build reference for `%s', which is declared `register'",
+ IDENTIFIER_POINTER (DECL_NAME (targ)));
+ else if (staticp (targ))
+ literal_flag = 1;
+
+ TREE_ADDRESSABLE (targ) = 1;
+ put_var_into_stack (targ);
+ break;
+
+ case COMPOUND_EXPR:
+ {
+ tree real_reference = build_up_reference (type, TREE_OPERAND (targ, 1),
+ LOOKUP_PROTECT, checkconst);
+ rval = build (COMPOUND_EXPR, type, TREE_OPERAND (targ, 0), real_reference);
+ TREE_CONSTANT (rval) = staticp (TREE_OPERAND (targ, 1));
+ return rval;
+ }
+
+ case MODIFY_EXPR:
+ case INIT_EXPR:
+ {
+ tree real_reference = build_up_reference (type, TREE_OPERAND (targ, 0),
+ LOOKUP_PROTECT, checkconst);
+ rval = build (COMPOUND_EXPR, type, arg, real_reference);
+ TREE_CONSTANT (rval) = staticp (TREE_OPERAND (targ, 0));
+ return rval;
+ }
+
+ case COND_EXPR:
+ return build (COND_EXPR, type,
+ TREE_OPERAND (targ, 0),
+ build_up_reference (type, TREE_OPERAND (targ, 1),
+ LOOKUP_PROTECT, checkconst),
+ build_up_reference (type, TREE_OPERAND (targ, 2),
+ LOOKUP_PROTECT, checkconst));
+
+ case WITH_CLEANUP_EXPR:
+ return build (WITH_CLEANUP_EXPR, type,
+ build_up_reference (type, TREE_OPERAND (targ, 0),
+ LOOKUP_PROTECT, checkconst),
+ 0, TREE_OPERAND (targ, 2));
+
+ case BIND_EXPR:
+ arg = TREE_OPERAND (targ, 1);
+ if (arg == NULL_TREE)
+ {
+ compiler_error ("({ ... }) expression not expanded when needed for reference");
+ return error_mark_node;
+ }
+ rval = build1 (ADDR_EXPR, type, arg);
+ TREE_REFERENCE_EXPR (rval) = 1;
+ return rval;
+
+ default:
+ break;
+ }
+
+ if (TREE_ADDRESSABLE (targ) == 0)
+ {
+ tree temp;
+
+ if (TREE_CODE (targ) == CALL_EXPR && IS_AGGR_TYPE (argtype))
+ {
+ temp = build_cplus_new (argtype, targ, 1);
+ if (TREE_CODE (temp) == WITH_CLEANUP_EXPR)
+ rval = build (WITH_CLEANUP_EXPR, type,
+ build1 (ADDR_EXPR, type, TREE_OPERAND (temp, 0)),
+ 0, TREE_OPERAND (temp, 2));
+ else
+ rval = build1 (ADDR_EXPR, type, temp);
+ goto done;
+ }
+ else
+ {
+ temp = get_temp_name (argtype, 0);
+ if (global_bindings_p ())
+ {
+ /* Give this new temp some rtl and initialize it. */
+ DECL_INITIAL (temp) = targ;
+ TREE_STATIC (temp) = 1;
+ finish_decl (temp, targ, NULL_TREE, 0);
+ /* Do this after declaring it static. */
+ rval = build_unary_op (ADDR_EXPR, temp, 0);
+ TREE_TYPE (rval) = type;
+ literal_flag = TREE_CONSTANT (rval);
+ goto done;
+ }
+ else
+ {
+ rval = build_unary_op (ADDR_EXPR, temp, 0);
+ if (binfo && !BINFO_OFFSET_ZEROP (binfo))
+ rval = convert_pointer_to (target_type, rval);
+ else
+ TREE_TYPE (rval) = type;
+
+ temp = build (MODIFY_EXPR, argtype, temp, arg);
+ TREE_SIDE_EFFECTS (temp) = 1;
+ return build (COMPOUND_EXPR, type, temp, rval);
+ }
+ }
+ }
+ else
+ rval = build1 (ADDR_EXPR, type, arg);
+
+ done:
+ if (TYPE_USES_COMPLEX_INHERITANCE (argtype)
+ || TYPE_USES_COMPLEX_INHERITANCE (target_type))
+ {
+ TREE_TYPE (rval) = build_pointer_type (argtype);
+ if (flags & LOOKUP_PROTECT)
+ rval = convert_pointer_to (target_type, rval);
+ else
+ rval
+ = convert_to_pointer_force (build_pointer_type (target_type), rval);
+ TREE_TYPE (rval) = type;
+ }
+ TREE_CONSTANT (rval) = literal_flag;
+ return rval;
+}
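+
+/* A sketch of the non-addressable path above (hypothetical example):
+   for `void f (const int &);', the call `f (3)' supplies no lvalue
+   whose address can be taken, so a temporary is allocated, assigned
+   the value 3, and the reference is bound to the temporary's
+   address.  */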
+
+/* For C++: Only need to do one-level references, but cannot
+ get tripped up on signed/unsigned differences.
+
+ DECL is either NULL_TREE or the _DECL node for a reference that is being
+ initialized. It can be error_mark_node if we don't know the _DECL but
+ we know it's an initialization. */
+
+tree cp_convert PROTO((tree, tree, int, int));
+
+tree
+convert_to_reference (reftype, expr, convtype, flags, decl)
+ tree reftype, expr;
+ int convtype, flags;
+ tree decl;
+{
+ register tree type = TYPE_MAIN_VARIANT (TREE_TYPE (reftype));
+ register tree intype = TREE_TYPE (expr);
+ register enum tree_code form = TREE_CODE (intype);
+ tree rval = NULL_TREE;
+
+ if (form == REFERENCE_TYPE)
+ intype = TREE_TYPE (intype);
+ intype = TYPE_MAIN_VARIANT (intype);
+
+ if (((convtype & CONV_STATIC) && comptypes (type, intype, -1))
+ || ((convtype & CONV_IMPLICIT) && comptypes (type, intype, 0)))
+ {
+ if (flags & LOOKUP_COMPLAIN)
+ {
+ tree ttl = TREE_TYPE (reftype);
+ tree ttr;
+
+ if (form == REFERENCE_TYPE)
+ ttr = TREE_TYPE (TREE_TYPE (expr));
+ else
+ {
+ int r = TREE_READONLY (expr);
+ int v = TREE_THIS_VOLATILE (expr);
+ ttr = c_build_type_variant (TREE_TYPE (expr), r, v);
+ }
+
+ if (! lvalue_p (expr) &&
+ (decl == NULL_TREE || ! TYPE_READONLY (ttl)))
+ {
+ if (decl)
+ /* Ensure semantics of [dcl.init.ref] */
+ cp_pedwarn ("initialization of non-const `%T' from rvalue `%T'",
+ reftype, intype);
+ else
+ cp_pedwarn ("conversion to `%T' from rvalue `%T'",
+ reftype, intype);
+ }
+ else if (! (convtype & CONV_CONST))
+ {
+ if (! TYPE_READONLY (ttl) && TYPE_READONLY (ttr))
+ cp_pedwarn ("conversion from `%T' to `%T' discards const",
+ ttr, reftype);
+ else if (! TYPE_VOLATILE (ttl) && TYPE_VOLATILE (ttr))
+ cp_pedwarn ("conversion from `%T' to `%T' discards volatile",
+ ttr, reftype);
+ }
+ }
+
+ if (form == REFERENCE_TYPE)
+ {
+ rval = copy_node (expr);
+ TREE_TYPE (rval) = build_pointer_type (TREE_TYPE (TREE_TYPE (expr)));
+ rval = cp_convert (build_pointer_type (TREE_TYPE (reftype)), rval,
+ convtype, flags);
+ TREE_TYPE (rval) = reftype;
+ return rval;
+ }
+
+ return build_up_reference (reftype, expr, flags,
+ ! (convtype & CONV_CONST));
+ }
+
+ if ((convtype & CONV_IMPLICIT)
+ && IS_AGGR_TYPE (intype)
+ && ! (flags & LOOKUP_NO_CONVERSION)
+ && (rval = build_type_conversion (CONVERT_EXPR, reftype, expr, 1)))
+ {
+ if (rval == error_mark_node)
+ cp_error ("conversion from `%T' to `%T' is ambiguous",
+ intype, reftype);
+ return rval;
+ }
+ else if ((convtype & CONV_REINTERPRET) && lvalue_p (expr))
+ {
+      /* When casting an lvalue to a reference type, just convert into
+	 a pointer to the new type and dereference it.  This is allowed
+	 by San Diego WP section 5.2.9 paragraph 12, though perhaps it
+	 should be done directly (jason).  (int &)ri ---> *(int*)&ri */
+
+ /* B* bp; A& ar = (A&)bp; is legal, but it's probably not what they
+ meant. */
+ if (form == POINTER_TYPE
+ && (comptypes (TREE_TYPE (intype), type, -1)))
+ cp_warning ("casting `%T' to `%T' does not dereference pointer",
+ intype, reftype);
+
+ rval = build_unary_op (ADDR_EXPR, expr, 0);
+ if (rval != error_mark_node)
+ rval = convert_force (build_pointer_type (TREE_TYPE (reftype)), rval);
+ if (rval != error_mark_node)
+ rval = build1 (NOP_EXPR, reftype, rval);
+ }
+ else if (decl)
+ {
+ tree rval_as_conversion = NULL_TREE;
+ tree rval_as_ctor = NULL_TREE;
+
+ if (IS_AGGR_TYPE (intype)
+ && (rval = build_type_conversion (CONVERT_EXPR, type, expr, 1)))
+ {
+ if (rval == error_mark_node)
+ return rval;
+
+ rval_as_conversion = build_up_reference (reftype, rval, flags, 1);
+ }
+
+ /* Definitely need to go through a constructor here. */
+ if (TYPE_HAS_CONSTRUCTOR (type)
+ && ! CLASSTYPE_ABSTRACT_VIRTUALS (type)
+ && (rval = build_method_call
+ (NULL_TREE, constructor_name_full (type),
+ build_tree_list (NULL_TREE, expr), TYPE_BINFO (type),
+ LOOKUP_NO_CONVERSION|LOOKUP_SPECULATIVELY)))
+ {
+ tree init;
+
+ if (global_bindings_p ())
+ {
+ extern tree static_aggregates;
+ tree t = get_temp_name (type, global_bindings_p ());
+ init = build_method_call (t, constructor_name_full (type),
+ build_tree_list (NULL_TREE, expr),
+ TYPE_BINFO (type),
+ LOOKUP_NORMAL|LOOKUP_NO_CONVERSION);
+
+ if (init == error_mark_node)
+ return error_mark_node;
+
+ make_decl_rtl (t, NULL_PTR, 1);
+ static_aggregates = perm_tree_cons (expr, t, static_aggregates);
+ rval = build_unary_op (ADDR_EXPR, t, 0);
+ }
+ else
+ {
+ init = build_method_call (NULL_TREE, constructor_name_full (type),
+ build_tree_list (NULL_TREE, expr),
+ TYPE_BINFO (type),
+ LOOKUP_NORMAL|LOOKUP_NO_CONVERSION);
+
+ if (init == error_mark_node)
+ return error_mark_node;
+
+ rval = build_cplus_new (type, init, 1);
+ rval = build_up_reference (reftype, rval, flags, 1);
+ }
+ rval_as_ctor = rval;
+ }
+
+ if (rval_as_ctor && rval_as_conversion)
+ {
+ cp_error ("ambiguous conversion from `%T' to `%T'; both user-defined conversion and constructor apply",
+ intype, reftype);
+ return error_mark_node;
+ }
+ else if (rval_as_ctor)
+ rval = rval_as_ctor;
+ else if (rval_as_conversion)
+ rval = rval_as_conversion;
+ else if (! IS_AGGR_TYPE (type) && ! IS_AGGR_TYPE (intype))
+ {
+ rval = convert (type, expr);
+ if (rval == error_mark_node)
+ return error_mark_node;
+
+ rval = build_up_reference (reftype, rval, flags, 1);
+ }
+
+ if (rval && ! TYPE_READONLY (TREE_TYPE (reftype)))
+ cp_pedwarn ("initializing non-const `%T' with `%T' will use a temporary",
+ reftype, intype);
+ }
+
+ if (rval)
+ {
+ /* If we found a way to convert earlier, then use it. */
+ return rval;
+ }
+
+ my_friendly_assert (form != OFFSET_TYPE, 189);
+
+ if (flags & LOOKUP_SPECULATIVELY)
+ return NULL_TREE;
+
+ else if (flags & LOOKUP_COMPLAIN)
+ cp_error ("cannot convert type `%T' to type `%T'", intype, reftype);
+
+ return error_mark_node;
+}
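+
+/* Hypothetical examples of the diagnostics above: `int &r = 3;' draws
+   the "initialization of non-const ... from rvalue" pedwarn, while
+   `const int &r = 3;' is accepted and binds to a temporary built by
+   build_up_reference.  */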
+
+/* We are using a reference VAL for its value. Bash that reference all the
+ way down to its lowest form. */
+tree
+convert_from_reference (val)
+ tree val;
+{
+ tree type = TREE_TYPE (val);
+
+ if (TREE_CODE (type) == OFFSET_TYPE)
+ type = TREE_TYPE (type);
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ {
+ tree target_type = TREE_TYPE (type);
+ tree nval;
+
+ /* This can happen if we cast to a reference type. */
+ if (TREE_CODE (val) == ADDR_EXPR)
+ {
+ nval = build1 (NOP_EXPR, build_pointer_type (target_type), val);
+ nval = build_indirect_ref (nval, NULL_PTR);
+	  /* The below was missing; are other important flags missing too? */
+ TREE_SIDE_EFFECTS (nval) = TREE_SIDE_EFFECTS (val);
+ return nval;
+ }
+
+ nval = build1 (INDIRECT_REF, target_type, val);
+
+ TREE_THIS_VOLATILE (nval) = TYPE_VOLATILE (target_type);
+ TREE_SIDE_EFFECTS (nval) = TYPE_VOLATILE (target_type);
+ TREE_READONLY (nval) = TYPE_READONLY (target_type);
+      /* The below was missing; are other important flags missing too? */
+ TREE_SIDE_EFFECTS (nval) |= TREE_SIDE_EFFECTS (val);
+ return nval;
+ }
+ return val;
+}
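+
+/* For instance (hypothetical): given `int i; int &ri = i;', using
+   `ri' in `ri + 1' requires the reference's value, so it is wrapped
+   in an INDIRECT_REF of type `int' before the arithmetic is done.  */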
+
+/* See if there is a constructor of type TYPE which will convert
+ EXPR. The reference manual seems to suggest (8.5.6) that we need
+ not worry about finding constructors for base classes, then converting
+ to the derived class.
+
+ MSGP is a pointer to a message that would be an appropriate error
+ string. If MSGP is NULL, then we are not interested in reporting
+ errors. */
+tree
+convert_to_aggr (type, expr, msgp, protect)
+ tree type, expr;
+ char **msgp;
+ int protect;
+{
+ tree basetype = type;
+ tree name = TYPE_IDENTIFIER (basetype);
+ tree function, fndecl, fntype, parmtypes, parmlist, result;
+ tree method_name;
+ enum access_type access;
+ int can_be_private, can_be_protected;
+
+ if (! TYPE_HAS_CONSTRUCTOR (basetype))
+ {
+ if (msgp)
+ *msgp = "type `%s' does not have a constructor";
+ return error_mark_node;
+ }
+
+ access = access_public;
+ can_be_private = 0;
+ can_be_protected = IDENTIFIER_CLASS_VALUE (name) || name == current_class_name;
+
+ parmlist = build_tree_list (NULL_TREE, expr);
+ parmtypes = tree_cons (NULL_TREE, TREE_TYPE (expr), void_list_node);
+
+ if (TYPE_USES_VIRTUAL_BASECLASSES (basetype))
+ {
+ parmtypes = tree_cons (NULL_TREE, integer_type_node, parmtypes);
+ parmlist = tree_cons (NULL_TREE, integer_one_node, parmlist);
+ }
+
+ /* The type of the first argument will be filled in inside the loop. */
+ parmlist = tree_cons (NULL_TREE, integer_zero_node, parmlist);
+ parmtypes = tree_cons (NULL_TREE, TYPE_POINTER_TO (basetype), parmtypes);
+
+ method_name = build_decl_overload (name, parmtypes, 1);
+
+ /* constructors are up front. */
+ fndecl = TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (basetype), 0);
+ if (TYPE_HAS_DESTRUCTOR (basetype))
+ fndecl = DECL_CHAIN (fndecl);
+
+ while (fndecl)
+ {
+ if (DECL_ASSEMBLER_NAME (fndecl) == method_name)
+ {
+ function = fndecl;
+ if (protect)
+ {
+ if (TREE_PRIVATE (fndecl))
+ {
+ can_be_private =
+ (basetype == current_class_type
+ || is_friend (basetype, current_function_decl)
+ || purpose_member (basetype, DECL_ACCESS (fndecl)));
+ if (! can_be_private)
+ goto found;
+ }
+ else if (TREE_PROTECTED (fndecl))
+ {
+ if (! can_be_protected)
+ goto found;
+ }
+ }
+ goto found_and_ok;
+ }
+ fndecl = DECL_CHAIN (fndecl);
+ }
+
+ /* No exact conversion was found. See if an approximate
+ one will do. */
+ fndecl = TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (basetype), 0);
+ if (TYPE_HAS_DESTRUCTOR (basetype))
+ fndecl = DECL_CHAIN (fndecl);
+
+ {
+ int saw_private = 0;
+ int saw_protected = 0;
+ struct candidate *candidates =
+ (struct candidate *) alloca ((decl_list_length (fndecl)+1) * sizeof (struct candidate));
+ struct candidate *cp = candidates;
+
+ while (fndecl)
+ {
+ function = fndecl;
+ cp->h_len = 2;
+ cp->harshness = (struct harshness_code *)
+ alloca (3 * sizeof (struct harshness_code));
+
+ compute_conversion_costs (fndecl, parmlist, cp, 2);
+ if ((cp->h.code & EVIL_CODE) == 0)
+ {
+ cp->u.field = fndecl;
+ if (protect)
+ {
+ if (TREE_PRIVATE (fndecl))
+ access = access_private;
+ else if (TREE_PROTECTED (fndecl))
+ access = access_protected;
+ else
+ access = access_public;
+ }
+ else
+ access = access_public;
+
+ if (access == access_private
+ ? (basetype == current_class_type
+ || is_friend (basetype, cp->function)
+ || purpose_member (basetype, DECL_ACCESS (fndecl)))
+ : access == access_protected
+ ? (can_be_protected
+ || purpose_member (basetype, DECL_ACCESS (fndecl)))
+ : 1)
+ {
+ if (cp->h.code <= TRIVIAL_CODE)
+ goto found_and_ok;
+ cp++;
+ }
+ else
+ {
+ if (access == access_private)
+ saw_private = 1;
+ else
+ saw_protected = 1;
+ }
+ }
+ fndecl = DECL_CHAIN (fndecl);
+ }
+ if (cp - candidates)
+ {
+ /* Rank from worst to best. Then cp will point to best one.
+ Private fields have their bits flipped. For unsigned
+ numbers, this should make them look very large.
+ If the best alternate has a (signed) negative value,
+ then all we ever saw were private members. */
+ if (cp - candidates > 1)
+ qsort (candidates, /* char *base */
+ cp - candidates, /* int nel */
+ sizeof (struct candidate), /* int width */
+ rank_for_overload); /* int (*compar)() */
+
+ --cp;
+ if (cp->h.code & EVIL_CODE)
+ {
+ if (msgp)
+ *msgp = "ambiguous type conversion possible for `%s'";
+ return error_mark_node;
+ }
+
+ function = cp->function;
+ fndecl = cp->u.field;
+ goto found_and_ok;
+ }
+ else if (msgp)
+ {
+      if (saw_private)
+	{
+	  if (saw_protected)
+	    *msgp = "only private and protected conversions apply";
+	  else
+	    *msgp = "only private conversions apply";
+	}
+ else if (saw_protected)
+ *msgp = "only protected conversions apply";
+ else
+ *msgp = "no appropriate conversion to type `%s'";
+ }
+ return error_mark_node;
+ }
+ /* NOTREACHED */
+
+ found:
+ if (access == access_private)
+ if (! can_be_private)
+ {
+ if (msgp)
+ *msgp = TREE_PRIVATE (fndecl)
+ ? "conversion to type `%s' is private"
+ : "conversion to type `%s' is from private base class";
+ return error_mark_node;
+ }
+ if (access == access_protected)
+ if (! can_be_protected)
+ {
+ if (msgp)
+ *msgp = TREE_PRIVATE (fndecl)
+ ? "conversion to type `%s' is protected"
+ : "conversion to type `%s' is from protected base class";
+ return error_mark_node;
+ }
+ function = fndecl;
+ found_and_ok:
+
+ /* It will convert, but we don't do anything about it yet. */
+ if (msgp == 0)
+ return NULL_TREE;
+
+ fntype = TREE_TYPE (function);
+ if (DECL_INLINE (function) && TREE_CODE (function) == FUNCTION_DECL)
+ function = build1 (ADDR_EXPR, build_pointer_type (fntype), function);
+ else
+ function = default_conversion (function);
+
+ result = build_nt (CALL_EXPR, function,
+ convert_arguments (NULL_TREE, TYPE_ARG_TYPES (fntype),
+ parmlist, NULL_TREE, LOOKUP_NORMAL),
+ NULL_TREE);
+ TREE_TYPE (result) = TREE_TYPE (fntype);
+ TREE_SIDE_EFFECTS (result) = 1;
+ TREE_RAISES (result) = !! TYPE_RAISES_EXCEPTIONS (fntype);
+ return result;
+}
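+
+/* A hypothetical use of convert_to_aggr: given
+
+	struct T { T (int); };
+
+   converting the expression `5' to type T selects the constructor
+   T::T(int); the CALL_EXPR built above (when MSGP is supplied)
+   represents that constructor call.  */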
+
+/* Call this when we know (for any reason) that expr is not, in fact,
+   zero.  This routine is like convert_pointer_to, but it pays
+   attention to which specific instance of what type we want to
+   convert to.  This routine should eventually be renamed
+   convert_to_pointer, once all references to the old
+   convert_to_pointer are removed.  */
+tree
+convert_pointer_to_real (binfo, expr)
+ tree binfo, expr;
+{
+ register tree intype = TREE_TYPE (expr);
+ tree ptr_type;
+ tree type, rval;
+
+ if (TREE_CODE (binfo) == TREE_VEC)
+ type = BINFO_TYPE (binfo);
+ else if (IS_AGGR_TYPE (binfo))
+ {
+ type = binfo;
+ }
+ else
+ {
+ type = binfo;
+ binfo = NULL_TREE;
+ }
+
+ ptr_type = build_pointer_type (type);
+ if (ptr_type == TYPE_MAIN_VARIANT (intype))
+ return expr;
+
+ if (intype == error_mark_node)
+ return error_mark_node;
+
+ my_friendly_assert (!integer_zerop (expr), 191);
+
+ if (TREE_CODE (type) == RECORD_TYPE
+ && TREE_CODE (TREE_TYPE (intype)) == RECORD_TYPE
+ && type != TYPE_MAIN_VARIANT (TREE_TYPE (intype)))
+ {
+ tree path;
+ int distance
+ = get_base_distance (binfo, TYPE_MAIN_VARIANT (TREE_TYPE (intype)),
+ 0, &path);
+
+      /* This function shouldn't be called with unqualified arguments,
+	 but if it is, give the user an error message that they can read.  */
+ if (distance < 0)
+ {
+ cp_error ("cannot convert a pointer of type `%T' to a pointer of type `%T'",
+ TREE_TYPE (intype), type);
+
+ if (distance == -2)
+ cp_error ("because `%T' is an ambiguous base class", type);
+ return error_mark_node;
+ }
+
+ return build_vbase_path (PLUS_EXPR, ptr_type, expr, path, 1);
+ }
+ rval = build1 (NOP_EXPR, ptr_type,
+ TREE_CODE (expr) == NOP_EXPR ? TREE_OPERAND (expr, 0) : expr);
+ TREE_CONSTANT (rval) = TREE_CONSTANT (expr);
+ return rval;
+}
+
+/* Call this when we know (for any reason) that expr is
+   not, in fact, zero.  This routine gets a type out of the first
+   argument and uses it to search for the type to convert to.  If there
+   is more than one instance of that type in the expr, the conversion is
+   ambiguous.  This routine should eventually go away, and all
+   callers should use convert_pointer_to_real.  */
+tree
+convert_pointer_to (binfo, expr)
+ tree binfo, expr;
+{
+ tree type;
+
+  if (TREE_CODE (binfo) == TREE_VEC)
+    type = BINFO_TYPE (binfo);
+  else
+    /* Aggregate and non-aggregate types are treated alike here.  */
+    type = binfo;
+ return convert_pointer_to_real (type, expr);
+}
+
+/* Same as above, but don't abort if we get an "ambiguous" baseclass.
+ There's only one virtual baseclass we are looking for, and once
+ we find one such virtual baseclass, we have found them all. */
+
+tree
+convert_pointer_to_vbase (binfo, expr)
+ tree binfo;
+ tree expr;
+{
+ tree intype = TREE_TYPE (TREE_TYPE (expr));
+ tree binfos = TYPE_BINFO_BASETYPES (intype);
+ int i;
+
+ for (i = TREE_VEC_LENGTH (binfos)-1; i >= 0; i--)
+ {
+ tree basetype = BINFO_TYPE (TREE_VEC_ELT (binfos, i));
+ if (BINFO_TYPE (binfo) == basetype)
+ return convert_pointer_to (binfo, expr);
+ if (binfo_member (BINFO_TYPE (binfo), CLASSTYPE_VBASECLASSES (basetype)))
+ return convert_pointer_to_vbase (binfo, convert_pointer_to (basetype, expr));
+ }
+ my_friendly_abort (6);
+ /* NOTREACHED */
+ return NULL_TREE;
+}
+
+tree
+cp_convert (type, expr, convtype, flags)
+ tree type, expr;
+ int convtype, flags;
+{
+ register tree e = expr;
+ register enum tree_code code = TREE_CODE (type);
+
+ if (type == TREE_TYPE (e)
+ || TREE_CODE (e) == ERROR_MARK)
+ return e;
+ if (TREE_CODE (TREE_TYPE (e)) == ERROR_MARK)
+ return error_mark_node;
+
+ /* Trivial conversion: cv-qualifiers do not matter on rvalues. */
+ if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (e)))
+ return fold (build1 (NOP_EXPR, type, e));
+
+ if (code == VOID_TYPE && (convtype & CONV_STATIC))
+ return build1 (CONVERT_EXPR, type, e);
+
+#if 0
+ /* This is incorrect. A truncation can't be stripped this way.
+ Extensions will be stripped by the use of get_unwidened. */
+ if (TREE_CODE (e) == NOP_EXPR)
+ return convert (type, TREE_OPERAND (e, 0));
+#endif
+
+ /* Just convert to the type of the member. */
+ if (code == OFFSET_TYPE)
+ {
+ type = TREE_TYPE (type);
+ code = TREE_CODE (type);
+ }
+
+ if (code == REFERENCE_TYPE)
+ return fold (convert_to_reference (type, e, convtype, flags, NULL_TREE));
+ else if (TREE_CODE (TREE_TYPE (e)) == REFERENCE_TYPE)
+ e = convert_from_reference (e);
+
+ if (TREE_READONLY_DECL_P (e))
+ e = decl_constant_value (e);
+
+ if (INTEGRAL_CODE_P (code))
+ {
+ tree intype = TREE_TYPE (e);
+ enum tree_code form = TREE_CODE (intype);
+ /* enum = enum, enum = int, enum = float are all errors. */
+ if (flag_int_enum_equivalence == 0
+ && TREE_CODE (type) == ENUMERAL_TYPE
+ && ARITHMETIC_TYPE_P (intype))
+ {
+ cp_pedwarn ("conversion from `%#T' to `%#T'", intype, type);
+
+ if (flag_pedantic_errors)
+ return error_mark_node;
+ }
+ if (form == OFFSET_TYPE)
+ cp_error_at ("pointer-to-member expression object not composed with type `%D' object",
+ TYPE_NAME (TYPE_OFFSET_BASETYPE (intype)));
+ else if (IS_AGGR_TYPE (intype))
+ {
+ tree rval;
+ rval = build_type_conversion (CONVERT_EXPR, type, e, 1);
+ if (rval) return rval;
+ if (code == BOOLEAN_TYPE)
+ cp_error ("`%#T' used where a `bool' was expected", intype);
+ else
+ cp_error ("`%#T' used where an `int' was expected", intype);
+ return error_mark_node;
+ }
+ if (code == BOOLEAN_TYPE)
+ {
+ tree newe = truthvalue_conversion (e);
+ /* Avoid stupid (infinite) recursion from backend. */
+ if (TREE_CODE (newe) != NOP_EXPR || e != TREE_OPERAND (newe, 0))
+ e = newe;
+ if (TREE_TYPE (e) == bool_type_node)
+ return e;
+ else if (TREE_CODE (e) == INTEGER_CST)
+ {
+ if (e == integer_zero_node)
+ e = false_node;
+ else
+ e = true_node;
+ }
+ else
+ return build1 (NOP_EXPR, bool_type_node, e);
+ }
+ return fold (convert_to_integer (type, e));
+ }
+ if (code == POINTER_TYPE)
+ return fold (cp_convert_to_pointer (type, e));
+ if (code == REAL_TYPE)
+ {
+ if (IS_AGGR_TYPE (TREE_TYPE (e)))
+ {
+ tree rval;
+ rval = build_type_conversion (CONVERT_EXPR, type, e, 1);
+ if (rval)
+ return rval;
+ else
+ cp_error ("`%#T' used where a floating point value was expected",
+ TREE_TYPE (e));
+ }
+ return fold (convert_to_real (type, e));
+ }
+
+ /* New C++ semantics: since assignment is now based on
+ memberwise copying, if the rhs type is derived from the
+ lhs type, then we may still do a conversion. */
+ if (IS_AGGR_TYPE_CODE (code))
+ {
+ tree dtype = TREE_TYPE (e);
+
+ if (TREE_CODE (dtype) == REFERENCE_TYPE)
+ {
+ e = convert_from_reference (e);
+ dtype = TREE_TYPE (e);
+ }
+ dtype = TYPE_MAIN_VARIANT (dtype);
+
+ /* Conversion of object pointers or signature pointers/references
+ to signature pointers/references. */
+
+ if (TYPE_LANG_SPECIFIC (type)
+ && (IS_SIGNATURE_POINTER (type) || IS_SIGNATURE_REFERENCE (type)))
+ {
+ tree constructor = build_signature_pointer_constructor (type, expr);
+ tree sig_ty = SIGNATURE_TYPE (type);
+ tree sig_ptr;
+
+ if (constructor == error_mark_node)
+ return error_mark_node;
+
+ sig_ptr = get_temp_name (type, 1);
+ DECL_INITIAL (sig_ptr) = constructor;
+ CLEAR_SIGNATURE (sig_ty);
+ finish_decl (sig_ptr, constructor, 0, 0);
+ SET_SIGNATURE (sig_ty);
+ TREE_READONLY (sig_ptr) = 1;
+
+ return sig_ptr;
+ }
+
+ /* Conversion between aggregate types. New C++ semantics allow
+ objects of derived type to be cast to objects of base type.
+ Old semantics only allowed this between pointers.
+
+ There may be some ambiguity between using a constructor
+ vs. using a type conversion operator when both apply. */
+
+ else if (IS_AGGR_TYPE (dtype))
+ {
+ tree binfo;
+
+ tree conversion;
+
+ if (! DERIVED_FROM_P (type, dtype) && TYPE_HAS_CONVERSION (dtype))
+ conversion = build_type_conversion (CONVERT_EXPR, type, e, 1);
+ else
+ conversion = NULL_TREE;
+
+ if (TYPE_HAS_CONSTRUCTOR (type))
+ {
+ tree rval = build_method_call (NULL_TREE, constructor_name_full (type),
+ build_tree_list (NULL_TREE, e),
+ TYPE_BINFO (type),
+ conversion ? LOOKUP_NO_CONVERSION : 0);
+
+ if (rval != error_mark_node)
+ {
+ if (conversion)
+ {
+ error ("both constructor and type conversion operator apply");
+ return error_mark_node;
+ }
+ /* call to constructor successful. */
+ rval = build_cplus_new (type, rval, 1);
+ return rval;
+ }
+ }
+ /* Type conversion successful/applies. */
+ if (conversion)
+ {
+ if (conversion == error_mark_node)
+ error ("ambiguous pointer conversion");
+ return conversion;
+ }
+
+ /* now try normal C++ assignment semantics. */
+ binfo = TYPE_BINFO (dtype);
+ if (BINFO_TYPE (binfo) == type
+ || (binfo = get_binfo (type, dtype, 1)))
+ {
+ if (binfo == error_mark_node)
+ return error_mark_node;
+ }
+ if (binfo != NULL_TREE)
+ {
+ if (lvalue_p (e))
+ {
+ e = build_unary_op (ADDR_EXPR, e, 0);
+
+ if (! BINFO_OFFSET_ZEROP (binfo))
+ e = build (PLUS_EXPR, TYPE_POINTER_TO (type),
+ e, BINFO_OFFSET (binfo));
+ return build1 (INDIRECT_REF, type, e);
+ }
+
+ sorry ("addressable aggregates");
+ return error_mark_node;
+ }
+ error ("conversion between incompatible aggregate types requested");
+ return error_mark_node;
+ }
+ /* conversion from non-aggregate to aggregate type requires
+ constructor. */
+ else if (TYPE_HAS_CONSTRUCTOR (type))
+ {
+ tree rval;
+ tree init = build_method_call (NULL_TREE, constructor_name_full (type),
+ build_tree_list (NULL_TREE, e),
+ TYPE_BINFO (type), LOOKUP_NORMAL);
+ if (init == error_mark_node)
+ {
+ cp_error ("in conversion to type `%T'", type);
+ return error_mark_node;
+ }
+ /* We can't pass 1 to the with_cleanup_p arg here, because that
+ screws up passing classes by value. */
+ rval = build_cplus_new (type, init, 0);
+ return rval;
+ }
+ }
+
+  /* If TYPE or TREE_TYPE (E) is not on the permanent_obstack,
+     then it won't be hashed and hence will compare as not equal,
+     even when it is.  */
+ if (code == ARRAY_TYPE
+ && TREE_TYPE (TREE_TYPE (e)) == TREE_TYPE (type)
+ && index_type_equal (TYPE_DOMAIN (TREE_TYPE (e)), TYPE_DOMAIN (type)))
+ return e;
+
+ cp_error ("conversion from `%T' to non-scalar type `%T' requested",
+ TREE_TYPE (expr), type);
+ return error_mark_node;
+}
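+
+/* As a sketch of the integral case above (hypothetical example):
+   converting an `int i' to `bool' goes through truthvalue_conversion,
+   so the result behaves like `i != 0' rather than being a raw
+   truncation of the integer value.  */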
+
+/* Create an expression whose value is that of EXPR,
+ converted to type TYPE. The TREE_TYPE of the value
+ is always TYPE. This function implements all reasonable
+ conversions; callers should filter out those that are
+ not permitted by the language being compiled. */
+
+tree
+convert (type, expr)
+ tree type, expr;
+{
+ return cp_convert (type, expr, CONV_OLD_CONVERT, 0);
+}
+
+/* Like convert, except permit conversions to take place which
+ are not normally allowed due to access restrictions
+ (such as conversion from sub-type to private super-type). */
+tree
+convert_force (type, expr)
+ tree type;
+ tree expr;
+{
+ register tree e = expr;
+ register enum tree_code code = TREE_CODE (type);
+
+ if (code == REFERENCE_TYPE)
+ return fold (convert_to_reference (type, e, CONV_C_CAST, LOOKUP_COMPLAIN,
+ NULL_TREE));
+ else if (TREE_CODE (TREE_TYPE (e)) == REFERENCE_TYPE)
+ e = convert_from_reference (e);
+
+ if (code == POINTER_TYPE)
+ return fold (convert_to_pointer_force (type, e));
+
+ /* From typeck.c convert_for_assignment */
+  if (((TREE_CODE (TREE_TYPE (e)) == POINTER_TYPE && TREE_CODE (e) == ADDR_EXPR
+	&& TREE_CODE (TREE_TYPE (TREE_TYPE (e))) == METHOD_TYPE)
+ || integer_zerop (e)
+ || TYPE_PTRMEMFUNC_P (TREE_TYPE (e)))
+ && TYPE_PTRMEMFUNC_P (type))
+ {
+ /* compatible pointer to member functions. */
+ return build_ptrmemfunc (TYPE_PTRMEMFUNC_FN_TYPE (type), e, 1);
+ }
+ {
+ int old_equiv = flag_int_enum_equivalence;
+ flag_int_enum_equivalence = 1;
+ e = convert (type, e);
+ flag_int_enum_equivalence = old_equiv;
+ }
+ return e;
+}
+
+/* Subroutine of build_type_conversion. */
+static tree
+build_type_conversion_1 (xtype, basetype, expr, typename, for_sure)
+ tree xtype, basetype;
+ tree expr;
+ tree typename;
+ int for_sure;
+{
+ tree rval;
+ int flags;
+
+ if (for_sure == 0)
+ flags = LOOKUP_PROTECT;
+ else
+ flags = LOOKUP_NORMAL;
+
+ rval = build_method_call (expr, typename, NULL_TREE, NULL_TREE, flags);
+ if (rval == error_mark_node)
+ {
+ if (for_sure == 0)
+ return NULL_TREE;
+ return error_mark_node;
+ }
+ if (TREE_CODE (TREE_TYPE (rval)) == REFERENCE_TYPE
+ && TREE_CODE (xtype) != REFERENCE_TYPE)
+ rval = default_conversion (rval);
+
+ if (warn_cast_qual
+ && TREE_TYPE (xtype)
+ && (TREE_READONLY (TREE_TYPE (TREE_TYPE (rval)))
+ > TREE_READONLY (TREE_TYPE (xtype))))
+ warning ("user-defined conversion casting away `const'");
+ return convert (xtype, rval);
+}
+
+/* Convert an aggregate EXPR to type XTYPE. If a conversion
+ exists, return the attempted conversion. This may
+ return ERROR_MARK_NODE if the conversion is not
+ allowed (references private members, etc).
+ If no conversion exists, NULL_TREE is returned.
+
+ If (FOR_SURE & 1) is non-zero, then we allow this type conversion
+ to take place immediately. Otherwise, we build a SAVE_EXPR
+ which can be evaluated if the results are ever needed.
+
+ If FOR_SURE >= 2, then we only look for exact conversions.
+
+   XTYPE may be a reference type, in which case we first look
+   for something that will convert to a reference type.  If
+   that fails, we will try to look for something of the
+   reference's target type, and then return a reference to that.  */
+tree
+build_type_conversion (code, xtype, expr, for_sure)
+ enum tree_code code;
+ tree xtype, expr;
+ int for_sure;
+{
+ /* C++: check to see if we can convert this aggregate type
+ into the required scalar type. */
+ tree type, type_default;
+ tree typename = build_typename_overload (xtype), *typenames;
+ int n_variants = 0;
+ tree basetype, save_basetype;
+ tree rval;
+ int exact_conversion = for_sure >= 2;
+ for_sure &= 1;
+
+ if (expr == error_mark_node)
+ return error_mark_node;
+
+ basetype = TREE_TYPE (expr);
+ if (TREE_CODE (basetype) == REFERENCE_TYPE)
+ basetype = TREE_TYPE (basetype);
+
+ if (TYPE_PTRMEMFUNC_P (basetype) && TREE_CODE (xtype) == BOOLEAN_TYPE)
+ {
+      /* We convert a pointer to member function into a boolean
+	 by just checking the index value: for == 0 we want false,
+	 for != 0 we want true.  */
+ return convert (xtype, build_component_ref (expr, index_identifier, 0, 0));
+ }
+
+ basetype = TYPE_MAIN_VARIANT (basetype);
+ if (! TYPE_LANG_SPECIFIC (basetype) || ! TYPE_HAS_CONVERSION (basetype))
+ return NULL_TREE;
+
+ if (TREE_CODE (xtype) == POINTER_TYPE
+ || TREE_CODE (xtype) == REFERENCE_TYPE)
+ {
+ /* Prepare to match a variant of this type. */
+ type = TYPE_MAIN_VARIANT (TREE_TYPE (xtype));
+ for (n_variants = 0; type; type = TYPE_NEXT_VARIANT (type))
+ n_variants++;
+ typenames = (tree *)alloca (n_variants * sizeof (tree));
+ for (n_variants = 0, type = TYPE_MAIN_VARIANT (TREE_TYPE (xtype));
+ type; n_variants++, type = TYPE_NEXT_VARIANT (type))
+ {
+ if (type == TREE_TYPE (xtype))
+ typenames[n_variants] = typename;
+ else if (TREE_CODE (xtype) == POINTER_TYPE)
+ typenames[n_variants] = build_typename_overload (build_pointer_type (type));
+ else
+ typenames[n_variants] = build_typename_overload (build_reference_type (type));
+ }
+ }
+
+ save_basetype = basetype;
+ type = xtype;
+
+ while (TYPE_HAS_CONVERSION (basetype))
+ {
+ int i;
+ if (lookup_fnfields (TYPE_BINFO (basetype), typename, 0))
+ return build_type_conversion_1 (xtype, basetype, expr, typename, for_sure);
+ for (i = 0; i < n_variants; i++)
+ if (typenames[i] != typename
+ && lookup_fnfields (TYPE_BINFO (basetype), typenames[i], 0))
+ return build_type_conversion_1 (xtype, basetype, expr, typenames[i], for_sure);
+
+ if (TYPE_BINFO_BASETYPES (basetype))
+ basetype = TYPE_BINFO_BASETYPE (basetype, 0);
+ else
+ break;
+ }
+
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ {
+#if 0
+ /* Only reference variable initializations can use a temporary; this
+ must be handled elsewhere (like convert_to_reference and
+ compute_conversion_costs). */
+
+ type = TYPE_MAIN_VARIANT (TREE_TYPE (type));
+ typename = build_typename_overload (type);
+ basetype = save_basetype;
+
+ /* May need to build a temporary for this. */
+ while (TYPE_HAS_CONVERSION (basetype))
+ {
+ if (lookup_fnfields (TYPE_BINFO (basetype), typename, 0))
+ {
+ int flags;
+
+ if (for_sure == 0)
+ flags = LOOKUP_PROTECT;
+ else
+ flags = LOOKUP_NORMAL;
+ rval = build_method_call (expr,
+ constructor_name_full (typename),
+ NULL_TREE, NULL_TREE, flags);
+ if (rval == error_mark_node)
+ {
+ if (for_sure == 0)
+ return NULL_TREE;
+ return error_mark_node;
+ }
+
+ return convert (xtype, rval);
+ }
+ if (TYPE_BINFO_BASETYPES (basetype))
+ basetype = TYPE_BINFO_BASETYPE (basetype, 0);
+ else
+ break;
+ }
+#endif
+      /* No free conversions for reference types, right?  */
+ return NULL_TREE;
+ }
+
+ if (exact_conversion)
+ return NULL_TREE;
+
+ if (TREE_CODE (type) == BOOLEAN_TYPE)
+ {
+ tree as_int = build_type_conversion (code, long_long_unsigned_type_node, expr, 0);
+ tree as_ptr = build_type_conversion (code, ptr_type_node, expr, 0);
+ /* We are missing the conversion to pointer to member type. */
+ /* We are missing the conversion to floating type. */
+ if (as_int && as_ptr && for_sure)
+ {
+ cp_error ("ambiguous conversion from `%T' to `bool', can convert to integral type or pointer", TREE_TYPE (expr));
+ return error_mark_node;
+ }
+ if (as_int)
+ {
+ as_int = build_type_conversion (code, long_long_unsigned_type_node, expr, for_sure+exact_conversion*2);
+ return convert (xtype, as_int);
+ }
+ if (as_ptr)
+ {
+ as_ptr = build_type_conversion (code, ptr_type_node, expr, for_sure+exact_conversion*2);
+ return convert (xtype, as_ptr);
+ }
+ return NULL_TREE;
+ }
+
+ /* No perfect match found, try default. */
+#if 0 /* This is wrong; there is no standard conversion from void* to
+ anything. -jason */
+ if (code == CONVERT_EXPR && TREE_CODE (type) == POINTER_TYPE)
+ type_default = ptr_type_node;
+ else
+#endif
+ if (type == void_type_node)
+ return NULL_TREE;
+ else
+ {
+ tree tmp = default_conversion (build1 (NOP_EXPR, type, integer_zero_node));
+ if (tmp == error_mark_node)
+ return NULL_TREE;
+ type_default = TREE_TYPE (tmp);
+ }
+
+ basetype = save_basetype;
+
+ if (type_default != type)
+ {
+ type = type_default;
+ typename = build_typename_overload (type);
+
+ while (TYPE_HAS_CONVERSION (basetype))
+ {
+ if (lookup_fnfields (TYPE_BINFO (basetype), typename, 0))
+ return build_type_conversion_1 (xtype, basetype, expr, typename, for_sure);
+ if (TYPE_BINFO_BASETYPES (basetype))
+ basetype = TYPE_BINFO_BASETYPE (basetype, 0);
+ else
+ break;
+ }
+ }
+
+ if (TREE_CODE (type) == POINTER_TYPE && TYPE_READONLY (TREE_TYPE (type)))
+ {
+ /* Try converting to some other const pointer type and then using
+ standard conversions. */
+
+ while (TYPE_HAS_CONVERSION (basetype))
+ {
+ if (CLASSTYPE_CONVERSION (basetype, constptr_conv) != 0)
+ {
+ if (CLASSTYPE_CONVERSION (basetype, constptr_conv) == error_mark_node)
+ return error_mark_node;
+ typename = DECL_NAME (CLASSTYPE_CONVERSION (basetype, constptr_conv));
+ return build_type_conversion_1 (xtype, basetype, expr, typename, for_sure);
+ }
+ if (TYPE_BINFO_BASETYPES (basetype))
+ basetype = TYPE_BINFO_BASETYPE (basetype, 0);
+ else
+ break;
+ }
+ }
+ if (TREE_CODE (type) == POINTER_TYPE)
+ {
+ /* Try converting to some other pointer type and then using standard
+ conversions. */
+
+ while (TYPE_HAS_CONVERSION (basetype))
+ {
+ if (CLASSTYPE_CONVERSION (basetype, ptr_conv) != 0)
+ {
+ if (CLASSTYPE_CONVERSION (basetype, ptr_conv) == error_mark_node)
+ return error_mark_node;
+ typename = DECL_NAME (CLASSTYPE_CONVERSION (basetype, ptr_conv));
+ return build_type_conversion_1 (xtype, basetype, expr, typename, for_sure);
+ }
+ if (TYPE_BINFO_BASETYPES (basetype))
+ basetype = TYPE_BINFO_BASETYPE (basetype, 0);
+ else
+ break;
+ }
+ }
+
+ /* Use the longer or shorter conversion that is appropriate. Have
+ to check against 0 because the conversion may come from a baseclass. */
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && TYPE_HAS_INT_CONVERSION (basetype)
+ && CLASSTYPE_CONVERSION (basetype, int_conv) != 0
+ && CLASSTYPE_CONVERSION (basetype, int_conv) != error_mark_node)
+ {
+ typename = DECL_NAME (CLASSTYPE_CONVERSION (basetype, int_conv));
+ return build_type_conversion_1 (xtype, basetype, expr, typename, for_sure);
+ }
+
+ if (TREE_CODE (type) == REAL_TYPE
+ && TYPE_HAS_REAL_CONVERSION (basetype)
+ && CLASSTYPE_CONVERSION (basetype, real_conv) != 0
+ && CLASSTYPE_CONVERSION (basetype, real_conv) != error_mark_node)
+ {
+ /* Only accept using an operator double() if there isn't a conflicting
+ operator int(). */
+ if (TYPE_HAS_INT_CONVERSION (basetype))
+ {
+ if (for_sure)
+ {
+ cp_error ("two possible conversions for type `%T'", type);
+ return error_mark_node;
+ }
+ else
+ return NULL_TREE;
+ }
+
+ typename = DECL_NAME (CLASSTYPE_CONVERSION (basetype, real_conv));
+ return build_type_conversion_1 (xtype, basetype, expr, typename, for_sure);
+ }
+
+ /* THESE ARE TOTAL KLUDGES. */
+ /* Default promotion yields no new alternatives, try
+ conversions which are anti-default, such as
+
+ double -> float or int -> unsigned or unsigned -> long
+
+ */
+ if (type_default == type
+ && (INTEGRAL_TYPE_P (type) || TREE_CODE (type) == REAL_TYPE))
+ {
+ int not_again = 0;
+
+ if (type == double_type_node)
+ typename = build_typename_overload (float_type_node);
+ else if (type == integer_type_node)
+ typename = build_typename_overload (unsigned_type_node);
+ else if (type == unsigned_type_node)
+ typename = build_typename_overload (long_integer_type_node);
+
+ again:
+ basetype = save_basetype;
+ while (TYPE_HAS_CONVERSION (basetype))
+ {
+ if (lookup_fnfields (TYPE_BINFO (basetype), typename, 0))
+ return build_type_conversion_1 (xtype, basetype, expr, typename, for_sure);
+ if (TYPE_BINFO_BASETYPES (basetype))
+ basetype = TYPE_BINFO_BASETYPE (basetype, 0);
+ else
+ break;
+ }
+ if (! not_again)
+ {
+ if (type == integer_type_node)
+ {
+ typename = build_typename_overload (long_integer_type_node);
+ not_again = 1;
+ goto again;
+ }
+ else
+ {
+ typename = build_typename_overload (integer_type_node);
+ not_again = 1;
+ goto again;
+ }
+ }
+ }
+
+ /* Now, try C promotions...
+
+ float -> int
+ int -> float */
+
+  basetype = save_basetype;
+  if (TREE_CODE (type) == REAL_TYPE)
+    type = integer_type_node;
+  else if (TREE_CODE (type) == INTEGER_TYPE)
+    {
+      if (TYPE_HAS_REAL_CONVERSION (basetype))
+	type = double_type_node;
+      else
+	return NULL_TREE;
+    }
+  else
+    return NULL_TREE;
+
+ typename = build_typename_overload (type);
+ while (TYPE_HAS_CONVERSION (basetype))
+ {
+ if (lookup_fnfields (TYPE_BINFO (basetype), typename, 0))
+ {
+ rval = build_type_conversion_1 (xtype, basetype, expr, typename, for_sure);
+ return rval;
+ }
+ if (TYPE_BINFO_BASETYPES (basetype))
+ basetype = TYPE_BINFO_BASETYPE (basetype, 0);
+ else
+ break;
+ }
+
+ return NULL_TREE;
+}
+
+/* Must convert two aggregate types to non-aggregate type.
+ Attempts to find a non-ambiguous, "best" type conversion.
+
+ Return 1 on success, 0 on failure.
+
+ @@ What are the real semantics of this supposed to be??? */
+int
+build_default_binary_type_conversion (code, arg1, arg2)
+ enum tree_code code;
+ tree *arg1, *arg2;
+{
+ tree type1 = TREE_TYPE (*arg1);
+ tree type2 = TREE_TYPE (*arg2);
+
+ if (TREE_CODE (type1) == REFERENCE_TYPE
+ || TREE_CODE (type1) == POINTER_TYPE)
+ type1 = TREE_TYPE (type1);
+ if (TREE_CODE (type2) == REFERENCE_TYPE
+ || TREE_CODE (type2) == POINTER_TYPE)
+ type2 = TREE_TYPE (type2);
+
+ if (TREE_CODE (TYPE_NAME (type1)) != TYPE_DECL)
+ {
+ tree decl = typedecl_for_tag (type1);
+ if (decl)
+ error ("type conversion nonexistent for type `%s'",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ else
+ error ("type conversion nonexistent for non-C++ type");
+ return 0;
+ }
+ if (TREE_CODE (TYPE_NAME (type2)) != TYPE_DECL)
+ {
+ tree decl = typedecl_for_tag (type2);
+ if (decl)
+ error ("type conversion nonexistent for type `%s'",
+ IDENTIFIER_POINTER (decl));
+ else
+ error ("type conversion nonexistent for non-C++ type");
+ return 0;
+ }
+
+ if (!IS_AGGR_TYPE (type1) || !TYPE_HAS_CONVERSION (type1))
+ {
+ if (!IS_AGGR_TYPE (type2) || !TYPE_HAS_CONVERSION (type2))
+ cp_error ("no conversion from `%T' and `%T' to types with default `%O' ",
+ type1, type2, code);
+ else
+ cp_error ("no conversion from `%T' to type with default `%O'",
+ type1, code);
+ return 0;
+ }
+ else if (!IS_AGGR_TYPE (type2) || !TYPE_HAS_CONVERSION (type2))
+ {
+ cp_error ("no conversion from `%T' to type with default `%O'",
+ type2, code);
+ return 0;
+ }
+
+ if (code == TRUTH_ANDIF_EXPR
+ || code == TRUTH_ORIF_EXPR)
+ {
+ *arg1 = convert (bool_type_node, *arg1);
+ *arg2 = convert (bool_type_node, *arg2);
+ }
+ else if (TYPE_HAS_INT_CONVERSION (type1))
+ {
+ if (TYPE_HAS_REAL_CONVERSION (type1))
+ cp_pedwarn ("ambiguous type conversion for type `%T', defaulting to int",
+ type1);
+ *arg1 = build_type_conversion (code, integer_type_node, *arg1, 1);
+ *arg2 = build_type_conversion (code, integer_type_node, *arg2, 1);
+ }
+ else if (TYPE_HAS_REAL_CONVERSION (type1))
+ {
+ *arg1 = build_type_conversion (code, double_type_node, *arg1, 1);
+ *arg2 = build_type_conversion (code, double_type_node, *arg2, 1);
+ }
+ else
+ {
+ *arg1 = build_type_conversion (code, ptr_type_node, *arg1, 1);
+ if (*arg1 == error_mark_node)
+ error ("ambiguous pointer conversion");
+ *arg2 = build_type_conversion (code, ptr_type_node, *arg2, 1);
+ if (*arg1 != error_mark_node && *arg2 == error_mark_node)
+ error ("ambiguous pointer conversion");
+ }
+ if (*arg1 == 0)
+ {
+ if (*arg2 == 0 && type1 != type2)
+ cp_error ("default type conversion for types `%T' and `%T' failed",
+ type1, type2);
+ else
+ cp_error ("default type conversion for type `%T' failed", type1);
+ return 0;
+ }
+ else if (*arg2 == 0)
+ {
+ cp_error ("default type conversion for type `%T' failed", type2);
+ return 0;
+ }
+ return 1;
+}
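+
+/* Illustrative example (hypothetical classes): given
+
+	struct A { operator int (); };
+	struct B { operator int (); };
+	A a; B b;
+
+   `a + b' converts both operands through `operator int ()', while
+   `a && b' converts both to bool first.  Note that the target type
+   is keyed off TYPE1 only, as the code above shows.  */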
+
+/* Must convert an aggregate type to non-aggregate type.
+ Attempts to find a non-ambiguous, "best" type conversion.
+
+ Return 1 on success, 0 on failure.
+
+ The type of the argument is expected to be of aggregate type here.
+
+ @@ What are the real semantics of this supposed to be??? */
+int
+build_default_unary_type_conversion (code, arg)
+ enum tree_code code;
+ tree *arg;
+{
+ tree type = TREE_TYPE (*arg);
+
+ if (! TYPE_HAS_CONVERSION (type))
+ {
+ cp_error ("type conversion required for type `%T'", type);
+ return 0;
+ }
+
+ if (code == TRUTH_NOT_EXPR)
+ *arg = convert (bool_type_node, *arg);
+ else if (TYPE_HAS_INT_CONVERSION (type))
+ {
+ if (TYPE_HAS_REAL_CONVERSION (type))
+ cp_pedwarn ("ambiguous type conversion for type `%T', defaulting to int",
+ type);
+ *arg = build_type_conversion (code, integer_type_node, *arg, 1);
+ }
+ else if (TYPE_HAS_REAL_CONVERSION (type))
+ *arg = build_type_conversion (code, double_type_node, *arg, 1);
+ else
+ {
+ *arg = build_type_conversion (code, ptr_type_node, *arg, 1);
+ if (*arg == error_mark_node)
+ error ("ambiguous pointer conversion");
+ }
+ if (*arg == NULL_TREE)
+ {
+ cp_error ("default type conversion for type `%T' failed", type);
+ return 0;
+ }
+ return 1;
+}
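+
+/* Illustrative example (hypothetical class): given
+
+	struct A { operator int (); operator double (); };
+	A a;
+
+   `!a' goes through the bool conversion above, while `~a' picks
+   `operator int ()' and pedwarns about the int/double ambiguity.  */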
+
+/* Implements integral promotion (4.1) and float->double promotion. */
+tree
+type_promotes_to (type)
+ tree type;
+{
+ int constp = TYPE_READONLY (type);
+ int volatilep = TYPE_VOLATILE (type);
+ type = TYPE_MAIN_VARIANT (type);
+
+ /* bool always promotes to int (not unsigned), even if it's the same
+ size. */
+ if (type == bool_type_node)
+ type = integer_type_node;
+
+ /* Normally convert enums to int, but convert wide enums to something
+ wider. */
+ else if (TREE_CODE (type) == ENUMERAL_TYPE
+ || type == wchar_type_node)
+ type = type_for_size
+ (MAX (TYPE_PRECISION (type), TYPE_PRECISION (integer_type_node)),
+ (flag_traditional
+ || (TYPE_PRECISION (type) >= TYPE_PRECISION (integer_type_node)))
+ && TREE_UNSIGNED (type));
+ else if (C_PROMOTING_INTEGER_TYPE_P (type))
+ {
+ /* Traditionally, unsignedness is preserved in default promotions.
+ Otherwise, retain unsignedness if really not getting bigger. */
+ if (TREE_UNSIGNED (type)
+ && (flag_traditional
+ || TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)))
+ type = unsigned_type_node;
+ else
+ type = integer_type_node;
+ }
+ else if (type == float_type_node)
+ type = double_type_node;
+
+ return c_build_type_variant (type, constp, volatilep);
+}
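+
+/* Illustrative results (assuming the usual 32-bit int target): the
+   mapping computed above is
+
+	bool		-> int
+	char, short	-> int	  (int can represent all the values)
+	unsigned short	-> int	  (stays unsigned only if as wide as int,
+				   or under -traditional)
+	float		-> double
+	const short	-> const int  (qualifiers are preserved)
+
+   Wide enums and wchar_t go through type_for_size instead.  */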
diff --git a/gnu/usr.bin/cc/cc1plus/decl.c b/gnu/usr.bin/cc/cc1plus/decl.c
new file mode 100644
index 0000000..0690340
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/decl.c
@@ -0,0 +1,12030 @@
+/* Process declarations and variables for C compiler.
+ Copyright (C) 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+ Hacked by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Process declarations and symbol lookup for C front end.
+ Also constructs types; the standard scalar types at initialization,
+ and structure, union, array and enum types when they are declared. */
+
+/* ??? not all decl nodes are given the most useful possible
+ line numbers. For example, the CONST_DECLs for enum values. */
+
+#include <stdio.h>
+#include "config.h"
+#include "tree.h"
+#include "rtl.h"
+#include "flags.h"
+#include "cp-tree.h"
+#include "decl.h"
+#include "lex.h"
+#include <sys/types.h>
+#include <signal.h>
+#include "obstack.h"
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+extern tree builtin_return_address_fndecl;
+
+extern struct obstack permanent_obstack;
+
+extern int current_class_depth;
+
+extern tree cleanups_this_call;
+
+/* Stack of places to restore the search obstack back to. */
+
+/* Obstack used for remembering local class declarations (like
+   enums and static (const) members). */
+#include "stack.h"
+static struct obstack decl_obstack;
+static struct stack_level *decl_stack;
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef SHORT_TYPE_SIZE
+#define SHORT_TYPE_SIZE (BITS_PER_UNIT * MIN ((UNITS_PER_WORD + 1) / 2, 2))
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_LONG_TYPE_SIZE
+#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef WCHAR_UNSIGNED
+#define WCHAR_UNSIGNED 0
+#endif
+
+#ifndef FLOAT_TYPE_SIZE
+#define FLOAT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef DOUBLE_TYPE_SIZE
+#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
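+
+/* Illustratively, on a 32-bit-word target that overrides none of these,
+   the defaults above give CHAR = 8, SHORT = 16, INT = LONG = FLOAT = 32,
+   and LONG_LONG = DOUBLE = LONG_DOUBLE = 64 bits.  */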
+
+/* We let tm.h override the types used here, to handle trivial differences
+ such as the choice of unsigned int or long unsigned int for size_t.
+ When machines start needing nontrivial differences in the size type,
+ it would be best to do something here to figure out automatically
+ from other information what type to use. */
+
+#ifndef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+#endif
+
+#ifndef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+#endif
+
+#ifndef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+#endif
+
+#define builtin_function(NAME, TYPE, CODE, LIBNAME) \
+ define_function (NAME, TYPE, CODE, (void (*)())pushdecl, LIBNAME)
+#define auto_function(NAME, TYPE, CODE) \
+ do { \
+ tree __name = NAME; \
+ tree __type = TYPE; \
+ define_function (IDENTIFIER_POINTER (__name), __type, CODE, \
+ (void (*)())push_overloaded_decl_1, \
+ IDENTIFIER_POINTER (build_decl_overload (__name, TYPE_ARG_TYPES (__type), 0)));\
+ } while (0)
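+
+/* An illustrative, simplified call (the real ones appear later in this
+   file when the standard declarations are set up):
+
+	builtin_function ("__builtin_alloca",
+			  build_function_type (ptr_type_node, NULL_TREE),
+			  BUILT_IN_ALLOCA, NULL_PTR);
+*/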
+
+static tree grokparms PROTO((tree, int));
+static tree lookup_nested_type PROTO((tree, tree));
+static char *redeclaration_error_message PROTO((tree, tree));
+static void grok_op_properties PROTO((tree, int, int));
+
+tree define_function
+ PROTO((char *, tree, enum built_in_function, void (*)(), char *));
+
+/* a node which has tree code ERROR_MARK, and whose type is itself.
+ All erroneous expressions are replaced with this node. All functions
+ that accept nodes as arguments should avoid generating error messages
+ if this node is one of the arguments, since it is undesirable to get
+ multiple error messages from one error in the input. */
+
+tree error_mark_node;
+
+/* Erroneous argument lists can use this *IFF* they do not modify it. */
+tree error_mark_list;
+
+/* INTEGER_TYPE and REAL_TYPE nodes for the standard data types */
+
+tree short_integer_type_node;
+tree integer_type_node;
+tree long_integer_type_node;
+tree long_long_integer_type_node;
+
+tree short_unsigned_type_node;
+tree unsigned_type_node;
+tree long_unsigned_type_node;
+tree long_long_unsigned_type_node;
+
+tree ptrdiff_type_node;
+
+tree unsigned_char_type_node;
+tree signed_char_type_node;
+tree char_type_node;
+tree wchar_type_node;
+tree signed_wchar_type_node;
+tree unsigned_wchar_type_node;
+
+tree float_type_node;
+tree double_type_node;
+tree long_double_type_node;
+
+tree intQI_type_node;
+tree intHI_type_node;
+tree intSI_type_node;
+tree intDI_type_node;
+
+tree unsigned_intQI_type_node;
+tree unsigned_intHI_type_node;
+tree unsigned_intSI_type_node;
+tree unsigned_intDI_type_node;
+
+/* a VOID_TYPE node, and the same, packaged in a TREE_LIST. */
+
+tree void_type_node, void_list_node;
+tree void_zero_node;
+
+/* Nodes for types `void *' and `const void *'. */
+
+tree ptr_type_node, const_ptr_type_node;
+
+/* Nodes for types `char *' and `const char *'. */
+
+tree string_type_node, const_string_type_node;
+
+/* Type `char[256]' or something like it.
+ Used when an array of char is needed and the size is irrelevant. */
+
+tree char_array_type_node;
+
+/* Type `int[256]' or something like it.
+   Used when an array of int is needed and the size is irrelevant. */
+
+tree int_array_type_node;
+
+/* Type `wchar_t[256]' or something like it.
+ Used when a wide string literal is created. */
+
+tree wchar_array_type_node;
+
+/* The bool data type, and constants */
+tree bool_type_node, true_node, false_node;
+
+/* type `int ()' -- used for implicit declaration of functions. */
+
+tree default_function_type;
+
+/* function types `double (double)' and `double (double, double)', etc. */
+
+tree double_ftype_double, double_ftype_double_double;
+tree int_ftype_int, long_ftype_long;
+
+/* Function type `void (void *, void *, int)' and similar ones. */
+
+tree void_ftype_ptr_ptr_int, int_ftype_ptr_ptr_int, void_ftype_ptr_int_int;
+
+/* Function type `char *(char *, char *)' and similar ones */
+tree string_ftype_ptr_ptr, int_ftype_string_string;
+
+/* Function type `size_t (const char *)' */
+tree sizet_ftype_string;
+
+/* Function type `int (const void *, const void *, size_t)' */
+tree int_ftype_cptr_cptr_sizet;
+
+/* C++ extensions */
+tree vtable_entry_type;
+tree delta_type_node;
+tree __t_desc_type_node, __i_desc_type_node, __m_desc_type_node;
+tree __t_desc_array_type, __i_desc_array_type, __m_desc_array_type;
+tree class_star_type_node;
+tree class_type_node, record_type_node, union_type_node, enum_type_node;
+tree exception_type_node, unknown_type_node;
+tree opaque_type_node, signature_type_node;
+tree sigtable_entry_type;
+tree maybe_gc_cleanup;
+
+/* Array type `vtable_entry_type[]' */
+tree vtbl_type_node;
+
+/* In a destructor, the point at which all derived class destroying
+ has been done, just before any base class destroying will be done. */
+
+tree dtor_label;
+
+/* In a constructor, the point at which we are ready to return
+ the pointer to the initialized object. */
+
+tree ctor_label;
+
+/* A FUNCTION_DECL which can call `abort'. Not necessarily the
+ one that the user will declare, but sufficient to be called
+ by routines that want to abort the program. */
+
+tree abort_fndecl;
+
+extern rtx cleanup_label, return_label;
+
+/* If the original DECL_RESULT of the current function was a register,
+   but due to being an addressable named return value it wound up
+   on the stack, this variable holds the named return value's
+   original location. */
+rtx original_result_rtx;
+
+/* Sequence of insns which represents base initialization. */
+rtx base_init_insns;
+
+/* C++: Keep these around to reduce calls to `get_identifier'.
+ Identifiers for `this' in member functions and the auto-delete
+ parameter for destructors. */
+tree this_identifier, in_charge_identifier;
+/* Used in pointer to member functions, and in vtables. */
+tree pfn_identifier, index_identifier, delta_identifier, delta2_identifier;
+tree pfn_or_delta2_identifier;
+
+/* A list (chain of TREE_LIST nodes) of named label uses.
+   The TREE_PURPOSE field is the list of variables defined
+   in the label's scope at the point of use.
+ The TREE_VALUE field is the LABEL_DECL used.
+ The TREE_TYPE field holds `current_binding_level' at the
+ point of the label's use.
+
+ Used only for jumps to as-yet undefined labels, since
+ jumps to defined labels can have their validity checked
+ by stmt.c. */
+
+static tree named_label_uses;
+
+/* A list of objects which have constructors or destructors
+ which reside in the global scope. The decl is stored in
+ the TREE_VALUE slot and the initializer is stored
+ in the TREE_PURPOSE slot. */
+tree static_aggregates;
+
+/* -- end of C++ */
+
+/* Two expressions that are constants with value zero.
+ The first is of type `int', the second of type `void *'. */
+
+tree integer_zero_node;
+tree null_pointer_node;
+
+/* A node for the integer constants 1, 2, and 3. */
+
+tree integer_one_node, integer_two_node, integer_three_node;
+
+/* Nonzero if we have seen an invalid cross reference
+ to a struct, union, or enum, but not yet printed the message. */
+
+tree pending_invalid_xref;
+/* File and line to appear in the eventual error message. */
+char *pending_invalid_xref_file;
+int pending_invalid_xref_line;
+
+/* While defining an enum type, this is 1 plus the last enumerator
+ constant value. */
+
+static tree enum_next_value;
+
+/* Nonzero means that there was overflow computing enum_next_value. */
+
+static int enum_overflow;
+
+/* Parsing a function declarator leaves a list of parameter names
+   or a chain of parameter decls here. */
+
+tree last_function_parms;
+
+/* Parsing a function declarator leaves here a chain of structure
+ and enum types declared in the parmlist. */
+
+static tree last_function_parm_tags;
+
+/* After parsing the declarator that starts a function definition,
+ `start_function' puts here the list of parameter names or chain of decls.
+ `store_parm_decls' finds it here. */
+
+static tree current_function_parms;
+
+/* Similar, for last_function_parm_tags. */
+static tree current_function_parm_tags;
+
+/* A list (chain of TREE_LIST nodes) of all LABEL_DECLs in the function
+ that have names. Here so we can clear out their names' definitions
+ at the end of the function. */
+
+static tree named_labels;
+
+/* A list of LABEL_DECLs from outer contexts that are currently shadowed. */
+
+static tree shadowed_labels;
+
+#if 0 /* Not needed by C++ */
+/* Nonzero when store_parm_decls is called indicates a varargs function.
+ Value not meaningful after store_parm_decls. */
+
+static int c_function_varargs;
+#endif
+
+/* The FUNCTION_DECL for the function currently being compiled,
+ or 0 if between functions. */
+tree current_function_decl;
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement that specifies a return value is seen. */
+
+int current_function_returns_value;
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement with no argument is seen. */
+
+int current_function_returns_null;
+
+/* Set to 0 at beginning of a function definition, and whenever
+ a label (case or named) is defined. Set to value of expression
+ returned from function when that value can be transformed into
+ a named return value. */
+
+tree current_function_return_value;
+
+/* Set to nonzero by `grokdeclarator' for a function
+ whose return type is defaulted, if warnings for this are desired. */
+
+static int warn_about_return_type;
+
+/* Nonzero when starting a function declared `extern inline'. */
+
+static int current_extern_inline;
+
+/* Nonzero means give `double' the same size as `float'. */
+
+extern int flag_short_double;
+
+/* Nonzero means don't recognize any builtin functions. */
+
+extern int flag_no_builtin;
+
+/* Nonzero means disable GNU extensions. */
+
+extern int flag_ansi;
+
+/* Nonzero if we want to support huge (> 2^(sizeof(short)*8-1) bytes)
+ objects. */
+extern int flag_huge_objects;
+
+/* Nonzero if we want to conserve space in the .o files. We do this
+ by putting uninitialized data and runtime initialized data into
+ .common instead of .data at the expense of not flaging multiple
+ definitions. */
+extern int flag_conserve_space;
+
+/* Pointers to the base and current top of the language name stack. */
+
+extern tree *current_lang_base, *current_lang_stack;
+
+/* C and C++ flags are in decl2.c. */
+
+/* Set to 0 at beginning of a constructor, set to 1
+ if that function does an allocation before referencing its
+ instance variable. */
+int current_function_assigns_this;
+int current_function_just_assigned_this;
+
+/* Set to 0 at beginning of a function. Set non-zero when
+ store_parm_decls is called. Don't call store_parm_decls
+ if this flag is non-zero! */
+int current_function_parms_stored;
+
+/* Current end of entries in the gc obstack for stack pointer variables. */
+
+int current_function_obstack_index;
+
+/* Flag saying whether we have used the obstack in this function or not. */
+
+int current_function_obstack_usage;
+
+/* Flag used when debugging spew.c */
+
+extern int spew_debug;
+
+/* This is a copy of the class_shadowed list of the previous class binding
+ contour when at global scope. It's used to reset IDENTIFIER_CLASS_VALUEs
+ when entering another class scope (i.e. a cache miss). */
+extern tree previous_class_values;
+
+
+/* Allocate a level of searching. */
+struct stack_level *
+push_decl_level (stack, obstack)
+ struct stack_level *stack;
+ struct obstack *obstack;
+{
+ struct stack_level tem;
+ tem.prev = stack;
+
+ return push_stack_level (obstack, (char *)&tem, sizeof (tem));
+}
+
+/* For each binding contour we allocate a binding_level structure
+ * which records the names defined in that contour.
+ * Contours include:
+ * 0) the global one
+ * 1) one for each function definition,
+ * where internal declarations of the parameters appear.
+ * 2) one for each compound statement,
+ * to record its declarations.
+ *
+ * The current meaning of a name can be found by searching the levels from
+ * the current one out to the global one.
+ *
+ * Off to the side there may be a class_binding_level.  This exists
+ * only to catch class-local declarations.  It is otherwise
+ * nonexistent.
+ *
+ * Also there may be binding levels that catch cleanups that
+ * must be run when exceptions occur.
+ */
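+
+/* For instance (illustrative):
+
+	int f (int x)
+	{
+	  { int y; ... }
+	}
+
+   While `y' is in scope three contours are active: the compound
+   statement holding `y', the parameter level holding `x', and the
+   global one; a name is looked up innermost-first.  */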
+
+/* Note that the information in the `names' component of the global contour
+ is duplicated in the IDENTIFIER_GLOBAL_VALUEs of all identifiers. */
+
+struct binding_level
+ {
+ /* A chain of _DECL nodes for all variables, constants, functions,
+ * and typedef types. These are in the reverse of the order supplied.
+ */
+ tree names;
+
+ /* A list of structure, union and enum definitions,
+ * for looking up tag names.
+ * It is a chain of TREE_LIST nodes, each of whose TREE_PURPOSE is a name,
+ * or NULL_TREE; and whose TREE_VALUE is a RECORD_TYPE, UNION_TYPE,
+ * or ENUMERAL_TYPE node.
+ *
+ * C++: the TREE_VALUE nodes can be simple types for component_bindings.
+ *
+ */
+ tree tags;
+
+ /* For each level, a list of shadowed outer-level local definitions
+ to be restored when this level is popped.
+ Each link is a TREE_LIST whose TREE_PURPOSE is an identifier and
+ whose TREE_VALUE is its old definition (a kind of ..._DECL node). */
+ tree shadowed;
+
+ /* Same, for IDENTIFIER_CLASS_VALUE. */
+ tree class_shadowed;
+
+ /* Same, for IDENTIFIER_TYPE_VALUE. */
+ tree type_shadowed;
+
+ /* For each level (except not the global one),
+ a chain of BLOCK nodes for all the levels
+ that were entered and exited one level down. */
+ tree blocks;
+
+ /* The BLOCK node for this level, if one has been preallocated.
+ If 0, the BLOCK is allocated (if needed) when the level is popped. */
+ tree this_block;
+
+ /* The binding level which this one is contained in (inherits from). */
+ struct binding_level *level_chain;
+
+ /* Number of decls in `names' that have incomplete
+ structure or union types. */
+ unsigned short n_incomplete;
+
+ /* 1 for the level that holds the parameters of a function.
+ 2 for the level that holds a class declaration.
+ 3 for levels that hold parameter declarations. */
+ unsigned parm_flag : 4;
+
+ /* 1 means make a BLOCK for this level regardless of all else.
+ 2 for temporary binding contours created by the compiler. */
+ unsigned keep : 3;
+
+ /* Nonzero if this level "doesn't exist" for tags. */
+ unsigned tag_transparent : 1;
+
+ /* Nonzero if this level can safely have additional
+ cleanup-needing variables added to it. */
+ unsigned more_cleanups_ok : 1;
+ unsigned have_cleanups : 1;
+
+ /* Nonzero if we should accept any name as an identifier in
+ this scope. This happens in some template definitions. */
+ unsigned accept_any : 1;
+
+ /* Nonzero if this level is for completing a template class definition
+ inside a binding level that temporarily binds the parameters. This
+ means that definitions here should not be popped off when unwinding
+ this binding level. (Not actually implemented this way,
+ unfortunately.) */
+ unsigned pseudo_global : 1;
+
+ /* Two bits left for this word. */
+
+#if defined(DEBUG_CP_BINDING_LEVELS)
+ /* Binding depth at which this level began. */
+ unsigned binding_depth;
+#endif /* defined(DEBUG_CP_BINDING_LEVELS) */
+ };
+
+#define NULL_BINDING_LEVEL ((struct binding_level *) NULL)
+
+/* The (non-class) binding level currently in effect. */
+
+static struct binding_level *current_binding_level;
+
+/* The binding level of the current class, if any. */
+
+static struct binding_level *class_binding_level;
+
+/* The current (class or non-class) binding level currently in effect. */
+
+#define inner_binding_level \
+ (class_binding_level ? class_binding_level : current_binding_level)
+
+/* A chain of binding_level structures awaiting reuse. */
+
+static struct binding_level *free_binding_level;
+
+/* The outermost binding level, for names of file scope.
+ This is created when the compiler is started and exists
+ through the entire run. */
+
+static struct binding_level *global_binding_level;
+
+/* Binding level structures are initialized by copying this one. */
+
+static struct binding_level clear_binding_level;
+
+/* Nonzero means unconditionally make a BLOCK for the next level pushed. */
+
+static int keep_next_level_flag;
+
+#if defined(DEBUG_CP_BINDING_LEVELS)
+static int binding_depth = 0;
+static int is_class_level = 0;
+
+static void
+indent ()
+{
+ register unsigned i;
+
+ for (i = 0; i < binding_depth*2; i++)
+ putc (' ', stderr);
+}
+#endif /* defined(DEBUG_CP_BINDING_LEVELS) */
+
+static tree pushdecl_with_scope PROTO((tree, struct binding_level *));
+
+static void
+push_binding_level (newlevel, tag_transparent, keep)
+ struct binding_level *newlevel;
+ int tag_transparent, keep;
+{
+ /* Add this level to the front of the chain (stack) of levels that
+ are active. */
+ *newlevel = clear_binding_level;
+ if (class_binding_level)
+ {
+ newlevel->level_chain = class_binding_level;
+ class_binding_level = (struct binding_level *)0;
+ }
+ else
+ {
+ newlevel->level_chain = current_binding_level;
+ }
+ current_binding_level = newlevel;
+ newlevel->tag_transparent = tag_transparent;
+ newlevel->more_cleanups_ok = 1;
+ newlevel->keep = keep;
+#if defined(DEBUG_CP_BINDING_LEVELS)
+ newlevel->binding_depth = binding_depth;
+ indent ();
+ fprintf (stderr, "push %s level 0x%08x line %d\n",
+ (is_class_level) ? "class" : "block", newlevel, lineno);
+ is_class_level = 0;
+ binding_depth++;
+#endif /* defined(DEBUG_CP_BINDING_LEVELS) */
+}
+
+static void
+pop_binding_level ()
+{
+ if (class_binding_level)
+ current_binding_level = class_binding_level;
+
+ if (global_binding_level)
+ {
+      /* Cannot pop a level if there are none left to pop. */
+ if (current_binding_level == global_binding_level)
+ my_friendly_abort (123);
+ }
+ /* Pop the current level, and free the structure for reuse. */
+#if defined(DEBUG_CP_BINDING_LEVELS)
+ binding_depth--;
+ indent ();
+ fprintf (stderr, "pop %s level 0x%08x line %d\n",
+ (is_class_level) ? "class" : "block",
+ current_binding_level, lineno);
+ if (is_class_level != (current_binding_level == class_binding_level))
+#if 0 /* XXX Don't abort when we're watching how things are being managed. */
+ abort ();
+#else
+ {
+ indent ();
+ fprintf (stderr, "XXX is_class_level != (current_binding_level == class_binding_level)\n");
+ }
+#endif
+ is_class_level = 0;
+#endif /* defined(DEBUG_CP_BINDING_LEVELS) */
+ {
+ register struct binding_level *level = current_binding_level;
+ current_binding_level = current_binding_level->level_chain;
+ level->level_chain = free_binding_level;
+#if 0 /* defined(DEBUG_CP_BINDING_LEVELS) */
+ if (level->binding_depth != binding_depth)
+ abort ();
+#endif /* defined(DEBUG_CP_BINDING_LEVELS) */
+ free_binding_level = level;
+
+ class_binding_level = current_binding_level;
+ if (class_binding_level->parm_flag != 2)
+ class_binding_level = 0;
+ while (current_binding_level->parm_flag == 2)
+ current_binding_level = current_binding_level->level_chain;
+ }
+}
+
+/* Nonzero if we are currently in the global binding level. */
+
+int
+global_bindings_p ()
+{
+ return current_binding_level == global_binding_level;
+}
+
+void
+keep_next_level ()
+{
+ keep_next_level_flag = 1;
+}
+
+/* Nonzero if the current level needs to have a BLOCK made. */
+
+int
+kept_level_p ()
+{
+ return (current_binding_level->blocks != NULL_TREE
+ || current_binding_level->keep
+ || current_binding_level->names != NULL_TREE
+ || (current_binding_level->tags != NULL_TREE
+ && !current_binding_level->tag_transparent));
+}
+
+/* Identify this binding level as a level of parameters. */
+
+void
+declare_parm_level ()
+{
+ current_binding_level->parm_flag = 1;
+}
+
+void
+declare_uninstantiated_type_level ()
+{
+ current_binding_level->accept_any = 1;
+}
+
+int
+uninstantiated_type_level_p ()
+{
+ return current_binding_level->accept_any;
+}
+
+void
+declare_pseudo_global_level ()
+{
+ current_binding_level->pseudo_global = 1;
+}
+
+int
+pseudo_global_level_p ()
+{
+ return current_binding_level->pseudo_global;
+}
+
+void
+set_class_shadows (shadows)
+ tree shadows;
+{
+ class_binding_level->class_shadowed = shadows;
+}
+
+/* Enter a new binding level.
+ If TAG_TRANSPARENT is nonzero, do so only for the name space of variables,
+ not for that of tags. */
+
+void
+pushlevel (tag_transparent)
+ int tag_transparent;
+{
+ register struct binding_level *newlevel = NULL_BINDING_LEVEL;
+
+ /* If this is the top level of a function,
+ just make sure that NAMED_LABELS is 0.
+ They should have been set to 0 at the end of the previous function. */
+
+ if (current_binding_level == global_binding_level)
+ my_friendly_assert (named_labels == NULL_TREE, 134);
+
+ /* Reuse or create a struct for this binding level. */
+
+#if defined(DEBUG_CP_BINDING_LEVELS)
+ if (0)
+#else /* !defined(DEBUG_CP_BINDING_LEVELS) */
+ if (free_binding_level)
+#endif /* !defined(DEBUG_CP_BINDING_LEVELS) */
+ {
+ newlevel = free_binding_level;
+ free_binding_level = free_binding_level->level_chain;
+ }
+ else
+ {
+ /* Create a new `struct binding_level'. */
+ newlevel = (struct binding_level *) xmalloc (sizeof (struct binding_level));
+ }
+ push_binding_level (newlevel, tag_transparent, keep_next_level_flag);
+ GNU_xref_start_scope ((HOST_WIDE_INT) newlevel);
+ keep_next_level_flag = 0;
+}
+
+void
+pushlevel_temporary (tag_transparent)
+ int tag_transparent;
+{
+ pushlevel (tag_transparent);
+ current_binding_level->keep = 2;
+ clear_last_expr ();
+
+  /* Note we don't call push_momentary() here.  Otherwise, it would cause
+     cleanups to be allocated on the momentary obstack, and they would be
+     overwritten by the next statement. */
+
+ expand_start_bindings (0);
+}
+
+/* Exit a binding level.
+ Pop the level off, and restore the state of the identifier-decl mappings
+ that were in effect when this level was entered.
+
+   If KEEP == 1, this level had explicit declarations, so
+   create a "block" (a BLOCK node) for the level
+ to record its declarations and subblocks for symbol table output.
+
+ If KEEP == 2, this level's subblocks go to the front,
+ not the back of the current binding level. This happens,
+ for instance, when code for constructors and destructors
+ need to generate code at the end of a function which must
+ be moved up to the front of the function.
+
+ If FUNCTIONBODY is nonzero, this level is the body of a function,
+ so create a block as if KEEP were set and also clear out all
+ label names.
+
+ If REVERSE is nonzero, reverse the order of decls before putting
+ them into the BLOCK. */
+
+tree
+poplevel (keep, reverse, functionbody)
+ int keep;
+ int reverse;
+ int functionbody;
+{
+ register tree link;
+ /* The chain of decls was accumulated in reverse order.
+ Put it into forward order, just for cleanliness. */
+ tree decls;
+ int tmp = functionbody;
+ int implicit_try_block = current_binding_level->parm_flag == 3;
+ int real_functionbody = current_binding_level->keep == 2
+ ? ((functionbody = 0), tmp) : functionbody;
+ tree tags = functionbody >= 0 ? current_binding_level->tags : 0;
+ tree subblocks = functionbody >= 0 ? current_binding_level->blocks : 0;
+ tree block = NULL_TREE;
+ tree decl;
+ int block_previously_created;
+
+ GNU_xref_end_scope ((HOST_WIDE_INT) current_binding_level,
+ (HOST_WIDE_INT) current_binding_level->level_chain,
+ current_binding_level->parm_flag,
+ current_binding_level->keep,
+ current_binding_level->tag_transparent);
+
+ if (current_binding_level->keep == 1)
+ keep = 1;
+
+ /* This warning is turned off because it causes warnings for
+ declarations like `extern struct foo *x'. */
+#if 0
+ /* Warn about incomplete structure types in this level. */
+ for (link = tags; link; link = TREE_CHAIN (link))
+ if (TYPE_SIZE (TREE_VALUE (link)) == NULL_TREE)
+ {
+ tree type = TREE_VALUE (link);
+ char *errmsg;
+ switch (TREE_CODE (type))
+ {
+ case RECORD_TYPE:
+ errmsg = "`struct %s' incomplete in scope ending here";
+ break;
+ case UNION_TYPE:
+ errmsg = "`union %s' incomplete in scope ending here";
+ break;
+ case ENUMERAL_TYPE:
+ errmsg = "`enum %s' incomplete in scope ending here";
+ break;
+ }
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ error (errmsg, IDENTIFIER_POINTER (TYPE_NAME (type)));
+ else
+ /* If this type has a typedef-name, the TYPE_NAME is a TYPE_DECL. */
+ error (errmsg, TYPE_NAME_STRING (type));
+ }
+#endif /* 0 */
+
+ /* Get the decls in the order they were written.
+ Usually current_binding_level->names is in reverse order.
+ But parameter decls were previously put in forward order. */
+
+ if (reverse)
+ current_binding_level->names
+ = decls = nreverse (current_binding_level->names);
+ else
+ decls = current_binding_level->names;
+
+ /* Output any nested inline functions within this block
+ if they weren't already output. */
+
+ for (decl = decls; decl; decl = TREE_CHAIN (decl))
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && ! TREE_ASM_WRITTEN (decl)
+ && DECL_INITIAL (decl) != NULL_TREE
+ && TREE_ADDRESSABLE (decl))
+ {
+ /* If this decl was copied from a file-scope decl
+ on account of a block-scope extern decl,
+ propagate TREE_ADDRESSABLE to the file-scope decl. */
+ if (DECL_ABSTRACT_ORIGIN (decl) != NULL_TREE)
+ TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (decl)) = 1;
+ else
+ {
+ push_function_context ();
+ output_inline_function (decl);
+ pop_function_context ();
+ }
+ }
+
+ /* If there were any declarations or structure tags in that level,
+ or if this level is a function body,
+ create a BLOCK to record them for the life of this function. */
+
+ block = NULL_TREE;
+ block_previously_created = (current_binding_level->this_block != NULL_TREE);
+ if (block_previously_created)
+ block = current_binding_level->this_block;
+ else if (keep == 1 || functionbody)
+ block = make_node (BLOCK);
+ if (block != NULL_TREE)
+ {
+ BLOCK_VARS (block) = decls;
+ BLOCK_TYPE_TAGS (block) = tags;
+ BLOCK_SUBBLOCKS (block) = subblocks;
+ /* If we created the block earlier on, and we are just diddling it now,
+ then it already should have a proper BLOCK_END_NOTE value associated
+ with it, so avoid trashing that. Otherwise, for a new block, install
+ a new BLOCK_END_NOTE value. */
+ if (! block_previously_created)
+ remember_end_note (block);
+ }
+
+ /* In each subblock, record that this is its superior. */
+
+ if (keep >= 0)
+ for (link = subblocks; link; link = TREE_CHAIN (link))
+ BLOCK_SUPERCONTEXT (link) = block;
+
+ /* Clear out the meanings of the local variables of this level. */
+
+ for (link = decls; link; link = TREE_CHAIN (link))
+ {
+ if (DECL_NAME (link) != NULL_TREE)
+ {
+ /* If the ident. was used or addressed via a local extern decl,
+ don't forget that fact. */
+ if (DECL_EXTERNAL (link))
+ {
+ if (TREE_USED (link))
+ TREE_USED (DECL_ASSEMBLER_NAME (link)) = 1;
+ if (TREE_ADDRESSABLE (link))
+ TREE_ADDRESSABLE (DECL_ASSEMBLER_NAME (link)) = 1;
+ }
+ IDENTIFIER_LOCAL_VALUE (DECL_NAME (link)) = NULL_TREE;
+ }
+ }
+
+ /* Restore all name-meanings of the outer levels
+ that were shadowed by this level. */
+
+ for (link = current_binding_level->shadowed; link; link = TREE_CHAIN (link))
+ IDENTIFIER_LOCAL_VALUE (TREE_PURPOSE (link)) = TREE_VALUE (link);
+ for (link = current_binding_level->class_shadowed;
+ link; link = TREE_CHAIN (link))
+ IDENTIFIER_CLASS_VALUE (TREE_PURPOSE (link)) = TREE_VALUE (link);
+ for (link = current_binding_level->type_shadowed;
+ link; link = TREE_CHAIN (link))
+ IDENTIFIER_TYPE_VALUE (TREE_PURPOSE (link)) = TREE_VALUE (link);
+
+ /* If the level being exited is the top level of a function,
+ check over all the labels. */
+
+ if (functionbody)
+ {
+ /* If this is the top level block of a function,
+ the vars are the function's parameters.
+ Don't leave them in the BLOCK because they are
+ found in the FUNCTION_DECL instead. */
+
+ BLOCK_VARS (block) = 0;
+
+ /* Clear out the definitions of all label names,
+ since their scopes end here. */
+
+ for (link = named_labels; link; link = TREE_CHAIN (link))
+ {
+ register tree label = TREE_VALUE (link);
+
+ if (DECL_INITIAL (label) == NULL_TREE)
+ {
+ cp_error_at ("label `%D' used but not defined", label);
+ /* Avoid crashing later. */
+ define_label (input_filename, 1, DECL_NAME (label));
+ }
+ else if (warn_unused && !TREE_USED (label))
+ cp_warning_at ("label `%D' defined but not used", label);
+ SET_IDENTIFIER_LABEL_VALUE (DECL_NAME (label), NULL_TREE);
+
+ /* Put the labels into the "variables" of the
+ top-level block, so debugger can see them. */
+ TREE_CHAIN (label) = BLOCK_VARS (block);
+ BLOCK_VARS (block) = label;
+ }
+
+ named_labels = NULL_TREE;
+ }
+
+ /* Any uses of undefined labels now operate under constraints
+ of next binding contour. */
+ {
+ struct binding_level *level_chain;
+ level_chain = current_binding_level->level_chain;
+ if (level_chain)
+ {
+ tree labels;
+ for (labels = named_label_uses; labels; labels = TREE_CHAIN (labels))
+ if (TREE_TYPE (labels) == (tree)current_binding_level)
+ {
+ TREE_TYPE (labels) = (tree)level_chain;
+ TREE_PURPOSE (labels) = level_chain->names;
+ }
+ }
+ }
+
+ tmp = current_binding_level->keep;
+
+ pop_binding_level ();
+ if (functionbody)
+ DECL_INITIAL (current_function_decl) = block;
+ else if (block)
+ {
+ if (!block_previously_created)
+ current_binding_level->blocks
+ = chainon (current_binding_level->blocks, block);
+ }
+ /* If we did not make a block for the level just exited,
+ any blocks made for inner levels
+ (since they cannot be recorded as subblocks in that level)
+ must be carried forward so they will later become subblocks
+ of something else. */
+ else if (subblocks)
+ {
+ if (keep == 2)
+ current_binding_level->blocks
+ = chainon (subblocks, current_binding_level->blocks);
+ else
+ current_binding_level->blocks
+ = chainon (current_binding_level->blocks, subblocks);
+ }
+
+ /* Take care of compiler's internal binding structures. */
+ if (tmp == 2)
+ {
+#if 0
+ /* We did not call push_momentary for this
+ binding contour, so there is nothing to pop. */
+ pop_momentary ();
+#endif
+ expand_end_bindings (getdecls (), keep, 1);
+ /* Each and every BLOCK node created here in `poplevel' is important
+ (e.g. for proper debugging information) so if we created one
+ earlier, mark it as "used". */
+ if (block)
+ TREE_USED (block) = 1;
+ block = poplevel (keep, reverse, real_functionbody);
+ }
+
+ /* Each and every BLOCK node created here in `poplevel' is important
+ (e.g. for proper debugging information) so if we created one
+ earlier, mark it as "used". */
+ if (block)
+ TREE_USED (block) = 1;
+ return block;
+}
+
+/* Delete the node BLOCK from the current binding level.
+ This is used for the block inside a stmt expr ({...})
+ so that the block can be reinserted where appropriate. */
+
+void
+delete_block (block)
+ tree block;
+{
+ tree t;
+ if (current_binding_level->blocks == block)
+ current_binding_level->blocks = TREE_CHAIN (block);
+ for (t = current_binding_level->blocks; t;)
+ {
+ if (TREE_CHAIN (t) == block)
+ TREE_CHAIN (t) = TREE_CHAIN (block);
+ else
+ t = TREE_CHAIN (t);
+ }
+ TREE_CHAIN (block) = NULL_TREE;
+ /* Clear TREE_USED which is always set by poplevel.
+ The flag is set again if insert_block is called. */
+ TREE_USED (block) = 0;
+}
+
+/* Insert BLOCK at the end of the list of subblocks of the
+ current binding level. This is used when a BIND_EXPR is expanded,
+ to handle the BLOCK node inside the BIND_EXPR. */
+
+void
+insert_block (block)
+ tree block;
+{
+ TREE_USED (block) = 1;
+ current_binding_level->blocks
+ = chainon (current_binding_level->blocks, block);
+}
+
+/* Add BLOCK to the current list of blocks for this binding contour. */
+void
+add_block_current_level (block)
+ tree block;
+{
+ current_binding_level->blocks
+ = chainon (current_binding_level->blocks, block);
+}
+
+/* Set the BLOCK node for the innermost scope
+ (the one we are currently in). */
+
+void
+set_block (block)
+ register tree block;
+{
+ current_binding_level->this_block = block;
+}
+
+/* Do a pushlevel for class declarations. */
+void
+pushlevel_class ()
+{
+ register struct binding_level *newlevel;
+
+ /* Reuse or create a struct for this binding level. */
+#if defined(DEBUG_CP_BINDING_LEVELS)
+ if (0)
+#else /* !defined(DEBUG_CP_BINDING_LEVELS) */
+ if (free_binding_level)
+#endif /* !defined(DEBUG_CP_BINDING_LEVELS) */
+ {
+ newlevel = free_binding_level;
+ free_binding_level = free_binding_level->level_chain;
+ }
+ else
+ {
+ /* Create a new `struct binding_level'. */
+ newlevel = (struct binding_level *) xmalloc (sizeof (struct binding_level));
+ }
+
+#if defined(DEBUG_CP_BINDING_LEVELS)
+ is_class_level = 1;
+#endif /* defined(DEBUG_CP_BINDING_LEVELS) */
+
+ push_binding_level (newlevel, 0, 0);
+
+ decl_stack = push_decl_level (decl_stack, &decl_obstack);
+ class_binding_level = current_binding_level;
+ class_binding_level->parm_flag = 2;
+ /* We have just pushed into a new binding level. Now, fake out the rest
+ of the compiler. Set the `current_binding_level' back to point to
+ the most closely containing non-class binding level. */
+ do
+ {
+ current_binding_level = current_binding_level->level_chain;
+ }
+ while (current_binding_level->parm_flag == 2);
+}
+
+/* ...and a poplevel for class declarations. FORCE is used to force
+ clearing out of CLASS_VALUEs after a class definition. */
+tree
+poplevel_class (force)
+ int force;
+{
+ register struct binding_level *level = class_binding_level;
+ tree block = NULL_TREE;
+ tree shadowed;
+
+ my_friendly_assert (level != 0, 354);
+
+ decl_stack = pop_stack_level (decl_stack);
+ for (shadowed = level->shadowed; shadowed; shadowed = TREE_CHAIN (shadowed))
+ IDENTIFIER_LOCAL_VALUE (TREE_PURPOSE (shadowed)) = TREE_VALUE (shadowed);
+  /* If we're leaving a toplevel class, don't bother setting
+     IDENTIFIER_CLASS_VALUE to NULL_TREE, since first of all this slot
+     shouldn't even be used when current_class_type isn't set, and second,
+     if we don't touch it here, we can exploit the caching effect when
+     the next class scope we enter is the same class. */
+ if (current_class_depth != 1 || force)
+ for (shadowed = level->class_shadowed;
+ shadowed;
+ shadowed = TREE_CHAIN (shadowed))
+ IDENTIFIER_CLASS_VALUE (TREE_PURPOSE (shadowed)) = TREE_VALUE (shadowed);
+ else
+    /* Remember to save what IDENTIFIERs were bound in this scope so we
+       can recover from cache misses. */
+ previous_class_values = class_binding_level->class_shadowed;
+ for (shadowed = level->type_shadowed;
+ shadowed;
+ shadowed = TREE_CHAIN (shadowed))
+ IDENTIFIER_TYPE_VALUE (TREE_PURPOSE (shadowed)) = TREE_VALUE (shadowed);
+
+ GNU_xref_end_scope ((HOST_WIDE_INT) class_binding_level,
+ (HOST_WIDE_INT) class_binding_level->level_chain,
+ class_binding_level->parm_flag,
+ class_binding_level->keep,
+ class_binding_level->tag_transparent);
+
+ if (class_binding_level->parm_flag != 2)
+ class_binding_level = (struct binding_level *)0;
+
+  /* Now, pop out of the binding level which we created up in the
+     `pushlevel_class' routine. */
+#if defined(DEBUG_CP_BINDING_LEVELS)
+ is_class_level = 1;
+#endif /* defined(DEBUG_CP_BINDING_LEVELS) */
+
+ pop_binding_level ();
+
+ return block;
+}
+
+/* For debugging. */
+int no_print_functions = 0;
+int no_print_builtins = 0;
+
+void
+print_binding_level (lvl)
+ struct binding_level *lvl;
+{
+ tree t;
+ int i = 0, len;
+ fprintf (stderr, " blocks=");
+ fprintf (stderr, HOST_PTR_PRINTF, lvl->blocks);
+ fprintf (stderr, " n_incomplete=%d parm_flag=%d keep=%d",
+ lvl->n_incomplete, lvl->parm_flag, lvl->keep);
+ if (lvl->tag_transparent)
+ fprintf (stderr, " tag-transparent");
+ if (lvl->more_cleanups_ok)
+ fprintf (stderr, " more-cleanups-ok");
+ if (lvl->have_cleanups)
+ fprintf (stderr, " have-cleanups");
+ fprintf (stderr, "\n");
+ if (lvl->names)
+ {
+ fprintf (stderr, " names:\t");
+ /* We can probably fit 3 names to a line? */
+ for (t = lvl->names; t; t = TREE_CHAIN (t))
+ {
+ if (no_print_functions && (TREE_CODE(t) == FUNCTION_DECL))
+ continue;
+ if (no_print_builtins
+ && (TREE_CODE(t) == TYPE_DECL)
+ && (!strcmp(DECL_SOURCE_FILE(t),"<built-in>")))
+ continue;
+
+ /* Function decls tend to have longer names. */
+ if (TREE_CODE (t) == FUNCTION_DECL)
+ len = 3;
+ else
+ len = 2;
+ i += len;
+ if (i > 6)
+ {
+ fprintf (stderr, "\n\t");
+ i = len;
+ }
+ print_node_brief (stderr, "", t, 0);
+ if (TREE_CODE (t) == ERROR_MARK)
+ break;
+ }
+ if (i)
+ fprintf (stderr, "\n");
+ }
+ if (lvl->tags)
+ {
+ fprintf (stderr, " tags:\t");
+ i = 0;
+ for (t = lvl->tags; t; t = TREE_CHAIN (t))
+ {
+ if (TREE_PURPOSE (t) == NULL_TREE)
+ len = 3;
+ else if (TREE_PURPOSE (t) == TYPE_IDENTIFIER (TREE_VALUE (t)))
+ len = 2;
+ else
+ len = 4;
+ i += len;
+ if (i > 5)
+ {
+ fprintf (stderr, "\n\t");
+ i = len;
+ }
+ if (TREE_PURPOSE (t) == NULL_TREE)
+ {
+ print_node_brief (stderr, "<unnamed-typedef", TREE_VALUE (t), 0);
+ fprintf (stderr, ">");
+ }
+ else if (TREE_PURPOSE (t) == TYPE_IDENTIFIER (TREE_VALUE (t)))
+ print_node_brief (stderr, "", TREE_VALUE (t), 0);
+ else
+ {
+ print_node_brief (stderr, "<typedef", TREE_PURPOSE (t), 0);
+ print_node_brief (stderr, "", TREE_VALUE (t), 0);
+ fprintf (stderr, ">");
+ }
+ }
+ if (i)
+ fprintf (stderr, "\n");
+ }
+ if (lvl->shadowed)
+ {
+ fprintf (stderr, " shadowed:");
+ for (t = lvl->shadowed; t; t = TREE_CHAIN (t))
+ {
+ fprintf (stderr, " %s ", IDENTIFIER_POINTER (TREE_PURPOSE (t)));
+ }
+ fprintf (stderr, "\n");
+ }
+ if (lvl->class_shadowed)
+ {
+ fprintf (stderr, " class-shadowed:");
+ for (t = lvl->class_shadowed; t; t = TREE_CHAIN (t))
+ {
+ fprintf (stderr, " %s ", IDENTIFIER_POINTER (TREE_PURPOSE (t)));
+ }
+ fprintf (stderr, "\n");
+ }
+ if (lvl->type_shadowed)
+ {
+ fprintf (stderr, " type-shadowed:");
+ for (t = lvl->type_shadowed; t; t = TREE_CHAIN (t))
+ {
+#if 0
+ fprintf (stderr, "\n\t");
+ print_node_brief (stderr, "<", TREE_PURPOSE (t), 0);
+ if (TREE_VALUE (t))
+ print_node_brief (stderr, " ", TREE_VALUE (t), 0);
+ else
+ fprintf (stderr, " (none)");
+ fprintf (stderr, ">");
+#else
+ fprintf (stderr, " %s ", IDENTIFIER_POINTER (TREE_PURPOSE (t)));
+#endif
+ }
+ fprintf (stderr, "\n");
+ }
+}
+
+void
+print_other_binding_stack (stack)
+ struct binding_level *stack;
+{
+ struct binding_level *level;
+ for (level = stack; level != global_binding_level; level = level->level_chain)
+ {
+ fprintf (stderr, "binding level ");
+ fprintf (stderr, HOST_PTR_PRINTF, level);
+ fprintf (stderr, "\n");
+ print_binding_level (level);
+ }
+}
+
+void
+print_binding_stack ()
+{
+ struct binding_level *b;
+ fprintf (stderr, "current_binding_level=");
+ fprintf (stderr, HOST_PTR_PRINTF, current_binding_level);
+ fprintf (stderr, "\nclass_binding_level=");
+ fprintf (stderr, HOST_PTR_PRINTF, class_binding_level);
+ fprintf (stderr, "\nglobal_binding_level=");
+ fprintf (stderr, HOST_PTR_PRINTF, global_binding_level);
+ fprintf (stderr, "\n");
+ if (class_binding_level)
+ {
+ for (b = class_binding_level; b; b = b->level_chain)
+ if (b == current_binding_level)
+ break;
+ if (b)
+ b = class_binding_level;
+ else
+ b = current_binding_level;
+ }
+ else
+ b = current_binding_level;
+ print_other_binding_stack (b);
+ fprintf (stderr, "global:\n");
+ print_binding_level (global_binding_level);
+}
+
+/* Subroutines for reverting temporarily to top-level for instantiation
+ of templates and such. We actually need to clear out the class- and
+ local-value slots of all identifiers, so that only the global values
+ are at all visible. Simply setting current_binding_level to the global
+ scope isn't enough, because more binding levels may be pushed. */
+struct saved_scope {
+ struct binding_level *old_binding_level;
+ tree old_bindings;
+ struct saved_scope *prev;
+ tree class_name, class_type, class_decl, function_decl;
+ struct binding_level *class_bindings;
+ tree previous_class_type;
+ tree *lang_base, *lang_stack, lang_name;
+ int lang_stacksize;
+ tree named_labels;
+};
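+
+/* An illustrative pairing (hypothetical caller): code that must work
+   at file scope brackets itself with
+
+	push_to_top_level ();
+	... instantiate or declare at file scope ...
+	pop_from_top_level ();
+
+   so class- and local-value slots are cleared while it runs and
+   restored afterwards from the saved_scope chain.  */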
+static struct saved_scope *current_saved_scope;
+extern tree prev_class_type;
+
+void
+push_to_top_level ()
+{
+ extern int current_lang_stacksize;
+ struct saved_scope *s =
+ (struct saved_scope *) xmalloc (sizeof (struct saved_scope));
+ struct binding_level *b = current_binding_level;
+ tree old_bindings = NULL_TREE;
+
+ /* Have to include global_binding_level, because class-level decls
+ aren't listed anywhere useful. */
+ for (; b; b = b->level_chain)
+ {
+ tree t;
+
+ if (b == global_binding_level)
+ continue;
+
+ for (t = b->names; t; t = TREE_CHAIN (t))
+ {
+ tree binding, t1, t2 = t;
+ tree id = DECL_ASSEMBLER_NAME (t2);
+
+ if (!id
+ || (!IDENTIFIER_LOCAL_VALUE (id)
+ && !IDENTIFIER_CLASS_VALUE (id)))
+ continue;
+
+ for (t1 = old_bindings; t1; t1 = TREE_CHAIN (t1))
+ if (TREE_VEC_ELT (t1, 0) == id)
+ goto skip_it;
+
+ binding = make_tree_vec (4);
+ if (id)
+ {
+ my_friendly_assert (TREE_CODE (id) == IDENTIFIER_NODE, 135);
+ TREE_VEC_ELT (binding, 0) = id;
+ TREE_VEC_ELT (binding, 1) = IDENTIFIER_TYPE_VALUE (id);
+ TREE_VEC_ELT (binding, 2) = IDENTIFIER_LOCAL_VALUE (id);
+ TREE_VEC_ELT (binding, 3) = IDENTIFIER_CLASS_VALUE (id);
+ IDENTIFIER_LOCAL_VALUE (id) = NULL_TREE;
+ IDENTIFIER_CLASS_VALUE (id) = NULL_TREE;
+ }
+ TREE_CHAIN (binding) = old_bindings;
+ old_bindings = binding;
+ skip_it:
+ ;
+ }
+ /* Unwind type-value slots back to top level. */
+ for (t = b->type_shadowed; t; t = TREE_CHAIN (t))
+ SET_IDENTIFIER_TYPE_VALUE (TREE_PURPOSE (t), TREE_VALUE (t));
+ }
+ /* Clear out class-level bindings cache. */
+ if (current_binding_level == global_binding_level
+ && previous_class_type != NULL_TREE)
+ {
+ popclass (-1);
+ previous_class_type = NULL_TREE;
+ }
+
+ s->old_binding_level = current_binding_level;
+ current_binding_level = global_binding_level;
+
+ s->class_name = current_class_name;
+ s->class_type = current_class_type;
+ s->class_decl = current_class_decl;
+ s->function_decl = current_function_decl;
+ s->class_bindings = class_binding_level;
+ s->previous_class_type = previous_class_type;
+ s->lang_stack = current_lang_stack;
+ s->lang_base = current_lang_base;
+ s->lang_stacksize = current_lang_stacksize;
+ s->lang_name = current_lang_name;
+ s->named_labels = named_labels;
+ current_class_name = current_class_type = current_class_decl = NULL_TREE;
+ current_function_decl = NULL_TREE;
+ class_binding_level = (struct binding_level *)0;
+ previous_class_type = NULL_TREE;
+ current_lang_stacksize = 10;
+ current_lang_stack = current_lang_base
+ = (tree *) xmalloc (current_lang_stacksize * sizeof (tree));
+ current_lang_name = lang_name_cplusplus;
+ strict_prototype = strict_prototypes_lang_cplusplus;
+ named_labels = NULL_TREE;
+
+ s->prev = current_saved_scope;
+ s->old_bindings = old_bindings;
+ current_saved_scope = s;
+}
+
+void
+pop_from_top_level ()
+{
+ extern int current_lang_stacksize;
+ struct saved_scope *s = current_saved_scope;
+ tree t;
+
+ if (previous_class_type)
+ previous_class_type = NULL_TREE;
+
+ current_binding_level = s->old_binding_level;
+ current_saved_scope = s->prev;
+ for (t = s->old_bindings; t; t = TREE_CHAIN (t))
+ {
+ tree id = TREE_VEC_ELT (t, 0);
+ if (id)
+ {
+ IDENTIFIER_TYPE_VALUE (id) = TREE_VEC_ELT (t, 1);
+ IDENTIFIER_LOCAL_VALUE (id) = TREE_VEC_ELT (t, 2);
+ IDENTIFIER_CLASS_VALUE (id) = TREE_VEC_ELT (t, 3);
+ }
+ }
+ current_class_name = s->class_name;
+ current_class_type = s->class_type;
+ current_class_decl = s->class_decl;
+ if (current_class_type)
+ C_C_D = CLASSTYPE_INST_VAR (current_class_type);
+ else
+ C_C_D = NULL_TREE;
+ current_function_decl = s->function_decl;
+ class_binding_level = s->class_bindings;
+ previous_class_type = s->previous_class_type;
+ free (current_lang_base);
+ current_lang_base = s->lang_base;
+ current_lang_stack = s->lang_stack;
+ current_lang_name = s->lang_name;
+ current_lang_stacksize = s->lang_stacksize;
+ if (current_lang_name == lang_name_cplusplus)
+ strict_prototype = strict_prototypes_lang_cplusplus;
+ else if (current_lang_name == lang_name_c)
+ strict_prototype = strict_prototypes_lang_c;
+ named_labels = s->named_labels;
+
+ free (s);
+}
+
+/* Push a definition of struct, union or enum tag "name"
+   into binding_level "b".  "type" should be the type node.
+   We assume that the tag "name" is not already defined.
+
+ Note that the definition may really be just a forward reference.
+ In that case, the TYPE_SIZE will be a NULL_TREE.
+
+ C++ gratuitously puts all these tags in the name space. */
+
+/* When setting the IDENTIFIER_TYPE_VALUE field of an identifier ID,
+ record the shadowed value for this binding contour. TYPE is
+ the type that ID maps to. */
+
+static void
+set_identifier_type_value_with_scope (id, type, b)
+ tree id;
+ tree type;
+ struct binding_level *b;
+{
+ if (b != global_binding_level)
+ {
+ tree old_type_value = IDENTIFIER_TYPE_VALUE (id);
+ b->type_shadowed
+ = tree_cons (id, old_type_value, b->type_shadowed);
+ }
+ SET_IDENTIFIER_TYPE_VALUE (id, type);
+}
+
+/* As set_identifier_type_value_with_scope, but using inner_binding_level. */
+
+void
+set_identifier_type_value (id, type)
+ tree id;
+ tree type;
+{
+ set_identifier_type_value_with_scope (id, type, inner_binding_level);
+}
+
+/* Subroutine "set_nested_typename" builds the nested-typename of
+ the type decl in question. (Argument CLASSNAME can actually be
+ a function as well, if that's the smallest containing scope.) */
+
+static void
+set_nested_typename (decl, classname, name, type)
+ tree decl, classname, name, type;
+{
+ my_friendly_assert (TREE_CODE (decl) == TYPE_DECL, 136);
+ if (classname != NULL_TREE)
+ {
+ char *buf;
+ my_friendly_assert (TREE_CODE (classname) == IDENTIFIER_NODE, 137);
+ my_friendly_assert (TREE_CODE (name) == IDENTIFIER_NODE, 138);
+ buf = (char *) alloca (4 + IDENTIFIER_LENGTH (classname)
+ + IDENTIFIER_LENGTH (name));
+ sprintf (buf, "%s::%s", IDENTIFIER_POINTER (classname),
+ IDENTIFIER_POINTER (name));
+ DECL_NESTED_TYPENAME (decl) = get_identifier (buf);
+ TREE_MANGLED (DECL_NESTED_TYPENAME (decl)) = 1;
+
+      /* This is a special usage of IDENTIFIER_TYPE_VALUE which has no
+	 correspondence in any binding_level.  This is ok since the
+ DECL_NESTED_TYPENAME is just a convenience identifier whose
+ IDENTIFIER_TYPE_VALUE will remain constant from now on. */
+ SET_IDENTIFIER_TYPE_VALUE (DECL_NESTED_TYPENAME (decl), type);
+ }
+ else
+ DECL_NESTED_TYPENAME (decl) = name;
+}
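+
+/* Illustrative result: for
+
+	class Outer { class Inner; };
+
+   the TYPE_DECL for Inner gets DECL_NESTED_TYPENAME "Outer::Inner",
+   built by the sprintf above; a toplevel type just keeps NAME.  */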
+
+/* Pop off extraneous binding levels left over due to syntax errors. */
+void
+pop_everything ()
+{
+#ifdef DEBUG_CP_BINDING_LEVELS
+ fprintf (stderr, "XXX entering pop_everything ()\n");
+#endif
+ while (current_binding_level != global_binding_level
+ && ! current_binding_level->pseudo_global)
+ {
+ if (class_binding_level)
+ pop_nested_class (1);
+ else
+ poplevel (0, 0, 0);
+ }
+#ifdef DEBUG_CP_BINDING_LEVELS
+ fprintf (stderr, "XXX leaving pop_everything ()\n");
+#endif
+}
+
+#if 0 /* not yet, should get fixed properly later */
+/* Create a TYPE_DECL node with the correct DECL_ASSEMBLER_NAME.
+ Other routines shouldn't use build_decl directly; they'll produce
+ incorrect results with `-g' unless they duplicate this code.
+
+ This is currently needed mainly for dbxout.c, but we can make
+ use of it in method.c later as well. */
+tree
+make_type_decl (name, type)
+ tree name, type;
+{
+ tree decl, id;
+ decl = build_decl (TYPE_DECL, name, type);
+ if (TYPE_NAME (type) == name)
+ /* Class/union/enum definition, or a redundant typedef for same. */
+ {
+ id = get_identifier (build_overload_name (type, 1, 1));
+ DECL_ASSEMBLER_NAME (decl) = id;
+ }
+ else if (TYPE_NAME (type) != NULL_TREE)
+ /* Explicit typedef, or implicit typedef for template expansion. */
+ DECL_ASSEMBLER_NAME (decl) = DECL_ASSEMBLER_NAME (TYPE_NAME (type));
+ else
+ {
+ /* XXX: Typedef for unnamed struct; some other situations.
+ TYPE_NAME is null; what's right here? */
+ }
+ return decl;
+}
+#endif
+
+/* Push a tag name NAME for struct/class/union/enum type TYPE.
+ Normally put into the inner-most non-tag-transparent scope,
+ but if GLOBALIZE is true, put it in the inner-most non-class scope.
+ The latter is needed for implicit declarations. */
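+
+/* An illustrative case for GLOBALIZE (example source, not compiler code):
+     class C { struct D *p; };
+   with no prior declaration of `D' implicitly declares the tag `D',
+   which belongs in the enclosing non-class scope, not inside `C'. */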
+
+void
+pushtag (name, type, globalize)
+ tree name, type;
+ int globalize;
+{
+ register struct binding_level *b;
+ tree context = 0;
+ tree c_decl = 0;
+
+ b = inner_binding_level;
+ while (b->tag_transparent
+ || (globalize && b->parm_flag == 2))
+ b = b->level_chain;
+
+ if (b == global_binding_level)
+ b->tags = perm_tree_cons (name, type, b->tags);
+ else
+ b->tags = saveable_tree_cons (name, type, b->tags);
+
+ if (name)
+ {
+ context = type ? TYPE_CONTEXT (type) : NULL_TREE;
+ if (! context && ! globalize)
+ context = current_scope ();
+ if (context)
+ c_decl = TREE_CODE (context) == FUNCTION_DECL
+ ? context : TYPE_NAME (context);
+
+ /* Record the identifier as the type's name if it has none. */
+ if (TYPE_NAME (type) == NULL_TREE)
+ TYPE_NAME (type) = name;
+
+ /* Do C++ gratuitous typedefing. */
+ if (IDENTIFIER_TYPE_VALUE (name) != type
+ && (TREE_CODE (type) != RECORD_TYPE
+ || b->parm_flag != 2
+ || !CLASSTYPE_DECLARED_EXCEPTION (type)))
+ {
+ register tree d;
+ int newdecl = 0;
+
+ if (b->parm_flag != 2
+ || TYPE_SIZE (current_class_type) != NULL_TREE)
+ {
+ d = lookup_nested_type (type, c_decl);
+
+ if (d == NULL_TREE)
+ {
+ newdecl = 1;
+#if 0 /* not yet, should get fixed properly later */
+ d = make_type_decl (name, type);
+#else
+ d = build_decl (TYPE_DECL, name, type);
+#endif
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ {
+ /* Mark the TYPE_DECL node we created just above as a
+ gratuitous one. We need to do this so that dwarfout.c
+ will understand that it is not supposed to output a
+ TAG_typedef DIE for it. */
+ DECL_IGNORED_P (d) = 1;
+ }
+#endif /* DWARF_DEBUGGING_INFO */
+ set_identifier_type_value_with_scope (name, type, b);
+ }
+ else
+ d = TYPE_NAME (d);
+
+ /* If it is anonymous, then we are called from pushdecl,
+ and we don't want to infinitely recurse. Also, if the
+ name is already in scope, we don't want to push it
+ again--pushdecl is only for pushing new decls. */
+ if (! ANON_AGGRNAME_P (name)
+ && TYPE_NAME (type)
+ && (TREE_CODE (TYPE_NAME (type)) != TYPE_DECL
+ || lookup_name (name, 1) != TYPE_NAME (type)))
+ {
+ if (b->parm_flag == 2)
+ d = pushdecl_class_level (d);
+ else
+ d = pushdecl_with_scope (d, b);
+ }
+ }
+ else
+ {
+ /* Make nested declarations go into class-level scope. */
+ newdecl = 1;
+ d = build_decl (TYPE_DECL, name, type);
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ {
+ /* Mark the TYPE_DECL node we created just above as a
+ gratuitous one. We need to do this so that dwarfout.c
+ will understand that it is not supposed to output a
+ TAG_typedef DIE for it. */
+ DECL_IGNORED_P (d) = 1;
+ }
+#endif /* DWARF_DEBUGGING_INFO */
+ /* Make sure we're in this type's scope when we push the
+ decl for a template, otherwise class_binding_level will
+ be NULL and we'll end up dying inside of
+ push_class_level_binding. */
+ if (TREE_CODE (type) == UNINSTANTIATED_P_TYPE)
+ pushclass (type, 0);
+ d = pushdecl_class_level (d);
+ if (TREE_CODE (type) == UNINSTANTIATED_P_TYPE)
+ popclass (0);
+ }
+ if (write_symbols != DWARF_DEBUG)
+ {
+ if (ANON_AGGRNAME_P (name))
+ DECL_IGNORED_P (d) = 1;
+ }
+ TYPE_NAME (type) = d;
+
+ if (context == NULL_TREE)
+ /* Non-nested class. */
+ DECL_NESTED_TYPENAME (d) = name;
+ else if (context && TREE_CODE (context) == FUNCTION_DECL)
+ {
+ /* Function-nested class. */
+ set_nested_typename (d, DECL_ASSEMBLER_NAME (c_decl),
+ name, type);
+ /* This builds the links for classes nested in fn scope. */
+ DECL_CONTEXT (d) = context;
+ }
+/* else if (TYPE_SIZE (current_class_type) == NULL_TREE)
+*/
+ else if (context && IS_AGGR_TYPE (context))
+ {
+ /* Class-nested class. */
+ set_nested_typename (d, DECL_NESTED_TYPENAME (c_decl),
+ name, type);
+ /* This builds the links for classes nested in type scope. */
+ DECL_CONTEXT (d) = context;
+ }
+ TYPE_CONTEXT (type) = DECL_CONTEXT (d);
+ if (newdecl)
+ DECL_ASSEMBLER_NAME (d)
+ = get_identifier (build_overload_name (type, 1, 1));
+ }
+ if (b->parm_flag == 2)
+ {
+ TREE_NONLOCAL_FLAG (type) = 1;
+ if (TYPE_SIZE (current_class_type) == NULL_TREE)
+ CLASSTYPE_TAGS (current_class_type) = b->tags;
+ }
+ }
+
+ if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL)
+ /* Use the canonical TYPE_DECL for this node. */
+ TYPE_STUB_DECL (type) = TYPE_NAME (type);
+ else
+ {
+ /* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE
+ will be the tagged type we just added to the current
+ binding level. This fake NULL-named TYPE_DECL node helps
+ dwarfout.c to know when it needs to output a
+ representation of a tagged type, and it also gives us a
+ convenient place to record the "scope start" address for
+ the tagged type. */
+
+#if 0 /* not yet, should get fixed properly later */
+ tree d = make_type_decl (NULL_TREE, type);
+#else
+ tree d = build_decl (TYPE_DECL, NULL_TREE, type);
+#endif
+ TYPE_STUB_DECL (type) = pushdecl_with_scope (d, b);
+ }
+}
+
+/* Counter used to create anonymous type names. */
+static int anon_cnt = 0;
+
+/* Return an IDENTIFIER which can be used as a name for
+ anonymous structs and unions. */
+tree
+make_anon_name ()
+{
+ char buf[32];
+
+ sprintf (buf, ANON_AGGRNAME_FORMAT, anon_cnt++);
+ return get_identifier (buf);
+}
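+
+/* Usage sketch (illustrative): each call yields a fresh identifier,
+   formatted from ANON_AGGRNAME_FORMAT with 0, 1, 2, ..., so every
+   anonymous aggregate gets a distinct, recognizable tag. */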
+
+/* Clear the TREE_PURPOSE slot of tags which have anonymous typenames.
+ This keeps dbxout from getting confused. */
+void
+clear_anon_tags ()
+{
+ register struct binding_level *b;
+ register tree tags;
+ static int last_cnt = 0;
+
+ /* Fast out if no new anon names were declared. */
+ if (last_cnt == anon_cnt)
+ return;
+
+ b = current_binding_level;
+ while (b->tag_transparent)
+ b = b->level_chain;
+ tags = b->tags;
+ while (tags)
+ {
+ /* A NULL purpose means we have already processed all tags
+ from here to the end of the list. */
+ if (TREE_PURPOSE (tags) == NULL_TREE)
+ break;
+ if (ANON_AGGRNAME_P (TREE_PURPOSE (tags)))
+ TREE_PURPOSE (tags) = NULL_TREE;
+ tags = TREE_CHAIN (tags);
+ }
+ last_cnt = anon_cnt;
+}
+
+/* Subroutine of duplicate_decls: return truthvalue of whether
+ or not types of these decls match.
+
+ For C++, we must compare the parameter list so that `int' can match
+ `int&' in a parameter position, but `int&' is not confused with
+ `const int&'. */
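+/* For example (illustrative declarations): the parameter lists of
+     int f (int);    and    int f (int&);
+   are treated as matching, while those of
+     int g (int&);   and    int g (const int&);
+   are kept distinct. */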
+static int
+decls_match (newdecl, olddecl)
+ tree newdecl, olddecl;
+{
+ int types_match;
+
+ if (TREE_CODE (newdecl) == FUNCTION_DECL
+ && TREE_CODE (olddecl) == FUNCTION_DECL)
+ {
+ tree f1 = TREE_TYPE (newdecl);
+ tree f2 = TREE_TYPE (olddecl);
+ tree p1 = TYPE_ARG_TYPES (f1);
+ tree p2 = TYPE_ARG_TYPES (f2);
+
+ /* When we parse a static member function definition,
+ we put together a FUNCTION_DECL which thinks its type
+ is METHOD_TYPE. Change that to FUNCTION_TYPE, and
+ proceed. */
+ if (TREE_CODE (f1) == METHOD_TYPE && DECL_STATIC_FUNCTION_P (olddecl))
+ revert_static_member_fn (&newdecl, &f1, &p1);
+ else if (TREE_CODE (f2) == METHOD_TYPE
+ && DECL_STATIC_FUNCTION_P (newdecl))
+ revert_static_member_fn (&olddecl, &f2, &p2);
+
+ /* Here we must take care of the case where new default
+ parameters are specified. Also, warn if default
+ parameters make an old declaration ambiguous
+ with the new one. */
+ if (TREE_CODE (f1) != TREE_CODE (f2))
+ {
+ if (TREE_CODE (f1) == OFFSET_TYPE)
+ cp_compiler_error ("`%D' redeclared as member function", newdecl);
+ else
+ cp_compiler_error ("`%D' redeclared as non-member function", newdecl);
+ return 0;
+ }
+
+ if (comptypes (TREE_TYPE (f1), TREE_TYPE (f2), 1))
+ {
+ if (! strict_prototypes_lang_c && DECL_LANGUAGE (olddecl) == lang_c
+ && p2 == NULL_TREE)
+ {
+ types_match = self_promoting_args_p (p1);
+ if (p1 == void_list_node)
+ TREE_TYPE (newdecl) = TREE_TYPE (olddecl);
+ }
+ else if (!strict_prototypes_lang_c && DECL_LANGUAGE (olddecl)==lang_c
+ && DECL_LANGUAGE (newdecl) == lang_c && p1 == NULL_TREE)
+ {
+ types_match = self_promoting_args_p (p2);
+ TREE_TYPE (newdecl) = TREE_TYPE (olddecl);
+ }
+ else
+ types_match = compparms (p1, p2, 3);
+ }
+ else
+ types_match = 0;
+ }
+ else if (TREE_CODE (newdecl) == TEMPLATE_DECL
+ && TREE_CODE (olddecl) == TEMPLATE_DECL)
+ {
+ tree newargs = DECL_TEMPLATE_PARMS (newdecl);
+ tree oldargs = DECL_TEMPLATE_PARMS (olddecl);
+ int i, len = TREE_VEC_LENGTH (newargs);
+
+ if (TREE_VEC_LENGTH (oldargs) != len)
+ return 0;
+
+ for (i = 0; i < len; i++)
+ {
+ tree newarg = TREE_VEC_ELT (newargs, i);
+ tree oldarg = TREE_VEC_ELT (oldargs, i);
+ if (TREE_CODE (newarg) != TREE_CODE (oldarg))
+ return 0;
+ else if (TREE_CODE (newarg) == IDENTIFIER_NODE)
+ /* continue */;
+ else if (! comptypes (TREE_TYPE (newarg), TREE_TYPE (oldarg), 1))
+ return 0;
+ }
+
+ if (DECL_TEMPLATE_IS_CLASS (newdecl)
+ != DECL_TEMPLATE_IS_CLASS (olddecl))
+ types_match = 0;
+ else if (DECL_TEMPLATE_IS_CLASS (newdecl))
+ types_match = 1;
+ else
+ types_match = decls_match (DECL_TEMPLATE_RESULT (olddecl),
+ DECL_TEMPLATE_RESULT (newdecl));
+ }
+ else
+ {
+ if (TREE_TYPE (newdecl) == error_mark_node)
+ types_match = TREE_TYPE (olddecl) == error_mark_node;
+ else if (TREE_TYPE (olddecl) == NULL_TREE)
+ types_match = TREE_TYPE (newdecl) == NULL_TREE;
+ else if (TREE_TYPE (newdecl) == NULL_TREE)
+ types_match = 0;
+ else
+ types_match = comptypes (TREE_TYPE (newdecl), TREE_TYPE (olddecl), 1);
+ }
+
+ return types_match;
+}
+
+/* If NEWDECL is `static' and an `extern' was seen previously,
+ warn about it. (OLDDECL may be NULL_TREE; NAME contains
+ information about previous usage as an `extern'.)
+
+ Note that this does not apply to the C++ case of declaring
+ a variable `extern const' and then later `const'.
+
+ Don't complain if -traditional, since traditional compilers
+ don't complain.
+
+ Don't complain about built-in functions, since they are beyond
+ the user's control. */
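+
+/* For example (illustrative): the sequence
+     extern int counter;  ...  static int counter;
+   draws the explicit warning below, while a call to an undeclared
+   function followed by a `static' definition of it draws the
+   implicit variant. */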
+
+static void
+warn_extern_redeclared_static (newdecl, olddecl)
+ tree newdecl, olddecl;
+{
+ tree name;
+
+ static char *explicit_extern_static_warning
+ = "`%D' was declared `extern' and later `static'";
+ static char *implicit_extern_static_warning
+ = "`%D' was declared implicitly `extern' and later `static'";
+
+ if (flag_traditional
+ || TREE_CODE (newdecl) == TYPE_DECL
+ || (! warn_extern_inline
+ && DECL_INLINE (newdecl)))
+ return;
+
+ name = DECL_ASSEMBLER_NAME (newdecl);
+ if (TREE_PUBLIC (name) && ! TREE_PUBLIC (newdecl))
+ {
+ /* It's okay to redeclare an ANSI built-in function as static,
+ or to declare a non-ANSI built-in function as anything. */
+ if (! (TREE_CODE (newdecl) == FUNCTION_DECL
+ && olddecl != NULL_TREE
+ && TREE_CODE (olddecl) == FUNCTION_DECL
+ && (DECL_BUILT_IN (olddecl)
+ || DECL_BUILT_IN_NONANSI (olddecl))))
+ {
+ cp_warning (IDENTIFIER_IMPLICIT_DECL (name)
+ ? implicit_extern_static_warning
+ : explicit_extern_static_warning, newdecl);
+ if (olddecl != NULL_TREE)
+ cp_warning_at ("previous declaration of `%D'", olddecl);
+ }
+ }
+}
+
+/* Handle when a new declaration NEWDECL has the same name as an old
+ one OLDDECL in the same binding contour. Prints an error message
+ if appropriate.
+
+ If safely possible, alter OLDDECL to look like NEWDECL, and return 1.
+ Otherwise, return 0. */
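+
+/* For example (illustrative): after
+     extern int x;
+     int x = 1;
+   the second decl is merged into the first, which pushdecl then
+   returns; mismatched types such as `int x;' vs `double x;' instead
+   draw a "conflicting types" error. */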
+
+int
+duplicate_decls (newdecl, olddecl)
+ register tree newdecl, olddecl;
+{
+ extern struct obstack permanent_obstack;
+ unsigned olddecl_uid = DECL_UID (olddecl);
+ int olddecl_friend = 0, types_match = 0;
+ int new_defines_function;
+ tree previous_c_decl = NULL_TREE;
+
+ types_match = decls_match (newdecl, olddecl);
+
+ if (TREE_CODE (olddecl) != TREE_LIST)
+ olddecl_friend = DECL_LANG_SPECIFIC (olddecl) && DECL_FRIEND_P (olddecl);
+
+ /* If either the type of the new decl or the type of the old decl is an
+ error_mark_node, then that implies that we have already issued an
+ error (earlier) for some bogus type specification, and in that case,
+ it is rather pointless to harass the user with yet more error messages
+ about the same declaration, so we'll just pretend the types match here. */
+ if ((TREE_TYPE (newdecl)
+ && TREE_CODE (TREE_TYPE (newdecl)) == ERROR_MARK)
+ || (TREE_TYPE (olddecl)
+ && TREE_CODE (TREE_TYPE (olddecl)) == ERROR_MARK))
+ types_match = 1;
+
+ if (flag_traditional && TREE_CODE (newdecl) == FUNCTION_DECL
+ && IDENTIFIER_IMPLICIT_DECL (DECL_ASSEMBLER_NAME (newdecl)) == olddecl)
+ /* If -traditional, avoid error for redeclaring fcn
+ after implicit decl. */
+ ;
+ else if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && (DECL_BUILT_IN (olddecl) || DECL_BUILT_IN_NONANSI (olddecl)))
+ {
+ /* If you declare a built-in or predefined function name as static,
+ the old definition is overridden, but we optionally warn that this
+ was a bad choice of name. Ditto for overloads. */
+ if (! TREE_PUBLIC (newdecl)
+ || (TREE_CODE (newdecl) == FUNCTION_DECL
+ && DECL_LANGUAGE (newdecl) != DECL_LANGUAGE (olddecl)))
+ {
+ if (warn_shadow)
+ cp_warning ("shadowing %s function `%#D'",
+ DECL_BUILT_IN (olddecl) ? "built-in" : "library",
+ olddecl);
+ /* Discard the old built-in function. */
+ return 0;
+ }
+ else if (! types_match)
+ {
+ if (TREE_CODE (newdecl) != FUNCTION_DECL)
+ {
+ /* If the built-in is not ansi, then programs can override
+ it even globally without an error. */
+ if (! DECL_BUILT_IN (olddecl))
+ cp_warning ("library function `%#D' redeclared as non-function `%#D'",
+ olddecl, newdecl);
+ else
+ {
+ cp_error ("declaration of `%#D'", newdecl);
+ cp_error ("conflicts with built-in declaration `%#D'",
+ olddecl);
+ }
+ return 0;
+ }
+
+ cp_warning ("declaration of `%#D'", newdecl);
+ cp_warning ("conflicts with built-in declaration `%#D'",
+ olddecl);
+ }
+ }
+ else if (TREE_CODE (olddecl) != TREE_CODE (newdecl))
+ {
+ if ((TREE_CODE (newdecl) == FUNCTION_DECL
+ && TREE_CODE (olddecl) == TEMPLATE_DECL
+ && ! DECL_TEMPLATE_IS_CLASS (olddecl))
+ || (TREE_CODE (olddecl) == FUNCTION_DECL
+ && TREE_CODE (newdecl) == TEMPLATE_DECL
+ && ! DECL_TEMPLATE_IS_CLASS (newdecl)))
+ return 0;
+
+ cp_error ("`%#D' redeclared as different kind of symbol", newdecl);
+ if (TREE_CODE (olddecl) == TREE_LIST)
+ olddecl = TREE_VALUE (olddecl);
+ cp_error_at ("previous declaration of `%#D'", olddecl);
+
+ /* New decl is completely inconsistent with the old one =>
+ tell caller to replace the old one. */
+
+ return 0;
+ }
+ else if (!types_match)
+ {
+ if (TREE_CODE (newdecl) == TEMPLATE_DECL)
+ {
+ /* The name of a class template may not be declared to refer to
+ any other template, class, function, object, namespace, value,
+ or type in the same scope. */
+ if (DECL_TEMPLATE_IS_CLASS (olddecl)
+ || DECL_TEMPLATE_IS_CLASS (newdecl))
+ {
+ cp_error ("declaration of template `%#D'", newdecl);
+ cp_error_at ("conflicts with previous declaration `%#D'",
+ olddecl);
+ }
+ return 0;
+ }
+ if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ {
+ if (DECL_LANGUAGE (newdecl) == lang_c
+ && DECL_LANGUAGE (olddecl) == lang_c)
+ {
+ cp_error ("declaration of C function `%#D' conflicts with",
+ newdecl);
+ cp_error_at ("previous declaration `%#D' here", olddecl);
+ }
+
+ if (compparms (TYPE_ARG_TYPES (TREE_TYPE (newdecl)),
+ TYPE_ARG_TYPES (TREE_TYPE (olddecl)), 2))
+ {
+ cp_error ("new declaration `%#D'", newdecl);
+ cp_error_at ("ambiguates old declaration `%#D'", olddecl);
+ }
+ else
+ return 0;
+ }
+
+ /* Already complained about this, so don't do so again. */
+ else if (current_class_type == NULL_TREE
+ || IDENTIFIER_ERROR_LOCUS (DECL_ASSEMBLER_NAME (newdecl)) != current_class_type)
+ {
+ /* Since we're doing this before finish_struct can set the
+ line number on NEWDECL, we just do a regular error here. */
+ if (DECL_SOURCE_LINE (newdecl) == 0)
+ cp_error ("conflicting types for `%#D'", newdecl);
+ else
+ cp_error_at ("conflicting types for `%#D'", newdecl);
+ cp_error_at ("previous declaration as `%#D'", olddecl);
+ }
+ }
+ else
+ {
+ char *errmsg = redeclaration_error_message (newdecl, olddecl);
+ if (errmsg)
+ {
+ cp_error (errmsg, newdecl);
+ if (DECL_NAME (olddecl) != NULL_TREE)
+ cp_error_at ((DECL_INITIAL (olddecl)
+ && current_binding_level == global_binding_level)
+ ? "`%#D' previously defined here"
+ : "`%#D' previously declared here", olddecl);
+ }
+ else if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && DECL_INITIAL (olddecl) != NULL_TREE
+ && TYPE_ARG_TYPES (TREE_TYPE (olddecl)) == NULL_TREE
+ && TYPE_ARG_TYPES (TREE_TYPE (newdecl)) != NULL_TREE)
+ {
+ /* Prototype decl follows defn w/o prototype. */
+ cp_warning_at ("prototype for `%#D'", newdecl);
+ cp_warning_at ("follows non-prototype definition here", olddecl);
+ }
+ else if (TREE_CODE (olddecl) == FUNCTION_DECL
+ && DECL_LANGUAGE (newdecl) != DECL_LANGUAGE (olddecl))
+ {
+ /* extern "C" int foo ();
+ int foo () { bar (); }
+ is OK. */
+ if (current_lang_stack == current_lang_base)
+ DECL_LANGUAGE (newdecl) = DECL_LANGUAGE (olddecl);
+ else
+ {
+ cp_error_at ("previous declaration of `%#D' with %L linkage",
+ olddecl, DECL_LANGUAGE (olddecl));
+ cp_error ("conflicts with new declaration with %L linkage",
+ DECL_LANGUAGE (newdecl));
+ }
+ }
+
+ /* These bits are logically part of the type. */
+ if (pedantic
+ && (TREE_READONLY (newdecl) != TREE_READONLY (olddecl)
+ || TREE_THIS_VOLATILE (newdecl) != TREE_THIS_VOLATILE (olddecl)))
+ cp_error_at ("type qualifiers for `%D' conflict with previous decl",
+ newdecl);
+ }
+
+ /* If new decl is `static' and an `extern' was seen previously,
+ warn about it. */
+ warn_extern_redeclared_static (newdecl, olddecl);
+
+ /* We have committed to returning 1 at this point. */
+ if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ {
+ /* Now that functions must hold information normally held
+ by field decls, there is extra work to do so that
+ declaration information does not get destroyed during
+ definition. */
+ if (DECL_VINDEX (olddecl))
+ DECL_VINDEX (newdecl) = DECL_VINDEX (olddecl);
+ if (DECL_CONTEXT (olddecl))
+ DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl);
+ if (DECL_CLASS_CONTEXT (olddecl))
+ DECL_CLASS_CONTEXT (newdecl) = DECL_CLASS_CONTEXT (olddecl);
+ if (DECL_CHAIN (newdecl) == NULL_TREE)
+ DECL_CHAIN (newdecl) = DECL_CHAIN (olddecl);
+ if (DECL_NEXT_METHOD (newdecl) == NULL_TREE)
+ DECL_NEXT_METHOD (newdecl) = DECL_NEXT_METHOD (olddecl);
+ if (DECL_PENDING_INLINE_INFO (newdecl) == (struct pending_inline *)0)
+ DECL_PENDING_INLINE_INFO (newdecl) = DECL_PENDING_INLINE_INFO (olddecl);
+ }
+
+ /* Deal with C++: must preserve virtual function table size. */
+ if (TREE_CODE (olddecl) == TYPE_DECL)
+ {
+ register tree newtype = TREE_TYPE (newdecl);
+ register tree oldtype = TREE_TYPE (olddecl);
+
+ if (newtype != error_mark_node && oldtype != error_mark_node
+ && TYPE_LANG_SPECIFIC (newtype) && TYPE_LANG_SPECIFIC (oldtype))
+ {
+ CLASSTYPE_VSIZE (newtype) = CLASSTYPE_VSIZE (oldtype);
+ CLASSTYPE_FRIEND_CLASSES (newtype)
+ = CLASSTYPE_FRIEND_CLASSES (oldtype);
+ }
+#if 0
+ /* why assert here? Just because debugging information is
+ messed up? (mrs) */
+ /* it happens on something like:
+ typedef struct Thing {
+ Thing();
+ int x;
+ } Thing;
+ */
+ my_friendly_assert (DECL_IGNORED_P (olddecl) == DECL_IGNORED_P (newdecl),
+ 139);
+#endif
+ }
+
+ /* Special handling ensues if new decl is a function definition. */
+ new_defines_function = (TREE_CODE (newdecl) == FUNCTION_DECL
+ && DECL_INITIAL (newdecl) != NULL_TREE);
+
+ /* Optionally warn about more than one declaration for the same name,
+ but don't warn about a function declaration followed by a definition. */
+ if (warn_redundant_decls
+ && ! DECL_ARTIFICIAL (olddecl)
+ && !(new_defines_function && DECL_INITIAL (olddecl) == NULL_TREE)
+ /* Don't warn about extern decl followed by (tentative) definition. */
+ && !(DECL_EXTERNAL (olddecl) && ! DECL_EXTERNAL (newdecl)))
+ {
+ cp_warning ("redundant redeclaration of `%D' in same scope", newdecl);
+ cp_warning ("previous declaration of `%D'", olddecl);
+ }
+
+ /* Copy all the DECL_... slots specified in the new decl
+ except for any that we copy here from the old type. */
+
+ if (types_match)
+ {
+ /* Automatically handles default parameters. */
+ tree oldtype = TREE_TYPE (olddecl);
+ /* Merge the data types specified in the two decls. */
+ tree newtype = common_type (TREE_TYPE (newdecl), TREE_TYPE (olddecl));
+
+ /* Make sure we put the new type in the same obstack as the old ones.
+ If the old types are not both in the same obstack, use the permanent
+ one. */
+ if (oldtype && TYPE_OBSTACK (oldtype) == TYPE_OBSTACK (newtype))
+ push_obstacks (TYPE_OBSTACK (oldtype), TYPE_OBSTACK (oldtype));
+ else
+ {
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ }
+
+ if (TREE_CODE (newdecl) == VAR_DECL)
+ DECL_THIS_EXTERN (newdecl) |= DECL_THIS_EXTERN (olddecl);
+ /* Do this after calling `common_type' so that default
+ parameters don't confuse us. */
+ else if (TREE_CODE (newdecl) == FUNCTION_DECL
+ && (TYPE_RAISES_EXCEPTIONS (TREE_TYPE (newdecl))
+ != TYPE_RAISES_EXCEPTIONS (TREE_TYPE (olddecl))))
+ {
+ tree ctype = NULL_TREE;
+ ctype = DECL_CLASS_CONTEXT (newdecl);
+ TREE_TYPE (newdecl) = build_exception_variant (ctype, newtype,
+ TYPE_RAISES_EXCEPTIONS (TREE_TYPE (newdecl)));
+ TREE_TYPE (olddecl) = build_exception_variant (ctype, newtype,
+ TYPE_RAISES_EXCEPTIONS (oldtype));
+
+ if (! compexcepttypes (TREE_TYPE (newdecl), TREE_TYPE (olddecl), 0))
+ {
+ cp_error ("declaration of `%D' raises different exceptions...",
+ newdecl);
+ cp_error_at ("...from previous declaration here", olddecl);
+ }
+ }
+ TREE_TYPE (newdecl) = TREE_TYPE (olddecl) = newtype;
+
+ /* Lay the type out, unless already done. */
+ if (oldtype != TREE_TYPE (newdecl))
+ {
+ if (TREE_TYPE (newdecl) != error_mark_node)
+ layout_type (TREE_TYPE (newdecl));
+ if (TREE_CODE (newdecl) != FUNCTION_DECL
+ && TREE_CODE (newdecl) != TYPE_DECL
+ && TREE_CODE (newdecl) != CONST_DECL
+ && TREE_CODE (newdecl) != TEMPLATE_DECL)
+ layout_decl (newdecl, 0);
+ }
+ else
+ {
+ /* Since the type is OLDDECL's, make OLDDECL's size go with it. */
+ DECL_SIZE (newdecl) = DECL_SIZE (olddecl);
+ }
+
+ /* Merge the type qualifiers. */
+ if (TREE_READONLY (newdecl))
+ TREE_READONLY (olddecl) = 1;
+ if (TREE_THIS_VOLATILE (newdecl))
+ TREE_THIS_VOLATILE (olddecl) = 1;
+
+ /* Merge the initialization information. */
+ if (DECL_INITIAL (newdecl) == NULL_TREE
+ && DECL_INITIAL (olddecl) != NULL_TREE)
+ {
+ DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
+ DECL_SOURCE_FILE (newdecl) = DECL_SOURCE_FILE (olddecl);
+ DECL_SOURCE_LINE (newdecl) = DECL_SOURCE_LINE (olddecl);
+ }
+
+ /* Merge the section attribute.
+ We want to issue an error if the sections conflict but that must be
+ done later in decl_attributes since we are called before attributes
+ are assigned. */
+ if (DECL_SECTION_NAME (newdecl) == NULL_TREE)
+ DECL_SECTION_NAME (newdecl) = DECL_SECTION_NAME (olddecl);
+
+ /* Keep the old rtl since we can safely use it, unless it's the
+ call to abort() used for abstract virtuals. */
+ if ((DECL_LANG_SPECIFIC (olddecl)
+ && !DECL_ABSTRACT_VIRTUAL_P (olddecl))
+ || DECL_RTL (olddecl) != DECL_RTL (abort_fndecl))
+ DECL_RTL (newdecl) = DECL_RTL (olddecl);
+
+ pop_obstacks ();
+ }
+ /* If we cannot merge, then use the new type and qualifiers,
+ and don't preserve the old rtl. */
+ else
+ {
+ /* Clean out any memory we had of the old declaration. */
+ tree oldstatic = value_member (olddecl, static_aggregates);
+ if (oldstatic)
+ TREE_VALUE (oldstatic) = error_mark_node;
+
+ TREE_TYPE (olddecl) = TREE_TYPE (newdecl);
+ TREE_READONLY (olddecl) = TREE_READONLY (newdecl);
+ TREE_THIS_VOLATILE (olddecl) = TREE_THIS_VOLATILE (newdecl);
+ TREE_SIDE_EFFECTS (olddecl) = TREE_SIDE_EFFECTS (newdecl);
+ }
+
+ /* Merge the storage class information. */
+ if (DECL_EXTERNAL (newdecl))
+ {
+ TREE_STATIC (newdecl) = TREE_STATIC (olddecl);
+ DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl);
+
+ if (TREE_CODE (newdecl) != FUNCTION_DECL)
+ TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl);
+ }
+ else
+ {
+ TREE_STATIC (olddecl) = TREE_STATIC (newdecl);
+ /* A `const' which was not declared `extern' and is
+ in static storage is invisible. */
+ if (TREE_CODE (newdecl) == VAR_DECL
+ && TREE_READONLY (newdecl) && TREE_STATIC (newdecl)
+ && ! DECL_THIS_EXTERN (newdecl))
+ TREE_PUBLIC (newdecl) = 0;
+ else if (TREE_CODE (newdecl) != FUNCTION_DECL)
+ TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
+ }
+
+ /* For functions, static overrides non-static. */
+ if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ {
+ TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl);
+ /* We do this because we don't automatically
+ copy the attributes of NEWDECL into OLDDECL. */
+ TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
+ /* If this clears `static', clear it in the identifier too. */
+ if (! TREE_PUBLIC (olddecl))
+ TREE_PUBLIC (DECL_ASSEMBLER_NAME (olddecl)) = 0;
+ }
+
+ /* If either decl says `inline', this fn is inline,
+ unless its definition was passed already. */
+ if (DECL_INLINE (newdecl) && DECL_INITIAL (olddecl) == NULL_TREE)
+ DECL_INLINE (olddecl) = 1;
+ DECL_INLINE (newdecl) = DECL_INLINE (olddecl);
+
+ if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ {
+ if (! types_match)
+ {
+ DECL_LANGUAGE (olddecl) = DECL_LANGUAGE (newdecl);
+ DECL_ASSEMBLER_NAME (olddecl) = DECL_ASSEMBLER_NAME (newdecl);
+ DECL_ARGUMENTS (olddecl) = DECL_ARGUMENTS (newdecl);
+ DECL_RESULT (olddecl) = DECL_RESULT (newdecl);
+ DECL_RTL (olddecl) = DECL_RTL (newdecl);
+ }
+ if (new_defines_function)
+ /* If defining a function declared with other language
+ linkage, use the previously declared language linkage. */
+ DECL_LANGUAGE (newdecl) = DECL_LANGUAGE (olddecl);
+ else
+ {
+ /* If redeclaring a builtin function, and not a definition,
+ it stays built in. */
+ if (DECL_BUILT_IN (olddecl))
+ {
+ DECL_BUILT_IN (newdecl) = 1;
+ DECL_FUNCTION_CODE (newdecl) = DECL_FUNCTION_CODE (olddecl);
+ /* If we're keeping the built-in definition, keep the rtl,
+ regardless of declaration matches. */
+ DECL_RTL (newdecl) = DECL_RTL (olddecl);
+ }
+ else
+ DECL_FRAME_SIZE (newdecl) = DECL_FRAME_SIZE (olddecl);
+
+ DECL_RESULT (newdecl) = DECL_RESULT (olddecl);
+ if ((DECL_SAVED_INSNS (newdecl) = DECL_SAVED_INSNS (olddecl)))
+ /* Previously saved insns go together with
+ the function's previous definition. */
+ DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
+ /* Don't clear out the arguments if we're redefining a function. */
+ if (DECL_ARGUMENTS (olddecl))
+ DECL_ARGUMENTS (newdecl) = DECL_ARGUMENTS (olddecl);
+ }
+ }
+
+ if (TREE_CODE (newdecl) == TEMPLATE_DECL)
+ {
+ if (DECL_TEMPLATE_INFO (olddecl)->length)
+ DECL_TEMPLATE_INFO (newdecl) = DECL_TEMPLATE_INFO (olddecl);
+ DECL_TEMPLATE_MEMBERS (newdecl) = DECL_TEMPLATE_MEMBERS (olddecl);
+ DECL_TEMPLATE_INSTANTIATIONS (newdecl)
+ = DECL_TEMPLATE_INSTANTIATIONS (olddecl);
+ }
+
+ /* Now preserve various other info from the definition. */
+ TREE_ADDRESSABLE (newdecl) = TREE_ADDRESSABLE (olddecl);
+ TREE_ASM_WRITTEN (newdecl) = TREE_ASM_WRITTEN (olddecl);
+ DECL_COMMON (newdecl) = DECL_COMMON (olddecl);
+
+ /* Don't really know how much of the language-specific
+ values we should copy from old to new. */
+ if (DECL_LANG_SPECIFIC (olddecl))
+ {
+ DECL_IN_AGGR_P (newdecl) = DECL_IN_AGGR_P (olddecl);
+ DECL_ACCESS (newdecl) = DECL_ACCESS (olddecl);
+ }
+
+ if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ {
+ int function_size;
+ struct lang_decl *ol = DECL_LANG_SPECIFIC (olddecl);
+ struct lang_decl *nl = DECL_LANG_SPECIFIC (newdecl);
+
+ function_size = sizeof (struct tree_decl);
+
+ bcopy ((char *) newdecl + sizeof (struct tree_common),
+ (char *) olddecl + sizeof (struct tree_common),
+ function_size - sizeof (struct tree_common));
+
+ /* Can we safely free the storage used by newdecl? */
+
+#define ROUND(x) ((x + obstack_alignment_mask (&permanent_obstack)) \
+ & ~ obstack_alignment_mask (&permanent_obstack))
+
+ if ((char *)newdecl + ROUND (function_size)
+ + ROUND (sizeof (struct lang_decl))
+ == obstack_next_free (&permanent_obstack))
+ {
+ DECL_MAIN_VARIANT (newdecl) = olddecl;
+ DECL_LANG_SPECIFIC (olddecl) = ol;
+ bcopy ((char *)nl, (char *)ol, sizeof (struct lang_decl));
+
+ obstack_free (&permanent_obstack, newdecl);
+ }
+ else if (LANG_DECL_PERMANENT (ol))
+ {
+ if (DECL_MAIN_VARIANT (olddecl) == olddecl)
+ {
+ /* Save these lang_decls that would otherwise be lost. */
+ extern tree free_lang_decl_chain;
+ tree free_lang_decl = (tree) ol;
+ TREE_CHAIN (free_lang_decl) = free_lang_decl_chain;
+ free_lang_decl_chain = free_lang_decl;
+ }
+ else
+ {
+ /* Storage leak. */
+ }
+ }
+ }
+ else
+ {
+ bcopy ((char *) newdecl + sizeof (struct tree_common),
+ (char *) olddecl + sizeof (struct tree_common),
+ sizeof (struct tree_decl) - sizeof (struct tree_common)
+ + tree_code_length [(int)TREE_CODE (newdecl)] * sizeof (char *));
+ }
+
+ DECL_UID (olddecl) = olddecl_uid;
+ if (olddecl_friend)
+ DECL_FRIEND_P (olddecl) = 1;
+
+ return 1;
+}
+
+/* Record a decl-node X as belonging to the current lexical scope.
+ Check for errors (such as an incompatible declaration for the same
+ name already seen in the same scope).
+
+ Returns either X or an old decl for the same name.
+ If an old decl is returned, it may have been smashed
+ to agree with what X says. */
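+
+/* Usage sketch (an illustration, not code from this file):
+     tree d = build_decl (VAR_DECL, get_identifier ("v"), integer_type_node);
+     d = pushdecl (d);
+   binds `v' in the current scope; the value returned is either the
+   new decl or an old, possibly smashed, decl for the same name. */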
+
+tree
+pushdecl (x)
+ tree x;
+{
+ register tree t;
+#if 0 /* not yet, should get fixed properly later */
+ register tree name;
+#else
+ register tree name = DECL_ASSEMBLER_NAME (x);
+#endif
+ register struct binding_level *b = current_binding_level;
+
+#if 0
+ static int nglobals; int len;
+
+ len = list_length (global_binding_level->names);
+ if (len < nglobals)
+ my_friendly_abort (8);
+ else if (len > nglobals)
+ nglobals = len;
+#endif
+
+ if (x != current_function_decl
+ /* Don't change DECL_CONTEXT of virtual methods. */
+ && (TREE_CODE (x) != FUNCTION_DECL || !DECL_VIRTUAL_P (x))
+ && ! DECL_CONTEXT (x))
+ DECL_CONTEXT (x) = current_function_decl;
+ /* A local declaration for a function doesn't constitute nesting. */
+ if (TREE_CODE (x) == FUNCTION_DECL && DECL_INITIAL (x) == 0)
+ DECL_CONTEXT (x) = 0;
+
+#if 0 /* not yet, should get fixed properly later */
+ /* For functions and class static data, we currently look up the encoded
+ form of the name. For types, we want the real name. The former will
+ probably be changed soon, according to MDT. */
+ if (TREE_CODE (x) == FUNCTION_DECL || TREE_CODE (x) == VAR_DECL)
+ name = DECL_ASSEMBLER_NAME (x);
+ else
+ name = DECL_NAME (x);
+#else
+ /* Types are looked up using the DECL_NAME, as that is what the rest of the
+ compiler wants to use. */
+ if (TREE_CODE (x) == TYPE_DECL)
+ name = DECL_NAME (x);
+#endif
+
+ if (name)
+ {
+ char *file;
+ int line;
+
+ t = lookup_name_current_level (name);
+ if (t == error_mark_node)
+ {
+ /* error_mark_node is 0 for a while during initialization! */
+ t = NULL_TREE;
+ cp_error_at ("`%#D' used prior to declaration", x);
+ }
+
+ else if (t != NULL_TREE)
+ {
+ if (TREE_CODE (t) == PARM_DECL)
+ {
+ if (DECL_CONTEXT (t) == NULL_TREE)
+ fatal ("parse errors have confused me too much");
+ }
+ file = DECL_SOURCE_FILE (t);
+ line = DECL_SOURCE_LINE (t);
+
+ if (((TREE_CODE (x) == FUNCTION_DECL && DECL_LANGUAGE (x) == lang_c)
+ || (TREE_CODE (x) == TEMPLATE_DECL
+ && ! DECL_TEMPLATE_IS_CLASS (x)))
+ && is_overloaded_fn (t))
+ /* don't do anything just yet */;
+ else if (TREE_CODE (t) != TREE_CODE (x))
+ {
+ if (TREE_CODE (t) == TYPE_DECL || TREE_CODE (x) == TYPE_DECL)
+ {
+ /* We do nothing special here, because C++ does such nasty
+ things with TYPE_DECLs. Instead, just let the TYPE_DECL
+ get shadowed, and know that if we need to find a TYPE_DECL
+ for a given name, we can look in the IDENTIFIER_TYPE_VALUE
+ slot of the identifier. */
+ ;
+ }
+ else if (duplicate_decls (x, t))
+ return t;
+ }
+ else if (duplicate_decls (x, t))
+ {
+#if 0
+ /* This is turned off until I have time to do it right (bpk). */
+
+ /* Also warn if they did a prototype with `static' on it, but
+ then later left the `static' off. */
+ if (! TREE_PUBLIC (name) && TREE_PUBLIC (x))
+ {
+ if (DECL_LANG_SPECIFIC (t) && DECL_FRIEND_P (t))
+ return t;
+
+ if (extra_warnings)
+ {
+ cp_warning ("`static' missing from declaration of `%D'",
+ t);
+ warning_with_file_and_line (file, line,
+ "previous declaration of `%s'",
+ decl_as_string (t, 0));
+ }
+
+ /* Now fix things so it'll do what they expect. */
+ if (current_function_decl)
+ TREE_PUBLIC (current_function_decl) = 0;
+ }
+ /* Due to interference in memory reclamation (X may be
+ obstack-deallocated at this point), we must guard against
+ one really special case. [jason: This should be handled
+ by start_function] */
+ if (current_function_decl == x)
+ current_function_decl = t;
+#endif
+ if (TREE_CODE (t) == TYPE_DECL)
+ SET_IDENTIFIER_TYPE_VALUE (name, TREE_TYPE (t));
+
+ return t;
+ }
+ }
+
+ if (TREE_CODE (x) == FUNCTION_DECL && ! DECL_FUNCTION_MEMBER_P (x))
+ {
+ t = push_overloaded_decl (x, 1);
+ if (t != x || DECL_LANGUAGE (x) == lang_c)
+ return t;
+ }
+ else if (TREE_CODE (x) == TEMPLATE_DECL && ! DECL_TEMPLATE_IS_CLASS (x))
+ return push_overloaded_decl (x, 0);
+
+ /* If declaring a type as a typedef, and the type has no known
+ typedef name, install this TYPE_DECL as its typedef name. */
+ if (TREE_CODE (x) == TYPE_DECL)
+ {
+ tree type = TREE_TYPE (x);
+ tree name = (type != error_mark_node) ? TYPE_NAME (type) : x;
+
+ if (name == NULL_TREE || TREE_CODE (name) != TYPE_DECL)
+ {
+ /* If these are different names, and we're at the global
+ binding level, make two equivalent definitions. */
+ name = x;
+ if (global_bindings_p ())
+ TYPE_NAME (type) = x;
+ }
+ else
+ {
+ tree tname = DECL_NAME (name);
+
+ if (global_bindings_p () && ANON_AGGRNAME_P (tname))
+ {
+ /* Do gratuitous C++ typedefing, and make sure that
+ we access this type either through the TREE_TYPE field
+ or via the tags list. */
+ TYPE_NAME (TREE_TYPE (x)) = x;
+ pushtag (tname, TREE_TYPE (x), 0);
+ }
+ }
+ my_friendly_assert (TREE_CODE (name) == TYPE_DECL, 140);
+
+ if (DECL_NAME (name) && !DECL_NESTED_TYPENAME (name))
+ set_nested_typename (x, current_class_name,
+ DECL_NAME (name), type);
+
+ if (type != error_mark_node
+ && TYPE_NAME (type)
+ && TYPE_IDENTIFIER (type))
+ set_identifier_type_value_with_scope (DECL_NAME (x), type, b);
+ }
+
+ /* Multiple external decls of the same identifier ought to match.
+
+ We get warnings about inline functions where they are defined.
+ We get warnings about other functions from push_overloaded_decl.
+
+ Avoid duplicate warnings where they are used. */
+ if (TREE_PUBLIC (x) && TREE_CODE (x) != FUNCTION_DECL)
+ {
+ tree decl;
+
+ if (IDENTIFIER_GLOBAL_VALUE (name) != NULL_TREE
+ && (DECL_EXTERNAL (IDENTIFIER_GLOBAL_VALUE (name))
+ || TREE_PUBLIC (IDENTIFIER_GLOBAL_VALUE (name))))
+ decl = IDENTIFIER_GLOBAL_VALUE (name);
+ else
+ decl = NULL_TREE;
+
+ if (decl
+ /* If different sort of thing, we already gave an error. */
+ && TREE_CODE (decl) == TREE_CODE (x)
+ && ! comptypes (TREE_TYPE (x), TREE_TYPE (decl), 1))
+ {
+ cp_pedwarn ("type mismatch with previous external decl", x);
+ cp_pedwarn_at ("previous external decl of `%#D'", decl);
+ }
+ }
+
+ /* In PCC-compatibility mode, extern decls of vars with no current decl
+ take effect at top level no matter where they are. */
+ if (flag_traditional && DECL_EXTERNAL (x)
+ && lookup_name (name, 0) == NULL_TREE)
+ b = global_binding_level;
+
+ /* This name is new in its binding level.
+ Install the new declaration and return it. */
+ if (b == global_binding_level)
+ {
+ /* Install a global value. */
+
+ /* Rule for VAR_DECLs, but not for other kinds of _DECLs:
+ A `const' which was not declared `extern' is invisible. */
+ if (TREE_CODE (x) == VAR_DECL
+ && TREE_READONLY (x) && ! DECL_THIS_EXTERN (x))
+ TREE_PUBLIC (x) = 0;
+
+ /* If the first global decl has external linkage,
+ warn if we later see a static one. */
+ if (IDENTIFIER_GLOBAL_VALUE (name) == NULL_TREE && TREE_PUBLIC (x))
+ TREE_PUBLIC (name) = 1;
+
+ /* Don't install a TYPE_DECL if we already have another
+ sort of _DECL with that name. */
+ if (TREE_CODE (x) != TYPE_DECL
+ || t == NULL_TREE
+ || TREE_CODE (t) == TYPE_DECL)
+ IDENTIFIER_GLOBAL_VALUE (name) = x;
+
+ /* Don't forget if the function was used via an implicit decl. */
+ if (IDENTIFIER_IMPLICIT_DECL (name)
+ && TREE_USED (IDENTIFIER_IMPLICIT_DECL (name)))
+ TREE_USED (x) = 1;
+
+ /* Don't forget if its address was taken in that way. */
+ if (IDENTIFIER_IMPLICIT_DECL (name)
+ && TREE_ADDRESSABLE (IDENTIFIER_IMPLICIT_DECL (name)))
+ TREE_ADDRESSABLE (x) = 1;
+
+ /* Warn about mismatches against previous implicit decl. */
+ if (IDENTIFIER_IMPLICIT_DECL (name) != NULL_TREE
+ /* If this real decl matches the implicit, don't complain. */
+ && ! (TREE_CODE (x) == FUNCTION_DECL
+ && TREE_TYPE (TREE_TYPE (x)) == integer_type_node))
+ cp_warning
+ ("`%D' was previously implicitly declared to return `int'", x);
+
+ /* If new decl is `static' and an `extern' was seen previously,
+ warn about it. */
+ if (x != NULL_TREE && t != NULL_TREE && decls_match (x, t))
+ warn_extern_redeclared_static (x, t);
+ }
+ else
+ {
+ /* Here to install a non-global value. */
+ tree oldlocal = IDENTIFIER_LOCAL_VALUE (name);
+ tree oldglobal = IDENTIFIER_GLOBAL_VALUE (name);
+
+ b->shadowed = tree_cons (name, oldlocal, b->shadowed);
+ IDENTIFIER_LOCAL_VALUE (name) = x;
+
+ /* If this is a TYPE_DECL, push it into the type value slot. */
+ if (TREE_CODE (x) == TYPE_DECL)
+ set_identifier_type_value_with_scope (name, TREE_TYPE (x), b);
+
+ /* If this is an extern function declaration, see if we
+ have a global definition or declaration for the function. */
+ if (oldlocal == NULL_TREE
+ && DECL_EXTERNAL (x) && !DECL_INLINE (x)
+ && oldglobal != NULL_TREE
+ && TREE_CODE (x) == FUNCTION_DECL
+ && TREE_CODE (oldglobal) == FUNCTION_DECL)
+ {
+ /* We have one. Their types must agree. */
+ if (! comptypes (TREE_TYPE (x), TREE_TYPE (oldglobal), 1))
+ {
+ cp_warning ("extern declaration of `%#D' doesn't match", x);
+ cp_warning_at ("global declaration `%#D'", oldglobal);
+ }
+ else
+ {
+ /* Inner extern decl is inline if global one is.
+ Copy enough to really inline it. */
+ if (DECL_INLINE (oldglobal))
+ {
+ DECL_INLINE (x) = DECL_INLINE (oldglobal);
+ DECL_INITIAL (x) = (current_function_decl == oldglobal
+ ? NULL_TREE : DECL_INITIAL (oldglobal));
+ DECL_SAVED_INSNS (x) = DECL_SAVED_INSNS (oldglobal);
+ DECL_FRAME_SIZE (x) = DECL_FRAME_SIZE (oldglobal);
+ DECL_ARGUMENTS (x) = DECL_ARGUMENTS (oldglobal);
+ DECL_RESULT (x) = DECL_RESULT (oldglobal);
+ TREE_ASM_WRITTEN (x) = TREE_ASM_WRITTEN (oldglobal);
+ DECL_ABSTRACT_ORIGIN (x) = oldglobal;
+ }
+ /* Inner extern decl is built-in if global one is. */
+ if (DECL_BUILT_IN (oldglobal))
+ {
+ DECL_BUILT_IN (x) = DECL_BUILT_IN (oldglobal);
+ DECL_FUNCTION_CODE (x) = DECL_FUNCTION_CODE (oldglobal);
+ }
+ /* Keep the arg types from a file-scope fcn defn. */
+ if (TYPE_ARG_TYPES (TREE_TYPE (oldglobal)) != NULL_TREE
+ && DECL_INITIAL (oldglobal)
+ && TYPE_ARG_TYPES (TREE_TYPE (x)) == NULL_TREE)
+ TREE_TYPE (x) = TREE_TYPE (oldglobal);
+ }
+ }
+ /* If we have a local external declaration,
+ and no file-scope declaration has yet been seen,
+ then if we later have a file-scope decl it must not be static. */
+ if (oldlocal == NULL_TREE
+ && oldglobal == NULL_TREE
+ && DECL_EXTERNAL (x)
+ && TREE_PUBLIC (x))
+ {
+ TREE_PUBLIC (name) = 1;
+ }
+
+ if (DECL_FROM_INLINE (x))
+ /* Inline decls shadow nothing. */;
+
+ /* Warn if shadowing an argument at the top level of the body. */
+ else if (oldlocal != NULL_TREE && !DECL_EXTERNAL (x)
+ && TREE_CODE (oldlocal) == PARM_DECL
+ && TREE_CODE (x) != PARM_DECL)
+ {
+ /* Go to where the parms should be and see if we
+ find them there. */
+ struct binding_level *b = current_binding_level->level_chain;
+
+ if (cleanup_label)
+ b = b->level_chain;
+
+ /* ARM $8.3 */
+ if (b->parm_flag == 1)
+ cp_error ("declaration of `%#D' shadows a parameter", name);
+ }
+ /* Maybe warn if shadowing something else. */
+ else if (warn_shadow && !DECL_EXTERNAL (x)
+ /* No shadow warnings for internally generated vars. */
+ && ! DECL_ARTIFICIAL (x)
+ /* No shadow warnings for vars made for inlining. */
+ && ! DECL_FROM_INLINE (x))
+ {
+ char *warnstring = NULL;
+
+ if (oldlocal != NULL_TREE && TREE_CODE (oldlocal) == PARM_DECL)
+ warnstring = "declaration of `%s' shadows a parameter";
+ else if (IDENTIFIER_CLASS_VALUE (name) != NULL_TREE
+ && !TREE_STATIC (name))
+ warnstring = "declaration of `%s' shadows a member of `this'";
+ else if (oldlocal != NULL_TREE)
+ warnstring = "declaration of `%s' shadows previous local";
+ else if (oldglobal != NULL_TREE)
+ warnstring = "declaration of `%s' shadows global declaration";
+
+ if (warnstring)
+ warning (warnstring, IDENTIFIER_POINTER (name));
+ }
+
+ /* If storing a local value, there may already be one (inherited).
+ If so, record it for restoration when this binding level ends. */
+ if (oldlocal != NULL_TREE)
+ b->shadowed = tree_cons (name, oldlocal, b->shadowed);
+ }
+
+ /* Keep count of variables in this level with incomplete type. */
+ if (TREE_CODE (x) != TEMPLATE_DECL
+ && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE
+ && PROMOTES_TO_AGGR_TYPE (TREE_TYPE (x), ARRAY_TYPE))
+ {
+ if (++b->n_incomplete == 0)
+ error ("too many incomplete variables at this point");
+ }
+ }
+
+ if (TREE_CODE (x) == TYPE_DECL && name != NULL_TREE)
+ {
+ if (current_class_name)
+ {
+ if (! TREE_MANGLED (name))
+ set_nested_typename (x, current_class_name, DECL_NAME (x),
+ TREE_TYPE (x));
+ }
+ }
+
+ /* Put decls on list in reverse order.
+ We will reverse them later if necessary. */
+ TREE_CHAIN (x) = b->names;
+ b->names = x;
+ if (! (b != global_binding_level || TREE_PERMANENT (x)))
+ my_friendly_abort (124);
+
+ return x;
+}
+
+/* Same as pushdecl, but define X in binding-level LEVEL. */
+
+static tree
+pushdecl_with_scope (x, level)
+ tree x;
+ struct binding_level *level;
+{
+ register struct binding_level *b = current_binding_level;
+
+ current_binding_level = level;
+ x = pushdecl (x);
+ current_binding_level = b;
+ return x;
+}
+
+/* Like pushdecl, only it places X in GLOBAL_BINDING_LEVEL,
+ if appropriate. */
+tree
+pushdecl_top_level (x)
+ tree x;
+{
+ register struct binding_level *b = inner_binding_level;
+ register tree t = pushdecl_with_scope (x, global_binding_level);
+
+ /* Now, the type_shadowed stack may screw us. Munge it so it does
+ what we want. */
+ if (TREE_CODE (x) == TYPE_DECL)
+ {
+ tree name = DECL_NAME (x);
+ tree newval;
+ tree *ptr = (tree *)0;
+ for (; b != global_binding_level; b = b->level_chain)
+ {
+ tree shadowed = b->type_shadowed;
+ for (; shadowed; shadowed = TREE_CHAIN (shadowed))
+ if (TREE_PURPOSE (shadowed) == name)
+ {
+ ptr = &TREE_VALUE (shadowed);
+ /* Can't break out of the loop here because sometimes
+ a binding level will have duplicate bindings for
+ PT names. It's gross, but I haven't time to fix it. */
+ }
+ }
+ newval = TREE_TYPE (x);
+ if (ptr == (tree *)0)
+ {
+ /* @@ This shouldn't be needed. My test case "zstring.cc" trips
+ up here if this is changed to an assertion. --KR */
+ SET_IDENTIFIER_TYPE_VALUE (name, newval);
+ }
+ else
+ {
+#if 0
+ /* Disabled this 11/10/92, since there are many cases which
+ behave just fine when *ptr doesn't satisfy either of these.
+ For example, nested classes declared as friends of their enclosing
+ class will not meet this criteria. (bpk) */
+ my_friendly_assert (*ptr == NULL_TREE || *ptr == newval, 141);
+#endif
+ *ptr = newval;
+ }
+ }
+ return t;
+}
+
+/* Like push_overloaded_decl, only it places X in GLOBAL_BINDING_LEVEL,
+ if appropriate. */
+void
+push_overloaded_decl_top_level (x, forget)
+ tree x;
+ int forget;
+{
+ struct binding_level *b = current_binding_level;
+
+ current_binding_level = global_binding_level;
+ push_overloaded_decl (x, forget);
+ current_binding_level = b;
+}
+
+/* Make the declaration of X appear in CLASS scope. */
+tree
+pushdecl_class_level (x)
+ tree x;
+{
+ /* Don't use DECL_ASSEMBLER_NAME here! Everything that looks in class
+ scope looks for the pre-mangled name. */
+ register tree name = DECL_NAME (x);
+
+ if (name)
+ {
+ if (TYPE_BEING_DEFINED (current_class_type))
+ {
+ /* Check for inconsistent use of this name in the class body.
+ Types, enums, and static vars are checked here; other
+ members are checked in finish_struct. */
+ tree icv = IDENTIFIER_CLASS_VALUE (name);
+
+ if (icv
+ /* Don't complain about inherited names. */
+ && id_in_current_class (name)
+ /* Or shadowed tags. */
+ && !(TREE_CODE (icv) == TYPE_DECL
+ && DECL_CONTEXT (icv) == current_class_type))
+ {
+ cp_error ("declaration of identifier `%D' as `%#D'", name, x);
+ cp_error_at ("conflicts with previous use in class as `%#D'",
+ icv);
+ }
+ }
+
+ push_class_level_binding (name, x);
+ if (TREE_CODE (x) == TYPE_DECL)
+ {
+ set_identifier_type_value (name, TREE_TYPE (x));
+ if (!DECL_NESTED_TYPENAME (x))
+ set_nested_typename (x, current_class_name, name, TREE_TYPE (x));
+ }
+ }
+ return x;
+}
+
+/* This function is used to push the mangled decls for nested types into
+ the appropriate scope. Previously pushdecl_top_level was used, but that
+ is incorrect for members of local classes. */
+tree
+pushdecl_nonclass_level (x)
+ tree x;
+{
+ struct binding_level *b = current_binding_level;
+
+#if 0
+ /* Get out of class scope -- this isn't necessary, because class scope
+ doesn't make it into current_binding_level. */
+ while (b->parm_flag == 2)
+ b = b->level_chain;
+#else
+ my_friendly_assert (b->parm_flag != 2, 180);
+#endif
+
+ /* Get out of template binding levels */
+ while (b->pseudo_global)
+ b = b->level_chain;
+
+ return pushdecl_with_scope (x, b);
+}
+
+/* Make the declaration(s) of X appear in CLASS scope
+ under the name NAME. */
+void
+push_class_level_binding (name, x)
+ tree name;
+ tree x;
+{
+ maybe_push_cache_obstack ();
+ class_binding_level->class_shadowed
+ = tree_cons (name, IDENTIFIER_CLASS_VALUE (name),
+ class_binding_level->class_shadowed);
+ pop_obstacks ();
+ IDENTIFIER_CLASS_VALUE (name) = x;
+ obstack_ptr_grow (&decl_obstack, x);
+}
+
+/* Tell caller how to interpret a TREE_LIST which contains
+ chains of FUNCTION_DECLS. */
+int
+overloaded_globals_p (list)
+ tree list;
+{
+ my_friendly_assert (TREE_CODE (list) == TREE_LIST, 142);
+
+ /* Don't commit caller to seeing them as globals. */
+ if (TREE_NONLOCAL_FLAG (list))
+ return -1;
+ /* Do commit caller to seeing them as globals. */
+ if (TREE_CODE (TREE_PURPOSE (list)) == IDENTIFIER_NODE)
+ return 1;
+ /* Do commit caller to not seeing them as globals. */
+ return 0;
+}
+
+/* DECL is a FUNCTION_DECL which may have other definitions already in
+ place. We get around this by making the value of the identifier point
+ to a list of all the things that want to be referenced by that name. It
+ is then up to the users of that name to decide what to do with that
+ list.
+
+ DECL may also be a TEMPLATE_DECL, with a FUNCTION_DECL in its DECL_RESULT
+ slot. It is dealt with the same way.
+
+ The value returned may be a previous declaration if we guessed wrong
+ about what language DECL should belong to (C or C++). Otherwise,
+ it's always DECL (and never something that's not a _DECL). */
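+/* For instance (illustrative): after global declarations of
+     int f (int);   and   int f (double);
+   the identifier `f' carries a TREE_LIST whose TREE_TYPE is
+   unknown_type_node, and the two FUNCTION_DECLs are chained
+   through DECL_CHAIN. */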
+tree
+push_overloaded_decl (decl, forgettable)
+ tree decl;
+ int forgettable;
+{
+ tree orig_name = DECL_NAME (decl);
+ tree old;
+ int doing_global = (global_bindings_p () || ! forgettable
+ || flag_traditional || pseudo_global_level_p ());
+
+ if (doing_global)
+ {
+ old = IDENTIFIER_GLOBAL_VALUE (orig_name);
+ if (old && TREE_CODE (old) == FUNCTION_DECL
+ && DECL_ARTIFICIAL (old)
+ && (DECL_BUILT_IN (old) || DECL_BUILT_IN_NONANSI (old)))
+ {
+ if (duplicate_decls (decl, old))
+ return old;
+ old = NULL_TREE;
+ }
+ }
+ else
+ {
+ old = IDENTIFIER_LOCAL_VALUE (orig_name);
+
+ if (! purpose_member (orig_name, current_binding_level->shadowed))
+ {
+ current_binding_level->shadowed
+ = tree_cons (orig_name, old, current_binding_level->shadowed);
+ old = NULL_TREE;
+ }
+ }
+
+ if (old)
+ {
+#if 0
+ /* We cache the value of builtin functions as ADDR_EXPRs
+ in the name space. Convert it to some kind of _DECL after
+ remembering what to forget. */
+ if (TREE_CODE (old) == ADDR_EXPR)
+ old = TREE_OPERAND (old, 0);
+ else
+#endif
+ if (TREE_CODE (old) == VAR_DECL)
+ {
+ cp_error_at ("previous non-function declaration `%#D'", old);
+ cp_error ("conflicts with function declaration `%#D'", decl);
+ return error_mark_node;
+ }
+ else if (TREE_CODE (old) == TYPE_DECL)
+ {
+ tree t = TREE_TYPE (old);
+ if (IS_AGGR_TYPE (t) && warn_shadow)
+ cp_warning ("`%#D' hides constructor for `%#T'", decl, t);
+ old = NULL_TREE;
+ }
+ else if (is_overloaded_fn (old))
+ {
+ tree tmp;
+
+ for (tmp = get_first_fn (old); tmp; tmp = DECL_CHAIN (tmp))
+ if (decl == tmp || duplicate_decls (decl, tmp))
+ return tmp;
+ }
+ }
+
+ if (old || TREE_CODE (decl) == TEMPLATE_DECL)
+ {
+ if (old && is_overloaded_fn (old))
+ DECL_CHAIN (decl) = get_first_fn (old);
+ else
+ DECL_CHAIN (decl) = NULL_TREE;
+ old = tree_cons (orig_name, decl, NULL_TREE);
+ TREE_TYPE (old) = unknown_type_node;
+ }
+ else
+ /* orig_name is not ambiguous. */
+ old = decl;
+
+ if (doing_global)
+ IDENTIFIER_GLOBAL_VALUE (orig_name) = old;
+ else
+ IDENTIFIER_LOCAL_VALUE (orig_name) = old;
+
+ return decl;
+}
+
+/* Generate an implicit declaration for identifier FUNCTIONID
+ as a function of type int (). Print a warning if appropriate. */
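+
+/* For example (illustrative): a call
+     frob (1);
+   with no declaration of `frob' in scope creates an extern decl of
+   type `int ()' for it here, and normally provokes the pedwarn below. */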
+
+tree
+implicitly_declare (functionid)
+ tree functionid;
+{
+ register tree decl;
+ int temp = allocation_temporary_p ();
+
+ push_obstacks_nochange ();
+
+ /* Save the decl permanently so we can warn if definition follows.
+ In ANSI C, warn_implicit is usually false, so this saves little space.
+ But in C++, it's usually true, hence the extra code. */
+ if (temp && (flag_traditional || !warn_implicit
+ || current_binding_level == global_binding_level))
+ end_temporary_allocation ();
+
+ /* We used to reuse an old implicit decl here,
+ but this loses with inline functions because it can clobber
+ the saved decl chains. */
+ decl = build_lang_decl (FUNCTION_DECL, functionid, default_function_type);
+
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+
+ /* ANSI standard says implicit declarations are in the innermost block.
+ So we record the decl in the standard fashion.
+ If flag_traditional is set, pushdecl does it top-level. */
+ pushdecl (decl);
+ rest_of_decl_compilation (decl, NULL_PTR, 0, 0);
+
+ if (warn_implicit
+ /* Only one warning per identifier. */
+ && IDENTIFIER_IMPLICIT_DECL (functionid) == NULL_TREE)
+ {
+ cp_pedwarn ("implicit declaration of function `%#D'", decl);
+ }
+
+ SET_IDENTIFIER_IMPLICIT_DECL (functionid, decl);
+
+ pop_obstacks ();
+
+ return decl;
+}
+
+/* Return zero if the declaration NEWDECL is valid
+ when the declaration OLDDECL (assumed to be for the same name)
+ has already been seen.
+ Otherwise return an error message format string with a %s
+ where the identifier should go. */
+
+static char *
+redeclaration_error_message (newdecl, olddecl)
+ tree newdecl, olddecl;
+{
+ if (TREE_CODE (newdecl) == TYPE_DECL)
+ {
+ /* Because C++ can put things into the name space for free,
+ constructs like "typedef struct foo { ... } foo"
+ would look like an erroneous redeclaration. */
+ if (comptypes (TREE_TYPE (newdecl), TREE_TYPE (olddecl), 0))
+ return 0;
+ else
+ return "redefinition of `%#D'";
+ }
+ else if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ {
+ /* If this is a pure function, its olddecl will actually be
+ the original initialization to `0' (which we force to call
+ abort()). Don't complain about redefinition in this case. */
+ if (DECL_LANG_SPECIFIC (olddecl) && DECL_ABSTRACT_VIRTUAL_P (olddecl))
+ return 0;
+
+ /* Declarations of functions can insist on internal linkage
+ but they can't be inconsistent with internal linkage,
+ so there can be no error on that account.
+ However defining the same name twice is no good. */
+ if (DECL_INITIAL (olddecl) != NULL_TREE
+ && DECL_INITIAL (newdecl) != NULL_TREE
+ /* However, defining once as extern inline and a second
+ time in another way is ok. */
+ && !(DECL_INLINE (olddecl) && DECL_EXTERNAL (olddecl)
+ && !(DECL_INLINE (newdecl) && DECL_EXTERNAL (newdecl))))
+ {
+ if (DECL_NAME (olddecl) == NULL_TREE)
+ return "`%#D' not declared in class";
+ else
+ return "redefinition of `%#D'";
+ }
+
+ {
+ tree t1 = TYPE_ARG_TYPES (TREE_TYPE (olddecl));
+ tree t2 = TYPE_ARG_TYPES (TREE_TYPE (newdecl));
+
+ if (TREE_CODE (TREE_TYPE (newdecl)) == METHOD_TYPE)
+ t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2);
+
+ for (; t1; t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2))
+ if (TREE_PURPOSE (t1) && TREE_PURPOSE (t2))
+ return "duplicate default arguments given for `%#D'";
+ }
+ return 0;
+ }
+ else if (TREE_CODE (newdecl) == TEMPLATE_DECL)
+ {
+ if (DECL_INITIAL (olddecl) && DECL_INITIAL (newdecl))
+ return "redefinition of `%#D'";
+ return 0;
+ }
+ else if (current_binding_level == global_binding_level)
+ {
+ /* Objects declared at top level: */
+ /* If at least one is a reference, it's ok. */
+ if (DECL_EXTERNAL (newdecl) || DECL_EXTERNAL (olddecl))
+ return 0;
+ /* Now we have two tentative defs, or one tentative and one real def. */
+ /* Insist that the linkage match. */
+ if (TREE_PUBLIC (olddecl) != TREE_PUBLIC (newdecl))
+ return "conflicting declarations of `%#D'";
+ /* Reject two definitions. */
+ return "redefinition of `%#D'";
+ }
+ else
+ {
+ /* Objects declared with block scope: */
+ /* Reject two definitions, and reject a definition
+ together with an external reference. */
+ if (!(DECL_EXTERNAL (newdecl) && DECL_EXTERNAL (olddecl)))
+ return "redeclaration of `%#D'";
+ return 0;
+ }
+}
+
+/* Get the LABEL_DECL corresponding to identifier ID as a label.
+ Create one if none exists so far for the current function.
+ This function is called for both label definitions and label references. */
+
+tree
+lookup_label (id)
+ tree id;
+{
+ register tree decl = IDENTIFIER_LABEL_VALUE (id);
+
+ if (current_function_decl == NULL_TREE)
+ {
+ error ("label `%s' referenced outside of any function",
+ IDENTIFIER_POINTER (id));
+ return NULL_TREE;
+ }
+
+ if ((decl == NULL_TREE
+ || DECL_SOURCE_LINE (decl) == 0)
+ && (named_label_uses == NULL_TREE
+ || TREE_PURPOSE (named_label_uses) != current_binding_level->names
+ || TREE_VALUE (named_label_uses) != decl))
+ {
+ named_label_uses
+ = tree_cons (current_binding_level->names, decl, named_label_uses);
+ TREE_TYPE (named_label_uses) = (tree)current_binding_level;
+ }
+
+ /* Use a label already defined or ref'd with this name. */
+ if (decl != NULL_TREE)
+ {
+ /* But not if it is inherited and wasn't declared to be inheritable. */
+ if (DECL_CONTEXT (decl) != current_function_decl
+ && ! C_DECLARED_LABEL_FLAG (decl))
+ return shadow_label (id);
+ return decl;
+ }
+
+ decl = build_decl (LABEL_DECL, id, void_type_node);
+
+ /* A label not explicitly declared must be local to where it's ref'd. */
+ DECL_CONTEXT (decl) = current_function_decl;
+
+ DECL_MODE (decl) = VOIDmode;
+
+ /* Say where one reference is to the label,
+ for the sake of the error if it is not defined. */
+ DECL_SOURCE_LINE (decl) = lineno;
+ DECL_SOURCE_FILE (decl) = input_filename;
+
+ SET_IDENTIFIER_LABEL_VALUE (id, decl);
+
+ named_labels = tree_cons (NULL_TREE, decl, named_labels);
+ TREE_VALUE (named_label_uses) = decl;
+
+ return decl;
+}
+
+/* Make a label named NAME in the current function,
+ shadowing silently any that may be inherited from containing functions
+ or containing scopes.
+
+ Note that if the label being shadowed comes
+ from another scope in the same function, valid use
+ requires calling declare_nonlocal_label right away. */
+
+tree
+shadow_label (name)
+ tree name;
+{
+ register tree decl = IDENTIFIER_LABEL_VALUE (name);
+
+ if (decl != NULL_TREE)
+ {
+ shadowed_labels = tree_cons (NULL_TREE, decl, shadowed_labels);
+ SET_IDENTIFIER_LABEL_VALUE (name, NULL_TREE);
+ SET_IDENTIFIER_LABEL_VALUE (decl, NULL_TREE);
+ }
+
+ return lookup_label (name);
+}
+
+/* Define a label, specifying the location in the source file.
+ Return the LABEL_DECL node for the label, if the definition is valid.
+ Otherwise return 0. */
+
+tree
+define_label (filename, line, name)
+ char *filename;
+ int line;
+ tree name;
+{
+ tree decl = lookup_label (name);
+
+ /* After labels, make any new cleanups go into their
+ own new (temporary) binding contour. */
+ current_binding_level->more_cleanups_ok = 0;
+
+ /* If label with this name is known from an outer context, shadow it. */
+ if (decl != NULL_TREE && DECL_CONTEXT (decl) != current_function_decl)
+ {
+ shadowed_labels = tree_cons (NULL_TREE, decl, shadowed_labels);
+ SET_IDENTIFIER_LABEL_VALUE (name, NULL_TREE);
+ decl = lookup_label (name);
+ }
+
+ if (DECL_INITIAL (decl) != NULL_TREE)
+ {
+ cp_error ("duplicate label `%D'", decl);
+ return 0;
+ }
+ else
+ {
+ tree uses, prev;
+
+ /* Mark label as having been defined. */
+ DECL_INITIAL (decl) = error_mark_node;
+ /* Say where in the source. */
+ DECL_SOURCE_FILE (decl) = filename;
+ DECL_SOURCE_LINE (decl) = line;
+
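+      /* Walk the recorded uses of this label and diagnose any jump
+	 that would skip a variable initialization, as in this sketch:
+
+	     goto lab;
+	     int i = 3;
+	   lab: ;		<-- crosses initialization of `i'
+
+	 This is done by comparing each use's recorded decl chain with
+	 the decls now in scope at the definition.  */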
+ for (prev = NULL_TREE, uses = named_label_uses;
+ uses;
+ prev = uses, uses = TREE_CHAIN (uses))
+ if (TREE_VALUE (uses) == decl)
+ {
+ struct binding_level *b = current_binding_level;
+ while (b)
+ {
+ tree new_decls = b->names;
+ tree old_decls = ((tree)b == TREE_TYPE (uses)
+ ? TREE_PURPOSE (uses) : NULL_TREE);
+ while (new_decls != old_decls)
+ {
+ if (TREE_CODE (new_decls) == VAR_DECL
+ /* Don't complain about crossing initialization
+ of internal entities. They can't be accessed,
+ and they should be cleaned up
+ by the time we get to the label. */
+ && ! DECL_ARTIFICIAL (new_decls)
+ && ((DECL_INITIAL (new_decls) != NULL_TREE
+ && DECL_INITIAL (new_decls) != error_mark_node)
+ || TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (new_decls))))
+ {
+ if (IDENTIFIER_ERROR_LOCUS (decl) == NULL_TREE)
+ cp_error ("invalid jump to label `%D'", decl);
+ SET_IDENTIFIER_ERROR_LOCUS (decl, current_function_decl);
+ cp_error ("crosses initialization of `%D'", new_decls);
+ }
+ new_decls = TREE_CHAIN (new_decls);
+ }
+ if ((tree)b == TREE_TYPE (uses))
+ break;
+ b = b->level_chain;
+ }
+
+ if (prev)
+ TREE_CHAIN (prev) = TREE_CHAIN (uses);
+ else
+ named_label_uses = TREE_CHAIN (uses);
+ }
+ current_function_return_value = NULL_TREE;
+ return decl;
+ }
+}
+
+/* Same, but for CASE labels. If DECL is NULL_TREE, it's the default. */
+/* XXX Note decl is never actually used. (bpk) */
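+/* For example (a sketch):
+	switch (i) { case 0: T t; case 1: break; }
+   where T has a destructor: jumping to `case 1' would bypass t's
+   construction while its cleanup is still pending, hence the
+   diagnostic below.  */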
+void
+define_case_label (decl)
+ tree decl;
+{
+ tree cleanup = last_cleanup_this_contour ();
+ if (cleanup)
+ {
+ static int explained = 0;
+ cp_error_at ("destructor needed for `%#D'", TREE_PURPOSE (cleanup));
+ error ("where case label appears here");
+ if (!explained)
+ {
+ error ("(enclose actions of previous case statements requiring");
+ error ("destructors in their own binding contours.)");
+ explained = 1;
+ }
+ }
+
+ /* After labels, make any new cleanups go into their
+ own new (temporary) binding contour. */
+
+ current_binding_level->more_cleanups_ok = 0;
+ current_function_return_value = NULL_TREE;
+}
+
+/* Return the list of declarations of the current level.
+ Note that this list is in reverse order unless/until
+ you nreverse it; and when you do nreverse it, you must
+ store the result back using `storedecls' or you will lose. */
+
+tree
+getdecls ()
+{
+ return current_binding_level->names;
+}
+
+/* Return the list of type-tags (for structs, etc) of the current level. */
+
+tree
+gettags ()
+{
+ return current_binding_level->tags;
+}
+
+/* Store the list of declarations of the current level.
+ This is done for the parameter declarations of a function being defined,
+ after they are modified in the light of any missing parameters. */
+
+static void
+storedecls (decls)
+ tree decls;
+{
+ current_binding_level->names = decls;
+}
+
+/* Similarly, store the list of tags of the current level. */
+
+static void
+storetags (tags)
+ tree tags;
+{
+ current_binding_level->tags = tags;
+}
+
+/* Given NAME, an IDENTIFIER_NODE,
+ return the structure (or union or enum) definition for that name.
+ Searches binding levels from BINDING_LEVEL up to the global level.
+ If THISLEVEL_ONLY is nonzero, searches only the specified context
+ (but skips any tag-transparent contexts to find one that is
+ meaningful for tags).
+ FORM says which kind of type the caller wants;
+ it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE.
+ If the wrong kind of type is found, and it's not a template, an error is
+ reported. */
+
+static tree
+lookup_tag (form, name, binding_level, thislevel_only)
+ enum tree_code form;
+     tree name;
+     struct binding_level *binding_level;
+ int thislevel_only;
+{
+ register struct binding_level *level;
+
+ for (level = binding_level; level; level = level->level_chain)
+ {
+ register tree tail;
+ if (ANON_AGGRNAME_P (name))
+ for (tail = level->tags; tail; tail = TREE_CHAIN (tail))
+ {
+ /* There's no need for error checking here, because
+ anon names are unique throughout the compilation. */
+ if (TYPE_IDENTIFIER (TREE_VALUE (tail)) == name)
+ return TREE_VALUE (tail);
+ }
+ else
+ for (tail = level->tags; tail; tail = TREE_CHAIN (tail))
+ {
+ if (TREE_PURPOSE (tail) == name)
+ {
+ enum tree_code code = TREE_CODE (TREE_VALUE (tail));
+ /* Should tighten this up; it'll probably permit
+ UNION_TYPE and a struct template, for example. */
+ if (code != form
+ && !(form != ENUMERAL_TYPE
+ && (code == TEMPLATE_DECL
+ || code == UNINSTANTIATED_P_TYPE)))
+ {
+ /* Definition isn't the kind we were looking for. */
+ cp_error ("`%#D' redeclared as %C", TREE_VALUE (tail),
+ form);
+ }
+ return TREE_VALUE (tail);
+ }
+ }
+ if (thislevel_only && ! level->tag_transparent)
+ return NULL_TREE;
+ if (current_class_type && level->level_chain == global_binding_level)
+ {
+ /* Try looking in this class's tags before heading into
+ global binding level. */
+ tree context = current_class_type;
+ while (context)
+ {
+ switch (TREE_CODE_CLASS (TREE_CODE (context)))
+ {
+ tree these_tags;
+ case 't':
+ these_tags = CLASSTYPE_TAGS (context);
+ if (ANON_AGGRNAME_P (name))
+ while (these_tags)
+ {
+ if (TYPE_IDENTIFIER (TREE_VALUE (these_tags))
+ == name)
+		      return TREE_VALUE (these_tags);
+ these_tags = TREE_CHAIN (these_tags);
+ }
+ else
+ while (these_tags)
+ {
+ if (TREE_PURPOSE (these_tags) == name)
+ {
+ if (TREE_CODE (TREE_VALUE (these_tags)) != form)
+ {
+			    cp_error ("`%#D' redeclared as %C in class scope",
+				      TREE_VALUE (these_tags), form);
+			  }
+			return TREE_VALUE (these_tags);
+ }
+ these_tags = TREE_CHAIN (these_tags);
+ }
+ /* If this type is not yet complete, then don't
+ look at its context. */
+ if (TYPE_SIZE (context) == NULL_TREE)
+ goto no_context;
+ /* Go to next enclosing type, if any. */
+ context = DECL_CONTEXT (TYPE_NAME (context));
+ break;
+ case 'd':
+ context = DECL_CONTEXT (context);
+ break;
+ default:
+ my_friendly_abort (10);
+ }
+ continue;
+ no_context:
+ break;
+ }
+ }
+ }
+ return NULL_TREE;
+}
+
+void
+set_current_level_tags_transparency (tags_transparent)
+ int tags_transparent;
+{
+ current_binding_level->tag_transparent = tags_transparent;
+}
+
+/* Given a type, find the tag that was defined for it and return the tag name.
+ Otherwise return 0. However, the value can never be 0
+ in the cases in which this is used.
+
+ C++: If NAME is non-zero, this is the new name to install. This is
+ done when replacing anonymous tags with real tag names. */
+
+static tree
+lookup_tag_reverse (type, name)
+ tree type;
+ tree name;
+{
+ register struct binding_level *level;
+
+ for (level = current_binding_level; level; level = level->level_chain)
+ {
+ register tree tail;
+ for (tail = level->tags; tail; tail = TREE_CHAIN (tail))
+ {
+ if (TREE_VALUE (tail) == type)
+ {
+ if (name)
+ TREE_PURPOSE (tail) = name;
+ return TREE_PURPOSE (tail);
+ }
+ }
+ }
+ return NULL_TREE;
+}
+
+/* Given a type TAG that was not declared in a C++ language context,
+   attempt to find the TYPE_DECL by which it is referred to.  */
+tree
+typedecl_for_tag (tag)
+ tree tag;
+{
+ struct binding_level *b = current_binding_level;
+
+ if (TREE_CODE (TYPE_NAME (tag)) == TYPE_DECL)
+ return TYPE_NAME (tag);
+
+ while (b)
+ {
+ tree decls = b->names;
+ while (decls)
+ {
+ if (TREE_CODE (decls) == TYPE_DECL && TREE_TYPE (decls) == tag)
+ break;
+ decls = TREE_CHAIN (decls);
+ }
+ if (decls)
+ return decls;
+ b = b->level_chain;
+ }
+ return NULL_TREE;
+}
+
+/* Lookup TYPE in CONTEXT (a chain of nested types or a FUNCTION_DECL).
+ Return the type value, or NULL_TREE if not found. */
+static tree
+lookup_nested_type (type, context)
+ tree type;
+ tree context;
+{
+ if (context == NULL_TREE)
+ return NULL_TREE;
+ while (context)
+ {
+ switch (TREE_CODE (context))
+ {
+ case TYPE_DECL:
+ {
+ tree ctype = TREE_TYPE (context);
+ tree match = value_member (type, CLASSTYPE_TAGS (ctype));
+ if (match)
+ return TREE_VALUE (match);
+ context = DECL_CONTEXT (context);
+
+ /* When we have a nested class whose member functions have
+ local types (e.g., a set of enums), we'll arrive here
+ with the DECL_CONTEXT as the actual RECORD_TYPE node for
+ the enclosing class. Instead, we want to make sure we
+ come back in here with the TYPE_DECL, not the RECORD_TYPE. */
+ if (context && TREE_CODE (context) == RECORD_TYPE)
+ context = TREE_CHAIN (context);
+ }
+ break;
+ case FUNCTION_DECL:
+ return TYPE_IDENTIFIER (type) ?
+ lookup_name (TYPE_IDENTIFIER (type), 1) : NULL_TREE;
+ break;
+ default:
+ my_friendly_abort (12);
+ }
+ }
+ return NULL_TREE;
+}
+
+/* Look up NAME in the current binding level and its superiors in the
+ namespace of variables, functions and typedefs. Return a ..._DECL
+ node of some kind representing its definition if there is only one
+ such declaration, or return a TREE_LIST with all the overloaded
+ definitions if there are many, or return 0 if it is undefined.
+
+ If PREFER_TYPE is > 0, we prefer TYPE_DECLs.
+ If PREFER_TYPE is -2, we're being called from yylex(). (UGLY)
+ Otherwise we prefer non-TYPE_DECLs. */
+
+tree
+lookup_name_real (name, prefer_type, nonclass)
+ tree name;
+ int prefer_type, nonclass;
+{
+ register tree val;
+ int yylex = 0;
+
+ if (prefer_type == -2)
+ {
+ extern int looking_for_typename;
+
+ yylex = 1;
+ prefer_type = looking_for_typename;
+
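+      /* got_scope, apparently set by the parser while scanning a
+	 qualified name, is the scope being qualified: void_type_node
+	 stands for the global scope (::x); otherwise the lookup is
+	 confined to that class or template scope.  */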
+ if (got_scope != NULL_TREE)
+ {
+ if (got_scope == void_type_node)
+ val = IDENTIFIER_GLOBAL_VALUE (name);
+ else if (TREE_CODE (got_scope) == TEMPLATE_TYPE_PARM
+ /* TFIXME -- don't do this for UPTs in new model. */
+ || TREE_CODE (got_scope) == UNINSTANTIATED_P_TYPE)
+ {
+ if (prefer_type > 0)
+ val = create_nested_upt (got_scope, name);
+ else
+ val = NULL_TREE;
+ }
+ else if (! IS_AGGR_TYPE (got_scope))
+ /* Someone else will give an error about this if needed. */
+ val = NULL_TREE;
+ else if (TYPE_BEING_DEFINED (got_scope))
+ {
+ val = IDENTIFIER_CLASS_VALUE (name);
+ if (val && DECL_CONTEXT (val) != got_scope)
+ {
+ struct binding_level *b = class_binding_level;
+ for (val = NULL_TREE; b; b = b->level_chain)
+ {
+ tree t = purpose_member (name, b->class_shadowed);
+ if (t && TREE_VALUE (t)
+ && DECL_CONTEXT (TREE_VALUE (t)) == got_scope)
+ {
+ val = TREE_VALUE (t);
+ break;
+ }
+ }
+ }
+ if (val == NULL_TREE
+ && CLASSTYPE_LOCAL_TYPEDECLS (got_scope))
+ val = lookup_field (got_scope, name, 0, 1);
+ }
+ else if (got_scope == current_class_type)
+ val = IDENTIFIER_CLASS_VALUE (name);
+ else
+ val = lookup_field (got_scope, name, 0, 0);
+
+ goto done;
+ }
+ }
+
+ if (current_binding_level != global_binding_level
+ && IDENTIFIER_LOCAL_VALUE (name))
+ val = IDENTIFIER_LOCAL_VALUE (name);
+ /* In C++ class fields are between local and global scope,
+ just before the global scope. */
+ else if (current_class_type && ! nonclass)
+ {
+ val = IDENTIFIER_CLASS_VALUE (name);
+ if (val == NULL_TREE
+ && TYPE_BEING_DEFINED (current_class_type)
+ && CLASSTYPE_LOCAL_TYPEDECLS (current_class_type))
+ /* Try to find values from base classes if we are presently
+ defining a type. We are presently only interested in
+ TYPE_DECLs. */
+ val = lookup_field (current_class_type, name, 0, 1);
+
+ /* yylex() calls this with -2, since we should never start digging for
+ the nested name at the point where we haven't even, for example,
+ created the COMPONENT_REF or anything like that. */
+ if (val == NULL_TREE)
+ val = lookup_nested_field (name, ! yylex);
+
+ if (val == NULL_TREE)
+ val = IDENTIFIER_GLOBAL_VALUE (name);
+ }
+ else
+ val = IDENTIFIER_GLOBAL_VALUE (name);
+
+ done:
+ if (val)
+ {
+ if ((TREE_CODE (val) == TEMPLATE_DECL && looking_for_template)
+ || TREE_CODE (val) == TYPE_DECL || prefer_type <= 0)
+ return val;
+
+ if (IDENTIFIER_HAS_TYPE_VALUE (name))
+ return TYPE_NAME (IDENTIFIER_TYPE_VALUE (name));
+
+ if (TREE_TYPE (val) == error_mark_node)
+ return error_mark_node;
+ }
+
+ return val;
+}
+
+tree
+lookup_name_nonclass (name)
+ tree name;
+{
+ return lookup_name_real (name, 0, 1);
+}
+
+tree
+lookup_name (name, prefer_type)
+ tree name;
+ int prefer_type;
+{
+ return lookup_name_real (name, prefer_type, 0);
+}
+
+/* Similar to `lookup_name' but look only at current binding level. */
+
+tree
+lookup_name_current_level (name)
+ tree name;
+{
+ register tree t = NULL_TREE;
+
+ if (current_binding_level == global_binding_level)
+ {
+ t = IDENTIFIER_GLOBAL_VALUE (name);
+
+ /* extern "C" function() */
+ if (t != NULL_TREE && TREE_CODE (t) == TREE_LIST)
+ t = TREE_VALUE (t);
+ }
+ else if (IDENTIFIER_LOCAL_VALUE (name) != NULL_TREE)
+ {
+ struct binding_level *b = current_binding_level;
+ while (1)
+ {
+ for (t = b->names; t; t = TREE_CHAIN (t))
+ if (DECL_NAME (t) == name)
+ goto out;
+ if (b->keep == 2)
+ b = b->level_chain;
+ else
+ break;
+ }
+ out:
+ ;
+ }
+
+ return t;
+}
+
+/* Arrange for the user to get a source line number, even when the
+   compiler is going down in flames, so that she at least has a
+   chance of working around problems in the compiler.  We used to
+   call error(), but that let the segmentation fault continue
+   through; now we are much more passive, simply asking her to send
+   the maintainers mail about the problem.  */
+
+static void
+signal_catch (sig)
+ int sig;
+{
+ signal (SIGSEGV, SIG_DFL);
+#ifdef SIGIOT
+ signal (SIGIOT, SIG_DFL);
+#endif
+#ifdef SIGILL
+ signal (SIGILL, SIG_DFL);
+#endif
+#ifdef SIGABRT
+ signal (SIGABRT, SIG_DFL);
+#endif
+#ifdef SIGBUS
+ signal (SIGBUS, SIG_DFL);
+#endif
+ my_friendly_abort (0);
+}
+
+/* Array for holding types considered "built-in". These types
+ are output in the module in which `main' is defined. */
+static tree *builtin_type_tdescs_arr;
+static int builtin_type_tdescs_len, builtin_type_tdescs_max;
+
+/* Push the declarations of builtin types into the namespace.
+   RID_INDEX, if < RID_MAX, is the index of the builtin type
+   in the array RID_POINTERS.  NAME is the name used when looking
+   up the builtin type.  TYPE is the _TYPE node for the builtin type.  */
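+/* E.g. `long int' is entered under both RID_LONG and the name "long int";
+   spellings with no reserved word of their own, such as "unsigned long",
+   are entered under RID_MAX (see the calls in init_decl_processing).  */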
+
+static void
+record_builtin_type (rid_index, name, type)
+ enum rid rid_index;
+ char *name;
+ tree type;
+{
+ tree rname = NULL_TREE, tname = NULL_TREE;
+ tree tdecl;
+
+ if ((int) rid_index < (int) RID_MAX)
+ rname = ridpointers[(int) rid_index];
+ if (name)
+ tname = get_identifier (name);
+
+ TYPE_BUILT_IN (type) = 1;
+
+ if (tname)
+ {
+#if 0 /* not yet, should get fixed properly later */
+ tdecl = pushdecl (make_type_decl (tname, type));
+#else
+ tdecl = pushdecl (build_decl (TYPE_DECL, tname, type));
+#endif
+ set_identifier_type_value (tname, NULL_TREE);
+ if ((int) rid_index < (int) RID_MAX)
+ IDENTIFIER_GLOBAL_VALUE (tname) = tdecl;
+ }
+ if (rname != NULL_TREE)
+ {
+ if (tname != NULL_TREE)
+ {
+ set_identifier_type_value (rname, NULL_TREE);
+ IDENTIFIER_GLOBAL_VALUE (rname) = tdecl;
+ }
+ else
+ {
+#if 0 /* not yet, should get fixed properly later */
+ tdecl = pushdecl (make_type_decl (rname, type));
+#else
+ tdecl = pushdecl (build_decl (TYPE_DECL, rname, type));
+#endif
+ set_identifier_type_value (rname, NULL_TREE);
+ }
+ }
+
+ if (flag_dossier)
+ {
+ if (builtin_type_tdescs_len+5 >= builtin_type_tdescs_max)
+ {
+ builtin_type_tdescs_max *= 2;
+ builtin_type_tdescs_arr
+ = (tree *)xrealloc (builtin_type_tdescs_arr,
+ builtin_type_tdescs_max * sizeof (tree));
+ }
+ builtin_type_tdescs_arr[builtin_type_tdescs_len++] = type;
+ if (TREE_CODE (type) != POINTER_TYPE)
+ {
+ builtin_type_tdescs_arr[builtin_type_tdescs_len++]
+ = build_pointer_type (type);
+ builtin_type_tdescs_arr[builtin_type_tdescs_len++]
+ = build_pointer_type (build_type_variant (type, 1, 0));
+ }
+ if (TREE_CODE (type) != VOID_TYPE)
+ {
+ builtin_type_tdescs_arr[builtin_type_tdescs_len++]
+ = build_reference_type (type);
+ builtin_type_tdescs_arr[builtin_type_tdescs_len++]
+ = build_reference_type (build_type_variant (type, 1, 0));
+ }
+ }
+}
+
+static void
+output_builtin_tdesc_entries ()
+{
+ extern struct obstack permanent_obstack;
+
+ /* If there's more than one main in this file, don't crash. */
+ if (builtin_type_tdescs_arr == 0)
+ return;
+
+ push_obstacks (&permanent_obstack, &permanent_obstack);
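+  /* Clear TREE_ASM_WRITTEN (set when the type was recorded) and rebuild
+     each descriptor at definition level 2, which presumably forces it
+     to be emitted in this module.  */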
+ while (builtin_type_tdescs_len > 0)
+ {
+ tree type = builtin_type_tdescs_arr[--builtin_type_tdescs_len];
+ tree tdesc = build_t_desc (type, 0);
+ TREE_ASM_WRITTEN (tdesc) = 0;
+ build_t_desc (type, 2);
+ }
+ free (builtin_type_tdescs_arr);
+ builtin_type_tdescs_arr = 0;
+ pop_obstacks ();
+}
+
+/* Push overloaded decl, in global scope, with one argument so it
+ can be used as a callback from define_function. */
+static void
+push_overloaded_decl_1 (x)
+ tree x;
+{
+ push_overloaded_decl (x, 0);
+}
+
+/* Create the predefined scalar types of C,
+ and some nodes representing standard constants (0, 1, (void *)0).
+ Initialize the global binding level.
+ Make definitions for built-in primitive functions. */
+
+void
+init_decl_processing ()
+{
+ tree decl;
+ register tree endlink, int_endlink, double_endlink, ptr_endlink;
+ tree fields[20];
+ /* Either char* or void*. */
+ tree traditional_ptr_type_node;
+ /* Data type of memcpy. */
+ tree memcpy_ftype;
+#if 0 /* Not yet. */
+ /* Data type of strncpy. */
+ tree strncpy_ftype;
+#endif
+ int wchar_type_size;
+ tree temp;
+ tree array_domain_type;
+
+ /* Have to make these distinct before we try using them. */
+ lang_name_cplusplus = get_identifier ("C++");
+ lang_name_c = get_identifier ("C");
+
+ if (flag_ansi || pedantic)
+ strict_prototypes_lang_c = strict_prototypes_lang_cplusplus;
+
+ /* Initially, C. */
+ current_lang_name = lang_name_c;
+
+ current_function_decl = NULL_TREE;
+ named_labels = NULL_TREE;
+ named_label_uses = NULL_TREE;
+ current_binding_level = NULL_BINDING_LEVEL;
+ free_binding_level = NULL_BINDING_LEVEL;
+
+ /* Because most segmentation signals can be traced back into user
+ code, catch them and at least give the user a chance of working
+ around compiler bugs. */
+ signal (SIGSEGV, signal_catch);
+
+ /* We will also catch aborts in the back-end through signal_catch and
+ give the user a chance to see where the error might be, and to defeat
+ aborts in the back-end when there have been errors previously in their
+ code. */
+#ifdef SIGIOT
+ signal (SIGIOT, signal_catch);
+#endif
+#ifdef SIGILL
+ signal (SIGILL, signal_catch);
+#endif
+#ifdef SIGABRT
+ signal (SIGABRT, signal_catch);
+#endif
+#ifdef SIGBUS
+ signal (SIGBUS, signal_catch);
+#endif
+
+ gcc_obstack_init (&decl_obstack);
+ if (flag_dossier)
+ {
+ builtin_type_tdescs_max = 100;
+ builtin_type_tdescs_arr = (tree *)xmalloc (100 * sizeof (tree));
+ }
+
+ /* Must lay these out before anything else gets laid out. */
+ error_mark_node = make_node (ERROR_MARK);
+ TREE_PERMANENT (error_mark_node) = 1;
+ TREE_TYPE (error_mark_node) = error_mark_node;
+ error_mark_list = build_tree_list (error_mark_node, error_mark_node);
+ TREE_TYPE (error_mark_list) = error_mark_node;
+
+ /* Make the binding_level structure for global names. */
+ pushlevel (0);
+ global_binding_level = current_binding_level;
+
+ this_identifier = get_identifier (THIS_NAME);
+ in_charge_identifier = get_identifier (IN_CHARGE_NAME);
+ pfn_identifier = get_identifier (VTABLE_PFN_NAME);
+ index_identifier = get_identifier (VTABLE_INDEX_NAME);
+ delta_identifier = get_identifier (VTABLE_DELTA_NAME);
+ delta2_identifier = get_identifier (VTABLE_DELTA2_NAME);
+ pfn_or_delta2_identifier = get_identifier ("__pfn_or_delta2");
+
+ /* Define `int' and `char' first so that dbx will output them first. */
+
+ integer_type_node = make_signed_type (INT_TYPE_SIZE);
+ record_builtin_type (RID_INT, NULL_PTR, integer_type_node);
+
+ /* Define `char', which is like either `signed char' or `unsigned char'
+ but not the same as either. */
+
+ char_type_node =
+ (flag_signed_char
+ ? make_signed_type (CHAR_TYPE_SIZE)
+ : make_unsigned_type (CHAR_TYPE_SIZE));
+ record_builtin_type (RID_CHAR, "char", char_type_node);
+
+ long_integer_type_node = make_signed_type (LONG_TYPE_SIZE);
+ record_builtin_type (RID_LONG, "long int", long_integer_type_node);
+
+ unsigned_type_node = make_unsigned_type (INT_TYPE_SIZE);
+ record_builtin_type (RID_UNSIGNED, "unsigned int", unsigned_type_node);
+
+ long_unsigned_type_node = make_unsigned_type (LONG_TYPE_SIZE);
+ record_builtin_type (RID_MAX, "long unsigned int", long_unsigned_type_node);
+ record_builtin_type (RID_MAX, "unsigned long", long_unsigned_type_node);
+
+ long_long_integer_type_node = make_signed_type (LONG_LONG_TYPE_SIZE);
+ record_builtin_type (RID_MAX, "long long int", long_long_integer_type_node);
+
+ long_long_unsigned_type_node = make_unsigned_type (LONG_LONG_TYPE_SIZE);
+ record_builtin_type (RID_MAX, "long long unsigned int",
+ long_long_unsigned_type_node);
+ record_builtin_type (RID_MAX, "long long unsigned",
+ long_long_unsigned_type_node);
+
+  /* `unsigned long' is the standard type for sizeof.
+     Traditionally (with -traditional), use a signed type instead.
+     Note that stddef.h uses `unsigned long',
+     and this must agree, even if long and int are the same size.  */
+ sizetype
+ = TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (get_identifier (SIZE_TYPE)));
+ if (flag_traditional && TREE_UNSIGNED (sizetype))
+ sizetype = signed_type (sizetype);
+
+ ptrdiff_type_node
+ = TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (get_identifier (PTRDIFF_TYPE)));
+
+ TREE_TYPE (TYPE_SIZE (integer_type_node)) = sizetype;
+ TREE_TYPE (TYPE_SIZE (char_type_node)) = sizetype;
+ TREE_TYPE (TYPE_SIZE (unsigned_type_node)) = sizetype;
+ TREE_TYPE (TYPE_SIZE (long_unsigned_type_node)) = sizetype;
+ TREE_TYPE (TYPE_SIZE (long_integer_type_node)) = sizetype;
+ TREE_TYPE (TYPE_SIZE (long_long_integer_type_node)) = sizetype;
+ TREE_TYPE (TYPE_SIZE (long_long_unsigned_type_node)) = sizetype;
+
+ short_integer_type_node = make_signed_type (SHORT_TYPE_SIZE);
+ record_builtin_type (RID_SHORT, "short int", short_integer_type_node);
+ short_unsigned_type_node = make_unsigned_type (SHORT_TYPE_SIZE);
+ record_builtin_type (RID_MAX, "short unsigned int", short_unsigned_type_node);
+ record_builtin_type (RID_MAX, "unsigned short", short_unsigned_type_node);
+
+ /* Define both `signed char' and `unsigned char'. */
+ signed_char_type_node = make_signed_type (CHAR_TYPE_SIZE);
+ record_builtin_type (RID_MAX, "signed char", signed_char_type_node);
+ unsigned_char_type_node = make_unsigned_type (CHAR_TYPE_SIZE);
+ record_builtin_type (RID_MAX, "unsigned char", unsigned_char_type_node);
+
+ /* These are types that type_for_size and type_for_mode use. */
+ intQI_type_node = make_signed_type (GET_MODE_BITSIZE (QImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, intQI_type_node));
+ intHI_type_node = make_signed_type (GET_MODE_BITSIZE (HImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, intHI_type_node));
+ intSI_type_node = make_signed_type (GET_MODE_BITSIZE (SImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, intSI_type_node));
+ intDI_type_node = make_signed_type (GET_MODE_BITSIZE (DImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, intDI_type_node));
+ unsigned_intQI_type_node = make_unsigned_type (GET_MODE_BITSIZE (QImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intQI_type_node));
+ unsigned_intHI_type_node = make_unsigned_type (GET_MODE_BITSIZE (HImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intHI_type_node));
+ unsigned_intSI_type_node = make_unsigned_type (GET_MODE_BITSIZE (SImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intSI_type_node));
+ unsigned_intDI_type_node = make_unsigned_type (GET_MODE_BITSIZE (DImode));
+ pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intDI_type_node));
+
+ float_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (float_type_node) = FLOAT_TYPE_SIZE;
+ record_builtin_type (RID_FLOAT, NULL_PTR, float_type_node);
+ layout_type (float_type_node);
+
+ double_type_node = make_node (REAL_TYPE);
+ if (flag_short_double)
+ TYPE_PRECISION (double_type_node) = FLOAT_TYPE_SIZE;
+ else
+ TYPE_PRECISION (double_type_node) = DOUBLE_TYPE_SIZE;
+ record_builtin_type (RID_DOUBLE, NULL_PTR, double_type_node);
+ layout_type (double_type_node);
+
+ long_double_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (long_double_type_node) = LONG_DOUBLE_TYPE_SIZE;
+ record_builtin_type (RID_MAX, "long double", long_double_type_node);
+ layout_type (long_double_type_node);
+
+ integer_zero_node = build_int_2 (0, 0);
+ TREE_TYPE (integer_zero_node) = integer_type_node;
+ integer_one_node = build_int_2 (1, 0);
+ TREE_TYPE (integer_one_node) = integer_type_node;
+ integer_two_node = build_int_2 (2, 0);
+ TREE_TYPE (integer_two_node) = integer_type_node;
+ integer_three_node = build_int_2 (3, 0);
+ TREE_TYPE (integer_three_node) = integer_type_node;
+
+ bool_type_node = make_unsigned_type (CHAR_TYPE_SIZE);
+ TREE_SET_CODE (bool_type_node, BOOLEAN_TYPE);
+ record_builtin_type (RID_BOOL, "bool", bool_type_node);
+ false_node = build_int_2 (0, 0);
+ TREE_TYPE (false_node) = bool_type_node;
+ true_node = build_int_2 (1, 0);
+ TREE_TYPE (true_node) = bool_type_node;
+
+ /* These are needed by stor-layout.c. */
+ size_zero_node = size_int (0);
+ size_one_node = size_int (1);
+
+ void_type_node = make_node (VOID_TYPE);
+ record_builtin_type (RID_VOID, NULL_PTR, void_type_node);
+ layout_type (void_type_node); /* Uses integer_zero_node. */
+ void_list_node = build_tree_list (NULL_TREE, void_type_node);
+ TREE_PARMLIST (void_list_node) = 1;
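+  /* void_list_node represents a (void) parameter list; it also
+     terminates every prototype argument chain built below (endlink).  */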
+
+ null_pointer_node = build_int_2 (0, 0);
+ TREE_TYPE (null_pointer_node) = build_pointer_type (void_type_node);
+ layout_type (TREE_TYPE (null_pointer_node));
+
+ /* Used for expressions that do nothing, but are not errors. */
+ void_zero_node = build_int_2 (0, 0);
+ TREE_TYPE (void_zero_node) = void_type_node;
+
+ string_type_node = build_pointer_type (char_type_node);
+ const_string_type_node =
+ build_pointer_type (build_type_variant (char_type_node, 1, 0));
+ record_builtin_type (RID_MAX, NULL_PTR, string_type_node);
+
+ /* Make a type to be the domain of a few array types
+ whose domains don't really matter.
+ 200 is small enough that it always fits in size_t
+ and large enough that it can hold most function names for the
+ initializations of __FUNCTION__ and __PRETTY_FUNCTION__. */
+ array_domain_type = build_index_type (build_int_2 (200, 0));
+
+  /* Make a type for arrays of characters.
+ With luck nothing will ever really depend on the length of this
+ array type. */
+ char_array_type_node
+ = build_array_type (char_type_node, array_domain_type);
+ /* Likewise for arrays of ints. */
+ int_array_type_node
+ = build_array_type (integer_type_node, array_domain_type);
+
+ /* This is just some anonymous class type. Nobody should ever
+ need to look inside this envelope. */
+ class_star_type_node = build_pointer_type (make_lang_type (RECORD_TYPE));
+
+ default_function_type
+ = build_function_type (integer_type_node, NULL_TREE);
+ build_pointer_type (default_function_type);
+
+ ptr_type_node = build_pointer_type (void_type_node);
+ const_ptr_type_node =
+ build_pointer_type (build_type_variant (void_type_node, 1, 0));
+ record_builtin_type (RID_MAX, NULL_PTR, ptr_type_node);
+ endlink = void_list_node;
+ int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
+ double_endlink = tree_cons (NULL_TREE, double_type_node, endlink);
+ ptr_endlink = tree_cons (NULL_TREE, ptr_type_node, endlink);
+
+ double_ftype_double
+ = build_function_type (double_type_node, double_endlink);
+
+ double_ftype_double_double
+ = build_function_type (double_type_node,
+ tree_cons (NULL_TREE, double_type_node,
+ double_endlink));
+
+ int_ftype_int
+ = build_function_type (integer_type_node, int_endlink);
+
+ long_ftype_long
+ = build_function_type (long_integer_type_node,
+ tree_cons (NULL_TREE, long_integer_type_node,
+ endlink));
+
+ void_ftype_ptr_ptr_int
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ int_endlink)));
+
+ int_ftype_cptr_cptr_sizet
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, const_ptr_type_node,
+ tree_cons (NULL_TREE, const_ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink))));
+
+ void_ftype_ptr_int_int
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ int_endlink)));
+
+ string_ftype_ptr_ptr /* strcpy prototype */
+ = build_function_type (string_type_node,
+ tree_cons (NULL_TREE, string_type_node,
+ tree_cons (NULL_TREE,
+ const_string_type_node,
+ endlink)));
+
+#if 0
+ /* Not yet. */
+ strncpy_ftype /* strncpy prototype */
+ = build_function_type (string_type_node,
+ tree_cons (NULL_TREE, string_type_node,
+ tree_cons (NULL_TREE, const_string_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink))));
+#endif
+
+ int_ftype_string_string /* strcmp prototype */
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, const_string_type_node,
+ tree_cons (NULL_TREE,
+ const_string_type_node,
+ endlink)));
+
+ sizet_ftype_string /* strlen prototype */
+ = build_function_type (sizetype,
+ tree_cons (NULL_TREE, const_string_type_node,
+ endlink));
+
+ traditional_ptr_type_node
+ = (flag_traditional ? string_type_node : ptr_type_node);
+
+ memcpy_ftype /* memcpy prototype */
+ = build_function_type (traditional_ptr_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ tree_cons (NULL_TREE, const_ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink))));
+
+ if (flag_huge_objects)
+ delta_type_node = long_integer_type_node;
+ else
+ delta_type_node = short_integer_type_node;
+
+ builtin_function ("__builtin_constant_p", int_ftype_int,
+ BUILT_IN_CONSTANT_P, NULL_PTR);
+
+ builtin_return_address_fndecl =
+ builtin_function ("__builtin_return_address",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ unsigned_type_node,
+ endlink)),
+ BUILT_IN_RETURN_ADDRESS, NULL_PTR);
+
+ builtin_function ("__builtin_frame_address",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ unsigned_type_node,
+ endlink)),
+ BUILT_IN_FRAME_ADDRESS, NULL_PTR);
+
+ builtin_function ("__builtin_alloca",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink)),
+ BUILT_IN_ALLOCA, "alloca");
+#if 0
+ builtin_function ("alloca",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink)),
+ BUILT_IN_ALLOCA, NULL_PTR);
+#endif
+
+ builtin_function ("__builtin_abs", int_ftype_int,
+ BUILT_IN_ABS, NULL_PTR);
+ builtin_function ("__builtin_fabs", double_ftype_double,
+ BUILT_IN_FABS, NULL_PTR);
+ builtin_function ("__builtin_labs", long_ftype_long,
+ BUILT_IN_LABS, NULL_PTR);
+ builtin_function ("__builtin_ffs", int_ftype_int,
+ BUILT_IN_FFS, NULL_PTR);
+ builtin_function ("__builtin_fsqrt", double_ftype_double,
+ BUILT_IN_FSQRT, NULL_PTR);
+ builtin_function ("__builtin_sin", double_ftype_double,
+ BUILT_IN_SIN, "sin");
+ builtin_function ("__builtin_cos", double_ftype_double,
+ BUILT_IN_COS, "cos");
+ builtin_function ("__builtin_saveregs",
+ build_function_type (ptr_type_node, NULL_TREE),
+ BUILT_IN_SAVEREGS, NULL_PTR);
+/* EXPAND_BUILTIN_VARARGS is obsolete. */
+#if 0
+ builtin_function ("__builtin_varargs",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink)),
+ BUILT_IN_VARARGS, NULL_PTR);
+#endif
+ builtin_function ("__builtin_classify_type", default_function_type,
+ BUILT_IN_CLASSIFY_TYPE, NULL_PTR);
+ builtin_function ("__builtin_next_arg",
+ build_function_type (ptr_type_node, NULL_TREE),
+ BUILT_IN_NEXT_ARG, NULL_PTR);
+ builtin_function ("__builtin_args_info",
+ build_function_type (integer_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink)),
+ BUILT_IN_ARGS_INFO, NULL_PTR);
+
+ /* Untyped call and return. */
+ builtin_function ("__builtin_apply_args",
+ build_function_type (ptr_type_node, NULL_TREE),
+ BUILT_IN_APPLY_ARGS, NULL_PTR);
+
+ temp = tree_cons (NULL_TREE,
+ build_pointer_type (build_function_type (void_type_node,
+ NULL_TREE)),
+ tree_cons (NULL_TREE,
+ ptr_type_node,
+ tree_cons (NULL_TREE,
+ sizetype,
+ endlink)));
+ builtin_function ("__builtin_apply",
+ build_function_type (ptr_type_node, temp),
+ BUILT_IN_APPLY, NULL_PTR);
+ builtin_function ("__builtin_return",
+ build_function_type (void_type_node,
+ tree_cons (NULL_TREE,
+ ptr_type_node,
+ endlink)),
+ BUILT_IN_RETURN, NULL_PTR);
+
+ /* Currently under experimentation. */
+ builtin_function ("__builtin_memcpy", memcpy_ftype,
+ BUILT_IN_MEMCPY, "memcpy");
+ builtin_function ("__builtin_memcmp", int_ftype_cptr_cptr_sizet,
+ BUILT_IN_MEMCMP, "memcmp");
+ builtin_function ("__builtin_strcmp", int_ftype_string_string,
+ BUILT_IN_STRCMP, "strcmp");
+ builtin_function ("__builtin_strcpy", string_ftype_ptr_ptr,
+ BUILT_IN_STRCPY, "strcpy");
+#if 0
+ /* Not yet. */
+ builtin_function ("__builtin_strncpy", strncpy_ftype,
+ BUILT_IN_STRNCPY, "strncpy");
+#endif
+ builtin_function ("__builtin_strlen", sizet_ftype_string,
+ BUILT_IN_STRLEN, "strlen");
+
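+  /* The __builtin_ forms above are always available; the plain library
+     names below are predeclared only when -fno-builtin is not given.  */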
+ if (!flag_no_builtin)
+ {
+#if 0 /* These do not work well with libg++. */
+ builtin_function ("abs", int_ftype_int, BUILT_IN_ABS, NULL_PTR);
+ builtin_function ("fabs", double_ftype_double, BUILT_IN_FABS, NULL_PTR);
+ builtin_function ("labs", long_ftype_long, BUILT_IN_LABS, NULL_PTR);
+#endif
+ builtin_function ("memcpy", memcpy_ftype, BUILT_IN_MEMCPY, NULL_PTR);
+ builtin_function ("memcmp", int_ftype_cptr_cptr_sizet, BUILT_IN_MEMCMP,
+ NULL_PTR);
+ builtin_function ("strcmp", int_ftype_string_string, BUILT_IN_STRCMP, NULL_PTR);
+ builtin_function ("strcpy", string_ftype_ptr_ptr, BUILT_IN_STRCPY,
+ NULL_PTR);
+#if 0
+ /* Not yet. */
+ builtin_function ("strncpy", strncpy_ftype, BUILT_IN_STRNCPY, NULL_PTR);
+#endif
+ builtin_function ("strlen", sizet_ftype_string, BUILT_IN_STRLEN, NULL_PTR);
+ builtin_function ("sin", double_ftype_double, BUILT_IN_SIN, NULL_PTR);
+ builtin_function ("cos", double_ftype_double, BUILT_IN_COS, NULL_PTR);
+ }
+
+#if 0
+ /* Support for these has not been written in either expand_builtin
+ or build_function_call. */
+ builtin_function ("__builtin_div", default_ftype, BUILT_IN_DIV, 0);
+ builtin_function ("__builtin_ldiv", default_ftype, BUILT_IN_LDIV, 0);
+ builtin_function ("__builtin_ffloor", double_ftype_double, BUILT_IN_FFLOOR,
+ 0);
+ builtin_function ("__builtin_fceil", double_ftype_double, BUILT_IN_FCEIL, 0);
+ builtin_function ("__builtin_fmod", double_ftype_double_double,
+ BUILT_IN_FMOD, 0);
+ builtin_function ("__builtin_frem", double_ftype_double_double,
+ BUILT_IN_FREM, 0);
+ builtin_function ("__builtin_memset", ptr_ftype_ptr_int_int, BUILT_IN_MEMSET,
+ 0);
+ builtin_function ("__builtin_getexp", double_ftype_double, BUILT_IN_GETEXP,
+ 0);
+ builtin_function ("__builtin_getman", double_ftype_double, BUILT_IN_GETMAN,
+ 0);
+#endif
+
+ /* C++ extensions */
+
+ unknown_type_node = make_node (UNKNOWN_TYPE);
+#if 0 /* not yet, should get fixed properly later */
+ pushdecl (make_type_decl (get_identifier ("unknown type"),
+ unknown_type_node));
+#else
+ decl = pushdecl (build_decl (TYPE_DECL, get_identifier ("unknown type"),
+ unknown_type_node));
+ /* Make sure the "unknown type" typedecl gets ignored for debug info. */
+ DECL_IGNORED_P (decl) = 1;
+ TYPE_DECL_SUPPRESS_DEBUG (decl) = 1;
+#endif
+ TYPE_SIZE (unknown_type_node) = TYPE_SIZE (void_type_node);
+ TYPE_ALIGN (unknown_type_node) = 1;
+ TYPE_MODE (unknown_type_node) = TYPE_MODE (void_type_node);
+ /* Indirecting an UNKNOWN_TYPE node yields an UNKNOWN_TYPE node. */
+ TREE_TYPE (unknown_type_node) = unknown_type_node;
+  /* Looking up TYPE_POINTER_TO and TYPE_REFERENCE_TO yields the same result.  */
+ TYPE_POINTER_TO (unknown_type_node) = unknown_type_node;
+ TYPE_REFERENCE_TO (unknown_type_node) = unknown_type_node;
+
+ /* This is for handling opaque types in signatures. */
+ opaque_type_node = copy_node (ptr_type_node);
+ TYPE_MAIN_VARIANT (opaque_type_node) = opaque_type_node;
+ record_builtin_type (RID_MAX, 0, opaque_type_node);
+
+ /* This is special for C++ so functions can be overloaded. */
+ wchar_type_node
+ = TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (get_identifier (WCHAR_TYPE)));
+ wchar_type_size = TYPE_PRECISION (wchar_type_node);
+ signed_wchar_type_node = make_signed_type (wchar_type_size);
+ unsigned_wchar_type_node = make_unsigned_type (wchar_type_size);
+ wchar_type_node
+ = TREE_UNSIGNED (wchar_type_node)
+ ? unsigned_wchar_type_node
+ : signed_wchar_type_node;
+ record_builtin_type (RID_WCHAR, "__wchar_t", wchar_type_node);
+
+ /* This is for wide string constants. */
+ wchar_array_type_node
+ = build_array_type (wchar_type_node, array_domain_type);
+
+ /* This is a hack that should go away when we deliver the
+ real gc code. */
+ if (flag_gc)
+ {
+ builtin_function ("__gc_main", default_function_type, NOT_BUILT_IN, 0);
+ pushdecl (lookup_name (get_identifier ("__gc_main"), 0));
+ }
+
+ if (flag_vtable_thunks)
+ {
+ /* Make sure we get a unique function type, so we can give
+ its pointer type a name. (This wins for gdb.) */
+ tree vfunc_type = make_node (FUNCTION_TYPE);
+ TREE_TYPE (vfunc_type) = integer_type_node;
+ TYPE_ARG_TYPES (vfunc_type) = NULL_TREE;
+ layout_type (vfunc_type);
+
+ vtable_entry_type = build_pointer_type (vfunc_type);
+ }
+ else
+ {
+ vtable_entry_type = make_lang_type (RECORD_TYPE);
+ fields[0] = build_lang_field_decl (FIELD_DECL, delta_identifier,
+ delta_type_node);
+ fields[1] = build_lang_field_decl (FIELD_DECL, index_identifier,
+ delta_type_node);
+ fields[2] = build_lang_field_decl (FIELD_DECL, pfn_identifier,
+ ptr_type_node);
+ finish_builtin_type (vtable_entry_type, VTBL_PTR_TYPE, fields, 2,
+ double_type_node);
+
+ /* Make this part of an invisible union. */
+ fields[3] = copy_node (fields[2]);
+ TREE_TYPE (fields[3]) = delta_type_node;
+ DECL_NAME (fields[3]) = delta2_identifier;
+ DECL_MODE (fields[3]) = TYPE_MODE (delta_type_node);
+ DECL_SIZE (fields[3]) = TYPE_SIZE (delta_type_node);
+ TREE_UNSIGNED (fields[3]) = 0;
+ TREE_CHAIN (fields[2]) = fields[3];
+ vtable_entry_type = build_type_variant (vtable_entry_type, 1, 0);
+ }
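+  /* So in the non-thunk case a vtable entry is, roughly:
+	 struct { delta_type delta, index; union { void *pfn; delta_type delta2; }; };
+     where delta_type is short, or long under -fhuge-objects (above).  */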
+ record_builtin_type (RID_MAX, VTBL_PTR_TYPE, vtable_entry_type);
+
+ vtbl_type_node
+ = build_array_type (vtable_entry_type, NULL_TREE);
+ layout_type (vtbl_type_node);
+ vtbl_type_node = c_build_type_variant (vtbl_type_node, 1, 0);
+ record_builtin_type (RID_MAX, NULL_PTR, vtbl_type_node);
+
+ /* Simplify life by making a "sigtable_entry_type". Give its
+ fields names so that the debugger can use them. */
+
+ if (flag_handle_signatures)
+ {
+ sigtable_entry_type = make_lang_type (RECORD_TYPE);
+ fields[0] = build_lang_field_decl (FIELD_DECL,
+ get_identifier (SIGTABLE_CODE_NAME),
+ short_integer_type_node);
+ fields[1] = build_lang_field_decl (FIELD_DECL,
+ get_identifier (SIGTABLE_OFFSET_NAME),
+ short_integer_type_node);
+ fields[2] = build_lang_field_decl (FIELD_DECL,
+ get_identifier (SIGTABLE_PFN_NAME),
+ ptr_type_node);
+ finish_builtin_type (sigtable_entry_type, SIGTABLE_PTR_TYPE, fields, 2,
+ double_type_node);
+ sigtable_entry_type = build_type_variant (sigtable_entry_type, 1, 0);
+ record_builtin_type (RID_MAX, SIGTABLE_PTR_TYPE, sigtable_entry_type);
+ }
+
+ if (flag_dossier)
+ {
+ /* Must build __t_desc type. Currently, type descriptors look like this:
+
+ struct __t_desc
+ {
+ const char *name;
+ int size;
+ int bits;
+ struct __t_desc *points_to;
+ int ivars_count, meths_count;
+ struct __i_desc *ivars[];
+ struct __m_desc *meths[];
+ struct __t_desc *parents[];
+ struct __t_desc *vbases[];
+ int offsets[];
+ };
+
+ ...as per Linton's paper. */
+
+ __t_desc_type_node = make_lang_type (RECORD_TYPE);
+ __i_desc_type_node = make_lang_type (RECORD_TYPE);
+ __m_desc_type_node = make_lang_type (RECORD_TYPE);
+ __t_desc_array_type =
+ build_array_type (TYPE_POINTER_TO (__t_desc_type_node), NULL_TREE);
+ __i_desc_array_type =
+ build_array_type (TYPE_POINTER_TO (__i_desc_type_node), NULL_TREE);
+ __m_desc_array_type =
+ build_array_type (TYPE_POINTER_TO (__m_desc_type_node), NULL_TREE);
+
+ fields[0] = build_lang_field_decl (FIELD_DECL, get_identifier ("name"),
+ string_type_node);
+ fields[1] = build_lang_field_decl (FIELD_DECL, get_identifier ("size"),
+ unsigned_type_node);
+ fields[2] = build_lang_field_decl (FIELD_DECL, get_identifier ("bits"),
+ unsigned_type_node);
+ fields[3] = build_lang_field_decl (FIELD_DECL,
+ get_identifier ("points_to"),
+ TYPE_POINTER_TO (__t_desc_type_node));
+ fields[4] = build_lang_field_decl (FIELD_DECL,
+ get_identifier ("ivars_count"),
+ integer_type_node);
+ fields[5] = build_lang_field_decl (FIELD_DECL,
+ get_identifier ("meths_count"),
+ integer_type_node);
+ fields[6] = build_lang_field_decl (FIELD_DECL, get_identifier ("ivars"),
+ build_pointer_type (__i_desc_array_type));
+ fields[7] = build_lang_field_decl (FIELD_DECL, get_identifier ("meths"),
+ build_pointer_type (__m_desc_array_type));
+ fields[8] = build_lang_field_decl (FIELD_DECL, get_identifier ("parents"),
+ build_pointer_type (__t_desc_array_type));
+ fields[9] = build_lang_field_decl (FIELD_DECL, get_identifier ("vbases"),
+ build_pointer_type (__t_desc_array_type));
+ fields[10] = build_lang_field_decl (FIELD_DECL, get_identifier ("offsets"),
+ build_pointer_type (integer_type_node));
+ finish_builtin_type (__t_desc_type_node, "__t_desc", fields, 10, integer_type_node);
+
+ /* ivar descriptors look like this:
+
+ struct __i_desc
+ {
+ const char *name;
+ int offset;
+ struct __t_desc *type;
+ };
+ */
+
+ fields[0] = build_lang_field_decl (FIELD_DECL, get_identifier ("name"),
+ string_type_node);
+ fields[1] = build_lang_field_decl (FIELD_DECL, get_identifier ("offset"),
+ integer_type_node);
+ fields[2] = build_lang_field_decl (FIELD_DECL, get_identifier ("type"),
+ TYPE_POINTER_TO (__t_desc_type_node));
+ finish_builtin_type (__i_desc_type_node, "__i_desc", fields, 2,
+ integer_type_node);
+
+ /* method descriptors look like this:
+
+ struct __m_desc
+ {
+ const char *name;
+ int vindex;
+ struct __t_desc *vcontext;
+ struct __t_desc *return_type;
+ void (*address)();
+ short parm_count;
+ short required_parms;
+ struct __t_desc *parm_types[];
+ };
+ */
+
+ fields[0] = build_lang_field_decl (FIELD_DECL, get_identifier ("name"),
+ string_type_node);
+ fields[1] = build_lang_field_decl (FIELD_DECL, get_identifier ("vindex"),
+ integer_type_node);
+ fields[2] = build_lang_field_decl (FIELD_DECL, get_identifier ("vcontext"),
+ TYPE_POINTER_TO (__t_desc_type_node));
+ fields[3] = build_lang_field_decl (FIELD_DECL, get_identifier ("return_type"),
+ TYPE_POINTER_TO (__t_desc_type_node));
+ fields[4] = build_lang_field_decl (FIELD_DECL, get_identifier ("address"),
+ build_pointer_type (default_function_type));
+ fields[5] = build_lang_field_decl (FIELD_DECL, get_identifier ("parm_count"),
+ short_integer_type_node);
+ fields[6] = build_lang_field_decl (FIELD_DECL, get_identifier ("required_parms"),
+ short_integer_type_node);
+ fields[7] = build_lang_field_decl (FIELD_DECL, get_identifier ("parm_types"),
+ build_pointer_type (build_array_type (TYPE_POINTER_TO (__t_desc_type_node), NULL_TREE)));
+ finish_builtin_type (__m_desc_type_node, "__m_desc", fields, 7,
+ integer_type_node);
+ }
+
+ /* Now, C++. */
+ current_lang_name = lang_name_cplusplus;
+ if (flag_dossier)
+ {
+ int i = builtin_type_tdescs_len;
+ while (i > 0)
+ {
+ tree tdesc = build_t_desc (builtin_type_tdescs_arr[--i], 0);
+ TREE_ASM_WRITTEN (tdesc) = 1;
+ TREE_PUBLIC (TREE_OPERAND (tdesc, 0)) = 1;
+ }
+ }
+
+ auto_function (ansi_opname[(int) NEW_EXPR],
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE, sizetype,
+ void_list_node)),
+ NOT_BUILT_IN);
+ auto_function (ansi_opname[(int) VEC_NEW_EXPR],
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE, sizetype,
+ void_list_node)),
+ NOT_BUILT_IN);
+ auto_function (ansi_opname[(int) DELETE_EXPR],
+ build_function_type (void_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ void_list_node)),
+ NOT_BUILT_IN);
+ auto_function (ansi_opname[(int) VEC_DELETE_EXPR],
+ build_function_type (void_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ void_list_node)),
+ NOT_BUILT_IN);
+
+ abort_fndecl
+ = define_function ("abort",
+ build_function_type (void_type_node, void_list_node),
+ NOT_BUILT_IN, 0, 0);
+
+ /* Perform other language dependent initializations. */
+ init_class_processing ();
+ init_init_processing ();
+ init_search_processing ();
+
+ if (flag_handle_exceptions)
+ init_exception_processing ();
+ if (flag_gc)
+ init_gc_processing ();
+ if (flag_no_inline)
+ {
+ flag_inline_functions = 0;
+#if 0
+      /* This causes unnecessary emission of inline functions.  */
+ flag_default_inline = 0;
+#endif
+ }
+ if (flag_cadillac)
+ init_cadillac ();
+
+ /* Create the global bindings for __FUNCTION__ and __PRETTY_FUNCTION__. */
+ declare_function_name ();
+
+ /* Prepare to check format strings against argument lists. */
+ init_function_format_info ();
+}
+
+/* Make a definition for a builtin function named NAME and whose data type
+ is TYPE. TYPE should be a function type with argument types.
+ FUNCTION_CODE tells later passes how to compile calls to this function.
+ See tree.h for its possible values.
+
+ If LIBRARY_NAME is nonzero, use that for DECL_ASSEMBLER_NAME,
+ the name to be called if we can't opencode the function. */
+
+tree
+define_function (name, type, function_code, pfn, library_name)
+ char *name;
+ tree type;
+ enum built_in_function function_code;
+ void (*pfn)();
+ char *library_name;
+{
+ tree decl = build_lang_decl (FUNCTION_DECL, get_identifier (name), type);
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+
+ /* Since `pushdecl' relies on DECL_ASSEMBLER_NAME instead of DECL_NAME,
+ we cannot change DECL_ASSEMBLER_NAME until we have installed this
+ function in the namespace. */
+ if (pfn) (*pfn) (decl);
+ if (library_name)
+ DECL_ASSEMBLER_NAME (decl) = get_identifier (library_name);
+ make_function_rtl (decl);
+ if (function_code != NOT_BUILT_IN)
+ {
+ DECL_BUILT_IN (decl) = 1;
+ DECL_FUNCTION_CODE (decl) = function_code;
+ }
+ return decl;
+}
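+/* E.g. `abort' above is defined with PFN == 0, while callers that need
+   the new decl pushed as an overload pass push_overloaded_decl_1 (q.v.)
+   as the callback.  */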
+
+/* Called when a declaration is seen that contains no names to declare.
+ If its type is a reference to a structure, union or enum inherited
+ from a containing scope, shadow that tag name for the current scope
+ with a forward reference.
+ If its type defines a new named structure or union
+ or defines an enum, it is valid but we need not do anything here.
+ Otherwise, it is an error.
+
+ C++: may have to grok the declspecs to learn about static,
+ complain for anonymous unions. */
+
+void
+shadow_tag (declspecs)
+ tree declspecs;
+{
+ int found_tag = 0;
+ int warned = 0;
+ tree ob_modifier = NULL_TREE;
+ register tree link;
+ register enum tree_code code, ok_code = ERROR_MARK;
+ register tree t = NULL_TREE;
+
+ for (link = declspecs; link; link = TREE_CHAIN (link))
+ {
+ register tree value = TREE_VALUE (link);
+
+ code = TREE_CODE (value);
+ if (IS_AGGR_TYPE_CODE (code) || code == ENUMERAL_TYPE)
+ {
+ register tree name = TYPE_NAME (value);
+
+ if (code == ENUMERAL_TYPE && TYPE_SIZE (value) == 0)
+ cp_error ("forward declaration of `%#T'", value);
+
+ if (name == NULL_TREE)
+ name = lookup_tag_reverse (value, NULL_TREE);
+
+ if (name && TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+
+ t = lookup_tag (code, name, inner_binding_level, 1);
+
+ if (t == NULL_TREE)
+ {
+ push_obstacks (&permanent_obstack, &permanent_obstack);
+ if (IS_AGGR_TYPE_CODE (code))
+ t = make_lang_type (code);
+ else
+ t = make_node (code);
+ pushtag (name, t, 0);
+ pop_obstacks ();
+ ok_code = code;
+ }
+ else if (name != NULL_TREE || code == ENUMERAL_TYPE)
+ ok_code = code;
+
+ if (ok_code != ERROR_MARK)
+ found_tag++;
+ else
+ {
+ if (!warned)
+ pedwarn ("useless keyword or type name in declaration");
+ warned = 1;
+ }
+ }
+ else if (value == ridpointers[(int) RID_STATIC]
+ || value == ridpointers[(int) RID_EXTERN]
+ || value == ridpointers[(int) RID_AUTO]
+ || value == ridpointers[(int) RID_REGISTER])
+ ob_modifier = value;
+ }
+
+ /* This is where the variables in an anonymous union are
+ declared. An anonymous union declaration looks like:
+ union { ... } ;
+     Because there is no declarator after the union, the parser
+     sends that declaration here.  */
+ if (ok_code == UNION_TYPE
+ && t != NULL_TREE
+ && ((TREE_CODE (TYPE_NAME (t)) == IDENTIFIER_NODE
+ && ANON_AGGRNAME_P (TYPE_NAME (t)))
+ || (TREE_CODE (TYPE_NAME (t)) == TYPE_DECL
+ && ANON_AGGRNAME_P (TYPE_IDENTIFIER (t)))))
+ {
+ /* ANSI C++ June 5 1992 WP 9.5.3. Anonymous unions may not have
+ function members. */
+ if (TYPE_FIELDS (t))
+ {
+ tree decl = grokdeclarator (NULL_TREE, declspecs, NORMAL, 0,
+ NULL_TREE);
+ finish_anon_union (decl);
+ }
+ else
+ error ("anonymous union cannot have a function member");
+ }
+ else
+ {
+      /* Anonymous unions do declare an object, so storage specifiers are
+	 valid for them; hence we check for inappropriate specifiers
+	 only in this branch.  */
+ if (ob_modifier)
+ cp_error ("`%D' can only be specified for objects and functions",
+ ob_modifier);
+
+ if (ok_code == RECORD_TYPE
+ && found_tag == 1
+ && TYPE_LANG_SPECIFIC (t)
+ && CLASSTYPE_DECLARED_EXCEPTION (t))
+ {
+ if (TYPE_SIZE (t))
+ cp_error ("redeclaration of exception `%T'", t);
+ else
+ {
+ tree ename, decl;
+
+ push_obstacks (&permanent_obstack, &permanent_obstack);
+
+ pushclass (t, 0);
+
+ ename = TYPE_NAME (t);
+ if (TREE_CODE (ename) == TYPE_DECL)
+ ename = DECL_NAME (ename);
+ decl = build_lang_field_decl (VAR_DECL, ename, t);
+
+ pop_obstacks ();
+ }
+ }
+ else if (found_tag == 0)
+ pedwarn ("abstract declarator used as declaration");
+ else if (!warned && found_tag > 1)
+ pedwarn ("multiple types in one declaration");
+ }
+}
+
+/* Decode a "typename", such as "int **", returning a ..._TYPE node. */
+
+tree
+groktypename (typename)
+ tree typename;
+{
+ if (TREE_CODE (typename) != TREE_LIST)
+ return typename;
+ return grokdeclarator (TREE_VALUE (typename),
+ TREE_PURPOSE (typename),
+ TYPENAME, 0, NULL_TREE);
+}
+
+/* Decode a declarator in an ordinary declaration or data definition.
+ This is called as soon as the type information and variable name
+ have been parsed, before parsing the initializer if any.
+ Here we create the ..._DECL node, fill in its type,
+ and put it on the list of decls for the current context.
+ The ..._DECL node is returned as the value.
+
+ Exception: for arrays where the length is not specified,
+ the type is left null, to be filled in by `finish_decl'.
+
+ Function definitions do not come here; they go to start_function
+ instead. However, external and forward declarations of functions
+ do go through here. Structure field declarations are done by
+ grokfield and not through here. */
+
+/* Set this to zero to debug not using the temporary obstack
+ to parse initializers. */
+int debug_temp_inits = 1;
+
+tree
+start_decl (declarator, declspecs, initialized, raises)
+ tree declarator, declspecs;
+ int initialized;
+ tree raises;
+{
+ register tree decl;
+ register tree type, tem;
+ tree context;
+ extern int have_extern_spec;
+ extern int used_extern_spec;
+
+ int init_written = initialized;
+
+ /* This should only be done once on the top most decl. */
+ if (have_extern_spec && !used_extern_spec)
+ {
+ declspecs = decl_tree_cons (NULL_TREE, get_identifier ("extern"),
+ declspecs);
+ used_extern_spec = 1;
+ }
+
+ decl = grokdeclarator (declarator, declspecs, NORMAL, initialized, raises);
+ if (decl == NULL_TREE || decl == void_type_node)
+ return NULL_TREE;
+
+ type = TREE_TYPE (decl);
+
+ /* Don't lose if destructors must be executed at file-level. */
+ if (TREE_STATIC (decl)
+ && TYPE_NEEDS_DESTRUCTOR (type)
+ && !TREE_PERMANENT (decl))
+ {
+ push_obstacks (&permanent_obstack, &permanent_obstack);
+ decl = copy_node (decl);
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ tree itype = TYPE_DOMAIN (type);
+ if (itype && ! TREE_PERMANENT (itype))
+ {
+ itype = build_index_type (copy_to_permanent (TYPE_MAX_VALUE (itype)));
+ type = build_cplus_array_type (TREE_TYPE (type), itype);
+ TREE_TYPE (decl) = type;
+ }
+ }
+ pop_obstacks ();
+ }
+
+ /* Interesting work for this is done in `finish_exception_decl'. */
+ if (TREE_CODE (type) == RECORD_TYPE
+ && CLASSTYPE_DECLARED_EXCEPTION (type))
+ return decl;
+
+ /* Corresponding pop_obstacks is done in `finish_decl'. */
+ push_obstacks_nochange ();
+
+ context
+ = (TREE_CODE (decl) == FUNCTION_DECL && DECL_VIRTUAL_P (decl))
+ ? DECL_CLASS_CONTEXT (decl)
+ : DECL_CONTEXT (decl);
+
+ if (processing_template_decl)
+ {
+ tree d;
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ /* Declarator is a call_expr; extract arguments from it, since
+ grokdeclarator didn't do it. */
+ tree args;
+ args = copy_to_permanent (last_function_parms);
+ if (TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
+ {
+ tree t = TREE_TYPE (decl);
+
+ t = TYPE_METHOD_BASETYPE (t); /* type method belongs to */
+ if (TREE_CODE (t) != UNINSTANTIATED_P_TYPE)
+ {
+ t = build_pointer_type (t); /* base type of `this' */
+#if 1
+ /* I suspect this is wrong. */
+ t = build_type_variant (t, flag_this_is_variable <= 0,
+ 0); /* type of `this' */
+#else
+ t = build_type_variant (t, 0, 0); /* type of `this' */
+#endif
+ t = build (PARM_DECL, t, this_identifier);
+ TREE_CHAIN (t) = args;
+ args = t;
+ }
+ }
+ DECL_ARGUMENTS (decl) = args;
+ }
+ d = build_lang_decl (TEMPLATE_DECL, DECL_NAME (decl), TREE_TYPE (decl));
+ if (interface_unknown && flag_external_templates
+ && ! DECL_IN_SYSTEM_HEADER (decl))
+ warn_if_unknown_interface ();
+ TREE_PUBLIC (d) = TREE_PUBLIC (decl) =
+ flag_external_templates && !interface_unknown;
+ TREE_STATIC (d) = TREE_STATIC (decl);
+ DECL_EXTERNAL (d) = (DECL_EXTERNAL (decl)
+ && !(context && !DECL_THIS_EXTERN (decl)));
+ DECL_TEMPLATE_RESULT (d) = decl;
+ decl = d;
+ }
+
+ /* If this type of object needs a cleanup, and control may
+ jump past it, make a new binding level so that it is cleaned
+ up only when it is initialized first. */
+ if (TYPE_NEEDS_DESTRUCTOR (type)
+ && current_binding_level->more_cleanups_ok == 0)
+ pushlevel_temporary (1);
+
+ if (initialized)
+ /* Is it valid for this decl to have an initializer at all?
+ If not, set INITIALIZED to zero, which will indirectly
+ tell `finish_decl' to ignore the initializer once it is parsed. */
+ switch (TREE_CODE (decl))
+ {
+ case TYPE_DECL:
+ /* typedef foo = bar means give foo the same type as bar.
+ We haven't parsed bar yet, so `finish_decl' will fix that up.
+ Any other case of an initialization in a TYPE_DECL is an error. */
+ if (pedantic || list_length (declspecs) > 1)
+ {
+ cp_error ("typedef `%D' is initialized", decl);
+ initialized = 0;
+ }
+ break;
+
+ case FUNCTION_DECL:
+ cp_error ("function `%#D' is initialized like a variable", decl);
+ initialized = 0;
+ break;
+
+ default:
+ /* Don't allow initializations for incomplete types except for
+ arrays which might be completed by the initialization. */
+ if (type == error_mark_node)
+ ; /* Don't complain again. */
+ else if (TYPE_SIZE (type) != NULL_TREE)
+ ; /* A complete type is ok. */
+ else if (TREE_CODE (type) != ARRAY_TYPE)
+ {
+ cp_error ("variable `%#D' has initializer but incomplete type",
+ decl);
+ initialized = 0;
+ }
+ else if (TYPE_SIZE (TREE_TYPE (type)) == NULL_TREE)
+ {
+ cp_error ("elements of array `%#D' have incomplete type", decl);
+ initialized = 0;
+ }
+ }
+
+ if (!initialized
+ && TREE_CODE (decl) != TYPE_DECL
+ && TREE_CODE (decl) != TEMPLATE_DECL
+ && IS_AGGR_TYPE (type) && ! DECL_EXTERNAL (decl))
+ {
+ if (TYPE_SIZE (type) == NULL_TREE)
+ {
+ cp_error ("aggregate `%#D' has incomplete type and cannot be initialized",
+ decl);
+ /* Change the type so that assemble_variable will give
+ DECL an rtl we can live with: (mem (const_int 0)). */
+ TREE_TYPE (decl) = error_mark_node;
+ type = error_mark_node;
+ }
+ else
+ {
+ /* If any base type in the hierarchy of TYPE needs a constructor,
+ then we set initialized to 1. This way any nodes which are
+ created for the purposes of initializing this aggregate
+ will live as long as it does. This is necessary for global
+ aggregates which do not have their initializers processed until
+ the end of the file. */
+ initialized = TYPE_NEEDS_CONSTRUCTING (type);
+ }
+ }
+
+ if (initialized)
+ {
+ if (current_binding_level != global_binding_level
+ && DECL_EXTERNAL (decl))
+ cp_warning ("declaration of `%#D' has `extern' and is initialized",
+ decl);
+ DECL_EXTERNAL (decl) = 0;
+ if (current_binding_level == global_binding_level)
+ TREE_STATIC (decl) = 1;
+
+ /* Tell `pushdecl' this is an initialized decl
+ even though we don't yet have the initializer expression.
+ Also tell `finish_decl' it may store the real initializer. */
+ DECL_INITIAL (decl) = error_mark_node;
+ }
+
+ if (context && TYPE_SIZE (context) != NULL_TREE)
+ {
+ if (TREE_CODE (decl) == VAR_DECL)
+ {
+ tree field = lookup_field (context, DECL_NAME (decl), 0, 0);
+ if (field == NULL_TREE || TREE_CODE (field) != VAR_DECL)
+ cp_error ("`%#D' is not a static member of `%#T'", decl, context);
+ else if (duplicate_decls (decl, field))
+ decl = field;
+ }
+
+ /* If it was not explicitly declared `extern',
+ revoke any previous claims of DECL_EXTERNAL. */
+ if (DECL_THIS_EXTERN (decl) == 0)
+ DECL_EXTERNAL (decl) = 0;
+ if (DECL_LANG_SPECIFIC (decl))
+ DECL_IN_AGGR_P (decl) = 0;
+ pushclass (context, 2);
+ }
+
+ /* Add this decl to the current binding level, but not if it
+ comes from another scope, e.g. a static member variable.
+ TEM may equal DECL or it may be a previous decl of the same name. */
+
+ if ((TREE_CODE (decl) != PARM_DECL && DECL_CONTEXT (decl) != NULL_TREE)
+ || (TREE_CODE (decl) == TEMPLATE_DECL && !global_bindings_p ())
+ || TREE_CODE (type) == LANG_TYPE)
+ tem = decl;
+ else
+ tem = pushdecl (decl);
+
+ /* Tell the back-end to use or not use .common as appropriate. If we say
+ -fconserve-space, we want this to save space, at the expense of wrong
+ semantics. If we say -fno-conserve-space, we want this to produce
+ errors about redefs; to do this we force variables into the data
+ segment. Common storage is okay for non-public uninitialized data;
+ the linker can't match it with storage from other files, and we may
+ save some disk space. */
+ DECL_COMMON (tem) = flag_conserve_space || ! TREE_PUBLIC (tem);
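+ /* For illustration: a plain `int x;' appearing in two files can,
+ under -fconserve-space, go to .common in both and quietly share
+ storage (compact, but wrong C++ semantics), while under
+ -fno-conserve-space each copy lands in the data segment and the
+ linker reports the redefinition. */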
+
+#if 0
+ /* We don't do this yet for GNU C++. */
+ /* For a local variable, define the RTL now. */
+ if (current_binding_level != global_binding_level
+ /* But not if this is a duplicate decl
+ and we preserved the rtl from the previous one
+ (which may or may not happen). */
+ && DECL_RTL (tem) == NULL_RTX)
+ {
+ if (TYPE_SIZE (TREE_TYPE (tem)) != NULL_TREE)
+ expand_decl (tem);
+ else if (TREE_CODE (TREE_TYPE (tem)) == ARRAY_TYPE
+ && DECL_INITIAL (tem) != NULL_TREE)
+ expand_decl (tem);
+ }
+#endif
+
+ if (TREE_CODE (decl) == TEMPLATE_DECL)
+ {
+ tree result = DECL_TEMPLATE_RESULT (decl);
+ if (DECL_CONTEXT (result) != NULL_TREE)
+ {
+ tree type;
+ type = DECL_CONTEXT (result);
+
+ if (TREE_CODE (type) != UNINSTANTIATED_P_TYPE)
+ {
+ cp_error ("declaration of `%D' in non-template type `%T'",
+ decl, type);
+ return NULL_TREE;
+ }
+
+ if (TREE_CODE (result) == FUNCTION_DECL)
+ return tem;
+ else if (TREE_CODE (result) == VAR_DECL)
+ {
+#if 0
+ tree tmpl = UPT_TEMPLATE (type);
+
+ fprintf (stderr, "%s:%d: adding ", __FILE__, __LINE__);
+ print_node_brief (stderr, "", DECL_NAME (tem), 0);
+ fprintf (stderr, " to class %s\n",
+ IDENTIFIER_POINTER (DECL_NAME (tmpl)));
+ DECL_TEMPLATE_MEMBERS (tmpl)
+ = perm_tree_cons (DECL_NAME (tem), tem,
+ DECL_TEMPLATE_MEMBERS (tmpl));
+ return tem;
+#else
+ sorry ("static data member templates");
+ return NULL_TREE;
+#endif
+ }
+ else
+ my_friendly_abort (13);
+ }
+ else if (TREE_CODE (result) == FUNCTION_DECL)
+ /*tem = push_overloaded_decl (tem, 0)*/;
+ else if (TREE_CODE (result) == VAR_DECL)
+ {
+ cp_error ("data template `%#D' must be member of a class template",
+ result);
+ return NULL_TREE;
+ }
+ else if (TREE_CODE (result) == TYPE_DECL)
+ {
+ cp_error ("invalid template `%#D'", result);
+ return NULL_TREE;
+ }
+ else
+ my_friendly_abort (14);
+ }
+
+ if (init_written
+ && ! (TREE_CODE (tem) == PARM_DECL
+ || (TREE_READONLY (tem)
+ && (TREE_CODE (tem) == VAR_DECL
+ || TREE_CODE (tem) == FIELD_DECL))))
+ {
+ /* When parsing and digesting the initializer,
+ use temporary storage. Do this even if we will ignore the value. */
+ if (current_binding_level == global_binding_level && debug_temp_inits)
+ {
+ if (TYPE_NEEDS_CONSTRUCTING (type)
+ || TREE_CODE (type) == REFERENCE_TYPE)
+ /* In this case, the initializer must be laid down in permanent
+ storage, since it will be saved until `finish_file' is run. */
+ ;
+ else
+ temporary_allocation ();
+ }
+ }
+
+ if (flag_cadillac)
+ cadillac_start_decl (tem);
+
+ return tem;
+}
+
+#if 0 /* unused */
+static void
+make_temporary_for_reference (decl, ctor_call, init, cleanupp)
+ tree decl, ctor_call, init;
+ tree *cleanupp;
+{
+ tree type = TREE_TYPE (decl);
+ tree target_type = TREE_TYPE (type);
+ tree tmp, tmp_addr;
+
+ if (ctor_call)
+ {
+ tmp_addr = TREE_VALUE (TREE_OPERAND (ctor_call, 1));
+ if (TREE_CODE (tmp_addr) == NOP_EXPR)
+ tmp_addr = TREE_OPERAND (tmp_addr, 0);
+ my_friendly_assert (TREE_CODE (tmp_addr) == ADDR_EXPR, 146);
+ tmp = TREE_OPERAND (tmp_addr, 0);
+ }
+ else
+ {
+ tmp = get_temp_name (target_type,
+ current_binding_level == global_binding_level);
+ tmp_addr = build_unary_op (ADDR_EXPR, tmp, 0);
+ }
+
+ TREE_TYPE (tmp_addr) = build_pointer_type (target_type);
+ DECL_INITIAL (decl) = convert (TYPE_POINTER_TO (target_type), tmp_addr);
+ TREE_TYPE (DECL_INITIAL (decl)) = type;
+ if (TYPE_NEEDS_CONSTRUCTING (target_type))
+ {
+ if (current_binding_level == global_binding_level)
+ {
+ /* Lay this variable out now. Otherwise `output_addressed_constants'
+ gets confused by its initializer. */
+ make_decl_rtl (tmp, NULL_PTR, 1);
+ static_aggregates = perm_tree_cons (init, tmp, static_aggregates);
+ }
+ else
+ {
+ if (ctor_call != NULL_TREE)
+ init = ctor_call;
+ else
+ init = build_method_call (tmp, constructor_name_full (target_type),
+ build_tree_list (NULL_TREE, init),
+ NULL_TREE, LOOKUP_NORMAL);
+ DECL_INITIAL (decl) = build (COMPOUND_EXPR, type, init,
+ DECL_INITIAL (decl));
+ *cleanupp = maybe_build_cleanup (tmp);
+ }
+ }
+ else
+ {
+ DECL_INITIAL (tmp) = init;
+ TREE_STATIC (tmp) = current_binding_level == global_binding_level;
+ finish_decl (tmp, init, 0, 0);
+ }
+ if (TREE_STATIC (tmp))
+ preserve_initializer ();
+}
+#endif
+
+/* Handle initialization of references.
+ These three arguments come from `finish_decl', and have the
+ same meaning here that they do there. */
+/* Quotes on the semantics can be found in ARM 8.4.3. */
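+/* A sketch of the cases handled below (`X' stands for any class
+ type; the names are illustrative):
+
+ int i;
+ int &r = i;          r binds directly; DECL_INITIAL becomes `&i'
+ const X &x = X (3);  x binds to a temporary, so *CLEANUPP may be
+                      set so the temporary is destroyed on time. */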
+static void
+grok_reference_init (decl, type, init, cleanupp)
+ tree decl, type, init;
+ tree *cleanupp;
+{
+ tree tmp;
+
+ if (init == NULL_TREE)
+ {
+ if (DECL_LANG_SPECIFIC (decl) == 0
+ || DECL_IN_AGGR_P (decl) == 0)
+ {
+ cp_error ("`%D' declared as reference but not initialized", decl);
+ if (TREE_CODE (decl) == VAR_DECL)
+ SET_DECL_REFERENCE_SLOT (decl, error_mark_node);
+ }
+ return;
+ }
+
+ if (init == error_mark_node)
+ return;
+
+ if (TREE_CODE (type) == REFERENCE_TYPE
+ && TREE_CODE (init) == CONSTRUCTOR)
+ {
+ cp_error ("ANSI C++ forbids use of initializer list to initialize reference `%D'", decl);
+ return;
+ }
+
+ if (TREE_CODE (init) == TREE_LIST)
+ init = build_compound_expr (init);
+
+ if (TREE_CODE (TREE_TYPE (type)) != ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (init)) == ARRAY_TYPE)
+ {
+ /* Note: default conversion is only called in very special cases. */
+ init = default_conversion (init);
+ }
+
+ tmp = convert_to_reference
+ (type, init, CONV_IMPLICIT, LOOKUP_SPECULATIVELY|LOOKUP_NORMAL, decl);
+
+ if (tmp == error_mark_node)
+ goto fail;
+ else if (tmp != NULL_TREE)
+ {
+ tree subtype = TREE_TYPE (type);
+ init = tmp;
+
+ /* Associate the cleanup with the reference so that we
+ don't get burned by "aggressive" cleanup policy. */
+ if (TYPE_NEEDS_DESTRUCTOR (subtype))
+ {
+ if (TREE_CODE (init) == WITH_CLEANUP_EXPR)
+ {
+ *cleanupp = TREE_OPERAND (init, 2);
+ TREE_OPERAND (init, 2) = error_mark_node;
+ }
+ else
+ {
+ if (TREE_CODE (tmp) == ADDR_EXPR)
+ tmp = TREE_OPERAND (tmp, 0);
+ if (TREE_CODE (tmp) == TARGET_EXPR)
+ {
+ *cleanupp = build_delete
+ (TYPE_POINTER_TO (subtype),
+ build_unary_op (ADDR_EXPR, TREE_OPERAND (tmp, 0), 0),
+ integer_two_node, LOOKUP_NORMAL|LOOKUP_DESTRUCTOR, 0);
+ TREE_OPERAND (tmp, 2) = error_mark_node;
+ }
+ }
+ }
+
+ DECL_INITIAL (decl) = save_expr (init);
+ }
+ else
+ {
+ cp_error ("cannot initialize `%T' from `%T'", type, TREE_TYPE (init));
+ goto fail;
+ }
+
+ /* ?? Can this be optimized in some cases to
+ hand back the DECL_INITIAL slot?? */
+ if (TYPE_SIZE (TREE_TYPE (type)))
+ {
+ init = convert_from_reference (decl);
+ if (TREE_PERMANENT (decl))
+ init = copy_to_permanent (init);
+ SET_DECL_REFERENCE_SLOT (decl, init);
+ }
+
+ if (TREE_STATIC (decl) && ! TREE_CONSTANT (DECL_INITIAL (decl)))
+ {
+ expand_static_init (decl, DECL_INITIAL (decl));
+ DECL_INITIAL (decl) = NULL_TREE;
+ }
+ return;
+
+ fail:
+ if (TREE_CODE (decl) == VAR_DECL)
+ SET_DECL_REFERENCE_SLOT (decl, error_mark_node);
+ return;
+}
+
+/* Finish processing of a declaration;
+ install its line number and initial value.
+ If the length of an array type is not known before,
+ it must be determined now, from the initial value, or it is an error.
+
+ Call `pop_obstacks' iff NEED_POP is nonzero.
+
+ For C++, `finish_decl' must be fairly evasive: it must keep initializers
+ for aggregates that have constructors alive on the permanent obstack,
+ so that the global initializing functions can be written at the end.
+
+ INIT holds the value of an initializer that should be allowed to
+ escape the normal rules.
+
+ For functions that take default parameters, DECL points to its
+ "maximal" instantiation. `finish_decl' must then also declare its
+ successively lower forms of instantiation, checking for
+ ambiguity as it goes. This can be sped up later. */
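+/* Two sketches of work done below: `int a[] = { 1, 2, 3 };' has its
+ domain deduced from the initializer via `complete_array_type', and
+ `typedef foo = bar;' has the type of `bar' stored as the type of
+ `foo' now that the initializer has actually been parsed. */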
+
+void
+finish_decl (decl, init, asmspec_tree, need_pop)
+ tree decl, init;
+ tree asmspec_tree;
+ int need_pop;
+{
+ register tree type;
+ tree cleanup = NULL_TREE, ttype;
+ int was_incomplete;
+ int temporary = allocation_temporary_p ();
+ char *asmspec = NULL;
+ int was_readonly = 0;
+
+ /* If this is 0, then we did not change obstacks. */
+ if (! decl)
+ {
+ if (init)
+ error ("assignment (not initialization) in declaration");
+ return;
+ }
+
+ /* If a name was specified, get the string. */
+ if (asmspec_tree)
+ asmspec = TREE_STRING_POINTER (asmspec_tree);
+
+ /* If the type of the thing we are declaring either has
+ a constructor, or has a virtual function table pointer,
+ AND its initialization was accepted by `start_decl',
+ then we stayed on the permanent obstack through the
+ declaration, otherwise, changed obstacks as GCC would. */
+
+ type = TREE_TYPE (decl);
+
+ was_incomplete = (DECL_SIZE (decl) == NULL_TREE);
+
+ /* Take care of TYPE_DECLs up front. */
+ if (TREE_CODE (decl) == TYPE_DECL)
+ {
+ if (init && DECL_INITIAL (decl))
+ {
+ /* typedef foo = bar; store the type of bar as the type of foo. */
+ TREE_TYPE (decl) = type = TREE_TYPE (init);
+ DECL_INITIAL (decl) = init = NULL_TREE;
+ }
+ if (type != error_mark_node
+ && IS_AGGR_TYPE (type) && DECL_NAME (decl))
+ {
+ if (TREE_TYPE (DECL_NAME (decl)) && TREE_TYPE (decl) != type)
+ cp_warning ("shadowing previous type declaration of `%#D'", decl);
+ set_identifier_type_value (DECL_NAME (decl), type);
+ CLASSTYPE_GOT_SEMICOLON (type) = 1;
+ }
+ GNU_xref_decl (current_function_decl, decl);
+ rest_of_decl_compilation (decl, NULL_PTR,
+ DECL_CONTEXT (decl) == NULL_TREE, 0);
+ goto finish_end;
+ }
+ if (type != error_mark_node && IS_AGGR_TYPE (type)
+ && CLASSTYPE_DECLARED_EXCEPTION (type))
+ {
+ CLASSTYPE_GOT_SEMICOLON (type) = 1;
+ goto finish_end;
+ }
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ {
+ ttype = target_type (type);
+#if 0 /* WTF? -KR
+ Leave this out until we can figure out why it was
+ needed/desirable in the first place. Then put a comment
+ here explaining why. Or just delete the code if no ill
+ effects arise. */
+ if (TYPE_NAME (ttype)
+ && TREE_CODE (TYPE_NAME (ttype)) == TYPE_DECL
+ && ANON_AGGRNAME_P (TYPE_IDENTIFIER (ttype)))
+ {
+ tree old_id = TYPE_IDENTIFIER (ttype);
+ char *newname = (char *)alloca (IDENTIFIER_LENGTH (old_id) + 2);
+ /* Need to preserve template data for UPT nodes. */
+ tree old_template = IDENTIFIER_TEMPLATE (old_id);
+ newname[0] = '_';
+ bcopy (IDENTIFIER_POINTER (old_id), newname + 1,
+ IDENTIFIER_LENGTH (old_id) + 1);
+ old_id = get_identifier (newname);
+ lookup_tag_reverse (ttype, old_id);
+ TYPE_IDENTIFIER (ttype) = old_id;
+ IDENTIFIER_TEMPLATE (old_id) = old_template;
+ }
+#endif
+ }
+
+ if (! DECL_EXTERNAL (decl) && TREE_READONLY (decl)
+ && TYPE_NEEDS_CONSTRUCTING (type))
+ {
+ /* Currently, GNU C++ puts constants in text space, making them
+ impossible to initialize. In the future, one would hope for
+ an operating system which understood the difference between
+ initialization and the running of a program. */
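+ /* E.g. (illustrative) `const X x (5);' must still be written to
+ by X's constructor at startup, so it cannot live in read-only
+ text; TREE_READONLY is restored near the end of this function
+ when WAS_READONLY is set. */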
+ was_readonly = 1;
+ TREE_READONLY (decl) = 0;
+ }
+
+ if (TREE_CODE (decl) == FIELD_DECL)
+ {
+ if (init && init != error_mark_node)
+ my_friendly_assert (TREE_PERMANENT (init), 147);
+
+ if (asmspec)
+ {
+ /* This must override the asm specifier which was placed
+ by grokclassfn. Lay this out fresh.
+
+ @@ Should emit an error if this redefines an asm-specified
+ @@ name, or if we have already used the function's name. */
+ DECL_RTL (TREE_TYPE (decl)) = NULL_RTX;
+ DECL_ASSEMBLER_NAME (decl) = get_identifier (asmspec);
+ make_decl_rtl (decl, asmspec, 0);
+ }
+ }
+ /* If `start_decl' didn't like having an initialization, ignore it now. */
+ else if (init != NULL_TREE && DECL_INITIAL (decl) == NULL_TREE)
+ init = NULL_TREE;
+ else if (DECL_EXTERNAL (decl))
+ ;
+ else if (TREE_CODE (type) == REFERENCE_TYPE
+ || (TYPE_LANG_SPECIFIC (type) && IS_SIGNATURE_REFERENCE (type)))
+ {
+ grok_reference_init (decl, type, init, &cleanup);
+ init = NULL_TREE;
+ }
+
+ GNU_xref_decl (current_function_decl, decl);
+
+ if (TREE_CODE (decl) == FIELD_DECL)
+ ;
+ else if (TREE_CODE (decl) == CONST_DECL)
+ {
+ my_friendly_assert (TREE_CODE (decl) != REFERENCE_TYPE, 148);
+
+ DECL_INITIAL (decl) = init;
+
+ /* This will keep us from needing to worry about our obstacks. */
+ my_friendly_assert (init != NULL_TREE, 149);
+ init = NULL_TREE;
+ }
+ else if (init)
+ {
+ if (TYPE_HAS_CONSTRUCTOR (type) || TYPE_NEEDS_CONSTRUCTING (type))
+ {
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ init = digest_init (type, init, (tree *) 0);
+ else if (TREE_CODE (init) == CONSTRUCTOR)
+ {
+ if (TYPE_NEEDS_CONSTRUCTING (type))
+ {
+ cp_error ("`%D' must be initialized by constructor, not by `{...}'",
+ decl);
+ init = error_mark_node;
+ }
+ else
+ goto dont_use_constructor;
+ }
+#if 0
+ /* fix this in `build_functional_cast' instead.
+ Here's the trigger code:
+
+ struct ostream
+ {
+ ostream ();
+ ostream (int, char *);
+ ostream (char *);
+ operator char *();
+ ostream (void *);
+ operator void *();
+ operator << (int);
+ };
+ int buf_size = 1024;
+ static char buf[buf_size];
+ const char *debug(int i) {
+ char *b = &buf[0];
+ ostream o = ostream(buf_size, b);
+ o << i;
+ return buf;
+ }
+ */
+
+ else if (TREE_CODE (init) == TARGET_EXPR
+ && TREE_CODE (TREE_OPERAND (init, 1)) == NEW_EXPR)
+ {
+ /* User wrote something like `foo x = foo (args)' */
+ my_friendly_assert (TREE_CODE (TREE_OPERAND (init, 0)) == VAR_DECL, 150);
+ my_friendly_assert (DECL_NAME (TREE_OPERAND (init, 0)) == NULL_TREE, 151);
+
+ /* User wrote exactly `foo x = foo (args)' */
+ if (TYPE_MAIN_VARIANT (type) == TREE_TYPE (init))
+ {
+ init = build (CALL_EXPR, TREE_TYPE (init),
+ TREE_OPERAND (TREE_OPERAND (init, 1), 0),
+ TREE_OPERAND (TREE_OPERAND (init, 1), 1), 0);
+ TREE_SIDE_EFFECTS (init) = 1;
+ }
+ }
+#endif
+
+ /* We must hide the initializer so that expand_decl
+ won't try to do something it does not understand. */
+ if (current_binding_level == global_binding_level)
+ {
+ tree value;
+ if (DECL_COMMON (decl))
+ /* Should this be a NULL_TREE? */
+ value = error_mark_node;
+ else
+ value = build (CONSTRUCTOR, type, NULL_TREE, NULL_TREE);
+ DECL_INITIAL (decl) = value;
+ }
+ else
+ DECL_INITIAL (decl) = error_mark_node;
+ }
+ else
+ {
+ dont_use_constructor:
+ if (TREE_CODE (init) != TREE_VEC)
+ init = store_init_value (decl, init);
+
+ /* Don't let anyone try to initialize this variable
+ until we are ready to do so. */
+ if (init)
+ {
+ tree value;
+ if (DECL_COMMON (decl))
+ value = error_mark_node;
+ else
+ value = build (CONSTRUCTOR, type, NULL_TREE, NULL_TREE);
+ DECL_INITIAL (decl) = value;
+ }
+ }
+ }
+ else if (DECL_EXTERNAL (decl))
+ ;
+ else if (TREE_CODE_CLASS (TREE_CODE (type)) == 't'
+ && (IS_AGGR_TYPE (type) || TYPE_NEEDS_CONSTRUCTING (type)))
+ {
+ tree ctype = type;
+ while (TREE_CODE (ctype) == ARRAY_TYPE)
+ ctype = TREE_TYPE (ctype);
+ if (! TYPE_NEEDS_CONSTRUCTING (ctype))
+ {
+ if (CLASSTYPE_READONLY_FIELDS_NEED_INIT (ctype))
+ cp_error ("structure `%D' with uninitialized const members", decl);
+ if (CLASSTYPE_REF_FIELDS_NEED_INIT (ctype))
+ cp_error ("structure `%D' with uninitialized reference members",
+ decl);
+ }
+
+ if (TREE_CODE (decl) == VAR_DECL
+ && !DECL_INITIAL (decl)
+ && !TYPE_NEEDS_CONSTRUCTING (type)
+ && (TYPE_READONLY (type) || TREE_READONLY (decl)))
+ cp_error ("uninitialized const `%D'", decl);
+
+ /* Initialize variables in need of static initialization with
+ an empty CONSTRUCTOR to keep assemble_variable from putting them in
+ the wrong program space. */
+ if (flag_pic == 0
+ && TREE_STATIC (decl)
+ && TREE_PUBLIC (decl)
+ && ! DECL_EXTERNAL (decl)
+ && TREE_CODE (decl) == VAR_DECL
+ && TYPE_NEEDS_CONSTRUCTING (type)
+ && (DECL_INITIAL (decl) == NULL_TREE
+ || DECL_INITIAL (decl) == error_mark_node)
+ && ! DECL_COMMON (decl))
+ DECL_INITIAL (decl) = build (CONSTRUCTOR, type, NULL_TREE,
+ NULL_TREE);
+ }
+ else if (TREE_CODE (decl) == VAR_DECL
+ && TREE_CODE (type) != REFERENCE_TYPE
+ && (TYPE_READONLY (type) || TREE_READONLY (decl)))
+ {
+ /* ``Unless explicitly declared extern, a const object does not have
+ external linkage and must be initialized. ($8.4; $12.1)'' ARM 7.1.6
+ However, if it's `const int foo = 1; const int foo;', don't complain
+ about the second decl, since it does have an initializer before.
+ We deliberately don't complain about arrays, because they're
+ supposed to be initialized by a constructor. */
+ if (! DECL_INITIAL (decl)
+ && TREE_CODE (type) != ARRAY_TYPE
+ && (!pedantic || !current_class_type))
+ cp_error ("uninitialized const `%#D'", decl);
+ }
+
+ /* For a top-level declaration, the initial value was read in
+ the temporary obstack. MAXINDEX, rtl, etc. to be made below
+ must go in the permanent obstack; but don't discard the
+ temporary data yet. */
+
+ if (current_binding_level == global_binding_level && temporary)
+ end_temporary_allocation ();
+
+ /* Deduce size of array from initialization, if not already known. */
+
+ if (TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_DOMAIN (type) == NULL_TREE
+ && TREE_CODE (decl) != TYPE_DECL)
+ {
+ int do_default
+ = (TREE_STATIC (decl)
+ /* Even if pedantic, an external linkage array
+ may have incomplete type at first. */
+ ? pedantic && ! DECL_EXTERNAL (decl)
+ : !DECL_EXTERNAL (decl));
+ tree initializer = init ? init : DECL_INITIAL (decl);
+ int failure = complete_array_type (type, initializer, do_default);
+
+ if (failure == 1)
+ cp_error ("initializer fails to determine size of `%D'", decl);
+
+ if (failure == 2)
+ {
+ if (do_default)
+ cp_error ("array size missing in `%D'", decl);
+ /* If a `static' var's size isn't known, make it extern as
+ well as static, so it does not get allocated. If it's not
+ `static', then don't mark it extern; finish_incomplete_decl
+ will give it a default size and it will get allocated. */
+ else if (!pedantic && TREE_STATIC (decl) && !TREE_PUBLIC (decl))
+ DECL_EXTERNAL (decl) = 1;
+ }
+
+ if (pedantic && TYPE_DOMAIN (type) != NULL_TREE
+ && tree_int_cst_lt (TYPE_MAX_VALUE (TYPE_DOMAIN (type)),
+ integer_zero_node))
+ cp_error ("zero-size array `%D'", decl);
+
+ layout_decl (decl, 0);
+ }
+
+ if (TREE_CODE (decl) == VAR_DECL)
+ {
+ if (DECL_SIZE (decl) == NULL_TREE
+ && TYPE_SIZE (TREE_TYPE (decl)) != NULL_TREE)
+ layout_decl (decl, 0);
+
+ if (TREE_STATIC (decl) && DECL_SIZE (decl) == NULL_TREE)
+ {
+ /* A static variable with an incomplete type:
+ that is an error if it is initialized.
+ Otherwise, let it through, but if it is not `extern'
+ then it may cause an error message later. */
+ if (DECL_INITIAL (decl) != NULL_TREE)
+ cp_error ("storage size of `%D' isn't known", decl);
+ init = NULL_TREE;
+ }
+ else if (!DECL_EXTERNAL (decl) && DECL_SIZE (decl) == NULL_TREE)
+ {
+ /* An automatic variable with an incomplete type: that is an error.
+ Don't talk about array types here, since we took care of that
+ message in grokdeclarator. */
+ cp_error ("storage size of `%D' isn't known", decl);
+ TREE_TYPE (decl) = error_mark_node;
+ }
+ else if (!DECL_EXTERNAL (decl) && IS_AGGR_TYPE (ttype))
+ /* Let debugger know it should output info for this type. */
+ note_debug_info_needed (ttype);
+
+ if ((DECL_EXTERNAL (decl) || TREE_STATIC (decl))
+ && DECL_SIZE (decl) != NULL_TREE
+ && ! TREE_CONSTANT (DECL_SIZE (decl)))
+ {
+ if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
+ constant_expression_warning (DECL_SIZE (decl));
+ else
+ cp_error ("storage size of `%D' isn't constant", decl);
+ }
+
+ if (!DECL_EXTERNAL (decl) && TYPE_NEEDS_DESTRUCTOR (type))
+ {
+ int yes = suspend_momentary ();
+
+ /* If INIT comes from a functional cast, use the cleanup
+ we built for that. Otherwise, make our own cleanup. */
+ if (init && TREE_CODE (init) == WITH_CLEANUP_EXPR
+ && comptypes (TREE_TYPE (decl), TREE_TYPE (init), 1))
+ {
+ cleanup = TREE_OPERAND (init, 2);
+ init = TREE_OPERAND (init, 0);
+ current_binding_level->have_cleanups = 1;
+ }
+ else
+ cleanup = maybe_build_cleanup (decl);
+ resume_momentary (yes);
+ }
+ }
+ /* PARM_DECLs get cleanups, too. */
+ else if (TREE_CODE (decl) == PARM_DECL && TYPE_NEEDS_DESTRUCTOR (type))
+ {
+ if (temporary)
+ end_temporary_allocation ();
+ cleanup = maybe_build_cleanup (decl);
+ if (temporary)
+ resume_temporary_allocation ();
+ }
+
+ /* Output the assembler code and/or RTL code for variables and functions,
+ unless the type is an undefined structure or union.
+ If not, it will get done when the type is completed. */
+
+ if (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL
+ || TREE_CODE (decl) == RESULT_DECL)
+ {
+ /* ??? FIXME: What about nested classes? */
+ int toplev = (current_binding_level == global_binding_level
+ || pseudo_global_level_p ());
+ int was_temp
+ = ((flag_traditional
+ || (TREE_STATIC (decl) && TYPE_NEEDS_DESTRUCTOR (type)))
+ && allocation_temporary_p ());
+
+ if (was_temp)
+ end_temporary_allocation ();
+
+ if (TREE_CODE (decl) == VAR_DECL
+ && current_binding_level != global_binding_level
+ && ! TREE_STATIC (decl)
+ && type_needs_gc_entry (type))
+ DECL_GC_OFFSET (decl) = size_int (++current_function_obstack_index);
+
+ if (TREE_CODE (decl) == VAR_DECL && DECL_VIRTUAL_P (decl))
+ make_decl_rtl (decl, NULL_PTR, toplev);
+ else if (TREE_CODE (decl) == VAR_DECL
+ && TREE_READONLY (decl)
+ && DECL_INITIAL (decl) != NULL_TREE
+ && DECL_INITIAL (decl) != error_mark_node
+ && ! EMPTY_CONSTRUCTOR_P (DECL_INITIAL (decl)))
+ {
+ DECL_INITIAL (decl) = save_expr (DECL_INITIAL (decl));
+
+ if (asmspec)
+ DECL_ASSEMBLER_NAME (decl) = get_identifier (asmspec);
+
+ if (! toplev
+ && TREE_STATIC (decl)
+ && ! TREE_SIDE_EFFECTS (decl)
+ && ! TREE_PUBLIC (decl)
+ && ! DECL_EXTERNAL (decl)
+ && ! TYPE_NEEDS_DESTRUCTOR (type)
+ && DECL_MODE (decl) != BLKmode)
+ {
+ /* If this variable is really a constant, then fill its DECL_RTL
+ slot with something which won't take up storage.
+ If something later should take its address, we can always give
+ it legitimate RTL at that time. */
+ DECL_RTL (decl) = gen_reg_rtx (DECL_MODE (decl));
+ store_expr (DECL_INITIAL (decl), DECL_RTL (decl), 0);
+ TREE_ASM_WRITTEN (decl) = 1;
+ }
+ else if (toplev && ! TREE_PUBLIC (decl))
+ {
+ /* If this is a static const, change its apparent linkage
+ if it belongs to a #pragma interface. */
+ if (!interface_unknown)
+ {
+ TREE_PUBLIC (decl) = 1;
+ DECL_EXTERNAL (decl) = interface_only;
+ }
+ make_decl_rtl (decl, asmspec, toplev);
+ }
+ else
+ rest_of_decl_compilation (decl, asmspec, toplev, 0);
+ }
+ else if (TREE_CODE (decl) == VAR_DECL
+ && DECL_LANG_SPECIFIC (decl)
+ && DECL_IN_AGGR_P (decl))
+ {
+ if (TREE_STATIC (decl))
+ {
+ if (init == NULL_TREE
+#ifdef DEFAULT_STATIC_DEFS
+ /* If this code is dead, then users must
+ explicitly declare static member variables
+ outside the class def'n as well. */
+ && TYPE_NEEDS_CONSTRUCTING (type)
+#endif
+ )
+ {
+ DECL_EXTERNAL (decl) = 1;
+ make_decl_rtl (decl, asmspec, 1);
+ }
+ else
+ rest_of_decl_compilation (decl, asmspec, toplev, 0);
+ }
+ else
+ /* Just a constant field. Should not need any rtl. */
+ goto finish_end0;
+ }
+ else
+ rest_of_decl_compilation (decl, asmspec, toplev, 0);
+
+ if (was_temp)
+ resume_temporary_allocation ();
+
+ if (type != error_mark_node
+ && TYPE_LANG_SPECIFIC (type)
+ && CLASSTYPE_ABSTRACT_VIRTUALS (type))
+ abstract_virtuals_error (decl, type);
+ else if ((TREE_CODE (type) == FUNCTION_TYPE
+ || TREE_CODE (type) == METHOD_TYPE)
+ && TYPE_LANG_SPECIFIC (TREE_TYPE (type))
+ && CLASSTYPE_ABSTRACT_VIRTUALS (TREE_TYPE (type)))
+ abstract_virtuals_error (decl, TREE_TYPE (type));
+
+ if (TYPE_LANG_SPECIFIC (type) && IS_SIGNATURE (type))
+ signature_error (decl, type);
+ else if ((TREE_CODE (type) == FUNCTION_TYPE
+ || TREE_CODE (type) == METHOD_TYPE)
+ && TYPE_LANG_SPECIFIC (TREE_TYPE (type))
+ && IS_SIGNATURE (TREE_TYPE (type)))
+ signature_error (decl, TREE_TYPE (type));
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+#if 0
+ /* C++: Handle overloaded functions with default parameters. */
+ if (DECL_OVERLOADED (decl))
+ {
+ tree parmtypes = TYPE_ARG_TYPES (type);
+ tree prev = NULL_TREE;
+ tree original_name = DECL_NAME (decl);
+ struct lang_decl *tmp_lang_decl = DECL_LANG_SPECIFIC (decl);
+ /* All variants will share an uncollectible lang_decl. */
+ copy_decl_lang_specific (decl);
+
+ while (parmtypes && parmtypes != void_list_node)
+ {
+ /* The default value for the parameter in parmtypes is
+ stored in the TREE_PURPOSE of the TREE_LIST. */
+ if (TREE_PURPOSE (parmtypes))
+ {
+ tree fnname, fndecl;
+ tree *argp;
+
+ argp = prev ? & TREE_CHAIN (prev)
+ : & TYPE_ARG_TYPES (type);
+
+ *argp = NULL_TREE;
+ fnname = build_decl_overload (original_name,
+ TYPE_ARG_TYPES (type), 0);
+ *argp = parmtypes;
+ fndecl = build_decl (FUNCTION_DECL, fnname, type);
+ DECL_EXTERNAL (fndecl) = DECL_EXTERNAL (decl);
+ TREE_PUBLIC (fndecl) = TREE_PUBLIC (decl);
+ DECL_INLINE (fndecl) = DECL_INLINE (decl);
+ /* Keep G++ from thinking this function is unused.
+ It is only used to speed up search in name space. */
+ TREE_USED (fndecl) = 1;
+ TREE_ASM_WRITTEN (fndecl) = 1;
+ DECL_INITIAL (fndecl) = NULL_TREE;
+ DECL_LANG_SPECIFIC (fndecl) = DECL_LANG_SPECIFIC (decl);
+ fndecl = pushdecl (fndecl);
+ DECL_INITIAL (fndecl) = error_mark_node;
+ DECL_RTL (fndecl) = DECL_RTL (decl);
+ }
+ prev = parmtypes;
+ parmtypes = TREE_CHAIN (parmtypes);
+ }
+ DECL_LANG_SPECIFIC (decl) = tmp_lang_decl;
+ }
+#endif
+ }
+ else if (DECL_EXTERNAL (decl))
+ ;
+ else if (TREE_STATIC (decl) && type != error_mark_node)
+ {
+ /* Cleanups for static variables are handled by `finish_file'. */
+ if (TYPE_NEEDS_CONSTRUCTING (type) || init != NULL_TREE)
+ expand_static_init (decl, init);
+ else if (TYPE_NEEDS_DESTRUCTOR (type))
+ static_aggregates = perm_tree_cons (NULL_TREE, decl,
+ static_aggregates);
+
+ /* Make entry in appropriate vector. */
+ if (flag_gc && type_needs_gc_entry (type))
+ build_static_gc_entry (decl, type);
+ }
+ else if (! toplev)
+ {
+ tree old_cleanups = cleanups_this_call;
+ /* This is a declared decl which must live until the
+ end of the binding contour. It may need a cleanup. */
+
+ /* Recompute the RTL of a local array now
+ if it used to be an incomplete type. */
+ if (was_incomplete && ! TREE_STATIC (decl))
+ {
+ /* If we used it already as memory, it must stay in memory. */
+ TREE_ADDRESSABLE (decl) = TREE_USED (decl);
+ /* If it's still incomplete now, no init will save it. */
+ if (DECL_SIZE (decl) == NULL_TREE)
+ DECL_INITIAL (decl) = NULL_TREE;
+ expand_decl (decl);
+ }
+ else if (! TREE_ASM_WRITTEN (decl)
+ && (TYPE_SIZE (type) != NULL_TREE
+ || TREE_CODE (type) == ARRAY_TYPE))
+ {
+ /* Do this here, because we did not expand this decl's
+ rtl in start_decl. */
+ if (DECL_RTL (decl) == NULL_RTX)
+ expand_decl (decl);
+ else if (cleanup)
+ {
+ /* XXX: Why don't we use decl here? */
+ /* Ans: Because it was already expanded? */
+ if (! expand_decl_cleanup (NULL_TREE, cleanup))
+ cp_error ("parser lost in parsing declaration of `%D'",
+ decl);
+ /* Cleanup used up here. */
+ cleanup = NULL_TREE;
+ }
+ }
+
+ if (DECL_SIZE (decl) && type != error_mark_node)
+ {
+ /* Compute and store the initial value. */
+ expand_decl_init (decl);
+
+ if (init || TYPE_NEEDS_CONSTRUCTING (type))
+ {
+ emit_line_note (DECL_SOURCE_FILE (decl),
+ DECL_SOURCE_LINE (decl));
+ expand_aggr_init (decl, init, 0);
+ }
+
+ /* Set this to 0 so we can tell whether an aggregate
+ which was initialized was ever used. */
+ if (TYPE_NEEDS_CONSTRUCTING (type))
+ TREE_USED (decl) = 0;
+
+ /* Store the cleanup, if there was one. */
+ if (cleanup)
+ {
+ if (! expand_decl_cleanup (decl, cleanup))
+ cp_error ("parser lost in parsing declaration of `%D'",
+ decl);
+ }
+ }
+ /* Clean up any temporaries needed for the initial value. */
+ expand_cleanups_to (old_cleanups);
+ }
+ finish_end0:
+
+ /* Undo call to `pushclass' that was done in `start_decl'
+ due to initialization of qualified member variable.
+ I.e., Foo::x = 10; */
+ {
+ tree context = DECL_CONTEXT (decl);
+ if (context
+ && TREE_CODE_CLASS (TREE_CODE (context)) == 't'
+ && (TREE_CODE (decl) == VAR_DECL
+ /* We also have a pushclass done that we need to undo here
+ if we're at top level and declare a method. */
+ || (TREE_CODE (decl) == FUNCTION_DECL
+ /* If size hasn't been set, we're still defining it,
+ and therefore inside the class body; don't pop
+ the binding level. */
+ && TYPE_SIZE (context) != NULL_TREE
+ /* The binding level gets popped elsewhere for a
+ friend declaration inside another class. */
+ /*
+ && TYPE_IDENTIFIER (context) == current_class_name
+ */
+ && context == current_class_type
+ )))
+ popclass (1);
+ }
+ }
+
+ finish_end:
+
+ /* If requested, warn about definitions of large data objects. */
+
+ if (warn_larger_than
+ && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == PARM_DECL)
+ && !DECL_EXTERNAL (decl))
+ {
+ register tree decl_size = DECL_SIZE (decl);
+
+ if (decl_size && TREE_CODE (decl_size) == INTEGER_CST)
+ {
+ unsigned units = TREE_INT_CST_LOW (decl_size) / BITS_PER_UNIT;
+
+ if (units > larger_than_size)
+ warning_with_decl (decl, "size of `%s' is %u bytes", units);
+ }
+ }
+
+ if (need_pop)
+ {
+ /* Resume permanent allocation, if not within a function. */
+ /* The corresponding push_obstacks_nochange is in start_decl,
+ start_method, groktypename, and in grokfield. */
+ pop_obstacks ();
+ }
+
+ if (was_readonly)
+ TREE_READONLY (decl) = 1;
+
+ if (flag_cadillac)
+ cadillac_finish_decl (decl);
+}
+
+void
+expand_static_init (decl, init)
+ tree decl;
+ tree init;
+{
+ tree oldstatic = value_member (decl, static_aggregates);
+ tree old_cleanups;
+
+ if (oldstatic)
+ {
+ if (TREE_PURPOSE (oldstatic) && init != NULL_TREE)
+ cp_error ("multiple initializations given for `%D'", decl);
+ }
+ else if (current_binding_level != global_binding_level
+ && current_binding_level->pseudo_global == 0)
+ {
+ tree temp;
+
+ /* Remember this information until end of file. */
+ push_obstacks (&permanent_obstack, &permanent_obstack);
+
+ /* Emit code to perform this initialization but once. */
+ temp = get_temp_name (integer_type_node, 1);
+ rest_of_decl_compilation (temp, NULL_PTR, 0, 0);
+ expand_start_cond (build_binary_op (EQ_EXPR, temp,
+ integer_zero_node, 1), 0);
+ old_cleanups = cleanups_this_call;
+ expand_assignment (temp, integer_one_node, 0, 0);
+ if (TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl)))
+ {
+ expand_aggr_init (decl, init, 0);
+ do_pending_stack_adjust ();
+ }
+ else
+ expand_assignment (decl, init, 0, 0);
+ /* Clean up any temporaries needed for the initial value. */
+ expand_cleanups_to (old_cleanups);
+ expand_end_cond ();
+ if (TYPE_NEEDS_DESTRUCTOR (TREE_TYPE (decl)))
+ {
+ static_aggregates = perm_tree_cons (temp, decl, static_aggregates);
+ TREE_STATIC (static_aggregates) = 1;
+ }
+
+ /* Resume old (possibly temporary) allocation. */
+ pop_obstacks ();
+ }
+ else
+ {
+ /* This code takes into account the memory allocation
+ policy of `start_decl'. Namely, if TYPE_NEEDS_CONSTRUCTING
+ does not hold for this object, then we must make permanent
+ the storage currently in the temporary obstack. */
+ if (! TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl)))
+ preserve_initializer ();
+ static_aggregates = perm_tree_cons (init, decl, static_aggregates);
+ }
+}
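+
+/* A sketch of what the code above emits for a local static that
+ needs construction (the guard name is illustrative only):
+
+ static int __guard;   temporary obtained via `get_temp_name'
+ if (__guard == 0)
+ {
+ __guard = 1;
+ run the constructor (or a plain assignment otherwise);
+ }
+
+ When the type also needs a destructor, the (guard, decl) pair is
+ pushed onto `static_aggregates' so `finish_file' can arrange the
+ cleanup. */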
+
+/* Make TYPE a complete type based on INITIAL_VALUE.
+ Return 0 if successful, 1 if INITIAL_VALUE can't be deciphered,
+ 2 if there was no information (in which case a one-element domain
+ is assumed if DO_DEFAULT). */
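+/* For example: `int a[] = { 1, 2, 3 };' arrives here with a null
+ TYPE_DOMAIN and a three-element CONSTRUCTOR, so MAXINDEX is 2 and
+ the domain becomes [0..2]; `char s[] = "hi";' takes the STRING_CST
+ length (trailing null included), also giving [0..2]. */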
+
+int
+complete_array_type (type, initial_value, do_default)
+ tree type, initial_value;
+ int do_default;
+{
+ register tree maxindex = NULL_TREE;
+ int value = 0;
+
+ if (initial_value)
+ {
+ /* Note MAXINDEX is really the maximum index,
+ one less than the size. */
+ if (TREE_CODE (initial_value) == STRING_CST)
+ maxindex = build_int_2 (TREE_STRING_LENGTH (initial_value) - 1, 0);
+ else if (TREE_CODE (initial_value) == CONSTRUCTOR)
+ {
+ register int nelts
+ = list_length (CONSTRUCTOR_ELTS (initial_value));
+ maxindex = build_int_2 (nelts - 1, - (nelts == 0));
+ }
+ else
+ {
+ /* Make an error message unless that happened already. */
+ if (initial_value != error_mark_node)
+ value = 1;
+
+ /* Prevent further error messages. */
+ maxindex = build_int_2 (0, 0);
+ }
+ }
+
+ if (!maxindex)
+ {
+ if (do_default)
+ maxindex = build_int_2 (0, 0);
+ value = 2;
+ }
+
+ if (maxindex)
+ {
+ tree itype;
+
+ TYPE_DOMAIN (type) = build_index_type (maxindex);
+ if (!TREE_TYPE (maxindex))
+ TREE_TYPE (maxindex) = TYPE_DOMAIN (type);
+ if (initial_value)
+ itype = TREE_TYPE (initial_value);
+ else
+ itype = NULL;
+ if (itype && !TYPE_DOMAIN (itype))
+ TYPE_DOMAIN (itype) = TYPE_DOMAIN (type);
+ }
+
+ /* Lay out the type now that we can get the real answer. */
+
+ layout_type (type);
+
+ return value;
+}
+
+/* Return zero if something is declared to be a member of type
+ CTYPE when in the context of CUR_TYPE. STRING is the error
+ message to print in that case. Otherwise, quietly return 1. */
+static int
+member_function_or_else (ctype, cur_type, string)
+ tree ctype, cur_type;
+ char *string;
+{
+ if (ctype && ctype != cur_type)
+ {
+ error (string, TYPE_NAME_STRING (ctype));
+ return 0;
+ }
+ return 1;
+}
+
+/* Subroutine of `grokdeclarator'. */
+
+/* Generate errors possibly applicable for a given set of specifiers.
+ This is for ARM $7.1.2. */
+static void
+bad_specifiers (object, type, virtualp, quals, inlinep, friendp, raises)
+ tree object;
+ char *type;
+ int virtualp, quals, friendp, raises, inlinep;
+{
+ if (virtualp)
+ cp_error ("`%D' declared as a `virtual' %s", object, type);
+ if (inlinep)
+ cp_error ("`%D' declared as an `inline' %s", object, type);
+ if (quals)
+ cp_error ("`const' and `volatile' function specifiers on `%D' invalid in %s declaration",
+ object, type);
+ if (friendp)
+ cp_error_at ("invalid friend declaration", object);
+ if (raises)
+ cp_error_at ("invalid raises declaration", object);
+}
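+
+/* For instance, something like `virtual typedef int T;' would reach
+ here with VIRTUALP set and TYPE naming the offending kind of
+ declaration, so the first error above fires; the exact TYPE string
+ is supplied by the caller. */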
+
+/* CTYPE is class type, or null if non-class.
+ TYPE is type this FUNCTION_DECL should have, either FUNCTION_TYPE
+ or METHOD_TYPE.
+ DECLARATOR is the function's name.
+ VIRTUALP is the truth value of whether the function is virtual or not.
+ FLAGS are to be passed through to `grokclassfn'.
+ QUALS are qualifiers indicating whether the function is `const'
+ or `volatile'.
+ RAISES is a list of exceptions that this function can raise.
+ CHECK is 1 if we must find this method in CTYPE, 0 if we should
+ not look, and -1 if we should not call `grokclassfn' at all. */
+static tree
+grokfndecl (ctype, type, declarator, virtualp, flags, quals,
+ raises, check, publicp)
+ tree ctype, type;
+ tree declarator;
+ int virtualp;
+ enum overload_flags flags;
+ tree quals, raises;
+ int check, publicp;
+{
+ tree cname, decl;
+ int staticp = ctype && TREE_CODE (type) == FUNCTION_TYPE;
+
+ if (ctype)
+ cname = TREE_CODE (TYPE_NAME (ctype)) == TYPE_DECL
+ ? TYPE_IDENTIFIER (ctype) : TYPE_NAME (ctype);
+ else
+ cname = NULL_TREE;
+
+ if (raises)
+ {
+ type = build_exception_variant (ctype, type, raises);
+ raises = TYPE_RAISES_EXCEPTIONS (type);
+ }
+ decl = build_lang_decl (FUNCTION_DECL, declarator, type);
+ /* propagate volatile out from type to decl */
+ if (TYPE_VOLATILE (type))
+ TREE_THIS_VOLATILE (decl) = 1;
+
+ /* Should probably propagate const out from type to decl I bet (mrs). */
+ if (staticp)
+ {
+ DECL_STATIC_FUNCTION_P (decl) = 1;
+ DECL_CONTEXT (decl) = ctype;
+ DECL_CLASS_CONTEXT (decl) = ctype;
+ }
+
+ if (publicp)
+ TREE_PUBLIC (decl) = 1;
+
+ DECL_EXTERNAL (decl) = 1;
+ if (quals != NULL_TREE && TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ cp_error ("%smember function `%D' cannot have `%T' method qualifier",
+ (ctype ? "static " : "non-"), decl, TREE_VALUE (quals));
+ quals = NULL_TREE;
+ }
+
+ if (IDENTIFIER_OPNAME_P (DECL_NAME (decl)))
+ grok_op_properties (decl, virtualp, check < 0);
+
+ /* Caller will do the rest of this. */
+ if (check < 0)
+ return decl;
+
+ if (flags == NO_SPECIAL && ctype && constructor_name (cname) == declarator)
+ {
+ tree tmp;
+ /* Just handle constructors here. We could do this
+ inside the following if stmt, but I think
+ that the code is more legible by breaking this
+ case out. See comments below for what each of
+ the following calls is supposed to do. */
+ DECL_CONSTRUCTOR_P (decl) = 1;
+
+ grokclassfn (ctype, declarator, decl, flags, quals);
+ if (check)
+ check_classfn (ctype, declarator, decl);
+ if (! grok_ctor_properties (ctype, decl))
+ return NULL_TREE;
+
+ if (check == 0 && ! current_function_decl)
+ {
+ /* FIXME: this should only need to look at
+ IDENTIFIER_GLOBAL_VALUE. */
+ tmp = lookup_name (DECL_ASSEMBLER_NAME (decl), 0);
+ if (tmp == NULL_TREE)
+ IDENTIFIER_GLOBAL_VALUE (DECL_ASSEMBLER_NAME (decl)) = decl;
+ else if (TREE_CODE (tmp) != TREE_CODE (decl))
+ cp_error ("inconsistent declarations for `%D'", decl);
+ else
+ {
+ duplicate_decls (decl, tmp);
+ decl = tmp;
+ /* avoid creating circularities. */
+ DECL_CHAIN (decl) = NULL_TREE;
+ }
+ make_decl_rtl (decl, NULL_PTR, 1);
+ }
+ }
+ else
+ {
+ tree tmp;
+
+ /* Function gets the ugly name, field gets the nice one.
+ This call may change the type of the function (because
+ of default parameters)! */
+ if (ctype != NULL_TREE)
+ grokclassfn (ctype, cname, decl, flags, quals);
+
+ if (ctype != NULL_TREE && check)
+ check_classfn (ctype, cname, decl);
+
+ if (ctype == NULL_TREE || check)
+ return decl;
+
+ /* Now install the declaration of this function so that others may
+ find it (esp. its DECL_FRIENDLIST). Don't do this for local class
+ methods, though. */
+ if (! current_function_decl)
+ {
+ /* FIXME: this should only need to look at
+ IDENTIFIER_GLOBAL_VALUE. */
+ tmp = lookup_name (DECL_ASSEMBLER_NAME (decl), 0);
+ if (tmp == NULL_TREE)
+ IDENTIFIER_GLOBAL_VALUE (DECL_ASSEMBLER_NAME (decl)) = decl;
+ else if (TREE_CODE (tmp) != TREE_CODE (decl))
+ cp_error ("inconsistent declarations for `%D'", decl);
+ else
+ {
+ duplicate_decls (decl, tmp);
+ decl = tmp;
+ /* avoid creating circularities. */
+ DECL_CHAIN (decl) = NULL_TREE;
+ }
+ make_decl_rtl (decl, NULL_PTR, 1);
+ }
+
+ /* If this declaration supersedes the declaration of
+ a method declared virtual in the base class, then
+ mark this field as being virtual as well. */
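+ /* For example: given `struct B { virtual void f (); };' and
+ `struct D : B { void f (); };', the walk below finds B::f via
+ `get_matching_virtual' and sets DECL_VIRTUAL_P on D::f even
+ though the user did not repeat `virtual'. */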
+ {
+ tree binfos = BINFO_BASETYPES (TYPE_BINFO (ctype));
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ if (TYPE_VIRTUAL_P (BINFO_TYPE (base_binfo))
+ || flag_all_virtual == 1)
+ {
+ tmp = get_matching_virtual (base_binfo, decl,
+ flags == DTOR_FLAG);
+ if (tmp)
+ {
+ /* If this function overrides some virtual in some base
+ class, then the function itself is also necessarily
+ virtual, even if the user didn't explicitly say so. */
+ DECL_VIRTUAL_P (decl) = 1;
+
+ /* The TMP we really want is the one from the deepest
+ baseclass on this path, taking care not to
+ duplicate if we have already found it (via another
+ path to its virtual baseclass). */
+ if (staticp)
+ {
+ cp_error ("method `%D' may not be declared static",
+ decl);
+ cp_error_at ("(since `%D' declared virtual in base class.)",
+ tmp);
+ break;
+ }
+ virtualp = 1;
+
+ {
+ /* The argument types may have changed... */
+ tree argtypes = TYPE_ARG_TYPES (TREE_TYPE (decl));
+ tree base_variant = TREE_TYPE (TREE_VALUE (argtypes));
+
+ argtypes = commonparms (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (tmp))),
+ TREE_CHAIN (argtypes));
+ /* But the return type has not. */
+ type = build_cplus_method_type (base_variant, TREE_TYPE (type), argtypes);
+ if (raises)
+ {
+ type = build_exception_variant (ctype, type, raises);
+ raises = TYPE_RAISES_EXCEPTIONS (type);
+ }
+ TREE_TYPE (decl) = type;
+ DECL_VINDEX (decl)
+ = tree_cons (NULL_TREE, tmp, DECL_VINDEX (decl));
+ }
+ break;
+ }
+ }
+ }
+ }
+ if (virtualp)
+ {
+ if (DECL_VINDEX (decl) == NULL_TREE)
+ DECL_VINDEX (decl) = error_mark_node;
+ IDENTIFIER_VIRTUAL_P (DECL_NAME (decl)) = 1;
+ if (ctype && CLASSTYPE_VTABLE_NEEDS_WRITING (ctype)
+ /* If this function is derived from a template, don't
+ make it public. This shouldn't be here, but there's
+ no good way to override the interface pragmas for one
+ function or class only. Bletch. */
+ && IDENTIFIER_TEMPLATE (TYPE_IDENTIFIER (ctype)) == NULL_TREE
+ && (write_virtuals == 2
+ || (write_virtuals == 3
+ && CLASSTYPE_INTERFACE_KNOWN (ctype))))
+ TREE_PUBLIC (decl) = 1;
+ }
+ }
+ return decl;
+}
+
+static tree
+grokvardecl (type, declarator, specbits, initialized)
+ tree type;
+ tree declarator;
+ RID_BIT_TYPE specbits;
+ int initialized;
+{
+ tree decl;
+
+ if (TREE_CODE (type) == OFFSET_TYPE)
+ {
+ /* If you declare a static member so that it
+ can be initialized, the code will reach here. */
+ tree basetype = TYPE_OFFSET_BASETYPE (type);
+ type = TREE_TYPE (type);
+ decl = build_lang_field_decl (VAR_DECL, declarator, type);
+ DECL_CONTEXT (decl) = basetype;
+ DECL_CLASS_CONTEXT (decl) = basetype;
+ }
+ else
+ decl = build_decl (VAR_DECL, declarator, type);
+
+ if (RIDBIT_SETP (RID_EXTERN, specbits))
+ {
+ DECL_THIS_EXTERN (decl) = 1;
+ DECL_EXTERNAL (decl) = !initialized;
+ }
+
+ /* In class context, static means one per class,
+ public access, and static storage. */
+ if (DECL_FIELD_CONTEXT (decl) != NULL_TREE
+ && IS_AGGR_TYPE (DECL_FIELD_CONTEXT (decl)))
+ {
+ TREE_PUBLIC (decl) = 1;
+ TREE_STATIC (decl) = 1;
+ DECL_EXTERNAL (decl) = 0;
+ }
+ /* At top level, either `static' or no s.c. makes a definition
+ (perhaps tentative), and absence of `static' makes it public. */
+ else if (current_binding_level == global_binding_level)
+ {
+ TREE_PUBLIC (decl) = RIDBIT_NOTSETP (RID_STATIC, specbits);
+ TREE_STATIC (decl) = ! DECL_EXTERNAL (decl);
+ }
+ /* Not at top level, only `static' makes a static definition. */
+ else
+ {
+ TREE_STATIC (decl) = !! RIDBIT_SETP (RID_STATIC, specbits);
+ TREE_PUBLIC (decl) = DECL_EXTERNAL (decl);
+ }
+ return decl;
+}
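+
+/* A summary of the linkage rules above (illustrative):
+
+ int i;         file scope: TREE_PUBLIC and TREE_STATIC
+ static int j;  file scope: static storage, not public
+ extern int k;  DECL_THIS_EXTERN; external unless an
+                initializer makes it a definition
+ void f () { static int l; }   block scope: TREE_STATIC only */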
+
+/* Create a canonical pointer to member function type. */
+
+tree
+build_ptrmemfunc_type (type)
+ tree type;
+{
+ tree fields[4];
+ tree t;
+ tree u;
+
+ /* If a canonical type already exists for this type, use it. We use
+ this method instead of type_hash_canon, because it only does a
+ simple equality check on the list of field members. */
+
+ if ((t = TYPE_GET_PTRMEMFUNC_TYPE (type)))
+ return t;
+
+ push_obstacks (TYPE_OBSTACK (type), TYPE_OBSTACK (type));
+
+ u = make_lang_type (UNION_TYPE);
+ fields[0] = build_lang_field_decl (FIELD_DECL, pfn_identifier, type);
+ fields[1] = build_lang_field_decl (FIELD_DECL, delta2_identifier,
+ delta_type_node);
+ finish_builtin_type (u, "__ptrmemfunc_type", fields, 1, ptr_type_node);
+ TYPE_NAME (u) = NULL_TREE;
+
+ t = make_lang_type (RECORD_TYPE);
+
+ /* Let the front-end know this is a pointer to member function. */
+ TYPE_PTRMEMFUNC_FLAG(t) = 1;
+
+ fields[0] = build_lang_field_decl (FIELD_DECL, delta_identifier,
+ delta_type_node);
+ fields[1] = build_lang_field_decl (FIELD_DECL, index_identifier,
+ delta_type_node);
+ fields[2] = build_lang_field_decl (FIELD_DECL, pfn_or_delta2_identifier, u);
+ finish_builtin_type (t, "__ptrmemfunc_type", fields, 2, ptr_type_node);
+
+ pop_obstacks ();
+
+ /* Zap out the name so that the back-end will give us the debugging
+ information for this anonymous RECORD_TYPE. */
+ TYPE_NAME (t) = NULL_TREE;
+
+ TYPE_SET_PTRMEMFUNC_TYPE (type, t);
+
+ /* Seems to be wanted. */
+ CLASSTYPE_GOT_SEMICOLON (t) = 1;
+ return t;
+}
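+
+/* The record built above corresponds roughly to (a sketch; the real
+ field names are the identifier nodes used above):
+
+ struct __ptrmemfunc_type {
+ ptrdiff_t __delta;
+ ptrdiff_t __index;
+ union { void (*__pfn) (); ptrdiff_t __delta2; } __pfn_or_delta2;
+ };
+
+ For a virtual function, __index selects a vtable slot; for a
+ non-virtual one, __pfn is called directly. */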
+
+/* Given declspecs and a declarator,
+ determine the name and type of the object declared
+ and construct a ..._DECL node for it.
+ (In one case we can return a ..._TYPE node instead.
+ For invalid input we sometimes return 0.)
+
+ DECLSPECS is a chain of tree_list nodes whose value fields
+ are the storage classes and type specifiers.
+
+ DECL_CONTEXT says which syntactic context this declaration is in:
+ NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL.
+ FUNCDEF for a function definition. Like NORMAL but a few different
+ error messages in each case. Return value may be zero meaning
+ this definition is too screwy to try to parse.
+ MEMFUNCDEF for a function definition. Like FUNCDEF but prepares to
+ handle member functions (which have FIELD context).
+ Return value may be zero meaning this definition is too screwy to
+ try to parse.
+ PARM for a parameter declaration (either within a function prototype
+ or before a function body). Make a PARM_DECL, or return void_type_node.
+ TYPENAME if for a typename (in a cast or sizeof).
+ Don't make a DECL node; just return the ..._TYPE node.
+ FIELD for a struct or union field; make a FIELD_DECL.
+ BITFIELD for a field with specified width.
+ INITIALIZED is 1 if the decl has an initializer.
+
+ In the TYPENAME case, DECLARATOR is really an absolute declarator.
+ It may also be so in the PARM case, for a prototype where the
+ argument type is specified but not the name.
+
+ This function is where the complicated C meanings of `static'
+ and `extern' are interpreted.
+
+ For C++, if there is any monkey business to do, the function which
+ calls this one must do it, i.e., prepending instance variables,
+ renaming overloaded function names, etc.
+
+ Note that in C++ it is an error to define, within a class, a method
+ which does not belong to that class.
+
+ Except in the case where SCOPE_REFs are implicitly known (such as
+ methods within a class being redundantly qualified),
+ declarations which involve SCOPE_REFs are returned as SCOPE_REFs
+ (class_name::decl_name). The caller must also deal with this.
+
+ If a constructor or destructor is seen, and the context is FIELD,
+ then the type gains the attribute TREE_HAS_x. If such a declaration
+ is erroneous, NULL_TREE is returned.
+
+ QUALS is used only for FUNCDEF and MEMFUNCDEF cases. For a member
+ function, these are the qualifiers to give to the `this' pointer.
+
+ May return void_type_node if the declarator turned out to be a friend.
+ See grokfield for details. */
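+/* For example: given `int *f (double);', DECLSPECS is roughly the
+ list (int) and DECLARATOR is an INDIRECT_REF wrapping a CALL_EXPR
+ wrapping the IDENTIFIER_NODE `f'; the loop below therefore ends
+ with INNERMOST_CODE == CALL_EXPR, and a FUNCTION_DECL returning
+ `int *' is built. */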
+
+enum return_types { return_normal, return_ctor, return_dtor, return_conversion };
+
+tree
+grokdeclarator (declarator, declspecs, decl_context, initialized, raises)
+ tree declspecs;
+ tree declarator;
+ enum decl_context decl_context;
+ int initialized;
+ tree raises;
+{
+ RID_BIT_TYPE specbits;
+ int nclasses = 0;
+ tree spec;
+ tree type = NULL_TREE;
+ int longlong = 0;
+ int constp;
+ int volatilep;
+ int virtualp, friendp, inlinep, staticp;
+ int explicit_int = 0;
+ int explicit_char = 0;
+ int opaque_typedef = 0;
+ tree typedef_decl = NULL_TREE;
+ char *name;
+ tree typedef_type = NULL_TREE;
+ int funcdef_flag = 0;
+ enum tree_code innermost_code = ERROR_MARK;
+ int bitfield = 0;
+ int size_varies = 0;
+ /* Set this to error_mark_node for FIELD_DECLs we could not handle properly.
+ All FIELD_DECLs we build here have `init' put into their DECL_INITIAL. */
+ tree init = NULL_TREE;
+
+ /* Keep track of what sort of function is being processed
+ so that we can warn about default return values, or explicit
+ return values which do not match prescribed defaults. */
+ enum return_types return_type = return_normal;
+
+ tree dname = NULL_TREE;
+ tree ctype = current_class_type;
+ tree ctor_return_type = NULL_TREE;
+ enum overload_flags flags = NO_SPECIAL;
+ tree quals = NULL_TREE;
+
+ RIDBIT_RESET_ALL (specbits);
+ if (decl_context == FUNCDEF)
+ funcdef_flag = 1, decl_context = NORMAL;
+ else if (decl_context == MEMFUNCDEF)
+ funcdef_flag = -1, decl_context = FIELD;
+ else if (decl_context == BITFIELD)
+ bitfield = 1, decl_context = FIELD;
+
+ if (flag_traditional && allocation_temporary_p ())
+ end_temporary_allocation ();
+
+ /* Look inside a declarator for the name being declared
+ and get it as a string, for an error message. */
+ {
+ tree last = NULL_TREE;
+ register tree decl = declarator;
+ name = NULL;
+
+ while (decl)
+ switch (TREE_CODE (decl))
+ {
+ case COND_EXPR:
+ ctype = NULL_TREE;
+ decl = TREE_OPERAND (decl, 0);
+ break;
+
+ case BIT_NOT_EXPR: /* for C++ destructors! */
+ {
+ tree name = TREE_OPERAND (decl, 0);
+ tree rename = NULL_TREE;
+
+ my_friendly_assert (flags == NO_SPECIAL, 152);
+ flags = DTOR_FLAG;
+ return_type = return_dtor;
+ my_friendly_assert (TREE_CODE (name) == IDENTIFIER_NODE, 153);
+ if (ctype == NULL_TREE)
+ {
+ if (current_class_type == NULL_TREE)
+ {
+ error ("destructors must be member functions");
+ flags = NO_SPECIAL;
+ }
+ else
+ {
+ tree t = constructor_name (current_class_name);
+ if (t != name)
+ rename = t;
+ }
+ }
+ else
+ {
+ tree t = constructor_name (ctype);
+ if (t != name)
+ rename = t;
+ }
+
+ if (rename)
+ {
+ error ("destructor `%s' must match class name `%s'",
+ IDENTIFIER_POINTER (name),
+ IDENTIFIER_POINTER (rename));
+ TREE_OPERAND (decl, 0) = rename;
+ }
+ decl = name;
+ }
+ break;
+
+ case ADDR_EXPR: /* C++ reference declaration */
+ /* fall through */
+ case ARRAY_REF:
+ case INDIRECT_REF:
+ ctype = NULL_TREE;
+ innermost_code = TREE_CODE (decl);
+ last = decl;
+ decl = TREE_OPERAND (decl, 0);
+ break;
+
+ case CALL_EXPR:
+ if (parmlist_is_exprlist (TREE_OPERAND (decl, 1)))
+ {
+ /* This is actually a variable declaration using constructor
+ syntax. We need to call start_decl and finish_decl so we
+ can get the variable initialized... */
+
+ if (last)
+ /* We need to insinuate ourselves into the declarator in place
+ of the CALL_EXPR. */
+ TREE_OPERAND (last, 0) = TREE_OPERAND (decl, 0);
+ else
+ declarator = TREE_OPERAND (decl, 0);
+
+ init = TREE_OPERAND (decl, 1);
+
+ decl = start_decl (declarator, declspecs, 1, NULL_TREE);
+ finish_decl (decl, init, NULL_TREE, 1);
+ return 0;
+ }
+ innermost_code = TREE_CODE (decl);
+ decl = TREE_OPERAND (decl, 0);
+ if (decl_context == FIELD && ctype == NULL_TREE)
+ ctype = current_class_type;
+ if (ctype != NULL_TREE
+ && decl != NULL_TREE && flags != DTOR_FLAG
+ && decl == constructor_name (ctype))
+ {
+ return_type = return_ctor;
+ ctor_return_type = ctype;
+ }
+ ctype = NULL_TREE;
+ break;
+
+ case IDENTIFIER_NODE:
+ dname = decl;
+ decl = NULL_TREE;
+
+ if (IDENTIFIER_OPNAME_P (dname))
+ {
+ if (IDENTIFIER_TYPENAME_P (dname))
+ {
+ my_friendly_assert (flags == NO_SPECIAL, 154);
+ flags = TYPENAME_FLAG;
+ ctor_return_type = TREE_TYPE (dname);
+ return_type = return_conversion;
+ }
+ name = operator_name_string (dname);
+ }
+ else
+ name = IDENTIFIER_POINTER (dname);
+ break;
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case ENUMERAL_TYPE:
+ /* Parse error puts this typespec where
+ a declarator should go. */
+ error ("declarator name missing");
+ dname = TYPE_NAME (decl);
+ if (dname && TREE_CODE (dname) == TYPE_DECL)
+ dname = DECL_NAME (dname);
+ name = dname ? IDENTIFIER_POINTER (dname) : "<nameless>";
+ declspecs = temp_tree_cons (NULL_TREE, decl, declspecs);
+ decl = NULL_TREE;
+ break;
+
+ /* C++ extension */
+ case SCOPE_REF:
+ {
+ /* Perform error checking, and convert class names to types.
+ We may call grokdeclarator multiple times for the same
+ tree structure, so only do the conversion once. In this
+ case, we have exactly what we want for `ctype'. */
+ tree cname = TREE_OPERAND (decl, 0);
+ if (cname == NULL_TREE)
+ ctype = NULL_TREE;
+ /* Can't use IS_AGGR_TYPE because CNAME might not be a type. */
+ else if (IS_AGGR_TYPE_CODE (TREE_CODE (cname))
+ || TREE_CODE (cname) == UNINSTANTIATED_P_TYPE)
+ ctype = cname;
+ else if (! is_aggr_typedef (cname, 1))
+ {
+ TREE_OPERAND (decl, 0) = NULL_TREE;
+ }
+ /* Must test TREE_OPERAND (decl, 1), in case user gives
+ us `typedef (class::memfunc)(int); memfunc *memfuncptr;' */
+ else if (TREE_OPERAND (decl, 1)
+ && TREE_CODE (TREE_OPERAND (decl, 1)) == INDIRECT_REF)
+ {
+ TREE_OPERAND (decl, 0) = IDENTIFIER_TYPE_VALUE (cname);
+ }
+ else if (ctype == NULL_TREE)
+ {
+ ctype = IDENTIFIER_TYPE_VALUE (cname);
+ TREE_OPERAND (decl, 0) = ctype;
+ }
+ else if (TREE_COMPLEXITY (decl) == current_class_depth)
+ TREE_OPERAND (decl, 0) = ctype;
+ else
+ {
+ if (! UNIQUELY_DERIVED_FROM_P (IDENTIFIER_TYPE_VALUE (cname),
+ ctype))
+ {
+ cp_error ("type `%T' is not derived from type `%T'",
+ IDENTIFIER_TYPE_VALUE (cname), ctype);
+ TREE_OPERAND (decl, 0) = NULL_TREE;
+ }
+ else
+ {
+ ctype = IDENTIFIER_TYPE_VALUE (cname);
+ TREE_OPERAND (decl, 0) = ctype;
+ }
+ }
+
+ decl = TREE_OPERAND (decl, 1);
+ if (ctype)
+ {
+ if (TREE_CODE (decl) == IDENTIFIER_NODE
+ && constructor_name (ctype) == decl)
+ {
+ return_type = return_ctor;
+ ctor_return_type = ctype;
+ }
+ else if (TREE_CODE (decl) == BIT_NOT_EXPR
+ && TREE_CODE (TREE_OPERAND (decl, 0)) == IDENTIFIER_NODE
+ && constructor_name (ctype) == TREE_OPERAND (decl, 0))
+ {
+ return_type = return_dtor;
+ ctor_return_type = ctype;
+ flags = DTOR_FLAG;
+ decl = TREE_OPERAND (decl, 0);
+ }
+ }
+ }
+ break;
+
+ case ERROR_MARK:
+ decl = NULL_TREE;
+ break;
+
+ default:
+ return 0; /* We used to do a 155 abort here. */
+ }
+ if (name == NULL)
+ name = "type name";
+ }
+
+ /* A function definition's declarator must have the form of
+ a function declarator. */
+
+ if (funcdef_flag && innermost_code != CALL_EXPR)
+ return 0;
+
+ /* Anything declared one level down from the top level
+ must be one of the parameters of a function
+ (because the body is at least two levels down). */
+
+ /* This heuristic cannot be applied to C++ nodes! Fixed, however,
+ by not allowing C++ class definitions to specify their parameters
+ with xdecls (they must be specified in the parmlist).
+
+ Since we now wait to push a class scope until we are sure that
+ we are in a legitimate method context, we must set oldcname
+ explicitly (since current_class_name is not yet alive). */
+
+ if (decl_context == NORMAL
+ && current_binding_level->level_chain == global_binding_level)
+ decl_context = PARM;
+
+ /* Look through the decl specs and record which ones appear.
+ Some typespecs are defined as built-in typenames.
+ Others, the ones that are modifiers of other types,
+ are represented by bits in SPECBITS: set the bits for
+ the modifiers that appear. Storage class keywords are also in SPECBITS.
+
+ If there is a typedef name or a type, store the type in TYPE.
+ This includes builtin typedefs such as `int'.
+
+ Set EXPLICIT_INT if the type is `int' or `char' and did not
+ come from a user typedef.
+
+ Set LONGLONG if `long' is mentioned twice.
+
+ For C++, constructors and destructors have their own fast treatment. */
+
+ for (spec = declspecs; spec; spec = TREE_CHAIN (spec))
+ {
+ register int i;
+ register tree id;
+
+ /* Certain parse errors slip through. For example,
+ `int class;' is not caught by the parser. Try
+ weakly to recover here. */
+ if (TREE_CODE (spec) != TREE_LIST)
+ return 0;
+
+ id = TREE_VALUE (spec);
+
+ if (TREE_CODE (id) == IDENTIFIER_NODE)
+ {
+ if (id == ridpointers[(int) RID_INT]
+ || id == ridpointers[(int) RID_CHAR]
+ || id == ridpointers[(int) RID_BOOL]
+ || id == ridpointers[(int) RID_WCHAR])
+ {
+ if (type)
+ error ("extraneous `%T' ignored", id);
+ else
+ {
+ if (id == ridpointers[(int) RID_INT])
+ explicit_int = 1;
+ else if (id == ridpointers[(int) RID_CHAR])
+ explicit_char = 1;
+ type = TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (id));
+ }
+ goto found;
+ }
+ /* C++ aggregate types. */
+ if (IDENTIFIER_HAS_TYPE_VALUE (id))
+ {
+ if (type)
+ cp_error ("multiple declarations `%T' and `%T'", type, id);
+ else
+ type = IDENTIFIER_TYPE_VALUE (id);
+ goto found;
+ }
+
+ for (i = (int) RID_FIRST_MODIFIER; i < (int) RID_MAX; i++)
+ {
+ if (ridpointers[i] == id)
+ {
+ if (i == (int) RID_LONG && RIDBIT_SETP (i, specbits))
+ {
+ if (pedantic && flag_ansi)
+ pedwarn ("duplicate `long'");
+ else if (longlong)
+ error ("`long long long' is too long for GCC");
+ else
+ longlong = 1;
+ }
+ else if (RIDBIT_SETP (i, specbits))
+ pedwarn ("duplicate `%s'", IDENTIFIER_POINTER (id));
+ RIDBIT_SET (i, specbits);
+ goto found;
+ }
+ }
+ }
+ if (type)
+ error ("two or more data types in declaration of `%s'", name);
+ else if (TREE_CODE (id) == IDENTIFIER_NODE)
+ {
+ register tree t = lookup_name (id, 1);
+ if (!t || TREE_CODE (t) != TYPE_DECL)
+ error ("`%s' fails to be a typedef or built in type",
+ IDENTIFIER_POINTER (id));
+ else
+ {
+ type = TREE_TYPE (t);
+ typedef_decl = t;
+ }
+ }
+ else if (TREE_CODE (id) != ERROR_MARK)
+ /* Can't change CLASS nodes into RECORD nodes here! */
+ type = id;
+
+ found: ;
+ }
+
+ typedef_type = type;
+
+ /* No type at all: default to `int', and set EXPLICIT_INT
+ because it was not a user-defined typedef.
+ Except when we have a `typedef' inside a signature, in
+ which case the type defaults to `unknown type' and is
+ instantiated when assigning to a signature pointer or ref. */
+
+ if (type == NULL_TREE
+ && (RIDBIT_SETP (RID_SIGNED, specbits)
+ || RIDBIT_SETP (RID_UNSIGNED, specbits)
+ || RIDBIT_SETP (RID_LONG, specbits)
+ || RIDBIT_SETP (RID_SHORT, specbits)))
+ {
+ /* These imply 'int'. */
+ type = integer_type_node;
+ explicit_int = 1;
+ }
+
+ if (type == NULL_TREE)
+ {
+ explicit_int = -1;
+ if (return_type == return_dtor)
+ type = void_type_node;
+ else if (return_type == return_ctor)
+ type = TYPE_POINTER_TO (ctor_return_type);
+ else if (return_type == return_conversion)
+ type = ctor_return_type;
+ else if (current_class_type
+ && IS_SIGNATURE (current_class_type)
+ && (RIDBIT_SETP (RID_TYPEDEF, specbits)
+ || SIGNATURE_GROKKING_TYPEDEF (current_class_type))
+ && (decl_context == FIELD || decl_context == NORMAL))
+ {
+ explicit_int = 0;
+ opaque_typedef = 1;
+ type = copy_node (opaque_type_node);
+ }
+ /* access declaration */
+ else if (decl_context == FIELD && declarator
+ && TREE_CODE (declarator) == SCOPE_REF)
+ type = void_type_node;
+ else
+ {
+ if (funcdef_flag)
+ {
+ if (warn_return_type
+ && return_type == return_normal)
+ /* Save warning until we know what is really going on. */
+ warn_about_return_type = 1;
+ }
+ else if (RIDBIT_SETP (RID_TYPEDEF, specbits))
+ pedwarn ("ANSI C++ forbids typedef which does not specify a type");
+ else if (declspecs == NULL_TREE &&
+ (innermost_code != CALL_EXPR || pedantic))
+ cp_pedwarn ("ANSI C++ forbids declaration `%D' with no type or storage class",
+ dname);
+ type = integer_type_node;
+ }
+ }
+ else if (return_type == return_dtor)
+ {
+ error ("return type specification for destructor invalid");
+ type = void_type_node;
+ }
+ else if (return_type == return_ctor)
+ {
+ error ("return type specification for constructor invalid");
+ type = TYPE_POINTER_TO (ctor_return_type);
+ }
+ else if (return_type == return_conversion)
+ {
+ if (comp_target_types (type, ctor_return_type, 1) == 0)
+ cp_error ("operator `%T' declared to return `%T'",
+ ctor_return_type, type);
+ else
+ cp_pedwarn ("return type specified for `operator %T'",
+ ctor_return_type);
+
+ type = ctor_return_type;
+ }
+ /* Catch typedefs that only specify a type, like 'typedef int;'. */
+ else if (RIDBIT_SETP (RID_TYPEDEF, specbits) && declarator == NULL_TREE)
+ {
+ /* Template "this is a type" syntax; just ignore for now. */
+ if (processing_template_defn)
+ return void_type_node;
+ }
+
+ ctype = NULL_TREE;
+
+ /* Now process the modifiers that were specified
+ and check for invalid combinations. */
+
+ /* Long double is a special combination. */
+
+ if (RIDBIT_SETP (RID_LONG, specbits)
+ && TYPE_MAIN_VARIANT (type) == double_type_node)
+ {
+ RIDBIT_RESET (RID_LONG, specbits);
+ type = build_type_variant (long_double_type_node, TYPE_READONLY (type),
+ TYPE_VOLATILE (type));
+ }
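+ /* E.g. `long double d;' reaches here as RID_LONG together with
+ double_type_node; the branch above consumes the RID_LONG bit and
+ substitutes long_double_type_node, preserving any const or
+ volatile qualification. */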
+
+ /* Check all other uses of type modifiers. */
+
+ if (RIDBIT_SETP (RID_UNSIGNED, specbits)
+ || RIDBIT_SETP (RID_SIGNED, specbits)
+ || RIDBIT_SETP (RID_LONG, specbits)
+ || RIDBIT_SETP (RID_SHORT, specbits))
+ {
+ int ok = 0;
+
+ if (TREE_CODE (type) == REAL_TYPE)
+ error ("short, signed or unsigned invalid for `%s'", name);
+ else if (TREE_CODE (type) != INTEGER_TYPE || type == wchar_type_node)
+ error ("long, short, signed or unsigned invalid for `%s'", name);
+ else if (RIDBIT_SETP (RID_LONG, specbits)
+ && RIDBIT_SETP (RID_SHORT, specbits))
+ error ("long and short specified together for `%s'", name);
+ else if ((RIDBIT_SETP (RID_LONG, specbits)
+ || RIDBIT_SETP (RID_SHORT, specbits))
+ && explicit_char)
+ error ("long or short specified with char for `%s'", name);
+ else if ((RIDBIT_SETP (RID_LONG, specbits)
+ || RIDBIT_SETP (RID_SHORT, specbits))
+ && TREE_CODE (type) == REAL_TYPE)
+ error ("long or short specified with floating type for `%s'", name);
+ else if (RIDBIT_SETP (RID_SIGNED, specbits)
+ && RIDBIT_SETP (RID_UNSIGNED, specbits))
+ error ("signed and unsigned given together for `%s'", name);
+ else
+ {
+ ok = 1;
+ if (!explicit_int && !explicit_char && pedantic)
+ {
+ pedwarn ("long, short, signed or unsigned used invalidly for `%s'",
+ name);
+ if (flag_pedantic_errors)
+ ok = 0;
+ }
+ }
+
+ /* Discard the type modifiers if they are invalid. */
+ if (! ok)
+ {
+ RIDBIT_RESET (RID_UNSIGNED, specbits);
+ RIDBIT_RESET (RID_SIGNED, specbits);
+ RIDBIT_RESET (RID_LONG, specbits);
+ RIDBIT_RESET (RID_SHORT, specbits);
+ longlong = 0;
+ }
+ }
+
+ /* Decide whether an integer type is signed or not.
+ Optionally treat bitfields as signed by default. */
+ if (RIDBIT_SETP (RID_UNSIGNED, specbits)
+ /* Traditionally, all bitfields are unsigned. */
+ || (bitfield && flag_traditional)
+ || (bitfield && ! flag_signed_bitfields
+ && (explicit_int || explicit_char
+ /* A typedef for plain `int' without `signed'
+ can be controlled just like plain `int'. */
+ || ! (typedef_decl != NULL_TREE
+ && C_TYPEDEF_EXPLICITLY_SIGNED (typedef_decl)))
+ && TREE_CODE (type) != ENUMERAL_TYPE
+ && RIDBIT_NOTSETP (RID_SIGNED, specbits)))
+ {
+ if (longlong)
+ type = long_long_unsigned_type_node;
+ else if (RIDBIT_SETP (RID_LONG, specbits))
+ type = long_unsigned_type_node;
+ else if (RIDBIT_SETP (RID_SHORT, specbits))
+ type = short_unsigned_type_node;
+ else if (type == char_type_node)
+ type = unsigned_char_type_node;
+ else if (typedef_decl)
+ type = unsigned_type (type);
+ else
+ type = unsigned_type_node;
+ }
+ else if (RIDBIT_SETP (RID_SIGNED, specbits)
+ && type == char_type_node)
+ type = signed_char_type_node;
+ else if (longlong)
+ type = long_long_integer_type_node;
+ else if (RIDBIT_SETP (RID_LONG, specbits))
+ type = long_integer_type_node;
+ else if (RIDBIT_SETP (RID_SHORT, specbits))
+ type = short_integer_type_node;
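+ /* E.g. plain `char c;' keeps char_type_node, whose signedness is
+ target-defined, while `signed char' and `unsigned char' are mapped
+ above to the explicitly signed and unsigned variants. */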
+
+ /* Set CONSTP if this declaration is `const', whether by
+ explicit specification or via a typedef.
+ Likewise for VOLATILEP. */
+
+ constp = !! RIDBIT_SETP (RID_CONST, specbits) + TYPE_READONLY (type);
+ volatilep = !! RIDBIT_SETP (RID_VOLATILE, specbits) + TYPE_VOLATILE (type);
+ staticp = 0;
+ inlinep = !! RIDBIT_SETP (RID_INLINE, specbits);
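+ /* CONSTP and VOLATILEP count an explicit qualifier plus any
+ qualifier already carried by TYPE (e.g. via a typedef), so a
+ value greater than 1 indicates duplication. */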
+ if (constp > 1)
+ warning ("duplicate `const'");
+ if (volatilep > 1)
+ warning ("duplicate `volatile'");
+ virtualp = RIDBIT_SETP (RID_VIRTUAL, specbits);
+
+ if (RIDBIT_SETP (RID_STATIC, specbits))
+ staticp = 1 + (decl_context == FIELD);
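+ /* STATICP is thus 2 for a static class member, 1 for any other
+ `static', and 0 when no `static' was given. */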
+
+ if (virtualp && staticp == 2)
+ {
+ cp_error ("member `%D' cannot be declared both virtual and static",
+ dname);
+ staticp = 0;
+ }
+ friendp = RIDBIT_SETP (RID_FRIEND, specbits);
+ RIDBIT_RESET (RID_VIRTUAL, specbits);
+ RIDBIT_RESET (RID_FRIEND, specbits);
+
+ if (RIDBIT_SETP (RID_MUTABLE, specbits))
+ {
+ if (decl_context == PARM)
+ {
+ error ("non-member `%s' cannot be declared mutable", name);
+ RIDBIT_RESET (RID_MUTABLE, specbits);
+ }
+ else if (friendp || decl_context == TYPENAME)
+ {
+ error ("non-object member `%s' cannot be declared mutable", name);
+ RIDBIT_RESET (RID_MUTABLE, specbits);
+ }
+ else if (staticp)
+ {
+ error ("static `%s' cannot be declared mutable", name);
+ RIDBIT_RESET (RID_MUTABLE, specbits);
+ }
+#if 0
+ if (RIDBIT_SETP (RID_TYPEDEF, specbits))
+ {
+ error ("non-object member `%s' cannot be declared mutable", name);
+ RIDBIT_RESET (RID_MUTABLE, specbits);
+ }
+ /* Because local typedefs are parsed twice, we don't want this
+ message here. */
+ else if (decl_context != FIELD)
+ {
+ error ("non-member `%s' cannot be declared mutable", name);
+ RIDBIT_RESET (RID_MUTABLE, specbits);
+ }
+#endif
+ }
+
+ /* Warn if two storage classes are given. Default to `auto'. */
+
+ if (RIDBIT_ANY_SET (specbits))
+ {
+ if (RIDBIT_SETP (RID_STATIC, specbits)) nclasses++;
+ if (RIDBIT_SETP (RID_EXTERN, specbits)) nclasses++;
+ if (decl_context == PARM && nclasses > 0)
+ error ("storage class specifiers invalid in parameter declarations");
+ if (RIDBIT_SETP (RID_TYPEDEF, specbits))
+ {
+ if (decl_context == PARM)
+ error ("typedef declaration invalid in parameter declaration");
+ nclasses++;
+ }
+ if (RIDBIT_SETP (RID_AUTO, specbits)) nclasses++;
+ if (RIDBIT_SETP (RID_REGISTER, specbits)) nclasses++;
+ }
+
+ /* Give error if `virtual' is used outside of class declaration. */
+ if (virtualp && current_class_name == NULL_TREE)
+ {
+ error ("virtual outside class declaration");
+ virtualp = 0;
+ }
+ if (current_class_name == NULL_TREE && RIDBIT_SETP (RID_MUTABLE, specbits))
+ {
+ error ("only members can be declared mutable");
+ RIDBIT_RESET (RID_MUTABLE, specbits);
+ }
+
+ /* Static anonymous unions are dealt with here. */
+ if (staticp && decl_context == TYPENAME
+ && TREE_CODE (declspecs) == TREE_LIST
+ && TREE_CODE (TREE_VALUE (declspecs)) == UNION_TYPE
+ && ANON_AGGRNAME_P (TYPE_IDENTIFIER (TREE_VALUE (declspecs))))
+ decl_context = FIELD;
+
+ /* Give error if `const,' `volatile,' `inline,' `friend,' or `virtual'
+ is used in a signature member function declaration. */
+ if (decl_context == FIELD
+ && IS_SIGNATURE (current_class_type)
+ && RIDBIT_NOTSETP(RID_TYPEDEF, specbits)
+ && !SIGNATURE_GROKKING_TYPEDEF (current_class_type))
+ {
+ if (constp)
+ {
+ error ("`const' specified for signature member function `%s'", name);
+ constp = 0;
+ }
+ if (volatilep)
+ {
+ error ("`volatile' specified for signature member function `%s'",
+ name);
+ volatilep = 0;
+ }
+ if (inlinep)
+ {
+ error ("`inline' specified for signature member function `%s'", name);
+ /* Later, we'll make signature member functions inline. */
+ inlinep = 0;
+ }
+ if (friendp)
+ {
+ error ("`friend' declaration in signature definition");
+ friendp = 0;
+ }
+ if (virtualp)
+ {
+ error ("`virtual' specified for signature member function `%s'",
+ name);
+ /* Later, we'll make signature member functions virtual. */
+ virtualp = 0;
+ }
+ }
+
+ /* Warn about storage classes that are invalid for certain
+ kinds of declarations (parameters, typenames, etc.). */
+
+ if (nclasses > 1)
+ error ("multiple storage classes in declaration of `%s'", name);
+ else if (decl_context != NORMAL && nclasses > 0)
+ {
+ if (decl_context == PARM
+ && (RIDBIT_SETP (RID_REGISTER, specbits)
+ || RIDBIT_SETP (RID_AUTO, specbits)))
+ ;
+ else if (decl_context == FIELD
+ && RIDBIT_SETP (RID_TYPEDEF, specbits))
+ {
+ /* Processing a typedef declaration nested within a class type
+ definition. */
+ register tree scanner;
+ register tree previous_declspec;
+ tree loc_typedecl;
+
+ if (initialized)
+ error ("typedef declaration includes an initializer");
+
+ /* To process a class-local typedef declaration, we descend down
+ the chain of declspecs looking for the `typedef' spec. When
+ we find it, we replace it with `static', and then recursively
+ call `grokdeclarator' with the original declarator and with
+ the newly adjusted declspecs. This call should return a
+ FIELD_DECL node with the TREE_TYPE (and other parts) set
+ appropriately. We can then just change the TREE_CODE on that
+ from FIELD_DECL to TYPE_DECL and we're done. */
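+ /* Illustrative example: given
+ class C { typedef int I; };
+ the `typedef' spec in `typedef int' is replaced below by `static',
+ the recursive call returns a FIELD_DECL for `I', and its code is
+ then flipped to TYPE_DECL. */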
+
+ for (previous_declspec = NULL_TREE, scanner = declspecs;
+ scanner;
+ previous_declspec = scanner, scanner = TREE_CHAIN (scanner))
+ {
+ if (TREE_VALUE (scanner) == ridpointers[(int) RID_TYPEDEF])
+ break;
+ }
+
+ if (scanner == IDENTIFIER_AS_LIST (ridpointers [(int) RID_TYPEDEF]))
+ {
+ if (previous_declspec)
+ TREE_CHAIN (previous_declspec)
+ = IDENTIFIER_AS_LIST (ridpointers [(int) RID_STATIC]);
+ else
+ declspecs
+ = IDENTIFIER_AS_LIST (ridpointers [(int) RID_STATIC]);
+ }
+ else
+ TREE_VALUE (scanner) = ridpointers[(int) RID_STATIC];
+
+ /* In the recursive call to grokdeclarator we need to know
+ whether we are working on a signature-local typedef. */
+ if (IS_SIGNATURE (current_class_type))
+ SIGNATURE_GROKKING_TYPEDEF (current_class_type) = 1;
+
+ loc_typedecl =
+ grokdeclarator (declarator, declspecs, FIELD, 0, NULL_TREE);
+
+ if (loc_typedecl != error_mark_node)
+ {
+ register int i = sizeof (struct lang_decl_flags) / sizeof (int);
+ register int *pi;
+
+ TREE_SET_CODE (loc_typedecl, TYPE_DECL);
+
+ pi = (int *) permalloc (sizeof (struct lang_decl_flags));
+ while (i > 0)
+ pi[--i] = 0;
+ DECL_LANG_SPECIFIC (loc_typedecl) = (struct lang_decl *) pi;
+ }
+
+ if (IS_SIGNATURE (current_class_type))
+ {
+ SIGNATURE_GROKKING_TYPEDEF (current_class_type) = 0;
+ if (loc_typedecl != error_mark_node && opaque_typedef)
+ SIGNATURE_HAS_OPAQUE_TYPEDECLS (current_class_type) = 1;
+ }
+
+ return loc_typedecl;
+ }
+ else if (decl_context == FIELD
+ && (! IS_SIGNATURE (current_class_type)
+ || SIGNATURE_GROKKING_TYPEDEF (current_class_type))
+ /* C++ allows static class elements */
+ && RIDBIT_SETP (RID_STATIC, specbits))
+ /* C++ also allows inlines and signed and unsigned elements,
+ but in those cases we don't come in here. */
+ ;
+ else
+ {
+ if (decl_context == FIELD)
+ {
+ tree tmp = TREE_OPERAND (declarator, 0);
+ register int op = IDENTIFIER_OPNAME_P (tmp);
+ error ("storage class specified for %s `%s'",
+ IS_SIGNATURE (current_class_type)
+ ? (op
+ ? "signature member operator"
+ : "signature member function")
+ : (op ? "member operator" : "structure field"),
+ op ? operator_name_string (tmp) : name);
+ }
+ else
+ error ((decl_context == PARM
+ ? "storage class specified for parameter `%s'"
+ : "storage class specified for typename"), name);
+ RIDBIT_RESET (RID_REGISTER, specbits);
+ RIDBIT_RESET (RID_AUTO, specbits);
+ RIDBIT_RESET (RID_EXTERN, specbits);
+
+ if (decl_context == FIELD && IS_SIGNATURE (current_class_type))
+ {
+ RIDBIT_RESET (RID_STATIC, specbits);
+ staticp = 0;
+ }
+ }
+ }
+ else if (RIDBIT_SETP (RID_EXTERN, specbits) && initialized && !funcdef_flag)
+ {
+ if (current_binding_level == global_binding_level)
+ {
+ /* It's common practice (and completely legal) to have a const
+ be initialized and declared extern. */
+ if (! constp)
+ warning ("`%s' initialized and declared `extern'", name);
+ }
+ else
+ error ("`%s' has both `extern' and initializer", name);
+ }
+ else if (RIDBIT_SETP (RID_EXTERN, specbits) && funcdef_flag
+ && current_binding_level != global_binding_level)
+ error ("nested function `%s' declared `extern'", name);
+ else if (current_binding_level == global_binding_level)
+ {
+ if (RIDBIT_SETP (RID_AUTO, specbits))
+ error ("top-level declaration of `%s' specifies `auto'", name);
+#if 0
+ if (RIDBIT_SETP (RID_REGISTER, specbits))
+ error ("top-level declaration of `%s' specifies `register'", name);
+#endif
+#if 0
+ /* I'm not sure under what circumstances we should turn
+ on the extern bit, and under what circumstances we should
+ warn if other bits are turned on. */
+ if (decl_context == NORMAL
+ && RIDBIT_NOSETP (RID_EXTERN, specbits)
+ && ! root_lang_context_p ())
+ {
+ RIDBIT_SET (RID_EXTERN, specbits);
+ }
+#endif
+ }
+
+ /* Now figure out the structure of the declarator proper.
+ Descend through it, creating more complex types, until we reach
+ the declared identifier (or NULL_TREE, in an absolute declarator). */
+
+ while (declarator && TREE_CODE (declarator) != IDENTIFIER_NODE)
+ {
+ /* Each level of DECLARATOR is either an ARRAY_REF (for ...[..]),
+ an INDIRECT_REF (for *...),
+ a CALL_EXPR (for ...(...)),
+ an identifier (for the name being declared)
+ or a null pointer (for the place in an absolute declarator
+ where the name was omitted).
+ For the last two cases, we have just exited the loop.
+
+ For C++ it could also be
+ a SCOPE_REF (for class :: ...). In this case, we have converted
+ sensible names to types, and those are the values we use to
+ qualify the member name.
+ an ADDR_EXPR (for &...),
+ a BIT_NOT_EXPR (for destructors)
+
+ At this point, TYPE is the type of elements of an array,
+ or for a function to return, or for a pointer to point to.
+ After this sequence of ifs, TYPE is the type of the
+ array or function or pointer, and DECLARATOR has had its
+ outermost layer removed. */
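+ /* Illustrative example: for `int *f ();' the declarator is an
+ INDIRECT_REF around a CALL_EXPR; the first pass below makes TYPE
+ `pointer to int', the second pass `function returning pointer to
+ int'. For `int (*f) ();' the nesting is reversed, yielding
+ `pointer to function returning int'. */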
+
+ if (TREE_CODE (type) == ERROR_MARK)
+ {
+ if (TREE_CODE (declarator) == SCOPE_REF)
+ declarator = TREE_OPERAND (declarator, 1);
+ else
+ declarator = TREE_OPERAND (declarator, 0);
+ continue;
+ }
+ if (quals != NULL_TREE
+ && (declarator == NULL_TREE
+ || TREE_CODE (declarator) != SCOPE_REF))
+ {
+ if (ctype == NULL_TREE && TREE_CODE (type) == METHOD_TYPE)
+ ctype = TYPE_METHOD_BASETYPE (type);
+ if (ctype != NULL_TREE)
+ {
+#if 0 /* not yet, should get fixed properly later */
+ tree dummy = make_type_decl (NULL_TREE, type);
+#else
+ tree dummy = build_decl (TYPE_DECL, NULL_TREE, type);
+#endif
+ ctype = grok_method_quals (ctype, dummy, quals);
+ type = TREE_TYPE (dummy);
+ quals = NULL_TREE;
+ }
+ }
+ switch (TREE_CODE (declarator))
+ {
+ case ARRAY_REF:
+ {
+ register tree itype = NULL_TREE;
+ register tree size = TREE_OPERAND (declarator, 1);
+
+ declarator = TREE_OPERAND (declarator, 0);
+
+ /* Check for some types that there cannot be arrays of. */
+
+ if (TYPE_MAIN_VARIANT (type) == void_type_node)
+ {
+ cp_error ("declaration of `%D' as array of voids", dname);
+ type = error_mark_node;
+ }
+
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ cp_error ("declaration of `%D' as array of functions", dname);
+ type = error_mark_node;
+ }
+
+ /* ARM $8.4.3: Since you can't have a pointer to a reference,
+ you can't have arrays of references. If we allowed them,
+ then we'd be saying x[i] is legal for an array x, but
+ then you'd have to ask: what does `*(x + i)' mean? */
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ {
+ if (decl_context == TYPENAME)
+ cp_error ("cannot make arrays of references");
+ else
+ cp_error ("declaration of `%D' as array of references",
+ dname);
+ type = error_mark_node;
+ }
+
+ if (TREE_CODE (type) == OFFSET_TYPE)
+ {
+ cp_error ("declaration of `%D' as array of data members",
+ dname);
+ type = error_mark_node;
+ }
+
+ if (TREE_CODE (type) == METHOD_TYPE)
+ {
+ cp_error ("declaration of `%D' as array of function members",
+ dname);
+ type = error_mark_node;
+ }
+
+ if (size == error_mark_node)
+ type = error_mark_node;
+
+ if (type == error_mark_node)
+ continue;
+
+ if (size)
+ {
+ /* Must suspend_momentary here because the index
+ type may need to live until the end of the function.
+ For example, it is used in the declaration of a
+ variable which requires destructing at the end of
+ the function; then build_vec_delete will need this
+ value. */
+ int yes = suspend_momentary ();
+ /* might be a cast */
+ if (TREE_CODE (size) == NOP_EXPR
+ && TREE_TYPE (size) == TREE_TYPE (TREE_OPERAND (size, 0)))
+ size = TREE_OPERAND (size, 0);
+
+ /* If this is a template parameter, it'll be constant, but
+ we don't know what the value is yet. */
+ if (TREE_CODE (size) == TEMPLATE_CONST_PARM)
+ goto dont_grok_size;
+
+ if (TREE_CODE (TREE_TYPE (size)) != INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (size)) != ENUMERAL_TYPE)
+ {
+ cp_error ("size of array `%D' has non-integer type",
+ dname);
+ size = integer_one_node;
+ }
+ if (TREE_READONLY_DECL_P (size))
+ size = decl_constant_value (size);
+ if (flag_ansi && integer_zerop (size))
+ cp_pedwarn ("ANSI C++ forbids zero-size array `%D'", dname);
+ if (TREE_CONSTANT (size))
+ {
+ constant_expression_warning (size);
+ if (INT_CST_LT (size, integer_zero_node))
+ {
+ cp_error ("size of array `%D' is negative", dname);
+ size = integer_one_node;
+ }
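+ /* A constant size N gives the index type 0 .. N-1. */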
+ itype = build_index_type (size_binop (MINUS_EXPR, size,
+ integer_one_node));
+ }
+ else
+ {
+ if (flag_ansi)
+ {
+ if (dname)
+ cp_pedwarn ("ANSI C++ forbids variable-size array `%D'",
+ dname);
+ else
+ cp_pedwarn ("ANSI C++ forbids variable-size array");
+ }
+ dont_grok_size:
+ itype =
+ build_binary_op (MINUS_EXPR, size, integer_one_node, 1);
+ /* Make sure the array size remains visibly nonconstant
+ even if it is (e.g.) a const variable with known value. */
+ size_varies = 1;
+ itype = variable_size (itype);
+ itype = build_index_type (itype);
+ }
+ resume_momentary (yes);
+ }
+
+ /* Build the array type itself, then merge any constancy or
+ volatility into the target type. We must do it in this order
+ to ensure that the TYPE_MAIN_VARIANT field of the array type
+ is set correctly. */
+
+ type = build_cplus_array_type (type, itype);
+ if (constp || volatilep)
+ type = c_build_type_variant (type, constp, volatilep);
+
+ ctype = NULL_TREE;
+ }
+ break;
+
+ case CALL_EXPR:
+ {
+ tree arg_types;
+
+ /* Declaring a function type.
+ Make sure we have a valid type for the function to return. */
+#if 0
+ /* Is this an error? Should they be merged into TYPE here? */
+ if (pedantic && (constp || volatilep))
+ pedwarn ("function declared to return const or volatile result");
+#else
+ /* Merge any constancy or volatility into the function return
+ type. */
+
+ if (constp || volatilep)
+ {
+ type = c_build_type_variant (type, constp, volatilep);
+ if (IS_AGGR_TYPE (type))
+ build_pointer_type (type);
+ constp = 0;
+ volatilep = 0;
+ }
+#endif
+
+ /* Warn about some types functions can't return. */
+
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ error ("`%s' declared as function returning a function", name);
+ type = integer_type_node;
+ }
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ error ("`%s' declared as function returning an array", name);
+ type = integer_type_node;
+ }
+
+ if (ctype == NULL_TREE
+ && decl_context == FIELD
+ && (friendp == 0 || dname == current_class_name))
+ ctype = current_class_type;
+
+ if (ctype && return_type == return_conversion)
+ TYPE_HAS_CONVERSION (ctype) = 1;
+ if (ctype && constructor_name (ctype) == dname)
+ {
+ /* We are within a class's scope. If our declarator name
+ is the same as the class name, and we are defining
+ a function, then it is a constructor/destructor, and
+ therefore returns a void type. */
+
+ if (flags == DTOR_FLAG)
+ {
+ /* ANSI C++ June 5 1992 WP 12.4.1. A destructor may
+ not be declared const or volatile. A destructor
+ may not be static. */
+ if (staticp == 2)
+ error ("destructor cannot be static member function");
+ if (TYPE_READONLY (type))
+ {
+ error ("destructors cannot be declared `const'");
+ return void_type_node;
+ }
+ if (TYPE_VOLATILE (type))
+ {
+ error ("destructors cannot be declared `volatile'");
+ return void_type_node;
+ }
+ if (decl_context == FIELD)
+ {
+ if (! member_function_or_else (ctype, current_class_type,
+ "destructor for alien class `%s' cannot be a member"))
+ return void_type_node;
+ }
+ }
+ else /* it's a constructor. */
+ {
+ /* ANSI C++ June 5 1992 WP 12.1.2. A constructor may
+ not be declared const or volatile. A constructor may
+ not be virtual. A constructor may not be static. */
+ if (staticp == 2)
+ error ("constructor cannot be static member function");
+ if (virtualp)
+ {
+ pedwarn ("constructors cannot be declared virtual");
+ virtualp = 0;
+ }
+ if (TYPE_READONLY (type))
+ {
+ error ("constructors cannot be declared `const'");
+ return void_type_node;
+ }
+ if (TYPE_VOLATILE (type))
+ {
+ error ("constructors cannot be declared `volatile'");
+ return void_type_node;
+ }
+ {
+ RID_BIT_TYPE tmp_bits;
+ bcopy ((void*)&specbits, (void*)&tmp_bits, sizeof(RID_BIT_TYPE));
+ RIDBIT_RESET (RID_INLINE, tmp_bits);
+ RIDBIT_RESET (RID_STATIC, tmp_bits);
+ if (RIDBIT_ANY_SET (tmp_bits))
+ error ("return value type specifier for constructor ignored");
+ }
+ type = TYPE_POINTER_TO (ctype);
+ if (decl_context == FIELD &&
+ IS_SIGNATURE (current_class_type))
+ {
+ error ("constructor not allowed in signature");
+ return void_type_node;
+ }
+ else if (decl_context == FIELD)
+ {
+ if (! member_function_or_else (ctype, current_class_type,
+ "constructor for alien class `%s' cannot be member"))
+ return void_type_node;
+ TYPE_HAS_CONSTRUCTOR (ctype) = 1;
+ if (return_type != return_ctor)
+ return NULL_TREE;
+ }
+ }
+ if (decl_context == FIELD)
+ staticp = 0;
+ }
+ else if (friendp && virtualp)
+ {
+ /* Cannot be both friend and virtual. */
+ error ("virtual functions cannot be friends");
+ RIDBIT_RESET (RID_FRIEND, specbits);
+ friendp = 0;
+ }
+
+ if (decl_context == NORMAL && friendp)
+ error ("friend declaration not in class definition");
+
+ /* Pick up type qualifiers which should be applied to `this'. */
+ quals = TREE_OPERAND (declarator, 2);
+
+ /* Traditionally, declaring return type float means double. */
+
+ if (flag_traditional
+ && TYPE_MAIN_VARIANT (type) == float_type_node)
+ {
+ type = build_type_variant (double_type_node,
+ TYPE_READONLY (type),
+ TYPE_VOLATILE (type));
+ }
+
+ /* Construct the function type and go to the next
+ inner layer of declarator. */
+
+ {
+ int funcdef_p;
+ tree inner_parms = TREE_OPERAND (declarator, 1);
+ tree inner_decl = TREE_OPERAND (declarator, 0);
+
+ declarator = TREE_OPERAND (declarator, 0);
+
+ if (inner_decl && TREE_CODE (inner_decl) == SCOPE_REF)
+ inner_decl = TREE_OPERAND (inner_decl, 1);
+
+ /* Say it's a definition only for the CALL_EXPR
+ closest to the identifier. */
+ funcdef_p =
+ (inner_decl && TREE_CODE (inner_decl) == IDENTIFIER_NODE)
+ ? funcdef_flag : 0;
+
+ /* FIXME: This is where default args should be fully
+ processed. */
+
+ arg_types = grokparms (inner_parms, funcdef_p);
+ }
+
+ if (declarator)
+ {
+ /* Get past destructors, etc.
+ We know we have one because FLAGS will be non-zero.
+
+ Complain about improper parameter lists here. */
+ if (TREE_CODE (declarator) == BIT_NOT_EXPR)
+ {
+ declarator = TREE_OPERAND (declarator, 0);
+
+ if (strict_prototype == 0 && arg_types == NULL_TREE)
+ arg_types = void_list_node;
+ else if (arg_types == NULL_TREE
+ || arg_types != void_list_node)
+ {
+ error ("destructors cannot be specified with parameters");
+ arg_types = void_list_node;
+ }
+ }
+ }
+
+ /* ANSI seems to say that `const int foo ();'
+ does not make the function foo const. */
+ type = build_function_type (type,
+ flag_traditional ? 0 : arg_types);
+ }
+ break;
+
+ case ADDR_EXPR:
+ case INDIRECT_REF:
+ /* Filter out pointers-to-references and references-to-references.
+ We can get these if a TYPE_DECL is used. */
+
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ {
+ error ("cannot declare %s to references",
+ TREE_CODE (declarator) == ADDR_EXPR
+ ? "references" : "pointers");
+ declarator = TREE_OPERAND (declarator, 0);
+ continue;
+ }
+
+ /* Merge any constancy or volatility into the target type
+ for the pointer. */
+
+ if (constp || volatilep)
+ {
+ /* A const or volatile signature pointer/reference is
+ pointing to a const or volatile object, i.e., the
+ `optr' is const or volatile, respectively, not the
+ signature pointer/reference itself. */
+ if (! IS_SIGNATURE (type))
+ {
+ type = c_build_type_variant (type, constp, volatilep);
+ if (IS_AGGR_TYPE (type))
+ build_pointer_type (type);
+ constp = 0;
+ volatilep = 0;
+ }
+ }
+
+ if (IS_SIGNATURE (type))
+ {
+ if (TREE_CODE (declarator) == ADDR_EXPR)
+ {
+ if (CLASSTYPE_METHOD_VEC (type) == NULL_TREE
+ && TYPE_SIZE (type))
+ cp_warning ("empty signature `%T' used in signature reference declaration",
+ type);
+#if 0
+ type = build_signature_reference_type (type,
+ constp, volatilep);
+#else
+ sorry ("signature reference");
+ return NULL_TREE;
+#endif
+ }
+ else
+ {
+ if (CLASSTYPE_METHOD_VEC (type) == NULL_TREE
+ && TYPE_SIZE (type))
+ cp_warning ("empty signature `%T' used in signature pointer declaration",
+ type);
+ type = build_signature_pointer_type (type,
+ constp, volatilep);
+ }
+ constp = 0;
+ volatilep = 0;
+ }
+ else if (TREE_CODE (declarator) == ADDR_EXPR)
+ {
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ error ("cannot declare references to functions; use pointer to function instead");
+ type = build_pointer_type (type);
+ }
+ else
+ {
+ if (TYPE_MAIN_VARIANT (type) == void_type_node)
+ error ("invalid type: `void &'");
+ else
+ type = build_reference_type (type);
+ }
+ }
+ else if (TREE_CODE (type) == METHOD_TYPE)
+ {
+ type = build_ptrmemfunc_type (build_pointer_type (type));
+ }
+ else
+ type = build_pointer_type (type);
+
+ /* Process a list of type modifier keywords (such as
+ const or volatile) that were given inside the `*' or `&'. */
+
+ if (TREE_TYPE (declarator))
+ {
+ register tree typemodlist;
+ int erred = 0;
+ for (typemodlist = TREE_TYPE (declarator); typemodlist;
+ typemodlist = TREE_CHAIN (typemodlist))
+ {
+ if (TREE_VALUE (typemodlist) == ridpointers[(int) RID_CONST])
+ constp++;
+ else if (TREE_VALUE (typemodlist) == ridpointers[(int) RID_VOLATILE])
+ volatilep++;
+ else if (!erred)
+ {
+ erred = 1;
+ error ("invalid type modifier within %s declarator",
+ TREE_CODE (declarator) == ADDR_EXPR
+ ? "reference" : "pointer");
+ }
+ }
+ if (constp > 1)
+ pedwarn ("duplicate `const'");
+ if (volatilep > 1)
+ pedwarn ("duplicate `volatile'");
+ if (TREE_CODE (declarator) == ADDR_EXPR
+ && (constp || volatilep))
+ {
+ if (constp)
+ warning ("discarding `const' applied to a reference");
+ if (volatilep)
+ warning ("discarding `volatile' applied to a reference");
+ constp = volatilep = 0;
+ }
+ }
+ declarator = TREE_OPERAND (declarator, 0);
+ ctype = NULL_TREE;
+ break;
+
+ case SCOPE_REF:
+ {
+ /* We have converted type names to NULL_TREE if the
+ name was bogus, or to a _TYPE node, if not.
+
+ The variable CTYPE holds the type we will ultimately
+ resolve to. The code here just needs to build
+ up appropriate member types. */
+ tree sname = TREE_OPERAND (declarator, 1);
+ /* Destructors can have their visibilities changed as well. */
+ if (TREE_CODE (sname) == BIT_NOT_EXPR)
+ sname = TREE_OPERAND (sname, 0);
+
+ if (TREE_COMPLEXITY (declarator) == 0)
+ /* This needs to be here, in case we are called
+ multiple times. */ ;
+ else if (friendp && (TREE_COMPLEXITY (declarator) < 2))
+ /* don't fall out into global scope. Hides real bug? --eichin */ ;
+ else if (TREE_COMPLEXITY (declarator) == current_class_depth)
+ {
+ /* This pop_nested_class corresponds to the
+ push_nested_class used to push into class scope for
+ parsing the argument list of a function decl, in
+ qualified_id. */
+ pop_nested_class (1);
+ TREE_COMPLEXITY (declarator) = current_class_depth;
+ }
+ else
+ my_friendly_abort (16);
+
+ if (TREE_OPERAND (declarator, 0) == NULL_TREE)
+ {
+ /* We had a reference to a global decl, or
+ perhaps we were given a non-aggregate typedef,
+ in which case we cleared this out, and should just
+ keep going as though it wasn't there. */
+ declarator = sname;
+ continue;
+ }
+ ctype = TREE_OPERAND (declarator, 0);
+
+ if (sname == NULL_TREE)
+ goto done_scoping;
+
+ if (TREE_CODE (sname) == IDENTIFIER_NODE)
+ {
+ /* This is the `standard' use of the scoping operator:
+ basetype :: member . */
+
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ if (current_class_type == NULL_TREE
+ || TYPE_MAIN_VARIANT (ctype) == current_class_type
+ || friendp)
+ type = build_cplus_method_type (build_type_variant (ctype, constp, volatilep),
+ TREE_TYPE (type), TYPE_ARG_TYPES (type));
+ else
+ {
+ cp_error ("cannot declare member function `%T::%s' within `%T'",
+ ctype, name, current_class_type);
+ return void_type_node;
+ }
+ }
+ else if (TYPE_MAIN_VARIANT (ctype) == current_class_type)
+ {
+ if (extra_warnings)
+ cp_warning ("redundant qualification `%T' on member `%s' ignored",
+ ctype, name);
+ type = build_offset_type (ctype, type);
+ }
+ else if (TYPE_SIZE (ctype) != NULL_TREE
+ || (RIDBIT_SETP (RID_TYPEDEF, specbits)))
+ {
+ tree t;
+ /* Have to move this code elsewhere in this function.
+ This code is used for, e.g., `typedef int A::M; M *pm;'. */
+
+ if (explicit_int == -1 && decl_context == FIELD
+ && funcdef_flag == 0)
+ {
+ /* The code in here should only be used to build
+ stuff that will be grokked as access decls. */
+ t = lookup_field (ctype, sname, 0, 0);
+ if (t)
+ {
+ t = build_lang_field_decl (FIELD_DECL, build_nt (SCOPE_REF, ctype, t), type);
+ DECL_INITIAL (t) = init;
+ return t;
+ }
+ /* No such field, try member functions. */
+ t = lookup_fnfields (TYPE_BINFO (ctype), sname, 0);
+ if (t)
+ {
+ if (flags == DTOR_FLAG)
+ t = TREE_VALUE (t);
+ else if (CLASSTYPE_METHOD_VEC (ctype)
+ && TREE_VALUE (t) == TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (ctype), 0))
+ {
+ /* Don't include destructor with constructors. */
+ t = DECL_CHAIN (TREE_VALUE (t));
+ if (t == NULL_TREE)
+ error ("class `%s' does not have any constructors", IDENTIFIER_POINTER (sname));
+ t = build_tree_list (NULL_TREE, t);
+ }
+ t = build_lang_field_decl (FIELD_DECL, build_nt (SCOPE_REF, ctype, t), type);
+ DECL_INITIAL (t) = init;
+ return t;
+ }
+
+ cp_error
+ ("field `%D' is not a member of structure `%T'",
+ sname, ctype);
+ }
+
+ if (current_class_type)
+ {
+ if (TYPE_MAIN_VARIANT (ctype) != current_class_type)
+ {
+ cp_error ("cannot declare member `%T::%s' within `%T'",
+ ctype, name, current_class_type);
+ return void_type_node;
+ }
+ else if (extra_warnings)
+ cp_warning ("extra qualification `%T' on member `%s' ignored",
+ ctype, name);
+ }
+ type = build_offset_type (ctype, type);
+ }
+ else if (uses_template_parms (ctype))
+ {
+ enum tree_code c;
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ type = build_cplus_method_type (build_type_variant (ctype, constp, volatilep),
+ TREE_TYPE (type),
+ TYPE_ARG_TYPES (type));
+ c = FUNCTION_DECL;
+ }
+ }
+ else
+ {
+ cp_error ("structure `%T' not yet defined", ctype);
+ return error_mark_node;
+ }
+
+ declarator = sname;
+ }
+ else if (TREE_CODE (sname) == SCOPE_REF)
+ my_friendly_abort (17);
+ else
+ {
+ done_scoping:
+ declarator = TREE_OPERAND (declarator, 1);
+ if (declarator && TREE_CODE (declarator) == CALL_EXPR)
+ /* In this case, we will deal with it later. */
+ ;
+ else
+ {
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ type = build_cplus_method_type (build_type_variant (ctype, constp, volatilep), TREE_TYPE (type), TYPE_ARG_TYPES (type));
+ else
+ type = build_offset_type (ctype, type);
+ }
+ }
+ }
+ break;
+
+ case BIT_NOT_EXPR:
+ declarator = TREE_OPERAND (declarator, 0);
+ break;
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case ENUMERAL_TYPE:
+ declarator = NULL_TREE;
+ break;
+
+ case ERROR_MARK:
+ declarator = NULL_TREE;
+ break;
+
+ default:
+ my_friendly_abort (158);
+ }
+ }
+
+ /* Now TYPE has the actual type. */
+
+ /* If this is declaring a typedef name, return a TYPE_DECL. */
+
+ if (RIDBIT_SETP (RID_TYPEDEF, specbits))
+ {
+ tree decl;
+
+ /* Note that the grammar rejects storage classes
+ in typenames, fields or parameters. */
+ if (constp || volatilep)
+ type = c_build_type_variant (type, constp, volatilep);
+
+ /* If the user declares "struct {...} foo" then `foo' will have
+ an anonymous name. Fill that name in now. Nothing can
+ refer to it, so nothing needs to know about the name change.
+ The TYPE_NAME field was filled in by build_struct_xref. */
+ if (type != error_mark_node
+ && TYPE_NAME (type)
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && ANON_AGGRNAME_P (TYPE_IDENTIFIER (type)))
+ {
+ /* replace the anonymous name with the real name everywhere. */
+ lookup_tag_reverse (type, declarator);
+ TYPE_IDENTIFIER (type) = declarator;
+
+ if (TYPE_LANG_SPECIFIC (type))
+ TYPE_WAS_ANONYMOUS (type) = 1;
+
+ {
+ tree d = TYPE_NAME (type), c = DECL_CONTEXT (d);
+
+ if (!c)
+ set_nested_typename (d, 0, declarator, type);
+ else if (TREE_CODE (c) == FUNCTION_DECL)
+ set_nested_typename (d, DECL_ASSEMBLER_NAME (c),
+ declarator, type);
+ else
+ set_nested_typename (d, TYPE_NESTED_NAME (c), declarator, type);
+ }
+ }
+
+#if 0 /* not yet, should get fixed properly later */
+ decl = make_type_decl (declarator, type);
+#else
+ decl = build_decl (TYPE_DECL, declarator, type);
+#endif
+ if (TREE_CODE (type) == OFFSET_TYPE || TREE_CODE (type) == METHOD_TYPE)
+ {
+ cp_error_at ("typedef name may not be class-qualified", decl);
+ return NULL_TREE;
+ }
+ else if (quals)
+ {
+ if (ctype == NULL_TREE)
+ {
+ if (TREE_CODE (type) != METHOD_TYPE)
+ cp_error_at ("invalid type qualifier for non-method type", decl);
+ else
+ ctype = TYPE_METHOD_BASETYPE (type);
+ }
+ if (ctype != NULL_TREE)
+ grok_method_quals (ctype, decl, quals);
+ }
+
+ if (RIDBIT_SETP (RID_SIGNED, specbits)
+ || (typedef_decl && C_TYPEDEF_EXPLICITLY_SIGNED (typedef_decl)))
+ C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1;
+
+ if (RIDBIT_SETP (RID_MUTABLE, specbits))
+ {
+ error ("non-object member `%s' cannot be declared mutable", name);
+ }
+
+ return decl;
+ }
+
+ /* Detect the case of an array type of unspecified size
+ which came, as such, direct from a typedef name.
+ We must copy the type, so that each identifier gets
+ a distinct type, so that each identifier's size can be
+ controlled separately by its own initializer. */
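+ /* Illustrative example:
+ typedef int A[];
+ A x = { 1, 2 };
+ A y = { 1, 2, 3 };
+ X and Y must not share A's type node, since each initializer
+ determines a different size. */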
+
+ if (type == typedef_type && TREE_CODE (type) == ARRAY_TYPE
+ && TYPE_DOMAIN (type) == NULL_TREE)
+ {
+ type = build_cplus_array_type (TREE_TYPE (type), TYPE_DOMAIN (type));
+ }
+
+ /* If this is a type name (such as, in a cast or sizeof),
+ compute the type and return it now. */
+
+ if (decl_context == TYPENAME)
+ {
+ /* Note that the grammar rejects storage classes
+ in typenames, fields or parameters. */
+ if (constp || volatilep)
+ if (IS_SIGNATURE (type))
+ error ("`const' or `volatile' specified with signature type");
+ else
+ type = c_build_type_variant (type, constp, volatilep);
+
+ /* Special case: "friend class foo" looks like a TYPENAME context. */
+ if (friendp)
+ {
+ /* A friendly class? */
+ if (current_class_type)
+ make_friend_class (current_class_type, TYPE_MAIN_VARIANT (type));
+ else
+ error("trying to make class `%s' a friend of global scope",
+ TYPE_NAME_STRING (type));
+ type = void_type_node;
+ }
+ else if (quals)
+ {
+#if 0 /* not yet, should get fixed properly later */
+ tree dummy = make_type_decl (declarator, type);
+#else
+ tree dummy = build_decl (TYPE_DECL, declarator, type);
+#endif
+ if (ctype == NULL_TREE)
+ {
+ my_friendly_assert (TREE_CODE (type) == METHOD_TYPE, 159);
+ ctype = TYPE_METHOD_BASETYPE (type);
+ }
+ grok_method_quals (ctype, dummy, quals);
+ type = TREE_TYPE (dummy);
+ }
+
+ return type;
+ }
+ else if (declarator == NULL_TREE && decl_context != PARM
+ && TREE_CODE (type) != UNION_TYPE
+ && ! bitfield)
+ {
+ cp_error ("abstract declarator `%T' used as declaration", type);
+ declarator = make_anon_name ();
+ }
+
+ /* `void' at top level (not within pointer)
+ is allowed only in typedefs or type names.
+ We don't complain about parms either, but that is because
+ a better error message can be made later. */
+
+ if (TYPE_MAIN_VARIANT (type) == void_type_node && decl_context != PARM)
+ {
+ if (TREE_CODE (declarator) == IDENTIFIER_NODE)
+ {
+ if (IDENTIFIER_OPNAME_P (declarator))
+#if 0 /* How could this happen? */
+ error ("operator `%s' declared void",
+ operator_name_string (declarator));
+#else
+ my_friendly_abort (356);
+#endif
+ else
+ error ("variable or field `%s' declared void", name);
+ }
+ else
+ error ("variable or field declared void");
+ type = integer_type_node;
+ }
+
+ /* Now create the decl, which may be a VAR_DECL, a PARM_DECL
+ or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */
+
+ {
+ register tree decl;
+
+ if (decl_context == PARM)
+ {
+ if (ctype)
+ error ("cannot use `::' in parameter declaration");
+
+ /* A parameter declared as an array of T is really a pointer to T.
+ One declared as a function is really a pointer to a function.
+ One declared as a member is really a pointer to member. */
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ /* Transfer const-ness of array into that of type pointed to. */
+ type = build_pointer_type
+ (c_build_type_variant (TREE_TYPE (type), constp, volatilep));
+ volatilep = constp = 0;
+ }
+ else if (TREE_CODE (type) == FUNCTION_TYPE)
+ type = build_pointer_type (type);
+ else if (TREE_CODE (type) == OFFSET_TYPE)
+ type = build_pointer_type (type);
+
+ decl = build_decl (PARM_DECL, declarator, type);
+
+ bad_specifiers (decl, "parameter", virtualp, quals != NULL_TREE,
+ inlinep, friendp, raises != NULL_TREE);
+ if (current_class_type
+ && IS_SIGNATURE (current_class_type))
+ {
+ if (inlinep)
+ error ("parameter of signature member function declared `inline'");
+ if (RIDBIT_SETP (RID_AUTO, specbits))
+ error ("parameter of signature member function declared `auto'");
+ if (RIDBIT_SETP (RID_REGISTER, specbits))
+ error ("parameter of signature member function declared `register'");
+ }
+
+ /* Compute the type actually passed in the parmlist,
+ for the case where there is no prototype.
+ (For example, shorts and chars are passed as ints.)
+ When there is a prototype, this is overridden later. */
+
+ DECL_ARG_TYPE (decl) = type_promotes_to (type);
+ }
+ else if (decl_context == FIELD)
+ {
+ if (type == error_mark_node)
+ {
+ /* Happens when declaring arrays of sizes which
+ are error_mark_node, for example. */
+ decl = NULL_TREE;
+ }
+ else if (TREE_CODE (type) == FUNCTION_TYPE)
+ {
+ int publicp = 0;
+
+ if (friendp == 0)
+ {
+ if (ctype == NULL_TREE)
+ ctype = current_class_type;
+
+ if (ctype == NULL_TREE)
+ {
+ cp_error ("can't make `%D' into a method -- not in a class",
+ declarator);
+ return void_type_node;
+ }
+
+ /* ``A union may [ ... ] not [ have ] virtual functions.''
+ ARM 9.5 */
+ if (virtualp && TREE_CODE (ctype) == UNION_TYPE)
+ {
+ cp_error ("function `%D' declared virtual inside a union",
+ declarator);
+ return void_type_node;
+ }
+
+ if (declarator == ansi_opname[(int) NEW_EXPR]
+ || declarator == ansi_opname[(int) VEC_NEW_EXPR]
+ || declarator == ansi_opname[(int) DELETE_EXPR]
+ || declarator == ansi_opname[(int) VEC_DELETE_EXPR])
+ {
+ if (virtualp)
+ {
+ cp_error ("`%D' cannot be declared virtual, since it is always static",
+ declarator);
+ virtualp = 0;
+ }
+ }
+ else if (staticp < 2)
+ type = build_cplus_method_type (build_type_variant (ctype, constp, volatilep),
+ TREE_TYPE (type), TYPE_ARG_TYPES (type));
+ }
+
+ /* Tell grokfndecl if it needs to set TREE_PUBLIC on the node. */
+ publicp = (RIDBIT_SETP (RID_EXTERN, specbits)
+ || (ctype != NULL_TREE
+ && funcdef_flag >= 0
+ && RIDBIT_NOTSETP (RID_INLINE, specbits))
+ || (friendp
+ && ! funcdef_flag
+ && RIDBIT_NOTSETP (RID_STATIC, specbits)
+ && RIDBIT_NOTSETP (RID_INLINE, specbits)));
+ decl = grokfndecl (ctype, type, declarator,
+ virtualp, flags, quals,
+ raises, friendp ? -1 : 0, publicp);
+ if (decl == NULL_TREE)
+ return NULL_TREE;
+
+ DECL_INLINE (decl) = inlinep;
+ }
+ else if (TREE_CODE (type) == METHOD_TYPE)
+ {
+ /* All method decls are public, so tell grokfndecl to set
+ TREE_PUBLIC, also. */
+ decl = grokfndecl (ctype, type, declarator,
+ virtualp, flags, quals,
+ raises, friendp ? -1 : 0, 1);
+ if (decl == NULL_TREE)
+ return NULL_TREE;
+
+ DECL_INLINE (decl) = inlinep;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE
+ && CLASSTYPE_DECLARED_EXCEPTION (type))
+ {
+ /* Handle a class-local exception declaration. */
+ decl = build_lang_field_decl (VAR_DECL, declarator, type);
+ if (ctype == NULL_TREE)
+ ctype = current_class_type;
+ return void_type_node;
+ }
+ else if (TYPE_SIZE (type) == NULL_TREE && !staticp
+ && (TREE_CODE (type) != ARRAY_TYPE || initialized == 0))
+ {
+ error ("field `%s' has incomplete type",
+ IDENTIFIER_POINTER (declarator));
+
+ /* If we're instantiating a template, tell them which
+ instantiation made the field's type be incomplete. */
+ if (current_class_type
+ && TYPE_NAME (current_class_type)
+ && IDENTIFIER_TEMPLATE (DECL_NAME (TYPE_NAME (current_class_type)))
+ && declspecs && TREE_VALUE (declspecs)
+ && TREE_TYPE (TREE_VALUE (declspecs)) == type)
+ error (" in instantiation of template `%s'",
+ TYPE_NAME_STRING (current_class_type));
+
+ type = error_mark_node;
+ decl = NULL_TREE;
+ }
+ else
+ {
+ if (friendp)
+ {
+ error ("`%s' is neither function nor method; cannot be declared friend",
+ IDENTIFIER_POINTER (declarator));
+ friendp = 0;
+ }
+ decl = NULL_TREE;
+ }
+
+ if (friendp)
+ {
+ /* Friends are treated specially. */
+ if (ctype == current_class_type)
+ warning ("member functions are implicitly friends of their class");
+ else
+ {
+ tree t = NULL_TREE;
+ if (decl && DECL_NAME (decl))
+ t = do_friend (ctype, declarator, decl,
+ last_function_parms, flags, quals);
+ if (t && funcdef_flag)
+ return t;
+
+ return void_type_node;
+ }
+ }
+
+ /* Structure field. It may not be a function, except in C++. */
+
+ if (decl == NULL_TREE)
+ {
+ if (initialized)
+ {
+ /* Motion 10 at San Diego: If a static const integral data
+ member is initialized with an integral constant
+ expression, the initializer may appear either in the
+ declaration (within the class), or in the definition,
+ but not both. If it appears in the class, the member is
+ a member constant. The file-scope definition is always
+ required. */
+ if (staticp)
+ {
+ if (pedantic)
+ {
+ if (! constp)
+ cp_pedwarn ("ANSI C++ forbids in-class initialization of non-const static member `%D'",
+ declarator);
+
+ else if (! INTEGRAL_TYPE_P (type))
+ cp_pedwarn ("ANSI C++ forbids member constant `%D' of non-integral type `%T'", declarator, type);
+ }
+ }
+
+ /* Note that initialization of const members is prohibited
+ by the draft ANSI standard, though it appears to be in
+ common practice. 12.6.2: The argument list is used to
+ initialize the named nonstatic member.... This (or an
+ initializer list) is the only way to initialize
+ nonstatic const and reference members. */
+ else if (flag_ansi || ! constp)
+ cp_pedwarn ("ANSI C++ forbids initialization of %s `%D'",
+ constp ? "const member" : "member", declarator);
+ }
+
+ if (staticp || (constp && initialized))
+ {
+ /* C++ allows static class members.
+ All other work for this is done by grokfield.
+ This VAR_DECL is built by build_lang_field_decl.
+ All other VAR_DECLs are built by build_decl. */
+ decl = build_lang_field_decl (VAR_DECL, declarator, type);
+ TREE_STATIC (decl) = 1;
+ /* In class context, 'static' means public access. */
+ TREE_PUBLIC (decl) = DECL_EXTERNAL (decl) = !!staticp;
+ }
+ else
+ {
+ decl = build_lang_field_decl (FIELD_DECL, declarator, type);
+ if (RIDBIT_SETP (RID_MUTABLE, specbits))
+ {
+ DECL_MUTABLE_P (decl) = 1;
+ RIDBIT_RESET (RID_MUTABLE, specbits);
+ }
+ }
+
+ bad_specifiers (decl, "field", virtualp, quals != NULL_TREE,
+ inlinep, friendp, raises != NULL_TREE);
+ }
+ }
+ else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
+ {
+ tree original_name = declarator;
+ int publicp = 0;
+
+ if (! declarator)
+ return NULL_TREE;
+
+ if (RIDBIT_SETP (RID_AUTO, specbits))
+ error ("storage class `auto' invalid for function `%s'", name);
+ else if (RIDBIT_SETP (RID_REGISTER, specbits))
+ error ("storage class `register' invalid for function `%s'", name);
+
+ /* Function declaration not at top level.
+ Storage classes other than `extern' are not allowed
+ and `extern' makes no difference. */
+ if (current_binding_level != global_binding_level
+ && ! processing_template_decl
+ && (RIDBIT_SETP (RID_STATIC, specbits)
+ || RIDBIT_SETP (RID_INLINE, specbits))
+ && pedantic)
+ {
+ if (RIDBIT_SETP (RID_STATIC, specbits))
+ pedwarn ("storage class `static' invalid for function `%s' declared out of global scope", name);
+ else
+ pedwarn ("storage class `inline' invalid for function `%s' declared out of global scope", name);
+ }
+
+ if (ctype == NULL_TREE)
+ {
+ if (virtualp)
+ {
+ error ("virtual non-class function `%s'", name);
+ virtualp = 0;
+ }
+
+ if (current_lang_name == lang_name_cplusplus
+ && ! (IDENTIFIER_LENGTH (original_name) == 4
+ && IDENTIFIER_POINTER (original_name)[0] == 'm'
+ && strcmp (IDENTIFIER_POINTER (original_name), "main") == 0)
+ && ! (IDENTIFIER_LENGTH (original_name) > 10
+ && IDENTIFIER_POINTER (original_name)[0] == '_'
+ && IDENTIFIER_POINTER (original_name)[1] == '_'
+ && strncmp (IDENTIFIER_POINTER (original_name)+2, "builtin_", 8) == 0))
+ /* Plain overloading: will not be grok'd by grokclassfn. */
+ declarator = build_decl_overload (dname, TYPE_ARG_TYPES (type), 0);
+ }
+ else if (TREE_CODE (type) == FUNCTION_TYPE && staticp < 2)
+ type = build_cplus_method_type (build_type_variant (ctype, constp, volatilep),
+ TREE_TYPE (type), TYPE_ARG_TYPES (type));
+
+ /* Record presence of `static'. In C++, `inline' is like `static'.
+ Methods of classes should be public, unless we're dropping them
+ into some other file, so we don't clear TREE_PUBLIC for them. */
+ publicp
+ = ((ctype
+ && CLASSTYPE_INTERFACE_KNOWN (ctype))
+ || !(RIDBIT_SETP (RID_STATIC, specbits)
+ || RIDBIT_SETP (RID_INLINE, specbits)));
+
+ decl = grokfndecl (ctype, type, original_name,
+ virtualp, flags, quals,
+ raises,
+ processing_template_decl ? 0 : friendp ? 2 : 1,
+ publicp);
+ if (decl == NULL_TREE)
+ return NULL_TREE;
+
+ if (ctype == NULL_TREE && DECL_LANGUAGE (decl) != lang_c)
+ DECL_ASSEMBLER_NAME (decl) = declarator;
+
+ if (staticp == 1)
+ {
+ int illegal_static = 0;
+
+ /* Don't allow a static member function in a class, and forbid
+ declaring main to be static. */
+ if (TREE_CODE (type) == METHOD_TYPE)
+ {
+ cp_error_at ("cannot declare member function `%D' to have static linkage", decl);
+ illegal_static = 1;
+ }
+ else if (! ctype
+ && IDENTIFIER_LENGTH (original_name) == 4
+ && IDENTIFIER_POINTER (original_name)[0] == 'm'
+ && ! strcmp (IDENTIFIER_POINTER (original_name), "main"))
+ {
+ error ("cannot declare function `main' to have static linkage");
+ illegal_static = 1;
+ }
+ else if (current_function_decl)
+ {
+ /* FIXME need arm citation */
+ error ("cannot declare static function inside another function");
+ illegal_static = 1;
+ }
+
+ if (illegal_static)
+ {
+ staticp = 0;
+ RIDBIT_RESET (RID_STATIC, specbits);
+ }
+ }
+
+ /* Record presence of `inline', if it is reasonable. */
+ if (inlinep)
+ {
+ tree last = tree_last (TYPE_ARG_TYPES (type));
+
+ if (! ctype
+ && ! strcmp (IDENTIFIER_POINTER (original_name), "main"))
+ error ("cannot inline function `main'");
+ else if (last && last != void_list_node)
+ cp_warning ("cannot inline function `%D' which takes `...'", original_name);
+ else
+ /* Assume that otherwise the function can be inlined. */
+ DECL_INLINE (decl) = 1;
+
+ if (RIDBIT_SETP (RID_EXTERN, specbits))
+ {
+ current_extern_inline = 1;
+ if (flag_ansi)
+ pedwarn ("ANSI C++ does not permit `extern inline'");
+ }
+ }
+ }
+ else
+ {
+ /* It's a variable. */
+
+ /* An uninitialized decl with `extern' is a reference. */
+ decl = grokvardecl (type, declarator, specbits, initialized);
+ bad_specifiers (decl, "variable", virtualp, quals != NULL_TREE,
+ inlinep, friendp, raises != NULL_TREE);
+
+ if (ctype)
+ {
+ DECL_CONTEXT (decl) = ctype;
+ if (staticp == 1)
+ {
+ cp_error ("static member `%D' re-declared as static",
+ decl);
+ staticp = 0;
+ RIDBIT_RESET (RID_STATIC, specbits);
+ }
+ if (RIDBIT_SETP (RID_EXTERN, specbits))
+ {
+ cp_error ("cannot explicitly declare member `%#D' to have extern linkage",
+ decl);
+ RIDBIT_RESET (RID_EXTERN, specbits);
+ }
+ }
+ }
+
+ if (RIDBIT_SETP (RID_MUTABLE, specbits))
+ {
+ error ("`%s' cannot be declared mutable", name);
+ }
+
+ /* Record `register' declaration for warnings on &
+ and in case doing stupid register allocation. */
+
+ if (RIDBIT_SETP (RID_REGISTER, specbits))
+ DECL_REGISTER (decl) = 1;
+
+ if (RIDBIT_SETP (RID_EXTERN, specbits))
+ DECL_THIS_EXTERN (decl) = 1;
+
+ /* Record constancy and volatility. */
+
+ if (constp)
+ TREE_READONLY (decl) = TREE_CODE (type) != REFERENCE_TYPE;
+ if (volatilep)
+ {
+ TREE_SIDE_EFFECTS (decl) = 1;
+ TREE_THIS_VOLATILE (decl) = 1;
+ }
+
+ return decl;
+ }
+}
+
+/* Tell if a parmlist/exprlist looks like an exprlist or a parmlist.
+ An empty exprlist is a parmlist. An exprlist which
+ contains only identifiers at the global level
+ is a parmlist. Otherwise, it is an exprlist. */
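+/* Illustrative example: at file scope `int d (x, y);' yields a list
+ of bare identifiers and is therefore taken as an old-style
+ parmlist; inside a function the same parenthesized names are
+ treated as an exprlist, i.e. expressions initializing `d'. */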
+int
+parmlist_is_exprlist (exprs)
+ tree exprs;
+{
+ if (exprs == NULL_TREE || TREE_PARMLIST (exprs))
+ return 0;
+
+ if (current_binding_level == global_binding_level)
+ {
+ /* At the global level, if these are all identifiers,
+ then it is a parmlist. */
+ while (exprs)
+ {
+ if (TREE_CODE (TREE_VALUE (exprs)) != IDENTIFIER_NODE)
+ return 1;
+ exprs = TREE_CHAIN (exprs);
+ }
+ return 0;
+ }
+ return 1;
+}
+
+/* Subroutine of `grokparms'. In a function definition, arg types must
+ be complete.
+
+ C++: also subroutine of `start_function'. */
+static void
+require_complete_types_for_parms (parms)
+ tree parms;
+{
+ while (parms)
+ {
+ tree type = TREE_TYPE (parms);
+ if (TYPE_SIZE (type) == NULL_TREE)
+ {
+ if (DECL_NAME (parms))
+ error ("parameter `%s' has incomplete type",
+ IDENTIFIER_POINTER (DECL_NAME (parms)));
+ else
+ error ("parameter has incomplete type");
+ TREE_TYPE (parms) = error_mark_node;
+ }
+#if 0
+ /* If the arg types are incomplete in a declaration,
+ they must include undefined tags.
+ These tags can never be defined in the scope of the declaration,
+ so the types can never be completed,
+ and no call can be compiled successfully. */
+ /* This is not the right behavior for C++, but not having
+ it is also probably wrong. */
+ else
+ {
+ /* Now warn if it is a pointer to an incomplete type. */
+ while (TREE_CODE (type) == POINTER_TYPE
+ || TREE_CODE (type) == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+ type = TYPE_MAIN_VARIANT (type);
+ if (TYPE_SIZE (type) == NULL_TREE)
+ {
+ if (DECL_NAME (parms) != NULL_TREE)
+ warning ("parameter `%s' points to incomplete type",
+ IDENTIFIER_POINTER (DECL_NAME (parms)));
+ else
+ warning ("parameter points to incomplete type");
+ }
+ }
+#endif
+ parms = TREE_CHAIN (parms);
+ }
+}
+
+/* Decode the list of parameter types for a function type.
+ Given the list of things declared inside the parens,
+ return a list of types.
+
+ The list we receive can have three kinds of elements:
+ an IDENTIFIER_NODE for names given without types,
+ a TREE_LIST node for arguments given as typespecs or names with typespecs,
+ or void_type_node, to mark the end of an argument list
+ when additional arguments are not permitted (... was not used).
+
+ FUNCDEF_FLAG is nonzero for a function definition, 0 for
+ a mere declaration. A nonempty identifier-list gets an error message
+ when FUNCDEF_FLAG is zero.
+ If FUNCDEF_FLAG is 1, then parameter types must be complete.
+ If FUNCDEF_FLAG is -1, then parameter types may be incomplete.
+
+ If all elements of the input list contain types,
+ we return a list of the types.
+ If all elements contain no type (except perhaps a void_type_node
+ at the end), we return a null list.
+ If some have types and some do not, it is an error, and we
+ return a null list.
+
+ Also set last_function_parms to either
+ a list of names (IDENTIFIER_NODEs) or a chain of PARM_DECLs.
+ A list of names is converted to a chain of PARM_DECLs
+ by store_parm_decls so that ultimately it is always a chain of decls.
+
+ Note that in C++, parameters can take default values. These default
+ values are in the TREE_PURPOSE field of the TREE_LIST. It is
+ an error to specify default values which are followed by parameters
+ that have no default values, or an ellipsis. For simplicity's sake,
+ only parameters which are specified with their types can take on
+ default values. */
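+/* Illustrative example: in `void f (int a, int b = 2);' the
+ TREE_LIST node for `b' carries `2' in its TREE_PURPOSE, while
+ `void g (int a = 1, int b);' draws the "all trailing parameters
+ must have default arguments" error below. */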
+
+static tree
+grokparms (first_parm, funcdef_flag)
+ tree first_parm;
+ int funcdef_flag;
+{
+ tree result = NULL_TREE;
+ tree decls = NULL_TREE;
+
+ if (first_parm != NULL_TREE
+ && TREE_CODE (TREE_VALUE (first_parm)) == IDENTIFIER_NODE)
+ {
+ if (! funcdef_flag)
+ pedwarn ("parameter names (without types) in function declaration");
+ last_function_parms = first_parm;
+ return NULL_TREE;
+ }
+ else if (first_parm != NULL_TREE
+ && TREE_CODE (TREE_VALUE (first_parm)) != TREE_LIST
+ && TREE_VALUE (first_parm) != void_type_node)
+ my_friendly_abort (145);
+ else
+ {
+ /* Types were specified. This is a list of declarators
+ each represented as a TREE_LIST node. */
+ register tree parm, chain;
+ int any_init = 0, any_error = 0, saw_void = 0;
+
+ if (first_parm != NULL_TREE)
+ {
+ tree last_result = NULL_TREE;
+ tree last_decl = NULL_TREE;
+
+ for (parm = first_parm; parm != NULL_TREE; parm = chain)
+ {
+ tree type, list_node = parm;
+ register tree decl = TREE_VALUE (parm);
+ tree init = TREE_PURPOSE (parm);
+
+ chain = TREE_CHAIN (parm);
+ /* @@ weak defense against parse errors. */
+ if (decl != void_type_node && TREE_CODE (decl) != TREE_LIST)
+ {
+ /* Give various messages as the need arises. */
+ if (TREE_CODE (decl) == STRING_CST)
+ error ("invalid string constant `%s'",
+ TREE_STRING_POINTER (decl));
+ else if (TREE_CODE (decl) == INTEGER_CST)
+ error ("invalid integer constant in parameter list, did you forget to give parameter name?");
+ continue;
+ }
+
+ if (decl != void_type_node)
+ {
+ /* @@ May need to fetch out a `raises' here. */
+ decl = grokdeclarator (TREE_VALUE (decl),
+ TREE_PURPOSE (decl),
+ PARM, init != NULL_TREE, NULL_TREE);
+ if (! decl)
+ continue;
+ type = TREE_TYPE (decl);
+ if (TYPE_MAIN_VARIANT (type) == void_type_node)
+ decl = void_type_node;
+ else if (TREE_CODE (type) == METHOD_TYPE)
+ {
+ if (DECL_NAME (decl))
+ /* Cannot use `error_with_decl' here because
+ we don't have DECL_CONTEXT set up yet. */
+ error ("parameter `%s' invalidly declared method type",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ else
+ error ("parameter invalidly declared method type");
+ type = build_pointer_type (type);
+ TREE_TYPE (decl) = type;
+ }
+ else if (TREE_CODE (type) == OFFSET_TYPE)
+ {
+ if (DECL_NAME (decl))
+ error ("parameter `%s' invalidly declared offset type",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ else
+ error ("parameter invalidly declared offset type");
+ type = build_pointer_type (type);
+ TREE_TYPE (decl) = type;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE
+ && TYPE_LANG_SPECIFIC (type)
+ && CLASSTYPE_ABSTRACT_VIRTUALS (type))
+ {
+ abstract_virtuals_error (decl, type);
+ any_error = 1; /* seems like a good idea */
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE
+ && TYPE_LANG_SPECIFIC (type)
+ && IS_SIGNATURE (type))
+ {
+ signature_error (decl, type);
+ any_error = 1; /* seems like a good idea */
+ }
+ }
+
+ if (decl == void_type_node)
+ {
+ if (result == NULL_TREE)
+ {
+ result = void_list_node;
+ last_result = result;
+ }
+ else
+ {
+ TREE_CHAIN (last_result) = void_list_node;
+ last_result = void_list_node;
+ }
+ saw_void = 1;
+ if (chain
+ && (chain != void_list_node || TREE_CHAIN (chain)))
+ error ("`void' in parameter list must be entire list");
+ break;
+ }
+
+ /* Since there is a prototype, args are passed in their own types. */
+ DECL_ARG_TYPE (decl) = TREE_TYPE (decl);
+#ifdef PROMOTE_PROTOTYPES
+ if ((TREE_CODE (type) == INTEGER_TYPE
+ || TREE_CODE (type) == ENUMERAL_TYPE)
+ && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
+ DECL_ARG_TYPE (decl) = integer_type_node;
+#endif
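+ /* So on machines defining PROMOTE_PROTOTYPES, a `char' or
+ `short' parm is passed as an `int', even though TREE_TYPE
+ of the PARM_DECL keeps the narrower type. */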
+ if (!any_error)
+ {
+ if (init)
+ {
+ any_init++;
+ if (TREE_CODE (init) == SAVE_EXPR)
+ PARM_DECL_EXPR (init) = 1;
+ else if (TREE_CODE (init) == VAR_DECL)
+ {
+ if (IDENTIFIER_LOCAL_VALUE (DECL_NAME (init)))
+ {
+ /* ``Local variables may not be used in default
+ argument expressions.'' dpANSI C++ 8.2.6 */
+ /* If extern int i; within a function is not
+ considered a local variable, then this code is
+ wrong. */
+ cp_error ("local variable `%D' may not be used as a default argument", init);
+ any_error = 1;
+ }
+ else if (TREE_READONLY_DECL_P (init))
+ init = decl_constant_value (init);
+ }
+ else
+ init = require_instantiated_type (type, init, integer_zero_node);
+ }
+ else if (any_init)
+ {
+ error ("all trailing parameters must have default arguments");
+ any_error = 1;
+ }
+ }
+ else
+ init = NULL_TREE;
+
+ if (decls == NULL_TREE)
+ {
+ decls = decl;
+ last_decl = decls;
+ }
+ else
+ {
+ TREE_CHAIN (last_decl) = decl;
+ last_decl = decl;
+ }
+ if (TREE_PERMANENT (list_node))
+ {
+ TREE_PURPOSE (list_node) = init;
+ TREE_VALUE (list_node) = type;
+ TREE_CHAIN (list_node) = NULL_TREE;
+ }
+ else
+ list_node = saveable_tree_cons (init, type, NULL_TREE);
+ if (result == NULL_TREE)
+ {
+ result = list_node;
+ last_result = result;
+ }
+ else
+ {
+ TREE_CHAIN (last_result) = list_node;
+ last_result = list_node;
+ }
+ }
+ if (last_result)
+ TREE_CHAIN (last_result) = NULL_TREE;
+ /* If there are no parameters, and the function does not end
+ with `...', then last_decl will be NULL_TREE. */
+ if (last_decl != NULL_TREE)
+ TREE_CHAIN (last_decl) = NULL_TREE;
+ }
+ }
+
+ last_function_parms = decls;
+
+ /* In a fcn definition, arg types must be complete. */
+ if (funcdef_flag > 0)
+ require_complete_types_for_parms (last_function_parms);
+
+ return result;
+}
+
+/* These memoizing functions keep track of special properties which
+ a class may have. `grok_ctor_properties' notices whether a class
+ has a constructor of the form X(X&), and also complains
+ if the class has a constructor of the form X(X).
+ `grok_op_properties' takes notice of the various forms of
+ operator= which are defined, as well as what sorts of type conversion
+ may apply. Both functions take a FUNCTION_DECL as an argument. */
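+
+/* For instance, given `struct X { X (X&); X (X); };', the first
+ constructor sets TYPE_HAS_INIT_REF, while the second draws
+ "invalid constructor; you probably meant `X (X&)'". */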
+int
+grok_ctor_properties (ctype, decl)
+ tree ctype, decl;
+{
+ tree parmtypes = FUNCTION_ARG_CHAIN (decl);
+ tree parmtype = parmtypes ? TREE_VALUE (parmtypes) : void_type_node;
+
+ /* When a type has virtual baseclasses, a magical first int argument is
+ added to any ctor so we can tell if the class has been initialized
+ yet. This could screw things up in this function, so we deliberately
+ ignore the leading int if we're in that situation. */
+ if (parmtypes
+ && TREE_VALUE (parmtypes) == integer_type_node
+ && TYPE_USES_VIRTUAL_BASECLASSES (ctype))
+ {
+ parmtypes = TREE_CHAIN (parmtypes);
+ parmtype = TREE_VALUE (parmtypes);
+ }
+
+ if (TREE_CODE (parmtype) == REFERENCE_TYPE
+ && TYPE_MAIN_VARIANT (TREE_TYPE (parmtype)) == ctype)
+ {
+ if (TREE_CHAIN (parmtypes) == NULL_TREE
+ || TREE_CHAIN (parmtypes) == void_list_node
+ || TREE_PURPOSE (TREE_CHAIN (parmtypes)))
+ {
+ TYPE_HAS_INIT_REF (ctype) = 1;
+ if (TYPE_READONLY (TREE_TYPE (parmtype)))
+ TYPE_HAS_CONST_INIT_REF (ctype) = 1;
+ }
+ else
+ TYPE_GETS_INIT_AGGR (ctype) = 1;
+ }
+ else if (TYPE_MAIN_VARIANT (parmtype) == ctype)
+ {
+ if (TREE_CHAIN (parmtypes) != NULL_TREE
+ && TREE_CHAIN (parmtypes) == void_list_node)
+ {
+ cp_error ("invalid constructor; you probably meant `%T (%T&)'",
+ ctype, ctype);
+ SET_IDENTIFIER_ERROR_LOCUS (DECL_NAME (decl), ctype);
+
+ return 0;
+ }
+ else
+ TYPE_GETS_INIT_AGGR (ctype) = 1;
+ }
+ else if (TREE_CODE (parmtype) == VOID_TYPE
+ || TREE_PURPOSE (parmtypes) != NULL_TREE)
+ TYPE_HAS_DEFAULT_CONSTRUCTOR (ctype) = 1;
+
+ return 1;
+}
+
+/* An operator with this name can be either unary or binary. */
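+/* That is: `*', `&', `-', `+', `++' and `--'. For example, a
+ non-member `X operator ++ (X&)' declares the prefix form, while
+ `X operator ++ (X&, int)' declares the postfix form. */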
+static int
+ambi_op_p (name)
+ tree name;
+{
+ return (name == ansi_opname [(int) INDIRECT_REF]
+ || name == ansi_opname [(int) ADDR_EXPR]
+ || name == ansi_opname [(int) NEGATE_EXPR]
+ || name == ansi_opname[(int) POSTINCREMENT_EXPR]
+ || name == ansi_opname[(int) POSTDECREMENT_EXPR]
+ || name == ansi_opname [(int) CONVERT_EXPR]);
+}
+
+/* An operator with this name can only be unary. */
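+/* That is: `!', `~', `->', and the type conversion operators
+ (`operator int', etc.). */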
+static int
+unary_op_p (name)
+ tree name;
+{
+ return (name == ansi_opname [(int) TRUTH_NOT_EXPR]
+ || name == ansi_opname [(int) BIT_NOT_EXPR]
+ || name == ansi_opname [(int) COMPONENT_REF]
+ || OPERATOR_TYPENAME_P (name));
+}
+
+/* Do a little sanity-checking on how they declared their operator. */
+static void
+grok_op_properties (decl, virtualp, friendp)
+ tree decl;
+ int virtualp, friendp;
+{
+ tree argtypes = TYPE_ARG_TYPES (TREE_TYPE (decl));
+ int methodp = (TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE);
+ tree name = DECL_NAME (decl);
+
+ if (current_class_type == NULL_TREE)
+ friendp = 1;
+
+ if (! friendp)
+ {
+ if (name == ansi_opname[(int) MODIFY_EXPR])
+ TYPE_HAS_ASSIGNMENT (current_class_type) = 1;
+ else if (name == ansi_opname[(int) CALL_EXPR])
+ TYPE_OVERLOADS_CALL_EXPR (current_class_type) = 1;
+ else if (name == ansi_opname[(int) ARRAY_REF])
+ TYPE_OVERLOADS_ARRAY_REF (current_class_type) = 1;
+ else if (name == ansi_opname[(int) COMPONENT_REF]
+ || name == ansi_opname[(int) MEMBER_REF])
+ TYPE_OVERLOADS_ARROW (current_class_type) = 1;
+ else if (name == ansi_opname[(int) NEW_EXPR])
+ TYPE_GETS_NEW (current_class_type) |= 1;
+ else if (name == ansi_opname[(int) DELETE_EXPR])
+ TYPE_GETS_DELETE (current_class_type) |= 1;
+ else if (name == ansi_opname[(int) VEC_NEW_EXPR])
+ TYPE_GETS_NEW (current_class_type) |= 2;
+ else if (name == ansi_opname[(int) VEC_DELETE_EXPR])
+ TYPE_GETS_DELETE (current_class_type) |= 2;
+ }
+
+ if (name == ansi_opname[(int) NEW_EXPR]
+ || name == ansi_opname[(int) VEC_NEW_EXPR])
+ {
+ /* When the compiler encounters the definition of A::operator new, it
+ doesn't look at the class declaration to find out if it's static. */
+ if (methodp)
+ revert_static_member_fn (&decl, NULL, NULL);
+
+ /* Take care of function decl if we had syntax errors. */
+ if (argtypes == NULL_TREE)
+ TREE_TYPE (decl) =
+ build_function_type (ptr_type_node,
+ hash_tree_chain (integer_type_node,
+ void_list_node));
+ else
+ TREE_TYPE (decl) = coerce_new_type (TREE_TYPE (decl));
+ }
+ else if (name == ansi_opname[(int) DELETE_EXPR]
+ || name == ansi_opname[(int) VEC_DELETE_EXPR])
+ {
+ if (methodp)
+ revert_static_member_fn (&decl, NULL, NULL);
+
+ if (argtypes == NULL_TREE)
+ TREE_TYPE (decl) =
+ build_function_type (void_type_node,
+ hash_tree_chain (ptr_type_node,
+ void_list_node));
+ else
+ {
+ TREE_TYPE (decl) = coerce_delete_type (TREE_TYPE (decl));
+
+ if (! friendp && name == ansi_opname[(int) VEC_DELETE_EXPR]
+ && (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (decl)))
+ != void_list_node))
+ TYPE_VEC_DELETE_TAKES_SIZE (current_class_type) = 1;
+ }
+ }
+ else
+ {
+ /* An operator function must either be a non-static member function
+ or have at least one parameter of a class, a reference to a class,
+ an enumeration, or a reference to an enumeration. 13.4.0.6 */
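+ /* Thus a non-member `int operator + (int, int)' is rejected
+ below, while `int operator + (X, int)' for some class X
+ passes the argument scan. */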
+ if (! methodp || DECL_STATIC_FUNCTION_P (decl))
+ {
+ if (OPERATOR_TYPENAME_P (name)
+ || name == ansi_opname[(int) CALL_EXPR]
+ || name == ansi_opname[(int) MODIFY_EXPR]
+ || name == ansi_opname[(int) COMPONENT_REF]
+ || name == ansi_opname[(int) ARRAY_REF])
+ cp_error ("`%D' must be a nonstatic member function", decl);
+ else
+ {
+ tree p = argtypes;
+
+ if (DECL_STATIC_FUNCTION_P (decl))
+ cp_error ("`%D' must be either a non-static member function or a non-member function", decl);
+
+ if (p)
+ for (; TREE_VALUE (p) != void_type_node ; p = TREE_CHAIN (p))
+ {
+ tree arg = TREE_VALUE (p);
+ if (TREE_CODE (arg) == REFERENCE_TYPE)
+ arg = TREE_TYPE (arg);
+
+ /* This lets bad template code slip through. */
+ if (IS_AGGR_TYPE (arg)
+ || TREE_CODE (arg) == ENUMERAL_TYPE
+ || TREE_CODE (arg) == TEMPLATE_TYPE_PARM)
+ goto foundaggr;
+ }
+ cp_error
+ ("`%D' must have an argument of class or enumerated type",
+ decl);
+ foundaggr:
+ ;
+ }
+ }
+
+ if (name == ansi_opname[(int) CALL_EXPR]
+ || name == ansi_opname[(int) METHOD_CALL_EXPR])
+ return; /* no restrictions on args */
+
+ if (IDENTIFIER_TYPENAME_P (name))
+ {
+ tree t = TREE_TYPE (name);
+ if (TREE_CODE (t) == VOID_TYPE)
+ pedwarn ("void is not a valid type conversion operator");
+ else if (! friendp)
+ {
+ int ref = (TREE_CODE (t) == REFERENCE_TYPE);
+ char *what = 0;
+ if (ref)
+ t = TYPE_MAIN_VARIANT (TREE_TYPE (t));
+
+ if (t == current_class_type)
+ what = "the same type";
+ else if (IS_AGGR_TYPE (t)
+ && DERIVED_FROM_P (t, current_class_type))
+ what = "a base class";
+
+ if (what)
+ warning ("conversion to %s%s will never use a type conversion operator",
+ ref ? "a reference to " : "", what);
+ }
+ }
+
+ if (name == ansi_opname[(int) MODIFY_EXPR])
+ {
+ tree parmtype;
+
+ if (list_length (argtypes) != 3 && methodp)
+ {
+ cp_error ("`%D' must take exactly one argument", decl);
+ return;
+ }
+ parmtype = TREE_VALUE (TREE_CHAIN (argtypes));
+
+ if (copy_assignment_arg_p (parmtype, virtualp)
+ && ! friendp)
+ {
+ TYPE_HAS_ASSIGN_REF (current_class_type) = 1;
+ if (TREE_CODE (parmtype) != REFERENCE_TYPE
+ || TYPE_READONLY (TREE_TYPE (parmtype)))
+ TYPE_HAS_CONST_ASSIGN_REF (current_class_type) = 1;
+#if 0 /* Too soon; done in grok_function_init */
+ if (DECL_ABSTRACT_VIRTUAL_P (decl))
+ TYPE_HAS_ABSTRACT_ASSIGN_REF (current_class_type) = 1;
+#endif
+ }
+ }
+ else if (name == ansi_opname[(int) COND_EXPR])
+ {
+ /* 13.4.0.3 */
+ pedwarn ("ANSI C++ prohibits overloading operator ?:");
+ if (list_length (argtypes) != 4)
+ cp_error ("`%D' must take exactly three arguments", decl);
+ }
+ else if (ambi_op_p (name))
+ {
+ if (list_length (argtypes) == 2)
+ /* prefix */;
+ else if (list_length (argtypes) == 3)
+ {
+ if ((name == ansi_opname[(int) POSTINCREMENT_EXPR]
+ || name == ansi_opname[(int) POSTDECREMENT_EXPR])
+ && TREE_VALUE (TREE_CHAIN (argtypes)) != integer_type_node)
+ {
+ if (methodp)
+ cp_error ("postfix `%D' must take `int' as its argument",
+ decl);
+ else
+ cp_error
+ ("postfix `%D' must take `int' as its second argument",
+ decl);
+ }
+ }
+ else
+ {
+ if (methodp)
+ cp_error ("`%D' must take either zero or one argument", decl);
+ else
+ cp_error ("`%D' must take either one or two arguments", decl);
+ }
+ }
+ else if (unary_op_p (name))
+ {
+ if (list_length (argtypes) != 2)
+ {
+ if (methodp)
+ cp_error ("`%D' must take `void'", decl);
+ else
+ cp_error ("`%D' must take exactly one argument", decl);
+ }
+ }
+ else /* if (binary_op_p (name)) */
+ {
+ if (list_length (argtypes) != 3)
+ {
+ if (methodp)
+ cp_error ("`%D' must take exactly one argument", decl);
+ else
+ cp_error ("`%D' must take exactly two arguments", decl);
+ }
+ }
+
+ /* 13.4.0.8 */
+ if (argtypes)
+ for (; argtypes != void_list_node ; argtypes = TREE_CHAIN (argtypes))
+ if (TREE_PURPOSE (argtypes))
+ {
+ TREE_PURPOSE (argtypes) = NULL_TREE;
+ if (name == ansi_opname[(int) POSTINCREMENT_EXPR]
+ || name == ansi_opname[(int) POSTDECREMENT_EXPR])
+ {
+ if (pedantic)
+ cp_pedwarn ("`%D' cannot have default arguments", decl);
+ }
+ else
+ cp_error ("`%D' cannot have default arguments", decl);
+ }
+ }
+}
+
+/* Get the struct, enum or union (CODE says which) with tag NAME.
+ Define the tag as a forward-reference if it is not defined.
+
+ C++: If a class derivation is given, process it here, and report
+ an error if multiple derivation declarations are not identical.
+
+ If this is a definition, come in through xref_tag and only look in
+ the current frame for the name (since C++ allows new names in any
+ scope.) */
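+
+/* For example, if `D' is first seen as `class D : public B1' and
+ later as `class D : public B2', the second base list draws
+ "redeclaration of derivation chain of type `D'". */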
+
+/* avoid rewriting all callers of xref_tag */
+static int xref_next_defn = 0;
+
+tree
+xref_defn_tag (code_type_node, name, binfo)
+ tree code_type_node;
+ tree name, binfo;
+{
+ tree rv, ncp;
+ xref_next_defn = 1;
+
+ if (class_binding_level)
+ {
+ tree n1;
+ char *buf;
+ /* we need to build a new IDENTIFIER_NODE for name which nukes
+ * the pieces... */
+/*
+ n1 = IDENTIFIER_LOCAL_VALUE (current_class_name);
+ if (n1)
+ n1 = DECL_NAME (n1);
+ else
+ n1 = current_class_name;
+*/
+ n1 = TYPE_NAME (current_class_type);
+ if (n1)
+ n1 = DECL_NESTED_TYPENAME(n1);
+ else
+ n1 = current_class_name;
+
+ buf = (char *) alloca (4 + IDENTIFIER_LENGTH (n1)
+ + IDENTIFIER_LENGTH (name));
+
+ sprintf (buf, "%s::%s", IDENTIFIER_POINTER (n1),
+ IDENTIFIER_POINTER (name));
+ ncp = get_identifier (buf);
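+ /* E.g. for a `struct Inner' defined inside `struct Outer',
+ buf now holds the flat name "Outer::Inner". */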
+#ifdef SPEW_DEBUG
+ if (spew_debug)
+ printf("*** %s ***\n", IDENTIFIER_POINTER (ncp));
+#endif
+#if 0
+ IDENTIFIER_LOCAL_VALUE (name) =
+ build_decl (TYPE_DECL, ncp, NULL_TREE);
+#endif
+ rv = xref_tag (code_type_node, name, binfo, 0);
+ if (! ANON_AGGRNAME_P (name))
+ {
+ register tree type_decl = build_decl (TYPE_DECL, ncp, rv);
+#ifdef DWARF_DEBUGGING_INFO
+ /* Mark the TYPE_DECL node created just above as a gratuitous one
+ so that dwarfout.c will know not to generate a TAG_typedef DIE
+ for it. */
+ if (write_symbols == DWARF_DEBUG)
+ DECL_IGNORED_P (type_decl) = 1;
+#endif /* DWARF_DEBUGGING_INFO */
+ pushdecl_nonclass_level (type_decl);
+ }
+ }
+ else
+ {
+ rv = xref_tag (code_type_node, name, binfo, 0);
+ }
+ xref_next_defn = 0;
+ return rv;
+}
+
+tree
+xref_tag (code_type_node, name, binfo, globalize)
+ tree code_type_node;
+ tree name, binfo;
+ int globalize;
+{
+ enum tag_types tag_code;
+ enum tree_code code;
+ int temp = 0;
+ int i, len;
+ register tree ref, t;
+ struct binding_level *b = inner_binding_level;
+
+ tag_code = (enum tag_types) TREE_INT_CST_LOW (code_type_node);
+ switch (tag_code)
+ {
+ case record_type:
+ case class_type:
+ case exception_type:
+ case signature_type:
+ code = RECORD_TYPE;
+ len = list_length (binfo);
+ break;
+ case union_type:
+ code = UNION_TYPE;
+ if (binfo)
+ {
+ cp_error ("derived union `%T' invalid", name);
+ binfo = NULL_TREE;
+ }
+ len = 0;
+ break;
+ case enum_type:
+ code = ENUMERAL_TYPE;
+ break;
+ default:
+ my_friendly_abort (18);
+ }
+
+ /* If a cross reference is requested, look up the type
+ already defined for this tag and return it. */
+ t = IDENTIFIER_TYPE_VALUE (name);
+ if (t && TREE_CODE (t) != code)
+ t = NULL_TREE;
+
+ if (xref_next_defn)
+ {
+ /* If we know we are defining this tag, only look it up in this scope
+ * and don't try to find it as a type. */
+ xref_next_defn = 0;
+ if (t && TYPE_CONTEXT(t))
+ {
+ if (TREE_MANGLED (name))
+ ref = t;
+ else
+ ref = lookup_tag (code, name, b, 1);
+ }
+ else
+ ref = lookup_tag (code, name, b, 1);
+ }
+ else
+ {
+ if (t)
+ ref = t;
+ else
+ ref = lookup_tag (code, name, b, 0);
+
+ if (! ref)
+ {
+ /* Try finding it as a type declaration. If that wins, use it. */
+ ref = lookup_name (name, 1);
+ if (ref && TREE_CODE (ref) == TYPE_DECL
+ && TREE_CODE (TREE_TYPE (ref)) == code)
+ ref = TREE_TYPE (ref);
+ else
+ ref = NULL_TREE;
+ }
+ }
+
+ push_obstacks_nochange ();
+
+ if (! ref)
+ {
+ /* If no such tag is yet defined, create a forward-reference node
+ and record it as the "definition".
+ When a real declaration of this type is found,
+ the forward-reference will be altered into a real type. */
+
+ /* In C++, since these migrate into the global scope, we must
+ build them on the permanent obstack. */
+
+ temp = allocation_temporary_p ();
+ if (temp)
+ end_temporary_allocation ();
+
+ if (code == ENUMERAL_TYPE)
+ {
+ ref = make_node (ENUMERAL_TYPE);
+
+ /* Give the type a default layout like unsigned int
+ to avoid crashing if it does not get defined. */
+ TYPE_MODE (ref) = TYPE_MODE (unsigned_type_node);
+ TYPE_ALIGN (ref) = TYPE_ALIGN (unsigned_type_node);
+ TREE_UNSIGNED (ref) = 1;
+ TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node);
+ TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node);
+ TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node);
+
+ /* Enable us to recognize when a type is created in class context.
+ To do nested classes correctly, this should probably be cleared
+ out when we leave this class's scope. Currently this is only
+ done in `start_enum'. */
+
+ pushtag (name, ref, globalize);
+ if (flag_cadillac)
+ cadillac_start_enum (ref);
+ }
+ else if (tag_code == exception_type)
+ {
+ ref = make_lang_type (code);
+ /* Enable us to recognize when an exception type is created in
+ class context. To do nested classes correctly, this should
+ probably be cleared out when we leave this class's scope. */
+ CLASSTYPE_DECLARED_EXCEPTION (ref) = 1;
+ pushtag (name, ref, globalize);
+ if (flag_cadillac)
+ cadillac_start_struct (ref);
+ }
+ else
+ {
+ extern tree pending_vtables;
+ struct binding_level *old_b = class_binding_level;
+ int needs_writing;
+
+ ref = make_lang_type (code);
+
+ /* A signature type will contain the fields of the signature
+ table. Therefore, it's not only an interface. */
+ if (tag_code == signature_type)
+ {
+ SET_SIGNATURE (ref);
+ CLASSTYPE_INTERFACE_ONLY (ref) = 0;
+ CLASSTYPE_INTERFACE_UNKNOWN (ref) = 0;
+ }
+
+ /* Record how to set the access of this class's
+ virtual functions. If write_virtuals == 2 or 3, then
+ inline virtuals are ``extern inline''. */
+ switch (write_virtuals)
+ {
+ case 0:
+ case 1:
+ needs_writing = 1;
+ break;
+ case 2:
+ needs_writing = !! value_member (name, pending_vtables);
+ break;
+ case 3:
+ needs_writing = ! CLASSTYPE_INTERFACE_ONLY (ref)
+ && CLASSTYPE_INTERFACE_KNOWN (ref);
+ break;
+ default:
+ needs_writing = 0;
+ }
+
+ /* Signatures don't have a vtable. As long as we don't have default
+ implementations, they behave as if `write_virtuals' were 3. */
+ if (tag_code == signature_type)
+ CLASSTYPE_VTABLE_NEEDS_WRITING (ref) = 0;
+ else
+ CLASSTYPE_VTABLE_NEEDS_WRITING (ref) = needs_writing;
+
+#ifdef NONNESTED_CLASSES
+ /* Class types don't nest the way enums do. */
+ class_binding_level = (struct binding_level *)0;
+#endif
+ pushtag (name, ref, globalize);
+ class_binding_level = old_b;
+
+ if (flag_cadillac)
+ cadillac_start_struct (ref);
+ }
+ }
+ else
+ {
+ if (IS_AGGR_TYPE_CODE (code))
+ {
+ if (IS_AGGR_TYPE (ref)
+ && ((tag_code == exception_type)
+ != (CLASSTYPE_DECLARED_EXCEPTION (ref) == 1)))
+ {
+ cp_error ("type `%T' is both exception and aggregate type", ref);
+ CLASSTYPE_DECLARED_EXCEPTION (ref) = (tag_code == exception_type);
+ }
+ }
+
+ /* If it no longer looks like a nested type, make sure it's
+ in global scope. */
+ if (b == global_binding_level && !class_binding_level
+ && IDENTIFIER_GLOBAL_VALUE (name) == NULL_TREE)
+ IDENTIFIER_GLOBAL_VALUE (name) = TYPE_NAME (ref);
+
+ if (binfo)
+ {
+ tree tt1 = binfo;
+ tree tt2 = TYPE_BINFO_BASETYPES (ref);
+
+ if (TYPE_BINFO_BASETYPES (ref))
+ for (i = 0; tt1; i++, tt1 = TREE_CHAIN (tt1))
+ if (TREE_VALUE (tt1) != TYPE_IDENTIFIER (BINFO_TYPE (TREE_VEC_ELT (tt2, i))))
+ {
+ cp_error ("redeclaration of derivation chain of type `%#T'",
+ ref);
+ break;
+ }
+
+ if (tt1 == NULL_TREE)
+ /* The user told us something we already knew. */
+ goto just_return;
+
+ /* In C++, since these migrate into the global scope, we must
+ build them on the permanent obstack. */
+ end_temporary_allocation ();
+ }
+ }
+
+ if (binfo)
+ {
+ /* In the declaration `A : X, Y, ... Z' we mark all the types
+ (A, X, Y, ..., Z) so we can check for duplicates. */
+ tree binfos;
+
+ SET_CLASSTYPE_MARKED (ref);
+ BINFO_BASETYPES (TYPE_BINFO (ref)) = binfos = make_tree_vec (len);
+
+ for (i = 0; binfo; binfo = TREE_CHAIN (binfo))
+ {
+ /* The base of a derived struct is public by default. */
+ int via_public
+ = (TREE_PURPOSE (binfo) == (tree)access_public
+ || TREE_PURPOSE (binfo) == (tree)access_public_virtual
+ || (tag_code != class_type
+ && (TREE_PURPOSE (binfo) == (tree)access_default
+ || TREE_PURPOSE (binfo) == (tree)access_default_virtual)));
+ int via_protected = TREE_PURPOSE (binfo) == (tree)access_protected;
+ int via_virtual
+ = (TREE_PURPOSE (binfo) == (tree)access_private_virtual
+ || TREE_PURPOSE (binfo) == (tree)access_public_virtual
+ || TREE_PURPOSE (binfo) == (tree)access_default_virtual);
+ tree basetype = TREE_TYPE (TREE_VALUE (binfo));
+ tree base_binfo;
+
+ GNU_xref_hier (IDENTIFIER_POINTER (name),
+ IDENTIFIER_POINTER (TREE_VALUE (binfo)),
+ via_public, via_virtual, 0);
+
+ if (basetype && TREE_CODE (basetype) == TYPE_DECL)
+ basetype = TREE_TYPE (basetype);
+ if (!basetype || TREE_CODE (basetype) != RECORD_TYPE)
+ {
+ error ("base type `%s' fails to be a struct or class type",
+ IDENTIFIER_POINTER (TREE_VALUE (binfo)));
+ continue;
+ }
+#if 1
+ /* This code replaces similar code in layout_basetypes. */
+ else if (TYPE_SIZE (basetype) == NULL_TREE)
+ {
+ cp_error ("base class `%T' has incomplete type", basetype);
+ continue;
+ }
+#endif
+ else
+ {
+ if (CLASSTYPE_MARKED (basetype))
+ {
+ if (basetype == ref)
+ cp_error ("recursive type `%T' undefined", basetype);
+ else
+ cp_error ("duplicate base type `%T' invalid", basetype);
+ continue;
+ }
+
+ /* Note that the BINFO records which describe individual
+ inheritances are *not* shared in the lattice! They
+ cannot be shared because a given baseclass may be
+ inherited with different `accessibility' by different
+ derived classes. (Each BINFO record describing an
+ individual inheritance contains flags which say what
+ the `accessibility' of that particular inheritance is.) */
+
+ base_binfo = make_binfo (integer_zero_node, basetype,
+ TYPE_BINFO_VTABLE (basetype),
+ TYPE_BINFO_VIRTUALS (basetype), NULL_TREE);
+
+ TREE_VEC_ELT (binfos, i) = base_binfo;
+ TREE_VIA_PUBLIC (base_binfo) = via_public;
+ TREE_VIA_PROTECTED (base_binfo) = via_protected;
+ TREE_VIA_VIRTUAL (base_binfo) = via_virtual;
+
+ SET_CLASSTYPE_MARKED (basetype);
+#if 0
+/* XYZZY TEST VIRTUAL BASECLASSES */
+if (CLASSTYPE_N_BASECLASSES (basetype) == NULL_TREE
+ && TYPE_HAS_DEFAULT_CONSTRUCTOR (basetype)
+ && via_virtual == 0)
+ {
+ warning ("making type `%s' a virtual baseclass",
+ TYPE_NAME_STRING (basetype));
+ via_virtual = 1;
+ }
+#endif
+ /* We are free to modify these bits because they are meaningless
+ at top level, and BASETYPE is a top-level type. */
+ if (via_virtual || TYPE_USES_VIRTUAL_BASECLASSES (basetype))
+ {
+ TYPE_USES_VIRTUAL_BASECLASSES (ref) = 1;
+ TYPE_USES_COMPLEX_INHERITANCE (ref) = 1;
+ }
+
+ TYPE_OVERLOADS_METHOD_CALL_EXPR (ref) |= TYPE_OVERLOADS_METHOD_CALL_EXPR (basetype);
+ TYPE_GETS_NEW (ref) |= TYPE_GETS_NEW (basetype);
+ TYPE_GETS_DELETE (ref) |= TYPE_GETS_DELETE (basetype);
+ CLASSTYPE_LOCAL_TYPEDECLS (ref) |= CLASSTYPE_LOCAL_TYPEDECLS (basetype);
+ i += 1;
+ }
+ }
+ if (i)
+ TREE_VEC_LENGTH (binfos) = i;
+ else
+ BINFO_BASETYPES (TYPE_BINFO (ref)) = NULL_TREE;
+
+ if (i > 1)
+ TYPE_USES_MULTIPLE_INHERITANCE (ref) = 1;
+ else if (i == 1)
+ TYPE_USES_MULTIPLE_INHERITANCE (ref)
+ = TYPE_USES_MULTIPLE_INHERITANCE (BINFO_TYPE (TREE_VEC_ELT (binfos, 0)));
+ if (TYPE_USES_MULTIPLE_INHERITANCE (ref))
+ TYPE_USES_COMPLEX_INHERITANCE (ref) = 1;
+
+ /* Unmark all the types. */
+ while (--i >= 0)
+ CLEAR_CLASSTYPE_MARKED (BINFO_TYPE (TREE_VEC_ELT (binfos, i)));
+ CLEAR_CLASSTYPE_MARKED (ref);
+ }
+
+ just_return:
+
+ /* Until the type is defined, tentatively accept whatever
+ structure tag the user hands us. */
+ if (TYPE_SIZE (ref) == NULL_TREE
+ && ref != current_class_type
+ /* Have to check this, in case we have contradictory tag info. */
+ && IS_AGGR_TYPE_CODE (TREE_CODE (ref)))
+ {
+ if (tag_code == class_type)
+ CLASSTYPE_DECLARED_CLASS (ref) = 1;
+ else if (tag_code == record_type || tag_code == signature_type)
+ CLASSTYPE_DECLARED_CLASS (ref) = 0;
+ }
+
+ pop_obstacks ();
+
+ return ref;
+}
+
+static tree current_local_enum = NULL_TREE;
+
+/* Begin compiling the definition of an enumeration type.
+ NAME is its name (or null if anonymous).
+ Returns the type object, as yet incomplete.
+ Also records info about it so that build_enumerator
+ may be used to declare the individual values as they are read. */
+
+tree
+start_enum (name)
+ tree name;
+{
+ register tree enumtype = NULL_TREE;
+ struct binding_level *b = inner_binding_level;
+
+ /* If this is the real definition for a previous forward reference,
+ fill in the contents in the same object that used to be the
+ forward reference. */
+
+ if (name != NULL_TREE)
+ enumtype = lookup_tag (ENUMERAL_TYPE, name, b, 1);
+
+ if (enumtype != NULL_TREE && TREE_CODE (enumtype) == ENUMERAL_TYPE)
+ cp_error ("multiple definition of enum `%T'", enumtype);
+ else
+ {
+ enumtype = make_node (ENUMERAL_TYPE);
+ pushtag (name, enumtype, 0);
+ }
+
+ if (current_class_type)
+ TREE_ADDRESSABLE (b->tags) = 1;
+ current_local_enum = NULL_TREE;
+
+ if (TYPE_VALUES (enumtype) != NULL_TREE)
+ /* Completely replace its old definition.
+ The old enumerators remain defined, however. */
+ TYPE_VALUES (enumtype) = NULL_TREE;
+
+ /* Initially, set up this enum as like `int'
+ so that we can create the enumerators' declarations and values.
+ Later on, the precision of the type may be changed and
+ it may be laid out again. */
+
+ TYPE_PRECISION (enumtype) = TYPE_PRECISION (integer_type_node);
+ TYPE_SIZE (enumtype) = NULL_TREE;
+ fixup_unsigned_type (enumtype);
+
+ /* We copy this value because enumerated type constants
+ are really of the type of the enumerator, not integer_type_node. */
+ enum_next_value = copy_node (integer_zero_node);
+ enum_overflow = 0;
+
+ GNU_xref_decl (current_function_decl, enumtype);
+ return enumtype;
+}
+
+/* After processing and defining all the values of an enumeration type,
+ install their decls in the enumeration type and finish it off.
+ ENUMTYPE is the type object and VALUES a list of name-value pairs.
+ Returns ENUMTYPE. */
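+
+/* Under -fshort-enums the type gets only as many bits as its
+ largest enumerator needs; e.g. for `enum e { a = 5 };' the
+ precision loop below computes TYPE_PRECISION of 3. */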
+
+tree
+finish_enum (enumtype, values)
+ register tree enumtype, values;
+{
+ register tree pair, tem;
+ register HOST_WIDE_INT maxvalue = 0;
+ register HOST_WIDE_INT minvalue = 0;
+ register HOST_WIDE_INT i;
+
+ /* Calculate the maximum value of any enumerator in this type. */
+
+ if (values)
+ {
+ /* Speed up the main loop by performing some precalculations */
+
+ HOST_WIDE_INT value;
+ TREE_TYPE (TREE_VALUE (values)) = enumtype;
+ TREE_TYPE (DECL_INITIAL (TREE_VALUE (values))) = enumtype;
+ TREE_VALUE (values) = DECL_INITIAL (TREE_VALUE (values));
+ value = TREE_INT_CST_LOW (TREE_VALUE (values));
+ minvalue = maxvalue = value;
+
+ for (pair = TREE_CHAIN (values); pair; pair = TREE_CHAIN (pair))
+ {
+ TREE_TYPE (TREE_VALUE (pair)) = enumtype;
+ TREE_TYPE (DECL_INITIAL (TREE_VALUE (pair))) = enumtype;
+ TREE_VALUE (pair) = DECL_INITIAL (TREE_VALUE (pair));
+ value = TREE_INT_CST_LOW (TREE_VALUE (pair));
+ if (value > maxvalue)
+ maxvalue = value;
+ else if (value < minvalue)
+ minvalue = value;
+ }
+ }
+
+ TYPE_VALUES (enumtype) = values;
+
+ if (flag_short_enums)
+ {
+ /* Determine the precision this type needs, lay it out, and define
+ it. */
+
+ /* First reset precision */
+ TYPE_PRECISION (enumtype) = 0;
+
+ for (i = maxvalue; i; i >>= 1)
+ TYPE_PRECISION (enumtype)++;
+
+ if (!TYPE_PRECISION (enumtype))
+ TYPE_PRECISION (enumtype) = 1;
+
+ /* Cancel the laying out previously done for the enum type,
+ so that fixup_unsigned_type will do it over. */
+ TYPE_SIZE (enumtype) = NULL_TREE;
+
+ fixup_unsigned_type (enumtype);
+ }
+
+ TREE_INT_CST_LOW (TYPE_MAX_VALUE (enumtype)) = maxvalue;
+
+ /* An enum can have some negative values; then it is signed. */
+ if (minvalue < 0)
+ {
+ TREE_INT_CST_LOW (TYPE_MIN_VALUE (enumtype)) = minvalue;
+ TREE_INT_CST_HIGH (TYPE_MIN_VALUE (enumtype)) = -1;
+ TREE_UNSIGNED (enumtype) = 0;
+ }
+ if (flag_cadillac)
+ cadillac_finish_enum (enumtype);
+
+ /* Fix up all variant types of this enum type. */
+ for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem))
+ {
+ TYPE_VALUES (tem) = TYPE_VALUES (enumtype);
+ TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype);
+ TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype);
+ TYPE_SIZE (tem) = TYPE_SIZE (enumtype);
+ TYPE_MODE (tem) = TYPE_MODE (enumtype);
+ TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype);
+ TYPE_ALIGN (tem) = TYPE_ALIGN (enumtype);
+ TREE_UNSIGNED (tem) = TREE_UNSIGNED (enumtype);
+ }
+
+ /* Finish debugging output for this type. */
+#if 0
+ /* @@ Do we ever generate ENUMERAL_TYPE nodes for which debugging
+ information should *not* be generated? I think not. */
+ if (! DECL_IGNORED_P (TYPE_NAME (enumtype)))
+#endif
+ rest_of_type_compilation (enumtype, global_bindings_p ());
+
+ return enumtype;
+}
+
+/* Build and install a CONST_DECL for one value of the
+ current enumeration type (one that was begun with start_enum).
+ Return a tree-list containing the name and its value.
+ Assignment of sequential values by default is handled here. */
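+
+/* E.g. in `enum color { red, green = 10, blue };' the enumerators
+ get the values 0, 10 and 11; `blue' is defaulted from the
+ previous value plus one. */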
+
+tree
+build_enumerator (name, value)
+ tree name, value;
+{
+ tree decl, result;
+ /* Change this to zero if we find VALUE is not shareable. */
+ int shareable = 1;
+
+ /* Remove no-op casts from the value. */
+ if (value)
+ STRIP_TYPE_NOPS (value);
+
+ /* Validate and default VALUE. */
+ if (value != NULL_TREE)
+ {
+ if (TREE_READONLY_DECL_P (value))
+ {
+ value = decl_constant_value (value);
+ shareable = 0;
+ }
+
+ if (TREE_CODE (value) == INTEGER_CST)
+ {
+ value = default_conversion (value);
+ constant_expression_warning (value);
+ }
+ else
+ {
+ cp_error ("enumerator value for `%D' not integer constant", name);
+ value = NULL_TREE;
+ }
+ }
+
+ /* The order of things is reversed here so that we
+ can detect any sharing of enum values and keep it
+ from happening. */
+ /* Default based on previous value. */
+ if (value == NULL_TREE)
+ {
+ value = enum_next_value;
+ if (enum_overflow)
+ cp_error ("overflow in enumeration values at `%D'", name);
+ }
+
+ /* Remove no-op casts from the value. */
+ if (value)
+ STRIP_TYPE_NOPS (value);
+
+ /* Make up for hacks in lex.c. */
+ if (value == integer_zero_node)
+ value = build_int_2 (0, 0);
+ else if (value == integer_one_node)
+ value = build_int_2 (1, 0);
+ else if (TREE_CODE (value) == INTEGER_CST
+ && (shareable == 0
+ || TREE_CODE (TREE_TYPE (value)) == ENUMERAL_TYPE))
+ {
+ value = copy_node (value);
+ TREE_TYPE (value) = integer_type_node;
+ }
+
+ /* C++ associates enums with global, function, or class declarations. */
+
+ decl = current_scope ();
+ if (decl && decl == current_class_type)
+ {
+ /* This enum declaration is local to the class, so we must put
+ it in that class's list of decls. */
+ decl = build_lang_field_decl (CONST_DECL, name, integer_type_node);
+ DECL_INITIAL (decl) = value;
+ TREE_READONLY (decl) = 1;
+ pushdecl_class_level (decl);
+ TREE_CHAIN (decl) = current_local_enum;
+ current_local_enum = decl;
+ }
+ else
+ {
+ /* It's a global enum, or it's local to a function. (Note that local
+ to a function could mean local to a class method.) */
+ decl = build_decl (CONST_DECL, name, integer_type_node);
+ DECL_INITIAL (decl) = value;
+
+ pushdecl (decl);
+ GNU_xref_decl (current_function_decl, decl);
+ }
+
+ /* Set basis for default for next value. */
+ enum_next_value = build_binary_op_nodefault (PLUS_EXPR, value,
+ integer_one_node, PLUS_EXPR);
+ enum_overflow = tree_int_cst_lt (enum_next_value, value);
+
+ if (enum_next_value == integer_one_node)
+ enum_next_value = copy_node (enum_next_value);
+
+ result = saveable_tree_cons (name, decl, NULL_TREE);
+ return result;
+}
+
+tree
+grok_enum_decls (type, decl)
+ tree type, decl;
+{
+ tree d = current_local_enum;
+
+ if (d == NULL_TREE)
+ return decl;
+
+ while (1)
+ {
+ TREE_TYPE (d) = type;
+ if (TREE_CHAIN (d) == NULL_TREE)
+ {
+ TREE_CHAIN (d) = decl;
+ break;
+ }
+ d = TREE_CHAIN (d);
+ }
+
+ decl = current_local_enum;
+ current_local_enum = NULL_TREE;
+
+ return decl;
+}
+
+/* Create the FUNCTION_DECL for a function definition.
+ DECLSPECS and DECLARATOR are the parts of the declaration;
+ they describe the function's name and the type it returns,
+ but twisted together in a fashion that parallels the syntax of C.
+
+ This function creates a binding context for the function body
+ as well as setting up the FUNCTION_DECL in current_function_decl.
+
+ Returns 1 on success. If the DECLARATOR is not suitable for a function
+ (it defines a datum instead), we return 0, which tells
+ yyparse to report a parse error.
+
+ For C++, we must first check whether that datum makes any sense.
+ For example, "class A local_a(1,2);" means that variable local_a
+ is an aggregate of type A, which should have a constructor
+ applied to it with the argument list [1, 2].
+
+ @@ There is currently no way to retrieve the storage
+ @@ allocated to FUNCTION (or all of its parms) if we return
+ @@ something we had previously. */
+
+int
+start_function (declspecs, declarator, raises, pre_parsed_p)
+ tree declarator, declspecs, raises;
+ int pre_parsed_p;
+{
+ tree decl1, olddecl;
+ tree ctype = NULL_TREE;
+ tree fntype;
+ tree restype;
+ extern int have_extern_spec;
+ extern int used_extern_spec;
+ int doing_friend = 0;
+
+ /* Sanity check. */
+ my_friendly_assert (TREE_VALUE (void_list_node) == void_type_node, 160);
+ my_friendly_assert (TREE_CHAIN (void_list_node) == NULL_TREE, 161);
+
+ /* Assume the function returns no value until we see that it does. */
+ current_function_returns_value = 0;
+ current_function_returns_null = 0;
+ warn_about_return_type = 0;
+ current_extern_inline = 0;
+ current_function_assigns_this = 0;
+ current_function_just_assigned_this = 0;
+ current_function_parms_stored = 0;
+ original_result_rtx = NULL_RTX;
+ current_function_obstack_index = 0;
+ current_function_obstack_usage = 0;
+
+ clear_temp_name ();
+
+ /* This should only be done once on the top most decl. */
+ if (have_extern_spec && !used_extern_spec)
+ {
+ declspecs = decl_tree_cons (NULL_TREE, get_identifier ("extern"), declspecs);
+ used_extern_spec = 1;
+ }
+
+ if (pre_parsed_p)
+ {
+ decl1 = declarator;
+
+ if (! DECL_ARGUMENTS (decl1)
+ && !DECL_STATIC_FUNCTION_P (decl1)
+ && DECL_CONTEXT (decl1)
+ && DECL_NAME (TYPE_NAME (DECL_CONTEXT (decl1)))
+ && IDENTIFIER_TEMPLATE (DECL_NAME (TYPE_NAME (DECL_CONTEXT (decl1)))))
+ {
+ cp_error ("redeclaration of `%#D'", decl1);
+ if (IDENTIFIER_CLASS_VALUE (DECL_NAME (decl1)))
+ cp_error_at ("previous declaration here", IDENTIFIER_CLASS_VALUE (DECL_NAME (decl1)));
+ else if (IDENTIFIER_GLOBAL_VALUE (DECL_NAME (decl1)))
+ cp_error_at ("previous declaration here", IDENTIFIER_GLOBAL_VALUE (DECL_NAME (decl1)));
+ }
+
+ last_function_parms = DECL_ARGUMENTS (decl1);
+ last_function_parm_tags = NULL_TREE;
+ fntype = TREE_TYPE (decl1);
+ if (TREE_CODE (fntype) == METHOD_TYPE)
+ ctype = TYPE_METHOD_BASETYPE (fntype);
+
+ /* ANSI C++ June 5 1992 WP 11.4.5. A friend function defined in a
+ class is in the (lexical) scope of the class in which it is
+ defined. */
+ if (!ctype && DECL_FRIEND_P (decl1))
+ {
+ ctype = DECL_CLASS_CONTEXT (decl1);
+
+ /* CTYPE could be null here if we're dealing with a template;
+ for example, `inline friend float foo()' inside a template
+ will have no CTYPE set. */
+ if (ctype && TREE_CODE (ctype) != RECORD_TYPE)
+ ctype = NULL_TREE;
+ else
+ doing_friend = 1;
+ }
+
+ if ( !(DECL_VINDEX (decl1)
+ && write_virtuals >= 2
+ && CLASSTYPE_VTABLE_NEEDS_WRITING (ctype)))
+ current_extern_inline = DECL_THIS_EXTERN (decl1) && DECL_INLINE (decl1);
+
+ raises = TYPE_RAISES_EXCEPTIONS (fntype);
+
+ /* In a fcn definition, arg types must be complete. */
+ require_complete_types_for_parms (last_function_parms);
+ }
+ else
+ {
+ decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, 1, raises);
+ /* If the declarator is not suitable for a function definition,
+ cause a syntax error. */
+ if (decl1 == NULL_TREE || TREE_CODE (decl1) != FUNCTION_DECL) return 0;
+
+ fntype = TREE_TYPE (decl1);
+
+ restype = TREE_TYPE (fntype);
+ if (IS_AGGR_TYPE (restype) && ! TYPE_PTRMEMFUNC_P (restype)
+ && ! CLASSTYPE_GOT_SEMICOLON (restype))
+ {
+ cp_error ("semicolon missing after declaration of `%#T'", restype);
+ shadow_tag (build_tree_list (NULL_TREE, restype));
+ CLASSTYPE_GOT_SEMICOLON (restype) = 1;
+ if (TREE_CODE (fntype) == FUNCTION_TYPE)
+ fntype = build_function_type (integer_type_node,
+ TYPE_ARG_TYPES (fntype));
+ else
+ fntype = build_cplus_method_type (build_type_variant (TYPE_METHOD_BASETYPE (fntype), TREE_READONLY (decl1), TREE_SIDE_EFFECTS (decl1)),
+ integer_type_node,
+ TYPE_ARG_TYPES (fntype));
+ TREE_TYPE (decl1) = fntype;
+ }
+
+ if (TREE_CODE (fntype) == METHOD_TYPE)
+ ctype = TYPE_METHOD_BASETYPE (fntype);
+ else if (IDENTIFIER_LENGTH (DECL_NAME (decl1)) == 4
+ && ! strcmp (IDENTIFIER_POINTER (DECL_NAME (decl1)), "main")
+ && DECL_CONTEXT (decl1) == NULL_TREE)
+ {
+ /* If this doesn't return integer_type, complain. */
+ if (TREE_TYPE (TREE_TYPE (decl1)) != integer_type_node)
+ {
+ if (pedantic || warn_return_type)
+ warning ("return type for `main' changed to integer type");
+ TREE_TYPE (decl1) = fntype = default_function_type;
+ }
+ warn_about_return_type = 0;
+ }
+ }
+
+ /* Warn if function was previously implicitly declared
+ (but not if we warned then). */
+ if (! warn_implicit
+ && IDENTIFIER_IMPLICIT_DECL (DECL_NAME (decl1)) != NULL_TREE)
+ cp_warning_at ("`%D' implicitly declared before its definition", IDENTIFIER_IMPLICIT_DECL (DECL_NAME (decl1)));
+
+ current_function_decl = decl1;
+
+ if (flag_cadillac)
+ cadillac_start_function (decl1);
+ else
+ announce_function (decl1);
+
+ if (TYPE_SIZE (TREE_TYPE (fntype)) == NULL_TREE)
+ {
+ if (IS_AGGR_TYPE (TREE_TYPE (fntype)))
+ error_with_aggr_type (TREE_TYPE (fntype),
+ "return-type `%s' is an incomplete type");
+ else
+ error ("return-type is an incomplete type");
+
+ /* Make it return void instead, but don't change the
+ type of the DECL_RESULT, in case we have a named return value. */
+ if (ctype)
+ TREE_TYPE (decl1)
+ = build_cplus_method_type (build_type_variant (ctype,
+ TREE_READONLY (decl1),
+ TREE_SIDE_EFFECTS (decl1)),
+ void_type_node,
+ FUNCTION_ARG_CHAIN (decl1));
+ else
+ TREE_TYPE (decl1)
+ = build_function_type (void_type_node,
+ TYPE_ARG_TYPES (TREE_TYPE (decl1)));
+ DECL_RESULT (decl1) = build_decl (RESULT_DECL, 0, TREE_TYPE (fntype));
+ }
+
+ if (warn_about_return_type)
+ warning ("return-type defaults to `int'");
+
+ /* Make the init_value nonzero so pushdecl knows this is not tentative.
+ error_mark_node is replaced below (in poplevel) with the BLOCK. */
+ DECL_INITIAL (decl1) = error_mark_node;
+
+ /* Didn't get anything from C. */
+ olddecl = NULL_TREE;
+
+ /* This function exists in static storage.
+ (This does not mean `static' in the C sense!) */
+ TREE_STATIC (decl1) = 1;
+
+ /* Record the decl so that the function name is defined.
+ If we already have a decl for this name, and it is a FUNCTION_DECL,
+ use the old decl. */
+
+ if (pre_parsed_p == 0)
+ {
+ current_function_decl = decl1 = pushdecl (decl1);
+ DECL_MAIN_VARIANT (decl1) = decl1;
+ fntype = TREE_TYPE (decl1);
+ }
+ else
+ current_function_decl = decl1;
+
+ /* If this function belongs to an interface, it is public.
+ If it belongs to someone else's interface, it is also external.
+ It doesn't matter whether it's inline or not. */
+ if (interface_unknown == 0)
+ {
+ TREE_PUBLIC (decl1) = 1;
+ DECL_EXTERNAL (decl1)
+ = (interface_only
+ || (DECL_INLINE (decl1) && ! flag_implement_inlines));
+ }
+ else if (DECL_EXPLICIT_INSTANTIATION (decl1))
+ /* PUBLIC and EXTERNAL set by do_*_instantiation */;
+ else
+ {
+ /* This is a definition, not a reference.
+ So normally clear DECL_EXTERNAL.
+ However, `extern inline' acts like a declaration except for
+ defining how to inline. So set DECL_EXTERNAL in that case. */
+ DECL_EXTERNAL (decl1) = current_extern_inline;
+
+ DECL_DEFER_OUTPUT (decl1) = DECL_INLINE (decl1);
+ }
+
+ if (ctype != NULL_TREE && DECL_STATIC_FUNCTION_P (decl1))
+ {
+ if (TREE_CODE (fntype) == METHOD_TYPE)
+ TREE_TYPE (decl1) = fntype
+ = build_function_type (TREE_TYPE (fntype),
+ TREE_CHAIN (TYPE_ARG_TYPES (fntype)));
+ last_function_parms = TREE_CHAIN (last_function_parms);
+ DECL_ARGUMENTS (decl1) = last_function_parms;
+ ctype = NULL_TREE;
+ }
+ restype = TREE_TYPE (fntype);
+
+ pushlevel (0);
+ current_binding_level->parm_flag = 1;
+
+ /* Save the parm names or decls from this function's declarator
+ where store_parm_decls will find them. */
+ current_function_parms = last_function_parms;
+ current_function_parm_tags = last_function_parm_tags;
+
+ GNU_xref_function (decl1, current_function_parms);
+
+ make_function_rtl (decl1);
+
+ if (ctype)
+ {
+ push_nested_class (ctype, 1);
+
+ /* If we're compiling a friend function, neither of the variables
+ current_class_decl nor current_class_type will have values. */
+ if (! doing_friend)
+ {
+ /* We know that this was set up by `grokclassfn'.
+ We do not wait until `store_parm_decls', since evil
+ parse errors may never get us to that point. Here
+ we keep the consistency between `current_class_type'
+ and `current_class_decl'. */
+ current_class_decl = last_function_parms;
+ my_friendly_assert (current_class_decl != NULL_TREE
+ && TREE_CODE (current_class_decl) == PARM_DECL, 162);
+ if (TREE_CODE (TREE_TYPE (current_class_decl)) == POINTER_TYPE)
+ {
+ tree variant = TREE_TYPE (TREE_TYPE (current_class_decl));
+ if (CLASSTYPE_INST_VAR (ctype) == NULL_TREE)
+ {
+ /* Can't call build_indirect_ref here, because it has special
+ logic to return C_C_D given this argument. */
+ C_C_D = build1 (INDIRECT_REF, current_class_type, current_class_decl);
+ CLASSTYPE_INST_VAR (ctype) = C_C_D;
+ }
+ else
+ {
+ C_C_D = CLASSTYPE_INST_VAR (ctype);
+ /* `current_class_decl' is different for every
+ function we compile. */
+ TREE_OPERAND (C_C_D, 0) = current_class_decl;
+ }
+ TREE_READONLY (C_C_D) = TYPE_READONLY (variant);
+ TREE_SIDE_EFFECTS (C_C_D) = TYPE_VOLATILE (variant);
+ TREE_THIS_VOLATILE (C_C_D) = TYPE_VOLATILE (variant);
+ }
+ else
+ C_C_D = current_class_decl;
+ }
+ }
+ else
+ {
+ if (DECL_STATIC_FUNCTION_P (decl1))
+ push_nested_class (DECL_CONTEXT (decl1), 2);
+ else
+ push_memoized_context (0, 1);
+ }
+
+ /* Allocate further tree nodes temporarily during compilation
+ of this function only. Tiemann moved up here from bottom of fn. */
+ temporary_allocation ();
+
+ /* Promote the value to int before returning it. */
+ if (C_PROMOTING_INTEGER_TYPE_P (restype))
+ {
+ /* It retains unsignedness if traditional or if it isn't
+ really getting wider. */
+ if (TREE_UNSIGNED (restype)
+ && (flag_traditional
+ || TYPE_PRECISION (restype)
+ == TYPE_PRECISION (integer_type_node)))
+ restype = unsigned_type_node;
+ else
+ restype = integer_type_node;
+ }
+ if (DECL_RESULT (decl1) == NULL_TREE)
+ DECL_RESULT (decl1) = build_decl (RESULT_DECL, 0, restype);
+
+ if (DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (decl1)))
+ {
+ dtor_label = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+ ctor_label = NULL_TREE;
+ }
+ else
+ {
+ dtor_label = NULL_TREE;
+ if (DECL_CONSTRUCTOR_P (decl1))
+ ctor_label = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+ }
+
+ /* If this fcn was already referenced via a block-scope `extern' decl
+ (or an implicit decl), propagate certain information about the usage. */
+ if (TREE_ADDRESSABLE (DECL_ASSEMBLER_NAME (decl1)))
+ TREE_ADDRESSABLE (decl1) = 1;
+
+ return 1;
+}
+
+/* Store the parameter declarations into the current function declaration.
+ This is called after parsing the parameter declarations, before
+ digesting the body of the function.
+
+ Also install the return value identifier, if any, in the binding contour. */
+
+void
+store_parm_decls ()
+{
+ register tree fndecl = current_function_decl;
+ register tree parm;
+ int parms_have_cleanups = 0;
+
+ /* This is either a chain of PARM_DECLs (when a prototype is used)
+ or a list of IDENTIFIER_NODEs (for an old-style definition). */
+ tree specparms = current_function_parms;
+
+ /* This is a list of types declared among parms in a prototype. */
+ tree parmtags = current_function_parm_tags;
+
+ /* This is a chain of any other decls that came in among the parm
+ declarations. If a parm is declared with enum {foo, bar} x;
+ then CONST_DECLs for foo and bar are put here. */
+ tree nonparms = NULL_TREE;
+
+ if (current_binding_level == global_binding_level)
+ fatal ("parse errors have confused me too much");
+
+ /* Initialize RTL machinery. */
+ init_function_start (fndecl, input_filename, lineno);
+
+ /* Declare __FUNCTION__ and __PRETTY_FUNCTION__ for this function. */
+ declare_function_name ();
+
+ /* Create a binding level for the parms. */
+ expand_start_bindings (0);
+
+ if (specparms != NULL_TREE)
+ {
+ /* This case is when the function was defined with an ANSI prototype.
+ The parms already have decls, so we need not do anything here
+ except record them as in effect
+ and complain if any redundant old-style parm decls were written. */
+
+ register tree next;
+
+ /* Must clear this because it might contain TYPE_DECLs declared
+ at class level. */
+ storedecls (NULL_TREE);
+ for (parm = nreverse (specparms); parm; parm = next)
+ {
+ next = TREE_CHAIN (parm);
+ if (TREE_CODE (parm) == PARM_DECL)
+ {
+ tree cleanup = maybe_build_cleanup (parm);
+ if (DECL_NAME (parm) == NULL_TREE)
+ {
+#if 0
+ cp_error_at ("parameter name omitted", parm);
+#else
+ /* for C++, this is not an error. */
+ pushdecl (parm);
+#endif
+ }
+ else if (TYPE_MAIN_VARIANT (TREE_TYPE (parm)) == void_type_node)
+ cp_error ("parameter `%D' declared void", parm);
+ else
+ {
+ /* Now fill in DECL_REFERENCE_SLOT for any of the parm decls.
+ A parameter is assumed not to have any side effects.
+ If this should change for any reason, then this
+ will have to wrap the bashed reference type in a save_expr.
+
+ Also, if the parameter type is declared to be an X
+ and there is an X(X&) constructor, we cannot lay it
+ into the stack (any more), so we make this parameter
+ look like it is really of reference type. Functions
+ which pass parameters to this function will know to
+ create a temporary in their frame, and pass a reference
+ to that. */
+
+ if (TREE_CODE (TREE_TYPE (parm)) == REFERENCE_TYPE
+ && TYPE_SIZE (TREE_TYPE (TREE_TYPE (parm))))
+ SET_DECL_REFERENCE_SLOT (parm, convert_from_reference (parm));
+
+ pushdecl (parm);
+ }
+ if (cleanup)
+ {
+ expand_decl (parm);
+ if (! expand_decl_cleanup (parm, cleanup))
+ cp_error ("parser lost in parsing declaration of `%D'",
+ parm);
+ parms_have_cleanups = 1;
+ }
+ }
+ else
+ {
+ /* If we find an enum constant or a type tag,
+ put it aside for the moment. */
+ TREE_CHAIN (parm) = NULL_TREE;
+ nonparms = chainon (nonparms, parm);
+ }
+ }
+
+ /* Get the decls in their original chain order
+ and record in the function. This is all and only the
+ PARM_DECLs that were pushed into scope by the loop above. */
+ DECL_ARGUMENTS (fndecl) = getdecls ();
+
+ storetags (chainon (parmtags, gettags ()));
+ }
+ else
+ DECL_ARGUMENTS (fndecl) = NULL_TREE;
+
+ /* Now store the final chain of decls for the arguments
+ as the decl-chain of the current lexical scope.
+ Put the enumerators in as well, at the front so that
+ DECL_ARGUMENTS is not modified. */
+
+ storedecls (chainon (nonparms, DECL_ARGUMENTS (fndecl)));
+
+ /* Initialize the RTL code for the function. */
+ DECL_SAVED_INSNS (fndecl) = NULL_RTX;
+ expand_function_start (fndecl, parms_have_cleanups);
+
+ /* Create a binding contour which can be used to catch
+ cleanup-generated temporaries. Also, if the return value needs or
+ has initialization, deal with that now. */
+ if (parms_have_cleanups)
+ {
+ pushlevel (0);
+ expand_start_bindings (0);
+ }
+
+ current_function_parms_stored = 1;
+
+ if (flag_gc)
+ {
+ maybe_gc_cleanup = build_tree_list (NULL_TREE, error_mark_node);
+ if (! expand_decl_cleanup (NULL_TREE, maybe_gc_cleanup))
+ cp_error ("parser lost in parsing declaration of `%D'", fndecl);
+ }
+
+ /* If this function is `main', emit a call to `__main'
+ to run global initializers, etc. */
+ if (DECL_NAME (fndecl)
+ && IDENTIFIER_LENGTH (DECL_NAME (fndecl)) == 4
+ && strcmp (IDENTIFIER_POINTER (DECL_NAME (fndecl)), "main") == 0
+ && DECL_CONTEXT (fndecl) == NULL_TREE)
+ {
+ expand_main_function ();
+
+ if (flag_gc)
+ expand_expr (build_function_call (lookup_name (get_identifier ("__gc_main"), 0), NULL_TREE),
+ 0, VOIDmode, 0);
+
+ if (flag_dossier)
+ output_builtin_tdesc_entries ();
+ }
+}
+
+/* Bind a name and initialization to the return value of
+ the current function. */
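+
+/* A named return value (a GNU extension) looks like
+
+ X f () return r (1) { ... }
+
+ where `r' names and initializes f's return value; ANSI C++ has
+ no such construct, hence the pedwarn below. */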
+void
+store_return_init (return_id, init)
+ tree return_id, init;
+{
+ tree decl = DECL_RESULT (current_function_decl);
+
+ if (flag_ansi)
+ /* Give this error as many times as there are occurrences,
+ so that users can use Emacs compilation buffers to find
+ and fix all such places. */
+ pedwarn ("ANSI C++ does not permit named return values");
+
+ if (return_id != NULL_TREE)
+ {
+ if (DECL_NAME (decl) == NULL_TREE)
+ {
+ DECL_NAME (decl) = return_id;
+ DECL_ASSEMBLER_NAME (decl) = return_id;
+ }
+ else
+ error ("return identifier `%s' already in place",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ }
+
+ /* Can't let this happen for constructors. */
+ if (DECL_CONSTRUCTOR_P (current_function_decl))
+ {
+ error ("can't redefine default return value for constructors");
+ return;
+ }
+
+ /* If we have a named return value, put that in our scope as well. */
+ if (DECL_NAME (decl) != NULL_TREE)
+ {
+ /* If this named return value comes in a register,
+ put it in a pseudo-register. */
+ if (DECL_REGISTER (decl))
+ {
+ original_result_rtx = DECL_RTL (decl);
+ DECL_RTL (decl) = gen_reg_rtx (DECL_MODE (decl));
+ }
+
+ /* Let `finish_decl' know that this initializer is ok. */
+ DECL_INITIAL (decl) = init;
+ pushdecl (decl);
+ finish_decl (decl, init, 0, 0);
+ }
+}
+
+#if 0
+/* Generate code for default X() constructor. */
+static void
+build_default_constructor (fndecl)
+ tree fndecl;
+{
+ int i = CLASSTYPE_N_BASECLASSES (current_class_type);
+ tree parm = TREE_CHAIN (DECL_ARGUMENTS (fndecl));
+ tree fields = TYPE_FIELDS (current_class_type);
+ tree binfos = TYPE_BINFO_BASETYPES (current_class_type);
+
+ if (TYPE_USES_VIRTUAL_BASECLASSES (current_class_type))
+ parm = TREE_CHAIN (parm);
+ parm = DECL_REFERENCE_SLOT (parm);
+
+ while (--i >= 0)
+ {
+ tree basetype = TREE_VEC_ELT (binfos, i);
+ if (TYPE_HAS_INIT_REF (basetype))
+ {
+ tree name = TYPE_NAME (basetype);
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+ current_base_init_list = tree_cons (name, parm, current_base_init_list);
+ }
+ }
+ for (; fields; fields = TREE_CHAIN (fields))
+ {
+ tree name, init;
+ if (TREE_STATIC (fields))
+ continue;
+ if (TREE_CODE (fields) != FIELD_DECL)
+ continue;
+ if (DECL_NAME (fields))
+ {
+ if (VFIELD_NAME_P (DECL_NAME (fields)))
+ continue;
+ if (VBASE_NAME_P (DECL_NAME (fields)))
+ continue;
+
+ /* True for duplicate members. */
+ if (IDENTIFIER_CLASS_VALUE (DECL_NAME (fields)) != fields)
+ continue;
+ }
+
+ init = build (COMPONENT_REF, TREE_TYPE (fields), parm, fields);
+ init = build_tree_list (NULL_TREE, init);
+
+ current_member_init_list
+ = tree_cons (DECL_NAME (fields), init, current_member_init_list);
+ }
+}
+#endif
+
+/* Finish up a function declaration and compile that function
+ all the way to assembler language output. Then free the storage
+ for the function definition.
+
+ This is called after parsing the body of the function definition.
+ LINENO is the current line number.
+
+ C++: CALL_POPLEVEL is non-zero if an extra call to poplevel
+ (and expand_end_bindings) must be made to take care of the binding
+ contour for the base initializers. This is only relevant for
+ constructors. */
+
+void
+finish_function (lineno, call_poplevel)
+ int lineno;
+ int call_poplevel;
+{
+ register tree fndecl = current_function_decl;
+ tree fntype, ctype = NULL_TREE;
+ rtx head, last_parm_insn, mark;
+ /* Label to use if this function is supposed to return a value. */
+ tree no_return_label = NULL_TREE;
+ tree decls = NULL_TREE;
+
+ /* When we get some parse errors, we can end up without a
+ current_function_decl, so cope. */
+ if (fndecl == NULL_TREE)
+ return;
+
+ fntype = TREE_TYPE (fndecl);
+
+/* TREE_READONLY (fndecl) = 1;
+ This caused &foo to be of type ptr-to-const-function
+ which then got a warning when stored in a ptr-to-function variable. */
+
+ /* This happens on strange parse errors. */
+ if (! current_function_parms_stored)
+ {
+ call_poplevel = 0;
+ store_parm_decls ();
+ }
+
+ if (write_symbols != NO_DEBUG && TREE_CODE (fntype) != METHOD_TYPE)
+ {
+ tree ttype = target_type (fntype);
+ tree parmdecl;
+
+ if (IS_AGGR_TYPE (ttype))
+ /* Let debugger know it should output info for this type. */
+ note_debug_info_needed (ttype);
+
+ for (parmdecl = DECL_ARGUMENTS (fndecl); parmdecl; parmdecl = TREE_CHAIN (parmdecl))
+ {
+ ttype = target_type (TREE_TYPE (parmdecl));
+ if (IS_AGGR_TYPE (ttype))
+ /* Let debugger know it should output info for this type. */
+ note_debug_info_needed (ttype);
+ }
+ }
+
+ /* Clean house because we will need to reorder insns here. */
+ do_pending_stack_adjust ();
+
+ if (dtor_label)
+ {
+ tree binfo = TYPE_BINFO (current_class_type);
+ tree cond = integer_one_node;
+ tree exprstmt, vfields;
+ tree in_charge_node = lookup_name (in_charge_identifier, 0);
+ tree virtual_size;
+ int ok_to_optimize_dtor = 0;
+
+ if (current_function_assigns_this)
+ cond = build (NE_EXPR, integer_type_node,
+ current_class_decl, integer_zero_node);
+ else
+ {
+ int n_baseclasses = CLASSTYPE_N_BASECLASSES (current_class_type);
+
+ /* If this destructor is empty, then we don't need to check
+ whether `this' is NULL in some cases. */
+ mark = get_last_insn ();
+ last_parm_insn = get_first_nonparm_insn ();
+
+ if ((flag_this_is_variable & 1) == 0)
+ ok_to_optimize_dtor = 1;
+ else if (mark == last_parm_insn)
+ ok_to_optimize_dtor
+ = (n_baseclasses == 0
+ || (n_baseclasses == 1
+ && TYPE_HAS_DESTRUCTOR (TYPE_BINFO_BASETYPE (current_class_type, 0))));
+ }
+
+ /* These initializations might go inline. Protect
+ the binding level of the parms. */
+ pushlevel (0);
+ expand_start_bindings (0);
+
+ if (current_function_assigns_this)
+ {
+ current_function_assigns_this = 0;
+ current_function_just_assigned_this = 0;
+ }
+
+ /* Generate the code to call destructor on base class.
+ If this destructor belongs to a class with virtual
+ functions, then set the virtual function table
+ pointer to represent the type of our base class. */
+
+ /* This side-effect makes call to `build_delete' generate the
+ code we have to have at the end of this destructor. */
+ TYPE_HAS_DESTRUCTOR (current_class_type) = 0;
+
+ /* These are two cases where we cannot delegate deletion. */
+ if (TYPE_USES_VIRTUAL_BASECLASSES (current_class_type)
+ || TYPE_GETS_REG_DELETE (current_class_type))
+ exprstmt = build_delete (current_class_type, C_C_D, integer_zero_node,
+ LOOKUP_NONVIRTUAL|LOOKUP_DESTRUCTOR, 0);
+ else
+ exprstmt = build_delete (current_class_type, C_C_D, in_charge_node,
+ LOOKUP_NONVIRTUAL|LOOKUP_DESTRUCTOR, 0);
+
+ /* If we did not assign to this, then `this' is non-zero at
+ the end of a destructor. As a special optimization, don't
+ emit the test if this is an empty destructor: if the destructor
+ does nothing, there is nothing to protect; if it calls a base
+ destructor, the base destructor will perform the test. */
+
+ if (exprstmt != error_mark_node
+ && (TREE_CODE (exprstmt) != NOP_EXPR
+ || TREE_OPERAND (exprstmt, 0) != integer_zero_node
+ || TYPE_USES_VIRTUAL_BASECLASSES (current_class_type)))
+ {
+ expand_label (dtor_label);
+ if (cond != integer_one_node)
+ expand_start_cond (cond, 0);
+ if (exprstmt != void_zero_node)
+ /* Don't call `expand_expr_stmt' if we're not going to do
+ anything, since -Wall will give a diagnostic. */
+ expand_expr_stmt (exprstmt);
+
+ /* Run destructor on all virtual baseclasses. */
+ if (TYPE_USES_VIRTUAL_BASECLASSES (current_class_type))
+ {
+ tree vbases = nreverse (copy_list (CLASSTYPE_VBASECLASSES (current_class_type)));
+ expand_start_cond (build (BIT_AND_EXPR, integer_type_node,
+ in_charge_node, integer_two_node), 0);
+ while (vbases)
+ {
+ if (TYPE_NEEDS_DESTRUCTOR (BINFO_TYPE (vbases)))
+ {
+ tree ptr = convert_pointer_to_vbase (vbases, current_class_decl);
+ expand_expr_stmt (build_delete (TYPE_POINTER_TO (BINFO_TYPE (vbases)),
+ ptr, integer_zero_node,
+ LOOKUP_NONVIRTUAL|LOOKUP_DESTRUCTOR|LOOKUP_HAS_IN_CHARGE, 0));
+ }
+ vbases = TREE_CHAIN (vbases);
+ }
+ expand_end_cond ();
+ }
+
+ do_pending_stack_adjust ();
+ if (cond != integer_one_node)
+ expand_end_cond ();
+ }
+
+ TYPE_HAS_DESTRUCTOR (current_class_type) = 1;
+
+ virtual_size = c_sizeof (current_class_type);
+
+ /* At the end, call delete if that's what's requested. */
+ if (TYPE_GETS_REG_DELETE (current_class_type))
+ /* This NOP_EXPR means we are in a static call context. */
+ exprstmt =
+ build_method_call
+ (build_indirect_ref
+ (build1 (NOP_EXPR, TYPE_POINTER_TO (current_class_type),
+ error_mark_node),
+ NULL_PTR),
+ ansi_opname[(int) DELETE_EXPR],
+ tree_cons (NULL_TREE, current_class_decl,
+ build_tree_list (NULL_TREE, virtual_size)),
+ NULL_TREE, LOOKUP_NORMAL);
+ else if (TYPE_USES_VIRTUAL_BASECLASSES (current_class_type))
+ exprstmt = build_x_delete (ptr_type_node, current_class_decl, 0,
+ virtual_size);
+ else
+ exprstmt = NULL_TREE;
+
+ if (exprstmt)
+ {
+ cond = build (BIT_AND_EXPR, integer_type_node,
+ in_charge_node, integer_one_node);
+ expand_start_cond (cond, 0);
+ expand_expr_stmt (exprstmt);
+ expand_end_cond ();
+ }
+
+ /* End of destructor. */
+ expand_end_bindings (NULL_TREE, getdecls() != NULL_TREE, 0);
+ poplevel (2, 0, 0); /* XXX change to 1 */
+
+      /* Back to the top of the destructor. */
+      /* Don't execute destructor code if `this' is NULL. */
+ mark = get_last_insn ();
+ last_parm_insn = get_first_nonparm_insn ();
+ if (last_parm_insn == NULL_RTX)
+ last_parm_insn = mark;
+ else
+ last_parm_insn = previous_insn (last_parm_insn);
+
+ /* Make all virtual function table pointers in non-virtual base
+ classes point to CURRENT_CLASS_TYPE's virtual function
+ tables. */
+ expand_direct_vtbls_init (binfo, binfo, 1, 0, current_class_decl);
+ if (TYPE_USES_VIRTUAL_BASECLASSES (current_class_type))
+ expand_indirect_vtbls_init (binfo, C_C_D, current_class_decl, 0);
+ if (! ok_to_optimize_dtor)
+ {
+ cond = build_binary_op (NE_EXPR,
+ current_class_decl, integer_zero_node, 1);
+ expand_start_cond (cond, 0);
+ }
+ if (mark != get_last_insn ())
+ reorder_insns (next_insn (mark), get_last_insn (), last_parm_insn);
+ if (! ok_to_optimize_dtor)
+ expand_end_cond ();
+ }
+ else if (current_function_assigns_this)
+ {
+ /* Does not need to call emit_base_init, because
+ that is done (if needed) just after assignment to this
+ is seen. */
+
+ if (DECL_CONSTRUCTOR_P (current_function_decl))
+ {
+ expand_label (ctor_label);
+ ctor_label = NULL_TREE;
+
+ if (call_poplevel)
+ {
+ decls = getdecls ();
+ expand_end_bindings (decls, decls != NULL_TREE, 0);
+ poplevel (decls != NULL_TREE, 0, 0);
+ }
+ c_expand_return (current_class_decl);
+ }
+ else if (TYPE_MAIN_VARIANT (TREE_TYPE (
+ DECL_RESULT (current_function_decl))) != void_type_node
+ && return_label != NULL_RTX)
+ no_return_label = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ current_function_assigns_this = 0;
+ current_function_just_assigned_this = 0;
+ base_init_insns = NULL_RTX;
+ }
+ else if (DECL_CONSTRUCTOR_P (fndecl))
+ {
+ tree allocated_this;
+ tree cond, thenclause;
+ /* Allow constructor for a type to get a new instance of the object
+ using `build_new'. */
+ tree abstract_virtuals = CLASSTYPE_ABSTRACT_VIRTUALS (current_class_type);
+ CLASSTYPE_ABSTRACT_VIRTUALS (current_class_type) = NULL_TREE;
+
+ DECL_RETURNS_FIRST_ARG (fndecl) = 1;
+
+ if (flag_this_is_variable > 0)
+ {
+ cond = build_binary_op (EQ_EXPR,
+ current_class_decl, integer_zero_node, 1);
+ thenclause = build_modify_expr (current_class_decl, NOP_EXPR,
+ build_new (NULL_TREE, current_class_type, void_type_node, 0));
+ }
+
+ CLASSTYPE_ABSTRACT_VIRTUALS (current_class_type) = abstract_virtuals;
+
+ /* must keep the first insn safe. */
+ head = get_insns ();
+
+ /* this note will come up to the top with us. */
+ mark = get_last_insn ();
+
+ if (flag_this_is_variable > 0)
+ {
+ expand_start_cond (cond, 0);
+ expand_expr_stmt (thenclause);
+ expand_end_cond ();
+ }
+
+#if 0
+ if (DECL_NAME (fndecl) == NULL_TREE
+ && TREE_CHAIN (DECL_ARGUMENTS (fndecl)) != NULL_TREE)
+ build_default_constructor (fndecl);
+#endif
+
+ /* Emit insns from `emit_base_init' which sets up virtual
+ function table pointer(s). */
+ emit_insns (base_init_insns);
+ base_init_insns = NULL_RTX;
+
+ /* This is where the body of the constructor begins.
+ If there were no insns in this function body, then the
+ last_parm_insn is also the last insn.
+
+ If optimization is enabled, last_parm_insn may move, so
+ we don't hold on to it (across emit_base_init). */
+ last_parm_insn = get_first_nonparm_insn ();
+ if (last_parm_insn == NULL_RTX)
+ last_parm_insn = mark;
+ else
+ last_parm_insn = previous_insn (last_parm_insn);
+
+ if (mark != get_last_insn ())
+ reorder_insns (next_insn (mark), get_last_insn (), last_parm_insn);
+
+ /* This is where the body of the constructor ends. */
+ expand_label (ctor_label);
+ ctor_label = NULL_TREE;
+
+ if (call_poplevel)
+ {
+ expand_end_bindings (decls = getdecls (), decls != NULL_TREE, 0);
+ poplevel (decls != NULL_TREE, 1, 0);
+ }
+
+ c_expand_return (current_class_decl);
+
+ current_function_assigns_this = 0;
+ current_function_just_assigned_this = 0;
+ }
+ else if (IDENTIFIER_LENGTH (DECL_NAME (fndecl)) == 4
+ && ! strcmp (IDENTIFIER_POINTER (DECL_NAME (fndecl)), "main")
+ && DECL_CONTEXT (fndecl) == NULL_TREE)
+ {
+      /* Make it so that `main' always returns a success status by
+	 default: 0, except on VMS, where odd status values (here 1)
+	 indicate success. */
+#ifdef VMS
+ c_expand_return (integer_one_node);
+#else
+ c_expand_return (integer_zero_node);
+#endif
+ }
+ else if (return_label != NULL_RTX
+ && current_function_return_value == NULL_TREE
+ && ! DECL_NAME (DECL_RESULT (current_function_decl)))
+ no_return_label = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ if (flag_gc)
+ expand_gc_prologue_and_epilogue ();
+
+  /* That's the end of the vtable decl's life. Need to mark it as such
+ if doing stupid register allocation.
+
+ Note that current_vtable_decl is really an INDIRECT_REF
+ on top of a VAR_DECL here. */
+ if (obey_regdecls && current_vtable_decl)
+ use_variable (DECL_RTL (TREE_OPERAND (current_vtable_decl, 0)));
+
+ /* If this function is supposed to return a value, ensure that
+ we do not fall into the cleanups by mistake. The end of our
+ function will look like this:
+
+ user code (may have return stmt somewhere)
+ goto no_return_label
+ cleanup_label:
+ cleanups
+ goto return_label
+ no_return_label:
+ NOTE_INSN_FUNCTION_END
+ return_label:
+ things for return
+
+ If the user omits a return stmt in the USER CODE section, we
+ will have a control path which reaches NOTE_INSN_FUNCTION_END.
+ Otherwise, we won't. */
+ if (no_return_label)
+ {
+ DECL_CONTEXT (no_return_label) = fndecl;
+ DECL_INITIAL (no_return_label) = error_mark_node;
+ DECL_SOURCE_FILE (no_return_label) = input_filename;
+ DECL_SOURCE_LINE (no_return_label) = lineno;
+ expand_goto (no_return_label);
+ }
+
+ if (cleanup_label)
+ {
+ /* remove the binding contour which is used
+ to catch cleanup-generated temporaries. */
+ expand_end_bindings (0, 0, 0);
+ poplevel (0, 0, 0);
+ }
+
+ if (cleanup_label)
+ /* Emit label at beginning of cleanup code for parameters. */
+ emit_label (cleanup_label);
+
+ /* Get return value into register if that's where it's supposed to be. */
+ if (original_result_rtx)
+ fixup_result_decl (DECL_RESULT (fndecl), original_result_rtx);
+
+ /* Finish building code that will trigger warnings if users forget
+ to make their functions return values. */
+ if (no_return_label || cleanup_label)
+ emit_jump (return_label);
+ if (no_return_label)
+ {
+ /* We don't need to call `expand_*_return' here because we
+ don't need any cleanups here--this path of code is only
+ for error checking purposes. */
+ expand_label (no_return_label);
+ }
+
+  /* Reset scope for C++: if we were in the scope of a class,
+     then when we finish this function, we are no longer so.
+ This cannot be done until we know for sure that no more
+ class members will ever be referenced in this function
+ (i.e., calls to destructors). */
+ if (current_class_name)
+ {
+ ctype = current_class_type;
+ pop_nested_class (1);
+ }
+ else
+ pop_memoized_context (1);
+
+ /* Generate rtl for function exit. */
+ expand_function_end (input_filename, lineno, 1);
+
+ if (flag_handle_exceptions)
+ expand_exception_blocks();
+
+ /* This must come after expand_function_end because cleanups might
+ have declarations (from inline functions) that need to go into
+ this function's blocks. */
+ if (current_binding_level->parm_flag != 1)
+ my_friendly_abort (122);
+ poplevel (1, 0, 1);
+
+ /* Must mark the RESULT_DECL as being in this function. */
+ DECL_CONTEXT (DECL_RESULT (fndecl)) = DECL_INITIAL (fndecl);
+
+ /* Obey `register' declarations if `setjmp' is called in this fn. */
+ if (flag_traditional && current_function_calls_setjmp)
+ setjmp_protect (DECL_INITIAL (fndecl));
+
+ /* Set the BLOCK_SUPERCONTEXT of the outermost function scope to point
+ to the FUNCTION_DECL node itself. */
+ BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
+
+ /* So we can tell if jump_optimize sets it to 1. */
+ can_reach_end = 0;
+
+ /* ??? Compensate for Sun brain damage in dealing with data segments
+ of PIC code. */
+ if (flag_pic
+ && (DECL_CONSTRUCTOR_P (fndecl)
+ || DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (fndecl)))
+ && CLASSTYPE_NEEDS_VIRTUAL_REINIT (TYPE_METHOD_BASETYPE (fntype)))
+ DECL_INLINE (fndecl) = 0;
+
+ if (DECL_EXTERNAL (fndecl)
+ /* This function is just along for the ride. If we can make
+ it inline, that's great. Otherwise, just punt it. */
+ && (DECL_INLINE (fndecl) == 0
+ || flag_no_inline
+ || function_cannot_inline_p (fndecl)))
+ {
+ extern int rtl_dump_and_exit;
+ int old_rtl_dump_and_exit = rtl_dump_and_exit;
+ int inline_spec = DECL_INLINE (fndecl);
+
+ /* This throws away the code for FNDECL. */
+ rtl_dump_and_exit = 1;
+ /* This throws away the memory of the code for FNDECL. */
+ if (flag_no_inline)
+ DECL_INLINE (fndecl) = 0;
+ rest_of_compilation (fndecl);
+ rtl_dump_and_exit = old_rtl_dump_and_exit;
+ DECL_INLINE (fndecl) = inline_spec;
+ }
+ else
+ {
+ /* Run the optimizers and output the assembler code for this
+ function. */
+ rest_of_compilation (fndecl);
+ }
+
+ if (DECL_INLINE (fndecl)
+ && !TREE_ASM_WRITTEN (fndecl) && DECL_FUNCTION_MEMBER_P (fndecl))
+ {
+ mark_inline_for_output (fndecl);
+ }
+
+ if (ctype && TREE_ASM_WRITTEN (fndecl))
+ note_debug_info_needed (ctype);
+
+ current_function_returns_null |= can_reach_end;
+
+ /* Since we don't normally go through c_expand_return for constructors,
+ this normally gets the wrong value.
+ Also, named return values have their return codes emitted after
+ NOTE_INSN_FUNCTION_END, confusing jump.c. */
+ if (DECL_CONSTRUCTOR_P (fndecl)
+ || DECL_NAME (DECL_RESULT (fndecl)) != NULL_TREE)
+ current_function_returns_null = 0;
+
+ if (TREE_THIS_VOLATILE (fndecl) && current_function_returns_null)
+ cp_warning ("`noreturn' function `%D' does return", fndecl);
+ else if ((warn_return_type || pedantic)
+ && current_function_returns_null
+ && TYPE_MAIN_VARIANT (TREE_TYPE (fntype)) != void_type_node)
+ {
+ /* If this function returns non-void and control can drop through,
+ complain. */
+ cp_pedwarn ("control reaches end of non-void function `%D'", fndecl);
+ }
+ /* With just -W, complain only if function returns both with
+ and without a value. */
+ else if (extra_warnings
+ && current_function_returns_value && current_function_returns_null)
+ warning ("this function may return with or without a value");
+
+ /* Free all the tree nodes making up this function. */
+ /* Switch back to allocating nodes permanently
+ until we start another function. */
+ permanent_allocation (1);
+
+ if (flag_cadillac)
+ cadillac_finish_function (fndecl);
+
+ if (DECL_SAVED_INSNS (fndecl) == NULL_RTX)
+ {
+ /* Stop pointing to the local nodes about to be freed. */
+ /* But DECL_INITIAL must remain nonzero so we know this
+ was an actual function definition. */
+ DECL_INITIAL (fndecl) = error_mark_node;
+ if (! DECL_CONSTRUCTOR_P (fndecl)
+ || !TYPE_USES_VIRTUAL_BASECLASSES (TYPE_METHOD_BASETYPE (fntype)))
+ DECL_ARGUMENTS (fndecl) = NULL_TREE;
+ }
+
+ /* Let the error reporting routines know that we're outside a function. */
+ current_function_decl = NULL_TREE;
+ named_label_uses = NULL_TREE;
+}
+
+/* Create the FUNCTION_DECL for a function definition.
+ LINE1 is the line number that the definition absolutely begins on.
+ LINE2 is the line number that the name of the function appears on.
+ DECLSPECS and DECLARATOR are the parts of the declaration;
+ they describe the return type and the name of the function,
+ but twisted together in a fashion that parallels the syntax of C.
+
+ This function creates a binding context for the function body
+ as well as setting up the FUNCTION_DECL in current_function_decl.
+
+ Returns a FUNCTION_DECL on success.
+
+ If the DECLARATOR is not suitable for a function (it defines a datum
+ instead), we return 0, which tells yyparse to report a parse error.
+
+ May return void_type_node indicating that this method is actually
+ a friend. See grokfield for more details.
+
+   Came here with a `pushlevel' already done.
+
+ DO NOT MAKE ANY CHANGES TO THIS CODE WITHOUT MAKING CORRESPONDING
+ CHANGES TO CODE IN `grokfield'. */
+tree
+start_method (declspecs, declarator, raises)
+ tree declarator, declspecs, raises;
+{
+ tree fndecl = grokdeclarator (declarator, declspecs, MEMFUNCDEF, 0, raises);
+
+ /* Something too ugly to handle. */
+ if (fndecl == NULL_TREE)
+ return NULL_TREE;
+
+ /* Pass friends other than inline friend functions back. */
+ if (TYPE_MAIN_VARIANT (fndecl) == void_type_node)
+ return fndecl;
+
+ if (TREE_CODE (fndecl) != FUNCTION_DECL)
+ /* Not a function, tell parser to report parse error. */
+ return NULL_TREE;
+
+ if (IS_SIGNATURE (current_class_type))
+ {
+ IS_DEFAULT_IMPLEMENTATION (fndecl) = 1;
+ /* In case we need this info later. */
+ HAS_DEFAULT_IMPLEMENTATION (current_class_type) = 1;
+ }
+
+ if (DECL_IN_AGGR_P (fndecl))
+ {
+ if (IDENTIFIER_ERROR_LOCUS (DECL_ASSEMBLER_NAME (fndecl)) != current_class_type)
+ {
+ if (DECL_CONTEXT (fndecl))
+ cp_error ("`%D' is already defined in class %s", fndecl,
+ TYPE_NAME_STRING (DECL_CONTEXT (fndecl)));
+ }
+ return void_type_node;
+ }
+
+ if (flag_default_inline)
+ DECL_INLINE (fndecl) = 1;
+
+ if (processing_template_defn)
+ SET_DECL_IMPLICIT_INSTANTIATION (fndecl);
+
+ /* We read in the parameters on the maybepermanent_obstack,
+ but we won't be getting back to them until after we
+ may have clobbered them. So the call to preserve_data
+ will keep them safe. */
+ preserve_data ();
+
+ if (! DECL_FRIEND_P (fndecl))
+ {
+ if (DECL_CHAIN (fndecl) != NULL_TREE)
+ {
+ /* Need a fresh node here so that we don't get circularity
+ when we link these together. If FNDECL was a friend, then
+ `pushdecl' does the right thing, which is nothing wrt its
+ current value of DECL_CHAIN. */
+ fndecl = copy_node (fndecl);
+ }
+ if (TREE_CHAIN (fndecl))
+ {
+ fndecl = copy_node (fndecl);
+ TREE_CHAIN (fndecl) = NULL_TREE;
+ }
+
+ if (DECL_CONSTRUCTOR_P (fndecl))
+ {
+ if (! grok_ctor_properties (current_class_type, fndecl))
+ return void_type_node;
+ }
+ else if (IDENTIFIER_OPNAME_P (DECL_NAME (fndecl)))
+ grok_op_properties (fndecl, DECL_VIRTUAL_P (fndecl), 0);
+ }
+
+ finish_decl (fndecl, NULL_TREE, NULL_TREE, 0);
+
+ /* Make a place for the parms */
+ pushlevel (0);
+ current_binding_level->parm_flag = 1;
+
+ DECL_IN_AGGR_P (fndecl) = 1;
+ return fndecl;
+}
+
+/* Go through the motions of finishing a function definition.
+ We don't compile this method until after the whole class has
+ been processed.
+
+ FINISH_METHOD must return something that looks as though it
+ came from GROKFIELD (since we are defining a method, after all).
+
+ This is called after parsing the body of the function definition.
+ STMTS is the chain of statements that makes up the function body.
+
+ DECL is the ..._DECL that `start_method' provided. */
+
+tree
+finish_method (decl)
+ tree decl;
+{
+ register tree fndecl = decl;
+ tree old_initial;
+
+ register tree link;
+
+ if (TYPE_MAIN_VARIANT (decl) == void_type_node)
+ return decl;
+
+ old_initial = DECL_INITIAL (fndecl);
+
+ /* Undo the level for the parms (from start_method).
+ This is like poplevel, but it causes nothing to be
+ saved. Saving information here confuses symbol-table
+ output routines. Besides, this information will
+ be correctly output when this method is actually
+ compiled. */
+
+ /* Clear out the meanings of the local variables of this level;
+ also record in each decl which block it belongs to. */
+
+ for (link = current_binding_level->names; link; link = TREE_CHAIN (link))
+ {
+ if (DECL_NAME (link) != NULL_TREE)
+ IDENTIFIER_LOCAL_VALUE (DECL_NAME (link)) = 0;
+ my_friendly_assert (TREE_CODE (link) != FUNCTION_DECL, 163);
+ DECL_CONTEXT (link) = NULL_TREE;
+ }
+
+ /* Restore all name-meanings of the outer levels
+ that were shadowed by this level. */
+
+ for (link = current_binding_level->shadowed; link; link = TREE_CHAIN (link))
+ IDENTIFIER_LOCAL_VALUE (TREE_PURPOSE (link)) = TREE_VALUE (link);
+ for (link = current_binding_level->class_shadowed;
+ link; link = TREE_CHAIN (link))
+ IDENTIFIER_CLASS_VALUE (TREE_PURPOSE (link)) = TREE_VALUE (link);
+ for (link = current_binding_level->type_shadowed;
+ link; link = TREE_CHAIN (link))
+ IDENTIFIER_TYPE_VALUE (TREE_PURPOSE (link)) = TREE_VALUE (link);
+
+ GNU_xref_end_scope ((HOST_WIDE_INT) current_binding_level,
+ (HOST_WIDE_INT) current_binding_level->level_chain,
+ current_binding_level->parm_flag,
+ current_binding_level->keep,
+ current_binding_level->tag_transparent);
+
+ poplevel (0, 0, 0);
+
+ DECL_INITIAL (fndecl) = old_initial;
+
+ /* We used to check if the context of FNDECL was different from
+ current_class_type as another way to get inside here. This didn't work
+ for String.cc in libg++. */
+ if (DECL_FRIEND_P (fndecl))
+ {
+ CLASSTYPE_INLINE_FRIENDS (current_class_type)
+ = tree_cons (NULL_TREE, fndecl, CLASSTYPE_INLINE_FRIENDS (current_class_type));
+ decl = void_type_node;
+ }
+
+ return decl;
+}
+
+/* Called when a new struct TYPE is defined.
+ If this structure or union completes the type of any previous
+ variable declaration, lay it out and output its rtl. */
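+/* A sketch of the case this handles:
+
+     extern struct S s;        (S is incomplete here)
+     struct S { int i; };      (S is completed here)
+
+   The definition of S reaches this routine, which can now lay out
+   `s', emit its rtl and, at local scope, set up any cleanup the
+   declaration needs. */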
+
+void
+hack_incomplete_structures (type)
+ tree type;
+{
+ tree decl;
+
+ if (current_binding_level->n_incomplete == 0)
+ return;
+
+ if (!type) /* Don't do this for class templates. */
+ return;
+
+ for (decl = current_binding_level->names; decl; decl = TREE_CHAIN (decl))
+ if (TREE_TYPE (decl) == type
+ || (TREE_TYPE (decl)
+ && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
+ && TREE_TYPE (TREE_TYPE (decl)) == type))
+ {
+ if (TREE_CODE (decl) == TYPE_DECL)
+ layout_type (TREE_TYPE (decl));
+ else
+ {
+ int toplevel = global_binding_level == current_binding_level;
+ if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
+ && TREE_TYPE (TREE_TYPE (decl)) == type)
+ layout_type (TREE_TYPE (decl));
+ layout_decl (decl, 0);
+ rest_of_decl_compilation (decl, NULL_PTR, toplevel, 0);
+ if (! toplevel)
+ {
+ tree cleanup;
+ expand_decl (decl);
+ cleanup = maybe_build_cleanup (decl);
+ expand_decl_init (decl);
+ if (! expand_decl_cleanup (decl, cleanup))
+ cp_error ("parser lost in parsing declaration of `%D'",
+ decl);
+ }
+ }
+ my_friendly_assert (current_binding_level->n_incomplete > 0, 164);
+ --current_binding_level->n_incomplete;
+ }
+}
+
+/* Nonzero if presently building a cleanup. Needed because
+ SAVE_EXPRs are not the right things to use inside of cleanups.
+   They are only ever evaluated once, whereas the cleanup
+   might be evaluated several times. In this case, a later evaluation
+ of the cleanup might fill in the SAVE_EXPR_RTL, and it will
+ not be valid for an earlier cleanup. */
+
+int building_cleanup;
+
+/* If DECL is of a type which needs a cleanup, build that cleanup here.
+ We don't build cleanups if just going for syntax checking, since
+ fixup_cleanups does not know how to not handle them.
+
+ Don't build these on the momentary obstack; they must live
+ the life of the binding contour. */
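+/* Roughly, for a local object
+
+     X x;                      (X has a destructor)
+
+   the cleanup built here amounts to running X's destructor on `&x',
+   without freeing the storage, when the binding contour for `x' is
+   exited. */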
+tree
+maybe_build_cleanup (decl)
+ tree decl;
+{
+ tree type = TREE_TYPE (decl);
+ if (TYPE_NEEDS_DESTRUCTOR (type))
+ {
+ int temp = 0, flags = LOOKUP_NORMAL|LOOKUP_DESTRUCTOR;
+ tree rval;
+ int old_building_cleanup = building_cleanup;
+ building_cleanup = 1;
+
+ if (TREE_CODE (decl) != PARM_DECL)
+ temp = suspend_momentary ();
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ rval = decl;
+ else
+ {
+ mark_addressable (decl);
+ rval = build_unary_op (ADDR_EXPR, decl, 0);
+ }
+
+ /* Optimize for space over speed here. */
+ if (! TYPE_USES_VIRTUAL_BASECLASSES (type)
+ || flag_expensive_optimizations)
+ flags |= LOOKUP_NONVIRTUAL;
+
+ /* Use TYPE_MAIN_VARIANT so we don't get a warning about
+ calling delete on a `const' variable. */
+ if (TYPE_READONLY (TREE_TYPE (TREE_TYPE (rval))))
+ rval = build1 (NOP_EXPR, TYPE_POINTER_TO (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (rval)))), rval);
+
+ rval = build_delete (TREE_TYPE (rval), rval, integer_two_node, flags, 0);
+
+ if (TYPE_USES_VIRTUAL_BASECLASSES (type)
+ && ! TYPE_HAS_DESTRUCTOR (type))
+ rval = build_compound_expr (tree_cons (NULL_TREE, rval,
+ build_tree_list (NULL_TREE, build_vbase_delete (type, decl))));
+
+ if (TREE_CODE (decl) != PARM_DECL)
+ resume_momentary (temp);
+
+ building_cleanup = old_building_cleanup;
+
+ return rval;
+ }
+ return 0;
+}
+
+/* Expand a C++ expression at the statement level.
+ This is needed to ferret out nodes which have UNKNOWN_TYPE.
+ The C++ type checker should get all of these out when
+ expressions are combined with other, type-providing, expressions,
+ leaving only orphan expressions, such as:
+
+     &class::bar; // takes its address, but does nothing with it.
+
+ */
+void
+cplus_expand_expr_stmt (exp)
+ tree exp;
+{
+ if (TREE_TYPE (exp) == unknown_type_node)
+ {
+ if (TREE_CODE (exp) == ADDR_EXPR || TREE_CODE (exp) == TREE_LIST)
+ error ("address of overloaded function with no contextual type information");
+ else if (TREE_CODE (exp) == COMPONENT_REF)
+ warning ("useless reference to a member function name, did you forget the ()?");
+ }
+ else
+ {
+ int remove_implicit_immediately = 0;
+
+ if (TREE_CODE (exp) == FUNCTION_DECL)
+ {
+ cp_warning ("reference, not call, to function `%D'", exp);
+ warning ("at this point in file");
+ }
+
+#if 0
+ /* We should do this eventually, but right now this causes regex.o from
+ libg++ to miscompile, and tString to core dump. */
+ exp = build1 (CLEANUP_POINT_EXPR, TREE_TYPE (exp), exp);
+#endif
+ expand_expr_stmt (break_out_cleanups (exp));
+ }
+
+ /* Clean up any pending cleanups. This happens when a function call
+ returns a cleanup-needing value that nobody uses. */
+ expand_cleanups_to (NULL_TREE);
+}
+
+/* When a stmt has been parsed, this function is called.
+
+ Currently, this function only does something within a
+ constructor's scope: if a stmt has just assigned to this,
+ and we are in a derived class, we call `emit_base_init'. */
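+/* E.g., with -fthis-is-variable, an old-style constructor body
+
+     X::X () { this = (X *) malloc (sizeof (X)); ... }
+
+   must not have its base initialization emitted until the statement
+   assigning to `this' has been parsed, and never from inside a
+   condition, loop, or switch. */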
+
+void
+finish_stmt ()
+{
+ extern struct nesting *cond_stack, *loop_stack, *case_stack;
+
+
+ if (current_function_assigns_this
+ || ! current_function_just_assigned_this)
+ return;
+ if (DECL_CONSTRUCTOR_P (current_function_decl))
+ {
+ /* Constructors must wait until we are out of control
+ zones before calling base constructors. */
+ if (cond_stack || loop_stack || case_stack)
+ return;
+ emit_insns (base_init_insns);
+ check_base_init (current_class_type);
+ }
+ current_function_assigns_this = 1;
+
+ if (flag_cadillac)
+ cadillac_finish_stmt ();
+}
+
+/* Change a static member function definition into a FUNCTION_TYPE, instead
+ of the METHOD_TYPE that we create when it's originally parsed.
+
+ WARNING: DO NOT pass &TREE_TYPE (decl) to FN or &TYPE_ARG_TYPES
+ (TREE_TYPE (decl)) to ARGTYPES, as doing so will corrupt the types of
+ other decls. Either pass the addresses of local variables or NULL. */
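+/* A sketch of the transformation: for
+
+     struct X { static int f (int); };
+
+   parsing first gives `f' a METHOD_TYPE whose argument list begins
+   with the implicit `this'; this routine drops that first argument,
+   leaving the plain FUNCTION_TYPE `int (int)'. */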
+
+void
+revert_static_member_fn (decl, fn, argtypes)
+ tree *decl, *fn, *argtypes;
+{
+ tree tmp;
+ tree function = fn ? *fn : TREE_TYPE (*decl);
+ tree args = argtypes ? *argtypes : TYPE_ARG_TYPES (function);
+
+ args = TREE_CHAIN (args);
+ tmp = build_function_type (TREE_TYPE (function), args);
+ tmp = build_type_variant (tmp, TYPE_READONLY (function),
+ TYPE_VOLATILE (function));
+ tmp = build_exception_variant (TYPE_METHOD_BASETYPE (function), tmp,
+ TYPE_RAISES_EXCEPTIONS (function));
+ TREE_TYPE (*decl) = tmp;
+ DECL_STATIC_FUNCTION_P (*decl) = 1;
+ if (fn)
+ *fn = tmp;
+ if (argtypes)
+ *argtypes = args;
+}
+
+int
+id_in_current_class (id)
+ tree id;
+{
+ return !!purpose_member (id, class_binding_level->class_shadowed);
+}
diff --git a/gnu/usr.bin/cc/cc1plus/decl.h b/gnu/usr.bin/cc/cc1plus/decl.h
new file mode 100644
index 0000000..b088179
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/decl.h
@@ -0,0 +1,54 @@
+/* Variables and structures for declaration processing.
+ Copyright (C) 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* In grokdeclarator, distinguish syntactic contexts of declarators. */
+enum decl_context
+{ NORMAL, /* Ordinary declaration */
+ FUNCDEF, /* Function definition */
+ PARM, /* Declaration of parm before function body */
+ FIELD, /* Declaration inside struct or union */
+ BITFIELD, /* Likewise but with specified width */
+ TYPENAME, /* Typename (inside cast or sizeof) */
+ MEMFUNCDEF /* Member function definition */
+};
+
+/* C++: Keep these around to reduce calls to `get_identifier'.
+ Identifiers for `this' in member functions and the auto-delete
+ parameter for destructors. */
+extern tree this_identifier, in_charge_identifier;
+
+/* Parsing a function declarator leaves a list of parameter names
+   or a chain of parameter decls here. */
+extern tree last_function_parms;
+
+/* A list of static class variables. This is needed, because a
+ static class variable can be declared inside the class without
+   an initializer, and then initialized, statically, outside the class. */
+extern tree pending_statics;
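+
+/* For example:
+
+     class X { static int i; };    (no initializer allowed here)
+     int X::i = 3;                 (initialized outside the class) */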
+
+/* A list of objects which have constructors or destructors
+ which reside in the global scope. The decl is stored in
+ the TREE_VALUE slot and the initializer is stored
+ in the TREE_PURPOSE slot. */
+extern tree static_aggregates;
+
+#ifdef DEBUG_CP_BINDING_LEVELS
+/* Purely for debugging purposes. */
+extern int debug_bindings_indentation;
+#endif
diff --git a/gnu/usr.bin/cc/cc1plus/decl2.c b/gnu/usr.bin/cc/cc1plus/decl2.c
new file mode 100644
index 0000000..2b9f8a7
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/decl2.c
@@ -0,0 +1,3102 @@
+/* Process declarations and variables for C compiler.
+ Copyright (C) 1988, 1992, 1993 Free Software Foundation, Inc.
+ Hacked by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Process declarations and symbol lookup for C front end.
+ Also constructs types; the standard scalar types at initialization,
+ and structure, union, array and enum types when they are declared. */
+
+/* ??? not all decl nodes are given the most useful possible
+ line numbers. For example, the CONST_DECLs for enum values. */
+
+#include "config.h"
+#include <stdio.h>
+#include "tree.h"
+#include "rtl.h"
+#include "flags.h"
+#include "cp-tree.h"
+#include "decl.h"
+#include "lex.h"
+
+extern tree grokdeclarator ();
+extern tree get_file_function_name ();
+extern tree cleanups_this_call;
+static void grok_function_init ();
+
+/* A list of virtual function tables we must make sure to write out. */
+tree pending_vtables;
+
+/* A list of static class variables. This is needed, because a
+ static class variable can be declared inside the class without
+   an initializer, and then initialized, statically, outside the class. */
+tree pending_statics;
+
+/* A list of functions which were declared inline, but which we
+ may need to emit outline anyway. */
+static tree saved_inlines;
+
+/* Used to help generate temporary names which are unique within
+ a function. Reset to 0 by start_function. */
+
+static int temp_name_counter;
+
+/* Same, but not reset. Local temp variables and global temp variables
+ can have the same name. */
+static int global_temp_name_counter;
+
+/* Flag used when debugging spew.c */
+
+extern int spew_debug;
+
+/* C (and C++) language-specific option variables. */
+
+/* Nonzero means allow type mismatches in conditional expressions;
+ just make their values `void'. */
+
+int flag_cond_mismatch;
+
+/* Nonzero means give `double' the same size as `float'. */
+
+int flag_short_double;
+
+/* Nonzero means don't recognize the keyword `asm'. */
+
+int flag_no_asm;
+
+/* Nonzero means don't recognize the non-ANSI builtin functions. */
+
+int flag_no_builtin;
+
+/* Nonzero means do some things the same way PCC does. */
+
+int flag_traditional;
+
+/* Nonzero means to treat bitfields as unsigned unless they say `signed'. */
+
+int flag_signed_bitfields = 1;
+
+/* Nonzero means handle `#ident' directives. 0 means ignore them. */
+
+int flag_no_ident = 0;
+
+/* Nonzero means disable GNU extensions. */
+
+int flag_ansi = 0;
+
+/* Nonzero means do emit exported implementations of functions even if
+ they can be inlined. */
+
+int flag_implement_inlines = 1;
+
+/* Nonzero means do emit exported implementations of templates, instead of
+ multiple static copies in each file that needs a definition. */
+
+int flag_external_templates = 0;
+
+/* Nonzero means that the decision to emit or not emit the implementation of a
+ template depends on where the template is instantiated, rather than where
+ it is defined. */
+
+int flag_alt_external_templates = 0;
+
+/* Nonzero means that implicit instantiations will be emitted if needed. */
+
+int flag_implicit_templates = 1;
+
+/* Nonzero means warn about implicit declarations. */
+
+int warn_implicit = 1;
+
+/* Nonzero means warn when all ctors or dtors are private, and the class
+ has no friends. */
+
+int warn_ctor_dtor_privacy = 1;
+
+/* True if we want to implement vtables using "thunks".
+   The default is off now, but will be on later.
+
+   Also causes output of vtables to be controlled by whether
+   we have seen the class's first non-inline virtual function. */
+int flag_vtable_thunks = 0;
+
+/* Nonzero means give string constants the type `const char *'
+ to get extra warnings from them. These warnings will be too numerous
+ to be useful, except in thoroughly ANSIfied programs. */
+
+int warn_write_strings;
+
+/* Nonzero means warn about pointer casts that can drop a type qualifier
+ from the pointer target type. */
+
+int warn_cast_qual;
+
+/* Nonzero means warn that dbx info for template class methods isn't fully
+ supported yet. */
+
+int warn_template_debugging;
+
+/* Warn about traditional constructs whose meanings changed in ANSI C. */
+
+int warn_traditional;
+
+/* Nonzero means warn about sizeof(function) or addition/subtraction
+ of function pointers. */
+
+int warn_pointer_arith;
+
+/* Nonzero means warn for non-prototype function decls
+ or non-prototyped defs without previous prototype. */
+
+int warn_strict_prototypes;
+
+/* Nonzero means warn for any function def without prototype decl. */
+
+int warn_missing_prototypes;
+
+/* Nonzero means warn about multiple (redundant) decls for the same single
+ variable or function. */
+
+int warn_redundant_decls;
+
+/* Warn if initializer is not completely bracketed. */
+
+int warn_missing_braces;
+
+/* Warn about *printf or *scanf format/argument anomalies. */
+
+int warn_format;
+
+/* Warn about a subscript that has type char. */
+
+int warn_char_subscripts;
+
+/* Warn if a type conversion is done that might have confusing results. */
+
+int warn_conversion;
+
+/* Warn if adding () is suggested. */
+
+int warn_parentheses = 1;
+
+/* Non-zero means warn when a function declared in a derived class has
+   the same name as a virtual function in the base class, but fails to
+   match the type signature of any virtual function in the base class. */
+int warn_overloaded_virtual;
+
+/* Non-zero means warn when declaring a class that has a non virtual
+ destructor, when it really ought to have a virtual one. */
+int warn_nonvdtor;
+
+/* Non-zero means warn when a function is declared extern and later inline. */
+int warn_extern_inline;
+
+/* Nonzero means `$' can be in an identifier.
+ See cccp.c for reasons why this breaks some obscure ANSI C programs. */
+
+#ifndef DOLLARS_IN_IDENTIFIERS
+#define DOLLARS_IN_IDENTIFIERS 1
+#endif
+int dollars_in_ident = DOLLARS_IN_IDENTIFIERS;
+
+/* Nonzero for -fno-strict-prototype switch: do not consider empty
+ argument prototype to mean function takes no arguments. */
+
+int strict_prototype = 1;
+int strict_prototypes_lang_c, strict_prototypes_lang_cplusplus = 1;
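+
+/* E.g., with -fno-strict-prototype, the declaration
+
+     extern int f ();
+
+   declares a function taking unspecified arguments, as in C, rather
+   than one taking no arguments. */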
+
+/* Nonzero means that labels can be used as first-class objects */
+
+int flag_labels_ok;
+
+/* Non-zero means to collect statistics which might be expensive
+ and to print them when we are done. */
+int flag_detailed_statistics;
+
+/* C++ specific flags. */
+/* Nonzero for -fall-virtual: make every member function (except
+ constructors) lay down in the virtual function table. Calls
+ can then either go through the virtual function table or not,
+ depending. */
+
+int flag_all_virtual;
+
+/* Zero means that `this' is a *const. This gives nice behavior in the
+ 2.0 world. 1 gives 1.2-compatible behavior. 2 gives Spring behavior.
+ -2 means we're constructing an object and it has fixed type. */
+
+int flag_this_is_variable;
+
+/* Nonzero means memoize our member lookups. */
+
+int flag_memoize_lookups;
+int flag_save_memoized_contexts;
+
+/* 3 means write out only virtual function tables `defined'
+ in this implementation file.
+ 2 means write out only specific virtual function tables
+ and give them (C) public access.
+ 1 means write out virtual function tables and give them
+ (C) public access.
+ 0 means write out virtual function tables and give them
+ (C) static access (default).
+ -1 means declare virtual function tables extern. */
+
+int write_virtuals;
+
+/* Nonzero means we should attempt to elide constructors when possible. */
+
+int flag_elide_constructors;
+
+/* Nonzero means recognize and handle exception handling constructs.
+ Use ansi syntax and semantics. WORK IN PROGRESS! */
+
+int flag_handle_exceptions;
+
+/* Nonzero means recognize and handle signature language constructs. */
+
+int flag_handle_signatures;
+
+/* Nonzero means that member functions defined in class scope are
+ inline by default. */
+
+int flag_default_inline = 1;
+
+/* Controls whether enums and ints freely convert.
+ 1 means with complete freedom.
+ 0 means enums can convert to ints, but not vice-versa. */
+int flag_int_enum_equivalence;
+
+/* Controls whether compiler is operating under LUCID's Cadillac
+ system. 1 means yes, 0 means no. */
+int flag_cadillac;
+
+/* Controls whether compiler generates code to build objects
+ that can be collected when they become garbage. */
+int flag_gc;
+
+/* Controls whether compiler generates 'dossiers' that give
+ run-time type information. */
+int flag_dossier;
+
+/* Nonzero if we wish to output cross-referencing information
+ for the GNU class browser. */
+extern int flag_gnu_xref;
+
+/* Nonzero if compiler can make `reasonable' assumptions about
+ references and objects. For example, the compiler must be
+ conservative about the following and not assume that `a' is nonnull:
+
+ obj &a = g ();
+ a.f (2);
+
+   In general, it is `reasonable' to assume this for many programs,
+ and better code can be generated in that case. */
+
+int flag_assume_nonnull_objects;
+
+/* Nonzero if we want to support huge (> 2^(sizeof(short)*8-1) bytes)
+ objects. */
+int flag_huge_objects;
+
+/* Nonzero if we want to conserve space in the .o files. We do this
+ by putting uninitialized data and runtime initialized data into
+   .common instead of .data at the expense of not flagging multiple
+ definitions. */
+int flag_conserve_space;
+
+/* Table of language-dependent -f options.
+ STRING is the option name. VARIABLE is the address of the variable.
+ ON_VALUE is the value to store in VARIABLE
+ if `-fSTRING' is seen as an option.
+ (If `-fno-STRING' is seen as an option, the opposite value is stored.) */
+
+static struct { char *string; int *variable; int on_value;} lang_f_options[] =
+{
+ {"signed-char", &flag_signed_char, 1},
+ {"unsigned-char", &flag_signed_char, 0},
+ {"signed-bitfields", &flag_signed_bitfields, 1},
+ {"unsigned-bitfields", &flag_signed_bitfields, 0},
+ {"short-enums", &flag_short_enums, 1},
+ {"short-double", &flag_short_double, 1},
+ {"cond-mismatch", &flag_cond_mismatch, 1},
+ {"asm", &flag_no_asm, 0},
+ {"builtin", &flag_no_builtin, 0},
+ {"ident", &flag_no_ident, 0},
+ {"labels-ok", &flag_labels_ok, 1},
+ {"stats", &flag_detailed_statistics, 1},
+ {"this-is-variable", &flag_this_is_variable, 1},
+ {"strict-prototype", &strict_prototypes_lang_cplusplus, 1},
+ {"all-virtual", &flag_all_virtual, 1},
+ {"memoize-lookups", &flag_memoize_lookups, 1},
+ {"elide-constructors", &flag_elide_constructors, 1},
+ {"handle-exceptions", &flag_handle_exceptions, 1},
+ {"handle-signatures", &flag_handle_signatures, 1},
+ {"default-inline", &flag_default_inline, 1},
+ {"dollars-in-identifiers", &dollars_in_ident, 1},
+ {"enum-int-equiv", &flag_int_enum_equivalence, 1},
+ {"gc", &flag_gc, 1},
+ {"dossier", &flag_dossier, 1},
+ {"xref", &flag_gnu_xref, 1},
+ {"nonnull-objects", &flag_assume_nonnull_objects, 1},
+ {"implement-inlines", &flag_implement_inlines, 1},
+ {"external-templates", &flag_external_templates, 1},
+ {"implicit-templates", &flag_implicit_templates, 1},
+ {"huge-objects", &flag_huge_objects, 1},
+ {"conserve-space", &flag_conserve_space, 1},
+ {"vtable-thunks", &flag_vtable_thunks, 1},
+ {"short-temps", &flag_short_temps, 1},
+};
+
+/* Decode the string P as a language-specific option.
+ Return 1 if it is recognized (and handle it);
+ return 0 if not recognized. */
+
+int
+lang_decode_option (p)
+ char *p;
+{
+ if (!strcmp (p, "-ftraditional") || !strcmp (p, "-traditional"))
+ flag_traditional = 1, dollars_in_ident = 1, flag_writable_strings = 1,
+ flag_this_is_variable = 1;
+ /* The +e options are for cfront compatibility. They come in as
+ `-+eN', to kludge around gcc.c's argument handling. */
+ else if (p[0] == '-' && p[1] == '+' && p[2] == 'e')
+ {
+ int old_write_virtuals = write_virtuals;
+ if (p[3] == '1')
+ write_virtuals = 1;
+ else if (p[3] == '0')
+ write_virtuals = -1;
+ else if (p[3] == '2')
+ write_virtuals = 2;
+ else error ("invalid +e option");
+ if (old_write_virtuals != 0
+ && write_virtuals != old_write_virtuals)
+ error ("conflicting +e options given");
+ }
+ else if (p[0] == '-' && p[1] == 'f')
+ {
+ /* Some kind of -f option.
+ P's value is the option sans `-f'.
+ Search for it in the table of options. */
+ int found = 0, j;
+
+ p += 2;
+ /* Try special -f options. */
+
+ if (!strcmp (p, "save-memoized"))
+ {
+ flag_memoize_lookups = 1;
+ flag_save_memoized_contexts = 1;
+ found = 1;
+ }
+ if (!strcmp (p, "no-save-memoized"))
+ {
+ flag_memoize_lookups = 0;
+ flag_save_memoized_contexts = 0;
+ found = 1;
+ }
+ else if (! strncmp (p, "cadillac", 8))
+ {
+ flag_cadillac = atoi (p+9);
+ found = 1;
+ }
+ else if (! strncmp (p, "no-cadillac", 11))
+ {
+ flag_cadillac = 0;
+ found = 1;
+ }
+ else if (! strcmp (p, "gc"))
+ {
+ flag_gc = 1;
+ /* This must come along for the ride. */
+ flag_dossier = 1;
+ found = 1;
+ }
+ else if (! strcmp (p, "no-gc"))
+ {
+ flag_gc = 0;
+ /* This must come along for the ride. */
+ flag_dossier = 0;
+ found = 1;
+ }
+ else if (! strcmp (p, "alt-external-templates"))
+ {
+ flag_external_templates = 1;
+ flag_alt_external_templates = 1;
+ found = 1;
+ }
+ else if (! strcmp (p, "no-alt-external-templates"))
+ {
+ flag_alt_external_templates = 0;
+ found = 1;
+ }
+ else for (j = 0;
+ !found && j < sizeof (lang_f_options) / sizeof (lang_f_options[0]);
+ j++)
+ {
+ if (!strcmp (p, lang_f_options[j].string))
+ {
+ *lang_f_options[j].variable = lang_f_options[j].on_value;
+ /* A goto here would be cleaner,
+ but breaks the vax pcc. */
+ found = 1;
+ }
+ if (p[0] == 'n' && p[1] == 'o' && p[2] == '-'
+ && ! strcmp (p+3, lang_f_options[j].string))
+ {
+ *lang_f_options[j].variable = ! lang_f_options[j].on_value;
+ found = 1;
+ }
+ }
+ return found;
+ }
+ else if (p[0] == '-' && p[1] == 'W')
+ {
+ int setting = 1;
+
+ /* The -W options control the warning behavior of the compiler. */
+ p += 2;
+
+ if (p[0] == 'n' && p[1] == 'o' && p[2] == '-')
+ setting = 0, p += 3;
+
+ if (!strcmp (p, "implicit"))
+ warn_implicit = setting;
+ else if (!strcmp (p, "return-type"))
+ warn_return_type = setting;
+ else if (!strcmp (p, "ctor-dtor-privacy"))
+ warn_ctor_dtor_privacy = setting;
+ else if (!strcmp (p, "write-strings"))
+ warn_write_strings = setting;
+ else if (!strcmp (p, "cast-qual"))
+ warn_cast_qual = setting;
+ else if (!strcmp (p, "traditional"))
+ warn_traditional = setting;
+ else if (!strcmp (p, "char-subscripts"))
+ warn_char_subscripts = setting;
+ else if (!strcmp (p, "pointer-arith"))
+ warn_pointer_arith = setting;
+ else if (!strcmp (p, "strict-prototypes"))
+ warn_strict_prototypes = setting;
+ else if (!strcmp (p, "missing-prototypes"))
+ warn_missing_prototypes = setting;
+ else if (!strcmp (p, "redundant-decls"))
+ warn_redundant_decls = setting;
+ else if (!strcmp (p, "missing-braces"))
+ warn_missing_braces = setting;
+ else if (!strcmp (p, "format"))
+ warn_format = setting;
+ else if (!strcmp (p, "conversion"))
+ warn_conversion = setting;
+ else if (!strcmp (p, "parentheses"))
+ warn_parentheses = setting;
+ else if (!strcmp (p, "non-virtual-dtor"))
+ warn_nonvdtor = setting;
+ else if (!strcmp (p, "extern-inline"))
+ warn_extern_inline = setting;
+ else if (!strcmp (p, "comment"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "comments"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "trigraphs"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "import"))
+ ; /* cpp handles this one. */
+ else if (!strcmp (p, "all"))
+ {
+ extra_warnings = setting;
+ warn_return_type = setting;
+ warn_unused = setting;
+ warn_implicit = setting;
+ warn_ctor_dtor_privacy = setting;
+ warn_switch = setting;
+ warn_format = setting;
+ warn_missing_braces = setting;
+ warn_extern_inline = setting;
+ warn_nonvdtor = setting;
+ /* We save the value of warn_uninitialized, since if they put
+ -Wuninitialized on the command line, we need to generate a
+ warning about not using it without also specifying -O. */
+ if (warn_uninitialized != 1)
+ warn_uninitialized = (setting ? 2 : 0);
+ warn_template_debugging = setting;
+ }
+
+ else if (!strcmp (p, "overloaded-virtual"))
+ warn_overloaded_virtual = setting;
+ else return 0;
+ }
+ else if (!strcmp (p, "-ansi"))
+ flag_no_asm = 1, dollars_in_ident = 0, flag_ansi = 1;
+#ifdef SPEW_DEBUG
+ /* Undocumented, only ever used when you're invoking cc1plus by hand, since
+ it's probably safe to assume no sane person would ever want to use this
+ under normal circumstances. */
+ else if (!strcmp (p, "-spew-debug"))
+ spew_debug = 1;
+#endif
+ else
+ return 0;
+
+ return 1;
+}
+
+/* Incorporate `const' and `volatile' qualifiers for member functions.
+ FUNCTION is a TYPE_DECL or a FUNCTION_DECL.
+ QUALS is a list of qualifiers. */
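+/* E.g., for
+
+     int X::f () const;
+
+   the `const' is folded into the implicit `this' argument by building
+   the `const X' variant of the class type and rebuilding the method
+   type from it. */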
+tree
+grok_method_quals (ctype, function, quals)
+ tree ctype, function, quals;
+{
+ tree fntype = TREE_TYPE (function);
+ tree raises = TYPE_RAISES_EXCEPTIONS (fntype);
+
+ do
+ {
+ extern tree ridpointers[];
+
+ if (TREE_VALUE (quals) == ridpointers[(int)RID_CONST])
+ {
+ if (TYPE_READONLY (ctype))
+ error ("duplicate `%s' %s",
+ IDENTIFIER_POINTER (TREE_VALUE (quals)),
+ (TREE_CODE (function) == FUNCTION_DECL
+ ? "for member function" : "in type declaration"));
+ ctype = build_type_variant (ctype, 1, TYPE_VOLATILE (ctype));
+ build_pointer_type (ctype);
+ }
+ else if (TREE_VALUE (quals) == ridpointers[(int)RID_VOLATILE])
+ {
+ if (TYPE_VOLATILE (ctype))
+ error ("duplicate `%s' %s",
+ IDENTIFIER_POINTER (TREE_VALUE (quals)),
+ (TREE_CODE (function) == FUNCTION_DECL
+ ? "for member function" : "in type declaration"));
+ ctype = build_type_variant (ctype, TYPE_READONLY (ctype), 1);
+ build_pointer_type (ctype);
+ }
+ else
+ my_friendly_abort (20);
+ quals = TREE_CHAIN (quals);
+ }
+ while (quals);
+ fntype = build_cplus_method_type (ctype, TREE_TYPE (fntype),
+ (TREE_CODE (fntype) == METHOD_TYPE
+ ? TREE_CHAIN (TYPE_ARG_TYPES (fntype))
+ : TYPE_ARG_TYPES (fntype)));
+ if (raises)
+ fntype = build_exception_variant (ctype, fntype, raises);
+
+ TREE_TYPE (function) = fntype;
+ return ctype;
+}
+
+#if 0 /* Not used. */
+/* This routine replaces cryptic DECL_NAMEs with readable DECL_NAMEs.
+ It leaves DECL_ASSEMBLER_NAMEs with the correct value. */
+/* This does not yet work with user defined conversion operators
+ It should. */
+static void
+substitute_nice_name (decl)
+ tree decl;
+{
+ if (DECL_NAME (decl) && TREE_CODE (DECL_NAME (decl)) == IDENTIFIER_NODE)
+ {
+ char *n = decl_as_string (DECL_NAME (decl), 1);
+ if (n[strlen (n) - 1] == ' ')
+ n[strlen (n) - 1] = 0;
+ DECL_NAME (decl) = get_identifier (n);
+ }
+}
+#endif
+
+/* When -fexternal-templates is used and #pragma
+   interface/implementation is not used everywhere it should be,
+   inform the user. */
+void
+warn_if_unknown_interface ()
+{
+ static int already_warned = 0;
+ if (++already_warned == 1)
+ warning ("templates that are built with -fexternal-templates should be in files that have #pragma interface/implementation");
+}
+
+/* A subroutine of the parser, to handle a component list. */
+tree
+grok_x_components (specs, components)
+ tree specs, components;
+{
+ register tree t, x, tcode;
+
+ /* We just got some friends. They have been recorded elsewhere. */
+ if (components == void_type_node)
+ return NULL_TREE;
+
+ if (components == NULL_TREE)
+ {
+ t = groktypename (build_decl_list (specs, NULL_TREE));
+
+ if (t == NULL_TREE)
+ {
+ error ("error in component specification");
+ return NULL_TREE;
+ }
+
+ switch (TREE_CODE (t))
+ {
+ case VAR_DECL:
+ /* Static anonymous unions come out as VAR_DECLs. */
+ if (TREE_CODE (TREE_TYPE (t)) == UNION_TYPE
+ && ANON_AGGRNAME_P (TYPE_IDENTIFIER (TREE_TYPE (t))))
+ return t;
+
+	  /* We return SPECS here because the parser would otherwise
+	     end up doing nothing to $$, which is what SPECS
+	     represents. */
+ return specs;
+ break;
+
+ case RECORD_TYPE:
+ /* This code may be needed for UNION_TYPEs as
+ well. */
+ tcode = record_type_node;
+ if (CLASSTYPE_DECLARED_CLASS(t))
+ tcode = class_type_node;
+ else if (IS_SIGNATURE(t))
+ tcode = signature_type_node;
+ else if (CLASSTYPE_DECLARED_EXCEPTION(t))
+ tcode = exception_type_node;
+
+ t = xref_defn_tag(tcode, TYPE_IDENTIFIER(t), NULL_TREE);
+ if (TYPE_CONTEXT(t))
+ CLASSTYPE_NO_GLOBALIZE(t) = 1;
+ if (TYPE_LANG_SPECIFIC (t)
+ && CLASSTYPE_DECLARED_EXCEPTION (t))
+ shadow_tag (specs);
+ return NULL_TREE;
+ break;
+
+ case UNION_TYPE:
+ case ENUMERAL_TYPE:
+ if (TREE_CODE(t) == UNION_TYPE)
+ tcode = union_type_node;
+ else
+ tcode = enum_type_node;
+
+ t = xref_defn_tag(tcode, TYPE_IDENTIFIER(t), NULL_TREE);
+ if (TREE_CODE(t) == UNION_TYPE && TYPE_CONTEXT(t))
+ CLASSTYPE_NO_GLOBALIZE(t) = 1;
+ if (TREE_CODE (t) == UNION_TYPE
+ && ANON_AGGRNAME_P (TYPE_IDENTIFIER (t)))
+ {
+ struct pending_inline **p;
+ x = build_lang_field_decl (FIELD_DECL, NULL_TREE, t);
+
+ /* Wipe out memory of synthesized methods */
+ TYPE_HAS_CONSTRUCTOR (t) = 0;
+ TYPE_HAS_DEFAULT_CONSTRUCTOR (t) = 0;
+ TYPE_HAS_INIT_REF (t) = 0;
+ TYPE_HAS_CONST_INIT_REF (t) = 0;
+ TYPE_HAS_ASSIGN_REF (t) = 0;
+ TYPE_HAS_ASSIGNMENT (t) = 0;
+ TYPE_HAS_CONST_ASSIGN_REF (t) = 0;
+
+ p = &pending_inlines;
+ for (; *p; *p = (*p)->next)
+ if (DECL_CONTEXT ((*p)->fndecl) != t)
+ break;
+ }
+ else if (TREE_CODE (t) == ENUMERAL_TYPE)
+ x = grok_enum_decls (t, NULL_TREE);
+ else
+ x = NULL_TREE;
+ return x;
+ break;
+
+ default:
+ if (t != void_type_node)
+ error ("empty component declaration");
+ return NULL_TREE;
+ }
+ }
+ else
+ {
+ t = TREE_TYPE (components);
+ if (TREE_CODE (t) == ENUMERAL_TYPE && TREE_NONLOCAL_FLAG (t))
+ return grok_enum_decls (t, components);
+ else
+ return components;
+ }
+}
+
+/* Classes overload their constituent function names automatically.
+ When a function name is declared in a record structure,
+   its name is changed to its overloaded name. Since names for
+ constructors and destructors can conflict, we place a leading
+ '$' for destructors.
+
+ CNAME is the name of the class we are grokking for.
+
+ FUNCTION is a FUNCTION_DECL. It was created by `grokdeclarator'.
+
+ FLAGS contains bits saying what's special about today's
+ arguments. 1 == DESTRUCTOR. 2 == OPERATOR.
+
+ If FUNCTION is a destructor, then we must add the `auto-delete' field
+ as a second parameter. There is some hair associated with the fact
+ that we must "declare" this variable in the manner consistent with the
+ way the rest of the arguments were declared.
+
+ QUALS are the qualifiers for the this pointer. */
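+/* Roughly: for a class `Example', a destructor gets the extra
+   `__in_chrg' parameter and an assembler name made by prepending
+   DESTRUCTOR_DECL_PREFIX to the overload name `7Example' (see below);
+   other member functions are mangled via `build_decl_overload'. */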
+
+void
+grokclassfn (ctype, cname, function, flags, quals)
+ tree ctype, cname, function;
+ enum overload_flags flags;
+ tree quals;
+{
+ tree fn_name = DECL_NAME (function);
+ tree arg_types;
+ tree parm;
+ tree qualtype;
+
+ if (fn_name == NULL_TREE)
+ {
+ error ("name missing for member function");
+ fn_name = get_identifier ("<anonymous>");
+ DECL_NAME (function) = fn_name;
+ }
+
+ if (quals)
+ qualtype = grok_method_quals (ctype, function, quals);
+ else
+ qualtype = ctype;
+
+ arg_types = TYPE_ARG_TYPES (TREE_TYPE (function));
+ if (TREE_CODE (TREE_TYPE (function)) == METHOD_TYPE)
+ {
+ /* Must add the class instance variable up front. */
+ /* Right now we just make this a pointer. But later
+ we may wish to make it special. */
+ tree type = TREE_VALUE (arg_types);
+
+ if ((flag_this_is_variable > 0)
+ && (flags == DTOR_FLAG || DECL_CONSTRUCTOR_P (function)))
+ type = TYPE_MAIN_VARIANT (type);
+
+ if (DECL_CONSTRUCTOR_P (function))
+ {
+ if (TYPE_USES_VIRTUAL_BASECLASSES (ctype))
+ {
+ DECL_CONSTRUCTOR_FOR_VBASE_P (function) = 1;
+ /* In this case we need "in-charge" flag saying whether
+ this constructor is responsible for initialization
+ of virtual baseclasses or not. */
+ parm = build_decl (PARM_DECL, in_charge_identifier, integer_type_node);
+ /* Mark the artificial `__in_chrg' parameter as "artificial". */
+ SET_DECL_ARTIFICIAL (parm);
+ DECL_ARG_TYPE (parm) = integer_type_node;
+ DECL_REGISTER (parm) = 1;
+ TREE_CHAIN (parm) = last_function_parms;
+ last_function_parms = parm;
+ }
+ }
+
+ parm = build_decl (PARM_DECL, this_identifier, type);
+ /* Mark the artificial `this' parameter as "artificial". */
+ SET_DECL_ARTIFICIAL (parm);
+ DECL_ARG_TYPE (parm) = type;
+ /* We can make this a register, so long as we don't
+ accidentally complain if someone tries to take its address. */
+ DECL_REGISTER (parm) = 1;
+ if (TYPE_READONLY (type))
+ TREE_READONLY (parm) = 1;
+ TREE_CHAIN (parm) = last_function_parms;
+ last_function_parms = parm;
+ }
+
+ if (flags == DTOR_FLAG)
+ {
+ char *buf, *dbuf;
+ tree const_integer_type = build_type_variant (integer_type_node, 1, 0);
+ int len = sizeof (DESTRUCTOR_DECL_PREFIX)-1;
+
+ arg_types = hash_tree_chain (const_integer_type, void_list_node);
+ TREE_SIDE_EFFECTS (arg_types) = 1;
+ /* Build the overload name. It will look like `7Example'. */
+ if (IDENTIFIER_TYPE_VALUE (cname))
+ dbuf = build_overload_name (IDENTIFIER_TYPE_VALUE (cname), 1, 1);
+ else if (IDENTIFIER_LOCAL_VALUE (cname))
+ dbuf = build_overload_name (TREE_TYPE (IDENTIFIER_LOCAL_VALUE (cname)), 1, 1);
+ else
+ /* Using ctype fixes the `X::Y::~Y()' crash. The cname has no type when
+ it's defined out of the class definition, since poplevel_class wipes
+ it out. This used to be internal error 346. */
+ dbuf = build_overload_name (ctype, 1, 1);
+ buf = (char *) alloca (strlen (dbuf) + sizeof (DESTRUCTOR_DECL_PREFIX));
+ bcopy (DESTRUCTOR_DECL_PREFIX, buf, len);
+ buf[len] = '\0';
+ strcat (buf, dbuf);
+ DECL_ASSEMBLER_NAME (function) = get_identifier (buf);
+ parm = build_decl (PARM_DECL, in_charge_identifier, const_integer_type);
+ /* Mark the artificial `__in_chrg' parameter as "artificial". */
+ SET_DECL_ARTIFICIAL (parm);
+ TREE_USED (parm) = 1;
+#if 0
+ /* We don't need to mark the __in_chrg parameter itself as `const'
+ since its type is already `const int'. In fact we MUST NOT mark
+ it as `const' cuz that will screw up the debug info (causing it
+ to say that the type of __in_chrg is `const const int'). */
+ TREE_READONLY (parm) = 1;
+#endif
+ DECL_ARG_TYPE (parm) = const_integer_type;
+ /* This is the same chain as DECL_ARGUMENTS (...). */
+ TREE_CHAIN (last_function_parms) = parm;
+
+ TREE_TYPE (function) = build_cplus_method_type (qualtype, void_type_node,
+ arg_types);
+ TYPE_HAS_DESTRUCTOR (ctype) = 1;
+ }
+ else
+ {
+ tree these_arg_types;
+
+ if (DECL_CONSTRUCTOR_FOR_VBASE_P (function))
+ {
+ arg_types = hash_tree_chain (integer_type_node,
+ TREE_CHAIN (arg_types));
+ TREE_TYPE (function)
+ = build_cplus_method_type (qualtype,
+ TREE_TYPE (TREE_TYPE (function)),
+ arg_types);
+ arg_types = TYPE_ARG_TYPES (TREE_TYPE (function));
+ }
+
+ these_arg_types = arg_types;
+
+ if (TREE_CODE (TREE_TYPE (function)) == FUNCTION_TYPE)
+ /* Only true for static member functions. */
+ these_arg_types = hash_tree_chain (TYPE_POINTER_TO (qualtype),
+ arg_types);
+
+ DECL_ASSEMBLER_NAME (function)
+ = build_decl_overload (fn_name, these_arg_types,
+ 1 + DECL_CONSTRUCTOR_P (function));
+
+#if 0
+ /* This code is going into the compiler, but currently, it makes
+     libg++/src/Integer.cc not compile. The problem is that the nice name
+     winds up going into the symbol table, and conversion operations look
+     for the mangled name. */
+ substitute_nice_name (function);
+#endif
+ }
+
+ DECL_ARGUMENTS (function) = last_function_parms;
+ /* First approximations. */
+ DECL_CONTEXT (function) = ctype;
+ DECL_CLASS_CONTEXT (function) = ctype;
+}
+
+/* Work on the expr used by alignof (this is only called by the parser). */
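+/* E.g., for a double `d', `__alignof__ (*(char *) &d)' looks through
+   the pointer cast and reports the alignment of double, not char, by
+   picking the best-aligned type among the NOP_EXPR operands. */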
+tree
+grok_alignof (expr)
+ tree expr;
+{
+ tree best, t;
+ int bestalign;
+
+ if (TREE_CODE (expr) == COMPONENT_REF
+ && DECL_BIT_FIELD (TREE_OPERAND (expr, 1)))
+ error ("`__alignof__' applied to a bit-field");
+
+ if (TREE_CODE (expr) == INDIRECT_REF)
+ {
+ best = t = TREE_OPERAND (expr, 0);
+ bestalign = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (t)));
+
+ while (TREE_CODE (t) == NOP_EXPR
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == POINTER_TYPE)
+ {
+ int thisalign;
+ t = TREE_OPERAND (t, 0);
+ thisalign = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (t)));
+ if (thisalign > bestalign)
+ best = t, bestalign = thisalign;
+ }
+ return c_alignof (TREE_TYPE (TREE_TYPE (best)));
+ }
+ else
+ {
+ /* ANSI says arrays and fns are converted inside comma.
+ But we can't convert them in build_compound_expr
+ because that would break commas in lvalues.
+ So do the conversion here if operand was a comma. */
+ if (TREE_CODE (expr) == COMPOUND_EXPR
+ && (TREE_CODE (TREE_TYPE (expr)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (expr)) == FUNCTION_TYPE))
+ expr = default_conversion (expr);
+ return c_alignof (TREE_TYPE (expr));
+ }
+}
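+
+/* As an illustration of the NOP_EXPR walk above: given `double d;',
+ the expression `__alignof__ (*(char *) &d)' looks through the
+ `(char *)' cast, finds the underlying `double', and yields the
+ alignment of `double' rather than that of `char', since the largest
+ alignment seen along the chain of pointer casts wins. */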
+
+/* Create an ARRAY_REF, checking for the user doing things backwards
+ along the way. */
+tree
+grok_array_decl (array_expr, index_exp)
+ tree array_expr, index_exp;
+{
+ tree type = TREE_TYPE (array_expr);
+
+ if (type == error_mark_node || index_exp == error_mark_node)
+ return error_mark_node;
+ if (type == NULL_TREE)
+ {
+ /* Something has gone very wrong. Assume we are mistakenly reducing
+ an expression instead of a declaration. */
+ error ("parser may be lost: is there a '{' missing somewhere?");
+ return NULL_TREE;
+ }
+
+ if (TREE_CODE (type) == OFFSET_TYPE
+ || TREE_CODE (type) == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+
+ /* If they have an `operator[]', use that. */
+ if (TYPE_LANG_SPECIFIC (type)
+ && TYPE_OVERLOADS_ARRAY_REF (type))
+ return build_opfncall (ARRAY_REF, LOOKUP_NORMAL,
+ array_expr, index_exp, NULL_TREE);
+
+ /* Otherwise, create an ARRAY_REF for a pointer or array type. */
+ if (TREE_CODE (type) == POINTER_TYPE
+ || TREE_CODE (type) == ARRAY_TYPE)
+ return build_array_ref (array_expr, index_exp);
+
+ /* Whoops, looks like they did something like `5[a]' instead of `a[5]'.
+ We don't emit a warning or error for this, since it's allowed
+ by ARM $8.2.4. */
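+ /* (Recall that `5[a]' means `*(5 + a)', which is exactly `a[5]';
+ the operands of the built-in subscript commute, so the code below
+ simply swaps them.) */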
+
+ type = TREE_TYPE (index_exp);
+
+ if (TREE_CODE (type) == OFFSET_TYPE
+ || TREE_CODE (type) == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+
+ if (TYPE_LANG_SPECIFIC (type)
+ && TYPE_OVERLOADS_ARRAY_REF (type))
+ error ("array expression backwards");
+ else if (TREE_CODE (type) == POINTER_TYPE
+ || TREE_CODE (type) == ARRAY_TYPE)
+ return build_array_ref (index_exp, array_expr);
+ else
+ error("`[]' applied to non-pointer type");
+
+ /* An error was given above, so just return error_mark_node. */
+ return error_mark_node;
+}
+
+/* Given the expression EXP of a `delete' statement, check its validity.
+ Either return an error_mark_node if there was an unavoidable error,
+ return a cast to void for trying to delete a pointer with the value 0,
+ or return the call to delete. If DOING_VEC is 1, we handle things
+ differently for doing an array delete. If DOING_VEC is 2, they gave us
+ the array size as an argument to delete.
+ Implements ARM $5.3.4. This is called from the parser. */
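+/* For example: `delete p' arrives here with DOING_VEC 0; `delete [] p'
+ with DOING_VEC 1; and the anachronistic `delete [10] p' with
+ DOING_VEC 2 and SIZE of 10 (which draws the pedwarn below). */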
+tree
+delete_sanity (exp, size, doing_vec, use_global_delete)
+ tree exp, size;
+ int doing_vec, use_global_delete;
+{
+ tree t = stabilize_reference (convert_from_reference (exp));
+ tree type = TREE_TYPE (t);
+ enum tree_code code = TREE_CODE (type);
+ /* For a regular vector delete (i.e., no size argument) we will pass
+ this down as a NULL_TREE into build_vec_delete. */
+ tree maxindex = NULL_TREE;
+ /* This is used for deleting arrays. */
+ tree elt_size;
+
+ switch (doing_vec)
+ {
+ case 2:
+ maxindex = build_binary_op (MINUS_EXPR, size, integer_one_node, 1);
+ if (! flag_traditional)
+ pedwarn ("anachronistic use of array size in vector delete");
+ /* Fall through. */
+ case 1:
+ elt_size = c_sizeof (type);
+ break;
+ default:
+ if (code != POINTER_TYPE)
+ {
+ cp_error ("type `%#T' argument given to `delete', expected pointer",
+ type);
+ return error_mark_node;
+ }
+
+ /* Deleting a pointer with the value zero is legal and has no effect. */
+ if (integer_zerop (t))
+ return build1 (NOP_EXPR, void_type_node, t);
+ }
+
+ /* You can't delete a pointer to constant. */
+ if (code == POINTER_TYPE && TREE_READONLY (TREE_TYPE (type)))
+ {
+ error ("`const *' cannot be deleted");
+ return error_mark_node;
+ }
+
+#if 0
+ /* If the type has no destructor, then we should build a regular
+ delete, instead of a vector delete. Otherwise, we would end
+ up passing a bogus offset into __builtin_delete, which is
+ not expecting it. */
+ if (doing_vec
+ && TREE_CODE (type) == POINTER_TYPE
+ && !TYPE_HAS_DESTRUCTOR (TREE_TYPE (type)))
+ {
+ doing_vec = 0;
+ use_global_delete = 1;
+ }
+#endif
+
+ if (doing_vec)
+ return build_vec_delete (t, maxindex, elt_size, integer_one_node,
+ integer_two_node, use_global_delete);
+ else
+ return build_delete (type, t, integer_three_node,
+ LOOKUP_NORMAL|LOOKUP_HAS_IN_CHARGE,
+ use_global_delete);
+}
+
+/* Sanity check: report error if this function FUNCTION is not
+ really a member of the class (CTYPE) it is supposed to belong to.
+ CNAME is the same here as it is for grokclassfn above. */
+
+void
+check_classfn (ctype, cname, function)
+ tree ctype, cname, function;
+{
+ tree fn_name = DECL_NAME (function);
+ tree fndecl;
+ tree method_vec = CLASSTYPE_METHOD_VEC (ctype);
+ tree *methods = 0;
+ tree *end = 0;
+
+ if (method_vec != 0)
+ {
+ methods = &TREE_VEC_ELT (method_vec, 0);
+ end = TREE_VEC_END (method_vec);
+
+ /* First suss out ctors and dtors. */
+ if (*methods && fn_name == cname)
+ goto got_it;
+
+ while (++methods != end)
+ {
+ if (fn_name == DECL_NAME (*methods))
+ {
+ got_it:
+ fndecl = *methods;
+ while (fndecl)
+ {
+ if (DECL_ASSEMBLER_NAME (function) == DECL_ASSEMBLER_NAME (fndecl))
+ return;
+ fndecl = DECL_CHAIN (fndecl);
+ }
+ break; /* loser */
+ }
+ }
+ }
+
+ if (methods != end)
+ cp_error ("argument list for `%D' does not match any in class `%T'",
+ fn_name, ctype);
+ else
+ {
+ methods = 0;
+ cp_error ("no `%D' member function declared in class `%T'",
+ fn_name, ctype);
+ }
+
+ /* If we did not find the method in the class, add it to
+ avoid spurious errors. */
+ add_method (ctype, methods, function);
+}
+
+/* Process the specs, declarator (NULL if omitted), exception list RAISES,
+ and initializer INIT (NULL if omitted) of a structure component,
+ returning a FIELD_DECL node. ASMSPEC_TREE, if nonzero, gives the
+ asm name for the declaration.
+
+ This is done during the parsing of the struct declaration.
+ The FIELD_DECL nodes are chained together and the lot of them
+ are ultimately passed to `finish_struct' to make the RECORD_TYPE node.
+
+ C++:
+
+ If class A defines that certain functions in class B are friends, then
+ the way I have set things up, it is B who is interested in permission
+ granted by A. However, it is in A's context that these declarations
+ are parsed. By returning a void_type_node, class A does not attempt
+ to incorporate the declarations of the friends within its structure.
+
+ DO NOT MAKE ANY CHANGES TO THIS CODE WITHOUT MAKING CORRESPONDING
+ CHANGES TO CODE IN `start_method'. */
+
+tree
+grokfield (declarator, declspecs, raises, init, asmspec_tree)
+ tree declarator, declspecs, raises, init, asmspec_tree;
+{
+ register tree value;
+ char *asmspec = 0;
+
+ /* Convert () initializers to = initializers. */
+ if (init == NULL_TREE && declarator != NULL_TREE
+ && TREE_CODE (declarator) == CALL_EXPR
+ && TREE_OPERAND (declarator, 0)
+ && (TREE_CODE (TREE_OPERAND (declarator, 0)) == IDENTIFIER_NODE
+ || TREE_CODE (TREE_OPERAND (declarator, 0)) == SCOPE_REF)
+ && parmlist_is_exprlist (TREE_OPERAND (declarator, 1)))
+ {
+ init = TREE_OPERAND (declarator, 1);
+ declarator = TREE_OPERAND (declarator, 0);
+ }
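+ /* E.g., a member declared `int i (2);' arrives as a CALL_EXPR and
+ is treated from here on as if it had been written `int i = 2;'. */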
+
+ if (init
+ && TREE_CODE (init) == TREE_LIST
+ && TREE_VALUE (init) == error_mark_node
+ && TREE_CHAIN (init) == NULL_TREE)
+ init = NULL_TREE;
+
+ value = grokdeclarator (declarator, declspecs, FIELD, init != 0, raises);
+ if (! value)
+ return value; /* friend or constructor went bad. */
+
+ /* Pass friendly classes back. */
+ if (TREE_CODE (value) == VOID_TYPE)
+ return void_type_node;
+
+ if (DECL_NAME (value) != NULL_TREE
+ && IDENTIFIER_POINTER (DECL_NAME (value))[0] == '_'
+ && ! strcmp (IDENTIFIER_POINTER (DECL_NAME (value)), "_vptr"))
+ cp_error ("member `%D' conflicts with virtual function table field name", value);
+
+ /* Stash away type declarations. */
+ if (TREE_CODE (value) == TYPE_DECL)
+ {
+ DECL_NONLOCAL (value) = 1;
+ DECL_CONTEXT (value) = current_class_type;
+ DECL_CLASS_CONTEXT (value) = current_class_type;
+ CLASSTYPE_LOCAL_TYPEDECLS (current_class_type) = 1;
+ pushdecl_class_level (value);
+ return value;
+ }
+
+ if (IS_SIGNATURE (current_class_type)
+ && TREE_CODE (value) != FUNCTION_DECL)
+ {
+ error ("field declaration not allowed in signature");
+ return void_type_node;
+ }
+
+ if (DECL_IN_AGGR_P (value))
+ {
+ cp_error ("`%D' is already defined in the class %T", value,
+ DECL_CONTEXT (value));
+ return void_type_node;
+ }
+
+ if (flag_cadillac)
+ cadillac_start_decl (value);
+
+ if (asmspec_tree)
+ asmspec = TREE_STRING_POINTER (asmspec_tree);
+
+ if (init)
+ {
+ if (IS_SIGNATURE (current_class_type)
+ && TREE_CODE (value) == FUNCTION_DECL)
+ {
+ error ("function declarations cannot have initializers in signature");
+ init = NULL_TREE;
+ }
+ else if (TREE_CODE (value) == FUNCTION_DECL)
+ {
+ grok_function_init (value, init);
+ init = NULL_TREE;
+ }
+ else if (pedantic && TREE_CODE (value) != VAR_DECL)
+ /* Already complained in grokdeclarator. */
+ init = NULL_TREE;
+ else
+ {
+ /* We allow initializers to become parameters to base
+ initializers. */
+ if (TREE_CODE (init) == TREE_LIST)
+ {
+ if (TREE_CHAIN (init) == NULL_TREE)
+ init = TREE_VALUE (init);
+ else
+ init = digest_init (TREE_TYPE (value), init, (tree *)0);
+ }
+
+ if (TREE_CODE (init) == CONST_DECL)
+ init = DECL_INITIAL (init);
+ else if (TREE_READONLY_DECL_P (init))
+ init = decl_constant_value (init);
+ else if (TREE_CODE (init) == CONSTRUCTOR)
+ init = digest_init (TREE_TYPE (value), init, (tree *)0);
+ my_friendly_assert (TREE_PERMANENT (init), 192);
+ if (init == error_mark_node)
+ /* We must make this look different from `error_mark_node'
+ because `decl_constant_value' would misinterpret it
+ as only meaning that this VAR_DECL is defined. */
+ init = build1 (NOP_EXPR, TREE_TYPE (value), init);
+ else if (! TREE_CONSTANT (init))
+ {
+ /* We can allow references to things that are effectively
+ static, since references are initialized with the address. */
+ if (TREE_CODE (TREE_TYPE (value)) != REFERENCE_TYPE
+ || (TREE_STATIC (init) == 0
+ && (TREE_CODE_CLASS (TREE_CODE (init)) != 'd'
+ || DECL_EXTERNAL (init) == 0)))
+ {
+ error ("field initializer is not constant");
+ init = error_mark_node;
+ }
+ }
+ }
+ }
+
+ /* The corresponding pop_obstacks is in finish_decl. */
+ push_obstacks_nochange ();
+
+ if (TREE_CODE (value) == VAR_DECL)
+ {
+ /* We cannot call pushdecl here, because that would
+ fill in the value of our TREE_CHAIN. Instead, we
+ modify finish_decl to do the right thing, namely, to
+ put this decl out straight away. */
+ if (TREE_PUBLIC (value))
+ {
+ /* current_class_type can be NULL_TREE in case of error. */
+ if (asmspec == 0 && current_class_type)
+ {
+ tree name;
+ char *buf, *buf2;
+
+ buf2 = build_overload_name (current_class_type, 1, 1);
+ buf = (char *)alloca (IDENTIFIER_LENGTH (DECL_NAME (value))
+ + sizeof (STATIC_NAME_FORMAT)
+ + strlen (buf2));
+ sprintf (buf, STATIC_NAME_FORMAT, buf2,
+ IDENTIFIER_POINTER (DECL_NAME (value)));
+ name = get_identifier (buf);
+ TREE_PUBLIC (value) = 1;
+ DECL_INITIAL (value) = error_mark_node;
+ DECL_ASSEMBLER_NAME (value) = name;
+ }
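+ /* For example, for `class X { static int i; };' BUF2 is the
+ mangled class name "1X", and the assembler name splices that
+ together with "i" via STATIC_NAME_FORMAT (whose exact spelling
+ is configuration-dependent). */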
+ pending_statics = perm_tree_cons (NULL_TREE, value, pending_statics);
+
+ /* Static consts need not be initialized in the class definition. */
+ if (init != NULL_TREE && TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (value)))
+ {
+ static int explanation = 0;
+
+ error ("initializer invalid for static member with constructor");
+ if (explanation++ == 0)
+ error ("(you really want to initialize it separately)");
+ init = 0;
+ }
+ /* Force the compiler to know when an uninitialized static
+ const member is being used. */
+ if (TYPE_READONLY (value) && init == 0)
+ TREE_USED (value) = 1;
+ }
+ DECL_INITIAL (value) = init;
+ DECL_IN_AGGR_P (value) = 1;
+
+ finish_decl (value, init, asmspec_tree, 1);
+ pushdecl_class_level (value);
+ return value;
+ }
+ if (TREE_CODE (value) == FIELD_DECL)
+ {
+ if (asmspec)
+ DECL_ASSEMBLER_NAME (value) = get_identifier (asmspec);
+ if (DECL_INITIAL (value) == error_mark_node)
+ init = error_mark_node;
+ finish_decl (value, init, asmspec_tree, 1);
+ DECL_INITIAL (value) = init;
+ DECL_IN_AGGR_P (value) = 1;
+ return value;
+ }
+ if (TREE_CODE (value) == FUNCTION_DECL)
+ {
+ if (DECL_CHAIN (value) != NULL_TREE)
+ {
+ /* Need a fresh node here so that we don't get circularity
+ when we link these together. */
+ value = copy_node (value);
+ /* When does this happen? */
+ my_friendly_assert (init == NULL_TREE, 193);
+ }
+ finish_decl (value, init, asmspec_tree, 1);
+
+ /* Pass friends back this way. */
+ if (DECL_FRIEND_P (value))
+ return void_type_node;
+
+ if (current_function_decl && ! IS_SIGNATURE (current_class_type))
+ cp_error ("method `%#D' of local class must be defined in class body",
+ value);
+
+ DECL_IN_AGGR_P (value) = 1;
+ return value;
+ }
+ my_friendly_abort (21);
+ /* NOTREACHED */
+ return NULL_TREE;
+}
+
+/* Like `grokfield', but for bitfields.
+ WIDTH is non-NULL for bit fields only, and is an INTEGER_CST node. */
+
+tree
+grokbitfield (declarator, declspecs, width)
+ tree declarator, declspecs, width;
+{
+ register tree value = grokdeclarator (declarator, declspecs, BITFIELD, 0, NULL_TREE);
+
+ if (! value) return NULL_TREE; /* friends went bad. */
+
+ /* Pass friendly classes back. */
+ if (TREE_CODE (value) == VOID_TYPE)
+ return void_type_node;
+
+ if (TREE_CODE (value) == TYPE_DECL)
+ {
+ cp_error ("cannot declare `%D' to be a bitfield type", value);
+ return NULL_TREE;
+ }
+
+ if (IS_SIGNATURE (current_class_type))
+ {
+ error ("field declaration not allowed in signature");
+ return void_type_node;
+ }
+
+ if (DECL_IN_AGGR_P (value))
+ {
+ cp_error ("`%D' is already defined in the class %T", value,
+ DECL_CONTEXT (value));
+ return void_type_node;
+ }
+
+ GNU_xref_member (current_class_name, value);
+
+ if (TREE_STATIC (value))
+ {
+ cp_error ("static member `%D' cannot be a bitfield", value);
+ return NULL_TREE;
+ }
+ finish_decl (value, NULL_TREE, NULL_TREE, 0);
+
+ if (width != error_mark_node)
+ {
+ /* Detect an invalid field size. */
+ if (TREE_CODE (width) == CONST_DECL)
+ width = DECL_INITIAL (width);
+ else if (TREE_READONLY_DECL_P (width))
+ width = decl_constant_value (width);
+ if (TREE_CODE (width) != INTEGER_CST)
+ {
+ cp_error ("structure field `%D' width not an integer constant",
+ value);
+ DECL_INITIAL (value) = NULL_TREE;
+ }
+ else
+ {
+ constant_expression_warning (width);
+ DECL_INITIAL (value) = width;
+ DECL_BIT_FIELD (value) = 1;
+ }
+ }
+
+ DECL_IN_AGGR_P (value) = 1;
+ return value;
+}
+
+#if 0
+/* Like GROKFIELD, except that the declarator has been
+ buried in DECLSPECS. Find the declarator, and
+ return something that looks like it came from
+ GROKFIELD. */
+tree
+groktypefield (declspecs, parmlist)
+ tree declspecs;
+ tree parmlist;
+{
+ tree spec = declspecs;
+ tree prev = NULL_TREE;
+
+ tree type_id = NULL_TREE;
+ tree quals = NULL_TREE;
+ tree lengths = NULL_TREE;
+ tree decl = NULL_TREE;
+
+ while (spec)
+ {
+ register tree id = TREE_VALUE (spec);
+
+ if (TREE_CODE (spec) != TREE_LIST)
+ /* Certain parse errors slip through. For example,
+ `int class ();' is not caught by the parser. Try
+ weakly to recover here. */
+ return NULL_TREE;
+
+ if (TREE_CODE (id) == TYPE_DECL
+ || (TREE_CODE (id) == IDENTIFIER_NODE && TREE_TYPE (id)))
+ {
+ /* We have a constructor/destructor or
+ conversion operator. Use it. */
+ if (prev)
+ TREE_CHAIN (prev) = TREE_CHAIN (spec);
+ else
+ declspecs = TREE_CHAIN (spec);
+
+ type_id = id;
+ goto found;
+ }
+ prev = spec;
+ spec = TREE_CHAIN (spec);
+ }
+
+ /* Nope, we have a conversion operator to a scalar type or something
+ else, that includes things like constructor declarations for
+ templates. */
+ spec = declspecs;
+ while (spec)
+ {
+ tree id = TREE_VALUE (spec);
+
+ if (TREE_CODE (id) == IDENTIFIER_NODE)
+ {
+ if (id == ridpointers[(int)RID_INT]
+ || id == ridpointers[(int)RID_DOUBLE]
+ || id == ridpointers[(int)RID_FLOAT]
+ || id == ridpointers[(int)RID_WCHAR])
+ {
+ if (type_id)
+ error ("extra `%s' ignored",
+ IDENTIFIER_POINTER (id));
+ else
+ type_id = id;
+ }
+ else if (id == ridpointers[(int)RID_LONG]
+ || id == ridpointers[(int)RID_SHORT]
+ || id == ridpointers[(int)RID_CHAR])
+ {
+ lengths = tree_cons (NULL_TREE, id, lengths);
+ }
+ else if (id == ridpointers[(int)RID_VOID])
+ {
+ if (type_id)
+ error ("spurious `void' type ignored");
+ else
+ error ("conversion to `void' type invalid");
+ }
+ else if (id == ridpointers[(int)RID_AUTO]
+ || id == ridpointers[(int)RID_REGISTER]
+ || id == ridpointers[(int)RID_TYPEDEF]
+ || id == ridpointers[(int)RID_CONST]
+ || id == ridpointers[(int)RID_VOLATILE])
+ {
+ error ("type specifier `%s' used invalidly",
+ IDENTIFIER_POINTER (id));
+ }
+ else if (id == ridpointers[(int)RID_FRIEND]
+ || id == ridpointers[(int)RID_VIRTUAL]
+ || id == ridpointers[(int)RID_INLINE]
+ || id == ridpointers[(int)RID_UNSIGNED]
+ || id == ridpointers[(int)RID_SIGNED]
+ || id == ridpointers[(int)RID_STATIC]
+ || id == ridpointers[(int)RID_EXTERN])
+ {
+ quals = tree_cons (NULL_TREE, id, quals);
+ }
+ else
+ {
+ /* Happens when we have a global typedef
+ and a class-local member function with
+ the same name. */
+ type_id = id;
+ goto found;
+ }
+ }
+ else if (TREE_CODE (id) == RECORD_TYPE)
+ {
+ type_id = TYPE_NAME (id);
+ if (TREE_CODE (type_id) == TYPE_DECL)
+ type_id = DECL_NAME (type_id);
+ if (type_id == NULL_TREE)
+ error ("identifier for aggregate type conversion omitted");
+ }
+ else if (TREE_CODE_CLASS (TREE_CODE (id)) == 't')
+ error ("`operator' missing on conversion operator or tag missing from type");
+ else
+ my_friendly_abort (194);
+ spec = TREE_CHAIN (spec);
+ }
+
+ if (type_id)
+ declspecs = chainon (lengths, quals);
+ else if (lengths)
+ {
+ if (TREE_CHAIN (lengths))
+ error ("multiple length specifiers");
+ type_id = ridpointers[(int)RID_INT];
+ declspecs = chainon (lengths, quals);
+ }
+ else if (quals)
+ {
+ error ("no type given, defaulting to `operator int ...'");
+ type_id = ridpointers[(int)RID_INT];
+ declspecs = quals;
+ }
+ else
+ return NULL_TREE;
+
+ found:
+ decl = grokdeclarator (build_parse_node (CALL_EXPR, type_id, parmlist, NULL_TREE),
+ declspecs, FIELD, 0, NULL_TREE);
+ if (decl == NULL_TREE)
+ return NULL_TREE;
+
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_CHAIN (decl) != NULL_TREE)
+ {
+ /* Need a fresh node here so that we don't get circularity
+ when we link these together. */
+ decl = copy_node (decl);
+ }
+
+ if (decl == void_type_node
+ || (TREE_CODE (decl) == FUNCTION_DECL
+ && TREE_CODE (TREE_TYPE (decl)) != METHOD_TYPE))
+ /* bunch of friends. */
+ return decl;
+
+ if (DECL_IN_AGGR_P (decl))
+ {
+ cp_error ("`%D' already defined in the class ", decl);
+ return void_type_node;
+ }
+
+ finish_decl (decl, NULL_TREE, NULL_TREE, 0);
+
+ /* If this declaration is common to another declaration
+ complain about such redundancy, and return NULL_TREE
+ so that we don't build a circular list. */
+ if (DECL_CHAIN (decl))
+ {
+ cp_error ("function `%D' declared twice in class %T", decl,
+ DECL_CONTEXT (decl));
+ return NULL_TREE;
+ }
+ DECL_IN_AGGR_P (decl) = 1;
+ return decl;
+}
+#endif
+
+tree
+grokoptypename (declspecs, declarator)
+ tree declspecs, declarator;
+{
+ tree t = grokdeclarator (declarator, declspecs, TYPENAME, 0, NULL_TREE);
+ return build_typename_overload (t);
+}
+
+/* When a function is declared with an initializer,
+ do the right thing. Currently, there are two possibilities:
+
+ class B
+ {
+ public:
+ // initialization possibility #1.
+ virtual void f () = 0;
+ int g ();
+ };
+
+ class D1 : B
+ {
+ public:
+ int d1;
+ // error, no f ();
+ };
+
+ class D2 : B
+ {
+ public:
+ int d2;
+ void f ();
+ };
+
+ class D3 : B
+ {
+ public:
+ int d3;
+ // initialization possibility #2
+ void f () = B::f;
+ };
+
+*/
+
+int
+copy_assignment_arg_p (parmtype, virtualp)
+ tree parmtype;
+ int virtualp;
+{
+ if (TREE_CODE (parmtype) == REFERENCE_TYPE)
+ parmtype = TREE_TYPE (parmtype);
+
+ if ((TYPE_MAIN_VARIANT (parmtype) == current_class_type)
+ || (virtualp && DERIVED_FROM_P (parmtype, current_class_type)))
+ return 1;
+
+ return 0;
+}
+
+static void
+grok_function_init (decl, init)
+ tree decl;
+ tree init;
+{
+ /* An initializer for a function tells how this function should
+ be inherited. */
+ tree type = TREE_TYPE (decl);
+
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ cp_error ("initializer specified for non-member function `%D'", decl);
+ else if (DECL_VINDEX (decl) == NULL_TREE)
+ cp_error ("initializer specified for non-virtual method `%D'", decl);
+ else if (integer_zerop (init))
+ {
+#if 0
+ /* Mark this function as being "defined". */
+ DECL_INITIAL (decl) = error_mark_node;
+ /* pure virtual destructors must be defined. */
+ /* pure virtual needs to be defined (as abort) only when put in
+ vtbl. For wellformed call, it should be itself. pr4737 */
+ if (!DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (decl)))
+ {
+ extern tree abort_fndecl;
+ /* Give this node rtl from `abort'. */
+ DECL_RTL (decl) = DECL_RTL (abort_fndecl);
+ }
+#endif
+ DECL_ABSTRACT_VIRTUAL_P (decl) = 1;
+ if (DECL_NAME (decl) == ansi_opname [(int) MODIFY_EXPR])
+ {
+ tree parmtype
+ = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (decl))));
+
+ if (copy_assignment_arg_p (parmtype, 1))
+ TYPE_HAS_ABSTRACT_ASSIGN_REF (current_class_type) = 1;
+ }
+ }
+ else if (TREE_CODE (init) == OFFSET_REF
+ && TREE_OPERAND (init, 0) == NULL_TREE
+ && TREE_CODE (TREE_TYPE (init)) == METHOD_TYPE)
+ {
+ tree basetype = DECL_CLASS_CONTEXT (init);
+ tree basefn = TREE_OPERAND (init, 1);
+ if (TREE_CODE (basefn) != FUNCTION_DECL)
+ cp_error ("non-method initializer invalid for method `%D'", decl);
+ else if (! BINFO_OFFSET_ZEROP (TYPE_BINFO (DECL_CLASS_CONTEXT (basefn))))
+ sorry ("base member function from other than first base class");
+ else
+ {
+ tree binfo = get_binfo (basetype, TYPE_METHOD_BASETYPE (type), 1);
+ if (binfo == error_mark_node)
+ ;
+ else if (binfo == 0)
+ error_not_base_type (TYPE_METHOD_BASETYPE (TREE_TYPE (init)),
+ TYPE_METHOD_BASETYPE (type));
+ else
+ {
+ /* Mark this function as being defined,
+ and give it new rtl. */
+ DECL_INITIAL (decl) = error_mark_node;
+ DECL_RTL (decl) = DECL_RTL (basefn);
+ }
+ }
+ }
+ else
+ cp_error ("invalid initializer for virtual method `%D'", decl);
+}
+
+/* When we get a declaration of the form
+
+ type cname::fname ...
+
+ the node for `cname::fname' gets built here in a special way.
+ Namely, we push into `cname's scope. When this declaration is
+ processed, we pop back out. */
+tree
+build_push_scope (cname, name)
+ tree cname;
+ tree name;
+{
+ extern int current_class_depth;
+ tree ctype, rval;
+ int is_ttp = 0;
+
+ if (cname == error_mark_node)
+ return error_mark_node;
+
+ ctype = IDENTIFIER_TYPE_VALUE (cname);
+
+ if (TREE_CODE (ctype) == TEMPLATE_TYPE_PARM)
+ is_ttp = 1;
+ else if (ctype == NULL_TREE || ! IS_AGGR_TYPE (ctype))
+ {
+ cp_error ("`%T' not defined as aggregate type", cname);
+ return name;
+ }
+ else if (IS_SIGNATURE (ctype))
+ {
+ error ("cannot push into signature scope, scope resolution operator ignored");
+ return name;
+ }
+
+ rval = build_parse_node (SCOPE_REF, cname, name);
+
+ /* Don't need to push the scope if we're already in it.
+ We also don't need to push the scope for a ptr-to-member/method. */
+
+ if (ctype == current_class_type || TREE_CODE (name) != IDENTIFIER_NODE
+ || is_ttp)
+ return rval;
+
+ /* We do need to push the scope in this case, since CTYPE helps
+ determine subsequent initializers (i.e., Foo::Bar x = foo_enum_1;). */
+
+ push_nested_class (ctype, 3);
+ TREE_COMPLEXITY (rval) = current_class_depth;
+ return rval;
+}
+
+void
+cplus_decl_attributes (decl, attributes)
+ tree decl, attributes;
+{
+ if (decl && decl != void_type_node)
+ decl_attributes (decl, attributes);
+}
+
+/* CONSTRUCTOR_NAME_FULL:
+ Return the name for the constructor (or destructor) for the
+ specified class. Argument can be RECORD_TYPE, TYPE_DECL, or
+ IDENTIFIER_NODE. When given a template, this routine doesn't
+ lose the specialization. */
+tree
+constructor_name_full (thing)
+ tree thing;
+{
+ if (TREE_CODE (thing) == UNINSTANTIATED_P_TYPE)
+ return DECL_NAME (UPT_TEMPLATE (thing));
+ if (IS_AGGR_TYPE_CODE (TREE_CODE (thing)))
+ {
+ if (TYPE_WAS_ANONYMOUS (thing) && TYPE_HAS_CONSTRUCTOR (thing))
+ thing = DECL_NAME (TREE_VEC_ELT (TYPE_METHODS (thing), 0));
+ else
+ thing = TYPE_NAME (thing);
+ }
+ if (TREE_CODE (thing) == TYPE_DECL
+ || (TREE_CODE (thing) == TEMPLATE_DECL
+ && DECL_TEMPLATE_IS_CLASS (thing)))
+ thing = DECL_NAME (thing);
+ my_friendly_assert (TREE_CODE (thing) == IDENTIFIER_NODE, 197);
+ return thing;
+}
+
+/* CONSTRUCTOR_NAME:
+ Return the name for the constructor (or destructor) for the
+ specified class. Argument can be RECORD_TYPE, TYPE_DECL, or
+ IDENTIFIER_NODE. When given a template, return the plain
+ unspecialized name. */
+tree
+constructor_name (thing)
+ tree thing;
+{
+ tree t;
+ thing = constructor_name_full (thing);
+ t = IDENTIFIER_TEMPLATE (thing);
+ if (!t)
+ return thing;
+ t = TREE_PURPOSE (t);
+ return DECL_NAME (t);
+}
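+
+/* Thus, for an instantiation of a template class `S', the routine
+ above (constructor_name_full) hands back the specialized identifier
+ (the `S<int>' flavor), while constructor_name follows
+ IDENTIFIER_TEMPLATE back to the TEMPLATE_DECL and returns the plain
+ name `S'. */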
+
+/* Cache the value of this class's main virtual function table pointer
+ in a register variable. This will save one indirection if more
+ than one virtual function call is made in this function. */
+void
+setup_vtbl_ptr ()
+{
+ extern rtx base_init_insns;
+
+ if (base_init_insns == 0
+ && DECL_CONSTRUCTOR_P (current_function_decl))
+ emit_base_init (current_class_type, 0);
+
+#if 0
+ /* This has something a little wrong with it.
+
+ On a sun4, code like:
+
+ be L6
+ ld [%i0],%o1
+
+ is generated, when the below is used when -O4 is given. The delay
+ slot is filled with an instruction that is safe, when this isn't
+ used, like in:
+
+ be L6
+ sethi %hi(LC1),%o0
+ ld [%i0],%o1
+
+ on code like:
+
+ struct A {
+ virtual void print() { printf("xxx"); }
+ void f();
+ };
+
+ void A::f() {
+ if (this) {
+ print();
+ } else {
+ printf("0");
+ }
+ }
+
+ And that is why this is disabled for now. (mrs)
+ */
+
+ if ((flag_this_is_variable & 1) == 0
+ && optimize
+ && current_class_type
+ && CLASSTYPE_VSIZE (current_class_type)
+ && ! DECL_STATIC_FUNCTION_P (current_function_decl))
+ {
+ tree vfield = build_vfield_ref (C_C_D, current_class_type);
+ current_vtable_decl = CLASSTYPE_VTBL_PTR (current_class_type);
+ DECL_RTL (current_vtable_decl) = 0;
+ DECL_INITIAL (current_vtable_decl) = error_mark_node;
+ /* Have to cast the initializer, since it may have come from a
+ more base class than the one we ascribe CURRENT_VTABLE_DECL to. */
+ finish_decl (current_vtable_decl, convert_force (TREE_TYPE (current_vtable_decl), vfield), 0, 0);
+ current_vtable_decl = build_indirect_ref (current_vtable_decl, NULL_PTR);
+ }
+ else
+#endif
+ current_vtable_decl = NULL_TREE;
+}
+
+/* Record the existence of an addressable inline function. */
+void
+mark_inline_for_output (decl)
+ tree decl;
+{
+ if (DECL_SAVED_INLINE (decl))
+ return;
+ DECL_SAVED_INLINE (decl) = 1;
+ if (DECL_PENDING_INLINE_INFO (decl) != 0
+ && ! DECL_PENDING_INLINE_INFO (decl)->deja_vu)
+ {
+ struct pending_inline *t = pending_inlines;
+ my_friendly_assert (DECL_SAVED_INSNS (decl) == 0, 198);
+ while (t)
+ {
+ if (t == DECL_PENDING_INLINE_INFO (decl))
+ break;
+ t = t->next;
+ }
+ if (t == 0)
+ {
+ t = DECL_PENDING_INLINE_INFO (decl);
+ t->next = pending_inlines;
+ pending_inlines = t;
+ }
+ DECL_PENDING_INLINE_INFO (decl) = 0;
+ }
+ saved_inlines = perm_tree_cons (NULL_TREE, decl, saved_inlines);
+}
+
+void
+clear_temp_name ()
+{
+ temp_name_counter = 0;
+}
+
+/* Hand off a unique name which can be used for a variable we don't really
+ want to know about anyway, for example, the anonymous variables which
+ are needed to make references work. Declare this thing so we can use it.
+ The variable created will be of type TYPE.
+
+ STATICP is nonzero if this variable should be static. */
+
+tree
+get_temp_name (type, staticp)
+ tree type;
+ int staticp;
+{
+ char buf[sizeof (AUTO_TEMP_FORMAT) + 20];
+ tree decl;
+ int toplev = global_bindings_p ();
+
+ push_obstacks_nochange ();
+ if (toplev || staticp)
+ {
+ end_temporary_allocation ();
+ sprintf (buf, AUTO_TEMP_FORMAT, global_temp_name_counter++);
+ decl = pushdecl_top_level (build_decl (VAR_DECL, get_identifier (buf), type));
+ }
+ else
+ {
+ sprintf (buf, AUTO_TEMP_FORMAT, temp_name_counter++);
+ decl = pushdecl (build_decl (VAR_DECL, get_identifier (buf), type));
+ }
+ TREE_USED (decl) = 1;
+ TREE_STATIC (decl) = staticp;
+
+ /* If this is a local variable, then lay out its rtl now.
+ Otherwise, callers of this function are responsible for dealing
+ with this variable's rtl. */
+ if (! toplev)
+ {
+ expand_decl (decl);
+ expand_decl_init (decl);
+ }
+ pop_obstacks ();
+
+ return decl;
+}
+
+/* Get a variable which we can use for multiple assignments.
+ It is not entered into current_binding_level, because
+ that breaks things when it comes time to do final cleanups
+ (which take place "outside" the binding contour of the function). */
+tree
+get_temp_regvar (type, init)
+ tree type, init;
+{
+ static char buf[sizeof (AUTO_TEMP_FORMAT) + 20] = { '_' };
+ tree decl;
+
+ sprintf (buf+1, AUTO_TEMP_FORMAT, temp_name_counter++);
+ decl = build_decl (VAR_DECL, get_identifier (buf), type);
+ TREE_USED (decl) = 1;
+ DECL_REGISTER (decl) = 1;
+
+ if (init)
+ store_init_value (decl, init);
+
+ /* We can expand these without fear, since they cannot need
+ constructors or destructors. */
+ expand_decl (decl);
+ expand_decl_init (decl);
+
+ if (type_needs_gc_entry (type))
+ DECL_GC_OFFSET (decl) = size_int (++current_function_obstack_index);
+
+ return decl;
+}
+
+/* Make the macro TEMP_NAME_P available to units which do not
+ include c-tree.h. */
+int
+temp_name_p (decl)
+ tree decl;
+{
+ return TEMP_NAME_P (decl);
+}
+
+/* Finish off the processing of a UNION_TYPE structure.
+ If there are static members, then all members are
+ static, and must be laid out together. If the
+ union is an anonymous union, we arrange for that
+ as well. PUBLIC_P is nonzero if this union is
+ not declared static. */
+void
+finish_anon_union (anon_union_decl)
+ tree anon_union_decl;
+{
+ tree type = TREE_TYPE (anon_union_decl);
+ tree field, main_decl = NULL_TREE;
+ tree elems = NULL_TREE;
+ int public_p = TREE_PUBLIC (anon_union_decl);
+ int static_p = TREE_STATIC (anon_union_decl);
+ int external_p = DECL_EXTERNAL (anon_union_decl);
+
+ if ((field = TYPE_FIELDS (type)) == NULL_TREE)
+ return;
+
+ if (public_p)
+ {
+ error ("global anonymous unions must be declared static");
+ return;
+ }
+
+ while (field)
+ {
+ tree decl = build_decl (VAR_DECL, DECL_NAME (field), TREE_TYPE (field));
+ /* tell `pushdecl' that this is not tentative. */
+ DECL_INITIAL (decl) = error_mark_node;
+ TREE_PUBLIC (decl) = public_p;
+ TREE_STATIC (decl) = static_p;
+ DECL_EXTERNAL (decl) = external_p;
+ decl = pushdecl (decl);
+
+ /* Only write out one anon union element--choose the one that
+ can hold them all. */
+ if (main_decl == NULL_TREE
+ && simple_cst_equal (DECL_SIZE (decl), DECL_SIZE (anon_union_decl)))
+ {
+ main_decl = decl;
+ }
+ else
+ {
+ /* ??? This causes there to be no debug info written out
+ about this decl. */
+ TREE_ASM_WRITTEN (decl) = 1;
+ }
+
+ DECL_INITIAL (decl) = NULL_TREE;
+ /* If there's a cleanup to do, it belongs in the
+ TREE_PURPOSE of the following TREE_LIST. */
+ elems = tree_cons (NULL_TREE, decl, elems);
+ TREE_TYPE (elems) = type;
+ field = TREE_CHAIN (field);
+ }
+ if (static_p)
+ {
+ make_decl_rtl (main_decl, 0, global_bindings_p ());
+ DECL_RTL (anon_union_decl) = DECL_RTL (main_decl);
+ }
+
+ /* The following call assumes that there are never any cleanups
+ for anonymous unions--a reasonable assumption. */
+ expand_anon_union_decl (anon_union_decl, NULL_TREE, elems);
+
+ if (flag_cadillac)
+ cadillac_finish_anon_union (anon_union_decl);
+}
+
+/* Finish and output a table which is generated by the compiler.
+ NAME is the name to give the table.
+ TYPE is the type of the table entry.
+ INIT is all the elements in the table.
+ PUBLICP is non-zero if this table should be given external access. */
+tree
+finish_table (name, type, init, publicp)
+ tree name, type, init;
+ int publicp;
+{
+ tree itype, atype, decl;
+ static tree empty_table;
+ int is_empty = 0;
+ tree asmspec;
+
+ itype = build_index_type (size_int (list_length (init) - 1));
+ atype = build_cplus_array_type (type, itype);
+ layout_type (atype);
+
+ if (TREE_VALUE (init) == integer_zero_node
+ && TREE_CHAIN (init) == NULL_TREE)
+ {
+ if (empty_table == NULL_TREE)
+ {
+ empty_table = get_temp_name (atype, 1);
+ init = build (CONSTRUCTOR, atype, NULL_TREE, init);
+ TREE_CONSTANT (init) = 1;
+ TREE_STATIC (init) = 1;
+ DECL_INITIAL (empty_table) = init;
+ asmspec = build_string (IDENTIFIER_LENGTH (DECL_NAME (empty_table)),
+ IDENTIFIER_POINTER (DECL_NAME (empty_table)));
+ finish_decl (empty_table, init, asmspec, 0);
+ }
+ is_empty = 1;
+ }
+
+ if (name == NULL_TREE)
+ {
+ if (is_empty)
+ return empty_table;
+ decl = get_temp_name (atype, 1);
+ }
+ else
+ {
+ decl = build_decl (VAR_DECL, name, atype);
+ decl = pushdecl (decl);
+ TREE_STATIC (decl) = 1;
+ }
+
+ if (is_empty == 0)
+ {
+ TREE_PUBLIC (decl) = publicp;
+ init = build (CONSTRUCTOR, atype, NULL_TREE, init);
+ TREE_CONSTANT (init) = 1;
+ TREE_STATIC (init) = 1;
+ DECL_INITIAL (decl) = init;
+ asmspec = build_string (IDENTIFIER_LENGTH (DECL_NAME (decl)),
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ }
+ else
+ {
+ /* This will cause DECL to point to EMPTY_TABLE in rtl-land. */
+ DECL_EXTERNAL (decl) = 1;
+ TREE_STATIC (decl) = 0;
+ init = 0;
+ asmspec = build_string (IDENTIFIER_LENGTH (DECL_NAME (empty_table)),
+ IDENTIFIER_POINTER (DECL_NAME (empty_table)));
+ }
+
+ finish_decl (decl, init, asmspec, 0);
+ return decl;
+}
+
+/* Finish processing a builtin type TYPE. Its name is NAME,
+ its fields are in the array FIELDS. LEN is the number of elements
+ in FIELDS minus one, or put another way, it is the maximum subscript
+ used in FIELDS.
+
+ It is given the same alignment as ALIGN_TYPE. */
+void
+finish_builtin_type (type, name, fields, len, align_type)
+ tree type;
+ char *name;
+ tree fields[];
+ int len;
+ tree align_type;
+{
+ register int i;
+
+ TYPE_FIELDS (type) = fields[0];
+ for (i = 0; i < len; i++)
+ {
+ layout_type (TREE_TYPE (fields[i]));
+ DECL_FIELD_CONTEXT (fields[i]) = type;
+ TREE_CHAIN (fields[i]) = fields[i+1];
+ }
+ DECL_FIELD_CONTEXT (fields[i]) = type;
+ DECL_CLASS_CONTEXT (fields[i]) = type;
+ TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
+ layout_type (type);
+#if 0 /* not yet, should get fixed properly later */
+ TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
+#else
+ TYPE_NAME (type) = build_decl (TYPE_DECL, get_identifier (name), type);
+#endif
+ layout_decl (TYPE_NAME (type), 0);
+}
+
+/* Auxiliary functions to make type signatures for
+ `operator new' and `operator delete' correspond to
+ what the compiler will be expecting. */
+
+extern tree sizetype;
+
+tree
+coerce_new_type (type)
+ tree type;
+{
+ int e1 = 0, e2 = 0;
+
+ if (TREE_CODE (type) == METHOD_TYPE)
+ type = build_function_type (TREE_TYPE (type), TREE_CHAIN (TYPE_ARG_TYPES (type)));
+ if (TREE_TYPE (type) != ptr_type_node)
+ e1 = 1, error ("`operator new' must return type `void *'");
+
+ /* Technically the type must be `size_t', but we may not know
+ what that is. */
+ if (TYPE_ARG_TYPES (type) == NULL_TREE)
+ e1 = 1, error ("`operator new' takes type `size_t' parameter");
+ else if (TREE_CODE (TREE_VALUE (TYPE_ARG_TYPES (type))) != INTEGER_TYPE
+ || TYPE_PRECISION (TREE_VALUE (TYPE_ARG_TYPES (type))) != TYPE_PRECISION (sizetype))
+ e2 = 1, error ("`operator new' takes type `size_t' as first parameter");
+ if (e2)
+ type = build_function_type (ptr_type_node, tree_cons (NULL_TREE, sizetype, TREE_CHAIN (TYPE_ARG_TYPES (type))));
+ else if (e1)
+ type = build_function_type (ptr_type_node, TYPE_ARG_TYPES (type));
+ return type;
+}
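+
+/* The canonical declaration the compiler expects is thus
+
+ void *operator new (size_t);
+
+ anything else is diagnosed above and coerced toward that shape. */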
+
+tree
+coerce_delete_type (type)
+ tree type;
+{
+ int e1 = 0, e2 = 0, e3 = 0;
+ tree arg_types = TYPE_ARG_TYPES (type);
+
+ if (TREE_CODE (type) == METHOD_TYPE)
+ {
+ type = build_function_type (TREE_TYPE (type), TREE_CHAIN (arg_types));
+ arg_types = TREE_CHAIN (arg_types);
+ }
+ if (TREE_TYPE (type) != void_type_node)
+ e1 = 1, error ("`operator delete' must return type `void'");
+ if (arg_types == NULL_TREE
+ || TREE_VALUE (arg_types) != ptr_type_node)
+ e2 = 1, error ("`operator delete' takes type `void *' as first parameter");
+
+ if (arg_types
+ && TREE_CHAIN (arg_types)
+ && TREE_CHAIN (arg_types) != void_list_node)
+ {
+ /* Again, technically this argument must be `size_t', but again
+ we may not know what that is. */
+ tree t2 = TREE_VALUE (TREE_CHAIN (arg_types));
+ if (TREE_CODE (t2) != INTEGER_TYPE
+ || TYPE_PRECISION (t2) != TYPE_PRECISION (sizetype))
+ e3 = 1, error ("second argument to `operator delete' must be of type `size_t'");
+ else if (TREE_CHAIN (TREE_CHAIN (arg_types)) != void_list_node)
+ {
+ e3 = 1;
+ if (TREE_CHAIN (TREE_CHAIN (arg_types)))
+ error ("too many arguments in declaration of `operator delete'");
+ else
+ error ("`...' invalid in specification of `operator delete'");
+ }
+ }
+ if (e3)
+ arg_types = tree_cons (NULL_TREE, ptr_type_node, build_tree_list (NULL_TREE, sizetype));
+ else if (e3 |= e2)
+ {
+ if (arg_types == NULL_TREE)
+ arg_types = tree_cons (NULL_TREE, ptr_type_node, void_list_node);
+ else
+ arg_types = tree_cons (NULL_TREE, ptr_type_node, TREE_CHAIN (arg_types));
+ }
+ else e3 |= e1;
+
+ if (e3)
+ type = build_function_type (void_type_node, arg_types);
+
+ return type;
+}
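+
+/* The accepted declarations are thus
+
+ void operator delete (void *);
+ void operator delete (void *, size_t);
+
+ anything else is diagnosed above and coerced toward one of these. */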
+
+static void
+mark_vtable_entries (decl)
+ tree decl;
+{
+ tree entries = TREE_CHAIN (CONSTRUCTOR_ELTS (DECL_INITIAL (decl)));
+
+ if (flag_dossier)
+ entries = TREE_CHAIN (entries);
+
+ for (; entries; entries = TREE_CHAIN (entries))
+ {
+ tree fnaddr = FNADDR_FROM_VTABLE_ENTRY (TREE_VALUE (entries));
+ tree fn = TREE_OPERAND (fnaddr, 0);
+ TREE_ADDRESSABLE (fn) = 1;
+ if (DECL_ABSTRACT_VIRTUAL_P (fn))
+ {
+ extern tree abort_fndecl;
+ if (flag_vtable_thunks)
+ fnaddr = TREE_VALUE (entries);
+ TREE_OPERAND (fnaddr, 0) = abort_fndecl;
+ }
+ }
+}
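+
+/* The effect is that a vtable slot for a pure virtual function points
+ at `abort', so a call through such a slot (e.g., from a constructor
+ or destructor of the abstract class) aborts cleanly instead of
+ jumping through a wild pointer. */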
+
+/* Set TREE_PUBLIC and/or DECL_EXTERNAL on the vtable DECL,
+ based on TYPE and other static flags.
+
+ Note that anything public is tagged TREE_PUBLIC, whether
+ it's public in this file or in another one. */
+
+void
+import_export_vtable (decl, type)
+ tree decl, type;
+{
+ if (write_virtuals >= 2
+ || CLASSTYPE_TEMPLATE_INSTANTIATION (type))
+ {
+ if (CLASSTYPE_INTERFACE_KNOWN (type))
+ {
+ TREE_PUBLIC (decl) = 1;
+ DECL_EXTERNAL (decl) = ! CLASSTYPE_VTABLE_NEEDS_WRITING (type);
+ }
+ }
+ else if (write_virtuals != 0)
+ {
+ TREE_PUBLIC (decl) = 1;
+ if (write_virtuals < 0)
+ DECL_EXTERNAL (decl) = 1;
+ }
+}
+
+static void
+import_export_template (type)
+ tree type;
+{
+ if (CLASSTYPE_IMPLICIT_INSTANTIATION (type)
+ && ! flag_implicit_templates
+ && CLASSTYPE_INTERFACE_UNKNOWN (type))
+ {
+ SET_CLASSTYPE_INTERFACE_KNOWN (type);
+ CLASSTYPE_INTERFACE_ONLY (type) = 1;
+ CLASSTYPE_VTABLE_NEEDS_WRITING (type) = 0;
+ }
+}
+
+static void
+finish_vtable_vardecl (prev, vars)
+ tree prev, vars;
+{
+ tree ctype = DECL_CONTEXT (vars);
+ import_export_template (ctype);
+ import_export_vtable (vars, ctype);
+
+ if (flag_vtable_thunks && !CLASSTYPE_INTERFACE_KNOWN (ctype))
+ {
+ tree method;
+ for (method = CLASSTYPE_METHODS (ctype); method != NULL_TREE;
+ method = DECL_NEXT_METHOD (method))
+ {
+ if (DECL_VINDEX (method) != NULL_TREE && !DECL_SAVED_INSNS (method)
+ && !DECL_ABSTRACT_VIRTUAL_P (method))
+ {
+ SET_CLASSTYPE_INTERFACE_KNOWN (ctype);
+ CLASSTYPE_INTERFACE_ONLY (ctype) = DECL_EXTERNAL (method);
+ TREE_PUBLIC (vars) = 1;
+ DECL_EXTERNAL (vars) = DECL_EXTERNAL (method);
+ break;
+ }
+ }
+ }
+
+ if (write_virtuals >= 0
+ && ! DECL_EXTERNAL (vars) && (TREE_PUBLIC (vars) || TREE_USED (vars)))
+ {
+ extern tree the_null_vtable_entry;
+
+ /* Stuff this virtual function table's size into
+ `pfn' slot of `the_null_vtable_entry'. */
+ tree nelts = array_type_nelts (TREE_TYPE (vars));
+ if (flag_vtable_thunks)
+ TREE_VALUE (CONSTRUCTOR_ELTS (DECL_INITIAL (vars))) = nelts;
+ else
+ SET_FNADDR_FROM_VTABLE_ENTRY (the_null_vtable_entry, nelts);
+ /* Kick out the dossier before writing out the vtable. */
+ if (flag_dossier)
+ rest_of_decl_compilation (TREE_OPERAND (FNADDR_FROM_VTABLE_ENTRY (TREE_VALUE (TREE_CHAIN (CONSTRUCTOR_ELTS (DECL_INITIAL (vars))))), 0), 0, 1, 1);
+
+ /* Write it out. */
+ mark_vtable_entries (vars);
+ if (TREE_TYPE (DECL_INITIAL (vars)) == 0)
+ store_init_value (vars, DECL_INITIAL (vars));
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ {
+ /* Mark the VAR_DECL node representing the vtable itself as a
+ "gratuitous" one, thereby forcing dwarfout.c to ignore it.
+ It is rather important that such things be ignored because
+ any effort to actually generate DWARF for them will run
+ into trouble when/if we encounter code like:
+
+ #pragma interface
+ struct S { virtual void member (); };
+
+ because the artificial declaration of the vtable itself (as
+ manufactured by the g++ front end) will say that the vtable
+ is a static member of `S' but only *after* the debug output
+ for the definition of `S' has already been output. This causes
+ grief because the DWARF entry for the definition of the vtable
+ will try to refer back to an earlier *declaration* of the
+ vtable as a static member of `S' and there won't be one.
+ We might be able to arrange to have the "vtable static member"
+ attached to the member list for `S' before the debug info for
+ `S' get written (which would solve the problem) but that would
+ require more intrusive changes to the g++ front end. */
+
+ DECL_IGNORED_P (vars) = 1;
+ }
+#endif /* DWARF_DEBUGGING_INFO */
+
+ rest_of_decl_compilation (vars, NULL_PTR, 1, 1);
+ }
+ else if (TREE_USED (vars) && flag_vtable_thunks)
+ assemble_external (vars);
+ /* We know that PREV must be non-zero here. */
+ TREE_CHAIN (prev) = TREE_CHAIN (vars);
+}
+
+void
+walk_vtables (typedecl_fn, vardecl_fn)
+ register void (*typedecl_fn)();
+ register void (*vardecl_fn)();
+{
+ tree prev, vars;
+
+ for (prev = 0, vars = getdecls (); vars; vars = TREE_CHAIN (vars))
+ {
+ register tree type = TREE_TYPE (vars);
+
+ if (TREE_CODE (vars) == TYPE_DECL
+ && type != error_mark_node
+ && TYPE_LANG_SPECIFIC (type)
+ && CLASSTYPE_VSIZE (type))
+ {
+ if (typedecl_fn) (*typedecl_fn) (prev, vars);
+ }
+ else if (TREE_CODE (vars) == VAR_DECL && DECL_VIRTUAL_P (vars))
+ {
+ if (vardecl_fn) (*vardecl_fn) (prev, vars);
+ }
+ else
+ prev = vars;
+ }
+}
+
+static void
+finish_sigtable_vardecl (prev, vars)
+ tree prev, vars;
+{
+ /* We don't need to mark sigtable entries as addressable here as is done
+ for vtables. Since sigtables, unlike vtables, are always written out,
+ that was already done in build_signature_table_constructor. */
+
+ rest_of_decl_compilation (vars, NULL_PTR, 1, 1);
+
+ /* We know that PREV must be non-zero here. */
+ TREE_CHAIN (prev) = TREE_CHAIN (vars);
+}
+
+void
+walk_sigtables (typedecl_fn, vardecl_fn)
+ register void (*typedecl_fn)();
+ register void (*vardecl_fn)();
+{
+ tree prev, vars;
+
+ for (prev = 0, vars = getdecls (); vars; vars = TREE_CHAIN (vars))
+ {
+ register tree type = TREE_TYPE (vars);
+
+ if (TREE_CODE (vars) == TYPE_DECL
+ && type != error_mark_node
+ && IS_SIGNATURE (type))
+ {
+ if (typedecl_fn) (*typedecl_fn) (prev, vars);
+ }
+ else if (TREE_CODE (vars) == VAR_DECL
+ && TREE_TYPE (vars) != error_mark_node
+ && IS_SIGNATURE (TREE_TYPE (vars)))
+ {
+ if (vardecl_fn) (*vardecl_fn) (prev, vars);
+ }
+ else
+ prev = vars;
+ }
+}
+
+extern int parse_time, varconst_time;
+
+#define TIMEVAR(VAR, BODY) \
+do { int otime = get_run_time (); BODY; VAR += get_run_time () - otime; } while (0)
+
+/* This routine is called from the last rule in yyparse ().
+ Its job is to create all the code needed to initialize and
+ destroy the global aggregates. We do the destruction
+ first, since that way we only need to reverse the decls once. */
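+
+/* For instance, a file-scope `X x;' where `X' has a constructor and a
+ destructor causes a teardown function (named via
+ get_file_function_name ('D')) and an initialization function (via
+ get_file_function_name ('I')) to be built below and registered
+ through assemble_destructor and assemble_constructor. */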
+
+void
+finish_file ()
+{
+ extern int lineno;
+ int start_time, this_time;
+
+ tree fnname;
+ tree vars = static_aggregates;
+ int needs_cleaning = 0, needs_messing_up = 0;
+
+ build_exception_table ();
+
+ if (flag_detailed_statistics)
+ dump_tree_statistics ();
+
+ /* Bad parse errors. Just forget about it. */
+ if (! global_bindings_p () || current_class_type)
+ return;
+
+ start_time = get_run_time ();
+
+ /* Push into C language context, because that's all
+ we'll need here. */
+ push_lang_context (lang_name_c);
+
+ /* Set up the name of the file-level functions we may need. */
+ /* Use a global object (which is already required to be unique over
+ the program) rather than the file name (which imposes extra
+ constraints). -- Raeburn@MIT.EDU, 10 Jan 1990. */
+
+ /* See if we really need the hassle. */
+ while (vars && needs_cleaning == 0)
+ {
+ tree decl = TREE_VALUE (vars);
+ tree type = TREE_TYPE (decl);
+ if (TYPE_NEEDS_DESTRUCTOR (type))
+ {
+ needs_cleaning = 1;
+ needs_messing_up = 1;
+ break;
+ }
+ else
+ needs_messing_up |= TYPE_NEEDS_CONSTRUCTING (type);
+ vars = TREE_CHAIN (vars);
+ }
+ if (needs_cleaning == 0)
+ goto mess_up;
+
+ /* Otherwise, GDB can get confused, because it only knows
+ about source for LINENO-1 lines. */
+ lineno -= 1;
+
+ fnname = get_file_function_name ('D');
+ start_function (void_list_node, build_parse_node (CALL_EXPR, fnname, void_list_node, NULL_TREE), 0, 0);
+ fnname = DECL_ASSEMBLER_NAME (current_function_decl);
+ store_parm_decls ();
+
+ pushlevel (0);
+ clear_last_expr ();
+ push_momentary ();
+ expand_start_bindings (0);
+
+ /* These must be destroyed in the reverse of construction order,
+ which happens to be the order the chain is already in! */
+ while (vars)
+ {
+ tree decl = TREE_VALUE (vars);
+ tree type = TREE_TYPE (decl);
+ tree temp = TREE_PURPOSE (vars);
+
+ if (TYPE_NEEDS_DESTRUCTOR (type))
+ {
+ if (TREE_STATIC (vars))
+ expand_start_cond (build_binary_op (NE_EXPR, temp, integer_zero_node, 1), 0);
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ temp = decl;
+ else
+ {
+ mark_addressable (decl);
+ temp = build1 (ADDR_EXPR, TYPE_POINTER_TO (type), decl);
+ }
+ temp = build_delete (TREE_TYPE (temp), temp,
+ integer_two_node, LOOKUP_NORMAL|LOOKUP_NONVIRTUAL|LOOKUP_DESTRUCTOR, 0);
+ expand_expr_stmt (temp);
+
+ if (TREE_STATIC (vars))
+ expand_end_cond ();
+ }
+ vars = TREE_CHAIN (vars);
+ }
+
+ expand_end_bindings (getdecls(), 1, 0);
+ poplevel (1, 0, 0);
+ pop_momentary ();
+
+ finish_function (lineno, 0);
+
+ assemble_destructor (IDENTIFIER_POINTER (fnname));
+
+ /* If it needed cleaning, then it will need messing up: fall through. */
+
+ mess_up:
+ /* Must do this while we think we are at the top level. */
+ vars = nreverse (static_aggregates);
+ if (vars != NULL_TREE)
+ {
+ fnname = get_file_function_name ('I');
+ start_function (void_list_node, build_parse_node (CALL_EXPR, fnname, void_list_node, NULL_TREE), 0, 0);
+ fnname = DECL_ASSEMBLER_NAME (current_function_decl);
+ store_parm_decls ();
+
+ pushlevel (0);
+ clear_last_expr ();
+ push_momentary ();
+ expand_start_bindings (0);
+
+ while (vars)
+ {
+ tree decl = TREE_VALUE (vars);
+ tree init = TREE_PURPOSE (vars);
+ tree old_cleanups = cleanups_this_call;
+
+ /* If this was a static attribute within some function's scope,
+ then don't initialize it here. Also, don't bother
+ with initializers that contain errors. */
+ if (TREE_STATIC (vars)
+ || (init && TREE_CODE (init) == TREE_LIST
+ && value_member (error_mark_node, init)))
+ {
+ vars = TREE_CHAIN (vars);
+ continue;
+ }
+
+ if (TREE_CODE (decl) == VAR_DECL)
+ {
+ /* Set these global variables so that GDB at least puts
+ us near the declaration which required the initialization. */
+ input_filename = DECL_SOURCE_FILE (decl);
+ lineno = DECL_SOURCE_LINE (decl);
+ emit_note (input_filename, lineno);
+
+ /* 9.5p5: The initializer of a static member of a class has
+ the same access rights as a member function. */
+ DECL_CLASS_CONTEXT (current_function_decl) = DECL_CONTEXT (decl);
+
+ if (init)
+ {
+ if (TREE_CODE (init) == VAR_DECL)
+ {
+ /* This behavior results when there are
+ multiple declarations of an aggregate,
+ the last of which defines it. */
+ if (DECL_RTL (init) == DECL_RTL (decl))
+ {
+ my_friendly_assert (DECL_INITIAL (decl) == error_mark_node
+ || (TREE_CODE (DECL_INITIAL (decl)) == CONSTRUCTOR
+ && CONSTRUCTOR_ELTS (DECL_INITIAL (decl)) == NULL_TREE),
+ 199);
+ init = DECL_INITIAL (init);
+ if (TREE_CODE (init) == CONSTRUCTOR
+ && CONSTRUCTOR_ELTS (init) == NULL_TREE)
+ init = NULL_TREE;
+ }
+#if 0
+ else if (TREE_TYPE (decl) == TREE_TYPE (init))
+ {
+#if 1
+ my_friendly_abort (200);
+#else
+ /* point to real decl's rtl anyway. */
+ DECL_RTL (init) = DECL_RTL (decl);
+ my_friendly_assert (DECL_INITIAL (decl) == error_mark_node,
+ 201);
+ init = DECL_INITIAL (init);
+#endif /* 1 */
+ }
+#endif /* 0 */
+ }
+ }
+ if (IS_AGGR_TYPE (TREE_TYPE (decl))
+ || init == 0
+ || TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
+ expand_aggr_init (decl, init, 0);
+ else if (TREE_CODE (init) == TREE_VEC)
+ {
+ expand_expr (expand_vec_init (decl, TREE_VEC_ELT (init, 0),
+ TREE_VEC_ELT (init, 1),
+ TREE_VEC_ELT (init, 2), 0),
+ const0_rtx, VOIDmode, 0);
+ free_temp_slots ();
+ }
+ else
+ expand_assignment (decl, init, 0, 0);
+
+ DECL_CLASS_CONTEXT (current_function_decl) = NULL_TREE;
+ }
+ else if (TREE_CODE (decl) == SAVE_EXPR)
+ {
+ if (! PARM_DECL_EXPR (decl))
+ {
+ /* a `new' expression at top level. */
+ expand_expr (decl, const0_rtx, VOIDmode, 0);
+ free_temp_slots ();
+ expand_aggr_init (build_indirect_ref (decl, NULL_PTR), init, 0);
+ }
+ }
+ else if (decl == error_mark_node)
+ ;
+ else my_friendly_abort (22);
+ vars = TREE_CHAIN (vars);
+ /* Cleanup any temporaries needed for the initial value. */
+ expand_cleanups_to (old_cleanups);
+ }
+
+ expand_end_bindings (getdecls(), 1, 0);
+ poplevel (1, 0, 0);
+ pop_momentary ();
+
+ finish_function (lineno, 0);
+ assemble_constructor (IDENTIFIER_POINTER (fnname));
+ }
+
+ /* Done with C language context needs. */
+ pop_lang_context ();
+
+ /* Now write out any static class variables (which may have since
+ learned how to be initialized). */
+ while (pending_statics)
+ {
+ tree decl = TREE_VALUE (pending_statics);
+ if (TREE_USED (decl) == 1
+ || TREE_READONLY (decl) == 0
+ || DECL_INITIAL (decl) == 0)
+ rest_of_decl_compilation (decl, IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), 1, 1);
+ pending_statics = TREE_CHAIN (pending_statics);
+ }
+
+ this_time = get_run_time ();
+ parse_time -= this_time - start_time;
+ varconst_time += this_time - start_time;
+
+ start_time = get_run_time ();
+
+ /* Now delete from the chain of variables all virtual function tables.
+ We output them all ourselves, because each will be treated specially. */
+
+#if 1
+ /* The reason for pushing garbage onto the global_binding_level is to
+ ensure that we can slice out _DECLs which pertain to virtual function
+ tables. If the last thing pushed onto the global_binding_level was a
+ virtual function table, then slicing it out would slice away all the
+ decls (i.e., we lose the head of the chain).
+
+ There are several ways of getting the same effect, from changing the
+ way that iterators over the chain treat the elements that pertain to
+ virtual function tables, moving the implementation of this code to
+ decl.c (where we can manipulate global_binding_level directly),
+ popping the garbage after pushing it and slicing away the vtable
+ stuff, or just leaving it alone. */
+
+ /* Make last thing in global scope not be a virtual function table. */
+#if 0 /* not yet, should get fixed properly later */
+ vars = make_type_decl (get_identifier (" @%$#@!"), integer_type_node);
+#else
+ vars = build_decl (TYPE_DECL, get_identifier (" @%$#@!"), integer_type_node);
+#endif
+ DECL_IGNORED_P (vars) = 1;
+ SET_DECL_ARTIFICIAL (vars);
+ pushdecl (vars);
+#endif
+
+ walk_vtables ((void (*)())0, finish_vtable_vardecl);
+ if (flag_handle_signatures)
+ walk_sigtables ((void (*)())0, finish_sigtable_vardecl);
+
+ for (vars = getdecls (); vars; vars = TREE_CHAIN (vars))
+ {
+ if (TREE_CODE (vars) == THUNK_DECL)
+ emit_thunk (vars);
+ }
+
+ {
+ int reconsider = 0; /* More may be referenced; check again */
+ tree delayed = NULL_TREE; /* These might be referenced later */
+
+ /* Now write out inline functions which had their addresses taken and
+ which were not declared virtual and which were not declared `extern
+ inline'. */
+ while (saved_inlines)
+ {
+ tree decl = TREE_VALUE (saved_inlines);
+ saved_inlines = TREE_CHAIN (saved_inlines);
+ /* Redefinition of a member function can cause DECL_SAVED_INSNS to be
+ 0; don't crash. */
+ if (TREE_ASM_WRITTEN (decl) || DECL_SAVED_INSNS (decl) == 0)
+ continue;
+ if (DECL_FUNCTION_MEMBER_P (decl) && !TREE_PUBLIC (decl))
+ {
+ tree ctype = DECL_CLASS_CONTEXT (decl);
+ if (CLASSTYPE_INTERFACE_KNOWN (ctype))
+ {
+ TREE_PUBLIC (decl) = 1;
+ DECL_EXTERNAL (decl)
+ = (CLASSTYPE_INTERFACE_ONLY (ctype)
+ || (DECL_INLINE (decl) && ! flag_implement_inlines));
+ }
+ }
+ if (TREE_PUBLIC (decl)
+ || TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))
+ || flag_keep_inline_functions)
+ {
+ if (DECL_EXTERNAL (decl)
+ || (DECL_IMPLICIT_INSTANTIATION (decl)
+ && ! flag_implicit_templates))
+ assemble_external (decl);
+ else
+ {
+ reconsider = 1;
+ temporary_allocation ();
+ output_inline_function (decl);
+ permanent_allocation (1);
+ }
+ }
+ else if (TREE_USED (decl)
+ || TREE_USED (DECL_ASSEMBLER_NAME (decl)))
+ delayed = tree_cons (NULL_TREE, decl, delayed);
+ }
+
+ if (reconsider && delayed)
+ {
+ while (reconsider)
+ {
+ tree place;
+ reconsider = 0;
+ for (place = delayed; place; place = TREE_CHAIN (place))
+ {
+ tree decl = TREE_VALUE (place);
+ if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))
+ && ! TREE_ASM_WRITTEN (decl))
+ {
+ if (DECL_EXTERNAL (decl)
+ || (DECL_IMPLICIT_INSTANTIATION (decl)
+ && ! flag_implicit_templates))
+ assemble_external (decl);
+ else
+ {
+ reconsider = 1;
+ temporary_allocation ();
+ output_inline_function (decl);
+ permanent_allocation (1);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (write_virtuals == 2)
+ {
+ /* Now complain about any virtual function tables promised
+ but not delivered. */
+ while (pending_vtables)
+ {
+ if (TREE_PURPOSE (pending_vtables) == NULL_TREE)
+ error ("virtual function table for `%s' not defined",
+ IDENTIFIER_POINTER (TREE_VALUE (pending_vtables)));
+ pending_vtables = TREE_CHAIN (pending_vtables);
+ }
+ }
+
+ permanent_allocation (1);
+ this_time = get_run_time ();
+ parse_time -= this_time - start_time;
+ varconst_time += this_time - start_time;
+
+ if (flag_detailed_statistics)
+ dump_time_statistics ();
+}
+
+/* This is something of the form 'A()()()()()+1' that has turned out to be an
+ expr. Since it was parsed like a type, we need to wade through and fix
+ that. Unfortunately, since operator() is left-associative, we can't use
+ tail recursion. In the above example, TYPE is `A', and DECL is
+ `()()()()()'.
+
+ Maybe this shouldn't be recursive, but how often will it actually be
+ used? (jason) */
+tree
+reparse_absdcl_as_expr (type, decl)
+ tree type, decl;
+{
+ /* do build_functional_cast (type, NULL_TREE) at bottom */
+ if (TREE_OPERAND (decl, 0) == NULL_TREE)
+ return build_functional_cast (type, NULL_TREE);
+
+ /* recurse */
+ decl = reparse_decl_as_expr (type, TREE_OPERAND (decl, 0));
+
+ decl = build_x_function_call (decl, NULL_TREE, current_class_decl);
+
+ if (TREE_CODE (decl) == CALL_EXPR && TREE_TYPE (decl) != void_type_node)
+ decl = require_complete_type (decl);
+
+ return decl;
+}
+
+/* This is something of the form `int ((int)(int)(int)1)' that has turned
+ out to be an expr. Since it was parsed like a type, we need to wade
+ through and fix that. Since casts are right-associative, we are
+ reversing the order, so we don't have to recurse.
+
+ In the above example, DECL is the `(int)(int)(int)', and EXPR is the
+ `1'. */
+tree
+reparse_absdcl_as_casts (decl, expr)
+ tree decl, expr;
+{
+ tree type;
+
+ if (TREE_CODE (expr) == CONSTRUCTOR)
+ {
+ type = groktypename (TREE_VALUE (TREE_OPERAND (decl, 1)));
+ decl = TREE_OPERAND (decl, 0);
+
+ if (IS_SIGNATURE (type))
+ {
+ error ("cast specifies signature type");
+ return error_mark_node;
+ }
+
+ expr = digest_init (type, expr, (tree *) 0);
+ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_SIZE (type) == 0)
+ {
+ int failure = complete_array_type (type, expr, 1);
+ if (failure)
+ my_friendly_abort (78);
+ }
+ }
+
+ while (decl)
+ {
+ type = groktypename (TREE_VALUE (TREE_OPERAND (decl, 1)));
+ decl = TREE_OPERAND (decl, 0);
+ expr = build_c_cast (type, expr);
+ }
+
+ return expr;
+}
+
+/* Recursive helper function for reparse_decl_as_expr. It may be a good
+ idea to reimplement this using an explicit stack, rather than recursion. */
+static tree
+reparse_decl_as_expr1 (decl)
+ tree decl;
+{
+ switch (TREE_CODE (decl))
+ {
+ case IDENTIFIER_NODE:
+ return do_identifier (decl);
+ case INDIRECT_REF:
+ return build_x_indirect_ref
+ (reparse_decl_as_expr1 (TREE_OPERAND (decl, 0)), "unary *");
+ case ADDR_EXPR:
+ return build_x_unary_op (ADDR_EXPR,
+ reparse_decl_as_expr1 (TREE_OPERAND (decl, 0)));
+ case BIT_NOT_EXPR:
+ return build_x_unary_op (BIT_NOT_EXPR,
+ reparse_decl_as_expr1 (TREE_OPERAND (decl, 0)));
+ case SCOPE_REF:
+ return build_offset_ref (TREE_OPERAND (decl, 0), TREE_OPERAND (decl, 1));
+ case ARRAY_REF:
+ return grok_array_decl (reparse_decl_as_expr1 (TREE_OPERAND (decl, 0)),
+ TREE_OPERAND (decl, 1));
+ default:
+ my_friendly_abort (5);
+ return NULL_TREE;
+ }
+}
+
+/* This is something of the form `int (*a)++' that has turned out to be an
+ expr. It was only converted into parse nodes, so we need to go through
+ and build up the semantics. Most of the work is done by
+ reparse_decl_as_expr1, above.
+
+ In the above example, TYPE is `int' and DECL is `*a'. */
+tree
+reparse_decl_as_expr (type, decl)
+ tree type, decl;
+{
+ decl = build_tree_list (NULL_TREE, reparse_decl_as_expr1 (decl));
+ return build_functional_cast (type, decl);
+}
+
+/* This is something of the form `int (*a)' that has turned out to be a
+ decl. It was only converted into parse nodes, so we need to do the
+ checking that make_{pointer,reference}_declarator do. */
+
+tree
+finish_decl_parsing (decl)
+ tree decl;
+{
+ extern int current_class_depth;
+
+ switch (TREE_CODE (decl))
+ {
+ case IDENTIFIER_NODE:
+ return decl;
+ case INDIRECT_REF:
+ return make_pointer_declarator
+ (NULL_TREE, finish_decl_parsing (TREE_OPERAND (decl, 0)));
+ case ADDR_EXPR:
+ return make_reference_declarator
+ (NULL_TREE, finish_decl_parsing (TREE_OPERAND (decl, 0)));
+ case BIT_NOT_EXPR:
+ TREE_OPERAND (decl, 0) = finish_decl_parsing (TREE_OPERAND (decl, 0));
+ return decl;
+ case SCOPE_REF:
+ push_nested_class (TREE_TYPE (TREE_OPERAND (decl, 0)), 3);
+ TREE_COMPLEXITY (decl) = current_class_depth;
+ return decl;
+ case ARRAY_REF:
+ TREE_OPERAND (decl, 0) = finish_decl_parsing (TREE_OPERAND (decl, 0));
+ return decl;
+ default:
+ my_friendly_abort (5);
+ return NULL_TREE;
+ }
+}
+
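+/* Check that VALUE is usable as a C++ case label: strip no-op casts,
+   fold constant decls, require an integer constant (else report an
+   error), and apply the default promotions.  */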
+tree
+check_cp_case_value (value)
+ tree value;
+{
+ if (value == NULL_TREE)
+ return value;
+
+ /* build_c_cast puts on a NOP_EXPR to make a non-lvalue.
+ Strip such NOP_EXPRs. */
+ if (TREE_CODE (value) == NOP_EXPR
+ && TREE_TYPE (value) == TREE_TYPE (TREE_OPERAND (value, 0)))
+ value = TREE_OPERAND (value, 0);
+
+ if (TREE_READONLY_DECL_P (value))
+ {
+ value = decl_constant_value (value);
+ /* build_c_cast puts on a NOP_EXPR to make a non-lvalue.
+ Strip such NOP_EXPRs. */
+ if (TREE_CODE (value) == NOP_EXPR
+ && TREE_TYPE (value) == TREE_TYPE (TREE_OPERAND (value, 0)))
+ value = TREE_OPERAND (value, 0);
+ }
+ value = fold (value);
+
+ if (TREE_CODE (value) != INTEGER_CST
+ && value != error_mark_node)
+ {
+ cp_error ("case label `%E' does not reduce to an integer constant",
+ value);
+ value = error_mark_node;
+ }
+ else
+ /* Promote char or short to int. */
+ value = default_conversion (value);
+
+ constant_expression_warning (value);
+
+ return value;
+}
diff --git a/gnu/usr.bin/cc/cc1plus/edsel.c b/gnu/usr.bin/cc/cc1plus/edsel.c
new file mode 100644
index 0000000..78b2637
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/edsel.c
@@ -0,0 +1,927 @@
+/* Interface to LUCID Cadillac system for GNU compiler.
+ Copyright (C) 1988, 1992, 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include "config.h"
+
+#include "tree.h"
+#include "flags.h"
+#include <stdio.h>
+#include "cp-tree.h"
+#include "obstack.h"
+
+#ifdef CADILLAC
+#include <compilerreq.h>
+#include <compilerconn.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <sys/file.h>
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+void init_cadillac ();
+
+extern char *input_filename;
+extern int lineno;
+
+/* Put random information we might want to get back from
+ Cadillac here. */
+typedef struct
+{
+ /* The connection to the Cadillac kernel. */
+ Connection *conn;
+
+ /* Input and output file descriptors for Cadillac. */
+ short fd_input, fd_output;
+
+ /* #include nesting of current file. */
+ short depth;
+
+ /* State variables for the connection. */
+ char messages;
+ char conversion;
+ char emission;
+ char process_until;
+
+ /* #if level of current file. */
+ int iflevel;
+
+ /* Line number that starts current source file. */
+ int lineno;
+
+ /* Name of current file. */
+ char *filename;
+
+ /* Where to stop processing (if process_until is set). */
+ char *end_filename;
+ int end_position;
+
+} cadillac_struct;
+static cadillac_struct cadillacObj;
+
+/* Nonzero if in the process of exiting. */
+static int exiting;
+
+void cadillac_note_source ();
+static void CWriteLanguageDecl ();
+static void CWriteLanguageType ();
+static void CWriteTopLevel ();
+static void cadillac_note_filepos ();
+static void cadillac_process_request (), cadillac_process_requests ();
+static void cadillac_switch_source ();
+static void exit_cadillac ();
+
+/* Blocking test: wait (via select) until FD has data to read.  */
+static int
+readable_p (fd)
+ int fd;
+{
+ fd_set f;
+
+ FD_ZERO (&f);
+ FD_SET (fd, &f);
+
+ return select (32, &f, NULL, NULL, 0) == 1;
+}
+
+static CObjectType *tree_to_cadillac_map;
+struct obstack cadillac_obstack;
+
+
+#include "stack.h"
+
+struct context_level
+{
+ struct stack_level base;
+
+ tree context;
+};
+
+/* Stack for maintaining contexts (in case functions or types are nested).
+ When defining a struct type, the `context' field is the RECORD_TYPE.
+ When defining a function, the `context' field is the FUNCTION_DECL. */
+
+static struct context_level *context_stack;
+
+static struct context_level *
+push_context_level (stack, obstack)
+ struct stack_level *stack;
+ struct obstack *obstack;
+{
+ struct context_level tem;
+
+ tem.base.prev = stack;
+ return (struct context_level *)push_stack_level (obstack, &tem, sizeof (tem));
+}
+
+/* Discard a level of search allocation. */
+
+static struct context_level *
+pop_context_level (stack)
+ struct context_level *stack;
+{
+ stack = (struct context_level *)pop_stack_level (stack);
+ return stack;
+}
+
+void
+init_cadillac ()
+{
+ extern FILE *finput;
+ extern int errno;
+ CCompilerMessage* req;
+ cadillac_struct *cp = &cadillacObj;
+ int i;
+
+ if (! flag_cadillac)
+ return;
+
+ tree_to_cadillac_map = (CObjectType*) xmalloc (sizeof (CObjectType) * LAST_CPLUS_TREE_CODE);
+ for (i = 0; i < LAST_CPLUS_TREE_CODE; i++)
+ tree_to_cadillac_map[i] = MiscOType;
+ tree_to_cadillac_map[RECORD_TYPE] = StructOType;
+ tree_to_cadillac_map[UNION_TYPE] = UnionOType;
+ tree_to_cadillac_map[ENUMERAL_TYPE] = EnumTypeOType;
+ tree_to_cadillac_map[TYPE_DECL] = TypedefOType;
+ tree_to_cadillac_map[VAR_DECL] = VariableOType;
+ tree_to_cadillac_map[CONST_DECL] = EnumConstantOType;
+ tree_to_cadillac_map[FUNCTION_DECL] = FunctionOType;
+ tree_to_cadillac_map[FIELD_DECL] = FieldOType;
+
+#ifdef sun
+ on_exit (&exit_cadillac, 0);
+#endif
+
+ gcc_obstack_init (&cadillac_obstack);
+
+  /* Yow!  This is the way Cadillac was designed to deal with the
+     Oregon C++ compiler!  */
+ cp->fd_input = flag_cadillac;
+ cp->fd_output = flag_cadillac;
+
+ /* Start in "turned-on" state. */
+ cp->messages = 1;
+ cp->conversion = 1;
+ cp->emission = 1;
+
+ /* Establish a connection with Cadillac here. */
+ cp->conn = NewConnection (cp, cp->fd_input, cp->fd_output);
+
+ CWriteHeader (cp->conn, WaitingMType, 0);
+ CWriteRequestBuffer (cp->conn);
+
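+  /* readable_p blocks in select until the kernel connection has data,
+     so the empty body below is apparently just a wait.  */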
+ if (!readable_p (cp->fd_input))
+ ;
+
+ req = CReadCompilerMessage (cp->conn);
+
+ if (!req)
+ switch (errno)
+ {
+ case EWOULDBLOCK:
+ sleep (5);
+ return;
+
+ case 0:
+ fatal ("init_cadillac: EOF on connection to kernel, exiting\n");
+ break;
+
+ default:
+ perror ("Editor to kernel connection");
+ exit (0);
+ }
+}
+
+static void
+cadillac_process_requests (conn)
+ Connection *conn;
+{
+ CCompilerMessage *req;
+ while (req = (CCompilerMessage*) CPeekNextRequest (conn))
+ {
+ req = CReadCompilerMessage (conn);
+ cadillac_process_request (&cadillacObj, req);
+ }
+}
+
+static void
+cadillac_process_request (cp, req)
+ cadillac_struct *cp;
+ CCompilerMessage *req;
+{
+ if (! req)
+ return;
+
+ switch (req->reqType)
+ {
+ case ProcessUntilMType:
+ if (cp->process_until)
+ my_friendly_abort (23);
+ cp->process_until = 1;
+ /* This is not really right. */
+ cp->end_position = ((CCompilerCommand*)req)->processuntil.position;
+#if 0
+ cp->end_filename = req->processuntil.filename;
+#endif
+ break;
+
+ case CommandMType:
+ switch (req->header.data)
+ {
+ case MessagesOnCType:
+ cp->messages = 1;
+ break;
+ case MessagesOffCType:
+ cp->messages = 0;
+ break;
+ case ConversionOnCType:
+ cp->conversion = 1;
+ break;
+ case ConversionOffCType:
+ cp->conversion = 0;
+ break;
+ case EmissionOnCType:
+ cp->emission = 1;
+ break;
+ case EmissionOffCType:
+ cp->emission = 0;
+ break;
+
+ case FinishAnalysisCType:
+ return;
+
+ case PuntAnalysisCType:
+ case ContinueAnalysisCType:
+ case GotoFileposCType:
+ case OpenSucceededCType:
+ case OpenFailedCType:
+ fprintf (stderr, "request type %d not implemented\n", req->reqType);
+ return;
+
+ case DieCType:
+ if (! exiting)
+ my_friendly_abort (24);
+ return;
+
+ }
+ break;
+
+ default:
+ fatal ("unknown request type %d", req->reqType);
+ }
+}
+
+void
+cadillac_start ()
+{
+ Connection *conn = cadillacObj.conn;
+ CCompilerMessage *req;
+
+ /* Let Cadillac know that we start in C++ language scope. */
+ CWriteHeader (conn, ForeignLinkageMType, LinkCPlus);
+ CWriteLength (conn);
+ CWriteRequestBuffer (conn);
+
+ cadillac_process_requests (conn);
+}
+
+static void
+cadillac_printf (msg, name)
+     char *msg, *name;
+{
+ if (cadillacObj.messages)
+ printf ("[%s,%4d] %s `%s'\n", input_filename, lineno, msg, name);
+}
+
+void
+cadillac_start_decl (decl)
+ tree decl;
+{
+ Connection *conn = cadillacObj.conn;
+ CObjectType object_type = tree_to_cadillac_map [TREE_CODE (decl)];
+
+ if (context_stack)
+ switch (TREE_CODE (context_stack->context))
+ {
+ case FUNCTION_DECL:
+ /* Currently, cadillac only implements top-level forms. */
+ return;
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ cadillac_printf ("start class-level decl", IDENTIFIER_POINTER (DECL_NAME (decl)));
+ break;
+ default:
+ my_friendly_abort (25);
+ }
+ else
+ {
+ cadillac_printf ("start top-level decl", IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+ CWriteTopLevel (conn, StartMType);
+ }
+
+ CWriteLanguageDecl (conn, decl, tree_to_cadillac_map[TREE_CODE (decl)]);
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+void
+cadillac_finish_decl (decl)
+ tree decl;
+{
+ Connection *conn = cadillacObj.conn;
+
+ if (context_stack)
+ switch (TREE_CODE (context_stack->context))
+ {
+ case FUNCTION_DECL:
+ return;
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ cadillac_printf ("end class-level decl", IDENTIFIER_POINTER (DECL_NAME (decl)));
+ CWriteHeader (conn, EndDefMType, 0);
+ CWriteLength (conn);
+ break;
+ default:
+ my_friendly_abort (26);
+ }
+ else
+ {
+ cadillac_printf ("end top-level decl", IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+ CWriteHeader (conn, EndDefMType, 0);
+ CWriteLength (conn);
+ CWriteTopLevel (conn, StopMType);
+ }
+
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+void
+cadillac_start_function (fndecl)
+ tree fndecl;
+{
+ Connection *conn = cadillacObj.conn;
+
+ if (context_stack)
+ /* nested functions not yet handled. */
+ my_friendly_abort (27);
+
+ cadillac_printf ("start top-level function", lang_printable_name (fndecl));
+ context_stack = push_context_level (context_stack, &cadillac_obstack);
+ context_stack->context = fndecl;
+
+ CWriteTopLevel (conn, StartMType);
+ my_friendly_assert (TREE_CODE (fndecl) == FUNCTION_DECL, 202);
+ CWriteLanguageDecl (conn, fndecl,
+ (TREE_CODE (TREE_TYPE (fndecl)) == METHOD_TYPE
+ ? MemberFnOType : FunctionOType));
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+void
+cadillac_finish_function (fndecl)
+ tree fndecl;
+{
+ Connection *conn = cadillacObj.conn;
+
+ cadillac_printf ("end top-level function", lang_printable_name (fndecl));
+ context_stack = pop_context_level (context_stack);
+
+ if (context_stack)
+ /* nested functions not yet implemented. */
+ my_friendly_abort (28);
+
+ CWriteHeader (conn, EndDefMType, 0);
+ CWriteLength (conn);
+ CWriteTopLevel (conn, StopMType);
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+void
+cadillac_finish_anon_union (decl)
+ tree decl;
+{
+ Connection *conn = cadillacObj.conn;
+
+ if (! global_bindings_p ())
+ return;
+ cadillac_printf ("finish top-level anon union", "");
+ CWriteHeader (conn, EndDefMType, 0);
+ CWriteLength (conn);
+ CWriteTopLevel (conn, StopMType);
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+void
+cadillac_start_enum (type)
+ tree type;
+{
+ Connection *conn = cadillacObj.conn;
+
+ tree name = TYPE_NAME (type);
+
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+
+ if (context_stack)
+ switch (TREE_CODE (context_stack->context))
+ {
+ case FUNCTION_DECL:
+ return;
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ break;
+ default:
+ my_friendly_abort (29);
+ }
+ else
+ {
+ cadillac_printf ("start top-level enum", IDENTIFIER_POINTER (name));
+ CWriteTopLevel (conn, StartMType);
+ }
+
+ CWriteLanguageType (conn, type, tree_to_cadillac_map[ENUMERAL_TYPE]);
+}
+
+void
+cadillac_finish_enum (type)
+ tree type;
+{
+ Connection *conn = cadillacObj.conn;
+ tree name = TYPE_NAME (type);
+
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+
+ if (context_stack)
+ switch (TREE_CODE (context_stack->context))
+ {
+ case FUNCTION_DECL:
+ return;
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ CWriteHeader (conn, EndDefMType, 0);
+ CWriteLength (conn);
+ break;
+ default:
+ my_friendly_abort (30);
+ }
+ else
+ {
+ CWriteHeader (conn, EndDefMType, 0);
+ CWriteLength (conn);
+ cadillac_printf ("finish top-level enum", IDENTIFIER_POINTER (name));
+ CWriteTopLevel (conn, StopMType);
+ }
+
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+void
+cadillac_start_struct (type)
+ tree type;
+{
+ Connection *conn = cadillacObj.conn;
+ tree name = TYPE_NAME (type);
+
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+
+ if (context_stack)
+ switch (TREE_CODE (context_stack->context))
+ {
+ case FUNCTION_DECL:
+ return;
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ return;
+ default:
+ my_friendly_abort (31);
+ }
+ else
+ {
+ cadillac_printf ("start struct", IDENTIFIER_POINTER (name));
+ CWriteTopLevel (conn, StartMType);
+ }
+
+ context_stack = push_context_level (context_stack, &cadillac_obstack);
+ context_stack->context = type;
+
+ CWriteLanguageType (conn, type,
+ TYPE_LANG_SPECIFIC (type) && CLASSTYPE_DECLARED_CLASS (type) ? ClassOType : tree_to_cadillac_map[TREE_CODE (type)]);
+}
+
+void
+cadillac_finish_struct (type)
+ tree type;
+{
+ Connection *conn = cadillacObj.conn;
+ tree name = TYPE_NAME (type);
+
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+
+ context_stack = pop_context_level (context_stack);
+ if (context_stack)
+ return;
+
+ cadillac_printf ("finish struct", IDENTIFIER_POINTER (name));
+ CWriteHeader (conn, EndDefMType, 0);
+ CWriteLength (conn);
+ CWriteTopLevel (conn, StopMType);
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+void
+cadillac_finish_exception (type)
+ tree type;
+{
+ Connection *conn = cadillacObj.conn;
+
+ fatal ("cadillac_finish_exception");
+ CWriteHeader (conn, EndDefMType, 0);
+ CWriteLength (conn);
+ CWriteTopLevel (conn, StopMType);
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+void
+cadillac_push_class (type)
+ tree type;
+{
+}
+
+void
+cadillac_pop_class ()
+{
+}
+
+void
+cadillac_push_lang (name)
+ tree name;
+{
+ Connection *conn = cadillacObj.conn;
+ CLinkLanguageType m;
+
+ if (name == lang_name_cplusplus)
+ m = LinkCPlus;
+ else if (name == lang_name_c)
+ m = LinkC;
+ else
+ my_friendly_abort (32);
+ CWriteHeader (conn, ForeignLinkageMType, m);
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+void
+cadillac_pop_lang ()
+{
+ Connection *conn = cadillacObj.conn;
+
+ CWriteHeader (conn, ForeignLinkageMType, LinkPop);
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+void
+cadillac_finish_stmt ()
+{
+}
+
+void
+cadillac_note_source ()
+{
+ cadillacObj.lineno = lineno;
+ cadillacObj.filename = input_filename;
+}
+
+static void
+CWriteTopLevel (conn, m)
+ Connection *conn;
+ CMessageSubType m;
+{
+  static int context_id = 0;
+ CWriteHeader (conn, TopLevelFormMType, m);
+ cadillac_note_filepos ();
+
+ /* Eventually, this will point somewhere into the digest file. */
+ context_id += 1;
+ CWriteSomething (conn, &context_id, sizeof (BITS32));
+
+ CWriteSomething (conn, &cadillacObj.iflevel, sizeof (BITS32));
+ CWriteLength (conn);
+}
+
+static void
+cadillac_note_filepos ()
+{
+ extern FILE *finput;
+ int pos = ftell (finput);
+ CWriteSomething (cadillacObj.conn, &pos, sizeof (BITS32));
+}
+
+void
+cadillac_switch_source (startflag)
+ int startflag;
+{
+ Connection *conn = cadillacObj.conn;
+ /* Send out the name of the source file being compiled. */
+
+ CWriteHeader (conn, SourceFileMType, startflag ? StartMType : StopMType);
+ CWriteSomething (conn, &cadillacObj.depth, sizeof (BITS16));
+ CWriteVstring0 (conn, input_filename);
+ CWriteLength (conn);
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+void
+cadillac_push_source ()
+{
+ cadillacObj.depth += 1;
+ cadillac_switch_source (1);
+}
+
+void
+cadillac_pop_source ()
+{
+ cadillacObj.depth -= 1;
+ cadillac_switch_source (0);
+}
+
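+/* Description of one language element, written field by field to the
+   Cadillac connection by CWriteLanguageElem below.  */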
+struct cadillac_mdep
+{
+ short object_type;
+ char linkage;
+ char access;
+ short length;
+};
+
+static void
+CWriteLanguageElem (conn, p, name)
+ Connection *conn;
+ struct cadillac_mdep *p;
+ char *name;
+{
+ CWriteSomething (conn, &p->object_type, sizeof (BITS16));
+ CWriteSomething (conn, &p->linkage, sizeof (BITS8));
+ CWriteSomething (conn, &p->access, sizeof (BITS8));
+ CWriteSomething (conn, &p->length, sizeof (BITS16));
+ CWriteVstring0 (conn, name);
+
+#if 0
+ /* Don't write date_type. */
+ CWriteVstring0 (conn, "");
+#endif
+ CWriteLength (conn);
+}
+
+static void
+CWriteLanguageDecl (conn, decl, object_type)
+ Connection *conn;
+ tree decl;
+ CObjectType object_type;
+{
+ struct cadillac_mdep foo;
+ tree name;
+
+ CWriteHeader (conn, LanguageElementMType, StartDefineMType);
+ foo.object_type = object_type;
+ if (decl_type_context (decl))
+ {
+ foo.linkage = ParentLinkage;
+ if (TREE_PRIVATE (decl))
+ foo.access = PrivateAccess;
+ else if (TREE_PROTECTED (decl))
+ foo.access = ProtectedAccess;
+ else
+ foo.access = PublicAccess;
+ }
+ else
+ {
+ if (TREE_PUBLIC (decl))
+ foo.linkage = GlobalLinkage;
+ else
+ foo.linkage = FileLinkage;
+ foo.access = PublicAccess;
+ }
+ name = DECL_NAME (decl);
+ foo.length = IDENTIFIER_LENGTH (name);
+
+ CWriteLanguageElem (conn, &foo, IDENTIFIER_POINTER (name));
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+static void
+CWriteLanguageType (conn, type, object_type)
+ Connection *conn;
+ tree type;
+ CObjectType object_type;
+{
+ struct cadillac_mdep foo;
+ tree name = TYPE_NAME (type);
+
+ CWriteHeader (conn, LanguageElementMType, StartDefineMType);
+ foo.object_type = object_type;
+ if (current_class_type)
+ {
+ foo.linkage = ParentLinkage;
+ if (TREE_PRIVATE (type))
+ foo.access = PrivateAccess;
+ else if (TREE_PROTECTED (type))
+ foo.access = ProtectedAccess;
+ else
+ foo.access = PublicAccess;
+ }
+ else
+ {
+ foo.linkage = NoLinkage;
+ foo.access = PublicAccess;
+ }
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+
+ foo.length = IDENTIFIER_LENGTH (name);
+
+ CWriteLanguageElem (conn, &foo, IDENTIFIER_POINTER (name));
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+static void
+CWriteUseObject (conn, type, object_type, use)
+ Connection *conn;
+ tree type;
+ CObjectType object_type;
+ CMessageSubType use;
+{
+ struct cadillac_mdep foo;
+ tree name = NULL_TREE;
+
+ CWriteHeader (conn, LanguageElementMType, use);
+ foo.object_type = object_type;
+ if (current_class_type)
+ {
+ foo.linkage = ParentLinkage;
+ if (TREE_PRIVATE (type))
+ foo.access = PrivateAccess;
+ else if (TREE_PROTECTED (type))
+ foo.access = ProtectedAccess;
+ else
+ foo.access = PublicAccess;
+ }
+ else
+ {
+ foo.linkage = NoLinkage;
+ foo.access = PublicAccess;
+ }
+ switch (TREE_CODE (type))
+ {
+ case VAR_DECL:
+ case FIELD_DECL:
+ case TYPE_DECL:
+ case CONST_DECL:
+ case FUNCTION_DECL:
+ name = DECL_NAME (type);
+ break;
+
+ default:
+ my_friendly_abort (33);
+ }
+
+ foo.length = IDENTIFIER_LENGTH (name);
+
+ CWriteLanguageElem (conn, &foo, IDENTIFIER_POINTER (name));
+ CWriteRequestBuffer (conn);
+ cadillac_process_requests (conn);
+}
+
+/* Here's how we exit under cadillac. */
+
+static void
+exit_cadillac ()
+{
+ extern int errorcount;
+
+ Connection *conn = cadillacObj.conn;
+
+ if (flag_cadillac)
+ {
+ CCompilerMessage *req;
+
+ CWriteHeader (conn, FinishedMType,
+ errorcount ? 0 : CsObjectWritten | CsComplete);
+ /* Bye, bye! */
+ CWriteRequestBuffer (conn);
+
+ /* Block on read. */
+ while (! readable_p (cadillacObj.fd_input))
+ {
+ if (exiting)
+ my_friendly_abort (34);
+ exiting = 1;
+ }
+ exiting = 1;
+
+ req = CReadCompilerMessage (conn);
+ cadillac_process_request (&cadillacObj, req);
+ }
+}
+
+#else
+/* Stubs. */
+void init_cadillac () {}
+void cadillac_start () {}
+void cadillac_start_decl (decl)
+ tree decl;
+{}
+void
+cadillac_finish_decl (decl)
+ tree decl;
+{}
+void
+cadillac_start_function (fndecl)
+ tree fndecl;
+{}
+void
+cadillac_finish_function (fndecl)
+ tree fndecl;
+{}
+void
+cadillac_finish_anon_union (decl)
+ tree decl;
+{}
+void
+cadillac_start_enum (type)
+ tree type;
+{}
+void
+cadillac_finish_enum (type)
+ tree type;
+{}
+void
+cadillac_start_struct (type)
+ tree type;
+{}
+void
+cadillac_finish_struct (type)
+ tree type;
+{}
+void
+cadillac_finish_exception (type)
+ tree type;
+{}
+void
+cadillac_push_class (type)
+ tree type;
+{}
+void
+cadillac_pop_class ()
+{}
+void
+cadillac_push_lang (name)
+ tree name;
+{}
+void
+cadillac_pop_lang ()
+{}
+void
+cadillac_note_source ()
+{}
+void
+cadillac_finish_stmt ()
+{}
+void
+cadillac_switch_source ()
+{}
+void
+cadillac_push_source ()
+{}
+void
+cadillac_pop_source ()
+{}
+#endif
diff --git a/gnu/usr.bin/cc/cc1plus/errfn.c b/gnu/usr.bin/cc/cc1plus/errfn.c
new file mode 100644
index 0000000..1b345fd
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/errfn.c
@@ -0,0 +1,217 @@
+/* Provide a call-back mechanism for handling error output.
+ Copyright (C) 1993 Free Software Foundation, Inc.
+ Contributed by Jason Merrill (jason@cygnus.com)
+
+ This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include "config.h"
+#include "tree.h"
+#include <ctype.h>
+
+/* cp_printer is the type of a function which converts an argument into
+ a string for digestion by printf. The cp_printer function should deal
+ with all memory management; the functions in this file will not free
+ the char*s returned. See error.c for an example use of this code. */
+
+typedef char* cp_printer PROTO((HOST_WIDE_INT, int));
+extern cp_printer * cp_printers[256];
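+
+/* For instance, error.c maps 'D' to decl_as_string, so a directive such
+   as %D in a format prints a declaration in its source form; cp_thing
+   below rewrites each recognized directive to a plain %s once the
+   registered printer has produced its string.  */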
+
+typedef void errorfn (); /* deliberately vague */
+
+extern char* cp_file_of PROTO((tree));
+extern int cp_line_of PROTO((tree));
+
+#define STRDUP(f) (ap = (char *) alloca (strlen (f) +1), strcpy (ap, (f)), ap)
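+/* Note that STRDUP copies onto the stack with alloca, so the copy must
+   not be freed; it disappears when the caller returns.  */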
+
+#define NARGS 3
+#define arglist a1, a2, a3
+#define arglist_dcl HOST_WIDE_INT a1, a2, a3;
+#define ARGSINIT args[0] = a1; args[1] = a2; args[2] = a3;
+#define ARGSLIST args[0], args[1], args[2]
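+
+/* These macros fake a fixed-size argument list: every cp_* entry point
+   takes up to NARGS anonymous HOST_WIDE_INT arguments and passes them
+   along in order via ARGSLIST.  */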
+
+static void
+cp_thing (errfn, atarg1, format, arglist)
+ errorfn *errfn;
+ int atarg1;
+ char *format;
+ arglist_dcl
+{
+ char *fmt;
+ char *f;
+ char *ap;
+ int arg;
+ HOST_WIDE_INT atarg = atarg1 ? a1 : 0;
+ HOST_WIDE_INT args[NARGS];
+ ARGSINIT
+
+ fmt = STRDUP(format);
+
+ for (f = fmt, arg = 0; *f; ++f)
+ {
+ cp_printer * function;
+ int alternate;
+ int maybe_here;
+
+ /* ignore text */
+ if (*f != '%') continue;
+
+ ++f;
+
+ alternate = 0;
+ maybe_here = 0;
+
+ /* ignore most flags */
+ while (*f == ' ' || *f == '-' || *f == '+' || *f == '#')
+ {
+ if (*f == '+')
+ maybe_here = 1;
+ else if (*f == '#')
+ alternate = 1;
+ ++f;
+ }
+
+ /* ignore field width */
+ if (*f == '*')
+ {
+ ++f;
+ ++arg;
+ }
+ else
+ while (isdigit (*f))
+ ++f;
+
+ /* ignore precision */
+ if (*f == '.')
+ {
+ ++f;
+ if (*f == '*')
+ {
+ ++f;
+ ++arg;
+ }
+ else
+ while (isdigit (*f))
+ ++f;
+ }
+
+ /* ignore "long" */
+ if (*f == 'l')
+ ++f;
+
+ function = cp_printers[(int)*f];
+
+ if (function)
+ {
+ char *p;
+
+ if (arg >= NARGS) abort ();
+
+ if (maybe_here && atarg1)
+ atarg = args[arg];
+
+ /* Must use a temporary to avoid calling *function twice */
+ p = (*function) (args[arg], alternate);
+ args[arg] = (HOST_WIDE_INT) STRDUP(p);
+ *f = 's';
+ }
+
+ ++arg; /* Assume valid format string */
+
+ }
+
+ if (atarg)
+ {
+ char *file = cp_file_of ((tree) atarg);
+ int line = cp_line_of ((tree) atarg);
+ (*errfn) (file, line, fmt, ARGSLIST);
+ }
+ else
+ (*errfn) (fmt, ARGSLIST);
+
+}
+
+void
+cp_error (format, arglist)
+ char *format;
+ arglist_dcl
+{
+ extern errorfn error;
+ cp_thing (error, 0, format, arglist);
+}
+
+void
+cp_warning (format, arglist)
+ char *format;
+ arglist_dcl
+{
+ extern errorfn warning;
+ cp_thing (warning, 0, format, arglist);
+}
+
+void
+cp_pedwarn (format, arglist)
+ char *format;
+ arglist_dcl
+{
+ extern errorfn pedwarn;
+ cp_thing (pedwarn, 0, format, arglist);
+}
+
+void
+cp_compiler_error (format, arglist)
+ char *format;
+ arglist_dcl
+{
+ extern errorfn compiler_error;
+ cp_thing (compiler_error, 0, format, arglist);
+}
+
+void
+cp_sprintf (format, arglist)
+ char *format;
+ arglist_dcl
+{
+ extern errorfn sprintf;
+ cp_thing (sprintf, 0, format, arglist);
+}
+
+void
+cp_error_at (format, arglist)
+ char *format;
+ arglist_dcl
+{
+ extern errorfn error_with_file_and_line;
+ cp_thing (error_with_file_and_line, 1, format, arglist);
+}
+
+void
+cp_warning_at (format, arglist)
+ char *format;
+ arglist_dcl
+{
+ extern errorfn warning_with_file_and_line;
+ cp_thing (warning_with_file_and_line, 1, format, arglist);
+}
+
+void
+cp_pedwarn_at (format, arglist)
+ char *format;
+ arglist_dcl
+{
+ extern errorfn pedwarn_with_file_and_line;
+ cp_thing (pedwarn_with_file_and_line, 1, format, arglist);
+}
diff --git a/gnu/usr.bin/cc/cc1plus/error.c b/gnu/usr.bin/cc/cc1plus/error.c
new file mode 100644
index 0000000..f431f6a
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/error.c
@@ -0,0 +1,1404 @@
+/* Call-backs for C++ error reporting.
+ This code is non-reentrant.
+ Copyright (C) 1993 Free Software Foundation, Inc.
+
+ This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include "config.h"
+#include "tree.h"
+#include "cp-tree.h"
+#include "obstack.h"
+#include <ctype.h>
+
+typedef char* cp_printer ();
+
+#define A args_as_string
+#define C code_as_string
+#define D decl_as_string
+#define E expr_as_string
+#define L language_as_string
+#define O op_as_string
+#define P parm_as_string
+#define T type_as_string
+
+#define _ (cp_printer *) 0
+cp_printer * cp_printers[256] =
+{
+/*0 1 2 3 4 5 6 7 8 9 A B C D E F */
+ _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, /* 0x00 */
+ _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, /* 0x10 */
+ _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, /* 0x20 */
+ _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, /* 0x30 */
+ _, A, _, C, D, E, _, _, _, _, _, _, L, _, _, O, /* 0x40 */
+ P, _, _, _, T, _, _, _, _, _, _, _, _, _, _, _, /* 0x50 */
+ _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, /* 0x60 */
+ _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, /* 0x70 */
+};
+#undef A
+#undef C
+#undef D
+#undef E
+#undef L
+#undef O
+#undef P
+#undef T
+#undef _
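+
+/* The table above is indexed by character code: 'A' (0x41) dispatches to
+   args_as_string, 'D' (0x44) to decl_as_string, and so on, covering the
+   %A, %C, %D, %E, %L, %O, %P and %T directives recognized in errfn.c.  */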
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* Obstack where we build text strings for overloading, etc. */
+static struct obstack scratch_obstack;
+static char *scratch_firstobj;
+
+# define OB_INIT() (scratch_firstobj ? (obstack_free (&scratch_obstack, scratch_firstobj), 0) : 0)
+# define OB_PUTC(C) (obstack_1grow (&scratch_obstack, (C)))
+# define OB_PUTC2(C1,C2) \
+ (obstack_1grow (&scratch_obstack, (C1)), obstack_1grow (&scratch_obstack, (C2)))
+# define OB_PUTS(S) (obstack_grow (&scratch_obstack, (S), sizeof (S) - 1))
+# define OB_PUTID(ID) \
+ (obstack_grow (&scratch_obstack, IDENTIFIER_POINTER (ID), \
+ IDENTIFIER_LENGTH (ID)))
+# define OB_PUTCP(S) (obstack_grow (&scratch_obstack, (S), strlen (S)))
+# define OB_FINISH() (obstack_1grow (&scratch_obstack, '\0'))
+# define OB_PUTI(CST) do { sprintf (digit_buffer, "%d", (CST)); \
+ OB_PUTCP (digit_buffer); } while (0)
+# define OB_UNPUT(N) obstack_blank (&scratch_obstack, - (N));
+
+# define NEXT_CODE(t) (TREE_CODE (TREE_TYPE (t)))
+
+static void dump_type (), dump_decl (), dump_function_decl ();
+static void dump_expr (), dump_unary_op (), dump_binary_op ();
+static void dump_aggr_type (), dump_type_prefix (), dump_type_suffix ();
+static void dump_function_name ();
+
+void
+init_error ()
+{
+ gcc_obstack_init (&scratch_obstack);
+ scratch_firstobj = (char *)obstack_alloc (&scratch_obstack, 0);
+}
+
+enum pad { none, before, after };
+
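+/* P says where dump_readonly_or_volatile should emit a separating space
+   relative to the qualifiers it prints: before them, after them, or not
+   at all.  */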
+static void
+dump_readonly_or_volatile (t, p)
+ tree t;
+ enum pad p;
+{
+ if (TYPE_READONLY (t) || TYPE_VOLATILE (t))
+ {
+ if (p == before) OB_PUTC (' ');
+ if (TYPE_READONLY (t))
+ OB_PUTS ("const");
+ if (TYPE_VOLATILE (t))
+ OB_PUTS ("volatile");
+ if (p == after) OB_PUTC (' ');
+ }
+}
+
+/* This must be large enough to hold any printed integer or floating-point
+ value. */
+static char digit_buffer[128];
+
+/* Dump into the obstack a human-readable equivalent of TYPE. */
+static void
+dump_type (t, v)
+ tree t;
+ int v; /* verbose? */
+{
+ if (t == NULL_TREE)
+ return;
+
+ if (TYPE_PTRMEMFUNC_P (t))
+ goto offset_type;
+
+ switch (TREE_CODE (t))
+ {
+ case ERROR_MARK:
+ OB_PUTS ("{error}");
+ break;
+
+ case UNKNOWN_TYPE:
+ OB_PUTS ("{unknown type}");
+ break;
+
+ case TREE_LIST:
+      /* A list of parameter types; a lone void_list_node means a function
+	 taking no arguments, which prints as nothing.  */
+ if (t != void_list_node)
+ {
+ dump_type (TREE_VALUE (t), v);
+ /* Can this happen other than for default arguments? */
+ if (TREE_PURPOSE (t) && v)
+ {
+ OB_PUTS (" = ");
+ dump_expr (TREE_PURPOSE (t));
+ }
+ if (TREE_CHAIN (t))
+ {
+ if (TREE_CHAIN (t) != void_list_node)
+ {
+ OB_PUTC2 (',', ' ');
+ dump_type (TREE_CHAIN (t), v);
+ }
+ }
+ else OB_PUTS (" ...");
+ }
+ break;
+
+ case IDENTIFIER_NODE:
+ OB_PUTID (t);
+ break;
+
+ case TREE_VEC:
+ dump_type (BINFO_TYPE (t), v);
+ break;
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case ENUMERAL_TYPE:
+ if (TYPE_LANG_SPECIFIC (t)
+ && (IS_SIGNATURE_POINTER (t) || IS_SIGNATURE_REFERENCE (t)))
+ {
+	  if (TYPE_READONLY (t) || TYPE_VOLATILE (t))
+	    dump_readonly_or_volatile (t, after);
+ dump_type (SIGNATURE_TYPE (t), v);
+ if (IS_SIGNATURE_POINTER (t))
+ OB_PUTC ('*');
+ else
+ OB_PUTC ('&');
+ }
+ else
+ dump_aggr_type (t, v);
+ break;
+
+ case TYPE_DECL:
+ dump_decl (t, v);
+ break;
+
+ case INTEGER_TYPE:
+ if (!TREE_UNSIGNED (TYPE_MAIN_VARIANT (t)) && TREE_UNSIGNED (t))
+ OB_PUTS ("unsigned ");
+ else if (TREE_UNSIGNED (TYPE_MAIN_VARIANT (t)) && !TREE_UNSIGNED (t))
+ OB_PUTS ("signed ");
+
+ /* fall through. */
+ case REAL_TYPE:
+ case VOID_TYPE:
+ case BOOLEAN_TYPE:
+ dump_readonly_or_volatile (t, after);
+ OB_PUTID (TYPE_IDENTIFIER (t));
+ break;
+
+ case TEMPLATE_TYPE_PARM:
+ OB_PUTID (TYPE_IDENTIFIER (t));
+ break;
+
+ case UNINSTANTIATED_P_TYPE:
+ OB_PUTID (DECL_NAME (UPT_TEMPLATE (t)));
+ OB_PUTS ("<...>");
+ break;
+
+ /* This is not always necessary for pointers and such, but doing this
+ reduces code size. */
+ case ARRAY_TYPE:
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ case OFFSET_TYPE:
+ offset_type:
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ dump_type_prefix (t, v);
+ dump_type_suffix (t, v);
+ break;
+
+ default:
+ sorry ("`%s' not supported by dump_type",
+ tree_code_name[(int) TREE_CODE (t)]);
+ }
+}
+
+static char *
+aggr_variety (t)
+ tree t;
+{
+ if (TREE_CODE (t) == ENUMERAL_TYPE)
+ return "enum";
+ else if (TREE_CODE (t) == UNION_TYPE)
+ return "union";
+ else if (TYPE_LANG_SPECIFIC (t) && CLASSTYPE_DECLARED_CLASS (t))
+ return "class";
+ else if (TYPE_LANG_SPECIFIC (t) && IS_SIGNATURE (t))
+ return "signature";
+ else
+ return "struct";
+}
+
+/* Print out a class declaration, in the form `class foo'. */
+static void
+dump_aggr_type (t, v)
+ tree t;
+ int v; /* verbose? */
+{
+ tree name;
+ char *variety = aggr_variety (t);
+
+ dump_readonly_or_volatile (t, after);
+
+ if (v > 0)
+ {
+ OB_PUTCP (variety);
+ OB_PUTC (' ');
+ }
+
+ name = TYPE_NAME (t);
+
+ if (DECL_CONTEXT (name))
+ {
+ /* FUNCTION_DECL or RECORD_TYPE */
+ dump_decl (DECL_CONTEXT (name), 0);
+ OB_PUTC2 (':', ':');
+ }
+
+  /* Kludge around weird behavior on g++.brendan/line1.C.  */
+ if (TREE_CODE (name) != IDENTIFIER_NODE)
+ name = DECL_NAME (name);
+
+ if (ANON_AGGRNAME_P (name))
+ {
+ OB_PUTS ("{anonymous");
+ if (!v)
+ {
+ OB_PUTC (' ');
+ OB_PUTCP (variety);
+ }
+ OB_PUTC ('}');
+ }
+ else
+ OB_PUTID (name);
+}
+
+/* Dump into the obstack the initial part of the output for a given type.
+ This is necessary when dealing with things like functions returning
+ functions. Examples:
+
+ return type of `int (* fee ())()': pointer -> function -> int. Both
+ pointer (and reference and offset) and function (and member) types must
+ deal with prefix and suffix.
+
+ Arrays must also do this for DECL nodes, like int a[], and for things like
+ int *[]&. */
+
+static void
+dump_type_prefix (t, v)
+ tree t;
+ int v; /* verbosity */
+{
+ if (TYPE_PTRMEMFUNC_P (t))
+ {
+ t = TYPE_PTRMEMFUNC_FN_TYPE (t);
+ goto offset_type;
+ }
+
+ switch (TREE_CODE (t))
+ {
+ case POINTER_TYPE:
+ {
+ tree sub = TREE_TYPE (t);
+
+ dump_type_prefix (sub, v);
+ /* A tree for a member pointer looks like pointer to offset,
+ so let the OFFSET_TYPE case handle it. */
+ if (TREE_CODE (sub) != OFFSET_TYPE)
+ {
+ switch (TREE_CODE (sub))
+ {
+ /* We don't want int ( *)() */
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ break;
+
+ case ARRAY_TYPE:
+ OB_PUTC2 (' ', '(');
+ break;
+
+ case POINTER_TYPE:
+ /* We don't want "char * *" */
+ if (! (TYPE_READONLY (sub) || TYPE_VOLATILE (sub)))
+ break;
+ /* But we do want "char *const *" */
+
+ default:
+ OB_PUTC (' ');
+ }
+ OB_PUTC ('*');
+ dump_readonly_or_volatile (t, none);
+ }
+ }
+ break;
+
+ case REFERENCE_TYPE:
+ {
+ tree sub = TREE_TYPE (t);
+ dump_type_prefix (sub, v);
+
+ switch (TREE_CODE (sub))
+ {
+ case ARRAY_TYPE:
+ OB_PUTC2 (' ', '(');
+ break;
+
+ case POINTER_TYPE:
+ /* We don't want "char * &" */
+ if (! (TYPE_READONLY (sub) || TYPE_VOLATILE (sub)))
+ break;
+ /* But we do want "char *const &" */
+
+ default:
+ OB_PUTC (' ');
+ }
+ }
+ OB_PUTC ('&');
+ dump_readonly_or_volatile (t, none);
+ break;
+
+ case OFFSET_TYPE:
+ offset_type:
+ dump_type_prefix (TREE_TYPE (t), v);
+ if (TREE_CODE (t) == OFFSET_TYPE) /* pmfs deal with this in d_t_p */
+ {
+ OB_PUTC (' ');
+ dump_type (TYPE_OFFSET_BASETYPE (t), 0);
+ OB_PUTC2 (':', ':');
+ }
+ OB_PUTC ('*');
+ dump_readonly_or_volatile (t, none);
+ break;
+
+ /* Can only be reached through function pointer -- this would not be
+ correct if FUNCTION_DECLs used it. */
+ case FUNCTION_TYPE:
+ dump_type_prefix (TREE_TYPE (t), v);
+ OB_PUTC2 (' ', '(');
+ break;
+
+ case METHOD_TYPE:
+ dump_type_prefix (TREE_TYPE (t), v);
+ OB_PUTC2 (' ', '(');
+ dump_aggr_type (TYPE_METHOD_BASETYPE (t), 0);
+ OB_PUTC2 (':', ':');
+ break;
+
+ case ARRAY_TYPE:
+ dump_type_prefix (TREE_TYPE (t), v);
+ break;
+
+ case ENUMERAL_TYPE:
+ case ERROR_MARK:
+ case IDENTIFIER_NODE:
+ case INTEGER_TYPE:
+ case BOOLEAN_TYPE:
+ case REAL_TYPE:
+ case RECORD_TYPE:
+ case TEMPLATE_TYPE_PARM:
+ case TREE_LIST:
+ case TYPE_DECL:
+ case TREE_VEC:
+ case UNINSTANTIATED_P_TYPE:
+ case UNION_TYPE:
+ case UNKNOWN_TYPE:
+ case VOID_TYPE:
+ dump_type (t, v);
+ break;
+
+ default:
+ sorry ("`%s' not supported by dump_type_prefix",
+ tree_code_name[(int) TREE_CODE (t)]);
+ }
+}
+
+static void
+dump_type_suffix (t, v)
+ tree t;
+ int v; /* verbose? */
+{
+ if (TYPE_PTRMEMFUNC_P (t))
+ t = TYPE_PTRMEMFUNC_FN_TYPE (t);
+
+ switch (TREE_CODE (t))
+ {
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ case OFFSET_TYPE:
+ if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
+ OB_PUTC (')');
+ dump_type_suffix (TREE_TYPE (t), v);
+ break;
+
+ /* Can only be reached through function pointer */
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ {
+ tree arg;
+ OB_PUTC2 (')', '(');
+ arg = TYPE_ARG_TYPES (t);
+ if (TREE_CODE (t) == METHOD_TYPE)
+ arg = TREE_CHAIN (arg);
+
+ if (arg)
+ dump_type (arg, v);
+ else
+ OB_PUTS ("...");
+ OB_PUTC (')');
+ if (TREE_CODE (t) == METHOD_TYPE)
+ dump_readonly_or_volatile
+ (TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (t))), before);
+ dump_type_suffix (TREE_TYPE (t), v);
+ break;
+ }
+
+ case ARRAY_TYPE:
+ OB_PUTC ('[');
+ if (TYPE_DOMAIN (t))
+ OB_PUTI (TREE_INT_CST_LOW (TYPE_MAX_VALUE (TYPE_DOMAIN (t))) + 1);
+ OB_PUTC (']');
+ dump_type_suffix (TREE_TYPE (t), v);
+ break;
+
+ case ENUMERAL_TYPE:
+ case ERROR_MARK:
+ case IDENTIFIER_NODE:
+ case INTEGER_TYPE:
+ case BOOLEAN_TYPE:
+ case REAL_TYPE:
+ case RECORD_TYPE:
+ case TEMPLATE_TYPE_PARM:
+ case TREE_LIST:
+ case TYPE_DECL:
+ case TREE_VEC:
+ case UNINSTANTIATED_P_TYPE:
+ case UNION_TYPE:
+ case UNKNOWN_TYPE:
+ case VOID_TYPE:
+ break;
+
+ default:
+ sorry ("`%s' not supported by dump_type_suffix",
+ tree_code_name[(int) TREE_CODE (t)]);
+ }
+}
+
+/* Return a function declaration which corresponds to the IDENTIFIER_NODE
+ argument. */
+tree
+ident_fndecl (t)
+ tree t;
+{
+ tree n = lookup_name (t, 0);
+
+ if (TREE_CODE (n) == FUNCTION_DECL)
+ return n;
+ else if (TREE_CODE (n) == TREE_LIST
+ && TREE_CODE (TREE_VALUE (n)) == FUNCTION_DECL)
+ return TREE_VALUE (n);
+
+ my_friendly_abort (66);
+ return NULL_TREE;
+}
+
+#ifndef NO_DOLLAR_IN_LABEL
+# define GLOBAL_THING "_GLOBAL_$"
+#else
+# ifndef NO_DOT_IN_LABEL
+# define GLOBAL_THING "_GLOBAL_."
+# else
+# define GLOBAL_THING "_GLOBAL__"
+# endif
+#endif
+
+#define GLOBAL_IORD_P(NODE) \
+ !strncmp(IDENTIFIER_POINTER(NODE),GLOBAL_THING,sizeof(GLOBAL_THING)-1)
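+
+/* True iff NODE names a compiler-generated function that runs file-scope
+   constructors or destructors; such assembler names begin with
+   GLOBAL_THING followed by 'I' or 'D'.  */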
+
+void
+dump_global_iord (t)
+ tree t;
+{
+ char *name = IDENTIFIER_POINTER (t);
+
+ OB_PUTS ("(static ");
+ if (name [sizeof (GLOBAL_THING) - 1] == 'I')
+ OB_PUTS ("initializers");
+ else if (name [sizeof (GLOBAL_THING) - 1] == 'D')
+ OB_PUTS ("destructors");
+ else
+ my_friendly_abort (352);
+
+ OB_PUTS (" for ");
+ OB_PUTCP (input_filename);
+ OB_PUTC (')');
+}
+
+static void
+dump_decl (t, v)
+ tree t;
+ int v; /* verbosity */
+{
+ if (t == NULL_TREE)
+ return;
+
+ switch (TREE_CODE (t))
+ {
+ case ERROR_MARK:
+ OB_PUTS (" /* decl error */ ");
+ break;
+
+ case TYPE_DECL:
+ {
+ /* Don't say 'typedef class A' */
+ tree type = TREE_TYPE (t);
+ if (IS_AGGR_TYPE (type) && ! TYPE_PTRMEMFUNC_P (type)
+ && type == TYPE_MAIN_VARIANT (type))
+ {
+ dump_type (type, v);
+ break;
+ }
+ }
+ if (v > 0)
+ OB_PUTS ("typedef ");
+ goto general;
+ break;
+
+ case VAR_DECL:
+ if (VTABLE_NAME_P (DECL_NAME (t)))
+ {
+ OB_PUTS ("vtable for ");
+ dump_type (DECL_CONTEXT (t), v);
+ break;
+ }
+ /* else fall through */
+ case FIELD_DECL:
+ case PARM_DECL:
+ general:
+ if (v > 0)
+ {
+ dump_type_prefix (TREE_TYPE (t), v);
+ OB_PUTC (' ');
+ }
+ /* DECL_CLASS_CONTEXT isn't being set in some cases. Hmm... */
+ if (DECL_CONTEXT (t)
+ && TREE_CODE_CLASS (TREE_CODE (DECL_CONTEXT (t))) == 't')
+ {
+ dump_type (DECL_CONTEXT (t), 0);
+ OB_PUTC2 (':', ':');
+ }
+ if (DECL_NAME (t))
+ dump_decl (DECL_NAME (t), v);
+ else
+ OB_PUTS ("{anon}");
+ if (v > 0)
+ dump_type_suffix (TREE_TYPE (t), v);
+ break;
+
+ case ARRAY_REF:
+ dump_decl (TREE_OPERAND (t, 0), v);
+ OB_PUTC ('[');
+ dump_decl (TREE_OPERAND (t, 1), v);
+ OB_PUTC (']');
+ break;
+
+ /* So that we can do dump_decl in dump_aggr_type and have it work for
+ both class and function scope. */
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case ENUMERAL_TYPE:
+ dump_type (t, v);
+ break;
+
+ case TYPE_EXPR:
+ my_friendly_abort (69);
+ break;
+
+ /* These special cases are duplicated here so that other functions
+ can feed identifiers to cp_error and get them demangled properly. */
+ case IDENTIFIER_NODE:
+ if (DESTRUCTOR_NAME_P (t))
+ {
+ OB_PUTC ('~');
+ dump_decl (DECL_NAME (ident_fndecl (t)), 0);
+ }
+ else if (IDENTIFIER_TYPENAME_P (t))
+ {
+ OB_PUTS ("operator ");
+ /* Not exactly IDENTIFIER_TYPE_VALUE. */
+ dump_type (TREE_TYPE (t), 0);
+ break;
+ }
+ else if (IDENTIFIER_OPNAME_P (t))
+ {
+ char *name_string = operator_name_string (t);
+ OB_PUTS ("operator ");
+ OB_PUTCP (name_string);
+ }
+ else
+ OB_PUTID (t);
+ break;
+
+ case FUNCTION_DECL:
+ if (GLOBAL_IORD_P (DECL_ASSEMBLER_NAME (t)))
+ dump_global_iord (DECL_ASSEMBLER_NAME (t));
+ else
+ dump_function_decl (t, v);
+ break;
+
+ case TEMPLATE_DECL:
+ {
+ tree args = DECL_TEMPLATE_PARMS (t);
+ int i, len = TREE_VEC_LENGTH (args);
+ OB_PUTS ("template <");
+ for (i = 0; i < len; i++)
+ {
+ tree arg = TREE_VEC_ELT (args, i);
+ if (TREE_CODE (arg) == IDENTIFIER_NODE)
+ {
+ OB_PUTS ("class ");
+ OB_PUTID (arg);
+ }
+ else
+ dump_decl (arg, 1);
+ OB_PUTC2 (',', ' ');
+ }
+ OB_UNPUT (2);
+ OB_PUTC2 ('>', ' ');
+
+ if (DECL_TEMPLATE_IS_CLASS (t))
+ {
+ OB_PUTS ("class ");
+ OB_PUTID (DECL_NAME (t));
+ }
+ else switch (NEXT_CODE (t))
+ {
+ case METHOD_TYPE:
+ case FUNCTION_TYPE:
+ dump_function_decl (t, v);
+ break;
+
+ default:
+ my_friendly_abort (353);
+ }
+ }
+ break;
+
+ case LABEL_DECL:
+ OB_PUTID (DECL_NAME (t));
+ break;
+
+ case CONST_DECL:
+ if (NEXT_CODE (t) == ENUMERAL_TYPE)
+ goto general;
+ else
+ dump_expr (DECL_INITIAL (t), 0);
+ break;
+
+ default:
+ sorry ("`%s' not supported by dump_decl",
+ tree_code_name[(int) TREE_CODE (t)]);
+ }
+}
+
+/* Pretty printing for announce_function. T is the declaration of the
+ function we are interested in seeing. V is non-zero if we should print
+ the type that this function returns. */
+
+static void
+dump_function_decl (t, v)
+ tree t;
+ int v;
+{
+ tree name = DECL_ASSEMBLER_NAME (t);
+ tree fntype = TREE_TYPE (t);
+ tree parmtypes = TYPE_ARG_TYPES (fntype);
+ tree cname = NULL_TREE;
+
+ /* Friends have DECL_CLASS_CONTEXT set, but not DECL_CONTEXT. */
+ if (DECL_CONTEXT (t))
+ cname = DECL_CLASS_CONTEXT (t);
+ /* this is for partially instantiated template methods */
+ else if (TREE_CODE (fntype) == METHOD_TYPE)
+ cname = TREE_TYPE (TREE_VALUE (parmtypes));
+
+ v = (v > 0);
+
+ if (v)
+ {
+ if (DECL_STATIC_FUNCTION_P (t))
+ OB_PUTS ("static ");
+
+ if (! IDENTIFIER_TYPENAME_P (name)
+ && ! DECL_CONSTRUCTOR_P (t)
+ && ! DESTRUCTOR_NAME_P (name))
+ {
+ dump_type_prefix (TREE_TYPE (fntype), 1);
+ OB_PUTC (' ');
+ }
+ }
+
+ if (cname)
+ {
+ dump_type (cname, 0);
+ OB_PUTC2 (':', ':');
+ if (TREE_CODE (fntype) == METHOD_TYPE && parmtypes)
+ parmtypes = TREE_CHAIN (parmtypes);
+ if (DECL_CONSTRUCTOR_FOR_VBASE_P (t))
+ /* Skip past "in_charge" identifier. */
+ parmtypes = TREE_CHAIN (parmtypes);
+ }
+
+ if (DESTRUCTOR_NAME_P (name))
+ parmtypes = TREE_CHAIN (parmtypes);
+
+ dump_function_name (t);
+
+ OB_PUTC ('(');
+
+ if (parmtypes)
+ dump_type (parmtypes, v);
+ else
+ OB_PUTS ("...");
+
+ OB_PUTC (')');
+
+ if (v && ! IDENTIFIER_TYPENAME_P (name))
+ dump_type_suffix (TREE_TYPE (fntype), 1);
+
+ if (TREE_CODE (fntype) == METHOD_TYPE)
+ {
+ if (IS_SIGNATURE (cname))
+      /* We look at the type pointed to by the `optr' field of `this'.  */
+ dump_readonly_or_volatile
+ (TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_VALUE (TYPE_ARG_TYPES (fntype))))), before);
+ else
+ dump_readonly_or_volatile
+ (TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (fntype))), before);
+ }
+}
+
+/* Handle the function name for a FUNCTION_DECL node, grokking operators
+ and destructors properly. */
+static void
+dump_function_name (t)
+ tree t;
+{
+ tree name = DECL_NAME (t);
+
+ /* There ought to be a better way to find out whether or not something is
+ a destructor. */
+ if (DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (t)))
+ {
+ OB_PUTC ('~');
+ dump_decl (name, 0);
+ }
+ else if (IDENTIFIER_TYPENAME_P (name))
+ {
+ /* This cannot use the hack that the operator's return
+ type is stashed off of its name because it may be
+ used for error reporting. In the case of conflicting
+ declarations, both will have the same name, yet
+ the types will be different, hence the TREE_TYPE field
+ of the first name will be clobbered by the second. */
+ OB_PUTS ("operator ");
+ dump_type (TREE_TYPE (TREE_TYPE (t)), 0);
+ }
+ else if (IDENTIFIER_OPNAME_P (name))
+ {
+ char *name_string = operator_name_string (name);
+ OB_PUTS ("operator ");
+ OB_PUTCP (name_string);
+ }
+ else
+ dump_decl (name, 0);
+}
+
+static void
+dump_char (c)
+ char c;
+{
+ switch (c)
+ {
+ case TARGET_NEWLINE:
+ OB_PUTS ("\\n");
+ break;
+ case TARGET_TAB:
+ OB_PUTS ("\\t");
+ break;
+ case TARGET_VT:
+ OB_PUTS ("\\v");
+ break;
+ case TARGET_BS:
+ OB_PUTS ("\\b");
+ break;
+ case TARGET_CR:
+ OB_PUTS ("\\r");
+ break;
+ case TARGET_FF:
+ OB_PUTS ("\\f");
+ break;
+ case TARGET_BELL:
+ OB_PUTS ("\\a");
+ break;
+ case '\\':
+ OB_PUTS ("\\\\");
+ break;
+ case '\'':
+ OB_PUTS ("\\'");
+ break;
+ case '\"':
+ OB_PUTS ("\\\"");
+ break;
+ default:
+ if (isprint (c))
+ OB_PUTC (c);
+ else
+ {
+ sprintf (digit_buffer, "\\%03o", (int) c);
+ OB_PUTCP (digit_buffer);
+ }
+ }
+}
+
+/* Print out a list of initializers (subr of dump_expr) */
+static void
+dump_expr_list (l)
+ tree l;
+{
+ while (l)
+ {
+ dump_expr (TREE_VALUE (l), 0);
+ if (TREE_CHAIN (l))
+ OB_PUTC2 (',', ' ');
+ l = TREE_CHAIN (l);
+ }
+}
+
+/* Print out an expression */
+static void
+dump_expr (t, nop)
+ tree t;
+ int nop; /* suppress parens */
+{
+ switch (TREE_CODE (t))
+ {
+ case VAR_DECL:
+ case PARM_DECL:
+ case FIELD_DECL:
+ case CONST_DECL:
+ case FUNCTION_DECL:
+ dump_decl (t, -1);
+ break;
+
+ case INTEGER_CST:
+ {
+ tree type = TREE_TYPE (t);
+ my_friendly_assert (type != 0, 81);
+
+ /* If it's an enum, output its tag, rather than its value. */
+ if (TREE_CODE (type) == ENUMERAL_TYPE)
+ {
+ char *p = enum_name_string (t, type);
+ OB_PUTCP (p);
+ }
+ else if (type == char_type_node
+ || type == signed_char_type_node
+ || type == unsigned_char_type_node)
+ {
+ OB_PUTC ('\'');
+ dump_char (TREE_INT_CST_LOW (t));
+ OB_PUTC ('\'');
+ }
+ else if (TREE_INT_CST_HIGH (t)
+ != (TREE_INT_CST_LOW (t) >> (HOST_BITS_PER_WIDE_INT - 1)))
+ {
+ tree val = t;
+ if (TREE_INT_CST_HIGH (val) < 0)
+ {
+ OB_PUTC ('-');
+ val = build_int_2 (~TREE_INT_CST_LOW (val),
+ -TREE_INT_CST_HIGH (val));
+ }
+ /* Would "%x%0*x" or "%x%*0x" get zero-padding on all
+ systems? */
+ {
+ static char format[10]; /* "%x%09999x\0" */
+ if (!format[0])
+ sprintf (format, "%%x%%0%dx", HOST_BITS_PER_INT / 4);
+ sprintf (digit_buffer, format, TREE_INT_CST_HIGH (val),
+ TREE_INT_CST_LOW (val));
+ OB_PUTCP (digit_buffer);
+ }
+ }
+ else
+ OB_PUTI (TREE_INT_CST_LOW (t));
+ }
+ break;
+
+ case REAL_CST:
+#ifndef REAL_IS_NOT_DOUBLE
+ sprintf (digit_buffer, "%g", TREE_REAL_CST (t));
+#else
+ {
+ unsigned char *p = (unsigned char *) &TREE_REAL_CST (t);
+ int i;
+ strcpy (digit_buffer, "0x");
+ for (i = 0; i < sizeof TREE_REAL_CST (t); i++)
+ sprintf (digit_buffer + 2 + 2*i, "%02x", *p++);
+ }
+#endif
+ OB_PUTCP (digit_buffer);
+ break;
+
+ case STRING_CST:
+ {
+ char *p = TREE_STRING_POINTER (t);
+ int len = TREE_STRING_LENGTH (t) - 1;
+ int i;
+
+ OB_PUTC ('\"');
+ for (i = 0; i < len; i++)
+ dump_char (p[i]);
+ OB_PUTC ('\"');
+ }
+ break;
+
+ case COMPOUND_EXPR:
+ dump_binary_op (",", t);
+ break;
+
+ case COND_EXPR:
+ OB_PUTC ('(');
+ dump_expr (TREE_OPERAND (t, 0), 0);
+ OB_PUTS (" ? ");
+ dump_expr (TREE_OPERAND (t, 1), 0);
+ OB_PUTS (" : ");
+ dump_expr (TREE_OPERAND (t, 2), 0);
+ OB_PUTC (')');
+ break;
+
+ case SAVE_EXPR:
+ if (TREE_HAS_CONSTRUCTOR (t))
+ {
+ OB_PUTS ("new ");
+ dump_type (TREE_TYPE (TREE_TYPE (t)), 0);
+ PARM_DECL_EXPR (t) = 1;
+ }
+ else
+ {
+ sorry ("operand of SAVE_EXPR not understood");
+ goto error;
+ }
+ break;
+
+ case NEW_EXPR:
+ OB_PUTID (TYPE_IDENTIFIER (TREE_TYPE (t)));
+ OB_PUTC ('(');
+ dump_expr_list (TREE_CHAIN (TREE_OPERAND (t, 1)));
+ OB_PUTC (')');
+ break;
+
+ case CALL_EXPR:
+ {
+ tree fn = TREE_OPERAND (t, 0);
+ tree args = TREE_OPERAND (t, 1);
+
+ if (TREE_CODE (fn) == ADDR_EXPR)
+ fn = TREE_OPERAND (fn, 0);
+
+ if (NEXT_CODE (fn) == METHOD_TYPE)
+ {
+ tree ob = TREE_VALUE (args);
+ if (TREE_CODE (ob) == ADDR_EXPR)
+ {
+ dump_expr (TREE_OPERAND (ob, 0), 0);
+ OB_PUTC ('.');
+ }
+ else if (TREE_CODE (ob) != PARM_DECL
+ || strcmp (IDENTIFIER_POINTER (DECL_NAME (ob)), "this"))
+ {
+ dump_expr (ob, 0);
+ OB_PUTC2 ('-', '>');
+ }
+ args = TREE_CHAIN (args);
+ }
+ dump_expr (fn, 0);
+ OB_PUTC('(');
+ dump_expr_list (args);
+ OB_PUTC (')');
+ }
+ break;
+
+ case WITH_CLEANUP_EXPR:
+ /* Note that this only works for G++ cleanups. If somebody
+ builds a general cleanup, there's no way to represent it. */
+ dump_expr (TREE_OPERAND (t, 0), 0);
+ break;
+
+ case TARGET_EXPR:
+ /* Note that this only works for G++ target exprs. If somebody
+ builds a general TARGET_EXPR, there's no way to represent that
+	 it initializes anything other than the parameter slot for the
+ default argument. Note we may have cleared out the first
+ operand in expand_expr, so don't go killing ourselves. */
+ if (TREE_OPERAND (t, 1))
+ dump_expr (TREE_OPERAND (t, 1), 0);
+ break;
+
+ case MODIFY_EXPR:
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case MULT_EXPR:
+ case TRUNC_DIV_EXPR:
+ case TRUNC_MOD_EXPR:
+ case MIN_EXPR:
+ case MAX_EXPR:
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ case BIT_AND_EXPR:
+ case BIT_ANDTC_EXPR:
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ case LT_EXPR:
+ case LE_EXPR:
+ case GT_EXPR:
+ case GE_EXPR:
+ case EQ_EXPR:
+ case NE_EXPR:
+ dump_binary_op (opname_tab[(int) TREE_CODE (t)], t);
+ break;
+
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ dump_binary_op ("/", t);
+ break;
+
+ case CEIL_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ case ROUND_MOD_EXPR:
+ dump_binary_op ("%", t);
+ break;
+
+ case COMPONENT_REF:
+ {
+ tree ob = TREE_OPERAND (t, 0);
+ if (TREE_CODE (ob) == INDIRECT_REF)
+ {
+ ob = TREE_OPERAND (ob, 0);
+ if (TREE_CODE (ob) != PARM_DECL
+ || strcmp (IDENTIFIER_POINTER (DECL_NAME (ob)), "this"))
+ {
+ dump_expr (ob, 0);
+ OB_PUTC2 ('-', '>');
+ }
+ }
+ else
+ {
+ dump_expr (ob, 0);
+ OB_PUTC ('.');
+ }
+ dump_expr (TREE_OPERAND (t, 1), 1);
+ }
+ break;
+
+ case CONVERT_EXPR:
+ dump_unary_op ("+", t, nop);
+ break;
+
+ case ADDR_EXPR:
+ if (TREE_CODE (TREE_OPERAND (t, 0)) == FUNCTION_DECL
+ || TREE_CODE (TREE_OPERAND (t, 0)) == STRING_CST)
+ dump_expr (TREE_OPERAND (t, 0), 0);
+ else
+ dump_unary_op ("&", t, nop);
+ break;
+
+ case INDIRECT_REF:
+ if (TREE_HAS_CONSTRUCTOR (t))
+ {
+ t = TREE_OPERAND (t, 0);
+ my_friendly_assert (TREE_CODE (t) == CALL_EXPR, 237);
+ dump_expr (TREE_OPERAND (t, 0), 0);
+ OB_PUTC ('(');
+ dump_expr_list (TREE_CHAIN (TREE_OPERAND (t, 1)));
+ OB_PUTC (')');
+ }
+ else
+ {
+ if (NEXT_CODE (TREE_OPERAND (t, 0)) == REFERENCE_TYPE)
+ dump_expr (TREE_OPERAND (t, 0), nop);
+ else
+ dump_unary_op ("*", t, nop);
+ }
+ break;
+
+ case NEGATE_EXPR:
+ case BIT_NOT_EXPR:
+ case TRUTH_NOT_EXPR:
+ case PREDECREMENT_EXPR:
+ case PREINCREMENT_EXPR:
+ dump_unary_op (opname_tab [(int)TREE_CODE (t)], t, nop);
+ break;
+
+ case POSTDECREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ OB_PUTC ('(');
+ dump_expr (TREE_OPERAND (t, 0), 0);
+ OB_PUTCP (opname_tab[(int)TREE_CODE (t)]);
+ OB_PUTC (')');
+ break;
+
+ case NON_LVALUE_EXPR:
+ /* FIXME: This is a KLUDGE workaround for a parsing problem. There
+ should be another level of INDIRECT_REF so that I don't have to do
+ this. */
+ if (NEXT_CODE (t) == POINTER_TYPE)
+ {
+ tree next = TREE_TYPE (TREE_TYPE (t));
+
+ while (TREE_CODE (next) == POINTER_TYPE)
+ next = TREE_TYPE (next);
+
+ if (TREE_CODE (next) == FUNCTION_TYPE)
+ {
+ if (!nop) OB_PUTC ('(');
+ OB_PUTC ('*');
+ dump_expr (TREE_OPERAND (t, 0), 1);
+ if (!nop) OB_PUTC (')');
+ break;
+ }
+ /* else FALLTHRU */
+ }
+ dump_expr (TREE_OPERAND (t, 0), 0);
+ break;
+
+ case NOP_EXPR:
+ dump_expr (TREE_OPERAND (t, 0), nop);
+ break;
+
+ case CONSTRUCTOR:
+ OB_PUTC ('{');
+      dump_expr_list (CONSTRUCTOR_ELTS (t));
+ OB_PUTC ('}');
+ break;
+
+ case OFFSET_REF:
+ {
+ tree ob = TREE_OPERAND (t, 0);
+ if (TREE_CODE (ob) == NOP_EXPR
+ && TREE_OPERAND (ob, 0) == error_mark_node
+ && TREE_CODE (TREE_OPERAND (t, 1)) == FUNCTION_DECL)
+ /* A::f */
+ dump_expr (TREE_OPERAND (t, 1), 0);
+ else
+ {
+ sorry ("operand of OFFSET_REF not understood");
+ goto error;
+ }
+ break;
+ }
+
+ /* This list is incomplete, but should suffice for now.
+ It is very important that `sorry' does not call
+ `report_error_function'. That could cause an infinite loop. */
+ default:
+ sorry ("`%s' not supported by dump_expr",
+ tree_code_name[(int) TREE_CODE (t)]);
+
+ /* fall through to ERROR_MARK... */
+ case ERROR_MARK:
+ error:
+ OB_PUTCP ("{error}");
+ break;
+ }
+}
+
+static void
+dump_binary_op (opstring, t)
+ char *opstring;
+ tree t;
+{
+ OB_PUTC ('(');
+ dump_expr (TREE_OPERAND (t, 0), 1);
+ OB_PUTC (' ');
+ OB_PUTCP (opstring);
+ OB_PUTC (' ');
+ dump_expr (TREE_OPERAND (t, 1), 1);
+ OB_PUTC (')');
+}
+
+static void
+dump_unary_op (opstring, t, nop)
+ char *opstring;
+ tree t;
+ int nop;
+{
+ if (!nop) OB_PUTC ('(');
+ OB_PUTCP (opstring);
+ dump_expr (TREE_OPERAND (t, 0), 1);
+ if (!nop) OB_PUTC (')');
+}
+
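+/* Printed form of FNDECL.  CNAME is accepted for compatibility with
+   existing callers but is not examined here.  */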
+char *
+fndecl_as_string (cname, fndecl, print_ret_type_p)
+ tree cname, fndecl;
+ int print_ret_type_p;
+{
+ return decl_as_string (fndecl, print_ret_type_p);
+}
+
+/* Same, but handle a _TYPE.
+ Called from convert_to_reference, mangle_class_name_for_template,
+ build_unary_op, and GNU_xref_decl. */
+char *
+type_as_string (typ, v)
+ tree typ;
+ int v;
+{
+ OB_INIT ();
+
+ dump_type (typ, v);
+
+ OB_FINISH ();
+
+ return (char *)obstack_base (&scratch_obstack);
+}
+
+char *
+expr_as_string (decl, v)
+ tree decl;
+ int v;
+{
+ OB_INIT ();
+
+ dump_expr (decl, 1);
+
+ OB_FINISH ();
+
+ return (char *)obstack_base (&scratch_obstack);
+}
+
+/* A cross between type_as_string and fndecl_as_string.
+ Only called from substitute_nice_name. */
+char *
+decl_as_string (decl, v)
+ tree decl;
+ int v;
+{
+ OB_INIT ();
+
+ dump_decl (decl, v);
+
+ OB_FINISH ();
+
+ return (char *)obstack_base (&scratch_obstack);
+}
+
+char *
+cp_file_of (t)
+ tree t;
+{
+ if (TREE_CODE (t) == PARM_DECL)
+ return DECL_SOURCE_FILE (DECL_CONTEXT (t));
+ else if (TREE_CODE_CLASS (TREE_CODE (t)) == 't')
+ return DECL_SOURCE_FILE (TYPE_NAME (t));
+ else
+ return DECL_SOURCE_FILE (t);
+}
+
+int
+cp_line_of (t)
+ tree t;
+{
+ if (TREE_CODE (t) == PARM_DECL)
+ return DECL_SOURCE_LINE (DECL_CONTEXT (t));
+ else if (TREE_CODE_CLASS (TREE_CODE (t)) == 't')
+ return DECL_SOURCE_LINE (TYPE_NAME (t));
+ else
+ return DECL_SOURCE_LINE (t);
+}
+
+char *
+code_as_string (c, v)
+ enum tree_code c;
+ int v;
+{
+ return tree_code_name [c];
+}
+
+char *
+language_as_string (c, v)
+ enum languages c;
+ int v;
+{
+ switch (c)
+ {
+ case lang_c:
+ return "C";
+
+ case lang_cplusplus:
+ return "C++";
+
+ default:
+ my_friendly_abort (355);
+ return 0;
+ }
+}
+
+/* Return the proper printed version of a parameter to a C++ function. */
+char *
+parm_as_string (p, v)
+ int p, v;
+{
+ if (p < 0)
+ return "`this'";
+
+ sprintf (digit_buffer, "%d", p+1);
+ return digit_buffer;
+}
+
+char *
+op_as_string (p, v)
+ enum tree_code p;
+ int v;
+{
+  /* Big enough for "operator " plus any operator name.  */
+  static char buf[64] = "operator ";
+
+ if (p == 0)
+ return "{unknown}";
+
+ strcpy (buf + 9, opname_tab [p]);
+ return buf;
+}
+
+char *
+args_as_string (p, v)
+ tree p;
+ int v;
+{
+ if (p == NULL_TREE)
+ return "...";
+
+ return type_as_string (p, v);
+}
diff --git a/gnu/usr.bin/cc/cc1plus/except.c b/gnu/usr.bin/cc/cc1plus/except.c
new file mode 100644
index 0000000..dc91b9d
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/except.c
@@ -0,0 +1,1481 @@
+/* Handle exceptional things in C++.
+ Copyright (C) 1989, 1992, 1993, 1994 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann <tiemann@cygnus.com>
+ Rewritten by Mike Stump <mrs@cygnus.com>, based upon an
+ initial re-implementation courtesy Tad Hunt.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* High-level class interface. */
+
+#include "config.h"
+#include "tree.h"
+#include "rtl.h"
+#include "cp-tree.h"
+#include "flags.h"
+#include "obstack.h"
+#include "expr.h"
+
+extern void (*interim_eh_hook) PROTO((tree));
+
+/* holds the fndecl for __builtin_return_address () */
+tree builtin_return_address_fndecl;
+
+/* Define at your own risk! */
+#ifndef CROSS_COMPILE
+#ifdef sun
+#ifdef sparc
+#define TRY_NEW_EH
+#endif
+#endif
+#endif
+
+#ifndef TRY_NEW_EH
+
+static void
+sorry_no_eh ()
+{
+ static int warned = 0;
+ if (! warned)
+ {
+ sorry ("exception handling not supported");
+ warned = 1;
+ }
+}
+
+void
+build_exception_table ()
+{
+}
+
+void
+expand_exception_blocks ()
+{
+}
+
+void
+start_protect ()
+{
+}
+
+void
+end_protect (finalization)
+ tree finalization;
+{
+}
+
+void
+expand_start_try_stmts ()
+{
+ sorry_no_eh ();
+}
+
+void
+expand_end_try_stmts ()
+{
+}
+
+void
+expand_start_all_catch ()
+{
+}
+
+void
+expand_end_all_catch ()
+{
+}
+
+void
+expand_start_catch_block (declspecs, declarator)
+ tree declspecs, declarator;
+{
+}
+
+void
+expand_end_catch_block ()
+{
+}
+
+void
+init_exception_processing ()
+{
+}
+
+void
+expand_throw (exp)
+ tree exp;
+{
+ sorry_no_eh ();
+}
+
+#else
+
+static int
+doing_eh (do_warn)
+ int do_warn;
+{
+ if (! flag_handle_exceptions)
+ {
+ static int warned = 0;
+ if (! warned && do_warn)
+ {
+ error ("exception handling disabled, use -fhandle-exceptions to enable.");
+ warned = 1;
+ }
+ return 0;
+ }
+ return 1;
+}
+
+
+/*
+NO GNEWS IS GOOD GNEWS WITH GARRY GNUS: This version is much closer
+to supporting exception handling as per Stroustrup's 2nd edition.
+It is a complete rewrite of all the EH stuff that was here before.
+ Shortcomings:
+ 1. The type of the throw and catch must still match
+ exactly (no support yet for matching base classes)
+     2. Throw specifications of functions still don't work.
+ Cool Things:
+ 1. Destructors are called properly :-)
+ 2. No overhead for the non-exception thrown case.
+ 3. Fixing shortcomings 1 and 2 is simple.
+ -Tad Hunt (tad@mail.csh.rit.edu)
+
+*/
+
+/* A couple of backend routines from m88k.c */
+
+/* used to cache a call to __builtin_return_address () */
+static tree BuiltinReturnAddress;
+
+
+
+
+
+#include <stdio.h>
+
+/* XXX - Tad: for EH */
+/* output an exception table entry */
+
+static void
+output_exception_table_entry (file, start_label, end_label, eh_label)
+ FILE *file;
+ rtx start_label, end_label, eh_label;
+{
+ char label[100];
+
+ fprintf (file, "\t%s\t ", ASM_LONG);
+ if (GET_CODE (start_label) == CODE_LABEL)
+ {
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (start_label));
+ assemble_name (file, label);
+ }
+ else if (GET_CODE (start_label) == SYMBOL_REF)
+ {
+ fprintf (stderr, "YYYYYYYYYEEEEEEEESSSSSSSSSSSS!!!!!!!!!!\n");
+ assemble_name (file, XSTR (start_label, 0));
+ }
+ putc ('\n', file);
+
+ fprintf (file, "\t%s\t ", ASM_LONG);
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (end_label));
+ assemble_name (file, label);
+ putc ('\n', file);
+
+ fprintf (file, "\t%s\t ", ASM_LONG);
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (eh_label));
+ assemble_name (file, label);
+ putc ('\n', file);
+
+ putc ('\n', file); /* blank line */
+}
+
+static void
+easy_expand_asm (str)
+ char *str;
+{
+ expand_asm (build_string (strlen (str)+1, str));
+}
+
+/* unwind the stack. */
+static void
+do_unwind (throw_label)
+ rtx throw_label;
+{
+#ifdef sparc
+ extern FILE *asm_out_file;
+ tree fcall;
+ tree params;
+ rtx return_val_rtx;
+
+ /* call to __builtin_return_address () */
+ params=tree_cons (NULL_TREE, integer_zero_node, NULL_TREE);
+ fcall = build_function_call (BuiltinReturnAddress, params);
+ return_val_rtx = expand_expr (fcall, NULL_RTX, SImode, 0);
+  /* In the return, the new pc is pc+8, as the value coming in is
+ really the address of the call insn, not the next insn. */
+ emit_move_insn (return_val_rtx, plus_constant(gen_rtx (LABEL_REF,
+ Pmode,
+ throw_label), -8));
+ /* We use three values, PC, type, and value */
+ easy_expand_asm ("st %l0,[%fp]");
+ easy_expand_asm ("st %l1,[%fp+4]");
+ easy_expand_asm ("st %l2,[%fp+8]");
+ easy_expand_asm ("ret");
+ easy_expand_asm ("restore");
+ emit_barrier ();
+#endif
+#if m88k
+ rtx temp_frame = frame_pointer_rtx;
+
+ temp_frame = memory_address (Pmode, temp_frame);
+ temp_frame = copy_to_reg (gen_rtx (MEM, Pmode, temp_frame));
+
+ /* hopefully this will successfully pop the frame! */
+ emit_move_insn (frame_pointer_rtx, temp_frame);
+ emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
+ emit_move_insn (arg_pointer_rtx, frame_pointer_rtx);
+ emit_insn (gen_add2_insn (stack_pointer_rtx, gen_rtx (CONST_INT, VOIDmode,
+ (HOST_WIDE_INT)m88k_debugger_offset (stack_pointer_rtx, 0))));
+
+#if 0
+ emit_insn (gen_add2_insn (arg_pointer_rtx, gen_rtx (CONST_INT, VOIDmode,
+ -(HOST_WIDE_INT)m88k_debugger_offset (arg_pointer_rtx, 0))));
+
+ emit_move_insn (stack_pointer_rtx, arg_pointer_rtx);
+
+ emit_insn (gen_add2_insn (stack_pointer_rtx, gen_rtx (CONST_INT, VOIDmode,
+ (HOST_WIDE_INT)m88k_debugger_offset (arg_pointer_rtx, 0))));
+#endif
+#endif
+}
+
+
+
+#if 0
+/* This is the startup, and finish stuff per exception table. */
+
+/* XXX - Tad: exception handling section */
+#ifndef EXCEPT_SECTION_ASM_OP
+#define EXCEPT_SECTION_ASM_OP "section\t.gcc_except_table,\"a\",@progbits"
+#endif
+
+#ifdef EXCEPT_SECTION_ASM_OP
+typedef struct {
+ void *start_protect;
+ void *end_protect;
+ void *exception_handler;
+ } exception_table;
+#endif /* EXCEPT_SECTION_ASM_OP */
+
+#ifdef EXCEPT_SECTION_ASM_OP
+
+ /* on machines which support it, the exception table lives in another section,
+ but it needs a label so we can reference it... This sets up that
+ label! */
+asm (EXCEPT_SECTION_ASM_OP);
+exception_table __EXCEPTION_TABLE__[1] = { (void*)0, (void*)0, (void*)0 };
+asm (TEXT_SECTION_ASM_OP);
+
+#endif /* EXCEPT_SECTION_ASM_OP */
+
+#ifdef EXCEPT_SECTION_ASM_OP
+
+ /* we need to know where the end of the exception table is... so this
+ is how we do it! */
+
+asm (EXCEPT_SECTION_ASM_OP);
+exception_table __EXCEPTION_END__[1] = { (void*)-1, (void*)-1, (void*)-1 };
+asm (TEXT_SECTION_ASM_OP);
+
+#endif /* EXCEPT_SECTION_ASM_OP */
+
+#endif
+
+void
+exception_section ()
+{
+#ifdef ASM_OUTPUT_SECTION_NAME
+ named_section (".gcc_except_table");
+#else
+ text_section ();
+#endif
+}
+
+
+
+
+/* from: my-cp-except.c */
+
+/* VI: ":set ts=4" */
+#if 0
+#include <stdio.h>
+#include "config.h"
+#include "tree.h"
+#include "rtl.h"
+#include "cp-tree.h"
+#endif
+#include "decl.h"
+#if 0
+#include "flags.h"
+#endif
+#include "insn-flags.h"
+#include "obstack.h"
+#if 0
+#include "expr.h"
+#endif
+
+/* ======================================================================
+ Briefly the algorithm works like this:
+
+ When a constructor or start of a try block is encountered,
+ push_eh_entry (&eh_stack) is called. Push_eh_entry () creates a
+ new entry in the unwind protection stack and returns a label to
+ output to start the protection for that block.
+
+ When a destructor or end try block is encountered, pop_eh_entry
+ (&eh_stack) is called. Pop_eh_entry () returns the ehEntry it
+ created when push_eh_entry () was called. The ehEntry structure
+ contains three things at this point. The start protect label,
+ the end protect label, and the exception handler label. The end
+ protect label should be output before the call to the destructor
+ (if any). If it was a destructor, then its parse tree is stored
+ in the finalization variable in the ehEntry structure. Otherwise
+ the finalization variable is set to NULL to reflect the fact that
+   this is the end of a try block.  Next, this modified ehEntry node
+ is enqueued in the finalizations queue by calling
+ enqueue_eh_entry (&queue,entry).
+
+ +---------------------------------------------------------------+
+ |XXX: Will need modification to deal with partially |
+ | constructed arrays of objects |
+ | |
+ | Basically, this consists of keeping track of how many |
+ | of the objects have been constructed already (this |
+ | should be in a register though, so that shouldn't be a |
+ | problem. |
+ +---------------------------------------------------------------+
+
+ When a catch block is encountered, there is a lot of work to be
+ done.
+
+ Since we don't want to generate the catch block inline with the
+ regular flow of the function, we need to have some way of doing
+ so. Luckily, we have a couple of routines "get_last_insn ()" and
+ "set_last_insn ()" provided. When the start of a catch block is
+ encountered, we save a pointer to the last insn generated. After
+ the catch block is generated, we save a pointer to the first
+ catch block insn and the last catch block insn with the routines
+ "NEXT_INSN ()" and "get_last_insn ()". We then set the last insn
+ to be the last insn generated before the catch block, and set the
+ NEXT_INSN (last_insn) to zero.
+
+ Since catch blocks might be nested inside other catch blocks, and
+ we munge the chain of generated insns after the catch block is
+ generated, we need to store the pointers to the last insn
+ generated in a stack, so that when the end of a catch block is
+ encountered, the last insn before the current catch block can be
+ popped and set to be the last insn, and the first and last insns
+ of the catch block just generated can be enqueue'd for output at
+ a later time.
+
+   Next we must ensure that when the catch block is executed, all
+ finalizations for the matching try block have been completed. If
+ any of those finalizations throw an exception, we must call
+ terminate according to the ARM (section r.15.6.1). What this
+ means is that we need to dequeue and emit finalizations for each
+ entry in the ehQueue until we get to an entry with a NULL
+ finalization field. For any of the finalization entries, if it
+ is not a call to terminate (), we must protect it by giving it
+ another start label, end label, and exception handler label,
+ setting its finalization tree to be a call to terminate (), and
+ enqueue'ing this new ehEntry to be output at an outer level.
+ Finally, after all that is done, we can get around to outputting
+ the catch block which basically wraps all the "catch (...) {...}"
+ statements in a big if/then/else construct that matches the
+ correct block to call.
+
+ ===================================================================== */
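+
+/* As a rough illustration of the protocol described above (an assumed
+   correspondence, for orientation only -- the parser drives these calls):
+
+     try {                --> expand_start_try_stmts ()
+       T t;               --> start_protect ()  (after t's constructor)
+     }                    --> end_protect (t's destructor), then
+                              expand_end_try_stmts ()
+     catch (E e) {        --> expand_start_all_catch (),
+                              expand_start_catch_block (E, e)
+     }                    --> expand_end_catch_block (),
+                              expand_end_all_catch ()
+*/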
+
+extern rtx emit_insn PROTO((rtx));
+extern rtx gen_nop PROTO(());
+
+/* local globals for function calls
+ ====================================================================== */
+
+/* used to cache "terminate ()", "unexpected ()", "set_terminate ()", and
+ "set_unexpected ()" after default_conversion. (lib-except.c) */
+static tree Terminate, Unexpected, SetTerminate, SetUnexpected, CatchMatch;
+
+/* used to cache __find_first_exception_table_match ()
+ for throw (lib-except.c) */
+static tree FirstExceptionMatch;
+
+/* used to cache a call to __unwind_function () (lib-except.c) */
+static tree Unwind;
+
+/* holds a ready to emit call to "terminate ()". */
+static tree TerminateFunctionCall;
+
+/* ====================================================================== */
+
+
+
+/* data structures for my various quick and dirty stacks and queues
+ Eventually, most of this should go away, because I think it can be
+ integrated with stuff already built into the compiler. */
+
+/* =================================================================== */
+
+struct labelNode {
+ rtx label;
+ struct labelNode *chain;
+ };
+
+
+/* this is the most important structure here. Basically this is how I store
+ an exception table entry internally. */
+struct ehEntry {
+ rtx start_label;
+ rtx end_label;
+ rtx exception_handler_label;
+
+ tree finalization;
+ };
+
+struct ehNode {
+ struct ehEntry *entry;
+ struct ehNode *chain;
+ };
+
+struct ehStack {
+ struct ehNode *top;
+ };
+
+struct ehQueue {
+ struct ehNode *head;
+ struct ehNode *tail;
+ };
+
+struct exceptNode {
+ rtx catchstart;
+ rtx catchend;
+
+ struct exceptNode *chain;
+ };
+
+struct exceptStack {
+ struct exceptNode *top;
+ };
+/* ========================================================================= */
+
+
+
+/* local globals - these local globals are for storing data necessary for
+ generating the exception table and code in the correct order.
+
+ ========================================================================= */
+
+/* Holds the pc for doing "throw" */
+rtx saved_pc;
+/* Holds the type of the thing being thrown. */
+rtx saved_throw_type;
+/* Holds the value being thrown. */
+rtx saved_throw_value;
+
+rtx throw_label;
+
+static struct ehStack ehstack;
+static struct ehQueue ehqueue;
+static struct ehQueue eh_table_output_queue;
+static struct exceptStack exceptstack;
+static struct labelNode *false_label_stack = NULL;
+static struct labelNode *caught_return_label_stack = NULL;
+/* ========================================================================= */
+
+/* function prototypes */
+static struct ehEntry *pop_eh_entry PROTO((struct ehStack *stack));
+static void enqueue_eh_entry PROTO((struct ehQueue *queue, struct ehEntry *entry));
+static void push_except_stmts PROTO((struct exceptStack *exceptstack,
+ rtx catchstart, rtx catchend));
+static int pop_except_stmts PROTO((struct exceptStack *exceptstack,
+ rtx *catchstart, rtx *catchend));
+static rtx push_eh_entry PROTO((struct ehStack *stack));
+static struct ehEntry *dequeue_eh_entry PROTO((struct ehQueue *queue));
+static void new_eh_queue PROTO((struct ehQueue *queue));
+static void new_eh_stack PROTO((struct ehStack *stack));
+static void new_except_stack PROTO((struct exceptStack *queue));
+static void push_last_insn PROTO(());
+static rtx pop_last_insn PROTO(());
+static void push_label_entry PROTO((struct labelNode **labelstack, rtx label));
+static rtx pop_label_entry PROTO((struct labelNode **labelstack));
+static rtx top_label_entry PROTO((struct labelNode **labelstack));
+static struct ehEntry *copy_eh_entry PROTO((struct ehEntry *entry));
+
+
+
+/* All my cheesy stack/queue/misc data structure handling routines
+
+ ========================================================================= */
+
+static void
+push_label_entry (labelstack, label)
+ struct labelNode **labelstack;
+ rtx label;
+{
+ struct labelNode *newnode=(struct labelNode*)xmalloc (sizeof (struct labelNode));
+
+ newnode->label = label;
+ newnode->chain = *labelstack;
+ *labelstack = newnode;
+}
+
+static rtx
+pop_label_entry (labelstack)
+ struct labelNode **labelstack;
+{
+ rtx label;
+ struct labelNode *tempnode;
+
+ if (! *labelstack) return NULL_RTX;
+
+ tempnode = *labelstack;
+ label = tempnode->label;
+ *labelstack = (*labelstack)->chain;
+ free (tempnode);
+
+ return label;
+}
+
+static rtx
+top_label_entry (labelstack)
+ struct labelNode **labelstack;
+{
+ if (! *labelstack) return NULL_RTX;
+
+ return (*labelstack)->label;
+}
+
+static void
+push_except_stmts (exceptstack, catchstart, catchend)
+ struct exceptStack *exceptstack;
+ rtx catchstart, catchend;
+{
+ struct exceptNode *newnode = (struct exceptNode*)
+ xmalloc (sizeof (struct exceptNode));
+
+ newnode->catchstart = catchstart;
+ newnode->catchend = catchend;
+ newnode->chain = exceptstack->top;
+
+ exceptstack->top = newnode;
+}
+
+static int
+pop_except_stmts (exceptstack, catchstart, catchend)
+ struct exceptStack *exceptstack;
+ rtx *catchstart, *catchend;
+{
+ struct exceptNode *tempnode;
+
+ if (!exceptstack->top) {
+ *catchstart = *catchend = NULL_RTX;
+ return 0;
+ }
+
+ tempnode = exceptstack->top;
+ exceptstack->top = exceptstack->top->chain;
+
+ *catchstart = tempnode->catchstart;
+ *catchend = tempnode->catchend;
+ free (tempnode);
+
+ return 1;
+}
+
+/* Push to permanent obstack for rtl generation.
+ One level only! */
+static struct obstack *saved_rtl_obstack;
+void
+push_rtl_perm ()
+{
+ extern struct obstack permanent_obstack;
+ extern struct obstack *rtl_obstack;
+
+ saved_rtl_obstack = rtl_obstack;
+ rtl_obstack = &permanent_obstack;
+}
+
+/* Pop back to normal rtl handling. */
+static void
+pop_rtl_from_perm ()
+{
+ extern struct obstack permanent_obstack;
+ extern struct obstack *rtl_obstack;
+
+ rtl_obstack = saved_rtl_obstack;
+}
+
+static rtx
+push_eh_entry (stack)
+ struct ehStack *stack;
+{
+ struct ehNode *node = (struct ehNode*)xmalloc (sizeof (struct ehNode));
+ struct ehEntry *entry = (struct ehEntry*)xmalloc (sizeof (struct ehEntry));
+
+ if (stack == NULL) {
+ free (node);
+ free (entry);
+ return NULL_RTX;
+ }
+
+ /* These are saved for the exception table. */
+ push_rtl_perm ();
+ entry->start_label = gen_label_rtx ();
+ entry->end_label = gen_label_rtx ();
+ entry->exception_handler_label = gen_label_rtx ();
+ pop_rtl_from_perm ();
+
+ entry->finalization = NULL_TREE;
+
+ node->entry = entry;
+ node->chain = stack->top;
+ stack->top = node;
+
+ enqueue_eh_entry (&eh_table_output_queue, copy_eh_entry (entry));
+
+ return entry->start_label;
+}
+
+static struct ehEntry *
+pop_eh_entry (stack)
+ struct ehStack *stack;
+{
+ struct ehNode *tempnode;
+ struct ehEntry *tempentry;
+
+ if (stack && (tempnode = stack->top)) {
+ tempentry = tempnode->entry;
+ stack->top = stack->top->chain;
+ free (tempnode);
+
+ return tempentry;
+ }
+
+ return NULL;
+}
+
+static struct ehEntry *
+copy_eh_entry (entry)
+ struct ehEntry *entry;
+{
+ struct ehEntry *newentry;
+
+ newentry = (struct ehEntry*)xmalloc (sizeof (struct ehEntry));
+ memcpy ((void*)newentry, (void*)entry, sizeof (struct ehEntry));
+
+ return newentry;
+}
+
+static void
+enqueue_eh_entry (queue, entry)
+ struct ehQueue *queue;
+ struct ehEntry *entry;
+{
+ struct ehNode *node = (struct ehNode*)xmalloc (sizeof (struct ehNode));
+
+ node->entry = entry;
+ node->chain = NULL;
+
+ if (queue->head == NULL)
+ {
+ queue->head = node;
+ }
+ else
+ {
+ queue->tail->chain = node;
+ }
+ queue->tail = node;
+}
+
+static struct ehEntry *
+dequeue_eh_entry (queue)
+ struct ehQueue *queue;
+{
+ struct ehNode *tempnode;
+ struct ehEntry *tempentry;
+
+ if (queue->head == NULL)
+ return NULL;
+
+ tempnode = queue->head;
+ queue->head = queue->head->chain;
+
+ tempentry = tempnode->entry;
+ free (tempnode);
+
+ return tempentry;
+}
+
+static void
+new_eh_queue (queue)
+ struct ehQueue *queue;
+{
+ queue->head = queue->tail = NULL;
+}
+
+static void
+new_eh_stack (stack)
+ struct ehStack *stack;
+{
+ stack->top = NULL;
+}
+
+static void
+new_except_stack (stack)
+ struct exceptStack *stack;
+{
+ stack->top = NULL;
+}
+/* ========================================================================= */
+
+void
+lang_interim_eh (finalization)
+ tree finalization;
+{
+ if (finalization)
+ end_protect (finalization);
+ else
+ start_protect ();
+}
+
+/* sets up all the global eh stuff that needs to be initialized at the
+ start of compilation.
+
+ This includes:
+ - Setting up all the function call trees
+ - Initializing the ehqueue
+ - Initializing the eh_table_output_queue
+ - Initializing the ehstack
+ - Initializing the exceptstack
+*/
+
+void
+init_exception_processing ()
+{
+ extern tree define_function ();
+ tree unexpected_fndecl, terminate_fndecl;
+ tree set_unexpected_fndecl, set_terminate_fndecl;
+ tree catch_match_fndecl;
+ tree find_first_exception_match_fndecl;
+ tree unwind_fndecl;
+ tree temp, PFV;
+
+ interim_eh_hook = lang_interim_eh;
+
+ /* void (*)() */
+ PFV = build_pointer_type (build_function_type (void_type_node, void_list_node));
+
+ /* arg list for the build_function_type call for set_terminate () and
+ set_unexpected () */
+ temp = tree_cons (NULL_TREE, PFV, void_list_node);
+
+ push_lang_context (lang_name_c);
+
+ set_terminate_fndecl =
+ define_function ("set_terminate",
+ build_function_type (PFV, temp),
+ NOT_BUILT_IN,
+ pushdecl,
+ 0);
+ set_unexpected_fndecl =
+ define_function ("set_unexpected",
+ build_function_type (PFV, temp),
+ NOT_BUILT_IN,
+ pushdecl,
+ 0);
+
+ unexpected_fndecl =
+ define_function ("unexpected",
+ build_function_type (void_type_node, void_list_node),
+ NOT_BUILT_IN,
+ pushdecl,
+ 0);
+ terminate_fndecl =
+ define_function ("terminate",
+ build_function_type (void_type_node, void_list_node),
+ NOT_BUILT_IN,
+ pushdecl,
+ 0);
+ catch_match_fndecl =
+ define_function ("__throw_type_match",
+ build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, string_type_node, tree_cons (NULL_TREE, ptr_type_node, void_list_node))),
+ NOT_BUILT_IN,
+ pushdecl,
+ 0);
+ find_first_exception_match_fndecl =
+ define_function ("__find_first_exception_table_match",
+ build_function_type (ptr_type_node,
+ tree_cons (NULL_TREE, ptr_type_node,
+ void_list_node)),
+ NOT_BUILT_IN,
+ pushdecl,
+ 0);
+ unwind_fndecl =
+ define_function ("__unwind_function",
+ build_function_type (void_type_node,
+ tree_cons (NULL_TREE, ptr_type_node, void_list_node)),
+ NOT_BUILT_IN,
+ pushdecl,
+ 0);
+
+ Unexpected = default_conversion (unexpected_fndecl);
+ Terminate = default_conversion (terminate_fndecl);
+ SetTerminate = default_conversion (set_terminate_fndecl);
+ SetUnexpected = default_conversion (set_unexpected_fndecl);
+ CatchMatch = default_conversion (catch_match_fndecl);
+ FirstExceptionMatch = default_conversion (find_first_exception_match_fndecl);
+ Unwind = default_conversion (unwind_fndecl);
+ BuiltinReturnAddress = default_conversion (builtin_return_address_fndecl);
+
+ TerminateFunctionCall = build_function_call (Terminate, NULL_TREE);
+
+ pop_lang_context ();
+ throw_label = gen_label_rtx ();
+ saved_pc = gen_rtx (REG, Pmode, 16);
+ saved_throw_type = gen_rtx (REG, Pmode, 17);
+ saved_throw_value = gen_rtx (REG, Pmode, 18);
+
+ new_eh_queue (&ehqueue);
+ new_eh_queue (&eh_table_output_queue);
+ new_eh_stack (&ehstack);
+ new_except_stack (&exceptstack);
+}
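+
+/* In C terms, the runtime interface declared above amounts to this
+   (a sketch; the real declarations are built as trees):
+
+     void (*set_terminate (void (*)()))();
+     void (*set_unexpected (void (*)()))();
+     void terminate (), unexpected ();
+     int __throw_type_match (char *typestring, void *throw_type);
+     void *__find_first_exception_table_match (void *pc);
+     void __unwind_function (void *);
+*/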
+
+/* call this to begin a block of unwind protection (ie: when an object is
+ constructed) */
+void
+start_protect ()
+{
+ if (doing_eh (0))
+ {
+ emit_label (push_eh_entry (&ehstack));
+ }
+}
+
+/* call this to end a block of unwind protection. the finalization tree is
+ the finalization which needs to be run in order to cleanly unwind through
+   this level of protection.  (ie: call this when a scope is exited) */
+void
+end_protect (finalization)
+ tree finalization;
+{
+ struct ehEntry *entry = pop_eh_entry (&ehstack);
+
+ if (! doing_eh (0))
+ return;
+
+ emit_label (entry->end_label);
+
+ entry->finalization = finalization;
+
+ enqueue_eh_entry (&ehqueue, entry);
+}
+
+/* call this on start of a try block. */
+void
+expand_start_try_stmts ()
+{
+ if (doing_eh (1))
+ {
+ start_protect ();
+ }
+}
+
+void
+expand_end_try_stmts ()
+{
+ end_protect (integer_zero_node);
+}
+
+struct insn_save_node {
+ rtx last;
+ struct insn_save_node *chain;
+ };
+
+static struct insn_save_node *InsnSave = NULL;
+
+
+/* Used to keep track of where the catch blocks start. */
+static void
+push_last_insn ()
+{
+ struct insn_save_node *newnode = (struct insn_save_node*)
+ xmalloc (sizeof (struct insn_save_node));
+
+ newnode->last = get_last_insn ();
+ newnode->chain = InsnSave;
+ InsnSave = newnode;
+}
+
+/* Used to keep track of where the catch blocks start. */
+static rtx
+pop_last_insn ()
+{
+ struct insn_save_node *tempnode;
+ rtx temprtx;
+
+ if (!InsnSave) return NULL_RTX;
+
+ tempnode = InsnSave;
+ temprtx = tempnode->last;
+ InsnSave = InsnSave->chain;
+
+ free (tempnode);
+
+ return temprtx;
+}
+
+/* call this to start processing of all the catch blocks. */
+void
+expand_start_all_catch ()
+{
+ struct ehEntry *entry;
+ rtx label;
+
+ if (! doing_eh (1))
+ return;
+
+ emit_line_note (input_filename, lineno);
+ label = gen_label_rtx ();
+ /* The label for the exception handling block we will save. */
+ emit_label (label);
+
+ push_label_entry (&caught_return_label_stack, label);
+
+ /* Remember where we started. */
+ push_last_insn ();
+
+ emit_insn (gen_nop ());
+
+ /* Will this help us not stomp on it? */
+ emit_insn (gen_rtx (USE, VOIDmode, saved_throw_type));
+ emit_insn (gen_rtx (USE, VOIDmode, saved_throw_value));
+
+ while (1)
+ {
+ entry = dequeue_eh_entry (&ehqueue);
+ emit_label (entry->exception_handler_label);
+
+ expand_expr (entry->finalization, const0_rtx, VOIDmode, 0);
+
+ /* When we get down to the matching entry, stop. */
+ if (entry->finalization == integer_zero_node)
+ break;
+
+ free (entry);
+ }
+
+  /* This jump can go away once the code below is moved out of line. */
+#if 1
+ label = gen_label_rtx ();
+ emit_jump (label);
+#endif
+
+ /* All this should be out of line, and saved back in the exception handler
+ block area. */
+#if 1
+ entry->start_label = entry->exception_handler_label;
+ /* These are saved for the exception table. */
+ push_rtl_perm ();
+ entry->end_label = gen_label_rtx ();
+ entry->exception_handler_label = gen_label_rtx ();
+ entry->finalization = TerminateFunctionCall;
+ pop_rtl_from_perm ();
+ emit_label (entry->end_label);
+
+ enqueue_eh_entry (&eh_table_output_queue, copy_eh_entry (entry));
+
+ /* After running the finalization, continue on out to the next
+ cleanup, if we have nothing better to do. */
+ emit_move_insn (saved_pc, gen_rtx (LABEL_REF, Pmode, entry->end_label));
+ /* Will this help us not stomp on it? */
+ emit_insn (gen_rtx (USE, VOIDmode, saved_throw_type));
+ emit_insn (gen_rtx (USE, VOIDmode, saved_throw_value));
+ emit_jump (throw_label);
+ emit_label (entry->exception_handler_label);
+ expand_expr (entry->finalization, const0_rtx, VOIDmode, 0);
+ emit_barrier ();
+#endif
+ emit_label (label);
+}
+
+/* call this to end processing of all the catch blocks. */
+void
+expand_end_all_catch ()
+{
+ rtx catchstart, catchend, last;
+ rtx label;
+
+ if (! doing_eh (1))
+ return;
+
+ /* Find the start of the catch block. */
+ last = pop_last_insn ();
+ catchstart = NEXT_INSN (last);
+ catchend = get_last_insn ();
+
+ NEXT_INSN (last) = 0;
+ set_last_insn (last);
+
+ /* this level of catch blocks is done, so set up the successful catch jump
+ label for the next layer of catch blocks. */
+ pop_label_entry (&caught_return_label_stack);
+
+ push_except_stmts (&exceptstack, catchstart, catchend);
+
+ /* Here we fall through into the continuation code. */
+}
+
+
+/* this is called from expand_exception_blocks () to expand the toplevel
+ finalizations for a function. */
+void
+expand_leftover_cleanups ()
+{
+ struct ehEntry *entry;
+ rtx first_label = NULL_RTX;
+
+ if (! doing_eh (0))
+ return;
+
+ /* Will this help us not stomp on it? */
+ emit_insn (gen_rtx (USE, VOIDmode, saved_throw_type));
+ emit_insn (gen_rtx (USE, VOIDmode, saved_throw_value));
+
+ while ((entry = dequeue_eh_entry (&ehqueue)) != 0)
+ {
+ if (! first_label)
+ first_label = entry->exception_handler_label;
+ emit_label (entry->exception_handler_label);
+
+ expand_expr (entry->finalization, const0_rtx, VOIDmode, 0);
+
+      /* leftover try block, oops. */
+ if (entry->finalization == integer_zero_node)
+ abort ();
+
+ free (entry);
+ }
+ if (first_label)
+ {
+ rtx label;
+ struct ehEntry entry;
+ /* These are saved for the exception table. */
+ push_rtl_perm ();
+ label = gen_label_rtx ();
+ entry.start_label = first_label;
+ entry.end_label = label;
+ entry.exception_handler_label = gen_label_rtx ();
+ entry.finalization = TerminateFunctionCall;
+ pop_rtl_from_perm ();
+ emit_label (label);
+
+ enqueue_eh_entry (&eh_table_output_queue, copy_eh_entry (&entry));
+
+ /* After running the finalization, continue on out to the next
+ cleanup, if we have nothing better to do. */
+ emit_move_insn (saved_pc, gen_rtx (LABEL_REF, Pmode, entry.end_label));
+ /* Will this help us not stomp on it? */
+ emit_insn (gen_rtx (USE, VOIDmode, saved_throw_type));
+ emit_insn (gen_rtx (USE, VOIDmode, saved_throw_value));
+ emit_jump (throw_label);
+ emit_label (entry.exception_handler_label);
+ expand_expr (entry.finalization, const0_rtx, VOIDmode, 0);
+ emit_barrier ();
+ }
+}
+
+/* call this to start a catch block. Typename is the typename, and identifier
+ is the variable to place the object in or NULL if the variable doesn't
+   matter.  If typename is NULL, that means it's a "catch (...)" or catch
+ everything. In that case we don't need to do any type checking.
+ (ie: it ends up as the "else" clause rather than an "else if" clause) */
+void
+expand_start_catch_block (declspecs, declarator)
+ tree declspecs, declarator;
+{
+ rtx false_label_rtx;
+ rtx protect_label_rtx;
+ tree type;
+ tree decl;
+ tree init;
+
+ if (! doing_eh (1))
+ return;
+
+ /* Create a binding level for the parm. */
+ expand_start_bindings (0);
+
+ if (declspecs)
+ {
+ tree init_type;
+ decl = grokdeclarator (declarator, declspecs, NORMAL, 1, NULL_TREE);
+
+ /* Figure out the type that the initializer is. */
+ init_type = TREE_TYPE (decl);
+ if (TREE_CODE (init_type) != REFERENCE_TYPE)
+ init_type = build_reference_type (init_type);
+
+ init = convert_from_reference (save_expr (make_tree (init_type, saved_throw_value)));
+
+ /* Do we need the below two lines? */
+ /* Let `finish_decl' know that this initializer is ok. */
+ DECL_INITIAL (decl) = init;
+ /* This needs to be preallocated under the try block,
+ in a union of all catch variables. */
+ pushdecl (decl);
+ type = TREE_TYPE (decl);
+
+ /* peel back references, so they match. */
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+ }
+ else
+ type = NULL_TREE;
+
+ false_label_rtx = gen_label_rtx ();
+ push_label_entry (&false_label_stack, false_label_rtx);
+
+ /* This is saved for the exception table. */
+ push_rtl_perm ();
+ protect_label_rtx = gen_label_rtx ();
+ pop_rtl_from_perm ();
+ push_label_entry (&false_label_stack, protect_label_rtx);
+
+ if (type)
+ {
+ tree params;
+ char *typestring;
+ rtx call_rtx, return_value_rtx;
+ tree catch_match_fcall;
+ tree catchmatch_arg, argval;
+
+ typestring = build_overload_name (type, 1, 1);
+
+ params = tree_cons (NULL_TREE,
+ combine_strings (build_string (strlen (typestring)+1, typestring)),
+ tree_cons (NULL_TREE,
+ make_tree (ptr_type_node, saved_throw_type),
+ NULL_TREE));
+ catch_match_fcall = build_function_call (CatchMatch, params);
+ call_rtx = expand_call (catch_match_fcall, NULL_RTX, 0);
+
+ return_value_rtx =
+ hard_function_value (integer_type_node, catch_match_fcall);
+
+ /* did the throw type match function return TRUE? */
+ emit_cmp_insn (return_value_rtx, const0_rtx, NE, NULL_RTX,
+ GET_MODE (return_value_rtx), 0, 0);
+
+ /* if it returned FALSE, jump over the catch block, else fall into it */
+ emit_jump_insn (gen_bne (false_label_rtx));
+ finish_decl (decl, init, NULL_TREE, 0);
+ }
+ else
+ {
+ /* Fall into the catch all section. */
+ }
+
+ /* This is the starting of something to protect. */
+ emit_label (protect_label_rtx);
+
+ emit_line_note (input_filename, lineno);
+}
+
+
+/* Call this to end a catch block.  It's responsible for emitting the
+ code to handle jumping back to the correct place, and for emitting
+ the label to jump to if this catch block didn't match. */
+void expand_end_catch_block ()
+{
+ if (doing_eh (1))
+ {
+ rtx start_protect_label_rtx;
+ rtx end_protect_label_rtx;
+ tree decls;
+ struct ehEntry entry;
+
+ /* label we jump to if we caught the exception */
+ emit_jump (top_label_entry (&caught_return_label_stack));
+
+      /* Code to throw out to the outer context, if we get a throw from within
+ our catch handler. */
+ /* These are saved for the exception table. */
+ push_rtl_perm ();
+ entry.exception_handler_label = gen_label_rtx ();
+ pop_rtl_from_perm ();
+ emit_label (entry.exception_handler_label);
+ emit_move_insn (saved_pc, gen_rtx (LABEL_REF,
+ Pmode,
+ top_label_entry (&caught_return_label_stack)));
+ emit_jump (throw_label);
+ /* No associated finalization. */
+ entry.finalization = NULL_TREE;
+
+ /* Because we are reordered out of line, we have to protect this. */
+ /* label for the start of the protection region. */
+ start_protect_label_rtx = pop_label_entry (&false_label_stack);
+
+      /* Clean up the EH parameter. */
+ expand_end_bindings (decls = getdecls (), decls != NULL_TREE, 0);
+
+ /* label we emit to jump to if this catch block didn't match. */
+ emit_label (end_protect_label_rtx = pop_label_entry (&false_label_stack));
+
+ /* Because we are reordered out of line, we have to protect this. */
+ entry.start_label = start_protect_label_rtx;
+ entry.end_label = end_protect_label_rtx;
+
+ /* These set up a call to throw the caught exception into the outer
+ context. */
+ enqueue_eh_entry (&eh_table_output_queue, copy_eh_entry (&entry));
+ }
+}
+
+/* cheesiness to save some typing. returns the return value rtx */
+rtx
+do_function_call (func, params, return_type)
+ tree func, params, return_type;
+{
+ tree func_call;
+ func_call = build_function_call (func, params);
+ expand_call (func_call, NULL_RTX, 0);
+ if (return_type != NULL_TREE)
+ return hard_function_value (return_type, func_call);
+ return NULL_RTX;
+}
+
+
+/* This is called from expand_exception_blocks () to generate the code in a
+   function to "throw" if anything in the function needs to perform a throw.
+
+   It expands "throw" as the following pseudo code:
+
+ throw:
+ eh = find_first_exception_match (saved_pc);
+ if (!eh) goto gotta_rethrow_it;
+ goto eh;
+
+ gotta_rethrow_it:
+ saved_pc = __builtin_return_address (0);
+ pop_to_previous_level ();
+ goto throw;
+
+ */
+static void
+expand_builtin_throw ()
+{
+ tree fcall;
+ tree params;
+ rtx return_val_rtx;
+ rtx gotta_rethrow_it = gen_label_rtx ();
+ rtx gotta_call_terminate = gen_label_rtx ();
+ rtx unwind_and_throw = gen_label_rtx ();
+ rtx goto_unwind_and_throw = gen_label_rtx ();
+
+ emit_label (throw_label);
+
+ /* search for an exception handler for the saved_pc */
+ return_val_rtx = do_function_call (FirstExceptionMatch,
+ tree_cons (NULL_TREE, make_tree (ptr_type_node, saved_pc), NULL_TREE),
+ ptr_type_node);
+
+ /* did we find one? */
+ emit_cmp_insn (return_val_rtx, const0_rtx, EQ, NULL_RTX,
+ GET_MODE (return_val_rtx), 0, 0);
+
+ /* if not, jump to gotta_rethrow_it */
+ emit_jump_insn (gen_beq (gotta_rethrow_it));
+
+ /* we found it, so jump to it */
+ emit_indirect_jump (return_val_rtx);
+
+ /* code to deal with unwinding and looking for it again */
+ emit_label (gotta_rethrow_it);
+
+ /* call to __builtin_return_address () */
+ params=tree_cons (NULL_TREE, integer_zero_node, NULL_TREE);
+ fcall = build_function_call (BuiltinReturnAddress, params);
+ return_val_rtx = expand_expr (fcall, NULL_RTX, SImode, 0);
+
+ /* did __builtin_return_address () return a valid address? */
+ emit_cmp_insn (return_val_rtx, const0_rtx, EQ, NULL_RTX,
+ GET_MODE (return_val_rtx), 0, 0);
+
+ emit_jump_insn (gen_beq (gotta_call_terminate));
+
+ /* yes it did */
+ emit_move_insn (saved_pc, return_val_rtx);
+ do_unwind (throw_label);
+ emit_jump (throw_label);
+
+ /* no it didn't --> therefore we need to call terminate */
+ emit_label (gotta_call_terminate);
+ do_function_call (Terminate, NULL_TREE, NULL_TREE);
+}
+
+
+/* This is called to expand all the toplevel exception handling
+ finalization for a function. It should only be called once per
+ function. */
+void
+expand_exception_blocks ()
+{
+ rtx catchstart, catchend;
+ rtx last;
+ static rtx funcend;
+
+ funcend = gen_label_rtx ();
+ emit_jump (funcend);
+ /* expand_null_return (); */
+
+ while (pop_except_stmts (&exceptstack, &catchstart, &catchend)) {
+ last = get_last_insn ();
+ NEXT_INSN (last) = catchstart;
+ PREV_INSN (catchstart) = last;
+ NEXT_INSN (catchend) = 0;
+ set_last_insn (catchend);
+ }
+
+ expand_leftover_cleanups ();
+
+ {
+ static int have_done = 0;
+ if (! have_done && TREE_PUBLIC (current_function_decl)
+ && ! DECL_INLINE (current_function_decl))
+ {
+ have_done = 1;
+ expand_builtin_throw ();
+ }
+ }
+ emit_label (funcend);
+}
+
+
+/* call this to expand a throw statement. This follows the following
+ algorithm:
+
+ 1. Allocate space to save the current PC onto the stack.
+ 2. Generate and emit a label and save its address into the
+      newly allocated stack space, since we can't save the pc directly.
+ 3. If this is the first call to throw in this function:
+ generate a label for the throw block
+ 4. jump to the throw block label. */
+void
+expand_throw (exp)
+ tree exp;
+{
+ rtx label;
+ tree type;
+
+ if (! doing_eh (1))
+ return;
+
+  /* This is the label that represents where in the code we were when
+     we got an exception.  This needs to be updated when we rethrow an
+     exception, so that the matching routine knows where to resume the
+     search. */
+ label = gen_label_rtx ();
+ emit_label (label);
+ emit_move_insn (saved_pc, gen_rtx (LABEL_REF, Pmode, label));
+
+ if (exp)
+ {
+ /* throw expression */
+ /* First, decay it. */
+ exp = default_conversion (exp);
+ type = TREE_TYPE (exp);
+
+ {
+ char *typestring = build_overload_name (type, 1, 1);
+ tree throw_type = build1 (ADDR_EXPR, ptr_type_node, combine_strings (build_string (strlen (typestring)+1, typestring)));
+ rtx throw_type_rtx = expand_expr (throw_type, NULL_RTX, VOIDmode, 0);
+ rtx throw_value_rtx;
+
+ emit_move_insn (saved_throw_type, throw_type_rtx);
+ exp = convert_to_reference (build_reference_type (build_type_variant (TREE_TYPE (exp), 1, 0)), exp, CONV_STATIC, LOOKUP_COMPLAIN, NULL_TREE);
+ if (exp == error_mark_node)
+ error (" in thrown expression");
+ throw_value_rtx = expand_expr (build_unary_op (ADDR_EXPR, exp, 0), NULL_RTX, VOIDmode, 0);
+ emit_move_insn (saved_throw_value, throw_value_rtx);
+ }
+ }
+ else
+ {
+ /* rethrow current exception */
+      /* This part is easy, as we don't have to do anything else. */
+ }
+
+ emit_jump (throw_label);
+}
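+
+/* So, informally, `throw e;' expands to something like this (following
+   the code above; saved_pc and friends are the registers set up in
+   init_exception_processing):
+
+     Lhere:
+       saved_pc          = address of Lhere;
+       saved_throw_type  = address of the type string for e's type;
+       saved_throw_value = address of the thrown value;
+       goto throw_label;
+*/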
+
+
+/* output the exception table */
+void
+build_exception_table ()
+{
+ extern FILE *asm_out_file;
+ struct ehEntry *entry;
+
+ if (! doing_eh (0))
+ return;
+
+ exception_section ();
+
+ /* Beginning marker for table. */
+ fprintf (asm_out_file, " .global ___EXCEPTION_TABLE__\n");
+ fprintf (asm_out_file, " .align 4\n");
+ fprintf (asm_out_file, "___EXCEPTION_TABLE__:\n");
+ fprintf (asm_out_file, " .word 0, 0, 0\n");
+
+ while (entry = dequeue_eh_entry (&eh_table_output_queue)) {
+ output_exception_table_entry (asm_out_file,
+ entry->start_label, entry->end_label, entry->exception_handler_label);
+ }
+
+ /* Ending marker for table. */
+ fprintf (asm_out_file, " .global ___EXCEPTION_END__\n");
+ fprintf (asm_out_file, "___EXCEPTION_END__:\n");
+ fprintf (asm_out_file, " .word -1, -1, -1\n");
+}
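+
+/* For reference, the table emitted above has roughly this shape (the
+   exact directives depend on ASM_LONG and the target assembler):
+
+   ___EXCEPTION_TABLE__:
+       .word 0, 0, 0                     begin-of-table marker
+       .long Lstart, Lend, Lhandler      one triple per protected region
+       ...
+   ___EXCEPTION_END__:
+       .word -1, -1, -1                  end-of-table marker
+*/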
+
+/* end of: my-cp-except.c */
+#endif
+
+
+/* Build a throw expression. */
+tree
+build_throw (e)
+ tree e;
+{
+ e = build1 (THROW_EXPR, void_type_node, e);
+ TREE_SIDE_EFFECTS (e) = 1;
+ return e;
+}
diff --git a/gnu/usr.bin/cc/cc1plus/expr.c b/gnu/usr.bin/cc/cc1plus/expr.c
new file mode 100644
index 0000000..c2213d5
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/expr.c
@@ -0,0 +1,275 @@
+/* Convert language-specific tree expression to rtl instructions,
+ for GNU compiler.
+ Copyright (C) 1988, 1992, 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "expr.h"
+#include "cp-tree.h"
+
+#undef NULL
+#define NULL 0
+
+/* Hook used by expand_expr to expand language-specific tree codes. */
+
+rtx
+cplus_expand_expr (exp, target, tmode, modifier)
+ tree exp;
+ rtx target;
+ enum machine_mode tmode;
+ enum expand_modifier modifier;
+{
+ tree type = TREE_TYPE (exp);
+ register enum machine_mode mode = TYPE_MODE (type);
+ register enum tree_code code = TREE_CODE (exp);
+ rtx original_target = target;
+ int ignore = target == const0_rtx;
+
+ if (ignore)
+ target = 0, original_target = 0;
+
+ /* No sense saving up arithmetic to be done
+ if it's all in the wrong mode to form part of an address.
+ And force_operand won't know whether to sign-extend or zero-extend. */
+
+ if (mode != Pmode && modifier == EXPAND_SUM)
+ modifier = EXPAND_NORMAL;
+
+ switch (code)
+ {
+ case NEW_EXPR:
+ {
+ /* Something needs to be initialized, but we didn't know
+ where that thing was when building the tree. For example,
+ it could be the return value of a function, or a parameter
+	 to a function which lives on the stack, or a temporary
+ variable which must be passed by reference.
+
+ Cleanups are handled in a language-specific way: they
+ might be run by the called function (true in GNU C++
+ for parameters with cleanups), or they might be
+ run by the caller, after the call (true in GNU C++
+ for other cleanup needs). */
+
+ tree func = TREE_OPERAND (exp, 0);
+ tree args = TREE_OPERAND (exp, 1);
+ tree type = TREE_TYPE (exp), slot;
+ tree fn_type = TREE_TYPE (TREE_TYPE (func));
+ tree return_type = TREE_TYPE (fn_type);
+ tree call_exp;
+ rtx call_target, return_target;
+ int pcc_struct_return = 0;
+
+ /* The expression `init' wants to initialize what
+ `target' represents. SLOT holds the slot for TARGET. */
+ slot = TREE_OPERAND (exp, 2);
+
+ if (target == 0)
+ {
+	    /* We should always be called with a target in the BLKmode case. */
+ my_friendly_assert (mode != BLKmode, 205);
+ my_friendly_assert (DECL_RTL (slot) != 0, 206);
+
+ target = gen_reg_rtx (mode);
+ }
+
+ /* The target the initializer will initialize (CALL_TARGET)
+ must now be directed to initialize the target we are
+	 supposed to initialize (TARGET).  The semantics of
+	 choosing CALL_TARGET are language-specific,
+ as is building the call which will perform the
+ initialization. It is left here to show the choices that
+ exist for C++. */
+
+ if (TREE_CODE (func) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (func, 0)) == FUNCTION_DECL
+ && DECL_CONSTRUCTOR_P (TREE_OPERAND (func, 0)))
+ {
+ type = TYPE_POINTER_TO (type);
+ /* Don't clobber a value that might be part of a default
+ parameter value. */
+ if (TREE_PERMANENT (args))
+ args = tree_cons (0, build1 (ADDR_EXPR, type, slot),
+ TREE_CHAIN (args));
+ else
+ TREE_VALUE (args) = build1 (ADDR_EXPR, type, slot);
+ call_target = 0;
+ }
+ else if (TREE_CODE (return_type) == REFERENCE_TYPE)
+ {
+ type = return_type;
+ call_target = 0;
+ }
+ else
+ {
+#ifdef PCC_STATIC_STRUCT_RETURN
+ pcc_struct_return = 1;
+ call_target = 0;
+#else
+ call_target = target;
+#endif
+ }
+ if (call_target)
+ {
+ preserve_temp_slots (call_target);
+
+ /* Make this a valid memory address now. The code below assumes
+ that it can compare rtx and make assumptions based on the
+ result. The assumptions are true only if the address was
+ valid to begin with. */
+ call_target = validize_mem (call_target);
+ }
+
+ preserve_temp_slots (DECL_RTL (slot));
+ call_exp = build (CALL_EXPR, type, func, args, 0);
+ TREE_SIDE_EFFECTS (call_exp) = 1;
+ return_target = expand_expr (call_exp, call_target, mode, 0);
+ free_temp_slots ();
+ if (call_target == 0)
+ {
+ if (pcc_struct_return)
+ {
+ tree init = build (RTL_EXPR, type, 0, return_target);
+ TREE_ADDRESSABLE (init) = 1;
+ expand_aggr_init (slot, init, 0);
+ if (TYPE_NEEDS_DESTRUCTOR (type))
+ {
+ init = build (RTL_EXPR, build_reference_type (type), 0,
+ XEXP (return_target, 0));
+ init = maybe_build_cleanup (convert_from_reference (init));
+ if (init != NULL_TREE)
+ expand_expr (init, 0, 0, 0);
+ }
+ call_target = return_target = DECL_RTL (slot);
+ }
+ else
+ call_target = return_target;
+ }
+
+ if (call_target != return_target)
+ {
+ my_friendly_assert (! TYPE_NEEDS_CONSTRUCTING (type), 317);
+ if (GET_MODE (return_target) == BLKmode)
+ emit_block_move (call_target, return_target, expr_size (exp),
+ TYPE_ALIGN (type) / BITS_PER_UNIT);
+ else
+ emit_move_insn (call_target, return_target);
+ }
+
+ if (TREE_CODE (return_type) == REFERENCE_TYPE)
+ {
+ tree init;
+
+ if (GET_CODE (call_target) == REG
+ && REGNO (call_target) < FIRST_PSEUDO_REGISTER)
+ my_friendly_abort (39);
+
+ type = TREE_TYPE (exp);
+
+ init = build (RTL_EXPR, return_type, 0, call_target);
+ /* We got back a reference to the type we want. Now initialize
+ target with that. */
+ expand_aggr_init (slot, init, 0);
+ }
+
+ if (DECL_RTL (slot) != target)
+ emit_move_insn (DECL_RTL (slot), target);
+ return DECL_RTL (slot);
+ }
+
+ case OFFSET_REF:
+ {
+#if 1
+ return expand_expr (default_conversion (resolve_offset_ref (exp)),
+ target, tmode, EXPAND_NORMAL);
+#else
+ /* This is old crusty code, and does not handle all that the
+ resolve_offset_ref function does. (mrs) */
+ tree base = build_unary_op (ADDR_EXPR, TREE_OPERAND (exp, 0), 0);
+ tree offset = build_unary_op (ADDR_EXPR, TREE_OPERAND (exp, 1), 0);
+ return expand_expr (build (PLUS_EXPR, TREE_TYPE (exp), base, offset),
+ target, tmode, EXPAND_NORMAL);
+#endif
+ }
+
+ case THUNK_DECL:
+ return DECL_RTL (exp);
+
+ case THROW_EXPR:
+ expand_throw (TREE_OPERAND (exp, 0));
+ return NULL;
+
+ default:
+ break;
+ }
+ my_friendly_abort (40);
+ /* NOTREACHED */
+ return NULL;
+}
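+
+/* Orientation note (an illustrative reading, not from the original
+   sources): the NEW_EXPR case above handles initializations whose
+   destination was not known when the tree was built, e.g.
+
+     X x = f ();     (f returns X by value; SLOT is x, FUNC is f)
+
+   and redirects the call to construct into x directly when the ABI
+   permits (i.e. when PCC_STATIC_STRUCT_RETURN is not in effect). */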
+
+void
+init_cplus_expand ()
+{
+ lang_expand_expr = cplus_expand_expr;
+}
+
+/* If DECL had its rtl moved from where callers expect it
+ to be, fix it up. RESULT is the nominal rtl for the RESULT_DECL,
+ which may be a pseudo instead of a hard register. */
+
+void
+fixup_result_decl (decl, result)
+ tree decl;
+ rtx result;
+{
+ if (REG_P (result))
+ {
+ if (REGNO (result) >= FIRST_PSEUDO_REGISTER)
+ {
+ rtx real_decl_result;
+
+#ifdef FUNCTION_OUTGOING_VALUE
+ real_decl_result
+ = FUNCTION_OUTGOING_VALUE (TREE_TYPE (decl), current_function_decl);
+#else
+ real_decl_result
+ = FUNCTION_VALUE (TREE_TYPE (decl), current_function_decl);
+#endif
+ REG_FUNCTION_VALUE_P (real_decl_result) = 1;
+ result = real_decl_result;
+ }
+ emit_move_insn (result, DECL_RTL (decl));
+ emit_insn (gen_rtx (USE, VOIDmode, result));
+ }
+}
+
+/* Return nonzero iff DECL is memory-based. The DECL_RTL of
+ certain const variables might be a CONST_INT, or a REG
+ in some cases. We cannot use `memory_operand' as a test
+ here because on most RISC machines, a variable's address
+ is not, by itself, a legitimate address. */
+int
+decl_in_memory_p (decl)
+ tree decl;
+{
+ return DECL_RTL (decl) != 0 && GET_CODE (DECL_RTL (decl)) == MEM;
+}
diff --git a/gnu/usr.bin/cc/cc1plus/gc.c b/gnu/usr.bin/cc/cc1plus/gc.c
new file mode 100644
index 0000000..9db4d62
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/gc.c
@@ -0,0 +1,988 @@
+/* Garbage collection primitives for GNU C++.
+ Copyright (C) 1992, 1993 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include "tree.h"
+#include "cp-tree.h"
+#include "flags.h"
+
+#undef NULL
+#define NULL 0
+
+extern tree define_function ();
+extern tree build_t_desc_overload ();
+
+/* This is the function decl for the (pseudo-builtin) __gc_protect
+ function. Args are (class *value, int index); Returns value. */
+tree gc_protect_fndecl;
+
+/* This is the function decl for the (pseudo-builtin) __gc_unprotect
+ function. Args are (int index); void return. */
+tree gc_unprotect_fndecl;
+
+/* This is the function decl for the (pseudo-builtin) __gc_push
+ function. Args are (int length); void return. */
+tree gc_push_fndecl;
+
+/* This is the function decl for the (pseudo-builtin) __gc_pop
+ function. Args are void; void return. */
+tree gc_pop_fndecl;
+
+/* Special integers that are used to represent bits in gc-safe objects. */
+tree gc_nonobject;
+tree gc_visible;
+tree gc_white;
+tree gc_offwhite;
+tree gc_grey;
+tree gc_black;
+
+/* in c-common.c */
+extern tree combine_strings PROTO((tree));
+
+/* Predicate that returns non-zero if TYPE needs some kind of
+ entry for the GC. Returns zero otherwise. */
+int
+type_needs_gc_entry (type)
+ tree type;
+{
+ tree ttype = type;
+
+ if (! flag_gc || type == error_mark_node)
+ return 0;
+
+ /* Aggregate types need gc entries if any of their members
+ need gc entries. */
+ if (IS_AGGR_TYPE (type))
+ {
+ tree binfos;
+ tree fields = TYPE_FIELDS (type);
+ int i;
+
+ /* We don't care about certain pointers. Pointers
+ to virtual baseclasses are always up front. We also
+ cull out virtual function table pointers because it's
+	 easy, and it simplifies the logic. */
+ while (fields
+ && (DECL_NAME (fields) == NULL_TREE
+ || VFIELD_NAME_P (DECL_NAME (fields))
+ || VBASE_NAME_P (DECL_NAME (fields))
+ || !strcmp (IDENTIFIER_POINTER (DECL_NAME (fields)), "__bits")))
+ fields = TREE_CHAIN (fields);
+
+ while (fields)
+ {
+ if (type_needs_gc_entry (TREE_TYPE (fields)))
+ return 1;
+ fields = TREE_CHAIN (fields);
+ }
+
+ binfos = TYPE_BINFO_BASETYPES (type);
+ if (binfos)
+ for (i = TREE_VEC_LENGTH (binfos)-1; i >= 0; i--)
+ if (type_needs_gc_entry (BINFO_TYPE (TREE_VEC_ELT (binfos, i))))
+ return 1;
+
+ return 0;
+ }
+
+ while (TREE_CODE (ttype) == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (ttype)) == ARRAY_TYPE)
+ ttype = TREE_TYPE (ttype);
+ if ((TREE_CODE (ttype) == POINTER_TYPE
+ || TREE_CODE (ttype) == ARRAY_TYPE
+ || TREE_CODE (ttype) == REFERENCE_TYPE)
+ && IS_AGGR_TYPE (TREE_TYPE (ttype))
+ && CLASSTYPE_DOSSIER (TREE_TYPE (ttype)))
+ return 1;
+
+ return 0;
+}
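+
+/* Informally (an illustrative reading of the tests above): given a
+   class A with a dossier, `A *', `A &', and `A[10]' all need gc
+   entries; `int' and `A **' (the pointee is a pointer, not an
+   aggregate) do not; and an aggregate needs an entry iff one of its
+   ordinary fields or its bases does. */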
+
+/* Predicate that returns non-zero iff FROM is safe from the GC.
+
+ If TO is nonzero, it means we know that FROM is being stored
+   in TO, which may make it safe. */
+int
+value_safe_from_gc (to, from)
+ tree to, from;
+{
+ /* First, return non-zero for easy cases: parameters,
+ static variables. */
+ if (TREE_CODE (from) == PARM_DECL
+ || (TREE_CODE (from) == VAR_DECL
+ && TREE_STATIC (from)))
+ return 1;
+
+ /* If something has its address taken, it cannot be
+ in the heap, so it doesn't need to be protected. */
+ if (TREE_CODE (from) == ADDR_EXPR || TREE_REFERENCE_EXPR (from))
+ return 1;
+
+ /* If we are storing into a static variable, then what
+ we store will be safe from the gc. */
+ if (to && TREE_CODE (to) == VAR_DECL
+ && TREE_STATIC (to))
+ return 1;
+
+ /* Now recurse on structure of FROM. */
+ switch (TREE_CODE (from))
+ {
+ case COMPONENT_REF:
+ /* These guys are special, and safe. */
+ if (TREE_CODE (TREE_OPERAND (from, 1)) == FIELD_DECL
+ && (VFIELD_NAME_P (DECL_NAME (TREE_OPERAND (from, 1)))
+ || VBASE_NAME_P (DECL_NAME (TREE_OPERAND (from, 1)))))
+ return 1;
+ /* fall through... */
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ case WITH_CLEANUP_EXPR:
+ case SAVE_EXPR:
+ case PREDECREMENT_EXPR:
+ case PREINCREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ if (value_safe_from_gc (to, TREE_OPERAND (from, 0)))
+ return 1;
+ break;
+
+ case VAR_DECL:
+ case PARM_DECL:
+ /* We can safely pass these things as parameters to functions. */
+ if (to == 0)
+ return 1;
+
+ case ARRAY_REF:
+ case INDIRECT_REF:
+ case RESULT_DECL:
+ case OFFSET_REF:
+ case CALL_EXPR:
+ case METHOD_CALL_EXPR:
+ break;
+
+ case COMPOUND_EXPR:
+ case TARGET_EXPR:
+ if (value_safe_from_gc (to, TREE_OPERAND (from, 1)))
+ return 1;
+ break;
+
+ case COND_EXPR:
+ if (value_safe_from_gc (to, TREE_OPERAND (from, 1))
+ && value_safe_from_gc (to, TREE_OPERAND (from, 2)))
+ return 1;
+ break;
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ if ((type_needs_gc_entry (TREE_TYPE (TREE_OPERAND (from, 0)))
+ || value_safe_from_gc (to, TREE_OPERAND (from, 0)))
+ && (type_needs_gc_entry (TREE_TYPE (TREE_OPERAND (from, 1))) == 0
+ || value_safe_from_gc (to, TREE_OPERAND (from, 1))))
+ return 1;
+ break;
+
+ case RTL_EXPR:
+ /* Every time we build an RTL_EXPR in the front-end, we must
+ ensure that everything in it is safe from the garbage collector.
+ ??? This has only been done for `build_new'. */
+ return 1;
+
+ default:
+ my_friendly_abort (41);
+ }
+
+ if (to == 0)
+ return 0;
+
+ /* FROM wasn't safe. But other properties of TO might make it safe. */
+ switch (TREE_CODE (to))
+ {
+ case VAR_DECL:
+ case PARM_DECL:
+ /* We already culled out static VAR_DECLs above. */
+ return 0;
+
+ case COMPONENT_REF:
+ /* These guys are special, and safe. */
+ if (TREE_CODE (TREE_OPERAND (to, 1)) == FIELD_DECL
+ && (VFIELD_NAME_P (DECL_NAME (TREE_OPERAND (to, 1)))
+ || VBASE_NAME_P (DECL_NAME (TREE_OPERAND (to, 1)))))
+ return 1;
+ /* fall through... */
+
+ case NOP_EXPR:
+ case NON_LVALUE_EXPR:
+ case WITH_CLEANUP_EXPR:
+ case SAVE_EXPR:
+ case PREDECREMENT_EXPR:
+ case PREINCREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ return value_safe_from_gc (TREE_OPERAND (to, 0), from);
+
+ case COMPOUND_EXPR:
+ case TARGET_EXPR:
+ return value_safe_from_gc (TREE_OPERAND (to, 1), from);
+
+ case COND_EXPR:
+ return (value_safe_from_gc (TREE_OPERAND (to, 1), from)
+ && value_safe_from_gc (TREE_OPERAND (to, 2), from));
+
+ case INDIRECT_REF:
+ case ARRAY_REF:
+ /* This used to be 0, but our current restricted model
+ allows this to be 1. We'll never get arrays this way. */
+ return 1;
+
+ default:
+ my_friendly_abort (42);
+ }
+
+ /* Catch-all case is that TO/FROM is not safe. */
+ return 0;
+}
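+
+/* Rough intuition for the above (illustrative): parameters, statics,
+   and values whose address has been taken are safe on their own; a
+   store into a vfield/vbase component, or through an INDIRECT_REF or
+   ARRAY_REF, is safe; a store of an unprotected heap pointer into an
+   ordinary stack variable is not, and is expected to be wrapped with
+   protect_value_from_gc below. */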
+
+/* Function to build a static GC entry for DECL. TYPE is DECL's type.
+
+ For objects of type `class *', this is just an entry in the
+ static vector __PTR_LIST__.
+
+ For objects of type `class[]', this requires building an entry
+ in the static vector __ARR_LIST__.
+
+ For aggregates, this records all fields of type `class *'
+ and `class[]' in the respective lists above. */
+void
+build_static_gc_entry (decl, type)
+ tree decl;
+ tree type;
+{
+ /* Now, figure out what sort of entry to build. */
+ if (TREE_CODE (type) == POINTER_TYPE
+ || TREE_CODE (type) == REFERENCE_TYPE)
+ assemble_gc_entry (IDENTIFIER_POINTER (DECL_NAME (decl)));
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree ref = get_temp_name (build_reference_type (type), 1);
+ DECL_INITIAL (ref) = build1 (ADDR_EXPR, TREE_TYPE (ref), decl);
+ TREE_CONSTANT (DECL_INITIAL (ref)) = 1;
+ finish_decl (ref, DECL_INITIAL (ref), 0, 0);
+ }
+ else
+ {
+ /* Not yet implemented.
+
+ Cons up a static variable that holds address and length info
+ and add that to ___ARR_LIST__. */
+ my_friendly_abort (43);
+ }
+}
+
+/* Protect FROM from the GC, assuming FROM is going to be
+ stored into TO. We handle three cases for TO here:
+
+ case 1: TO is a stack variable.
+ case 2: TO is zero (which means it is a parameter).
+ case 3: TO is a return value. */
+
+tree
+protect_value_from_gc (to, from)
+ tree to, from;
+{
+ if (to == 0)
+ {
+ tree cleanup;
+
+ to = get_temp_regvar (TREE_TYPE (from), from);
+
+ /* Convert from integer to list form since we'll use it twice. */
+ DECL_GC_OFFSET (to) = build_tree_list (NULL_TREE, DECL_GC_OFFSET (to));
+ cleanup = build_function_call (gc_unprotect_fndecl,
+ DECL_GC_OFFSET (to));
+
+ if (! expand_decl_cleanup (to, cleanup))
+ {
+ compiler_error ("cannot unprotect parameter in this scope");
+ return error_mark_node;
+ }
+ }
+
+ /* Should never need to protect a value that's headed for static storage. */
+ if (TREE_STATIC (to))
+ my_friendly_abort (44);
+
+ switch (TREE_CODE (to))
+ {
+ case COMPONENT_REF:
+ case INDIRECT_REF:
+ return protect_value_from_gc (TREE_OPERAND (to, 0), from);
+
+ case VAR_DECL:
+ case PARM_DECL:
+ {
+ tree rval;
+ if (DECL_GC_OFFSET (to) == NULL_TREE)
+ {
+ /* Because of a cast or a conversion, we might stick
+ a value into a variable that would not normally
+ have a GC entry. */
+ DECL_GC_OFFSET (to) = size_int (++current_function_obstack_index);
+ }
+
+ if (TREE_CODE (DECL_GC_OFFSET (to)) != TREE_LIST)
+ {
+ DECL_GC_OFFSET (to)
+ = build_tree_list (NULL_TREE, DECL_GC_OFFSET (to));
+ }
+
+ current_function_obstack_usage = 1;
+ rval = build_function_call (gc_protect_fndecl,
+ tree_cons (NULL_TREE, from,
+ DECL_GC_OFFSET (to)));
+ TREE_TYPE (rval) = TREE_TYPE (from);
+ return rval;
+ }
+ }
+
+ /* If we fall through the switch, assume we lost. */
+ my_friendly_abort (45);
+ /* NOTREACHED */
+ return NULL_TREE;
+}
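+
+/* A sketch of the calls this generates (illustrative, not verbatim):
+ protecting FROM on its way into a local `A *p' yields roughly
+
+ __gc_protect (from, <obstack index list of p>)
+
+ whose value the caller stores, while the parameter case (TO == 0)
+ makes a temporary whose scope-exit cleanup is
+
+ __gc_unprotect (<obstack index list>);
+
+ both calls share the TREE_LIST hung off DECL_GC_OFFSET. */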
+
+/* Given the expression EXP of type `class *', return the head
+ of the object pointed to by EXP. */
+tree
+build_headof (exp)
+ tree exp;
+{
+ tree type = TREE_TYPE (exp);
+ tree vptr, offset;
+
+ if (TREE_CODE (type) != POINTER_TYPE)
+ {
+ error ("`headof' applied to non-pointer type");
+ return error_mark_node;
+ }
+
+ if (flag_vtable_thunks)
+ abort();
+
+ vptr = build1 (INDIRECT_REF, TYPE_POINTER_TO (vtable_entry_type), exp);
+ offset = build_component_ref (build_array_ref (vptr, integer_one_node),
+ get_identifier (VTABLE_DELTA_NAME),
+ NULL_TREE, 0);
+ return build (PLUS_EXPR, class_star_type_node, exp,
+ convert (integer_type_node, offset));
+}
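+
+/* The tree built above amounts to (a sketch, non-thunk vtables only,
+ with `delta' standing for whatever VTABLE_DELTA_NAME expands to):
+
+ (class *) ((char *) exp + ((struct vtable_entry *) *exp)[1].delta)
+
+ i.e. slot 1 of the vtable records the offset back to the head of
+ the complete object. */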
+
+/* Given the expression EXP of type `class *', return the
+ type descriptor for the object pointed to by EXP. */
+tree
+build_classof (exp)
+ tree exp;
+{
+ tree type = TREE_TYPE (exp);
+ tree vptr;
+ tree t_desc_entry;
+
+ if (TREE_CODE (type) != POINTER_TYPE)
+ {
+ error ("`classof' applied to non-pointer type");
+ return error_mark_node;
+ }
+
+ vptr = build1 (INDIRECT_REF, TYPE_POINTER_TO (vtable_entry_type), exp);
+ t_desc_entry = build_component_ref (build_array_ref (vptr, integer_one_node),
+ get_identifier (VTABLE_PFN_NAME),
+ NULL_TREE, 0);
+ TREE_TYPE (t_desc_entry) = TYPE_POINTER_TO (__t_desc_type_node);
+ return t_desc_entry;
+}
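+
+/* Analogous sketch for classof: the pfn member (VTABLE_PFN_NAME) of
+ vtable slot 1 is reinterpreted as the type descriptor pointer,
+
+ (struct __t_desc *) ((struct vtable_entry *) *exp)[1].pfn
+ */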
+
+/* Return the Type_info node associated with the expression EXP. If EXP is
+ a reference to a polymorphic class, return the dynamic type; otherwise
+ return the static type of the expression. */
+tree
+build_typeid (exp)
+ tree exp;
+{
+ tree type;
+
+ if (exp == error_mark_node)
+ return error_mark_node;
+
+ type = TREE_TYPE (exp);
+
+ /* if b is an instance of B, typeid(b) == typeid(B). Do this before
+ reference trickiness. */
+ if (TREE_CODE (exp) == VAR_DECL && TREE_CODE (type) == RECORD_TYPE)
+ return get_typeid (type);
+
+ /* Apply trivial conversion T -> T& for dereferenced ptrs. */
+ if (TREE_CODE (type) == RECORD_TYPE)
+ type = build_reference_type (type);
+
+ /* If exp is a reference to polymorphic type, get the real Type_info. */
+ if (TREE_CODE (type) == REFERENCE_TYPE && TYPE_VIRTUAL_P (TREE_TYPE (type)))
+ {
+ /* build reference to Type_info from vtable. */
+
+ sorry ("finding Type_info for an object");
+ return error_mark_node;
+ }
+
+ /* otherwise return the Type_info for the static type of the expr. */
+ return get_typeid (type);
+}
+
+/* Return the Type_info object for TYPE, creating it if necessary. */
+tree
+get_typeid (type)
+ tree type;
+{
+ if (type == error_mark_node)
+ return error_mark_node;
+
+ /* Is it useful (and/or correct) to have different typeids for `T &'
+ and `T'? */
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+
+ /* build reference to static Type_info */
+#if 1
+ sorry ("finding Type_info for a type");
+ return error_mark_node;
+#else
+ register tree t = TYPE_TINFO (type);
+
+ if (t)
+ return t;
+
+ /* ... */
+
+#endif
+}
+
+/* Execute a dynamic cast, as described in section 5.2.6 of the 9/93 working
+ paper. */
+tree
+build_dynamic_cast (type, expr)
+ tree type, expr;
+{
+ enum tree_code tc = TREE_CODE (type);
+ tree exprtype = TREE_TYPE (expr);
+ enum tree_code ec = TREE_CODE (exprtype);
+ tree retval;
+
+ if (type == error_mark_node || expr == error_mark_node)
+ return error_mark_node;
+
+ switch (tc)
+ {
+ case POINTER_TYPE:
+ if (TREE_TYPE (type) == void_type_node)
+ break;
+ /* else fall through */
+ case REFERENCE_TYPE:
+ if (TREE_CODE (TREE_TYPE (type)) == RECORD_TYPE
+ && TYPE_SIZE (TREE_TYPE (type)) != NULL_TREE)
+ break;
+ /* else fall through */
+ default:
+ cp_error ("`%#T' is not a valid type argument for dynamic_cast", type);
+ error ("(must be either pointer or reference to defined class or void *)");
+ return error_mark_node;
+ }
+
+ /* Apply trivial conversion T -> T& for dereferenced ptrs. */
+ if (ec == RECORD_TYPE)
+ {
+ exprtype = build_reference_type (exprtype);
+ ec = REFERENCE_TYPE;
+ }
+
+ /* the TREE_CODE of exprtype must match that of type. */
+ if (ec != tc)
+ {
+ cp_error ("`%E' (of type `%#T') fails to be of %s type", expr, exprtype,
+ tc == POINTER_TYPE ? "pointer" : "reference");
+ return error_mark_node;
+ }
+
+ /* If *type is an unambiguous accessible base class of *exprtype,
+ convert statically. */
+ {
+ int distance;
+ tree path;
+
+ distance = get_base_distance (TREE_TYPE (type), TREE_TYPE (exprtype), 1,
+ &path);
+ if (distance >= 0)
+ return build_vbase_path (PLUS_EXPR, type, expr, path, 0);
+ }
+
+ /* Otherwise *exprtype must be a polymorphic class (have a vtbl). */
+ if (TYPE_VIRTUAL_P (TREE_TYPE (exprtype)))
+ {
+ /* if TYPE is `void *', return pointer to complete object. */
+ if (tc == POINTER_TYPE && TREE_TYPE (type) == void_type_node)
+ {
+ /* if b is an object, dynamic_cast<void *>(&b) == (void *)&b. */
+ if (TREE_CODE (expr) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (expr, 0)) == VAR_DECL
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == RECORD_TYPE)
+ return build1 (NOP_EXPR, type, expr);
+
+ sorry ("finding pointer to complete object");
+ return build1 (NOP_EXPR, type, expr);
+ }
+ else
+ {
+ tree retval;
+
+ /* If we got here, we can't convert statically. Therefore,
+ dynamic_cast<D&>(b) (b an object) cannot succeed. */
+ if (ec == REFERENCE_TYPE)
+ {
+ if (TREE_CODE (expr) == VAR_DECL
+ && TREE_CODE (TREE_TYPE (expr)) == RECORD_TYPE)
+ {
+ cp_warning ("dynamic_cast of `%#D' to `%#T' can never succeed",
+ expr, type);
+ /* cplus_expand_throw (Bad_cast_node); */
+ sorry ("throwing Bad_cast");
+ return error_mark_node;
+ }
+ }
+ /* Ditto for dynamic_cast<D*>(&b). */
+ else if (TREE_CODE (expr) == ADDR_EXPR)
+ {
+ tree op = TREE_OPERAND (expr, 0);
+ if (TREE_CODE (op) == VAR_DECL
+ && TREE_CODE (TREE_TYPE (op)) == RECORD_TYPE)
+ {
+ cp_warning ("dynamic_cast of `%E' to `%#T' can never succeed",
+ expr, type);
+ retval = build_int_2 (0, 0);
+ TREE_TYPE (retval) = type;
+ return retval;
+ }
+ }
+ /* Build run-time conversion. */
+ sorry ("run-time type conversion");
+ retval = build_int_2 (0, 0);
+ TREE_TYPE (retval) = type;
+ return retval;
+ }
+ }
+
+ cp_error ("cannot dynamic_cast `%E' (of type `%#T') to type `%#T'",
+ expr, exprtype, type);
+ return error_mark_node;
+}
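+
+/* Summary of what the code above currently implements:
+
+ - target is an unambiguous accessible base of the operand's type:
+ compiled to a static conversion via build_vbase_path;
+ - dynamic_cast<void *> on a polymorphic operand: only the direct
+ `&object' case is folded, the general case is sorry ()ed;
+ - casts that can be proven never to succeed (operand's dynamic
+ type statically known) draw a warning and yield a null pointer
+ (or, for references, an error until throwing Bad_cast works);
+ - everything else falls into the sorry ()ed run-time conversion. */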
+
+/* Build and initialize various sorts of descriptors. Every descriptor
+ node has a name associated with it (the name created by mangling).
+ For this reason, we use the identifier as our access to the __*_desc
+ nodes, instead of sticking them directly in the types. Otherwise we
+ would burden all built-in types (and pointer types) with slots that
+ we don't necessarily want to use.
+
+ For each descriptor we build, we build a variable that contains
+ the descriptor's information. When we need this info at runtime,
+ all we need is access to these variables.
+
+ Note: these constructors always return the address of the descriptor
+ info, since that is simplest for their mutual interaction. */
+
+static tree
+build_generic_desc (decl, elems)
+ tree decl;
+ tree elems;
+{
+ tree init = build (CONSTRUCTOR, TREE_TYPE (decl), NULL_TREE, elems);
+ TREE_CONSTANT (init) = 1;
+ TREE_STATIC (init) = 1;
+ TREE_READONLY (init) = 1;
+
+ DECL_INITIAL (decl) = init;
+ TREE_STATIC (decl) = 1;
+ layout_decl (decl, 0);
+ finish_decl (decl, init, 0, 0);
+
+ return IDENTIFIER_AS_DESC (DECL_NAME (decl));
+}
+
+/* Build an initializer for a __t_desc node. So that we can take advantage
+ of recursion, we accept NULL for TYPE.
+ DEFINITION is greater than zero iff we must define the type descriptor
+ (as opposed to merely referencing it). 1 means treat according to
+ #pragma interface/#pragma implementation rules. 2 means define as
+ global and public, no matter what. */
+tree
+build_t_desc (type, definition)
+ tree type;
+ int definition;
+{
+ tree tdecl;
+ tree tname, name_string;
+ tree elems, fields;
+ tree parents, vbases, offsets, ivars, methods, target_type;
+ int method_count = 0, field_count = 0;
+
+ if (type == NULL_TREE)
+ return NULL_TREE;
+
+ tname = build_t_desc_overload (type);
+ if (IDENTIFIER_AS_DESC (tname)
+ && (!definition || TREE_ASM_WRITTEN (IDENTIFIER_AS_DESC (tname))))
+ return IDENTIFIER_AS_DESC (tname);
+
+ tdecl = lookup_name (tname, 0);
+ if (tdecl == NULL_TREE)
+ {
+ tdecl = build_decl (VAR_DECL, tname, __t_desc_type_node);
+ DECL_EXTERNAL (tdecl) = 1;
+ TREE_PUBLIC (tdecl) = 1;
+ tdecl = pushdecl_top_level (tdecl);
+ }
+ /* If we previously defined it, return the defined result. */
+ else if (definition && DECL_INITIAL (tdecl))
+ return IDENTIFIER_AS_DESC (tname);
+
+ if (definition)
+ {
+ tree taggr = type;
+ /* Let T* and T& be written only when T is written (if T is an aggr).
+ We do this for const, but not for volatile, since volatile
+ is rare and const is not. */
+ if (!TYPE_VOLATILE (taggr)
+ && (TREE_CODE (taggr) == POINTER_TYPE
+ || TREE_CODE (taggr) == REFERENCE_TYPE)
+ && IS_AGGR_TYPE (TREE_TYPE (taggr)))
+ taggr = TREE_TYPE (taggr);
+
+ /* If we know that we don't need to write out this type's
+ vtable, then don't write out its dossier. Somebody
+ else will take care of that. */
+ if (IS_AGGR_TYPE (taggr) && CLASSTYPE_VFIELD (taggr))
+ {
+ if (CLASSTYPE_VTABLE_NEEDS_WRITING (taggr))
+ {
+ TREE_PUBLIC (tdecl) = ! CLASSTYPE_INTERFACE_ONLY (taggr)
+ && CLASSTYPE_INTERFACE_KNOWN (taggr);
+ TREE_STATIC (tdecl) = 1;
+ DECL_EXTERNAL (tdecl) = 0;
+ }
+ else
+ {
+ if (write_virtuals != 0)
+ TREE_PUBLIC (tdecl) = 1;
+ }
+ }
+ else
+ {
+ DECL_EXTERNAL (tdecl) = 0;
+ TREE_STATIC (tdecl) = 1;
+ TREE_PUBLIC (tdecl) = (definition > 1);
+ }
+ }
+ SET_IDENTIFIER_AS_DESC (tname, build_unary_op (ADDR_EXPR, tdecl, 0));
+ if (!definition || DECL_EXTERNAL (tdecl))
+ {
+ /* That's it! */
+ finish_decl (tdecl, 0, 0, 0);
+ return IDENTIFIER_AS_DESC (tname);
+ }
+
+ /* Show that we are defining the t_desc for this type. */
+ DECL_INITIAL (tdecl) = error_mark_node;
+
+ parents = build_tree_list (NULL_TREE, integer_zero_node);
+ vbases = build_tree_list (NULL_TREE, integer_zero_node);
+ offsets = build_tree_list (NULL_TREE, integer_zero_node);
+ methods = NULL_TREE;
+ ivars = NULL_TREE;
+
+ if (TYPE_LANG_SPECIFIC (type))
+ {
+ int i = CLASSTYPE_N_BASECLASSES (type);
+ tree method_vec = CLASSTYPE_METHOD_VEC (type);
+ tree *meth, *end;
+ tree binfos = TYPE_BINFO_BASETYPES (type);
+ tree vb = CLASSTYPE_VBASECLASSES (type);
+
+ while (--i >= 0)
+ parents = tree_cons (NULL_TREE, build_t_desc (BINFO_TYPE (TREE_VEC_ELT (binfos, i)), 0), parents);
+
+ while (vb)
+ {
+ vbases = tree_cons (NULL_TREE, build_t_desc (BINFO_TYPE (vb), 0), vbases);
+ offsets = tree_cons (NULL_TREE, BINFO_OFFSET (vb), offsets);
+ vb = TREE_CHAIN (vb);
+ }
+
+ if (method_vec)
+ for (meth = TREE_VEC_END (method_vec),
+ end = &TREE_VEC_ELT (method_vec, 0); meth-- != end; )
+ if (*meth)
+ {
+ methods = tree_cons (NULL_TREE, build_m_desc (*meth), methods);
+ method_count++;
+ }
+ }
+
+ if (IS_AGGR_TYPE (type))
+ {
+ for (fields = TYPE_FIELDS (type); fields; fields = TREE_CHAIN (fields))
+ if (TREE_CODE (fields) == FIELD_DECL
+ || TREE_CODE (fields) == VAR_DECL)
+ {
+ ivars = tree_cons (NULL_TREE, build_i_desc (fields), ivars);
+ field_count++;
+ }
+ ivars = nreverse (ivars);
+ }
+
+ parents = finish_table (0, TYPE_POINTER_TO (__t_desc_type_node), parents, 0);
+ vbases = finish_table (0, TYPE_POINTER_TO (__t_desc_type_node), vbases, 0);
+ offsets = finish_table (0, integer_type_node, offsets, 0);
+ if (methods == NULL_TREE)
+ methods = null_pointer_node;
+ else
+ methods = build_unary_op (ADDR_EXPR,
+ finish_table (0, __m_desc_type_node, methods, 0),
+ 0);
+ if (ivars == NULL_TREE)
+ ivars = null_pointer_node;
+ else
+ ivars = build_unary_op (ADDR_EXPR,
+ finish_table (0, __i_desc_type_node, ivars, 0),
+ 0);
+ if (TREE_TYPE (type))
+ target_type = build_t_desc (TREE_TYPE (type), definition);
+ else
+ target_type = integer_zero_node;
+
+ name_string = combine_strings (build_string (IDENTIFIER_LENGTH (tname)+1, IDENTIFIER_POINTER (tname)));
+
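+ /* The initializer below lists the __t_desc members in order:
+ name, size, <unnamed bit fields, presently 0>, target type,
+ field count, method count, ivars, methods, &parents, &vbases,
+ &offsets. (Member names here are descriptive only; the layout
+ is whatever __t_desc_type_node was declared with.) */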
+ elems = tree_cons (NULL_TREE, build_unary_op (ADDR_EXPR, name_string, 0),
+ tree_cons (NULL_TREE,
+ TYPE_SIZE(type)? size_in_bytes(type) : integer_zero_node,
+ /* really should use bitfield initialization here. */
+ tree_cons (NULL_TREE, integer_zero_node,
+ tree_cons (NULL_TREE, target_type,
+ tree_cons (NULL_TREE, build_int_2 (field_count, 2),
+ tree_cons (NULL_TREE, build_int_2 (method_count, 2),
+ tree_cons (NULL_TREE, ivars,
+ tree_cons (NULL_TREE, methods,
+ tree_cons (NULL_TREE, build_unary_op (ADDR_EXPR, parents, 0),
+ tree_cons (NULL_TREE, build_unary_op (ADDR_EXPR, vbases, 0),
+ build_tree_list (NULL_TREE, build_unary_op (ADDR_EXPR, offsets, 0))))))))))));
+ return build_generic_desc (tdecl, elems);
+}
+
+/* Build an initializer for a __i_desc node. */
+tree
+build_i_desc (decl)
+ tree decl;
+{
+ tree elems, name_string;
+ tree taggr;
+
+ name_string = DECL_NAME (decl);
+ name_string = combine_strings (build_string (IDENTIFIER_LENGTH (name_string)+1, IDENTIFIER_POINTER (name_string)));
+
+ /* Now decide whether this ivar should cause its type to get
+ def'd or ref'd in this file. If the type we are looking at
+ has a proxy definition, we look at the proxy (i.e., a
+ `foo *' is equivalent to a `foo'). */
+ taggr = TREE_TYPE (decl);
+
+ if ((TREE_CODE (taggr) == POINTER_TYPE
+ || TREE_CODE (taggr) == REFERENCE_TYPE)
+ && TYPE_VOLATILE (taggr) == 0)
+ taggr = TREE_TYPE (taggr);
+
+ elems = tree_cons (NULL_TREE, build_unary_op (ADDR_EXPR, name_string, 0),
+ tree_cons (NULL_TREE, DECL_FIELD_BITPOS (decl),
+ build_tree_list (NULL_TREE, build_t_desc (TREE_TYPE (decl),
+ ! IS_AGGR_TYPE (taggr)))));
+ taggr = build (CONSTRUCTOR, __i_desc_type_node, NULL_TREE, elems);
+ TREE_CONSTANT (taggr) = 1;
+ TREE_STATIC (taggr) = 1;
+ TREE_READONLY (taggr) = 1;
+ return taggr;
+}
+
+/* Build an initializer for a __m_desc node. */
+tree
+build_m_desc (decl)
+ tree decl;
+{
+ tree taggr, elems, name_string;
+ tree parm_count, req_count, vindex, vcontext;
+ tree parms;
+ int p_count, r_count;
+ tree parm_types = NULL_TREE;
+
+ for (parms = TYPE_ARG_TYPES (TREE_TYPE (decl)), p_count = 0, r_count = 0;
+ parms != NULL_TREE; parms = TREE_CHAIN (parms), p_count++)
+ {
+ taggr = TREE_VALUE (parms);
+ if ((TREE_CODE (taggr) == POINTER_TYPE
+ || TREE_CODE (taggr) == REFERENCE_TYPE)
+ && TYPE_VOLATILE (taggr) == 0)
+ taggr = TREE_TYPE (taggr);
+
+ parm_types = tree_cons (NULL_TREE, build_t_desc (TREE_VALUE (parms),
+ ! IS_AGGR_TYPE (taggr)),
+ parm_types);
+ if (TREE_PURPOSE (parms) == NULL_TREE)
+ r_count++;
+ }
+
+ parm_types = finish_table (0, TYPE_POINTER_TO (__t_desc_type_node),
+ nreverse (parm_types), 0);
+ parm_count = build_int_2 (p_count, 0);
+ req_count = build_int_2 (r_count, 0);
+
+ if (DECL_VINDEX (decl))
+ vindex = DECL_VINDEX (decl);
+ else
+ vindex = integer_zero_node;
+ if (DECL_CONTEXT (decl)
+ && TREE_CODE_CLASS (TREE_CODE (DECL_CONTEXT (decl))) == 't')
+ vcontext = build_t_desc (DECL_CONTEXT (decl), 0);
+ else
+ vcontext = integer_zero_node;
+ name_string = DECL_NAME (decl);
+ if (name_string == NULL)
+ name_string = DECL_ASSEMBLER_NAME (decl);
+ name_string = combine_strings (build_string (IDENTIFIER_LENGTH (name_string)+1, IDENTIFIER_POINTER (name_string)));
+
+ /* Now decide whether the return type of this mvar
+ should cause its type to get def'd or ref'd in this file.
+ If the type we are looking at has a proxy definition,
+ we look at the proxy (i.e., a `foo *' is equivalent to a `foo'). */
+ taggr = TREE_TYPE (TREE_TYPE (decl));
+
+ if ((TREE_CODE (taggr) == POINTER_TYPE
+ || TREE_CODE (taggr) == REFERENCE_TYPE)
+ && TYPE_VOLATILE (taggr) == 0)
+ taggr = TREE_TYPE (taggr);
+
+ elems = tree_cons (NULL_TREE, build_unary_op (ADDR_EXPR, name_string, 0),
+ tree_cons (NULL_TREE, vindex,
+ tree_cons (NULL_TREE, vcontext,
+ tree_cons (NULL_TREE, build_t_desc (TREE_TYPE (TREE_TYPE (decl)),
+ ! IS_AGGR_TYPE (taggr)),
+ tree_cons (NULL_TREE, build_c_cast (TYPE_POINTER_TO (default_function_type), build_unary_op (ADDR_EXPR, decl, 0)),
+ tree_cons (NULL_TREE, parm_count,
+ tree_cons (NULL_TREE, req_count,
+ build_tree_list (NULL_TREE, build_unary_op (ADDR_EXPR, parm_types, 0)))))))));
+
+ taggr = build (CONSTRUCTOR, __m_desc_type_node, NULL_TREE, elems);
+ TREE_CONSTANT (taggr) = 1;
+ TREE_STATIC (taggr) = 1;
+ TREE_READONLY (taggr) = 1;
+ return taggr;
+}
+
+/* Conditionally emit code to set up an unwind-protect for the
+ garbage collector. If this function doesn't do anything that involves
+ the garbage collector, then do nothing. Otherwise, call __gc_push
+ at the beginning and __gc_pop at the end.
+
+ NOTE! The __gc_pop function must operate transparently, since
+ it comes where the logical return label lies. This means that
+ at runtime *it* must preserve any return value registers. */
+
+void
+expand_gc_prologue_and_epilogue ()
+{
+ extern tree maybe_gc_cleanup;
+ struct rtx_def *last_parm_insn, *mark;
+ extern struct rtx_def *get_last_insn ();
+ extern struct rtx_def *get_first_nonparm_insn ();
+ extern struct rtx_def *previous_insn ();
+ tree action;
+
+ /* If we didn't need the obstack, don't cons any space. */
+ if (current_function_obstack_index == 0
+ || current_function_obstack_usage == 0)
+ return;
+
+ mark = get_last_insn ();
+ last_parm_insn = get_first_nonparm_insn ();
+ if (last_parm_insn == 0) last_parm_insn = mark;
+ else last_parm_insn = previous_insn (last_parm_insn);
+
+ action = build_function_call (gc_push_fndecl,
+ build_tree_list (NULL_TREE, size_int (++current_function_obstack_index)));
+ expand_expr_stmt (action);
+
+ reorder_insns (next_insn (mark), get_last_insn (), last_parm_insn);
+
+ /* This will be expanded as a cleanup. */
+ TREE_VALUE (maybe_gc_cleanup)
+ = build_function_call (gc_pop_fndecl, NULL_TREE);
+}
+
+/* Some day we'll use this function as a call-back and clean
+ up all the unnecessary gc dribble that we otherwise create. */
+void
+lang_expand_end_bindings (first, last)
+ struct rtx_def *first, *last;
+{
+}
+
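+/* The run-time interface assumed by all of the above, written as C
+ prototypes for clarity (a sketch; the declarations below are
+ actually built as trees):
+
+ class *__gc_protect (class *value, int index);
+ void __gc_unprotect (int index);
+ void __gc_push (int index);
+ void __gc_pop (void);
+
+ plus the mark-word constants gc_nonobject, gc_visible and the four
+ gc colors. */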
+void
+init_gc_processing ()
+{
+ tree parmtypes = hash_tree_chain (class_star_type_node,
+ hash_tree_chain (integer_type_node, NULL_TREE));
+ gc_protect_fndecl = define_function ("__gc_protect",
+ build_function_type (class_star_type_node, parmtypes),
+ NOT_BUILT_IN, 0, 0);
+
+ parmtypes = hash_tree_chain (integer_type_node, NULL_TREE);
+ gc_unprotect_fndecl = define_function ("__gc_unprotect",
+ build_function_type (void_type_node, parmtypes),
+ NOT_BUILT_IN, 0, 0);
+
+ gc_push_fndecl = define_function ("__gc_push",
+ TREE_TYPE (gc_unprotect_fndecl),
+ NOT_BUILT_IN, 0, 0);
+
+ gc_pop_fndecl = define_function ("__gc_pop",
+ build_function_type (void_type_node,
+ void_list_node),
+ NOT_BUILT_IN, 0, 0);
+ gc_nonobject = build_int_2 (0x80000000, 0);
+ gc_visible = build_int_2 (0x40000000, 0);
+ gc_white = integer_zero_node;
+ gc_offwhite = build_int_2 (0x10000000, 0);
+ gc_grey = build_int_2 (0x20000000, 0);
+ gc_black = build_int_2 (0x30000000, 0);
+}
diff --git a/gnu/usr.bin/cc/cc1plus/hash.h b/gnu/usr.bin/cc/cc1plus/hash.h
new file mode 100644
index 0000000..8453c4b
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/hash.h
@@ -0,0 +1,197 @@
+/* C code produced by gperf version 2.5 (GNU C++ version) */
+/* Command-line: gperf -p -j1 -g -o -t -N is_reserved_word -k1,4,7,$ /deneb/blob/jason/g++/small/devo/gcc/cp/gxx.gperf */
+/* Command-line: gperf -p -j1 -g -o -t -N is_reserved_word -k1,4,$,7 gplus.gperf */
+struct resword { char *name; short token; enum rid rid;};
+
+#define TOTAL_KEYWORDS 86
+#define MIN_WORD_LENGTH 2
+#define MAX_WORD_LENGTH 16
+#define MIN_HASH_VALUE 4
+#define MAX_HASH_VALUE 171
+/* maximum key range = 168, duplicates = 0 */
+
+#ifdef __GNUC__
+inline
+#endif
+static unsigned int
+hash (str, len)
+ register char *str;
+ register unsigned int len;
+{
+ static unsigned char asso_values[] =
+ {
+ 172, 172, 172, 172, 172, 172, 172, 172, 172, 172,
+ 172, 172, 172, 172, 172, 172, 172, 172, 172, 172,
+ 172, 172, 172, 172, 172, 172, 172, 172, 172, 172,
+ 172, 172, 172, 172, 172, 172, 172, 172, 172, 172,
+ 172, 172, 172, 172, 172, 172, 172, 172, 172, 172,
+ 172, 172, 172, 172, 172, 172, 172, 172, 172, 172,
+ 172, 172, 172, 172, 172, 172, 172, 172, 172, 172,
+ 172, 172, 172, 172, 172, 172, 172, 172, 172, 172,
+ 172, 172, 172, 172, 172, 172, 172, 172, 172, 172,
+ 172, 172, 172, 172, 172, 0, 172, 36, 1, 61,
+ 0, 0, 30, 44, 44, 35, 172, 7, 12, 53,
+ 40, 17, 6, 172, 28, 2, 4, 35, 31, 51,
+ 5, 7, 172, 172, 172, 172, 172, 172,
+ };
+ register int hval = len;
+
+ switch (hval)
+ {
+ default:
+ case 7:
+ hval += asso_values[str[6]];
+ case 6:
+ case 5:
+ case 4:
+ hval += asso_values[str[3]];
+ case 3:
+ case 2:
+ case 1:
+ hval += asso_values[str[0]];
+ }
+ return hval + asso_values[str[len - 1]];
+}
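+
+/* Worked example, derivable from the tables above: for "else",
+ hval starts at len == 4, then adds asso_values of str[3], str[0]
+ and str[len - 1] -- all 'e', whose asso_value is 0 -- so
+ hash ("else", 4) == 4, exactly the wordlist slot of "else". */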
+
+#ifdef __GNUC__
+inline
+#endif
+struct resword *
+is_reserved_word (str, len)
+ register char *str;
+ register unsigned int len;
+{
+ static struct resword wordlist[] =
+ {
+ {"",}, {"",}, {"",}, {"",},
+ {"else", ELSE, NORID,},
+ {"",},
+ {"delete", DELETE, NORID,},
+ {"double", TYPESPEC, RID_DOUBLE,},
+ {"true", CXX_TRUE, NORID,},
+ {"__asm__", GCC_ASM_KEYWORD, NORID},
+ {"typeid", TYPEID, NORID,},
+ {"",},
+ {"this", THIS, NORID,},
+ {"",},
+ {"try", TRY, NORID,},
+ {"",}, {"",}, {"",}, {"",},
+ {"do", DO, NORID,},
+ {"",},
+ {"static_cast", STATIC_CAST, NORID,},
+ {"template", TEMPLATE, RID_TEMPLATE,},
+ {"protected", VISSPEC, RID_PROTECTED,},
+ {"",},
+ {"__classof__", CLASSOF, NORID},
+ {"",},
+ {"__headof__", HEADOF, NORID},
+ {"",},
+ {"bool", TYPESPEC, RID_BOOL,},
+ {"__const__", TYPE_QUAL, RID_CONST},
+ {"__volatile", TYPE_QUAL, RID_VOLATILE},
+ {"__const", TYPE_QUAL, RID_CONST},
+ {"__volatile__", TYPE_QUAL, RID_VOLATILE},
+ {"__typeof__", TYPEOF, NORID},
+ {"void", TYPESPEC, RID_VOID,},
+ {"friend", SCSPEC, RID_FRIEND,},
+ {"false", CXX_FALSE, NORID,},
+ {"sizeof", SIZEOF, NORID,},
+ {"short", TYPESPEC, RID_SHORT,},
+ {"typeof", TYPEOF, NORID,},
+ {"",},
+ {"int", TYPESPEC, RID_INT,},
+ {"__signed", TYPESPEC, RID_SIGNED},
+ {"private", VISSPEC, RID_PRIVATE,},
+ {"__signed__", TYPESPEC, RID_SIGNED},
+ {"extern", SCSPEC, RID_EXTERN,},
+ {"struct", AGGR, RID_RECORD,},
+ {"signed", TYPESPEC, RID_SIGNED,},
+ {"break", BREAK, NORID,},
+ {"__attribute", ATTRIBUTE, NORID},
+ {"default", DEFAULT, NORID,},
+ {"__attribute__", ATTRIBUTE, NORID},
+ {"__classof", CLASSOF, NORID},
+ {"sigof", SIGOF, NORID /* Extension */,},
+ {"__headof", HEADOF, NORID},
+ {"switch", SWITCH, NORID,},
+ {"__label__", LABEL, NORID},
+ {"__extension__", EXTENSION, NORID},
+ {"",},
+ {"__asm", GCC_ASM_KEYWORD, NORID},
+ {"for", FOR, NORID,},
+ {"__typeof", TYPEOF, NORID},
+ {"__alignof__", ALIGNOF, NORID},
+ {"",},
+ {"case", CASE, NORID,},
+ {"virtual", SCSPEC, RID_VIRTUAL,},
+ {"if", IF, NORID,},
+ {"while", WHILE, NORID,},
+ {"",},
+ {"class", AGGR, RID_CLASS,},
+ {"typedef", SCSPEC, RID_TYPEDEF,},
+ {"const", TYPE_QUAL, RID_CONST,},
+ {"static", SCSPEC, RID_STATIC,},
+ {"auto", SCSPEC, RID_AUTO,},
+ {"float", TYPESPEC, RID_FLOAT,},
+ {"inline", SCSPEC, RID_INLINE,},
+ {"throw", THROW, NORID,},
+ {"unsigned", TYPESPEC, RID_UNSIGNED,},
+ {"",},
+ {"headof", HEADOF, NORID,},
+ {"",},
+ {"goto", GOTO, NORID,},
+ {"",}, {"",},
+ {"public", VISSPEC, RID_PUBLIC,},
+ {"signature", AGGR, RID_SIGNATURE /* Extension */,},
+ {"volatile", TYPE_QUAL, RID_VOLATILE,},
+ {"__inline", SCSPEC, RID_INLINE},
+ {"overload", OVERLOAD, NORID,},
+ {"__inline__", SCSPEC, RID_INLINE},
+ {"__alignof", ALIGNOF, NORID},
+ {"asm", ASM_KEYWORD, NORID,},
+ {"",},
+ {"new", NEW, NORID,},
+ {"",},
+ {"mutable", SCSPEC, RID_MUTABLE,},
+ {"union", AGGR, RID_UNION,},
+ {"operator", OPERATOR, NORID,},
+ {"register", SCSPEC, RID_REGISTER,},
+ {"",}, {"",},
+ {"__wchar_t", TYPESPEC, RID_WCHAR /* Unique to ANSI C++ */,},
+ {"",},
+ {"long", TYPESPEC, RID_LONG,},
+ {"",}, {"",}, {"",},
+ {"continue", CONTINUE, NORID,},
+ {"return", RETURN, NORID,},
+ {"enum", ENUM, NORID,},
+ {"",}, {"",},
+ {"dynamic_cast", DYNAMIC_CAST, NORID,},
+ {"",}, {"",},
+ {"reinterpret_cast", REINTERPRET_CAST, NORID,},
+ {"",}, {"",}, {"",}, {"",},
+ {"char", TYPESPEC, RID_CHAR,},
+ {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+ {"classof", CLASSOF, NORID,},
+ {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+ {"const_cast", CONST_CAST, NORID,},
+ {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+ {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+ {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+ {"",}, {"",}, {"",}, {"",}, {"",},
+ {"catch", CATCH, NORID,},
+ };
+
+ if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH)
+ {
+ register int key = hash (str, len);
+
+ if (key <= MAX_HASH_VALUE && key >= 0)
+ {
+ register char *s = wordlist[key].name;
+
+ if (*s == *str && !strcmp (str + 1, s + 1))
+ return &wordlist[key];
+ }
+ }
+ return 0;
+}
diff --git a/gnu/usr.bin/cc/cc1plus/init.c b/gnu/usr.bin/cc/cc1plus/init.c
new file mode 100644
index 0000000..5e5d580
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/init.c
@@ -0,0 +1,4077 @@
+/* Handle initialization things in C++.
+ Copyright (C) 1987, 1989, 1992, 1993, 1994 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* High-level class interface. */
+
+#include "config.h"
+#include "tree.h"
+#include "rtl.h"
+#include "cp-tree.h"
+#include "flags.h"
+
+#undef NULL
+#define NULL 0
+
+/* In C++, structures with well-defined constructors are initialized by
+ those constructors, unasked. CURRENT_BASE_INIT_LIST
+ holds a list of stmts for a BASE_INIT term in the grammar.
+ This list has one element for each base class which must be
+ initialized. The list elements are [basename, init], with
+ type basetype. This allows the possibly anachronistic form
+ (assuming d : a, b, c) "d (int a) : c(a+5), b (a-4), a (a+3)"
+ where each successive term can be handed down the constructor
+ line. Perhaps this was not intended. */
+tree current_base_init_list, current_member_init_list;
+
+void emit_base_init ();
+void check_base_init ();
+static void expand_aggr_vbase_init ();
+void expand_member_init ();
+void expand_aggr_init ();
+
+static void expand_aggr_init_1 ();
+static void expand_recursive_init_1 ();
+static void expand_recursive_init ();
+static void expand_virtual_init PROTO((tree, tree));
+tree expand_vec_init ();
+
+static void add_friend (), add_friends ();
+
+/* Cache _builtin_new and _builtin_delete exprs. */
+static tree BIN, BID, BIVN, BIVD;
+
+/* Cache the identifier nodes for the two magic fields of a new cookie. */
+static tree nc_nelts_field_id;
+#if 0
+static tree nc_ptr_2comp_field_id;
+#endif
+
+static tree minus_one;
+
+/* Set up local variables for this file. MUST BE CALLED AFTER
+ INIT_DECL_PROCESSING. */
+
+tree BI_header_type, BI_header_size;
+
+void init_init_processing ()
+{
+ tree fields[1];
+
+ /* Define implicit `operator new' and `operator delete' functions. */
+ BIN = default_conversion (get_first_fn (IDENTIFIER_GLOBAL_VALUE (ansi_opname[(int) NEW_EXPR])));
+ TREE_USED (TREE_OPERAND (BIN, 0)) = 0;
+ BID = default_conversion (get_first_fn (IDENTIFIER_GLOBAL_VALUE (ansi_opname[(int) DELETE_EXPR])));
+ TREE_USED (TREE_OPERAND (BID, 0)) = 0;
+ BIVN = default_conversion (get_first_fn (IDENTIFIER_GLOBAL_VALUE (ansi_opname[(int) VEC_NEW_EXPR])));
+ TREE_USED (TREE_OPERAND (BIVN, 0)) = 0;
+ BIVD = default_conversion (get_first_fn (IDENTIFIER_GLOBAL_VALUE (ansi_opname[(int) VEC_DELETE_EXPR])));
+ TREE_USED (TREE_OPERAND (BIVD, 0)) = 0;
+ minus_one = build_int_2 (-1, -1);
+
+ /* Define the structure that holds header information for
+ arrays allocated via operator new. */
+ BI_header_type = make_lang_type (RECORD_TYPE);
+ nc_nelts_field_id = get_identifier ("nelts");
+ fields[0] = build_lang_field_decl (FIELD_DECL, nc_nelts_field_id, sizetype);
+ finish_builtin_type (BI_header_type, "__new_cookie", fields,
+ 0, double_type_node);
+ BI_header_size = size_in_bytes (BI_header_type);
+}
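+
+/* So the header stored in front of `new T[n]' arrays is effectively
+
+ struct __new_cookie { sizetype nelts; };
+
+ (a sketch: `sizetype' is the tree type used above), aligned via
+ the double_type_node argument, with BI_header_size bytes reserved
+ before the array proper. */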
+
+/* Subroutine of emit_base_init. For BINFO, initialize all the
+ virtual function table pointers, except those that come from
+ virtual base classes. Initialize binfo's vtable pointer, if
+ INIT_SELF is true. CAN_ELIDE is true when we know that all virtual
+ function table pointers in all bases have been initialized already,
+ probably because their constructors have just been run. ADDR is the
+ pointer to the object whose vtables we are going to initialize.
+
+ REAL_BINFO is usually the same as BINFO, except when ADDR is not a
+ pointer to the real derived type that we want to
+ initialize for. This is the case when ADDR points to a
+ subobject of a complete object, and we only want to do part of the
+ complete object's initialization of vtable pointers. This is done
+ for all virtual table pointers in virtual base classes. REAL_BINFO
+ is used to find the BINFO_VTABLE that we initialize with. BINFO is
+ used for conversions of addr to subobjects.
+
+ BINFO_TYPE (real_binfo) must be BINFO_TYPE (binfo).
+
+ Relies upon binfo being inside TYPE_BINFO (TREE_TYPE (TREE_TYPE
+ (addr))). */
+void
+expand_direct_vtbls_init (real_binfo, binfo, init_self, can_elide, addr)
+ tree real_binfo, binfo, addr;
+ int init_self, can_elide;
+{
+ tree real_binfos = BINFO_BASETYPES (real_binfo);
+ tree binfos = BINFO_BASETYPES (binfo);
+ int i, n_baselinks = real_binfos ? TREE_VEC_LENGTH (real_binfos) : 0;
+
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree real_base_binfo = TREE_VEC_ELT (real_binfos, i);
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ int is_not_base_vtable =
+ i != CLASSTYPE_VFIELD_PARENT (BINFO_TYPE (real_binfo));
+ if (! TREE_VIA_VIRTUAL (real_base_binfo))
+ expand_direct_vtbls_init (real_base_binfo, base_binfo,
+ is_not_base_vtable, can_elide, addr);
+ }
+#if 0
+ /* Before turning this on, make sure it is correct. */
+ if (can_elide && ! BINFO_MODIFIED (binfo))
+ return;
+#endif
+ /* Should we use something besides CLASSTYPE_VFIELDS? */
+ if (init_self && CLASSTYPE_VFIELDS (BINFO_TYPE (real_binfo)))
+ {
+ tree base_ptr = convert_pointer_to_real (binfo, addr);
+ expand_virtual_init (real_binfo, base_ptr);
+ }
+}
+
+/* 348 - 351 */
+/* Subroutine of emit_base_init. */
+static void
+perform_member_init (member, name, init, explicit)
+ tree member, name, init;
+ int explicit;
+{
+ tree decl;
+ tree type = TREE_TYPE (member);
+
+ if (TYPE_NEEDS_CONSTRUCTING (type)
+ || (init && TYPE_HAS_CONSTRUCTOR (type)))
+ {
+ /* Since `init' is already a TREE_LIST on the current_member_init_list,
+ only build it into one if we aren't already a list. */
+ if (init != NULL_TREE && TREE_CODE (init) != TREE_LIST)
+ init = build_tree_list (NULL_TREE, init);
+
+ decl = build_component_ref (C_C_D, name, 0, explicit);
+
+ if (explicit
+ && TREE_CODE (type) == ARRAY_TYPE
+ && init != NULL_TREE
+ && TREE_CHAIN (init) == NULL_TREE
+ && TREE_CODE (TREE_TYPE (TREE_VALUE (init))) == ARRAY_TYPE)
+ {
+ /* Initialization of one array from another. */
+ expand_vec_init (TREE_OPERAND (decl, 1), decl,
+ array_type_nelts (type), TREE_VALUE (init), 1);
+ }
+ else
+ expand_aggr_init (decl, init, 0);
+ }
+ else
+ {
+ if (init == NULL_TREE)
+ {
+ if (explicit)
+ {
+ cp_error ("incomplete initializer for member `%D' of class `%T' which has no constructor",
+ member, current_class_type);
+ init = error_mark_node;
+ }
+ /* member traversal: note it leaves init NULL */
+ else if (TREE_CODE (TREE_TYPE (member)) == REFERENCE_TYPE)
+ cp_pedwarn ("uninitialized reference member `%D'", member);
+ }
+ else if (TREE_CODE (init) == TREE_LIST)
+ {
+ /* There was an explicit member initialization. Do some
+ work in that case. */
+ if (TREE_CHAIN (init))
+ {
+ warning ("initializer list treated as compound expression");
+ init = build_compound_expr (init);
+ }
+ else
+ init = TREE_VALUE (init);
+ }
+
+ /* We only build this with a null init if we got it from the
+ current_member_init_list. */
+ if (init || explicit)
+ {
+ decl = build_component_ref (C_C_D, name, 0, explicit);
+ expand_expr_stmt (build_modify_expr (decl, INIT_EXPR, init));
+ }
+ }
+ if (flag_handle_exceptions && TYPE_NEEDS_DESTRUCTOR (type))
+ cp_warning ("caution, member `%D' may not be destroyed in the presense of an exception during construction", member);
+}
+
+/* Subroutine of emit_base_init. */
+static tree
+sort_member_init (t)
+ tree t;
+{
+ tree x, member, name, field, init;
+ tree init_list = NULL_TREE;
+ tree fields_to_unmark = NULL_TREE;
+ int found;
+
+ for (member = TYPE_FIELDS (t); member ; member = TREE_CHAIN (member))
+ {
+ found = 0;
+ for (x = current_member_init_list ; x ; x = TREE_CHAIN (x))
+ {
+ /* If we cleared this out, then pay no attention to it. */
+ if (TREE_PURPOSE (x) == NULL_TREE)
+ continue;
+ name = TREE_PURPOSE (x);
+
+#if 0
+ field = (TREE_CODE (name) == COMPONENT_REF
+ ? TREE_OPERAND (name, 1) : IDENTIFIER_CLASS_VALUE (name));
+#else
+ /* Let's find out when this happens. */
+ my_friendly_assert (TREE_CODE (name) != COMPONENT_REF, 348);
+ field = IDENTIFIER_CLASS_VALUE (name);
+#endif
+
+ /* If one member shadows another, get the outermost one. */
+ if (TREE_CODE (field) == TREE_LIST)
+ field = TREE_VALUE (field);
+
+ if (field == member)
+ {
+ /* See if we already found an initializer for this field. */
+ if (found)
+ {
+ if (DECL_NAME (field))
+ cp_error ("multiple initializations given for member `%D'",
+ field);
+ continue;
+ }
+
+ init_list = chainon (init_list,
+ build_tree_list (name, TREE_VALUE (x)));
+ /* Make sure we won't try to work on this init again. */
+ TREE_PURPOSE (x) = NULL_TREE;
+ found = 1;
+ break;
+ }
+ }
+
+ /* If we didn't find MEMBER in the list, create a dummy entry
+ so the two lists (INIT_LIST and the list of members) will be
+ symmetrical. */
+ if (! found)
+ init_list = chainon (init_list, build_tree_list (NULL_TREE, NULL_TREE));
+ }
+
+ for (x = current_member_init_list ; x ; x = TREE_CHAIN (x))
+ {
+ if (TREE_PURPOSE (x))
+ {
+ name = TREE_PURPOSE (x);
+ init = TREE_VALUE (x);
+ /* XXX: this may need the COMPONENT_REF operand 0 check if
+ it turns out we actually get them. */
+ field = IDENTIFIER_CLASS_VALUE (name);
+
+ /* If one member shadows another, get the outermost one. */
+ if (TREE_CODE (field) == TREE_LIST)
+ {
+ field = TREE_VALUE (field);
+ if (decl_type_context (field) != current_class_type)
+ cp_error ("field `%D' not in immediate context", field);
+ }
+
+#if 0
+ /* It turns out if you have an anonymous union in the
+ class, a member from it can end up not being on the
+ list of fields (rather, the type is), and therefore
+ won't be seen by the for loop above. */
+
+ /* The code in this for loop is derived from a general loop
+ which had this check in it. Theoretically, we've hit
+ every initialization for the list of members in T, so
+ we shouldn't have anything but these left in this list. */
+ my_friendly_assert (DECL_FIELD_CONTEXT (field) != t, 351);
+#endif
+
+ if (TREE_HAS_CONSTRUCTOR (field))
+ {
+ if (DECL_NAME (field))
+ error ("multiple initializations given for member `%s'",
+ IDENTIFIER_POINTER (DECL_NAME (field)));
+ continue;
+ }
+
+ TREE_HAS_CONSTRUCTOR (field) = 1;
+ fields_to_unmark = tree_cons (NULL_TREE, field, fields_to_unmark);
+
+ perform_member_init (field, name, init, 1);
+ TREE_PURPOSE (x) = NULL_TREE;
+ }
+ }
+
+ /* Unmark fields which are initialized for the base class. */
+ while (fields_to_unmark)
+ {
+ TREE_HAS_CONSTRUCTOR (TREE_VALUE (fields_to_unmark)) = 0;
+ /* XXX is this a memory leak? */
+ fields_to_unmark = TREE_CHAIN (fields_to_unmark);
+ }
+
+ return init_list;
+}
+
+/* Perform whatever initializations have yet to be done on the base
+ class of the class variable. These actions are in the global
+ variable CURRENT_BASE_INIT_LIST. Such an action could be
+ NULL_TREE, meaning that the user has explicitly called the base
+ class constructor with no arguments.
+
+ If there is a need for a call to a constructor, we must surround
+ that call with a pushlevel/poplevel pair, since we are technically
+ at the PARM level of scope.
+
+ Argument IMMEDIATELY, if zero, forces a new sequence to be
+ generated to contain these new insns, so it can be emitted later.
+ This sequence is saved in the global variable BASE_INIT_INSNS.
+ Otherwise, the insns are emitted into the current sequence.
+
+ Note that emit_base_init does *not* initialize virtual base
+ classes. That is done specially, elsewhere. */
+
+void
+emit_base_init (t, immediately)
+ tree t;
+ int immediately;
+{
+ extern tree in_charge_identifier;
+
+ tree member, vbases;
+ tree init_list;
+ int pass, start;
+ tree t_binfo = TYPE_BINFO (t);
+ tree binfos = BINFO_BASETYPES (t_binfo);
+ int i, n_baseclasses = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+ int have_init_list = 0, from_init_list;
+
+ if (! immediately)
+ {
+ do_pending_stack_adjust ();
+ start_sequence ();
+ }
+
+ if (write_symbols == NO_DEBUG)
+ /* As a matter of principle, `start_sequence' should do this. */
+ emit_note (0, -1);
+ else
+ /* Always emit a line number note so we can step into constructors. */
+ emit_line_note_force (DECL_SOURCE_FILE (current_function_decl),
+ DECL_SOURCE_LINE (current_function_decl));
+
+ start = ! TYPE_USES_VIRTUAL_BASECLASSES (t);
+ for (pass = start; pass < 2; pass++)
+ {
+ tree vbase_init_list = NULL_TREE;
+
+ for (init_list = current_base_init_list; init_list;
+ init_list = TREE_CHAIN (init_list))
+ {
+ tree basename = TREE_PURPOSE (init_list);
+ tree binfo;
+ tree init = TREE_VALUE (init_list);
+
+ if (basename == NULL_TREE)
+ {
+ /* Initializer for single base class. Must not
+ use multiple inheritance or this is ambiguous. */
+ switch (n_baseclasses)
+ {
+ case 0:
+ error ("type `%s' does not have a base class to initialize",
+ IDENTIFIER_POINTER (current_class_name));
+ return;
+ case 1:
+ break;
+ default:
+ error ("unnamed initializer ambiguous for type `%s' which uses multiple inheritance", IDENTIFIER_POINTER (current_class_name));
+ return;
+ }
+ binfo = TREE_VEC_ELT (binfos, 0);
+ }
+ else if (is_aggr_typedef (basename, 1))
+ {
+ binfo = binfo_or_else (IDENTIFIER_TYPE_VALUE (basename), t);
+ if (binfo == NULL_TREE)
+ continue;
+
+ /* Virtual base classes are special cases. Their initializers
+ are recorded with this constructor, and they are used when
+ this constructor is the top-level constructor called. */
+ if (! TREE_VIA_VIRTUAL (binfo))
+ {
+ /* Otherwise, if it is not an immediate base class, complain. */
+ for (i = n_baseclasses-1; i >= 0; i--)
+ if (BINFO_TYPE (binfo) == BINFO_TYPE (TREE_VEC_ELT (binfos, i)))
+ break;
+ if (i < 0)
+ {
+ error ("type `%s' is not an immediate base class of type `%s'",
+ IDENTIFIER_POINTER (basename),
+ IDENTIFIER_POINTER (current_class_name));
+ continue;
+ }
+ }
+ }
+ else
+ continue;
+
+ /* The base initialization list goes up to the first
+ base class which can actually use it. */
+
+ if (pass == start)
+ {
+ char *msgp = (! TYPE_HAS_CONSTRUCTOR (BINFO_TYPE (binfo)))
+ ? "cannot pass initialization up to class `%s'" : 0;
+
+ while (! TYPE_HAS_CONSTRUCTOR (BINFO_TYPE (binfo))
+ && BINFO_BASETYPES (binfo) != NULL_TREE
+ && TREE_VEC_LENGTH (BINFO_BASETYPES (binfo)) == 1)
+ {
+ /* ?? This should be fixed in RENO by forcing
+ default constructors to exist. */
+ SET_BINFO_BASEINIT_MARKED (binfo);
+ binfo = BINFO_BASETYPE (binfo, 0);
+ }
+
+ /* We used to give an error if this wasn't true, saying that
+ there's no constructor for the initialization of basename.
+ This turned out to be incorrect---it should use the
+ default constructor, since a user could try to initialize
+ the class in a derived class's base initializer list. */
+ if (TYPE_HAS_CONSTRUCTOR (BINFO_TYPE (binfo)))
+ {
+ if (msgp)
+ {
+ if (pedantic)
+ error_with_aggr_type (binfo, msgp);
+ else
+ msgp = NULL;
+ }
+ }
+
+ if (BINFO_BASEINIT_MARKED (binfo))
+ {
+ msgp = "class `%s' initializer already specified";
+ error (msgp, IDENTIFIER_POINTER (basename));
+ }
+
+ if (msgp)
+ continue;
+
+ SET_BINFO_BASEINIT_MARKED (binfo);
+ if (TREE_VIA_VIRTUAL (binfo))
+ {
+ vbase_init_list = tree_cons (init, BINFO_TYPE (binfo),
+ vbase_init_list);
+ continue;
+ }
+ if (pass == 0)
+ continue;
+ }
+ else if (TREE_VIA_VIRTUAL (binfo))
+ continue;
+
+ member = convert_pointer_to (binfo, current_class_decl);
+ expand_aggr_init_1 (t_binfo, 0,
+ build_indirect_ref (member, NULL_PTR), init,
+ BINFO_OFFSET_ZEROP (binfo), LOOKUP_COMPLAIN);
+ }
+
+ if (pass == 0)
+ {
+ tree first_arg = TREE_CHAIN (DECL_ARGUMENTS (current_function_decl));
+ tree vbases;
+
+ if (DECL_NAME (current_function_decl) == NULL_TREE
+ && TREE_CHAIN (first_arg) != NULL_TREE)
+ {
+ /* If there are virtual baseclasses without initialization
+ specified, and this is a default X(X&) constructor,
+ build the initialization list so that each virtual baseclass
+ of the new object is initialized from the virtual baseclass
+ of the incoming arg. */
+ tree init_arg = build_unary_op (ADDR_EXPR, TREE_CHAIN (first_arg), 0);
+ for (vbases = CLASSTYPE_VBASECLASSES (t);
+ vbases; vbases = TREE_CHAIN (vbases))
+ {
+ if (BINFO_BASEINIT_MARKED (vbases) == 0)
+ {
+ member = convert_pointer_to (vbases, init_arg);
+ if (member == init_arg)
+ member = TREE_CHAIN (first_arg);
+ else
+ TREE_TYPE (member) = build_reference_type (BINFO_TYPE (vbases));
+ vbase_init_list = tree_cons (convert_from_reference (member),
+ vbases, vbase_init_list);
+ SET_BINFO_BASEINIT_MARKED (vbases);
+ }
+ }
+ }
+ expand_start_cond (first_arg, 0);
+ expand_aggr_vbase_init (t_binfo, C_C_D, current_class_decl,
+ vbase_init_list);
+ expand_end_cond ();
+ }
+ }
+ current_base_init_list = NULL_TREE;
+
+ /* Now, perform default initialization of all base classes which
+ have not yet been initialized, and unmark baseclasses which
+ have been initialized. */
+ for (i = 0; i < n_baseclasses; i++)
+ {
+ tree base = current_class_decl;
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+
+ if (TYPE_NEEDS_CONSTRUCTING (BINFO_TYPE (base_binfo)))
+ {
+ if (! TREE_VIA_VIRTUAL (base_binfo)
+ && ! BINFO_BASEINIT_MARKED (base_binfo))
+ {
+ tree ref;
+
+ if (BINFO_OFFSET_ZEROP (base_binfo))
+ base = build1 (NOP_EXPR,
+ TYPE_POINTER_TO (BINFO_TYPE (base_binfo)),
+ current_class_decl);
+ else
+ base = build (PLUS_EXPR,
+ TYPE_POINTER_TO (BINFO_TYPE (base_binfo)),
+ current_class_decl, BINFO_OFFSET (base_binfo));
+
+ ref = build_indirect_ref (base, NULL_PTR);
+ expand_aggr_init_1 (t_binfo, 0, ref, NULL_TREE,
+ BINFO_OFFSET_ZEROP (base_binfo),
+ LOOKUP_COMPLAIN);
+ }
+ }
+ CLEAR_BINFO_BASEINIT_MARKED (base_binfo);
+
+ if (! TYPE_USES_VIRTUAL_BASECLASSES (t))
+ {
+ while (! TYPE_HAS_CONSTRUCTOR (BINFO_TYPE (base_binfo))
+ && BINFO_BASETYPES (base_binfo) != NULL_TREE
+ && TREE_VEC_LENGTH (BINFO_BASETYPES (base_binfo)) == 1)
+ {
+ /* ?? This should be fixed in RENO by forcing
+ default constructors to exist. It is needed for symmetry
+ with code above. */
+ base_binfo = BINFO_BASETYPE (base_binfo, 0);
+ CLEAR_BINFO_BASEINIT_MARKED (base_binfo);
+ }
+ }
+ }
+
+ /* Initialize all the virtual function table fields that
+ do come from virtual base classes. */
+ if (TYPE_USES_VIRTUAL_BASECLASSES (t))
+ expand_indirect_vtbls_init (t_binfo, C_C_D, current_class_decl, 0);
+ for (vbases = CLASSTYPE_VBASECLASSES (t); vbases; vbases = TREE_CHAIN (vbases))
+ CLEAR_BINFO_BASEINIT_MARKED (vbases);
+
+ /* Initialize all the virtual function table fields that
+ do not come from virtual base classes. */
+ expand_direct_vtbls_init (t_binfo, t_binfo, 1, 1, current_class_decl);
+
+ if (current_member_init_list)
+ {
+ init_list = sort_member_init (t);
+ have_init_list = 1;
+ }
+
+ for (member = TYPE_FIELDS (t); member; member = TREE_CHAIN (member))
+ {
+ tree init, name;
+ from_init_list = 0;
+
+ /* See if we had a user-specified member initialization. */
+ if (have_init_list)
+ {
+ if (TREE_PURPOSE (init_list))
+ {
+ name = TREE_PURPOSE (init_list);
+ init = TREE_VALUE (init_list);
+ from_init_list = 1;
+
+ if (TREE_STATIC (member))
+ {
+ error_with_aggr_type (DECL_FIELD_CONTEXT (member),
+ "field `%s::%s' is static; only point of initialization is its declaration",
+ IDENTIFIER_POINTER (TREE_PURPOSE (init_list)));
+ continue;
+ }
+
+ /* Also see if it's ever a COMPONENT_REF here. If it is, we
+ need to do `expand_assignment (name, init, 0, 0);' and
+ a continue. */
+ my_friendly_assert (TREE_CODE (name) != COMPONENT_REF, 349);
+ }
+
+ init_list = TREE_CHAIN (init_list);
+ }
+
+ if (! from_init_list)
+ {
+ /* member could be, for example, a CONST_DECL for an enumerated
+ tag; we don't want to try to initialize that, since it already
+ has a value. */
+ if (TREE_CODE (member) != FIELD_DECL || !DECL_NAME (member))
+ continue;
+
+ name = DECL_NAME (member);
+ init = DECL_INITIAL (member);
+ }
+
+ perform_member_init (member, name, init, from_init_list);
+ }
+
+ current_member_init_list = NULL_TREE;
+
+ /* It is possible for the initializers to need cleanups.
+ Expand those cleanups now that all the initialization
+ has been done. */
+ expand_cleanups_to (NULL_TREE);
+
+ if (! immediately)
+ {
+ extern rtx base_init_insns;
+
+ do_pending_stack_adjust ();
+ my_friendly_assert (base_init_insns == 0, 207);
+ base_init_insns = get_insns ();
+ end_sequence ();
+ }
+
+ /* All the implicit try blocks we built up will be zapped
+ when we come to a real binding contour boundary. */
+}
+
+/* Check that all fields are properly initialized after
+ an assignment to `this'. */
+void
+check_base_init (t)
+ tree t;
+{
+ tree member;
+ for (member = TYPE_FIELDS (t); member; member = TREE_CHAIN (member))
+ if (DECL_NAME (member) && TREE_USED (member))
+ cp_error ("field `%D' used before initialized (after assignment to `this')",
+ member);
+}
+
+/* This code sets up the virtual function tables appropriate for
+ the pointer DECL. It is a one-ply initialization.
+
+ BINFO is the exact type that DECL is supposed to be. In
+ multiple inheritance, this might mean "C's A" if C : A, B. */
+static void
+expand_virtual_init (binfo, decl)
+ tree binfo, decl;
+{
+ tree type = BINFO_TYPE (binfo);
+ tree vtbl, vtbl_ptr;
+ tree vtype, vtype_binfo;
+
+ /* This code is crusty. Should be simple, like:
+ vtbl = BINFO_VTABLE (binfo);
+ */
+ vtype = DECL_CONTEXT (CLASSTYPE_VFIELD (type));
+ vtype_binfo = get_binfo (vtype, TREE_TYPE (TREE_TYPE (decl)), 0);
+ vtbl = BINFO_VTABLE (binfo_value (DECL_FIELD_CONTEXT (CLASSTYPE_VFIELD (type)), binfo));
+ if (!flag_vtable_thunks)
+ assemble_external (vtbl);
+ TREE_USED (vtbl) = 1;
+ vtbl = build1 (ADDR_EXPR, TYPE_POINTER_TO (TREE_TYPE (vtbl)), vtbl);
+ decl = convert_pointer_to_real (vtype_binfo, decl);
+ vtbl_ptr = build_vfield_ref (build_indirect_ref (decl, NULL_PTR), vtype);
+ if (vtbl_ptr == error_mark_node)
+ return;
+
+ /* Have to convert VTBL since array sizes may be different. */
+ vtbl = convert_force (TREE_TYPE (vtbl_ptr), vtbl);
+ expand_expr_stmt (build_modify_expr (vtbl_ptr, NOP_EXPR, vtbl));
+}
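+
+/* Net effect, roughly: a single store of the form
+
+ decl->vfield = (expected vtable type) &<binfo's vtable>;
+
+ where `vfield' stands for the virtual function table pointer
+ field of VTYPE (a sketch, not the literal field name). */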
+
+/* Subroutine of `expand_aggr_vbase_init'.
+ BINFO is the binfo of the type that is being initialized.
+ INIT_LIST is the list of initializers for the virtual baseclass. */
+static void
+expand_aggr_vbase_init_1 (binfo, exp, addr, init_list)
+ tree binfo, exp, addr, init_list;
+{
+ tree init = value_member (BINFO_TYPE (binfo), init_list);
+ tree ref = build_indirect_ref (addr, NULL_PTR);
+ if (init)
+ init = TREE_PURPOSE (init);
+ /* Call constructors, but don't set up vtables. */
+ expand_aggr_init_1 (binfo, exp, ref, init, 0,
+ LOOKUP_COMPLAIN|LOOKUP_SPECULATIVELY);
+ CLEAR_BINFO_VBASE_INIT_MARKED (binfo);
+}
+
+/* Initialize this object's virtual base class pointers. This must be
+ done only at the top-level of the object being constructed.
+
+ INIT_LIST is list of initialization for constructor to perform. */
+static void
+expand_aggr_vbase_init (binfo, exp, addr, init_list)
+ tree binfo;
+ tree exp;
+ tree addr;
+ tree init_list;
+{
+ tree type = BINFO_TYPE (binfo);
+
+ if (TYPE_USES_VIRTUAL_BASECLASSES (type))
+ {
+ tree result = init_vbase_pointers (type, addr);
+ tree vbases;
+
+ if (result)
+ expand_expr_stmt (build_compound_expr (result));
+
+ /* Mark everything as having an initializer
+ (either explicit or default). */
+ for (vbases = CLASSTYPE_VBASECLASSES (type);
+ vbases; vbases = TREE_CHAIN (vbases))
+ SET_BINFO_VBASE_INIT_MARKED (vbases);
+
+ /* First, initialize baseclasses which could be baseclasses
+ for other virtual baseclasses. */
+ for (vbases = CLASSTYPE_VBASECLASSES (type);
+ vbases; vbases = TREE_CHAIN (vbases))
+ /* Don't initialize twice. */
+ if (BINFO_VBASE_INIT_MARKED (vbases))
+ {
+ tree tmp = result;
+
+ while (BINFO_TYPE (vbases) != BINFO_TYPE (TREE_PURPOSE (tmp)))
+ tmp = TREE_CHAIN (tmp);
+ expand_aggr_vbase_init_1 (vbases, exp,
+ TREE_OPERAND (TREE_VALUE (tmp), 0),
+ init_list);
+ }
+
+ /* Now initialize the baseclasses which don't have virtual baseclasses. */
+ for (; result; result = TREE_CHAIN (result))
+ /* Don't initialize twice. */
+ if (BINFO_VBASE_INIT_MARKED (TREE_PURPOSE (result)))
+ {
+ my_friendly_abort (47);
+ expand_aggr_vbase_init_1 (TREE_PURPOSE (result), exp,
+ TREE_OPERAND (TREE_VALUE (result), 0),
+ init_list);
+ }
+ }
+}
+
+/* Subroutine to perform parser actions for member initialization.
+ S_ID is the scoped identifier.
+ NAME is the name of the member.
+ INIT is the initializer, or `void_type_node' if none. */
+void
+do_member_init (s_id, name, init)
+ tree s_id, name, init;
+{
+ tree binfo, base;
+
+ if (current_class_type == NULL_TREE
+ || ! is_aggr_typedef (s_id, 1))
+ return;
+ binfo = get_binfo (IDENTIFIER_TYPE_VALUE (s_id),
+ current_class_type, 1);
+ if (binfo == error_mark_node)
+ return;
+ if (binfo == 0)
+ {
+ error_not_base_type (IDENTIFIER_TYPE_VALUE (s_id), current_class_type);
+ return;
+ }
+
+ base = convert_pointer_to (binfo, current_class_decl);
+ expand_member_init (build_indirect_ref (base, NULL_PTR), name, init);
+}
+
+/* Function to give error message if member initialization specification
+ is erroneous. FIELD is the member we decided to initialize.
+ TYPE is the type for which the initialization is being performed.
+ FIELD must be a member of TYPE, or the base type from which FIELD
+ comes must not need a constructor.
+
+ MEMBER_NAME is the name of the member. */
+
+static int
+member_init_ok_or_else (field, type, member_name)
+ tree field;
+ tree type;
+ char *member_name;
+{
+ if (field == error_mark_node)
+ return 0;
+ if (field == NULL_TREE)
+ {
+ cp_error ("class `%T' does not have any field named `%s'", type,
+ member_name);
+ return 0;
+ }
+ if (DECL_CONTEXT (field) != type
+ && TYPE_NEEDS_CONSTRUCTING (DECL_CONTEXT (field)))
+ {
+ cp_error ("member `%D' comes from base class needing constructor",
+ field);
+ return 0;
+ }
+ return 1;
+}
+
+/* If NAME is a viable field name for the aggregate DECL,
+ and PARMS is a viable parameter list, then expand an _EXPR
+ which describes this initialization.
+
+ Note that we do not need to chase through the class's base classes
+ to look for NAME, because if it's in that list, it will be handled
+ by the constructor for that base class.
+
+ We do not yet have a fixed-point finder to instantiate types
+ being fed to overloaded constructors. If there is a unique
+ constructor, then argument types can be got from that one.
+
+ If INIT is non-NULL, then the initialization should
+ be placed in `current_base_init_list', where it will be processed
+ by `emit_base_init'. */
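+/* For example (a sketch): given
+
+ struct D : B { X m; D (int i) : B (i), m (i) { } };
+
+ `B (i)' arrives here with NAME carrying a type value and is queued
+ on current_base_init_list, while `m (i)' is queued on
+ current_member_init_list; emit_base_init later consumes both. */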
+void
+expand_member_init (exp, name, init)
+ tree exp, name, init;
+{
+ extern tree ptr_type_node; /* should be in tree.h */
+
+ tree basetype = NULL_TREE, field;
+ tree parm;
+ tree rval = error_mark_node, type;
+ tree actual_name;
+
+ if (exp == NULL_TREE)
+ return; /* complain about this later */
+
+ type = TYPE_MAIN_VARIANT (TREE_TYPE (exp));
+
+ if (name == NULL_TREE && IS_AGGR_TYPE (type))
+ switch (CLASSTYPE_N_BASECLASSES (type))
+ {
+ case 0:
+ error ("base class initializer specified, but no base class to initialize");
+ return;
+ case 1:
+ basetype = TYPE_BINFO_BASETYPE (type, 0);
+ break;
+ default:
+ error ("initializer for unnamed base class ambiguous");
+ cp_error ("(type `%T' uses multiple inheritance)", type);
+ return;
+ }
+
+ if (init)
+ {
+ /* The grammar should not allow fields which have names
+ that are TYPENAMEs. Therefore, if the field has
+ a non-NULL TREE_TYPE, we may assume that this is an
+ attempt to initialize a base class member of the current
+ type. Otherwise, it is an attempt to initialize a
+ member field. */
+
+ if (init == void_type_node)
+ init = NULL_TREE;
+
+ if (name == NULL_TREE || IDENTIFIER_HAS_TYPE_VALUE (name))
+ {
+ tree base_init;
+
+ if (name == NULL_TREE)
+ {
+/*
+ if (basetype)
+ name = TYPE_IDENTIFIER (basetype);
+ else
+ {
+ error ("no base class to initialize");
+ return;
+ }
+*/
+ }
+ else
+ {
+ basetype = IDENTIFIER_TYPE_VALUE (name);
+ if (basetype != type
+ && ! binfo_member (basetype, TYPE_BINFO (type))
+ && ! binfo_member (basetype, CLASSTYPE_VBASECLASSES (type)))
+ {
+ if (IDENTIFIER_CLASS_VALUE (name))
+ goto try_member;
+ if (TYPE_USES_VIRTUAL_BASECLASSES (type))
+ error ("type `%s' is not an immediate or virtual basetype for `%s'",
+ IDENTIFIER_POINTER (name),
+ TYPE_NAME_STRING (type));
+ else
+ error ("type `%s' is not an immediate basetype for `%s'",
+ IDENTIFIER_POINTER (name),
+ TYPE_NAME_STRING (type));
+ return;
+ }
+ }
+
+ if (purpose_member (name, current_base_init_list))
+ {
+ error ("base class `%s' already initialized",
+ IDENTIFIER_POINTER (name));
+ return;
+ }
+
+ base_init = build_tree_list (name, init);
+ TREE_TYPE (base_init) = basetype;
+ current_base_init_list = chainon (current_base_init_list, base_init);
+ }
+ else
+ {
+ tree member_init;
+
+ try_member:
+ field = lookup_field (type, name, 1, 0);
+
+ if (! member_init_ok_or_else (field, type, IDENTIFIER_POINTER (name)))
+ return;
+
+ if (purpose_member (name, current_member_init_list))
+ {
+ error ("field `%s' already initialized", IDENTIFIER_POINTER (name));
+ return;
+ }
+
+ member_init = build_tree_list (name, init);
+ TREE_TYPE (member_init) = TREE_TYPE (field);
+ current_member_init_list = chainon (current_member_init_list, member_init);
+ }
+ return;
+ }
+ else if (name == NULL_TREE)
+ {
+ compiler_error ("expand_member_init: name == NULL_TREE");
+ return;
+ }
+
+ basetype = type;
+ field = lookup_field (basetype, name, 0, 0);
+
+ if (! member_init_ok_or_else (field, basetype, IDENTIFIER_POINTER (name)))
+ return;
+
+ /* now see if there is a constructor for this type
+ which will take these args. */
+
+ if (TYPE_HAS_CONSTRUCTOR (TREE_TYPE (field)))
+ {
+ tree parmtypes, fndecl;
+
+ if (TREE_CODE (exp) == VAR_DECL || TREE_CODE (exp) == PARM_DECL)
+ {
+ /* just know that we've seen something for this node */
+ DECL_INITIAL (exp) = error_mark_node;
+ TREE_USED (exp) = 1;
+ }
+ type = TYPE_MAIN_VARIANT (TREE_TYPE (field));
+ actual_name = TYPE_IDENTIFIER (type);
+ parm = build_component_ref (exp, name, 0, 0);
+
+ /* Now get to the constructor. */
+ fndecl = TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (type), 0);
+ /* Get past destructor, if any. */
+ if (TYPE_HAS_DESTRUCTOR (type))
+ fndecl = DECL_CHAIN (fndecl);
+
+ if (fndecl)
+ my_friendly_assert (TREE_CODE (fndecl) == FUNCTION_DECL, 209);
+
+ /* If the field is unique, we can use the parameter
+ types to guide possible type instantiation. */
+ if (DECL_CHAIN (fndecl) == NULL_TREE)
+ {
+ /* There was a confusion here between
+ FIELD and FNDECL. The following code
+ should be correct, but abort is here
+ to make sure. */
+ my_friendly_abort (48);
+ parmtypes = FUNCTION_ARG_CHAIN (fndecl);
+ }
+ else
+ {
+ parmtypes = NULL_TREE;
+ fndecl = NULL_TREE;
+ }
+
+ init = convert_arguments (parm, parmtypes, NULL_TREE, fndecl, LOOKUP_NORMAL);
+ if (init == NULL_TREE || TREE_TYPE (init) != error_mark_node)
+ rval = build_method_call (NULL_TREE, actual_name, init, NULL_TREE, LOOKUP_NORMAL);
+ else
+ return;
+
+ if (rval != error_mark_node)
+ {
+ /* Now, fill in the first parm with our guy */
+ TREE_VALUE (TREE_OPERAND (rval, 1))
+ = build_unary_op (ADDR_EXPR, parm, 0);
+ TREE_TYPE (rval) = ptr_type_node;
+ TREE_SIDE_EFFECTS (rval) = 1;
+ }
+ }
+ else if (TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (field)))
+ {
+ parm = build_component_ref (exp, name, 0, 0);
+ expand_aggr_init (parm, NULL_TREE, 0);
+ rval = error_mark_node;
+ }
+
+ /* Now initialize the member. It does not have to
+ be of aggregate type to receive initialization. */
+ if (rval != error_mark_node)
+ expand_expr_stmt (rval);
+}
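+
+/* An illustrative sketch (hypothetical user code, not from this file)
+   of what the function above sorts apart: base initializers are
+   chained onto current_base_init_list and member initializers onto
+   current_member_init_list.
+
+	struct B { B (int); };
+	struct D : B
+	{
+	  int i;
+	  D (int n) : B (n), i (n) {}
+	};
+
+   Here `B (n)' lands on the base list and `i (n)' on the member list.  */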
+
+/* This is like `expand_member_init', only it stores one aggregate
+ value into another.
+
+ INIT comes in two flavors: it is either a value which
+ is to be stored in EXP, or it is a parameter list
+ to go to a constructor, which will operate on EXP.
+ If `init' is a CONSTRUCTOR, then we emit a warning message,
+ explaining that such initializations are illegal.
+
+ ALIAS_THIS is nonzero iff we are initializing something which is
+ essentially an alias for C_C_D. In this case, the base constructor
+ may move it on us, and we must keep track of such deviations.
+
+ If INIT resolves to a CALL_EXPR which happens to return
+ something of the type we are looking for, then we know
+ that we can safely use that call to perform the
+ initialization.
+
+ The virtual function table pointer cannot be set up here, because
+ we do not really know its type.
+
+ Virtual baseclass pointers are also set up here.
+
+ This never calls operator=().
+
+ When initializing, nothing is CONST.
+
+ A default copy constructor may have to be used to perform the
+ initialization.
+
+ A constructor or a conversion operator may have to be used to
+ perform the initialization, but not both, as it would be ambiguous.
+ */
+
+void
+expand_aggr_init (exp, init, alias_this)
+ tree exp, init;
+ int alias_this;
+{
+ tree type = TREE_TYPE (exp);
+ int was_const = TREE_READONLY (exp);
+
+ if (init == error_mark_node)
+ return;
+
+ TREE_READONLY (exp) = 0;
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ /* Must arrange to initialize each element of EXP
+ from elements of INIT. */
+ int was_const_elts = TYPE_READONLY (TREE_TYPE (type));
+ tree itype = init ? TREE_TYPE (init) : NULL_TREE;
+ if (was_const_elts)
+ TREE_TYPE (exp) = TYPE_MAIN_VARIANT (type);
+ if (init && TREE_TYPE (init) == NULL_TREE)
+ {
+ /* Handle bad initializers like:
+ class COMPLEX {
+ public:
+ double re, im;
+ COMPLEX(double r = 0.0, double i = 0.0) {re = r; im = i;};
+ ~COMPLEX() {};
+ };
+
+ int main(int argc, char **argv) {
+ COMPLEX zees(1.0, 0.0)[10];
+ }
+ */
+ error ("bad array initializer");
+ return;
+ }
+ expand_vec_init (exp, exp, array_type_nelts (type), init,
+ init && comptypes (TREE_TYPE (init), TREE_TYPE (exp), 1));
+ TREE_READONLY (exp) = was_const;
+ TREE_TYPE (exp) = type;
+ if (init) TREE_TYPE (init) = itype;
+ return;
+ }
+
+ if (TREE_CODE (exp) == VAR_DECL || TREE_CODE (exp) == PARM_DECL)
+ /* just know that we've seen something for this node */
+ TREE_USED (exp) = 1;
+
+#if 0
+ /* If initializing from a GNU C CONSTRUCTOR, consider the elts in the
+ constructor as parameters to an implicit GNU C++ constructor. */
+ if (init && TREE_CODE (init) == CONSTRUCTOR
+ && TYPE_HAS_CONSTRUCTOR (type)
+ && TREE_TYPE (init) == type)
+ init = CONSTRUCTOR_ELTS (init);
+#endif
+ expand_aggr_init_1 (TYPE_BINFO (type), exp, exp,
+ init, alias_this, LOOKUP_NORMAL);
+ TREE_READONLY (exp) = was_const;
+}
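+
+/* A sketch of the two flavors of INIT mentioned in the comment above
+   (hypothetical user code, for illustration only):
+
+	struct X { X (int, int); };
+	X a (1, 2);	// INIT is a parameter list for a constructor
+	X b = a;	// INIT is a value to be stored in EXP
+ */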
+
+static void
+expand_default_init (binfo, true_exp, exp, type, init, alias_this, flags)
+ tree binfo;
+ tree true_exp, exp;
+ tree type;
+ tree init;
+ int alias_this;
+ int flags;
+{
+  /* It fails because there may not be a constructor which takes
+     its own type as the first (or only) parameter, but which does
+     take other types via a conversion.  So, if the thing initializing
+     the expression is a unit element of type X, first try X(X&),
+     followed by initialization by X.  If neither of these works
+     out, then look hard.  */
+ tree rval;
+ tree parms;
+ int xxref_init_possible;
+
+ if (init == NULL_TREE || TREE_CODE (init) == TREE_LIST)
+ {
+ parms = init;
+ if (parms) init = TREE_VALUE (parms);
+ }
+ else if (TREE_CODE (init) == INDIRECT_REF && TREE_HAS_CONSTRUCTOR (init))
+ {
+ rval = convert_for_initialization (exp, type, init, 0, 0, 0, 0);
+ expand_expr_stmt (rval);
+ return;
+ }
+ else
+ parms = build_tree_list (NULL_TREE, init);
+
+ if (TYPE_HAS_INIT_REF (type)
+ || init == NULL_TREE
+ || TREE_CHAIN (parms) != NULL_TREE)
+ xxref_init_possible = 0;
+ else
+ {
+ xxref_init_possible = LOOKUP_SPECULATIVELY;
+ flags &= ~LOOKUP_COMPLAIN;
+ }
+
+ if (TYPE_USES_VIRTUAL_BASECLASSES (type))
+ {
+ if (true_exp == exp)
+ parms = tree_cons (NULL_TREE, integer_one_node, parms);
+ else
+ parms = tree_cons (NULL_TREE, integer_zero_node, parms);
+ flags |= LOOKUP_HAS_IN_CHARGE;
+ }
+
+ rval = build_method_call (exp, constructor_name_full (type),
+ parms, binfo, flags|xxref_init_possible);
+ if (rval == NULL_TREE && xxref_init_possible)
+ {
+      /* It is an error to implement a default copy constructor in
+	 certain cases (see ARM 12.8 for details), one case being when
+	 another copy constructor already exists.  */
+ tree init_type = TREE_TYPE (init);
+ if (TREE_CODE (init_type) == REFERENCE_TYPE)
+ init_type = TREE_TYPE (init_type);
+ if (TYPE_MAIN_VARIANT (init_type) == TYPE_MAIN_VARIANT (type)
+ || (IS_AGGR_TYPE (init_type)
+ && UNIQUELY_DERIVED_FROM_P (type, init_type)))
+ {
+ if (type == BINFO_TYPE (binfo)
+ && TYPE_USES_VIRTUAL_BASECLASSES (type))
+ {
+ tree addr = build_unary_op (ADDR_EXPR, exp, 0);
+ expand_aggr_vbase_init (binfo, exp, addr, NULL_TREE);
+
+ expand_indirect_vtbls_init (binfo, exp, addr, 1);
+ }
+ expand_expr_stmt (build_modify_expr (exp, INIT_EXPR, init));
+ return;
+ }
+ else
+ rval = build_method_call (exp, constructor_name_full (type), parms,
+ binfo, flags);
+ }
+
+ /* Private, protected, or otherwise unavailable. */
+ if (rval == error_mark_node && (flags&LOOKUP_COMPLAIN))
+ cp_error ("in base initialization for class `%T'", binfo);
+ /* A valid initialization using constructor. */
+ else if (rval != error_mark_node && rval != NULL_TREE)
+ {
+ /* p. 222: if the base class assigns to `this', then that
+ value is used in the derived class. */
+ if ((flag_this_is_variable & 1) && alias_this)
+ {
+ TREE_TYPE (rval) = TREE_TYPE (current_class_decl);
+ expand_assignment (current_class_decl, rval, 0, 0);
+ }
+ else
+ expand_expr_stmt (rval);
+ }
+ else if (parms && TREE_CHAIN (parms) == NULL_TREE)
+ {
+      /* If we are initializing one aggregate value
+	 from another, and though there are constructors,
+	 none of them accept the initializer, just do a bitwise
+	 copy.
+
+	 The above sounds wrong, ``If a class has any copy
+	 constructor defined, the default copy constructor will
+	 not be generated.'' 12.8 Copying Class Objects  (mrs)
+
+	 @@ This should reject an initializer which a constructor
+	 @@ rejected on access grounds, but there is
+	 @@ no way right now to recognize that case with
+	 @@ just `error_mark_node'.  */
+ tree itype;
+ init = TREE_VALUE (parms);
+ itype = TREE_TYPE (init);
+ if (TREE_CODE (itype) == REFERENCE_TYPE)
+ {
+ init = convert_from_reference (init);
+ itype = TREE_TYPE (init);
+ }
+ itype = TYPE_MAIN_VARIANT (itype);
+
+ /* This is currently how the default X(X&) constructor
+ is implemented. */
+ if (comptypes (TYPE_MAIN_VARIANT (type), itype, 0))
+ {
+#if 0
+ warning ("bitwise copy in initialization of type `%s'",
+ TYPE_NAME_STRING (type));
+#endif
+ rval = build (INIT_EXPR, type, exp, init);
+ expand_expr_stmt (rval);
+ }
+ else
+ {
+ cp_error ("in base initialization for class `%T',", binfo);
+ cp_error ("invalid initializer to constructor for type `%T'", type);
+ return;
+ }
+ }
+ else
+ {
+ if (init == NULL_TREE)
+ my_friendly_assert (parms == NULL_TREE, 210);
+ if (parms == NULL_TREE && TREE_VIA_VIRTUAL (binfo))
+ cp_error ("virtual baseclass `%T' does not have default initializer", binfo);
+ else
+ {
+ cp_error ("in base initialization for class `%T',", binfo);
+ /* This will make an error message for us. */
+ build_method_call (exp, constructor_name_full (type), parms, binfo,
+ (TYPE_USES_VIRTUAL_BASECLASSES (type)
+ ? LOOKUP_NORMAL|LOOKUP_HAS_IN_CHARGE
+ : LOOKUP_NORMAL));
+ }
+ return;
+ }
+  /* The constructor has been called, but the vtables may still be for
+     TYPE rather than for the type of the complete object.  */
+}
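+
+/* Illustration (hypothetical) of the bitwise-copy fallback above:
+   constructors exist, none accepts the initializer, and the main
+   variants of the types match, so an INIT_EXPR performs the default
+   X(X&) copy.
+
+	struct S { S (int); int i; };
+	S a (3);
+	S b = a;	// no S(S&) declared; copied via INIT_EXPR
+ */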
+
+/* This function is responsible for initializing EXP with INIT
+ (if any).
+
+   BINFO is the binfo of the type for which we are performing the
+   initialization.  For example, suppose W is a virtual base class of
+   A and B, and C derives from both (C : A, B).
+   If we are initializing B, then the W subobject must get B's W vtable,
+   whereas if we are initializing C, it must get C's W vtable.
+
+ TRUE_EXP is nonzero if it is the true expression being initialized.
+ In this case, it may be EXP, or may just contain EXP. The reason we
+ need this is because if EXP is a base element of TRUE_EXP, we
+ don't necessarily know by looking at EXP where its virtual
+ baseclass fields should really be pointing. But we do know
+ from TRUE_EXP. In constructors, we don't know anything about
+ the value being initialized.
+
+ ALIAS_THIS serves the same purpose it serves for expand_aggr_init.
+
+   FLAGS is just passed to `build_method_call'.  See that function for
+ its description. */
+
+static void
+expand_aggr_init_1 (binfo, true_exp, exp, init, alias_this, flags)
+ tree binfo;
+ tree true_exp, exp;
+ tree init;
+ int alias_this;
+ int flags;
+{
+ tree type = TREE_TYPE (exp);
+ tree init_type = NULL_TREE;
+
+ my_friendly_assert (init != error_mark_node && type != error_mark_node, 211);
+
+ /* Use a function returning the desired type to initialize EXP for us.
+ If the function is a constructor, and its first argument is
+ NULL_TREE, know that it was meant for us--just slide exp on
+ in and expand the constructor. Constructors now come
+ as TARGET_EXPRs. */
+ if (init)
+ {
+ tree init_list = NULL_TREE;
+
+ if (TREE_CODE (init) == TREE_LIST)
+ {
+ init_list = init;
+ if (TREE_CHAIN (init) == NULL_TREE)
+ init = TREE_VALUE (init);
+ }
+
+ init_type = TREE_TYPE (init);
+
+ if (TREE_CODE (init) != TREE_LIST)
+ {
+ if (TREE_CODE (init_type) == ERROR_MARK)
+ return;
+
+#if 0
+	  /* These lines were found troublesome 5/11/89.  */
+ if (TREE_CODE (init_type) == REFERENCE_TYPE)
+ init_type = TREE_TYPE (init_type);
+#endif
+
+ /* This happens when we use C++'s functional cast notation.
+ If the types match, then just use the TARGET_EXPR
+ directly. Otherwise, we need to create the initializer
+ separately from the object being initialized. */
+ if (TREE_CODE (init) == TARGET_EXPR)
+ {
+ if (init_type == type)
+ {
+ if (TREE_CODE (exp) == VAR_DECL
+ || TREE_CODE (exp) == RESULT_DECL)
+ /* Unify the initialization targets. */
+ DECL_RTL (TREE_OPERAND (init, 0)) = DECL_RTL (exp);
+ else
+ DECL_RTL (TREE_OPERAND (init, 0)) = expand_expr (exp, NULL_RTX, 0, 0);
+
+ expand_expr_stmt (init);
+ return;
+ }
+ else
+ {
+ init = TREE_OPERAND (init, 1);
+ init = build (CALL_EXPR, init_type,
+ TREE_OPERAND (init, 0), TREE_OPERAND (init, 1), 0);
+ TREE_SIDE_EFFECTS (init) = 1;
+#if 0
+ TREE_RAISES (init) = ??
+#endif
+ if (init_list)
+ TREE_VALUE (init_list) = init;
+ }
+ }
+
+ if (init_type == type && TREE_CODE (init) == CALL_EXPR
+#if 0
+ /* It is legal to directly initialize from a CALL_EXPR
+ without going through X(X&), apparently. */
+ && ! TYPE_GETS_INIT_REF (type)
+#endif
+ )
+ {
+ /* A CALL_EXPR is a legitimate form of initialization, so
+ we should not print this warning message. */
+#if 0
+ /* Should have gone away due to 5/11/89 change. */
+ if (TREE_CODE (TREE_TYPE (init)) == REFERENCE_TYPE)
+ init = convert_from_reference (init);
+#endif
+ expand_assignment (exp, init, 0, 0);
+ if (exp == DECL_RESULT (current_function_decl))
+ {
+ /* Failing this assertion means that the return value
+		 from the function receives multiple initializations.  */
+ my_friendly_assert (DECL_INITIAL (exp) == NULL_TREE
+ || DECL_INITIAL (exp) == error_mark_node,
+ 212);
+ DECL_INITIAL (exp) = init;
+ }
+ return;
+ }
+ else if (init_type == type
+ && TREE_CODE (init) == COND_EXPR)
+ {
+ /* Push value to be initialized into the cond, where possible.
+ Avoid spurious warning messages when initializing the
+ result of this function. */
+ TREE_OPERAND (init, 1)
+ = build_modify_expr (exp, INIT_EXPR, TREE_OPERAND (init, 1));
+ if (exp == DECL_RESULT (current_function_decl))
+ DECL_INITIAL (exp) = NULL_TREE;
+ TREE_OPERAND (init, 2)
+ = build_modify_expr (exp, INIT_EXPR, TREE_OPERAND (init, 2));
+ if (exp == DECL_RESULT (current_function_decl))
+ DECL_INITIAL (exp) = init;
+ TREE_SIDE_EFFECTS (init) = 1;
+ expand_expr (init, const0_rtx, VOIDmode, 0);
+ free_temp_slots ();
+ return;
+ }
+ }
+
+ /* We did not know what we were initializing before. Now we do. */
+ if (TREE_CODE (init) == TARGET_EXPR)
+ {
+ tree tmp = TREE_OPERAND (TREE_OPERAND (init, 1), 1);
+
+ if (TREE_CODE (TREE_VALUE (tmp)) == NOP_EXPR
+ && TREE_OPERAND (TREE_VALUE (tmp), 0) == integer_zero_node)
+ {
+ /* In order for this to work for RESULT_DECLs, if their
+ type has a constructor, then they must be BLKmode
+ so that they will be meaningfully addressable. */
+ tree arg = build_unary_op (ADDR_EXPR, exp, 0);
+ init = TREE_OPERAND (init, 1);
+ init = build (CALL_EXPR, build_pointer_type (TREE_TYPE (init)),
+ TREE_OPERAND (init, 0), TREE_OPERAND (init, 1), 0);
+ TREE_SIDE_EFFECTS (init) = 1;
+#if 0
+ TREE_RAISES (init) = ??
+#endif
+ TREE_VALUE (TREE_OPERAND (init, 1))
+ = convert_pointer_to (TREE_TYPE (TREE_TYPE (TREE_VALUE (tmp))), arg);
+
+ if (alias_this)
+ {
+ expand_assignment (current_function_decl, init, 0, 0);
+ return;
+ }
+ if (exp == DECL_RESULT (current_function_decl))
+ {
+ if (DECL_INITIAL (DECL_RESULT (current_function_decl)))
+ fatal ("return value from function receives multiple initializations");
+ DECL_INITIAL (exp) = init;
+ }
+ expand_expr_stmt (init);
+ return;
+ }
+ }
+
+ if (TREE_CODE (exp) == VAR_DECL
+ && TREE_CODE (init) == CONSTRUCTOR
+ && TREE_HAS_CONSTRUCTOR (init))
+ {
+ tree t = store_init_value (exp, init);
+ if (!t)
+ {
+ expand_decl_init (exp);
+ return;
+ }
+ t = build (INIT_EXPR, type, exp, init);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr_stmt (t);
+ return;
+ }
+
+ /* Handle this case: when calling a constructor: xyzzy foo(bar);
+ which really means: xyzzy foo = bar; Ugh!
+
+ More useful for this case: xyzzy *foo = new xyzzy (bar); */
+
+ if (! TYPE_NEEDS_CONSTRUCTING (type) && ! IS_AGGR_TYPE (type))
+ {
+ if (init_list && TREE_CHAIN (init_list))
+ {
+ warning ("initializer list being treated as compound expression");
+ init = convert (type, build_compound_expr (init_list));
+ if (init == error_mark_node)
+ return;
+ }
+
+ expand_assignment (exp, init, 0, 0);
+
+ return;
+ }
+ /* See whether we can go through a type conversion operator.
+ This wins over going through a non-existent constructor. If
+ there is a constructor, it is ambiguous. */
+ if (TREE_CODE (init) != TREE_LIST)
+ {
+ tree ttype = TREE_CODE (init_type) == REFERENCE_TYPE
+ ? TREE_TYPE (init_type) : init_type;
+
+ if (ttype != type && IS_AGGR_TYPE (ttype))
+ {
+ tree rval = build_type_conversion (CONVERT_EXPR, type, init, 0);
+
+ if (rval)
+ {
+	      /* See if there is a constructor for ``type'' that takes a
+ ``ttype''-typed object. */
+ tree parms = build_tree_list (NULL_TREE, init);
+ tree as_cons = NULL_TREE;
+ if (TYPE_HAS_CONSTRUCTOR (type))
+ as_cons = build_method_call (exp, constructor_name_full (type),
+ parms, binfo,
+ LOOKUP_SPECULATIVELY|LOOKUP_NO_CONVERSION);
+ if (as_cons != NULL_TREE && as_cons != error_mark_node)
+ /* ANSI C++ June 5 1992 WP 12.3.2.6.1 */
+ cp_error ("ambiguity between conversion to `%T' and constructor",
+ type);
+ else
+ expand_assignment (exp, rval, 0, 0);
+ return;
+ }
+ }
+ }
+ }
+
+  /* Handle default copy constructors here; it does not matter whether
+     there is a constructor or not.  */
+ if (type == init_type && IS_AGGR_TYPE (type)
+ && init && TREE_CODE (init) != TREE_LIST)
+ expand_default_init (binfo, true_exp, exp, type, init, alias_this, flags);
+ /* Not sure why this is here... */
+ else if (TYPE_HAS_CONSTRUCTOR (type))
+ expand_default_init (binfo, true_exp, exp, type, init, alias_this, flags);
+ else if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ if (TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (type)))
+ expand_vec_init (exp, exp, array_type_nelts (type), init, 0);
+ else if (TYPE_VIRTUAL_P (TREE_TYPE (type)))
+ sorry ("arrays of objects with virtual functions but no constructors");
+ }
+ else
+ expand_recursive_init (binfo, true_exp, exp, init,
+ CLASSTYPE_BASE_INIT_LIST (type), alias_this);
+}
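+
+/* Illustration (hypothetical) of the functional-cast case handled
+   above: a TARGET_EXPR whose type matches EXP's type initializes EXP
+   directly, by unifying the two initialization targets.
+
+	struct X { X (int); };
+	X x = X (5);	// TARGET_EXPR of type X
+ */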
+
+/* A pointer which holds the initializer.  The first call to
+   expand_aggr_init_1 consumes the value pointed to; init_ptr is then
+   redirected to point at init_null.  */
+static tree *init_ptr, init_null;
+
+/* Subroutine of expand_recursive_init:
+
+ ADDR is the address of the expression being initialized.
+ INIT_LIST is the cons-list of initializations to be performed.
+ ALIAS_THIS is its same, lovable self. */
+static void
+expand_recursive_init_1 (binfo, true_exp, addr, init_list, alias_this)
+ tree binfo, true_exp, addr;
+ tree init_list;
+ int alias_this;
+{
+ while (init_list)
+ {
+ if (TREE_PURPOSE (init_list))
+ {
+ if (TREE_CODE (TREE_PURPOSE (init_list)) == FIELD_DECL)
+ {
+ tree member = TREE_PURPOSE (init_list);
+ tree subexp = build_indirect_ref (convert_pointer_to (TREE_VALUE (init_list), addr), NULL_PTR);
+ tree member_base = build (COMPONENT_REF, TREE_TYPE (member), subexp, member);
+ if (IS_AGGR_TYPE (TREE_TYPE (member)))
+ expand_aggr_init (member_base, DECL_INITIAL (member), 0);
+ else if (TREE_CODE (TREE_TYPE (member)) == ARRAY_TYPE
+ && TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (member)))
+ {
+ member_base = save_expr (default_conversion (member_base));
+ expand_vec_init (member, member_base,
+ array_type_nelts (TREE_TYPE (member)),
+ DECL_INITIAL (member), 0);
+ }
+ else
+ expand_expr_stmt (build_modify_expr (member_base, INIT_EXPR, DECL_INITIAL (member)));
+ }
+ else if (TREE_CODE (TREE_PURPOSE (init_list)) == TREE_LIST)
+ {
+ expand_recursive_init_1 (binfo, true_exp, addr, TREE_PURPOSE (init_list), alias_this);
+ expand_recursive_init_1 (binfo, true_exp, addr, TREE_VALUE (init_list), alias_this);
+ }
+ else if (TREE_CODE (TREE_PURPOSE (init_list)) == ERROR_MARK)
+ {
+ /* Only initialize the virtual function tables if we
+ are initializing the ultimate users of those vtables. */
+ if (TREE_VALUE (init_list))
+ {
+	      /* We have to ensure that the first argument to
+ expand_virtual_init is in binfo's hierarchy. */
+ /* Is it the case that this is exactly the right binfo? */
+ /* If it is ok, then fixup expand_virtual_init, to make
+ it much simpler. */
+ expand_virtual_init (get_binfo (TREE_VALUE (init_list), binfo, 0),
+ addr);
+ if (TREE_VALUE (init_list) == binfo
+ && TYPE_USES_VIRTUAL_BASECLASSES (BINFO_TYPE (binfo)))
+ expand_indirect_vtbls_init (binfo, true_exp, addr, 1);
+ }
+ }
+ else
+ my_friendly_abort (49);
+ }
+ else if (TREE_VALUE (init_list)
+ && TREE_CODE (TREE_VALUE (init_list)) == TREE_VEC)
+ {
+ tree subexp = build_indirect_ref (convert_pointer_to (TREE_VALUE (init_list), addr), NULL_PTR);
+ expand_aggr_init_1 (binfo, true_exp, subexp, *init_ptr,
+ alias_this && BINFO_OFFSET_ZEROP (TREE_VALUE (init_list)),
+ LOOKUP_COMPLAIN);
+
+ /* INIT_PTR is used up. */
+ init_ptr = &init_null;
+ }
+ else
+ my_friendly_abort (50);
+ init_list = TREE_CHAIN (init_list);
+ }
+}
+
+/* Initialize EXP with INIT. Type EXP does not have a constructor,
+ but it has a baseclass with a constructor or a virtual function
+ table which needs initializing.
+
+ INIT_LIST is a cons-list describing what parts of EXP actually
+ need to be initialized. INIT is given to the *unique*, first
+ constructor within INIT_LIST. If there are multiple first
+ constructors, such as with multiple inheritance, INIT must
+ be zero or an ambiguity error is reported.
+
+ ALIAS_THIS is passed from `expand_aggr_init'. See comments
+ there. */
+
+static void
+expand_recursive_init (binfo, true_exp, exp, init, init_list, alias_this)
+ tree binfo, true_exp, exp, init;
+ tree init_list;
+ int alias_this;
+{
+ tree *old_init_ptr = init_ptr;
+ tree addr = build_unary_op (ADDR_EXPR, exp, 0);
+ init_ptr = &init;
+
+ if (true_exp == exp && TYPE_USES_VIRTUAL_BASECLASSES (BINFO_TYPE (binfo)))
+ {
+ expand_aggr_vbase_init (binfo, exp, addr, init_list);
+ expand_indirect_vtbls_init (binfo, true_exp, addr, 1);
+ }
+ expand_recursive_init_1 (binfo, true_exp, addr, init_list, alias_this);
+
+ if (*init_ptr)
+ {
+ tree type = TREE_TYPE (exp);
+
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+ if (IS_AGGR_TYPE (type))
+ cp_error ("unexpected argument to constructor `%T'", type);
+ else
+ error ("unexpected argument to constructor");
+ }
+ init_ptr = old_init_ptr;
+}
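+
+/* Illustration (hypothetical) of when expand_recursive_init comes into
+   play: the type itself has no constructor, but a base class does.
+
+	struct B { B (); };
+	struct D : B { int i; };	// no D::D, but B needs constructing
+ */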
+
+/* Return nonzero if NAME is the name of a user-defined aggregate
+   type.  If OR_ELSE is nonzero, give an error message when it is not.  */
+int
+is_aggr_typedef (name, or_else)
+ tree name;
+ int or_else;
+{
+ tree type;
+
+ if (name == error_mark_node)
+ return 0;
+
+ if (IDENTIFIER_HAS_TYPE_VALUE (name))
+ type = IDENTIFIER_TYPE_VALUE (name);
+ else
+ {
+ if (or_else)
+ cp_error ("`%T' is not an aggregate typedef", name);
+ return 0;
+ }
+
+ if (! IS_AGGR_TYPE (type)
+ && TREE_CODE (type) != TEMPLATE_TYPE_PARM)
+ {
+ if (or_else)
+ cp_error ("`%T' is not an aggregate type", type);
+ return 0;
+ }
+ return 1;
+}
+
+/* Like is_aggr_typedef, but returns the type if successful.  */
+tree
+get_aggr_from_typedef (name, or_else)
+ tree name;
+ int or_else;
+{
+ tree type;
+
+ if (name == error_mark_node)
+ return NULL_TREE;
+
+ if (IDENTIFIER_HAS_TYPE_VALUE (name))
+ type = IDENTIFIER_TYPE_VALUE (name);
+ else
+ {
+ if (or_else)
+ cp_error ("`%T' fails to be an aggregate typedef", name);
+ return NULL_TREE;
+ }
+
+ if (! IS_AGGR_TYPE (type)
+ && TREE_CODE (type) != TEMPLATE_TYPE_PARM)
+ {
+ if (or_else)
+ cp_error ("type `%T' is of non-aggregate type", type);
+ return NULL_TREE;
+ }
+ return type;
+}
+
+tree
+get_type_value (name)
+ tree name;
+{
+ if (name == error_mark_node)
+ return NULL_TREE;
+
+ if (IDENTIFIER_HAS_TYPE_VALUE (name))
+ return IDENTIFIER_TYPE_VALUE (name);
+ else
+ return NULL_TREE;
+}
+
+
+/* This code could just as well go in `class.c', but is placed here for
+ modularity. */
+
+/* For an expression of the form CNAME :: NAME (PARMLIST), build
+ the appropriate function call. */
+tree
+build_member_call (cname, name, parmlist)
+ tree cname, name, parmlist;
+{
+ tree type, t;
+ tree method_name = name;
+ int dtor = 0;
+ int dont_use_this = 0;
+ tree basetype_path, decl;
+
+ if (TREE_CODE (method_name) == BIT_NOT_EXPR)
+ {
+ method_name = TREE_OPERAND (method_name, 0);
+ dtor = 1;
+ }
+
+ if (TREE_CODE (cname) == SCOPE_REF)
+ cname = resolve_scope_to_name (NULL_TREE, cname);
+
+ if (cname == NULL_TREE || ! (type = get_aggr_from_typedef (cname, 1)))
+ return error_mark_node;
+
+ /* An operator we did not like. */
+ if (name == NULL_TREE)
+ return error_mark_node;
+
+ if (dtor)
+ {
+#if 0
+ /* Everything can explicitly call a destructor; see 12.4 */
+ if (! TYPE_HAS_DESTRUCTOR (type))
+ cp_error ("type `%#T' does not have a destructor", type);
+ else
+#endif
+ cp_error ("cannot call destructor `%T::~%T' without object", type,
+ method_name);
+ return error_mark_node;
+ }
+
+ /* No object? Then just fake one up, and let build_method_call
+ figure out what to do. */
+ if (current_class_type == 0
+ || get_base_distance (type, current_class_type, 0, &basetype_path) == -1)
+ dont_use_this = 1;
+
+ if (dont_use_this)
+ {
+ basetype_path = TYPE_BINFO (type);
+ decl = build1 (NOP_EXPR, TYPE_POINTER_TO (type), error_mark_node);
+ }
+ else if (current_class_decl == 0)
+ {
+ dont_use_this = 1;
+ decl = build1 (NOP_EXPR, TYPE_POINTER_TO (type), error_mark_node);
+ }
+ else
+ {
+ tree olddecl = current_class_decl;
+ tree oldtype = TREE_TYPE (TREE_TYPE (olddecl));
+ if (oldtype != type)
+ {
+ tree newtype = build_type_variant (type, TYPE_READONLY (oldtype),
+ TYPE_VOLATILE (oldtype));
+ decl = convert_force (build_pointer_type (newtype), olddecl);
+ }
+ else
+ decl = olddecl;
+ }
+
+ decl = build_indirect_ref (decl, NULL_PTR);
+
+  if ((t = lookup_fnfields (basetype_path, method_name, 0)))
+ return build_method_call (decl, method_name, parmlist, basetype_path,
+ LOOKUP_NORMAL|LOOKUP_NONVIRTUAL);
+ if (TREE_CODE (name) == IDENTIFIER_NODE
+ && ((t = lookup_field (TYPE_BINFO (type), name, 1, 0))))
+ {
+ if (t == error_mark_node)
+ return error_mark_node;
+ if (TREE_CODE (t) == FIELD_DECL)
+ {
+ if (dont_use_this)
+ {
+ cp_error ("invalid use of non-static field `%D'", t);
+ return error_mark_node;
+ }
+ decl = build (COMPONENT_REF, TREE_TYPE (t), decl, t);
+ }
+ else if (TREE_CODE (t) == VAR_DECL)
+ decl = t;
+ else
+ {
+ cp_error ("invalid use of member `%D'", t);
+ return error_mark_node;
+ }
+ if (TYPE_LANG_SPECIFIC (TREE_TYPE (decl))
+ && TYPE_OVERLOADS_CALL_EXPR (TREE_TYPE (decl)))
+ return build_opfncall (CALL_EXPR, LOOKUP_NORMAL, decl, parmlist, NULL_TREE);
+ return build_function_call (decl, parmlist);
+ }
+ else
+ {
+ cp_error ("no method `%T::%D'", type, name);
+ return error_mark_node;
+ }
+}
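+
+/* E.g. (illustrative only): given `struct X { int f (int); };', a use
+   such as `X::f (3)' arrives above with CNAME == X, NAME == f, and
+   PARMLIST == (3); with no object available, a fake one is built and
+   build_method_call sorts out the rest.  */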
+
+/* Build a reference to a member of an aggregate. This is not a
+ C++ `&', but really something which can have its address taken,
+ and then act as a pointer to member, for example CNAME :: FIELD
+ can have its address taken by saying & CNAME :: FIELD.
+
+ @@ Prints out lousy diagnostics for operator <typename>
+ @@ fields.
+
+ @@ This function should be rewritten and placed in search.c. */
+tree
+build_offset_ref (cname, name)
+ tree cname, name;
+{
+ tree decl, type, fnfields, fields, t = error_mark_node;
+ tree basetypes = NULL_TREE;
+ int dtor = 0;
+
+ if (TREE_CODE (cname) == SCOPE_REF)
+ cname = resolve_scope_to_name (NULL_TREE, cname);
+
+ if (cname == NULL_TREE || ! is_aggr_typedef (cname, 1))
+ return error_mark_node;
+
+ type = IDENTIFIER_TYPE_VALUE (cname);
+
+ if (TREE_CODE (name) == BIT_NOT_EXPR)
+ {
+ dtor = 1;
+ name = TREE_OPERAND (name, 0);
+ }
+
+ if (TYPE_SIZE (type) == 0)
+ {
+ t = IDENTIFIER_CLASS_VALUE (name);
+ if (t == 0)
+ {
+ cp_error ("incomplete type `%T' does not have member `%D'", type,
+ name);
+ return error_mark_node;
+ }
+ if (TREE_CODE (t) == TYPE_DECL || TREE_CODE (t) == VAR_DECL
+ || TREE_CODE (t) == CONST_DECL)
+ {
+ TREE_USED (t) = 1;
+ return t;
+ }
+ if (TREE_CODE (t) == FIELD_DECL)
+ sorry ("use of member in incomplete aggregate type");
+ else if (TREE_CODE (t) == FUNCTION_DECL)
+ sorry ("use of member function in incomplete aggregate type");
+ else
+ my_friendly_abort (52);
+ return error_mark_node;
+ }
+
+#if 0
+ if (TREE_CODE (name) == TYPE_EXPR)
+ /* Pass a TYPE_DECL to build_component_type_expr. */
+ return build_component_type_expr (TYPE_NAME (TREE_TYPE (cname)),
+ name, NULL_TREE, 1);
+#endif
+
+ fnfields = lookup_fnfields (TYPE_BINFO (type), name, 1);
+ fields = lookup_field (type, name, 0, 0);
+
+ if (fields == error_mark_node || fnfields == error_mark_node)
+ return error_mark_node;
+
+ if (current_class_type == 0
+ || get_base_distance (type, current_class_type, 0, &basetypes) == -1)
+ {
+ basetypes = TYPE_BINFO (type);
+ decl = build1 (NOP_EXPR,
+ IDENTIFIER_TYPE_VALUE (cname),
+ error_mark_node);
+ }
+ else if (current_class_decl == 0)
+ decl = build1 (NOP_EXPR, IDENTIFIER_TYPE_VALUE (cname),
+ error_mark_node);
+ else
+ decl = C_C_D;
+
+ /* A lot of this logic is now handled in lookup_field and
+ lookup_fnfield. */
+ if (fnfields)
+ {
+ basetypes = TREE_PURPOSE (fnfields);
+
+ /* Go from the TREE_BASELINK to the member function info. */
+ t = TREE_VALUE (fnfields);
+
+ if (fields)
+ {
+ if (DECL_FIELD_CONTEXT (fields) == DECL_FIELD_CONTEXT (t))
+ {
+ error ("ambiguous member reference: member `%s' defined as both field and function",
+ IDENTIFIER_POINTER (name));
+ return error_mark_node;
+ }
+ if (UNIQUELY_DERIVED_FROM_P (DECL_FIELD_CONTEXT (fields), DECL_FIELD_CONTEXT (t)))
+ ;
+ else if (UNIQUELY_DERIVED_FROM_P (DECL_FIELD_CONTEXT (t), DECL_FIELD_CONTEXT (fields)))
+ t = fields;
+ else
+ {
+ error ("ambiguous member reference: member `%s' derives from distinct classes in multiple inheritance lattice");
+ return error_mark_node;
+ }
+ }
+
+ if (t == TREE_VALUE (fnfields))
+ {
+ extern int flag_save_memoized_contexts;
+
+ /* This does not handle access checking yet. */
+ if (DECL_CHAIN (t) == NULL_TREE || dtor)
+ {
+ enum access_type access;
+
+ /* unique functions are handled easily. */
+ unique:
+ access = compute_access (basetypes, t);
+ if (access == access_protected)
+ {
+ cp_error_at ("member function `%#D' is protected", t);
+ error ("in this context");
+ return error_mark_node;
+ }
+ if (access == access_private)
+ {
+ cp_error_at ("member function `%#D' is private", t);
+ error ("in this context");
+ return error_mark_node;
+ }
+ assemble_external (t);
+ return build (OFFSET_REF, TREE_TYPE (t), decl, t);
+ }
+
+ /* overloaded functions may need more work. */
+ if (cname == name)
+ {
+ if (TYPE_HAS_DESTRUCTOR (type)
+ && DECL_CHAIN (DECL_CHAIN (t)) == NULL_TREE)
+ {
+ t = DECL_CHAIN (t);
+ goto unique;
+ }
+ }
+ /* FNFIELDS is most likely allocated on the search_obstack,
+ which will go away after this class scope. If we need
+ to save this value for later (either for memoization
+ or for use as an initializer for a static variable), then
+ do so here.
+
+ ??? The smart thing to do for the case of saving initializers
+ is to resolve them before we're done with this scope. */
+ if (!TREE_PERMANENT (fnfields)
+ && ((flag_save_memoized_contexts && global_bindings_p ())
+ || ! allocation_temporary_p ()))
+ fnfields = copy_list (fnfields);
+ t = build_tree_list (error_mark_node, fnfields);
+ TREE_TYPE (t) = build_offset_type (type, unknown_type_node);
+ return t;
+ }
+ }
+
+ /* Now that we know we are looking for a field, see if we
+ have access to that field. Lookup_field will give us the
+ error message. */
+
+ t = lookup_field (basetypes, name, 1, 0);
+
+ if (t == error_mark_node)
+ return error_mark_node;
+
+ if (t == NULL_TREE)
+ {
+ cp_error ("`%D' is not a member of type `%T'", name, type);
+ return error_mark_node;
+ }
+
+ if (TREE_CODE (t) == TYPE_DECL)
+ {
+ TREE_USED (t) = 1;
+ return t;
+ }
+ /* static class members and class-specific enum
+ values can be returned without further ado. */
+ if (TREE_CODE (t) == VAR_DECL || TREE_CODE (t) == CONST_DECL)
+ {
+ assemble_external (t);
+ TREE_USED (t) = 1;
+ return t;
+ }
+
+ /* static class functions too. */
+ if (TREE_CODE (t) == FUNCTION_DECL && TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE)
+ my_friendly_abort (53);
+
+ /* In member functions, the form `cname::name' is no longer
+ equivalent to `this->cname::name'. */
+ return build (OFFSET_REF, build_offset_type (type, TREE_TYPE (t)), decl, t);
+}
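+
+/* Illustrative uses (hypothetical): given `struct X { int i; int f (); };',
+   both `&X::i' and `&X::f' send `X::i' resp. `X::f' through
+   build_offset_ref with CNAME == X and NAME == i or f.  */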
+
+/* Given an object EXP and a member function reference MEMBER,
+ return the address of the actual member function. */
+tree
+get_member_function (exp_addr_ptr, exp, member)
+ tree *exp_addr_ptr;
+ tree exp, member;
+{
+ tree ctype = TREE_TYPE (exp);
+ tree function = save_expr (build_unary_op (ADDR_EXPR, member, 0));
+
+ if (TYPE_VIRTUAL_P (ctype)
+ || (flag_all_virtual == 1 && TYPE_OVERLOADS_METHOD_CALL_EXPR (ctype)))
+ {
+ tree e0, e1, e3;
+ tree exp_addr;
+
+ /* Save away the unadulterated `this' pointer. */
+ exp_addr = save_expr (*exp_addr_ptr);
+
+ /* Cast function to signed integer. */
+ e0 = build1 (NOP_EXPR, integer_type_node, function);
+
+ /* There is a hack here that takes advantage of
+ twos complement arithmetic, and the fact that
+	 there is more than one UNIT to the WORD.
+ If the high bit is set for the `function',
+ then we pretend it is a virtual function,
+ and the array indexing will knock this bit
+ out the top, leaving a valid index. */
+ if (UNITS_PER_WORD <= 1)
+ my_friendly_abort (54);
+
+ e1 = build (GT_EXPR, integer_type_node, e0, integer_zero_node);
+ e1 = build_compound_expr (tree_cons (NULL_TREE, exp_addr,
+ build_tree_list (NULL_TREE, e1)));
+ e1 = save_expr (e1);
+
+ if (TREE_SIDE_EFFECTS (*exp_addr_ptr))
+ {
+ exp = build_indirect_ref (exp_addr, NULL_PTR);
+ *exp_addr_ptr = exp_addr;
+ }
+
+ /* This is really hairy: if the function pointer is a pointer
+ to a non-virtual member function, then we can't go mucking
+ with the `this' pointer (any more than we already have to
+ this point). If it is a pointer to a virtual member function,
+ then we have to adjust the `this' pointer according to
+ what the virtual function table tells us. */
+
+ e3 = build_vfn_ref (exp_addr_ptr, exp, e0);
+ my_friendly_assert (e3 != error_mark_node, 213);
+
+ /* Change this pointer type from `void *' to the
+ type it is really supposed to be. */
+ TREE_TYPE (e3) = TREE_TYPE (function);
+
+ /* If non-virtual, use what we had originally. Otherwise,
+ use the value we get from the virtual function table. */
+ *exp_addr_ptr = build_conditional_expr (e1, exp_addr, *exp_addr_ptr);
+
+ function = build_conditional_expr (e1, function, e3);
+ }
+ return build_indirect_ref (function, NULL_PTR);
+}
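+
+/* Sketch (hypothetical) of a call that exercises the sign-bit test
+   above, under the old pointer-to-member-function encoding:
+
+	struct X { virtual int f (); };
+	int (X::*pm) () = &X::f;
+	(x.*pm) ();	// virtual: fetched from the vtable; else the
+			// stored address is used directly
+ */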
+
+/* If an OFFSET_REF made it through to here, then it did
+ not have its address taken. */
+
+tree
+resolve_offset_ref (exp)
+ tree exp;
+{
+ tree type = TREE_TYPE (exp);
+ tree base = NULL_TREE;
+ tree member;
+ tree basetype, addr;
+
+ if (TREE_CODE (exp) == TREE_LIST)
+ return build_unary_op (ADDR_EXPR, exp, 0);
+
+ if (TREE_CODE (exp) != OFFSET_REF)
+ {
+ my_friendly_assert (TREE_CODE (type) == OFFSET_TYPE, 214);
+ if (TYPE_OFFSET_BASETYPE (type) != current_class_type)
+ {
+ error ("object missing in use of pointer-to-member construct");
+ return error_mark_node;
+ }
+ member = exp;
+ type = TREE_TYPE (type);
+ base = C_C_D;
+ }
+ else
+ {
+ member = TREE_OPERAND (exp, 1);
+ base = TREE_OPERAND (exp, 0);
+ }
+
+ if ((TREE_CODE (member) == VAR_DECL
+ && ! TYPE_PTRMEMFUNC_P (TREE_TYPE (member)))
+ || TREE_CODE (TREE_TYPE (member)) == FUNCTION_TYPE)
+ {
+ /* These were static members. */
+ if (mark_addressable (member) == 0)
+ return error_mark_node;
+ return member;
+ }
+
+  /* A syntax error can cause a member which should
+ have been seen as static to be grok'd as non-static. */
+ if (TREE_CODE (member) == FIELD_DECL && C_C_D == NULL_TREE)
+ {
+ if (TREE_ADDRESSABLE (member) == 0)
+ {
+ cp_error_at ("member `%D' is non-static in static member function context", member);
+ error ("at this point in file");
+ TREE_ADDRESSABLE (member) = 1;
+ }
+ return error_mark_node;
+ }
+
+ /* The first case is really just a reference to a member of `this'. */
+ if (TREE_CODE (member) == FIELD_DECL
+ && (base == C_C_D
+ || (TREE_CODE (base) == NOP_EXPR
+ && TREE_OPERAND (base, 0) == error_mark_node)))
+ {
+ tree basetype_path;
+ enum access_type access;
+
+ if (TREE_CODE (exp) == OFFSET_REF && TREE_CODE (type) == OFFSET_TYPE)
+ basetype = TYPE_OFFSET_BASETYPE (type);
+ else
+ basetype = DECL_CONTEXT (member);
+
+ base = current_class_decl;
+
+ if (get_base_distance (basetype, TREE_TYPE (TREE_TYPE (base)), 0, &basetype_path) < 0)
+ {
+ error_not_base_type (basetype, TREE_TYPE (TREE_TYPE (base)));
+ return error_mark_node;
+ }
+ addr = convert_pointer_to (basetype, base);
+ access = compute_access (basetype_path, member);
+ if (access == access_public)
+ return build (COMPONENT_REF, TREE_TYPE (member),
+ build_indirect_ref (addr, NULL_PTR), member);
+ if (access == access_protected)
+ {
+ cp_error_at ("member `%D' is protected", member);
+ error ("in this context");
+ return error_mark_node;
+ }
+ if (access == access_private)
+ {
+ cp_error_at ("member `%D' is private", member);
+ error ("in this context");
+ return error_mark_node;
+ }
+ my_friendly_abort (55);
+ }
+
+ /* If this is a reference to a member function, then return
+ the address of the member function (which may involve going
+ through the object's vtable), otherwise, return an expression
+ for the dereferenced pointer-to-member construct. */
+ addr = build_unary_op (ADDR_EXPR, base, 0);
+
+ if (TREE_CODE (TREE_TYPE (member)) == METHOD_TYPE)
+ {
+ basetype = DECL_CLASS_CONTEXT (member);
+ addr = convert_pointer_to (basetype, addr);
+ return build_unary_op (ADDR_EXPR, get_member_function (&addr, build_indirect_ref (addr, NULL_PTR), member), 0);
+ }
+ else if (TREE_CODE (TREE_TYPE (member)) == OFFSET_TYPE)
+ {
+ basetype = TYPE_OFFSET_BASETYPE (TREE_TYPE (member));
+ addr = convert_pointer_to (basetype, addr);
+ member = convert (ptr_type_node, build_unary_op (ADDR_EXPR, member, 0));
+ return build1 (INDIRECT_REF, type,
+ build (PLUS_EXPR, ptr_type_node, addr, member));
+ }
+ else if (TYPE_PTRMEMFUNC_P (TREE_TYPE (member)))
+ {
+ return get_member_function_from_ptrfunc (&addr, base, member);
+ }
+ my_friendly_abort (56);
+ /* NOTREACHED */
+ return NULL_TREE;
+}
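+
+/* Illustration (hypothetical): inside a member function of X, a bare
+   use of `X::i' that never had its address taken is resolved above
+   into the equivalent of `this->X::i'.  */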
+
+/* Return either DECL or its known constant value (if it has one). */
+
+tree
+decl_constant_value (decl)
+ tree decl;
+{
+ if (! TREE_THIS_VOLATILE (decl)
+#if 0
+ /* These may be necessary for C, but they break C++. */
+ ! TREE_PUBLIC (decl)
+ /* Don't change a variable array bound or initial value to a constant
+ in a place where a variable is invalid. */
+ && ! pedantic
+#endif /* 0 */
+ && DECL_INITIAL (decl) != 0
+ && TREE_CODE (DECL_INITIAL (decl)) != ERROR_MARK
+ /* This is invalid if initial value is not constant.
+ If it has either a function call, a memory reference,
+ or a variable, then re-evaluating it could give different results. */
+ && TREE_CONSTANT (DECL_INITIAL (decl))
+ /* Check for cases where this is sub-optimal, even though valid. */
+ && TREE_CODE (DECL_INITIAL (decl)) != CONSTRUCTOR
+#if 0
+ /* We must allow this to work outside of functions so that
+ static constants can be used for array sizes. */
+ && current_function_decl != 0
+ && DECL_MODE (decl) != BLKmode
+#endif
+ )
+ return DECL_INITIAL (decl);
+ return decl;
+}
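+
+/* E.g. (illustration): for `const int n = 3;', decl_constant_value
+   returns the INTEGER_CST 3, so a use such as `int a[n];' can see the
+   constant directly.  */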
+
+/* Friend handling routines. */
+/* Friend data structures:
+
+ Lists of friend functions come from TYPE_DECL nodes. Since all
+ aggregate types are automatically typedef'd, these nodes are guaranteed
+ to exist.
+
+ The TREE_PURPOSE of a friend list is the name of the friend,
+ and its TREE_VALUE is another list.
+
+ For each element of that list, either the TREE_VALUE or the TREE_PURPOSE
+ will be filled in, but not both. The TREE_VALUE of that list is an
+ individual function which is a friend. The TREE_PURPOSE of that list
+ indicates a type in which all functions by that name are friends.
+
+ Lists of friend classes come from _TYPE nodes. Love that consistency
+ thang. */
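+
+/* A sketch of that layout (hypothetical declarations):
+
+	friend int f (int);	// list entry with TREE_PURPOSE == `f';
+				// the sublist's TREE_VALUE holds the
+				// FUNCTION_DECL for ::f(int)
+	friend int X::g (int);	// entry for `g' whose sublist carries
+				// the type X in TREE_PURPOSE, making
+				// every X::g a friend
+ */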
+
+int
+is_friend_type (type1, type2)
+ tree type1, type2;
+{
+ return is_friend (type1, type2);
+}
+
+int
+is_friend (type, supplicant)
+ tree type, supplicant;
+{
+ int declp;
+ register tree list;
+
+ if (supplicant == NULL_TREE || type == NULL_TREE)
+ return 0;
+
+ declp = (TREE_CODE_CLASS (TREE_CODE (supplicant)) == 'd');
+
+ if (declp)
+ /* It's a function decl. */
+ {
+ tree list = DECL_FRIENDLIST (TYPE_NAME (type));
+ tree name = DECL_NAME (supplicant);
+ tree ctype = DECL_CLASS_CONTEXT (supplicant);
+ for (; list ; list = TREE_CHAIN (list))
+ {
+ if (name == TREE_PURPOSE (list))
+ {
+ tree friends = TREE_VALUE (list);
+ name = DECL_ASSEMBLER_NAME (supplicant);
+ for (; friends ; friends = TREE_CHAIN (friends))
+ {
+ if (ctype == TREE_PURPOSE (friends))
+ return 1;
+ if (name == DECL_ASSEMBLER_NAME (TREE_VALUE (friends)))
+ return 1;
+ }
+ break;
+ }
+ }
+ }
+ else
+ /* It's a type. */
+ {
+ if (type == supplicant)
+ return 1;
+
+ list = CLASSTYPE_FRIEND_CLASSES (TREE_TYPE (TYPE_NAME (type)));
+ for (; list ; list = TREE_CHAIN (list))
+ if (supplicant == TREE_VALUE (list))
+ return 1;
+ }
+
+ {
+ tree context = declp ? DECL_CLASS_CONTEXT (supplicant)
+ : DECL_CONTEXT (TYPE_NAME (supplicant));
+
+ if (context)
+ return is_friend (type, context);
+ }
+
+ return 0;
+}
+
+/* Add a new friend to the friends of the aggregate type TYPE.
+ DECL is the FUNCTION_DECL of the friend being added. */
+static void
+add_friend (type, decl)
+ tree type, decl;
+{
+ tree typedecl = TYPE_NAME (type);
+ tree list = DECL_FRIENDLIST (typedecl);
+ tree name = DECL_NAME (decl);
+
+ while (list)
+ {
+ if (name == TREE_PURPOSE (list))
+ {
+ tree friends = TREE_VALUE (list);
+ for (; friends ; friends = TREE_CHAIN (friends))
+ {
+ if (decl == TREE_VALUE (friends))
+ {
+ cp_pedwarn ("`%D' is already a friend of class `%T'",
+ decl, type);
+ cp_pedwarn_at ("previous friend declaration of `%D'",
+ TREE_VALUE (friends));
+ return;
+ }
+ }
+ TREE_VALUE (list) = tree_cons (error_mark_node, decl,
+ TREE_VALUE (list));
+ return;
+ }
+ list = TREE_CHAIN (list);
+ }
+ DECL_FRIENDLIST (typedecl)
+ = tree_cons (DECL_NAME (decl), build_tree_list (error_mark_node, decl),
+ DECL_FRIENDLIST (typedecl));
+ if (DECL_NAME (decl) == ansi_opname[(int) MODIFY_EXPR])
+ {
+ tree parmtypes = TYPE_ARG_TYPES (TREE_TYPE (decl));
+ TYPE_HAS_ASSIGNMENT (TREE_TYPE (typedecl)) = 1;
+ if (parmtypes && TREE_CHAIN (parmtypes))
+ {
+ tree parmtype = TREE_VALUE (TREE_CHAIN (parmtypes));
+ if (TREE_CODE (parmtype) == REFERENCE_TYPE
+ && TREE_TYPE (parmtypes) == TREE_TYPE (typedecl))
+ TYPE_HAS_ASSIGN_REF (TREE_TYPE (typedecl)) = 1;
+ }
+ }
+}
+
+/* Declare that every member function NAME in FRIEND_TYPE
+ (which may be NULL_TREE) is a friend of type TYPE. */
+static void
+add_friends (type, name, friend_type)
+ tree type, name, friend_type;
+{
+ tree typedecl = TYPE_NAME (type);
+ tree list = DECL_FRIENDLIST (typedecl);
+
+ while (list)
+ {
+ if (name == TREE_PURPOSE (list))
+ {
+ tree friends = TREE_VALUE (list);
+ while (friends && TREE_PURPOSE (friends) != friend_type)
+ friends = TREE_CHAIN (friends);
+ if (friends)
+ if (friend_type)
+ warning ("method `%s::%s' is already a friend of class",
+ TYPE_NAME_STRING (friend_type),
+ IDENTIFIER_POINTER (name));
+ else
+ warning ("function `%s' is already a friend of class `%s'",
+ IDENTIFIER_POINTER (name),
+ IDENTIFIER_POINTER (DECL_NAME (typedecl)));
+ else
+ TREE_VALUE (list) = tree_cons (friend_type, NULL_TREE,
+ TREE_VALUE (list));
+ return;
+ }
+ list = TREE_CHAIN (list);
+ }
+ DECL_FRIENDLIST (typedecl) =
+ tree_cons (name,
+ build_tree_list (friend_type, NULL_TREE),
+ DECL_FRIENDLIST (typedecl));
+ if (! strncmp (IDENTIFIER_POINTER (name),
+ IDENTIFIER_POINTER (ansi_opname[(int) MODIFY_EXPR]),
+ strlen (IDENTIFIER_POINTER (ansi_opname[(int) MODIFY_EXPR]))))
+ {
+ TYPE_HAS_ASSIGNMENT (TREE_TYPE (typedecl)) = 1;
+ sorry ("declaring \"friend operator =\" will not find \"operator = (X&)\" if it exists");
+ }
+}
+
+/* Set up a cross reference so that type TYPE will make member function
+ CTYPE::DECL a friend when CTYPE is finally defined. For more than
+ one, set up a cross reference so that functions with the name DECL
+ and type CTYPE know that they are friends of TYPE. */
+static void
+xref_friend (type, decl, ctype)
+ tree type, decl, ctype;
+{
+ tree friend_decl = TYPE_NAME (ctype);
+#if 0
+ tree typedecl = TYPE_NAME (type);
+ tree t = tree_cons (NULL_TREE, ctype, DECL_UNDEFINED_FRIENDS (typedecl));
+
+ DECL_UNDEFINED_FRIENDS (typedecl) = t;
+#else
+ tree t = 0;
+#endif
+ SET_DECL_WAITING_FRIENDS (friend_decl,
+ tree_cons (type, t,
+ DECL_WAITING_FRIENDS (friend_decl)));
+ TREE_TYPE (DECL_WAITING_FRIENDS (friend_decl)) = decl;
+}
+
+/* Make FRIEND_TYPE a friend class to TYPE. If FRIEND_TYPE has already
+ been defined, we make all of its member functions friends of
+ TYPE. If not, we make it a pending friend, which can later be added
+ when its definition is seen. If a type is defined, then its TYPE_DECL's
+ DECL_UNDEFINED_FRIENDS contains a (possibly empty) list of friend
+ classes that are not defined. If a type has not yet been defined,
+ then the DECL_WAITING_FRIENDS contains a list of types
+ waiting to make it their friend. Note that these two can both
+ be in use at the same time! */
+void
+make_friend_class (type, friend_type)
+ tree type, friend_type;
+{
+ tree classes;
+
+ if (IS_SIGNATURE (type))
+ {
+ error ("`friend' declaration in signature definition");
+ return;
+ }
+ if (IS_SIGNATURE (friend_type))
+ {
+ error ("signature type `%s' declared `friend'",
+ IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (friend_type))));
+ return;
+ }
+ if (type == friend_type)
+ {
+ warning ("class `%s' is implicitly friends with itself",
+ TYPE_NAME_STRING (type));
+ return;
+ }
+
+ GNU_xref_hier (TYPE_NAME_STRING (type),
+ TYPE_NAME_STRING (friend_type), 0, 0, 1);
+
+ classes = CLASSTYPE_FRIEND_CLASSES (type);
+ while (classes && TREE_VALUE (classes) != friend_type)
+ classes = TREE_CHAIN (classes);
+ if (classes)
+ warning ("class `%s' is already friends with class `%s'",
+ TYPE_NAME_STRING (TREE_VALUE (classes)), TYPE_NAME_STRING (type));
+ else
+ {
+ CLASSTYPE_FRIEND_CLASSES (type)
+ = tree_cons (NULL_TREE, friend_type, CLASSTYPE_FRIEND_CLASSES (type));
+ }
+}
+
+/* Main friend processor. This is large, and for modularity purposes,
+ has been removed from grokdeclarator. It returns `void_type_node'
+ to indicate that something happened, though a FIELD_DECL is
+ not returned.
+
+ CTYPE is the class this friend belongs to.
+
+ DECLARATOR is the name of the friend.
+
+ DECL is the FUNCTION_DECL that the friend is.
+
+ In case we are parsing a friend which is part of an inline
+ definition, we will need to store PARM_DECL chain that comes
+ with it into the DECL_ARGUMENTS slot of the FUNCTION_DECL.
+
+ FLAGS is just used for `grokclassfn'.
+
+   QUALS say what special qualifiers should apply to the object
+ pointed to by `this'. */
+tree
+do_friend (ctype, declarator, decl, parmdecls, flags, quals)
+ tree ctype, declarator, decl, parmdecls;
+ enum overload_flags flags;
+ tree quals;
+{
+ /* Every decl that gets here is a friend of something. */
+ DECL_FRIEND_P (decl) = 1;
+
+ if (ctype)
+ {
+ tree cname = TYPE_NAME (ctype);
+ if (TREE_CODE (cname) == TYPE_DECL)
+ cname = DECL_NAME (cname);
+
+ /* A method friend. */
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ if (flags == NO_SPECIAL && ctype && declarator == cname)
+ DECL_CONSTRUCTOR_P (decl) = 1;
+
+ /* This will set up DECL_ARGUMENTS for us. */
+ grokclassfn (ctype, cname, decl, flags, quals);
+ if (TYPE_SIZE (ctype) != 0)
+ check_classfn (ctype, cname, decl);
+
+ if (TREE_TYPE (decl) != error_mark_node)
+ {
+ if (TYPE_SIZE (ctype))
+ {
+ /* We don't call pushdecl here yet, or ever on this
+ actual FUNCTION_DECL. We must preserve its TREE_CHAIN
+ until the end. */
+ make_decl_rtl (decl, NULL_PTR, 1);
+ add_friend (current_class_type, decl);
+ }
+ else
+ {
+ register char *classname
+ = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (ctype)));
+
+ error ("member declared as friend before type `%s' defined",
+ classname);
+ }
+ }
+ }
+ else
+ {
+ /* Possibly a bunch of method friends. */
+
+ /* Get the class they belong to. */
+ tree ctype = IDENTIFIER_TYPE_VALUE (cname);
+
+ /* This class is defined, use its methods now. */
+ if (TYPE_SIZE (ctype))
+ {
+ tree fields = lookup_fnfields (TYPE_BINFO (ctype), declarator, 0);
+ if (fields)
+ add_friends (current_class_type, declarator, ctype);
+ else
+ error ("method `%s' is not a member of class `%s'",
+ IDENTIFIER_POINTER (declarator),
+ IDENTIFIER_POINTER (cname));
+ }
+ else
+	    /* Note: DECLARATOR actually names more than one function; in
+	       this case, we're making sure that fns with the name DECLARATOR
+ and type CTYPE know they are friends of the current
+ class type. */
+ xref_friend (current_class_type, declarator, ctype);
+ decl = void_type_node;
+ }
+ }
+ else if (TREE_CODE (decl) == FUNCTION_DECL
+ && ((IDENTIFIER_LENGTH (declarator) == 4
+ && IDENTIFIER_POINTER (declarator)[0] == 'm'
+ && ! strcmp (IDENTIFIER_POINTER (declarator), "main"))
+ || (IDENTIFIER_LENGTH (declarator) > 10
+ && IDENTIFIER_POINTER (declarator)[0] == '_'
+ && IDENTIFIER_POINTER (declarator)[1] == '_'
+ && strncmp (IDENTIFIER_POINTER (declarator)+2,
+ "builtin_", 8) == 0)))
+ {
+ /* raw "main", and builtin functions never gets overloaded,
+ but they can become friends. */
+ TREE_PUBLIC (decl) = 1;
+ add_friend (current_class_type, decl);
+ DECL_FRIEND_P (decl) = 1;
+ decl = void_type_node;
+ }
+ /* A global friend.
+ @@ or possibly a friend from a base class ?!? */
+ else if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ /* Friends must all go through the overload machinery,
+ even though they may not technically be overloaded.
+
+ Note that because classes all wind up being top-level
+	 in their scope, their friends wind up in top-level scope as well.  */
+ DECL_ASSEMBLER_NAME (decl)
+ = build_decl_overload (declarator, TYPE_ARG_TYPES (TREE_TYPE (decl)),
+ TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE);
+ DECL_ARGUMENTS (decl) = parmdecls;
+ DECL_CLASS_CONTEXT (decl) = current_class_type;
+
+ /* We can call pushdecl here, because the TREE_CHAIN of this
+ FUNCTION_DECL is not needed for other purposes. */
+ decl = pushdecl (decl);
+
+ make_decl_rtl (decl, NULL_PTR, 1);
+ add_friend (current_class_type, decl);
+
+ DECL_FRIEND_P (decl) = 1;
+#if 0
+ TREE_OVERLOADED (declarator) = 1;
+#endif
+ }
+ else
+ {
+ /* @@ Should be able to ingest later definitions of this function
+ before use. */
+ tree decl = lookup_name_nonclass (declarator);
+ if (decl == NULL_TREE)
+ {
+ warning ("implicitly declaring `%s' as struct",
+ IDENTIFIER_POINTER (declarator));
+ decl = xref_tag (record_type_node, declarator, NULL_TREE, 1);
+ decl = TYPE_NAME (decl);
+ }
+
+ /* Allow abbreviated declarations of overloaded functions,
+ but not if those functions are really class names. */
+ if (TREE_CODE (decl) == TREE_LIST && TREE_TYPE (TREE_PURPOSE (decl)))
+ {
+ warning ("`friend %s' archaic, use `friend class %s' instead",
+ IDENTIFIER_POINTER (declarator),
+ IDENTIFIER_POINTER (declarator));
+ decl = TREE_TYPE (TREE_PURPOSE (decl));
+ }
+
+ if (TREE_CODE (decl) == TREE_LIST)
+ add_friends (current_class_type, TREE_PURPOSE (decl), NULL_TREE);
+ else
+ make_friend_class (current_class_type, TREE_TYPE (decl));
+ decl = void_type_node;
+ }
+ return decl;
+}
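+
+/* The main cases above correspond roughly to (illustrative):
+
+	friend int X::f (int);	// a method friend (CTYPE == X)
+	friend int g (int);	// a global friend, pushed and overloaded
+	friend Y;		// archaic; treated as a friend class
+ */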
+
+/* TYPE has now been defined. It may, however, have a number of things
+   waiting to make it their friend.  We resolve these references
+ here. */
+void
+embrace_waiting_friends (type)
+ tree type;
+{
+ tree decl = TYPE_NAME (type);
+ tree waiters;
+
+ if (TREE_CODE (decl) != TYPE_DECL)
+ return;
+
+ for (waiters = DECL_WAITING_FRIENDS (decl); waiters;
+ waiters = TREE_CHAIN (waiters))
+ {
+ tree waiter = TREE_PURPOSE (waiters);
+#if 0
+ tree waiter_prev = TREE_VALUE (waiters);
+#endif
+ tree decl = TREE_TYPE (waiters);
+ tree name = decl ? (TREE_CODE (decl) == IDENTIFIER_NODE
+ ? decl : DECL_NAME (decl)) : NULL_TREE;
+ if (name)
+ {
+ /* @@ There may be work to be done since we have not verified
+ @@ consistency between original and friend declarations
+ @@ of the functions waiting to become friends. */
+ tree field = lookup_fnfields (TYPE_BINFO (type), name, 0);
+ if (field)
+ if (decl == name)
+ add_friends (waiter, name, type);
+ else
+ add_friend (waiter, decl);
+ else
+ error_with_file_and_line (DECL_SOURCE_FILE (TYPE_NAME (waiter)),
+ DECL_SOURCE_LINE (TYPE_NAME (waiter)),
+ "no method `%s' defined in class `%s' to be friend",
+ IDENTIFIER_POINTER (DECL_NAME (TREE_TYPE (waiters))),
+ TYPE_NAME_STRING (type));
+ }
+ else
+ make_friend_class (type, waiter);
+
+#if 0
+ if (TREE_CHAIN (waiter_prev))
+ TREE_CHAIN (waiter_prev) = TREE_CHAIN (TREE_CHAIN (waiter_prev));
+ else
+ DECL_UNDEFINED_FRIENDS (TYPE_NAME (waiter)) = NULL_TREE;
+#endif
+ }
+}
+
+/* Common subroutines of build_new and build_vec_delete. */
+
+/* Common interface for calling "builtin" functions that are not
+ really builtin. */
+
+tree
+build_builtin_call (type, node, arglist)
+ tree type;
+ tree node;
+ tree arglist;
+{
+ tree rval = build (CALL_EXPR, type, node, arglist, 0);
+ TREE_SIDE_EFFECTS (rval) = 1;
+ assemble_external (TREE_OPERAND (node, 0));
+ TREE_USED (TREE_OPERAND (node, 0)) = 1;
+ return rval;
+}
+
+/* Generate a C++ "new" expression. DECL is either a TREE_LIST
+ (which needs to go through some sort of groktypename) or it
+ is the name of the class we are newing. INIT is an initialization value.
+ It is either an EXPRLIST, an EXPR_NO_COMMAS, or something in braces.
+ If INIT is void_type_node, it means do *not* call a constructor
+ for this instance.
+
+ For types with constructors, the data returned is initialized
+ by the appropriate constructor.
+
+ Whether the type has a constructor or not, if it has a pointer
+ to a virtual function table, then that pointer is set up
+ here.
+
+ Unless I am mistaken, a call to new () will return initialized
+ data regardless of whether the constructor itself is private or
+ not. NOPE; new fails if the constructor is private (jcm).
+
+ Note that build_new does nothing to assure that any special
+ alignment requirements of the type are met. Rather, it leaves
+ it up to malloc to do the right thing. Otherwise, folding to
+ the right alignment cal cause problems if the user tries to later
+ free the memory returned by `new'.
+
+ PLACEMENT is the `placement' list for user-defined operator new (). */
+
+tree
+build_new (placement, decl, init, use_global_new)
+ tree placement;
+ tree decl, init;
+ int use_global_new;
+{
+ tree type, true_type, size, rval;
+ tree nelts;
+ int has_array = 0;
+ enum tree_code code = NEW_EXPR;
+
+ tree pending_sizes = NULL_TREE;
+
+ if (decl == error_mark_node)
+ return error_mark_node;
+
+ if (TREE_CODE (decl) == TREE_LIST)
+ {
+ tree absdcl = TREE_VALUE (decl);
+ tree last_absdcl = NULL_TREE;
+      int old_immediate_size_expand = immediate_size_expand;
+
+ if (current_function_decl
+ && DECL_CONSTRUCTOR_P (current_function_decl))
+ {
+ old_immediate_size_expand = immediate_size_expand;
+ immediate_size_expand = 0;
+ }
+
+ nelts = integer_one_node;
+
+ if (absdcl && TREE_CODE (absdcl) == CALL_EXPR)
+ my_friendly_abort (215);
+ while (absdcl && TREE_CODE (absdcl) == INDIRECT_REF)
+ {
+ last_absdcl = absdcl;
+ absdcl = TREE_OPERAND (absdcl, 0);
+ }
+
+ if (absdcl && TREE_CODE (absdcl) == ARRAY_REF)
+ {
+ /* probably meant to be a vec new */
+ tree this_nelts;
+
+ while (TREE_OPERAND (absdcl, 0)
+ && TREE_CODE (TREE_OPERAND (absdcl, 0)) == ARRAY_REF)
+ {
+ last_absdcl = absdcl;
+ absdcl = TREE_OPERAND (absdcl, 0);
+ }
+
+ has_array = 1;
+ this_nelts = TREE_OPERAND (absdcl, 1);
+ if (this_nelts != error_mark_node)
+ {
+ if (this_nelts == NULL_TREE)
+ error ("new of array type fails to specify size");
+ else
+ {
+ this_nelts = save_expr (convert (sizetype, this_nelts));
+ absdcl = TREE_OPERAND (absdcl, 0);
+ if (this_nelts == integer_zero_node)
+ {
+ warning ("zero size array reserves no space");
+ nelts = integer_zero_node;
+ }
+ else
+ nelts = build_binary_op (MULT_EXPR, nelts, this_nelts, 1);
+ }
+ }
+ else
+ nelts = integer_zero_node;
+ }
+
+ if (last_absdcl)
+ TREE_OPERAND (last_absdcl, 0) = absdcl;
+ else
+ TREE_VALUE (decl) = absdcl;
+
+ type = true_type = groktypename (decl);
+ if (! type || type == error_mark_node)
+ {
+ immediate_size_expand = old_immediate_size_expand;
+ return error_mark_node;
+ }
+
+ if (current_function_decl
+ && DECL_CONSTRUCTOR_P (current_function_decl))
+ {
+ pending_sizes = get_pending_sizes ();
+ immediate_size_expand = old_immediate_size_expand;
+ }
+ }
+ else if (TREE_CODE (decl) == IDENTIFIER_NODE)
+ {
+ if (IDENTIFIER_HAS_TYPE_VALUE (decl))
+ {
+ /* An aggregate type. */
+ type = IDENTIFIER_TYPE_VALUE (decl);
+ decl = TYPE_NAME (type);
+ }
+ else
+ {
+ /* A builtin type. */
+ decl = lookup_name (decl, 1);
+ my_friendly_assert (TREE_CODE (decl) == TYPE_DECL, 215);
+ type = TREE_TYPE (decl);
+ }
+ true_type = type;
+ }
+ else if (TREE_CODE (decl) == TYPE_DECL)
+ {
+ type = TREE_TYPE (decl);
+ true_type = type;
+ }
+ else
+ {
+ type = decl;
+ true_type = type;
+ decl = TYPE_NAME (type);
+ }
+
+ /* ``A reference cannot be created by the new operator. A reference
+ is not an object (8.2.2, 8.4.3), so a pointer to it could not be
+ returned by new.'' ARM 5.3.3 */
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ {
+ error ("new cannot be applied to a reference type");
+ type = true_type = TREE_TYPE (type);
+ }
+
+ /* When the object being created is an array, the new-expression yields a
+ pointer to the initial element (if any) of the array. For example,
+ both new int and new int[10] return an int*. 5.3.4. */
+ if (TREE_CODE (type) == ARRAY_TYPE && has_array == 0)
+ {
+ nelts = array_type_nelts_top (type);
+ has_array = 1;
+ type = true_type = TREE_TYPE (type);
+ }
+
+ if (TYPE_READONLY (type) || TYPE_VOLATILE (type))
+ {
+ pedwarn ("const and volatile types cannot be created with operator new");
+ type = true_type = TYPE_MAIN_VARIANT (type);
+ }
+
+ /* If our base type is an array, then make sure we know how many elements
+ it has. */
+ while (TREE_CODE (true_type) == ARRAY_TYPE)
+ {
+ tree this_nelts = array_type_nelts_top (true_type);
+ nelts = build_binary_op (MULT_EXPR, nelts, this_nelts, 1);
+ true_type = TREE_TYPE (true_type);
+ }
+ if (has_array)
+ size = fold (build_binary_op (MULT_EXPR, size_in_bytes (true_type),
+ nelts, 1));
+ else
+ size = size_in_bytes (type);
+
+ if (TYPE_SIZE (true_type) == 0)
+ {
+ if (true_type == void_type_node)
+ error ("invalid type for new: `void'");
+ else
+ incomplete_type_error (0, true_type);
+ return error_mark_node;
+ }
+
+ if (TYPE_LANG_SPECIFIC (true_type)
+ && CLASSTYPE_ABSTRACT_VIRTUALS (true_type))
+ {
+ abstract_virtuals_error (NULL_TREE, true_type);
+ return error_mark_node;
+ }
+
+ if (TYPE_LANG_SPECIFIC (true_type) && IS_SIGNATURE (true_type))
+ {
+ signature_error (NULL_TREE, true_type);
+ return error_mark_node;
+ }
+
+ /* Get a little extra space to store a couple of things before the new'ed
+ array. */
+ if (has_array && TYPE_VEC_NEW_USES_COOKIE (true_type))
+ {
+ tree extra = BI_header_size;
+
+ size = size_binop (PLUS_EXPR, size, extra);
+ }
+
+ if (has_array)
+ code = VEC_NEW_EXPR;
+
+ /* Allocate the object. */
+ if (! use_global_new && TYPE_LANG_SPECIFIC (true_type)
+ && (TYPE_GETS_NEW (true_type) & (1 << has_array)))
+ rval = build_opfncall (code, LOOKUP_NORMAL,
+ TYPE_POINTER_TO (true_type), size, placement);
+ else if (placement)
+ {
+ rval = build_opfncall (code, LOOKUP_GLOBAL|LOOKUP_COMPLAIN,
+ ptr_type_node, size, placement);
+ rval = convert (TYPE_POINTER_TO (true_type), rval);
+ }
+ else if (! has_array && flag_this_is_variable > 0
+ && TYPE_HAS_CONSTRUCTOR (true_type) && init != void_type_node)
+ {
+ if (init == NULL_TREE || TREE_CODE (init) == TREE_LIST)
+ rval = NULL_TREE;
+ else
+ {
+ error ("constructors take parameter lists");
+ return error_mark_node;
+ }
+ }
+ else
+ {
+ rval = build_builtin_call (build_pointer_type (true_type),
+ has_array ? BIVN : BIN,
+ build_tree_list (NULL_TREE, size));
+#if 0
+ /* See comment above as to why this is disabled. */
+ if (alignment)
+ {
+ rval = build (PLUS_EXPR, TYPE_POINTER_TO (true_type), rval,
+ alignment);
+ rval = build (BIT_AND_EXPR, TYPE_POINTER_TO (true_type),
+ rval, build1 (BIT_NOT_EXPR, integer_type_node,
+ alignment));
+ }
+#endif
+ TREE_CALLS_NEW (rval) = 1;
+ }
+
+ /* If rval is NULL_TREE we don't have to allocate it, but are we totally
+ sure we have some extra bytes in that case for the BI_header_size
+ cookie? And how does that interact with the code below? (mrs) */
+ /* Finish up some magic for new'ed arrays */
+ if (has_array && TYPE_VEC_NEW_USES_COOKIE (true_type) && rval != NULL_TREE)
+ {
+ tree extra = BI_header_size;
+ tree cookie, exp1;
+ rval = convert (ptr_type_node, rval); /* convert to void * first */
+ rval = convert (string_type_node, rval); /* let's not add void* and ints */
+ rval = save_expr (build_binary_op (PLUS_EXPR, rval, extra, 1));
+ /* Store header info. */
+ cookie = build_indirect_ref (build (MINUS_EXPR, TYPE_POINTER_TO (BI_header_type),
+ rval, extra), NULL_PTR);
+ exp1 = build (MODIFY_EXPR, void_type_node,
+ build_component_ref (cookie, nc_nelts_field_id, 0, 0),
+ nelts);
+ TREE_SIDE_EFFECTS (exp1) = 1;
+ rval = convert (build_pointer_type (true_type), rval);
+ TREE_CALLS_NEW (rval) = 1;
+ TREE_SIDE_EFFECTS (rval) = 1;
+ rval = build_compound_expr (tree_cons (NULL_TREE, exp1,
+ build_tree_list (NULL_TREE, rval)));
+ }
+
+ /* We've figured out where the allocation is to go.
+ If we're not eliding constructors, then if a constructor
+ is defined, we must go through it. */
+ if (!has_array && (rval == NULL_TREE || !flag_elide_constructors)
+ && TYPE_HAS_CONSTRUCTOR (true_type) && init != void_type_node)
+ {
+ tree newrval;
+ /* Constructors are never virtual. If it has an initialization, we
+ need to complain if we aren't allowed to use the ctor that took
+ that argument. */
+ int flags = LOOKUP_NORMAL|LOOKUP_NONVIRTUAL|LOOKUP_COMPLAIN;
+
+ /* If a copy constructor might work, set things up so that we can
+ try that after this. We deliberately don't clear LOOKUP_COMPLAIN
+ any more, since that would make it impossible to rationally use
+ the access of a constructor that matches perfectly. */
+#if 0
+ if (rval != NULL_TREE)
+ flags |= LOOKUP_SPECULATIVELY;
+#endif
+
+ if (rval && TYPE_USES_VIRTUAL_BASECLASSES (true_type))
+ {
+ init = tree_cons (NULL_TREE, integer_one_node, init);
+ flags |= LOOKUP_HAS_IN_CHARGE;
+ }
+
+ {
+ tree tmp = rval;
+
+ if (TREE_CODE (TREE_TYPE (tmp)) == POINTER_TYPE)
+ tmp = build_indirect_ref (tmp, NULL_PTR);
+
+ newrval = build_method_call (tmp, constructor_name_full (true_type),
+ init, NULL_TREE, flags);
+ }
+
+ if (newrval)
+ {
+ rval = newrval;
+ TREE_HAS_CONSTRUCTOR (rval) = 1;
+ }
+ else
+ rval = error_mark_node;
+ goto done;
+ }
+
+ if (rval == error_mark_node)
+ return error_mark_node;
+ rval = save_expr (rval);
+ TREE_HAS_CONSTRUCTOR (rval) = 1;
+
+ /* Don't call any constructors or do any initialization. */
+ if (init == void_type_node)
+ goto done;
+
+ if (TYPE_NEEDS_CONSTRUCTING (type) || init)
+ {
+ if (! TYPE_NEEDS_CONSTRUCTING (type) && ! IS_AGGR_TYPE (type))
+ {
+ /* New 2.0 interpretation: `new int (10)' means
+ allocate an int, and initialize it with 10. */
+
+ init = build_c_cast (type, init);
+ rval = build (COMPOUND_EXPR, TREE_TYPE (rval),
+ build_modify_expr (build_indirect_ref (rval, NULL_PTR),
+ NOP_EXPR, init),
+ rval);
+ TREE_SIDE_EFFECTS (rval) = 1;
+ TREE_CALLS_NEW (rval) = 1;
+ }
+ else if (current_function_decl == NULL_TREE)
+ {
+ extern tree static_aggregates;
+
+ /* In case of static initialization, SAVE_EXPR is good enough. */
+ init = copy_to_permanent (init);
+ rval = copy_to_permanent (rval);
+ static_aggregates = perm_tree_cons (init, rval, static_aggregates);
+ }
+ else
+ {
+ /* Have to wrap this in RTL_EXPR for two cases:
+ in base or member initialization and if we
+ are a branch of a ?: operator. Since we
+ can't easily know the latter, just do it always. */
+ tree xval = make_node (RTL_EXPR);
+
+ TREE_TYPE (xval) = TREE_TYPE (rval);
+ do_pending_stack_adjust ();
+ start_sequence_for_rtl_expr (xval);
+
+ /* As a matter of principle, `start_sequence' should do this. */
+ emit_note (0, -1);
+
+ if (has_array)
+ rval = expand_vec_init (decl, rval,
+ build_binary_op (MINUS_EXPR, nelts, integer_one_node, 1),
+ init, 0);
+ else
+ expand_aggr_init (build_indirect_ref (rval, NULL_PTR), init, 0);
+
+ do_pending_stack_adjust ();
+
+ TREE_SIDE_EFFECTS (xval) = 1;
+ TREE_CALLS_NEW (xval) = 1;
+ RTL_EXPR_SEQUENCE (xval) = get_insns ();
+ end_sequence ();
+
+ if (TREE_CODE (rval) == SAVE_EXPR)
+ {
+ /* Errors may cause this to not get evaluated. */
+ if (SAVE_EXPR_RTL (rval) == 0)
+ SAVE_EXPR_RTL (rval) = const0_rtx;
+ RTL_EXPR_RTL (xval) = SAVE_EXPR_RTL (rval);
+ }
+ else
+ {
+ my_friendly_assert (TREE_CODE (rval) == VAR_DECL, 217);
+ RTL_EXPR_RTL (xval) = DECL_RTL (rval);
+ }
+ rval = xval;
+ }
+ }
+ done:
+ if (rval && TREE_TYPE (rval) != build_pointer_type (type))
+ {
+ /* The type of new int [3][3] is not int *, but int (*)[3]. */
+ rval = build_c_cast (build_pointer_type (type), rval);
+ }
+
+ if (pending_sizes)
+ rval = build_compound_expr (chainon (pending_sizes,
+ build_tree_list (NULL_TREE, rval)));
+
+ if (flag_gc)
+ {
+ extern tree gc_visible;
+ tree objbits;
+ tree update_expr;
+
+ rval = save_expr (rval);
+ /* We don't need a `headof' operation to do this because
+ we know where the object starts. */
+ objbits = build1 (INDIRECT_REF, unsigned_type_node,
+ build (MINUS_EXPR, ptr_type_node,
+ rval, c_sizeof_nowarn (unsigned_type_node)));
+ update_expr = build_modify_expr (objbits, BIT_IOR_EXPR, gc_visible);
+ rval = build_compound_expr (tree_cons (NULL_TREE, rval,
+ tree_cons (NULL_TREE, update_expr,
+ build_tree_list (NULL_TREE, rval))));
+ }
+
+ return save_expr (rval);
+}
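+
+/* Illustrative sketch, not part of the imported source: roughly what
+   the trees built above compute for the non-aggregate case
+   `new int (10)' -- allocate through the builtin operator new
+   (modeled here with malloc, which is what __builtin_new amounts to),
+   then initialize through the returned pointer. The function name is
+   hypothetical. */
+
+static int *
+sketch_new_int ()
+{
+  extern char *malloc ();
+  int *p = (int *) malloc (sizeof (int)); /* allocation (BIN) */
+  *p = 10;                                /* `init' applied via build_modify_expr */
+  return p;                               /* value of the COMPOUND_EXPR */
+}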
+
+/* `expand_vec_init' performs initialization of a vector of aggregate
+ types.
+
+ DECL is passed only for error reporting, and provides line number
+ and source file name information.
+ BASE is the space where the vector will be.
+ MAXINDEX is the maximum index of the array (one less than the
+ number of elements).
+ INIT is the (possibly NULL) initializer.
+
+ FROM_ARRAY is 0 if we should init everything with INIT
+ (i.e., every element initialized from INIT).
+ FROM_ARRAY is 1 if we should index into INIT in parallel
+ with initialization of DECL.
+ FROM_ARRAY is 2 if we should index into INIT in parallel,
+ but use assignment instead of initialization. */
+
+tree
+expand_vec_init (decl, base, maxindex, init, from_array)
+ tree decl, base, maxindex, init;
+ int from_array;
+{
+ tree rval;
+ tree iterator, base2 = NULL_TREE;
+ tree type = TREE_TYPE (TREE_TYPE (base));
+ tree size;
+
+ maxindex = convert (integer_type_node, maxindex);
+ if (maxindex == error_mark_node)
+ return error_mark_node;
+
+ if (current_function_decl == NULL_TREE)
+ {
+ rval = make_tree_vec (3);
+ TREE_VEC_ELT (rval, 0) = base;
+ TREE_VEC_ELT (rval, 1) = maxindex;
+ TREE_VEC_ELT (rval, 2) = init;
+ return rval;
+ }
+
+ size = size_in_bytes (type);
+
+ /* Set to zero in case size is <= 0. Optimizer will delete this if
+ it is not needed. */
+ rval = get_temp_regvar (TYPE_POINTER_TO (type),
+ convert (TYPE_POINTER_TO (type), null_pointer_node));
+ base = default_conversion (base);
+ base = convert (TYPE_POINTER_TO (type), base);
+ expand_assignment (rval, base, 0, 0);
+ base = get_temp_regvar (TYPE_POINTER_TO (type), base);
+
+ if (init != NULL_TREE
+ && TREE_CODE (init) == CONSTRUCTOR
+ && TREE_TYPE (init) == TREE_TYPE (decl))
+ {
+ /* Initialization of array from {...}. */
+ tree elts = CONSTRUCTOR_ELTS (init);
+ tree baseref = build1 (INDIRECT_REF, type, base);
+ tree baseinc = build (PLUS_EXPR, TYPE_POINTER_TO (type), base, size);
+ int host_i = TREE_INT_CST_LOW (maxindex);
+
+ if (IS_AGGR_TYPE (type))
+ {
+ while (elts)
+ {
+ host_i -= 1;
+ expand_aggr_init (baseref, TREE_VALUE (elts), 0);
+
+ expand_assignment (base, baseinc, 0, 0);
+ elts = TREE_CHAIN (elts);
+ }
+ /* Initialize any elements by default if possible. */
+ if (host_i >= 0)
+ {
+ if (TYPE_NEEDS_CONSTRUCTING (type) == 0)
+ {
+ if (obey_regdecls)
+ use_variable (DECL_RTL (base));
+ goto done_init;
+ }
+
+ iterator = get_temp_regvar (integer_type_node,
+ build_int_2 (host_i, 0));
+ init = NULL_TREE;
+ goto init_by_default;
+ }
+ }
+ else
+ while (elts)
+ {
+ expand_assignment (baseref, TREE_VALUE (elts), 0, 0);
+
+ expand_assignment (base, baseinc, 0, 0);
+ elts = TREE_CHAIN (elts);
+ }
+
+ if (obey_regdecls)
+ use_variable (DECL_RTL (base));
+ }
+ else
+ {
+ tree itype;
+
+ iterator = get_temp_regvar (integer_type_node, maxindex);
+
+ init_by_default:
+
+ /* If initializing one array from another,
+ initialize element by element. */
+ if (from_array)
+ {
+ /* We rely upon the calls below to do argument checking. */
+ if (decl == NULL_TREE)
+ {
+ sorry ("initialization of array from dissimilar array type");
+ return error_mark_node;
+ }
+ if (init)
+ {
+ base2 = default_conversion (init);
+ itype = TREE_TYPE (base2);
+ base2 = get_temp_regvar (itype, base2);
+ itype = TREE_TYPE (itype);
+ }
+ else if (TYPE_LANG_SPECIFIC (type)
+ && TYPE_NEEDS_CONSTRUCTING (type)
+ && ! TYPE_HAS_DEFAULT_CONSTRUCTOR (type))
+ {
+ error ("initializer ends prematurely");
+ return error_mark_node;
+ }
+ }
+
+ expand_start_cond (build (GE_EXPR, integer_type_node,
+ iterator, integer_zero_node), 0);
+ expand_start_loop_continue_elsewhere (1);
+
+ if (from_array)
+ {
+ tree to = build1 (INDIRECT_REF, type, base);
+ tree from;
+
+ if (base2)
+ from = build1 (INDIRECT_REF, itype, base2);
+ else
+ from = NULL_TREE;
+
+ if (from_array == 2)
+ expand_expr_stmt (build_modify_expr (to, NOP_EXPR, from));
+ else if (TYPE_NEEDS_CONSTRUCTING (type))
+ expand_aggr_init (to, from, 0);
+ else if (from)
+ expand_assignment (to, from, 0, 0);
+ else
+ my_friendly_abort (57);
+ }
+ else if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ if (init != 0)
+ sorry ("cannot initialize multi-dimensional array with initializer");
+ expand_vec_init (decl, build1 (NOP_EXPR, TYPE_POINTER_TO (TREE_TYPE (type)), base),
+ array_type_nelts (type), 0, 0);
+ }
+ else
+ expand_aggr_init (build1 (INDIRECT_REF, type, base), init, 0);
+
+ expand_assignment (base,
+ build (PLUS_EXPR, TYPE_POINTER_TO (type), base, size),
+ 0, 0);
+ if (base2)
+ expand_assignment (base2,
+ build (PLUS_EXPR, TYPE_POINTER_TO (type), base2, size), 0, 0);
+ expand_loop_continue_here ();
+ expand_exit_loop_if_false (0, build (NE_EXPR, integer_type_node,
+ build (PREDECREMENT_EXPR, integer_type_node, iterator, integer_one_node), minus_one));
+
+ if (obey_regdecls)
+ {
+ use_variable (DECL_RTL (base));
+ if (base2)
+ use_variable (DECL_RTL (base2));
+ }
+ expand_end_loop ();
+ expand_end_cond ();
+ if (obey_regdecls)
+ use_variable (DECL_RTL (iterator));
+ }
+ done_init:
+
+ if (obey_regdecls)
+ use_variable (DECL_RTL (rval));
+ return rval;
+}
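+
+/* Illustrative sketch, not part of the imported source: the shape of
+   the loop expand_vec_init emits when default-constructing COUNT
+   elements from scratch. The constructor is modeled as a plain
+   function pointer; all names here are hypothetical. */
+
+static void
+sketch_vec_init (base, count, elt_size, ctor)
+     char *base;
+     int count, elt_size;
+     void (*ctor) ();
+{
+  int iterator = count - 1;     /* like maxindex above */
+  if (iterator >= 0)            /* expand_start_cond */
+    do
+      {
+        (*ctor) (base);         /* expand_aggr_init on *base */
+        base += elt_size;       /* advance base by size */
+      }
+    while (--iterator != -1);   /* PREDECREMENT_EXPR vs. minus_one */
+}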
+
+/* Free up storage of type TYPE, at address ADDR.
+
+ TYPE is a POINTER_TYPE and can be ptr_type_node for no special type
+ of pointer.
+
+ VIRTUAL_SIZE is the amount of storage that was allocated, and is
+ used as the second argument to operator delete. It can include
+ things like padding and magic size cookies. It has virtual in it,
+ because if you have a base pointer and you delete through a virtual
+ destructor, it should be the size of the dynamic object, not the
+ static object, see Free Store 12.5 ANSI C++ WP.
+
+ This does not call any destructors. */
+tree
+build_x_delete (type, addr, which_delete, virtual_size)
+ tree type, addr;
+ int which_delete;
+ tree virtual_size;
+{
+ int use_global_delete = which_delete & 1;
+ int use_vec_delete = !!(which_delete & 2);
+ tree rval;
+ enum tree_code code = use_vec_delete ? VEC_DELETE_EXPR : DELETE_EXPR;
+
+ if (! use_global_delete && TYPE_LANG_SPECIFIC (TREE_TYPE (type))
+ && (TYPE_GETS_DELETE (TREE_TYPE (type)) & (1 << use_vec_delete)))
+ rval = build_opfncall (code, LOOKUP_NORMAL, addr, virtual_size, NULL_TREE);
+ else
+ rval = build_builtin_call (void_type_node, use_vec_delete ? BIVD : BID,
+ build_tree_list (NULL_TREE, addr));
+ return rval;
+}
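+
+/* Illustrative sketch, not part of the imported source: the dispatch
+   build_x_delete performs, with the class operator delete and the
+   builtin modeled as plain function pointers. All names here are
+   hypothetical. */
+
+static void
+sketch_x_delete (addr, virtual_size, class_op_delete, builtin_delete)
+     char *addr;
+     int virtual_size;
+     void (*class_op_delete) ();  /* user-defined operator delete */
+     void (*builtin_delete) ();   /* __builtin_delete */
+{
+  if (class_op_delete)
+    (*class_op_delete) (addr, virtual_size); /* gets the size too */
+  else
+    (*builtin_delete) (addr);
+}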
+
+/* Generate a call to a destructor. TYPE is the type to cast ADDR to.
+ ADDR is an expression which yields the store to be destroyed.
+ AUTO_DELETE is nonzero if a call to operator delete should be made.
+ If (AUTO_DELETE & 2) is nonzero, we tear down the virtual baseclasses.
+ If (AUTO_DELETE & 1) is nonzero, we deallocate the storage.
+
+ FLAGS is the logical disjunction of zero or more LOOKUP_
+ flags. See cp-tree.h for more info.
+
+ This function does not delete an object's virtual base classes. */
+tree
+build_delete (type, addr, auto_delete, flags, use_global_delete)
+ tree type, addr;
+ tree auto_delete;
+ int flags;
+ int use_global_delete;
+{
+ tree function, parms;
+ tree member;
+ tree expr;
+ tree ref;
+ int ptr;
+
+ if (addr == error_mark_node)
+ return error_mark_node;
+
+ /* Can happen when CURRENT_EXCEPTION_OBJECT gets its type
+ set to `error_mark_node' before it gets properly cleaned up. */
+ if (type == error_mark_node)
+ return error_mark_node;
+
+ type = TYPE_MAIN_VARIANT (type);
+
+ if (TREE_CODE (type) == POINTER_TYPE)
+ {
+ type = TYPE_MAIN_VARIANT (TREE_TYPE (type));
+ if (TYPE_SIZE (type) == 0)
+ {
+ incomplete_type_error (0, type);
+ return error_mark_node;
+ }
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ goto handle_array;
+ if (! IS_AGGR_TYPE (type))
+ {
+ /* Call the builtin operator delete. */
+ return build_builtin_call (void_type_node, BID,
+ build_tree_list (NULL_TREE, addr));
+ }
+ if (TREE_SIDE_EFFECTS (addr))
+ addr = save_expr (addr);
+
+ /* throw away const and volatile on target type of addr */
+ addr = convert_force (build_pointer_type (type), addr);
+ ref = build_indirect_ref (addr, NULL_PTR);
+ ptr = 1;
+ }
+ else if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ handle_array:
+ if (TREE_SIDE_EFFECTS (addr))
+ addr = save_expr (addr);
+ return build_vec_delete (addr, array_type_nelts (type),
+ c_sizeof_nowarn (TREE_TYPE (type)),
+ auto_delete, integer_two_node,
+ use_global_delete);
+ }
+ else
+ {
+ /* Don't check PROTECT here; leave that decision to the
+ destructor. If the destructor is accessible, call it,
+ else report error. */
+ addr = build_unary_op (ADDR_EXPR, addr, 0);
+ if (TREE_SIDE_EFFECTS (addr))
+ addr = save_expr (addr);
+
+ if (TREE_CONSTANT (addr))
+ addr = convert_pointer_to (type, addr);
+ else
+ addr = convert_force (build_pointer_type (type), addr);
+
+ if (TREE_CODE (addr) == NOP_EXPR
+ && TREE_OPERAND (addr, 0) == current_class_decl)
+ ref = C_C_D;
+ else
+ ref = build_indirect_ref (addr, NULL_PTR);
+ ptr = 0;
+ }
+
+ my_friendly_assert (IS_AGGR_TYPE (type), 220);
+
+ if (! TYPE_NEEDS_DESTRUCTOR (type))
+ {
+ if (auto_delete == integer_zero_node)
+ return void_zero_node;
+
+ /* Pass the size of the object down to the operator delete() in
+ addition to the ADDR. */
+ if (TYPE_GETS_REG_DELETE (type) && !use_global_delete)
+ {
+ tree virtual_size = c_sizeof_nowarn (type);
+ return build_opfncall (DELETE_EXPR, LOOKUP_NORMAL, addr,
+ virtual_size, NULL_TREE);
+ }
+
+ /* Call the builtin operator delete. */
+ return build_builtin_call (void_type_node, BID,
+ build_tree_list (NULL_TREE, addr));
+ }
+ parms = build_tree_list (NULL_TREE, addr);
+
+ /* Below, we will reverse the order in which these calls are made.
+ If we have a destructor, then that destructor will take care
+ of the base classes; otherwise, we must do that here. */
+ if (TYPE_HAS_DESTRUCTOR (type))
+ {
+ tree dtor = DECL_MAIN_VARIANT (TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (type), 0));
+ tree basetypes = TYPE_BINFO (type);
+ tree passed_auto_delete;
+ tree do_delete = NULL_TREE;
+
+ if (use_global_delete)
+ {
+ tree cond = fold (build (BIT_AND_EXPR, integer_type_node,
+ auto_delete, integer_one_node));
+ tree call = build_builtin_call
+ (void_type_node, BID, build_tree_list (NULL_TREE, addr));
+
+ cond = fold (build (COND_EXPR, void_type_node, cond,
+ call, void_zero_node));
+ if (cond != void_zero_node)
+ do_delete = cond;
+
+ passed_auto_delete = fold (build (BIT_AND_EXPR, integer_type_node,
+ auto_delete, integer_two_node));
+ }
+ else
+ passed_auto_delete = auto_delete;
+
+ if (flags & LOOKUP_PROTECT)
+ {
+ enum access_type access = compute_access (basetypes, dtor);
+
+ if (access == access_private)
+ {
+ if (flags & LOOKUP_COMPLAIN)
+ cp_error ("destructor for type `%T' is private in this scope", type);
+ return error_mark_node;
+ }
+ else if (access == access_protected)
+ {
+ if (flags & LOOKUP_COMPLAIN)
+ cp_error ("destructor for type `%T' is protected in this scope", type);
+ return error_mark_node;
+ }
+ }
+
+ /* Once we are in a destructor, try to avoid going through
+ the virtual function table to find the next destructor. */
+ if (DECL_VINDEX (dtor)
+ && ! (flags & LOOKUP_NONVIRTUAL)
+ && TREE_CODE (auto_delete) != PARM_DECL
+ && (ptr == 1 || ! resolves_to_fixed_type_p (ref, 0)))
+ {
+ tree binfo, basetype;
+ /* The code below is probably all broken. See call.c for the
+ complete, right way to do this. The offsets below may not be
+ right. (mrs) */
+ /* This destructor must be called via virtual function table. */
+ dtor = TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (DECL_CONTEXT (dtor)), 0);
+ basetype = DECL_CLASS_CONTEXT (dtor);
+ binfo = get_binfo (basetype,
+ TREE_TYPE (TREE_TYPE (TREE_VALUE (parms))),
+ 0);
+ expr = convert_pointer_to_real (binfo, TREE_VALUE (parms));
+ if (expr != TREE_VALUE (parms))
+ {
+ expr = fold (expr);
+ ref = build_indirect_ref (expr, NULL_PTR);
+ TREE_VALUE (parms) = expr;
+ }
+ function = build_vfn_ref (&TREE_VALUE (parms), ref, DECL_VINDEX (dtor));
+ if (function == error_mark_node)
+ return error_mark_node;
+ TREE_TYPE (function) = build_pointer_type (TREE_TYPE (dtor));
+ TREE_CHAIN (parms) = build_tree_list (NULL_TREE, passed_auto_delete);
+ expr = build_function_call (function, parms);
+ if (do_delete)
+ expr = build (COMPOUND_EXPR, void_type_node, expr, do_delete);
+ if (ptr && (flags & LOOKUP_DESTRUCTOR) == 0)
+ {
+ /* Handle the case where a virtual destructor is
+ being called on an item that is 0.
+
+ @@ Does this really need to be done? */
+ tree ifexp = build_binary_op(NE_EXPR, addr, integer_zero_node,1);
+#if 0
+ if (TREE_CODE (ref) == VAR_DECL
+ || TREE_CODE (ref) == COMPONENT_REF)
+ warning ("losing in build_delete");
+#endif
+ expr = build (COND_EXPR, void_type_node,
+ ifexp, expr, void_zero_node);
+ }
+ }
+ else
+ {
+ tree ifexp;
+
+ if ((flags & LOOKUP_DESTRUCTOR)
+ || TREE_CODE (ref) == VAR_DECL
+ || TREE_CODE (ref) == PARM_DECL
+ || TREE_CODE (ref) == COMPONENT_REF
+ || TREE_CODE (ref) == ARRAY_REF)
+ /* These can't be 0. */
+ ifexp = integer_one_node;
+ else
+ /* Handle the case where a non-virtual destructor is
+ being called on an item that is 0. */
+ ifexp = build_binary_op (NE_EXPR, addr, integer_zero_node, 1);
+
+ /* Used to mean that this destructor was known to be empty,
+ but that's now obsolete. */
+ my_friendly_assert (DECL_INITIAL (dtor) != void_type_node, 221);
+
+ TREE_CHAIN (parms) = build_tree_list (NULL_TREE, passed_auto_delete);
+ expr = build_function_call (dtor, parms);
+ if (do_delete)
+ expr = build (COMPOUND_EXPR, void_type_node, expr, do_delete);
+
+ if (ifexp != integer_one_node)
+ expr = build (COND_EXPR, void_type_node,
+ ifexp, expr, void_zero_node);
+ }
+ return expr;
+ }
+ else
+ {
+ /* This can get visibilities wrong. */
+ tree binfos = BINFO_BASETYPES (TYPE_BINFO (type));
+ int i, n_baseclasses = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+ tree base_binfo = n_baseclasses > 0 ? TREE_VEC_ELT (binfos, 0) : NULL_TREE;
+ tree exprstmt = NULL_TREE;
+ tree parent_auto_delete = auto_delete;
+ tree cond;
+
+ /* If this type does not have a destructor, but does have
+ operator delete, call the parent destructor (if any),
+ but let this node do the deleting. Otherwise, it is OK
+ to let the parent destructor do the deleting. */
+ if (TYPE_GETS_REG_DELETE (type) && !use_global_delete)
+ {
+ parent_auto_delete = integer_zero_node;
+ if (auto_delete == integer_zero_node)
+ cond = NULL_TREE;
+ else
+ {
+ tree virtual_size;
+
+ /* This is probably wrong. It should be the size of the
+ virtual object being deleted. */
+ virtual_size = c_sizeof_nowarn (type);
+
+ expr = build_opfncall (DELETE_EXPR, LOOKUP_NORMAL, addr,
+ virtual_size, NULL_TREE);
+ if (expr == error_mark_node)
+ return error_mark_node;
+ if (auto_delete != integer_one_node)
+ cond = build (COND_EXPR, void_type_node,
+ build (BIT_AND_EXPR, integer_type_node,
+ auto_delete, integer_one_node),
+ expr, void_zero_node);
+ else
+ cond = expr;
+ }
+ }
+ else if (base_binfo == NULL_TREE
+ || (TREE_VIA_VIRTUAL (base_binfo) == 0
+ && ! TYPE_NEEDS_DESTRUCTOR (BINFO_TYPE (base_binfo))))
+ {
+ tree virtual_size;
+
+ /* This is probably wrong. It should be the size of the virtual
+ object being deleted. */
+ virtual_size = c_sizeof_nowarn (type);
+
+ cond = build (COND_EXPR, void_type_node,
+ build (BIT_AND_EXPR, integer_type_node, auto_delete, integer_one_node),
+ build_builtin_call (void_type_node, BID,
+ build_tree_list (NULL_TREE, addr)),
+ void_zero_node);
+ }
+ else
+ cond = NULL_TREE;
+
+ if (cond)
+ exprstmt = build_tree_list (NULL_TREE, cond);
+
+ if (base_binfo
+ && ! TREE_VIA_VIRTUAL (base_binfo)
+ && TYPE_NEEDS_DESTRUCTOR (BINFO_TYPE (base_binfo)))
+ {
+ tree this_auto_delete;
+
+ if (BINFO_OFFSET_ZEROP (base_binfo))
+ this_auto_delete = parent_auto_delete;
+ else
+ this_auto_delete = integer_zero_node;
+
+ expr = build_delete (TYPE_POINTER_TO (BINFO_TYPE (base_binfo)), addr,
+ this_auto_delete, flags, 0);
+ exprstmt = tree_cons (NULL_TREE, expr, exprstmt);
+ }
+
+ /* Take care of the remaining baseclasses. */
+ for (i = 1; i < n_baseclasses; i++)
+ {
+ base_binfo = TREE_VEC_ELT (binfos, i);
+ if (! TYPE_NEEDS_DESTRUCTOR (BINFO_TYPE (base_binfo))
+ || TREE_VIA_VIRTUAL (base_binfo))
+ continue;
+
+ /* May be zero offset if other baseclasses are virtual. */
+ expr = fold (build (PLUS_EXPR, TYPE_POINTER_TO (BINFO_TYPE (base_binfo)),
+ addr, BINFO_OFFSET (base_binfo)));
+
+ expr = build_delete (TYPE_POINTER_TO (BINFO_TYPE (base_binfo)), expr,
+ integer_zero_node,
+ flags, 0);
+
+ exprstmt = tree_cons (NULL_TREE, expr, exprstmt);
+ }
+
+ for (member = TYPE_FIELDS (type); member; member = TREE_CHAIN (member))
+ {
+ if (TREE_CODE (member) != FIELD_DECL)
+ continue;
+ if (TYPE_NEEDS_DESTRUCTOR (TREE_TYPE (member)))
+ {
+ tree this_member = build_component_ref (ref, DECL_NAME (member), 0, 0);
+ tree this_type = TREE_TYPE (member);
+ expr = build_delete (this_type, this_member, integer_two_node, flags, 0);
+ exprstmt = tree_cons (NULL_TREE, expr, exprstmt);
+ }
+ }
+
+ if (exprstmt)
+ return build_compound_expr (exprstmt);
+ /* Virtual base classes make this function do nothing. */
+ return void_zero_node;
+ }
+}
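+
+/* Illustrative sketch, not part of the imported source: how the
+   AUTO_DELETE bits are split in the use_global_delete case above --
+   bit 1 (the virtual-base teardown request) is forwarded to the
+   destructor, while bit 0 triggers the global deallocation here,
+   after the destructor runs. Names are hypothetical. */
+
+static void
+sketch_delete_bits (addr, auto_delete, dtor, builtin_delete)
+     char *addr;
+     int auto_delete;
+     void (*dtor) ();
+     void (*builtin_delete) ();
+{
+  (*dtor) (addr, auto_delete & 2);  /* passed_auto_delete */
+  if (auto_delete & 1)              /* do_delete */
+    (*builtin_delete) (addr);
+}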
+
+/* For type TYPE, delete the virtual baseclass objects of DECL. */
+
+tree
+build_vbase_delete (type, decl)
+ tree type, decl;
+{
+ tree vbases = CLASSTYPE_VBASECLASSES (type);
+ tree result = NULL_TREE;
+ tree addr = build_unary_op (ADDR_EXPR, decl, 0);
+
+ my_friendly_assert (addr != error_mark_node, 222);
+
+ while (vbases)
+ {
+ tree this_addr = convert_force (TYPE_POINTER_TO (BINFO_TYPE (vbases)),
+ addr);
+ result = tree_cons (NULL_TREE,
+ build_delete (TREE_TYPE (this_addr), this_addr,
+ integer_zero_node,
+ LOOKUP_NORMAL|LOOKUP_DESTRUCTOR, 0),
+ result);
+ vbases = TREE_CHAIN (vbases);
+ }
+ return build_compound_expr (nreverse (result));
+}
+
+/* Build a C++ vector delete expression.
+ MAXINDEX is the number of elements to be deleted.
+ ELT_SIZE is the nominal size of each element in the vector.
+ BASE is the expression that should yield the store to be deleted.
+ This function expands (or synthesizes) these calls itself.
+ AUTO_DELETE_VEC says whether the container (vector) should be deallocated.
+ AUTO_DELETE says whether each item in the container should be deallocated.
+
+ This also calls delete for virtual baseclasses of elements of the vector.
+
+ Update: MAXINDEX is no longer needed. The size can be extracted from the
+ start of the vector for pointers, and from the type for arrays. We still
+ use MAXINDEX for arrays because it happens to already have one of the
+ values we'd have to extract. (We could use MAXINDEX with pointers to
+ confirm the size, and trap if the numbers differ; not clear that it'd
+ be worth bothering.) */
+tree
+build_vec_delete (base, maxindex, elt_size, auto_delete_vec, auto_delete,
+ use_global_delete)
+ tree base, maxindex, elt_size;
+ tree auto_delete_vec, auto_delete;
+ int use_global_delete;
+{
+ tree ptype = TREE_TYPE (base);
+ tree type;
+ tree virtual_size;
+ /* Temporary variables used by the loop. */
+ tree tbase, size_exp, tbase_init;
+
+ /* This is the body of the loop that implements the deletion of a
+ single element, and moves temp variables to next elements. */
+ tree body;
+
+ /* This is the LOOP_EXPR that governs the deletion of the elements. */
+ tree loop;
+
+ /* This is the thing that governs what to do after the loop has run. */
+ tree deallocate_expr = 0;
+
+ /* This is the BIND_EXPR which holds the outermost iterator of the
+ loop. It is convenient to set this variable up and test it before
+ executing any other code in the loop.
+ This is also the containing expression returned by this function. */
+ tree controller = NULL_TREE;
+
+ /* This is the BLOCK to record the symbol binding for debugging. */
+ tree block;
+
+ base = stabilize_reference (base);
+
+ /* Since we can use base many times, save_expr it. */
+ if (TREE_SIDE_EFFECTS (base))
+ base = save_expr (base);
+
+ if (TREE_CODE (ptype) == POINTER_TYPE)
+ {
+ /* Step back one from start of vector, and read dimension. */
+ tree cookie_addr = build (MINUS_EXPR, TYPE_POINTER_TO (BI_header_type),
+ base, BI_header_size);
+ tree cookie = build_indirect_ref (cookie_addr, NULL_PTR);
+ maxindex = build_component_ref (cookie, nc_nelts_field_id, 0, 0);
+ do
+ ptype = TREE_TYPE (ptype);
+ while (TREE_CODE (ptype) == ARRAY_TYPE);
+ }
+ else if (TREE_CODE (ptype) == ARRAY_TYPE)
+ {
+ /* Get the total number of things in the array; maxindex is a bad name. */
+ maxindex = array_type_nelts_total (ptype);
+ while (TREE_CODE (ptype) == ARRAY_TYPE)
+ ptype = TREE_TYPE (ptype);
+ base = build_unary_op (ADDR_EXPR, base, 1);
+ }
+ else
+ {
+ error ("type to vector delete is neither pointer or array type");
+ return error_mark_node;
+ }
+ type = ptype;
+ ptype = TYPE_POINTER_TO (type);
+
+ size_exp = size_in_bytes (type);
+
+ if (! IS_AGGR_TYPE (type) || ! TYPE_NEEDS_DESTRUCTOR (type))
+ {
+ loop = integer_zero_node;
+ goto no_destructor;
+ }
+
+ /* The size below is short by BI_header_size. */
+ virtual_size = fold (size_binop (MULT_EXPR, size_exp, maxindex));
+
+ tbase = build_decl (VAR_DECL, NULL_TREE, ptype);
+ tbase_init = build_modify_expr (tbase, NOP_EXPR,
+ fold (build (PLUS_EXPR, ptype,
+ base,
+ virtual_size)));
+ DECL_REGISTER (tbase) = 1;
+ controller = build (BIND_EXPR, void_type_node, tbase, 0, 0);
+ TREE_SIDE_EFFECTS (controller) = 1;
+ block = build_block (tbase, 0, 0, 0, 0);
+ add_block_current_level (block);
+
+ if (auto_delete != integer_zero_node
+ && auto_delete != integer_two_node)
+ {
+ tree base_tbd = convert (ptype,
+ build_binary_op (MINUS_EXPR,
+ convert (ptr_type_node, base),
+ BI_header_size,
+ 1));
+ /* This is the real size */
+ virtual_size = size_binop (PLUS_EXPR, virtual_size, BI_header_size);
+ body = build_tree_list (NULL_TREE,
+ build_x_delete (ptype, base_tbd,
+ 2 | use_global_delete,
+ virtual_size));
+ body = build (COND_EXPR, void_type_node,
+ build (BIT_AND_EXPR, integer_type_node,
+ auto_delete, integer_one_node),
+ body, integer_zero_node);
+ }
+ else
+ body = NULL_TREE;
+
+ body = tree_cons (NULL_TREE,
+ build_delete (ptype, tbase, auto_delete,
+ LOOKUP_NORMAL|LOOKUP_DESTRUCTOR, 1),
+ body);
+
+ body = tree_cons (NULL_TREE,
+ build_modify_expr (tbase, NOP_EXPR, build (MINUS_EXPR, ptype, tbase, size_exp)),
+ body);
+
+ body = tree_cons (NULL_TREE,
+ build (EXIT_EXPR, void_type_node,
+ build (EQ_EXPR, integer_type_node, base, tbase)),
+ body);
+
+ loop = build (LOOP_EXPR, void_type_node, build_compound_expr (body));
+
+ loop = tree_cons (NULL_TREE, tbase_init,
+ tree_cons (NULL_TREE, loop, NULL_TREE));
+ loop = build_compound_expr (loop);
+
+ no_destructor:
+ /* If the delete flag is one, or anything else with the low bit set,
+ delete the storage. */
+ if (auto_delete_vec == integer_zero_node
+ || auto_delete_vec == integer_two_node)
+ deallocate_expr = integer_zero_node;
+ else
+ {
+ tree base_tbd;
+
+ /* The size below is short by BI_header_size. */
+ virtual_size = fold (size_binop (MULT_EXPR, size_exp, maxindex));
+
+ if (! TYPE_VEC_NEW_USES_COOKIE (type))
+ /* no header */
+ base_tbd = base;
+ else
+ {
+ base_tbd = convert (ptype,
+ build_binary_op (MINUS_EXPR,
+ convert (string_type_node, base),
+ BI_header_size,
+ 1));
+ /* True size with header. */
+ virtual_size = size_binop (PLUS_EXPR, virtual_size, BI_header_size);
+ }
+ deallocate_expr = build_x_delete (ptype, base_tbd,
+ 2 | use_global_delete,
+ virtual_size);
+ if (auto_delete_vec != integer_one_node)
+ deallocate_expr = build (COND_EXPR, void_type_node,
+ build (BIT_AND_EXPR, integer_type_node,
+ auto_delete_vec, integer_one_node),
+ deallocate_expr, integer_zero_node);
+ }
+
+ if (loop && deallocate_expr != integer_zero_node)
+ {
+ body = tree_cons (NULL_TREE, loop,
+ tree_cons (NULL_TREE, deallocate_expr, NULL_TREE));
+ body = build_compound_expr (body);
+ }
+ else
+ body = loop;
+
+ /* Outermost wrapper: If pointer is null, punt. */
+ body = build (COND_EXPR, void_type_node,
+ build (NE_EXPR, integer_type_node, base, integer_zero_node),
+ body, integer_zero_node);
+ body = build1 (NOP_EXPR, void_type_node, body);
+
+ if (controller)
+ {
+ TREE_OPERAND (controller, 1) = body;
+ return controller;
+ }
+ else
+ return convert (void_type_node, body);
+}
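+
+/* Illustrative sketch, not part of the imported source: the
+   destruction loop built above walks a temporary pointer down from
+   one past the end of the vector to its base, destroying elements in
+   reverse order; the element count comes from the cookie that
+   build_new stored just before the array. Names are hypothetical. */
+
+static void
+sketch_vec_delete (base, nelts, elt_size, dtor)
+     char *base;
+     int nelts, elt_size;
+     void (*dtor) ();
+{
+  char *tbase = base + nelts * elt_size;  /* tbase_init */
+  while (tbase != base)                   /* EXIT_EXPR test */
+    {
+      tbase -= elt_size;                  /* step back one element */
+      (*dtor) (tbase);                    /* build_delete on *tbase */
+    }
+}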
diff --git a/gnu/usr.bin/cc/cc1plus/input.c b/gnu/usr.bin/cc/cc1plus/input.c
new file mode 100644
index 0000000..1570489
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/input.c
@@ -0,0 +1,184 @@
+/* Input handling for G++.
+ Copyright (C) 1992, 1993 Free Software Foundation, Inc.
+ Written by Ken Raeburn (raeburn@cygnus.com) while at Watchmaker Computing.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* G++ needs to do enough saving and re-parsing of text that it is
+ necessary to abandon the simple FILE* model and use a mechanism where
+ we can pre-empt one input stream with another derived from saved text;
+ we may need to do this arbitrarily often, and cannot depend on having
+ the GNU library available, so FILE objects just don't cut it.
+
+ This file is written as a separate module, but can be included by
+ lex.c for very minor efficiency gains (primarily in function
+ inlining). */
+
+#include <stdio.h>
+#include "obstack.h"
+
+extern FILE *finput;
+
+struct pending_input *save_pending_input ();
+void restore_pending_input ();
+
+struct input_source {
+ /* saved string */
+ char *str;
+ int length;
+ /* current position, when reading as input */
+ int offset;
+ /* obstack to free this input string from when finished, if any */
+ struct obstack *obstack;
+ /* linked list maintenance */
+ struct input_source *next;
+ /* values to restore after reading all of current string */
+ char *filename;
+ int lineno;
+ struct pending_input *input;
+ int putback_char;
+};
+
+static struct input_source *input, *free_inputs;
+
+extern char *input_filename;
+extern int lineno;
+
+#ifdef __GNUC__
+#define inline __inline__
+#else
+#define inline
+#endif
+
+static inline struct input_source *
+allocate_input ()
+{
+ struct input_source *inp;
+ if (free_inputs)
+ {
+ inp = free_inputs;
+ free_inputs = inp->next;
+ inp->next = 0;
+ return inp;
+ }
+ inp = (struct input_source *) xmalloc (sizeof (struct input_source));
+ inp->next = 0;
+ inp->obstack = 0;
+ return inp;
+}
+
+static inline void
+free_input (inp)
+ struct input_source *inp;
+{
+ if (inp->obstack)
+ obstack_free (inp->obstack, inp->str);
+ inp->obstack = 0;
+ inp->str = 0;
+ inp->length = 0;
+ inp->next = free_inputs;
+ free_inputs = inp;
+}
+
+static int putback_char = -1;
+
+/* Some of these external functions are declared inline in case this file
+ is included in lex.c. */
+
+inline
+void
+feed_input (str, len, delete)
+ char *str;
+ int len;
+ struct obstack *delete;
+{
+ struct input_source *inp = allocate_input ();
+
+ /* This shouldn't be necessary. */
+ while (len && !str[len-1])
+ len--;
+
+ inp->str = str;
+ inp->length = len;
+ inp->obstack = delete;
+ inp->offset = 0;
+ inp->next = input;
+ inp->filename = input_filename;
+ inp->lineno = lineno;
+ inp->input = save_pending_input ();
+ inp->putback_char = putback_char;
+ putback_char = -1;
+ input = inp;
+}
+
+struct pending_input *to_be_restored; /* XXX */
+extern int end_of_file;
+
+int
+getch ()
+{
+ if (putback_char != -1)
+ {
+ int ch = putback_char;
+ putback_char = -1;
+ return ch;
+ }
+ if (input)
+ {
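+ /* End of the fed text: return a single EOF, and push offset
+ past length so that the next call pops this input source. */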
+ if (input->offset == input->length)
+ {
+ struct input_source *inp = input;
+ my_friendly_assert (putback_char == -1, 223);
+ to_be_restored = inp->input;
+ input->offset++;
+ return EOF;
+ }
+ else if (input->offset > input->length)
+ {
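+ /* The EOF for this stream has already been returned; pop
+ back to the enclosing stream and try again. */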
+ struct input_source *inp = input;
+
+ end_of_file = 0;
+ input = inp->next;
+ input_filename = inp->filename;
+ lineno = inp->lineno;
+ /* Get interface/implementation back in sync. */
+ extract_interface_info ();
+ putback_char = inp->putback_char;
+ free_input (inp);
+ return getch ();
+ }
+ if (input)
+ return input->str[input->offset++];
+ }
+ return getc (finput);
+}
+
+inline
+void
+put_back (ch)
+ int ch;
+{
+ my_friendly_assert (putback_char == -1, 224);
+ putback_char = ch;
+}
+
+inline
+int
+input_redirected ()
+{
+ return input != 0;
+}
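+
+/* Illustrative sketch, not part of the imported source: how a caller
+   replays saved text through the stream stack above. OUT and the
+   function name are hypothetical; passing a null obstack means the
+   text is not freed when it is drained. */
+
+static int
+sketch_replay (text, len, out)
+     char *text, *out;
+     int len;
+{
+  int ch, n = 0;
+
+  feed_input (text, len, (struct obstack *) 0); /* push saved text */
+  while ((ch = getch ()) != EOF)                /* read it back */
+    out[n++] = ch;
+  /* The next getch () pops back to the enclosing stream. */
+  return n;
+}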
diff --git a/gnu/usr.bin/cc/cc1plus/lex.c b/gnu/usr.bin/cc/cc1plus/lex.c
new file mode 100644
index 0000000..5edf659
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/lex.c
@@ -0,0 +1,4818 @@
+/* Separate lexical analyzer for GNU C++.
+ Copyright (C) 1987, 1989, 1992, 1993, 1994 Free Software Foundation, Inc.
+ Hacked by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file is the lexical analyzer for GNU C++. */
+
+/* Cause the `yydebug' variable to be defined. */
+#define YYDEBUG 1
+
+#include <sys/types.h>
+#include <stdio.h>
+#include <errno.h>
+#include <setjmp.h>
+#include "config.h"
+#include "input.h"
+#include "tree.h"
+#include "lex.h"
+#include "parse.h"
+#include "cp-tree.h"
+#include "flags.h"
+#include "obstack.h"
+
+#ifdef MULTIBYTE_CHARS
+#include <stdlib.h>
+#include <locale.h>
+#endif
+
+#ifndef errno
+extern int errno; /* needed for VAX. */
+#endif
+extern jmp_buf toplevel;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+extern struct obstack *expression_obstack, permanent_obstack;
+extern struct obstack *current_obstack, *saveable_obstack;
+
+extern double atof ();
+
+extern char *get_directive_line (); /* In c-common.c */
+
+/* Given a file name X, return the nondirectory portion.
+ Keep in mind that X can be computed more than once. */
+#ifndef FILE_NAME_NONDIRECTORY
+#define FILE_NAME_NONDIRECTORY(X) \
+ (rindex (X, '/') != 0 ? rindex (X, '/') + 1 : X)
+#endif
+
+extern char *index ();
+extern char *rindex ();
+
+void extract_interface_info ();
+void yyerror ();
+
+/* This obstack is needed to hold text. It is not safe to use
+ TOKEN_BUFFER because `check_newline' calls `yylex'. */
+struct obstack inline_text_obstack;
+static char *inline_text_firstobj;
+
+int end_of_file;
+
+/* Pending language change.
+ Positive is push count, negative is pop count. */
+int pending_lang_change = 0;
+
+/* Wrap the current header file in extern "C". */
+static int c_header_level = 0;
+
+extern int first_token;
+extern struct obstack token_obstack;
+
+/* ??? Don't really know where this goes yet. */
+#if 1
+#include "input.c"
+#else
+extern void put_back (/* int */);
+extern int input_redirected ();
+extern void feed_input (/* char *, int, struct obstack * */);
+#endif
+
+/* Holds translations from TREE_CODEs to operator name strings,
+ i.e., opname_tab[PLUS_EXPR] == "+". */
+char **opname_tab;
+char **assignop_tab;
+
+extern int yychar; /* the lookahead symbol */
+extern YYSTYPE yylval; /* the semantic value of the */
+ /* lookahead symbol */
+
+#if 0
+YYLTYPE yylloc; /* location data for the lookahead */
+ /* symbol */
+#endif
+
+
+/* The declaration found for the last IDENTIFIER token read in.
+ yylex must look this up to detect typedefs, which get token type TYPENAME,
+ so it is left around in case the identifier is not a typedef but is
+ used in a context which makes it a reference to a variable. */
+tree lastiddecl;
+
+/* The elements of `ridpointers' are identifier nodes
+ for the reserved type names and storage classes.
+ It is indexed by a RID_... value. */
+tree ridpointers[(int) RID_MAX];
+
+/* We may keep statistics about how long which files took to compile. */
+static int header_time, body_time;
+static tree get_time_identifier ();
+static tree filename_times;
+static tree this_filename_time;
+
+/* For implementing #pragma unit. */
+tree current_unit_name;
+tree current_unit_language;
+
+/* Array for holding counts of the numbers of tokens seen. */
+extern int *token_count;
+
+/* Textual definition used for default functions. */
+static void default_copy_constructor_body ();
+static void default_assign_ref_body ();
+
+/* Return something to represent absolute declarators containing a *.
+ TARGET is the absolute declarator that the * contains.
+ TYPE_QUALS is a list of modifiers such as const or volatile
+ to apply to the pointer type, represented as identifiers.
+
+ We return an INDIRECT_REF whose "contents" are TARGET
+ and whose type is the modifier list. */
+
+tree
+make_pointer_declarator (type_quals, target)
+ tree type_quals, target;
+{
+ if (target && TREE_CODE (target) == IDENTIFIER_NODE
+ && ANON_AGGRNAME_P (target))
+ error ("type name expected before `*'");
+ target = build_parse_node (INDIRECT_REF, target);
+ TREE_TYPE (target) = type_quals;
+ return target;
+}
+
+/* Return something to represent absolute declarators containing a &.
+ TARGET is the absolute declarator that the & contains.
+ TYPE_QUALS is a list of modifiers such as const or volatile
+ to apply to the reference type, represented as identifiers.
+
+ We return an ADDR_EXPR whose "contents" are TARGET
+ and whose type is the modifier list. */
+
+tree
+make_reference_declarator (type_quals, target)
+ tree type_quals, target;
+{
+ if (target)
+ {
+ if (TREE_CODE (target) == ADDR_EXPR)
+ {
+ error ("cannot declare references to references");
+ return target;
+ }
+ if (TREE_CODE (target) == INDIRECT_REF)
+ {
+ error ("cannot declare pointers to references");
+ return target;
+ }
+ if (TREE_CODE (target) == IDENTIFIER_NODE && ANON_AGGRNAME_P (target))
+ error ("type name expected before `&'");
+ }
+ target = build_parse_node (ADDR_EXPR, target);
+ TREE_TYPE (target) = type_quals;
+ return target;
+}
+
+/* Build names and nodes for overloaded operators. */
+
+tree ansi_opname[LAST_CPLUS_TREE_CODE];
+tree ansi_assopname[LAST_CPLUS_TREE_CODE];
+
+char *
+operator_name_string (name)
+ tree name;
+{
+ char *opname = IDENTIFIER_POINTER (name) + 2;
+ tree *opname_table;
+ int i, assign;
+
+ /* Works for builtin and user-defined types. */
+ if (IDENTIFIER_GLOBAL_VALUE (name)
+ && TREE_CODE (IDENTIFIER_GLOBAL_VALUE (name)) == TYPE_DECL)
+ return IDENTIFIER_POINTER (name);
+
+ if (opname[0] == 'a' && opname[2] != '\0' && opname[2] != '_')
+ {
+ opname += 1;
+ assign = 1;
+ opname_table = ansi_assopname;
+ }
+ else
+ {
+ assign = 0;
+ opname_table = ansi_opname;
+ }
+
+ for (i = 0; i < (int) LAST_CPLUS_TREE_CODE; i++)
+ {
+ if (opname[0] == IDENTIFIER_POINTER (opname_table[i])[2+assign]
+ && opname[1] == IDENTIFIER_POINTER (opname_table[i])[3+assign])
+ break;
+ }
+
+ if (i == LAST_CPLUS_TREE_CODE)
+ return "<invalid operator>";
+
+ if (assign)
+ return assignop_tab[i];
+ else
+ return opname_tab[i];
+}
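+
+/* Illustrative sketch, not part of the imported source: the encoding
+   decoded above. Plain operator identifiers are "__" plus a
+   two-letter code ("__ml" for `*', "__pl" for `+'); assignment forms
+   insert an `a' ("__aml" for `*=', "__apl" for `+='). A hypothetical
+   decoder over a few of the codes set up in init_lex below: */
+
+static char *
+sketch_decode_opname (id)
+     char *id;
+{
+  extern int strcmp ();
+  static char *enc[] = { "__ml", "__pl", "__aml", "__apl", 0 };
+  static char *ops[] = { "*",    "+",    "*=",    "+=" };
+  int i;
+
+  for (i = 0; enc[i]; i++)
+    if (! strcmp (id, enc[i]))
+      return ops[i];
+  return "<invalid operator>";
+}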
+
+int interface_only; /* whether or not current file is only for
+ interface definitions. */
+int interface_unknown; /* whether or not we know this class
+ to behave according to #pragma interface. */
+
+/* lexical analyzer */
+
+/* File used for outputting assembler code. */
+extern FILE *asm_out_file;
+
+#ifndef WCHAR_TYPE_SIZE
+#ifdef INT_TYPE_SIZE
+#define WCHAR_TYPE_SIZE INT_TYPE_SIZE
+#else
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+#endif
+#endif
+
+/* Number of bytes in a wide character. */
+#define WCHAR_BYTES (WCHAR_TYPE_SIZE / BITS_PER_UNIT)
+
+static int maxtoken; /* Current nominal length of token buffer. */
+char *token_buffer; /* Pointer to token buffer.
+ Actual allocated length is maxtoken + 2. */
+
+#include "hash.h"
+
+int check_newline ();
+
+/* Nonzero tells yylex to ignore \ in string constants. */
+static int ignore_escape_flag = 0;
+
+static int skip_white_space ();
+
+static tree
+get_time_identifier (name)
+ char *name;
+{
+ tree time_identifier;
+ int len = strlen (name);
+ char *buf = (char *) alloca (len + 6);
+ strcpy (buf, "file ");
+ bcopy (name, buf+5, len);
+ buf[len+5] = '\0';
+ time_identifier = get_identifier (buf);
+ if (IDENTIFIER_LOCAL_VALUE (time_identifier) == NULL_TREE)
+ {
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ IDENTIFIER_LOCAL_VALUE (time_identifier) = build_int_2 (0, 0);
+ IDENTIFIER_CLASS_VALUE (time_identifier) = build_int_2 (0, 1);
+ IDENTIFIER_GLOBAL_VALUE (time_identifier) = filename_times;
+ filename_times = time_identifier;
+ pop_obstacks ();
+ }
+ return time_identifier;
+}
+
+#ifdef __GNUC__
+__inline
+#endif
+static int
+my_get_run_time ()
+{
+ int old_quiet_flag = quiet_flag;
+ int this_time;
+ quiet_flag = 0;
+ this_time = get_run_time ();
+ quiet_flag = old_quiet_flag;
+ return this_time;
+}
+
+/* Table indexed by tree code giving a string containing a character
+ classifying the tree code. Possibilities are
+ t, d, s, c, r, <, 1 and 2. See cp/tree.def for details. */
+
+#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) TYPE,
+
+char *cplus_tree_code_type[] = {
+ "x",
+#include "tree.def"
+};
+#undef DEFTREECODE
+
+/* Table indexed by tree code giving number of expression
+ operands beyond the fixed part of the node structure.
+ Not used for types or decls. */
+
+#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) LENGTH,
+
+int cplus_tree_code_length[] = {
+ 0,
+#include "tree.def"
+};
+#undef DEFTREECODE
+
+/* Names of tree components.
+ Used for printing out the tree and error messages. */
+#define DEFTREECODE(SYM, NAME, TYPE, LEN) NAME,
+
+char *cplus_tree_code_name[] = {
+ "@@dummy",
+#include "tree.def"
+};
+#undef DEFTREECODE
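+
+/* Illustrative sketch, not part of the imported source: the
+   DEFTREECODE trick used above. Each line of tree.def is an
+   invocation like DEFTREECODE (PLUS_EXPR, "plus_expr", "2", 2);
+   redefining the macro before each #include extracts one column.
+   Here is the same idea with a tiny inline list instead of a file;
+   all names are hypothetical. */
+
+#define SKETCH_CODES \
+  DEFTREECODE (SK_PLUS, "sk_plus", "2", 2) \
+  DEFTREECODE (SK_CALL, "sk_call", "e", 3)
+
+#define DEFTREECODE(SYM, NAME, TYPE, LEN) NAME,
+static char *sketch_names[] = { SKETCH_CODES };
+#undef DEFTREECODE
+
+#define DEFTREECODE(SYM, NAME, TYPE, LEN) LEN,
+static int sketch_lengths[] = { SKETCH_CODES };
+#undef DEFTREECODE
+#undef SKETCH_CODES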
+
+/* toplev.c needs to call these. */
+
+void
+lang_init ()
+{
+ /* the beginning of the file is a new line; check for # */
+ /* With luck, we discover the real source file's name from that
+ and put it in input_filename. */
+ put_back (check_newline ());
+
+ if (flag_cadillac)
+ cadillac_start ();
+ if (flag_gnu_xref) GNU_xref_begin (input_filename);
+}
+
+void
+lang_finish ()
+{
+ extern int errorcount, sorrycount;
+ if (flag_gnu_xref) GNU_xref_end (errorcount+sorrycount);
+}
+
+char *
+lang_identify ()
+{
+ return "cplusplus";
+}
+
+void
+init_filename_times ()
+{
+ this_filename_time = get_time_identifier ("<top level>");
+ if (flag_detailed_statistics)
+ {
+ header_time = 0;
+ body_time = my_get_run_time ();
+ TREE_INT_CST_LOW (IDENTIFIER_LOCAL_VALUE (this_filename_time)) = body_time;
+ }
+}
+
+/* Change by Bryan Boreham, Kewill, Thu Jul 27 09:46:05 1989.
+ Stuck this hack in to get the files open correctly; this is called
+ in place of init_lex if we are an unexec'd binary. */
+void
+reinit_lang_specific ()
+{
+ init_filename_times ();
+ reinit_search_statistics ();
+}
+
+void
+init_lex ()
+{
+ extern char *(*decl_printable_name) ();
+
+ int i;
+
+ /* Initialize the lookahead machinery. */
+ init_spew ();
+
+ /* Make identifier nodes long enough for the language-specific slots. */
+ set_identifier_size (sizeof (struct lang_identifier));
+ decl_printable_name = lang_printable_name;
+
+ init_cplus_expand ();
+
+ tree_code_type
+ = (char **) realloc (tree_code_type,
+ sizeof (char *) * LAST_CPLUS_TREE_CODE);
+ tree_code_length
+ = (int *) realloc (tree_code_length,
+ sizeof (int) * LAST_CPLUS_TREE_CODE);
+ tree_code_name
+ = (char **) realloc (tree_code_name,
+ sizeof (char *) * LAST_CPLUS_TREE_CODE);
+ bcopy ((char *)cplus_tree_code_type,
+ (char *)(tree_code_type + (int) LAST_AND_UNUSED_TREE_CODE),
+ (LAST_CPLUS_TREE_CODE - (int)LAST_AND_UNUSED_TREE_CODE) * sizeof (char *));
+ bcopy ((char *)cplus_tree_code_length,
+ (char *)(tree_code_length + (int) LAST_AND_UNUSED_TREE_CODE),
+ (LAST_CPLUS_TREE_CODE - (int)LAST_AND_UNUSED_TREE_CODE) * sizeof (int));
+ bcopy ((char *)cplus_tree_code_name,
+ (char *)(tree_code_name + (int) LAST_AND_UNUSED_TREE_CODE),
+ (LAST_CPLUS_TREE_CODE - (int)LAST_AND_UNUSED_TREE_CODE) * sizeof (char *));
+
+ opname_tab = (char **)oballoc ((int)LAST_CPLUS_TREE_CODE * sizeof (char *));
+ bzero ((char *)opname_tab, (int)LAST_CPLUS_TREE_CODE * sizeof (char *));
+ assignop_tab = (char **)oballoc ((int)LAST_CPLUS_TREE_CODE * sizeof (char *));
+ bzero ((char *)assignop_tab, (int)LAST_CPLUS_TREE_CODE * sizeof (char *));
+
+ ansi_opname[0] = get_identifier ("<invalid operator>");
+ for (i = 0; i < (int) LAST_CPLUS_TREE_CODE; i++)
+ {
+ ansi_opname[i] = ansi_opname[0];
+ ansi_assopname[i] = ansi_opname[0];
+ }
+
+ ansi_opname[(int) MULT_EXPR] = get_identifier ("__ml");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) MULT_EXPR]) = 1;
+ ansi_opname[(int) INDIRECT_REF] = ansi_opname[(int) MULT_EXPR];
+ ansi_assopname[(int) MULT_EXPR] = get_identifier ("__aml");
+ IDENTIFIER_OPNAME_P (ansi_assopname[(int) MULT_EXPR]) = 1;
+ ansi_assopname[(int) INDIRECT_REF] = ansi_assopname[(int) MULT_EXPR];
+ ansi_opname[(int) TRUNC_MOD_EXPR] = get_identifier ("__md");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) TRUNC_MOD_EXPR]) = 1;
+ ansi_assopname[(int) TRUNC_MOD_EXPR] = get_identifier ("__amd");
+ IDENTIFIER_OPNAME_P (ansi_assopname[(int) TRUNC_MOD_EXPR]) = 1;
+ ansi_opname[(int) CEIL_MOD_EXPR] = ansi_opname[(int) TRUNC_MOD_EXPR];
+ ansi_opname[(int) FLOOR_MOD_EXPR] = ansi_opname[(int) TRUNC_MOD_EXPR];
+ ansi_opname[(int) ROUND_MOD_EXPR] = ansi_opname[(int) TRUNC_MOD_EXPR];
+ ansi_opname[(int) MINUS_EXPR] = get_identifier ("__mi");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) MINUS_EXPR]) = 1;
+ ansi_opname[(int) NEGATE_EXPR] = ansi_opname[(int) MINUS_EXPR];
+ ansi_assopname[(int) MINUS_EXPR] = get_identifier ("__ami");
+ IDENTIFIER_OPNAME_P (ansi_assopname[(int) MINUS_EXPR]) = 1;
+ ansi_assopname[(int) NEGATE_EXPR] = ansi_assopname[(int) MINUS_EXPR];
+ ansi_opname[(int) RSHIFT_EXPR] = get_identifier ("__rs");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) RSHIFT_EXPR]) = 1;
+ ansi_assopname[(int) RSHIFT_EXPR] = get_identifier ("__ars");
+ IDENTIFIER_OPNAME_P (ansi_assopname[(int) RSHIFT_EXPR]) = 1;
+ ansi_opname[(int) NE_EXPR] = get_identifier ("__ne");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) NE_EXPR]) = 1;
+ ansi_opname[(int) GT_EXPR] = get_identifier ("__gt");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) GT_EXPR]) = 1;
+ ansi_opname[(int) GE_EXPR] = get_identifier ("__ge");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) GE_EXPR]) = 1;
+ ansi_opname[(int) BIT_IOR_EXPR] = get_identifier ("__or");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) BIT_IOR_EXPR]) = 1;
+ ansi_assopname[(int) BIT_IOR_EXPR] = get_identifier ("__aor");
+ IDENTIFIER_OPNAME_P (ansi_assopname[(int) BIT_IOR_EXPR]) = 1;
+ ansi_opname[(int) TRUTH_ANDIF_EXPR] = get_identifier ("__aa");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) TRUTH_ANDIF_EXPR]) = 1;
+ ansi_opname[(int) TRUTH_NOT_EXPR] = get_identifier ("__nt");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) TRUTH_NOT_EXPR]) = 1;
+ ansi_opname[(int) PREINCREMENT_EXPR] = get_identifier ("__pp");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) PREINCREMENT_EXPR]) = 1;
+ ansi_opname[(int) POSTINCREMENT_EXPR] = ansi_opname[(int) PREINCREMENT_EXPR];
+ ansi_opname[(int) MODIFY_EXPR] = get_identifier ("__as");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) MODIFY_EXPR]) = 1;
+ ansi_assopname[(int) NOP_EXPR] = ansi_opname[(int) MODIFY_EXPR];
+ ansi_opname[(int) COMPOUND_EXPR] = get_identifier ("__cm");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) COMPOUND_EXPR]) = 1;
+ ansi_opname[(int) EXACT_DIV_EXPR] = get_identifier ("__dv");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) EXACT_DIV_EXPR]) = 1;
+ ansi_assopname[(int) EXACT_DIV_EXPR] = get_identifier ("__adv");
+ IDENTIFIER_OPNAME_P (ansi_assopname[(int) EXACT_DIV_EXPR]) = 1;
+ ansi_opname[(int) TRUNC_DIV_EXPR] = ansi_opname[(int) EXACT_DIV_EXPR];
+ ansi_opname[(int) CEIL_DIV_EXPR] = ansi_opname[(int) EXACT_DIV_EXPR];
+ ansi_opname[(int) FLOOR_DIV_EXPR] = ansi_opname[(int) EXACT_DIV_EXPR];
+ ansi_opname[(int) ROUND_DIV_EXPR] = ansi_opname[(int) EXACT_DIV_EXPR];
+ ansi_opname[(int) PLUS_EXPR] = get_identifier ("__pl");
+ ansi_assopname[(int) TRUNC_DIV_EXPR] = ansi_assopname[(int) EXACT_DIV_EXPR];
+ ansi_assopname[(int) CEIL_DIV_EXPR] = ansi_assopname[(int) EXACT_DIV_EXPR];
+ ansi_assopname[(int) FLOOR_DIV_EXPR] = ansi_assopname[(int) EXACT_DIV_EXPR];
+ ansi_assopname[(int) ROUND_DIV_EXPR] = ansi_assopname[(int) EXACT_DIV_EXPR];
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) PLUS_EXPR]) = 1;
+ ansi_assopname[(int) PLUS_EXPR] = get_identifier ("__apl");
+ IDENTIFIER_OPNAME_P (ansi_assopname[(int) PLUS_EXPR]) = 1;
+ ansi_opname[(int) CONVERT_EXPR] = ansi_opname[(int) PLUS_EXPR];
+ ansi_assopname[(int) CONVERT_EXPR] = ansi_assopname[(int) PLUS_EXPR];
+ ansi_opname[(int) LSHIFT_EXPR] = get_identifier ("__ls");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) LSHIFT_EXPR]) = 1;
+ ansi_assopname[(int) LSHIFT_EXPR] = get_identifier ("__als");
+ IDENTIFIER_OPNAME_P (ansi_assopname[(int) LSHIFT_EXPR]) = 1;
+ ansi_opname[(int) EQ_EXPR] = get_identifier ("__eq");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) EQ_EXPR]) = 1;
+ ansi_opname[(int) LT_EXPR] = get_identifier ("__lt");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) LT_EXPR]) = 1;
+ ansi_opname[(int) LE_EXPR] = get_identifier ("__le");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) LE_EXPR]) = 1;
+ ansi_opname[(int) BIT_AND_EXPR] = get_identifier ("__ad");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) BIT_AND_EXPR]) = 1;
+ ansi_assopname[(int) BIT_AND_EXPR] = get_identifier ("__aad");
+ IDENTIFIER_OPNAME_P (ansi_assopname[(int) BIT_AND_EXPR]) = 1;
+ ansi_opname[(int) ADDR_EXPR] = ansi_opname[(int) BIT_AND_EXPR];
+ ansi_assopname[(int) ADDR_EXPR] = ansi_assopname[(int) BIT_AND_EXPR];
+ ansi_opname[(int) BIT_XOR_EXPR] = get_identifier ("__er");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) BIT_XOR_EXPR]) = 1;
+ ansi_assopname[(int) BIT_XOR_EXPR] = get_identifier ("__aer");
+ IDENTIFIER_OPNAME_P (ansi_assopname[(int) BIT_XOR_EXPR]) = 1;
+ ansi_opname[(int) TRUTH_ORIF_EXPR] = get_identifier ("__oo");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) TRUTH_ORIF_EXPR]) = 1;
+ ansi_opname[(int) BIT_NOT_EXPR] = get_identifier ("__co");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) BIT_NOT_EXPR]) = 1;
+ ansi_opname[(int) PREDECREMENT_EXPR] = get_identifier ("__mm");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) PREDECREMENT_EXPR]) = 1;
+ ansi_opname[(int) POSTDECREMENT_EXPR] = ansi_opname[(int) PREDECREMENT_EXPR];
+ ansi_opname[(int) COMPONENT_REF] = get_identifier ("__rf");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) COMPONENT_REF]) = 1;
+ ansi_opname[(int) MEMBER_REF] = get_identifier ("__rm");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) MEMBER_REF]) = 1;
+ ansi_opname[(int) CALL_EXPR] = get_identifier ("__cl");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) CALL_EXPR]) = 1;
+ ansi_opname[(int) ARRAY_REF] = get_identifier ("__vc");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) ARRAY_REF]) = 1;
+ ansi_opname[(int) NEW_EXPR] = get_identifier ("__nw");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) NEW_EXPR]) = 1;
+ ansi_opname[(int) DELETE_EXPR] = get_identifier ("__dl");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) DELETE_EXPR]) = 1;
+ ansi_opname[(int) VEC_NEW_EXPR] = get_identifier ("__vn");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) VEC_NEW_EXPR]) = 1;
+ ansi_opname[(int) VEC_DELETE_EXPR] = get_identifier ("__vd");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) VEC_DELETE_EXPR]) = 1;
+ ansi_opname[(int) TYPE_EXPR] = get_identifier ("__op");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) TYPE_EXPR]) = 1;
+
+ /* These operators are not defined in ANSI, but we need
+ them anyway. */
+ ansi_opname[(int) MIN_EXPR] = get_identifier ("__mn");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) MIN_EXPR]) = 1;
+ ansi_opname[(int) MAX_EXPR] = get_identifier ("__mx");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) MAX_EXPR]) = 1;
+ ansi_opname[(int) COND_EXPR] = get_identifier ("__cn");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) COND_EXPR]) = 1;
+ ansi_opname[(int) METHOD_CALL_EXPR] = get_identifier ("__wr");
+ IDENTIFIER_OPNAME_P (ansi_opname[(int) METHOD_CALL_EXPR]) = 1;
+
+ init_method ();
+ init_error ();
+ gcc_obstack_init (&inline_text_obstack);
+ inline_text_firstobj = (char *) obstack_alloc (&inline_text_obstack, 0);
+
+ /* Start it at 0, because check_newline is called at the very beginning
+ and will increment it to 1. */
+ lineno = 0;
+ input_filename = "<internal>";
+ current_function_decl = NULL;
+
+ maxtoken = 40;
+ token_buffer = (char *) xmalloc (maxtoken + 2);
+
+ ridpointers[(int) RID_INT] = get_identifier ("int");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_INT],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_INT]));
+ ridpointers[(int) RID_BOOL] = get_identifier ("bool");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_BOOL],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_BOOL]));
+ ridpointers[(int) RID_CHAR] = get_identifier ("char");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_CHAR],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_CHAR]));
+ ridpointers[(int) RID_VOID] = get_identifier ("void");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_VOID],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_VOID]));
+ ridpointers[(int) RID_FLOAT] = get_identifier ("float");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_FLOAT],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_FLOAT]));
+ ridpointers[(int) RID_DOUBLE] = get_identifier ("double");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_DOUBLE],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_DOUBLE]));
+ ridpointers[(int) RID_SHORT] = get_identifier ("short");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_SHORT],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_SHORT]));
+ ridpointers[(int) RID_LONG] = get_identifier ("long");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_LONG],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_LONG]));
+ ridpointers[(int) RID_UNSIGNED] = get_identifier ("unsigned");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_UNSIGNED],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_UNSIGNED]));
+ ridpointers[(int) RID_SIGNED] = get_identifier ("signed");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_SIGNED],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_SIGNED]));
+ ridpointers[(int) RID_INLINE] = get_identifier ("inline");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_INLINE],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_INLINE]));
+ ridpointers[(int) RID_CONST] = get_identifier ("const");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_CONST],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_CONST]));
+ ridpointers[(int) RID_VOLATILE] = get_identifier ("volatile");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_VOLATILE],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_VOLATILE]));
+ ridpointers[(int) RID_AUTO] = get_identifier ("auto");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_AUTO],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_AUTO]));
+ ridpointers[(int) RID_STATIC] = get_identifier ("static");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_STATIC],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_STATIC]));
+ ridpointers[(int) RID_EXTERN] = get_identifier ("extern");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_EXTERN],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_EXTERN]));
+ ridpointers[(int) RID_TYPEDEF] = get_identifier ("typedef");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_TYPEDEF],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_TYPEDEF]));
+ ridpointers[(int) RID_REGISTER] = get_identifier ("register");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_REGISTER],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_REGISTER]));
+
+ /* C++ extensions. These are probably not correctly named. */
+ ridpointers[(int) RID_WCHAR] = get_identifier ("__wchar_t");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_WCHAR],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_WCHAR]));
+ class_type_node = build_int_2 (class_type, 0);
+ TREE_TYPE (class_type_node) = class_type_node;
+ ridpointers[(int) RID_CLASS] = class_type_node;
+
+ record_type_node = build_int_2 (record_type, 0);
+ TREE_TYPE (record_type_node) = record_type_node;
+ ridpointers[(int) RID_RECORD] = record_type_node;
+
+ union_type_node = build_int_2 (union_type, 0);
+ TREE_TYPE (union_type_node) = union_type_node;
+ ridpointers[(int) RID_UNION] = union_type_node;
+
+ enum_type_node = build_int_2 (enum_type, 0);
+ TREE_TYPE (enum_type_node) = enum_type_node;
+ ridpointers[(int) RID_ENUM] = enum_type_node;
+
+ ridpointers[(int) RID_VIRTUAL] = get_identifier ("virtual");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_VIRTUAL],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_VIRTUAL]));
+ ridpointers[(int) RID_FRIEND] = get_identifier ("friend");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_FRIEND],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_FRIEND]));
+
+ ridpointers[(int) RID_PUBLIC] = get_identifier ("public");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_PUBLIC],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_PUBLIC]));
+ ridpointers[(int) RID_PRIVATE] = get_identifier ("private");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_PRIVATE],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_PRIVATE]));
+ ridpointers[(int) RID_PROTECTED] = get_identifier ("protected");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_PROTECTED],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_PROTECTED]));
+ ridpointers[(int) RID_TEMPLATE] = get_identifier ("template");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_TEMPLATE],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_TEMPLATE]));
+ /* This is for ANSI C++. */
+ ridpointers[(int) RID_MUTABLE] = get_identifier ("mutable");
+ SET_IDENTIFIER_AS_LIST (ridpointers[(int) RID_MUTABLE],
+ build_tree_list (NULL_TREE, ridpointers[(int) RID_MUTABLE]));
+
+ /* Exception handling extensions. */
+ exception_type_node = build_int_2 (exception_type, 0);
+ TREE_TYPE (exception_type_node) = exception_type_node;
+ ridpointers[(int) RID_EXCEPTION] = exception_type_node;
+
+ /* Signature handling extensions. */
+ signature_type_node = build_int_2 (signature_type, 0);
+ TREE_TYPE (signature_type_node) = signature_type_node;
+ ridpointers[(int) RID_SIGNATURE] = signature_type_node;
+
+ opname_tab[(int) COMPONENT_REF] = "->";
+ opname_tab[(int) MEMBER_REF] = "->*";
+ opname_tab[(int) METHOD_CALL_EXPR] = "->()";
+ opname_tab[(int) INDIRECT_REF] = "(unary *)";
+ opname_tab[(int) ARRAY_REF] = "[]";
+ opname_tab[(int) MODIFY_EXPR] = "=";
+ opname_tab[(int) NEW_EXPR] = "new";
+ opname_tab[(int) DELETE_EXPR] = "delete";
+ opname_tab[(int) VEC_NEW_EXPR] = "new []";
+ opname_tab[(int) VEC_DELETE_EXPR] = "delete []";
+ opname_tab[(int) COND_EXPR] = "... ? ... : ...";
+ opname_tab[(int) CALL_EXPR] = "()";
+ opname_tab[(int) PLUS_EXPR] = "+";
+ opname_tab[(int) MINUS_EXPR] = "-";
+ opname_tab[(int) MULT_EXPR] = "*";
+ opname_tab[(int) TRUNC_DIV_EXPR] = "/";
+ opname_tab[(int) CEIL_DIV_EXPR] = "(ceiling /)";
+ opname_tab[(int) FLOOR_DIV_EXPR] = "(floor /)";
+ opname_tab[(int) ROUND_DIV_EXPR] = "(round /)";
+ opname_tab[(int) TRUNC_MOD_EXPR] = "%";
+ opname_tab[(int) CEIL_MOD_EXPR] = "(ceiling %)";
+ opname_tab[(int) FLOOR_MOD_EXPR] = "(floor %)";
+ opname_tab[(int) ROUND_MOD_EXPR] = "(round %)";
+ opname_tab[(int) NEGATE_EXPR] = "-";
+ opname_tab[(int) MIN_EXPR] = "<?";
+ opname_tab[(int) MAX_EXPR] = ">?";
+ opname_tab[(int) ABS_EXPR] = "abs";
+ opname_tab[(int) FFS_EXPR] = "ffs";
+ opname_tab[(int) LSHIFT_EXPR] = "<<";
+ opname_tab[(int) RSHIFT_EXPR] = ">>";
+ opname_tab[(int) BIT_IOR_EXPR] = "|";
+ opname_tab[(int) BIT_XOR_EXPR] = "^";
+ opname_tab[(int) BIT_AND_EXPR] = "&";
+ opname_tab[(int) BIT_ANDTC_EXPR] = "&~";
+ opname_tab[(int) BIT_NOT_EXPR] = "~";
+ opname_tab[(int) TRUTH_ANDIF_EXPR] = "&&";
+ opname_tab[(int) TRUTH_ORIF_EXPR] = "||";
+ opname_tab[(int) TRUTH_AND_EXPR] = "strict &&";
+ opname_tab[(int) TRUTH_OR_EXPR] = "strict ||";
+ opname_tab[(int) TRUTH_NOT_EXPR] = "!";
+ opname_tab[(int) LT_EXPR] = "<";
+ opname_tab[(int) LE_EXPR] = "<=";
+ opname_tab[(int) GT_EXPR] = ">";
+ opname_tab[(int) GE_EXPR] = ">=";
+ opname_tab[(int) EQ_EXPR] = "==";
+ opname_tab[(int) NE_EXPR] = "!=";
+ opname_tab[(int) IN_EXPR] = "in";
+ opname_tab[(int) RANGE_EXPR] = "..";
+ opname_tab[(int) CONVERT_EXPR] = "(unary +)";
+ opname_tab[(int) ADDR_EXPR] = "(unary &)";
+ opname_tab[(int) PREDECREMENT_EXPR] = "--";
+ opname_tab[(int) PREINCREMENT_EXPR] = "++";
+ opname_tab[(int) POSTDECREMENT_EXPR] = "--";
+ opname_tab[(int) POSTINCREMENT_EXPR] = "++";
+ opname_tab[(int) COMPOUND_EXPR] = ",";
+
+ assignop_tab[(int) NOP_EXPR] = "=";
+ assignop_tab[(int) PLUS_EXPR] = "+=";
+ assignop_tab[(int) CONVERT_EXPR] = "+=";
+ assignop_tab[(int) MINUS_EXPR] = "-=";
+ assignop_tab[(int) NEGATE_EXPR] = "-=";
+ assignop_tab[(int) MULT_EXPR] = "*=";
+ assignop_tab[(int) INDIRECT_REF] = "*=";
+ assignop_tab[(int) TRUNC_DIV_EXPR] = "/=";
+ assignop_tab[(int) EXACT_DIV_EXPR] = "(exact /=)";
+ assignop_tab[(int) CEIL_DIV_EXPR] = "(ceiling /=)";
+ assignop_tab[(int) FLOOR_DIV_EXPR] = "(floor /=)";
+ assignop_tab[(int) ROUND_DIV_EXPR] = "(round /=)";
+ assignop_tab[(int) TRUNC_MOD_EXPR] = "%=";
+ assignop_tab[(int) CEIL_MOD_EXPR] = "(ceiling %=)";
+ assignop_tab[(int) FLOOR_MOD_EXPR] = "(floor %=)";
+ assignop_tab[(int) ROUND_MOD_EXPR] = "(round %=)";
+ assignop_tab[(int) MIN_EXPR] = "<?=";
+ assignop_tab[(int) MAX_EXPR] = ">?=";
+ assignop_tab[(int) LSHIFT_EXPR] = "<<=";
+ assignop_tab[(int) RSHIFT_EXPR] = ">>=";
+ assignop_tab[(int) BIT_IOR_EXPR] = "|=";
+ assignop_tab[(int) BIT_XOR_EXPR] = "^=";
+ assignop_tab[(int) BIT_AND_EXPR] = "&=";
+ assignop_tab[(int) ADDR_EXPR] = "&=";
+
+ init_filename_times ();
+
+ /* Some options inhibit certain reserved words.
+ Clear those words out of the hash table so they won't be recognized. */
+#define UNSET_RESERVED_WORD(STRING) \
+ do { struct resword *s = is_reserved_word (STRING, sizeof (STRING) - 1); \
+ if (s) s->name = ""; } while (0)
+
+#if 0
+  /* Let these keywords parse, and give an error only if they are used.  */
+ if (!flag_handle_exceptions)
+ {
+ UNSET_RESERVED_WORD ("throw");
+ UNSET_RESERVED_WORD ("try");
+ UNSET_RESERVED_WORD ("catch");
+ }
+#endif
+
+ if (! (flag_gc || flag_dossier))
+ {
+ UNSET_RESERVED_WORD ("classof");
+ UNSET_RESERVED_WORD ("headof");
+ }
+ if (! flag_handle_signatures)
+ {
+ /* Easiest way to not recognize signature
+ handling extensions... */
+ UNSET_RESERVED_WORD ("signature");
+ UNSET_RESERVED_WORD ("sigof");
+ }
+ if (flag_no_asm)
+ UNSET_RESERVED_WORD ("asm");
+ if (flag_no_asm || flag_traditional)
+ UNSET_RESERVED_WORD ("typeof");
+
+ token_count = init_parse ();
+ interface_unknown = 1;
+}
+
+void
+reinit_parse_for_function ()
+{
+ current_base_init_list = NULL_TREE;
+ current_member_init_list = NULL_TREE;
+}
+
+#ifdef __GNUC__
+__inline
+#endif
+void
+yyprint (file, yychar, yylval)
+ FILE *file;
+ int yychar;
+ YYSTYPE yylval;
+{
+ tree t;
+ switch (yychar)
+ {
+ case IDENTIFIER:
+ case TYPENAME:
+ case TYPESPEC:
+ case PTYPENAME:
+ case IDENTIFIER_DEFN:
+ case TYPENAME_DEFN:
+ case PTYPENAME_DEFN:
+ case TYPENAME_ELLIPSIS:
+ case SCSPEC:
+ case PRE_PARSED_CLASS_DECL:
+ t = yylval.ttype;
+ my_friendly_assert (TREE_CODE (t) == IDENTIFIER_NODE, 224);
+ if (IDENTIFIER_POINTER (t))
+ fprintf (file, " `%s'", IDENTIFIER_POINTER (t));
+ break;
+ case AGGR:
+ if (yylval.ttype == class_type_node)
+ fprintf (file, " `class'");
+ else if (yylval.ttype == record_type_node)
+ fprintf (file, " `struct'");
+ else if (yylval.ttype == union_type_node)
+ fprintf (file, " `union'");
+ else if (yylval.ttype == enum_type_node)
+ fprintf (file, " `enum'");
+ else if (yylval.ttype == signature_type_node)
+ fprintf (file, " `signature'");
+ else
+ my_friendly_abort (80);
+ break;
+ }
+}
+
+static int *reduce_count;
+int *token_count;
+
+#define REDUCE_LENGTH (sizeof (yyr2) / sizeof (yyr2[0]))
+#define TOKEN_LENGTH (256 + sizeof (yytname) / sizeof (yytname[0]))
+
+int *
+init_parse ()
+{
+#ifdef GATHER_STATISTICS
+ reduce_count = (int *)malloc (sizeof (int) * (REDUCE_LENGTH + 1));
+ bzero (reduce_count, sizeof (int) * (REDUCE_LENGTH + 1));
+ reduce_count += 1;
+ token_count = (int *)malloc (sizeof (int) * (TOKEN_LENGTH + 1));
+ bzero (token_count, sizeof (int) * (TOKEN_LENGTH + 1));
+ token_count += 1;
+#endif
+ return token_count;
+}
+
+#ifdef GATHER_STATISTICS
+void
+yyhook (yyn)
+ int yyn;
+{
+ reduce_count[yyn] += 1;
+}
+
+static int
+reduce_cmp (p, q)
+ int *p, *q;
+{
+ return reduce_count[*q] - reduce_count[*p];
+}
+
+static int
+token_cmp (p, q)
+ int *p, *q;
+{
+ return token_count[*q] - token_count[*p];
+}
+#endif
+
+void
+print_parse_statistics ()
+{
+#ifdef GATHER_STATISTICS
+#if YYDEBUG != 0
+ int i;
+ int maxlen = REDUCE_LENGTH;
+ unsigned *sorted;
+
+ if (reduce_count[-1] == 0)
+ return;
+
+ if (TOKEN_LENGTH > REDUCE_LENGTH)
+ maxlen = TOKEN_LENGTH;
+ sorted = (unsigned *) alloca (sizeof (int) * maxlen);
+
+ for (i = 0; i < TOKEN_LENGTH; i++)
+ sorted[i] = i;
+ qsort (sorted, TOKEN_LENGTH, sizeof (int), token_cmp);
+ for (i = 0; i < TOKEN_LENGTH; i++)
+ {
+ int index = sorted[i];
+ if (token_count[index] == 0)
+ break;
+ if (token_count[index] < token_count[-1])
+ break;
+ fprintf (stderr, "token %d, `%s', count = %d\n",
+ index, yytname[YYTRANSLATE (index)], token_count[index]);
+ }
+ fprintf (stderr, "\n");
+ for (i = 0; i < REDUCE_LENGTH; i++)
+ sorted[i] = i;
+ qsort (sorted, REDUCE_LENGTH, sizeof (int), reduce_cmp);
+ for (i = 0; i < REDUCE_LENGTH; i++)
+ {
+ int index = sorted[i];
+ if (reduce_count[index] == 0)
+ break;
+ if (reduce_count[index] < reduce_count[-1])
+ break;
+ fprintf (stderr, "rule %d, line %d, count = %d\n",
+ index, yyrline[index], reduce_count[index]);
+ }
+ fprintf (stderr, "\n");
+#endif
+#endif
+}
+
+/* Sets the value of the 'yydebug' variable to VALUE.
+ This is a function so we don't have to have YYDEBUG defined
+ in order to build the compiler. */
+void
+set_yydebug (value)
+ int value;
+{
+#if YYDEBUG != 0
+ extern int yydebug;
+ yydebug = value;
+#else
+ warning ("YYDEBUG not defined.");
+#endif
+}
+
+#ifdef SPEW_DEBUG
+const char *
+debug_yytranslate (value)
+ int value;
+{
+ return yytname[YYTRANSLATE (value)];
+}
+
+#endif
+
+/* Functions and data structures for #pragma interface.
+
+ `#pragma implementation' means that the main file being compiled
+ is considered to implement (provide) the classes that appear in
+ its main body. I.e., if this is file "foo.cc", and class `bar'
+ is defined in "foo.cc", then we say that "foo.cc implements bar".
+
+ All main input files "implement" themselves automagically.
+
+   `#pragma interface' means that unless this file (of the form "foo.h")
+   is presently being included by file "foo.cc", the
+ CLASSTYPE_INTERFACE_ONLY bit gets set. The effect is that none
+ of the vtables nor any of the inline functions defined in foo.h
+ will ever be output.
+
+ There are cases when we want to link files such as "defs.h" and
+ "main.cc". In this case, we give "defs.h" a `#pragma interface',
+ and "main.cc" has `#pragma implementation "defs.h"'. */
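+
+/* A sketch of the intended usage (the file names are hypothetical):
+
+	// defs.h
+	#pragma interface
+	struct bar { int f () { return 0; } };
+
+	// main.cc
+	#pragma implementation "defs.h"
+	#include "defs.h"
+
+   Only "main.cc" will emit bar's vtable and out-of-line copies of
+   its inline functions; other files that include "defs.h" emit
+   neither.  */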
+
+struct impl_files
+{
+ char *filename;
+ struct impl_files *next;
+};
+
+static struct impl_files *impl_file_chain;
+
+/* Helper function to load global variables with interface
+ information. */
+void
+extract_interface_info ()
+{
+ tree fileinfo = 0;
+
+ if (flag_alt_external_templates)
+ {
+ struct tinst_level *til = tinst_for_decl ();
+
+ if (til)
+ fileinfo = get_time_identifier (til->file);
+ }
+ if (!fileinfo)
+ fileinfo = get_time_identifier (input_filename);
+ fileinfo = IDENTIFIER_CLASS_VALUE (fileinfo);
+ interface_only = TREE_INT_CST_LOW (fileinfo);
+ if (!processing_template_defn || flag_external_templates)
+ interface_unknown = TREE_INT_CST_HIGH (fileinfo);
+}
+
+/* Return nonzero if S is not considered part of an
+ INTERFACE/IMPLEMENTATION pair. Otherwise, return 0. */
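+/* For example, "foo.h" matches "foo.cc": after the common prefix
+   "foo.", neither remainder ("h", "cc") contains another `.', and
+   each remainder is preceded by the `.'.  But "xxx.yyy.h" does not
+   match "xxx.zzz.cc", since the remainders still contain a `.'.  */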
+static int
+interface_strcmp (s)
+ char *s;
+{
+ /* Set the interface/implementation bits for this scope. */
+ struct impl_files *ifiles;
+ char *s1;
+
+ for (ifiles = impl_file_chain; ifiles; ifiles = ifiles->next)
+ {
+ char *t1 = ifiles->filename;
+ s1 = s;
+
+ if (*s1 != *t1 || *s1 == 0)
+ continue;
+
+ while (*s1 == *t1 && *s1 != 0)
+ s1++, t1++;
+
+ /* A match. */
+ if (*s1 == *t1)
+ return 0;
+
+ /* Don't get faked out by xxx.yyy.cc vs xxx.zzz.cc. */
+ if (index (s1, '.') || index (t1, '.'))
+ continue;
+
+ if (*s1 == '\0' || s1[-1] != '.' || t1[-1] != '.')
+ continue;
+
+ /* A match. */
+ return 0;
+ }
+
+ /* No matches. */
+ return 1;
+}
+
+void
+set_typedecl_interface_info (prev, vars)
+ tree prev, vars;
+{
+ tree id = get_time_identifier (DECL_SOURCE_FILE (vars));
+ tree fileinfo = IDENTIFIER_CLASS_VALUE (id);
+ tree type = TREE_TYPE (vars);
+
+ CLASSTYPE_INTERFACE_ONLY (type) = TREE_INT_CST_LOW (fileinfo)
+ = interface_strcmp (FILE_NAME_NONDIRECTORY (DECL_SOURCE_FILE (vars)));
+}
+
+void
+set_vardecl_interface_info (prev, vars)
+ tree prev, vars;
+{
+ tree type = DECL_CONTEXT (vars);
+
+ if (CLASSTYPE_INTERFACE_KNOWN (type))
+ {
+ if (CLASSTYPE_INTERFACE_ONLY (type))
+ set_typedecl_interface_info (prev, TYPE_NAME (type));
+ else
+ CLASSTYPE_VTABLE_NEEDS_WRITING (type) = 1;
+ DECL_EXTERNAL (vars) = CLASSTYPE_INTERFACE_ONLY (type);
+ TREE_PUBLIC (vars) = 1;
+ }
+}
+
+/* Called from the top level: if there are any pending inlines to
+ do, set up to process them now. This function sets up the first function
+ to be parsed; after it has been, the rule for fndef in parse.y will
+ call process_next_inline to start working on the next one. */
+void
+do_pending_inlines ()
+{
+ struct pending_inline *prev = 0, *tail;
+ struct pending_inline *t;
+
+ /* Oops, we're still dealing with the last batch. */
+ if (yychar == PRE_PARSED_FUNCTION_DECL)
+ return;
+
+ /* Reverse the pending inline functions, since
+ they were cons'd instead of appended. */
+
+ for (t = pending_inlines; t; t = tail)
+ {
+ t->deja_vu = 1;
+ tail = t->next;
+ t->next = prev;
+ prev = t;
+ }
+ /* Reset to zero so that if the inline functions we are currently
+ processing define inline functions of their own, that is handled
+ correctly. ??? This hasn't been checked in a while. */
+ pending_inlines = 0;
+
+ /* Now start processing the first inline function. */
+ t = prev;
+ my_friendly_assert ((t->parm_vec == NULL_TREE) == (t->bindings == NULL_TREE),
+ 226);
+ if (t->parm_vec)
+ push_template_decls (t->parm_vec, t->bindings, 0);
+ if (t->len > 0)
+ {
+ feed_input (t->buf, t->len, t->can_free ? &inline_text_obstack : 0);
+ lineno = t->lineno;
+#if 0
+ if (input_filename != t->filename)
+ {
+ input_filename = t->filename;
+ /* Get interface/implementation back in sync. */
+ extract_interface_info ();
+ }
+#else
+ input_filename = t->filename;
+ interface_unknown = t->interface == 1;
+ interface_only = t->interface == 0;
+#endif
+ yychar = PRE_PARSED_FUNCTION_DECL;
+ }
+ /* Pass back a handle on the rest of the inline functions, so that they
+ can be processed later. */
+ yylval.ttype = build_tree_list ((tree) t, t->fndecl);
+#if 0
+ if (flag_default_inline && t->fndecl
+ /* If we're working from a template, don't change
+ the `inline' state. */
+ && t->parm_vec == NULL_TREE)
+ DECL_INLINE (t->fndecl) = 1;
+#endif
+ DECL_PENDING_INLINE_INFO (t->fndecl) = 0;
+}
+
+extern struct pending_input *to_be_restored;
+static int nextchar = -1;
+
+/* Called from the fndecl rule in the parser when the function just parsed
+ was declared using a PRE_PARSED_FUNCTION_DECL (i.e. came from
+ do_pending_inlines). */
+void
+process_next_inline (t)
+ tree t;
+{
+ struct pending_inline *i = (struct pending_inline *) TREE_PURPOSE (t);
+ my_friendly_assert ((i->parm_vec == NULL_TREE) == (i->bindings == NULL_TREE),
+ 227);
+ if (i->parm_vec)
+ pop_template_decls (i->parm_vec, i->bindings, 0);
+ i = i->next;
+ if (yychar == YYEMPTY)
+ yychar = yylex ();
+ if (yychar != END_OF_SAVED_INPUT)
+ {
+ error ("parse error at end of saved function text");
+      /* restore_pending_input will abort unless yychar is either
+	 END_OF_SAVED_INPUT or YYEMPTY; since we already know we're
+	 hosed, feed back YYEMPTY.  We also need to discard nextchar,
+	 since that may have gotten set as well.  */
+ nextchar = -1;
+ }
+ yychar = YYEMPTY;
+ if (to_be_restored == 0)
+ my_friendly_abort (123);
+ restore_pending_input (to_be_restored);
+ to_be_restored = 0;
+ if (i && i->fndecl != NULL_TREE)
+ {
+ my_friendly_assert ((i->parm_vec == NULL_TREE) == (i->bindings == NULL_TREE),
+ 228);
+ if (i->parm_vec)
+ push_template_decls (i->parm_vec, i->bindings, 0);
+ feed_input (i->buf, i->len, i->can_free ? &inline_text_obstack : 0);
+ lineno = i->lineno;
+ input_filename = i->filename;
+ yychar = PRE_PARSED_FUNCTION_DECL;
+ yylval.ttype = build_tree_list ((tree) i, i->fndecl);
+#if 0
+ if (flag_default_inline
+ /* If we're working from a template, don't change
+ the `inline' state. */
+ && i->parm_vec == NULL_TREE)
+ DECL_INLINE (i->fndecl) = 1;
+#endif
+ DECL_PENDING_INLINE_INFO (i->fndecl) = 0;
+ }
+ if (i)
+ {
+ interface_unknown = i->interface == 1;
+ interface_only = i->interface == 0;
+ }
+ else
+ extract_interface_info ();
+}
+
+/* Since inline methods can refer to text which has not yet been seen,
+ we store the text of the method in a structure which is placed in the
+ DECL_PENDING_INLINE_INFO field of the FUNCTION_DECL.
+ After parsing the body of the class definition, the FUNCTION_DECL's are
+ scanned to see which ones have this field set. Those are then digested
+ one at a time.
+
+ This function's FUNCTION_DECL will have a bit set in its common so
+ that we know to watch out for it. */
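+
+/* For example, given
+
+	struct S {
+	  int f () { return g (); }	// g has not been seen yet
+	  int g () { return 1; }
+	};
+
+   the text of f's body is saved here and only digested after the
+   closing brace of S, by which time g is known.  */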
+
+static void
+consume_string (this_obstack, matching_char)
+ register struct obstack *this_obstack;
+ int matching_char;
+{
+ register int c;
+ int starting_lineno = lineno;
+ do
+ {
+ c = getch ();
+ if (c == EOF)
+ {
+ int save_lineno = lineno;
+ lineno = starting_lineno;
+ if (matching_char == '"')
+ error ("end of file encountered inside string constant");
+ else
+ error ("end of file encountered inside character constant");
+ lineno = save_lineno;
+ return;
+ }
+ if (c == '\\')
+ {
+ obstack_1grow (this_obstack, c);
+ c = getch ();
+ obstack_1grow (this_obstack, c);
+
+	  /* Make sure we continue the loop.  */
+ c = 0;
+ continue;
+ }
+ if (c == '\n')
+ {
+ if (pedantic)
+ pedwarn ("ANSI C++ forbids newline in string constant");
+ lineno++;
+ }
+ obstack_1grow (this_obstack, c);
+ }
+ while (c != matching_char);
+}
+
+static int nextyychar = YYEMPTY;
+static YYSTYPE nextyylval;
+
+struct pending_input {
+ int nextchar, yychar, nextyychar, eof;
+ YYSTYPE yylval, nextyylval;
+ struct obstack token_obstack;
+ int first_token;
+};
+
+struct pending_input *
+save_pending_input ()
+{
+ struct pending_input *p;
+ p = (struct pending_input *) xmalloc (sizeof (struct pending_input));
+ p->nextchar = nextchar;
+ p->yychar = yychar;
+ p->nextyychar = nextyychar;
+ p->yylval = yylval;
+ p->nextyylval = nextyylval;
+ p->eof = end_of_file;
+ yychar = nextyychar = YYEMPTY;
+ nextchar = -1;
+ p->first_token = first_token;
+ p->token_obstack = token_obstack;
+
+ first_token = 0;
+ gcc_obstack_init (&token_obstack);
+ end_of_file = 0;
+ return p;
+}
+
+void
+restore_pending_input (p)
+ struct pending_input *p;
+{
+ my_friendly_assert (nextchar == -1, 229);
+ nextchar = p->nextchar;
+ my_friendly_assert (yychar == YYEMPTY || yychar == END_OF_SAVED_INPUT, 230);
+ yychar = p->yychar;
+ my_friendly_assert (nextyychar == YYEMPTY, 231);
+ nextyychar = p->nextyychar;
+ yylval = p->yylval;
+ nextyylval = p->nextyylval;
+ first_token = p->first_token;
+ obstack_free (&token_obstack, (char *) 0);
+ token_obstack = p->token_obstack;
+ end_of_file = p->eof;
+ free (p);
+}
+
+/* Return next non-whitespace input character, which may come
+ from `finput', or from `nextchar'. */
+static int
+yynextch ()
+{
+ int c;
+
+ if (nextchar >= 0)
+ {
+ c = nextchar;
+ nextchar = -1;
+ }
+ else c = getch ();
+ return skip_white_space (c);
+}
+
+/* Unget character CH from the input stream.
+ If RESCAN is non-zero, then we want to `see' this
+ character as the next input token. */
+void
+yyungetc (ch, rescan)
+ int ch;
+ int rescan;
+{
+ /* Unget a character from the input stream. */
+ if (yychar == YYEMPTY || rescan == 0)
+ {
+ if (nextchar >= 0)
+ put_back (nextchar);
+ nextchar = ch;
+ }
+ else
+ {
+ my_friendly_assert (nextyychar == YYEMPTY, 232);
+ nextyychar = yychar;
+ nextyylval = yylval;
+ yychar = ch;
+ }
+}
+
+/* This function stores away the text for an inline function that should
+ be processed later. It decides how much later, and may need to move
+ the info between obstacks; therefore, the caller should not refer to
+ the T parameter after calling this function.
+
+ This function also stores the list of template-parameter bindings that
+ will be needed for expanding the template, if any. */
+
+static void
+store_pending_inline (decl, t)
+ tree decl;
+ struct pending_inline *t;
+{
+ extern int processing_template_defn;
+ int delay_to_eof = 0;
+ struct pending_inline **inlines;
+
+ t->fndecl = decl;
+ /* Default: compile right away, and no extra bindings are needed. */
+ t->parm_vec = t->bindings = 0;
+ if (processing_template_defn)
+ {
+ tree type = current_class_type;
+ /* Assumption: In this (possibly) nested class sequence, only
+ one name will have template parms. */
+ while (type && TREE_CODE_CLASS (TREE_CODE (type)) == 't')
+ {
+ tree decl = TYPE_NAME (type);
+ tree tmpl = IDENTIFIER_TEMPLATE (DECL_NAME (decl));
+ if (tmpl)
+ {
+ t->parm_vec = DECL_TEMPLATE_INFO (TREE_PURPOSE (tmpl))->parm_vec;
+ t->bindings = TREE_VALUE (tmpl);
+ }
+ type = DECL_CONTEXT (decl);
+ }
+ if (TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE
+ || TREE_CODE (TREE_TYPE (decl)) == FUNCTION_TYPE)
+ {
+ if (TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
+ my_friendly_assert (TYPE_MAX_VALUE (TREE_TYPE (decl)) == current_class_type,
+ 233);
+
+ /* Inline functions can be compiled immediately. Other functions
+ will be output separately, so if we're in interface-only mode,
+ punt them now, or output them now if we're doing implementations
+ and we know no overrides will exist. Otherwise, we delay until
+ end-of-file, to see if the definition is really required. */
+ if (DECL_INLINE (decl))
+ /* delay_to_eof == 0 */;
+ else if (current_class_type && !interface_unknown)
+ {
+ if (interface_only)
+ {
+#if 0
+ print_node_brief (stderr, "\ndiscarding text for ", decl, 0);
+#endif
+ if (t->can_free)
+ obstack_free (&inline_text_obstack, t->buf);
+ DECL_PENDING_INLINE_INFO (decl) = 0;
+ return;
+ }
+ }
+ /* Don't delay the processing of virtual functions. */
+ else if (DECL_VINDEX (decl) == NULL_TREE)
+ delay_to_eof = 1;
+ }
+ else
+ my_friendly_abort (58);
+ }
+
+ if (delay_to_eof)
+ {
+ extern struct pending_inline *pending_template_expansions;
+
+ if (t->can_free)
+ {
+ char *free_to = t->buf;
+ t->buf = (char *) obstack_copy (&permanent_obstack, t->buf,
+ t->len + 1);
+ t = (struct pending_inline *) obstack_copy (&permanent_obstack,
+ (char *)t, sizeof (*t));
+ obstack_free (&inline_text_obstack, free_to);
+ }
+ inlines = &pending_template_expansions;
+ t->can_free = 0;
+ }
+ else
+ {
+ inlines = &pending_inlines;
+ DECL_PENDING_INLINE_INFO (decl) = t;
+ }
+
+ /* Because we use obstacks, we must process these in precise order. */
+ t->next = *inlines;
+ *inlines = t;
+}
+
+void reinit_parse_for_block ();
+
+void
+reinit_parse_for_method (yychar, decl)
+ int yychar;
+ tree decl;
+{
+ int len;
+ int starting_lineno = lineno;
+ char *starting_filename = input_filename;
+
+ reinit_parse_for_block (yychar, &inline_text_obstack, 0);
+
+ len = obstack_object_size (&inline_text_obstack);
+ current_base_init_list = NULL_TREE;
+ current_member_init_list = NULL_TREE;
+ if (decl == void_type_node
+ || (current_class_type && TYPE_REDEFINED (current_class_type)))
+ {
+ /* Happens when we get two declarations of the same
+ function in the same scope. */
+ char *buf = obstack_finish (&inline_text_obstack);
+ obstack_free (&inline_text_obstack, buf);
+ return;
+ }
+ else
+ {
+ struct pending_inline *t;
+ char *buf = obstack_finish (&inline_text_obstack);
+
+ t = (struct pending_inline *) obstack_alloc (&inline_text_obstack,
+ sizeof (struct pending_inline));
+ t->lineno = starting_lineno;
+ t->filename = starting_filename;
+ t->token = YYEMPTY;
+ t->token_value = 0;
+ t->buf = buf;
+ t->len = len;
+ t->can_free = 1;
+ t->deja_vu = 0;
+ if (interface_unknown && processing_template_defn && flag_external_templates && ! DECL_IN_SYSTEM_HEADER (decl))
+ warn_if_unknown_interface ();
+ t->interface = (interface_unknown ? 1 : (interface_only ? 0 : 2));
+ store_pending_inline (decl, t);
+ }
+}
+
+/* Consume a block -- actually, a method or template definition beginning
+ with `:' or `{' -- and save it away on the specified obstack.
+
+ Argument IS_TEMPLATE indicates which set of error messages should be
+ output if something goes wrong. This should really be cleaned up somehow,
+ without loss of clarity. */
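+/* For example, a method body `int f () { return 0; }' arrives here
+   with YYCHAR == '{' and is saved through the matching `}'; a
+   constructor `X () : i (0) { }' arrives with YYCHAR == ':', and we
+   first look for the `{' before counting braces.  */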
+void
+reinit_parse_for_block (yychar, obstackp, is_template)
+ int yychar;
+ struct obstack *obstackp;
+ int is_template;
+{
+ register int c = 0;
+ int blev = 1;
+ int starting_lineno = lineno;
+ char *starting_filename = input_filename;
+ int len;
+ int look_for_semicolon = 0;
+ int look_for_lbrac = 0;
+
+ if (yychar == '{')
+ obstack_1grow (obstackp, '{');
+ else if (yychar == '=')
+ look_for_semicolon = 1;
+ else if (yychar != ':' && (yychar != RETURN || is_template))
+ {
+ yyerror (is_template
+ ? "parse error in template specification"
+ : "parse error in method specification");
+ obstack_1grow (obstackp, '{');
+ }
+ else
+ {
+ obstack_1grow (obstackp, yychar);
+ look_for_lbrac = 1;
+ blev = 0;
+ }
+
+ if (nextchar != EOF)
+ {
+ c = nextchar;
+ nextchar = EOF;
+ }
+ else
+ c = getch ();
+
+ while (c != EOF)
+ {
+ int this_lineno = lineno;
+
+ c = skip_white_space (c);
+
+ /* Don't lose our cool if there are lots of comments. */
+ if (lineno == this_lineno + 1)
+ obstack_1grow (obstackp, '\n');
+ else if (lineno == this_lineno)
+ ;
+ else if (lineno - this_lineno < 10)
+ {
+ int i;
+ for (i = lineno - this_lineno; i > 0; i--)
+ obstack_1grow (obstackp, '\n');
+ }
+ else
+ {
+ char buf[16];
+ sprintf (buf, "\n# %d \"", lineno);
+ len = strlen (buf);
+ obstack_grow (obstackp, buf, len);
+
+ len = strlen (input_filename);
+ obstack_grow (obstackp, input_filename, len);
+ obstack_1grow (obstackp, '\"');
+ obstack_1grow (obstackp, '\n');
+ }
+
+ while (c > ' ') /* ASCII dependent... */
+ {
+ obstack_1grow (obstackp, c);
+ if (c == '{')
+ {
+ look_for_lbrac = 0;
+ blev++;
+ }
+ else if (c == '}')
+ {
+ blev--;
+ if (blev == 0 && !look_for_semicolon)
+ goto done;
+ }
+ else if (c == '\\')
+ {
+	      /* Don't act on the next character, e.g. an escaped
+		 double-quote.  */
+ c = getch ();
+ if (c == EOF)
+ {
+ error_with_file_and_line (starting_filename,
+ starting_lineno,
+ "end of file read inside definition");
+ goto done;
+ }
+ obstack_1grow (obstackp, c);
+ }
+ else if (c == '\"')
+ consume_string (obstackp, c);
+ else if (c == '\'')
+ consume_string (obstackp, c);
+ else if (c == ';')
+ {
+ if (look_for_lbrac)
+ {
+ error (is_template
+ ? "template body missing"
+ : "function body for constructor missing");
+ obstack_1grow (obstackp, '{');
+ obstack_1grow (obstackp, '}');
+ len += 2;
+ goto done;
+ }
+ else if (look_for_semicolon && blev == 0)
+ goto done;
+ }
+ c = getch ();
+ }
+
+ if (c == EOF)
+ {
+ error_with_file_and_line (starting_filename,
+ starting_lineno,
+ "end of file read inside definition");
+ goto done;
+ }
+ else if (c != '\n')
+ {
+ obstack_1grow (obstackp, c);
+ c = getch ();
+ }
+ }
+ done:
+ obstack_1grow (obstackp, '\0');
+}
+
+/* Build a default function named NAME for type TYPE.
+ KIND says what to build.
+
+ When KIND == 0, build default destructor.
+ When KIND == 1, build virtual destructor.
+ When KIND == 2, build default constructor.
+ When KIND == 3, build default X(const X&) constructor.
+ When KIND == 4, build default X(X&) constructor.
+ When KIND == 5, build default operator = (const X&).
+ When KIND == 6, build default operator = (X&). */
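+
+/* A sketch: for `struct X { int i; };', KIND == 2 synthesizes a
+   default constructor whose body is the text "{}", while KIND == 3
+   and KIND == 5 get their bodies as text from
+   default_copy_constructor_body and default_assign_ref_body below;
+   the text is then fed back through the parser like a user-written
+   inline.  */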
+
+tree
+cons_up_default_function (type, name, fields, kind)
+ tree type, name, fields;
+ int kind;
+{
+ extern tree void_list_node;
+ char *func_buf = NULL;
+ int func_len = 0;
+ tree declspecs = NULL_TREE;
+ tree fn, args;
+ tree argtype;
+ int retref = 0;
+
+ name = constructor_name (name);
+ switch (kind)
+ {
+ /* Destructors. */
+ case 1:
+ declspecs = build_decl_list (NULL_TREE, ridpointers [(int) RID_VIRTUAL]);
+ /* Fall through... */
+ case 0:
+ name = build_parse_node (BIT_NOT_EXPR, name);
+ /* Fall through... */
+ case 2:
+ /* Default constructor. */
+ args = void_list_node;
+ {
+ if (declspecs)
+ declspecs = decl_tree_cons (NULL_TREE,
+ ridpointers [(int) RID_INLINE],
+ declspecs);
+ else
+ declspecs = build_decl_list (NULL_TREE, ridpointers [(int) RID_INLINE]);
+ }
+ break;
+
+ case 3:
+ type = build_type_variant (type, 1, 0);
+ /* Fall through... */
+ case 4:
+ /* According to ARM $12.8, the default copy ctor will be declared, but
+ not defined, unless it's needed. So we mark this as `inline'; that
+ way, if it's never used it won't be emitted. */
+ declspecs = build_decl_list (NULL_TREE, ridpointers [(int) RID_INLINE]);
+
+ argtype = build_reference_type (type);
+ args = tree_cons (NULL_TREE,
+ build_tree_list (hash_tree_chain (argtype, NULL_TREE),
+ get_identifier ("_ctor_arg")),
+ void_list_node);
+ default_copy_constructor_body (&func_buf, &func_len, type, fields);
+ break;
+
+ case 5:
+ type = build_type_variant (type, 1, 0);
+ /* Fall through... */
+ case 6:
+ retref = 1;
+ declspecs =
+ decl_tree_cons (NULL_TREE, name,
+ decl_tree_cons (NULL_TREE,
+ ridpointers [(int) RID_INLINE],
+ NULL_TREE));
+
+ name = ansi_opname [(int) MODIFY_EXPR];
+
+ argtype = build_reference_type (type);
+ args = tree_cons (NULL_TREE,
+ build_tree_list (hash_tree_chain (argtype, NULL_TREE),
+ get_identifier ("_ctor_arg")),
+ void_list_node);
+ default_assign_ref_body (&func_buf, &func_len, type, fields);
+ break;
+
+ default:
+ my_friendly_abort (59);
+ }
+
+  if (!func_buf)
+    {
+      func_len = 2;
+      /* Allow room for the null that strcpy appends.  */
+      func_buf = obstack_alloc (&inline_text_obstack, func_len + 1);
+      strcpy (func_buf, "{}");
+    }
+
+ TREE_PARMLIST (args) = 1;
+
+ {
+ tree declarator = build_parse_node (CALL_EXPR, name, args, NULL_TREE);
+ if (retref)
+ declarator = build_parse_node (ADDR_EXPR, declarator);
+
+ fn = start_method (declspecs, declarator, NULL_TREE);
+ }
+
+ if (fn == void_type_node)
+ return fn;
+
+ current_base_init_list = NULL_TREE;
+ current_member_init_list = NULL_TREE;
+
+ {
+ struct pending_inline *t;
+
+ t = (struct pending_inline *) obstack_alloc (&inline_text_obstack,
+ sizeof (struct pending_inline));
+ t->lineno = lineno;
+
+#if 1
+ t->filename = input_filename;
+#else /* This breaks; why? */
+#define MGMSG "(synthetic code at) "
+ t->filename = obstack_alloc (&inline_text_obstack,
+ strlen (input_filename) + sizeof (MGMSG) + 1);
+ strcpy (t->filename, MGMSG);
+ strcat (t->filename, input_filename);
+#endif
+ t->token = YYEMPTY;
+ t->token_value = 0;
+ t->buf = func_buf;
+ t->len = func_len;
+ t->can_free = 1;
+ t->deja_vu = 0;
+ if (interface_unknown && processing_template_defn && flag_external_templates && ! DECL_IN_SYSTEM_HEADER (fn))
+ warn_if_unknown_interface ();
+ t->interface = (interface_unknown ? 1 : (interface_only ? 0 : 2));
+ store_pending_inline (fn, t);
+ if (interface_unknown)
+ TREE_PUBLIC (fn) = 0;
+ else
+ {
+ TREE_PUBLIC (fn) = 1;
+ DECL_EXTERNAL (fn) = interface_only;
+ }
+ }
+
+ finish_method (fn);
+
+#ifdef DEBUG_DEFAULT_FUNCTIONS
+ { char *fn_type = NULL;
+ tree t = name;
+ switch (kind)
+ {
+ case 0: fn_type = "default destructor"; break;
+ case 1: fn_type = "virtual destructor"; break;
+ case 2: fn_type = "default constructor"; break;
+ case 3: fn_type = "default X(const X&)"; break;
+ case 4: fn_type = "default X(X&)"; break;
+ }
+ if (fn_type)
+ {
+ if (TREE_CODE (name) == BIT_NOT_EXPR)
+ t = TREE_OPERAND (name, 0);
+ fprintf (stderr, "[[[[ %s for %s:\n%s]]]]\n", fn_type,
+ IDENTIFIER_POINTER (t), func_buf);
+ }
+ }
+#endif /* DEBUG_DEFAULT_FUNCTIONS */
+
+ DECL_CLASS_CONTEXT (fn) = TYPE_MAIN_VARIANT (type);
+
+ /* Show that this function was generated by the compiler. */
+ SET_DECL_ARTIFICIAL (fn);
+
+ return fn;
+}
+
+/* Used by default_copy_constructor_body and default_assign_ref_body.
+   For the anonymous union in TYPE, return the member that is at least
+   as large as the rest of the members, so we can copy it.  */
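+/* E.g. for `union { int i; double d; };' on a typical machine this
+   returns the FIELD_DECL for `d', the first member whose DECL_SIZE
+   matches the union's TYPE_SIZE.  */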
+static tree
+largest_union_member (type)
+ tree type;
+{
+ tree f, type_size = TYPE_SIZE (type);
+
+ for (f = TYPE_FIELDS (type); f; f = TREE_CHAIN (f))
+ if (simple_cst_equal (DECL_SIZE (f), type_size))
+ return f;
+
+ /* We should always find one. */
+ my_friendly_abort (323);
+ return NULL_TREE;
+}
+
+/* Construct the body of a default assignment operator.
+ Mostly copied directly from default_copy_constructor_body. */
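+/* E.g. for `struct D : B { int i; };' with a nontrivial operator =,
+   the generated text is roughly
+   "{B::operator=((const struct ::B&)_ctor_arg);i=_ctor_arg.i;return *this;}".  */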
+static void
+default_assign_ref_body (bufp, lenp, type, fields)
+ char **bufp;
+ int *lenp;
+ tree type, fields;
+{
+ static struct obstack body;
+ static int inited = FALSE;
+ int n_bases = CLASSTYPE_N_BASECLASSES (type);
+ char *tbuf;
+ int tgot, tneed;
+
+ if (!inited)
+ {
+ obstack_init (&body);
+ inited = TRUE;
+ }
+ body.next_free = body.object_base;
+
+ obstack_1grow (&body, '{');
+
+ /* Small buffer for sprintf(). */
+
+ tgot = 100;
+ tbuf = (char *) alloca (tgot);
+
+ /* If we don't need a real op=, just do a bitwise copy. */
+ if (! TYPE_HAS_COMPLEX_ASSIGN_REF (type))
+ {
+ tbuf = "{__builtin_memcpy(this,&_ctor_arg,sizeof(_ctor_arg));return *this;}";
+ *lenp = strlen (tbuf);
+ *bufp = obstack_alloc (&inline_text_obstack, *lenp + 1);
+ strcpy (*bufp, tbuf);
+ return;
+ }
+
+ if (TREE_CODE (type) == UNION_TYPE)
+ {
+ if (fields)
+ {
+ tree main = fields;
+ char * s;
+ tree f;
+
+ for (f = TREE_CHAIN (fields); f; f = TREE_CHAIN (f))
+ if (tree_int_cst_lt (TYPE_SIZE (TREE_TYPE (main)),
+ TYPE_SIZE (TREE_TYPE (f))))
+ main = f;
+
+ s = IDENTIFIER_POINTER (DECL_NAME (main));
+
+ tneed = (2 * strlen (s)) + 28;
+ if (tgot < tneed)
+ {
+ tgot = tneed;
+ tbuf = (char *) alloca (tgot);
+ }
+
+ sprintf (tbuf, "{%s=_ctor_arg.%s;return *this;}", s, s);
+ }
+ else
+ tbuf = "{}";
+
+ *lenp = strlen (tbuf);
+ *bufp = obstack_alloc (&inline_text_obstack, *lenp + 1);
+ strcpy (*bufp, tbuf);
+ return;
+ }
+
+ /* Construct base classes...
+ FIXME: Does not deal with multiple inheritance and virtual bases
+ correctly. See g++.old-deja/g++.jason/opeq5.C for a testcase.
+ We need to do wacky things if everything between us and the virtual
+ base (by all paths) has a "complex" op=. */
+
+ if (n_bases)
+ {
+ tree bases = TYPE_BINFO_BASETYPES (type);
+ int i = 0;
+
+ for (i = 0; i < n_bases; i++)
+ {
+ tree binfo = TREE_VEC_ELT (bases, i);
+ tree btype, name;
+ char *s;
+
+ btype = BINFO_TYPE (binfo);
+ name = TYPE_NESTED_NAME (btype);
+ s = IDENTIFIER_POINTER (name);
+
+ tneed = (2 * strlen (s)) + 42;
+ if (tgot < tneed)
+ {
+ tgot = tneed;
+ tbuf = (char *) alloca (tgot);
+ }
+
+ sprintf (tbuf, "%s::operator=((%s%s ::%s&)_ctor_arg);", s,
+ TYPE_READONLY (type) ? "const " : "",
+ CLASSTYPE_DECLARED_CLASS (btype) ? "class" : "struct",
+ s);
+ obstack_grow (&body, tbuf, strlen (tbuf));
+ }
+ }
+
+ /* Construct fields. */
+
+ if (fields)
+ {
+ tree f;
+
+ for (f = fields; f; f = TREE_CHAIN (f))
+ {
+ if (TREE_CODE (f) == FIELD_DECL && ! DECL_VIRTUAL_P (f))
+ {
+ char *s;
+ tree x;
+ tree t = TREE_TYPE (f);
+
+ if (DECL_NAME (f))
+ x = f;
+ else if (t != NULL_TREE
+ && TREE_CODE (t) == UNION_TYPE
+ && ((TREE_CODE (TYPE_NAME (t)) == IDENTIFIER_NODE
+ && ANON_AGGRNAME_P (TYPE_NAME (t)))
+ || (TREE_CODE (TYPE_NAME (t)) == TYPE_DECL
+ && ANON_AGGRNAME_P (TYPE_IDENTIFIER (t))))
+ && TYPE_FIELDS (t) != NULL_TREE)
+ x = largest_union_member (t);
+ else
+ continue;
+
+ s = IDENTIFIER_POINTER (DECL_NAME (x));
+ tneed = (2 * strlen (s)) + 13;
+ if (tgot < tneed)
+ {
+ tgot = tneed;
+ tbuf = (char *) alloca (tgot);
+ }
+
+ sprintf (tbuf, "%s=_ctor_arg.%s;", s, s);
+ obstack_grow (&body, tbuf, strlen (tbuf));
+ }
+ }
+ }
+
+  /* The count of 15 includes the terminating null.  */
+  obstack_grow (&body, "return *this;}", 15);
+
+  *lenp = obstack_object_size (&body) - 1;
+  /* Allow room for the null that strcpy appends.  */
+  *bufp = obstack_alloc (&inline_text_obstack, *lenp + 1);
+
+  strcpy (*bufp, body.object_base);
+}
+
+/* Construct the body of a default copy constructor. */
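+/* E.g. for `struct D : B { int i; };' with a nontrivial copy
+   constructor, the generated text is roughly
+   ":B((const struct ::B&)_ctor_arg),i(_ctor_arg.i){}".  */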
+static void
+default_copy_constructor_body (bufp, lenp, type, fields)
+ char **bufp;
+ int *lenp;
+ tree type, fields;
+{
+ static struct obstack prologue;
+ static int inited = FALSE;
+ int n_bases = CLASSTYPE_N_BASECLASSES (type);
+ char sep = ':';
+ char *tbuf;
+ int tgot, tneed;
+
+ /* Create a buffer to call base class constructors and construct members
+ (fields). */
+
+ if (!inited)
+ {
+ obstack_init (&prologue);
+ inited = TRUE;
+ }
+ prologue.next_free = prologue.object_base;
+
+ /* If we don't need a real copy ctor, just do a bitwise copy. */
+ if (! TYPE_HAS_COMPLEX_INIT_REF (type))
+ {
+ tbuf = "{__builtin_memcpy(this,&_ctor_arg,sizeof(_ctor_arg));}";
+ *lenp = strlen (tbuf);
+ *bufp = obstack_alloc (&inline_text_obstack, *lenp + 1);
+ strcpy (*bufp, tbuf);
+ return;
+ }
+
+ /* Small buffer for sprintf(). */
+
+ tgot = 100;
+ tbuf = (char *) alloca (tgot);
+
+ if (TREE_CODE (type) == UNION_TYPE)
+ {
+ if (fields)
+ {
+ tree main = fields;
+ char * s;
+ tree f;
+
+ for (f = TREE_CHAIN (fields); f; f = TREE_CHAIN (f))
+ if (tree_int_cst_lt (TYPE_SIZE (TREE_TYPE (main)),
+ TYPE_SIZE (TREE_TYPE (f))))
+ main = f;
+
+ s = IDENTIFIER_POINTER (DECL_NAME (main));
+ tneed = (2 * strlen (s)) + 16;
+ if (tgot < tneed)
+ {
+ tgot = tneed;
+ tbuf = (char *) alloca (tgot);
+ }
+
+ sprintf (tbuf, ":%s(_ctor_arg.%s){}", s, s);
+ }
+ else
+ tbuf = "{}";
+
+ *lenp = strlen (tbuf);
+ *bufp = obstack_alloc (&inline_text_obstack, *lenp + 1);
+ strcpy (*bufp, tbuf);
+ return;
+ }
+
+ /* Construct base classes... */
+
+ if (n_bases)
+ {
+ /* Note that CLASSTYPE_VBASECLASSES isn't set yet... */
+ tree v = get_vbase_types (type);
+ tree bases = TYPE_BINFO_BASETYPES (type);
+ int i = 0;
+
+ for (;;)
+ {
+ tree binfo, btype, name;
+ char *s;
+
+ if (v)
+ {
+ binfo = v;
+ v = TREE_CHAIN (v);
+ }
+ else if (i < n_bases)
+ {
+ binfo = TREE_VEC_ELT (bases, i++);
+ if (TREE_VIA_VIRTUAL (binfo))
+ continue;
+ }
+ else
+ break;
+
+ btype = BINFO_TYPE (binfo);
+ name = TYPE_NESTED_NAME (btype);
+ s = IDENTIFIER_POINTER (name);
+
+ tneed = (2 * strlen (s)) + 39;
+ if (tgot < tneed)
+ {
+ tgot = tneed;
+ tbuf = (char *) alloca (tgot);
+ }
+
+ sprintf (tbuf, "%c%s((%s%s ::%s&)_ctor_arg)", sep, s,
+ TYPE_READONLY (type) ? "const " : "",
+ CLASSTYPE_DECLARED_CLASS (btype) ? "class" : "struct",
+ s);
+ sep = ',';
+ obstack_grow (&prologue, tbuf, strlen (tbuf));
+ }
+ }
+
+ /* Construct fields. */
+
+ if (fields)
+ {
+ tree f;
+
+ for (f = fields; f; f = TREE_CHAIN (f))
+ {
+ if (TREE_CODE (f) == FIELD_DECL && ! DECL_VIRTUAL_P (f))
+ {
+ char *s;
+ tree x;
+ tree t = TREE_TYPE (f);
+
+ if (DECL_NAME (f))
+ x = f;
+ else if (t != NULL_TREE
+ && TREE_CODE (t) == UNION_TYPE
+ && ((TREE_CODE (TYPE_NAME (t)) == IDENTIFIER_NODE
+ && ANON_AGGRNAME_P (TYPE_NAME (t)))
+ || (TREE_CODE (TYPE_NAME (t)) == TYPE_DECL
+ && ANON_AGGRNAME_P (TYPE_IDENTIFIER (t))))
+ && TYPE_FIELDS (t) != NULL_TREE)
+ x = largest_union_member (t);
+ else
+ continue;
+
+ s = IDENTIFIER_POINTER (DECL_NAME (x));
+ tneed = (2 * strlen (s)) + 30;
+ if (tgot < tneed)
+ {
+ tgot = tneed;
+ tbuf = (char *) alloca (tgot);
+ }
+
+ sprintf (tbuf, "%c%s(_ctor_arg.%s)", sep, s, s);
+ sep = ',';
+ obstack_grow (&prologue, tbuf, strlen (tbuf));
+ }
+ }
+ }
+
+ /* Concatenate constructor body to prologue. */
+
+ *lenp = obstack_object_size (&prologue) + 2;
+ *bufp = obstack_alloc (&inline_text_obstack, *lenp + 1);
+
+ obstack_1grow (&prologue, '\0');
+
+ strcpy (*bufp, prologue.object_base);
+ strcat (*bufp, "{}");
+}
+
+/* Heuristic to tell whether the user is missing a semicolon
+ after a struct or enum declaration. Emit an error message
+ if we know the user has blown it. */
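+/* E.g. after `struct A { int i; }' followed directly by `struct B',
+   the lookahead token is AGGR rather than `;', an identifier, or a
+   declarator token, so we can tell the `;' was forgotten.  */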
+void
+check_for_missing_semicolon (type)
+ tree type;
+{
+ if (yychar < 0)
+ yychar = yylex ();
+
+ if (yychar > 255
+ && yychar != SCSPEC
+ && yychar != IDENTIFIER
+ && yychar != TYPENAME)
+ {
+ if (ANON_AGGRNAME_P (TYPE_IDENTIFIER (type)))
+ error ("semicolon missing after %s declaration",
+ TREE_CODE (type) == ENUMERAL_TYPE ? "enum" : "struct");
+ else
+ error ("semicolon missing after declaration of `%s'",
+ TYPE_NAME_STRING (type));
+ shadow_tag (build_tree_list (0, type));
+ }
+ /* Could probably also hack cases where class { ... } f (); appears. */
+ clear_anon_tags ();
+}
+
+void
+note_got_semicolon (type)
+ tree type;
+{
+ if (TREE_CODE_CLASS (TREE_CODE (type)) != 't')
+ my_friendly_abort (60);
+ if (IS_AGGR_TYPE (type))
+ CLASSTYPE_GOT_SEMICOLON (type) = 1;
+}
+
+void
+note_list_got_semicolon (declspecs)
+ tree declspecs;
+{
+ tree link;
+
+ for (link = declspecs; link; link = TREE_CHAIN (link))
+ {
+ tree type = TREE_VALUE (link);
+ if (TREE_CODE_CLASS (TREE_CODE (type)) == 't')
+ note_got_semicolon (type);
+ }
+ clear_anon_tags ();
+}
+
+/* If C is not whitespace, return C.
+ Otherwise skip whitespace and return first nonwhite char read. */
+
+static int
+skip_white_space (c)
+ register int c;
+{
+ for (;;)
+ {
+ switch (c)
+ {
+ case '\n':
+ c = check_newline ();
+ break;
+
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\r':
+ case '\v':
+ case '\b':
+ do
+ c = getch ();
+ while (c == ' ' || c == '\t');
+ break;
+
+ case '\\':
+ c = getch ();
+ if (c == '\n')
+ lineno++;
+ else
+ error ("stray '\\' in program");
+ c = getch ();
+ break;
+
+ default:
+ return (c);
+ }
+ }
+}
+
+
+
+/* Make the token buffer longer, preserving the data in it.
+ P should point to just beyond the last valid character in the old buffer.
+ The value we return is a pointer to the new buffer
+ at a place corresponding to P. */
+
+static char *
+extend_token_buffer (p)
+ char *p;
+{
+ int offset = p - token_buffer;
+
+ maxtoken = maxtoken * 2 + 10;
+ token_buffer = (char *) xrealloc (token_buffer, maxtoken + 2);
+
+ return token_buffer + offset;
+}
+
+static int
+get_last_nonwhite_on_line ()
+{
+ register int c;
+
+ /* Is this the last nonwhite stuff on the line? */
+ if (nextchar >= 0)
+ c = nextchar, nextchar = -1;
+ else
+ c = getch ();
+
+ while (c == ' ' || c == '\t')
+ c = getch ();
+ return c;
+}
+
+/* At the beginning of a line, increment the line number
+ and process any #-directive on this line.
+ If the line is a #-directive, read the entire line and return a newline.
+ Otherwise, return the line's first non-whitespace character. */
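+
+/* Directives recognized here include `#line N "file"', the
+   preprocessor's abbreviated `# N "file" [flags]', `#pragma vtable',
+   `#pragma unit', `#pragma interface', `#pragma implementation',
+   `#ident "..."', and (for DWARF output) `#define'/`#undef';
+   anything else draws an error.  */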
+
+int
+check_newline ()
+{
+ register int c;
+ register int token;
+
+ /* Read first nonwhite char on the line. Do this before incrementing the
+ line number, in case we're at the end of saved text. */
+
+ do
+ c = getch ();
+ while (c == ' ' || c == '\t');
+
+ lineno++;
+
+ if (c != '#')
+ {
+ /* If not #, return it so caller will use it. */
+ return c;
+ }
+
+ /* Read first nonwhite char after the `#'. */
+
+ do
+ c = getch ();
+ while (c == ' ' || c == '\t');
+
+  /* If a letter follows, then if the word here is `line', skip
+     it and fall through to the line-number code; otherwise handle
+     `pragma', `define', `undef' and `ident' here, with an error
+     for anything unrecognized.  */
+
+ if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'))
+ {
+ if (c == 'p')
+ {
+ if (getch () == 'r'
+ && getch () == 'a'
+ && getch () == 'g'
+ && getch () == 'm'
+ && getch () == 'a')
+ {
+ /* Read first nonwhite char after the `#pragma'. */
+
+ do
+ c = getch ();
+ while (c == ' ' || c == '\t');
+
+ if (c == 'v'
+ && getch () == 't'
+ && getch () == 'a'
+ && getch () == 'b'
+ && getch () == 'l'
+ && getch () == 'e'
+ && ((c = getch ()) == ' ' || c == '\t' || c == '\n'))
+ {
+ extern tree pending_vtables;
+
+ /* More follows: it must be a string constant (class name). */
+ token = real_yylex ();
+ if (token != STRING || TREE_CODE (yylval.ttype) != STRING_CST)
+ {
+ error ("invalid #pragma vtable");
+ goto skipline;
+ }
+ if (write_virtuals != 2)
+ {
+ warning ("use `+e2' option to enable #pragma vtable");
+ goto skipline;
+ }
+ pending_vtables = perm_tree_cons (NULL_TREE, get_identifier (TREE_STRING_POINTER (yylval.ttype)), pending_vtables);
+ if (nextchar < 0)
+ nextchar = getch ();
+ c = nextchar;
+ if (c != '\n')
+ warning ("trailing characters ignored");
+ }
+ else if (c == 'u'
+ && getch () == 'n'
+ && getch () == 'i'
+ && getch () == 't'
+ && ((c = getch ()) == ' ' || c == '\t' || c == '\n'))
+ {
+ /* More follows: it must be a string constant (unit name). */
+ token = real_yylex ();
+ if (token != STRING || TREE_CODE (yylval.ttype) != STRING_CST)
+ {
+ error ("invalid #pragma unit");
+ goto skipline;
+ }
+ current_unit_name = get_identifier (TREE_STRING_POINTER (yylval.ttype));
+ current_unit_language = current_lang_name;
+ if (nextchar < 0)
+ nextchar = getch ();
+ c = nextchar;
+ if (c != '\n')
+ warning ("trailing characters ignored");
+ }
+ else if (c == 'i')
+ {
+ tree fileinfo = IDENTIFIER_CLASS_VALUE (get_time_identifier (input_filename));
+ c = getch ();
+
+ if (c == 'n'
+ && getch () == 't'
+ && getch () == 'e'
+ && getch () == 'r'
+ && getch () == 'f'
+ && getch () == 'a'
+ && getch () == 'c'
+ && getch () == 'e'
+ && ((c = getch ()) == ' ' || c == '\t' || c == '\n'))
+ {
+ int warned_already = 0;
+ char *main_filename = input_filename;
+
+ main_filename = FILE_NAME_NONDIRECTORY (main_filename);
+ while (c == ' ' || c == '\t')
+ c = getch ();
+ if (c != '\n')
+ {
+ put_back (c);
+ token = real_yylex ();
+ if (token != STRING
+ || TREE_CODE (yylval.ttype) != STRING_CST)
+ {
+ error ("invalid `#pragma interface'");
+ goto skipline;
+ }
+ main_filename = TREE_STRING_POINTER (yylval.ttype);
+ c = getch();
+ put_back (c);
+ }
+
+ while (c == ' ' || c == '\t')
+ c = getch ();
+
+ while (c != '\n')
+ {
+ if (!warned_already && extra_warnings
+ && c != ' ' && c != '\t' && c != '\n')
+ {
+ warning ("garbage after `#pragma interface' ignored");
+ warned_already = 1;
+ }
+ c = getch ();
+ }
+
+ write_virtuals = 3;
+
+		  if (impl_file_chain == 0)
+		    {
+#ifdef AUTO_IMPLEMENT
+		      /* Declarations for the auto-implement code below.  */
+		      char *filename;
+		      tree fi;
+#endif
+
+		      /* If this is zero at this point, then we are
+			 auto-implementing.  */
+		      if (main_input_filename == 0)
+			main_input_filename = input_filename;
+
+#ifdef AUTO_IMPLEMENT
+ filename = FILE_NAME_NONDIRECTORY (main_input_filename);
+ fi = get_time_identifier (filename);
+ fi = IDENTIFIER_CLASS_VALUE (fi);
+ TREE_INT_CST_LOW (fi) = 0;
+ TREE_INT_CST_HIGH (fi) = 1;
+ /* Get default. */
+ impl_file_chain = (struct impl_files *)permalloc (sizeof (struct impl_files));
+ impl_file_chain->filename = filename;
+ impl_file_chain->next = 0;
+#endif
+ }
+
+ interface_only = interface_strcmp (main_filename);
+ interface_unknown = 0;
+ TREE_INT_CST_LOW (fileinfo) = interface_only;
+ TREE_INT_CST_HIGH (fileinfo) = interface_unknown;
+ }
+ else if (c == 'm'
+ && getch () == 'p'
+ && getch () == 'l'
+ && getch () == 'e'
+ && getch () == 'm'
+ && getch () == 'e'
+ && getch () == 'n'
+ && getch () == 't'
+ && getch () == 'a'
+ && getch () == 't'
+ && getch () == 'i'
+ && getch () == 'o'
+ && getch () == 'n'
+ && ((c = getch ()) == ' ' || c == '\t' || c == '\n'))
+ {
+ int warned_already = 0;
+ char *main_filename = main_input_filename ? main_input_filename : input_filename;
+
+ main_filename = FILE_NAME_NONDIRECTORY (main_filename);
+ while (c == ' ' || c == '\t')
+ c = getch ();
+ if (c != '\n')
+ {
+ put_back (c);
+ token = real_yylex ();
+ if (token != STRING
+ || TREE_CODE (yylval.ttype) != STRING_CST)
+ {
+ error ("invalid `#pragma implementation'");
+ goto skipline;
+ }
+ main_filename = TREE_STRING_POINTER (yylval.ttype);
+ c = getch();
+ put_back (c);
+ }
+
+ while (c == ' ' || c == '\t')
+ c = getch ();
+
+ while (c != '\n')
+ {
+ if (!warned_already && extra_warnings
+ && c != ' ' && c != '\t' && c != '\n')
+ {
+ warning ("garbage after `#pragma implementation' ignored");
+ warned_already = 1;
+ }
+ c = getch ();
+ }
+
+ if (write_virtuals == 3)
+ {
+ struct impl_files *ifiles = impl_file_chain;
+ while (ifiles)
+ {
+ if (! strcmp (ifiles->filename, main_filename))
+ break;
+ ifiles = ifiles->next;
+ }
+ if (ifiles == 0)
+ {
+ ifiles = (struct impl_files*) permalloc (sizeof (struct impl_files));
+ ifiles->filename = main_filename;
+ ifiles->next = impl_file_chain;
+ impl_file_chain = ifiles;
+ }
+ }
+ else if ((main_input_filename != 0
+ && ! strcmp (main_input_filename, input_filename))
+ || ! strcmp (input_filename, main_filename))
+ {
+ write_virtuals = 3;
+ if (impl_file_chain == 0)
+ {
+ impl_file_chain = (struct impl_files*) permalloc (sizeof (struct impl_files));
+ impl_file_chain->filename = main_filename;
+ impl_file_chain->next = 0;
+ }
+ }
+ else
+ error ("`#pragma implementation' can only appear at top-level");
+ interface_only = 0;
+#if 1
+ /* We make this non-zero so that we infer decl linkage
+ in the impl file only for variables first declared
+ in the interface file. */
+ interface_unknown = 1;
+#else
+ /* We make this zero so that templates in the impl
+ file will be emitted properly. */
+ interface_unknown = 0;
+#endif
+ TREE_INT_CST_LOW (fileinfo) = interface_only;
+ TREE_INT_CST_HIGH (fileinfo) = interface_unknown;
+ }
+ }
+ }
+ goto skipline;
+ }
+ else if (c == 'd')
+ {
+ if (getch () == 'e'
+ && getch () == 'f'
+ && getch () == 'i'
+ && getch () == 'n'
+ && getch () == 'e'
+ && ((c = getch ()) == ' ' || c == '\t' || c == '\n'))
+ {
+#ifdef DWARF_DEBUGGING_INFO
+ if ((debug_info_level == DINFO_LEVEL_VERBOSE)
+ && (write_symbols == DWARF_DEBUG))
+ dwarfout_define (lineno, get_directive_line (finput));
+#endif /* DWARF_DEBUGGING_INFO */
+ goto skipline;
+ }
+ }
+ else if (c == 'u')
+ {
+ if (getch () == 'n'
+ && getch () == 'd'
+ && getch () == 'e'
+ && getch () == 'f'
+ && ((c = getch ()) == ' ' || c == '\t' || c == '\n'))
+ {
+#ifdef DWARF_DEBUGGING_INFO
+ if ((debug_info_level == DINFO_LEVEL_VERBOSE)
+ && (write_symbols == DWARF_DEBUG))
+ dwarfout_undef (lineno, get_directive_line (finput));
+#endif /* DWARF_DEBUGGING_INFO */
+ goto skipline;
+ }
+ }
+ else if (c == 'l')
+ {
+ if (getch () == 'i'
+ && getch () == 'n'
+ && getch () == 'e'
+ && ((c = getch ()) == ' ' || c == '\t'))
+ goto linenum;
+ }
+ else if (c == 'i')
+ {
+ if (getch () == 'd'
+ && getch () == 'e'
+ && getch () == 'n'
+ && getch () == 't'
+ && ((c = getch ()) == ' ' || c == '\t'))
+ {
+#ifdef ASM_OUTPUT_IDENT
+ extern FILE *asm_out_file;
+#endif
+ /* #ident. The pedantic warning is now in cccp.c. */
+
+ /* Here we have just seen `#ident '.
+ A string constant should follow. */
+
+ while (c == ' ' || c == '\t')
+ c = getch ();
+
+ /* If no argument, ignore the line. */
+ if (c == '\n')
+ return c;
+
+ put_back (c);
+ token = real_yylex ();
+ if (token != STRING
+ || TREE_CODE (yylval.ttype) != STRING_CST)
+ {
+ error ("invalid #ident");
+ goto skipline;
+ }
+
+ if (! flag_no_ident)
+ {
+#ifdef ASM_OUTPUT_IDENT
+ ASM_OUTPUT_IDENT (asm_out_file,
+ TREE_STRING_POINTER (yylval.ttype));
+#endif
+ }
+
+ /* Skip the rest of this line. */
+ goto skipline;
+ }
+ }
+ else if (c == 'n')
+ {
+ if (getch () == 'e'
+ && getch () == 'w'
+ && getch () == 'w'
+ && getch () == 'o'
+ && getch () == 'r'
+ && getch () == 'l'
+ && getch () == 'd'
+ && ((c = getch ()) == ' ' || c == '\t'))
+ {
+ /* Used to test incremental compilation. */
+ sorry ("#pragma newworld");
+ goto skipline;
+ }
+ }
+ error ("undefined or invalid # directive");
+ goto skipline;
+ }
+
+linenum:
+ /* Here we have either `#line' or `# <nonletter>'.
+ In either case, it should be a line number; a digit should follow. */
+
+ while (c == ' ' || c == '\t')
+ c = getch ();
+
+ /* If the # is the only nonwhite char on the line,
+ just ignore it. Check the new newline. */
+ if (c == '\n')
+ return c;
+
+ /* Something follows the #; read a token. */
+
+ put_back (c);
+ token = real_yylex ();
+
+ if (token == CONSTANT
+ && TREE_CODE (yylval.ttype) == INTEGER_CST)
+ {
+ int old_lineno = lineno;
+ enum { act_none, act_push, act_pop } action = act_none;
+ int entering_system_header = 0;
+ int entering_c_header = 0;
+
+      /* Subtract one, because it is the following line that
+	 gets the specified number.  */
+
+ int l = TREE_INT_CST_LOW (yylval.ttype) - 1;
+ c = get_last_nonwhite_on_line ();
+ if (c == '\n')
+ {
+ /* No more: store the line number and check following line. */
+ lineno = l;
+ return c;
+ }
+ put_back (c);
+
+ /* More follows: it must be a string constant (filename). */
+
+ /* Read the string constant, but don't treat \ as special. */
+ ignore_escape_flag = 1;
+ token = real_yylex ();
+ ignore_escape_flag = 0;
+
+ if (token != STRING || TREE_CODE (yylval.ttype) != STRING_CST)
+ {
+ error ("invalid #line");
+ goto skipline;
+ }
+
+ /* Changing files again. This means currently collected time
+ is charged against header time, and body time starts back
+ at 0. */
+ if (flag_detailed_statistics)
+ {
+ int this_time = my_get_run_time ();
+ tree time_identifier = get_time_identifier (TREE_STRING_POINTER (yylval.ttype));
+ header_time += this_time - body_time;
+ TREE_INT_CST_LOW (IDENTIFIER_LOCAL_VALUE (this_filename_time))
+ += this_time - body_time;
+ this_filename_time = time_identifier;
+ body_time = this_time;
+ }
+
+ if (flag_cadillac)
+ cadillac_note_source ();
+
+ input_filename
+ = (char *) permalloc (TREE_STRING_LENGTH (yylval.ttype) + 1);
+ strcpy (input_filename, TREE_STRING_POINTER (yylval.ttype));
+ lineno = l;
+ GNU_xref_file (input_filename);
+
+ if (main_input_filename == 0)
+ {
+ struct impl_files *ifiles = impl_file_chain;
+
+ if (ifiles)
+ {
+ while (ifiles->next)
+ ifiles = ifiles->next;
+ ifiles->filename = FILE_NAME_NONDIRECTORY (input_filename);
+ }
+
+ main_input_filename = input_filename;
+ if (write_virtuals == 3)
+ walk_vtables (set_typedecl_interface_info, set_vardecl_interface_info);
+ }
+
+ extract_interface_info ();
+
+ c = get_last_nonwhite_on_line ();
+ if (c != '\n')
+ {
+ put_back (c);
+
+ token = real_yylex ();
+
+ /* `1' after file name means we are entering a new file.
+ `2' after file name means we have just left a file. */
+
+ if (token == CONSTANT
+ && TREE_CODE (yylval.ttype) == INTEGER_CST)
+ {
+ if (TREE_INT_CST_LOW (yylval.ttype) == 1)
+ action = act_push;
+ else if (TREE_INT_CST_LOW (yylval.ttype) == 2)
+ action = act_pop;
+
+ if (action)
+ {
+ c = get_last_nonwhite_on_line ();
+ if (c != '\n')
+ {
+ put_back (c);
+ token = real_yylex ();
+ }
+ }
+ }
+
+ /* `3' after file name means this is a system header file. */
+
+ if (token == CONSTANT
+ && TREE_CODE (yylval.ttype) == INTEGER_CST
+ && TREE_INT_CST_LOW (yylval.ttype) == 3)
+ {
+ entering_system_header = 1;
+
+ c = get_last_nonwhite_on_line ();
+ if (c != '\n')
+ {
+ put_back (c);
+ token = real_yylex ();
+ }
+ }
+
+ /* `4' after file name means this is a C header file. */
+
+ if (token == CONSTANT
+ && TREE_CODE (yylval.ttype) == INTEGER_CST
+ && TREE_INT_CST_LOW (yylval.ttype) == 4)
+ {
+ entering_c_header = 1;
+
+ c = get_last_nonwhite_on_line ();
+ if (c != '\n')
+ {
+ put_back (c);
+ token = real_yylex ();
+ }
+ }
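+
+ /* For example, the preprocessor output line `# 1 "ansi.h" 1 3'
+ is a push into "ansi.h" that is also marked as a system header,
+ and a later `# 10 "main.C" 2' is the matching pop back to line
+ 10 of "main.C". (The file names are illustrative only.) */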
+
+ /* Do the actions implied by the preceding numbers. */
+
+ if (action == act_push)
+ {
+ /* Pushing to a new file. */
+ struct file_stack *p;
+
+ p = (struct file_stack *) xmalloc (sizeof (struct file_stack));
+ input_file_stack->line = old_lineno;
+ p->next = input_file_stack;
+ p->name = input_filename;
+ input_file_stack = p;
+ input_file_stack_tick++;
+#ifdef DWARF_DEBUGGING_INFO
+ if (debug_info_level == DINFO_LEVEL_VERBOSE
+ && write_symbols == DWARF_DEBUG)
+ dwarfout_start_new_source_file (input_filename);
+#endif /* DWARF_DEBUGGING_INFO */
+ if (flag_cadillac)
+ cadillac_push_source ();
+ in_system_header = entering_system_header;
+ if (c_header_level)
+ ++c_header_level;
+ else if (entering_c_header)
+ {
+ c_header_level = 1;
+ ++pending_lang_change;
+ }
+ }
+ else if (action == act_pop)
+ {
+ /* Popping out of a file. */
+ if (input_file_stack->next)
+ {
+ struct file_stack *p;
+
+ if (c_header_level && --c_header_level == 0)
+ {
+ if (entering_c_header)
+ warning ("Badly nested C headers from preprocessor");
+ --pending_lang_change;
+ }
+ if (flag_cadillac)
+ cadillac_pop_source ();
+ in_system_header = entering_system_header;
+
+ p = input_file_stack;
+ input_file_stack = p->next;
+ free (p);
+ input_file_stack_tick++;
+#ifdef DWARF_DEBUGGING_INFO
+ if (debug_info_level == DINFO_LEVEL_VERBOSE
+ && write_symbols == DWARF_DEBUG)
+ dwarfout_resume_previous_source_file (input_file_stack->line);
+#endif /* DWARF_DEBUGGING_INFO */
+ }
+ else
+ error ("#-lines for entering and leaving files don't match");
+ }
+ else
+ {
+ in_system_header = entering_system_header;
+ if (flag_cadillac)
+ cadillac_switch_source (-1);
+ }
+ }
+
+ /* If NEXTCHAR is not end of line, we don't care what it is. */
+ if (nextchar == '\n')
+ return '\n';
+ }
+ else
+ error ("invalid #-line");
+
+ /* Skip the rest of this line. */
+ skipline:
+ if (c == '\n')
+ return c;
+ while ((c = getch ()) != EOF && c != '\n');
+ return c;
+}
+
+void
+do_pending_lang_change ()
+{
+ for (; pending_lang_change > 0; --pending_lang_change)
+ push_lang_context (lang_name_c);
+ for (; pending_lang_change < 0; ++pending_lang_change)
+ pop_lang_context ();
+}
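+
+/* A minimal sketch (never compiled) of how the `4' and `2' flags seen
+ by check_newline drive this function: entering a C header schedules
+ one push, and leaving it schedules the matching pop. */
+#if 0
+static void
+example_lang_change ()
+{
+ ++pending_lang_change; /* as done when a C header is entered */
+ do_pending_lang_change (); /* now in the `lang_name_c' context */
+
+ --pending_lang_change; /* as done when that header is left */
+ do_pending_lang_change (); /* back in the C++ context */
+}
+#endif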
+
+#if 0
+#define isalnum(char) (char >= 'a' ? char <= 'z' : char >= '0' ? char <= '9' || (char >= 'A' && char <= 'Z') : 0)
+#define isdigit(char) (char >= '0' && char <= '9')
+#else
+#include <ctype.h>
+#endif
+
+#define ENDFILE -1 /* token that represents end-of-file */
+
+/* Read an escape sequence, returning its equivalent as a character,
+ or store 1 in *ignore_ptr if it is backslash-newline. */
+
+static int
+readescape (ignore_ptr)
+ int *ignore_ptr;
+{
+ register int c = getch ();
+ register int code;
+ register unsigned count;
+ unsigned firstdig;
+ int nonnull;
+
+ switch (c)
+ {
+ case 'x':
+ if (warn_traditional)
+ warning ("the meaning of `\\x' varies with -traditional");
+
+ if (flag_traditional)
+ return c;
+
+ code = 0;
+ count = 0;
+ nonnull = 0;
+ while (1)
+ {
+ c = getch ();
+ if (! isxdigit (c))
+ {
+ put_back (c);
+ break;
+ }
+ code *= 16;
+ if (c >= 'a' && c <= 'f')
+ code += c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ code += c - 'A' + 10;
+ if (c >= '0' && c <= '9')
+ code += c - '0';
+ if (code != 0 || count != 0)
+ {
+ if (count == 0)
+ firstdig = code;
+ count++;
+ }
+ nonnull = 1;
+ }
+ if (! nonnull)
+ error ("\\x used with no following hex digits");
+ else if (count == 0)
+ /* Digits are all 0's. Ok. */
+ ;
+ else if ((count - 1) * 4 >= TYPE_PRECISION (integer_type_node)
+ || (count > 1
+ && ((1 << (TYPE_PRECISION (integer_type_node) - (count - 1) * 4))
+ <= firstdig)))
+ pedwarn ("hex escape out of range");
+ return code;
+
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7':
+ code = 0;
+ count = 0;
+ while ((c <= '7') && (c >= '0') && (count++ < 3))
+ {
+ code = (code * 8) + (c - '0');
+ c = getch ();
+ }
+ put_back (c);
+ return code;
+
+ case '\\': case '\'': case '"':
+ return c;
+
+ case '\n':
+ lineno++;
+ *ignore_ptr = 1;
+ return 0;
+
+ case 'n':
+ return TARGET_NEWLINE;
+
+ case 't':
+ return TARGET_TAB;
+
+ case 'r':
+ return TARGET_CR;
+
+ case 'f':
+ return TARGET_FF;
+
+ case 'b':
+ return TARGET_BS;
+
+ case 'a':
+ if (warn_traditional)
+ warning ("the meaning of `\\a' varies with -traditional");
+
+ if (flag_traditional)
+ return c;
+ return TARGET_BELL;
+
+ case 'v':
+ return TARGET_VT;
+
+ case 'e':
+ case 'E':
+ if (pedantic)
+ pedwarn ("non-ANSI-standard escape sequence, `\\%c'", c);
+ return 033;
+
+ case '?':
+ return c;
+
+ /* `\(', etc, are used at beginning of line to avoid confusing Emacs. */
+ case '(':
+ case '{':
+ case '[':
+ /* `\%' is used to prevent SCCS from getting confused. */
+ case '%':
+ if (pedantic)
+ pedwarn ("unknown escape sequence `\\%c'", c);
+ return c;
+ }
+ if (c >= 040 && c < 0177)
+ pedwarn ("unknown escape sequence `\\%c'", c);
+ else
+ pedwarn ("unknown escape sequence: `\\' followed by char code 0x%x", c);
+ return c;
+}
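+
+/* A sketch (never compiled) of the hex-escape range test above,
+ assuming a 32-bit integer_type_node: `\x7f' (count == 2,
+ firstdig == 7) passes, while nine hex digits give
+ (count - 1) * 4 == 32 >= 32 and draw the pedwarn. */
+#if 0
+static int
+hex_escape_out_of_range (count, firstdig)
+ unsigned count, firstdig;
+{
+ unsigned prec = 32; /* stand-in for TYPE_PRECISION (integer_type_node) */
+ return ((count - 1) * 4 >= prec
+ || (count > 1 && (1 << (prec - (count - 1) * 4)) <= firstdig));
+}
+#endif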
+
+/* Value is 1 (or 2) if we should try to make the next identifier look like
+ a typename (when it may be a local variable or a class variable).
+ Value is 0 if we treat this name in a default fashion. */
+int looking_for_typename = 0;
+
+#if 0
+/* NO LONGER USED: Value is -1 if we must not see a type name. */
+void
+dont_see_typename ()
+{
+ looking_for_typename = -1;
+ if (yychar == TYPENAME || yychar == PTYPENAME)
+ {
+ yychar = IDENTIFIER;
+ lastiddecl = 0;
+ }
+}
+#endif
+
+#ifdef __GNUC__
+extern __inline int identifier_type ();
+__inline
+#endif
+int
+identifier_type (decl)
+ tree decl;
+{
+ if (TREE_CODE (decl) == TEMPLATE_DECL
+ && DECL_TEMPLATE_IS_CLASS (decl))
+ return PTYPENAME;
+ if (TREE_CODE (decl) != TYPE_DECL)
+ return IDENTIFIER;
+ return TYPENAME;
+}
+
+void
+see_typename ()
+{
+ looking_for_typename = 0;
+ if (yychar == IDENTIFIER)
+ {
+ lastiddecl = lookup_name (yylval.ttype, -2);
+ if (lastiddecl == 0)
+ {
+ if (flag_labels_ok)
+ lastiddecl = IDENTIFIER_LABEL_VALUE (yylval.ttype);
+ }
+ else
+ yychar = identifier_type (lastiddecl);
+ }
+}
+
+tree
+do_identifier (token)
+ register tree token;
+{
+ register tree id = lastiddecl;
+
+ if (yychar == YYEMPTY)
+ yychar = yylex ();
+ /* Scope class declarations before global
+ declarations. */
+ if (id == IDENTIFIER_GLOBAL_VALUE (token)
+ && current_class_type != 0
+ && TYPE_SIZE (current_class_type) == 0
+ && TREE_CODE (current_class_type) != UNINSTANTIATED_P_TYPE)
+ {
+ /* Could be from one of the base classes. */
+ tree field = lookup_field (current_class_type, token, 1, 0);
+ if (field == 0)
+ ;
+ else if (field == error_mark_node)
+ /* We have already generated the error message.
+ But we still want to return this value. */
+ id = lookup_field (current_class_type, token, 0, 0);
+ else if (TREE_CODE (field) == VAR_DECL
+ || TREE_CODE (field) == CONST_DECL)
+ id = field;
+ else if (TREE_CODE (field) != FIELD_DECL)
+ my_friendly_abort (61);
+ else
+ {
+ cp_error ("invalid use of member `%D' from base class `%T'", field,
+ DECL_FIELD_CONTEXT (field));
+ id = error_mark_node;
+ return id;
+ }
+ }
+
+ /* Remember that this name has been used in the class definition, as per
+ [class.scope0]. */
+ if (id && current_class_type
+ && TYPE_BEING_DEFINED (current_class_type)
+ && ! IDENTIFIER_CLASS_VALUE (token))
+ pushdecl_class_level (id);
+
+ if (!id || id == error_mark_node)
+ {
+ if (id == error_mark_node && current_class_type != NULL_TREE)
+ {
+ id = lookup_nested_field (token, 1);
+ /* In lookup_nested_field(), we marked this so we can gracefully
+ leave this whole mess. */
+ if (id && id != error_mark_node && TREE_TYPE (id) == error_mark_node)
+ return id;
+ }
+ if (yychar == '(' || yychar == LEFT_RIGHT)
+ {
+ id = implicitly_declare (token);
+ }
+ else if (current_function_decl == 0)
+ {
+ cp_error ("`%D' was not declared in this scope", token);
+ id = error_mark_node;
+ }
+ else
+ {
+ if (IDENTIFIER_GLOBAL_VALUE (token) != error_mark_node
+ || IDENTIFIER_ERROR_LOCUS (token) != current_function_decl)
+ {
+ static int undeclared_variable_notice;
+
+ cp_error ("`%D' undeclared (first use this function)", token);
+
+ if (! undeclared_variable_notice)
+ {
+ error ("(Each undeclared identifier is reported only once");
+ error ("for each function it appears in.)");
+ undeclared_variable_notice = 1;
+ }
+ }
+ id = error_mark_node;
+ /* Prevent repeated error messages. */
+ IDENTIFIER_GLOBAL_VALUE (token) = error_mark_node;
+ SET_IDENTIFIER_ERROR_LOCUS (token, current_function_decl);
+ }
+ }
+ /* TREE_USED is set in `hack_identifier'. */
+ if (TREE_CODE (id) == CONST_DECL)
+ {
+ if (IDENTIFIER_CLASS_VALUE (token) == id)
+ {
+ /* Check access. */
+ enum access_type access
+ = compute_access (TYPE_BINFO (current_class_type), id);
+ if (access == access_private)
+ cp_error ("enum `%D' is private", id);
+ /* protected is OK, since it's an enum of `this'. */
+ }
+ id = DECL_INITIAL (id);
+ }
+ else
+ id = hack_identifier (id, token, yychar);
+ return id;
+}
+
+tree
+identifier_typedecl_value (node)
+ tree node;
+{
+ tree t, type;
+ type = IDENTIFIER_TYPE_VALUE (node);
+ if (type == NULL_TREE)
+ return NULL_TREE;
+#define do(X) \
+ { \
+ t = (X); \
+ if (t && TREE_CODE (t) == TYPE_DECL && TREE_TYPE (t) == type) \
+ return t; \
+ }
+ do (IDENTIFIER_LOCAL_VALUE (node));
+ do (IDENTIFIER_CLASS_VALUE (node));
+ do (IDENTIFIER_GLOBAL_VALUE (node));
+#undef do
+ /* Will this one ever happen? */
+ if (TYPE_NAME (type))
+ return TYPE_NAME (type);
+
+ /* We used to do an internal error of 62 here, but instead we will
+ handle the return of a null appropriately in the callers. */
+ return NULL_TREE;
+}
+
+struct try_type
+{
+ tree *node_var;
+ char unsigned_flag;
+ char long_flag;
+ char long_long_flag;
+};
+
+struct try_type type_sequence[] =
+{
+ { &integer_type_node, 0, 0, 0},
+ { &unsigned_type_node, 1, 0, 0},
+ { &long_integer_type_node, 0, 1, 0},
+ { &long_unsigned_type_node, 1, 1, 0},
+ { &long_long_integer_type_node, 0, 1, 1},
+ { &long_long_unsigned_type_node, 1, 1, 1}
+};
+
+int
+real_yylex ()
+{
+ register int c;
+ register int value;
+ int wide_flag = 0;
+ int dollar_seen = 0;
+ int i;
+
+ if (nextchar >= 0)
+ c = nextchar, nextchar = -1;
+ else
+ c = getch ();
+
+ /* Effectively do c = skip_white_space (c)
+ but do it faster in the usual cases. */
+ while (1)
+ switch (c)
+ {
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ case '\b':
+ c = getch ();
+ break;
+
+ case '\r':
+ /* Call skip_white_space so we can warn if appropriate. */
+
+ case '\n':
+ case '/':
+ case '\\':
+ c = skip_white_space (c);
+ default:
+ goto found_nonwhite;
+ }
+ found_nonwhite:
+
+ token_buffer[0] = c;
+ token_buffer[1] = 0;
+
+/* yylloc.first_line = lineno; */
+
+ switch (c)
+ {
+ case EOF:
+ token_buffer[0] = '\0';
+ end_of_file = 1;
+ if (input_redirected ())
+ value = END_OF_SAVED_INPUT;
+ else if (do_pending_expansions ())
+ /* this will set yychar for us */
+ return yychar;
+ else
+ value = ENDFILE;
+ break;
+
+ case '$':
+ if (dollars_in_ident)
+ {
+ dollar_seen = 1;
+ goto letter;
+ }
+ value = '$';
+ goto done;
+
+ case 'L':
+ /* Capital L may start a wide-string or wide-character constant. */
+ {
+ register int c = getch ();
+ if (c == '\'')
+ {
+ wide_flag = 1;
+ goto char_constant;
+ }
+ if (c == '"')
+ {
+ wide_flag = 1;
+ goto string_constant;
+ }
+ put_back (c);
+ }
+
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F': case 'G': case 'H': case 'I': case 'J':
+ case 'K': case 'M': case 'N': case 'O':
+ case 'P': case 'Q': case 'R': case 'S': case 'T':
+ case 'U': case 'V': case 'W': case 'X': case 'Y':
+ case 'Z':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f': case 'g': case 'h': case 'i': case 'j':
+ case 'k': case 'l': case 'm': case 'n': case 'o':
+ case 'p': case 'q': case 'r': case 's': case 't':
+ case 'u': case 'v': case 'w': case 'x': case 'y':
+ case 'z':
+ case '_':
+ letter:
+ {
+ register char *p;
+
+ p = token_buffer;
+ if (input == 0)
+ {
+ /* We know that `token_buffer' can hold at least one char,
+ so we install C immediately.
+ We may have to read the value in `putback_char', so call
+ `getch' once. */
+ *p++ = c;
+ c = getch ();
+
+ /* Make this run fast. We know that we are reading straight
+ from FINPUT in this case (since identifiers cannot straddle
+ input sources). */
+ while (isalnum (c) || (c == '_') || c == '$')
+ {
+ if (c == '$' && ! dollars_in_ident)
+ break;
+ if (p >= token_buffer + maxtoken)
+ p = extend_token_buffer (p);
+
+ *p++ = c;
+ c = getc (finput);
+ }
+ }
+ else
+ {
+ /* We know that `token_buffer' can hold at least one char,
+ so we install C immediately. */
+ *p++ = c;
+ c = getch ();
+
+ while (isalnum (c) || (c == '_') || c == '$')
+ {
+ if (c == '$' && ! dollars_in_ident)
+ break;
+ if (p >= token_buffer + maxtoken)
+ p = extend_token_buffer (p);
+
+ *p++ = c;
+ c = getch ();
+ }
+ }
+
+ *p = 0;
+ nextchar = c;
+
+ value = IDENTIFIER;
+ yylval.itype = 0;
+
+ /* Try to recognize a keyword; this uses a minimum-perfect hash function. */
+
+ {
+ register struct resword *ptr;
+
+ if (ptr = is_reserved_word (token_buffer, p - token_buffer))
+ {
+ if (ptr->rid)
+ {
+ tree old_ttype = ridpointers[(int) ptr->rid];
+
+ /* If this provides a type for us, then revert lexical
+ state to standard state. */
+ if (TREE_CODE (old_ttype) == IDENTIFIER_NODE
+ && IDENTIFIER_GLOBAL_VALUE (old_ttype) != 0
+ && TREE_CODE (IDENTIFIER_GLOBAL_VALUE (old_ttype)) == TYPE_DECL)
+ looking_for_typename = 0;
+ else if (ptr->token == AGGR || ptr->token == ENUM)
+ looking_for_typename = 1;
+
+ /* Check if this is a language-type declaration.
+ Just glimpse the next non-white character. */
+ nextchar = skip_white_space (nextchar);
+ if (nextchar == '"')
+ {
+ /* We are looking at a string. Complain
+ if the token before the string is not `extern'.
+
+ We could save some memory by placing this string
+ on the temporary_ obstack instead of the saveable_
+ obstack. */
+
+ if (ptr->rid != RID_EXTERN)
+ error ("invalid modifier `%s' for language string",
+ ptr->name);
+ real_yylex ();
+ value = EXTERN_LANG_STRING;
+ yylval.ttype = get_identifier (TREE_STRING_POINTER (yylval.ttype));
+ break;
+ }
+ if (ptr->token == VISSPEC)
+ {
+ switch (ptr->rid)
+ {
+ case RID_PUBLIC:
+ yylval.itype = access_public;
+ break;
+ case RID_PRIVATE:
+ yylval.itype = access_private;
+ break;
+ case RID_PROTECTED:
+ yylval.itype = access_protected;
+ break;
+ default:
+ my_friendly_abort (63);
+ }
+ }
+ else
+ yylval.ttype = old_ttype;
+ }
+ value = (int) ptr->token;
+ }
+ }
+
+ /* If we did not find a keyword, look for an identifier
+ (or a typename). */
+
+ if (strcmp ("catch", token_buffer) == 0
+ || strcmp ("throw", token_buffer) == 0
+ || strcmp ("try", token_buffer) == 0)
+ {
+ static int did_warn = 0;
+ if (! did_warn && ! flag_handle_exceptions)
+ {
+ pedwarn ("`catch', `throw', and `try' are all C++ reserved words");
+ did_warn = 1;
+ }
+ }
+
+ if (value == IDENTIFIER || value == TYPESPEC)
+ GNU_xref_ref (current_function_decl, token_buffer);
+
+ if (value == IDENTIFIER)
+ {
+ register tree tmp = get_identifier (token_buffer);
+
+#if !defined(VMS) && defined(JOINER)
+ /* Make sure that user does not collide with our internal
+ naming scheme. */
+ if (JOINER == '$'
+ && dollar_seen
+ && (THIS_NAME_P (tmp)
+ || VPTR_NAME_P (tmp)
+ || DESTRUCTOR_NAME_P (tmp)
+ || VTABLE_NAME_P (tmp)
+ || TEMP_NAME_P (tmp)
+ || ANON_AGGRNAME_P (tmp)
+ || ANON_PARMNAME_P (tmp)))
+ warning ("identifier name `%s' conflicts with GNU C++ internal naming strategy",
+ token_buffer);
+#endif
+
+ yylval.ttype = tmp;
+
+ /* A user-invisible read-only initialized variable
+ should be replaced by its value. We only handle strings
+ since that's the only case used in C (and C++). */
+ /* Note we go right after the local value for the identifier
+ (e.g., __FUNCTION__ or __PRETTY_FUNCTION__). We used to
+ call lookup_name, but that could result in an error about
+ ambiguities. */
+ tmp = IDENTIFIER_LOCAL_VALUE (yylval.ttype);
+ if (tmp != NULL_TREE
+ && TREE_CODE (tmp) == VAR_DECL
+ && DECL_IGNORED_P (tmp)
+ && TREE_READONLY (tmp)
+ && DECL_INITIAL (tmp) != NULL_TREE
+ && TREE_CODE (DECL_INITIAL (tmp)) == STRING_CST)
+ {
+ yylval.ttype = DECL_INITIAL (tmp);
+ value = STRING;
+ }
+ }
+ if (value == NEW && ! global_bindings_p ())
+ {
+ value = NEW;
+ goto done;
+ }
+ }
+ break;
+
+ case '.':
+ {
+ register int c1 = getch ();
+ token_buffer[0] = c;
+ token_buffer[1] = c1;
+ if (c1 == '*')
+ {
+ value = DOT_STAR;
+ token_buffer[2] = 0;
+ goto done;
+ }
+ if (c1 == '.')
+ {
+ c1 = getch ();
+ if (c1 == '.')
+ {
+ token_buffer[2] = c1;
+ token_buffer[3] = 0;
+ value = ELLIPSIS;
+ goto done;
+ }
+ error ("parse error at `..'");
+ }
+ if (isdigit (c1))
+ {
+ put_back (c1);
+ goto resume_numerical_scan;
+ }
+ nextchar = c1;
+ value = '.';
+ token_buffer[1] = 0;
+ goto done;
+ }
+ case '0': case '1':
+ /* Optimize for the most frequent case. */
+ {
+ register int c1 = getch ();
+ if (! isalnum (c1) && c1 != '.')
+ {
+ /* Terminate string. */
+ token_buffer[0] = c;
+ token_buffer[1] = 0;
+ if (c == '0')
+ yylval.ttype = integer_zero_node;
+ else
+ yylval.ttype = integer_one_node;
+ nextchar = c1;
+ value = CONSTANT;
+ goto done;
+ }
+ put_back (c1);
+ }
+ /* fall through... */
+ case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ resume_numerical_scan:
+ {
+ register char *p;
+ int base = 10;
+ int count = 0;
+ int largest_digit = 0;
+ int numdigits = 0;
+ /* For multi-precision arithmetic,
+ we actually store only HOST_BITS_PER_CHAR bits in each part.
+ The number of parts is chosen so as to be sufficient to hold
+ enough bits to fill the two HOST_WIDE_INTs that contain
+ the integer value (this is always at least as many bits as are
+ in a target `long long' value, but may be wider). */
+#define TOTAL_PARTS ((HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR) * 2 + 2)
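+ /* E.g. with HOST_BITS_PER_WIDE_INT == 32 and HOST_BITS_PER_CHAR == 8
+ this is (32 / 8) * 2 + 2 == 10 parts: eight payload parts for the
+ two HOST_WIDE_INTs plus two spare high-order parts used only to
+ detect overflow. */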
+ int parts[TOTAL_PARTS];
+ int overflow = 0;
+
+ enum anon1 { NOT_FLOAT, AFTER_POINT, TOO_MANY_POINTS} floatflag
+ = NOT_FLOAT;
+
+ p = token_buffer;
+ *p++ = c;
+
+ for (count = 0; count < TOTAL_PARTS; count++)
+ parts[count] = 0;
+
+ if (c == '0')
+ {
+ *p++ = (c = getch ());
+ if ((c == 'x') || (c == 'X'))
+ {
+ base = 16;
+ *p++ = (c = getch ());
+ }
+ /* Leading 0 forces octal unless the 0 is the only digit. */
+ else if (c >= '0' && c <= '9')
+ {
+ base = 8;
+ numdigits++;
+ }
+ else
+ numdigits++;
+ }
+
+ /* Read all the digits-and-decimal-points. */
+
+ while (c == '.'
+ || (isalnum (c) && (c != 'l') && (c != 'L')
+ && (c != 'u') && (c != 'U')
+ && (floatflag == NOT_FLOAT || ((c != 'f') && (c != 'F')))))
+ {
+ if (c == '.')
+ {
+ if (base == 16)
+ error ("floating constant may not be in radix 16");
+ if (floatflag == AFTER_POINT)
+ {
+ error ("malformed floating constant");
+ floatflag = TOO_MANY_POINTS;
+ }
+ else
+ floatflag = AFTER_POINT;
+
+ base = 10;
+ *p++ = c = getch ();
+ /* Accept '.' as the start of a floating-point number
+ only when it is followed by a digit.
+ Otherwise, unread the following non-digit
+ and use the '.' as a structural token. */
+ if (p == token_buffer + 2 && !isdigit (c))
+ {
+ if (c == '.')
+ {
+ c = getch ();
+ if (c == '.')
+ {
+ *p++ = '.';
+ *p = '\0';
+ value = ELLIPSIS;
+ goto done;
+ }
+ error ("parse error at `..'");
+ }
+ nextchar = c;
+ token_buffer[1] = '\0';
+ value = '.';
+ goto done;
+ }
+ }
+ else
+ {
+ /* It is not a decimal point.
+ It should be a digit (perhaps a hex digit). */
+
+ if (isdigit (c))
+ {
+ c = c - '0';
+ }
+ else if (base <= 10)
+ {
+ if (c == 'e' || c == 'E')
+ {
+ base = 10;
+ floatflag = AFTER_POINT;
+ break; /* start of exponent */
+ }
+ error ("nondigits in number and not hexadecimal");
+ c = 0;
+ }
+ else if (c >= 'a')
+ {
+ c = c - 'a' + 10;
+ }
+ else
+ {
+ c = c - 'A' + 10;
+ }
+ if (c >= largest_digit)
+ largest_digit = c;
+ numdigits++;
+
+ for (count = 0; count < TOTAL_PARTS; count++)
+ {
+ parts[count] *= base;
+ if (count)
+ {
+ parts[count]
+ += (parts[count-1] >> HOST_BITS_PER_CHAR);
+ parts[count-1]
+ &= (1 << HOST_BITS_PER_CHAR) - 1;
+ }
+ else
+ parts[0] += c;
+ }
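+
+ /* E.g. reading "25" in base 10 with 8-bit parts: after `2',
+ parts[0] == 2; after `5', parts[0] == 2 * 10 + 5 == 25, and
+ every carry into parts[1] and above is still zero. */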
+
+ /* If the extra highest-order part ever gets anything in it,
+ the number is certainly too big. */
+ if (parts[TOTAL_PARTS - 1] != 0)
+ overflow = 1;
+
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = (c = getch ());
+ }
+ }
+
+ if (numdigits == 0)
+ error ("numeric constant with no digits");
+
+ if (largest_digit >= base)
+ error ("numeric constant contains digits beyond the radix");
+
+ /* Remove terminating char from the token buffer and delimit the string */
+ *--p = 0;
+
+ if (floatflag != NOT_FLOAT)
+ {
+ tree type = double_type_node;
+ char f_seen = 0;
+ char l_seen = 0;
+ int garbage_chars = 0;
+ REAL_VALUE_TYPE value;
+ jmp_buf handler;
+
+ /* Read explicit exponent if any, and put it in tokenbuf. */
+
+ if ((c == 'e') || (c == 'E'))
+ {
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ c = getch ();
+ if ((c == '+') || (c == '-'))
+ {
+ *p++ = c;
+ c = getch ();
+ }
+ if (! isdigit (c))
+ error ("floating constant exponent has no digits");
+ while (isdigit (c))
+ {
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ c = getch ();
+ }
+ }
+
+ *p = 0;
+ errno = 0;
+
+ /* Convert string to a double, checking for overflow. */
+ if (setjmp (handler))
+ {
+ error ("floating constant out of range");
+ value = dconst0;
+ }
+ else
+ {
+ set_float_handler (handler);
+ /* The second argument, machine_mode, of REAL_VALUE_ATOF
+ tells the desired precision of the binary result of
+ decimal-to-binary conversion. */
+
+ /* Read the suffixes to choose a data type. */
+ switch (c)
+ {
+ case 'f': case 'F':
+ type = float_type_node;
+ value = REAL_VALUE_ATOF (token_buffer, TYPE_MODE (type));
+ garbage_chars = -1;
+ break;
+
+ case 'l': case 'L':
+ type = long_double_type_node;
+ value = REAL_VALUE_ATOF (token_buffer, TYPE_MODE (type));
+ garbage_chars = -1;
+ break;
+
+ default:
+ value = REAL_VALUE_ATOF (token_buffer, TYPE_MODE (type));
+ }
+ set_float_handler (NULL_PTR);
+ }
+ if (pedantic
+ && (REAL_VALUE_ISINF (value)
+#ifdef ERANGE
+ || (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ && errno == ERANGE
+ /* ERANGE is also reported for underflow, so test the
+ value to distinguish overflow from that. */
+ && (REAL_VALUES_LESS (dconst1, value)
+ || REAL_VALUES_LESS (value, dconstm1)))
+#endif
+ ))
+ {
+ pedwarn ("floating point number exceeds range of `%s'",
+ IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))));
+ }
+ /* Note: garbage_chars is -1 if first char is *not* garbage. */
+ while (isalnum (c))
+ {
+ if (c == 'f' || c == 'F')
+ {
+ if (f_seen)
+ error ("two `f's in floating constant");
+ f_seen = 1;
+ }
+ if (c == 'l' || c == 'L')
+ {
+ if (l_seen)
+ error ("two `l's in floating constant");
+ l_seen = 1;
+ }
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ c = getch ();
+ garbage_chars++;
+ }
+
+ if (garbage_chars > 0)
+ error ("garbage at end of number");
+
+ /* Create a node with determined type and value. */
+ yylval.ttype = build_real (type, value);
+
+ put_back (c);
+ *p = 0;
+ }
+ else
+ {
+ tree type;
+ HOST_WIDE_INT high, low;
+ int spec_unsigned = 0;
+ int spec_long = 0;
+ int spec_long_long = 0;
+ int bytes, warn;
+
+ while (1)
+ {
+ if (c == 'u' || c == 'U')
+ {
+ if (spec_unsigned)
+ error ("two `u's in integer constant");
+ spec_unsigned = 1;
+ }
+ else if (c == 'l' || c == 'L')
+ {
+ if (spec_long)
+ {
+ if (spec_long_long)
+ error ("three `l's in integer constant");
+ else if (pedantic)
+ pedwarn ("ANSI C++ forbids long long integer constants");
+ spec_long_long = 1;
+ }
+ spec_long = 1;
+ }
+ else
+ {
+ if (isalnum (c))
+ {
+ error ("garbage at end of number");
+ while (isalnum (c))
+ {
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ c = getch ();
+ }
+ }
+ break;
+ }
+ if (p >= token_buffer + maxtoken - 3)
+ p = extend_token_buffer (p);
+ *p++ = c;
+ c = getch ();
+ }
+
+ put_back (c);
+
+ /* If the constant is not long long and it won't fit in an
+ unsigned long, or if the constant is long long and won't fit
+ in an unsigned long long, then warn that the constant is out
+ of range. */
+
+ /* ??? This assumes that long long and long integer types are
+ a multiple of 8 bits. This is better than the original code,
+ though, which assumed that long was exactly 32 bits and long
+ long was exactly 64 bits. */
+
+ if (spec_long_long)
+ bytes = TYPE_PRECISION (long_long_integer_type_node) / 8;
+ else
+ bytes = TYPE_PRECISION (long_integer_type_node) / 8;
+
+ warn = overflow;
+ for (i = bytes; i < TOTAL_PARTS; i++)
+ if (parts[i])
+ warn = 1;
+ if (warn)
+ pedwarn ("integer constant out of range");
+
+ /* This is simplified by the fact that our constant
+ is always positive. */
+ high = low = 0;
+
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR; i++)
+ {
+ high |= ((HOST_WIDE_INT) parts[i + (HOST_BITS_PER_WIDE_INT
+ / HOST_BITS_PER_CHAR)]
+ << (i * HOST_BITS_PER_CHAR));
+ low |= (HOST_WIDE_INT) parts[i] << (i * HOST_BITS_PER_CHAR);
+ }
+
+
+ yylval.ttype = build_int_2 (low, high);
+ TREE_TYPE (yylval.ttype) = long_long_unsigned_type_node;
+
+#if 0
+ /* Find the first allowable type that the value fits in. */
+ type = 0;
+ for (i = 0; i < sizeof (type_sequence) / sizeof (type_sequence[0]);
+ i++)
+ if (!(spec_long && !type_sequence[i].long_flag)
+ && !(spec_long_long && !type_sequence[i].long_long_flag)
+ && !(spec_unsigned && !type_sequence[i].unsigned_flag)
+ /* A hex or octal constant traditionally is unsigned. */
+ && !(base != 10 && flag_traditional
+ && !type_sequence[i].unsigned_flag)
+ /* A decimal constant can't be unsigned int
+ unless explicitly specified. */
+ && !(base == 10 && !spec_unsigned
+ && *type_sequence[i].node_var == unsigned_type_node))
+ if (int_fits_type_p (yylval.ttype, *type_sequence[i].node_var))
+ {
+ type = *type_sequence[i].node_var;
+ break;
+ }
+ if (flag_traditional && type == long_unsigned_type_node
+ && !spec_unsigned)
+ type = long_integer_type_node;
+
+ if (type == 0)
+ {
+ type = long_long_integer_type_node;
+ warning ("integer constant out of range");
+ }
+
+ /* Warn about some cases where the type of a given constant
+ changes from traditional C to ANSI C. */
+ if (warn_traditional)
+ {
+ tree other_type = 0;
+
+ /* This computation is the same as the previous one
+ except that flag_traditional is used backwards. */
+ for (i = 0; i < sizeof (type_sequence) / sizeof (type_sequence[0]);
+ i++)
+ if (!(spec_long && !type_sequence[i].long_flag)
+ && !(spec_long_long && !type_sequence[i].long_long_flag)
+ && !(spec_unsigned && !type_sequence[i].unsigned_flag)
+ /* A hex or octal constant traditionally is unsigned. */
+ && !(base != 10 && !flag_traditional
+ && !type_sequence[i].unsigned_flag)
+ /* A decimal constant can't be unsigned int
+ unless explicitly specified. */
+ && !(base == 10 && !spec_unsigned
+ && *type_sequence[i].node_var == unsigned_type_node))
+ if (int_fits_type_p (yylval.ttype, *type_sequence[i].node_var))
+ {
+ other_type = *type_sequence[i].node_var;
+ break;
+ }
+ if (!flag_traditional && type == long_unsigned_type_node
+ && !spec_unsigned)
+ type = long_integer_type_node;
+
+ if (other_type != 0 && other_type != type)
+ {
+ if (flag_traditional)
+ warning ("type of integer constant would be different without -traditional");
+ else
+ warning ("type of integer constant would be different with -traditional");
+ }
+ }
+
+#else /* 1 */
+ if (!spec_long && !spec_unsigned
+ && !(flag_traditional && base != 10)
+ && int_fits_type_p (yylval.ttype, integer_type_node))
+ {
+#if 0
+ if (warn_traditional && base != 10)
+ warning ("small nondecimal constant becomes signed in ANSI C++");
+#endif
+ type = integer_type_node;
+ }
+ else if (!spec_long && (base != 10 || spec_unsigned)
+ && int_fits_type_p (yylval.ttype, unsigned_type_node))
+ {
+ /* Nondecimal constants try unsigned even in traditional C. */
+ type = unsigned_type_node;
+ }
+
+ else if (!spec_unsigned && !spec_long_long
+ && int_fits_type_p (yylval.ttype, long_integer_type_node))
+ type = long_integer_type_node;
+
+ else if (! spec_long_long
+ && int_fits_type_p (yylval.ttype,
+ long_unsigned_type_node))
+ {
+#if 0
+ if (warn_traditional && !spec_unsigned)
+ warning ("large integer constant becomes unsigned in ANSI C++");
+#endif
+ if (flag_traditional && !spec_unsigned)
+ type = long_integer_type_node;
+ else
+ type = long_unsigned_type_node;
+ }
+
+ else if (! spec_unsigned
+ /* Verify value does not overflow into sign bit. */
+ && TREE_INT_CST_HIGH (yylval.ttype) >= 0
+ && int_fits_type_p (yylval.ttype,
+ long_long_integer_type_node))
+ type = long_long_integer_type_node;
+
+ else if (int_fits_type_p (yylval.ttype,
+ long_long_unsigned_type_node))
+ {
+#if 0
+ if (warn_traditional && !spec_unsigned)
+ warning ("large nondecimal constant is unsigned in ANSI C++");
+#endif
+
+ if (flag_traditional && !spec_unsigned)
+ type = long_long_integer_type_node;
+ else
+ type = long_long_unsigned_type_node;
+ }
+
+ else
+ {
+ type = long_long_integer_type_node;
+ warning ("integer constant out of range");
+
+ if (base == 10 && ! spec_unsigned && TREE_UNSIGNED (type))
+ warning ("decimal integer constant is so large that it is unsigned");
+ }
+#endif
+
+ TREE_TYPE (yylval.ttype) = type;
+ *p = 0;
+ }
+
+ value = CONSTANT; break;
+ }
+
+ case '\'':
+ char_constant:
+ {
+ register int result = 0;
+ register int num_chars = 0;
+ unsigned width = TYPE_PRECISION (char_type_node);
+ int max_chars;
+
+ if (wide_flag)
+ {
+ width = WCHAR_TYPE_SIZE;
+#ifdef MULTIBYTE_CHARS
+ max_chars = MB_CUR_MAX;
+#else
+ max_chars = 1;
+#endif
+ }
+ else
+ max_chars = TYPE_PRECISION (integer_type_node) / width;
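+
+ /* E.g. with 8-bit chars and a 32-bit int, max_chars is 32 / 8 == 4,
+ so a (non-wide) constant like 'abcd' packs four bytes and anything
+ longer is diagnosed below as too long. */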
+
+ while (1)
+ {
+ tryagain:
+
+ c = getch ();
+
+ if (c == '\'' || c == EOF)
+ break;
+
+ if (c == '\\')
+ {
+ int ignore = 0;
+ c = readescape (&ignore);
+ if (ignore)
+ goto tryagain;
+ if (width < HOST_BITS_PER_INT
+ && (unsigned) c >= (1 << width))
+ pedwarn ("escape sequence out of range for character");
+#ifdef MAP_CHARACTER
+ if (isprint (c))
+ c = MAP_CHARACTER (c);
+#endif
+ }
+ else if (c == '\n')
+ {
+ if (pedantic)
+ pedwarn ("ANSI C++ forbids newline in character constant");
+ lineno++;
+ }
+#ifdef MAP_CHARACTER
+ else
+ c = MAP_CHARACTER (c);
+#endif
+
+ num_chars++;
+ if (num_chars > maxtoken - 4)
+ extend_token_buffer (token_buffer);
+
+ token_buffer[num_chars] = c;
+
+ /* Merge character into result; ignore excess chars. */
+ if (num_chars < max_chars + 1)
+ {
+ if (width < HOST_BITS_PER_INT)
+ result = (result << width) | (c & ((1 << width) - 1));
+ else
+ result = c;
+ }
+ }
+
+ token_buffer[num_chars + 1] = '\'';
+ token_buffer[num_chars + 2] = 0;
+
+ if (c != '\'')
+ error ("malformatted character constant");
+ else if (num_chars == 0)
+ error ("empty character constant");
+ else if (num_chars > max_chars)
+ {
+ num_chars = max_chars;
+ error ("character constant too long");
+ }
+ else if (num_chars != 1 && ! flag_traditional)
+ warning ("multi-character character constant");
+
+ /* If char type is signed, sign-extend the constant. */
+ if (! wide_flag)
+ {
+ int num_bits = num_chars * width;
+ if (num_bits == 0)
+ /* We already got an error; avoid invalid shift. */
+ yylval.ttype = build_int_2 (0, 0);
+ else if (TREE_UNSIGNED (char_type_node)
+ || ((result >> (num_bits - 1)) & 1) == 0)
+ yylval.ttype
+ = build_int_2 (result & ((unsigned HOST_WIDE_INT) ~0
+ >> (HOST_BITS_PER_INT - num_bits)),
+ 0);
+ else
+ yylval.ttype
+ = build_int_2 (result | ~((unsigned HOST_WIDE_INT) ~0
+ >> (HOST_BITS_PER_INT - num_bits)),
+ -1);
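+ /* E.g. with plain char signed and 8 bits wide, '\377' gives
+ result == 0xff and num_bits == 8; the high bit is set, so the
+ constant sign-extends to -1. */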
+ if (num_chars <= 1)
+ TREE_TYPE (yylval.ttype) = char_type_node;
+ else
+ TREE_TYPE (yylval.ttype) = integer_type_node;
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* Set the initial shift state and convert the next sequence. */
+ result = 0;
+ /* In all locales L'\0' is zero and mbtowc will return zero,
+ so don't use it. */
+ if (num_chars > 1
+ || (num_chars == 1 && token_buffer[1] != '\0'))
+ {
+ wchar_t wc;
+ (void) mbtowc (NULL, NULL, 0);
+ if (mbtowc (& wc, token_buffer + 1, num_chars) == num_chars)
+ result = wc;
+ else
+ warning ("Ignoring invalid multibyte character");
+ }
+#endif
+ yylval.ttype = build_int_2 (result, 0);
+ TREE_TYPE (yylval.ttype) = wchar_type_node;
+ }
+
+ value = CONSTANT;
+ break;
+ }
+
+ case '"':
+ string_constant:
+ {
+ register char *p;
+
+ c = getch ();
+ p = token_buffer + 1;
+
+ while (c != '"' && c >= 0)
+ {
+ /* ignore_escape_flag is set for reading the filename in #line. */
+ if (!ignore_escape_flag && c == '\\')
+ {
+ int ignore = 0;
+ c = readescape (&ignore);
+ if (ignore)
+ goto skipnewline;
+ if (!wide_flag
+ && TYPE_PRECISION (char_type_node) < HOST_BITS_PER_INT
+ && c >= ((unsigned) 1 << TYPE_PRECISION (char_type_node)))
+ pedwarn ("escape sequence out of range for character");
+ }
+ else if (c == '\n')
+ {
+ if (pedantic)
+ pedwarn ("ANSI C++ forbids newline in string constant");
+ lineno++;
+ }
+
+ if (p == token_buffer + maxtoken)
+ p = extend_token_buffer (p);
+ *p++ = c;
+
+ skipnewline:
+ c = getch ();
+ if (c == EOF)
+ {
+ error ("Unterminated string");
+ break;
+ }
+ }
+ *p = 0;
+
+ /* We have read the entire constant.
+ Construct a STRING_CST for the result. */
+
+ if (wide_flag)
+ {
+ /* If this is a L"..." wide-string, convert the multibyte string
+ to a wide character string. */
+ char *widep = (char *) alloca ((p - token_buffer) * WCHAR_BYTES);
+ int len;
+
+#ifdef MULTIBYTE_CHARS
+ len = mbstowcs ((wchar_t *) widep, token_buffer + 1, p - token_buffer);
+ if (len < 0 || len >= (p - token_buffer))
+ {
+ warning ("Ignoring invalid multibyte string");
+ len = 0;
+ }
+ bzero (widep + (len * WCHAR_BYTES), WCHAR_BYTES);
+#else
+ {
+ union { long l; char c[sizeof (long)]; } u;
+ int big_endian;
+ char *wp, *cp;
+
+ /* Determine whether host is little or big endian. */
+ u.l = 1;
+ big_endian = u.c[sizeof (long) - 1];
+ wp = widep + (big_endian ? WCHAR_BYTES - 1 : 0);
+
+ bzero (widep, (p - token_buffer) * WCHAR_BYTES);
+ for (cp = token_buffer + 1; cp < p; cp++)
+ *wp = *cp, wp += WCHAR_BYTES;
+ len = p - token_buffer - 1;
+ }
+#endif
+ yylval.ttype = build_string ((len + 1) * WCHAR_BYTES, widep);
+ TREE_TYPE (yylval.ttype) = wchar_array_type_node;
+ }
+ else
+ {
+ yylval.ttype = build_string (p - token_buffer, token_buffer + 1);
+ TREE_TYPE (yylval.ttype) = char_array_type_node;
+ }
+
+ *p++ = '"';
+ *p = 0;
+
+ value = STRING; break;
+ }
+
+ case '+':
+ case '-':
+ case '&':
+ case '|':
+ case '<':
+ case '>':
+ case '*':
+ case '/':
+ case '%':
+ case '^':
+ case '!':
+ case '=':
+ {
+ register int c1;
+
+ combine:
+
+ switch (c)
+ {
+ case '+':
+ yylval.code = PLUS_EXPR; break;
+ case '-':
+ yylval.code = MINUS_EXPR; break;
+ case '&':
+ yylval.code = BIT_AND_EXPR; break;
+ case '|':
+ yylval.code = BIT_IOR_EXPR; break;
+ case '*':
+ yylval.code = MULT_EXPR; break;
+ case '/':
+ yylval.code = TRUNC_DIV_EXPR; break;
+ case '%':
+ yylval.code = TRUNC_MOD_EXPR; break;
+ case '^':
+ yylval.code = BIT_XOR_EXPR; break;
+ case LSHIFT:
+ yylval.code = LSHIFT_EXPR; break;
+ case RSHIFT:
+ yylval.code = RSHIFT_EXPR; break;
+ case '<':
+ yylval.code = LT_EXPR; break;
+ case '>':
+ yylval.code = GT_EXPR; break;
+ }
+
+ token_buffer[1] = c1 = getch ();
+ token_buffer[2] = 0;
+
+ if (c1 == '=')
+ {
+ switch (c)
+ {
+ case '<':
+ value = ARITHCOMPARE; yylval.code = LE_EXPR; goto done;
+ case '>':
+ value = ARITHCOMPARE; yylval.code = GE_EXPR; goto done;
+ case '!':
+ value = EQCOMPARE; yylval.code = NE_EXPR; goto done;
+ case '=':
+ value = EQCOMPARE; yylval.code = EQ_EXPR; goto done;
+ }
+ value = ASSIGN; goto done;
+ }
+ else if (c == c1)
+ switch (c)
+ {
+ case '+':
+ value = PLUSPLUS; goto done;
+ case '-':
+ value = MINUSMINUS; goto done;
+ case '&':
+ value = ANDAND; goto done;
+ case '|':
+ value = OROR; goto done;
+ case '<':
+ c = LSHIFT;
+ goto combine;
+ case '>':
+ c = RSHIFT;
+ goto combine;
+ }
+ else if ((c == '-') && (c1 == '>'))
+ {
+ nextchar = skip_white_space (getch ());
+ if (nextchar == '*')
+ {
+ nextchar = -1;
+ value = POINTSAT_STAR;
+ }
+ else
+ value = POINTSAT;
+ goto done;
+ }
+ else if (c1 == '?' && (c == '<' || c == '>'))
+ {
+ token_buffer[3] = 0;
+
+ c1 = getch ();
+ yylval.code = (c == '<' ? MIN_EXPR : MAX_EXPR);
+ if (c1 == '=')
+ {
+ /* <?= or >?= expression. */
+ token_buffer[2] = c1;
+ value = ASSIGN;
+ }
+ else
+ {
+ value = MIN_MAX;
+ nextchar = c1;
+ }
+ if (flag_ansi)
+ pedwarn ("use of `operator %s' is not standard C++",
+ token_buffer);
+ goto done;
+ }
+
+ nextchar = c1;
+ token_buffer[1] = 0;
+
+ value = c;
+ goto done;
+ }
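+
+ /* Example of the combine loop above: `<<=' arrives as c == '<' with
+ c1 == '<', so c becomes LSHIFT and control re-enters `combine';
+ the following `=' then yields ASSIGN with yylval.code == LSHIFT_EXPR. */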
+
+ case ':':
+ c = getch ();
+ if (c == ':')
+ {
+ token_buffer[1] = ':';
+ token_buffer[2] = '\0';
+ value = SCOPE;
+ yylval.itype = 1;
+ }
+ else
+ {
+ nextchar = c;
+ value = ':';
+ }
+ break;
+
+ case 0:
+ /* Don't make yyparse think this is eof. */
+ value = 1;
+ break;
+
+ case '(':
+ /* Try, weakly, to handle casts to pointers to functions. */
+ nextchar = skip_white_space (getch ());
+ if (nextchar == '*')
+ {
+ int next_c = skip_white_space (getch ());
+ if (next_c == ')')
+ {
+ nextchar = -1;
+ yylval.ttype = build1 (INDIRECT_REF, 0, 0);
+ value = PAREN_STAR_PAREN;
+ }
+ else
+ {
+ put_back (next_c);
+ value = c;
+ }
+ }
+ else if (nextchar == ')')
+ {
+ nextchar = -1;
+ yylval.ttype = NULL_TREE;
+ value = LEFT_RIGHT;
+ }
+ else value = c;
+ break;
+
+ default:
+ value = c;
+ }
+
+done:
+/* yylloc.last_line = lineno; */
+#ifdef GATHER_STATISTICS
+ token_count[value] += 1;
+#endif
+
+ return value;
+}
+
+typedef enum
+{
+ d_kind, t_kind, s_kind, r_kind, e_kind, c_kind,
+ id_kind, op_id_kind, perm_list_kind, temp_list_kind,
+ vec_kind, x_kind, lang_decl, lang_type, all_kinds
+} tree_node_kind;
+extern int tree_node_counts[];
+extern int tree_node_sizes[];
+extern char *tree_node_kind_names[];
+
+/* Place to save freed lang_decls which were allocated on the
+ permanent_obstack. @@ Not currently used. */
+tree free_lang_decl_chain;
+
+tree
+build_lang_decl (code, name, type)
+ enum tree_code code;
+ tree name;
+ tree type;
+{
+ register tree t = build_decl (code, name, type);
+ struct obstack *obstack = current_obstack;
+ register int i = sizeof (struct lang_decl) / sizeof (int);
+ register int *pi;
+
+ if (! TREE_PERMANENT (t))
+ obstack = saveable_obstack;
+ else
+ /* Could be that saveable is permanent and current is not. */
+ obstack = &permanent_obstack;
+
+ if (free_lang_decl_chain && obstack == &permanent_obstack)
+ {
+ pi = (int *)free_lang_decl_chain;
+ free_lang_decl_chain = TREE_CHAIN (free_lang_decl_chain);
+ }
+ else
+ pi = (int *) obstack_alloc (obstack, sizeof (struct lang_decl));
+
+ while (i > 0)
+ pi[--i] = 0;
+
+ DECL_LANG_SPECIFIC (t) = (struct lang_decl *) pi;
+ LANG_DECL_PERMANENT ((struct lang_decl *) pi)
+ = obstack == &permanent_obstack;
+ my_friendly_assert (LANG_DECL_PERMANENT ((struct lang_decl *) pi)
+ == TREE_PERMANENT (t), 234);
+ DECL_MAIN_VARIANT (t) = t;
+ if (current_lang_name == lang_name_cplusplus)
+ {
+ DECL_LANGUAGE (t) = lang_cplusplus;
+#if 0
+#ifndef NO_AUTO_OVERLOAD
+ if (code == FUNCTION_DECL && name != 0
+ && ! (IDENTIFIER_LENGTH (name) == 4
+ && IDENTIFIER_POINTER (name)[0] == 'm'
+ && strcmp (IDENTIFIER_POINTER (name), "main") == 0)
+ && ! (IDENTIFIER_LENGTH (name) > 10
+ && IDENTIFIER_POINTER (name)[0] == '_'
+ && IDENTIFIER_POINTER (name)[1] == '_'
+ && strncmp (IDENTIFIER_POINTER (name)+2, "builtin_", 8) == 0))
+ TREE_OVERLOADED (name) = 1;
+#endif
+#endif
+ }
+ else if (current_lang_name == lang_name_c)
+ DECL_LANGUAGE (t) = lang_c;
+ else my_friendly_abort (64);
+
+#if 0 /* not yet, should get fixed properly later */
+ if (code == TYPE_DECL)
+ {
+ tree id;
+ id = get_identifier (build_overload_name (type, 1, 1));
+ DECL_ASSEMBLER_NAME (t) = id;
+ }
+
+#endif
+#ifdef GATHER_STATISTICS
+ tree_node_counts[(int)lang_decl] += 1;
+ tree_node_sizes[(int)lang_decl] += sizeof(struct lang_decl);
+#endif
+
+ return t;
+}
+
+tree
+build_lang_field_decl (code, name, type)
+ enum tree_code code;
+ tree name;
+ tree type;
+{
+ extern struct obstack *current_obstack, *saveable_obstack;
+ register tree t = build_decl (code, name, type);
+ struct obstack *obstack = current_obstack;
+ register int i = sizeof (struct lang_decl_flags) / sizeof (int);
+ register int *pi;
+#if 0 /* not yet, should get fixed properly later */
+
+ if (code == TYPE_DECL)
+ {
+ tree id;
+ id = get_identifier (build_overload_name (type, 1, 1));
+ DECL_ASSEMBLER_NAME (t) = id;
+ }
+#endif
+
+ if (! TREE_PERMANENT (t))
+ obstack = saveable_obstack;
+ else
+ my_friendly_assert (obstack == &permanent_obstack, 235);
+
+ pi = (int *) obstack_alloc (obstack, sizeof (struct lang_decl_flags));
+ while (i > 0)
+ pi[--i] = 0;
+
+ DECL_LANG_SPECIFIC (t) = (struct lang_decl *) pi;
+ return t;
+}
+
+void
+copy_lang_decl (node)
+ tree node;
+{
+ int size;
+ int *pi;
+
+ if (TREE_CODE (node) == FIELD_DECL)
+ size = sizeof (struct lang_decl_flags);
+ else
+ size = sizeof (struct lang_decl);
+ pi = (int *)obstack_alloc (&permanent_obstack, size);
+ bcopy ((char *)DECL_LANG_SPECIFIC (node), (char *)pi, size);
+ DECL_LANG_SPECIFIC (node) = (struct lang_decl *)pi;
+}
+
+tree
+make_lang_type (code)
+ enum tree_code code;
+{
+ extern struct obstack *current_obstack, *saveable_obstack;
+ register tree t = make_node (code);
+ struct obstack *obstack = current_obstack;
+ register int i = sizeof (struct lang_type) / sizeof (int);
+ register int *pi;
+
+ /* Set up some flags that give proper default behavior. */
+ IS_AGGR_TYPE (t) = 1;
+
+ if (! TREE_PERMANENT (t))
+ obstack = saveable_obstack;
+ else
+ my_friendly_assert (obstack == &permanent_obstack, 236);
+
+ pi = (int *) obstack_alloc (obstack, sizeof (struct lang_type));
+ while (i > 0)
+ pi[--i] = 0;
+
+ TYPE_LANG_SPECIFIC (t) = (struct lang_type *) pi;
+ CLASSTYPE_AS_LIST (t) = build_tree_list (NULL_TREE, t);
+ SET_CLASSTYPE_INTERFACE_UNKNOWN_X (t, interface_unknown);
+ CLASSTYPE_INTERFACE_ONLY (t) = interface_only;
+ CLASSTYPE_VBASE_SIZE (t) = integer_zero_node;
+ TYPE_BINFO (t) = make_binfo (integer_zero_node, t, NULL_TREE, NULL_TREE,
+ NULL_TREE);
+ CLASSTYPE_BINFO_AS_LIST (t) = build_tree_list (NULL_TREE, TYPE_BINFO (t));
+
+ /* Make sure this is laid out, for ease of use later.
+ In the presence of parse errors, the normal way of assuring
+ this might never get executed, so we lay it out *immediately*. */
+ build_pointer_type (t);
+
+#ifdef GATHER_STATISTICS
+ tree_node_counts[(int)lang_type] += 1;
+ tree_node_sizes[(int)lang_type] += sizeof(struct lang_type);
+#endif
+
+ return t;
+}
+
+void
+copy_decl_lang_specific (decl)
+ tree decl;
+{
+ extern struct obstack *current_obstack, *saveable_obstack;
+ register int *old = (int *)DECL_LANG_SPECIFIC (decl);
+ struct obstack *obstack = current_obstack;
+ register int i = sizeof (struct lang_decl) / sizeof (int);
+ register int *pi;
+
+ if (! TREE_PERMANENT (decl))
+ obstack = saveable_obstack;
+ else
+ my_friendly_assert (obstack == &permanent_obstack, 237);
+
+ pi = (int *) obstack_alloc (obstack, sizeof (struct lang_decl));
+ while (i-- > 0)
+ pi[i] = old[i];
+
+ DECL_LANG_SPECIFIC (decl) = (struct lang_decl *) pi;
+
+#ifdef GATHER_STATISTICS
+ tree_node_counts[(int)lang_decl] += 1;
+ tree_node_sizes[(int)lang_decl] += sizeof(struct lang_decl);
+#endif
+}
+
+void
+dump_time_statistics ()
+{
+ register tree prev = 0, decl, next;
+ int this_time = my_get_run_time ();
+ TREE_INT_CST_LOW (IDENTIFIER_LOCAL_VALUE (this_filename_time))
+ += this_time - body_time;
+
+ fprintf (stderr, "\n******\n");
+ print_time ("header files (total)", header_time);
+ print_time ("main file (total)", this_time - body_time);
+ fprintf (stderr, "ratio = %g : 1\n",
+ (double)header_time / (double)(this_time - body_time));
+ fprintf (stderr, "\n******\n");
+
+ for (decl = filename_times; decl; decl = next)
+ {
+ next = IDENTIFIER_GLOBAL_VALUE (decl);
+ IDENTIFIER_GLOBAL_VALUE (decl) = prev;
+ prev = decl;
+ }
+
+ for (decl = prev; decl; decl = IDENTIFIER_GLOBAL_VALUE (decl))
+ print_time (IDENTIFIER_POINTER (decl),
+ TREE_INT_CST_LOW (IDENTIFIER_LOCAL_VALUE (decl)));
+}
+
+void
+compiler_error (s, v, v2)
+ char *s;
+ HOST_WIDE_INT v, v2; /* @@also used as pointer */
+{
+ char buf[1024];
+ sprintf (buf, s, v, v2);
+ error_with_file_and_line (input_filename, lineno, "%s (compiler error)", buf);
+}
+
+void
+compiler_error_with_decl (decl, s)
+ tree decl;
+ char *s;
+{
+ char *name;
+ count_error (0);
+
+ report_error_function (0);
+
+ if (TREE_CODE (decl) == PARM_DECL)
+ fprintf (stderr, "%s:%d: ",
+ DECL_SOURCE_FILE (DECL_CONTEXT (decl)),
+ DECL_SOURCE_LINE (DECL_CONTEXT (decl)));
+ else
+ fprintf (stderr, "%s:%d: ",
+ DECL_SOURCE_FILE (decl), DECL_SOURCE_LINE (decl));
+
+ name = lang_printable_name (decl);
+ if (name)
+ fprintf (stderr, s, name);
+ else
+ fprintf (stderr, s, "((anonymous))");
+ fprintf (stderr, " (compiler error)\n");
+}
+
+void
+yyerror (string)
+ char *string;
+{
+ extern int end_of_file;
+ char buf[200];
+
+ strcpy (buf, string);
+
+ /* We can't print string and character constants well
+ because the token_buffer contains the result of processing escapes. */
+ if (end_of_file)
+ strcat (buf, input_redirected ()
+ ? " at end of saved text"
+ : " at end of input");
+ else if (token_buffer[0] == 0)
+ strcat (buf, " at null character");
+ else if (token_buffer[0] == '"')
+ strcat (buf, " before string constant");
+ else if (token_buffer[0] == '\'')
+ strcat (buf, " before character constant");
+ else if (token_buffer[0] < 040 || (unsigned char) token_buffer[0] >= 0177)
+ sprintf (buf + strlen (buf), " before character 0%o",
+ (unsigned char) token_buffer[0]);
+ else
+ strcat (buf, " before `%s'");
+
+ error (buf, token_buffer);
+}
diff --git a/gnu/usr.bin/cc/cc1plus/lex.h b/gnu/usr.bin/cc/cc1plus/lex.h
new file mode 100644
index 0000000..3da4635
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/lex.h
@@ -0,0 +1,130 @@
+/* Define constants and variables for communication with parse.y.
+ Copyright (C) 1987, 1992, 1993 Free Software Foundation, Inc.
+ Hacked by Michael Tiemann (tiemann@cygnus.com)
+ and by Brendan Kehoe (brendan@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY. No author or distributor
+accepts responsibility to anyone for the consequences of using it
+or for whether it serves any particular purpose or works at all,
+unless he says so in writing. Refer to the GNU CC General Public
+License for full details.
+
+Everyone is granted permission to copy, modify and redistribute
+GNU CC, but only under the conditions described in the
+GNU CC General Public License. A copy of this license is
+supposed to have been given to you along with GNU CC so you
+can know your rights and responsibilities. It should be in a
+file named COPYING. Among other things, the copyright notice
+and this notice must be preserved on all copies. */
+
+
+
+enum rid
+{
+ RID_UNUSED,
+ RID_INT,
+ RID_BOOL,
+ RID_CHAR,
+ RID_WCHAR,
+ RID_FLOAT,
+ RID_DOUBLE,
+ RID_VOID,
+
+ /* C++ extension */
+ RID_CLASS,
+ RID_RECORD,
+ RID_UNION,
+ RID_ENUM,
+ RID_LONGLONG,
+
+ /* This is where grokdeclarator starts its search when setting the specbits.
+ The first seven are ordered by frequency of use, as found
+ when building libg++. */
+
+ RID_EXTERN,
+ RID_CONST,
+ RID_LONG,
+ RID_TYPEDEF,
+ RID_UNSIGNED,
+ RID_SHORT,
+ RID_INLINE,
+
+ RID_STATIC,
+
+ RID_REGISTER,
+ RID_VOLATILE,
+ RID_FRIEND,
+ RID_VIRTUAL,
+ RID_PUBLIC,
+ RID_PRIVATE,
+ RID_PROTECTED,
+ RID_SIGNED,
+ RID_EXCEPTION,
+ RID_RAISES,
+ RID_AUTO,
+ RID_MUTABLE,
+ RID_TEMPLATE,
+ RID_SIGNATURE,
+ /* Before adding enough to get up to 64, the RIDBIT_* macros
+ will have to be changed a little. */
+ RID_MAX
+};
+
+#define NORID RID_UNUSED
+
+#define RID_FIRST_MODIFIER RID_EXTERN
+
+/* The type that can represent all values of RIDBIT. */
+/* We assume that we can stick at least 32 bits into this. */
+typedef struct { unsigned long idata[2]; }
+ RID_BIT_TYPE;
+
+/* Be careful: all these evaluate N twice. */
+#define RIDBIT_SETP(N, V) (((unsigned long)1 << (int) ((N)%32)) \
+ & (V).idata[(N)/32])
+#define RIDBIT_NOTSETP(NN, VV) (! RIDBIT_SETP (NN, VV))
+#define RIDBIT_SET(N, V) do { \
+ (V).idata[(N)/32] \
+ |= ((unsigned long)1 << (int) ((N)%32)); \
+ } while (0)
+#define RIDBIT_RESET(N, V) do { \
+ (V).idata[(N)/32] \
+ &= ~((unsigned long)1 << (int) ((N)%32)); \
+ } while (0)
+#define RIDBIT_RESET_ALL(V) do { \
+ (V).idata[0] = 0; \
+ (V).idata[1] = 0; \
+ } while (0)
+#define RIDBIT_ANY_SET(V) ((V).idata[0] || (V).idata[1])
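+
+/* Usage sketch for these macros (never compiled); this is roughly the
+ pattern grokdeclarator uses for its specbits. */
+#if 0
+static int
+example_ridbit_usage ()
+{
+ RID_BIT_TYPE specbits;
+
+ RIDBIT_RESET_ALL (specbits);
+ RIDBIT_SET (RID_CONST, specbits);
+ /* RIDBIT_SETP yields the raw masked bit, not a normalized 0 or 1. */
+ return (RIDBIT_NOTSETP (RID_VIRTUAL, specbits)
+ && RIDBIT_SETP (RID_CONST, specbits) != 0);
+}
+#endif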
+
+/* The elements of `ridpointers' are identifier nodes
+ for the reserved type names and storage classes.
+ It is indexed by a RID_... value. */
+extern tree ridpointers[(int) RID_MAX];
+
+/* The declaration found for the last IDENTIFIER token read in.
+ yylex must look this up to detect typedefs, which get token type TYPENAME,
+ so it is left around in case the identifier is not a typedef but is
+ used in a context which makes it a reference to a variable. */
+extern tree lastiddecl;
+
+extern char *token_buffer; /* Pointer to token buffer. */
+
+/* Back-door communication channel to the lexer. */
+extern int looking_for_typename;
+extern int looking_for_template;
+
+/* Tell the lexer where to look for names. */
+extern tree got_scope;
+
+/* Pending language change.
+ Positive is push count, negative is pop count. */
+extern int pending_lang_change;
+
+extern tree make_pointer_declarator (), make_reference_declarator ();
+extern void reinit_parse_for_function ();
+extern void reinit_parse_for_method ();
+extern int yylex ();
diff --git a/gnu/usr.bin/cc/cc1plus/method.c b/gnu/usr.bin/cc/cc1plus/method.c
new file mode 100644
index 0000000..f64a16a
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/method.c
@@ -0,0 +1,1948 @@
+/* Handle the hair of processing (but not expanding) inline functions.
+ Also manage function and variable name overloading.
+ Copyright (C) 1987, 1989, 1992, 1993 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+
+ This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#ifndef PARM_CAN_BE_ARRAY_TYPE
+#define PARM_CAN_BE_ARRAY_TYPE 1
+#endif
+
+/* Handle method declarations. */
+#include <stdio.h>
+#include "config.h"
+#include "tree.h"
+#include "cp-tree.h"
+#include "class.h"
+#include "obstack.h"
+#include <ctype.h>
+#include "rtl.h"
+#include "expr.h"
+#include "output.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+
+/* TREE_LIST of the current inline functions that need to be
+ processed. */
+struct pending_inline *pending_inlines;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* Obstack where we build text strings for overloading, etc. */
+static struct obstack scratch_obstack;
+static char *scratch_firstobj;
+
+# define OB_INIT() (scratch_firstobj ? (obstack_free (&scratch_obstack, scratch_firstobj), 0) : 0)
+# define OB_PUTC(C) (obstack_1grow (&scratch_obstack, (C)))
+# define OB_PUTC2(C1,C2) \
+ (obstack_1grow (&scratch_obstack, (C1)), obstack_1grow (&scratch_obstack, (C2)))
+# define OB_PUTS(S) (obstack_grow (&scratch_obstack, (S), sizeof (S) - 1))
+# define OB_PUTID(ID) \
+ (obstack_grow (&scratch_obstack, IDENTIFIER_POINTER (ID), \
+ IDENTIFIER_LENGTH (ID)))
+# define OB_PUTCP(S) (obstack_grow (&scratch_obstack, (S), strlen (S)))
+# define OB_FINISH() (obstack_1grow (&scratch_obstack, '\0'))
+
+#ifdef NO_AUTO_OVERLOAD
+int is_overloaded ();
+#endif
+
+void
+init_method ()
+{
+ gcc_obstack_init (&scratch_obstack);
+ scratch_firstobj = (char *)obstack_alloc (&scratch_obstack, 0);
+}
+
+/* This must be large enough to hold any printed integer or floating-point
+ value. */
+static char digit_buffer[128];
+
+/* Move inline function definitions out of structure so that they
+ can be processed normally. TYPE is the class we are working
+ from; FRIEND_LIST is its list of friend declarations. We handle
+ friend methods here, after saving away their inline function
+ definitions (if any). */
+
+void
+do_inline_function_hair (type, friend_list)
+ tree type, friend_list;
+{
+ tree method = TYPE_METHODS (type);
+
+ if (method && TREE_CODE (method) == TREE_VEC)
+ {
+ if (TREE_VEC_ELT (method, 0))
+ method = TREE_VEC_ELT (method, 0);
+ else
+ method = TREE_VEC_ELT (method, 1);
+ }
+
+ while (method)
+ {
+ /* Do inline member functions. */
+ struct pending_inline *info = DECL_PENDING_INLINE_INFO (method);
+ if (info)
+ {
+ tree args;
+
+ my_friendly_assert (info->fndecl == method, 238);
+ args = DECL_ARGUMENTS (method);
+ while (args)
+ {
+ DECL_CONTEXT (args) = method;
+ args = TREE_CHAIN (args);
+ }
+
+ /* Allow this decl to be seen in global scope. Don't do this for
+ local class methods, though. */
+ if (! current_function_decl)
+ IDENTIFIER_GLOBAL_VALUE (DECL_ASSEMBLER_NAME (method)) = method;
+ }
+ method = TREE_CHAIN (method);
+ }
+ while (friend_list)
+ {
+ tree fndecl = TREE_VALUE (friend_list);
+ struct pending_inline *info = DECL_PENDING_INLINE_INFO (fndecl);
+ if (info)
+ {
+ tree args;
+
+ my_friendly_assert (info->fndecl == fndecl, 239);
+ args = DECL_ARGUMENTS (fndecl);
+ while (args)
+ {
+ DECL_CONTEXT (args) = fndecl;
+ args = TREE_CHAIN (args);
+ }
+
+ /* Allow this decl to be seen in global scope */
+ if (! current_function_decl)
+ IDENTIFIER_GLOBAL_VALUE (DECL_ASSEMBLER_NAME (fndecl)) = fndecl;
+ }
+
+ friend_list = TREE_CHAIN (friend_list);
+ }
+}
+
+/* Report an argument type mismatch between the best declared function
+ we could find and the current argument list that we have. */
+void
+report_type_mismatch (cp, parmtypes, name_kind)
+ struct candidate *cp;
+ tree parmtypes;
+ char *name_kind;
+{
+ int i = cp->u.bad_arg;
+ tree ttf, tta;
+ char *tmp_firstobj;
+
+ switch (i)
+ {
+ case -4:
+ my_friendly_assert (TREE_CODE (cp->function) == TEMPLATE_DECL, 240);
+ cp_error ("type unification failed for function template `%#D'",
+ cp->function);
+ return;
+
+ case -3:
+ if (TYPE_READONLY (TREE_TYPE (TREE_VALUE (parmtypes))))
+ cp_error ("call to const %s `%#D' with non-const object", name_kind,
+ cp->function);
+ else
+ cp_error ("call to non-const %s `%#D' with const object", name_kind,
+ cp->function);
+ return;
+ case -2:
+ cp_error ("too few arguments for %s `%#D'", name_kind, cp->function);
+ return;
+ case -1:
+ cp_error ("too many arguments for %s `%#D'", name_kind, cp->function);
+ return;
+ case 0:
+ if (TREE_CODE (TREE_TYPE (cp->function)) == METHOD_TYPE)
+ {
+ /* Happens when we have an ambiguous base class. */
+ my_friendly_assert (get_binfo (DECL_CLASS_CONTEXT (cp->function),
+ TREE_TYPE (TREE_TYPE (TREE_VALUE (parmtypes))), 1) == error_mark_node,
+ 241);
+ return;
+ }
+ }
+
+ ttf = TYPE_ARG_TYPES (TREE_TYPE (cp->function));
+ tta = parmtypes;
+
+ while (i-- > 0)
+ {
+ ttf = TREE_CHAIN (ttf);
+ tta = TREE_CHAIN (tta);
+ }
+
+ OB_INIT ();
+ OB_PUTS ("bad argument ");
+ sprintf (digit_buffer, "%d", cp->u.bad_arg
+ - (TREE_CODE (TREE_TYPE (cp->function)) == METHOD_TYPE)
+ + 1);
+ OB_PUTCP (digit_buffer);
+
+ OB_PUTS (" for function `");
+ OB_PUTCP (decl_as_string (cp->function, 1));
+ OB_PUTS ("' (type was ");
+
+ /* Reset `i' so that type printing routines do the right thing. */
+ if (tta)
+ {
+ enum tree_code code = TREE_CODE (TREE_TYPE (TREE_VALUE (tta)));
+ if (code == ERROR_MARK)
+ OB_PUTS ("(failed type instantiation)");
+ else
+ {
+ i = (code == FUNCTION_TYPE || code == METHOD_TYPE);
+ OB_PUTCP (type_as_string (TREE_TYPE (TREE_VALUE (tta)), 1));
+ }
+ }
+ else OB_PUTS ("void");
+ OB_PUTC (')');
+ OB_FINISH ();
+
+ tmp_firstobj = (char *)alloca (obstack_object_size (&scratch_obstack));
+ bcopy (obstack_base (&scratch_obstack), tmp_firstobj,
+ obstack_object_size (&scratch_obstack));
+ error (tmp_firstobj);
+}
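+
+/* The message assembled above reads roughly:
+     bad argument 2 for function `void f (int, int)' (type was char *)
+   (the function and types here are only an illustration). The argument
+   number is adjusted so that the implicit `this' is not counted for
+   methods. */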
+
+/* Here is where overload code starts. */
+
+/* Array of types seen so far in top-level call to `build_overload_name'.
+ Allocated and deallocated by caller. */
+static tree *typevec;
+
+/* Number of types interned by `build_overload_name' so far. */
+static int maxtype;
+
+/* Number of occurrences of last type seen. */
+static int nrepeats;
+
+/* Nonzero if we should not try folding parameter types. */
+static int nofold;
+
+#define ALLOCATE_TYPEVEC(PARMTYPES) \
+ do { maxtype = 0, nrepeats = 0; \
+ typevec = (tree *)alloca (list_length (PARMTYPES) * sizeof (tree)); } while (0)
+
+#define DEALLOCATE_TYPEVEC(PARMTYPES) \
+ do { tree t = (PARMTYPES); \
+ while (t) { TREE_USED (TREE_VALUE (t)) = 0; t = TREE_CHAIN (t); } \
+ } while (0)
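+
+/* The typevec machinery above implements the back-reference compression
+   used when mangling parameter lists: each parameter type seen in the
+   current top-level call is recorded in TYPEVEC, and a type already seen
+   is emitted as a short `T<index>' or `N<count><index>' code instead of
+   being spelled out again. ALLOCATE_TYPEVEC uses alloca, so the vector
+   is reclaimed automatically when the caller returns; DEALLOCATE_TYPEVEC
+   just clears the TREE_USED marks left on the parameter types. */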
+
+/* Code to concatenate an asciified integer to a string. */
+static
+#ifdef __GNUC__
+__inline
+#endif
+void
+icat (i)
+ int i;
+{
+ /* Handle this case first, to go really quickly. For many common values,
+ the result of i/10 below is 1. */
+ if (i == 1)
+ {
+ OB_PUTC ('1');
+ return;
+ }
+
+ if (i < 0)
+ {
+ OB_PUTC ('m');
+ i = -i;
+ }
+ if (i < 10)
+ OB_PUTC ('0' + i);
+ else
+ {
+ icat (i / 10);
+ OB_PUTC ('0' + (i % 10));
+ }
+}
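+
+/* For example, icat (237) appends "237" to the obstack and icat (-4)
+   appends "m4"; negatives are spelled with a leading `m' so that the
+   mangled name remains a valid identifier. */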
+
+static
+#ifdef __GNUC__
+__inline
+#endif
+void
+flush_repeats (type)
+ tree type;
+{
+ int tindex = 0;
+
+ while (typevec[tindex] != type)
+ tindex++;
+
+ if (nrepeats > 1)
+ {
+ OB_PUTC ('N');
+ icat (nrepeats);
+ if (nrepeats > 9)
+ OB_PUTC ('_');
+ }
+ else
+ OB_PUTC ('T');
+ nrepeats = 0;
+ icat (tindex);
+ if (tindex > 9)
+ OB_PUTC ('_');
+}
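+
+/* So a single repeat of the type in typevec slot 2 comes out as "T2",
+   while three consecutive repeats of it come out as "N32" (repeat count
+   first, then the slot index, with a `_' after either number once it
+   needs more than one digit). */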
+
+static void build_overload_identifier ();
+
+static void
+build_overload_nested_name (context)
+ tree context;
+{
+ /* We use DECL_NAME here, because pushtag now sets the DECL_ASSEMBLER_NAME. */
+ tree name = DECL_NAME (context);
+ if (DECL_CONTEXT (context))
+ {
+ context = DECL_CONTEXT (context);
+ if (TREE_CODE_CLASS (TREE_CODE (context)) == 't')
+ context = TYPE_NAME (context);
+ build_overload_nested_name (context);
+ }
+ build_overload_identifier (name);
+}
+
+static void
+build_overload_value (type, value)
+ tree type, value;
+{
+ while (TREE_CODE (value) == NON_LVALUE_EXPR
+ || TREE_CODE (value) == NOP_EXPR)
+ value = TREE_OPERAND (value, 0);
+ my_friendly_assert (TREE_CODE (type) == PARM_DECL, 242);
+ type = TREE_TYPE (type);
+ switch (TREE_CODE (type))
+ {
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ {
+ my_friendly_assert (TREE_CODE (value) == INTEGER_CST, 243);
+ if (TYPE_PRECISION (TREE_TYPE (value)) == 2 * HOST_BITS_PER_WIDE_INT)
+ {
+ if (tree_int_cst_lt (value, integer_zero_node))
+ {
+ OB_PUTC ('m');
+ value = build_int_2 (~ TREE_INT_CST_LOW (value),
+ - TREE_INT_CST_HIGH (value));
+ }
+ if (TREE_INT_CST_HIGH (value)
+ != (TREE_INT_CST_LOW (value) >> (HOST_BITS_PER_WIDE_INT - 1)))
+ {
+ /* need to print a DImode value in decimal */
+ sorry ("conversion of long long as PT parameter");
+ }
+ /* else fall through to print in smaller mode */
+ }
+ /* Wordsize or smaller */
+ icat (TREE_INT_CST_LOW (value));
+ return;
+ }
+ case BOOLEAN_TYPE:
+ {
+ icat (TREE_INT_CST_LOW (value));
+ return;
+ }
+#ifndef REAL_IS_NOT_DOUBLE
+ case REAL_TYPE:
+ {
+ REAL_VALUE_TYPE val;
+ char *bufp = digit_buffer;
+ extern char *index ();
+
+ my_friendly_assert (TREE_CODE (value) == REAL_CST, 244);
+ val = TREE_REAL_CST (value);
+ if (val < 0)
+ {
+ val = -val;
+ *bufp++ = 'm';
+ }
+ sprintf (bufp, "%e", val);
+ bufp = (char *) index (bufp, 'e');
+ if (!bufp)
+ strcat (digit_buffer, "e0");
+ else
+ {
+ char *p;
+ bufp++;
+ if (*bufp == '-')
+ {
+ *bufp++ = 'm';
+ }
+ p = bufp;
+ if (*p == '+')
+ p++;
+ while (*p == '0')
+ p++;
+ if (*p == 0)
+ {
+ *bufp++ = '0';
+ *bufp = 0;
+ }
+ else if (p != bufp)
+ {
+ while (*p)
+ *bufp++ = *p++;
+ *bufp = 0;
+ }
+ }
+ OB_PUTCP (digit_buffer);
+ return;
+ }
+#endif
+ case POINTER_TYPE:
+ value = TREE_OPERAND (value, 0);
+ if (TREE_CODE (value) == VAR_DECL)
+ {
+ my_friendly_assert (DECL_NAME (value) != 0, 245);
+ build_overload_identifier (DECL_NAME (value));
+ return;
+ }
+ else if (TREE_CODE (value) == FUNCTION_DECL)
+ {
+ my_friendly_assert (DECL_NAME (value) != 0, 246);
+ build_overload_identifier (DECL_NAME (value));
+ return;
+ }
+ else
+ my_friendly_abort (71);
+ break; /* not really needed */
+
+ default:
+ sorry ("conversion of %s as template parameter",
+ tree_code_name [(int) TREE_CODE (type)]);
+ my_friendly_abort (72);
+ }
+}
+
+static void
+build_overload_identifier (name)
+ tree name;
+{
+ if (IDENTIFIER_TEMPLATE (name))
+ {
+ tree template, parmlist, arglist, tname;
+ int i, nparms;
+ template = IDENTIFIER_TEMPLATE (name);
+ arglist = TREE_VALUE (template);
+ template = TREE_PURPOSE (template);
+ tname = DECL_NAME (template);
+ parmlist = DECL_ARGUMENTS (template);
+ nparms = TREE_VEC_LENGTH (parmlist);
+ OB_PUTC ('t');
+ icat (IDENTIFIER_LENGTH (tname));
+ OB_PUTID (tname);
+ icat (nparms);
+ for (i = 0; i < nparms; i++)
+ {
+ tree parm = TREE_VEC_ELT (parmlist, i);
+ tree arg = TREE_VEC_ELT (arglist, i);
+ if (TREE_CODE (parm) == IDENTIFIER_NODE)
+ {
+ /* This parameter is a type. */
+ OB_PUTC ('Z');
+ build_overload_name (arg, 0, 0);
+ }
+ else
+ {
+ /* It's a PARM_DECL. */
+ build_overload_name (TREE_TYPE (parm), 0, 0);
+ build_overload_value (parm, arg);
+ }
+ }
+ }
+ else
+ {
+ icat (IDENTIFIER_LENGTH (name));
+ OB_PUTID (name);
+ }
+}
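+
+/* For a template instance the encoding above should give, e.g.,
+   "t3Vec1Zi" for Vec<int> (`Vec' is only an illustration): `t', the
+   template name preceded by its length, the parameter count, then `Z'
+   plus a mangled type for each type argument, or a mangled type plus
+   value for each constant argument. */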
+
+/* Given a list of parameters in PARMTYPES, create an unambiguous
+ overload string. Should distinguish any type that C (or C++) can
+ distinguish. I.e., pointers to functions are treated correctly.
+
+ Caller must deal with whether a final `e' goes on the end or not.
+
+ Any default conversions must take place before this function
+ is called.
+
+ BEGIN and END control initialization and finalization of the
+ obstack where we build the string. */
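+
+/* For illustration, the single-character codes used below include `i'
+   int, `l' long, `s' short, `c' char, `x' long long, `w' wchar_t, `b'
+   bool, `f' float, `d' double, `r' long double and `v' void, with `U',
+   `C' and `V' prefixes for unsigned, const and volatile, and `P', `R',
+   `F', `M', `A' and `O' introducing pointers, references, functions,
+   pointers to members, arrays and offset types. A parameter list like
+   (const char *, int) should therefore come out as "PCci". */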
+
+char *
+build_overload_name (parmtypes, begin, end)
+ tree parmtypes;
+ int begin, end;
+{
+ int just_one;
+ tree parmtype;
+
+ if (begin) OB_INIT ();
+
+ if ((just_one = (TREE_CODE (parmtypes) != TREE_LIST)))
+ {
+ parmtype = parmtypes;
+ goto only_one;
+ }
+
+ while (parmtypes)
+ {
+ parmtype = TREE_VALUE (parmtypes);
+
+ only_one:
+
+ if (! nofold)
+ {
+ if (! just_one)
+ /* Every argument gets counted. */
+ typevec[maxtype++] = parmtype;
+
+ if (TREE_USED (parmtype))
+ {
+ if (! just_one && parmtype == typevec[maxtype-2])
+ nrepeats++;
+ else
+ {
+ if (nrepeats)
+ flush_repeats (parmtype);
+ if (! just_one && TREE_CHAIN (parmtypes)
+ && parmtype == TREE_VALUE (TREE_CHAIN (parmtypes)))
+ nrepeats++;
+ else
+ {
+ int tindex = 0;
+
+ while (typevec[tindex] != parmtype)
+ tindex++;
+ OB_PUTC ('T');
+ icat (tindex);
+ if (tindex > 9)
+ OB_PUTC ('_');
+ }
+ }
+ goto next;
+ }
+ if (nrepeats)
+ flush_repeats (typevec[maxtype-2]);
+ if (! just_one
+ /* Only cache types which take more than one character. */
+ && (parmtype != TYPE_MAIN_VARIANT (parmtype)
+ || (TREE_CODE (parmtype) != INTEGER_TYPE
+ && TREE_CODE (parmtype) != REAL_TYPE)))
+ TREE_USED (parmtype) = 1;
+ }
+
+ if (TYPE_PTRMEMFUNC_P (parmtype))
+ parmtype = TYPE_PTRMEMFUNC_FN_TYPE (parmtype);
+
+ if (TREE_READONLY (parmtype))
+ OB_PUTC ('C');
+ if (TREE_CODE (parmtype) == INTEGER_TYPE
+ && TYPE_MAIN_VARIANT (parmtype) == unsigned_type (TYPE_MAIN_VARIANT (parmtype)))
+ OB_PUTC ('U');
+ if (TYPE_VOLATILE (parmtype))
+ OB_PUTC ('V');
+
+ switch (TREE_CODE (parmtype))
+ {
+ case OFFSET_TYPE:
+ OB_PUTC ('O');
+ build_overload_name (TYPE_OFFSET_BASETYPE (parmtype), 0, 0);
+ OB_PUTC ('_');
+ build_overload_name (TREE_TYPE (parmtype), 0, 0);
+ break;
+
+ case REFERENCE_TYPE:
+ OB_PUTC ('R');
+ goto more;
+
+ case ARRAY_TYPE:
+#if PARM_CAN_BE_ARRAY_TYPE
+ {
+ tree length;
+
+ OB_PUTC ('A');
+ if (TYPE_DOMAIN (parmtype) == NULL_TREE)
+ error ("pointer or reference to array of unknown bound in parm type");
+ else
+ {
+ length = array_type_nelts (parmtype);
+ if (TREE_CODE (length) == INTEGER_CST)
+ icat (TREE_INT_CST_LOW (length) + 1);
+ }
+ OB_PUTC ('_');
+ goto more;
+ }
+#else
+ OB_PUTC ('P');
+ goto more;
+#endif
+
+ case POINTER_TYPE:
+ OB_PUTC ('P');
+ more:
+ build_overload_name (TREE_TYPE (parmtype), 0, 0);
+ break;
+
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ {
+ tree firstarg = TYPE_ARG_TYPES (parmtype);
+ /* Otherwise have to implement reentrant typevecs,
+ unmark and remark types, etc. */
+ int old_nofold = nofold;
+ nofold = 1;
+
+ if (nrepeats)
+ flush_repeats (typevec[maxtype-1]);
+
+ /* @@ It may be possible to pass in a function type
+ which is not preceded by a 'P'. */
+ if (TREE_CODE (parmtype) == FUNCTION_TYPE)
+ {
+ OB_PUTC ('F');
+ if (firstarg == NULL_TREE)
+ OB_PUTC ('e');
+ else if (firstarg == void_list_node)
+ OB_PUTC ('v');
+ else
+ build_overload_name (firstarg, 0, 0);
+ }
+ else
+ {
+ int constp = TYPE_READONLY (TREE_TYPE (TREE_VALUE (firstarg)));
+ int volatilep = TYPE_VOLATILE (TREE_TYPE (TREE_VALUE (firstarg)));
+ OB_PUTC ('M');
+ firstarg = TREE_CHAIN (firstarg);
+
+ build_overload_name (TYPE_METHOD_BASETYPE (parmtype), 0, 0);
+ if (constp)
+ OB_PUTC ('C');
+ if (volatilep)
+ OB_PUTC ('V');
+
+ /* For cfront 2.0 compatibility. */
+ OB_PUTC ('F');
+
+ if (firstarg == NULL_TREE)
+ OB_PUTC ('e');
+ else if (firstarg == void_list_node)
+ OB_PUTC ('v');
+ else
+ build_overload_name (firstarg, 0, 0);
+ }
+
+ /* Separate args from return type. */
+ OB_PUTC ('_');
+ build_overload_name (TREE_TYPE (parmtype), 0, 0);
+ nofold = old_nofold;
+ break;
+ }
+
+ case INTEGER_TYPE:
+ parmtype = TYPE_MAIN_VARIANT (parmtype);
+ if (parmtype == integer_type_node
+ || parmtype == unsigned_type_node)
+ OB_PUTC ('i');
+ else if (parmtype == long_integer_type_node
+ || parmtype == long_unsigned_type_node)
+ OB_PUTC ('l');
+ else if (parmtype == short_integer_type_node
+ || parmtype == short_unsigned_type_node)
+ OB_PUTC ('s');
+ else if (parmtype == signed_char_type_node)
+ {
+ OB_PUTC ('S');
+ OB_PUTC ('c');
+ }
+ else if (parmtype == char_type_node
+ || parmtype == unsigned_char_type_node)
+ OB_PUTC ('c');
+ else if (parmtype == wchar_type_node)
+ OB_PUTC ('w');
+ else if (parmtype == long_long_integer_type_node
+ || parmtype == long_long_unsigned_type_node)
+ OB_PUTC ('x');
+#if 0
+ /* it would seem there is no way to enter these in source code,
+ yet. (mrs) */
+ else if (parmtype == long_long_long_integer_type_node
+ || parmtype == long_long_long_unsigned_type_node)
+ OB_PUTC ('q');
+#endif
+ else
+ my_friendly_abort (73);
+ break;
+
+ case BOOLEAN_TYPE:
+ OB_PUTC ('b');
+ break;
+
+ case REAL_TYPE:
+ parmtype = TYPE_MAIN_VARIANT (parmtype);
+ if (parmtype == long_double_type_node)
+ OB_PUTC ('r');
+ else if (parmtype == double_type_node)
+ OB_PUTC ('d');
+ else if (parmtype == float_type_node)
+ OB_PUTC ('f');
+ else my_friendly_abort (74);
+ break;
+
+ case VOID_TYPE:
+ if (! just_one)
+ {
+#if 0
+ extern tree void_list_node;
+
+ /* See if anybody is wasting memory. */
+ my_friendly_assert (parmtypes == void_list_node, 247);
+#endif
+ /* This is the end of a parameter list. */
+ if (end) OB_FINISH ();
+ return (char *)obstack_base (&scratch_obstack);
+ }
+ OB_PUTC ('v');
+ break;
+
+ case ERROR_MARK: /* not right, but nothing is anyway */
+ break;
+
+ /* have to do these */
+ case UNION_TYPE:
+ case RECORD_TYPE:
+ if (! just_one)
+ /* Make this type signature look incompatible
+ with AT&T. */
+ OB_PUTC ('G');
+ goto common;
+ case ENUMERAL_TYPE:
+ common:
+ {
+ tree name = TYPE_NAME (parmtype);
+ int i = 1;
+
+ if (TREE_CODE (name) == TYPE_DECL)
+ {
+ tree context = name;
+ while (DECL_CONTEXT (context))
+ {
+ i += 1;
+ context = DECL_CONTEXT (context);
+ if (TREE_CODE_CLASS (TREE_CODE (context)) == 't')
+ context = TYPE_NAME (context);
+ }
+ name = DECL_NAME (name);
+ }
+ my_friendly_assert (TREE_CODE (name) == IDENTIFIER_NODE, 248);
+ if (i > 1)
+ {
+ OB_PUTC ('Q');
+ if (i > 9)
+ OB_PUTC ('_');
+ icat (i);
+ if (i > 9)
+ OB_PUTC ('_');
+ build_overload_nested_name (TYPE_NAME (parmtype));
+ }
+ else
+ build_overload_identifier (name);
+ break;
+ }
+
+ case UNKNOWN_TYPE:
+ /* This will take some work. */
+ OB_PUTC ('?');
+ break;
+
+ case TEMPLATE_TYPE_PARM:
+ case TEMPLATE_CONST_PARM:
+ case UNINSTANTIATED_P_TYPE:
+ /* We don't ever want this output, but it's inconvenient not to
+ be able to build the string. This should cause assembler
+ errors we'll notice. */
+ {
+ static int n;
+ sprintf (digit_buffer, " *%d", n++);
+ OB_PUTCP (digit_buffer);
+ }
+ break;
+
+ default:
+ my_friendly_abort (75);
+ }
+
+ next:
+ if (just_one) break;
+ parmtypes = TREE_CHAIN (parmtypes);
+ }
+ if (! just_one)
+ {
+ if (nrepeats)
+ flush_repeats (typevec[maxtype-1]);
+
+ /* To get here, parms must end with `...'. */
+ OB_PUTC ('e');
+ }
+
+ if (end) OB_FINISH ();
+ return (char *)obstack_base (&scratch_obstack);
+}
+
+/* Generate an identifier that encodes the (ANSI) exception TYPE. */
+
+/* This should be part of `ansi_opname', or at least be defined by the std. */
+#define EXCEPTION_NAME_PREFIX "__ex"
+#define EXCEPTION_NAME_LENGTH 4
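+
+/* E.g., the identifier built below for type `int' should be "__exi":
+   the prefix followed by the mangled type name. */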
+
+tree
+cplus_exception_name (type)
+ tree type;
+{
+ OB_INIT ();
+ OB_PUTS (EXCEPTION_NAME_PREFIX);
+ return get_identifier (build_overload_name (type, 0, 1));
+}
+
+/* Change the name of a function definition so that it may be
+ overloaded. NAME is the name of the function to overload,
+ PARMS is the parameter list (which determines what name the
+ final function obtains).
+
+ FOR_METHOD is 1 if this overload is being performed
+ for a method, rather than a function type. It is 2 if
+ this overload is being performed for a constructor. */
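+
+/* For illustration (the names are hypothetical): a global `f (int)'
+   should become "f__Fi", a member function `Foo::bar (int)' should
+   become "bar__3Fooi", and a constructor `Foo::Foo (int)' just
+   "__3Fooi", since FOR_METHOD == 2 suppresses the function name. */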
+tree
+build_decl_overload (dname, parms, for_method)
+ tree dname;
+ tree parms;
+ int for_method;
+{
+ char *name = IDENTIFIER_POINTER (dname);
+
+ /* member operators new and delete look like methods at this point. */
+ if (! for_method && parms != NULL_TREE && TREE_CODE (parms) == TREE_LIST)
+ {
+ if (TREE_VALUE (parms) == sizetype
+ && TREE_CHAIN (parms) == void_list_node)
+ {
+ if (dname == ansi_opname[(int) NEW_EXPR])
+ return get_identifier ("__builtin_new");
+ else if (dname == ansi_opname[(int) VEC_NEW_EXPR])
+ return get_identifier ("__builtin_vec_new");
+ }
+ else if (dname == ansi_opname[(int) DELETE_EXPR])
+ return get_identifier ("__builtin_delete");
+ else if (dname == ansi_opname[(int) VEC_DELETE_EXPR])
+ return get_identifier ("__builtin_vec_delete");
+ }
+
+ OB_INIT ();
+ if (for_method != 2)
+ OB_PUTCP (name);
+ /* Otherwise, we can divine that this is a constructor,
+ and figure out its name without any extra encoding. */
+
+ OB_PUTC2 ('_', '_');
+ if (for_method)
+ {
+#if 0
+ /* We can get away without doing this. */
+ OB_PUTC ('M');
+#endif
+ {
+ tree this_type = TREE_VALUE (parms);
+
+ if (TREE_CODE (this_type) == RECORD_TYPE) /* a signature pointer */
+ parms = temp_tree_cons (NULL_TREE, SIGNATURE_TYPE (this_type),
+ TREE_CHAIN (parms));
+ else
+ parms = temp_tree_cons (NULL_TREE, TREE_TYPE (this_type),
+ TREE_CHAIN (parms));
+ }
+ }
+ else
+ OB_PUTC ('F');
+
+ if (parms == NULL_TREE)
+ OB_PUTC2 ('e', '\0');
+ else if (parms == void_list_node)
+ OB_PUTC2 ('v', '\0');
+ else
+ {
+ ALLOCATE_TYPEVEC (parms);
+ nofold = 0;
+ if (for_method)
+ {
+ build_overload_name (TREE_VALUE (parms), 0, 0);
+
+ typevec[maxtype++] = TREE_VALUE (parms);
+ TREE_USED (TREE_VALUE (parms)) = 1;
+
+ if (TREE_CHAIN (parms))
+ build_overload_name (TREE_CHAIN (parms), 0, 1);
+ else
+ OB_PUTC2 ('e', '\0');
+ }
+ else
+ build_overload_name (parms, 0, 1);
+ DEALLOCATE_TYPEVEC (parms);
+ }
+ {
+ tree n = get_identifier (obstack_base (&scratch_obstack));
+ if (IDENTIFIER_OPNAME_P (dname))
+ IDENTIFIER_OPNAME_P (n) = 1;
+ return n;
+ }
+}
+
+/* Build an overload name for the type expression TYPE. */
+tree
+build_typename_overload (type)
+ tree type;
+{
+ tree id;
+
+ OB_INIT ();
+ OB_PUTID (ansi_opname[(int) TYPE_EXPR]);
+ nofold = 1;
+ build_overload_name (type, 0, 1);
+ id = get_identifier (obstack_base (&scratch_obstack));
+ IDENTIFIER_OPNAME_P (id) = 1;
+#if 0
+ IDENTIFIER_GLOBAL_VALUE (id) = TYPE_NAME (type);
+#endif
+ TREE_TYPE (id) = type;
+ return id;
+}
+
+#ifndef NO_DOLLAR_IN_LABEL
+#define T_DESC_FORMAT "TD$"
+#define I_DESC_FORMAT "ID$"
+#define M_DESC_FORMAT "MD$"
+#else
+#if !defined(NO_DOT_IN_LABEL)
+#define T_DESC_FORMAT "TD."
+#define I_DESC_FORMAT "ID."
+#define M_DESC_FORMAT "MD."
+#else
+#define T_DESC_FORMAT "__t_desc_"
+#define I_DESC_FORMAT "__i_desc_"
+#define M_DESC_FORMAT "__m_desc_"
+#endif
+#endif
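+
+/* The three spellings above exist only to pick a separator the target
+   assembler will accept: `$' where dollar signs are legal in labels,
+   `.' where dots are, and a plain `__'-prefixed name otherwise. */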
+
+/* Build an overload name for the type expression TYPE. */
+tree
+build_t_desc_overload (type)
+ tree type;
+{
+ OB_INIT ();
+ OB_PUTS (T_DESC_FORMAT);
+ nofold = 1;
+
+#if 0
+ /* Use a different format if the type isn't defined yet. */
+ if (TYPE_SIZE (type) == NULL_TREE)
+ {
+ char *p;
+ int changed;
+
+ for (p = tname; *p; p++)
+ if (isupper (*p))
+ {
+ changed = 1;
+ *p = tolower (*p);
+ }
+ /* If there's no change, we have an inappropriate T_DESC_FORMAT. */
+ my_friendly_assert (changed != 0, 249);
+ }
+#endif
+
+ build_overload_name (type, 0, 1);
+ return get_identifier (obstack_base (&scratch_obstack));
+}
+
+/* Top-level interface to explicit overload requests. Allow NAME
+ to be overloaded. Error if NAME is already declared for the current
+ scope. Warning if function is redundantly overloaded. */
+
+void
+declare_overloaded (name)
+ tree name;
+{
+#ifdef NO_AUTO_OVERLOAD
+ if (is_overloaded (name))
+ warning ("function `%s' already declared overloaded",
+ IDENTIFIER_POINTER (name));
+ else if (IDENTIFIER_GLOBAL_VALUE (name))
+ error ("overloading function `%s' that is already defined",
+ IDENTIFIER_POINTER (name));
+ else
+ {
+ TREE_OVERLOADED (name) = 1;
+ IDENTIFIER_GLOBAL_VALUE (name) = build_tree_list (name, NULL_TREE);
+ TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (name)) = unknown_type_node;
+ }
+#else
+ if (current_lang_name == lang_name_cplusplus)
+ {
+ if (0)
+ warning ("functions are implicitly overloaded in C++");
+ }
+ else if (current_lang_name == lang_name_c)
+ error ("overloading function `%s' cannot be done in C language context");
+ else
+ my_friendly_abort (76);
+#endif
+}
+
+#ifdef NO_AUTO_OVERLOAD
+/* Check to see if NAME is overloaded. For first approximation,
+ check to see if its TREE_OVERLOADED is set. This is used on
+ IDENTIFIER nodes. */
+int
+is_overloaded (name)
+ tree name;
+{
+ /* @@ */
+ return (TREE_OVERLOADED (name)
+ && (! IDENTIFIER_CLASS_VALUE (name) || current_class_type == 0)
+ && ! IDENTIFIER_LOCAL_VALUE (name));
+}
+#endif
+
+/* Given a tree_code CODE, and some arguments (at least one),
+ attempt to use an overloaded operator on the arguments.
+
+ For unary operators, only the first argument need be checked.
+ For binary operators, both arguments may need to be checked.
+
+ Member functions can convert class references to class pointers,
+ for one-level deep indirection. More than that is not supported.
+ Operators `[]', `()', and `->' must be member functions.
+
+ We call function call building calls with LOOKUP_COMPLAIN if they
+ are our only hope. This is true when we see a vanilla operator
+ applied to something of aggregate type. If this fails, we are free
+ to return `error_mark_node', because we will have reported the
+ error.
+
+ Operators NEW and DELETE overload in funny ways: operator new takes
+ a single `size' parameter, and operator delete takes a pointer to the
+ storage being deleted. When overloading these operators, success is
+ assumed. If there is a failure, report an error message and return
+ `error_mark_node'. */
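+
+/* For example, for `a < b' where `a' has class type, the code below
+   looks up `operator<' both in the class of `a' (FIELDS1) and at global
+   scope (GLOBAL_FN), and then builds either a method call on `a' with
+   `b' as argument or an ordinary overloaded call with both operands. */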
+
+/* NOSTRICT */
+tree
+build_opfncall (code, flags, xarg1, xarg2, arg3)
+ enum tree_code code;
+ int flags;
+ tree xarg1, xarg2, arg3;
+{
+ tree rval = 0;
+ tree arg1, arg2;
+ tree type1, type2, fnname;
+ tree fields1 = 0, parms = 0;
+ tree global_fn;
+ int try_second;
+ int binary_is_unary;
+
+ if (xarg1 == error_mark_node)
+ return error_mark_node;
+
+ if (code == COND_EXPR)
+ {
+ if (TREE_CODE (xarg2) == ERROR_MARK
+ || TREE_CODE (arg3) == ERROR_MARK)
+ return error_mark_node;
+ }
+ if (code == COMPONENT_REF)
+ if (TREE_CODE (TREE_TYPE (xarg1)) == POINTER_TYPE)
+ return rval;
+
+ /* First, see if we can work with the first argument */
+ type1 = TREE_TYPE (xarg1);
+
+ /* Some tree codes have length > 1, but we really only want to
+ overload them if their first argument has a user defined type. */
+ switch (code)
+ {
+ case PREINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ case COMPONENT_REF:
+ binary_is_unary = 1;
+ try_second = 0;
+ break;
+
+ /* ARRAY_REFs and CALL_EXPRs must overload successfully.
+ If they do not, return error_mark_node instead of NULL_TREE. */
+ case ARRAY_REF:
+ if (xarg2 == error_mark_node)
+ return error_mark_node;
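+ /* Fall through. */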
+ case CALL_EXPR:
+ rval = error_mark_node;
+ binary_is_unary = 0;
+ try_second = 0;
+ break;
+
+ case VEC_NEW_EXPR:
+ case NEW_EXPR:
+ {
+ tree args = tree_cons (NULL_TREE, xarg2, arg3);
+ fnname = ansi_opname[(int) code];
+ if (flags & LOOKUP_GLOBAL)
+ return build_overload_call (fnname, args, flags & LOOKUP_COMPLAIN,
+ (struct candidate *)0);
+
+ rval = build_method_call
+ (build_indirect_ref (build1 (NOP_EXPR, xarg1, error_mark_node),
+ "new"),
+ fnname, args, NULL_TREE, flags);
+ if (rval == error_mark_node)
+ /* User might declare fancy operator new, but invoke it
+ like the standard one. */
+ return rval;
+
+ TREE_TYPE (rval) = xarg1;
+ TREE_CALLS_NEW (rval) = 1;
+ return rval;
+ }
+ break;
+
+ case VEC_DELETE_EXPR:
+ case DELETE_EXPR:
+ {
+ fnname = ansi_opname[(int) code];
+ if (flags & LOOKUP_GLOBAL)
+ return build_overload_call (fnname,
+ build_tree_list (NULL_TREE, xarg1),
+ flags & LOOKUP_COMPLAIN,
+ (struct candidate *)0);
+
+ rval = build_method_call
+ (build_indirect_ref (build1 (NOP_EXPR, TREE_TYPE (xarg1),
+ error_mark_node),
+ NULL_PTR),
+ fnname, tree_cons (NULL_TREE, xarg1,
+ build_tree_list (NULL_TREE, xarg2)),
+ NULL_TREE, flags);
+ /* This happens when the user mis-declares `operator delete'.
+ Should now be impossible. */
+ my_friendly_assert (rval != error_mark_node, 250);
+ TREE_TYPE (rval) = void_type_node;
+ return rval;
+ }
+ break;
+
+ default:
+ binary_is_unary = 0;
+ try_second = tree_code_length [(int) code] == 2;
+ if (try_second && xarg2 == error_mark_node)
+ return error_mark_node;
+ break;
+ }
+
+ if (try_second && xarg2 == error_mark_node)
+ return error_mark_node;
+
+ /* Whatever it was, we do not know how to deal with it. */
+ if (type1 == NULL_TREE)
+ return rval;
+
+ if (TREE_CODE (type1) == OFFSET_TYPE)
+ type1 = TREE_TYPE (type1);
+
+ if (TREE_CODE (type1) == REFERENCE_TYPE)
+ {
+ arg1 = convert_from_reference (xarg1);
+ type1 = TREE_TYPE (arg1);
+ }
+ else
+ {
+ arg1 = xarg1;
+ }
+
+ if (!IS_AGGR_TYPE (type1) || TYPE_PTRMEMFUNC_P (type1))
+ {
+ /* Try to fail. First, fail if unary */
+ if (! try_second)
+ return rval;
+ /* Second, see if second argument is non-aggregate. */
+ type2 = TREE_TYPE (xarg2);
+ if (TREE_CODE (type2) == OFFSET_TYPE)
+ type2 = TREE_TYPE (type2);
+ if (TREE_CODE (type2) == REFERENCE_TYPE)
+ {
+ arg2 = convert_from_reference (xarg2);
+ type2 = TREE_TYPE (arg2);
+ }
+ else
+ {
+ arg2 = xarg2;
+ }
+
+ if (!IS_AGGR_TYPE (type2))
+ return rval;
+ try_second = 0;
+ }
+
+ if (try_second)
+ {
+ /* First arg may succeed; see whether second should. */
+ type2 = TREE_TYPE (xarg2);
+ if (TREE_CODE (type2) == OFFSET_TYPE)
+ type2 = TREE_TYPE (type2);
+ if (TREE_CODE (type2) == REFERENCE_TYPE)
+ {
+ arg2 = convert_from_reference (xarg2);
+ type2 = TREE_TYPE (arg2);
+ }
+ else
+ {
+ arg2 = xarg2;
+ }
+
+ if (! IS_AGGR_TYPE (type2))
+ try_second = 0;
+ }
+
+ if (type1 == unknown_type_node
+ || (try_second && TREE_TYPE (xarg2) == unknown_type_node))
+ {
+ /* This will not be implemented in the foreseeable future. */
+ return rval;
+ }
+
+ if (code == MODIFY_EXPR)
+ fnname = ansi_assopname[(int) TREE_CODE (arg3)];
+ else
+ fnname = ansi_opname[(int) code];
+
+ global_fn = lookup_name_nonclass (fnname);
+
+ /* This is the last point where we will accept failure. This
+ may be too eager if we wish an overloaded operator not to match,
+ and would rather have a normal operator applied to a
+ type-converted argument. */
+
+ if (IS_AGGR_TYPE (type1))
+ {
+ fields1 = lookup_fnfields (TYPE_BINFO (type1), fnname, 0);
+ /* ARM $13.4.7, prefix/postfix ++/--. */
+ if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
+ {
+ xarg2 = integer_zero_node;
+ binary_is_unary = 0;
+
+ if (fields1)
+ {
+ tree t, t2;
+ int have_postfix = 0;
+
+ /* Look for an `operator++ (int)'. If they didn't have
+ one, then we fall back to the old way of doing things. */
+ for (t = TREE_VALUE (fields1); t ; t = TREE_CHAIN (t))
+ {
+ t2 = TYPE_ARG_TYPES (TREE_TYPE (t));
+ if (TREE_CHAIN (t2) != NULL_TREE
+ && TREE_VALUE (TREE_CHAIN (t2)) == integer_type_node)
+ {
+ have_postfix = 1;
+ break;
+ }
+ }
+
+ if (! have_postfix)
+ {
+ char *op = (code == POSTINCREMENT_EXPR) ? "++" : "--";
+
+ /* There's probably a LOT of code in the world that
+ relies upon this old behavior. So we'll only give this
+ warning when we've been given -pedantic. A few
+ releases after 2.4, we'll convert this to be a pedwarn
+ or something else more appropriate. */
+ if (pedantic)
+ warning ("no `operator%s (int)' declared for postfix `%s'",
+ op, op);
+ xarg2 = NULL_TREE;
+ binary_is_unary = 1;
+ }
+ }
+ }
+ }
+
+ if (fields1 == NULL_TREE && global_fn == NULL_TREE)
+ return rval;
+
+ /* If RVAL winds up being `error_mark_node', we will return
+ that... There is no way that normal semantics of these
+ operators will succeed. */
+
+ /* This argument may be an uncommitted OFFSET_REF. This is
+ the case for example when dealing with static class members
+ which are referenced from their class name rather than
+ from a class instance. */
+ if (TREE_CODE (xarg1) == OFFSET_REF
+ && TREE_CODE (TREE_OPERAND (xarg1, 1)) == VAR_DECL)
+ xarg1 = TREE_OPERAND (xarg1, 1);
+ if (try_second && xarg2 && TREE_CODE (xarg2) == OFFSET_REF
+ && TREE_CODE (TREE_OPERAND (xarg2, 1)) == VAR_DECL)
+ xarg2 = TREE_OPERAND (xarg2, 1);
+
+ if (global_fn)
+ flags |= LOOKUP_GLOBAL;
+
+ if (code == CALL_EXPR)
+ {
+ /* This can only be a member function. */
+ return build_method_call (xarg1, fnname, xarg2,
+ NULL_TREE, LOOKUP_NORMAL);
+ }
+ else if (tree_code_length[(int) code] == 1 || binary_is_unary)
+ {
+ parms = NULL_TREE;
+ rval = build_method_call (xarg1, fnname, NULL_TREE, NULL_TREE, flags);
+ }
+ else if (code == COND_EXPR)
+ {
+ parms = tree_cons (0, xarg2, build_tree_list (NULL_TREE, arg3));
+ rval = build_method_call (xarg1, fnname, parms, NULL_TREE, flags);
+ }
+ else if (code == METHOD_CALL_EXPR)
+ {
+ /* must be a member function. */
+ parms = tree_cons (NULL_TREE, xarg2, arg3);
+ return build_method_call (xarg1, fnname, parms, NULL_TREE,
+ LOOKUP_NORMAL);
+ }
+ else if (fields1)
+ {
+ parms = build_tree_list (NULL_TREE, xarg2);
+ rval = build_method_call (xarg1, fnname, parms, NULL_TREE, flags);
+ }
+ else
+ {
+ parms = tree_cons (NULL_TREE, xarg1,
+ build_tree_list (NULL_TREE, xarg2));
+ rval = build_overload_call (fnname, parms, flags,
+ (struct candidate *)0);
+ }
+
+ return rval;
+}
+
+/* This function takes an identifier, ID, and attempts to figure out what
+ it means. There are a number of possible scenarios, presented in increasing
+ order of hair:
+
+ 1) not in a class's scope
+ 2) in class's scope, member name of the class's method
+ 3) in class's scope, but not a member name of the class
+ 4) in class's scope, member name of a class's variable
+
+ NAME is $1 from the bison rule. It is an IDENTIFIER_NODE.
+ VALUE is $$ from the bison rule. It is the value returned by lookup_name ($1).
+ yychar is the pending input character (suitably encoded :-).
+
+ As a last ditch, try to look up the name as a label and return that
+ address.
+
+ Values which are declared as being of REFERENCE_TYPE are
+ automatically dereferenced here (as a hack to make the
+ compiler faster). */
+
+tree
+hack_identifier (value, name, yychar)
+ tree value, name;
+ int yychar;
+{
+ tree type;
+
+ if (TREE_CODE (value) == ERROR_MARK)
+ {
+ if (current_class_name)
+ {
+ tree fields = lookup_fnfields (TYPE_BINFO (current_class_type), name, 1);
+ if (fields == error_mark_node)
+ return error_mark_node;
+ if (fields)
+ {
+ tree fndecl;
+
+ fndecl = TREE_VALUE (fields);
+ my_friendly_assert (TREE_CODE (fndecl) == FUNCTION_DECL, 251);
+ if (DECL_CHAIN (fndecl) == NULL_TREE)
+ {
+ warning ("methods cannot be converted to function pointers");
+ return fndecl;
+ }
+ else
+ {
+ error ("ambiguous request for method pointer `%s'",
+ IDENTIFIER_POINTER (name));
+ return error_mark_node;
+ }
+ }
+ }
+ if (flag_labels_ok && IDENTIFIER_LABEL_VALUE (name))
+ {
+ return IDENTIFIER_LABEL_VALUE (name);
+ }
+ return error_mark_node;
+ }
+
+ type = TREE_TYPE (value);
+ if (TREE_CODE (value) == FIELD_DECL)
+ {
+ if (current_class_decl == NULL_TREE)
+ {
+ error ("request for member `%s' in static member function",
+ IDENTIFIER_POINTER (DECL_NAME (value)));
+ return error_mark_node;
+ }
+ TREE_USED (current_class_decl) = 1;
+ if (yychar == '(')
+ if (! ((TYPE_LANG_SPECIFIC (type)
+ && TYPE_OVERLOADS_CALL_EXPR (type))
+ || (TREE_CODE (type) == REFERENCE_TYPE
+ && TYPE_LANG_SPECIFIC (TREE_TYPE (type))
+ && TYPE_OVERLOADS_CALL_EXPR (TREE_TYPE (type))))
+ && TREE_CODE (type) != FUNCTION_TYPE
+ && TREE_CODE (type) != METHOD_TYPE
+ && !TYPE_PTRMEMFUNC_P (type)
+ && (TREE_CODE (type) != POINTER_TYPE
+ || (TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE
+ && TREE_CODE (TREE_TYPE (type)) != METHOD_TYPE)))
+ {
+ error ("component `%s' is not a method",
+ IDENTIFIER_POINTER (name));
+ return error_mark_node;
+ }
+ /* Mark so that if we are in a constructor, and then find that
+ this field was initialized by a base initializer,
+ we can emit an error message. */
+ TREE_USED (value) = 1;
+ return build_component_ref (C_C_D, name, 0, 1);
+ }
+
+ if (really_overloaded_fn (value))
+ {
+ tree t = get_first_fn (value);
+ while (t)
+ {
+ assemble_external (t);
+ TREE_USED (t) = 1;
+ t = DECL_CHAIN (t);
+ }
+ }
+ else if (TREE_CODE (value) == TREE_LIST)
+ {
+ tree t = value;
+ while (t && TREE_CODE (t) == TREE_LIST)
+ {
+ assemble_external (TREE_VALUE (t));
+ TREE_USED (t) = 1;
+ t = TREE_CHAIN (t);
+ }
+ }
+ else
+ {
+ assemble_external (value);
+ TREE_USED (value) = 1;
+ }
+
+ if (TREE_CODE_CLASS (TREE_CODE (value)) == 'd' && DECL_NONLOCAL (value))
+ {
+ if (DECL_LANG_SPECIFIC (value)
+ && DECL_CLASS_CONTEXT (value) != current_class_type)
+ {
+ tree path;
+ enum access_type access;
+ register tree context
+ = (TREE_CODE (value) == FUNCTION_DECL && DECL_VIRTUAL_P (value))
+ ? DECL_CLASS_CONTEXT (value)
+ : DECL_CONTEXT (value);
+
+ get_base_distance (context, current_class_type, 0, &path);
+ if (path)
+ {
+ access = compute_access (path, value);
+ if (access != access_public)
+ {
+ if (TREE_CODE (value) == VAR_DECL)
+ error ("static member `%s' is %s",
+ IDENTIFIER_POINTER (name),
+ TREE_PRIVATE (value) ? "private" :
+ "from a private base class");
+ else
+ error ("enum `%s' is from private base class",
+ IDENTIFIER_POINTER (name));
+ return error_mark_node;
+ }
+ }
+ }
+ return value;
+ }
+ if (TREE_CODE (value) == TREE_LIST && TREE_NONLOCAL_FLAG (value))
+ {
+ if (type == 0)
+ {
+ error ("request for member `%s' is ambiguous in multiple inheritance lattice",
+ IDENTIFIER_POINTER (name));
+ return error_mark_node;
+ }
+
+ return value;
+ }
+
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ {
+ my_friendly_assert (TREE_CODE (value) == VAR_DECL
+ || TREE_CODE (value) == PARM_DECL
+ || TREE_CODE (value) == RESULT_DECL, 252);
+ if (DECL_REFERENCE_SLOT (value))
+ return DECL_REFERENCE_SLOT (value);
+ }
+ return value;
+}
+
+
+#if 0
+/* Given an object OF, and a type conversion operator COMPONENT
+ build a call to the conversion operator, if a call is requested,
+ or return the address (as a pointer to member function) if one is not.
+
+ OF can be a TYPE_DECL or any kind of datum that would normally
+ be passed to `build_component_ref'. It may also be NULL_TREE,
+ in which case `current_class_type' and `current_class_decl'
+ provide default values.
+
+ BASETYPE_PATH, if non-null, is the path of basetypes
+ to go through before we get to the instance of interest.
+
+ PROTECT says whether we apply C++ scoping rules or not. */
+tree
+build_component_type_expr (of, component, basetype_path, protect)
+ tree of, component, basetype_path;
+ int protect;
+{
+ tree cname = NULL_TREE;
+ tree tmp, last;
+ tree name;
+ int flags = protect ? LOOKUP_NORMAL : LOOKUP_COMPLAIN;
+
+ if (of)
+ my_friendly_assert (IS_AGGR_TYPE (TREE_TYPE (of)), 253);
+ my_friendly_assert (TREE_CODE (component) == TYPE_EXPR, 254);
+
+ tmp = TREE_OPERAND (component, 0);
+ last = NULL_TREE;
+
+ while (tmp)
+ {
+ switch (TREE_CODE (tmp))
+ {
+ case CALL_EXPR:
+ if (last)
+ TREE_OPERAND (last, 0) = TREE_OPERAND (tmp, 0);
+ else
+ TREE_OPERAND (component, 0) = TREE_OPERAND (tmp, 0);
+
+ last = groktypename (build_tree_list (TREE_TYPE (component),
+ TREE_OPERAND (component, 0)));
+ name = build_typename_overload (last);
+ TREE_TYPE (name) = last;
+
+ if (TREE_OPERAND (tmp, 0)
+ && TREE_OPERAND (tmp, 0) != void_list_node)
+ {
+ cp_error ("`operator %T' requires empty parameter list", last);
+ TREE_OPERAND (tmp, 0) = NULL_TREE;
+ }
+
+ if (of && TREE_CODE (of) != TYPE_DECL)
+ return build_method_call (of, name, NULL_TREE, NULL_TREE, flags);
+ else if (of)
+ {
+ tree this_this;
+
+ if (current_class_decl == NULL_TREE)
+ {
+ cp_error ("object required for `operator %T' call",
+ TREE_TYPE (name));
+ return error_mark_node;
+ }
+
+ this_this = convert_pointer_to (TREE_TYPE (of),
+ current_class_decl);
+ this_this = build_indirect_ref (this_this, NULL_PTR);
+ return build_method_call (this_this, name, NULL_TREE,
+ NULL_TREE, flags | LOOKUP_NONVIRTUAL);
+ }
+ else if (current_class_decl)
+ return build_method_call (tmp, name, NULL_TREE, NULL_TREE, flags);
+
+ cp_error ("object required for `operator %T' call",
+ TREE_TYPE (name));
+ return error_mark_node;
+
+ case INDIRECT_REF:
+ case ADDR_EXPR:
+ case ARRAY_REF:
+ break;
+
+ case SCOPE_REF:
+ my_friendly_assert (cname == 0, 255);
+ cname = TREE_OPERAND (tmp, 0);
+ tmp = TREE_OPERAND (tmp, 1);
+ break;
+
+ default:
+ my_friendly_abort (77);
+ }
+ last = tmp;
+ tmp = TREE_OPERAND (tmp, 0);
+ }
+
+ last = groktypename (build_tree_list (TREE_TYPE (component), TREE_OPERAND (component, 0)));
+ name = build_typename_overload (last);
+ TREE_TYPE (name) = last;
+ if (of && TREE_CODE (of) == TYPE_DECL)
+ {
+ if (cname == NULL_TREE)
+ {
+ cname = DECL_NAME (of);
+ of = NULL_TREE;
+ }
+ else my_friendly_assert (cname == DECL_NAME (of), 256);
+ }
+
+ if (of)
+ {
+ tree this_this;
+
+ if (current_class_decl == NULL_TREE)
+ {
+ cp_error ("object required for `operator %T' call",
+ TREE_TYPE (name));
+ return error_mark_node;
+ }
+
+ this_this = convert_pointer_to (TREE_TYPE (of), current_class_decl);
+ return build_component_ref (this_this, name, 0, protect);
+ }
+ else if (cname)
+ return build_offset_ref (cname, name);
+ else if (current_class_name)
+ return build_offset_ref (current_class_name, name);
+
+ cp_error ("object required for `operator %T' member reference",
+ TREE_TYPE (name));
+ return error_mark_node;
+}
+#endif
+
+static char *
+thunk_printable_name (decl)
+ tree decl;
+{
+ return "<thunk function>";
+}
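+
+/* Build (or reuse) a THUNK_DECL for FUNCTION: a small stand-in entry
+   point that adjusts the incoming `this' pointer by DELTA and then
+   jumps to FUNCTION, as needed for some virtual calls in the presence
+   of multiple inheritance. The generated name has the form
+   `__thunk_<-delta>_<mangled function name>'. */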
+
+tree
+make_thunk (function, delta)
+ tree function;
+ int delta;
+{
+ char buffer[250];
+ tree thunk_fndecl, thunk_id;
+ tree thunk;
+ char *func_name;
+ static int thunk_number = 0;
+ tree func_decl;
+ if (TREE_CODE (function) != ADDR_EXPR)
+ abort ();
+ func_decl = TREE_OPERAND (function, 0);
+ if (TREE_CODE (func_decl) != FUNCTION_DECL)
+ abort ();
+ func_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (func_decl));
+ sprintf (buffer, "__thunk_%d_%s", -delta, func_name);
+ thunk_id = get_identifier (buffer);
+ thunk = IDENTIFIER_GLOBAL_VALUE (thunk_id);
+ if (thunk && TREE_CODE (thunk) != THUNK_DECL)
+ {
+ error_with_decl (thunk, "implementation-reserved name `%s' used");
+ IDENTIFIER_GLOBAL_VALUE (thunk_id) = thunk = NULL_TREE;
+ }
+ if (thunk == NULL_TREE)
+ {
+ thunk = build_decl (THUNK_DECL, thunk_id, TREE_TYPE (func_decl));
+ DECL_RESULT (thunk)
+ = build_decl (RESULT_DECL, NULL_TREE, TREE_TYPE (vtable_entry_type));
+ make_function_rtl (thunk);
+ DECL_INITIAL (thunk) = function;
+ THUNK_DELTA (thunk) = delta;
+ /* So that finish_file can write out any thunks that need to be: */
+ pushdecl_top_level (thunk);
+ }
+ return thunk;
+}
+
+void
+emit_thunk (thunk_fndecl)
+ tree thunk_fndecl;
+{
+ rtx insns;
+ char *fnname;
+ char buffer[250];
+ tree argp;
+ struct args_size stack_args_size;
+ tree function = TREE_OPERAND (DECL_INITIAL (thunk_fndecl), 0);
+ int delta = THUNK_DELTA (thunk_fndecl);
+ int tem;
+ int failure = 0;
+ int current_call_is_indirect = 0; /* needed for HPPA FUNCTION_ARG */
+
+ /* Used to remember which regs we need to emit a USE rtx for. */
+ rtx need_use[FIRST_PSEUDO_REGISTER];
+ int need_use_count = 0;
+
+ /* rtx for the 'this' parameter. */
+ rtx this_rtx = 0, this_reg_rtx = 0, fixed_this_rtx;
+
+ char *(*save_decl_printable_name) () = decl_printable_name;
+ /* Data on reg parms scanned so far. */
+ CUMULATIVE_ARGS args_so_far;
+
+ if (TREE_ASM_WRITTEN (thunk_fndecl))
+ return;
+
+ TREE_ASM_WRITTEN (thunk_fndecl) = 1;
+
+ if (TREE_PUBLIC (function))
+ {
+ TREE_PUBLIC (thunk_fndecl) = 1;
+ if (DECL_EXTERNAL (function))
+ {
+ DECL_EXTERNAL (thunk_fndecl) = 1;
+ assemble_external (thunk_fndecl);
+ return;
+ }
+ }
+
+ decl_printable_name = thunk_printable_name;
+ if (current_function_decl)
+ abort ();
+ current_function_decl = thunk_fndecl;
+ init_function_start (thunk_fndecl, input_filename, lineno);
+ pushlevel (0);
+ expand_start_bindings (1);
+
+ /* Start updating where the next arg would go. */
+ INIT_CUMULATIVE_ARGS (args_so_far, TREE_TYPE (function), NULL_RTX);
+ stack_args_size.constant = 0;
+ stack_args_size.var = 0;
+ /* FIXME: set up for a possible structure return address. */
+
+ /* Now look through all the parameters and make sure that we
+ don't clobber any registers used for parameters.
+ Also, pick up an rtx for the first "this" parameter. */
+ for (argp = TYPE_ARG_TYPES (TREE_TYPE (function));
+ argp != NULL_TREE;
+ argp = TREE_CHAIN (argp))
+
+ {
+ tree passed_type = TREE_VALUE (argp);
+ register rtx entry_parm;
+ int named = 1; /* FIXME */
+ struct args_size stack_offset;
+ struct args_size arg_size;
+
+ if (passed_type == void_type_node)
+ break;
+
+ if ((TREE_CODE (TYPE_SIZE (passed_type)) != INTEGER_CST
+ && contains_placeholder_p (TYPE_SIZE (passed_type)))
+#ifdef FUNCTION_ARG_PASS_BY_REFERENCE
+ || FUNCTION_ARG_PASS_BY_REFERENCE (args_so_far,
+ TYPE_MODE (passed_type),
+ passed_type, named)
+#endif
+ )
+ passed_type = build_pointer_type (passed_type);
+
+ entry_parm = FUNCTION_ARG (args_so_far,
+ TYPE_MODE (passed_type),
+ passed_type,
+ named);
+ if (entry_parm != 0)
+ need_use[need_use_count++] = entry_parm;
+
+ locate_and_pad_parm (TYPE_MODE (passed_type), passed_type,
+#ifdef STACK_PARMS_IN_REG_PARM_AREA
+ 1,
+#else
+ entry_parm != 0,
+#endif
+ thunk_fndecl,
+ &stack_args_size, &stack_offset, &arg_size);
+
+/* REGNO (entry_parm);*/
+ if (this_rtx == 0)
+ {
+ this_reg_rtx = entry_parm;
+ if (!entry_parm)
+ {
+ rtx offset_rtx = ARGS_SIZE_RTX (stack_offset);
+
+ rtx internal_arg_pointer, stack_parm;
+
+ if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM
+ || ! (fixed_regs[ARG_POINTER_REGNUM]
+ || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM)))
+ internal_arg_pointer = copy_to_reg (virtual_incoming_args_rtx);
+ else
+ internal_arg_pointer = virtual_incoming_args_rtx;
+
+ if (offset_rtx == const0_rtx)
+ entry_parm = gen_rtx (MEM, TYPE_MODE (passed_type),
+ internal_arg_pointer);
+ else
+ entry_parm = gen_rtx (MEM, TYPE_MODE (passed_type),
+ gen_rtx (PLUS, Pmode,
+ internal_arg_pointer,
+ offset_rtx));
+ }
+
+ this_rtx = entry_parm;
+ }
+
+ FUNCTION_ARG_ADVANCE (args_so_far,
+ TYPE_MODE (passed_type),
+ passed_type,
+ named);
+ }
+
+ fixed_this_rtx = plus_constant (this_rtx, delta);
+ if (this_rtx != fixed_this_rtx)
+ emit_move_insn (this_rtx, fixed_this_rtx);
+
+ if (this_reg_rtx)
+ emit_insn (gen_rtx (USE, VOIDmode, this_reg_rtx));
+
+ emit_indirect_jump (XEXP (DECL_RTL (function), 0));
+
+ while (need_use_count > 0)
+ emit_insn (gen_rtx (USE, VOIDmode, need_use[--need_use_count]));
+
+ expand_end_bindings (NULL, 1, 0);
+ poplevel (0, 0, 0);
+
+ /* From now on, allocate rtl in current_obstack, not in saveable_obstack.
+ Note that that may have been done above, in save_for_inline_copying.
+ The call to resume_temporary_allocation near the end of this function
+ goes back to the usual state of affairs. */
+
+ rtl_in_current_obstack ();
+
+ insns = get_insns ();
+
+ /* Copy any shared structure that should not be shared. */
+
+ unshare_all_rtl (insns);
+
+ /* We are no longer anticipating cse in this function, at least. */
+
+ cse_not_expected = 1;
+
+ /* Now we choose between stupid (pcc-like) register allocation
+ (if we got the -noreg switch and not -opt)
+ and smart register allocation. */
+
+ if (optimize > 0) /* Stupid allocation probably won't work */
+ obey_regdecls = 0; /* if optimizations being done. */
+
+ regclass_init ();
+
+ regclass (insns, max_reg_num ());
+ if (obey_regdecls)
+ {
+ stupid_life_analysis (insns, max_reg_num (), NULL);
+ failure = reload (insns, 0, NULL);
+ }
+ else
+ {
+ /* Do control and data flow analysis,
+ and write some of the results to dump file. */
+
+ flow_analysis (insns, max_reg_num (), NULL);
+ local_alloc ();
+ failure = global_alloc (NULL);
+ }
+
+ reload_completed = 1;
+
+#ifdef LEAF_REGISTERS
+ leaf_function = 0;
+ if (optimize > 0 && only_leaf_regs_used () && leaf_function_p ())
+ leaf_function = 1;
+#endif
+
+ /* If a machine dependent reorganization is needed, call it. */
+#ifdef MACHINE_DEPENDENT_REORG
+ MACHINE_DEPENDENT_REORG (insns);
+#endif
+
+ /* Now turn the rtl into assembler code. */
+
+ {
+ char *fnname = XSTR (XEXP (DECL_RTL (thunk_fndecl), 0), 0);
+ assemble_start_function (thunk_fndecl, fnname);
+ final (insns, asm_out_file, optimize, 0);
+ assemble_end_function (thunk_fndecl, fnname);
+ };
+
+ exit_rest_of_compilation:
+
+ reload_completed = 0;
+
+ /* Cancel the effect of rtl_in_current_obstack. */
+
+ resume_temporary_allocation ();
+
+ decl_printable_name = save_decl_printable_name;
+ current_function_decl = 0;
+}
diff --git a/gnu/usr.bin/cc/cc1plus/parse.c b/gnu/usr.bin/cc/cc1plus/parse.c
new file mode 100644
index 0000000..fb8c2e15
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/parse.c
@@ -0,0 +1,7604 @@
+
+/* A Bison parser, made from parse.y with GNU Bison version 1.22
+ */
+
+#define YYBISON 1 /* Identify Bison output. */
+
+#define IDENTIFIER 258
+#define TYPENAME 259
+#define SCSPEC 260
+#define TYPESPEC 261
+#define TYPE_QUAL 262
+#define CONSTANT 263
+#define STRING 264
+#define ELLIPSIS 265
+#define SIZEOF 266
+#define ENUM 267
+#define IF 268
+#define ELSE 269
+#define WHILE 270
+#define DO 271
+#define FOR 272
+#define SWITCH 273
+#define CASE 274
+#define DEFAULT 275
+#define BREAK 276
+#define CONTINUE 277
+#define RETURN 278
+#define GOTO 279
+#define ASM_KEYWORD 280
+#define GCC_ASM_KEYWORD 281
+#define TYPEOF 282
+#define ALIGNOF 283
+#define HEADOF 284
+#define CLASSOF 285
+#define SIGOF 286
+#define ATTRIBUTE 287
+#define EXTENSION 288
+#define LABEL 289
+#define AGGR 290
+#define VISSPEC 291
+#define DELETE 292
+#define NEW 293
+#define OVERLOAD 294
+#define THIS 295
+#define OPERATOR 296
+#define CXX_TRUE 297
+#define CXX_FALSE 298
+#define LEFT_RIGHT 299
+#define TEMPLATE 300
+#define TYPEID 301
+#define DYNAMIC_CAST 302
+#define STATIC_CAST 303
+#define REINTERPRET_CAST 304
+#define CONST_CAST 305
+#define SCOPE 306
+#define EMPTY 307
+#define PTYPENAME 308
+#define ASSIGN 309
+#define OROR 310
+#define ANDAND 311
+#define MIN_MAX 312
+#define EQCOMPARE 313
+#define ARITHCOMPARE 314
+#define LSHIFT 315
+#define RSHIFT 316
+#define POINTSAT_STAR 317
+#define DOT_STAR 318
+#define UNARY 319
+#define PLUSPLUS 320
+#define MINUSMINUS 321
+#define HYPERUNARY 322
+#define PAREN_STAR_PAREN 323
+#define POINTSAT 324
+#define TRY 325
+#define CATCH 326
+#define THROW 327
+#define TYPENAME_ELLIPSIS 328
+#define PRE_PARSED_FUNCTION_DECL 329
+#define EXTERN_LANG_STRING 330
+#define ALL 331
+#define PRE_PARSED_CLASS_DECL 332
+#define TYPENAME_DEFN 333
+#define IDENTIFIER_DEFN 334
+#define PTYPENAME_DEFN 335
+#define END_OF_SAVED_INPUT 336
+
+#line 42 "parse.y"
+
+/* Cause the `yydebug' variable to be defined. */
+#define YYDEBUG 1
+
+#include "config.h"
+
+#include <stdio.h>
+#include <errno.h>
+
+#include "tree.h"
+#include "input.h"
+#include "flags.h"
+#include "lex.h"
+#include "cp-tree.h"
+
+/* Since parsers are distinct for each language, put the language string
+ definition here. (fnf) */
+char *language_string = "GNU C++";
+
+extern tree void_list_node;
+extern struct obstack permanent_obstack;
+
+#ifndef errno
+extern int errno;
+#endif
+
+extern int end_of_file;
+extern int current_class_depth;
+
+void yyerror ();
+
+/* Like YYERROR but do call yyerror. */
+#define YYERROR1 { yyerror ("syntax error"); YYERROR; }
+
+#define OP0(NODE) (TREE_OPERAND (NODE, 0))
+#define OP1(NODE) (TREE_OPERAND (NODE, 1))
+
+/* Contains the statement keyword (if/while/do) to include in an
+ error message if the user supplies an empty conditional expression. */
+static char *cond_stmt_keyword;
+
+/* Nonzero if we have an `extern "C"' acting as an extern specifier. */
+int have_extern_spec;
+int used_extern_spec;
+
+void yyhook ();
+
+/* Cons up an empty parameter list. */
+#ifdef __GNUC__
+__inline
+#endif
+static tree
+empty_parms ()
+{
+ tree parms;
+
+ if (strict_prototype)
+ parms = void_list_node;
+ else
+ parms = NULL_TREE;
+ return parms;
+}
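+
+/* If strict_prototype is set, an empty parameter list `()' is taken to
+   mean `(void)'; otherwise it is left as NULL_TREE, the traditional C
+   reading of an unspecified argument list. */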
+
+#line 108 "parse.y"
+typedef union {long itype; tree ttype; char *strtype; enum tree_code code; } YYSTYPE;
+#line 276 "parse.y"
+
+/* List of types and structure classes of the current declaration. */
+static tree current_declspecs;
+
+/* When defining an aggregate, this is the most recent one being defined. */
+static tree current_aggr;
+
+/* Tell yyparse how to print a token's value, if yydebug is set. */
+
+#define YYPRINT(FILE,YYCHAR,YYLVAL) yyprint(FILE,YYCHAR,YYLVAL)
+extern void yyprint ();
+extern tree combine_strings PROTO((tree));
+
+#ifndef YYLTYPE
+typedef
+ struct yyltype
+ {
+ int timestamp;
+ int first_line;
+ int first_column;
+ int last_line;
+ int last_column;
+ char *text;
+ }
+ yyltype;
+
+#define YYLTYPE yyltype
+#endif
+
+#include <stdio.h>
+
+#ifndef __cplusplus
+#ifndef __STDC__
+#define const
+#endif
+#endif
+
+
+
+#define YYFINAL 1346
+#define YYFLAG -32768
+#define YYNTBASE 106
+
+#define YYTRANSLATE(x) ((unsigned)(x) <= 336 ? yytranslate[x] : 336)
+
+static const char yytranslate[] = { 0,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 104, 2, 2, 2, 77, 65, 2, 88,
+ 102, 75, 73, 55, 74, 87, 76, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 60, 56, 69,
+ 58, 70, 59, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 89, 2, 105, 64, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 54, 63, 103, 83, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53, 57, 61,
+ 62, 66, 67, 68, 71, 72, 78, 79, 80, 81,
+ 82, 84, 85, 86, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101
+};
+
+#if YYDEBUG != 0
+static const short yyprhs[] = { 0,
+ 0, 1, 3, 4, 7, 10, 11, 12, 14, 16,
+ 17, 20, 22, 24, 26, 28, 34, 39, 43, 48,
+ 53, 55, 56, 62, 64, 68, 71, 76, 78, 82,
+ 84, 88, 89, 95, 96, 102, 103, 109, 110, 116,
+ 120, 124, 131, 139, 144, 148, 152, 154, 156, 158,
+ 160, 162, 165, 169, 173, 177, 181, 184, 187, 190,
+ 193, 196, 198, 202, 207, 211, 217, 222, 226, 230,
+ 233, 237, 241, 244, 246, 253, 258, 262, 266, 269,
+ 272, 274, 278, 283, 286, 290, 291, 292, 294, 298,
+ 301, 305, 307, 312, 315, 320, 323, 328, 331, 333,
+ 335, 337, 339, 341, 343, 345, 347, 351, 355, 360,
+ 365, 369, 374, 379, 380, 382, 386, 388, 390, 391,
+ 398, 399, 401, 402, 405, 407, 409, 411, 413, 415,
+ 417, 419, 421, 425, 427, 431, 432, 434, 436, 437,
+ 446, 448, 451, 456, 461, 463, 467, 471, 475, 479,
+ 481, 483, 485, 486, 490, 493, 496, 499, 502, 505,
+ 508, 513, 516, 521, 524, 528, 532, 537, 542, 548,
+ 554, 561, 564, 569, 575, 579, 583, 587, 589, 593,
+ 596, 600, 605, 607, 610, 616, 618, 623, 628, 633,
+ 635, 639, 643, 647, 651, 655, 659, 663, 667, 671,
+ 675, 679, 683, 687, 691, 695, 699, 703, 707, 711,
+ 717, 721, 725, 727, 730, 734, 736, 738, 740, 742,
+ 744, 746, 748, 751, 754, 756, 758, 760, 762, 764,
+ 766, 768, 772, 776, 777, 782, 783, 790, 793, 798,
+ 801, 804, 806, 811, 813, 821, 829, 837, 845, 850,
+ 855, 858, 861, 863, 868, 871, 874, 877, 883, 887,
+ 893, 897, 902, 909, 911, 914, 916, 919, 921, 923,
+ 925, 928, 929, 932, 935, 939, 943, 947, 951, 955,
+ 958, 961, 963, 965, 967, 970, 973, 976, 979, 981,
+ 983, 985, 987, 990, 993, 997, 1001, 1006, 1008, 1011,
+ 1014, 1016, 1018, 1021, 1024, 1026, 1029, 1032, 1036, 1038,
+ 1041, 1043, 1045, 1047, 1052, 1057, 1062, 1067, 1069, 1071,
+ 1073, 1075, 1079, 1081, 1085, 1087, 1091, 1092, 1097, 1098,
+ 1106, 1111, 1112, 1120, 1125, 1126, 1134, 1139, 1140, 1148,
+ 1153, 1154, 1156, 1158, 1161, 1168, 1170, 1174, 1175, 1177,
+ 1182, 1189, 1194, 1196, 1198, 1200, 1202, 1204, 1208, 1210,
+ 1213, 1217, 1222, 1224, 1226, 1230, 1235, 1242, 1246, 1252,
+ 1253, 1261, 1266, 1267, 1274, 1278, 1281, 1284, 1289, 1291,
+ 1292, 1294, 1295, 1297, 1299, 1302, 1305, 1308, 1311, 1315,
+ 1318, 1321, 1324, 1328, 1332, 1334, 1337, 1338, 1339, 1343,
+ 1347, 1350, 1352, 1354, 1355, 1357, 1360, 1362, 1366, 1368,
+ 1371, 1373, 1378, 1383, 1385, 1387, 1390, 1393, 1395, 1396,
+ 1398, 1403, 1407, 1409, 1412, 1415, 1418, 1421, 1424, 1427,
+ 1430, 1433, 1438, 1441, 1443, 1449, 1453, 1454, 1456, 1460,
+ 1461, 1463, 1467, 1469, 1471, 1473, 1475, 1480, 1487, 1492,
+ 1497, 1504, 1509, 1513, 1518, 1525, 1530, 1535, 1542, 1547,
+ 1551, 1553, 1557, 1559, 1563, 1566, 1568, 1575, 1576, 1579,
+ 1581, 1584, 1585, 1588, 1592, 1596, 1599, 1602, 1606, 1608,
+ 1610, 1612, 1615, 1621, 1627, 1631, 1637, 1642, 1646, 1650,
+ 1653, 1655, 1659, 1663, 1666, 1669, 1673, 1675, 1679, 1683,
+ 1686, 1689, 1693, 1695, 1701, 1707, 1711, 1717, 1721, 1725,
+ 1730, 1734, 1737, 1740, 1742, 1745, 1750, 1755, 1758, 1760,
+ 1762, 1764, 1767, 1770, 1773, 1775, 1778, 1780, 1783, 1786,
+ 1790, 1792, 1796, 1799, 1803, 1806, 1809, 1813, 1815, 1819,
+ 1824, 1828, 1831, 1834, 1836, 1840, 1843, 1846, 1848, 1851,
+ 1855, 1857, 1861, 1863, 1869, 1873, 1878, 1882, 1887, 1890,
+ 1893, 1897, 1900, 1902, 1904, 1907, 1910, 1913, 1914, 1915,
+ 1917, 1919, 1922, 1926, 1928, 1931, 1935, 1941, 1948, 1954,
+ 1955, 1956, 1963, 1965, 1968, 1970, 1972, 1974, 1977, 1978,
+ 1983, 1985, 1986, 1987, 1994, 1995, 1996, 2004, 2005, 2006,
+ 2007, 2018, 2019, 2020, 2021, 2032, 2033, 2041, 2042, 2048,
+ 2049, 2057, 2058, 2063, 2066, 2069, 2072, 2076, 2083, 2092,
+ 2103, 2116, 2121, 2125, 2128, 2131, 2133, 2135, 2136, 2137,
+ 2145, 2147, 2150, 2153, 2154, 2155, 2161, 2163, 2165, 2169,
+ 2173, 2176, 2179, 2183, 2188, 2193, 2197, 2202, 2209, 2216,
+ 2217, 2219, 2220, 2222, 2224, 2225, 2227, 2229, 2233, 2238,
+ 2240, 2244, 2245, 2247, 2249, 2251, 2254, 2257, 2260, 2262,
+ 2264, 2267, 2270, 2273, 2276, 2278, 2282, 2285, 2290, 2293,
+ 2298, 2301, 2304, 2307, 2310, 2313, 2316, 2318, 2321, 2323,
+ 2325, 2326, 2327, 2329, 2330, 2335, 2337, 2339, 2343, 2344,
+ 2348, 2352, 2356, 2358, 2361, 2364, 2367, 2370, 2373, 2376,
+ 2379, 2382, 2385, 2388, 2391, 2394, 2397, 2400, 2403, 2406,
+ 2409, 2412, 2415, 2418, 2421, 2424, 2427, 2431, 2434, 2437,
+ 2440, 2443, 2447, 2450, 2453, 2458, 2463, 2467
+};
+
+static const short yyrhs[] = { -1,
+ 107, 0, 0, 108, 112, 0, 107, 112, 0, 0,
+ 0, 25, 0, 26, 0, 0, 113, 114, 0, 129,
+ 0, 128, 0, 122, 0, 120, 0, 111, 88, 179,
+ 102, 56, 0, 115, 54, 107, 103, 0, 115, 54,
+ 103, 0, 115, 109, 129, 110, 0, 115, 109, 128,
+ 110, 0, 95, 0, 0, 45, 69, 117, 118, 70,
+ 0, 119, 0, 118, 55, 119, 0, 220, 138, 0,
+ 220, 140, 60, 232, 0, 327, 0, 39, 121, 56,
+ 0, 3, 0, 121, 55, 3, 0, 0, 116, 222,
+ 54, 123, 56, 0, 0, 116, 223, 54, 124, 56,
+ 0, 0, 116, 222, 60, 125, 56, 0, 0, 116,
+ 223, 60, 126, 56, 0, 116, 222, 56, 0, 116,
+ 223, 56, 0, 116, 258, 330, 197, 206, 127, 0,
+ 116, 186, 183, 330, 197, 206, 127, 0, 116, 189,
+ 258, 127, 0, 116, 1, 103, 0, 116, 1, 56,
+ 0, 54, 0, 60, 0, 56, 0, 58, 0, 23,
+ 0, 196, 56, 0, 189, 195, 56, 0, 189, 258,
+ 56, 0, 186, 194, 56, 0, 186, 183, 56, 0,
+ 189, 56, 0, 141, 56, 0, 186, 56, 0, 1,
+ 56, 0, 1, 103, 0, 56, 0, 130, 134, 282,
+ 0, 130, 133, 134, 282, 0, 130, 180, 282, 0,
+ 130, 133, 56, 180, 282, 0, 130, 133, 180, 282,
+ 0, 186, 183, 1, 0, 189, 258, 1, 0, 258,
+ 1, 0, 186, 183, 330, 0, 189, 258, 330, 0,
+ 258, 330, 0, 94, 0, 186, 88, 322, 102, 250,
+ 330, 0, 186, 44, 250, 330, 0, 186, 183, 330,
+ 0, 189, 258, 330, 0, 258, 330, 0, 23, 3,
+ 0, 132, 0, 132, 58, 213, 0, 132, 88, 160,
+ 102, 0, 132, 44, 0, 60, 135, 136, 0, 0,
+ 0, 137, 0, 136, 55, 137, 0, 136, 1, 0,
+ 88, 160, 102, 0, 44, 0, 139, 88, 160, 102,
+ 0, 139, 44, 0, 268, 88, 160, 102, 0, 268,
+ 44, 0, 262, 88, 160, 102, 0, 262, 44, 0,
+ 3, 0, 4, 0, 53, 0, 3, 0, 53, 0,
+ 99, 0, 98, 0, 100, 0, 45, 221, 149, 0,
+ 45, 186, 183, 0, 5, 45, 221, 149, 0, 5,
+ 45, 186, 183, 0, 143, 144, 149, 0, 53, 69,
+ 145, 70, 0, 4, 69, 145, 70, 0, 0, 146,
+ 0, 145, 55, 146, 0, 185, 0, 168, 0, 0,
+ 97, 229, 148, 234, 235, 103, 0, 0, 147, 0,
+ 0, 147, 150, 0, 74, 0, 73, 0, 81, 0,
+ 82, 0, 104, 0, 159, 0, 168, 0, 44, 0,
+ 88, 152, 102, 0, 44, 0, 88, 156, 102, 0,
+ 0, 156, 0, 1, 0, 0, 311, 183, 330, 197,
+ 206, 58, 157, 213, 0, 152, 0, 54, 103, 0,
+ 54, 279, 276, 103, 0, 54, 279, 1, 103, 0,
+ 289, 0, 168, 55, 168, 0, 168, 55, 1, 0,
+ 159, 55, 168, 0, 159, 55, 1, 0, 168, 0,
+ 159, 0, 173, 0, 0, 33, 162, 166, 0, 75,
+ 166, 0, 65, 166, 0, 83, 166, 0, 151, 166,
+ 0, 62, 138, 0, 11, 161, 0, 11, 88, 185,
+ 102, 0, 28, 161, 0, 28, 88, 185, 102, 0,
+ 176, 249, 0, 176, 249, 164, 0, 176, 163, 249,
+ 0, 176, 163, 249, 164, 0, 176, 88, 185, 102,
+ 0, 176, 88, 185, 102, 164, 0, 176, 163, 88,
+ 185, 102, 0, 176, 163, 88, 185, 102, 164, 0,
+ 177, 166, 0, 177, 89, 105, 166, 0, 177, 89,
+ 152, 105, 166, 0, 88, 160, 102, 0, 54, 160,
+ 103, 0, 88, 160, 102, 0, 44, 0, 88, 192,
+ 102, 0, 58, 213, 0, 88, 185, 102, 0, 165,
+ 88, 185, 102, 0, 167, 0, 165, 167, 0, 165,
+ 54, 214, 218, 103, 0, 161, 0, 29, 88, 152,
+ 102, 0, 30, 88, 152, 102, 0, 30, 88, 4,
+ 102, 0, 166, 0, 168, 78, 168, 0, 168, 79,
+ 168, 0, 168, 73, 168, 0, 168, 74, 168, 0,
+ 168, 75, 168, 0, 168, 76, 168, 0, 168, 77,
+ 168, 0, 168, 71, 168, 0, 168, 72, 168, 0,
+ 168, 68, 168, 0, 168, 69, 168, 0, 168, 70,
+ 168, 0, 168, 67, 168, 0, 168, 66, 168, 0,
+ 168, 65, 168, 0, 168, 63, 168, 0, 168, 64,
+ 168, 0, 168, 62, 168, 0, 168, 61, 168, 0,
+ 168, 59, 317, 60, 168, 0, 168, 58, 168, 0,
+ 168, 57, 168, 0, 92, 0, 92, 168, 0, 83,
+ 328, 138, 0, 335, 0, 3, 0, 53, 0, 169,
+ 0, 4, 0, 169, 0, 262, 0, 75, 171, 0,
+ 65, 171, 0, 260, 0, 169, 0, 262, 0, 169,
+ 0, 8, 0, 178, 0, 179, 0, 88, 152, 102,
+ 0, 88, 1, 102, 0, 0, 88, 174, 283, 102,
+ 0, 0, 173, 88, 160, 102, 175, 150, 0, 173,
+ 44, 0, 173, 89, 152, 105, 0, 173, 81, 0,
+ 173, 82, 0, 40, 0, 7, 88, 160, 102, 0,
+ 264, 0, 47, 69, 185, 70, 88, 152, 102, 0,
+ 48, 69, 185, 70, 88, 152, 102, 0, 49, 69,
+ 185, 70, 88, 152, 102, 0, 50, 69, 185, 70,
+ 88, 152, 102, 0, 46, 88, 152, 102, 0, 46,
+ 88, 185, 102, 0, 271, 3, 0, 271, 335, 0,
+ 263, 0, 263, 88, 160, 102, 0, 263, 44, 0,
+ 181, 170, 0, 181, 261, 0, 181, 170, 88, 160,
+ 102, 0, 181, 170, 44, 0, 181, 261, 88, 160,
+ 102, 0, 181, 261, 44, 0, 181, 83, 6, 44,
+ 0, 181, 6, 51, 83, 6, 44, 0, 38, 0,
+ 271, 38, 0, 37, 0, 271, 177, 0, 42, 0,
+ 43, 0, 9, 0, 179, 9, 0, 0, 173, 87,
+ 0, 173, 86, 0, 192, 183, 56, 0, 186, 183,
+ 56, 0, 192, 194, 56, 0, 186, 194, 56, 0,
+ 189, 195, 56, 0, 186, 56, 0, 189, 56, 0,
+ 254, 0, 258, 0, 44, 0, 184, 44, 0, 190,
+ 274, 0, 251, 274, 0, 192, 274, 0, 190, 0,
+ 251, 0, 190, 0, 187, 0, 189, 192, 0, 192,
+ 188, 0, 189, 192, 188, 0, 189, 192, 191, 0,
+ 189, 192, 191, 188, 0, 5, 0, 188, 193, 0,
+ 188, 5, 0, 251, 0, 5, 0, 189, 7, 0,
+ 189, 5, 0, 192, 0, 251, 192, 0, 192, 191,
+ 0, 251, 192, 191, 0, 193, 0, 191, 193, 0,
+ 215, 0, 6, 0, 268, 0, 27, 88, 152, 102,
+ 0, 27, 88, 185, 102, 0, 31, 88, 152, 102,
+ 0, 31, 88, 185, 102, 0, 6, 0, 7, 0,
+ 215, 0, 198, 0, 194, 55, 200, 0, 202, 0,
+ 195, 55, 200, 0, 204, 0, 196, 55, 200, 0,
+ 0, 111, 88, 179, 102, 0, 0, 183, 330, 197,
+ 206, 58, 199, 213, 0, 183, 330, 197, 206, 0,
+ 0, 183, 330, 197, 206, 58, 201, 213, 0, 183,
+ 330, 197, 206, 0, 0, 258, 330, 197, 206, 58,
+ 203, 213, 0, 258, 330, 197, 206, 0, 0, 258,
+ 330, 197, 206, 58, 205, 213, 0, 258, 330, 197,
+ 206, 0, 0, 207, 0, 208, 0, 207, 208, 0,
+ 32, 88, 88, 209, 102, 102, 0, 210, 0, 209,
+ 55, 210, 0, 0, 211, 0, 211, 88, 3, 102,
+ 0, 211, 88, 3, 55, 160, 102, 0, 211, 88,
+ 160, 102, 0, 138, 0, 5, 0, 6, 0, 7,
+ 0, 138, 0, 212, 55, 138, 0, 168, 0, 54,
+ 103, 0, 54, 214, 103, 0, 54, 214, 55, 103,
+ 0, 1, 0, 213, 0, 214, 55, 213, 0, 89,
+ 168, 105, 213, 0, 214, 55, 19, 168, 60, 213,
+ 0, 138, 60, 213, 0, 214, 55, 138, 60, 213,
+ 0, 0, 12, 138, 54, 216, 247, 219, 103, 0,
+ 12, 138, 54, 103, 0, 0, 12, 54, 217, 247,
+ 219, 103, 0, 12, 54, 103, 0, 12, 138, 0,
+ 12, 269, 0, 228, 234, 235, 103, 0, 228, 0,
+ 0, 55, 0, 0, 55, 0, 35, 0, 220, 5,
+ 0, 220, 6, 0, 220, 7, 0, 220, 35, 0,
+ 220, 143, 56, 0, 220, 138, 0, 220, 269, 0,
+ 220, 142, 0, 220, 143, 54, 0, 220, 143, 60,
+ 0, 221, 0, 220, 140, 0, 0, 0, 222, 224,
+ 229, 0, 223, 225, 229, 0, 220, 54, 0, 227,
+ 0, 226, 0, 0, 60, 0, 60, 230, 0, 231,
+ 0, 230, 55, 231, 0, 232, 0, 233, 232, 0,
+ 268, 0, 31, 88, 152, 102, 0, 31, 88, 185,
+ 102, 0, 36, 0, 5, 0, 233, 36, 0, 233,
+ 5, 0, 54, 0, 0, 236, 0, 235, 36, 60,
+ 236, 0, 235, 36, 60, 0, 237, 0, 236, 237,
+ 0, 236, 56, 0, 238, 56, 0, 238, 103, 0,
+ 131, 60, 0, 131, 54, 0, 186, 239, 0, 189,
+ 240, 0, 258, 330, 197, 206, 0, 60, 168, 0,
+ 1, 0, 186, 88, 322, 102, 250, 0, 186, 44,
+ 250, 0, 0, 241, 0, 239, 55, 242, 0, 0,
+ 244, 0, 240, 55, 246, 0, 243, 0, 244, 0,
+ 245, 0, 246, 0, 254, 330, 197, 206, 0, 254,
+ 330, 197, 206, 58, 213, 0, 4, 60, 168, 206,
+ 0, 258, 330, 197, 206, 0, 258, 330, 197, 206,
+ 58, 213, 0, 3, 60, 168, 206, 0, 60, 168,
+ 206, 0, 254, 330, 197, 206, 0, 254, 330, 197,
+ 206, 58, 213, 0, 4, 60, 168, 206, 0, 258,
+ 330, 197, 206, 0, 258, 330, 197, 206, 58, 213,
+ 0, 3, 60, 168, 206, 0, 60, 168, 206, 0,
+ 248, 0, 247, 55, 248, 0, 138, 0, 138, 58,
+ 168, 0, 311, 272, 0, 311, 0, 88, 185, 102,
+ 89, 152, 105, 0, 0, 250, 7, 0, 7, 0,
+ 251, 7, 0, 0, 253, 152, 0, 75, 251, 254,
+ 0, 65, 251, 254, 0, 75, 254, 0, 65, 254,
+ 0, 270, 250, 254, 0, 257, 0, 265, 0, 256,
+ 0, 266, 265, 0, 257, 88, 160, 102, 250, 0,
+ 257, 88, 322, 102, 250, 0, 257, 44, 250, 0,
+ 257, 88, 1, 102, 250, 0, 257, 89, 252, 105,
+ 0, 257, 89, 105, 0, 88, 254, 102, 0, 266,
+ 265, 0, 265, 0, 75, 251, 258, 0, 65, 251,
+ 258, 0, 75, 258, 0, 65, 258, 0, 270, 250,
+ 258, 0, 172, 0, 75, 251, 258, 0, 65, 251,
+ 258, 0, 75, 259, 0, 65, 259, 0, 270, 250,
+ 258, 0, 260, 0, 172, 88, 160, 102, 250, 0,
+ 172, 88, 322, 102, 250, 0, 172, 44, 250, 0,
+ 172, 88, 1, 102, 250, 0, 88, 171, 102, 0,
+ 88, 259, 102, 0, 172, 89, 252, 105, 0, 172,
+ 89, 105, 0, 266, 170, 0, 266, 169, 0, 262,
+ 0, 271, 262, 0, 192, 88, 160, 102, 0, 192,
+ 88, 171, 102, 0, 192, 184, 0, 4, 0, 142,
+ 0, 267, 0, 266, 267, 0, 4, 51, 0, 142,
+ 51, 0, 255, 0, 271, 255, 0, 256, 0, 271,
+ 255, 0, 266, 75, 0, 271, 266, 75, 0, 51,
+ 0, 75, 250, 272, 0, 75, 250, 0, 65, 250,
+ 272, 0, 65, 250, 0, 270, 250, 0, 270, 250,
+ 272, 0, 273, 0, 89, 152, 105, 0, 273, 89,
+ 252, 105, 0, 75, 251, 274, 0, 75, 274, 0,
+ 75, 251, 0, 75, 0, 65, 251, 274, 0, 65,
+ 274, 0, 65, 251, 0, 65, 0, 270, 250, 0,
+ 270, 250, 274, 0, 275, 0, 88, 274, 102, 0,
+ 85, 0, 275, 88, 322, 102, 250, 0, 275, 44,
+ 250, 0, 275, 89, 252, 105, 0, 275, 89, 105,
+ 0, 88, 323, 102, 250, 0, 165, 250, 0, 184,
+ 250, 0, 89, 252, 105, 0, 89, 105, 0, 288,
+ 0, 277, 0, 276, 288, 0, 276, 277, 0, 1,
+ 56, 0, 0, 0, 280, 0, 281, 0, 280, 281,
+ 0, 34, 212, 56, 0, 283, 0, 1, 283, 0,
+ 54, 278, 103, 0, 54, 278, 279, 276, 103, 0,
+ 54, 278, 279, 276, 1, 103, 0, 54, 278, 279,
+ 1, 103, 0, 0, 0, 13, 285, 278, 154, 286,
+ 287, 0, 283, 0, 278, 289, 0, 283, 0, 289,
+ 0, 182, 0, 152, 56, 0, 0, 284, 14, 290,
+ 287, 0, 284, 0, 0, 0, 15, 291, 278, 154,
+ 292, 158, 0, 0, 0, 16, 293, 287, 15, 294,
+ 153, 56, 0, 0, 0, 0, 314, 295, 278, 155,
+ 56, 296, 317, 102, 297, 158, 0, 0, 0, 0,
+ 315, 298, 278, 155, 56, 299, 317, 102, 300, 158,
+ 0, 0, 18, 278, 88, 156, 102, 301, 287, 0,
+ 0, 19, 168, 60, 302, 288, 0, 0, 19, 168,
+ 10, 168, 60, 303, 288, 0, 0, 20, 60, 304,
+ 288, 0, 21, 56, 0, 22, 56, 0, 23, 56,
+ 0, 23, 152, 56, 0, 111, 316, 88, 179, 102,
+ 56, 0, 111, 316, 88, 179, 60, 318, 102, 56,
+ 0, 111, 316, 88, 179, 60, 318, 60, 318, 102,
+ 56, 0, 111, 316, 88, 179, 60, 318, 60, 318,
+ 60, 321, 102, 56, 0, 24, 75, 152, 56, 0,
+ 24, 138, 56, 0, 313, 288, 0, 313, 103, 0,
+ 56, 0, 305, 0, 0, 0, 90, 54, 278, 306,
+ 308, 307, 309, 0, 103, 0, 276, 103, 0, 1,
+ 103, 0, 0, 0, 309, 91, 310, 312, 283, 0,
+ 190, 0, 251, 0, 88, 10, 102, 0, 88, 327,
+ 102, 0, 3, 60, 0, 53, 60, 0, 17, 88,
+ 56, 0, 17, 88, 152, 56, 0, 17, 88, 54,
+ 103, 0, 17, 88, 182, 0, 17, 88, 1, 56,
+ 0, 17, 88, 54, 278, 276, 103, 0, 17, 88,
+ 54, 278, 1, 103, 0, 0, 7, 0, 0, 152,
+ 0, 1, 0, 0, 319, 0, 320, 0, 319, 55,
+ 320, 0, 9, 88, 152, 102, 0, 9, 0, 321,
+ 55, 9, 0, 0, 323, 0, 185, 0, 324, 0,
+ 325, 10, 0, 324, 10, 0, 185, 10, 0, 10,
+ 0, 93, 0, 324, 93, 0, 185, 93, 0, 324,
+ 60, 0, 185, 60, 0, 326, 0, 327, 58, 213,
+ 0, 325, 327, 0, 325, 327, 58, 213, 0, 325,
+ 329, 0, 325, 329, 58, 213, 0, 324, 55, 0,
+ 185, 55, 0, 187, 183, 0, 190, 183, 0, 192,
+ 183, 0, 187, 274, 0, 187, 0, 189, 258, 0,
+ 326, 0, 185, 0, 0, 0, 258, 0, 0, 92,
+ 88, 332, 102, 0, 185, 0, 331, 0, 332, 55,
+ 331, 0, 0, 75, 250, 333, 0, 65, 250, 333,
+ 0, 270, 250, 333, 0, 41, 0, 334, 75, 0,
+ 334, 76, 0, 334, 77, 0, 334, 73, 0, 334,
+ 74, 0, 334, 65, 0, 334, 63, 0, 334, 64,
+ 0, 334, 83, 0, 334, 55, 0, 334, 68, 0,
+ 334, 69, 0, 334, 70, 0, 334, 67, 0, 334,
+ 57, 0, 334, 58, 0, 334, 71, 0, 334, 72,
+ 0, 334, 81, 0, 334, 82, 0, 334, 62, 0,
+ 334, 61, 0, 334, 104, 0, 334, 59, 60, 0,
+ 334, 66, 0, 334, 86, 0, 334, 78, 0, 334,
+ 44, 0, 334, 89, 105, 0, 334, 38, 0, 334,
+ 37, 0, 334, 38, 89, 105, 0, 334, 37, 89,
+ 105, 0, 334, 311, 333, 0, 334, 1, 0
+};
+
+#endif
+
+#if YYDEBUG != 0
+static const short yyrline[] = { 0,
+ 291, 292, 306, 308, 309, 313, 318, 322, 324, 327,
+ 330, 334, 337, 339, 341, 342, 345, 347, 349, 352,
+ 357, 362, 365, 369, 372, 376, 392, 401, 404, 409,
+ 411, 415, 421, 421, 424, 424, 427, 427, 442, 442,
+ 447, 452, 469, 492, 502, 503, 506, 507, 508, 509,
+ 510, 513, 516, 519, 524, 529, 535, 537, 538, 557,
+ 558, 559, 562, 576, 589, 592, 595, 598, 600, 602,
+ 606, 612, 617, 622, 629, 640, 647, 649, 651, 655,
+ 663, 665, 667, 669, 673, 686, 709, 712, 714, 715,
+ 718, 724, 730, 732, 734, 736, 739, 743, 749, 751,
+ 752, 755, 757, 760, 762, 763, 766, 769, 771, 773,
+ 777, 782, 785, 789, 794, 797, 801, 804, 807, 841,
+ 857, 860, 864, 867, 871, 873, 875, 877, 879, 883,
+ 885, 888, 893, 897, 902, 906, 909, 910, 914, 933,
+ 940, 943, 946, 948, 950, 954, 958, 961, 963, 967,
+ 970, 973, 982, 985, 988, 990, 992, 994, 1001, 1012,
+ 1032, 1034, 1036, 1041, 1043, 1045, 1047, 1049, 1052, 1054,
+ 1056, 1059, 1061, 1065, 1071, 1074, 1081, 1084, 1086, 1094,
+ 1103, 1109, 1115, 1117, 1119, 1132, 1134, 1136, 1138, 1155,
+ 1158, 1160, 1162, 1164, 1166, 1168, 1170, 1172, 1174, 1176,
+ 1178, 1180, 1182, 1184, 1186, 1188, 1190, 1192, 1194, 1196,
+ 1198, 1200, 1207, 1209, 1226, 1229, 1230, 1231, 1234, 1236,
+ 1239, 1241, 1242, 1244, 1248, 1250, 1251, 1256, 1276, 1277,
+ 1278, 1280, 1282, 1284, 1292, 1313, 1318, 1325, 1332, 1334,
+ 1343, 1348, 1371, 1415, 1416, 1419, 1422, 1425, 1428, 1430,
+ 1433, 1472, 1479, 1481, 1483, 1485, 1487, 1489, 1504, 1519,
+ 1530, 1542, 1549, 1598, 1600, 1604, 1606, 1610, 1613, 1618,
+ 1620, 1624, 1637, 1638, 1644, 1655, 1663, 1669, 1674, 1676,
+ 1681, 1688, 1690, 1694, 1698, 1704, 1707, 1709, 1711, 1713,
+ 1721, 1723, 1725, 1728, 1730, 1732, 1734, 1739, 1745, 1747,
+ 1758, 1761, 1763, 1766, 1781, 1784, 1786, 1788, 1792, 1795,
+ 1803, 1804, 1805, 1806, 1810, 1814, 1828, 1846, 1847, 1848,
+ 1851, 1853, 1856, 1858, 1861, 1863, 1866, 1869, 1873, 1890,
+ 1892, 1910, 1916, 1917, 1923, 1931, 1933, 1942, 1950, 1952,
+ 1963, 1966, 1970, 1973, 1977, 1982, 1985, 1989, 1992, 1994,
+ 1996, 1998, 2005, 2007, 2008, 2009, 2013, 2016, 2020, 2022,
+ 2025, 2028, 2031, 2037, 2040, 2043, 2045, 2047, 2049, 2053,
+ 2057, 2061, 2064, 2067, 2071, 2074, 2076, 2080, 2131, 2146,
+ 2148, 2151, 2153, 2157, 2158, 2160, 2162, 2164, 2168, 2177,
+ 2180, 2182, 2184, 2190, 2192, 2195, 2200, 2203, 2206, 2215,
+ 2226, 2231, 2231, 2233, 2236, 2238, 2242, 2244, 2248, 2276,
+ 2307, 2309, 2331, 2355, 2357, 2361, 2387, 2396, 2424, 2427,
+ 2434, 2445, 2454, 2458, 2471, 2474, 2476, 2481, 2483, 2487,
+ 2495, 2499, 2502, 2504, 2515, 2520, 2528, 2531, 2532, 2543,
+ 2546, 2547, 2558, 2560, 2563, 2565, 2568, 2573, 2577, 2583,
+ 2588, 2592, 2596, 2602, 2606, 2609, 2614, 2618, 2621, 2624,
+ 2633, 2635, 2639, 2642, 2647, 2650, 2654, 2663, 2666, 2670,
+ 2673, 2681, 2683, 2688, 2691, 2693, 2695, 2697, 2701, 2704,
+ 2718, 2721, 2726, 2729, 2731, 2733, 2735, 2737, 2739, 2741,
+ 2745, 2751, 2754, 2756, 2758, 2760, 2764, 2767, 2770, 2772,
+ 2774, 2776, 2780, 2783, 2786, 2788, 2790, 2792, 2794, 2796,
+ 2798, 2802, 2808, 2814, 2816, 2820, 2823, 2825, 2829, 2831,
+ 2834, 2836, 2842, 2845, 2859, 2861, 2865, 2867, 2871, 2874,
+ 2880, 2886, 2889, 2891, 2893, 2895, 2899, 2903, 2907, 2910,
+ 2915, 2918, 2920, 2922, 2924, 2926, 2928, 2930, 2932, 2936,
+ 2940, 2944, 2948, 2949, 2951, 2953, 2955, 2957, 2959, 2961,
+ 2963, 2965, 2973, 2975, 2976, 2977, 2980, 2987, 2997, 2999,
+ 3004, 3006, 3009, 3023, 3026, 3029, 3033, 3037, 3041, 3047,
+ 3050, 3054, 3056, 3059, 3065, 3068, 3071, 3074, 3087, 3090,
+ 3095, 3101, 3106, 3109, 3114, 3118, 3121, 3127, 3132, 3135,
+ 3140, 3149, 3153, 3156, 3162, 3172, 3179, 3185, 3210, 3210,
+ 3242, 3242, 3258, 3258, 3262, 3266, 3269, 3274, 3281, 3290,
+ 3299, 3308, 3311, 3317, 3319, 3323, 3325, 3328, 3332, 3335,
+ 3338, 3346, 3350, 3356, 3358, 3360, 3364, 3366, 3369, 3382,
+ 3387, 3395, 3399, 3402, 3404, 3408, 3411, 3413, 3415, 3421,
+ 3425, 3429, 3432, 3433, 3439, 3441, 3444, 3446, 3450, 3455,
+ 3458, 3468, 3475, 3476, 3483, 3489, 3494, 3498, 3503, 3510,
+ 3514, 3518, 3523, 3534, 3548, 3551, 3553, 3555, 3557, 3559,
+ 3563, 3565, 3573, 3590, 3592, 3594, 3596, 3598, 3602, 3604,
+ 3607, 3629, 3635, 3642, 3645, 3649, 3654, 3656, 3663, 3666,
+ 3668, 3670, 3676, 3680, 3683, 3685, 3687, 3689, 3691, 3693,
+ 3695, 3697, 3699, 3701, 3703, 3705, 3707, 3709, 3711, 3713,
+ 3715, 3717, 3719, 3721, 3723, 3725, 3727, 3729, 3731, 3733,
+ 3735, 3737, 3739, 3741, 3743, 3745, 3748, 3750
+};
+
+static const char * const yytname[] = { "$","error","$illegal.","IDENTIFIER",
+"TYPENAME","SCSPEC","TYPESPEC","TYPE_QUAL","CONSTANT","STRING","ELLIPSIS","SIZEOF",
+"ENUM","IF","ELSE","WHILE","DO","FOR","SWITCH","CASE","DEFAULT","BREAK","CONTINUE",
+"RETURN","GOTO","ASM_KEYWORD","GCC_ASM_KEYWORD","TYPEOF","ALIGNOF","HEADOF",
+"CLASSOF","SIGOF","ATTRIBUTE","EXTENSION","LABEL","AGGR","VISSPEC","DELETE",
+"NEW","OVERLOAD","THIS","OPERATOR","CXX_TRUE","CXX_FALSE","LEFT_RIGHT","TEMPLATE",
+"TYPEID","DYNAMIC_CAST","STATIC_CAST","REINTERPRET_CAST","CONST_CAST","SCOPE",
+"EMPTY","PTYPENAME","'{'","','","';'","ASSIGN","'='","'?'","':'","OROR","ANDAND",
+"'|'","'^'","'&'","MIN_MAX","EQCOMPARE","ARITHCOMPARE","'<'","'>'","LSHIFT",
+"RSHIFT","'+'","'-'","'*'","'/'","'%'","POINTSAT_STAR","DOT_STAR","UNARY","PLUSPLUS",
+"MINUSMINUS","'~'","HYPERUNARY","PAREN_STAR_PAREN","POINTSAT","'.'","'('","'['",
+"TRY","CATCH","THROW","TYPENAME_ELLIPSIS","PRE_PARSED_FUNCTION_DECL","EXTERN_LANG_STRING",
+"ALL","PRE_PARSED_CLASS_DECL","TYPENAME_DEFN","IDENTIFIER_DEFN","PTYPENAME_DEFN",
+"END_OF_SAVED_INPUT","')'","'}'","'!'","']'","program","extdefs","@1",".hush_warning",
+".warning_ok","asm_keyword","lang_extdef","@2","extdef","extern_lang_string",
+"template_header","@3","template_parm_list","template_parm","overloaddef","ov_identifiers",
+"template_def","@4","@5","@6","@7","fn_tmpl_end","datadef","fndef","fn.def1",
+"fn.def2","return_id","return_init","base_init",".set_base_init","member_init_list",
+"member_init","identifier","notype_identifier","identifier_defn","explicit_instantiation",
+"template_type","template_type_name","tmpl.2","template_arg_list","template_arg",
+"template_instantiate_once","@8","template_instantiation","template_instantiate_some",
+"unop","expr","paren_expr_or_null","paren_cond_or_null","xcond","condition",
+"@9","already_scoped_stmt","nontrivial_exprlist","nonnull_exprlist","unary_expr",
+"@10","new_placement","new_initializer","regcast_or_absdcl","cast_expr","sub_cast_expr",
+"expr_no_commas","notype_unqualified_id","unqualified_id","expr_or_declarator",
+"direct_notype_declarator","primary","@11","@12","new","delete","boolean.literal",
+"string","nodecls","object","decl","declarator","fcast_or_absdcl","type_id",
+"typed_declspecs","typed_declspecs1","reserved_declspecs","declmods","typed_typespecs",
+"reserved_typespecquals","typespec","typespecqual_reserved","initdecls","notype_initdecls",
+"nomods_initdecls","maybeasm","initdcl0","@13","initdcl","@14","notype_initdcl0",
+"@15","nomods_initdcl0","@16","maybe_attribute","attributes","attribute","attribute_list",
+"attrib","any_word","identifiers_or_typenames","init","initlist","structsp",
+"@17","@18","maybecomma","maybecomma_warn","aggr","specialization","named_class_head_sans_basetype",
+"named_class_head_sans_basetype_defn","do_xref","do_xref_defn","named_class_head",
+"unnamed_class_head","class_head","maybe_base_class_list","base_class_list",
+"base_class","base_class.1","base_class_access_list","left_curly","opt.component_decl_list",
+"component_decl_list","component_decl","component_decl_1","components","notype_components",
+"component_declarator0","component_declarator","after_type_component_declarator0",
+"notype_component_declarator0","after_type_component_declarator","notype_component_declarator",
+"enumlist","enumerator","new_type_id","type_quals","nonempty_type_quals","nonmomentary_expr",
+"@19","after_type_declarator","qualified_type_name","nested_type","direct_after_type_declarator",
+"notype_declarator","complex_notype_declarator","complex_direct_notype_declarator",
+"qualified_id","notype_qualified_id","overqualified_id","functional_cast","type_name",
+"nested_name_specifier","nested_name_specifier_1","complete_type_name","complex_type_name",
+"ptr_to_mem","global_scope","new_declarator","direct_new_declarator","absdcl",
+"direct_abstract_declarator","stmts","errstmt",".pushlevel","maybe_label_decls",
+"label_decls","label_decl","compstmt_or_error","compstmt","simple_if","@20",
+"@21","implicitly_scoped_stmt","stmt","simple_stmt","@22","@23","@24","@25",
+"@26","@27","@28","@29","@30","@31","@32","@33","@34","@35","@36","try_block",
+"@37","@38","ansi_try_stmts","handler_seq","@39","type_specifier_seq","handler_args",
+"label_colon","forhead.1","forhead.2","maybe_type_qual","xexpr","asm_operands",
+"nonnull_asm_operands","asm_operand","asm_clobbers","parmlist","complex_parmlist",
+"parms","parms_comma","named_parm","parm","see_typename","bad_parm","maybe_raises",
+"ansi_raise_identifier","ansi_raise_identifiers","conversion_declarator","operator",
+"operator_name",""
+};
+#endif
+
+static const short yyr1[] = { 0,
+ 106, 106, 108, 107, 107, 109, 110, 111, 111, 113,
+ 112, 114, 114, 114, 114, 114, 114, 114, 114, 114,
+ 115, 117, 116, 118, 118, 119, 119, 119, 120, 121,
+ 121, 123, 122, 124, 122, 125, 122, 126, 122, 122,
+ 122, 122, 122, 122, 122, 122, 127, 127, 127, 127,
+ 127, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 129, 129, 129, 129, 129, 129, 129, 129,
+ 130, 130, 130, 130, 131, 131, 131, 131, 131, 132,
+ 133, 133, 133, 133, 134, 135, 136, 136, 136, 136,
+ 137, 137, 137, 137, 137, 137, 137, 137, 138, 138,
+ 138, 139, 139, 140, 140, 140, 141, 141, 141, 141,
+ 142, 143, 143, 144, 145, 145, 146, 146, 148, 147,
+ 149, 149, 150, 150, 151, 151, 151, 151, 151, 152,
+ 152, 153, 153, 154, 154, 155, 155, 155, 157, 156,
+ 156, 158, 158, 158, 158, 159, 159, 159, 159, 160,
+ 160, 161, 162, 161, 161, 161, 161, 161, 161, 161,
+ 161, 161, 161, 161, 161, 161, 161, 161, 161, 161,
+ 161, 161, 161, 161, 163, 163, 164, 164, 164, 164,
+ 165, 165, 166, 166, 166, 167, 167, 167, 167, 168,
+ 168, 168, 168, 168, 168, 168, 168, 168, 168, 168,
+ 168, 168, 168, 168, 168, 168, 168, 168, 168, 168,
+ 168, 168, 168, 168, 169, 169, 169, 169, 170, 170,
+ 171, 171, 171, 171, 172, 172, 172, 173, 173, 173,
+ 173, 173, 173, 174, 173, 175, 173, 173, 173, 173,
+ 173, 173, 173, 173, 173, 173, 173, 173, 173, 173,
+ 173, 173, 173, 173, 173, 173, 173, 173, 173, 173,
+ 173, 173, 173, 176, 176, 177, 177, 178, 178, 179,
+ 179, 180, 181, 181, 182, 182, 182, 182, 182, 182,
+ 182, 183, 183, 184, 184, 185, 185, 185, 185, 185,
+ 186, 186, 187, 187, 187, 187, 187, 188, 188, 188,
+ 189, 189, 189, 189, 190, 190, 190, 190, 191, 191,
+ 192, 192, 192, 192, 192, 192, 192, 193, 193, 193,
+ 194, 194, 195, 195, 196, 196, 197, 197, 199, 198,
+ 198, 201, 200, 200, 203, 202, 202, 205, 204, 204,
+ 206, 206, 207, 207, 208, 209, 209, 210, 210, 210,
+ 210, 210, 211, 211, 211, 211, 212, 212, 213, 213,
+ 213, 213, 213, 214, 214, 214, 214, 214, 214, 216,
+ 215, 215, 217, 215, 215, 215, 215, 215, 215, 218,
+ 218, 219, 219, 220, 220, 220, 220, 220, 221, 222,
+ 222, 222, 222, 222, 222, 223, 224, 225, 226, 226,
+ 227, 228, 228, 229, 229, 229, 230, 230, 231, 231,
+ 232, 232, 232, 233, 233, 233, 233, 234, 235, 235,
+ 235, 235, 236, 236, 236, 237, 237, 237, 237, 238,
+ 238, 238, 238, 238, 238, 238, 239, 239, 239, 240,
+ 240, 240, 241, 241, 242, 242, 243, 243, 243, 244,
+ 244, 244, 244, 245, 245, 245, 246, 246, 246, 246,
+ 247, 247, 248, 248, 249, 249, 249, 250, 250, 251,
+ 251, 253, 252, 254, 254, 254, 254, 254, 254, 255,
+ 255, 256, 257, 257, 257, 257, 257, 257, 257, 257,
+ 257, 258, 258, 258, 258, 258, 258, 259, 259, 259,
+ 259, 259, 259, 260, 260, 260, 260, 260, 260, 260,
+ 260, 261, 262, 263, 263, 264, 264, 264, 265, 265,
+ 266, 266, 267, 267, 268, 268, 269, 269, 270, 270,
+ 271, 272, 272, 272, 272, 272, 272, 272, 273, 273,
+ 274, 274, 274, 274, 274, 274, 274, 274, 274, 274,
+ 274, 275, 275, 275, 275, 275, 275, 275, 275, 275,
+ 275, 275, 276, 276, 276, 276, 277, 278, 279, 279,
+ 280, 280, 281, 282, 282, 283, 283, 283, 283, 285,
+ 286, 284, 287, 287, 288, 288, 289, 289, 290, 289,
+ 289, 291, 292, 289, 293, 294, 289, 295, 296, 297,
+ 289, 298, 299, 300, 289, 301, 289, 302, 289, 303,
+ 289, 304, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 306, 307, 305,
+ 308, 308, 308, 309, 310, 309, 311, 311, 312, 312,
+ 313, 313, 314, 314, 314, 315, 315, 315, 315, 316,
+ 316, 317, 317, 317, 318, 318, 319, 319, 320, 321,
+ 321, 322, 322, 322, 323, 323, 323, 323, 323, 323,
+ 323, 323, 323, 323, 324, 324, 324, 324, 324, 324,
+ 325, 325, 326, 326, 326, 326, 326, 326, 327, 327,
+ 328, 329, 329, 330, 330, 331, 332, 332, 333, 333,
+ 333, 333, 334, 335, 335, 335, 335, 335, 335, 335,
+ 335, 335, 335, 335, 335, 335, 335, 335, 335, 335,
+ 335, 335, 335, 335, 335, 335, 335, 335, 335, 335,
+ 335, 335, 335, 335, 335, 335, 335, 335
+};
+
+static const short yyr2[] = { 0,
+ 0, 1, 0, 2, 2, 0, 0, 1, 1, 0,
+ 2, 1, 1, 1, 1, 5, 4, 3, 4, 4,
+ 1, 0, 5, 1, 3, 2, 4, 1, 3, 1,
+ 3, 0, 5, 0, 5, 0, 5, 0, 5, 3,
+ 3, 6, 7, 4, 3, 3, 1, 1, 1, 1,
+ 1, 2, 3, 3, 3, 3, 2, 2, 2, 2,
+ 2, 1, 3, 4, 3, 5, 4, 3, 3, 2,
+ 3, 3, 2, 1, 6, 4, 3, 3, 2, 2,
+ 1, 3, 4, 2, 3, 0, 0, 1, 3, 2,
+ 3, 1, 4, 2, 4, 2, 4, 2, 1, 1,
+ 1, 1, 1, 1, 1, 1, 3, 3, 4, 4,
+ 3, 4, 4, 0, 1, 3, 1, 1, 0, 6,
+ 0, 1, 0, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 3, 1, 3, 0, 1, 1, 0, 8,
+ 1, 2, 4, 4, 1, 3, 3, 3, 3, 1,
+ 1, 1, 0, 3, 2, 2, 2, 2, 2, 2,
+ 4, 2, 4, 2, 3, 3, 4, 4, 5, 5,
+ 6, 2, 4, 5, 3, 3, 3, 1, 3, 2,
+ 3, 4, 1, 2, 5, 1, 4, 4, 4, 1,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 5,
+ 3, 3, 1, 2, 3, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 1, 1, 1, 1, 1, 1,
+ 1, 3, 3, 0, 4, 0, 6, 2, 4, 2,
+ 2, 1, 4, 1, 7, 7, 7, 7, 4, 4,
+ 2, 2, 1, 4, 2, 2, 2, 5, 3, 5,
+ 3, 4, 6, 1, 2, 1, 2, 1, 1, 1,
+ 2, 0, 2, 2, 3, 3, 3, 3, 3, 2,
+ 2, 1, 1, 1, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 4, 1, 2, 2,
+ 1, 1, 2, 2, 1, 2, 2, 3, 1, 2,
+ 1, 1, 1, 4, 4, 4, 4, 1, 1, 1,
+ 1, 3, 1, 3, 1, 3, 0, 4, 0, 7,
+ 4, 0, 7, 4, 0, 7, 4, 0, 7, 4,
+ 0, 1, 1, 2, 6, 1, 3, 0, 1, 4,
+ 6, 4, 1, 1, 1, 1, 1, 3, 1, 2,
+ 3, 4, 1, 1, 3, 4, 6, 3, 5, 0,
+ 7, 4, 0, 6, 3, 2, 2, 4, 1, 0,
+ 1, 0, 1, 1, 2, 2, 2, 2, 3, 2,
+ 2, 2, 3, 3, 1, 2, 0, 0, 3, 3,
+ 2, 1, 1, 0, 1, 2, 1, 3, 1, 2,
+ 1, 4, 4, 1, 1, 2, 2, 1, 0, 1,
+ 4, 3, 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 4, 2, 1, 5, 3, 0, 1, 3, 0,
+ 1, 3, 1, 1, 1, 1, 4, 6, 4, 4,
+ 6, 4, 3, 4, 6, 4, 4, 6, 4, 3,
+ 1, 3, 1, 3, 2, 1, 6, 0, 2, 1,
+ 2, 0, 2, 3, 3, 2, 2, 3, 1, 1,
+ 1, 2, 5, 5, 3, 5, 4, 3, 3, 2,
+ 1, 3, 3, 2, 2, 3, 1, 3, 3, 2,
+ 2, 3, 1, 5, 5, 3, 5, 3, 3, 4,
+ 3, 2, 2, 1, 2, 4, 4, 2, 1, 1,
+ 1, 2, 2, 2, 1, 2, 1, 2, 2, 3,
+ 1, 3, 2, 3, 2, 2, 3, 1, 3, 4,
+ 3, 2, 2, 1, 3, 2, 2, 1, 2, 3,
+ 1, 3, 1, 5, 3, 4, 3, 4, 2, 2,
+ 3, 2, 1, 1, 2, 2, 2, 0, 0, 1,
+ 1, 2, 3, 1, 2, 3, 5, 6, 5, 0,
+ 0, 6, 1, 2, 1, 1, 1, 2, 0, 4,
+ 1, 0, 0, 6, 0, 0, 7, 0, 0, 0,
+ 10, 0, 0, 0, 10, 0, 7, 0, 5, 0,
+ 7, 0, 4, 2, 2, 2, 3, 6, 8, 10,
+ 12, 4, 3, 2, 2, 1, 1, 0, 0, 7,
+ 1, 2, 2, 0, 0, 5, 1, 1, 3, 3,
+ 2, 2, 3, 4, 4, 3, 4, 6, 6, 0,
+ 1, 0, 1, 1, 0, 1, 1, 3, 4, 1,
+ 3, 0, 1, 1, 1, 2, 2, 2, 1, 1,
+ 2, 2, 2, 2, 1, 3, 2, 4, 2, 4,
+ 2, 2, 2, 2, 2, 2, 1, 2, 1, 1,
+ 0, 0, 1, 0, 4, 1, 1, 3, 0, 3,
+ 3, 3, 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 3, 2, 2, 2,
+ 2, 3, 2, 2, 4, 4, 3, 2
+};
+
+static const short yydefact[] = { 3,
+ 10, 10, 5, 0, 4, 0, 217, 519, 302, 312,
+ 470, 0, 8, 9, 0, 0, 384, 0, 703, 0,
+ 531, 218, 62, 0, 0, 691, 0, 74, 21, 0,
+ 11, 6, 0, 15, 14, 13, 12, 272, 0, 520,
+ 114, 226, 497, 0, 292, 0, 291, 305, 0, 325,
+ 311, 0, 395, 397, 398, 403, 402, 379, 301, 525,
+ 481, 0, 225, 227, 480, 0, 521, 313, 468, 0,
+ 0, 216, 60, 61, 523, 0, 0, 99, 100, 101,
+ 373, 376, 0, 527, 0, 377, 0, 0, 0, 30,
+ 0, 302, 0, 22, 0, 0, 395, 0, 0, 0,
+ 0, 495, 0, 0, 0, 494, 0, 0, 0, 226,
+ 0, 0, 0, 225, 227, 468, 0, 3, 0, 0,
+ 0, 0, 397, 398, 694, 0, 86, 81, 272, 0,
+ 0, 58, 524, 121, 468, 0, 472, 59, 0, 0,
+ 0, 0, 0, 321, 282, 479, 283, 491, 0, 468,
+ 304, 303, 57, 293, 0, 323, 0, 298, 318, 319,
+ 294, 307, 309, 320, 0, 52, 385, 386, 387, 388,
+ 401, 105, 104, 106, 390, 396, 392, 114, 391, 404,
+ 404, 418, 0, 471, 306, 70, 0, 73, 529, 513,
+ 482, 522, 0, 526, 0, 738, 734, 733, 731, 713,
+ 718, 719, 0, 725, 724, 710, 711, 709, 728, 717,
+ 714, 715, 716, 720, 721, 707, 708, 704, 705, 706,
+ 730, 722, 723, 712, 729, 0, 726, 637, 305, 638,
+ 699, 470, 229, 270, 0, 0, 0, 0, 153, 266,
+ 264, 242, 268, 269, 0, 0, 0, 0, 0, 0,
+ 0, 126, 125, 0, 127, 128, 0, 0, 213, 129,
+ 0, 115, 0, 186, 0, 190, 183, 118, 228, 152,
+ 0, 0, 230, 231, 0, 117, 289, 305, 290, 514,
+ 253, 244, 0, 0, 0, 395, 375, 0, 370, 528,
+ 0, 130, 131, 0, 0, 0, 0, 29, 0, 108,
+ 404, 122, 107, 0, 493, 0, 492, 100, 101, 215,
+ 224, 0, 501, 223, 0, 500, 508, 509, 0, 0,
+ 18, 10, 0, 7, 7, 46, 45, 694, 0, 32,
+ 40, 36, 34, 41, 38, 327, 80, 87, 84, 0,
+ 0, 272, 0, 0, 0, 568, 63, 574, 65, 111,
+ 506, 0, 669, 670, 151, 0, 150, 664, 687, 0,
+ 289, 305, 290, 0, 663, 665, 692, 675, 0, 511,
+ 0, 0, 0, 477, 0, 476, 0, 0, 0, 468,
+ 68, 56, 71, 0, 55, 468, 0, 472, 490, 0,
+ 295, 296, 0, 53, 69, 54, 72, 300, 299, 310,
+ 694, 326, 393, 389, 394, 405, 399, 400, 434, 0,
+ 0, 437, 440, 0, 0, 423, 0, 694, 308, 0,
+ 0, 341, 469, 496, 530, 0, 0, 727, 732, 468,
+ 468, 0, 468, 737, 0, 0, 0, 160, 0, 0,
+ 162, 0, 0, 0, 0, 0, 0, 0, 0, 159,
+ 156, 155, 157, 0, 0, 0, 0, 214, 0, 113,
+ 158, 0, 0, 184, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 238, 240, 241, 274,
+ 273, 0, 0, 0, 0, 0, 164, 466, 0, 172,
+ 271, 220, 0, 691, 219, 256, 257, 0, 284, 548,
+ 544, 553, 0, 472, 468, 468, 468, 286, 551, 0,
+ 518, 288, 0, 287, 255, 0, 251, 265, 267, 515,
+ 0, 252, 110, 109, 463, 382, 461, 372, 0, 314,
+ 0, 0, 315, 316, 317, 31, 0, 24, 690, 305,
+ 0, 689, 28, 119, 112, 499, 498, 502, 0, 17,
+ 20, 19, 327, 51, 47, 49, 50, 48, 44, 0,
+ 0, 0, 0, 341, 102, 92, 103, 0, 85, 88,
+ 0, 0, 0, 363, 0, 359, 82, 0, 0, 64,
+ 67, 575, 569, 468, 468, 668, 682, 674, 672, 548,
+ 544, 0, 683, 468, 686, 688, 684, 0, 685, 468,
+ 667, 681, 673, 671, 666, 693, 677, 679, 0, 510,
+ 473, 475, 474, 0, 0, 489, 0, 341, 322, 485,
+ 0, 0, 0, 488, 0, 478, 297, 324, 341, 327,
+ 415, 0, 414, 406, 407, 409, 0, 411, 433, 429,
+ 428, 217, 519, 468, 0, 662, 694, 430, 438, 443,
+ 444, 694, 694, 431, 441, 694, 0, 378, 425, 424,
+ 426, 427, 327, 696, 305, 697, 0, 0, 0, 340,
+ 342, 343, 736, 735, 699, 699, 699, 0, 0, 0,
+ 518, 0, 0, 519, 0, 154, 0, 0, 0, 0,
+ 0, 0, 233, 232, 0, 181, 116, 217, 519, 218,
+ 0, 0, 364, 380, 0, 212, 211, 654, 653, 0,
+ 209, 208, 206, 207, 205, 204, 203, 200, 201, 202,
+ 198, 199, 193, 194, 195, 196, 197, 191, 192, 0,
+ 0, 0, 0, 0, 0, 166, 178, 0, 0, 165,
+ 468, 468, 0, 468, 465, 538, 0, 0, 0, 0,
+ 259, 0, 261, 0, 512, 547, 546, 543, 542, 690,
+ 0, 0, 562, 0, 0, 559, 285, 560, 549, 468,
+ 662, 472, 548, 544, 0, 0, 468, 228, 0, 514,
+ 0, 0, 0, 383, 0, 382, 149, 148, 147, 146,
+ 0, 23, 390, 396, 0, 16, 341, 33, 37, 35,
+ 39, 0, 0, 90, 0, 94, 0, 98, 0, 96,
+ 0, 360, 0, 83, 66, 0, 576, 0, 570, 571,
+ 507, 504, 547, 543, 548, 544, 480, 0, 468, 549,
+ 548, 544, 0, 228, 0, 514, 505, 0, 0, 676,
+ 331, 468, 468, 468, 487, 337, 341, 0, 0, 417,
+ 416, 410, 0, 0, 436, 341, 0, 77, 0, 327,
+ 327, 0, 327, 0, 341, 0, 695, 0, 0, 338,
+ 344, 701, 700, 702, 243, 161, 0, 0, 163, 187,
+ 189, 188, 249, 250, 0, 0, 0, 0, 235, 0,
+ 0, 0, 0, 182, 0, 236, 239, 176, 175, 168,
+ 0, 167, 180, 0, 0, 535, 533, 0, 536, 472,
+ 173, 0, 0, 262, 0, 0, 545, 541, 552, 468,
+ 561, 550, 555, 0, 557, 0, 548, 544, 516, 517,
+ 0, 254, 464, 462, 374, 0, 25, 0, 0, 0,
+ 42, 91, 89, 0, 0, 0, 0, 361, 357, 0,
+ 0, 217, 580, 592, 595, 0, 568, 0, 0, 0,
+ 0, 0, 0, 218, 626, 0, 650, 0, 587, 0,
+ 0, 305, 0, 564, 585, 591, 563, 586, 627, 0,
+ 598, 602, 572, 547, 543, 482, 549, 517, 678, 680,
+ 329, 486, 483, 484, 335, 334, 0, 0, 408, 341,
+ 341, 76, 453, 468, 217, 519, 0, 439, 445, 446,
+ 694, 694, 341, 341, 442, 0, 432, 698, 328, 348,
+ 0, 0, 0, 0, 0, 0, 368, 0, 0, 365,
+ 185, 210, 123, 0, 169, 170, 177, 179, 534, 532,
+ 539, 537, 0, 174, 0, 258, 260, 558, 468, 556,
+ 371, 27, 0, 43, 93, 97, 95, 362, 0, 573,
+ 567, 579, 641, 568, 568, 568, 0, 0, 0, 612,
+ 614, 615, 616, 0, 0, 0, 642, 568, 651, 0,
+ 588, 280, 694, 0, 281, 0, 694, 0, 694, 0,
+ 0, 577, 566, 565, 589, 625, 624, 568, 568, 0,
+ 0, 332, 412, 413, 452, 449, 435, 0, 0, 341,
+ 327, 327, 447, 450, 354, 355, 356, 353, 0, 346,
+ 349, 339, 0, 0, 0, 0, 366, 0, 0, 123,
+ 237, 0, 171, 540, 263, 554, 120, 358, 0, 0,
+ 0, 583, 0, 0, 568, 643, 0, 646, 0, 0,
+ 608, 0, 617, 0, 623, 628, 0, 276, 327, 278,
+ 279, 327, 0, 0, 0, 275, 277, 578, 568, 0,
+ 0, 330, 336, 0, 75, 341, 341, 460, 341, 341,
+ 0, 0, 348, 0, 0, 245, 246, 247, 248, 0,
+ 369, 124, 467, 134, 0, 581, 593, 584, 596, 647,
+ 645, 0, 644, 141, 0, 305, 0, 0, 0, 613,
+ 622, 0, 0, 590, 138, 0, 137, 0, 333, 459,
+ 456, 454, 457, 448, 451, 347, 345, 217, 0, 367,
+ 0, 568, 0, 0, 0, 0, 606, 694, 610, 609,
+ 0, 631, 0, 629, 655, 0, 599, 603, 0, 0,
+ 0, 350, 352, 135, 582, 569, 594, 145, 132, 0,
+ 0, 649, 0, 648, 568, 327, 0, 633, 632, 634,
+ 0, 0, 656, 657, 618, 0, 0, 455, 458, 0,
+ 142, 0, 0, 597, 607, 341, 611, 630, 0, 655,
+ 0, 0, 0, 0, 351, 0, 0, 133, 0, 635,
+ 0, 0, 619, 658, 600, 604, 144, 143, 139, 0,
+ 659, 0, 0, 0, 0, 0, 0, 0, 660, 0,
+ 620, 601, 605, 140, 0, 0, 636, 0, 0, 639,
+ 640, 661, 621, 0, 0, 0
+};
+
+static const short yydefgoto[] = { 1344,
+ 1, 2, 119, 561, 977, 3, 4, 31, 32, 33,
+ 299, 547, 548, 34, 91, 35, 570, 572, 571, 573,
+ 569, 36, 37, 38, 411, 128, 129, 130, 338, 579,
+ 580, 535, 581, 176, 39, 40, 41, 134, 261, 262,
+ 302, 805, 303, 1141, 263, 978, 1271, 1206, 1226, 1227,
+ 1326, 1267, 292, 786, 264, 444, 496, 750, 265, 266,
+ 267, 293, 269, 506, 311, 43, 270, 456, 1043, 271,
+ 272, 273, 274, 131, 275, 979, 401, 516, 770, 980,
+ 45, 161, 981, 47, 162, 439, 163, 143, 155, 49,
+ 628, 144, 1110, 402, 1184, 156, 1111, 50, 1031, 680,
+ 681, 682, 1129, 1130, 1131, 960, 713, 714, 51, 539,
+ 288, 903, 795, 52, 53, 54, 55, 180, 181, 56,
+ 57, 58, 407, 644, 645, 646, 647, 183, 414, 415,
+ 416, 417, 658, 664, 659, 1018, 660, 661, 1019, 1020,
+ 536, 537, 497, 776, 59, 371, 372, 145, 60, 61,
+ 146, 147, 113, 63, 507, 280, 281, 282, 65, 283,
+ 67, 68, 179, 69, 284, 755, 756, 767, 519, 983,
+ 984, 1151, 828, 829, 830, 347, 985, 986, 1074, 1242,
+ 1153, 987, 988, 1179, 1075, 1243, 1076, 1244, 1108, 1286,
+ 1324, 1109, 1287, 1325, 1275, 1219, 1277, 1162, 989, 1222,
+ 1280, 1254, 1298, 1320, 1217, 1328, 990, 991, 992, 1090,
+ 720, 1282, 1283, 1284, 1330, 364, 772, 366, 367, 368,
+ 369, 107, 618, 1169, 676, 677, 434, 71, 72
+};
+
+static const short yypact[] = { 71,
+ 88,-32768,-32768, 2859,-32768, 188,-32768, 266, 64,-32768,
+-32768, 555,-32768,-32768, 140, 146,-32768, 235,-32768, 1989,
+-32768, 237,-32768, 1666, 1666,-32768, 1814,-32768,-32768, 222,
+-32768, 285, 3383,-32768,-32768,-32768,-32768, 201, 286, 315,
+-32768,-32768, 80, 1865,-32768, 4890,-32768, 846, 619,-32768,
+-32768, 142,-32768,-32768,-32768,-32768,-32768, 316, 1575,-32768,
+-32768, 675,-32768,-32768,-32768, 347,-32768,-32768,-32768, 215,
+ 3638,-32768,-32768,-32768,-32768, 8393, 4228,-32768, 266, 237,
+ 284, 340, 315,-32768, 215,-32768, 215, 8393, 8393,-32768,
+ 623,-32768, 237,-32768, 2940, 4475, 399, 215, 8393, 266,
+ 2258,-32768, 410, 245, 2258,-32768, 420, 2904, 2904, 296,
+ 325, 80, 331, 345, 348,-32768, 444, 361, 2291, 262,
+ 2940, 9527, 664, 847, 375, 475,-32768, 382, 149, 119,
+ 119,-32768,-32768, 387,-32768, 6095, 390,-32768, 3062, 3062,
+ 4178, 1327, 637,-32768,-32768, 242,-32768,-32768, 347,-32768,
+-32768,-32768,-32768, 846, 688,-32768, 1462,-32768,-32768,-32768,
+ 959, 730,-32768,-32768, 2940,-32768,-32768,-32768,-32768,-32768,
+-32768,-32768,-32768,-32768,-32768,-32768, 315, 871,-32768, 443,
+ 443,-32768, 2169,-32768, 730,-32768, 469, 861,-32768,-32768,
+-32768,-32768, 3286,-32768, 155,-32768, 419, 499,-32768,-32768,
+-32768,-32768, 463,-32768,-32768,-32768,-32768,-32768,-32768,-32768,
+-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,
+-32768,-32768,-32768,-32768,-32768, 485,-32768,-32768, 730, 1575,
+ 342, 504,-32768,-32768, 3906, 9334, 542, 573,-32768,-32768,
+-32768,-32768,-32768,-32768, 577, 588, 616, 643, 646, 420,
+ 9085,-32768,-32768, 9085,-32768,-32768, 9085, 6659, 4696,-32768,
+ 41,-32768, 9085,-32768, 8480,-32768,-32768, 9696,-32768, 898,
+ 3155, 8563,-32768, 678, 713,-32768, 1174, 2042, 3006,-32768,
+ 157,-32768, 393, 808, 2940, 399,-32768, 420, 622,-32768,
+ 630, 666, 9604, 647, 656, 674, 780,-32768, 4228,-32768,
+ 443,-32768,-32768, 282,-32768, 195,-32768,-32768,-32768,-32768,
+-32768, 2258,-32768,-32768, 2258,-32768,-32768,-32768, 3286, 42,
+-32768, 686, 4228,-32768,-32768,-32768,-32768, 375, 732,-32768,
+-32768,-32768,-32768,-32768,-32768, 589,-32768, 209,-32768, 6749,
+ 8650,-32768, 119, 119, 745,-32768,-32768,-32768,-32768,-32768,
+ 803, 721,-32768,-32768, 666, 726, 9604, 74, 2165, 9527,
+ 2165, 3271, 3772, 742,-32768, 143, 9420, 763, 776,-32768,
+ 749, 8650, 3372,-32768, 3372,-32768, 3542, 3542, 748,-32768,
+-32768,-32768, 861, 2940,-32768,-32768, 6197, 751,-32768, 4114,
+ 959, 846, 2940,-32768,-32768,-32768, 861,-32768,-32768,-32768,
+ 375,-32768,-32768,-32768,-32768, 764,-32768,-32768,-32768, 8650,
+ 39, 1890, 9437, 72, 1839,-32768, 269, 375, 730, 3218,
+ 759, 832,-32768,-32768,-32768, 794, 801,-32768,-32768,-32768,
+-32768, 229,-32768,-32768, 8650, 504, 6659,-32768, 191, 6659,
+-32768, 8650, 8737, 9085, 8393, 3218, 3218, 3218, 3218,-32768,
+-32768,-32768,-32768, 767, 790, 745, 812,-32768, 8393,-32768,
+-32768, 2485, 6659,-32768, 8650, 8650, 6839, 8650, 8650, 8650,
+ 8650, 8650, 8650, 8650, 8650, 8650, 8650, 8650, 8650, 8650,
+ 8650, 8650, 8650, 8650, 8650, 8650,-32768,-32768,-32768,-32768,
+-32768, 8650, 8650, 8650, 8393, 997, 558, 225, 7388,-32768,
+-32768, 266, 873, 920,-32768, 477, 543, 631,-32768, 99,
+ 99,-32768, 3482, 817, 848, 894,-32768,-32768, 539, 7863,
+ 1305,-32768, 252,-32768,-32768, 8650,-32768,-32768,-32768,-32768,
+ 174,-32768,-32768,-32768, 893, 905,-32768,-32768, 420,-32768,
+ 7211, 7301,-32768,-32768,-32768,-32768, 320,-32768,-32768, 4012,
+ 142,-32768,-32768,-32768,-32768,-32768,-32768,-32768, 897,-32768,
+-32768,-32768, 589,-32768,-32768,-32768,-32768,-32768,-32768, 907,
+ 927, 939, 942, 832,-32768,-32768, 237, 8650, 945,-32768,
+ 559, 563, 567,-32768, 6299, 9673,-32768, 906, 119,-32768,
+-32768,-32768, 36,-32768,-32768,-32768,-32768,-32768,-32768, 2346,
+ 2346, 3148,-32768,-32768,-32768,-32768,-32768, 7954,-32768,-32768,
+-32768,-32768,-32768,-32768,-32768,-32768, 944, 949, 6749,-32768,
+-32768,-32768,-32768, 3372, 3372,-32768, 4114, 832,-32768, 803,
+ 908, 912, 913,-32768, 911,-32768, 959,-32768, 832, 589,
+-32768, 930,-32768, 962,-32768,-32768, 916,-32768, 9673,-32768,
+-32768, 961, 639,-32768, 8650, 4879, 375, 970,-32768,-32768,
+-32768, 594, 596, 971,-32768, 375, 969,-32768,-32768,-32768,
+-32768,-32768, 535,-32768, 4103,-32768, 210, 444, 943, 976,
+ 832,-32768,-32768,-32768, 424, 424, 424, 933, 934, 8824,
+ 894, 935, 938, 141, 940,-32768, 950, 951, 974, 975,
+ 985, 986,-32768,-32768, 955,-32768,-32768, 998, 849, 509,
+ 8650, 1003,-32768, 991, 968, 9673, 9673,-32768,-32768, 1004,
+ 9714, 9731, 9747, 4614, 4486, 2373, 3583, 1046, 1046, 1046,
+ 1055, 1055, 800, 800, 540, 540, 540,-32768,-32768, 973,
+ 966, 977, 980, 984, 3218, 558,-32768, 6749, 8650,-32768,
+-32768,-32768, 8650,-32768,-32768, 983, 9085, 982, 995, 1035,
+-32768, 8650,-32768, 8650,-32768, 467,-32768, 467,-32768, 70,
+ 989, 990,-32768, 988, 3218, 803,-32768, 803, 719,-32768,
+ 1540, 993, 8045, 8045, 5119, 994, 8480, 296, 1000, 348,
+ 808, 1001, 8650, 420, 996, 905,-32768, 9673,-32768, 9673,
+ 4228,-32768, 428, 1040, 316,-32768, 832,-32768,-32768,-32768,
+-32768, 732, 1005,-32768, 209,-32768, 8650,-32768, 8650,-32768,
+ 8650,-32768, 24,-32768,-32768, 420,-32768, 5723, 1071,-32768,
+ 803, 803, 2634, 2634, 2729, 2729,-32768, 347,-32768, 2804,
+ 8132, 8132, 5993, 317, 1007, 412, 803, 6749, 6749,-32768,
+ 1048,-32768,-32768,-32768,-32768, 1052, 832, 8393, 764,-32768,
+-32768,-32768, 8650, 8650, 68, 9579, 1009,-32768, 1369, 589,
+ 589, 2412, 731, 2580, 832, 3218,-32768, 60, 1025,-32768,
+-32768,-32768,-32768,-32768,-32768,-32768, 9168, 9168,-32768,-32768,
+-32768,-32768,-32768,-32768, 1027, 1038, 1047, 1053,-32768, 9555,
+ 6749, 6389, 1033,-32768, 8650,-32768,-32768,-32768,-32768, 536,
+ 1041,-32768,-32768, 1042, 265, 273, 273, 1049, 273,-32768,
+-32768, 9085, 1139,-32768, 1044, 1051,-32768,-32768,-32768,-32768,
+-32768,-32768, 803, 1056,-32768, 1050, 8219, 8219,-32768,-32768,
+ 821,-32768, 9673,-32768,-32768, 1057,-32768, 435, 2169, 732,
+-32768,-32768,-32768, 1059, 1061, 1063, 6479,-32768,-32768, 705,
+ 354, 1099,-32768,-32768,-32768, 1084,-32768, 8650, 1114, 1119,
+ 1120, 8306, 329, 524,-32768, 1127, 1177, 1129,-32768, 4142,
+ 9510, 2148, 5029,-32768,-32768, 1178,-32768,-32768,-32768, 7491,
+-32768,-32768,-32768, 2634, 2634,-32768, 2804, 1719,-32768,-32768,
+-32768, 803, 803, 803,-32768, 1130, 1089, 1092,-32768, 9579,
+ 9579,-32768,-32768,-32768, 1136, 899, 8650,-32768,-32768,-32768,
+ 375, 375, 832, 832,-32768, 1978,-32768,-32768,-32768, 569,
+ 6749, 8650, 8650, 8650, 8650, 6749,-32768, 8650, 1137,-32768,
+-32768, 4942, 387, 8650,-32768, 536,-32768,-32768,-32768,-32768,
+-32768,-32768, 1094,-32768, 1159,-32768,-32768, 803,-32768,-32768,
+-32768,-32768, 83,-32768,-32768,-32768,-32768,-32768, 420,-32768,
+-32768,-32768,-32768,-32768,-32768, 745, 6569, 1117, 5185,-32768,
+-32768,-32768,-32768, 1151, 8650, 1152,-32768,-32768,-32768, 1122,
+-32768,-32768, 31, 750,-32768, 787, 375, 8911, 34, 900,
+ 362,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768, 6749,
+ 6749,-32768,-32768,-32768,-32768,-32768, 68, 8650, 8650, 9579,
+ 589, 589, 1153, 1154,-32768,-32768,-32768,-32768, 330,-32768,
+ 1126,-32768, 1113, 1115, 1121, 1131,-32768, 9627, 6749, 387,
+-32768, 1111,-32768,-32768,-32768, 803,-32768,-32768, 582, 582,
+ 7773,-32768, 1204, 1165, 1138,-32768, 1166,-32768, 8393, 8650,
+-32768, 7593,-32768, 1168,-32768,-32768, 444,-32768, 589,-32768,
+-32768, 589, 9251, 9251, 6929,-32768,-32768,-32768, 745, 7019,
+ 7019,-32768,-32768, 6749,-32768, 9579, 9579,-32768, 832, 832,
+ 6749, 6749, 569, 1142, 8998,-32768,-32768,-32768,-32768, 6749,
+-32768,-32768,-32768,-32768, 8393,-32768,-32768,-32768,-32768,-32768,
+-32768, 5813,-32768,-32768, 1144, 257, 2940, 9650, 7593,-32768,
+-32768, 5271, 53,-32768,-32768, 1176,-32768, 1191,-32768,-32768,
+-32768, 1190, 1192,-32768,-32768,-32768,-32768, 380, 1149,-32768,
+ 1156, 745, 7683, 585, 423, 5411,-32768, 375,-32768,-32768,
+ 434,-32768, 5515,-32768, 1245, 1208,-32768,-32768, 6749, 6749,
+ 8650,-32768,-32768,-32768,-32768, 48,-32768,-32768,-32768, 8650,
+ 1209,-32768, 1213,-32768, 745, 589, 7593,-32768,-32768,-32768,
+ 1183, 8, 1217,-32768,-32768, 7109, 7109,-32768,-32768, 1173,
+-32768, 5903, 1184,-32768,-32768, 832,-32768, 1187, 8650, 1245,
+ 1223, 1245, 1188, 1189,-32768, 498, 5619,-32768, 1222,-32768,
+ 1195, 239,-32768,-32768,-32768,-32768,-32768,-32768,-32768, 1201,
+-32768, 1283, 1237, 7683, 7683, 6749, 4122, 745,-32768, 436,
+-32768,-32768,-32768,-32768, 1196, 1199,-32768, 1294, 1248,-32768,
+-32768,-32768,-32768, 1306, 1307,-32768
+};
+
+static const short yypgoto[] = {-32768,
+ 1193,-32768,-32768, 992, 18, 1303,-32768,-32768,-32768,-32768,
+-32768,-32768, 513,-32768,-32768,-32768,-32768,-32768,-32768,-32768,
+ -756, 1200, 1202,-32768,-32768,-32768,-32768, 1181,-32768,-32768,
+ 503, 0,-32768, 771,-32768, 3899, -26,-32768, 1226, 867,
+ -998,-32768, -84, 187,-32768, 196,-32768, 179, 150, -1074,
+-32768, -350, 929, 118, 753,-32768,-32768, -703, 2564, 926,
+ -251, 2283, 2895, 822, 618, 425,-32768,-32768,-32768,-32768,
+ -248,-32768, -108, -83,-32768, 255, 17, -273, 82, 21,
+ -121, -120, -3, 1868, 46, 1413, -98, -418, 352,-32768,
+ -160,-32768,-32768, 207,-32768,-32768,-32768,-32768,-32768, 66,
+-32768, 653,-32768, 144,-32768,-32768, 394, 756, 29,-32768,
+-32768,-32768, 546, -261, 37, 1302, 1310,-32768,-32768,-32768,
+-32768,-32768, -116,-32768, 479, -614,-32768, 531, 395, 471,
+ -409,-32768,-32768,-32768,-32768,-32768,-32768, 941,-32768, 474,
+ 809, 553, 860, 1539, 1625, -357,-32768, 2462, -63, 20,
+-32768, 4399, -90, 441,-32768, 3481,-32768,-32768, 4001, -4,
+ 154, -311, 1345, 3650, 789, -169,-32768, 4093,-32768, -1136,
+ -924, -335, 92,-32768, 532, -92, -128,-32768,-32768,-32768,
+ -1121, -930, -1102,-32768,-32768,-32768,-32768,-32768,-32768,-32768,
+-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,
+-32768,-32768,-32768,-32768, -24,-32768,-32768,-32768,-32768,-32768,
+ -281, 67,-32768, 76,-32768, -343, -132,-32768,-32768, -289,
+ -286,-32768,-32768, 40, 490,-32768, -18,-32768, -247
+};
+
+
+#define YYLAST 9826
+
+
+static const short yytable[] = { 66,
+ 46, 348, 348, 365, 521, 670, 194, 85, 320, 552,
+ 593, 82, 553, 464, 359, 85, 96, 313, 316, 103,
+ 103, 30, 103, 290, 44, 178, 583, 422, 66, 122,
+ 635, 84, 862, 391, 194, 529, 532, 551, 349, 149,
+ 95, 66, 912, 633, 1140, 344, 231, 85, 1208, 350,
+ 501, 175, 1104, 121, 85, 951, 97, 1224, 1103, 1107,
+ 142, 501, 399, 400, 408, 195, 85, 1300, 501, 826,
+ -1, 84, 85, 96, 423, 1246, 164, 552, 957, 596,
+ 617, 826, 85, 596, 1215, 1253, 1168, -2, 521, 1176,
+ 149, 85, 650, 85, 648, 459, 103, 285, 651, 306,
+ 103, 188, 100, 103, 103, 11, 310, 667, 77, 1301,
+ 460, 300, 1255, 286, 66, 46, 149, 66, 667, 345,
+ 1265, -694, 187, 135, 597, 187, 958, -694, 597, 598,
+ 1241, -690, 360, 598, 149, 149, 149, 328, 827, 44,
+ 1268, 1140, 509, 559, 78, 79, 167, 168, 169, 21,
+ 1291, 93, 611, 1295, 1256, 1307, 774, 276, 8, 187,
+ 149, 1029, 599, 510, 336, 691, 599, 136, 137, 294,
+ 296, 706, 346, 511, 668, 574, 170, 359, 66, 413,
+ 276, 383, 164, 512, 554, 1147, 513, 514, 103, 164,
+ 164, 75, 21, 1064, 80, 171, 397, 612, 100, 392,
+ 525, 534, 613, 412, 342, 421, 1045, 93, 127, 76,
+ 240, 575, 8, 164, 348, 348, 592, 358, 8, 192,
+ 194, 1268, 1268, 126, 21, 85, 432, 88, 100, 425,
+ 419, 1220, 100, 89, 509, 614, 639, 90, 192, 172,
+ 173, 174, 891, 73, 526, 359, 498, 93, 100, 450,
+ 590, 591, 576, 356, 365, 8, 192, 164, 589, 21,
+ 127, 577, 159, 160, 876, 359, 85, 93, 12, 425,
+ 508, 637, 432, 432, 523, 21, 100, 93, 690, 423,
+ 149, 93, 529, 291, 295, 386, 313, 316, 1250, 751,
+ 74, 17, 399, 400, 85, 360, 578, 93, 1322, 752,
+ 509, 533, 192, 189, 93, 99, 164, 103, 509, 117,
+ 103, 877, 867, 753, 103, 1104, 75, 326, 85, 96,
+ 400, 1103, 1104, 21, 671, 93, 189, 705, 1103, 387,
+ 388, 78, 308, 1062, 76, 648, 459, 751, 118, 457,
+ 1323, 132, 1143, 95, 690, 100, 1297, 752, 192, 7,
+ 8, 555, 690, 421, 149, 66, 149, 149, 523, 97,
+ -226, 753, 66, 360, 327, 133, 1048, 563, 149, 182,
+ 149, 672, 149, 149, 801, 603, 1104, 607, 609, 149,
+ 549, 309, 1103, 360, 1193, 149, 287, 19, 149, 802,
+ 164, 359, 21, 289, 93, 7, 8, -221, 359, 22,
+ 421, 85, 807, 1085, -226, -226, 430, 149, 66, 1071,
+ 66, 413, 7, 100, 421, 85, 431, 1071, -221, 164,
+ 164, 189, 78, 308, 936, 339, 317, 100, 657, 26,
+ 423, 1194, 318, 19, 1261, 412, 192, 934, 8, 340,
+ 640, 85, 85, 85, 85, 22, -503, 164, 549, -222,
+ 19, 112, 234, 455, -121, -227, 1072, 673, 588, 192,
+ 348, 712, 22, 321, 1178, 642, 187, 114, 358, 341,
+ 100, 498, 309, 184, 21, 26, 93, 337, 1071, 857,
+ 359, 1262, -26, 301, 189, 21, 359, 93, 430, 1071,
+ 1338, 85, 26, 432, 370, 301, 825, -26, 431, -227,
+ -227, 674, 406, 583, 632, 432, 432, 426, 523, 360,
+ 509, 552, 875, -222, 553, 66, 360, 21, 689, 93,
+ 761, 692, 428, 365, 178, 1272, 698, 699, 700, 701,
+ 702, 510, 112, 112, 359, 464, 1278, 1339, 399, 551,
+ 276, 511, 529, 532, 715, 149, 85, 648, 114, 114,
+ 803, 512, 688, 1071, 513, 514, 420, 78, 79, 13,
+ 14, 1094, 1053, 1100, 762, 112, 609, 621, -101, 878,
+ 84, 78, 308, 1125, 1126, 1127, 744, 99, 164, 747,
+ 421, 114, 780, 1087, 712, 192, 763, 427, -79, 429,
+ 629, 435, 99, 748, -79, 149, 149, 838, 360, 638,
+ 1317, 747, 816, 838, 360, 21, 818, 80, 81, 740,
+ 820, 742, 743, 13, 14, 748, 670, 485, 486, 149,
+ 149, 309, 149, 749, 1044, 1204, 781, 782, 1269, 442,
+ 764, 1078, 455, 7, 502, 455, 648, 693, 695, 812,
+ 697, 691, 85, 792, 111, 749, 817, -282, 365, -283,
+ 819, 838, 360, -282, 821, -283, 446, 421, 455, 359,
+ 443, 192, 719, 359, 445, 164, 882, 883, 884, 1205,
+ 432, 19, 1270, 165, 166, 186, 192, 297, 298, 359,
+ 432, 432, 432, 22, 447, 187, 501, 187, 741, 75,
+ 421, 384, 385, 851, 758, 813, 868, -694, 864, -694,
+ -694, 870, 871, 164, 856, 873, -694, 76, 691, 1023,
+ 1024, 448, 1024, 26, 449, 7, 502, 330, 503, 331,
+ 541, 359, 100, 332, 538, 423, 314, 194, -694, -694,
+ -694, 540, -694, 587, -694, 159, 160, 358, 1149, 1150,
+ 85, 12, 393, 394, 313, 316, 1049, 1050, 543, 1052,
+ 313, 316, 1166, 19, 564, 13, 14, 544, 111, 1069,
+ 1070, 432, 509, 432, 17, 22, 187, 8, 641, 21,
+ 85, 93, 1180, 1181, 432, 545, 85, 360, 66, 66,
+ 66, 360, 546, 510, -78, 565, 941, 566, 560, 567,
+ -78, 568, 70, 511, 642, 504, 85, 360, 346, 643,
+ 87, 112, 112, 512, 384, 1170, 513, 514, 98, 423,
+ 527, 8, 104, 104, 21, 104, 93, 114, 114, 1212,
+ -689, 70, 594, 7, 8, 959, 911, 595, 149, 149,
+ 149, 149, 104, 619, 70, 149, 838, 838, 838, 360,
+ 87, 393, 1171, 610, 240, 528, 678, 98, 19, 626,
+ 158, 159, 160, 620, 85, 634, 715, 12, 21, 98,
+ 93, 19, 358, 679, 149, 98, 914, 103, 703, 66,
+ 413, 85, 950, 22, 482, 483, 484, 485, 486, 925,
+ 17, 926, 549, 104, 98, 13, 14, 421, 421, 104,
+ 421, 704, -327, 104, 412, 425, 104, 104, 683, 75,
+ 333, 1039, 334, 26, 1012, 684, 335, 70, -100, 104,
+ 70, 432, 432, 706, 432, -327, -327, 76, -327, 8,
+ 860, 773, 1006, 759, 403, 760, 404, 104, 104, 104,
+ 405, 1013, 66, 66, 954, 775, 955, 777, 956, 1008,
+ 1027, 487, 691, 85, 66, 413, 642, 1152, 918, 75,
+ 793, 861, 806, 104, 384, 1177, 1039, 674, 1119, 794,
+ 1189, 1190, 808, 398, 159, 160, 21, 76, 93, 412,
+ 12, 70, 1086, 1332, 1333, 149, 66, 149, 488, 489,
+ 455, 104, 809, 490, 491, 492, 493, 438, 441, 149,
+ 149, 192, 149, 17, 810, 314, 1093, 811, 1099, 815,
+ 8, 848, 10, 11, 1303, 1304, 849, 824, 12, 852,
+ 164, 639, 850, 853, 854, 855, 859, 858, 98, 104,
+ 863, 66, 413, 15, 869, 872, 112, 16, 874, 1128,
+ 879, 17, 112, 880, 885, 886, 889, 552, 455, 890,
+ 1336, 892, 114, 895, 896, 902, 412, 21, 114, 93,
+ 1152, 893, 894, 1007, 897, 898, 899, -99, 1223, 98,
+ 1121, 1122, 901, 905, 355, 104, 104, 70, 1148, 904,
+ 907, 920, 531, 104, 906, 1115, 1116, 923, 924, 908,
+ 112, 909, 313, 316, 745, 910, 922, 98, 1123, 1124,
+ 929, 930, 931, 838, 192, 939, 114, 935, 945, 948,
+ 104, 940, 942, 104, 826, 1001, 952, 104, 998, 1005,
+ 1014, 98, 1030, 1152, 1032, 1296, 478, 479, 480, 481,
+ 482, 483, 484, 485, 486, 1033, 98, 480, 481, 482,
+ 483, 484, 485, 486, 1034, 1041, 1172, 789, 421, 421,
+ 1035, 913, 1046, 1047, 1055, 1056, 1152, 104, 70, 104,
+ 104, 70, 1057, 1051, 1060, 70, 1185, 1059, 1073, 1061,
+ 1065, 104, 1066, 104, 1067, 104, 104, 1084, 838, 838,
+ 838, 1077, 104, 1080, 1081, 1082, 451, 100, 104, 452,
+ 1088, 104, 453, 1089, 1091, 1188, 421, 1112, 461, 421,
+ 1113, 1105, 1128, 1114, 98, 1118, 1139, 500, 1144, 1337,
+ 104, 70, 1145, 70, 1159, 359, 1163, 1165, 98, 1167,
+ 1191, 1192, 149, 1195, 1196, 1203, 1197, 509, 1209, 111,
+ 1210, 1213, 1198, 1221, 21, 845, 93, 1133, 1134, 1135,
+ 1136, 1257, 1199, 1248, 98, 98, 98, 98, 510, 1142,
+ 1211, 999, 1000, 1237, 164, 1247, 1258, 1259, 511, 1260,
+ 1263, 1230, 1231, 1281, 1232, 1233, 457, 1264, 512, 112,
+ 112, 513, 514, 1285, 1294, 112, 112, 112, 1071, 355,
+ 1299, 1302, 1157, 111, 1305, 114, 114, 1310, 1313, 1319,
+ 1164, 114, 114, 114, 98, 1308, 104, 1276, 1327, 1315,
+ 1316, 1329, 1331, 421, 1037, 1040, 1321, 1340, 104, 104,
+ 1341, 70, 1342, 1343, 5, 1345, 1346, 789, 791, 343,
+ 322, -468, 1239, 947, -468, 355, 562, 953, 324, 531,
+ 325, 804, 85, 360, 304, 707, 1202, 381, 1207, 765,
+ 1228, 1158, 1096, 881, 123, 949, 1236, 1009, 104, 87,
+ 823, 946, 124, 1063, 1026, 1025, 944, 796, 777, -694,
+ 1040, -694, -694, 665, 1214, 746, 86, 1292, -694, -468,
+ 993, 1309, -468, 355, -468, 1028, 1312, 0, 0, 696,
+ 455, 1015, 1016, 0, -468, 1214, 1214, 1314, 1290, 0,
+ -694, -694, 382, 0, -694, 0, -694, 0, 104, 104,
+ 70, 0, -468, -468, 0, 0, 791, -468, 0, 0,
+ 1214, 314, 0, 0, 0, 0, -468, 0, 549, 19,
+ 0, 0, 104, 104, 0, 104, 48, 0, 187, 21,
+ 355, 22, 355, 355, 1132, 0, 0, 0, 1017, 1137,
+ 0, 0, 48, 139, 0, 98, 0, 0, 0, 0,
+ 0, 0, 0, 140, 70, 48, 0, 0, 355, 0,
+ 0, 26, 0, 314, 355, 0, 141, 0, 154, 314,
+ 111, 0, 395, 104, 0, 1293, 0, 0, 0, 0,
+ 0, 185, 0, 104, 104, 104, 0, 0, 0, 0,
+ 0, 719, 719, 229, -694, 0, -694, -694, 278, 48,
+ 0, 0, 0, -694, 1311, 0, 0, 0, 0, 0,
+ 278, 278, 0, 1182, 1183, 314, 355, 0, 154, 0,
+ 0, 278, 0, 0, 0, -694, -694, 396, 0, -694,
+ 0, -694, 112, 0, 0, 0, 0, 0, 0, 0,
+ 0, 48, 1201, 98, 154, 0, 355, 0, 114, 0,
+ 0, 0, 0, 8, 92, 10, 11, 0, 362, 353,
+ 0, 12, 0, 187, 104, 0, 104, 0, 0, 0,
+ 0, 0, 0, 98, 0, 0, 15, 104, 0, 98,
+ 16, 791, 791, 791, 17, 0, 0, 1229, 8, 531,
+ 10, 184, 0, 0, 1234, 1235, 12, 0, 0, 98,
+ 21, 0, 93, 1240, 0, 48, 0, 112, 112, 112,
+ 0, 15, 0, 98, 0, 16, 0, 193, 0, 17,
+ 0, 0, 0, 114, 114, 114, 0, 0, 355, 0,
+ 0, 104, 104, 104, 104, 21, 0, 93, 104, 791,
+ 791, 791, 354, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 185, 0, 0, 0, 0, 98, 101, 105,
+ 0, 0, 1288, 1289, 319, 0, 0, 104, 0, 0,
+ 104, 0, 70, 0, 98, 0, 0, 0, 7, 100,
+ 278, 0, 11, 351, 0, 0, 0, 355, 0, 0,
+ 0, 0, 921, 229, 0, 0, 0, 0, 390, 0,
+ 355, 185, 355, 0, 0, 230, 0, 0, 0, 0,
+ 279, 0, 0, 0, 104, 104, 19, 104, 451, 452,
+ 0, 550, 279, 279, 0, 845, 21, 0, 22, 1334,
+ 0, 0, 0, 279, 0, 791, 791, 0, -508, 0,
+ 24, 0, 312, 315, 0, 48, 98, 70, 0, 0,
+ 25, 0, 0, -508, -508, 355, 0, 355, 26, 355,
+ -508, 0, 0, 27, 0, 0, 0, 0, 0, 0,
+ 363, 0, -508, 373, 375, 0, 451, 452, 104, 70,
+ 104, 0, 154, -508, -508, 185, -508, 0, -508, 550,
+ 0, 0, 104, 104, 0, 104, 0, 0, 0, 0,
+ 0, 314, 111, 0, 0, 0, 0, 0, 0, 362,
+ 0, 0, 0, 0, 0, 0, -508, -508, 0, 0,
+ -508, -508, 451, 452, 70, 0, 7, 100, 0, 0,
+ -508, 0, 0, 0, 0, 154, 0, 48, 0, 0,
+ 0, 0, 675, 0, 0, 0, 0, 0, 0, 409,
+ 0, 7, 8, 92, 10, 11, 0, 1054, 0, 278,
+ 12, 0, 278, 0, 19, 0, 0, 278, 675, 675,
+ 675, 675, 451, 452, 21, 15, 22, 7, 8, 16,
+ 0, 278, 0, 17, -420, 278, 0, 0, 108, 19,
+ 0, 0, 279, 0, 0, 0, 791, 0, 109, 21,
+ 0, 22, 652, 653, 669, 230, 26, 0, 410, 0,
+ 0, 27, 0, 24, 0, 19, 0, 278, 229, 0,
+ 0, 0, 0, 25, 0, 21, 0, 22, 627, 0,
+ 138, 26, 0, 363, 630, 550, 27, 0, 0, 139,
+ 19, 0, 362, 654, 0, 0, 0, 0, 228, 140,
+ 21, -420, 22, 277, 0, 0, 0, 26, 0, 655,
+ 0, 0, 141, 0, 139, 277, 277, 0, 0, 0,
+ 0, 791, 791, 791, 140, 0, 277, 0, 685, 686,
+ 0, 687, 26, 0, 0, 0, 0, 656, 409, 0,
+ 7, 8, 92, 10, 11, 0, 0, 0, 0, 12,
+ 0, 363, 8, 92, 10, 11, 0, 0, 0, 0,
+ 12, 624, 625, 361, 15, 104, 0, 0, 16, 0,
+ 0, 363, 17, -421, 550, 15, 0, 0, 19, 16,
+ 362, 0, 0, 17, 0, 0, 355, 0, 21, 0,
+ 22, 0, 0, 669, 0, 0, 0, 410, 0, 21,
+ 0, 93, 24, 0, 279, 100, 0, 159, 160, 0,
+ 0, 0, 25, 12, 778, 779, 0, 94, 0, 778,
+ 26, 279, 0, 0, 279, 27, 0, 0, 550, 279,
+ 279, 279, 279, 279, 0, 0, 17, 0, 0, 0,
+ -421, 0, 0, 279, 0, 509, 0, 279, 0, 0,
+ 0, 0, 21, 0, 93, 0, 0, 0, 451, 452,
+ 0, 0, 0, 0, 0, 0, 510, 0, 0, 0,
+ 0, 0, 0, 0, 0, 98, 511, 0, 0, 279,
+ 230, 0, 0, 355, 0, 277, 512, 0, 0, 520,
+ 514, 0, 831, 832, 766, 768, 0, 363, 228, 0,
+ 0, 0, 840, 0, 363, 0, 0, 0, 847, 0,
+ 7, 8, 158, 159, 160, 0, 0, 675, 0, 12,
+ 0, 915, 0, 0, 0, 0, 361, 7, 8, 409,
+ 0, 7, 8, 92, 10, 11, 0, 0, 0, 0,
+ 12, 0, 17, 0, 0, 0, 0, 675, 19, 355,
+ 0, 509, 865, 550, 0, 15, 0, 362, 21, 16,
+ 22, 0, 0, 17, -419, 19, 0, 0, 509, 19,
+ 0, 0, 139, 550, 0, 21, 0, 22, 0, 21,
+ 0, 22, 140, 0, 833, 834, 363, 0, 410, 600,
+ 26, 0, 363, 24, 361, 1098, 0, 0, 0, 601,
+ 982, 0, 0, 25, 0, 0, 0, 26, 0, 512,
+ 0, 26, 602, 514, 361, 362, 27, 0, 0, 0,
+ 7, 100, 0, 0, 184, 0, 0, 0, 0, 0,
+ 278, -419, 0, 0, 0, 0, 0, 0, 0, 0,
+ 363, 0, 0, 0, 0, 0, 48, 277, 675, 916,
+ 917, 6, 919, 7, 8, 9, 10, 11, 19, 0,
+ 0, 0, 12, 0, 277, 0, 0, 277, 21, 0,
+ 22, 0, 277, 277, 277, 277, 277, 15, 933, 0,
+ 0, 16, 24, 0, 0, 17, 277, 0, 0, 0,
+ 277, 19, 25, 0, 0, 323, 0, 0, 0, 0,
+ 26, 21, 0, 22, 0, 27, 23, 0, 7, 8,
+ 0, 0, 11, 0, 0, 24, 0, 0, 268, 0,
+ 0, 48, 277, 228, 0, 25, 0, 0, 0, 279,
+ 0, 0, 0, 26, 0, 0, 0, 997, 27, 0,
+ 361, 268, 0, 0, 28, 0, 19, 361, 0, 509,
+ 1002, 1003, 1004, 154, 0, 982, 21, 0, 22, 279,
+ 0, 0, 982, 0, 0, 363, 0, 766, 768, 363,
+ 600, 0, 0, 0, 1015, 100, 0, 0, 357, 0,
+ 601, 0, 0, 0, 0, 363, 0, 0, 26, 0,
+ 512, 0, 0, 602, 514, 0, 0, 0, 48, 474,
+ 475, 476, 477, 478, 479, 480, 481, 482, 483, 484,
+ 485, 486, 19, 0, 0, 0, 0, 0, 0, 994,
+ 995, 0, 21, 0, 22, 994, 995, 363, 1058, 361,
+ 0, 1017, 0, 0, 0, 361, 24, 0, 0, 0,
+ 0, 0, 279, 0, 0, 584, 25, 708, 709, 982,
+ 10, 436, 233, 234, 26, 235, 12, 0, 0, 27,
+ 279, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 15, 236, 237, 238, 16, 0, 239, 0, 17,
+ 0, 240, 241, 361, 242, 19, 243, 244, 0, 0,
+ 245, 246, 247, 248, 249, 21, 0, 710, 585, 0,
+ 0, 458, 0, 0, 0, 0, 250, 0, 0, 251,
+ 0, 0, 1117, 0, 0, 0, 0, 252, 253, 254,
+ 0, 766, 768, 982, 0, 255, 256, 257, 0, 0,
+ 0, 1216, 258, 711, 982, 0, 259, 0, 0, 0,
+ 409, 0, 7, 8, 92, 10, 11, 278, 260, 0,
+ 0, 12, 1216, 1216, 0, 0, 0, 1146, 0, 0,
+ 374, 376, 379, 0, 0, 0, 15, 0, 0, 0,
+ 16, 0, 277, 0, 17, -422, 0, 1216, 0, 0,
+ 19, 0, 586, 357, 982, 0, 0, 0, 0, 0,
+ 21, 982, 22, 0, 982, 0, 7, 8, 0, 410,
+ 184, 0, 277, 0, 24, 0, 0, 0, 361, 0,
+ 0, 0, 361, 0, 25, 982, 0, 0, 982, 0,
+ 0, 0, 26, 0, 0, 982, 0, 27, 361, 357,
+ 0, 0, 0, 0, 19, 0, 0, 509, 0, 0,
+ 0, 0, -422, 0, 21, 0, 22, 0, 0, 982,
+ 0, 0, 649, 0, 0, 0, 0, 0, 600, 0,
+ 0, 0, 0, 0, 982, 0, 0, 0, 601, 0,
+ 361, 0, 0, 0, 0, 0, 26, 357, 512, 982,
+ 0, 602, 514, 0, 0, 277, 0, 0, 0, 0,
+ 0, 7, 8, 0, 0, 11, 982, 982, 0, 550,
+ 0, 268, 0, 277, 586, 0, 0, 716, 717, 0,
+ 721, 722, 723, 724, 725, 726, 727, 728, 729, 730,
+ 731, 732, 733, 734, 735, 736, 737, 738, 739, 19,
+ 0, 0, 509, 0, 357, 0, 357, 357, 0, 21,
+ 0, 22, 0, 230, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 835, 0, 0, 0, 624, 625, 279,
+ 0, 0, 357, 836, 230, 230, 7, 8, 357, 0,
+ 423, 26, 0, 512, 0, 0, 602, 514, 0, 0,
+ 0, 0, 0, 798, 800, 0, 0, 0, 0, 230,
+ 0, 0, 0, 0, 622, 0, 623, 0, 374, 376,
+ 515, 515, 515, 0, 19, 0, 0, 509, 0, 0,
+ 0, 636, 0, 0, 21, 0, 22, 0, 0, 6,
+ 357, 7, 8, 9, 10, 11, 0, 586, 600, 0,
+ 12, 0, 0, 662, 0, 0, 0, 0, 601, 0,
+ 0, 0, 0, 13, 14, 15, 26, 0, 512, 16,
+ 357, 602, 514, 17, 0, 0, 0, 18, 42, 19,
+ 0, 586, 0, 20, 0, 0, 7, 100, 0, 21,
+ 11, 22, 0, 0, 23, 0, 0, 0, 42, 42,
+ 0, 110, 515, 24, 515, 515, 515, 42, 0, 0,
+ 0, 0, 0, 25, 0, 0, 0, 866, 42, 0,
+ 42, 26, 7, 8, 19, 0, 27, 0, 0, 0,
+ 0, 363, 28, 29, 21, 0, 22, 0, 0, 0,
+ 190, 0, 0, 0, 0, 0, 0, 0, 108, 0,
+ 0, 0, 357, 0, 0, 0, 0, 0, 109, 0,
+ 19, 0, 0, 0, 0, 0, 26, 0, 0, 42,
+ 21, 27, 22, 900, 0, 42, 0, 190, 0, 42,
+ 0, 0, 110, 110, 139, 0, 0, 0, 0, 8,
+ 0, 10, 184, 42, 140, 42, 42, 12, 0, 0,
+ 0, 0, 26, 0, 0, 0, 228, 141, 0, 0,
+ 586, 357, 15, 42, 42, 110, 16, 0, 0, 0,
+ 17, 0, 277, 190, 357, 0, 357, 228, 228, 509,
+ 0, 0, 0, 0, 0, 0, 21, 0, 93, 42,
+ 0, 374, 376, 379, 7, 8, 0, 0, 11, 379,
+ 510, 0, 228, 515, 515, 943, 515, 42, 0, 0,
+ 511, 0, 0, 787, 0, 622, 623, 42, 636, 0,
+ 512, 0, 0, 513, 514, 0, 0, 0, 0, 357,
+ 0, 357, 19, 357, 0, 0, 0, 0, 0, 0,
+ 0, 0, 21, 515, 22, 0, 0, 379, 0, 0,
+ 0, 0, 0, 0, 0, 0, 139, 0, 0, 0,
+ 586, 586, 0, 0, 0, 0, 140, 0, 0, 0,
+ 0, 0, 0, 0, 26, 1010, 1011, 0, 0, 141,
+ 7, 8, 92, 10, 11, 0, 0, 353, 8, 12,
+ 10, 11, 0, 515, 515, 515, 12, 0, 0, 505,
+ 0, 787, 0, 0, 15, 0, 0, 190, 16, 42,
+ 0, 15, 17, 586, 586, 16, 0, 1042, 19, 17,
+ 0, 509, 0, 0, 361, 0, 0, 0, 21, 0,
+ 22, 0, 0, 0, 0, 21, 42, 93, 494, 42,
+ 0, 0, 835, 42, 0, 0, 0, 0, 0, 0,
+ 0, 8, 836, 10, 11, 0, 0, 0, 0, 12,
+ 26, 0, 512, 0, 0, 602, 514, 0, 515, 586,
+ 354, 0, 495, 0, 15, 0, 0, 0, 16, 0,
+ 1079, 0, 17, 42, 42, 42, 42, 0, 0, 0,
+ 0, 42, 0, 0, 0, 0, 0, 42, 21, 42,
+ 93, 110, 110, 7, 8, 158, 159, 160, 42, 0,
+ 0, 0, 12, 0, 42, 0, 0, 42, 7, 100,
+ 0, 0, 423, 0, 622, 623, 374, 376, 0, 1120,
+ 0, 636, 374, 376, 379, 17, 42, 42, 0, 42,
+ 0, 19, 0, 586, 509, 0, 0, 0, 586, 0,
+ 1138, 21, 0, 22, 0, 0, 19, 0, 0, 515,
+ 1021, 515, 0, 0, 0, 600, 21, 0, 22, 0,
+ 0, 0, 515, 0, 0, 601, 787, 787, 787, 0,
+ 24, 0, 0, 26, 0, 512, 0, 0, 608, 514,
+ 25, 0, 0, 0, 0, 0, 0, 0, 26, 0,
+ 0, 0, 0, 27, 7, 8, 0, 0, 184, 0,
+ 357, 0, 0, 120, 0, 7, 8, 92, 10, 11,
+ 0, 0, 586, 586, 12, 0, 515, 515, 515, 515,
+ 1186, 1187, 505, 515, 787, 787, 787, 0, 0, 15,
+ 0, 0, 19, 16, 788, 0, 0, 17, 0, 0,
+ 0, 586, 21, 19, 22, 0, 0, 0, 0, 0,
+ 0, 0, 0, 21, 0, 22, 139, 0, 0, 0,
+ 0, 0, 1218, 0, 42, 0, 140, 24, 0, 0,
+ 0, 0, 0, 0, 26, 622, 623, 25, 636, 141,
+ 0, 0, 0, 0, 0, 26, 586, 0, 0, 0,
+ 27, 0, 0, 586, 586, 0, 0, 357, 0, 0,
+ 0, 0, 586, 0, 64, 8, 92, 10, 11, 0,
+ 0, 353, 0, 12, 42, 42, 110, 0, 0, 0,
+ 787, 787, 844, 0, 64, 64, 0, 115, 15, 0,
+ 0, 0, 16, 64, 0, 0, 17, 0, 42, 42,
+ 0, 42, 0, 0, 64, 509, 64, 0, 0, 0,
+ 0, 0, 21, 0, 93, 0, 0, 0, 0, 0,
+ 0, 586, 586, 357, 7, 8, 510, 0, 11, 0,
+ 110, 0, 0, 0, 0, 0, 511, 515, 515, 379,
+ 515, 0, 0, 0, 0, 0, 512, 0, 0, 513,
+ 514, 0, 0, 0, 354, 64, 0, 0, 0, 0,
+ 0, 64, 19, 0, 788, 64, 0, 0, 115, 115,
+ 0, 0, 21, 0, 22, 0, 0, 0, 0, 64,
+ 0, 64, 64, 0, 0, 0, 377, 0, 586, 0,
+ 0, 0, 0, 0, 0, 0, 378, 0, 0, 64,
+ 64, 115, 0, 0, 26, 0, 0, 0, 0, 141,
+ 0, 0, 0, 0, 374, 376, 379, 0, 196, 0,
+ 0, 8, 0, 10, 11, 64, 0, 0, 0, 12,
+ 475, 476, 477, 478, 479, 480, 481, 482, 483, 484,
+ 485, 486, 0, 64, 15, 0, 0, 0, 16, 0,
+ 0, 0, 17, 64, 197, 198, 116, 788, 788, 0,
+ 0, 199, 0, 0, 0, 0, 0, 0, 21, 0,
+ 93, 0, 200, 150, 201, 202, 203, 0, 204, 205,
+ 206, 207, 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 0, 0, 222, 223,
+ 224, 0, 0, 225, 0, 0, 226, 42, 42, 110,
+ 110, 0, 190, 0, 42, 844, 844, 844, 0, 0,
+ 0, 227, 0, 0, 150, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 116, 116, 0,
+ 0, 0, 0, 42, 530, 64, 42, 0, 42, 0,
+ 150, 0, 0, 0, -301, 8, -301, 10, 184, 0,
+ 0, 788, 788, 12, 0, 0, 0, 0, 150, 150,
+ 380, 0, 64, 0, 0, 64, 0, 0, 15, 64,
+ 0, 0, 16, 0, 0, 0, 17, 0, 0, 0,
+ 0, 0, -301, 0, 150, 509, 0, 0, 582, 0,
+ 0, 0, 21, 0, 93, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 190, 510, 0, 0, 64,
+ 64, 64, 64, 42, 0, 0, 511, 64, 0, 0,
+ 0, 0, 0, 64, -301, 64, 512, 115, 115, 513,
+ 514, 0, 0, 0, 64, 0, 0, 0, 0, 0,
+ 64, 0, 0, 64, 42, 42, 42, 0, 0, 0,
+ 433, 0, 0, 0, 0, 0, 0, 0, 42, 42,
+ 0, 42, 64, 64, 0, 64, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 7, 8,
+ 83, 10, 436, 233, 234, 0, 235, 12, 0, 0,
+ 42, 0, 83, 83, 0, 83, 517, 517, 517, 0,
+ 0, 0, 15, 236, 150, 0, 16, 0, 239, 0,
+ 17, 0, 240, 241, 0, 242, 19, 243, 244, 0,
+ 177, 245, 246, 247, 248, 249, 21, 0, 22, 0,
+ 0, 0, 0, 0, 0, 0, 0, 250, 0, 0,
+ 251, 0, 0, 0, 0, 0, 0, 0, 252, 253,
+ 254, 0, 0, 0, 0, 0, 255, 256, 257, 0,
+ 0, 0, 844, 437, 0, 0, 0, 0, 0, 83,
+ 790, 83, 83, 83, 0, 0, 83, 83, 604, 260,
+ 604, 604, 517, 0, 7, 8, 158, 159, 160, 0,
+ 0, 0, 150, 12, 150, 0, 380, 380, 0, 0,
+ 64, 0, 0, 150, 0, 0, 0, 0, 0, 150,
+ 0, 0, 150, 0, 148, 0, 17, 0, 0, 0,
+ 0, 0, 19, 0, 0, 509, 0, 0, 0, 0,
+ 0, 150, 21, 0, 22, 0, 191, 844, 844, 844,
+ 0, 0, 0, 0, 0, 0, 600, 0, 0, 0,
+ 64, 64, 115, 0, 0, 191, 601, 0, 846, 0,
+ 0, 83, 0, 0, 26, 148, 512, 0, 0, 602,
+ 514, 0, 0, 0, 64, 64, 100, 64, 159, 160,
+ 0, 42, 0, 0, 12, 0, 7, 8, 0, 0,
+ 423, 148, 0, 0, 0, 8, 92, 10, 11, 83,
+ 0, 1335, 0, 12, 0, 0, 115, 17, 0, 148,
+ 148, 148, 0, 0, 7, 8, 509, 754, 15, 389,
+ 0, 0, 16, 21, 19, 93, 17, 0, 0, 517,
+ 517, 0, 517, 0, 21, 148, 22, 510, 0, 517,
+ 790, 0, 21, 83, 93, 83, 83, 511, 139, 0,
+ 7, 8, 19, 0, 0, 0, 0, 512, 140, 0,
+ 513, 514, 21, 0, 22, 191, 26, 1092, 0, 604,
+ 0, 141, 0, 0, 83, 0, 139, 0, 0, 0,
+ 83, 0, 0, 83, 0, 0, 140, 83, 19, 0,
+ 0, 0, 0, 0, 26, 0, 0, 0, 21, 141,
+ 22, 8, 92, 10, 11, 0, 0, 0, 0, 12,
+ 0, 0, 377, 0, 0, 0, 0, 0, 0, 604,
+ 604, 839, 378, 0, 15, 0, 0, 839, 16, 0,
+ 26, 0, 17, 790, 790, 141, 0, 0, 0, 0,
+ 0, 530, 0, 150, 150, 0, 150, 0, 21, 0,
+ 93, 0, 0, 191, 0, 148, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 582, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 380, 0, 0, 0, 0,
+ 0, 0, 0, 64, 64, 115, 115, 0, 0, 0,
+ 64, 846, 846, 846, 517, 0, 0, 0, 0, 0,
+ 83, 0, 0, 0, 433, 433, 433, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 64,
+ 0, 0, 64, 0, 64, 0, 0, 0, 0, 148,
+ 0, 148, 148, 0, 0, 0, 0, 790, 790, 518,
+ 522, 524, 0, 148, 0, 148, 0, 148, 148, 0,
+ 0, 0, 0, 0, 148, 0, 0, 0, 0, 0,
+ 148, 0, 0, 148, 0, 0, 83, 0, 0, 0,
+ 0, 0, 62, 0, 0, 0, 83, 0, 83, 83,
+ 0, 0, 148, 0, 0, 517, 0, 517, 0, 0,
+ 0, 0, 102, 106, 0, 0, 0, 0, 517, 64,
+ 0, 125, 517, 517, 517, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 157, 0, 0, 0, 0, 177,
+ 0, 605, 0, 518, 522, 524, 0, 0, 0, 0,
+ 64, 64, 64, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 64, 64, 0, 64, 8, 151,
+ 10, 152, 604, 604, 839, 839, 12, 0, 0, 604,
+ 839, 839, 839, 0, 0, 0, 0, 0, 0, 305,
+ 0, 15, 0, 307, 0, 16, 64, 0, 0, 17,
+ 0, 0, 0, 0, 0, 0, 0, 62, 150, 0,
+ 329, 0, 0, 191, 0, 21, 0, 93, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 102, 106, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 148, 473, 474, 475, 476, 477, 478, 479, 480, 481,
+ 482, 483, 484, 485, 486, 754, 754, 0, 754, 0,
+ 0, 0, 0, 83, 0, 0, 0, 0, 846, 0,
+ 0, 418, 0, 83, 83, 83, 517, 517, 0, 0,
+ 0, 424, 0, 0, 0, 0, 0, 0, 0, 0,
+ 148, 148, 837, 769, 0, 771, 0, 0, 837, 0,
+ 0, 0, 771, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 148, 148, 0, 148, 0, 150,
+ 0, 150, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 522, 604, 604, 0, 604, 0, 0, 0,
+ 0, 0, 0, 846, 846, 846, 837, 0, 0, 0,
+ 0, 0, 0, 0, 83, 0, 83, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 83, 472, 473,
+ 474, 475, 476, 477, 478, 479, 480, 481, 482, 483,
+ 484, 485, 486, 769, 771, 0, 0, 64, 7, 8,
+ 771, 10, 436, 233, 234, 0, 235, 12, 0, 0,
+ 556, 0, 0, 557, 0, 0, 0, 558, 0, 0,
+ 0, 0, 15, 236, 237, 238, 16, 0, 239, 0,
+ 17, 0, 240, 241, 0, 242, 19, 243, 244, 0,
+ 0, 245, 246, 247, 248, 249, 21, 380, 22, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 606, 0,
+ 0, 0, 0, 0, 0, 616, 0, 522, 0, 0,
+ 83, 305, 0, 307, 0, 0, 255, 256, 257, 0,
+ 0, 0, 0, 258, 0, 0, 0, 259, 424, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 260,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 663, 666, 0, 418, 83, 83, 0, 83, 0, 0,
+ 0, 0, 380, 380, 380, 0, 0, 0, 0, 0,
+ 0, 0, 0, 148, 148, 148, 148, 0, 996, 0,
+ 148, 837, 837, 837, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 927, 0,
+ 928, 0, 0, 0, 0, 0, 150, 0, 0, 148,
+ 0, 932, 0, 0, 0, 0, 769, 771, 0, 0,
+ 0, 7, 8, 92, 10, 11, 0, 0, 353, 0,
+ 12, 0, 7, 8, 151, 10, 152, 0, 0, 0,
+ 0, 12, 0, 0, 0, 15, 0, 0, 0, 16,
+ 0, 0, 0, 17, 0, 0, 15, 0, 0, 19,
+ 16, 0, 0, 0, 17, 927, 928, 0, 769, 21,
+ 19, 22, 932, 0, 769, 771, 0, 0, 0, 0,
+ 21, 191, 22, 377, 0, 153, 0, 0, 0, 0,
+ 0, 0, 0, 378, 24, 0, 0, 0, 0, 0,
+ 0, 26, 0, 0, 25, 0, 141, 0, 0, 0,
+ 0, 354, 26, 0, 0, 0, 0, 27, 0, 0,
+ 148, 0, 148, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 148, 148, 0, 148, 102, 106,
+ 467, 0, 468, 469, 470, 471, 472, 473, 474, 475,
+ 476, 477, 478, 479, 480, 481, 482, 483, 484, 485,
+ 486, 0, 556, 557, 0, 558, 0, 0, 0, 1101,
+ 769, 962, 8, 92, 10, 232, 233, 234, 0, 235,
+ 12, 963, 0, 964, 965, 966, 967, 968, 969, 970,
+ 971, 972, 973, 13, 14, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 0, 0, 245, 246, 247, 248, 249, 21,
+ 0, 974, 346, 0, 975, 0, 927, 928, 0, 932,
+ 250, 0, 0, 251, 0, 0, 0, 0, 837, 0,
+ 0, 252, 253, 254, 0, 0, 0, 0, 0, 255,
+ 256, 257, 0, 0, 0, 0, 258, 0, 976, 454,
+ 259, 7, 8, 92, 10, 232, 233, 234, 353, 235,
+ 12, 1102, 260, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 509, 0, 245, 246, 247, 248, 249, 21,
+ 0, 22, -234, 837, 837, 837, 0, 0, 0, 0,
+ 250, 0, 0, 937, 0, 0, 0, 0, 0, 0,
+ 0, 252, 253, 938, 1160, 0, 0, 0, 0, 255,
+ 256, 257, 0, 512, 0, 0, 785, 514, 0, 0,
+ 259, 354, 0, 0, 0, 0, 0, 148, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 0, 0, 0,
+ 0, 305, 307, 0, 0, 0, 0, 0, 424, 0,
+ 0, 465, 466, 467, 1161, 468, 469, 470, 471, 472,
+ 473, 474, 475, 476, 477, 478, 479, 480, 481, 482,
+ 483, 484, 485, 486, 0, 0, 0, 1022, 0, 0,
+ 1022, 1251, 418, 962, 8, 92, 10, 232, 233, 234,
+ 0, 235, 12, 963, 0, 964, 965, 966, 967, 968,
+ 969, 970, 971, 972, 973, 13, 14, 15, 236, 237,
+ 238, 16, 0, 239, 0, 17, 0, 240, 241, 0,
+ 242, 19, 243, 244, 0, 0, 245, 246, 247, 248,
+ 249, 21, 0, 974, 346, 0, 975, 0, 0, 0,
+ 0, 0, 250, 0, 0, 251, 0, 0, 0, 0,
+ 0, 0, 0, 252, 253, 254, 0, 418, 0, 0,
+ 0, 255, 256, 257, 0, 0, 0, 0, 258, 0,
+ 976, 0, 259, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1252, 260, 0, 0, 0, 0, 1097,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 556, 557, 0, 558, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1273, 0, 962, 8, 92, 10, 232, 233, 234,
+ 0, 235, 12, 963, 418, 964, 965, 966, 967, 968,
+ 969, 970, 971, 972, 973, 13, 14, 15, 236, 237,
+ 238, 16, 0, 239, 0, 17, 0, 240, 241, 0,
+ 242, 19, 243, 244, 0, 0, 245, 246, 247, 248,
+ 249, 21, 0, 974, 346, 0, 975, 0, 0, 0,
+ 0, 0, 250, 0, 0, 251, 0, 0, 0, 0,
+ 0, 0, 0, 252, 253, 254, 0, 0, 0, 0,
+ 0, 255, 256, 257, 0, 0, 0, 0, 258, 0,
+ 976, 0, 259, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1274, 260, 1273, 0, 962, 8, 92,
+ 10, 232, 233, 234, 0, 235, 12, 963, 0, 964,
+ 965, 966, 967, 968, 969, 970, 971, 972, 973, 13,
+ 14, 15, 236, 237, 238, 16, 0, 239, 0, 17,
+ 0, 240, 241, 0, 242, 19, 243, 244, 0, 0,
+ 245, 246, 247, 248, 249, 21, 0, 974, 346, 0,
+ 975, 0, 0, 0, 0, 0, 250, 0, 0, 251,
+ 0, 0, 0, 0, 0, 0, 0, 252, 253, 254,
+ 0, 0, 0, 0, 0, 255, 256, 257, 0, 0,
+ 0, 0, 258, 0, 976, 0, 259, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1279, 260, 1273,
+ 0, 962, 8, 92, 10, 232, 233, 234, 0, 235,
+ 12, 963, 0, 964, 965, 966, 967, 968, 969, 970,
+ 971, 972, 973, 13, 14, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 0, 0, 245, 246, 247, 248, 249, 21,
+ 0, 974, 346, 0, 975, 0, 0, 0, 0, 0,
+ 250, 0, 0, 251, 0, 0, 0, 0, 0, 0,
+ 0, 252, 253, 254, 0, 0, 0, 0, 0, 255,
+ 256, 257, 0, 0, 0, 0, 258, 0, 976, 0,
+ 259, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1318, 260, 961, 0, 962, 8, 92, 10, 232,
+ 233, 234, 0, 235, 12, 963, 0, 964, 965, 966,
+ 967, 968, 969, 970, 971, 972, 973, 13, 14, 15,
+ 236, 237, 238, 16, 0, 239, 0, 17, 0, 240,
+ 241, 0, 242, 19, 243, 244, 0, 0, 245, 246,
+ 247, 248, 249, 21, 0, 974, 346, 0, 975, 0,
+ 0, 0, 0, 0, 250, 0, 0, 251, 0, 0,
+ 0, 0, 0, 0, 0, 252, 253, 254, 0, 0,
+ 0, 0, 0, 255, 256, 257, 0, 0, 0, 0,
+ 258, 0, 976, 1245, 259, 962, 8, 92, 10, 232,
+ 233, 234, 0, 235, 12, 963, 260, 964, 965, 966,
+ 967, 968, 969, 970, 971, 972, 973, 13, 14, 15,
+ 236, 237, 238, 16, 0, 239, 0, 17, 0, 240,
+ 241, 0, 242, 19, 243, 244, 0, 0, 245, 246,
+ 247, 248, 249, 21, 0, 974, 346, 0, 975, 0,
+ 0, 0, 0, 0, 250, 0, 0, 251, 0, 0,
+ 0, 0, 0, 0, 0, 252, 253, 254, 0, 0,
+ 0, 0, 0, 255, 256, 257, 0, 0, 0, 0,
+ 258, 0, 976, 1306, 259, 962, 8, 92, 10, 232,
+ 233, 234, 0, 235, 12, 963, 260, 964, 965, 966,
+ 967, 968, 969, 970, 971, 972, 973, 13, 14, 15,
+ 236, 237, 238, 16, 0, 239, 0, 17, 0, 240,
+ 241, 0, 242, 19, 243, 244, 0, 0, 245, 246,
+ 247, 248, 249, 21, 0, 974, 346, 0, 975, 0,
+ 0, 0, 0, 0, 250, 0, 0, 251, 0, 0,
+ 0, 0, 0, 0, 0, 252, 253, 254, 0, 0,
+ 0, 0, 0, 255, 256, 257, 0, 0, 0, 0,
+ 258, 0, 976, 454, 259, 7, 8, 92, 10, 232,
+ 233, 234, 353, 235, 12, 0, 260, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 15,
+ 236, 237, 238, 16, 0, 239, 0, 17, 0, 240,
+ 241, 0, 242, 19, 243, 244, 509, 0, 245, 246,
+ 247, 248, 249, 21, 0, 22, -234, 0, 0, 0,
+ 0, 0, 0, 0, 250, 0, 0, 841, 0, 0,
+ 0, 0, 0, 0, 0, 252, 253, 842, 0, 0,
+ 0, 0, 0, 255, 256, 257, 0, 512, 0, 0,
+ 843, 514, 0, 0, 259, 354, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 352, 260, 7, 8, 92,
+ 10, 232, 233, 234, 353, 235, 12, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 15, 236, 237, 238, 16, 0, 239, 0, 17,
+ 0, 240, 241, 0, 242, 19, 243, 244, 0, 0,
+ 245, 246, 247, 248, 249, 21, 0, 22, 0, 0,
+ 0, 0, 0, 0, 0, 0, 250, 0, 0, 251,
+ 0, 0, 0, 0, 0, 0, 0, 252, 253, 254,
+ 0, 0, 0, 0, 0, 255, 256, 257, 0, 0,
+ 0, 0, 258, 0, 0, 0, 259, 354, 0, 0,
+ 0, 0, 0, 0, 0, 0, -662, 631, 260, 7,
+ 8, 92, 10, 232, 233, 234, 353, 235, 12, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 15, 236, 237, 238, 16, 0, 239,
+ 0, 17, 0, 240, 241, 0, 242, 19, 243, 244,
+ 0, 0, 245, 246, 247, 248, 249, 21, 0, 22,
+ 0, 0, 0, 0, 0, 0, 0, 0, 250, 0,
+ 0, 251, 0, 0, 0, 0, 0, 0, 0, 252,
+ 253, 254, 0, 0, 0, 0, 0, 255, 256, 257,
+ 0, 0, 0, 0, 258, 0, 0, 0, 259, 354,
+ 0, 0, 0, 0, 0, 0, 0, 0, -662, 584,
+ 260, 708, 709, 0, 10, 436, 233, 234, 0, 235,
+ 12, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 0, 0, 245, 246, 247, 248, 249, 21,
+ 0, 710, 585, 0, 0, 0, 0, 0, 0, 0,
+ 250, 0, 0, 251, 0, 0, 0, 0, 0, 0,
+ 0, 252, 253, 254, 0, 0, 0, 0, 0, 255,
+ 256, 257, 0, 0, 0, 0, 258, 711, 0, 584,
+ 259, 708, 709, 0, 10, 436, 233, 234, 0, 235,
+ 12, 822, 260, 0, 0, 0, 0, 1038, 0, 0,
+ 0, 0, 0, 0, 0, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 0, 0, 245, 246, 247, 248, 249, 21,
+ 0, 710, 585, 0, 0, 0, 0, 0, 0, 0,
+ 250, 0, 0, 251, 0, 0, 0, 0, 0, 0,
+ 0, 252, 253, 254, 0, 0, 0, 0, 0, 255,
+ 256, 257, 0, 0, 0, 0, 258, 0, 0, 584,
+ 259, 708, 709, 0, 10, 436, 233, 234, 0, 235,
+ 12, -381, 260, 0, 0, 0, 0, 1038, 0, 0,
+ 0, 0, 0, 0, 0, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 0, 0, 245, 246, 247, 248, 249, 21,
+ 0, 710, 585, 0, 0, 0, 0, 0, 0, 0,
+ 250, 0, 0, 251, 0, 0, 0, 0, 0, 0,
+ 0, 252, 253, 254, 0, 0, 0, 0, 0, 255,
+ 256, 257, 0, 0, 0, 0, 258, 0, 0, 1154,
+ 259, 7, 8, 92, 10, 232, 233, 234, 0, 235,
+ 12, 1068, 260, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 0, 0, 245, 246, 247, 248, 249, 21,
+ 0, 22, 1155, 0, 1156, 0, 0, 0, 0, 0,
+ 250, 0, 0, 251, 0, 0, 0, 0, 0, 0,
+ 0, 252, 253, 254, 0, 0, 0, 0, 0, 255,
+ 256, 257, 0, 0, 0, 0, 258, 0, 0, 454,
+ 259, 7, 8, 0, 10, 232, 233, 234, 0, 235,
+ 12, 0, 260, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 0, 0, 245, 246, 247, 248, 249, 21,
+ 0, 22, -234, 0, 0, 0, 0, 0, 0, 0,
+ 250, 0, 0, 251, 0, 0, 0, 0, 0, 0,
+ 0, 252, 253, 254, 0, 0, 0, 0, 0, 255,
+ 256, 257, 0, 0, 0, 0, 258, 0, 0, 584,
+ 259, 7, 8, 0, 10, 436, 233, 234, 0, 235,
+ 12, 0, 260, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 0, 0, 245, 246, 247, 248, 249, 21,
+ 0, 22, 585, 0, 0, 0, 0, 0, 0, 0,
+ 250, 0, 0, 251, 0, 0, 0, 0, 0, 0,
+ 0, 252, 253, 254, 0, 0, 0, 0, 0, 255,
+ 256, 257, 0, 0, 0, 0, 258, 0, 0, 718,
+ 259, 7, 8, 0, 10, 436, 233, 234, 0, 235,
+ 12, 0, 260, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 0, 0, 245, 246, 247, 248, 249, 21,
+ 0, 22, 0, 0, 0, 0, 0, 0, -652, 0,
+ 250, 0, 0, 251, 0, 0, 0, 0, 0, 0,
+ 0, 252, 253, 254, 0, 0, 0, 0, 0, 255,
+ 256, 257, 0, 0, 0, 0, 258, 0, 0, 454,
+ 259, 7, 8, 0, 10, 232, 233, 234, 0, 235,
+ 12, 0, 260, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 0, 0, 245, 246, 247, 248, 249, 21,
+ 0, 22, -234, 0, 0, 0, 0, 0, 0, 0,
+ 250, 0, 0, 1173, 0, 0, 0, 0, 0, 0,
+ 0, 252, 253, 1174, 0, 0, 0, 0, 0, 255,
+ 256, 257, 0, 0, 0, 0, 1175, 0, 0, 1225,
+ 259, 7, 8, 0, 10, 232, 233, 234, 0, 235,
+ 12, 0, 260, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 0, 0, 245, 246, 247, 248, 249, 21,
+ 0, 22, 0, 0, -136, 0, 0, 0, 0, 0,
+ 250, 0, 0, 251, 0, 0, 0, 0, 0, 0,
+ 0, 252, 253, 254, 0, 0, 0, 0, 0, 255,
+ 256, 257, 0, 0, 0, 0, 258, 0, 0, 718,
+ 259, 7, 8, 0, 10, 436, 233, 234, 0, 235,
+ 12, 0, 260, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 0, 0, 245, 246, 247, 248, 249, 21,
+ 0, 22, 0, 0, 0, 0, 0, 0, 0, 0,
+ 250, 0, 0, 251, 0, 0, 0, 0, 0, 0,
+ 0, 252, 253, 254, 0, 0, 0, 0, 0, 255,
+ 256, 257, 0, 0, 0, 0, 258, 0, 0, 0,
+ 259, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ -652, 797, 260, 7, 8, 0, 10, 436, 233, 234,
+ 0, 235, 12, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 15, 236, 237,
+ 238, 16, 0, 239, 0, 17, 0, 240, 241, 0,
+ 242, 19, 243, 244, 0, 0, 245, 246, 247, 248,
+ 249, 21, 0, 22, 0, 0, 0, 0, 0, 0,
+ 0, 0, 250, 0, 0, 251, 0, 0, 0, 0,
+ 0, 0, 0, 252, 253, 254, 0, 0, 0, 0,
+ 0, 255, 256, 257, 0, 0, 0, 0, 258, 0,
+ 0, 799, 259, 7, 8, 0, 10, 436, 233, 234,
+ 0, 235, 12, 0, 260, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 15, 236, 237,
+ 238, 16, 0, 239, 0, 17, 0, 240, 241, 0,
+ 242, 19, 243, 244, 0, 0, 245, 246, 247, 248,
+ 249, 21, 0, 22, 0, 0, 0, 0, 0, 0,
+ 0, 0, 250, 0, 0, 251, 0, 0, 0, 0,
+ 0, 0, 0, 252, 253, 254, 0, 0, 0, 0,
+ 0, 255, 256, 257, 0, 0, 0, 0, 258, 0,
+ 7, 8, 259, 10, 436, 233, 234, 0, 235, 12,
+ 0, 0, 0, 0, 260, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 15, 236, 237, 238, 16, 0,
+ 239, 0, 17, 0, 240, 241, 0, 242, 19, 243,
+ 244, 0, 0, 245, 246, 247, 248, 249, 21, 0,
+ 22, 0, 0, 0, 0, 0, 0, 0, 0, 250,
+ 0, 0, 251, 0, 0, 0, 0, 0, 0, 0,
+ 252, 253, 254, 0, 0, 0, 0, 0, 255, 256,
+ 257, 0, 0, 0, 0, 258, 0, 0, 0, 259,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 260, 757, 962, 8, 92, 10, 232, 233, 234,
+ 0, 235, 12, 963, 0, 964, 965, 966, 967, 968,
+ 969, 970, 971, 972, 973, 13, 14, 15, 236, 237,
+ 238, 16, 0, 239, 0, 17, 0, 240, 241, 0,
+ 242, 19, 243, 244, 0, 0, 245, 246, 247, 248,
+ 249, 21, 0, 974, 346, 0, 975, 0, 0, 0,
+ 0, 0, 250, 0, 0, 251, 0, 0, 0, 0,
+ 0, 0, 0, 252, 253, 254, 0, 0, 0, 0,
+ 0, 255, 256, 257, 0, 0, 0, 0, 258, 0,
+ 976, 0, 259, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1106, 260, 962, 8, 92, 10, 232,
+ 233, 234, 0, 235, 12, 963, 0, 964, 965, 966,
+ 967, 968, 969, 970, 971, 972, 973, 13, 14, 15,
+ 236, 237, 238, 16, 0, 239, 0, 17, 0, 240,
+ 241, 0, 242, 19, 243, 244, 0, 0, 245, 246,
+ 247, 248, 249, 21, 0, 974, 346, 0, 975, 0,
+ 0, 0, 0, 0, 250, 0, 0, 251, 0, 0,
+ 0, 0, 0, 0, 0, 252, 253, 254, 0, 0,
+ 0, 0, 0, 255, 256, 257, 0, 0, 0, 0,
+ 258, 0, 976, 0, 259, 962, 8, 92, 10, 232,
+ 233, 234, 0, 235, 12, 963, 260, 964, 965, 966,
+ 967, 968, 969, 970, 971, 972, 973, 13, 14, 15,
+ 236, 237, 238, 16, 0, 239, 0, 17, 0, 240,
+ 241, 0, 242, 19, 243, 244, 0, 0, 245, 246,
+ 247, 248, 249, 21, 0, 974, 1266, 0, 975, 0,
+ 0, 0, 0, 0, 250, 0, 0, 251, 0, 0,
+ 0, 0, 0, 0, 0, 252, 253, 254, 0, 0,
+ 0, 0, 0, 255, 256, 257, 0, 0, 0, 0,
+ 258, 0, 976, 0, 259, 962, 8, 92, 10, 232,
+ 233, 234, 0, 235, 12, 963, 260, 964, 965, 966,
+ 967, 968, 969, 970, 971, 972, 973, 13, 14, 15,
+ 236, 237, 238, 16, 0, 239, 0, 17, 0, 240,
+ 241, 0, 242, 19, 243, 244, 0, 0, 245, 246,
+ 247, 248, 249, 21, 0, 974, 0, 0, 975, 0,
+ 0, 0, 0, 0, 250, 0, 0, 251, 0, 0,
+ 0, 0, 0, 0, 0, 252, 253, 254, 0, 0,
+ 0, 0, 0, 255, 256, 257, 0, 0, 0, 0,
+ 258, 0, 976, 0, 259, 7, 8, 92, 10, 232,
+ 233, 234, 353, 235, 12, 0, 260, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 15,
+ 236, 237, 238, 16, 0, 239, 0, 17, 0, 240,
+ 241, 0, 242, 19, 243, 244, 509, 0, 245, 246,
+ 247, 248, 249, 21, 0, 22, 0, 0, 0, 0,
+ 0, 0, 0, 0, 250, 0, 0, 783, 0, 0,
+ 0, 0, 0, 0, 0, 252, 253, 784, 0, 0,
+ 0, 0, 0, 255, 256, 257, 0, 512, 0, 0,
+ 785, 514, 0, 0, 259, 354, 7, 8, 92, 10,
+ 232, 233, 234, 353, 235, 12, 260, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 15, 236, 237, 238, 16, 0, 239, 0, 17, 0,
+ 240, 241, 0, 242, 19, 243, 244, 509, 0, 245,
+ 246, 247, 248, 249, 21, 0, 22, 0, 0, 0,
+ 0, 0, 0, 0, 0, 250, 0, 0, 841, 0,
+ 0, 0, 0, 0, 0, 0, 252, 253, 842, 0,
+ 0, 0, 0, 0, 255, 256, 257, 0, 512, 0,
+ 0, 843, 514, 0, 0, 259, 354, 7, 8, 0,
+ 10, 232, 233, 234, 0, 235, 12, 260, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 15, 236, 237, 238, 16, 0, 239, 0, 17,
+ 0, 240, 241, 0, 242, 19, 243, 244, 509, 0,
+ 245, 246, 247, 248, 249, 21, 0, 22, 0, 0,
+ 0, 0, 0, 0, 0, 0, 250, 0, 0, 783,
+ 0, 0, 0, 0, 0, 0, 0, 252, 253, 784,
+ 0, 0, 0, 0, 0, 255, 256, 257, 0, 512,
+ 0, 0, 785, 514, 7, 8, 0, 10, 232, 233,
+ 234, 0, 235, 12, 0, 0, 0, 0, 260, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 15, 236,
+ 237, 238, 16, 0, 239, 0, 17, 0, 240, 241,
+ 0, 242, 19, 243, 244, 509, 0, 245, 246, 247,
+ 248, 249, 21, 0, 22, 0, 0, 0, 0, 0,
+ 0, 0, 0, 250, 0, 0, 841, 0, 0, 0,
+ 0, 0, 0, 0, 252, 253, 842, 0, 0, 0,
+ 0, 0, 255, 256, 257, 0, 512, 0, 0, 843,
+ 514, 7, 8, 0, 10, 232, 233, 234, 0, 235,
+ 12, 0, 0, 0, 0, 260, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 15, 236, 237, 238, 16,
+ 0, 239, 0, 17, 0, 240, 241, 0, 242, 19,
+ 243, 244, 509, 0, 245, 246, 247, 248, 249, 21,
+ 0, 22, 0, 0, 0, 0, 0, 0, 0, 0,
+ 250, 0, 0, 937, 0, 0, 0, 0, 0, 0,
+ 0, 252, 253, 938, 0, 0, 0, 0, 0, 255,
+ 256, 257, 0, 512, 0, 0, 785, 514, 7, 8,
+ 0, 10, 436, 233, 234, 0, 235, 12, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 15, 236, 237, 238, 16, 0, 239, 0,
+ 17, 0, 240, 241, 0, 242, 19, 243, 244, 0,
+ 0, 245, 246, 247, 248, 249, 21, 0, 22, 0,
+ 0, 1083, 0, 0, 0, 0, 0, 250, 0, 0,
+ 251, 0, 0, 0, 0, 0, 0, 0, 252, 253,
+ 254, 0, 0, 0, 0, 0, 255, 256, 257, 0,
+ 0, 0, 0, 258, 0, 7, 8, 259, 10, 232,
+ 233, 234, 0, 235, 12, 0, 0, 0, 0, 260,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 15,
+ 236, 237, 238, 16, 0, 239, 0, 17, 0, 240,
+ 241, 0, 242, 19, 243, 244, 0, 0, 245, 246,
+ 247, 248, 249, 21, 0, 22, 0, 0, 0, 0,
+ 0, 0, 0, 0, 250, 0, 0, 251, 0, 0,
+ 0, 0, 0, 0, 0, 252, 253, 254, 0, 0,
+ 0, 0, 0, 255, 256, 257, 0, 0, 0, 0,
+ 258, 0, 7, 8, 259, 10, 436, 233, 234, 0,
+ 235, 12, 0, 0, 0, 0, 260, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 15, 236, 237, 238,
+ 16, 0, 239, 0, 17, 0, 240, 241, 0, 242,
+ 19, 243, 244, 0, 0, 245, 246, 247, 248, 249,
+ 21, 0, 22, 462, 0, 0, 0, 0, 0, 0,
+ 0, 250, 0, 0, 251, 0, 0, 0, 0, 0,
+ 0, 0, 252, 253, 254, 0, 0, 0, 0, 0,
+ 255, 256, 257, 0, 0, 7, 8, 463, 10, 436,
+ 233, 234, 0, 235, 12, 0, 0, 0, 0, 0,
+ 0, 0, 0, 260, 0, 0, 0, 0, 0, 15,
+ 236, 237, 238, 16, 0, 239, 0, 17, 0, 240,
+ 241, 0, 242, 19, 243, 244, 0, 0, 245, 246,
+ 247, 248, 249, 21, 0, 22, 0, 0, 0, 0,
+ 0, 0, 0, 0, 250, 0, 0, 251, 0, 0,
+ 0, 0, 0, 0, 0, 252, 253, 254, 0, 0,
+ 0, 0, 0, 255, 256, 257, 0, 0, 0, 0,
+ 258, 499, 7, 8, 0, 10, 436, 233, 234, 0,
+ 235, 12, 0, 0, 0, 0, 260, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 15, 236, 237, 238,
+ 16, 0, 239, 0, 17, 0, 240, 241, 0, 242,
+ 19, 243, 244, 0, 0, 245, 246, 247, 248, 249,
+ 21, 0, 22, 0, 0, 0, 0, 0, 0, 0,
+ 0, 250, 0, 0, 251, 0, 0, 0, 0, 0,
+ 0, 0, 252, 253, 254, 0, 0, 0, 0, 0,
+ 255, 256, 257, 0, 0, 0, 0, 258, 0, 7,
+ 694, 259, 10, 436, 233, 234, 0, 235, 12, 0,
+ 0, 0, 0, 260, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 15, 236, 237, 238, 16, 0, 239,
+ 0, 17, 0, 240, 241, 0, 242, 19, 243, 244,
+ 0, 0, 245, 246, 247, 248, 249, 21, 0, 22,
+ 0, 0, 0, 0, 0, 0, 0, 0, 250, 0,
+ 0, 251, 0, 0, 0, 0, 0, 0, 0, 252,
+ 253, 254, 0, 0, 0, 0, 0, 255, 256, 257,
+ 0, 0, 0, 0, 258, 0, 7, 8, 259, 10,
+ 436, 233, 234, 0, 235, 12, 0, 0, 0, 0,
+ 260, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 15, 236, 237, 238, 16, 0, 239, 0, 17, 0,
+ 240, 241, 0, 242, 19, 243, 244, 0, 0, 245,
+ 246, 247, 248, 249, 21, 0, 22, 0, 0, 0,
+ 0, 0, 0, 0, 0, 250, 0, 0, 887, 0,
+ 0, 0, 0, 0, 0, 0, 252, 253, 888, 0,
+ 0, 0, 0, 0, 255, 256, 257, 0, 0, 0,
+ 0, 258, 0, 7, 8, 259, 10, 436, 233, 234,
+ 0, 235, 12, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 15, 236, 237,
+ 238, 16, 0, 239, 0, 17, 0, 240, 241, 0,
+ 242, 19, 243, 244, 0, 0, 245, 246, 247, 248,
+ 249, 21, 0, 22, 0, 0, 0, 0, 0, 0,
+ 0, 0, 250, 0, 0, 1173, 0, 0, 0, 0,
+ 0, 0, 0, 252, 253, 1174, 0, 0, 0, 0,
+ 0, 255, 256, 257, 0, 0, 0, 0, 1175, 0,
+ 1238, 8, 259, 10, 436, 233, 234, 0, 235, 12,
+ 0, 0, 0, 0, 260, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 15, 236, 237, 238, 16, 0,
+ 239, 0, 17, 0, 240, 241, 0, 242, 19, 243,
+ 244, 0, 0, 245, 246, 247, 248, 249, 21, 0,
+ 22, 0, 0, 0, 0, 0, 0, 0, 0, 250,
+ 0, 0, 251, 0, 0, 0, 0, 0, 0, 0,
+ 252, 253, 254, 0, 0, 0, 0, 0, 255, 256,
+ 257, 0, 0, 0, 0, 258, 0, 7, 8, 259,
+ 10, 436, 233, 234, 0, 235, 12, 0, 0, 0,
+ 0, 260, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 15, 236, 237, 238, 16, 0, 239, 0, 17,
+ 0, 240, 241, 0, 242, 19, 243, 244, 0, 0,
+ 245, 246, 247, 248, 249, 21, 0, 22, 0, 0,
+ 0, 0, 0, 0, 0, 0, 250, 0, 0, 251,
+ 0, 0, 0, 0, 0, 0, 0, 252, 253, 254,
+ 0, 0, 0, 0, 0, 255, 256, 257, 0, 0,
+ 7, 8, 258, 10, 436, 233, 234, 0, 235, 12,
+ 0, 0, 0, 0, 0, 0, 0, 0, 260, 0,
+ 0, 0, 0, 0, 15, 236, 237, 238, 16, 0,
+ 239, 0, 17, 0, 240, 241, 0, 242, 19, 243,
+ 244, 0, 0, 245, 246, 247, 248, 249, 21, 0,
+ 22, 0, 0, 0, 0, 0, 0, 0, 0, 250,
+ 0, 0, 887, 0, 0, 0, 0, 0, 0, 0,
+ 252, 253, 888, 0, 0, 0, 0, 0, 255, 256,
+ 257, 0, 0, 7, 8, 258, 10, 232, 233, 234,
+ 0, 235, 12, 0, 0, 0, 0, 0, 0, 0,
+ 0, 260, 0, 0, 0, 0, 0, 15, 236, 237,
+ 238, 16, 0, 239, 0, 17, 0, 240, 241, 0,
+ 242, 19, 243, 244, 0, 0, 245, 246, 247, 248,
+ 249, 21, 0, 22, 0, 0, 0, 0, 0, 0,
+ 0, 0, 250, 0, 0, 1173, 0, 0, 0, 0,
+ 0, 0, 0, 252, 253, 1174, 0, 0, 0, 0,
+ 0, 255, 256, 257, 0, 0, 7, 8, 1175, 10,
+ 436, 233, 234, 0, 235, 12, 0, 0, 0, 0,
+ 0, 0, 0, 0, 260, 0, 0, 0, 0, 0,
+ 15, 236, 0, 0, 16, 0, 239, 0, 17, 0,
+ 240, 241, 0, 242, 19, 243, 244, 0, 0, 245,
+ 246, 247, 248, 249, 21, 0, 22, 0, 0, 0,
+ 0, 0, 0, 0, 0, 250, 0, 0, 251, 0,
+ 0, 0, 0, 0, 0, 0, 252, 253, 254, 0,
+ 0, 0, 0, 0, 255, 256, 257, 0, 0, 0,
+ 0, 440, 7, 8, 92, 10, 11, 0, 0, 615,
+ 0, 12, 0, 0, 0, 0, 0, 260, 0, 652,
+ 8, 151, 10, 152, 0, 0, 15, 0, 12, 0,
+ 16, 0, 0, 0, 17, 0, 0, 0, 0, 0,
+ 19, 0, 0, 15, 0, 0, 0, 16, 0, 0,
+ 21, 17, 22, 0, 0, 0, 0, 19, 0, 0,
+ 0, 0, 0, 0, 24, 0, 0, 21, 0, 22,
+ 0, 0, 0, 0, 25, 0, 655, 0, 0, 0,
+ 0, 24, 26, 0, 0, 0, 0, 27, 0, 0,
+ 0, 25, 7, 8, 151, 10, 152, 0, 0, 26,
+ 0, 12, 0, 0, 27, 0, 0, 0, 0, 7,
+ 8, 151, 10, 152, 0, 0, 15, 0, 12, 0,
+ 16, 0, 0, 0, 17, 0, 0, 0, 0, 0,
+ 19, 0, 0, 15, 0, 0, 0, 16, 0, 0,
+ 21, 17, 22, 0, 0, 1095, 0, 19, 0, 0,
+ 0, 0, 0, 0, 24, 0, 0, 21, 0, 22,
+ 0, 0, 0, 0, 25, 0, 0, 0, 0, 0,
+ 0, 24, 26, 0, 0, 0, 0, 27, 0, 0,
+ 0, 25, 0, 0, 0, 0, 0, 0, 0, 26,
+ 679, 465, 466, 467, 27, 468, 469, 470, 471, 472,
+ 473, 474, 475, 476, 477, 478, 479, 480, 481, 482,
+ 483, 484, 485, 486, 0, 465, 466, 467, 0, 468,
+ 469, 470, 471, 472, 473, 474, 475, 476, 477, 478,
+ 479, 480, 481, 482, 483, 484, 485, 486, 542, 1036,
+ 465, 466, 467, 0, 468, 469, 470, 471, 472, 473,
+ 474, 475, 476, 477, 478, 479, 480, 481, 482, 483,
+ 484, 485, 486, 465, 466, 467, 1200, 468, 469, 470,
+ 471, 472, 473, 474, 475, 476, 477, 478, 479, 480,
+ 481, 482, 483, 484, 485, 486, 465, 466, 467, 1249,
+ 468, 469, 470, 471, 472, 473, 474, 475, 476, 477,
+ 478, 479, 480, 481, 482, 483, 484, 485, 486, 465,
+ 466, 467, 0, 468, 469, 470, 471, 472, 473, 474,
+ 475, 476, 477, 478, 479, 480, 481, 482, 483, 484,
+ 485, 486, 465, 466, 467, 0, 468, 469, 470, 471,
+ 472, 473, 474, 475, 476, 0, 478, 479, 480, 481,
+ 482, 483, 484, 485, 486, 469, 470, 471, 472, 473,
+ 474, 475, 476, 477, 478, 479, 480, 481, 482, 483,
+ 484, 485, 486, 470, 471, 472, 473, 474, 475, 476,
+ 477, 478, 479, 480, 481, 482, 483, 484, 485, 486,
+ 471, 472, 473, 474, 475, 476, 477, 478, 479, 480,
+ 481, 482, 483, 484, 485, 486
+};
+
+static const short yycheck[] = { 4,
+ 4, 130, 131, 136, 278, 415, 70, 12, 117, 299,
+ 346, 12, 299, 265, 136, 20, 20, 108, 109, 24,
+ 25, 4, 27, 87, 4, 52, 338, 188, 33, 33,
+ 388, 12, 647, 154, 98, 284, 284, 299, 131, 44,
+ 20, 46, 746, 387, 1043, 129, 71, 52, 1151, 134,
+ 9, 52, 983, 33, 59, 812, 20, 1179, 983, 990,
+ 44, 9, 161, 162, 181, 70, 71, 60, 9, 34,
+ 0, 52, 77, 77, 7, 1212, 48, 367, 55, 10,
+ 367, 34, 87, 10, 1159, 1222, 56, 0, 362, 56,
+ 95, 96, 54, 98, 406, 55, 101, 77, 60, 104,
+ 105, 62, 4, 108, 109, 7, 107, 36, 45, 102,
+ 70, 95, 60, 77, 119, 119, 121, 122, 36, 1,
+ 1242, 54, 92, 44, 55, 92, 103, 60, 55, 60,
+ 1205, 58, 136, 60, 139, 140, 141, 121, 103, 119,
+ 1243, 1140, 44, 102, 3, 4, 5, 6, 7, 51,
+ 103, 53, 10, 1275, 102, 1292, 514, 76, 4, 92,
+ 165, 102, 93, 65, 125, 439, 93, 88, 89, 88,
+ 89, 102, 54, 75, 103, 336, 35, 299, 183, 183,
+ 99, 142, 154, 85, 301, 103, 88, 89, 193, 161,
+ 162, 51, 51, 950, 53, 54, 157, 55, 4, 154,
+ 44, 286, 60, 183, 56, 188, 910, 53, 60, 69,
+ 37, 3, 4, 185, 343, 344, 345, 136, 4, 66,
+ 284, 1324, 1325, 23, 51, 230, 231, 88, 4, 75,
+ 185, 1162, 4, 88, 44, 93, 397, 3, 85, 98,
+ 99, 100, 102, 56, 88, 367, 271, 53, 4, 250,
+ 343, 344, 44, 136, 387, 4, 103, 229, 342, 51,
+ 60, 53, 6, 7, 55, 387, 271, 53, 12, 75,
+ 275, 392, 277, 278, 279, 51, 4, 53, 88, 7,
+ 285, 53, 531, 88, 89, 44, 377, 378, 1219, 65,
+ 103, 35, 391, 392, 299, 299, 88, 53, 60, 75,
+ 44, 285, 149, 75, 53, 69, 278, 312, 44, 88,
+ 315, 102, 656, 89, 319, 1246, 51, 56, 323, 323,
+ 419, 1246, 1253, 51, 56, 53, 75, 456, 1253, 88,
+ 89, 3, 4, 948, 69, 647, 55, 65, 54, 258,
+ 102, 56, 1046, 323, 88, 4, 1277, 75, 195, 3,
+ 4, 70, 88, 336, 359, 360, 361, 362, 363, 323,
+ 44, 89, 367, 367, 103, 51, 102, 328, 373, 54,
+ 375, 103, 377, 378, 55, 359, 1307, 361, 362, 384,
+ 299, 53, 1307, 387, 55, 390, 103, 41, 393, 70,
+ 362, 513, 51, 54, 53, 3, 4, 102, 520, 53,
+ 383, 406, 563, 75, 88, 89, 65, 412, 413, 56,
+ 415, 415, 3, 4, 397, 420, 75, 56, 102, 391,
+ 392, 75, 3, 4, 782, 44, 102, 4, 412, 83,
+ 7, 102, 102, 41, 55, 415, 283, 781, 4, 58,
+ 401, 446, 447, 448, 449, 53, 102, 419, 367, 102,
+ 41, 27, 9, 258, 56, 44, 103, 418, 341, 306,
+ 589, 462, 53, 103, 103, 31, 92, 27, 387, 88,
+ 4, 496, 53, 7, 51, 83, 53, 3, 56, 640,
+ 602, 102, 55, 97, 75, 51, 608, 53, 65, 56,
+ 55, 496, 83, 498, 105, 97, 589, 70, 75, 88,
+ 89, 420, 60, 815, 387, 510, 511, 89, 513, 513,
+ 44, 801, 673, 102, 801, 520, 520, 51, 437, 53,
+ 44, 440, 60, 656, 551, 103, 445, 446, 447, 448,
+ 449, 65, 108, 109, 656, 787, 103, 102, 637, 801,
+ 459, 75, 791, 791, 463, 550, 551, 859, 108, 109,
+ 551, 85, 435, 56, 88, 89, 88, 3, 4, 25,
+ 26, 980, 920, 982, 88, 141, 550, 372, 60, 678,
+ 551, 3, 4, 5, 6, 7, 495, 69, 550, 44,
+ 563, 141, 44, 60, 585, 432, 44, 89, 54, 105,
+ 384, 88, 69, 58, 60, 600, 601, 602, 602, 393,
+ 103, 44, 44, 608, 608, 51, 44, 53, 54, 492,
+ 44, 494, 495, 25, 26, 58, 1026, 78, 79, 624,
+ 625, 53, 627, 88, 89, 44, 88, 89, 44, 88,
+ 88, 967, 437, 3, 4, 440, 948, 442, 443, 574,
+ 445, 915, 647, 526, 27, 88, 88, 54, 781, 54,
+ 88, 656, 656, 60, 88, 60, 69, 640, 463, 781,
+ 88, 508, 467, 785, 88, 637, 685, 686, 687, 88,
+ 675, 41, 88, 55, 56, 1, 523, 55, 56, 801,
+ 685, 686, 687, 53, 69, 92, 9, 92, 493, 51,
+ 673, 55, 56, 628, 499, 578, 657, 23, 60, 25,
+ 26, 662, 663, 675, 639, 666, 32, 69, 982, 870,
+ 871, 69, 873, 83, 69, 3, 4, 54, 6, 56,
+ 55, 843, 4, 60, 103, 7, 109, 791, 54, 55,
+ 56, 102, 58, 340, 60, 6, 7, 656, 1074, 1075,
+ 745, 12, 55, 56, 835, 836, 916, 917, 102, 919,
+ 841, 842, 1088, 41, 23, 25, 26, 102, 141, 55,
+ 56, 766, 44, 768, 35, 53, 92, 4, 5, 51,
+ 775, 53, 1108, 1109, 779, 102, 781, 781, 783, 784,
+ 785, 785, 3, 65, 54, 54, 791, 56, 103, 58,
+ 60, 60, 4, 75, 31, 83, 801, 801, 54, 36,
+ 12, 377, 378, 85, 55, 56, 88, 89, 20, 7,
+ 3, 4, 24, 25, 51, 27, 53, 377, 378, 1155,
+ 58, 33, 102, 3, 4, 826, 745, 102, 833, 834,
+ 835, 836, 44, 58, 46, 840, 841, 842, 843, 843,
+ 52, 55, 56, 102, 37, 38, 88, 59, 41, 102,
+ 5, 6, 7, 105, 859, 105, 775, 12, 51, 71,
+ 53, 41, 781, 32, 869, 77, 749, 872, 102, 874,
+ 874, 876, 807, 53, 75, 76, 77, 78, 79, 762,
+ 35, 764, 801, 95, 96, 25, 26, 870, 871, 101,
+ 873, 102, 32, 105, 874, 75, 108, 109, 105, 51,
+ 54, 902, 56, 83, 865, 105, 60, 119, 60, 121,
+ 122, 916, 917, 102, 919, 55, 56, 69, 58, 4,
+ 5, 105, 857, 51, 54, 6, 56, 139, 140, 141,
+ 60, 866, 937, 938, 817, 88, 819, 44, 821, 858,
+ 875, 44, 1216, 948, 949, 949, 31, 1076, 753, 51,
+ 58, 36, 56, 165, 55, 56, 957, 876, 60, 55,
+ 1121, 1122, 56, 5, 6, 7, 51, 69, 53, 949,
+ 12, 183, 973, 1324, 1325, 980, 981, 982, 81, 82,
+ 785, 193, 56, 86, 87, 88, 89, 235, 236, 994,
+ 995, 838, 997, 35, 56, 378, 980, 56, 982, 55,
+ 4, 58, 6, 7, 1286, 1287, 58, 102, 12, 102,
+ 982, 1172, 619, 102, 102, 105, 55, 88, 230, 231,
+ 60, 1026, 1026, 27, 55, 55, 602, 31, 60, 1030,
+ 88, 35, 608, 58, 102, 102, 102, 1327, 843, 102,
+ 1327, 102, 602, 70, 70, 55, 1026, 51, 608, 53,
+ 1179, 102, 102, 858, 70, 70, 102, 60, 1167, 271,
+ 1021, 1022, 60, 60, 136, 277, 278, 279, 1069, 102,
+ 105, 89, 284, 285, 102, 1010, 1011, 83, 44, 103,
+ 656, 102, 1173, 1174, 88, 102, 105, 299, 1023, 1024,
+ 102, 102, 105, 1098, 941, 102, 656, 105, 103, 60,
+ 312, 102, 102, 315, 34, 58, 102, 319, 102, 58,
+ 102, 323, 88, 1242, 88, 1276, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 88, 338, 73, 74, 75,
+ 76, 77, 78, 79, 88, 103, 1097, 520, 1121, 1122,
+ 88, 748, 102, 102, 6, 102, 1275, 359, 360, 361,
+ 362, 363, 102, 105, 105, 367, 1117, 102, 60, 103,
+ 102, 373, 102, 375, 102, 377, 378, 972, 1173, 1174,
+ 1175, 88, 384, 60, 56, 56, 251, 4, 390, 254,
+ 54, 393, 257, 7, 56, 1120, 1169, 58, 263, 1172,
+ 102, 14, 1193, 102, 406, 60, 60, 272, 105, 1328,
+ 412, 413, 44, 415, 88, 1327, 56, 56, 420, 88,
+ 58, 58, 1217, 88, 102, 105, 102, 44, 15, 602,
+ 56, 56, 102, 56, 51, 608, 53, 1032, 1033, 1034,
+ 1035, 56, 102, 1217, 446, 447, 448, 449, 65, 1044,
+ 103, 848, 849, 102, 1216, 102, 56, 58, 75, 58,
+ 102, 1186, 1187, 9, 1189, 1190, 1175, 102, 85, 835,
+ 836, 88, 89, 56, 56, 841, 842, 843, 56, 341,
+ 88, 55, 1077, 656, 102, 835, 836, 91, 56, 58,
+ 1085, 841, 842, 843, 496, 102, 498, 1248, 88, 102,
+ 102, 9, 56, 1276, 901, 902, 102, 102, 510, 511,
+ 102, 513, 9, 56, 2, 0, 0, 690, 520, 129,
+ 118, 7, 1195, 801, 10, 387, 325, 815, 119, 531,
+ 119, 551, 1327, 1327, 99, 459, 1140, 1, 1150, 508,
+ 1181, 1077, 981, 681, 33, 805, 1193, 859, 550, 551,
+ 585, 796, 33, 949, 874, 872, 794, 539, 44, 23,
+ 957, 25, 26, 413, 1159, 496, 12, 1266, 32, 55,
+ 829, 1296, 58, 435, 60, 876, 1300, -1, -1, 444,
+ 1175, 3, 4, -1, 70, 1180, 1181, 1302, 1261, -1,
+ 54, 55, 56, -1, 58, -1, 60, -1, 600, 601,
+ 602, -1, 88, 89, -1, -1, 608, 93, -1, -1,
+ 1205, 784, -1, -1, -1, -1, 102, -1, 1327, 41,
+ -1, -1, 624, 625, -1, 627, 4, -1, 92, 51,
+ 492, 53, 494, 495, 1031, -1, -1, -1, 60, 1036,
+ -1, -1, 20, 65, -1, 647, -1, -1, -1, -1,
+ -1, -1, -1, 75, 656, 33, -1, -1, 520, -1,
+ -1, 83, -1, 836, 526, -1, 88, -1, 46, 842,
+ 843, -1, 1, 675, -1, 1270, -1, -1, -1, -1,
+ -1, 59, -1, 685, 686, 687, -1, -1, -1, -1,
+ -1, 1286, 1287, 71, 23, -1, 25, 26, 76, 77,
+ -1, -1, -1, 32, 1299, -1, -1, -1, -1, -1,
+ 88, 89, -1, 1110, 1111, 888, 578, -1, 96, -1,
+ -1, 99, -1, -1, -1, 54, 55, 56, -1, 58,
+ -1, 60, 1098, -1, -1, -1, -1, -1, -1, -1,
+ -1, 119, 1139, 745, 122, -1, 608, -1, 1098, -1,
+ -1, -1, -1, 4, 5, 6, 7, -1, 136, 10,
+ -1, 12, -1, 92, 766, -1, 768, -1, -1, -1,
+ -1, -1, -1, 775, -1, -1, 27, 779, -1, 781,
+ 31, 783, 784, 785, 35, -1, -1, 1184, 4, 791,
+ 6, 7, -1, -1, 1191, 1192, 12, -1, -1, 801,
+ 51, -1, 53, 1200, -1, 183, -1, 1173, 1174, 1175,
+ -1, 27, -1, 815, -1, 31, -1, 69, -1, 35,
+ -1, -1, -1, 1173, 1174, 1175, -1, -1, 690, -1,
+ -1, 833, 834, 835, 836, 51, -1, 53, 840, 841,
+ 842, 843, 93, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 230, -1, -1, -1, -1, 859, 24, 25,
+ -1, -1, 1259, 1260, 116, -1, -1, 869, -1, -1,
+ 872, -1, 874, -1, 876, -1, -1, -1, 3, 4,
+ 258, -1, 7, 135, -1, -1, -1, 749, -1, -1,
+ -1, -1, 757, 271, -1, -1, -1, -1, 150, -1,
+ 762, 279, 764, -1, -1, 71, -1, -1, -1, -1,
+ 76, -1, -1, -1, 916, 917, 41, 919, 783, 784,
+ -1, 299, 88, 89, -1, 1098, 51, -1, 53, 1326,
+ -1, -1, -1, 99, -1, 937, 938, -1, 10, -1,
+ 65, -1, 108, 109, -1, 323, 948, 949, -1, -1,
+ 75, -1, -1, 25, 26, 817, -1, 819, 83, 821,
+ 32, -1, -1, 88, -1, -1, -1, -1, -1, -1,
+ 136, -1, 44, 139, 140, -1, 841, 842, 980, 981,
+ 982, -1, 360, 55, 56, 363, 58, -1, 60, 367,
+ -1, -1, 994, 995, -1, 997, -1, -1, -1, -1,
+ -1, 1174, 1175, -1, -1, -1, -1, -1, -1, 387,
+ -1, -1, -1, -1, -1, -1, 88, 89, -1, -1,
+ 92, 93, 887, 888, 1026, -1, 3, 4, -1, -1,
+ 102, -1, -1, -1, -1, 413, -1, 415, -1, -1,
+ -1, -1, 420, -1, -1, -1, -1, -1, -1, 1,
+ -1, 3, 4, 5, 6, 7, -1, 922, -1, 437,
+ 12, -1, 440, -1, 41, -1, -1, 445, 446, 447,
+ 448, 449, 937, 938, 51, 27, 53, 3, 4, 31,
+ -1, 459, -1, 35, 36, 463, -1, -1, 65, 41,
+ -1, -1, 258, -1, -1, -1, 1098, -1, 75, 51,
+ -1, 53, 3, 4, 56, 271, 83, -1, 60, -1,
+ -1, 88, -1, 65, -1, 41, -1, 495, 496, -1,
+ -1, -1, -1, 75, -1, 51, -1, 53, 380, -1,
+ 56, 83, -1, 299, 386, 513, 88, -1, -1, 65,
+ 41, -1, 520, 44, -1, -1, -1, -1, 71, 75,
+ 51, 103, 53, 76, -1, -1, -1, 83, -1, 60,
+ -1, -1, 88, -1, 65, 88, 89, -1, -1, -1,
+ -1, 1173, 1174, 1175, 75, -1, 99, -1, 430, 431,
+ -1, 433, 83, -1, -1, -1, -1, 88, 1, -1,
+ 3, 4, 5, 6, 7, -1, -1, -1, -1, 12,
+ -1, 367, 4, 5, 6, 7, -1, -1, -1, -1,
+ 12, 377, 378, 136, 27, 1217, -1, -1, 31, -1,
+ -1, 387, 35, 36, 602, 27, -1, -1, 41, 31,
+ 608, -1, -1, 35, -1, -1, 1098, -1, 51, -1,
+ 53, -1, -1, 56, -1, -1, -1, 60, -1, 51,
+ -1, 53, 65, -1, 420, 4, -1, 6, 7, -1,
+ -1, -1, 75, 12, 516, 517, -1, 69, -1, 521,
+ 83, 437, -1, -1, 440, 88, -1, -1, 656, 445,
+ 446, 447, 448, 449, -1, -1, 35, -1, -1, -1,
+ 103, -1, -1, 459, -1, 44, -1, 463, -1, -1,
+ -1, -1, 51, -1, 53, -1, -1, -1, 1173, 1174,
+ -1, -1, -1, -1, -1, -1, 65, -1, -1, -1,
+ -1, -1, -1, -1, -1, 1327, 75, -1, -1, 495,
+ 496, -1, -1, 1195, -1, 258, 85, -1, -1, 88,
+ 89, -1, 594, 595, 510, 511, -1, 513, 271, -1,
+ -1, -1, 604, -1, 520, -1, -1, -1, 610, -1,
+ 3, 4, 5, 6, 7, -1, -1, 745, -1, 12,
+ -1, 749, -1, -1, -1, -1, 299, 3, 4, 1,
+ -1, 3, 4, 5, 6, 7, -1, -1, -1, -1,
+ 12, -1, 35, -1, -1, -1, -1, 775, 41, 1261,
+ -1, 44, 654, 781, -1, 27, -1, 785, 51, 31,
+ 53, -1, -1, 35, 36, 41, -1, -1, 44, 41,
+ -1, -1, 65, 801, -1, 51, -1, 53, -1, 51,
+ -1, 53, 75, -1, 600, 601, 602, -1, 60, 65,
+ 83, -1, 608, 65, 367, 88, -1, -1, -1, 75,
+ 828, -1, -1, 75, -1, -1, -1, 83, -1, 85,
+ -1, 83, 88, 89, 387, 843, 88, -1, -1, -1,
+ 3, 4, -1, -1, 7, -1, -1, -1, -1, -1,
+ 858, 103, -1, -1, -1, -1, -1, -1, -1, -1,
+ 656, -1, -1, -1, -1, -1, 874, 420, 876, 751,
+ 752, 1, 754, 3, 4, 5, 6, 7, 41, -1,
+ -1, -1, 12, -1, 437, -1, -1, 440, 51, -1,
+ 53, -1, 445, 446, 447, 448, 449, 27, 780, -1,
+ -1, 31, 65, -1, -1, 35, 459, -1, -1, -1,
+ 463, 41, 75, -1, -1, 45, -1, -1, -1, -1,
+ 83, 51, -1, 53, -1, 88, 56, -1, 3, 4,
+ -1, -1, 7, -1, -1, 65, -1, -1, 76, -1,
+ -1, 949, 495, 496, -1, 75, -1, -1, -1, 745,
+ -1, -1, -1, 83, -1, -1, -1, 839, 88, -1,
+ 513, 99, -1, -1, 94, -1, 41, 520, -1, 44,
+ 852, 853, 854, 981, -1, 983, 51, -1, 53, 775,
+ -1, -1, 990, -1, -1, 781, -1, 783, 784, 785,
+ 65, -1, -1, -1, 3, 4, -1, -1, 136, -1,
+ 75, -1, -1, -1, -1, 801, -1, -1, 83, -1,
+ 85, -1, -1, 88, 89, -1, -1, -1, 1026, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+ 78, 79, 41, -1, -1, -1, -1, -1, -1, 835,
+ 836, -1, 51, -1, 53, 841, 842, 843, 930, 602,
+ -1, 60, -1, -1, -1, 608, 65, -1, -1, -1,
+ -1, -1, 858, -1, -1, 1, 75, 3, 4, 1077,
+ 6, 7, 8, 9, 83, 11, 12, -1, -1, 88,
+ 876, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 27, 28, 29, 30, 31, -1, 33, -1, 35,
+ -1, 37, 38, 656, 40, 41, 42, 43, -1, -1,
+ 46, 47, 48, 49, 50, 51, -1, 53, 54, -1,
+ -1, 259, -1, -1, -1, -1, 62, -1, -1, 65,
+ -1, -1, 1014, -1, -1, -1, -1, 73, 74, 75,
+ -1, 937, 938, 1151, -1, 81, 82, 83, -1, -1,
+ -1, 1159, 88, 89, 1162, -1, 92, -1, -1, -1,
+ 1, -1, 3, 4, 5, 6, 7, 1175, 104, -1,
+ -1, 12, 1180, 1181, -1, -1, -1, 1059, -1, -1,
+ 139, 140, 141, -1, -1, -1, 27, -1, -1, -1,
+ 31, -1, 745, -1, 35, 36, -1, 1205, -1, -1,
+ 41, -1, 340, 341, 1212, -1, -1, -1, -1, -1,
+ 51, 1219, 53, -1, 1222, -1, 3, 4, -1, 60,
+ 7, -1, 775, -1, 65, -1, -1, -1, 781, -1,
+ -1, -1, 785, -1, 75, 1243, -1, -1, 1246, -1,
+ -1, -1, 83, -1, -1, 1253, -1, 88, 801, 387,
+ -1, -1, -1, -1, 41, -1, -1, 44, -1, -1,
+ -1, -1, 103, -1, 51, -1, 53, -1, -1, 1277,
+ -1, -1, 410, -1, -1, -1, -1, -1, 65, -1,
+ -1, -1, -1, -1, 1292, -1, -1, -1, 75, -1,
+ 843, -1, -1, -1, -1, -1, 83, 435, 85, 1307,
+ -1, 88, 89, -1, -1, 858, -1, -1, -1, -1,
+ -1, 3, 4, -1, -1, 7, 1324, 1325, -1, 1327,
+ -1, 459, -1, 876, 462, -1, -1, 465, 466, -1,
+ 468, 469, 470, 471, 472, 473, 474, 475, 476, 477,
+ 478, 479, 480, 481, 482, 483, 484, 485, 486, 41,
+ -1, -1, 44, -1, 492, -1, 494, 495, -1, 51,
+ -1, 53, -1, 1159, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 65, -1, -1, -1, 1173, 1174, 1175,
+ -1, -1, 520, 75, 1180, 1181, 3, 4, 526, -1,
+ 7, 83, -1, 85, -1, -1, 88, 89, -1, -1,
+ -1, -1, -1, 541, 542, -1, -1, -1, -1, 1205,
+ -1, -1, -1, -1, 373, -1, 375, -1, 377, 378,
+ 277, 278, 279, -1, 41, -1, -1, 44, -1, -1,
+ -1, 390, -1, -1, 51, -1, 53, -1, -1, 1,
+ 578, 3, 4, 5, 6, 7, -1, 585, 65, -1,
+ 12, -1, -1, 412, -1, -1, -1, -1, 75, -1,
+ -1, -1, -1, 25, 26, 27, 83, -1, 85, 31,
+ 608, 88, 89, 35, -1, -1, -1, 39, 4, 41,
+ -1, 619, -1, 45, -1, -1, 3, 4, -1, 51,
+ 7, 53, -1, -1, 56, -1, -1, -1, 24, 25,
+ -1, 27, 359, 65, 361, 362, 363, 33, -1, -1,
+ -1, -1, -1, 75, -1, -1, -1, 655, 44, -1,
+ 46, 83, 3, 4, 41, -1, 88, -1, -1, -1,
+ -1, 1327, 94, 95, 51, -1, 53, -1, -1, -1,
+ 66, -1, -1, -1, -1, -1, -1, -1, 65, -1,
+ -1, -1, 690, -1, -1, -1, -1, -1, 75, -1,
+ 41, -1, -1, -1, -1, -1, 83, -1, -1, 95,
+ 51, 88, 53, 711, -1, 101, -1, 103, -1, 105,
+ -1, -1, 108, 109, 65, -1, -1, -1, -1, 4,
+ -1, 6, 7, 119, 75, 121, 122, 12, -1, -1,
+ -1, -1, 83, -1, -1, -1, 1159, 88, -1, -1,
+ 748, 749, 27, 139, 140, 141, 31, -1, -1, -1,
+ 35, -1, 1175, 149, 762, -1, 764, 1180, 1181, 44,
+ -1, -1, -1, -1, -1, -1, 51, -1, 53, 165,
+ -1, 600, 601, 602, 3, 4, -1, -1, 7, 608,
+ 65, -1, 1205, 510, 511, 793, 513, 183, -1, -1,
+ 75, -1, -1, 520, -1, 624, 625, 193, 627, -1,
+ 85, -1, -1, 88, 89, -1, -1, -1, -1, 817,
+ -1, 819, 41, 821, -1, -1, -1, -1, -1, -1,
+ -1, -1, 51, 550, 53, -1, -1, 656, -1, -1,
+ -1, -1, -1, -1, -1, -1, 65, -1, -1, -1,
+ 848, 849, -1, -1, -1, -1, 75, -1, -1, -1,
+ -1, -1, -1, -1, 83, 863, 864, -1, -1, 88,
+ 3, 4, 5, 6, 7, -1, -1, 10, 4, 12,
+ 6, 7, -1, 600, 601, 602, 12, -1, -1, 275,
+ -1, 608, -1, -1, 27, -1, -1, 283, 31, 285,
+ -1, 27, 35, 901, 902, 31, -1, 905, 41, 35,
+ -1, 44, -1, -1, 1327, -1, -1, -1, 51, -1,
+ 53, -1, -1, -1, -1, 51, 312, 53, 54, 315,
+ -1, -1, 65, 319, -1, -1, -1, -1, -1, -1,
+ -1, 4, 75, 6, 7, -1, -1, -1, -1, 12,
+ 83, -1, 85, -1, -1, 88, 89, -1, 675, 957,
+ 93, -1, 88, -1, 27, -1, -1, -1, 31, -1,
+ 968, -1, 35, 359, 360, 361, 362, -1, -1, -1,
+ -1, 367, -1, -1, -1, -1, -1, 373, 51, 375,
+ 53, 377, 378, 3, 4, 5, 6, 7, 384, -1,
+ -1, -1, 12, -1, 390, -1, -1, 393, 3, 4,
+ -1, -1, 7, -1, 833, 834, 835, 836, -1, 1017,
+ -1, 840, 841, 842, 843, 35, 412, 413, -1, 415,
+ -1, 41, -1, 1031, 44, -1, -1, -1, 1036, -1,
+ 1038, 51, -1, 53, -1, -1, 41, -1, -1, 766,
+ 869, 768, -1, -1, -1, 65, 51, -1, 53, -1,
+ -1, -1, 779, -1, -1, 75, 783, 784, 785, -1,
+ 65, -1, -1, 83, -1, 85, -1, -1, 88, 89,
+ 75, -1, -1, -1, -1, -1, -1, -1, 83, -1,
+ -1, -1, -1, 88, 3, 4, -1, -1, 7, -1,
+ 1098, -1, -1, 1, -1, 3, 4, 5, 6, 7,
+ -1, -1, 1110, 1111, 12, -1, 833, 834, 835, 836,
+ 1118, 1119, 508, 840, 841, 842, 843, -1, -1, 27,
+ -1, -1, 41, 31, 520, -1, -1, 35, -1, -1,
+ -1, 1139, 51, 41, 53, -1, -1, -1, -1, -1,
+ -1, -1, -1, 51, -1, 53, 65, -1, -1, -1,
+ -1, -1, 1160, -1, 550, -1, 75, 65, -1, -1,
+ -1, -1, -1, -1, 83, 994, 995, 75, 997, 88,
+ -1, -1, -1, -1, -1, 83, 1184, -1, -1, -1,
+ 88, -1, -1, 1191, 1192, -1, -1, 1195, -1, -1,
+ -1, -1, 1200, -1, 4, 4, 5, 6, 7, -1,
+ -1, 10, -1, 12, 600, 601, 602, -1, -1, -1,
+ 937, 938, 608, -1, 24, 25, -1, 27, 27, -1,
+ -1, -1, 31, 33, -1, -1, 35, -1, 624, 625,
+ -1, 627, -1, -1, 44, 44, 46, -1, -1, -1,
+ -1, -1, 51, -1, 53, -1, -1, -1, -1, -1,
+ -1, 1259, 1260, 1261, 3, 4, 65, -1, 7, -1,
+ 656, -1, -1, -1, -1, -1, 75, 994, 995, 1098,
+ 997, -1, -1, -1, -1, -1, 85, -1, -1, 88,
+ 89, -1, -1, -1, 93, 95, -1, -1, -1, -1,
+ -1, 101, 41, -1, 690, 105, -1, -1, 108, 109,
+ -1, -1, 51, -1, 53, -1, -1, -1, -1, 119,
+ -1, 121, 122, -1, -1, -1, 65, -1, 1326, -1,
+ -1, -1, -1, -1, -1, -1, 75, -1, -1, 139,
+ 140, 141, -1, -1, 83, -1, -1, -1, -1, 88,
+ -1, -1, -1, -1, 1173, 1174, 1175, -1, 1, -1,
+ -1, 4, -1, 6, 7, 165, -1, -1, -1, 12,
+ 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+ 78, 79, -1, 183, 27, -1, -1, -1, 31, -1,
+ -1, -1, 35, 193, 37, 38, 27, 783, 784, -1,
+ -1, 44, -1, -1, -1, -1, -1, -1, 51, -1,
+ 53, -1, 55, 44, 57, 58, 59, -1, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78, -1, -1, 81, 82,
+ 83, -1, -1, 86, -1, -1, 89, 833, 834, 835,
+ 836, -1, 838, -1, 840, 841, 842, 843, -1, -1,
+ -1, 104, -1, -1, 95, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 108, 109, -1,
+ -1, -1, -1, 869, 284, 285, 872, -1, 874, -1,
+ 121, -1, -1, -1, 3, 4, 5, 6, 7, -1,
+ -1, 887, 888, 12, -1, -1, -1, -1, 139, 140,
+ 141, -1, 312, -1, -1, 315, -1, -1, 27, 319,
+ -1, -1, 31, -1, -1, -1, 35, -1, -1, -1,
+ -1, -1, 41, -1, 165, 44, -1, -1, 338, -1,
+ -1, -1, 51, -1, 53, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 941, 65, -1, -1, 359,
+ 360, 361, 362, 949, -1, -1, 75, 367, -1, -1,
+ -1, -1, -1, 373, 83, 375, 85, 377, 378, 88,
+ 89, -1, -1, -1, 384, -1, -1, -1, -1, -1,
+ 390, -1, -1, 393, 980, 981, 982, -1, -1, -1,
+ 231, -1, -1, -1, -1, -1, -1, -1, 994, 995,
+ -1, 997, 412, 413, -1, 415, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 3, 4,
+ 12, 6, 7, 8, 9, -1, 11, 12, -1, -1,
+ 1026, -1, 24, 25, -1, 27, 277, 278, 279, -1,
+ -1, -1, 27, 28, 285, -1, 31, -1, 33, -1,
+ 35, -1, 37, 38, -1, 40, 41, 42, 43, -1,
+ 52, 46, 47, 48, 49, 50, 51, -1, 53, -1,
+ -1, -1, -1, -1, -1, -1, -1, 62, -1, -1,
+ 65, -1, -1, -1, -1, -1, -1, -1, 73, 74,
+ 75, -1, -1, -1, -1, -1, 81, 82, 83, -1,
+ -1, -1, 1098, 88, -1, -1, -1, -1, -1, 101,
+ 520, 103, 104, 105, -1, -1, 108, 109, 359, 104,
+ 361, 362, 363, -1, 3, 4, 5, 6, 7, -1,
+ -1, -1, 373, 12, 375, -1, 377, 378, -1, -1,
+ 550, -1, -1, 384, -1, -1, -1, -1, -1, 390,
+ -1, -1, 393, -1, 44, -1, 35, -1, -1, -1,
+ -1, -1, 41, -1, -1, 44, -1, -1, -1, -1,
+ -1, 412, 51, -1, 53, -1, 66, 1173, 1174, 1175,
+ -1, -1, -1, -1, -1, -1, 65, -1, -1, -1,
+ 600, 601, 602, -1, -1, 85, 75, -1, 608, -1,
+ -1, 193, -1, -1, 83, 95, 85, -1, -1, 88,
+ 89, -1, -1, -1, 624, 625, 4, 627, 6, 7,
+ -1, 1217, -1, -1, 12, -1, 3, 4, -1, -1,
+ 7, 121, -1, -1, -1, 4, 5, 6, 7, 231,
+ -1, 10, -1, 12, -1, -1, 656, 35, -1, 139,
+ 140, 141, -1, -1, 3, 4, 44, 498, 27, 149,
+ -1, -1, 31, 51, 41, 53, 35, -1, -1, 510,
+ 511, -1, 513, -1, 51, 165, 53, 65, -1, 520,
+ 690, -1, 51, 275, 53, 277, 278, 75, 65, -1,
+ 3, 4, 41, -1, -1, -1, -1, 85, 75, -1,
+ 88, 89, 51, -1, 53, 195, 83, 56, -1, 550,
+ -1, 88, -1, -1, 306, -1, 65, -1, -1, -1,
+ 312, -1, -1, 315, -1, -1, 75, 319, 41, -1,
+ -1, -1, -1, -1, 83, -1, -1, -1, 51, 88,
+ 53, 4, 5, 6, 7, -1, -1, -1, -1, 12,
+ -1, -1, 65, -1, -1, -1, -1, -1, -1, 600,
+ 601, 602, 75, -1, 27, -1, -1, 608, 31, -1,
+ 83, -1, 35, 783, 784, 88, -1, -1, -1, -1,
+ -1, 791, -1, 624, 625, -1, 627, -1, 51, -1,
+ 53, -1, -1, 283, -1, 285, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 815, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 656, -1, -1, -1, -1,
+ -1, -1, -1, 833, 834, 835, 836, -1, -1, -1,
+ 840, 841, 842, 843, 675, -1, -1, -1, -1, -1,
+ 432, -1, -1, -1, 685, 686, 687, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 869,
+ -1, -1, 872, -1, 874, -1, -1, -1, -1, 359,
+ -1, 361, 362, -1, -1, -1, -1, 887, 888, 277,
+ 278, 279, -1, 373, -1, 375, -1, 377, 378, -1,
+ -1, -1, -1, -1, 384, -1, -1, -1, -1, -1,
+ 390, -1, -1, 393, -1, -1, 498, -1, -1, -1,
+ -1, -1, 4, -1, -1, -1, 508, -1, 510, 511,
+ -1, -1, 412, -1, -1, 766, -1, 768, -1, -1,
+ -1, -1, 24, 25, -1, -1, -1, -1, 779, 949,
+ -1, 33, 783, 784, 785, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 46, -1, -1, -1, -1, 551,
+ -1, 359, -1, 361, 362, 363, -1, -1, -1, -1,
+ 980, 981, 982, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 994, 995, -1, 997, 4, 5,
+ 6, 7, 833, 834, 835, 836, 12, -1, -1, 840,
+ 841, 842, 843, -1, -1, -1, -1, -1, -1, 101,
+ -1, 27, -1, 105, -1, 31, 1026, -1, -1, 35,
+ -1, -1, -1, -1, -1, -1, -1, 119, 869, -1,
+ 122, -1, -1, 523, -1, 51, -1, 53, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 139, 140, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 550, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 916, 917, -1, 919, -1,
+ -1, -1, -1, 675, -1, -1, -1, -1, 1098, -1,
+ -1, 183, -1, 685, 686, 687, 937, 938, -1, -1,
+ -1, 193, -1, -1, -1, -1, -1, -1, -1, -1,
+ 600, 601, 602, 511, -1, 513, -1, -1, 608, -1,
+ -1, -1, 520, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 624, 625, -1, 627, -1, 980,
+ -1, 982, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 550, 994, 995, -1, 997, -1, -1, -1,
+ -1, -1, -1, 1173, 1174, 1175, 656, -1, -1, -1,
+ -1, -1, -1, -1, 766, -1, 768, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 779, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 601, 602, -1, -1, 1217, 3, 4,
+ 608, 6, 7, 8, 9, -1, 11, 12, -1, -1,
+ 312, -1, -1, 315, -1, -1, -1, 319, -1, -1,
+ -1, -1, 27, 28, 29, 30, 31, -1, 33, -1,
+ 35, -1, 37, 38, -1, 40, 41, 42, 43, -1,
+ -1, 46, 47, 48, 49, 50, 51, 1098, 53, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 360, -1,
+ -1, -1, -1, -1, -1, 367, -1, 675, -1, -1,
+ 872, 373, -1, 375, -1, -1, 81, 82, 83, -1,
+ -1, -1, -1, 88, -1, -1, -1, 92, 390, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 104,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 412, 413, -1, 415, 916, 917, -1, 919, -1, -1,
+ -1, -1, 1173, 1174, 1175, -1, -1, -1, -1, -1,
+ -1, -1, -1, 833, 834, 835, 836, -1, 838, -1,
+ 840, 841, 842, 843, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 766, -1,
+ 768, -1, -1, -1, -1, -1, 1217, -1, -1, 869,
+ -1, 779, -1, -1, -1, -1, 784, 785, -1, -1,
+ -1, 3, 4, 5, 6, 7, -1, -1, 10, -1,
+ 12, -1, 3, 4, 5, 6, 7, -1, -1, -1,
+ -1, 12, -1, -1, -1, 27, -1, -1, -1, 31,
+ -1, -1, -1, 35, -1, -1, 27, -1, -1, 41,
+ 31, -1, -1, -1, 35, 833, 834, -1, 836, 51,
+ 41, 53, 840, -1, 842, 843, -1, -1, -1, -1,
+ 51, 941, 53, 65, -1, 56, -1, -1, -1, -1,
+ -1, -1, -1, 75, 65, -1, -1, -1, -1, -1,
+ -1, 83, -1, -1, 75, -1, 88, -1, -1, -1,
+ -1, 93, 83, -1, -1, -1, -1, 88, -1, -1,
+ 980, -1, 982, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 994, 995, -1, 997, 600, 601,
+ 59, -1, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, -1, 624, 625, -1, 627, -1, -1, -1, 1,
+ 938, 3, 4, 5, 6, 7, 8, 9, -1, 11,
+ 12, 13, -1, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, -1, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, 54, -1, 56, -1, 994, 995, -1, 997,
+ 62, -1, -1, 65, -1, -1, -1, -1, 1098, -1,
+ -1, 73, 74, 75, -1, -1, -1, -1, -1, 81,
+ 82, 83, -1, -1, -1, -1, 88, -1, 90, 1,
+ 92, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 103, 104, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, 44, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, 54, 1173, 1174, 1175, -1, -1, -1, -1,
+ 62, -1, -1, 65, -1, -1, -1, -1, -1, -1,
+ -1, 73, 74, 75, 10, -1, -1, -1, -1, 81,
+ 82, 83, -1, 85, -1, -1, 88, 89, -1, -1,
+ 92, 93, -1, -1, -1, -1, -1, 1217, -1, -1,
+ -1, -1, 104, -1, -1, -1, -1, -1, -1, -1,
+ -1, 833, 834, -1, -1, -1, -1, -1, 840, -1,
+ -1, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
+ 76, 77, 78, 79, -1, -1, -1, 869, -1, -1,
+ 872, 1, 874, 3, 4, 5, 6, 7, 8, 9,
+ -1, 11, 12, 13, -1, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, -1, 33, -1, 35, -1, 37, 38, -1,
+ 40, 41, 42, 43, -1, -1, 46, 47, 48, 49,
+ 50, 51, -1, 53, 54, -1, 56, -1, -1, -1,
+ -1, -1, 62, -1, -1, 65, -1, -1, -1, -1,
+ -1, -1, -1, 73, 74, 75, -1, 949, -1, -1,
+ -1, 81, 82, 83, -1, -1, -1, -1, 88, -1,
+ 90, -1, 92, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 103, 104, -1, -1, -1, -1, 981,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 994, 995, -1, 997, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 1, -1, 3, 4, 5, 6, 7, 8, 9,
+ -1, 11, 12, 13, 1026, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, -1, 33, -1, 35, -1, 37, 38, -1,
+ 40, 41, 42, 43, -1, -1, 46, 47, 48, 49,
+ 50, 51, -1, 53, 54, -1, 56, -1, -1, -1,
+ -1, -1, 62, -1, -1, 65, -1, -1, -1, -1,
+ -1, -1, -1, 73, 74, 75, -1, -1, -1, -1,
+ -1, 81, 82, 83, -1, -1, -1, -1, 88, -1,
+ 90, -1, 92, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 103, 104, 1, -1, 3, 4, 5,
+ 6, 7, 8, 9, -1, 11, 12, 13, -1, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, -1, 33, -1, 35,
+ -1, 37, 38, -1, 40, 41, 42, 43, -1, -1,
+ 46, 47, 48, 49, 50, 51, -1, 53, 54, -1,
+ 56, -1, -1, -1, -1, -1, 62, -1, -1, 65,
+ -1, -1, -1, -1, -1, -1, -1, 73, 74, 75,
+ -1, -1, -1, -1, -1, 81, 82, 83, -1, -1,
+ -1, -1, 88, -1, 90, -1, 92, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 103, 104, 1,
+ -1, 3, 4, 5, 6, 7, 8, 9, -1, 11,
+ 12, 13, -1, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, -1, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, 54, -1, 56, -1, -1, -1, -1, -1,
+ 62, -1, -1, 65, -1, -1, -1, -1, -1, -1,
+ -1, 73, 74, 75, -1, -1, -1, -1, -1, 81,
+ 82, 83, -1, -1, -1, -1, 88, -1, 90, -1,
+ 92, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 103, 104, 1, -1, 3, 4, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, -1, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, -1, 33, -1, 35, -1, 37,
+ 38, -1, 40, 41, 42, 43, -1, -1, 46, 47,
+ 48, 49, 50, 51, -1, 53, 54, -1, 56, -1,
+ -1, -1, -1, -1, 62, -1, -1, 65, -1, -1,
+ -1, -1, -1, -1, -1, 73, 74, 75, -1, -1,
+ -1, -1, -1, 81, 82, 83, -1, -1, -1, -1,
+ 88, -1, 90, 1, 92, 3, 4, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 104, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, -1, 33, -1, 35, -1, 37,
+ 38, -1, 40, 41, 42, 43, -1, -1, 46, 47,
+ 48, 49, 50, 51, -1, 53, 54, -1, 56, -1,
+ -1, -1, -1, -1, 62, -1, -1, 65, -1, -1,
+ -1, -1, -1, -1, -1, 73, 74, 75, -1, -1,
+ -1, -1, -1, 81, 82, 83, -1, -1, -1, -1,
+ 88, -1, 90, 1, 92, 3, 4, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 104, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, -1, 33, -1, 35, -1, 37,
+ 38, -1, 40, 41, 42, 43, -1, -1, 46, 47,
+ 48, 49, 50, 51, -1, 53, 54, -1, 56, -1,
+ -1, -1, -1, -1, 62, -1, -1, 65, -1, -1,
+ -1, -1, -1, -1, -1, 73, 74, 75, -1, -1,
+ -1, -1, -1, 81, 82, 83, -1, -1, -1, -1,
+ 88, -1, 90, 1, 92, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, -1, 104, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 27,
+ 28, 29, 30, 31, -1, 33, -1, 35, -1, 37,
+ 38, -1, 40, 41, 42, 43, 44, -1, 46, 47,
+ 48, 49, 50, 51, -1, 53, 54, -1, -1, -1,
+ -1, -1, -1, -1, 62, -1, -1, 65, -1, -1,
+ -1, -1, -1, -1, -1, 73, 74, 75, -1, -1,
+ -1, -1, -1, 81, 82, 83, -1, 85, -1, -1,
+ 88, 89, -1, -1, 92, 93, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 1, 104, 3, 4, 5,
+ 6, 7, 8, 9, 10, 11, 12, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 27, 28, 29, 30, 31, -1, 33, -1, 35,
+ -1, 37, 38, -1, 40, 41, 42, 43, -1, -1,
+ 46, 47, 48, 49, 50, 51, -1, 53, -1, -1,
+ -1, -1, -1, -1, -1, -1, 62, -1, -1, 65,
+ -1, -1, -1, -1, -1, -1, -1, 73, 74, 75,
+ -1, -1, -1, -1, -1, 81, 82, 83, -1, -1,
+ -1, -1, 88, -1, -1, -1, 92, 93, -1, -1,
+ -1, -1, -1, -1, -1, -1, 102, 1, 104, 3,
+ 4, 5, 6, 7, 8, 9, 10, 11, 12, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 27, 28, 29, 30, 31, -1, 33,
+ -1, 35, -1, 37, 38, -1, 40, 41, 42, 43,
+ -1, -1, 46, 47, 48, 49, 50, 51, -1, 53,
+ -1, -1, -1, -1, -1, -1, -1, -1, 62, -1,
+ -1, 65, -1, -1, -1, -1, -1, -1, -1, 73,
+ 74, 75, -1, -1, -1, -1, -1, 81, 82, 83,
+ -1, -1, -1, -1, 88, -1, -1, -1, 92, 93,
+ -1, -1, -1, -1, -1, -1, -1, -1, 102, 1,
+ 104, 3, 4, -1, 6, 7, 8, 9, -1, 11,
+ 12, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, -1, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, 54, -1, -1, -1, -1, -1, -1, -1,
+ 62, -1, -1, 65, -1, -1, -1, -1, -1, -1,
+ -1, 73, 74, 75, -1, -1, -1, -1, -1, 81,
+ 82, 83, -1, -1, -1, -1, 88, 89, -1, 1,
+ 92, 3, 4, -1, 6, 7, 8, 9, -1, 11,
+ 12, 103, 104, -1, -1, -1, -1, 19, -1, -1,
+ -1, -1, -1, -1, -1, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, -1, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, 54, -1, -1, -1, -1, -1, -1, -1,
+ 62, -1, -1, 65, -1, -1, -1, -1, -1, -1,
+ -1, 73, 74, 75, -1, -1, -1, -1, -1, 81,
+ 82, 83, -1, -1, -1, -1, 88, -1, -1, 1,
+ 92, 3, 4, -1, 6, 7, 8, 9, -1, 11,
+ 12, 103, 104, -1, -1, -1, -1, 19, -1, -1,
+ -1, -1, -1, -1, -1, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, -1, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, 54, -1, -1, -1, -1, -1, -1, -1,
+ 62, -1, -1, 65, -1, -1, -1, -1, -1, -1,
+ -1, 73, 74, 75, -1, -1, -1, -1, -1, 81,
+ 82, 83, -1, -1, -1, -1, 88, -1, -1, 1,
+ 92, 3, 4, 5, 6, 7, 8, 9, -1, 11,
+ 12, 103, 104, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, -1, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, 54, -1, 56, -1, -1, -1, -1, -1,
+ 62, -1, -1, 65, -1, -1, -1, -1, -1, -1,
+ -1, 73, 74, 75, -1, -1, -1, -1, -1, 81,
+ 82, 83, -1, -1, -1, -1, 88, -1, -1, 1,
+ 92, 3, 4, -1, 6, 7, 8, 9, -1, 11,
+ 12, -1, 104, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, -1, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, 54, -1, -1, -1, -1, -1, -1, -1,
+ 62, -1, -1, 65, -1, -1, -1, -1, -1, -1,
+ -1, 73, 74, 75, -1, -1, -1, -1, -1, 81,
+ 82, 83, -1, -1, -1, -1, 88, -1, -1, 1,
+ 92, 3, 4, -1, 6, 7, 8, 9, -1, 11,
+ 12, -1, 104, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, -1, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, 54, -1, -1, -1, -1, -1, -1, -1,
+ 62, -1, -1, 65, -1, -1, -1, -1, -1, -1,
+ -1, 73, 74, 75, -1, -1, -1, -1, -1, 81,
+ 82, 83, -1, -1, -1, -1, 88, -1, -1, 1,
+ 92, 3, 4, -1, 6, 7, 8, 9, -1, 11,
+ 12, -1, 104, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, -1, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, -1, -1, -1, -1, -1, -1, 60, -1,
+ 62, -1, -1, 65, -1, -1, -1, -1, -1, -1,
+ -1, 73, 74, 75, -1, -1, -1, -1, -1, 81,
+ 82, 83, -1, -1, -1, -1, 88, -1, -1, 1,
+ 92, 3, 4, -1, 6, 7, 8, 9, -1, 11,
+ 12, -1, 104, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, -1, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, 54, -1, -1, -1, -1, -1, -1, -1,
+ 62, -1, -1, 65, -1, -1, -1, -1, -1, -1,
+ -1, 73, 74, 75, -1, -1, -1, -1, -1, 81,
+ 82, 83, -1, -1, -1, -1, 88, -1, -1, 1,
+ 92, 3, 4, -1, 6, 7, 8, 9, -1, 11,
+ 12, -1, 104, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, -1, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, -1, -1, 56, -1, -1, -1, -1, -1,
+ 62, -1, -1, 65, -1, -1, -1, -1, -1, -1,
+ -1, 73, 74, 75, -1, -1, -1, -1, -1, 81,
+ 82, 83, -1, -1, -1, -1, 88, -1, -1, 1,
+ 92, 3, 4, -1, 6, 7, 8, 9, -1, 11,
+ 12, -1, 104, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, -1, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, -1, -1, -1, -1, -1, -1, -1, -1,
+ 62, -1, -1, 65, -1, -1, -1, -1, -1, -1,
+ -1, 73, 74, 75, -1, -1, -1, -1, -1, 81,
+ 82, 83, -1, -1, -1, -1, 88, -1, -1, -1,
+ 92, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 102, 1, 104, 3, 4, -1, 6, 7, 8, 9,
+ -1, 11, 12, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 27, 28, 29,
+ 30, 31, -1, 33, -1, 35, -1, 37, 38, -1,
+ 40, 41, 42, 43, -1, -1, 46, 47, 48, 49,
+ 50, 51, -1, 53, -1, -1, -1, -1, -1, -1,
+ -1, -1, 62, -1, -1, 65, -1, -1, -1, -1,
+ -1, -1, -1, 73, 74, 75, -1, -1, -1, -1,
+ -1, 81, 82, 83, -1, -1, -1, -1, 88, -1,
+ -1, 1, 92, 3, 4, -1, 6, 7, 8, 9,
+ -1, 11, 12, -1, 104, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 27, 28, 29,
+ 30, 31, -1, 33, -1, 35, -1, 37, 38, -1,
+ 40, 41, 42, 43, -1, -1, 46, 47, 48, 49,
+ 50, 51, -1, 53, -1, -1, -1, -1, -1, -1,
+ -1, -1, 62, -1, -1, 65, -1, -1, -1, -1,
+ -1, -1, -1, 73, 74, 75, -1, -1, -1, -1,
+ -1, 81, 82, 83, -1, -1, -1, -1, 88, -1,
+ 3, 4, 92, 6, 7, 8, 9, -1, 11, 12,
+ -1, -1, -1, -1, 104, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 27, 28, 29, 30, 31, -1,
+ 33, -1, 35, -1, 37, 38, -1, 40, 41, 42,
+ 43, -1, -1, 46, 47, 48, 49, 50, 51, -1,
+ 53, -1, -1, -1, -1, -1, -1, -1, -1, 62,
+ -1, -1, 65, -1, -1, -1, -1, -1, -1, -1,
+ 73, 74, 75, -1, -1, -1, -1, -1, 81, 82,
+ 83, -1, -1, -1, -1, 88, -1, -1, -1, 92,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 104, 105, 3, 4, 5, 6, 7, 8, 9,
+ -1, 11, 12, 13, -1, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, -1, 33, -1, 35, -1, 37, 38, -1,
+ 40, 41, 42, 43, -1, -1, 46, 47, 48, 49,
+ 50, 51, -1, 53, 54, -1, 56, -1, -1, -1,
+ -1, -1, 62, -1, -1, 65, -1, -1, -1, -1,
+ -1, -1, -1, 73, 74, 75, -1, -1, -1, -1,
+ -1, 81, 82, 83, -1, -1, -1, -1, 88, -1,
+ 90, -1, 92, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 103, 104, 3, 4, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, -1, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, -1, 33, -1, 35, -1, 37,
+ 38, -1, 40, 41, 42, 43, -1, -1, 46, 47,
+ 48, 49, 50, 51, -1, 53, 54, -1, 56, -1,
+ -1, -1, -1, -1, 62, -1, -1, 65, -1, -1,
+ -1, -1, -1, -1, -1, 73, 74, 75, -1, -1,
+ -1, -1, -1, 81, 82, 83, -1, -1, -1, -1,
+ 88, -1, 90, -1, 92, 3, 4, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 104, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, -1, 33, -1, 35, -1, 37,
+ 38, -1, 40, 41, 42, 43, -1, -1, 46, 47,
+ 48, 49, 50, 51, -1, 53, 54, -1, 56, -1,
+ -1, -1, -1, -1, 62, -1, -1, 65, -1, -1,
+ -1, -1, -1, -1, -1, 73, 74, 75, -1, -1,
+ -1, -1, -1, 81, 82, 83, -1, -1, -1, -1,
+ 88, -1, 90, -1, 92, 3, 4, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 104, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, -1, 33, -1, 35, -1, 37,
+ 38, -1, 40, 41, 42, 43, -1, -1, 46, 47,
+ 48, 49, 50, 51, -1, 53, -1, -1, 56, -1,
+ -1, -1, -1, -1, 62, -1, -1, 65, -1, -1,
+ -1, -1, -1, -1, -1, 73, 74, 75, -1, -1,
+ -1, -1, -1, 81, 82, 83, -1, -1, -1, -1,
+ 88, -1, 90, -1, 92, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, -1, 104, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 27,
+ 28, 29, 30, 31, -1, 33, -1, 35, -1, 37,
+ 38, -1, 40, 41, 42, 43, 44, -1, 46, 47,
+ 48, 49, 50, 51, -1, 53, -1, -1, -1, -1,
+ -1, -1, -1, -1, 62, -1, -1, 65, -1, -1,
+ -1, -1, -1, -1, -1, 73, 74, 75, -1, -1,
+ -1, -1, -1, 81, 82, 83, -1, 85, -1, -1,
+ 88, 89, -1, -1, 92, 93, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 104, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 27, 28, 29, 30, 31, -1, 33, -1, 35, -1,
+ 37, 38, -1, 40, 41, 42, 43, 44, -1, 46,
+ 47, 48, 49, 50, 51, -1, 53, -1, -1, -1,
+ -1, -1, -1, -1, -1, 62, -1, -1, 65, -1,
+ -1, -1, -1, -1, -1, -1, 73, 74, 75, -1,
+ -1, -1, -1, -1, 81, 82, 83, -1, 85, -1,
+ -1, 88, 89, -1, -1, 92, 93, 3, 4, -1,
+ 6, 7, 8, 9, -1, 11, 12, 104, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 27, 28, 29, 30, 31, -1, 33, -1, 35,
+ -1, 37, 38, -1, 40, 41, 42, 43, 44, -1,
+ 46, 47, 48, 49, 50, 51, -1, 53, -1, -1,
+ -1, -1, -1, -1, -1, -1, 62, -1, -1, 65,
+ -1, -1, -1, -1, -1, -1, -1, 73, 74, 75,
+ -1, -1, -1, -1, -1, 81, 82, 83, -1, 85,
+ -1, -1, 88, 89, 3, 4, -1, 6, 7, 8,
+ 9, -1, 11, 12, -1, -1, -1, -1, 104, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 27, 28,
+ 29, 30, 31, -1, 33, -1, 35, -1, 37, 38,
+ -1, 40, 41, 42, 43, 44, -1, 46, 47, 48,
+ 49, 50, 51, -1, 53, -1, -1, -1, -1, -1,
+ -1, -1, -1, 62, -1, -1, 65, -1, -1, -1,
+ -1, -1, -1, -1, 73, 74, 75, -1, -1, -1,
+ -1, -1, 81, 82, 83, -1, 85, -1, -1, 88,
+ 89, 3, 4, -1, 6, 7, 8, 9, -1, 11,
+ 12, -1, -1, -1, -1, 104, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 27, 28, 29, 30, 31,
+ -1, 33, -1, 35, -1, 37, 38, -1, 40, 41,
+ 42, 43, 44, -1, 46, 47, 48, 49, 50, 51,
+ -1, 53, -1, -1, -1, -1, -1, -1, -1, -1,
+ 62, -1, -1, 65, -1, -1, -1, -1, -1, -1,
+ -1, 73, 74, 75, -1, -1, -1, -1, -1, 81,
+ 82, 83, -1, 85, -1, -1, 88, 89, 3, 4,
+ -1, 6, 7, 8, 9, -1, 11, 12, -1, -1,
+ -1, -1, 104, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 27, 28, 29, 30, 31, -1, 33, -1,
+ 35, -1, 37, 38, -1, 40, 41, 42, 43, -1,
+ -1, 46, 47, 48, 49, 50, 51, -1, 53, -1,
+ -1, 56, -1, -1, -1, -1, -1, 62, -1, -1,
+ 65, -1, -1, -1, -1, -1, -1, -1, 73, 74,
+ 75, -1, -1, -1, -1, -1, 81, 82, 83, -1,
+ -1, -1, -1, 88, -1, 3, 4, 92, 6, 7,
+ 8, 9, -1, 11, 12, -1, -1, -1, -1, 104,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 27,
+ 28, 29, 30, 31, -1, 33, -1, 35, -1, 37,
+ 38, -1, 40, 41, 42, 43, -1, -1, 46, 47,
+ 48, 49, 50, 51, -1, 53, -1, -1, -1, -1,
+ -1, -1, -1, -1, 62, -1, -1, 65, -1, -1,
+ -1, -1, -1, -1, -1, 73, 74, 75, -1, -1,
+ -1, -1, -1, 81, 82, 83, -1, -1, -1, -1,
+ 88, -1, 3, 4, 92, 6, 7, 8, 9, -1,
+ 11, 12, -1, -1, -1, -1, 104, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 27, 28, 29, 30,
+ 31, -1, 33, -1, 35, -1, 37, 38, -1, 40,
+ 41, 42, 43, -1, -1, 46, 47, 48, 49, 50,
+ 51, -1, 53, 54, -1, -1, -1, -1, -1, -1,
+ -1, 62, -1, -1, 65, -1, -1, -1, -1, -1,
+ -1, -1, 73, 74, 75, -1, -1, -1, -1, -1,
+ 81, 82, 83, -1, -1, 3, 4, 88, 6, 7,
+ 8, 9, -1, 11, 12, -1, -1, -1, -1, -1,
+ -1, -1, -1, 104, -1, -1, -1, -1, -1, 27,
+ 28, 29, 30, 31, -1, 33, -1, 35, -1, 37,
+ 38, -1, 40, 41, 42, 43, -1, -1, 46, 47,
+ 48, 49, 50, 51, -1, 53, -1, -1, -1, -1,
+ -1, -1, -1, -1, 62, -1, -1, 65, -1, -1,
+ -1, -1, -1, -1, -1, 73, 74, 75, -1, -1,
+ -1, -1, -1, 81, 82, 83, -1, -1, -1, -1,
+ 88, 89, 3, 4, -1, 6, 7, 8, 9, -1,
+ 11, 12, -1, -1, -1, -1, 104, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 27, 28, 29, 30,
+ 31, -1, 33, -1, 35, -1, 37, 38, -1, 40,
+ 41, 42, 43, -1, -1, 46, 47, 48, 49, 50,
+ 51, -1, 53, -1, -1, -1, -1, -1, -1, -1,
+ -1, 62, -1, -1, 65, -1, -1, -1, -1, -1,
+ -1, -1, 73, 74, 75, -1, -1, -1, -1, -1,
+ 81, 82, 83, -1, -1, -1, -1, 88, -1, 3,
+ 4, 92, 6, 7, 8, 9, -1, 11, 12, -1,
+ -1, -1, -1, 104, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 27, 28, 29, 30, 31, -1, 33,
+ -1, 35, -1, 37, 38, -1, 40, 41, 42, 43,
+ -1, -1, 46, 47, 48, 49, 50, 51, -1, 53,
+ -1, -1, -1, -1, -1, -1, -1, -1, 62, -1,
+ -1, 65, -1, -1, -1, -1, -1, -1, -1, 73,
+ 74, 75, -1, -1, -1, -1, -1, 81, 82, 83,
+ -1, -1, -1, -1, 88, -1, 3, 4, 92, 6,
+ 7, 8, 9, -1, 11, 12, -1, -1, -1, -1,
+ 104, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 27, 28, 29, 30, 31, -1, 33, -1, 35, -1,
+ 37, 38, -1, 40, 41, 42, 43, -1, -1, 46,
+ 47, 48, 49, 50, 51, -1, 53, -1, -1, -1,
+ -1, -1, -1, -1, -1, 62, -1, -1, 65, -1,
+ -1, -1, -1, -1, -1, -1, 73, 74, 75, -1,
+ -1, -1, -1, -1, 81, 82, 83, -1, -1, -1,
+ -1, 88, -1, 3, 4, 92, 6, 7, 8, 9,
+ -1, 11, 12, -1, -1, -1, -1, 104, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 27, 28, 29,
+ 30, 31, -1, 33, -1, 35, -1, 37, 38, -1,
+ 40, 41, 42, 43, -1, -1, 46, 47, 48, 49,
+ 50, 51, -1, 53, -1, -1, -1, -1, -1, -1,
+ -1, -1, 62, -1, -1, 65, -1, -1, -1, -1,
+ -1, -1, -1, 73, 74, 75, -1, -1, -1, -1,
+ -1, 81, 82, 83, -1, -1, -1, -1, 88, -1,
+ 3, 4, 92, 6, 7, 8, 9, -1, 11, 12,
+ -1, -1, -1, -1, 104, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 27, 28, 29, 30, 31, -1,
+ 33, -1, 35, -1, 37, 38, -1, 40, 41, 42,
+ 43, -1, -1, 46, 47, 48, 49, 50, 51, -1,
+ 53, -1, -1, -1, -1, -1, -1, -1, -1, 62,
+ -1, -1, 65, -1, -1, -1, -1, -1, -1, -1,
+ 73, 74, 75, -1, -1, -1, -1, -1, 81, 82,
+ 83, -1, -1, -1, -1, 88, -1, 3, 4, 92,
+ 6, 7, 8, 9, -1, 11, 12, -1, -1, -1,
+ -1, 104, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 27, 28, 29, 30, 31, -1, 33, -1, 35,
+ -1, 37, 38, -1, 40, 41, 42, 43, -1, -1,
+ 46, 47, 48, 49, 50, 51, -1, 53, -1, -1,
+ -1, -1, -1, -1, -1, -1, 62, -1, -1, 65,
+ -1, -1, -1, -1, -1, -1, -1, 73, 74, 75,
+ -1, -1, -1, -1, -1, 81, 82, 83, -1, -1,
+ 3, 4, 88, 6, 7, 8, 9, -1, 11, 12,
+ -1, -1, -1, -1, -1, -1, -1, -1, 104, -1,
+ -1, -1, -1, -1, 27, 28, 29, 30, 31, -1,
+ 33, -1, 35, -1, 37, 38, -1, 40, 41, 42,
+ 43, -1, -1, 46, 47, 48, 49, 50, 51, -1,
+ 53, -1, -1, -1, -1, -1, -1, -1, -1, 62,
+ -1, -1, 65, -1, -1, -1, -1, -1, -1, -1,
+ 73, 74, 75, -1, -1, -1, -1, -1, 81, 82,
+ 83, -1, -1, 3, 4, 88, 6, 7, 8, 9,
+ -1, 11, 12, -1, -1, -1, -1, -1, -1, -1,
+ -1, 104, -1, -1, -1, -1, -1, 27, 28, 29,
+ 30, 31, -1, 33, -1, 35, -1, 37, 38, -1,
+ 40, 41, 42, 43, -1, -1, 46, 47, 48, 49,
+ 50, 51, -1, 53, -1, -1, -1, -1, -1, -1,
+ -1, -1, 62, -1, -1, 65, -1, -1, -1, -1,
+ -1, -1, -1, 73, 74, 75, -1, -1, -1, -1,
+ -1, 81, 82, 83, -1, -1, 3, 4, 88, 6,
+ 7, 8, 9, -1, 11, 12, -1, -1, -1, -1,
+ -1, -1, -1, -1, 104, -1, -1, -1, -1, -1,
+ 27, 28, -1, -1, 31, -1, 33, -1, 35, -1,
+ 37, 38, -1, 40, 41, 42, 43, -1, -1, 46,
+ 47, 48, 49, 50, 51, -1, 53, -1, -1, -1,
+ -1, -1, -1, -1, -1, 62, -1, -1, 65, -1,
+ -1, -1, -1, -1, -1, -1, 73, 74, 75, -1,
+ -1, -1, -1, -1, 81, 82, 83, -1, -1, -1,
+ -1, 88, 3, 4, 5, 6, 7, -1, -1, 10,
+ -1, 12, -1, -1, -1, -1, -1, 104, -1, 3,
+ 4, 5, 6, 7, -1, -1, 27, -1, 12, -1,
+ 31, -1, -1, -1, 35, -1, -1, -1, -1, -1,
+ 41, -1, -1, 27, -1, -1, -1, 31, -1, -1,
+ 51, 35, 53, -1, -1, -1, -1, 41, -1, -1,
+ -1, -1, -1, -1, 65, -1, -1, 51, -1, 53,
+ -1, -1, -1, -1, 75, -1, 60, -1, -1, -1,
+ -1, 65, 83, -1, -1, -1, -1, 88, -1, -1,
+ -1, 75, 3, 4, 5, 6, 7, -1, -1, 83,
+ -1, 12, -1, -1, 88, -1, -1, -1, -1, 3,
+ 4, 5, 6, 7, -1, -1, 27, -1, 12, -1,
+ 31, -1, -1, -1, 35, -1, -1, -1, -1, -1,
+ 41, -1, -1, 27, -1, -1, -1, 31, -1, -1,
+ 51, 35, 53, -1, -1, 56, -1, 41, -1, -1,
+ -1, -1, -1, -1, 65, -1, -1, 51, -1, 53,
+ -1, -1, -1, -1, 75, -1, -1, -1, -1, -1,
+ -1, 65, 83, -1, -1, -1, -1, 88, -1, -1,
+ -1, 75, -1, -1, -1, -1, -1, -1, -1, 83,
+ 32, 57, 58, 59, 88, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
+ 76, 77, 78, 79, -1, 57, 58, 59, -1, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79, 55, 105,
+ 57, 58, 59, -1, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 75, 76, 77, 78, 79, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79, 57,
+ 58, 59, -1, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+ 78, 79, 57, 58, 59, -1, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, -1, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 75, 76, 77, 78, 79
+};
+/* -*-C-*- Note some compilers choke on comments on `#line' lines. */
+#line 3 "/usr/local/lib/bison.simple"
+
+/* Skeleton output parser for bison,
+ Copyright (C) 1984, 1989, 1990 Bob Corbett and Richard Stallman
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 1, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#ifndef alloca
+#ifdef __GNUC__
+#define alloca __builtin_alloca
+#else /* not GNU C. */
+#if (!defined (__STDC__) && defined (sparc)) || defined (__sparc__) || defined (__sparc) || defined (__sgi)
+#include <alloca.h>
+#else /* not sparc */
+#if defined (MSDOS) && !defined (__TURBOC__)
+#include <malloc.h>
+#else /* not MSDOS, or __TURBOC__ */
+#if defined(_AIX)
+#include <malloc.h>
+ #pragma alloca
+#else /* not MSDOS, __TURBOC__, or _AIX */
+#ifdef __hpux
+#ifdef __cplusplus
+extern "C" {
+void *alloca (unsigned int);
+};
+#else /* not __cplusplus */
+void *alloca ();
+#endif /* not __cplusplus */
+#endif /* __hpux */
+#endif /* not _AIX */
+#endif /* not MSDOS, or __TURBOC__ */
+#endif /* not sparc. */
+#endif /* not GNU C. */
+#endif /* alloca not defined. */
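+/* Net effect of the cascade above (a summary, not original skeleton
+   text): under GNU C, `alloca' becomes __builtin_alloca; on the listed
+   sparc/sgi and MSDOS systems it comes from <alloca.h> or <malloc.h>;
+   AIX gets #pragma alloca and HP-UX only a declaration; anything else
+   is assumed to supply a library alloca. */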
+
+/* This is the parser code that is written into each bison parser
+ when the %semantic_parser declaration is not specified in the grammar.
+ It was written by Richard Stallman by simplifying the hairy parser
+ used when %semantic_parser is specified. */
+
+/* Note: there must be only one dollar sign in this file.
+ It is replaced by the list of actions, each action
+ as one case of the switch. */
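+/* In the generated parser read here, that substitution has already been
+   performed: the actions appear as the cases of the large `switch (yyn)'
+   statement further down in this file. */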
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY -2
+#define YYEOF 0
+#define YYACCEPT return(0)
+#define YYABORT return(1)
+#define YYERROR goto yyerrlab1
+/* Like YYERROR except do call yyerror.
+ This remains here temporarily to ease the
+ transition to the new meaning of YYERROR, for GCC.
+ Once GCC version 2 has supplanted version 1, this can go. */
+#define YYFAIL goto yyerrlab
+#define YYRECOVERING() (!!yyerrstatus)
+#define YYBACKUP(token, value) \
+do \
+ if (yychar == YYEMPTY && yylen == 1) \
+ { yychar = (token), yylval = (value); \
+ yychar1 = YYTRANSLATE (yychar); \
+ YYPOPSTACK; \
+ goto yybackup; \
+ } \
+ else \
+ { yyerror ("syntax error: cannot back up"); YYERROR; } \
+while (0)
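+/* Illustrative sketch only (not part of the skeleton): an action for a
+   one-symbol rule might substitute a fresh lookahead like
+
+	YYBACKUP (IDENTIFIER, yylval);
+
+   which, per the test above, succeeds only while no lookahead has been
+   read (yychar == YYEMPTY) and the rule matched exactly one symbol
+   (yylen == 1); otherwise yyerror reports "syntax error: cannot back
+   up".  `IDENTIFIER' here stands for any token number of the grammar. */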
+
+#define YYTERROR 1
+#define YYERRCODE 256
+
+#ifndef YYPURE
+#define YYLEX yylex()
+#endif
+
+#ifdef YYPURE
+#ifdef YYLSP_NEEDED
+#define YYLEX yylex(&yylval, &yylloc)
+#else
+#define YYLEX yylex(&yylval)
+#endif
+#endif
+
+/* If nonreentrant, generate the variables here */
+
+#ifndef YYPURE
+
+int yychar; /* the lookahead symbol */
+YYSTYPE yylval; /* the semantic value of the */
+ /* lookahead symbol */
+
+#ifdef YYLSP_NEEDED
+YYLTYPE yylloc; /* location data for the lookahead */
+ /* symbol */
+#endif
+
+int yynerrs; /* number of parse errors so far */
+#endif /* not YYPURE */
+
+#if YYDEBUG != 0
+int yydebug; /* nonzero means print parse trace */
+/* Since this is uninitialized, it does not stop multiple parsers
+ from coexisting. */
+#endif
+
+/* YYINITDEPTH indicates the initial size of the parser's stacks */
+
+#ifndef YYINITDEPTH
+#define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH is the maximum size the stacks can grow to
+ (effective only if the built-in stack extension method is used). */
+
+#if YYMAXDEPTH == 0
+#undef YYMAXDEPTH
+#endif
+
+#ifndef YYMAXDEPTH
+#define YYMAXDEPTH 10000
+#endif
+
+/* Prevent warning if -Wstrict-prototypes. */
+#ifdef __GNUC__
+int yyparse (void);
+#endif
+
+#if __GNUC__ > 1 /* GNU C and GNU C++ define this. */
+#define __yy_bcopy(FROM,TO,COUNT) __builtin_memcpy(TO,FROM,COUNT)
+#else /* not GNU C or C++ */
+#ifndef __cplusplus
+
+/* This is the most reliable way to avoid incompatibilities
+ in available built-in functions on various systems. */
+static void
+__yy_bcopy (from, to, count)
+ char *from;
+ char *to;
+ int count;
+{
+ register char *f = from;
+ register char *t = to;
+ register int i = count;
+
+ while (i-- > 0)
+ *t++ = *f++;
+}
+
+#else /* __cplusplus */
+
+/* This is the most reliable way to avoid incompatibilities
+ in available built-in functions on various systems. */
+static void
+__yy_bcopy (char *from, char *to, int count)
+{
+ register char *f = from;
+ register char *t = to;
+ register int i = count;
+
+ while (i-- > 0)
+ *t++ = *f++;
+}
+
+#endif
+#endif
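+/* Note that the GNU C branch above instead maps
+   __yy_bcopy (FROM, TO, COUNT) onto __builtin_memcpy (TO, FROM, COUNT),
+   i.e. it swaps the first two arguments from bcopy's (src, dest) order
+   into memcpy's (dest, src) order while keeping the same interface. */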
+
+#line 184 "/usr/local/lib/bison.simple"
+
+/* The user can define YYPARSE_PARAM as the name of an argument to be passed
+ into yyparse. The argument should have type void *.
+ It should actually point to an object.
+ Grammar actions can access the variable by casting it
+ to the proper pointer type. */
+
+#ifdef YYPARSE_PARAM
+#define YYPARSE_PARAM_DECL void *YYPARSE_PARAM;
+#else
+#define YYPARSE_PARAM
+#define YYPARSE_PARAM_DECL
+#endif
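+/* Hypothetical use of the hook above, with made-up names: a grammar
+   built with
+
+	#define YYPARSE_PARAM parse_ctx
+
+   could recover its state object inside an action through a cast, e.g.
+
+	((struct my_parse_ctx *) parse_ctx)->error_count++;
+
+   `parse_ctx', `struct my_parse_ctx' and `error_count' are illustrative
+   only. */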
+
+int
+yyparse(YYPARSE_PARAM)
+ YYPARSE_PARAM_DECL
+{
+ register int yystate;
+ register int yyn;
+ register short *yyssp;
+ register YYSTYPE *yyvsp;
+ int yyerrstatus; /* number of tokens to shift before error messages enabled */
+ int yychar1 = 0; /* lookahead token as an internal (translated) token number */
+
+ short yyssa[YYINITDEPTH]; /* the state stack */
+ YYSTYPE yyvsa[YYINITDEPTH]; /* the semantic value stack */
+
+ short *yyss = yyssa; /* refer to the stacks thru separate pointers */
+ YYSTYPE *yyvs = yyvsa; /* to allow yyoverflow to reallocate them elsewhere */
+
+#ifdef YYLSP_NEEDED
+ YYLTYPE yylsa[YYINITDEPTH]; /* the location stack */
+ YYLTYPE *yyls = yylsa;
+ YYLTYPE *yylsp;
+
+#define YYPOPSTACK (yyvsp--, yyssp--, yylsp--)
+#else
+#define YYPOPSTACK (yyvsp--, yyssp--)
+#endif
+
+ int yystacksize = YYINITDEPTH;
+
+#ifdef YYPURE
+ int yychar;
+ YYSTYPE yylval;
+ int yynerrs;
+#ifdef YYLSP_NEEDED
+ YYLTYPE yylloc;
+#endif
+#endif
+
+ YYSTYPE yyval; /* the variable used to return */
+ /* semantic values from the action */
+ /* routines */
+
+ int yylen;
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Starting parse\n");
+#endif
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+
+ /* Initialize stack pointers.
+ Waste one element of value and location stack
+ so that they stay on the same level as the state stack.
+ The wasted elements are never initialized. */
+
+ yyssp = yyss - 1;
+ yyvsp = yyvs;
+#ifdef YYLSP_NEEDED
+ yylsp = yyls;
+#endif
+
+/* Push a new state, which is found in yystate . */
+/* In all cases, when you get here, the value and location stacks
+   have just been pushed, so pushing a state here evens the stacks. */
+yynewstate:
+
+ *++yyssp = yystate;
+
+ if (yyssp >= yyss + yystacksize - 1)
+ {
+ /* Give user a chance to reallocate the stack */
+ /* Use copies of these so that the &'s don't force the real ones into memory. */
+ YYSTYPE *yyvs1 = yyvs;
+ short *yyss1 = yyss;
+#ifdef YYLSP_NEEDED
+ YYLTYPE *yyls1 = yyls;
+#endif
+
+ /* Get the current used size of the three stacks, in elements. */
+ int size = yyssp - yyss + 1;
+
+#ifdef yyoverflow
+ /* Each stack pointer address is followed by the size of
+ the data in use in that stack, in bytes. */
+#ifdef YYLSP_NEEDED
+ /* This used to be a conditional around just the two extra args,
+ but that might be undefined if yyoverflow is a macro. */
+ yyoverflow("parser stack overflow",
+ &yyss1, size * sizeof (*yyssp),
+ &yyvs1, size * sizeof (*yyvsp),
+ &yyls1, size * sizeof (*yylsp),
+ &yystacksize);
+#else
+ yyoverflow("parser stack overflow",
+ &yyss1, size * sizeof (*yyssp),
+ &yyvs1, size * sizeof (*yyvsp),
+ &yystacksize);
+#endif
+
+ yyss = yyss1; yyvs = yyvs1;
+#ifdef YYLSP_NEEDED
+ yyls = yyls1;
+#endif
+#else /* no yyoverflow */
+ /* Extend the stack our own way. */
+ if (yystacksize >= YYMAXDEPTH)
+ {
+ yyerror("parser stack overflow");
+ return 2;
+ }
+ yystacksize *= 2;
+ if (yystacksize > YYMAXDEPTH)
+ yystacksize = YYMAXDEPTH;
+ yyss = (short *) alloca (yystacksize * sizeof (*yyssp));
+ __yy_bcopy ((char *)yyss1, (char *)yyss, size * sizeof (*yyssp));
+ yyvs = (YYSTYPE *) alloca (yystacksize * sizeof (*yyvsp));
+ __yy_bcopy ((char *)yyvs1, (char *)yyvs, size * sizeof (*yyvsp));
+#ifdef YYLSP_NEEDED
+ yyls = (YYLTYPE *) alloca (yystacksize * sizeof (*yylsp));
+ __yy_bcopy ((char *)yyls1, (char *)yyls, size * sizeof (*yylsp));
+#endif
+#endif /* no yyoverflow */
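+      /* With the defaults above (YYINITDEPTH 200, YYMAXDEPTH 10000) and
+	 no yyoverflow, successive extensions give stacks of 200, 400,
+	 800, 1600, 3200, 6400 and finally 10000 entries, the last
+	 doubling being clamped to YYMAXDEPTH. */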
+
+ yyssp = yyss + size - 1;
+ yyvsp = yyvs + size - 1;
+#ifdef YYLSP_NEEDED
+ yylsp = yyls + size - 1;
+#endif
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Stack size increased to %d\n", yystacksize);
+#endif
+
+ if (yyssp >= yyss + yystacksize - 1)
+ YYABORT;
+ }
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Entering state %d\n", yystate);
+#endif
+
+ goto yybackup;
+ yybackup:
+
+/* Do appropriate processing given the current state. */
+/* Read a lookahead token if we need one and don't already have one. */
+/* yyresume: */
+
+ /* First try to decide what to do without reference to lookahead token. */
+
+ yyn = yypact[yystate];
+ if (yyn == YYFLAG)
+ goto yydefault;
+
+  /* Not known => get a lookahead token if we don't already have one. */
+
+ /* yychar is either YYEMPTY or YYEOF
+ or a valid token in external form. */
+
+ if (yychar == YYEMPTY)
+ {
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Reading a token: ");
+#endif
+ yychar = YYLEX;
+ }
+
+  /* Convert token to internal form (in yychar1) for indexing tables with it. */
+
+ if (yychar <= 0) /* This means end of input. */
+ {
+ yychar1 = 0;
+ yychar = YYEOF; /* Don't call YYLEX any more */
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Now at end of input.\n");
+#endif
+ }
+ else
+ {
+ yychar1 = YYTRANSLATE(yychar);
+
+#if YYDEBUG != 0
+ if (yydebug)
+ {
+ fprintf (stderr, "Next token is %d (%s", yychar, yytname[yychar1]);
+ /* Give the individual parser a way to print the precise meaning
+ of a token, for further debugging info. */
+#ifdef YYPRINT
+ YYPRINT (stderr, yychar, yylval);
+#endif
+ fprintf (stderr, ")\n");
+ }
+#endif
+ }
+
+ yyn += yychar1;
+ if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != yychar1)
+ goto yydefault;
+
+ yyn = yytable[yyn];
+
+ /* yyn is what to do for this token type in this state.
+ Negative => reduce, -yyn is rule number.
+ Positive => shift, yyn is new state.
+ New state is final state => don't bother to shift,
+ just return success.
+ 0, or most negative number => error. */
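+  /* A worked reading of the encoding above, with illustrative numbers:
+     yyn == 34 means "shift and enter state 34"; yyn == -7 means
+     "reduce by rule 7"; yyn == 0 or yyn == YYFLAG means error. */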
+
+ if (yyn < 0)
+ {
+ if (yyn == YYFLAG)
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+ else if (yyn == 0)
+ goto yyerrlab;
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+ /* Shift the lookahead token. */
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Shifting token %d (%s), ", yychar, yytname[yychar1]);
+#endif
+
+ /* Discard the token being shifted unless it is eof. */
+ if (yychar != YYEOF)
+ yychar = YYEMPTY;
+
+ *++yyvsp = yylval;
+#ifdef YYLSP_NEEDED
+ *++yylsp = yylloc;
+#endif
+
+ /* count tokens shifted since error; after three, turn off error status. */
+ if (yyerrstatus) yyerrstatus--;
+
+ yystate = yyn;
+ goto yynewstate;
+
+/* Do the default action for the current state. */
+yydefault:
+
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+
+/* Do a reduction. yyn is the number of a rule to reduce with. */
+yyreduce:
+ yylen = yyr2[yyn];
+ if (yylen > 0)
+ yyval = yyvsp[1-yylen]; /* implement default value of the action */
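+  /* That is, the classic default action `$$ = $1': with yylen symbols
+     just matched, yyvsp[1-yylen] addresses the semantic value of the
+     first right-hand-side symbol. */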
+
+#if YYDEBUG != 0
+ if (yydebug)
+ {
+ int i;
+
+ fprintf (stderr, "Reducing via rule %d (line %d), ",
+ yyn, yyrline[yyn]);
+
+ /* Print the symbols being reduced, and their result. */
+ for (i = yyprhs[yyn]; yyrhs[i] > 0; i++)
+ fprintf (stderr, "%s ", yytname[yyrhs[i]]);
+ fprintf (stderr, " -> %s\n", yytname[yyr1[yyn]]);
+ }
+#endif
+
+
+ switch (yyn) {
+
+case 2:
+#line 293 "parse.y"
+{
+ /* In case there were missing closebraces,
+ get us back to the global binding level. */
+ while (! global_bindings_p ())
+ poplevel (0, 0, 0);
+ finish_file ();
+ ;
+ break;}
+case 3:
+#line 307 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 4:
+#line 308 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 5:
+#line 310 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 6:
+#line 314 "parse.y"
+{ have_extern_spec = 1;
+ used_extern_spec = 0;
+ yyval.ttype = NULL_TREE; ;
+ break;}
+case 7:
+#line 319 "parse.y"
+{ have_extern_spec = 0; ;
+ break;}
+case 10:
+#line 328 "parse.y"
+{ if (pending_lang_change) do_pending_lang_change(); ;
+ break;}
+case 11:
+#line 330 "parse.y"
+{ if (! global_bindings_p () && ! pseudo_global_level_p())
+ pop_everything (); ;
+ break;}
+case 12:
+#line 336 "parse.y"
+{ if (pending_inlines) do_pending_inlines (); ;
+ break;}
+case 13:
+#line 338 "parse.y"
+{ if (pending_inlines) do_pending_inlines (); ;
+ break;}
+case 14:
+#line 340 "parse.y"
+{ if (pending_inlines) do_pending_inlines (); ;
+ break;}
+case 16:
+#line 343 "parse.y"
+{ if (TREE_CHAIN (yyvsp[-2].ttype)) yyvsp[-2].ttype = combine_strings (yyvsp[-2].ttype);
+ assemble_asm (yyvsp[-2].ttype); ;
+ break;}
+case 17:
+#line 346 "parse.y"
+{ pop_lang_context (); ;
+ break;}
+case 18:
+#line 348 "parse.y"
+{ pop_lang_context (); ;
+ break;}
+case 19:
+#line 350 "parse.y"
+{ if (pending_inlines) do_pending_inlines ();
+ pop_lang_context (); ;
+ break;}
+case 20:
+#line 353 "parse.y"
+{ if (pending_inlines) do_pending_inlines ();
+ pop_lang_context (); ;
+ break;}
+case 21:
+#line 359 "parse.y"
+{ push_lang_context (yyvsp[0].ttype); ;
+ break;}
+case 22:
+#line 364 "parse.y"
+{ begin_template_parm_list (); ;
+ break;}
+case 23:
+#line 366 "parse.y"
+{ yyval.ttype = end_template_parm_list (yyvsp[-1].ttype); ;
+ break;}
+case 24:
+#line 371 "parse.y"
+{ yyval.ttype = process_template_parm (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 25:
+#line 373 "parse.y"
+{ yyval.ttype = process_template_parm (yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 26:
+#line 385 "parse.y"
+{
+ if (yyvsp[-1].ttype == signature_type_node)
+ sorry ("signature as template type parameter");
+ else if (yyvsp[-1].ttype != class_type_node)
+ error ("template type parameter must use keyword `class'");
+ yyval.ttype = build_tree_list (yyvsp[0].ttype, NULL_TREE);
+ ;
+ break;}
+case 27:
+#line 393 "parse.y"
+{
+ if (yyvsp[-3].ttype == signature_type_node)
+ sorry ("signature as template type parameter");
+ else if (yyvsp[-3].ttype != class_type_node)
+ error ("template type parameter must use keyword `class'");
+ warning ("restricted template type parameters not yet implemented");
+ yyval.ttype = build_tree_list (yyvsp[-2].ttype, yyvsp[0].ttype);
+ ;
+ break;}
+case 29:
+#line 406 "parse.y"
+{ warning ("use of `overload' is an anachronism"); ;
+ break;}
+case 30:
+#line 410 "parse.y"
+{ declare_overloaded (yyvsp[0].ttype); ;
+ break;}
+case 31:
+#line 412 "parse.y"
+{ declare_overloaded (yyvsp[0].ttype); ;
+ break;}
+case 32:
+#line 419 "parse.y"
+{ yychar = '{'; goto template1; ;
+ break;}
+case 34:
+#line 422 "parse.y"
+{ yychar = '{'; goto template1; ;
+ break;}
+case 36:
+#line 425 "parse.y"
+{ yychar = ':'; goto template1; ;
+ break;}
+case 38:
+#line 428 "parse.y"
+{
+ yychar = ':';
+ template1:
+ if (current_aggr == exception_type_node)
+ error ("template type must define an aggregate or union");
+ else if (current_aggr == signature_type_node)
+ sorry ("template type defining a signature");
+ /* Maybe pedantic warning for union?
+ How about an enum? :-) */
+ end_template_decl (yyvsp[-2].ttype, yyvsp[-1].ttype, current_aggr, 1);
+ reinit_parse_for_template (yychar, yyvsp[-2].ttype, yyvsp[-1].ttype);
+ yychar = YYEMPTY;
+ ;
+ break;}
+case 40:
+#line 443 "parse.y"
+{
+ end_template_decl (yyvsp[-2].ttype, yyvsp[-1].ttype, current_aggr, 0);
+ /* declare $2 as template name with $1 parm list */
+ ;
+ break;}
+case 41:
+#line 448 "parse.y"
+{
+ end_template_decl (yyvsp[-2].ttype, yyvsp[-1].ttype, current_aggr, 0);
+ /* declare $2 as template name with $1 parm list */
+ ;
+ break;}
+case 42:
+#line 455 "parse.y"
+{
+ tree d;
+ int momentary;
+ int def = (yyvsp[0].itype != ';');
+ momentary = suspend_momentary ();
+ d = start_decl (yyvsp[-4].ttype, /*current_declspecs*/NULL_TREE, 0,
+ yyvsp[-3].ttype);
+ cplus_decl_attributes (d, yyvsp[-1].ttype);
+ finish_decl (d, NULL_TREE, yyvsp[-2].ttype, 0);
+ end_template_decl (yyvsp[-5].ttype, d, 0, def);
+ if (def)
+ reinit_parse_for_template ((int) yyvsp[0].itype, yyvsp[-5].ttype, d);
+ resume_momentary (momentary);
+ ;
+ break;}
+case 43:
+#line 472 "parse.y"
+{
+ tree d;
+ int momentary;
+ int def = (yyvsp[0].itype != ';');
+
+ current_declspecs = yyvsp[-5].ttype;
+ momentary = suspend_momentary ();
+ d = start_decl (yyvsp[-4].ttype, current_declspecs,
+ 0, yyvsp[-3].ttype);
+ cplus_decl_attributes (d, yyvsp[-1].ttype);
+ finish_decl (d, NULL_TREE, yyvsp[-2].ttype, 0);
+ end_template_decl (yyvsp[-6].ttype, d, 0, def);
+ if (def)
+ {
+ reinit_parse_for_template ((int) yyvsp[0].itype, yyvsp[-6].ttype, d);
+ yychar = YYEMPTY;
+ }
+ note_list_got_semicolon (yyvsp[-5].ttype);
+ resume_momentary (momentary);
+ ;
+ break;}
+case 44:
+#line 493 "parse.y"
+{
+ int def = (yyvsp[0].itype != ';');
+ tree d = start_decl (yyvsp[-1].ttype, yyvsp[-2].ttype, 0, NULL_TREE);
+ finish_decl (d, NULL_TREE, NULL_TREE, 0);
+ end_template_decl (yyvsp[-3].ttype, d, 0, def);
+ if (def)
+ reinit_parse_for_template ((int) yyvsp[0].itype, yyvsp[-3].ttype, d);
+ ;
+ break;}
+case 45:
+#line 502 "parse.y"
+{ end_template_decl (yyvsp[-2].ttype, 0, 0, 0); ;
+ break;}
+case 46:
+#line 503 "parse.y"
+{ end_template_decl (yyvsp[-2].ttype, 0, 0, 0); ;
+ break;}
+case 47:
+#line 506 "parse.y"
+{ yyval.itype = '{'; ;
+ break;}
+case 48:
+#line 507 "parse.y"
+{ yyval.itype = ':'; ;
+ break;}
+case 49:
+#line 508 "parse.y"
+{ yyval.itype = ';'; ;
+ break;}
+case 50:
+#line 509 "parse.y"
+{ yyval.itype = '='; ;
+ break;}
+case 51:
+#line 510 "parse.y"
+{ yyval.itype = RETURN; ;
+ break;}
+case 52:
+#line 515 "parse.y"
+{;
+ break;}
+case 53:
+#line 517 "parse.y"
+{;
+ break;}
+case 54:
+#line 520 "parse.y"
+{ tree d;
+ d = start_decl (yyvsp[-1].ttype, yyval.ttype, 0, NULL_TREE);
+ finish_decl (d, NULL_TREE, NULL_TREE, 0);
+ ;
+ break;}
+case 55:
+#line 525 "parse.y"
+{
+ note_list_got_semicolon (yyval.ttype);
+ ;
+ break;}
+case 56:
+#line 530 "parse.y"
+{ tree d;
+ d = start_decl (yyvsp[-1].ttype, yyval.ttype, 0, NULL_TREE);
+ finish_decl (d, NULL_TREE, NULL_TREE, 0);
+ note_list_got_semicolon (yyval.ttype);
+ ;
+ break;}
+case 57:
+#line 536 "parse.y"
+{ pedwarn ("empty declaration"); ;
+ break;}
+case 59:
+#line 539 "parse.y"
+{
+ tree t = yyval.ttype;
+ shadow_tag (t);
+ if (TREE_CODE (t) == TREE_LIST
+ && TREE_PURPOSE (t) == NULL_TREE)
+ {
+ t = TREE_VALUE (t);
+ if (IS_AGGR_TYPE (t)
+ && IDENTIFIER_TEMPLATE (TYPE_IDENTIFIER (t)))
+ {
+ if (CLASSTYPE_USE_TEMPLATE (t) == 0)
+ SET_CLASSTYPE_TEMPLATE_SPECIALIZATION (t);
+ else if (CLASSTYPE_TEMPLATE_INSTANTIATION (t))
+ error ("override declaration for already-expanded template");
+ }
+ }
+ note_list_got_semicolon (yyval.ttype);
+ ;
+ break;}
+case 63:
+#line 564 "parse.y"
+{
+ finish_function (lineno, 1);
+ /* finish_function performs these three statements:
+
+ expand_end_bindings (getdecls (), 1, 0);
+ poplevel (1, 1, 0);
+
+ expand_end_bindings (0, 0, 0);
+ poplevel (0, 0, 1);
+ */
+ if (yyval.ttype) process_next_inline (yyval.ttype);
+ ;
+ break;}
+case 64:
+#line 577 "parse.y"
+{
+ finish_function (lineno, 1);
+ /* finish_function performs these three statements:
+
+ expand_end_bindings (getdecls (), 1, 0);
+ poplevel (1, 1, 0);
+
+ expand_end_bindings (0, 0, 0);
+ poplevel (0, 0, 1);
+ */
+ if (yyval.ttype) process_next_inline (yyval.ttype);
+ ;
+ break;}
+case 65:
+#line 590 "parse.y"
+{ finish_function (lineno, 0);
+ if (yyval.ttype) process_next_inline (yyval.ttype); ;
+ break;}
+case 66:
+#line 593 "parse.y"
+{ finish_function (lineno, 0);
+ if (yyval.ttype) process_next_inline (yyval.ttype); ;
+ break;}
+case 67:
+#line 596 "parse.y"
+{ finish_function (lineno, 0);
+ if (yyval.ttype) process_next_inline (yyval.ttype); ;
+ break;}
+case 68:
+#line 599 "parse.y"
+{;
+ break;}
+case 69:
+#line 601 "parse.y"
+{;
+ break;}
+case 70:
+#line 603 "parse.y"
+{;
+ break;}
+case 71:
+#line 608 "parse.y"
+{ if (! start_function (yyval.ttype, yyvsp[-1].ttype, yyvsp[0].ttype, 0))
+ YYERROR1;
+ reinit_parse_for_function ();
+ yyval.ttype = NULL_TREE; ;
+ break;}
+case 72:
+#line 613 "parse.y"
+{ if (! start_function (yyval.ttype, yyvsp[-1].ttype, yyvsp[0].ttype, 0))
+ YYERROR1;
+ reinit_parse_for_function ();
+ yyval.ttype = NULL_TREE; ;
+ break;}
+case 73:
+#line 618 "parse.y"
+{ if (! start_function (NULL_TREE, yyval.ttype, yyvsp[0].ttype, 0))
+ YYERROR1;
+ reinit_parse_for_function ();
+ yyval.ttype = NULL_TREE; ;
+ break;}
+case 74:
+#line 623 "parse.y"
+{ start_function (NULL_TREE, TREE_VALUE (yyval.ttype), NULL_TREE, 1);
+ reinit_parse_for_function (); ;
+ break;}
+case 75:
+#line 631 "parse.y"
+{
+ yyval.ttype = build_parse_node (CALL_EXPR, TREE_VALUE (yyvsp[-5].ttype), yyvsp[-3].ttype, yyvsp[-1].ttype);
+ yyval.ttype = start_method (TREE_CHAIN (yyvsp[-5].ttype), yyval.ttype, yyvsp[0].ttype);
+ rest_of_mdef:
+ if (! yyval.ttype)
+ YYERROR1;
+ if (yychar == YYEMPTY)
+ yychar = YYLEX;
+ reinit_parse_for_method (yychar, yyval.ttype); ;
+ break;}
+case 76:
+#line 641 "parse.y"
+{
+ yyval.ttype = build_parse_node (CALL_EXPR, TREE_VALUE (yyvsp[-3].ttype),
+ empty_parms (), yyvsp[-1].ttype);
+ yyval.ttype = start_method (TREE_CHAIN (yyvsp[-3].ttype), yyval.ttype, yyvsp[0].ttype);
+ goto rest_of_mdef;
+ ;
+ break;}
+case 77:
+#line 648 "parse.y"
+{ yyval.ttype = start_method (yyval.ttype, yyvsp[-1].ttype, yyvsp[0].ttype); goto rest_of_mdef; ;
+ break;}
+case 78:
+#line 650 "parse.y"
+{ yyval.ttype = start_method (yyval.ttype, yyvsp[-1].ttype, yyvsp[0].ttype); goto rest_of_mdef; ;
+ break;}
+case 79:
+#line 652 "parse.y"
+{ yyval.ttype = start_method (NULL_TREE, yyval.ttype, yyvsp[0].ttype); goto rest_of_mdef; ;
+ break;}
+case 80:
+#line 656 "parse.y"
+{
+ if (! current_function_parms_stored)
+ store_parm_decls ();
+ yyval.ttype = yyvsp[0].ttype;
+ ;
+ break;}
+case 81:
+#line 664 "parse.y"
+{ store_return_init (yyval.ttype, NULL_TREE); ;
+ break;}
+case 82:
+#line 666 "parse.y"
+{ store_return_init (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 83:
+#line 668 "parse.y"
+{ store_return_init (yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 84:
+#line 670 "parse.y"
+{ store_return_init (yyval.ttype, NULL_TREE); ;
+ break;}
+case 85:
+#line 675 "parse.y"
+{
+ if (yyvsp[0].itype == 0)
+ error ("no base initializers given following ':'");
+ setup_vtbl_ptr ();
+ /* Always keep the BLOCK node associated with the outermost
+			 pair of curly braces of a function.  These are needed
+ for correct operation of dwarfout.c. */
+ keep_next_level ();
+ ;
+ break;}
+case 86:
+#line 688 "parse.y"
+{
+ if (! current_function_parms_stored)
+ store_parm_decls ();
+
+ /* Flag that we are processing base and member initializers. */
+ current_vtable_decl = error_mark_node;
+
+ if (DECL_CONSTRUCTOR_P (current_function_decl))
+ {
+ /* Make a contour for the initializer list. */
+ pushlevel (0);
+ clear_last_expr ();
+ expand_start_bindings (0);
+ }
+ else if (current_class_type == NULL_TREE)
+ error ("base initializers not allowed for non-member functions");
+ else if (! DECL_CONSTRUCTOR_P (current_function_decl))
+ error ("only constructors take base initializers");
+ ;
+ break;}
+case 87:
+#line 711 "parse.y"
+{ yyval.itype = 0; ;
+ break;}
+case 88:
+#line 713 "parse.y"
+{ yyval.itype = 1; ;
+ break;}
+case 91:
+#line 719 "parse.y"
+{
+ if (current_class_name && !flag_traditional)
+ pedwarn ("anachronistic old style base class initializer");
+ expand_member_init (C_C_D, NULL_TREE, yyvsp[-1].ttype);
+ ;
+ break;}
+case 92:
+#line 725 "parse.y"
+{
+ if (current_class_name && !flag_traditional)
+ pedwarn ("anachronistic old style base class initializer");
+ expand_member_init (C_C_D, NULL_TREE, void_type_node);
+ ;
+ break;}
+case 93:
+#line 731 "parse.y"
+{ expand_member_init (C_C_D, yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 94:
+#line 733 "parse.y"
+{ expand_member_init (C_C_D, yyval.ttype, void_type_node); ;
+ break;}
+case 95:
+#line 735 "parse.y"
+{ expand_member_init (C_C_D, yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 96:
+#line 737 "parse.y"
+{ expand_member_init (C_C_D, yyval.ttype, void_type_node); ;
+ break;}
+case 97:
+#line 740 "parse.y"
+{
+ do_member_init (OP0 (yyvsp[-3].ttype), OP1 (yyvsp[-3].ttype), yyvsp[-1].ttype);
+ ;
+ break;}
+case 98:
+#line 744 "parse.y"
+{
+ do_member_init (OP0 (yyvsp[-1].ttype), OP1 (yyvsp[-1].ttype), void_type_node);
+ ;
+ break;}
+case 107:
+#line 768 "parse.y"
+{ do_type_instantiation (yyvsp[0].ttype ? yyvsp[0].ttype : yyvsp[-1].ttype, NULL_TREE); ;
+ break;}
+case 108:
+#line 770 "parse.y"
+{ do_function_instantiation (yyvsp[-1].ttype, yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 109:
+#line 772 "parse.y"
+{ do_type_instantiation (yyvsp[0].ttype ? yyvsp[0].ttype : yyvsp[-1].ttype, yyvsp[-3].ttype); ;
+ break;}
+case 110:
+#line 774 "parse.y"
+{ do_function_instantiation (yyvsp[-1].ttype, yyvsp[0].ttype, yyvsp[-3].ttype); ;
+ break;}
+case 111:
+#line 779 "parse.y"
+{ if (yyvsp[0].ttype) yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 112:
+#line 784 "parse.y"
+{ yyval.ttype = lookup_template_class (yyval.ttype, yyvsp[-1].ttype, NULL_TREE); ;
+ break;}
+case 113:
+#line 786 "parse.y"
+{ yyval.ttype = lookup_template_class (yyval.ttype, yyvsp[-1].ttype, NULL_TREE); ;
+ break;}
+case 114:
+#line 791 "parse.y"
+{ yyval.ttype = instantiate_class_template (yyvsp[0].ttype, 1); ;
+ break;}
+case 115:
+#line 796 "parse.y"
+{ yyval.ttype = build_tree_list (NULL_TREE, yyval.ttype); ;
+ break;}
+case 116:
+#line 798 "parse.y"
+{ yyval.ttype = chainon (yyval.ttype, build_tree_list (NULL_TREE, yyvsp[0].ttype)); ;
+ break;}
+case 117:
+#line 803 "parse.y"
+{ yyval.ttype = groktypename (yyval.ttype); ;
+ break;}
+case 119:
+#line 809 "parse.y"
+{
+ tree t, decl, tmpl;
+
+ tmpl = TREE_PURPOSE (IDENTIFIER_TEMPLATE (yyvsp[-1].ttype));
+ t = xref_tag (DECL_TEMPLATE_INFO (tmpl)->aggr, yyvsp[-1].ttype, yyvsp[0].ttype, 0);
+ set_current_level_tags_transparency (1);
+ my_friendly_assert (TREE_CODE (t) == RECORD_TYPE
+ || TREE_CODE (t) == UNION_TYPE, 257);
+ yyval.ttype = t;
+
+ /* Now, put a copy of the decl in global scope, to avoid
+ recursive expansion. */
+ decl = IDENTIFIER_LOCAL_VALUE (yyvsp[-1].ttype);
+ if (!decl)
+ decl = IDENTIFIER_CLASS_VALUE (yyvsp[-1].ttype);
+ if (decl)
+ {
+ /* Need to copy it to clear the chain pointer,
+ and need to get it into permanent storage. */
+ my_friendly_assert (TREE_CODE (decl) == TYPE_DECL, 258);
+ push_obstacks (&permanent_obstack, &permanent_obstack);
+ decl = copy_node (decl);
+ if (DECL_LANG_SPECIFIC (decl))
+ copy_lang_decl (decl);
+ pop_obstacks ();
+ pushdecl_top_level (decl);
+ }
+ /* Kludge; see instantiate_class_template. */
+ TYPE_BEING_DEFINED (t) = 0;
+ ;
+ break;}
+case 120:
+#line 842 "parse.y"
+{
+ tree t = finish_struct (yyvsp[-3].ttype, yyvsp[-1].ttype, 0);
+
+ pop_obstacks ();
+ end_template_instantiation (yyvsp[-5].ttype);
+
+ /* Now go after the methods & class data. */
+ instantiate_member_templates (yyvsp[-5].ttype);
+
+ pop_tinst_level();
+
+ CLASSTYPE_GOT_SEMICOLON (t) = 1;
+ ;
+ break;}
+case 121:
+#line 859 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 122:
+#line 861 "parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 123:
+#line 866 "parse.y"
+{ yyval.ttype = NULL_TREE; /* never used from here... */;
+ break;}
+case 124:
+#line 868 "parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; /*???*/ ;
+ break;}
+case 125:
+#line 872 "parse.y"
+{ yyval.code = NEGATE_EXPR; ;
+ break;}
+case 126:
+#line 874 "parse.y"
+{ yyval.code = CONVERT_EXPR; ;
+ break;}
+case 127:
+#line 876 "parse.y"
+{ yyval.code = PREINCREMENT_EXPR; ;
+ break;}
+case 128:
+#line 878 "parse.y"
+{ yyval.code = PREDECREMENT_EXPR; ;
+ break;}
+case 129:
+#line 880 "parse.y"
+{ yyval.code = TRUTH_NOT_EXPR; ;
+ break;}
+case 130:
+#line 884 "parse.y"
+{ yyval.ttype = build_x_compound_expr (yyval.ttype); ;
+ break;}
+case 132:
+#line 890 "parse.y"
+{ error ("ANSI C++ forbids an empty condition for `%s'",
+ cond_stmt_keyword);
+ yyval.ttype = integer_zero_node; ;
+ break;}
+case 133:
+#line 894 "parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 134:
+#line 899 "parse.y"
+{ error ("ANSI C++ forbids an empty condition for `%s'",
+ cond_stmt_keyword);
+ yyval.ttype = integer_zero_node; ;
+ break;}
+case 135:
+#line 903 "parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 136:
+#line 908 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 138:
+#line 911 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 139:
+#line 916 "parse.y"
+{ {
+ tree d;
+ for (d = getdecls (); d; d = TREE_CHAIN (d))
+ if (TREE_CODE (d) == TYPE_DECL) {
+ tree s = TREE_TYPE (d);
+ if (TREE_CODE (s) == RECORD_TYPE)
+ cp_error ("definition of class `%T' in condition", s);
+ else if (TREE_CODE (s) == ENUMERAL_TYPE)
+ cp_error ("definition of enum `%T' in condition", s);
+ }
+ }
+ current_declspecs = yyvsp[-5].ttype;
+ yyvsp[0].itype = suspend_momentary ();
+ yyval.ttype = start_decl (yyvsp[-4].ttype, current_declspecs, 1, yyvsp[-3].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[-1].ttype);
+ ;
+ break;}
+case 140:
+#line 933 "parse.y"
+{
+ finish_decl (yyvsp[-1].ttype, yyvsp[0].ttype, yyvsp[-3].ttype, 0);
+ resume_momentary (yyvsp[-2].itype);
+ yyval.ttype = yyvsp[-1].ttype;
+ if (TREE_CODE (TREE_TYPE (yyval.ttype)) == ARRAY_TYPE)
+ cp_error ("definition of array `%#D' in condition", yyval.ttype);
+ ;
+ break;}
+case 142:
+#line 945 "parse.y"
+{ finish_stmt (); ;
+ break;}
+case 143:
+#line 947 "parse.y"
+{ finish_stmt (); ;
+ break;}
+case 144:
+#line 949 "parse.y"
+{ finish_stmt (); ;
+ break;}
+case 146:
+#line 956 "parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyval.ttype,
+ build_tree_list (NULL_TREE, yyvsp[0].ttype)); ;
+ break;}
+case 147:
+#line 959 "parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyval.ttype,
+ build_tree_list (NULL_TREE, error_mark_node)); ;
+ break;}
+case 148:
+#line 962 "parse.y"
+{ chainon (yyval.ttype, build_tree_list (NULL_TREE, yyvsp[0].ttype)); ;
+ break;}
+case 149:
+#line 964 "parse.y"
+{ chainon (yyval.ttype, build_tree_list (NULL_TREE, error_mark_node)); ;
+ break;}
+case 150:
+#line 969 "parse.y"
+{ yyval.ttype = build_tree_list (NULL_TREE, yyval.ttype); ;
+ break;}
+case 152:
+#line 975 "parse.y"
+{
+#if 0
+ if (TREE_CODE (yyval.ttype) == TYPE_EXPR)
+ yyval.ttype = build_component_type_expr (C_C_D, yyval.ttype, NULL_TREE, 1);
+#endif
+ ;
+ break;}
+case 153:
+#line 983 "parse.y"
+{ yyvsp[0].itype = pedantic;
+ pedantic = 0; ;
+ break;}
+case 154:
+#line 986 "parse.y"
+{ yyval.ttype = yyvsp[0].ttype;
+ pedantic = yyvsp[-2].itype; ;
+ break;}
+case 155:
+#line 989 "parse.y"
+{ yyval.ttype = build_x_indirect_ref (yyvsp[0].ttype, "unary *"); ;
+ break;}
+case 156:
+#line 991 "parse.y"
+{ yyval.ttype = build_x_unary_op (ADDR_EXPR, yyvsp[0].ttype); ;
+ break;}
+case 157:
+#line 993 "parse.y"
+{ yyval.ttype = build_x_unary_op (BIT_NOT_EXPR, yyvsp[0].ttype); ;
+ break;}
+case 158:
+#line 995 "parse.y"
+{ yyval.ttype = build_x_unary_op (yyvsp[-1].code, yyvsp[0].ttype);
+ if (yyvsp[-1].code == NEGATE_EXPR && TREE_CODE (yyvsp[0].ttype) == INTEGER_CST)
+ TREE_NEGATED_INT (yyval.ttype) = 1;
+ overflow_warning (yyval.ttype);
+ ;
+ break;}
+case 159:
+#line 1002 "parse.y"
+{ tree label = lookup_label (yyvsp[0].ttype);
+ if (label == NULL_TREE)
+ yyval.ttype = null_pointer_node;
+ else
+ {
+ TREE_USED (label) = 1;
+ yyval.ttype = build1 (ADDR_EXPR, ptr_type_node, label);
+ TREE_CONSTANT (yyval.ttype) = 1;
+ }
+ ;
+ break;}
+case 160:
+#line 1013 "parse.y"
+{ if (TREE_CODE (yyvsp[0].ttype) == COMPONENT_REF
+ && DECL_BIT_FIELD (TREE_OPERAND (yyvsp[0].ttype, 1)))
+ error ("sizeof applied to a bit-field");
+ /* ANSI says arrays and functions are converted inside comma.
+ But we can't really convert them in build_compound_expr
+ because that would break commas in lvalues.
+ So do the conversion here if operand was a comma. */
+ if (TREE_CODE (yyvsp[0].ttype) == COMPOUND_EXPR
+ && (TREE_CODE (TREE_TYPE (yyvsp[0].ttype)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (yyvsp[0].ttype)) == FUNCTION_TYPE))
+ yyvsp[0].ttype = default_conversion (yyvsp[0].ttype);
+ else if (TREE_CODE (yyvsp[0].ttype) == TREE_LIST)
+ {
+ tree t = TREE_VALUE (yyvsp[0].ttype);
+ if (t != NULL_TREE
+ && TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE)
+ pedwarn ("ANSI C++ forbids using sizeof() on a function");
+ }
+ yyval.ttype = c_sizeof (TREE_TYPE (yyvsp[0].ttype)); ;
+ break;}
+case 161:
+#line 1033 "parse.y"
+{ yyval.ttype = c_sizeof (groktypename (yyvsp[-1].ttype)); ;
+ break;}
+case 162:
+#line 1035 "parse.y"
+{ yyval.ttype = grok_alignof (yyvsp[0].ttype); ;
+ break;}
+case 163:
+#line 1037 "parse.y"
+{ yyval.ttype = c_alignof (groktypename (yyvsp[-1].ttype)); ;
+ break;}
+case 164:
+#line 1042 "parse.y"
+{ yyval.ttype = build_new (NULL_TREE, yyvsp[0].ttype, NULL_TREE, yyvsp[-1].itype); ;
+ break;}
+case 165:
+#line 1044 "parse.y"
+{ yyval.ttype = build_new (NULL_TREE, yyvsp[-1].ttype, yyvsp[0].ttype, yyvsp[-2].itype); ;
+ break;}
+case 166:
+#line 1046 "parse.y"
+{ yyval.ttype = build_new (yyvsp[-1].ttype, yyvsp[0].ttype, NULL_TREE, yyvsp[-2].itype); ;
+ break;}
+case 167:
+#line 1048 "parse.y"
+{ yyval.ttype = build_new (yyvsp[-2].ttype, yyvsp[-1].ttype, yyvsp[0].ttype, yyvsp[-3].itype); ;
+ break;}
+case 168:
+#line 1050 "parse.y"
+{ yyval.ttype = build_new (NULL_TREE, groktypename(yyvsp[-1].ttype),
+ NULL_TREE, yyvsp[-3].itype); ;
+ break;}
+case 169:
+#line 1053 "parse.y"
+{ yyval.ttype = build_new (NULL_TREE, groktypename(yyvsp[-2].ttype), yyvsp[0].ttype, yyvsp[-4].itype); ;
+ break;}
+case 170:
+#line 1055 "parse.y"
+{ yyval.ttype = build_new (yyvsp[-3].ttype, groktypename(yyvsp[-1].ttype), NULL_TREE, yyvsp[-4].itype); ;
+ break;}
+case 171:
+#line 1057 "parse.y"
+{ yyval.ttype = build_new (yyvsp[-4].ttype, groktypename(yyvsp[-2].ttype), yyvsp[0].ttype, yyvsp[-5].itype); ;
+ break;}
+case 172:
+#line 1060 "parse.y"
+{ yyval.ttype = delete_sanity (yyvsp[0].ttype, NULL_TREE, 0, yyvsp[-1].itype); ;
+ break;}
+case 173:
+#line 1062 "parse.y"
+{ yyval.ttype = delete_sanity (yyvsp[0].ttype, NULL_TREE, 1, yyvsp[-3].itype);
+ if (yychar == YYEMPTY)
+ yychar = YYLEX; ;
+ break;}
+case 174:
+#line 1066 "parse.y"
+{ yyval.ttype = delete_sanity (yyvsp[0].ttype, yyvsp[-2].ttype, 2, yyvsp[-4].itype);
+ if (yychar == YYEMPTY)
+ yychar = YYLEX; ;
+ break;}
+case 175:
+#line 1073 "parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 176:
+#line 1075 "parse.y"
+{
+ yyval.ttype = yyvsp[-1].ttype;
+ pedwarn ("old style placement syntax, use () instead");
+ ;
+ break;}
+case 177:
+#line 1083 "parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 178:
+#line 1085 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 179:
+#line 1087 "parse.y"
+{
+ cp_error ("`%T' is not a valid expression", yyvsp[-1].ttype);
+ yyval.ttype = error_mark_node;
+ ;
+ break;}
+case 180:
+#line 1095 "parse.y"
+{
+ if (flag_ansi)
+ pedwarn ("ANSI C++ forbids initialization of new expression with `='");
+ yyval.ttype = yyvsp[0].ttype;
+ ;
+ break;}
+case 181:
+#line 1105 "parse.y"
+{ yyvsp[-1].ttype = tree_cons (NULL_TREE, yyvsp[-1].ttype, void_list_node);
+ TREE_PARMLIST (yyvsp[-1].ttype) = 1;
+ yyval.ttype = build_parse_node (CALL_EXPR, NULL_TREE, yyvsp[-1].ttype,
+ NULL_TREE); ;
+ break;}
+case 182:
+#line 1110 "parse.y"
+{ yyvsp[-1].ttype = tree_cons (NULL_TREE, yyvsp[-1].ttype, void_list_node);
+ TREE_PARMLIST (yyvsp[-1].ttype) = 1;
+ yyval.ttype = build_parse_node (CALL_EXPR, yyval.ttype, yyvsp[-1].ttype, NULL_TREE); ;
+ break;}
+case 184:
+#line 1118 "parse.y"
+{ yyval.ttype = reparse_absdcl_as_casts (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 185:
+#line 1120 "parse.y"
+{
+ tree init = build_nt (CONSTRUCTOR, NULL_TREE,
+ nreverse (yyvsp[-2].ttype));
+ if (flag_ansi)
+ pedwarn ("ANSI C++ forbids constructor-expressions");
+ /* Indicate that this was a GNU C constructor expression. */
+ TREE_HAS_CONSTRUCTOR (init) = 1;
+
+ yyval.ttype = reparse_absdcl_as_casts (yyval.ttype, init);
+ ;
+ break;}
+case 187:
+#line 1135 "parse.y"
+{ yyval.ttype = build_headof (yyvsp[-1].ttype); ;
+ break;}
+case 188:
+#line 1137 "parse.y"
+{ yyval.ttype = build_classof (yyvsp[-1].ttype); ;
+ break;}
+case 189:
+#line 1139 "parse.y"
+{ if (is_aggr_typedef (yyvsp[-1].ttype, 1))
+ {
+ tree type = IDENTIFIER_TYPE_VALUE (yyvsp[-1].ttype);
+ if (! IS_SIGNATURE(type))
+ yyval.ttype = CLASSTYPE_DOSSIER (type);
+ else
+ {
+ sorry ("signature name as argument of `classof'");
+ yyval.ttype = error_mark_node;
+ }
+ }
+ else
+ yyval.ttype = error_mark_node;
+ ;
+ break;}
+case 191:
+#line 1159 "parse.y"
+{ yyval.ttype = build_x_binary_op (MEMBER_REF, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 192:
+#line 1161 "parse.y"
+{ yyval.ttype = build_m_component_ref (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 193:
+#line 1163 "parse.y"
+{ yyval.ttype = build_x_binary_op (yyvsp[-1].code, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 194:
+#line 1165 "parse.y"
+{ yyval.ttype = build_x_binary_op (yyvsp[-1].code, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 195:
+#line 1167 "parse.y"
+{ yyval.ttype = build_x_binary_op (yyvsp[-1].code, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 196:
+#line 1169 "parse.y"
+{ yyval.ttype = build_x_binary_op (yyvsp[-1].code, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 197:
+#line 1171 "parse.y"
+{ yyval.ttype = build_x_binary_op (yyvsp[-1].code, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 198:
+#line 1173 "parse.y"
+{ yyval.ttype = build_x_binary_op (yyvsp[-1].code, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 199:
+#line 1175 "parse.y"
+{ yyval.ttype = build_x_binary_op (yyvsp[-1].code, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 200:
+#line 1177 "parse.y"
+{ yyval.ttype = build_x_binary_op (yyvsp[-1].code, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 201:
+#line 1179 "parse.y"
+{ yyval.ttype = build_x_binary_op (LT_EXPR, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 202:
+#line 1181 "parse.y"
+{ yyval.ttype = build_x_binary_op (GT_EXPR, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 203:
+#line 1183 "parse.y"
+{ yyval.ttype = build_x_binary_op (yyvsp[-1].code, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 204:
+#line 1185 "parse.y"
+{ yyval.ttype = build_x_binary_op (yyvsp[-1].code, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 205:
+#line 1187 "parse.y"
+{ yyval.ttype = build_x_binary_op (yyvsp[-1].code, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 206:
+#line 1189 "parse.y"
+{ yyval.ttype = build_x_binary_op (yyvsp[-1].code, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 207:
+#line 1191 "parse.y"
+{ yyval.ttype = build_x_binary_op (yyvsp[-1].code, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 208:
+#line 1193 "parse.y"
+{ yyval.ttype = build_x_binary_op (TRUTH_ANDIF_EXPR, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 209:
+#line 1195 "parse.y"
+{ yyval.ttype = build_x_binary_op (TRUTH_ORIF_EXPR, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 210:
+#line 1197 "parse.y"
+{ yyval.ttype = build_x_conditional_expr (yyval.ttype, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 211:
+#line 1199 "parse.y"
+{ yyval.ttype = build_modify_expr (yyval.ttype, NOP_EXPR, yyvsp[0].ttype); ;
+ break;}
+case 212:
+#line 1201 "parse.y"
+{ register tree rval;
+ if ((rval = build_opfncall (MODIFY_EXPR, LOOKUP_NORMAL, yyval.ttype, yyvsp[0].ttype,
+ make_node (yyvsp[-1].code))))
+ yyval.ttype = rval;
+ else
+ yyval.ttype = build_modify_expr (yyval.ttype, yyvsp[-1].code, yyvsp[0].ttype); ;
+ break;}
+case 213:
+#line 1208 "parse.y"
+{ yyval.ttype = build_throw (NULL_TREE); ;
+ break;}
+case 214:
+#line 1210 "parse.y"
+{ yyval.ttype = build_throw (yyvsp[0].ttype); ;
+ break;}
+case 215:
+#line 1228 "parse.y"
+{ yyval.ttype = build_parse_node (BIT_NOT_EXPR, yyvsp[0].ttype); ;
+ break;}
+case 223:
+#line 1243 "parse.y"
+{ yyval.ttype = build_parse_node (INDIRECT_REF, yyvsp[0].ttype); ;
+ break;}
+case 224:
+#line 1245 "parse.y"
+{ yyval.ttype = build_parse_node (ADDR_EXPR, yyvsp[0].ttype); ;
+ break;}
+case 227:
+#line 1252 "parse.y"
+{ push_nested_class (TREE_TYPE (OP0 (yyval.ttype)), 3);
+ TREE_COMPLEXITY (yyval.ttype) = current_class_depth; ;
+ break;}
+case 228:
+#line 1258 "parse.y"
+{
+ if (TREE_CODE (yyval.ttype) == BIT_NOT_EXPR)
+ yyval.ttype = build_x_unary_op (BIT_NOT_EXPR, TREE_OPERAND (yyval.ttype, 0));
+ else if (IDENTIFIER_OPNAME_P (yyval.ttype))
+ {
+ tree op = yyval.ttype;
+ yyval.ttype = lookup_name (op, 0);
+ if (yyval.ttype == NULL_TREE)
+ {
+ if (op != ansi_opname[ERROR_MARK])
+ error ("operator %s not defined",
+ operator_name_string (op));
+ yyval.ttype = error_mark_node;
+ }
+ }
+ else
+ yyval.ttype = do_identifier (yyval.ttype);
+ ;
+ break;}
+case 231:
+#line 1279 "parse.y"
+{ yyval.ttype = combine_strings (yyval.ttype); ;
+ break;}
+case 232:
+#line 1281 "parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 233:
+#line 1283 "parse.y"
+{ yyval.ttype = error_mark_node; ;
+ break;}
+case 234:
+#line 1285 "parse.y"
+{ if (current_function_decl == 0)
+ {
+ error ("braced-group within expression allowed only inside a function");
+ YYERROR;
+ }
+ keep_next_level ();
+ yyval.ttype = expand_start_stmt_expr (); ;
+ break;}
+case 235:
+#line 1293 "parse.y"
+{ tree rtl_exp;
+ if (flag_ansi)
+ pedwarn ("ANSI C++ forbids braced-groups within expressions");
+ rtl_exp = expand_end_stmt_expr (yyvsp[-2].ttype);
+ /* The statements have side effects, so the group does. */
+ TREE_SIDE_EFFECTS (rtl_exp) = 1;
+
+ if (TREE_CODE (yyvsp[-1].ttype) == BLOCK)
+ {
+ /* Make a BIND_EXPR for the BLOCK already made. */
+ yyval.ttype = build (BIND_EXPR, TREE_TYPE (rtl_exp),
+ NULL_TREE, rtl_exp, yyvsp[-1].ttype);
+ /* Remove the block from the tree at this point.
+ It gets put back at the proper place
+ when the BIND_EXPR is expanded. */
+ delete_block (yyvsp[-1].ttype);
+ }
+ else
+ yyval.ttype = yyvsp[-1].ttype;
+ ;
+ break;}
+case 236:
+#line 1314 "parse.y"
+{ /* [eichin:19911016.1902EST] */
+ yyval.ttype = build_x_function_call (yyvsp[-3].ttype, yyvsp[-1].ttype, current_class_decl);
+ /* here we instantiate_class_template as needed... */
+ do_pending_templates ();
+ ;
+ break;}
+case 237:
+#line 1318 "parse.y"
+{
+ if (TREE_CODE (yyvsp[-1].ttype) == CALL_EXPR
+ && TREE_TYPE (yyvsp[-1].ttype) != void_type_node)
+ yyval.ttype = require_complete_type (yyvsp[-1].ttype);
+ else
+ yyval.ttype = yyvsp[-1].ttype;
+ ;
+ break;}
+case 238:
+#line 1326 "parse.y"
+{
+ yyval.ttype = build_x_function_call (yyval.ttype, NULL_TREE, current_class_decl);
+ if (TREE_CODE (yyval.ttype) == CALL_EXPR
+ && TREE_TYPE (yyval.ttype) != void_type_node)
+ yyval.ttype = require_complete_type (yyval.ttype);
+ ;
+ break;}
+case 239:
+#line 1333 "parse.y"
+{ yyval.ttype = grok_array_decl (yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 240:
+#line 1335 "parse.y"
+{ /* If we get an OFFSET_REF, turn it into what it really
+ means (e.g., a COMPONENT_REF). This way if we've got,
+ say, a reference to a static member that's being operated
+ on, we don't end up trying to find a member operator for
+ the class it's in. */
+ if (TREE_CODE (yyval.ttype) == OFFSET_REF)
+ yyval.ttype = resolve_offset_ref (yyval.ttype);
+ yyval.ttype = build_x_unary_op (POSTINCREMENT_EXPR, yyval.ttype); ;
+ break;}
+case 241:
+#line 1344 "parse.y"
+{ if (TREE_CODE (yyval.ttype) == OFFSET_REF)
+ yyval.ttype = resolve_offset_ref (yyval.ttype);
+ yyval.ttype = build_x_unary_op (POSTDECREMENT_EXPR, yyval.ttype); ;
+ break;}
+case 242:
+#line 1349 "parse.y"
+{ if (current_class_decl)
+ {
+#ifdef WARNING_ABOUT_CCD
+ TREE_USED (current_class_decl) = 1;
+#endif
+ yyval.ttype = current_class_decl;
+ }
+ else if (current_function_decl
+ && DECL_STATIC_FUNCTION_P (current_function_decl))
+ {
+ error ("`this' is unavailable for static member functions");
+ yyval.ttype = error_mark_node;
+ }
+ else
+ {
+ if (current_function_decl)
+ error ("invalid use of `this' in non-member function");
+ else
+ error ("invalid use of `this' at top level");
+ yyval.ttype = error_mark_node;
+ }
+ ;
+ break;}
+case 243:
+#line 1372 "parse.y"
+{
+ tree type;
+ tree id = yyval.ttype;
+
+ /* This is a C cast in C++'s `functional' notation. */
+ if (yyvsp[-1].ttype == error_mark_node)
+ {
+ yyval.ttype = error_mark_node;
+ break;
+ }
+#if 0
+ if (yyvsp[-1].ttype == NULL_TREE)
+ {
+ error ("cannot cast null list to type `%s'",
+ IDENTIFIER_POINTER (TYPE_NAME (id)));
+ yyval.ttype = error_mark_node;
+ break;
+ }
+#endif
+#if 0
+ /* type is not set! (mrs) */
+ if (type == error_mark_node)
+ yyval.ttype = error_mark_node;
+ else
+#endif
+ {
+ if (id == ridpointers[(int) RID_CONST])
+ type = build_type_variant (integer_type_node, 1, 0);
+ else if (id == ridpointers[(int) RID_VOLATILE])
+ type = build_type_variant (integer_type_node, 0, 1);
+#if 0
+ /* should not be able to get here (mrs) */
+ else if (id == ridpointers[(int) RID_FRIEND])
+ {
+ error ("cannot cast expression to `friend' type");
+ yyval.ttype = error_mark_node;
+ break;
+ }
+#endif
+ else my_friendly_abort (79);
+ yyval.ttype = build_c_cast (type, build_compound_expr (yyvsp[-1].ttype));
+ }
+ ;
+ break;}
+case 245:
+#line 1417 "parse.y"
+{ tree type = groktypename (yyvsp[-4].ttype);
+ yyval.ttype = build_dynamic_cast (type, yyvsp[-1].ttype); ;
+ break;}
+case 246:
+#line 1420 "parse.y"
+{ tree type = groktypename (yyvsp[-4].ttype);
+ yyval.ttype = build_static_cast (type, yyvsp[-1].ttype); ;
+ break;}
+case 247:
+#line 1423 "parse.y"
+{ tree type = groktypename (yyvsp[-4].ttype);
+ yyval.ttype = build_reinterpret_cast (type, yyvsp[-1].ttype); ;
+ break;}
+case 248:
+#line 1426 "parse.y"
+{ tree type = groktypename (yyvsp[-4].ttype);
+ yyval.ttype = build_const_cast (type, yyvsp[-1].ttype); ;
+ break;}
+case 249:
+#line 1429 "parse.y"
+{ yyval.ttype = build_typeid (yyvsp[-1].ttype); ;
+ break;}
+case 250:
+#line 1431 "parse.y"
+{ tree type = groktypename (yyvsp[-1].ttype);
+ yyval.ttype = get_typeid (type); ;
+ break;}
+case 251:
+#line 1434 "parse.y"
+{
+ do_scoped_id:
+ yyval.ttype = IDENTIFIER_GLOBAL_VALUE (yyvsp[0].ttype);
+ if (yychar == YYEMPTY)
+ yychar = YYLEX;
+ if (! yyval.ttype)
+ {
+ if (yychar == '(' || yychar == LEFT_RIGHT)
+ yyval.ttype = implicitly_declare (yyvsp[0].ttype);
+ else
+ {
+ if (IDENTIFIER_GLOBAL_VALUE (yyvsp[0].ttype) != error_mark_node)
+ error ("undeclared variable `%s' (first use here)",
+ IDENTIFIER_POINTER (yyvsp[0].ttype));
+ yyval.ttype = error_mark_node;
+ /* Prevent repeated error messages. */
+ IDENTIFIER_GLOBAL_VALUE (yyvsp[0].ttype) = error_mark_node;
+ }
+ }
+ else
+ {
+ if (TREE_CODE (yyval.ttype) == ADDR_EXPR)
+ assemble_external (TREE_OPERAND (yyval.ttype, 0));
+ else
+ assemble_external (yyval.ttype);
+ TREE_USED (yyval.ttype) = 1;
+ }
+ if (TREE_CODE (yyval.ttype) == CONST_DECL)
+ {
+ /* XXX CHS - should we set TREE_USED of the constant? */
+ yyval.ttype = DECL_INITIAL (yyval.ttype);
+ /* This is to prevent an enum whose value is 0
+ from being considered a null pointer constant. */
+ yyval.ttype = build1 (NOP_EXPR, TREE_TYPE (yyval.ttype), yyval.ttype);
+ TREE_CONSTANT (yyval.ttype) = 1;
+ }
+
+ ;
+ break;}
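+/* The NOP_EXPR wrap above is what keeps, for example,
+
+	enum { ZERO };
+	void *p = ::ZERO;	// rejected, rather than silently
+				// yielding a null pointer
+
+   from being treated as assignment of a null pointer constant: the
+   wrapped value is still TREE_CONSTANT but no longer a bare
+   INTEGER_CST of value zero.  */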
+case 252:
+#line 1473 "parse.y"
+{
+ got_scope = NULL_TREE;
+ if (TREE_CODE (yyvsp[0].ttype) == IDENTIFIER_NODE)
+ goto do_scoped_id;
+ yyval.ttype = yyvsp[0].ttype;
+ ;
+ break;}
+case 253:
+#line 1480 "parse.y"
+{ yyval.ttype = build_offset_ref (OP0 (yyval.ttype), OP1 (yyval.ttype)); ;
+ break;}
+case 254:
+#line 1482 "parse.y"
+{ yyval.ttype = build_member_call (OP0 (yyval.ttype), OP1 (yyval.ttype), yyvsp[-1].ttype); ;
+ break;}
+case 255:
+#line 1484 "parse.y"
+{ yyval.ttype = build_member_call (OP0 (yyval.ttype), OP1 (yyval.ttype), NULL_TREE); ;
+ break;}
+case 256:
+#line 1486 "parse.y"
+{ yyval.ttype = build_component_ref (yyval.ttype, yyvsp[0].ttype, NULL_TREE, 1); ;
+ break;}
+case 257:
+#line 1488 "parse.y"
+{ yyval.ttype = build_object_ref (yyval.ttype, OP0 (yyvsp[0].ttype), OP1 (yyvsp[0].ttype)); ;
+ break;}
+case 258:
+#line 1490 "parse.y"
+{
+#if 0
+ /* This is a future direction of this code, but because
+ build_x_function_call cannot always undo what is done
+ in build_component_ref entirely yet, we cannot do this. */
+ yyval.ttype = build_x_function_call (build_component_ref (yyval.ttype, yyvsp[-3].ttype, NULL_TREE, 1), yyvsp[-1].ttype, yyval.ttype);
+ if (TREE_CODE (yyval.ttype) == CALL_EXPR
+ && TREE_TYPE (yyval.ttype) != void_type_node)
+ yyval.ttype = require_complete_type (yyval.ttype);
+#else
+ yyval.ttype = build_method_call (yyval.ttype, yyvsp[-3].ttype, yyvsp[-1].ttype, NULL_TREE,
+ (LOOKUP_NORMAL|LOOKUP_AGGR));
+#endif
+ ;
+ break;}
+case 259:
+#line 1505 "parse.y"
+{
+#if 0
+ /* This is a future direction of this code, but because
+ build_x_function_call cannot always undo what is done
+ in build_component_ref entirely yet, we cannot do this. */
+ yyval.ttype = build_x_function_call (build_component_ref (yyval.ttype, yyvsp[-1].ttype, NULL_TREE, 1), NULL_TREE, yyval.ttype);
+ if (TREE_CODE (yyval.ttype) == CALL_EXPR
+ && TREE_TYPE (yyval.ttype) != void_type_node)
+ yyval.ttype = require_complete_type (yyval.ttype);
+#else
+ yyval.ttype = build_method_call (yyval.ttype, yyvsp[-1].ttype, NULL_TREE, NULL_TREE,
+ (LOOKUP_NORMAL|LOOKUP_AGGR));
+#endif
+ ;
+ break;}
+case 260:
+#line 1520 "parse.y"
+{
+ if (IS_SIGNATURE (IDENTIFIER_TYPE_VALUE (OP0 (yyvsp[-3].ttype))))
+ {
+ warning ("signature name in scope resolution ignored");
+ yyval.ttype = build_method_call (yyval.ttype, OP1 (yyvsp[-3].ttype), yyvsp[-1].ttype, NULL_TREE,
+ (LOOKUP_NORMAL|LOOKUP_AGGR));
+ }
+ else
+ yyval.ttype = build_scoped_method_call (yyval.ttype, OP0 (yyvsp[-3].ttype), OP1 (yyvsp[-3].ttype), yyvsp[-1].ttype);
+ ;
+ break;}
+case 261:
+#line 1531 "parse.y"
+{
+ if (IS_SIGNATURE (IDENTIFIER_TYPE_VALUE (OP0 (yyvsp[-1].ttype))))
+ {
+ warning ("signature name in scope resolution ignored");
+ yyval.ttype = build_method_call (yyval.ttype, OP1 (yyvsp[-1].ttype), NULL_TREE, NULL_TREE,
+ (LOOKUP_NORMAL|LOOKUP_AGGR));
+ }
+ else
+ yyval.ttype = build_scoped_method_call (yyval.ttype, OP0 (yyvsp[-1].ttype), OP1 (yyvsp[-1].ttype), NULL_TREE);
+ ;
+ break;}
+case 262:
+#line 1543 "parse.y"
+{
+ if (TREE_CODE (TREE_TYPE (yyvsp[-3].ttype))
+ != TREE_CODE (TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (yyvsp[-1].ttype))))
+ cp_error ("`%E' is not of type `%T'", yyvsp[-3].ttype, yyvsp[-1].ttype);
+ yyval.ttype = void_zero_node;
+ ;
+ break;}
+case 263:
+#line 1550 "parse.y"
+{
+ if (yyvsp[-4].ttype != yyvsp[-1].ttype)
+ cp_error ("destructor specifier `%T::~%T()' must have matching names", yyvsp[-4].ttype, yyvsp[-1].ttype);
+ if (TREE_CODE (TREE_TYPE (yyvsp[-5].ttype))
+ != TREE_CODE (TREE_TYPE (IDENTIFIER_GLOBAL_VALUE (yyvsp[-4].ttype))))
+ cp_error ("`%E' is not of type `%T'", yyvsp[-5].ttype, yyvsp[-4].ttype);
+ yyval.ttype = void_zero_node;
+ ;
+ break;}
+case 264:
+#line 1599 "parse.y"
+{ yyval.itype = 0; ;
+ break;}
+case 265:
+#line 1601 "parse.y"
+{ got_scope = NULL_TREE; yyval.itype = 1; ;
+ break;}
+case 266:
+#line 1605 "parse.y"
+{ yyval.itype = 0; ;
+ break;}
+case 267:
+#line 1607 "parse.y"
+{ got_scope = NULL_TREE; yyval.itype = 1; ;
+ break;}
+case 268:
+#line 1612 "parse.y"
+{ yyval.ttype = true_node; ;
+ break;}
+case 269:
+#line 1614 "parse.y"
+{ yyval.ttype = false_node; ;
+ break;}
+case 271:
+#line 1621 "parse.y"
+{ yyval.ttype = chainon (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 272:
+#line 1626 "parse.y"
+{
+ if (! current_function_parms_stored)
+ store_parm_decls ();
+ setup_vtbl_ptr ();
+ /* Always keep the BLOCK node associated with the outermost
+			 pair of curly braces of a function.  These are needed
+ for correct operation of dwarfout.c. */
+ keep_next_level ();
+ ;
+ break;}
+case 274:
+#line 1639 "parse.y"
+{
+ yyval.ttype = build_x_arrow (yyval.ttype);
+ ;
+ break;}
+case 275:
+#line 1647 "parse.y"
+{ tree d = get_decl_list (yyvsp[-2].ttype);
+ int yes = suspend_momentary ();
+ d = start_decl (yyvsp[-1].ttype, d, 0, NULL_TREE);
+ finish_decl (d, NULL_TREE, NULL_TREE, 0);
+ resume_momentary (yes);
+ if (IS_AGGR_TYPE_CODE (TREE_CODE (yyvsp[-2].ttype)))
+ note_got_semicolon (yyvsp[-2].ttype);
+ ;
+ break;}
+case 276:
+#line 1656 "parse.y"
+{ tree d = yyvsp[-2].ttype;
+ int yes = suspend_momentary ();
+ d = start_decl (yyvsp[-1].ttype, d, 0, NULL_TREE);
+ finish_decl (d, NULL_TREE, NULL_TREE, 0);
+ resume_momentary (yes);
+ note_list_got_semicolon (yyvsp[-2].ttype);
+ ;
+ break;}
+case 277:
+#line 1664 "parse.y"
+{
+ resume_momentary (yyvsp[-1].itype);
+ if (IS_AGGR_TYPE_CODE (TREE_CODE (yyvsp[-2].ttype)))
+ note_got_semicolon (yyvsp[-2].ttype);
+ ;
+ break;}
+case 278:
+#line 1670 "parse.y"
+{
+ resume_momentary (yyvsp[-1].itype);
+ note_list_got_semicolon (yyvsp[-2].ttype);
+ ;
+ break;}
+case 279:
+#line 1675 "parse.y"
+{ resume_momentary (yyvsp[-1].itype); ;
+ break;}
+case 280:
+#line 1677 "parse.y"
+{
+ shadow_tag (yyvsp[-1].ttype);
+ note_list_got_semicolon (yyvsp[-1].ttype);
+ ;
+ break;}
+case 281:
+#line 1682 "parse.y"
+{ warning ("empty declaration"); ;
+ break;}
+case 284:
+#line 1696 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, NULL_TREE, empty_parms (),
+ NULL_TREE); ;
+ break;}
+case 285:
+#line 1699 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, yyval.ttype, empty_parms (),
+ NULL_TREE); ;
+ break;}
+case 286:
+#line 1706 "parse.y"
+{ yyval.ttype = build_decl_list (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 287:
+#line 1708 "parse.y"
+{ yyval.ttype = build_decl_list (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 288:
+#line 1710 "parse.y"
+{ yyval.ttype = build_decl_list (get_decl_list (yyval.ttype), yyvsp[0].ttype); ;
+ break;}
+case 289:
+#line 1712 "parse.y"
+{ yyval.ttype = build_decl_list (yyval.ttype, NULL_TREE); ;
+ break;}
+case 290:
+#line 1714 "parse.y"
+{ yyval.ttype = build_decl_list (yyval.ttype, NULL_TREE); ;
+ break;}
+case 293:
+#line 1727 "parse.y"
+{ yyval.ttype = decl_tree_cons (NULL_TREE, yyvsp[0].ttype, yyval.ttype); ;
+ break;}
+case 294:
+#line 1729 "parse.y"
+{ yyval.ttype = decl_tree_cons (NULL_TREE, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 295:
+#line 1731 "parse.y"
+{ yyval.ttype = decl_tree_cons (NULL_TREE, yyvsp[-1].ttype, chainon (yyvsp[0].ttype, yyval.ttype)); ;
+ break;}
+case 296:
+#line 1733 "parse.y"
+{ yyval.ttype = decl_tree_cons (NULL_TREE, yyvsp[-1].ttype, chainon (yyvsp[0].ttype, yyval.ttype)); ;
+ break;}
+case 297:
+#line 1735 "parse.y"
+{ yyval.ttype = decl_tree_cons (NULL_TREE, yyvsp[-2].ttype,
+ chainon (yyvsp[-1].ttype, chainon (yyvsp[0].ttype, yyval.ttype))); ;
+ break;}
+case 298:
+#line 1741 "parse.y"
+{ if (extra_warnings)
+ warning ("`%s' is not at beginning of declaration",
+ IDENTIFIER_POINTER (yyval.ttype));
+ yyval.ttype = build_decl_list (NULL_TREE, yyval.ttype); ;
+ break;}
+case 299:
+#line 1746 "parse.y"
+{ yyval.ttype = decl_tree_cons (NULL_TREE, yyvsp[0].ttype, yyval.ttype); ;
+ break;}
+case 300:
+#line 1748 "parse.y"
+{ if (extra_warnings)
+ warning ("`%s' is not at beginning of declaration",
+ IDENTIFIER_POINTER (yyvsp[0].ttype));
+ yyval.ttype = decl_tree_cons (NULL_TREE, yyvsp[0].ttype, yyval.ttype); ;
+ break;}
+case 301:
+#line 1760 "parse.y"
+{ TREE_STATIC (yyval.ttype) = 1; ;
+ break;}
+case 302:
+#line 1762 "parse.y"
+{ yyval.ttype = IDENTIFIER_AS_LIST (yyval.ttype); ;
+ break;}
+case 303:
+#line 1764 "parse.y"
+{ yyval.ttype = decl_tree_cons (NULL_TREE, yyvsp[0].ttype, yyval.ttype);
+ TREE_STATIC (yyval.ttype) = 1; ;
+ break;}
+case 304:
+#line 1767 "parse.y"
+{ if (extra_warnings && TREE_STATIC (yyval.ttype))
+ warning ("`%s' is not at beginning of declaration",
+ IDENTIFIER_POINTER (yyvsp[0].ttype));
+ yyval.ttype = decl_tree_cons (NULL_TREE, yyvsp[0].ttype, yyval.ttype);
+ TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ;
+ break;}
+case 305:
+#line 1783 "parse.y"
+{ yyval.ttype = get_decl_list (yyval.ttype); ;
+ break;}
+case 306:
+#line 1785 "parse.y"
+{ yyval.ttype = decl_tree_cons (NULL_TREE, yyvsp[0].ttype, yyval.ttype); ;
+ break;}
+case 307:
+#line 1787 "parse.y"
+{ yyval.ttype = decl_tree_cons (NULL_TREE, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 308:
+#line 1789 "parse.y"
+{ yyval.ttype = decl_tree_cons (NULL_TREE, yyvsp[-1].ttype, chainon (yyvsp[0].ttype, yyval.ttype)); ;
+ break;}
+case 309:
+#line 1794 "parse.y"
+{ yyval.ttype = build_decl_list (NULL_TREE, yyval.ttype); ;
+ break;}
+case 310:
+#line 1796 "parse.y"
+{ yyval.ttype = decl_tree_cons (NULL_TREE, yyvsp[0].ttype, yyval.ttype); ;
+ break;}
+case 314:
+#line 1807 "parse.y"
+{ yyval.ttype = TREE_TYPE (yyvsp[-1].ttype);
+ if (flag_ansi)
+ pedwarn ("ANSI C++ forbids `typeof'"); ;
+ break;}
+case 315:
+#line 1811 "parse.y"
+{ yyval.ttype = groktypename (yyvsp[-1].ttype);
+ if (flag_ansi)
+ pedwarn ("ANSI C++ forbids `typeof'"); ;
+ break;}
+case 316:
+#line 1815 "parse.y"
+{ tree type = TREE_TYPE (yyvsp[-1].ttype);
+
+ if (IS_AGGR_TYPE (type))
+ {
+ sorry ("sigof type specifier");
+ yyval.ttype = type;
+ }
+ else
+ {
+ error ("`sigof' applied to non-aggregate expression");
+ yyval.ttype = error_mark_node;
+ }
+ ;
+ break;}
+case 317:
+#line 1829 "parse.y"
+{ tree type = groktypename (yyvsp[-1].ttype);
+
+ if (IS_AGGR_TYPE (type))
+ {
+ sorry ("sigof type specifier");
+ yyval.ttype = type;
+ }
+ else
+ {
+			      error ("`sigof' applied to non-aggregate type");
+ yyval.ttype = error_mark_node;
+ }
+ ;
+ break;}
+case 327:
+#line 1868 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 328:
+#line 1870 "parse.y"
+{ if (TREE_CHAIN (yyvsp[-1].ttype)) yyvsp[-1].ttype = combine_strings (yyvsp[-1].ttype); yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 329:
+#line 1875 "parse.y"
+{ current_declspecs = yyvsp[-5].ttype;
+ if (TREE_CODE (current_declspecs) != TREE_LIST)
+ current_declspecs = get_decl_list (current_declspecs);
+ if (have_extern_spec && !used_extern_spec)
+ {
+ current_declspecs = decl_tree_cons
+ (NULL_TREE, get_identifier ("extern"),
+ current_declspecs);
+ used_extern_spec = 1;
+ }
+ yyvsp[0].itype = suspend_momentary ();
+ yyval.ttype = start_decl (yyvsp[-4].ttype, current_declspecs, 1, yyvsp[-3].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 330:
+#line 1890 "parse.y"
+{ finish_decl (yyvsp[-1].ttype, yyvsp[0].ttype, yyvsp[-4].ttype, 0);
+ yyval.itype = yyvsp[-2].itype; ;
+ break;}
+case 331:
+#line 1893 "parse.y"
+{ tree d;
+ current_declspecs = yyvsp[-4].ttype;
+ if (TREE_CODE (current_declspecs) != TREE_LIST)
+ current_declspecs = get_decl_list (current_declspecs);
+ if (have_extern_spec && !used_extern_spec)
+ {
+ current_declspecs = decl_tree_cons
+ (NULL_TREE, get_identifier ("extern"),
+ current_declspecs);
+ used_extern_spec = 1;
+ }
+ yyval.itype = suspend_momentary ();
+ d = start_decl (yyvsp[-3].ttype, current_declspecs, 0, yyvsp[-2].ttype);
+ cplus_decl_attributes (d, yyvsp[0].ttype);
+ finish_decl (d, NULL_TREE, yyvsp[-1].ttype, 0); ;
+ break;}
+case 332:
+#line 1912 "parse.y"
+{ yyval.ttype = start_decl (yyvsp[-4].ttype, current_declspecs, 1, yyvsp[-3].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 333:
+#line 1916 "parse.y"
+{ finish_decl (yyvsp[-1].ttype, yyvsp[0].ttype, yyvsp[-4].ttype, 0); ;
+ break;}
+case 334:
+#line 1918 "parse.y"
+{ yyval.ttype = start_decl (yyvsp[-3].ttype, current_declspecs, 0, yyvsp[-2].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[0].ttype);
+ finish_decl (yyval.ttype, NULL_TREE, yyvsp[-1].ttype, 0); ;
+ break;}
+case 335:
+#line 1925 "parse.y"
+{ current_declspecs = yyvsp[-5].ttype;
+ yyvsp[0].itype = suspend_momentary ();
+ yyval.ttype = start_decl (yyvsp[-4].ttype, current_declspecs, 1, yyvsp[-3].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 336:
+#line 1931 "parse.y"
+{ finish_decl (yyvsp[-1].ttype, yyvsp[0].ttype, yyvsp[-4].ttype, 0);
+ yyval.itype = yyvsp[-2].itype; ;
+ break;}
+case 337:
+#line 1934 "parse.y"
+{ tree d;
+ current_declspecs = yyvsp[-4].ttype;
+ yyval.itype = suspend_momentary ();
+ d = start_decl (yyvsp[-3].ttype, current_declspecs, 0, yyvsp[-2].ttype);
+ cplus_decl_attributes (d, yyvsp[0].ttype);
+ finish_decl (d, NULL_TREE, yyvsp[-1].ttype, 0); ;
+ break;}
+case 338:
+#line 1944 "parse.y"
+{ current_declspecs = NULL_TREE;
+ yyvsp[0].itype = suspend_momentary ();
+ yyval.ttype = start_decl (yyvsp[-4].ttype, current_declspecs, 1, yyvsp[-3].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 339:
+#line 1950 "parse.y"
+{ finish_decl (yyvsp[-1].ttype, yyvsp[0].ttype, yyvsp[-4].ttype, 0);
+ yyval.itype = yyvsp[-2].itype; ;
+ break;}
+case 340:
+#line 1953 "parse.y"
+{ tree d;
+ current_declspecs = NULL_TREE;
+ yyval.itype = suspend_momentary ();
+ d = start_decl (yyvsp[-3].ttype, current_declspecs, 0, yyvsp[-2].ttype);
+ cplus_decl_attributes (d, yyvsp[0].ttype);
+ finish_decl (d, NULL_TREE, yyvsp[-1].ttype, 0); ;
+ break;}
+case 341:
+#line 1965 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 342:
+#line 1967 "parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 343:
+#line 1972 "parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 344:
+#line 1974 "parse.y"
+{ yyval.ttype = chainon (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 345:
+#line 1979 "parse.y"
+{ yyval.ttype = yyvsp[-2].ttype; ;
+ break;}
+case 346:
+#line 1984 "parse.y"
+{ yyval.ttype = build_tree_list (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 347:
+#line 1986 "parse.y"
+{ yyval.ttype = chainon (yyvsp[-2].ttype, build_tree_list (NULL_TREE, yyvsp[0].ttype)); ;
+ break;}
+case 348:
+#line 1991 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 349:
+#line 1993 "parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 350:
+#line 1995 "parse.y"
+{ yyval.ttype = tree_cons (yyvsp[-3].ttype, NULL_TREE, build_tree_list (NULL_TREE, yyvsp[-1].ttype)); ;
+ break;}
+case 351:
+#line 1997 "parse.y"
+{ yyval.ttype = tree_cons (yyvsp[-5].ttype, NULL_TREE, tree_cons (NULL_TREE, yyvsp[-3].ttype, yyvsp[-1].ttype)); ;
+ break;}
+case 352:
+#line 1999 "parse.y"
+{ yyval.ttype = tree_cons (yyvsp[-3].ttype, NULL_TREE, yyvsp[-1].ttype); ;
+ break;}
+case 357:
+#line 2015 "parse.y"
+{ yyval.ttype = build_tree_list (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 358:
+#line 2017 "parse.y"
+{ yyval.ttype = chainon (yyvsp[-2].ttype, build_tree_list (NULL_TREE, yyvsp[0].ttype)); ;
+ break;}
+case 360:
+#line 2023 "parse.y"
+{ yyval.ttype = build_nt (CONSTRUCTOR, NULL_TREE, NULL_TREE);
+ TREE_HAS_CONSTRUCTOR (yyval.ttype) = 1; ;
+ break;}
+case 361:
+#line 2026 "parse.y"
+{ yyval.ttype = build_nt (CONSTRUCTOR, NULL_TREE, nreverse (yyvsp[-1].ttype));
+ TREE_HAS_CONSTRUCTOR (yyval.ttype) = 1; ;
+ break;}
+case 362:
+#line 2029 "parse.y"
+{ yyval.ttype = build_nt (CONSTRUCTOR, NULL_TREE, nreverse (yyvsp[-2].ttype));
+ TREE_HAS_CONSTRUCTOR (yyval.ttype) = 1; ;
+ break;}
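+/* Cases 360-362 build the CONSTRUCTOR node for brace initializers,
+   e.g. `int a[] = { 1, 2 }' and the trailing-comma form
+   `int a[] = { 1, 2, }'.  The initializer list is accumulated in
+   reverse (see the tree_cons actions in cases 364-369 below), hence
+   the nreverse calls here.  */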
+case 363:
+#line 2032 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 364:
+#line 2039 "parse.y"
+{ yyval.ttype = build_tree_list (NULL_TREE, yyval.ttype); ;
+ break;}
+case 365:
+#line 2041 "parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyval.ttype); ;
+ break;}
+case 366:
+#line 2044 "parse.y"
+{ yyval.ttype = build_tree_list (yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 367:
+#line 2046 "parse.y"
+{ yyval.ttype = tree_cons (yyvsp[-2].ttype, yyvsp[0].ttype, yyval.ttype); ;
+ break;}
+case 368:
+#line 2048 "parse.y"
+{ yyval.ttype = build_tree_list (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 369:
+#line 2050 "parse.y"
+{ yyval.ttype = tree_cons (yyvsp[-2].ttype, yyvsp[0].ttype, yyval.ttype); ;
+ break;}
+case 370:
+#line 2055 "parse.y"
+{ yyvsp[0].itype = suspend_momentary ();
+ yyval.ttype = start_enum (yyvsp[-1].ttype); ;
+ break;}
+case 371:
+#line 2058 "parse.y"
+{ yyval.ttype = finish_enum (yyvsp[-3].ttype, yyvsp[-2].ttype);
+ resume_momentary ((int) yyvsp[-4].itype);
+ check_for_missing_semicolon (yyvsp[-3].ttype); ;
+ break;}
+case 372:
+#line 2062 "parse.y"
+{ yyval.ttype = finish_enum (start_enum (yyvsp[-2].ttype), NULL_TREE);
+ check_for_missing_semicolon (yyval.ttype); ;
+ break;}
+case 373:
+#line 2065 "parse.y"
+{ yyvsp[0].itype = suspend_momentary ();
+ yyval.ttype = start_enum (make_anon_name ()); ;
+ break;}
+case 374:
+#line 2068 "parse.y"
+{ yyval.ttype = finish_enum (yyvsp[-3].ttype, yyvsp[-2].ttype);
+ resume_momentary ((int) yyvsp[-5].itype);
+ check_for_missing_semicolon (yyvsp[-3].ttype); ;
+ break;}
+case 375:
+#line 2072 "parse.y"
+{ yyval.ttype = finish_enum (start_enum (make_anon_name ()), NULL_TREE);
+ check_for_missing_semicolon (yyval.ttype); ;
+ break;}
+case 376:
+#line 2075 "parse.y"
+{ yyval.ttype = xref_tag (enum_type_node, yyvsp[0].ttype, NULL_TREE, 0); ;
+ break;}
+case 377:
+#line 2077 "parse.y"
+{ yyval.ttype = xref_tag (enum_type_node, yyvsp[0].ttype, NULL_TREE, 0); ;
+ break;}
+case 378:
+#line 2081 "parse.y"
+{
+ int semi;
+ tree id;
+
+#if 0
+ /* Need to rework class nesting in the
+ presence of nested classes, etc. */
+			  shadow_tag (CLASSTYPE_AS_LIST (yyval.ttype));
+#endif
+ if (yychar == YYEMPTY)
+ yychar = YYLEX;
+ semi = yychar == ';';
+ /* finish_struct nukes this anyway; if
+ finish_exception does too, then it can go. */
+ if (semi)
+ note_got_semicolon (yyval.ttype);
+
+ if (TREE_CODE (yyval.ttype) == ENUMERAL_TYPE)
+ /* $$ = $1 from default rule. */;
+ else if (CLASSTYPE_DECLARED_EXCEPTION (yyval.ttype))
+ {
+ }
+ else
+ {
+ yyval.ttype = finish_struct (yyval.ttype, yyvsp[-1].ttype, semi);
+ if (semi) note_got_semicolon (yyval.ttype);
+ }
+
+ pop_obstacks ();
+
+ id = TYPE_IDENTIFIER (yyval.ttype);
+ if (id && IDENTIFIER_TEMPLATE (id))
+ {
+ tree decl;
+
+			 /* I don't know if the copying of this TYPE_DECL is
+			    really needed.  However, it's such a small
+			    performance penalty that the extra safety is a
+			    bargain.  - niklas@appli.se */
+ push_obstacks (&permanent_obstack, &permanent_obstack);
+ decl = copy_node (lookup_name (id, 0));
+ if (DECL_LANG_SPECIFIC (decl))
+ copy_lang_decl (decl);
+ pop_obstacks ();
+ undo_template_name_overload (id, 0);
+ pushdecl_top_level (decl);
+ }
+ if (! semi)
+ check_for_missing_semicolon (yyval.ttype); ;
+ break;}
+case 379:
+#line 2132 "parse.y"
+{
+#if 0
+ /* It's no longer clear what the following error is supposed to
+ accomplish. If it turns out to be needed, add a comment why. */
+ if (TYPE_BINFO_BASETYPES (yyval.ttype) && !TYPE_SIZE (yyval.ttype))
+ {
+ error ("incomplete definition of type `%s'",
+ TYPE_NAME_STRING (yyval.ttype));
+ yyval.ttype = error_mark_node;
+ }
+#endif
+ ;
+ break;}
+case 383:
+#line 2154 "parse.y"
+{ if (pedantic) pedwarn ("comma at end of enumerator list"); ;
+ break;}
+case 385:
+#line 2159 "parse.y"
+{ error ("storage class specifier `%s' not allowed after struct or class", IDENTIFIER_POINTER (yyvsp[0].ttype)); ;
+ break;}
+case 386:
+#line 2161 "parse.y"
+{ error ("type specifier `%s' not allowed after struct or class", IDENTIFIER_POINTER (yyvsp[0].ttype)); ;
+ break;}
+case 387:
+#line 2163 "parse.y"
+{ error ("type qualifier `%s' not allowed after struct or class", IDENTIFIER_POINTER (yyvsp[0].ttype)); ;
+ break;}
+case 388:
+#line 2165 "parse.y"
+{ error ("no body nor ';' separates two class, struct or union declarations"); ;
+ break;}
+case 389:
+#line 2170 "parse.y"
+{
+ yyungetc (';', 1); current_aggr = yyval.ttype; yyval.ttype = yyvsp[-1].ttype;
+ if (yyvsp[-3].ttype == ridpointers[(int) RID_TEMPLATE])
+ instantiate_class_template (yyval.ttype, 2);
+ ;
+ break;}
+case 390:
+#line 2179 "parse.y"
+{ current_aggr = yyval.ttype; yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 391:
+#line 2181 "parse.y"
+{ current_aggr = yyval.ttype; yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 392:
+#line 2183 "parse.y"
+{ current_aggr = yyval.ttype; yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 393:
+#line 2185 "parse.y"
+{ yyungetc ('{', 1);
+ aggr2:
+ current_aggr = yyval.ttype;
+ yyval.ttype = yyvsp[-1].ttype;
+ overload_template_name (yyval.ttype, 0); ;
+ break;}
+case 394:
+#line 2191 "parse.y"
+{ yyungetc (':', 1); goto aggr2; ;
+ break;}
+case 396:
+#line 2197 "parse.y"
+{ current_aggr = yyval.ttype; yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 397:
+#line 2201 "parse.y"
+{ yyval.ttype = xref_tag (current_aggr, yyvsp[0].ttype, NULL_TREE, 1); ;
+ break;}
+case 398:
+#line 2204 "parse.y"
+{ yyval.ttype = xref_defn_tag (current_aggr, yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 399:
+#line 2209 "parse.y"
+{
+ if (yyvsp[0].ttype)
+ yyval.ttype = xref_tag (current_aggr, yyvsp[-2].ttype, yyvsp[0].ttype, 1);
+ else
+ yyval.ttype = yyvsp[-1].ttype;
+ ;
+ break;}
+case 400:
+#line 2218 "parse.y"
+{
+ if (yyvsp[0].ttype)
+ yyval.ttype = xref_defn_tag (current_aggr, yyvsp[-2].ttype, yyvsp[0].ttype);
+ else
+ yyval.ttype = yyvsp[-1].ttype;
+ ;
+ break;}
+case 401:
+#line 2227 "parse.y"
+{ yyval.ttype = xref_tag (yyval.ttype, make_anon_name (), NULL_TREE, 0);
+ yyungetc ('{', 1); ;
+ break;}
+case 404:
+#line 2235 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 405:
+#line 2237 "parse.y"
+{ yyungetc (':', 1); yyval.ttype = NULL_TREE; ;
+ break;}
+case 406:
+#line 2239 "parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 408:
+#line 2245 "parse.y"
+{ yyval.ttype = chainon (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 409:
+#line 2250 "parse.y"
+{
+ tree type;
+ do_base_class1:
+ type = IDENTIFIER_TYPE_VALUE (yyval.ttype);
+ if (! is_aggr_typedef (yyval.ttype, 1))
+ yyval.ttype = NULL_TREE;
+ else if (current_aggr == signature_type_node
+				   && (! type || ! IS_SIGNATURE (type)))
+ {
+ error ("class name not allowed as base signature");
+ yyval.ttype = NULL_TREE;
+ }
+ else if (current_aggr == signature_type_node)
+ {
+ sorry ("signature inheritance, base type `%s' ignored",
+ IDENTIFIER_POINTER (yyval.ttype));
+ yyval.ttype = build_tree_list ((tree)access_public, yyval.ttype);
+ }
+ else if (type && IS_SIGNATURE (type))
+ {
+ error ("signature name not allowed as base class");
+ yyval.ttype = NULL_TREE;
+ }
+ else
+ yyval.ttype = build_tree_list ((tree)access_default, yyval.ttype);
+ ;
+ break;}
+case 410:
+#line 2277 "parse.y"
+{
+ tree type;
+ do_base_class2:
+ type = IDENTIFIER_TYPE_VALUE (yyvsp[0].ttype);
+ if (current_aggr == signature_type_node)
+ error ("access and source specifiers not allowed in signature");
+ if (! is_aggr_typedef (yyvsp[0].ttype, 1))
+ yyval.ttype = NULL_TREE;
+ else if (current_aggr == signature_type_node
+				   && (! type || ! IS_SIGNATURE (type)))
+ {
+ error ("class name not allowed as base signature");
+ yyval.ttype = NULL_TREE;
+ }
+ else if (current_aggr == signature_type_node)
+ {
+ sorry ("signature inheritance, base type `%s' ignored",
+				     IDENTIFIER_POINTER (yyvsp[0].ttype));
+ yyval.ttype = build_tree_list ((tree)access_public, yyvsp[0].ttype);
+ }
+ else if (type && IS_SIGNATURE (type))
+ {
+ error ("signature name not allowed as base class");
+ yyval.ttype = NULL_TREE;
+ }
+ else
+ yyval.ttype = build_tree_list ((tree) yyval.ttype, yyvsp[0].ttype);
+ ;
+ break;}
+case 412:
+#line 2310 "parse.y"
+{
+ if (current_aggr == signature_type_node)
+ {
+ if (IS_AGGR_TYPE (TREE_TYPE (yyvsp[-1].ttype)))
+ {
+ sorry ("`sigof' as base signature specifier");
+ /* need to return some dummy signature identifier */
+ yyval.ttype = yyvsp[-1].ttype;
+ }
+ else
+ {
+ error ("`sigof' applied to non-aggregate expression");
+ yyval.ttype = error_mark_node;
+ }
+ }
+ else
+ {
+ error ("`sigof' in struct or class declaration");
+ yyval.ttype = error_mark_node;
+ }
+ ;
+ break;}
+case 413:
+#line 2332 "parse.y"
+{
+ if (current_aggr == signature_type_node)
+ {
+ if (IS_AGGR_TYPE (groktypename (yyvsp[-1].ttype)))
+ {
+ sorry ("`sigof' as base signature specifier");
+ /* need to return some dummy signature identifier */
+ yyval.ttype = yyvsp[-1].ttype;
+ }
+ else
+ {
+			      error ("`sigof' applied to non-aggregate type");
+ yyval.ttype = error_mark_node;
+ }
+ }
+ else
+ {
+ error ("`sigof' in struct or class declaration");
+ yyval.ttype = error_mark_node;
+ }
+ ;
+ break;}
+case 415:
+#line 2358 "parse.y"
+{ if (yyval.ttype != ridpointers[(int)RID_VIRTUAL])
+ sorry ("non-virtual access");
+ yyval.itype = access_default_virtual; ;
+ break;}
+case 416:
+#line 2362 "parse.y"
+{ int err = 0;
+ if (yyvsp[0].itype == access_protected)
+ {
+ warning ("`protected' access not implemented");
+ yyvsp[0].itype = access_public;
+ err++;
+ }
+ else if (yyvsp[0].itype == access_public)
+ {
+ if (yyvsp[-1].itype == access_private)
+ {
+ mixed:
+ error ("base class cannot be public and private");
+ }
+ else if (yyvsp[-1].itype == access_default_virtual)
+ yyval.itype = access_public_virtual;
+ }
+ else /* $2 == access_private */
+ {
+ if (yyvsp[-1].itype == access_public)
+ goto mixed;
+ else if (yyvsp[-1].itype == access_default_virtual)
+ yyval.itype = access_private_virtual;
+ }
+ ;
+ break;}
+case 417:
+#line 2388 "parse.y"
+{ if (yyvsp[0].ttype != ridpointers[(int)RID_VIRTUAL])
+ sorry ("non-virtual access");
+ if (yyval.itype == access_public)
+ yyval.itype = access_public_virtual;
+ else if (yyval.itype == access_private)
+ yyval.itype = access_private_virtual; ;
+ break;}
+case 418:
+#line 2397 "parse.y"
+{ tree t;
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+
+ if (! IS_AGGR_TYPE (yyvsp[-1].ttype))
+ {
+ yyvsp[-1].ttype = make_lang_type (RECORD_TYPE);
+ TYPE_NAME (yyvsp[-1].ttype) = get_identifier ("erroneous type");
+ }
+ if (TYPE_SIZE (yyvsp[-1].ttype))
+ duplicate_tag_error (yyvsp[-1].ttype);
+ if (TYPE_SIZE (yyvsp[-1].ttype) || TYPE_BEING_DEFINED (yyvsp[-1].ttype))
+ {
+ t = make_lang_type (TREE_CODE (yyvsp[-1].ttype));
+ pushtag (TYPE_IDENTIFIER (yyvsp[-1].ttype), t, 0);
+ yyvsp[-1].ttype = t;
+ }
+ pushclass (yyvsp[-1].ttype, 0);
+ TYPE_BEING_DEFINED (yyvsp[-1].ttype) = 1;
+#if 0
+ t = TYPE_IDENTIFIER (yyvsp[-1].ttype);
+ if (t && IDENTIFIER_TEMPLATE (t))
+ overload_template_name (t, 1);
+#endif
+ ;
+ break;}
+case 419:
+#line 2426 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 420:
+#line 2428 "parse.y"
+{
+ if (current_aggr == signature_type_node)
+ yyval.ttype = build_tree_list ((tree) access_public, yyval.ttype);
+ else
+ yyval.ttype = build_tree_list ((tree) access_default, yyval.ttype);
+ ;
+ break;}
+case 421:
+#line 2435 "parse.y"
+{
+ tree visspec = (tree) yyvsp[-2].itype;
+
+ if (current_aggr == signature_type_node)
+ {
+ error ("access specifier not allowed in signature");
+ visspec = (tree) access_public;
+ }
+ yyval.ttype = chainon (yyval.ttype, build_tree_list (visspec, yyvsp[0].ttype));
+ ;
+ break;}
+case 422:
+#line 2446 "parse.y"
+{
+ if (current_aggr == signature_type_node)
+ error ("access specifier not allowed in signature");
+ ;
+ break;}
+case 423:
+#line 2456 "parse.y"
+{ if (yyval.ttype == void_type_node) yyval.ttype = NULL_TREE;
+ ;
+ break;}
+case 424:
+#line 2459 "parse.y"
+{ /* In pushdecl, we created a reverse list of names
+ in this binding level. Make sure that the chain
+ of what we're trying to add isn't the item itself
+ (which can happen with what pushdecl's doing). */
+ if (yyvsp[0].ttype != NULL_TREE && yyvsp[0].ttype != void_type_node)
+ {
+ if (TREE_CHAIN (yyvsp[0].ttype) != yyval.ttype)
+ yyval.ttype = chainon (yyval.ttype, yyvsp[0].ttype);
+ else
+ yyval.ttype = yyvsp[0].ttype;
+ }
+ ;
+ break;}
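+/* A sketch of the self-chaining hazard tested for above: pushdecl
+   threads new decls onto the binding level's list head first, so the
+   incoming component may already have the existing list as its
+   TREE_CHAIN.  Calling chainon in that state would make the list
+   circular; keeping the component as-is preserves the chain intact.  */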
+case 427:
+#line 2477 "parse.y"
+{ error ("missing ';' before right brace");
+ yyungetc ('}', 0); ;
+ break;}
+case 428:
+#line 2482 "parse.y"
+{ yyval.ttype = finish_method (yyval.ttype); ;
+ break;}
+case 429:
+#line 2484 "parse.y"
+{ yyval.ttype = finish_method (yyval.ttype); ;
+ break;}
+case 430:
+#line 2492 "parse.y"
+{
+ yyval.ttype = grok_x_components (yyval.ttype, yyvsp[0].ttype);
+ ;
+ break;}
+case 431:
+#line 2496 "parse.y"
+{
+ yyval.ttype = grok_x_components (yyval.ttype, yyvsp[0].ttype);
+ ;
+ break;}
+case 432:
+#line 2500 "parse.y"
+{ yyval.ttype = grokfield (yyval.ttype, NULL_TREE, yyvsp[-2].ttype, NULL_TREE, yyvsp[-1].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 433:
+#line 2503 "parse.y"
+{ yyval.ttype = grokbitfield (NULL_TREE, NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 434:
+#line 2505 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 435:
+#line 2516 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, TREE_VALUE (yyvsp[-4].ttype),
+ yyvsp[-2].ttype, yyvsp[0].ttype);
+ yyval.ttype = grokfield (yyval.ttype, TREE_CHAIN (yyvsp[-4].ttype), NULL_TREE, NULL_TREE,
+ NULL_TREE); ;
+ break;}
+case 436:
+#line 2521 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, TREE_VALUE (yyvsp[-2].ttype),
+ empty_parms (), yyvsp[0].ttype);
+ yyval.ttype = grokfield (yyval.ttype, TREE_CHAIN (yyvsp[-2].ttype), NULL_TREE, NULL_TREE,
+ NULL_TREE); ;
+ break;}
+case 437:
+#line 2530 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 439:
+#line 2533 "parse.y"
+{
+ /* In this context, void_type_node encodes
+ friends. They have been recorded elsewhere. */
+ if (yyval.ttype == void_type_node)
+ yyval.ttype = yyvsp[0].ttype;
+ else
+ yyval.ttype = chainon (yyval.ttype, yyvsp[0].ttype);
+ ;
+ break;}
+case 440:
+#line 2545 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 442:
+#line 2548 "parse.y"
+{
+ /* In this context, void_type_node encodes
+ friends. They have been recorded elsewhere. */
+ if (yyval.ttype == void_type_node)
+ yyval.ttype = yyvsp[0].ttype;
+ else
+ yyval.ttype = chainon (yyval.ttype, yyvsp[0].ttype);
+ ;
+ break;}
+case 447:
+#line 2570 "parse.y"
+{ current_declspecs = yyvsp[-4].ttype;
+ yyval.ttype = grokfield (yyval.ttype, current_declspecs, yyvsp[-2].ttype, NULL_TREE, yyvsp[-1].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 448:
+#line 2574 "parse.y"
+{ current_declspecs = yyvsp[-6].ttype;
+ yyval.ttype = grokfield (yyval.ttype, current_declspecs, yyvsp[-4].ttype, yyvsp[0].ttype, yyvsp[-3].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[-2].ttype); ;
+ break;}
+case 449:
+#line 2578 "parse.y"
+{ current_declspecs = yyvsp[-4].ttype;
+ yyval.ttype = grokbitfield (yyval.ttype, current_declspecs, yyvsp[-1].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 450:
+#line 2585 "parse.y"
+{ current_declspecs = yyvsp[-4].ttype;
+ yyval.ttype = grokfield (yyval.ttype, current_declspecs, yyvsp[-2].ttype, NULL_TREE, yyvsp[-1].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 451:
+#line 2589 "parse.y"
+{ current_declspecs = yyvsp[-6].ttype;
+ yyval.ttype = grokfield (yyval.ttype, current_declspecs, yyvsp[-4].ttype, yyvsp[0].ttype, yyvsp[-3].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[-2].ttype); ;
+ break;}
+case 452:
+#line 2593 "parse.y"
+{ current_declspecs = yyvsp[-4].ttype;
+ yyval.ttype = grokbitfield (yyval.ttype, current_declspecs, yyvsp[-1].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 453:
+#line 2597 "parse.y"
+{ current_declspecs = yyvsp[-3].ttype;
+ yyval.ttype = grokbitfield (NULL_TREE, current_declspecs, yyvsp[-1].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 454:
+#line 2604 "parse.y"
+{ yyval.ttype = grokfield (yyval.ttype, current_declspecs, yyvsp[-2].ttype, NULL_TREE, yyvsp[-1].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 455:
+#line 2607 "parse.y"
+{ yyval.ttype = grokfield (yyval.ttype, current_declspecs, yyvsp[-4].ttype, yyvsp[0].ttype, yyvsp[-3].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[-2].ttype); ;
+ break;}
+case 456:
+#line 2610 "parse.y"
+{ yyval.ttype = grokbitfield (yyval.ttype, current_declspecs, yyvsp[-1].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 457:
+#line 2616 "parse.y"
+{ yyval.ttype = grokfield (yyval.ttype, current_declspecs, yyvsp[-2].ttype, NULL_TREE, yyvsp[-1].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 458:
+#line 2619 "parse.y"
+{ yyval.ttype = grokfield (yyval.ttype, current_declspecs, yyvsp[-4].ttype, yyvsp[0].ttype, yyvsp[-3].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[-2].ttype); ;
+ break;}
+case 459:
+#line 2622 "parse.y"
+{ yyval.ttype = grokbitfield (yyval.ttype, current_declspecs, yyvsp[-1].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 460:
+#line 2625 "parse.y"
+{ yyval.ttype = grokbitfield (NULL_TREE, current_declspecs, yyvsp[-1].ttype);
+ cplus_decl_attributes (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 462:
+#line 2636 "parse.y"
+{ TREE_CHAIN (yyvsp[0].ttype) = yyval.ttype; yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 463:
+#line 2641 "parse.y"
+{ yyval.ttype = build_enumerator (yyval.ttype, NULL_TREE); ;
+ break;}
+case 464:
+#line 2643 "parse.y"
+{ yyval.ttype = build_enumerator (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 465:
+#line 2649 "parse.y"
+{ yyval.ttype = build_decl_list (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 466:
+#line 2651 "parse.y"
+{ yyval.ttype = build_decl_list (yyval.ttype, NULL_TREE); ;
+ break;}
+case 467:
+#line 2655 "parse.y"
+{
+ if (flag_ansi)
+ pedwarn ("ANSI C++ forbids array dimensions with parenthesized type in new");
+ yyval.ttype = build_parse_node (ARRAY_REF, TREE_VALUE (yyvsp[-4].ttype), yyvsp[-1].ttype);
+ yyval.ttype = build_decl_list (TREE_PURPOSE (yyvsp[-4].ttype), yyval.ttype);
+ ;
+ break;}
+case 468:
+#line 2665 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 469:
+#line 2667 "parse.y"
+{ yyval.ttype = decl_tree_cons (NULL_TREE, yyvsp[0].ttype, yyval.ttype); ;
+ break;}
+case 470:
+#line 2672 "parse.y"
+{ yyval.ttype = IDENTIFIER_AS_LIST (yyval.ttype); ;
+ break;}
+case 471:
+#line 2674 "parse.y"
+{ yyval.ttype = decl_tree_cons (NULL_TREE, yyvsp[0].ttype, yyval.ttype); ;
+ break;}
+case 472:
+#line 2682 "parse.y"
+{ yyval.itype = suspend_momentary (); ;
+ break;}
+case 473:
+#line 2683 "parse.y"
+{ resume_momentary ((int) yyvsp[-1].itype); yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 474:
+#line 2690 "parse.y"
+{ yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 475:
+#line 2692 "parse.y"
+{ yyval.ttype = make_reference_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 476:
+#line 2694 "parse.y"
+{ yyval.ttype = make_pointer_declarator (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 477:
+#line 2696 "parse.y"
+{ yyval.ttype = make_reference_declarator (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 478:
+#line 2698 "parse.y"
+{ tree arg = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype);
+ yyval.ttype = build_parse_node (SCOPE_REF, yyvsp[-2].ttype, arg);
+ ;
+ break;}
+case 480:
+#line 2706 "parse.y"
+{
+ /* Remember that this name has been used in the class
+ definition, as per [class.scope0] */
+ if (current_class_type
+ && TYPE_BEING_DEFINED (current_class_type)
+ && ! IDENTIFIER_CLASS_VALUE (yyval.ttype))
+ {
+ tree t = lookup_name (yyval.ttype, -2);
+ if (t)
+ pushdecl_class_level (t);
+ }
+ ;
+ break;}
+case 482:
+#line 2723 "parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 483:
+#line 2728 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, yyval.ttype, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 484:
+#line 2730 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, yyval.ttype, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 485:
+#line 2732 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, yyval.ttype, empty_parms (), yyvsp[0].ttype); ;
+ break;}
+case 486:
+#line 2734 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, yyval.ttype, NULL_TREE, NULL_TREE); ;
+ break;}
+case 487:
+#line 2736 "parse.y"
+{ yyval.ttype = build_parse_node (ARRAY_REF, yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 488:
+#line 2738 "parse.y"
+{ yyval.ttype = build_parse_node (ARRAY_REF, yyval.ttype, NULL_TREE); ;
+ break;}
+case 489:
+#line 2740 "parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 490:
+#line 2742 "parse.y"
+{ push_nested_class (TREE_TYPE (yyval.ttype), 3);
+ yyval.ttype = build_parse_node (SCOPE_REF, yyval.ttype, yyvsp[0].ttype);
+ TREE_COMPLEXITY (yyval.ttype) = current_class_depth; ;
+ break;}
+case 492:
+#line 2753 "parse.y"
+{ yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 493:
+#line 2755 "parse.y"
+{ yyval.ttype = make_reference_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 494:
+#line 2757 "parse.y"
+{ yyval.ttype = make_pointer_declarator (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 495:
+#line 2759 "parse.y"
+{ yyval.ttype = make_reference_declarator (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 496:
+#line 2761 "parse.y"
+{ tree arg = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype);
+ yyval.ttype = build_parse_node (SCOPE_REF, yyvsp[-2].ttype, arg);
+ ;
+ break;}
+case 498:
+#line 2769 "parse.y"
+{ yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 499:
+#line 2771 "parse.y"
+{ yyval.ttype = make_reference_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 500:
+#line 2773 "parse.y"
+{ yyval.ttype = make_pointer_declarator (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 501:
+#line 2775 "parse.y"
+{ yyval.ttype = make_reference_declarator (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 502:
+#line 2777 "parse.y"
+{ tree arg = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype);
+ yyval.ttype = build_parse_node (SCOPE_REF, yyvsp[-2].ttype, arg);
+ ;
+ break;}
+case 504:
+#line 2785 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, yyval.ttype, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 505:
+#line 2787 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, yyval.ttype, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 506:
+#line 2789 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, yyval.ttype, empty_parms (), yyvsp[0].ttype); ;
+ break;}
+case 507:
+#line 2791 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, yyval.ttype, NULL_TREE, NULL_TREE); ;
+ break;}
+case 508:
+#line 2793 "parse.y"
+{ yyval.ttype = finish_decl_parsing (yyvsp[-1].ttype); ;
+ break;}
+case 509:
+#line 2795 "parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 510:
+#line 2797 "parse.y"
+{ yyval.ttype = build_parse_node (ARRAY_REF, yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 511:
+#line 2799 "parse.y"
+{ yyval.ttype = build_parse_node (ARRAY_REF, yyval.ttype, NULL_TREE); ;
+ break;}
+case 512:
+#line 2804 "parse.y"
+{ got_scope = NULL_TREE;
+ yyval.ttype = build_parse_node (SCOPE_REF, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 513:
+#line 2810 "parse.y"
+{ got_scope = NULL_TREE;
+ yyval.ttype = build_parse_node (SCOPE_REF, yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 515:
+#line 2817 "parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 516:
+#line 2822 "parse.y"
+{ yyval.ttype = build_functional_cast (yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 517:
+#line 2824 "parse.y"
+{ yyval.ttype = reparse_decl_as_expr (yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 518:
+#line 2826 "parse.y"
+{ yyval.ttype = reparse_absdcl_as_expr (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 522:
+#line 2837 "parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 523:
+#line 2844 "parse.y"
+{ got_scope = TREE_TYPE (yyval.ttype); ;
+ break;}
+case 524:
+#line 2846 "parse.y"
+{ got_scope = TREE_TYPE (yyval.ttype); ;
+ break;}
+case 526:
+#line 2862 "parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 528:
+#line 2868 "parse.y"
+{ yyval.ttype = yyvsp[0].ttype; ;
+ break;}
+case 529:
+#line 2873 "parse.y"
+{ got_scope = NULL_TREE; ;
+ break;}
+case 530:
+#line 2875 "parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; got_scope = NULL_TREE; ;
+ break;}
+case 531:
+#line 2882 "parse.y"
+{ got_scope = void_type_node; ;
+ break;}
+case 532:
+#line 2888 "parse.y"
+{ yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 533:
+#line 2890 "parse.y"
+{ yyval.ttype = make_pointer_declarator (yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 534:
+#line 2892 "parse.y"
+{ yyval.ttype = make_reference_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 535:
+#line 2894 "parse.y"
+{ yyval.ttype = make_reference_declarator (yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 536:
+#line 2896 "parse.y"
+{ tree arg = make_pointer_declarator (yyvsp[0].ttype, NULL_TREE);
+ yyval.ttype = build_parse_node (SCOPE_REF, yyvsp[-1].ttype, arg);
+ ;
+ break;}
+case 537:
+#line 2900 "parse.y"
+{ tree arg = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype);
+ yyval.ttype = build_parse_node (SCOPE_REF, yyvsp[-2].ttype, arg);
+ ;
+ break;}
+case 539:
+#line 2909 "parse.y"
+{ yyval.ttype = build_parse_node (ARRAY_REF, NULL_TREE, yyvsp[-1].ttype); ;
+ break;}
+case 540:
+#line 2911 "parse.y"
+{ yyval.ttype = build_parse_node (ARRAY_REF, yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 541:
+#line 2917 "parse.y"
+{ yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 542:
+#line 2919 "parse.y"
+{ yyval.ttype = make_pointer_declarator (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 543:
+#line 2921 "parse.y"
+{ yyval.ttype = make_pointer_declarator (yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 544:
+#line 2923 "parse.y"
+{ yyval.ttype = make_pointer_declarator (NULL_TREE, NULL_TREE); ;
+ break;}
+case 545:
+#line 2925 "parse.y"
+{ yyval.ttype = make_reference_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 546:
+#line 2927 "parse.y"
+{ yyval.ttype = make_reference_declarator (NULL_TREE, yyvsp[0].ttype); ;
+ break;}
+case 547:
+#line 2929 "parse.y"
+{ yyval.ttype = make_reference_declarator (yyvsp[0].ttype, NULL_TREE); ;
+ break;}
+case 548:
+#line 2931 "parse.y"
+{ yyval.ttype = make_reference_declarator (NULL_TREE, NULL_TREE); ;
+ break;}
+case 549:
+#line 2933 "parse.y"
+{ tree arg = make_pointer_declarator (yyvsp[0].ttype, NULL_TREE);
+ yyval.ttype = build_parse_node (SCOPE_REF, yyvsp[-1].ttype, arg);
+ ;
+ break;}
+case 550:
+#line 2937 "parse.y"
+{ tree arg = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype);
+ yyval.ttype = build_parse_node (SCOPE_REF, yyvsp[-2].ttype, arg);
+ ;
+ break;}
+case 552:
+#line 2946 "parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 554:
+#line 2950 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, yyval.ttype, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 555:
+#line 2952 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, yyval.ttype, empty_parms (), yyvsp[0].ttype); ;
+ break;}
+case 556:
+#line 2954 "parse.y"
+{ yyval.ttype = build_parse_node (ARRAY_REF, yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 557:
+#line 2956 "parse.y"
+{ yyval.ttype = build_parse_node (ARRAY_REF, yyval.ttype, NULL_TREE); ;
+ break;}
+case 558:
+#line 2958 "parse.y"
+{ yyval.ttype = build_parse_node (CALL_EXPR, NULL_TREE, yyvsp[-2].ttype, yyvsp[0].ttype); ;
+ break;}
+case 559:
+#line 2960 "parse.y"
+{ TREE_OPERAND (yyval.ttype, 2) = yyvsp[0].ttype; ;
+ break;}
+case 560:
+#line 2962 "parse.y"
+{ TREE_OPERAND (yyval.ttype, 2) = yyvsp[0].ttype; ;
+ break;}
+case 561:
+#line 2964 "parse.y"
+{ yyval.ttype = build_parse_node (ARRAY_REF, NULL_TREE, yyvsp[-1].ttype); ;
+ break;}
+case 562:
+#line 2966 "parse.y"
+{ yyval.ttype = build_parse_node (ARRAY_REF, NULL_TREE, NULL_TREE); ;
+ break;}
+case 568:
+#line 2988 "parse.y"
+{ emit_line_note (input_filename, lineno);
+ pushlevel (0);
+ clear_last_expr ();
+ push_momentary ();
+ expand_start_bindings (0); ;
+ break;}
+case 570:
+#line 3000 "parse.y"
+{ if (flag_ansi)
+ pedwarn ("ANSI C++ forbids label declarations"); ;
+ break;}
+case 573:
+#line 3011 "parse.y"
+{ tree link;
+ for (link = yyvsp[-1].ttype; link; link = TREE_CHAIN (link))
+ {
+ tree label = shadow_label (TREE_VALUE (link));
+ C_DECLARED_LABEL_FLAG (label) = 1;
+ declare_nonlocal_label (label);
+ }
+ ;
+ break;}
+case 574:
+#line 3025 "parse.y"
+{;
+ break;}
+case 576:
+#line 3030 "parse.y"
+{ expand_end_bindings (getdecls (), kept_level_p (), 1);
+ yyval.ttype = poplevel (kept_level_p (), 1, 0);
+ pop_momentary (); ;
+ break;}
+case 577:
+#line 3034 "parse.y"
+{ expand_end_bindings (getdecls (), kept_level_p (), 1);
+ yyval.ttype = poplevel (kept_level_p (), 1, 0);
+ pop_momentary (); ;
+ break;}
+case 578:
+#line 3038 "parse.y"
+{ expand_end_bindings (getdecls (), kept_level_p (), 1);
+ yyval.ttype = poplevel (kept_level_p (), 0, 0);
+ pop_momentary (); ;
+ break;}
+case 579:
+#line 3042 "parse.y"
+{ expand_end_bindings (getdecls (), kept_level_p (), 1);
+ yyval.ttype = poplevel (kept_level_p (), 0, 0);
+ pop_momentary (); ;
+ break;}
+case 580:
+#line 3049 "parse.y"
+{ cond_stmt_keyword = "if"; ;
+ break;}
+case 581:
+#line 3051 "parse.y"
+{ emit_line_note (input_filename, lineno);
+ expand_start_cond (bool_truthvalue_conversion (yyvsp[0].ttype), 0); ;
+ break;}
+case 583:
+#line 3058 "parse.y"
+{ finish_stmt (); ;
+ break;}
+case 584:
+#line 3060 "parse.y"
+{ expand_end_bindings (getdecls (), kept_level_p (), 1);
+ yyval.ttype = poplevel (kept_level_p (), 1, 0);
+ pop_momentary (); ;
+ break;}
+case 585:
+#line 3067 "parse.y"
+{ finish_stmt (); ;
+ break;}
+case 587:
+#line 3073 "parse.y"
+{ finish_stmt (); ;
+ break;}
+case 588:
+#line 3075 "parse.y"
+{
+ tree expr = yyvsp[-1].ttype;
+ emit_line_note (input_filename, lineno);
+ /* Do default conversion if safe and possibly important,
+ in case within ({...}). */
+ if ((TREE_CODE (TREE_TYPE (expr)) == ARRAY_TYPE
+ && lvalue_p (expr))
+ || TREE_CODE (TREE_TYPE (expr)) == FUNCTION_TYPE)
+ expr = default_conversion (expr);
+ cplus_expand_expr_stmt (expr);
+ clear_momentary ();
+ finish_stmt (); ;
+ break;}
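+/* The default_conversion above is what makes a GNU statement
+   expression whose last statement is an array or function usable as a
+   value, e.g. (a sketch):
+
+	char buf[16];
+	char *p = ({ buf; });	// decays to `char *' because the
+				// expression statement applied
+				// default_conversion
+*/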
+case 589:
+#line 3088 "parse.y"
+{ expand_start_else (); ;
+ break;}
+case 590:
+#line 3090 "parse.y"
+{ expand_end_cond ();
+ expand_end_bindings (getdecls (), kept_level_p (), 1);
+ poplevel (kept_level_p (), 1, 0);
+ pop_momentary ();
+ finish_stmt (); ;
+ break;}
+case 591:
+#line 3096 "parse.y"
+{ expand_end_cond ();
+ expand_end_bindings (getdecls (), kept_level_p (), 1);
+ poplevel (kept_level_p (), 1, 0);
+ pop_momentary ();
+ finish_stmt (); ;
+ break;}
+case 592:
+#line 3102 "parse.y"
+{ emit_nop ();
+ emit_line_note (input_filename, lineno);
+ expand_start_loop (1);
+ cond_stmt_keyword = "while"; ;
+ break;}
+case 593:
+#line 3107 "parse.y"
+{ expand_exit_loop_if_false (0, bool_truthvalue_conversion (yyvsp[0].ttype)); ;
+ break;}
+case 594:
+#line 3109 "parse.y"
+{ expand_end_bindings (getdecls (), kept_level_p (), 1);
+ poplevel (kept_level_p (), 1, 0);
+ pop_momentary ();
+ expand_end_loop ();
+ finish_stmt (); ;
+ break;}
+case 595:
+#line 3115 "parse.y"
+{ emit_nop ();
+ emit_line_note (input_filename, lineno);
+ expand_start_loop_continue_elsewhere (1); ;
+ break;}
+case 596:
+#line 3119 "parse.y"
+{ expand_loop_continue_here ();
+ cond_stmt_keyword = "do"; ;
+ break;}
+case 597:
+#line 3122 "parse.y"
+{ emit_line_note (input_filename, lineno);
+ expand_exit_loop_if_false (0, bool_truthvalue_conversion (yyvsp[-1].ttype));
+ expand_end_loop ();
+ clear_momentary ();
+ finish_stmt (); ;
+ break;}
+case 598:
+#line 3128 "parse.y"
+{ emit_nop ();
+ emit_line_note (input_filename, lineno);
+ if (yyvsp[0].ttype) cplus_expand_expr_stmt (yyvsp[0].ttype);
+ expand_start_loop_continue_elsewhere (1); ;
+ break;}
+case 599:
+#line 3133 "parse.y"
+{ emit_line_note (input_filename, lineno);
+ if (yyvsp[-1].ttype) expand_exit_loop_if_false (0, bool_truthvalue_conversion (yyvsp[-1].ttype)); ;
+ break;}
+case 600:
+#line 3138 "parse.y"
+{ push_momentary (); ;
+ break;}
+case 601:
+#line 3140 "parse.y"
+{ emit_line_note (input_filename, lineno);
+ expand_end_bindings (getdecls (), kept_level_p (), 1);
+ poplevel (kept_level_p (), 1, 0);
+ pop_momentary ();
+ expand_loop_continue_here ();
+ if (yyvsp[-3].ttype) cplus_expand_expr_stmt (yyvsp[-3].ttype);
+ pop_momentary ();
+ expand_end_loop ();
+ finish_stmt (); ;
+ break;}
+case 602:
+#line 3150 "parse.y"
+{ emit_nop ();
+ emit_line_note (input_filename, lineno);
+ expand_start_loop_continue_elsewhere (1); ;
+ break;}
+case 603:
+#line 3154 "parse.y"
+{ emit_line_note (input_filename, lineno);
+ if (yyvsp[-1].ttype) expand_exit_loop_if_false (0, bool_truthvalue_conversion (yyvsp[-1].ttype)); ;
+ break;}
+case 604:
+#line 3159 "parse.y"
+{ push_momentary ();
+ yyvsp[0].itype = lineno; ;
+ break;}
+case 605:
+#line 3162 "parse.y"
+{ emit_line_note (input_filename, (int) yyvsp[-2].itype);
+ expand_end_bindings (getdecls (), kept_level_p (), 1);
+ poplevel (kept_level_p (), 1, 0);
+ pop_momentary ();
+ expand_loop_continue_here ();
+ if (yyvsp[-3].ttype) cplus_expand_expr_stmt (yyvsp[-3].ttype);
+ pop_momentary ();
+ expand_end_loop ();
+ finish_stmt ();
+ ;
+ break;}
+case 606:
+#line 3173 "parse.y"
+{ emit_line_note (input_filename, lineno);
+ c_expand_start_case (yyvsp[-1].ttype);
+ /* Don't let the tree nodes for $4 be discarded by
+ clear_momentary during the parsing of the next stmt. */
+ push_momentary (); ;
+ break;}
+case 607:
+#line 3179 "parse.y"
+{ expand_end_case (yyvsp[-3].ttype);
+ pop_momentary ();
+ expand_end_bindings (getdecls (), kept_level_p (), 1);
+ poplevel (kept_level_p (), 1, 0);
+ pop_momentary ();
+ finish_stmt (); ;
+ break;}
+case 608:
+#line 3186 "parse.y"
+{ register tree value = check_cp_case_value (yyvsp[-1].ttype);
+ register tree label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ if (value != error_mark_node)
+ {
+ tree duplicate;
+ int success = pushcase (value, convert_and_check,
+ label, &duplicate);
+ if (success == 1)
+ cp_error ("case label `%E' not within a switch statement", yyvsp[-1].ttype);
+ else if (success == 2)
+ {
+ cp_error ("duplicate case value `%E'", yyvsp[-1].ttype);
+ cp_error_at ("`%E' previously used here", duplicate);
+ }
+ else if (success == 3)
+ warning ("case value out of range");
+ else if (success == 5)
+ cp_error ("case label `%E' within scope of cleanup or variable array", yyvsp[-1].ttype);
+ }
+ define_case_label (label);
+ ;
+ break;}
+case 610:
+#line 3211 "parse.y"
+{ register tree value1 = check_cp_case_value (yyvsp[-3].ttype);
+ register tree value2 = check_cp_case_value (yyvsp[-1].ttype);
+ register tree label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ if (flag_ansi)
+ pedwarn ("ANSI C++ forbids range expressions in switch statement");
+ if (value1 != error_mark_node
+ && value2 != error_mark_node)
+ {
+ tree duplicate;
+ int success = pushcase_range (value1, value2,
+ convert_and_check, label,
+ &duplicate);
+ if (success == 1)
+ error ("case label not within a switch statement");
+ else if (success == 2)
+ {
+ error ("duplicate (or overlapping) case value");
+ error_with_decl (duplicate, "this is the first entry overlapping that value");
+ }
+ else if (success == 3)
+ warning ("case value out of range");
+ else if (success == 4)
+ warning ("empty range specified");
+ else if (success == 5)
+ error ("case label within scope of cleanup or variable array");
+ }
+ define_case_label (label);
+ ;
+ break;}
+case 612:
+#line 3243 "parse.y"
+{
+ tree duplicate;
+ register tree label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+ int success = pushcase (NULL_TREE, 0, label, &duplicate);
+ if (success == 1)
+ error ("default label not within a switch statement");
+ else if (success == 2)
+ {
+ error ("multiple default labels in one switch");
+ error_with_decl (duplicate, "this is the first default label");
+ }
+ define_case_label (NULL_TREE);
+ ;
+ break;}
+case 614:
+#line 3259 "parse.y"
+{ emit_line_note (input_filename, lineno);
+		  if (! expand_exit_something ())
+ error ("break statement not within loop or switch"); ;
+ break;}
+case 615:
+#line 3263 "parse.y"
+{ emit_line_note (input_filename, lineno);
+ if (! expand_continue_loop (0))
+ error ("continue statement not within a loop"); ;
+ break;}
+case 616:
+#line 3267 "parse.y"
+{ emit_line_note (input_filename, lineno);
+ c_expand_return (NULL_TREE); ;
+ break;}
+case 617:
+#line 3270 "parse.y"
+{ emit_line_note (input_filename, lineno);
+ c_expand_return (yyvsp[-1].ttype);
+ finish_stmt ();
+ ;
+ break;}
+case 618:
+#line 3275 "parse.y"
+{ if (TREE_CHAIN (yyvsp[-2].ttype)) yyvsp[-2].ttype = combine_strings (yyvsp[-2].ttype);
+ emit_line_note (input_filename, lineno);
+ expand_asm (yyvsp[-2].ttype);
+ finish_stmt ();
+ ;
+ break;}
+case 619:
+#line 3282 "parse.y"
+{ if (TREE_CHAIN (yyvsp[-4].ttype)) yyvsp[-4].ttype = combine_strings (yyvsp[-4].ttype);
+ emit_line_note (input_filename, lineno);
+ c_expand_asm_operands (yyvsp[-4].ttype, yyvsp[-2].ttype, NULL_TREE, NULL_TREE,
+ yyvsp[-6].ttype == ridpointers[(int)RID_VOLATILE],
+ input_filename, lineno);
+ finish_stmt ();
+ ;
+ break;}
+case 620:
+#line 3291 "parse.y"
+{ if (TREE_CHAIN (yyvsp[-6].ttype)) yyvsp[-6].ttype = combine_strings (yyvsp[-6].ttype);
+ emit_line_note (input_filename, lineno);
+ c_expand_asm_operands (yyvsp[-6].ttype, yyvsp[-4].ttype, yyvsp[-2].ttype, NULL_TREE,
+ yyvsp[-8].ttype == ridpointers[(int)RID_VOLATILE],
+ input_filename, lineno);
+ finish_stmt ();
+ ;
+ break;}
+case 621:
+#line 3301 "parse.y"
+{ if (TREE_CHAIN (yyvsp[-8].ttype)) yyvsp[-8].ttype = combine_strings (yyvsp[-8].ttype);
+ emit_line_note (input_filename, lineno);
+ c_expand_asm_operands (yyvsp[-8].ttype, yyvsp[-6].ttype, yyvsp[-4].ttype, yyvsp[-2].ttype,
+ yyvsp[-10].ttype == ridpointers[(int)RID_VOLATILE],
+ input_filename, lineno);
+ finish_stmt ();
+ ;
+ break;}
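+/* Cases 618-621 cover the four arities of the GNU asm extension:
+
+	asm ("nop");
+	asm ("movl %1,%0" : "=r" (dst) : "r" (src));
+	asm volatile ("..." : "=r" (dst) : "r" (src) : "memory");
+
+   Outputs, inputs, and clobbers map to the second through fourth
+   arguments of c_expand_asm_operands; the ridpointers test passes
+   along whether `volatile' was written.  */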
+case 622:
+#line 3309 "parse.y"
+{ emit_line_note (input_filename, lineno);
+ expand_computed_goto (yyvsp[-1].ttype); ;
+ break;}
+case 623:
+#line 3312 "parse.y"
+{ tree decl;
+ emit_line_note (input_filename, lineno);
+ decl = lookup_label (yyvsp[-1].ttype);
+ TREE_USED (decl) = 1;
+ expand_goto (decl); ;
+ break;}
+case 624:
+#line 3318 "parse.y"
+{ finish_stmt (); ;
+ break;}
+case 625:
+#line 3320 "parse.y"
+{ error ("label must be followed by statement");
+ yyungetc ('}', 0);
+ finish_stmt (); ;
+ break;}
+case 626:
+#line 3324 "parse.y"
+{ finish_stmt (); ;
+ break;}
+case 628:
+#line 3330 "parse.y"
+{ expand_start_try_stmts (); ;
+ break;}
+case 629:
+#line 3332 "parse.y"
+{ expand_end_try_stmts ();
+ expand_start_all_catch (); ;
+ break;}
+case 630:
+#line 3335 "parse.y"
+{ expand_end_all_catch (); ;
+ break;}
+case 631:
+#line 3343 "parse.y"
+{ expand_end_bindings (0,1,1);
+ poplevel (2,0,0);
+ ;
+ break;}
+case 632:
+#line 3347 "parse.y"
+{ expand_end_bindings (0,1,1);
+ poplevel (2,0,0);
+ ;
+ break;}
+case 633:
+#line 3351 "parse.y"
+{ expand_end_bindings (0,1,1);
+ poplevel (2,0,0);
+ ;
+ break;}
+case 635:
+#line 3359 "parse.y"
+{ emit_line_note (input_filename, lineno); ;
+ break;}
+case 636:
+#line 3361 "parse.y"
+{ expand_end_catch_block (); ;
+ break;}
+case 639:
+#line 3371 "parse.y"
+{ expand_start_catch_block (NULL_TREE, NULL_TREE); ;
+ break;}
+case 640:
+#line 3383 "parse.y"
+{ expand_start_catch_block (TREE_PURPOSE (yyvsp[-1].ttype),
+ TREE_VALUE (yyvsp[-1].ttype)); ;
+ break;}
+case 641:
+#line 3389 "parse.y"
+{ tree label;
+ do_label:
+ label = define_label (input_filename, lineno, yyvsp[-1].ttype);
+ if (label)
+ expand_label (label);
+ ;
+ break;}
+case 642:
+#line 3396 "parse.y"
+{ goto do_label; ;
+ break;}
+case 643:
+#line 3401 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 644:
+#line 3403 "parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 645:
+#line 3405 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 646:
+#line 3410 "parse.y"
+{ yyval.itype = 0; ;
+ break;}
+case 647:
+#line 3412 "parse.y"
+{ yyval.itype = 0; ;
+ break;}
+case 648:
+#line 3414 "parse.y"
+{ yyval.itype = 1; ;
+ break;}
+case 649:
+#line 3416 "parse.y"
+{ yyval.itype = -1; ;
+ break;}
+case 650:
+#line 3423 "parse.y"
+{ emit_line_note (input_filename, lineno);
+ yyval.ttype = NULL_TREE; ;
+ break;}
+case 651:
+#line 3426 "parse.y"
+{ emit_line_note (input_filename, lineno); ;
+ break;}
+case 652:
+#line 3431 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 654:
+#line 3434 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 655:
+#line 3440 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 658:
+#line 3447 "parse.y"
+{ yyval.ttype = chainon (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 659:
+#line 3452 "parse.y"
+{ yyval.ttype = build_tree_list (yyval.ttype, yyvsp[-1].ttype); ;
+ break;}
+case 660:
+#line 3457 "parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyval.ttype, NULL_TREE); ;
+ break;}
+case 661:
+#line 3459 "parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyval.ttype); ;
+ break;}
+case 662:
+#line 3469 "parse.y"
+{
+ if (strict_prototype)
+ yyval.ttype = void_list_node;
+ else
+ yyval.ttype = NULL_TREE;
+ ;
+ break;}
+case 664:
+#line 3477 "parse.y"
+{ yyval.ttype = tree_cons (NULL_TREE, yyval.ttype, void_list_node);
+ TREE_PARMLIST (yyval.ttype) = 1; ;
+ break;}
+case 665:
+#line 3485 "parse.y"
+{
+ yyval.ttype = chainon (yyval.ttype, void_list_node);
+ TREE_PARMLIST (yyval.ttype) = 1;
+ ;
+ break;}
+case 666:
+#line 3490 "parse.y"
+{
+ TREE_PARMLIST (yyval.ttype) = 1;
+ ;
+ break;}
+case 667:
+#line 3495 "parse.y"
+{
+ TREE_PARMLIST (yyval.ttype) = 1;
+ ;
+ break;}
+case 668:
+#line 3499 "parse.y"
+{
+ yyval.ttype = build_tree_list (NULL_TREE, yyval.ttype);
+ TREE_PARMLIST (yyval.ttype) = 1;
+ ;
+ break;}
+case 669:
+#line 3504 "parse.y"
+{
+ /* ARM $8.2.5 has this as a boxed-off comment. */
+ if (pedantic)
+ warning ("use of `...' without a first argument is non-portable");
+ yyval.ttype = NULL_TREE;
+ ;
+ break;}
+case 670:
+#line 3511 "parse.y"
+{
+ TREE_PARMLIST (yyval.ttype) = 1;
+ ;
+ break;}
+case 671:
+#line 3515 "parse.y"
+{
+ TREE_PARMLIST (yyval.ttype) = 1;
+ ;
+ break;}
+case 672:
+#line 3519 "parse.y"
+{
+ yyval.ttype = build_tree_list (NULL_TREE, yyval.ttype);
+ TREE_PARMLIST (yyval.ttype) = 1;
+ ;
+ break;}
+case 673:
+#line 3524 "parse.y"
+{
+ /* This helps us recover from really nasty
+ parse errors, for example, a missing right
+ parenthesis. */
+ yyerror ("possibly missing ')'");
+ yyval.ttype = chainon (yyval.ttype, void_list_node);
+ TREE_PARMLIST (yyval.ttype) = 1;
+ yyungetc (':', 0);
+ yychar = ')';
+ ;
+ break;}
+case 674:
+#line 3535 "parse.y"
+{
+ /* This helps us recover from really nasty
+ parse errors, for example, a missing right
+ parenthesis. */
+ yyerror ("possibly missing ')'");
+ yyval.ttype = tree_cons (NULL_TREE, yyval.ttype, void_list_node);
+ TREE_PARMLIST (yyval.ttype) = 1;
+ yyungetc (':', 0);
+ yychar = ')';
+ ;
+ break;}
+case 675:
+#line 3550 "parse.y"
+{ yyval.ttype = build_tree_list (NULL_TREE, yyval.ttype); ;
+ break;}
+case 676:
+#line 3552 "parse.y"
+{ yyval.ttype = build_tree_list (yyvsp[0].ttype, yyval.ttype); ;
+ break;}
+case 677:
+#line 3554 "parse.y"
+{ yyval.ttype = chainon (yyval.ttype, build_tree_list (NULL_TREE, yyvsp[0].ttype)); ;
+ break;}
+case 678:
+#line 3556 "parse.y"
+{ yyval.ttype = chainon (yyval.ttype, build_tree_list (yyvsp[0].ttype, yyvsp[-2].ttype)); ;
+ break;}
+case 679:
+#line 3558 "parse.y"
+{ yyval.ttype = chainon (yyval.ttype, build_tree_list (NULL_TREE, yyvsp[0].ttype)); ;
+ break;}
+case 680:
+#line 3560 "parse.y"
+{ yyval.ttype = chainon (yyval.ttype, build_tree_list (yyvsp[0].ttype, yyvsp[-2].ttype)); ;
+ break;}
+case 682:
+#line 3566 "parse.y"
+{ yyval.ttype = build_tree_list (NULL_TREE, yyval.ttype); ;
+ break;}
+case 683:
+#line 3589 "parse.y"
+{ yyval.ttype = build_tree_list (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 684:
+#line 3591 "parse.y"
+{ yyval.ttype = build_tree_list (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 685:
+#line 3593 "parse.y"
+{ yyval.ttype = build_tree_list (get_decl_list (yyval.ttype), yyvsp[0].ttype); ;
+ break;}
+case 686:
+#line 3595 "parse.y"
+{ yyval.ttype = build_tree_list (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 687:
+#line 3597 "parse.y"
+{ yyval.ttype = build_tree_list (yyval.ttype, NULL_TREE); ;
+ break;}
+case 688:
+#line 3599 "parse.y"
+{ yyval.ttype = build_tree_list (yyval.ttype, yyvsp[0].ttype); ;
+ break;}
+case 691:
+#line 3608 "parse.y"
+{ see_typename (); ;
+ break;}
+case 692:
+#line 3631 "parse.y"
+{
+ warning ("type specifier omitted for parameter");
+ yyval.ttype = build_tree_list (TREE_PURPOSE (TREE_VALUE (yyvsp[-1].ttype)), NULL_TREE);
+ ;
+ break;}
+case 693:
+#line 3636 "parse.y"
+{
+ warning ("type specifier omitted for parameter");
+ yyval.ttype = build_tree_list (TREE_PURPOSE (TREE_VALUE (yyvsp[-2].ttype)), yyval.ttype);
+ ;
+ break;}
+case 694:
+#line 3644 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 695:
+#line 3646 "parse.y"
+{ yyval.ttype = yyvsp[-1].ttype; ;
+ break;}
+case 696:
+#line 3651 "parse.y"
+{ yyval.ttype = build_decl_list (NULL_TREE, yyval.ttype); ;
+ break;}
+case 698:
+#line 3657 "parse.y"
+{
+ TREE_CHAIN (yyvsp[0].ttype) = yyval.ttype;
+ yyval.ttype = yyvsp[0].ttype;
+ ;
+ break;}
+case 699:
+#line 3665 "parse.y"
+{ yyval.ttype = NULL_TREE; ;
+ break;}
+case 700:
+#line 3667 "parse.y"
+{ yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 701:
+#line 3669 "parse.y"
+{ yyval.ttype = make_reference_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 702:
+#line 3671 "parse.y"
+{ tree arg = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype);
+ yyval.ttype = build_parse_node (SCOPE_REF, yyvsp[-2].ttype, arg);
+ ;
+ break;}
+case 703:
+#line 3677 "parse.y"
+{ got_scope = NULL_TREE; ;
+ break;}
+case 704:
+#line 3682 "parse.y"
+{ yyval.ttype = ansi_opname[MULT_EXPR]; ;
+ break;}
+case 705:
+#line 3684 "parse.y"
+{ yyval.ttype = ansi_opname[TRUNC_DIV_EXPR]; ;
+ break;}
+case 706:
+#line 3686 "parse.y"
+{ yyval.ttype = ansi_opname[TRUNC_MOD_EXPR]; ;
+ break;}
+case 707:
+#line 3688 "parse.y"
+{ yyval.ttype = ansi_opname[PLUS_EXPR]; ;
+ break;}
+case 708:
+#line 3690 "parse.y"
+{ yyval.ttype = ansi_opname[MINUS_EXPR]; ;
+ break;}
+case 709:
+#line 3692 "parse.y"
+{ yyval.ttype = ansi_opname[BIT_AND_EXPR]; ;
+ break;}
+case 710:
+#line 3694 "parse.y"
+{ yyval.ttype = ansi_opname[BIT_IOR_EXPR]; ;
+ break;}
+case 711:
+#line 3696 "parse.y"
+{ yyval.ttype = ansi_opname[BIT_XOR_EXPR]; ;
+ break;}
+case 712:
+#line 3698 "parse.y"
+{ yyval.ttype = ansi_opname[BIT_NOT_EXPR]; ;
+ break;}
+case 713:
+#line 3700 "parse.y"
+{ yyval.ttype = ansi_opname[COMPOUND_EXPR]; ;
+ break;}
+case 714:
+#line 3702 "parse.y"
+{ yyval.ttype = ansi_opname[yyvsp[0].code]; ;
+ break;}
+case 715:
+#line 3704 "parse.y"
+{ yyval.ttype = ansi_opname[LT_EXPR]; ;
+ break;}
+case 716:
+#line 3706 "parse.y"
+{ yyval.ttype = ansi_opname[GT_EXPR]; ;
+ break;}
+case 717:
+#line 3708 "parse.y"
+{ yyval.ttype = ansi_opname[yyvsp[0].code]; ;
+ break;}
+case 718:
+#line 3710 "parse.y"
+{ yyval.ttype = ansi_assopname[yyvsp[0].code]; ;
+ break;}
+case 719:
+#line 3712 "parse.y"
+{ yyval.ttype = ansi_opname [MODIFY_EXPR]; ;
+ break;}
+case 720:
+#line 3714 "parse.y"
+{ yyval.ttype = ansi_opname[yyvsp[0].code]; ;
+ break;}
+case 721:
+#line 3716 "parse.y"
+{ yyval.ttype = ansi_opname[yyvsp[0].code]; ;
+ break;}
+case 722:
+#line 3718 "parse.y"
+{ yyval.ttype = ansi_opname[POSTINCREMENT_EXPR]; ;
+ break;}
+case 723:
+#line 3720 "parse.y"
+{ yyval.ttype = ansi_opname[PREDECREMENT_EXPR]; ;
+ break;}
+case 724:
+#line 3722 "parse.y"
+{ yyval.ttype = ansi_opname[TRUTH_ANDIF_EXPR]; ;
+ break;}
+case 725:
+#line 3724 "parse.y"
+{ yyval.ttype = ansi_opname[TRUTH_ORIF_EXPR]; ;
+ break;}
+case 726:
+#line 3726 "parse.y"
+{ yyval.ttype = ansi_opname[TRUTH_NOT_EXPR]; ;
+ break;}
+case 727:
+#line 3728 "parse.y"
+{ yyval.ttype = ansi_opname[COND_EXPR]; ;
+ break;}
+case 728:
+#line 3730 "parse.y"
+{ yyval.ttype = ansi_opname[yyvsp[0].code]; ;
+ break;}
+case 729:
+#line 3732 "parse.y"
+{ yyval.ttype = ansi_opname[COMPONENT_REF]; ;
+ break;}
+case 730:
+#line 3734 "parse.y"
+{ yyval.ttype = ansi_opname[MEMBER_REF]; ;
+ break;}
+case 731:
+#line 3736 "parse.y"
+{ yyval.ttype = ansi_opname[CALL_EXPR]; ;
+ break;}
+case 732:
+#line 3738 "parse.y"
+{ yyval.ttype = ansi_opname[ARRAY_REF]; ;
+ break;}
+case 733:
+#line 3740 "parse.y"
+{ yyval.ttype = ansi_opname[NEW_EXPR]; ;
+ break;}
+case 734:
+#line 3742 "parse.y"
+{ yyval.ttype = ansi_opname[DELETE_EXPR]; ;
+ break;}
+case 735:
+#line 3744 "parse.y"
+{ yyval.ttype = ansi_opname[VEC_NEW_EXPR]; ;
+ break;}
+case 736:
+#line 3746 "parse.y"
+{ yyval.ttype = ansi_opname[VEC_DELETE_EXPR]; ;
+ break;}
+case 737:
+#line 3749 "parse.y"
+{ yyval.ttype = grokoptypename (yyvsp[-1].ttype, yyvsp[0].ttype); ;
+ break;}
+case 738:
+#line 3751 "parse.y"
+{ yyval.ttype = ansi_opname[ERROR_MARK]; ;
+ break;}
+}
+ /* the action file gets copied in, in place of this dollarsign */
+#line 480 "/usr/local/lib/bison.simple"
+
+ yyvsp -= yylen;
+ yyssp -= yylen;
+#ifdef YYLSP_NEEDED
+ yylsp -= yylen;
+#endif
+
+#if YYDEBUG != 0
+ if (yydebug)
+ {
+ short *ssp1 = yyss - 1;
+ fprintf (stderr, "state stack now");
+ while (ssp1 != yyssp)
+ fprintf (stderr, " %d", *++ssp1);
+ fprintf (stderr, "\n");
+ }
+#endif
+
+ *++yyvsp = yyval;
+
+#ifdef YYLSP_NEEDED
+ yylsp++;
+ if (yylen == 0)
+ {
+ yylsp->first_line = yylloc.first_line;
+ yylsp->first_column = yylloc.first_column;
+ yylsp->last_line = (yylsp-1)->last_line;
+ yylsp->last_column = (yylsp-1)->last_column;
+ yylsp->text = 0;
+ }
+ else
+ {
+ yylsp->last_line = (yylsp+yylen-1)->last_line;
+ yylsp->last_column = (yylsp+yylen-1)->last_column;
+ }
+#endif
+
+ /* Now "shift" the result of the reduction.
+ Determine what state that goes to,
+ based on the state we popped back to
+ and the rule number reduced by. */
+
+ yyn = yyr1[yyn];
+
+ yystate = yypgoto[yyn - YYNTBASE] + *yyssp;
+ if (yystate >= 0 && yystate <= YYLAST && yycheck[yystate] == *yyssp)
+ yystate = yytable[yystate];
+ else
+ yystate = yydefgoto[yyn - YYNTBASE];
+
+ goto yynewstate;
+
+yyerrlab: /* here on detecting error */
+
+ if (! yyerrstatus)
+ /* If not already recovering from an error, report this error. */
+ {
+ ++yynerrs;
+
+#ifdef YYERROR_VERBOSE
+ yyn = yypact[yystate];
+
+ if (yyn > YYFLAG && yyn < YYLAST)
+ {
+ int size = 0;
+ char *msg;
+ int x, count;
+
+ count = 0;
+	  /* Start X at -yyn if necessary to avoid negative indexes in yycheck. */
+ for (x = (yyn < 0 ? -yyn : 0);
+ x < (sizeof(yytname) / sizeof(char *)); x++)
+ if (yycheck[x + yyn] == x)
+ size += strlen(yytname[x]) + 15, count++;
+ msg = (char *) malloc(size + 15);
+ if (msg != 0)
+ {
+ strcpy(msg, "parse error");
+
+ if (count < 5)
+ {
+ count = 0;
+ for (x = (yyn < 0 ? -yyn : 0);
+ x < (sizeof(yytname) / sizeof(char *)); x++)
+ if (yycheck[x + yyn] == x)
+ {
+ strcat(msg, count == 0 ? ", expecting `" : " or `");
+ strcat(msg, yytname[x]);
+ strcat(msg, "'");
+ count++;
+ }
+ }
+ yyerror(msg);
+ free(msg);
+ }
+ else
+ yyerror ("parse error; also virtual memory exceeded");
+ }
+ else
+#endif /* YYERROR_VERBOSE */
+ yyerror("parse error");
+ }
+
+ goto yyerrlab1;
+yyerrlab1: /* here on error raised explicitly by an action */
+
+ if (yyerrstatus == 3)
+ {
+      /* If we just tried and failed to reuse the lookahead token after an error, discard it. */
+
+ /* return failure if at end of input */
+ if (yychar == YYEOF)
+ YYABORT;
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Discarding token %d (%s).\n", yychar, yytname[yychar1]);
+#endif
+
+ yychar = YYEMPTY;
+ }
+
+ /* Else will try to reuse lookahead token
+ after shifting the error token. */
+
+ yyerrstatus = 3; /* Each real token shifted decrements this */
+
+ goto yyerrhandle;
+
+yyerrdefault: /* current state does not do anything special for the error token. */
+
+#if 0
+ /* This is wrong; only states that explicitly want error tokens
+ should shift them. */
+  yyn = yydefact[yystate];  /* If its default is to accept any token, ok. Otherwise pop it. */
+ if (yyn) goto yydefault;
+#endif
+
+yyerrpop: /* pop the current state because it cannot handle the error token */
+
+ if (yyssp == yyss) YYABORT;
+ yyvsp--;
+ yystate = *--yyssp;
+#ifdef YYLSP_NEEDED
+ yylsp--;
+#endif
+
+#if YYDEBUG != 0
+ if (yydebug)
+ {
+ short *ssp1 = yyss - 1;
+ fprintf (stderr, "Error: state stack now");
+ while (ssp1 != yyssp)
+ fprintf (stderr, " %d", *++ssp1);
+ fprintf (stderr, "\n");
+ }
+#endif
+
+yyerrhandle:
+
+ yyn = yypact[yystate];
+ if (yyn == YYFLAG)
+ goto yyerrdefault;
+
+ yyn += YYTERROR;
+ if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != YYTERROR)
+ goto yyerrdefault;
+
+ yyn = yytable[yyn];
+ if (yyn < 0)
+ {
+ if (yyn == YYFLAG)
+ goto yyerrpop;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+ else if (yyn == 0)
+ goto yyerrpop;
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Shifting error token, ");
+#endif
+
+ *++yyvsp = yylval;
+#ifdef YYLSP_NEEDED
+ *++yylsp = yylloc;
+#endif
+
+ yystate = yyn;
+ goto yynewstate;
+}
+#line 3754 "parse.y"
+
diff --git a/gnu/usr.bin/cc/cc1plus/parse.h b/gnu/usr.bin/cc/cc1plus/parse.h
new file mode 100644
index 0000000..18ef379
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/parse.h
@@ -0,0 +1,84 @@
+typedef union {long itype; tree ttype; char *strtype; enum tree_code code; } YYSTYPE;
+#define IDENTIFIER 258
+#define TYPENAME 259
+#define SCSPEC 260
+#define TYPESPEC 261
+#define TYPE_QUAL 262
+#define CONSTANT 263
+#define STRING 264
+#define ELLIPSIS 265
+#define SIZEOF 266
+#define ENUM 267
+#define IF 268
+#define ELSE 269
+#define WHILE 270
+#define DO 271
+#define FOR 272
+#define SWITCH 273
+#define CASE 274
+#define DEFAULT 275
+#define BREAK 276
+#define CONTINUE 277
+#define RETURN 278
+#define GOTO 279
+#define ASM_KEYWORD 280
+#define GCC_ASM_KEYWORD 281
+#define TYPEOF 282
+#define ALIGNOF 283
+#define HEADOF 284
+#define CLASSOF 285
+#define SIGOF 286
+#define ATTRIBUTE 287
+#define EXTENSION 288
+#define LABEL 289
+#define AGGR 290
+#define VISSPEC 291
+#define DELETE 292
+#define NEW 293
+#define OVERLOAD 294
+#define THIS 295
+#define OPERATOR 296
+#define CXX_TRUE 297
+#define CXX_FALSE 298
+#define LEFT_RIGHT 299
+#define TEMPLATE 300
+#define TYPEID 301
+#define DYNAMIC_CAST 302
+#define STATIC_CAST 303
+#define REINTERPRET_CAST 304
+#define CONST_CAST 305
+#define SCOPE 306
+#define EMPTY 307
+#define PTYPENAME 308
+#define ASSIGN 309
+#define OROR 310
+#define ANDAND 311
+#define MIN_MAX 312
+#define EQCOMPARE 313
+#define ARITHCOMPARE 314
+#define LSHIFT 315
+#define RSHIFT 316
+#define POINTSAT_STAR 317
+#define DOT_STAR 318
+#define UNARY 319
+#define PLUSPLUS 320
+#define MINUSMINUS 321
+#define HYPERUNARY 322
+#define PAREN_STAR_PAREN 323
+#define POINTSAT 324
+#define TRY 325
+#define CATCH 326
+#define THROW 327
+#define TYPENAME_ELLIPSIS 328
+#define PRE_PARSED_FUNCTION_DECL 329
+#define EXTERN_LANG_STRING 330
+#define ALL 331
+#define PRE_PARSED_CLASS_DECL 332
+#define TYPENAME_DEFN 333
+#define IDENTIFIER_DEFN 334
+#define PTYPENAME_DEFN 335
+#define END_OF_SAVED_INPUT 336
+
+
+extern YYSTYPE yylval;
+#define YYEMPTY -2
diff --git a/gnu/usr.bin/cc/cc1plus/pt.c b/gnu/usr.bin/cc/cc1plus/pt.c
new file mode 100644
index 0000000..c808cf4
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/pt.c
@@ -0,0 +1,2465 @@
+/* Handle parameterized types (templates) for GNU C++.
+ Copyright (C) 1992, 1993 Free Software Foundation, Inc.
+ Written by Ken Raeburn (raeburn@cygnus.com) while at Watchmaker Computing.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Known bugs or deficiencies include:
+ * templates for class static data don't work (methods only)
+ * duplicated method templates can crash the compiler
+ * interface/impl data is taken from the file defining the template
+ * all methods must be provided in header files; can't use a source
+ file that contains only the method templates and "just win"
+ * method templates must be seen before the expansion of the
+ class template is done
+ */
+
+#include "config.h"
+#include <stdio.h>
+#include "obstack.h"
+
+#include "tree.h"
+#include "flags.h"
+#include "cp-tree.h"
+#include "decl.h"
+#include "parse.h"
+#include "lex.h"
+
+extern struct obstack permanent_obstack;
+extern tree grokdeclarator ();
+
+extern int lineno;
+extern char *input_filename;
+struct pending_inline *pending_template_expansions;
+
+int processing_template_decl;
+int processing_template_defn;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+static int unify ();
+static void add_pending_template ();
+
+void overload_template_name (), pop_template_decls ();
+
+/* We've got a template header coming up; set obstacks up to save the
+ nodes created permanently. (There might be cases with nested templates
+ where we don't have to do this, but they aren't implemented, and it
+ probably wouldn't be worth the effort.) */
+void
+begin_template_parm_list ()
+{
+ pushlevel (0);
+ push_obstacks (&permanent_obstack, &permanent_obstack);
+ pushlevel (0);
+}
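+
+/* For instance (hypothetical input), on reaching the header of
+     template <class T, int N> class Buf { ... };
+   this runs before T and N are parsed, so the nodes built for them
+   live on permanent_obstack and survive past the declaration.  */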
+
+/* Process information from new template parameter NEXT and append it to the
+ LIST being built. The rules for use of a template parameter type name
+ by later parameters are not well-defined for us just yet. However, the
+ only way to avoid having to parse expressions of unknown complexity (and
+ with tokens of unknown types) is to disallow it completely. So for now,
+ that is what is assumed. */
+tree
+process_template_parm (list, next)
+ tree list, next;
+{
+ tree parm;
+ tree decl = 0;
+ int is_type;
+ parm = next;
+ my_friendly_assert (TREE_CODE (parm) == TREE_LIST, 259);
+ is_type = TREE_CODE (TREE_PURPOSE (parm)) == IDENTIFIER_NODE;
+ if (!is_type)
+ {
+ tree tinfo = 0;
+ parm = TREE_PURPOSE (parm);
+ my_friendly_assert (TREE_CODE (parm) == TREE_LIST, 260);
+ parm = TREE_VALUE (parm);
+      /* This is a constant (non-type) parameter.  */
+ parm = grokdeclarator (TREE_VALUE (next), TREE_PURPOSE (next),
+ PARM, 0, NULL_TREE);
+ /* A template parameter is not modifiable. */
+ TREE_READONLY (parm) = 1;
+ if (TREE_CODE (TREE_TYPE (parm)) == RECORD_TYPE
+ || TREE_CODE (TREE_TYPE (parm)) == UNION_TYPE)
+ {
+ sorry ("aggregate template parameter types");
+ TREE_TYPE (parm) = void_type_node;
+ }
+ tinfo = make_node (TEMPLATE_CONST_PARM);
+ my_friendly_assert (TREE_PERMANENT (tinfo), 260.5);
+ if (TREE_PERMANENT (parm) == 0)
+ {
+ parm = copy_node (parm);
+ TREE_PERMANENT (parm) = 1;
+ }
+ TREE_TYPE (tinfo) = TREE_TYPE (parm);
+ decl = build_decl (CONST_DECL, DECL_NAME (parm), TREE_TYPE (parm));
+ DECL_INITIAL (decl) = tinfo;
+ DECL_INITIAL (parm) = tinfo;
+ }
+ else
+ {
+ tree t = make_node (TEMPLATE_TYPE_PARM);
+ decl = build_decl (TYPE_DECL, TREE_PURPOSE (parm), t);
+ TYPE_NAME (t) = decl;
+ TREE_VALUE (parm) = t;
+ }
+ pushdecl (decl);
+ return chainon (list, parm);
+}
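+
+/* Illustration (hypothetical): in `template <class T, int N>', T has an
+   IDENTIFIER_NODE as TREE_PURPOSE and so becomes a TEMPLATE_TYPE_PARM
+   whose TYPE_NAME is a TYPE_DECL, while N goes through grokdeclarator,
+   is marked TREE_READONLY, and gets a TEMPLATE_CONST_PARM node in the
+   DECL_INITIAL of both the PARM_DECL and the CONST_DECL.  */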
+
+/* The end of a template parameter list has been reached. Process the
+ tree list into a parameter vector, converting each parameter into a more
+ useful form. Type parameters are saved as IDENTIFIER_NODEs, and others
+ as PARM_DECLs. */
+
+tree
+end_template_parm_list (parms)
+ tree parms;
+{
+ int nparms = 0;
+ tree saved_parmlist;
+ tree parm;
+ for (parm = parms; parm; parm = TREE_CHAIN (parm))
+ nparms++;
+ saved_parmlist = make_tree_vec (nparms);
+
+ for (parm = parms, nparms = 0; parm; parm = TREE_CHAIN (parm), nparms++)
+ {
+ tree p = parm;
+ if (TREE_CODE (p) == TREE_LIST)
+ {
+ tree t = TREE_VALUE (p);
+ TREE_VALUE (p) = NULL_TREE;
+ p = TREE_PURPOSE (p);
+ my_friendly_assert (TREE_CODE (p) == IDENTIFIER_NODE, 261);
+ TEMPLATE_TYPE_SET_INFO (t, saved_parmlist, nparms);
+ }
+ else
+ {
+ tree tinfo = DECL_INITIAL (p);
+ DECL_INITIAL (p) = NULL_TREE;
+ TEMPLATE_CONST_SET_INFO (tinfo, saved_parmlist, nparms);
+ }
+ TREE_VEC_ELT (saved_parmlist, nparms) = p;
+ }
+ set_current_level_tags_transparency (1);
+ processing_template_decl++;
+ return saved_parmlist;
+}
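+
+/* E.g. (hypothetical), for `template <class T, int N>' the resulting
+   TREE_VEC holds T's IDENTIFIER_NODE in slot 0 and N's PARM_DECL in
+   slot 1; the TEMPLATE_*_SET_INFO calls record the vector and index so
+   substitution can find each parameter later.  */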
+
+/* end_template_decl is called after a template declaration is seen.
+ D1 is template header; D2 is class_head_sans_basetype or a
+ TEMPLATE_DECL with its DECL_RESULT field set. */
+void
+end_template_decl (d1, d2, is_class, defn)
+ tree d1, d2, is_class;
+ int defn;
+{
+ tree decl;
+ struct template_info *tmpl;
+
+ tmpl = (struct template_info *) obstack_alloc (&permanent_obstack,
+ sizeof (struct template_info));
+ tmpl->text = 0;
+ tmpl->length = 0;
+ tmpl->aggr = is_class;
+
+ /* cloned from reinit_parse_for_template */
+ tmpl->filename = input_filename;
+ tmpl->lineno = lineno;
+ tmpl->parm_vec = d1; /* [eichin:19911015.2306EST] */
+
+ if (d2 == NULL_TREE || d2 == error_mark_node)
+ {
+ decl = 0;
+ goto lose;
+ }
+
+ if (is_class)
+ {
+ decl = build_lang_decl (TEMPLATE_DECL, d2, NULL_TREE);
+ GNU_xref_decl (current_function_decl, decl);
+ }
+ else
+ {
+ if (TREE_CODE (d2) == TEMPLATE_DECL)
+ decl = d2;
+ else
+ {
+ /* Class destructor templates and operator templates are
+ slipping past as non-template nodes. Process them here, since
+ I haven't figured out where to catch them earlier. I could
+ go do that, but it's a choice between getting that done and
+ staying only N months behind schedule. Sorry.... */
+ enum tree_code code;
+ my_friendly_assert (TREE_CODE (d2) == CALL_EXPR, 263);
+ code = TREE_CODE (TREE_OPERAND (d2, 0));
+ my_friendly_assert (code == BIT_NOT_EXPR
+ || code == OP_IDENTIFIER
+ || code == SCOPE_REF, 264);
+ d2 = grokdeclarator (d2, NULL_TREE, MEMFUNCDEF, 0, NULL_TREE);
+ decl = build_lang_decl (TEMPLATE_DECL, DECL_NAME (d2),
+ TREE_TYPE (d2));
+ DECL_TEMPLATE_RESULT (decl) = d2;
+ DECL_CONTEXT (decl) = DECL_CONTEXT (d2);
+ DECL_CLASS_CONTEXT (decl) = DECL_CLASS_CONTEXT (d2);
+ DECL_NAME (decl) = DECL_NAME (d2);
+ TREE_TYPE (decl) = TREE_TYPE (d2);
+ if (interface_unknown && flag_external_templates && ! DECL_IN_SYSTEM_HEADER (decl))
+ warn_if_unknown_interface ();
+ TREE_PUBLIC (decl) = TREE_PUBLIC (d2) = flag_external_templates && !interface_unknown;
+ DECL_EXTERNAL (decl) = (DECL_EXTERNAL (d2)
+ && !(DECL_CLASS_CONTEXT (d2)
+ && !DECL_THIS_EXTERN (d2)));
+ }
+
+ /* All routines creating TEMPLATE_DECL nodes should now be using
+ build_lang_decl, which will have set this up already. */
+ my_friendly_assert (DECL_LANG_SPECIFIC (decl) != 0, 265);
+
+ /* @@ Somewhere, permanent allocation isn't being used. */
+ if (! DECL_TEMPLATE_IS_CLASS (decl)
+ && TREE_CODE (DECL_TEMPLATE_RESULT (decl)) == FUNCTION_DECL)
+ {
+ tree result = DECL_TEMPLATE_RESULT (decl);
+ /* Will do nothing if allocation was already permanent. */
+ DECL_ARGUMENTS (result) = copy_to_permanent (DECL_ARGUMENTS (result));
+ }
+
+ /* If this is for a method, there's an extra binding level here. */
+ if (DECL_CONTEXT (DECL_TEMPLATE_RESULT (decl)) != NULL_TREE)
+ {
+ /* @@ Find out where this should be getting set! */
+ tree r = DECL_TEMPLATE_RESULT (decl);
+ if (DECL_LANG_SPECIFIC (r) && DECL_CLASS_CONTEXT (r) == NULL_TREE)
+ DECL_CLASS_CONTEXT (r) = DECL_CONTEXT (r);
+ }
+ }
+ DECL_TEMPLATE_INFO (decl) = tmpl;
+ DECL_TEMPLATE_PARMS (decl) = d1;
+
+ /* So that duplicate_decls can do the right thing. */
+ if (defn)
+ DECL_INITIAL (decl) = error_mark_node;
+
+ /* If context of decl is non-null (i.e., method template), add it
+ to the appropriate class template, and pop the binding levels. */
+ if (! is_class && DECL_CONTEXT (DECL_TEMPLATE_RESULT (decl)) != NULL_TREE)
+ {
+ tree ctx = DECL_CONTEXT (DECL_TEMPLATE_RESULT (decl));
+ tree tmpl, t;
+ my_friendly_assert (TREE_CODE (ctx) == UNINSTANTIATED_P_TYPE, 266);
+ tmpl = UPT_TEMPLATE (ctx);
+ for (t = DECL_TEMPLATE_MEMBERS (tmpl); t; t = TREE_CHAIN (t))
+ if (TREE_PURPOSE (t) == DECL_NAME (decl)
+ && duplicate_decls (decl, TREE_VALUE (t)))
+ goto already_there;
+ DECL_TEMPLATE_MEMBERS (tmpl) =
+ perm_tree_cons (DECL_NAME (decl), decl, DECL_TEMPLATE_MEMBERS (tmpl));
+ already_there:
+ poplevel (0, 0, 0);
+ poplevel (0, 0, 0);
+ }
+ /* Otherwise, go back to top level first, and push the template decl
+ again there. */
+ else
+ {
+ poplevel (0, 0, 0);
+ poplevel (0, 0, 0);
+ pushdecl (decl);
+ }
+ lose:
+#if 0 /* It happens sometimes, with syntactic or semantic errors.
+
+ One specific case:
+ template <class A, int X, int Y> class Foo { ... };
+ template <class A, int X, int y> Foo<X,Y>::method (Foo& x) { ... }
+ Note the missing "A" in the class containing "method". */
+ my_friendly_assert (global_bindings_p (), 267);
+#else
+ while (! global_bindings_p ())
+ poplevel (0, 0, 0);
+#endif
+ pop_obstacks ();
+ processing_template_decl--;
+ (void) get_pending_sizes ();
+}
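+
+/* E.g. (hypothetical), for a method template
+     template <class T> int Vec<T>::len () { ... }
+   the result's context is an UNINSTANTIATED_P_TYPE, so the new
+   TEMPLATE_DECL is chained onto DECL_TEMPLATE_MEMBERS of Vec's template
+   rather than pushed at top level.  */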
+
+/* If TYPE contains a template parm type, then substitute that type
+ with its actual type that is found in TVEC. */
+static void
+grok_template_type (tvec, type)
+ tree tvec;
+ tree* type;
+{
+ switch (TREE_CODE (*type))
+ {
+ case TEMPLATE_TYPE_PARM:
+ if (*type != TYPE_MAIN_VARIANT (*type))
+ {
+	  /* We are here for cases like `const T*', etc.  */
+ grok_template_type (tvec, &TYPE_MAIN_VARIANT (*type));
+ *type = c_build_type_variant (TYPE_MAIN_VARIANT (*type),
+ TYPE_READONLY (*type),
+ TYPE_VOLATILE (*type));
+ }
+ else
+ *type = TREE_VEC_ELT (tvec, TEMPLATE_TYPE_IDX (*type));
+ return;
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ grok_template_type (tvec, &TREE_TYPE (*type));
+ return;
+ case FUNCTION_TYPE:
+ {
+ tree p;
+
+ /* take care of function's return type first */
+ grok_template_type (tvec, &TREE_TYPE (*type));
+
+ /* take care of function's arguments */
+ for (p = TYPE_ARG_TYPES (*type); p; p = TREE_CHAIN (p))
+ grok_template_type (tvec, &TREE_VALUE (p));
+ return;
+ }
+ default:
+ break;
+ }
+ return;
+}
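+
+/* Sketch of one path (hypothetical): with TVEC = {int}, a type written
+   as `const T*' enters as a POINTER_TYPE; we recurse into its target,
+   hit the qualified TEMPLATE_TYPE_PARM, substitute through its main
+   variant, and rebuild the qualifiers, yielding `const int*'.  */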
+
+/* Convert all template arguments to their appropriate types, and return
+ a vector containing the resulting values. If any error occurs, return
+ error_mark_node. */
+static tree
+coerce_template_parms (parms, arglist, in_decl)
+ tree parms, arglist;
+ tree in_decl;
+{
+ int nparms, i, lost = 0;
+ tree vec;
+
+ if (TREE_CODE (arglist) == TREE_VEC)
+ nparms = TREE_VEC_LENGTH (arglist);
+ else
+ nparms = list_length (arglist);
+ if (nparms != TREE_VEC_LENGTH (parms))
+ {
+ error ("incorrect number of parameters (%d, should be %d)",
+ nparms, TREE_VEC_LENGTH (parms));
+ if (in_decl)
+ cp_error_at ("in template expansion for decl `%D'", in_decl);
+ return error_mark_node;
+ }
+
+ if (TREE_CODE (arglist) == TREE_VEC)
+ vec = copy_node (arglist);
+ else
+ {
+ vec = make_tree_vec (nparms);
+ for (i = 0; i < nparms; i++)
+ {
+ tree arg = arglist;
+ arglist = TREE_CHAIN (arglist);
+ if (arg == error_mark_node)
+ lost++;
+ else
+ arg = TREE_VALUE (arg);
+ TREE_VEC_ELT (vec, i) = arg;
+ }
+ }
+ for (i = 0; i < nparms; i++)
+ {
+ tree arg = TREE_VEC_ELT (vec, i);
+ tree parm = TREE_VEC_ELT (parms, i);
+ tree val = 0;
+ int is_type, requires_type;
+
+ is_type = TREE_CODE_CLASS (TREE_CODE (arg)) == 't';
+ requires_type = TREE_CODE (parm) == IDENTIFIER_NODE;
+ if (is_type != requires_type)
+ {
+ if (in_decl)
+ cp_error_at ("type/value mismatch in template parameter list for `%D'", in_decl);
+ lost++;
+ TREE_VEC_ELT (vec, i) = error_mark_node;
+ continue;
+ }
+ if (is_type)
+ val = groktypename (arg);
+ else if (TREE_CODE (arg) == STRING_CST)
+ {
+ cp_error ("string literal %E is not a valid template argument", arg);
+ error ("because it is the address of an object with static linkage");
+ val = error_mark_node;
+ }
+ else
+ {
+ grok_template_type (vec, &TREE_TYPE (parm));
+ val = digest_init (TREE_TYPE (parm), arg, (tree *) 0);
+
+ if (val == error_mark_node)
+ ;
+
+ /* 14.2: Other template-arguments must be constant-expressions,
+ addresses of objects or functions with external linkage, or of
+ static class members. */
+ else if (!TREE_CONSTANT (val))
+ {
+ cp_error ("non-const `%E' cannot be used as template argument",
+ arg);
+ val = error_mark_node;
+ }
+ else if (TREE_CODE (val) == ADDR_EXPR)
+ {
+ tree a = TREE_OPERAND (val, 0);
+ if ((TREE_CODE (a) == VAR_DECL
+ || TREE_CODE (a) == FUNCTION_DECL)
+ && !TREE_PUBLIC (a))
+ {
+ cp_error ("address of non-extern `%E' cannot be used as template argument", a);
+ val = error_mark_node;
+ }
+ }
+ }
+
+ if (val == error_mark_node)
+ lost++;
+
+ TREE_VEC_ELT (vec, i) = val;
+ }
+ if (lost)
+ return error_mark_node;
+ return vec;
+}
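+
+/* E.g. (hypothetical): coercing <int, 5> against `template <class T,
+   int N>' sends `int' through groktypename and digests `5' against N's
+   type; a string literal argument is refused outright because its
+   address has only static linkage, per the 14.2 note above.  */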
+
+/* Given class template name and parameter list, produce a user-friendly name
+ for the instantiation. */
+static char *
+mangle_class_name_for_template (name, parms, arglist)
+ char *name;
+ tree parms, arglist;
+{
+ static struct obstack scratch_obstack;
+ static char *scratch_firstobj;
+ int i, nparms;
+
+ if (!scratch_firstobj)
+ {
+ gcc_obstack_init (&scratch_obstack);
+ scratch_firstobj = obstack_alloc (&scratch_obstack, 1);
+ }
+ else
+ obstack_free (&scratch_obstack, scratch_firstobj);
+
+#if 0
+#define buflen sizeof(buf)
+#define check if (bufp >= buf+buflen-1) goto too_long
+#define ccat(c) *bufp++=(c); check
+#define advance bufp+=strlen(bufp); check
+#define cat(s) strncpy(bufp, s, buf+buflen-bufp-1); advance
+#else
+#define check
+#define ccat(c) obstack_1grow (&scratch_obstack, (c));
+#define advance
+#define cat(s) obstack_grow (&scratch_obstack, (s), strlen (s))
+#endif
+
+ cat (name);
+ ccat ('<');
+ nparms = TREE_VEC_LENGTH (parms);
+ my_friendly_assert (nparms == TREE_VEC_LENGTH (arglist), 268);
+ for (i = 0; i < nparms; i++)
+ {
+ tree parm = TREE_VEC_ELT (parms, i), arg = TREE_VEC_ELT (arglist, i);
+
+ if (i)
+ ccat (',');
+
+ if (TREE_CODE (parm) == IDENTIFIER_NODE)
+ {
+ cat (type_as_string (arg, 0));
+ continue;
+ }
+ else
+ my_friendly_assert (TREE_CODE (parm) == PARM_DECL, 269);
+
+ if (TREE_CODE (arg) == TREE_LIST)
+ {
+ /* New list cell was built because old chain link was in
+ use. */
+ my_friendly_assert (TREE_PURPOSE (arg) == NULL_TREE, 270);
+ arg = TREE_VALUE (arg);
+ }
+ /* No need to check arglist against parmlist here; we did that
+ in coerce_template_parms, called from lookup_template_class. */
+ cat (expr_as_string (arg, 0));
+ }
+ {
+ char *bufp = obstack_next_free (&scratch_obstack);
+ int offset = 0;
+ while (bufp[offset - 1] == ' ')
+ offset--;
+ obstack_blank_fast (&scratch_obstack, offset);
+
+ /* B<C<char> >, not B<C<char>> */
+ if (bufp[offset - 1] == '>')
+ ccat (' ');
+ }
+ ccat ('>');
+ ccat ('\0');
+ return (char *) obstack_base (&scratch_obstack);
+
+#if 0
+ too_long:
+#endif
+ fatal ("out of (preallocated) string space creating template instantiation name");
+ /* NOTREACHED */
+ return NULL;
+}
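+
+/* Sample output (hypothetical): parms (T, int n) with args (char *, 10)
+   give "Stack<char *, 10>"; when the last argument itself ends in `>',
+   the extra space is kept, as in "B<C<char> >" above.  */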
+
+/* Given an IDENTIFIER_NODE (type TEMPLATE_DECL) and a chain of
+ parameters, find the desired type.
+
+ D1 is the PTYPENAME terminal, and ARGLIST is the list of arguments.
+ Since ARGLIST is build on the decl_obstack, we must copy it here
+ to keep it from being reclaimed when the decl storage is reclaimed.
+
+ IN_DECL, if non-NULL, is the template declaration we are trying to
+ instantiate. */
+tree
+lookup_template_class (d1, arglist, in_decl)
+ tree d1, arglist;
+ tree in_decl;
+{
+ tree template, parmlist;
+ char *mangled_name;
+ tree id;
+
+ my_friendly_assert (TREE_CODE (d1) == IDENTIFIER_NODE, 272);
+ template = IDENTIFIER_GLOBAL_VALUE (d1); /* XXX */
+ if (! template)
+ template = IDENTIFIER_CLASS_VALUE (d1);
+ /* With something like `template <class T> class X class X { ... };'
+ we could end up with D1 having nothing but an IDENTIFIER_LOCAL_VALUE.
+ We don't want to do that, but we have to deal with the situation, so
+ let's give them some syntax errors to chew on instead of a crash. */
+ if (! template)
+ return error_mark_node;
+ if (TREE_CODE (template) != TEMPLATE_DECL)
+ {
+ cp_error ("non-template type `%T' used as a template", d1);
+ if (in_decl)
+ cp_error_at ("for template declaration `%D'", in_decl);
+ return error_mark_node;
+ }
+ parmlist = DECL_TEMPLATE_PARMS (template);
+
+ arglist = coerce_template_parms (parmlist, arglist, in_decl);
+ if (arglist == error_mark_node)
+ return error_mark_node;
+ if (uses_template_parms (arglist))
+ {
+ tree t = make_lang_type (UNINSTANTIATED_P_TYPE);
+ tree d;
+ id = make_anon_name ();
+ d = build_decl (TYPE_DECL, id, t);
+ TYPE_NAME (t) = d;
+ TYPE_VALUES (t) = build_tree_list (template, arglist);
+ pushdecl_top_level (d);
+ }
+ else
+ {
+ mangled_name = mangle_class_name_for_template (IDENTIFIER_POINTER (d1),
+ parmlist, arglist);
+ id = get_identifier (mangled_name);
+ }
+ if (!IDENTIFIER_TEMPLATE (id))
+ {
+ arglist = copy_to_permanent (arglist);
+ IDENTIFIER_TEMPLATE (id) = perm_tree_cons (template, arglist, NULL_TREE);
+ }
+ return id;
+}
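+
+/* E.g. (hypothetical): `Stack<int>' comes back as the identifier
+   "Stack<int>" with IDENTIFIER_TEMPLATE set to (TEMPLATE_DECL .
+   coerced args); `Stack<T>' mentioned inside another template instead
+   yields an anonymous name backed by an UNINSTANTIATED_P_TYPE.  */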
+
+void
+push_template_decls (parmlist, arglist, class_level)
+ tree parmlist, arglist;
+ int class_level;
+{
+ int i, nparms;
+
+ /* Don't want to push values into global context. */
+ if (!class_level)
+ {
+ pushlevel (1);
+ declare_pseudo_global_level ();
+ }
+
+ nparms = TREE_VEC_LENGTH (parmlist);
+
+ for (i = 0; i < nparms; i++)
+ {
+ int requires_type, is_type;
+ tree parm = TREE_VEC_ELT (parmlist, i);
+ tree arg = TREE_VEC_ELT (arglist, i);
+ tree decl = 0;
+
+ requires_type = TREE_CODE (parm) == IDENTIFIER_NODE;
+ is_type = TREE_CODE_CLASS (TREE_CODE (arg)) == 't';
+ if (is_type)
+ {
+ /* add typename to namespace */
+ if (!requires_type)
+ {
+ error ("template use error: type provided where value needed");
+ continue;
+ }
+ decl = arg;
+ my_friendly_assert (TREE_CODE_CLASS (TREE_CODE (decl)) == 't', 273);
+ decl = build_decl (TYPE_DECL, parm, decl);
+ }
+ else
+ {
+ /* add const decl to namespace */
+ tree val;
+ if (requires_type)
+ {
+ error ("template use error: value provided where type needed");
+ continue;
+ }
+ val = digest_init (TREE_TYPE (parm), arg, (tree *) 0);
+ if (val != error_mark_node)
+ {
+ decl = build_decl (VAR_DECL, DECL_NAME (parm), TREE_TYPE (parm));
+ DECL_INITIAL (decl) = val;
+ TREE_READONLY (decl) = 1;
+ }
+ }
+ if (decl != 0)
+ {
+ layout_decl (decl, 0);
+ if (class_level)
+ pushdecl_class_level (decl);
+ else
+ pushdecl (decl);
+ }
+ }
+}
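+
+/* E.g. (hypothetical), expanding with parms (T, int N) and args
+   (int, 10): T is pushed as a TYPE_DECL for int and N as a readonly
+   VAR_DECL with DECL_INITIAL 10, so both names resolve while the saved
+   template text is reparsed.  */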
+
+void
+pop_template_decls (parmlist, arglist, class_level)
+ tree parmlist, arglist;
+ int class_level;
+{
+ if (!class_level)
+ poplevel (0, 0, 0);
+}
+
+/* Should be defined in parse.h. */
+extern int yychar;
+
+int
+uses_template_parms (t)
+ tree t;
+{
+ if (!t)
+ return 0;
+ switch (TREE_CODE (t))
+ {
+ case INDIRECT_REF:
+ case COMPONENT_REF:
+ /* We assume that the object must be instantiated in order to build
+ the COMPONENT_REF, so we test only whether the type of the
+ COMPONENT_REF uses template parms. */
+ return uses_template_parms (TREE_TYPE (t));
+
+ case IDENTIFIER_NODE:
+ if (!IDENTIFIER_TEMPLATE (t))
+ return 0;
+ return uses_template_parms (TREE_VALUE (IDENTIFIER_TEMPLATE (t)));
+
+ /* aggregates of tree nodes */
+ case TREE_VEC:
+ {
+ int i = TREE_VEC_LENGTH (t);
+ while (i--)
+ if (uses_template_parms (TREE_VEC_ELT (t, i)))
+ return 1;
+ return 0;
+ }
+ case TREE_LIST:
+ if (uses_template_parms (TREE_PURPOSE (t))
+ || uses_template_parms (TREE_VALUE (t)))
+ return 1;
+ return uses_template_parms (TREE_CHAIN (t));
+
+ /* constructed type nodes */
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ return uses_template_parms (TREE_TYPE (t));
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ if (!TYPE_NAME (t))
+ return 0;
+ if (!TYPE_IDENTIFIER (t))
+ return 0;
+ return uses_template_parms (TYPE_IDENTIFIER (t));
+ case FUNCTION_TYPE:
+ if (uses_template_parms (TYPE_ARG_TYPES (t)))
+ return 1;
+ return uses_template_parms (TREE_TYPE (t));
+ case ARRAY_TYPE:
+ if (uses_template_parms (TYPE_DOMAIN (t)))
+ return 1;
+ return uses_template_parms (TREE_TYPE (t));
+ case OFFSET_TYPE:
+ if (uses_template_parms (TYPE_OFFSET_BASETYPE (t)))
+ return 1;
+ return uses_template_parms (TREE_TYPE (t));
+ case METHOD_TYPE:
+ if (uses_template_parms (TYPE_OFFSET_BASETYPE (t)))
+ return 1;
+ if (uses_template_parms (TYPE_ARG_TYPES (t)))
+ return 1;
+ return uses_template_parms (TREE_TYPE (t));
+
+ /* decl nodes */
+ case TYPE_DECL:
+ return uses_template_parms (DECL_NAME (t));
+ case FUNCTION_DECL:
+ if (uses_template_parms (TREE_TYPE (t)))
+ return 1;
+ /* fall through */
+ case VAR_DECL:
+ case PARM_DECL:
+ /* ??? What about FIELD_DECLs? */
+ /* The type of a decl can't use template parms if the name of the
+ variable doesn't, because it's impossible to resolve them. So
+ ignore the type field for now. */
+ if (DECL_CONTEXT (t) && uses_template_parms (DECL_CONTEXT (t)))
+ return 1;
+ if (uses_template_parms (TREE_TYPE (t)))
+ {
+ error ("template parms used where they can't be resolved");
+ }
+ return 0;
+
+ case CALL_EXPR:
+ return uses_template_parms (TREE_TYPE (t));
+ case ADDR_EXPR:
+ return uses_template_parms (TREE_OPERAND (t, 0));
+
+ /* template parm nodes */
+ case TEMPLATE_TYPE_PARM:
+ case TEMPLATE_CONST_PARM:
+ return 1;
+
+ /* simple type nodes */
+ case INTEGER_TYPE:
+ if (uses_template_parms (TYPE_MIN_VALUE (t)))
+ return 1;
+ return uses_template_parms (TYPE_MAX_VALUE (t));
+
+ case REAL_TYPE:
+ case VOID_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ return 0;
+
+ /* constants */
+ case INTEGER_CST:
+ case REAL_CST:
+ case STRING_CST:
+ return 0;
+
+ case ERROR_MARK:
+ /* Non-error_mark_node ERROR_MARKs are bad things. */
+ my_friendly_assert (t == error_mark_node, 274);
+ /* NOTREACHED */
+ return 0;
+
+ case UNINSTANTIATED_P_TYPE:
+ return 1;
+
+ default:
+ switch (TREE_CODE_CLASS (TREE_CODE (t)))
+ {
+ case '1':
+ case '2':
+ case '3':
+ case '<':
+ {
+ int i;
+ for (i = tree_code_length[(int) TREE_CODE (t)]; --i >= 0;)
+ if (uses_template_parms (TREE_OPERAND (t, i)))
+ return 1;
+ return 0;
+ }
+ default:
+ break;
+ }
+ sorry ("testing %s for template parms",
+ tree_code_name [(int) TREE_CODE (t)]);
+ my_friendly_abort (82);
+ /* NOTREACHED */
+ return 0;
+ }
+}
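+
+/* Quick checks (hypothetical): `T *' answers 1, since POINTER_TYPE
+   recurses into the TEMPLATE_TYPE_PARM; plain `int' answers 0; a
+   TREE_VEC of coerced arguments answers 1 iff some element does.  */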
+
+void
+instantiate_member_templates (classname)
+ tree classname;
+{
+ tree t;
+ tree id = classname;
+ tree members = DECL_TEMPLATE_MEMBERS (TREE_PURPOSE (IDENTIFIER_TEMPLATE (id)));
+
+ for (t = members; t; t = TREE_CHAIN (t))
+ {
+ tree parmvec, type, classparms, tdecl, t2;
+ int nparms, xxx = 0, i;
+
+ my_friendly_assert (TREE_VALUE (t) != NULL_TREE, 275);
+ my_friendly_assert (TREE_CODE (TREE_VALUE (t)) == TEMPLATE_DECL, 276);
+ /* @@ Should verify that class parm list is a list of
+ distinct template parameters, and covers all the template
+ parameters. */
+ tdecl = TREE_VALUE (t);
+ type = DECL_CONTEXT (DECL_TEMPLATE_RESULT (tdecl));
+ classparms = UPT_PARMS (type);
+ nparms = TREE_VEC_LENGTH (classparms);
+ parmvec = make_tree_vec (nparms);
+ for (i = 0; i < nparms; i++)
+ TREE_VEC_ELT (parmvec, i) = NULL_TREE;
+ switch (unify (DECL_TEMPLATE_PARMS (tdecl),
+ &TREE_VEC_ELT (parmvec, 0), nparms,
+ type, IDENTIFIER_TYPE_VALUE (classname),
+ &xxx))
+ {
+ case 0:
+ /* Success -- well, no inconsistency, at least. */
+ for (i = 0; i < nparms; i++)
+ if (TREE_VEC_ELT (parmvec, i) == NULL_TREE)
+ goto failure;
+ t2 = instantiate_template (tdecl,
+ &TREE_VEC_ELT (parmvec, 0));
+ type = IDENTIFIER_TYPE_VALUE (id);
+ my_friendly_assert (type != 0, 277);
+ if (CLASSTYPE_INTERFACE_UNKNOWN (type))
+ {
+ DECL_EXTERNAL (t2) = 0;
+ TREE_PUBLIC (t2) = 0;
+ }
+ else
+ {
+ DECL_EXTERNAL (t2) = CLASSTYPE_INTERFACE_ONLY (type);
+ TREE_PUBLIC (t2) = 1;
+ }
+ break;
+ case 1:
+ /* Failure. */
+ failure:
+ cp_error ("type unification error instantiating %T::%D",
+ classname, tdecl);
+ cp_error_at ("for template declaration `%D'", tdecl);
+
+ continue /* loop of members */;
+ default:
+ /* Eek, a bug. */
+ my_friendly_abort (83);
+ }
+ }
+}
+
+struct tinst_level *current_tinst_level = 0;
+struct tinst_level *free_tinst_level = 0;
+
+void
+push_tinst_level (name)
+ tree name;
+{
+ struct tinst_level *new;
+ tree global = IDENTIFIER_GLOBAL_VALUE (name);
+
+ if (free_tinst_level)
+ {
+ new = free_tinst_level;
+ free_tinst_level = new->next;
+ }
+ else
+ new = (struct tinst_level *) xmalloc (sizeof (struct tinst_level));
+
+ new->classname = name;
+ if (global)
+ {
+ new->line = DECL_SOURCE_LINE (global);
+ new->file = DECL_SOURCE_FILE (global);
+ }
+ else
+ {
+ new->line = lineno;
+ new->file = input_filename;
+ }
+ new->next = current_tinst_level;
+ current_tinst_level = new;
+}
+
+void
+pop_tinst_level ()
+{
+ struct tinst_level *old = current_tinst_level;
+
+ current_tinst_level = old->next;
+ old->next = free_tinst_level;
+ free_tinst_level = old;
+}
+
+struct tinst_level *
+tinst_for_decl ()
+{
+ struct tinst_level *p = current_tinst_level;
+
+ if (p)
+ for (; p->next ; p = p->next )
+ ;
+ return p;
+}
+
+tree
+instantiate_class_template (classname, setup_parse)
+ tree classname;
+ int setup_parse;
+{
+ struct template_info *template_info;
+ tree template, t1;
+
+ if (classname == error_mark_node)
+ return error_mark_node;
+
+ my_friendly_assert (TREE_CODE (classname) == IDENTIFIER_NODE, 278);
+ template = IDENTIFIER_TEMPLATE (classname);
+
+ if (IDENTIFIER_HAS_TYPE_VALUE (classname))
+ {
+ tree type = IDENTIFIER_TYPE_VALUE (classname);
+ if (TREE_CODE (type) == UNINSTANTIATED_P_TYPE)
+ return type;
+ if (TYPE_BEING_DEFINED (type)
+ || TYPE_SIZE (type)
+ || CLASSTYPE_USE_TEMPLATE (type) != 0)
+ return type;
+ }
+
+ /* If IDENTIFIER_LOCAL_VALUE is already set on this template classname
+ (it's something like `foo<int>'), that means we're already working on
+ the instantiation for it. Normally, a classname comes in with nothing
+ but its IDENTIFIER_TEMPLATE slot set. If we were to try to instantiate
+ this again, we'd get a redeclaration error. Since we're already working
+ on it, we'll pass back this classname's TYPE_DECL (it's the value of
+ the classname's IDENTIFIER_LOCAL_VALUE). Only do this if we're setting
+ things up for the parser, though---if we're just trying to instantiate
+   it (e.g., via tsubst) we can trip up because it may not have an
+ IDENTIFIER_TYPE_VALUE when it will need one. */
+ if (setup_parse && IDENTIFIER_LOCAL_VALUE (classname))
+ return IDENTIFIER_LOCAL_VALUE (classname);
+
+ if (uses_template_parms (classname))
+ {
+ if (!TREE_TYPE (classname))
+ {
+ tree t = make_lang_type (RECORD_TYPE);
+ tree d = build_decl (TYPE_DECL, classname, t);
+ DECL_NAME (d) = classname;
+ TYPE_NAME (t) = d;
+ pushdecl (d);
+ }
+ return NULL_TREE;
+ }
+
+ t1 = TREE_PURPOSE (template);
+ my_friendly_assert (TREE_CODE (t1) == TEMPLATE_DECL, 279);
+
+ /* If a template is declared but not defined, accept it; don't crash.
+ Later uses requiring the definition will be flagged as errors by
+ other code. Thanks to niklas@appli.se for this bug fix. */
+ if (DECL_TEMPLATE_INFO (t1)->text == 0)
+ setup_parse = 0;
+
+ push_to_top_level ();
+ template_info = DECL_TEMPLATE_INFO (t1);
+ if (setup_parse)
+ {
+ push_tinst_level (classname);
+ push_template_decls (DECL_TEMPLATE_PARMS (TREE_PURPOSE (template)),
+ TREE_VALUE (template), 0);
+ set_current_level_tags_transparency (1);
+ feed_input (template_info->text, template_info->length, (struct obstack *)0);
+ lineno = template_info->lineno;
+ input_filename = template_info->filename;
+ /* Get interface/implementation back in sync. */
+ extract_interface_info ();
+ overload_template_name (classname, 0);
+ /* Kludge so that we don't get screwed by our own base classes. */
+ TYPE_BEING_DEFINED (TREE_TYPE (classname)) = 1;
+ yychar = PRE_PARSED_CLASS_DECL;
+ yylval.ttype = classname;
+ processing_template_defn++;
+ if (!flag_external_templates)
+ interface_unknown++;
+ }
+ else
+ {
+ tree t, decl, id, tmpl;
+
+ id = classname;
+ tmpl = TREE_PURPOSE (IDENTIFIER_TEMPLATE (id));
+ t = xref_tag (DECL_TEMPLATE_INFO (tmpl)->aggr, id, NULL_TREE, 0);
+ my_friendly_assert (TREE_CODE (t) == RECORD_TYPE
+ || TREE_CODE (t) == UNION_TYPE, 280);
+
+ /* Now, put a copy of the decl in global scope, to avoid
+ * recursive expansion. */
+ decl = IDENTIFIER_LOCAL_VALUE (id);
+ if (!decl)
+ decl = IDENTIFIER_CLASS_VALUE (id);
+ if (decl)
+ {
+ my_friendly_assert (TREE_CODE (decl) == TYPE_DECL, 281);
+ /* We'd better make sure we're on the permanent obstack or else
+ * we'll get a "friendly" abort 124 in pushdecl. Perhaps a
+ * copy_to_permanent would be sufficient here, but then a
+ * sharing problem might occur. I don't know -- niklas@appli.se */
+ push_obstacks (&permanent_obstack, &permanent_obstack);
+ pushdecl_top_level (copy_node (decl));
+ pop_obstacks ();
+ }
+ pop_from_top_level ();
+ }
+
+ return NULL_TREE;
+}
+
+static int
+list_eq (t1, t2)
+ tree t1, t2;
+{
+ if (t1 == NULL_TREE)
+ return t2 == NULL_TREE;
+ if (t2 == NULL_TREE)
+ return 0;
+ /* Don't care if one declares its arg const and the other doesn't -- the
+ main variant of the arg type is all that matters. */
+ if (TYPE_MAIN_VARIANT (TREE_VALUE (t1))
+ != TYPE_MAIN_VARIANT (TREE_VALUE (t2)))
+ return 0;
+ return list_eq (TREE_CHAIN (t1), TREE_CHAIN (t2));
+}
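+
+/* So (hypothetically) argument lists (const int, char) and (int, char)
+   compare equal here; only the main variants of the arg types are
+   consulted.  */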
+
+static tree
+lookup_nested_type_by_name (ctype, name)
+ tree ctype, name;
+{
+ tree t;
+
+  t = TREE_VALUE (CLASSTYPE_TAGS (ctype));
+ while (t)
+ {
+      if (strcmp (IDENTIFIER_POINTER (name),
+		  IDENTIFIER_POINTER (TYPE_IDENTIFIER (t))) == 0)
+ return t;
+ else
+	t = TREE_CHAIN (t);
+ }
+ return NULL_TREE;
+}
+
+static tree
+search_nested_type_in_tmpl (tmpl, type)
+ tree tmpl, type;
+{
+ tree t;
+
+  if (tmpl == NULL || TYPE_CONTEXT (type) == NULL)
+ return tmpl;
+  t = search_nested_type_in_tmpl (tmpl, TYPE_CONTEXT (type));
+  if (t == NULL)
+    return t;
+  t = lookup_nested_type_by_name (t, DECL_NAME (TYPE_NAME (type)));
+ return t;
+}
+
+static tree
+tsubst (t, args, nargs, in_decl)
+ tree t, *args;
+ int nargs;
+ tree in_decl;
+{
+ tree type;
+
+ if (t == NULL_TREE || t == error_mark_node)
+ return t;
+
+ type = TREE_TYPE (t);
+ if (type
+ /* Minor optimization.
+ ?? Are these really the most frequent cases? Is the savings
+ significant? */
+ && type != integer_type_node
+ && type != void_type_node
+ && type != char_type_node)
+ type = c_build_type_variant (tsubst (type, args, nargs, in_decl),
+ TYPE_READONLY (type),
+ TYPE_VOLATILE (type));
+ switch (TREE_CODE (t))
+ {
+ case RECORD_TYPE:
+ if (TYPE_PTRMEMFUNC_P (t))
+ return build_ptrmemfunc_type
+ (tsubst (TYPE_PTRMEMFUNC_FN_TYPE (t), args, nargs, in_decl));
+
+ /* else fall through */
+
+ case ERROR_MARK:
+ case IDENTIFIER_NODE:
+ case OP_IDENTIFIER:
+ case VOID_TYPE:
+ case REAL_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ case INTEGER_CST:
+ case REAL_CST:
+ case STRING_CST:
+ case UNION_TYPE:
+ return t;
+
+ case INTEGER_TYPE:
+ if (t == integer_type_node)
+ return t;
+
+ if (TREE_CODE (TYPE_MIN_VALUE (t)) == INTEGER_CST
+ && TREE_CODE (TYPE_MAX_VALUE (t)) == INTEGER_CST)
+ return t;
+ return build_index_2_type
+ (tsubst (TYPE_MIN_VALUE (t), args, nargs, in_decl),
+ tsubst (TYPE_MAX_VALUE (t), args, nargs, in_decl));
+
+ case TEMPLATE_TYPE_PARM:
+ return c_build_type_variant (args[TEMPLATE_TYPE_IDX (t)],
+ TYPE_READONLY (t),
+ TYPE_VOLATILE (t));
+
+ case TEMPLATE_CONST_PARM:
+ return args[TEMPLATE_CONST_IDX (t)];
+
+ case FUNCTION_DECL:
+ {
+ tree r;
+ tree fnargs, result;
+
+ if (type == TREE_TYPE (t)
+ && (DECL_CONTEXT (t) == NULL_TREE
+ || TREE_CODE_CLASS (TREE_CODE (DECL_CONTEXT (t))) != 't'))
+ return t;
+ fnargs = tsubst (DECL_ARGUMENTS (t), args, nargs, t);
+ result = tsubst (DECL_RESULT (t), args, nargs, t);
+ if (DECL_CONTEXT (t) != NULL_TREE
+ && TREE_CODE_CLASS (TREE_CODE (DECL_CONTEXT (t))) == 't')
+ {
+ /* Look it up in that class, and return the decl node there,
+ instead of creating a new one. */
+ tree ctx, methods, name, method;
+ int n_methods;
+ int i, found = 0;
+
+ name = DECL_NAME (t);
+ ctx = tsubst (DECL_CONTEXT (t), args, nargs, t);
+ methods = CLASSTYPE_METHOD_VEC (ctx);
+ if (methods == NULL_TREE)
+ /* No methods at all -- no way this one can match. */
+ goto no_match;
+ n_methods = TREE_VEC_LENGTH (methods);
+
+ r = NULL_TREE;
+
+ if (!strncmp (OPERATOR_TYPENAME_FORMAT,
+ IDENTIFIER_POINTER (name),
+ sizeof (OPERATOR_TYPENAME_FORMAT) - 1))
+ {
+ /* Type-conversion operator. Reconstruct the name, in
+ case it's the name of one of the template's parameters. */
+ name = build_typename_overload (TREE_TYPE (type));
+ }
+
+ if (DECL_CONTEXT (t) != NULL_TREE
+ && TREE_CODE_CLASS (TREE_CODE (DECL_CONTEXT (t))) == 't'
+ && constructor_name (DECL_CONTEXT (t)) == DECL_NAME (t))
+ name = constructor_name (ctx);
+#if 0
+ fprintf (stderr, "\nfor function %s in class %s:\n",
+ IDENTIFIER_POINTER (name),
+ IDENTIFIER_POINTER (TYPE_IDENTIFIER (ctx)));
+#endif
+ for (i = 0; i < n_methods; i++)
+ {
+ int pass;
+
+ method = TREE_VEC_ELT (methods, i);
+ if (method == NULL_TREE || DECL_NAME (method) != name)
+ continue;
+
+ pass = 0;
+ maybe_error:
+ for (; method; method = DECL_CHAIN (method))
+ {
+ my_friendly_assert (TREE_CODE (method) == FUNCTION_DECL,
+ 282);
+ if (! comptypes (type, TREE_TYPE (method), 1))
+ {
+ tree mtype = TREE_TYPE (method);
+ tree t1, t2;
+
+ /* Keep looking for a method that matches
+ perfectly. This takes care of the problem
+ where destructors (which have implicit int args)
+ look like constructors which have an int arg. */
+ if (pass == 0)
+ continue;
+
+ t1 = TYPE_ARG_TYPES (mtype);
+ t2 = TYPE_ARG_TYPES (type);
+ if (TREE_CODE (mtype) == FUNCTION_TYPE)
+ t2 = TREE_CHAIN (t2);
+
+ if (list_eq (t1, t2))
+ {
+ if (TREE_CODE (mtype) == FUNCTION_TYPE)
+ {
+ tree newtype;
+ newtype = build_function_type (TREE_TYPE (type),
+ TYPE_ARG_TYPES (type));
+ newtype = build_type_variant (newtype,
+ TYPE_READONLY (type),
+ TYPE_VOLATILE (type));
+ type = newtype;
+ if (TREE_TYPE (type) != TREE_TYPE (mtype))
+ goto maybe_bad_return_type;
+ }
+ else if (TYPE_METHOD_BASETYPE (mtype)
+ == TYPE_METHOD_BASETYPE (type))
+ {
+ /* Types didn't match, but arg types and
+ `this' do match, so the return type is
+ all that should be messing it up. */
+ maybe_bad_return_type:
+ if (TREE_TYPE (type) != TREE_TYPE (mtype))
+ error ("inconsistent return types for method `%s' in class `%s'",
+ IDENTIFIER_POINTER (name),
+ IDENTIFIER_POINTER (TYPE_IDENTIFIER (ctx)));
+ }
+ r = method;
+ break;
+ }
+ found = 1;
+ continue;
+ }
+#if 0
+ fprintf (stderr, "\tfound %s\n\n",
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (method)));
+#endif
+ if (DECL_ARTIFICIAL (method))
+ {
+ cp_error ("template for method `%D' which has default implementation in class `%T'", name, ctx);
+ if (in_decl)
+ cp_error_at ("in attempt to instantiate `%D' declared at this point in file", in_decl);
+ return error_mark_node;
+ }
+
+ if (DECL_ARGUMENTS (method)
+ && ! TREE_PERMANENT (DECL_ARGUMENTS (method)))
+ /* @@ Is this early enough? Might we want to do
+ this instead while processing the expansion? */
+ DECL_ARGUMENTS (method)
+ = tsubst (DECL_ARGUMENTS (t), args, nargs, t);
+ r = method;
+ break;
+ }
+ if (r == NULL_TREE && pass == 0)
+ {
+ pass = 1;
+ method = TREE_VEC_ELT (methods, i);
+ goto maybe_error;
+ }
+ }
+ if (r == NULL_TREE)
+ {
+ no_match:
+ cp_error
+ (found
+ ? "template for method `%D' doesn't match any in class `%T'"
+ : "method `%D' not found in class `%T'", name, ctx);
+ if (in_decl)
+ cp_error_at ("in attempt to instantiate `%D' declared at this point in file", in_decl);
+ return error_mark_node;
+ }
+ }
+ else
+ {
+ r = DECL_NAME (t);
+ {
+ tree decls;
+ int got_it = 0;
+
+ decls = lookup_name_nonclass (r);
+ if (decls == NULL_TREE)
+ /* no match */;
+ else if (TREE_CODE (decls) == TREE_LIST)
+ for (decls = TREE_VALUE (decls); decls ;
+ decls = DECL_CHAIN (decls))
+ {
+ if (TREE_CODE (decls) == FUNCTION_DECL
+ && TREE_TYPE (decls) == type)
+ {
+ got_it = 1;
+ r = decls;
+ break;
+ }
+ }
+ else
+ {
+ tree val = decls;
+ decls = NULL_TREE;
+ if (TREE_CODE (val) == FUNCTION_DECL
+ && TREE_TYPE (val) == type)
+ {
+ got_it = 1;
+ r = val;
+ }
+ }
+
+ if (!got_it)
+ {
+ r = build_decl_overload (r, TYPE_VALUES (type),
+ DECL_CONTEXT (t) != NULL_TREE);
+ r = build_lang_decl (FUNCTION_DECL, r, type);
+ }
+ else if (DECL_INLINE (r) && DECL_SAVED_INSNS (r))
+ {
+ /* This overrides the template version, use it. */
+ return r;
+ }
+ }
+ }
+ TREE_PUBLIC (r) = TREE_PUBLIC (t);
+ DECL_EXTERNAL (r) = DECL_EXTERNAL (t);
+ TREE_STATIC (r) = TREE_STATIC (t);
+ DECL_INLINE (r) = DECL_INLINE (t);
+ {
+#if 0 /* Maybe later. -jason */
+ struct tinst_level *til = tinst_for_decl();
+
+ /* should always be true under new approach */
+ if (til)
+ {
+ DECL_SOURCE_FILE (r) = til->file;
+ DECL_SOURCE_LINE (r) = til->line;
+ }
+ else
+#endif
+ {
+ DECL_SOURCE_FILE (r) = DECL_SOURCE_FILE (t);
+ DECL_SOURCE_LINE (r) = DECL_SOURCE_LINE (t);
+ }
+ }
+ DECL_CLASS_CONTEXT (r) = tsubst (DECL_CLASS_CONTEXT (t), args, nargs, t);
+ make_decl_rtl (r, NULL_PTR, 1);
+ DECL_ARGUMENTS (r) = fnargs;
+ DECL_RESULT (r) = result;
+ if (DECL_CONTEXT (t) == NULL_TREE
+ || TREE_CODE_CLASS (TREE_CODE (DECL_CONTEXT (t))) != 't')
+ push_overloaded_decl_top_level (r, 0);
+ return r;
+ }
+
+ case PARM_DECL:
+ {
+ tree r;
+ r = build_decl (PARM_DECL, DECL_NAME (t), type);
+ DECL_INITIAL (r) = TREE_TYPE (r);
+ if (TREE_CHAIN (t))
+ TREE_CHAIN (r) = tsubst (TREE_CHAIN (t), args, nargs, TREE_CHAIN (t));
+ return r;
+ }
+
+ case TREE_LIST:
+ {
+ tree purpose, value, chain, result;
+ int via_public, via_virtual, via_protected;
+
+ if (t == void_list_node)
+ return t;
+
+ via_public = TREE_VIA_PUBLIC (t);
+ via_protected = TREE_VIA_PROTECTED (t);
+ via_virtual = TREE_VIA_VIRTUAL (t);
+
+ purpose = TREE_PURPOSE (t);
+ if (purpose)
+ purpose = tsubst (purpose, args, nargs, in_decl);
+ value = TREE_VALUE (t);
+ if (value)
+ value = tsubst (value, args, nargs, in_decl);
+ chain = TREE_CHAIN (t);
+ if (chain && chain != void_type_node)
+ chain = tsubst (chain, args, nargs, in_decl);
+ if (purpose == TREE_PURPOSE (t)
+ && value == TREE_VALUE (t)
+ && chain == TREE_CHAIN (t))
+ return t;
+ result = hash_tree_cons (via_public, via_virtual, via_protected,
+ purpose, value, chain);
+ TREE_PARMLIST (result) = TREE_PARMLIST (t);
+ return result;
+ }
+ case TREE_VEC:
+ {
+ int len = TREE_VEC_LENGTH (t), need_new = 0, i;
+ tree *elts = (tree *) alloca (len * sizeof (tree));
+ bzero (elts, len * sizeof (tree));
+
+ for (i = 0; i < len; i++)
+ {
+ elts[i] = tsubst (TREE_VEC_ELT (t, i), args, nargs, in_decl);
+ if (elts[i] != TREE_VEC_ELT (t, i))
+ need_new = 1;
+ }
+
+ if (!need_new)
+ return t;
+
+ t = make_tree_vec (len);
+ for (i = 0; i < len; i++)
+ TREE_VEC_ELT (t, i) = elts[i];
+ return t;
+ }
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ {
+ tree r;
+ enum tree_code code;
+ if (type == TREE_TYPE (t))
+ return t;
+
+ code = TREE_CODE (t);
+ if (code == POINTER_TYPE)
+ r = build_pointer_type (type);
+ else
+ r = build_reference_type (type);
+ r = c_build_type_variant (r, TYPE_READONLY (t), TYPE_VOLATILE (t));
+ /* Will this ever be needed for TYPE_..._TO values? */
+ layout_type (r);
+ return r;
+ }
+ case OFFSET_TYPE:
+ return build_offset_type
+ (tsubst (TYPE_OFFSET_BASETYPE (t), args, nargs, in_decl), type);
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ {
+ tree values = TYPE_VALUES (t); /* same as TYPE_ARG_TYPES */
+ tree context = TYPE_CONTEXT (t);
+ tree new_value;
+
+ /* Don't bother recursing if we know it won't change anything. */
+ if (values != void_list_node)
+ values = tsubst (values, args, nargs, in_decl);
+ if (context)
+ context = tsubst (context, args, nargs, in_decl);
+ /* Could also optimize cases where return value and
+	   values have common elements (e.g., T min (const T&, const T&)).  */
+
+ /* If the above parameters haven't changed, just return the type. */
+ if (type == TREE_TYPE (t)
+ && values == TYPE_VALUES (t)
+ && context == TYPE_CONTEXT (t))
+ return t;
+
+ /* Construct a new type node and return it. */
+ if (TREE_CODE (t) == FUNCTION_TYPE
+ && context == NULL_TREE)
+ {
+ new_value = build_function_type (type, values);
+ }
+ else if (context == NULL_TREE)
+ {
+ tree base = tsubst (TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (t))),
+ args, nargs, in_decl);
+ new_value = build_cplus_method_type (base, type,
+ TREE_CHAIN (values));
+ }
+ else
+ {
+ new_value = make_node (TREE_CODE (t));
+ TREE_TYPE (new_value) = type;
+ TYPE_CONTEXT (new_value) = context;
+ TYPE_VALUES (new_value) = values;
+ TYPE_SIZE (new_value) = TYPE_SIZE (t);
+ TYPE_ALIGN (new_value) = TYPE_ALIGN (t);
+ TYPE_MODE (new_value) = TYPE_MODE (t);
+ if (TYPE_METHOD_BASETYPE (t))
+ TYPE_METHOD_BASETYPE (new_value) = tsubst (TYPE_METHOD_BASETYPE (t),
+ args, nargs, in_decl);
+ /* Need to generate hash value. */
+ my_friendly_abort (84);
+ }
+ new_value = build_type_variant (new_value,
+ TYPE_READONLY (t),
+ TYPE_VOLATILE (t));
+ return new_value;
+ }
+ case ARRAY_TYPE:
+ {
+ tree domain = tsubst (TYPE_DOMAIN (t), args, nargs, in_decl);
+ tree r;
+ if (type == TREE_TYPE (t) && domain == TYPE_DOMAIN (t))
+ return t;
+ r = build_cplus_array_type (type, domain);
+ return r;
+ }
+
+ case UNINSTANTIATED_P_TYPE:
+ {
+ int nparms = TREE_VEC_LENGTH (DECL_TEMPLATE_PARMS (UPT_TEMPLATE (t)));
+ tree argvec = make_tree_vec (nparms);
+ tree parmvec = UPT_PARMS (t);
+ int i;
+ tree id, rt;
+ for (i = 0; i < nparms; i++)
+ TREE_VEC_ELT (argvec, i) = tsubst (TREE_VEC_ELT (parmvec, i),
+ args, nargs, in_decl);
+ id = lookup_template_class (DECL_NAME (UPT_TEMPLATE (t)), argvec, NULL_TREE);
+ if (! IDENTIFIER_HAS_TYPE_VALUE (id)) {
+	  instantiate_class_template (id, 0);
+ /* set up pending_classes */
+ add_pending_template (id);
+
+ TYPE_MAIN_VARIANT (IDENTIFIER_TYPE_VALUE (id)) =
+ IDENTIFIER_TYPE_VALUE (id);
+ }
+ rt = IDENTIFIER_TYPE_VALUE (id);
+
+	/* kung: this part handles nested types in template definitions.  */
+
+	if (! ANON_AGGRNAME_P (DECL_NAME (TYPE_NAME (t))))
+ {
+ rt = search_nested_type_in_tmpl (rt, t);
+ }
+
+ return build_type_variant (rt, TYPE_READONLY (t), TYPE_VOLATILE (t));
+ }
+
+ case MINUS_EXPR:
+ case PLUS_EXPR:
+ return fold (build (TREE_CODE (t), TREE_TYPE (t),
+ tsubst (TREE_OPERAND (t, 0), args, nargs, in_decl),
+ tsubst (TREE_OPERAND (t, 1), args, nargs, in_decl)));
+
+ case NEGATE_EXPR:
+ case NOP_EXPR:
+ return fold (build1 (TREE_CODE (t), TREE_TYPE (t),
+ tsubst (TREE_OPERAND (t, 0), args, nargs, in_decl)));
+
+ default:
+ sorry ("use of `%s' in function template",
+ tree_code_name [(int) TREE_CODE (t)]);
+ return error_mark_node;
+ }
+}
+
+tree
+instantiate_template (tmpl, targ_ptr)
+ tree tmpl, *targ_ptr;
+{
+ tree targs, fndecl;
+ int i, len;
+ struct pending_inline *p;
+ struct template_info *t;
+ struct obstack *old_fmp_obstack;
+ extern struct obstack *function_maybepermanent_obstack;
+
+ push_obstacks (&permanent_obstack, &permanent_obstack);
+ old_fmp_obstack = function_maybepermanent_obstack;
+ function_maybepermanent_obstack = &permanent_obstack;
+
+ my_friendly_assert (TREE_CODE (tmpl) == TEMPLATE_DECL, 283);
+ len = TREE_VEC_LENGTH (DECL_TEMPLATE_PARMS (tmpl));
+
+ for (fndecl = DECL_TEMPLATE_INSTANTIATIONS (tmpl);
+ fndecl; fndecl = TREE_CHAIN (fndecl))
+ {
+ tree *t1 = &TREE_VEC_ELT (TREE_PURPOSE (fndecl), 0);
+ for (i = len - 1; i >= 0; i--)
+ if (t1[i] != targ_ptr[i])
+ goto no_match;
+
+ /* Here, we have a match. */
+ fndecl = TREE_VALUE (fndecl);
+ goto exit;
+
+ no_match:
+ ;
+ }
+
+ targs = make_tree_vec (len);
+ i = len;
+ while (i--)
+ TREE_VEC_ELT (targs, i) = targ_ptr[i];
+
+  /* Substitute the template parameters.  */
+ fndecl = tsubst (DECL_RESULT (tmpl), targ_ptr,
+ TREE_VEC_LENGTH (targs), tmpl);
+
+ if (fndecl == error_mark_node)
+ goto exit;
+
+ /* If it's a static member fn in the template, we need to change it
+ into a FUNCTION_TYPE and chop off its this pointer. */
+ if (TREE_CODE (TREE_TYPE (DECL_RESULT (tmpl))) == METHOD_TYPE
+ && DECL_STATIC_FUNCTION_P (fndecl))
+ {
+ tree olddecl = DECL_RESULT (tmpl);
+ revert_static_member_fn (&DECL_RESULT (tmpl), NULL, NULL);
+ /* Chop off the this pointer that grokclassfn so kindly added
+ for us (it didn't know yet if the fn was static or not). */
+ DECL_ARGUMENTS (olddecl) = TREE_CHAIN (DECL_ARGUMENTS (olddecl));
+ DECL_ARGUMENTS (fndecl) = TREE_CHAIN (DECL_ARGUMENTS (fndecl));
+ }
+
+ t = DECL_TEMPLATE_INFO (tmpl);
+
+ /* If we have a preexisting version of this function, don't expand
+ the template version, use the other instead. */
+ if (DECL_INLINE (fndecl) && DECL_SAVED_INSNS (fndecl))
+ {
+ SET_DECL_TEMPLATE_SPECIALIZATION (fndecl);
+ p = (struct pending_inline *)0;
+ }
+ else if (t->text)
+ {
+ SET_DECL_IMPLICIT_INSTANTIATION (fndecl);
+ p = (struct pending_inline *) permalloc (sizeof (struct pending_inline));
+ p->parm_vec = t->parm_vec;
+ p->bindings = targs;
+ p->can_free = 0;
+ p->deja_vu = 0;
+ p->buf = t->text;
+ p->len = t->length;
+ p->fndecl = fndecl;
+ {
+ int l = lineno;
+ char * f = input_filename;
+
+ lineno = p->lineno = t->lineno;
+ input_filename = p->filename = t->filename;
+
+ extract_interface_info ();
+
+ if (interface_unknown && flag_external_templates && ! DECL_IN_SYSTEM_HEADER (tmpl))
+ warn_if_unknown_interface ();
+ if (interface_unknown || !flag_external_templates)
+ p->interface = 1; /* unknown */
+ else
+ p->interface = interface_only ? 0 : 2;
+
+ lineno = l;
+ input_filename = f;
+
+ extract_interface_info ();
+ }
+ }
+ else
+ p = (struct pending_inline *)0;
+
+ DECL_TEMPLATE_INSTANTIATIONS (tmpl) =
+ tree_cons (targs, fndecl, DECL_TEMPLATE_INSTANTIATIONS (tmpl));
+
+ if (p == (struct pending_inline *)0)
+ {
+ /* do nothing */
+ }
+ else if (DECL_INLINE (fndecl))
+ {
+ DECL_PENDING_INLINE_INFO (fndecl) = p;
+ p->next = pending_inlines;
+ pending_inlines = p;
+ }
+ else
+ {
+ p->next = pending_template_expansions;
+ pending_template_expansions = p;
+ }
+ exit:
+ function_maybepermanent_obstack = old_fmp_obstack;
+ pop_obstacks ();
+
+ return fndecl;
+}
+
+/* classlevel should now never be true. jason 4/12/94 */
+void
+undo_template_name_overload (id, classlevel)
+ tree id;
+ int classlevel;
+{
+ tree template;
+
+ template = IDENTIFIER_TEMPLATE (id);
+ if (!template)
+ return;
+
+#if 0 /* not yet, should get fixed properly later */
+ poplevel (0, 0, 0);
+#endif
+#if 1 /* XXX */
+ /* This was a botch... See `overload_template_name' just below. */
+ if (!classlevel)
+ poplevel (0, 0, 0);
+#endif
+}
+
+/* classlevel should now never be true. jason 4/12/94 */
+void
+overload_template_name (id, classlevel)
+ tree id;
+ int classlevel;
+{
+ tree template, t, decl;
+ struct template_info *tinfo;
+
+ my_friendly_assert (TREE_CODE (id) == IDENTIFIER_NODE, 284);
+ template = IDENTIFIER_TEMPLATE (id);
+ if (!template)
+ return;
+
+ template = TREE_PURPOSE (template);
+ tinfo = DECL_TEMPLATE_INFO (template);
+ template = DECL_NAME (template);
+ my_friendly_assert (template != NULL_TREE, 285);
+
+#if 1 /* XXX */
+ /* This was a botch... names of templates do not get their own private
+ scopes. Rather, they should go into the binding level already created
+ by push_template_decls. Except that there isn't one of those for
+ specializations. */
+ if (!classlevel)
+ {
+ pushlevel (1);
+ declare_pseudo_global_level ();
+ }
+#endif
+
+ t = xref_tag (tinfo->aggr, id, NULL_TREE, 0);
+ my_friendly_assert (TREE_CODE (t) == RECORD_TYPE
+ || TREE_CODE (t) == UNION_TYPE
+ || TREE_CODE (t) == UNINSTANTIATED_P_TYPE, 286);
+
+ decl = build_decl (TYPE_DECL, template, t);
+
+#if 0 /* fix this later */
+ /* We don't want to call here if the work has already been done. */
+ t = (classlevel
+ ? IDENTIFIER_CLASS_VALUE (template)
+ : IDENTIFIER_LOCAL_VALUE (template));
+ if (t
+ && TREE_CODE (t) == TYPE_DECL
+ && TREE_TYPE (t) == t)
+ my_friendly_abort (85);
+#endif
+
+ if (classlevel)
+ pushdecl_class_level (decl);
+ else
+ pushdecl (decl);
+
+#if 0 /* This seems bogus to me; if it isn't, explain why. (jason) */
+ /* Fake this for now, just to make dwarfout.c happy. It will have to
+ be done in a proper way later on. */
+ DECL_CONTEXT (decl) = t;
+#endif
+}
+
+/* NAME is the IDENTIFIER value of a PRE_PARSED_CLASS_DECL. */
+void
+end_template_instantiation (name)
+ tree name;
+{
+ extern struct pending_input *to_be_restored;
+ tree t, decl;
+
+ processing_template_defn--;
+ if (!flag_external_templates)
+ interface_unknown--;
+
+ /* Restore the old parser input state. */
+ if (yychar == YYEMPTY)
+ yychar = yylex ();
+ if (yychar != END_OF_SAVED_INPUT)
+ error ("parse error at end of class template");
+ else
+ {
+ restore_pending_input (to_be_restored);
+ to_be_restored = 0;
+ }
+
+ /* Our declarations didn't get stored in the global slot, since
+ there was a (supposedly tags-transparent) scope in between. */
+ t = IDENTIFIER_TYPE_VALUE (name);
+ my_friendly_assert (t != NULL_TREE
+ && TREE_CODE_CLASS (TREE_CODE (t)) == 't',
+ 287);
+ SET_CLASSTYPE_IMPLICIT_INSTANTIATION (t);
+ /* Make methods of template classes static, unless
+ -fexternal-templates is given. */
+ if (!flag_external_templates)
+ SET_CLASSTYPE_INTERFACE_UNKNOWN (t);
+ decl = IDENTIFIER_GLOBAL_VALUE (name);
+ my_friendly_assert (TREE_CODE (decl) == TYPE_DECL, 288);
+
+ undo_template_name_overload (name, 0);
+ t = IDENTIFIER_TEMPLATE (name);
+ pop_template_decls (DECL_TEMPLATE_PARMS (TREE_PURPOSE (t)), TREE_VALUE (t),
+ 0);
+ /* This will fix up the type-value field. */
+ pushdecl (decl);
+ pop_from_top_level ();
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG && TREE_CODE (decl) == TYPE_DECL)
+ {
+ /* We just completed the definition of a new file-scope type,
+ so we can go ahead and output debug-info for it now. */
+ TYPE_STUB_DECL (TREE_TYPE (decl)) = decl;
+ rest_of_type_compilation (TREE_TYPE (decl), 1);
+ }
+#endif /* DWARF_DEBUGGING_INFO */
+
+ /* Restore interface/implementation settings. */
+ extract_interface_info ();
+}
+
+/* Store away the text of a template.  */
+
+void
+reinit_parse_for_template (yychar, d1, d2)
+ int yychar;
+ tree d1, d2;
+{
+ struct template_info *template_info;
+ extern struct obstack inline_text_obstack; /* see comment in lex.c */
+
+ if (d2 == NULL_TREE || d2 == error_mark_node)
+ {
+ lose:
+ /* @@ Should use temp obstack, and discard results. */
+ reinit_parse_for_block (yychar, &inline_text_obstack, 1);
+ return;
+ }
+
+ if (TREE_CODE (d2) == IDENTIFIER_NODE)
+ d2 = IDENTIFIER_GLOBAL_VALUE (d2);
+ if (!d2)
+ goto lose;
+ template_info = DECL_TEMPLATE_INFO (d2);
+ if (!template_info)
+ {
+ template_info = (struct template_info *) permalloc (sizeof (struct template_info));
+ bzero (template_info, sizeof (struct template_info));
+ DECL_TEMPLATE_INFO (d2) = template_info;
+ }
+ template_info->filename = input_filename;
+ template_info->lineno = lineno;
+ reinit_parse_for_block (yychar, &inline_text_obstack, 1);
+ template_info->text = obstack_base (&inline_text_obstack);
+ template_info->length = obstack_object_size (&inline_text_obstack);
+ obstack_finish (&inline_text_obstack);
+ template_info->parm_vec = d1;
+}
+
+/* Type unification.
+
+ We have a function template signature with one or more references to
+ template parameters, and a parameter list we wish to fit to this
+ template. If possible, produce a list of parameters for the template
+ which will cause it to fit the supplied parameter list.
+
+ Return zero for success, 2 for an incomplete match that doesn't resolve
+ all the types, and 1 for complete failure. An error message will be
+ printed only for an incomplete match.
+
+ TPARMS[NTPARMS] is an array of template parameter types;
+ TARGS[NTPARMS] is the array of template parameter values. PARMS is
+ the function template's signature (using TEMPLATE_PARM_IDX nodes),
+ and ARGS is the argument list we're trying to match against it.
+
+ If SUBR is 1, we're being called recursively (to unify the arguments of
+ a function or method parameter of a function template), so don't zero
+ out targs and don't fail on an incomplete match. */
+
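+/* For example (illustrative only): given the template
+
+       template <class T> T min (const T&, const T&);
+
+   and the call `min (1, 2)', PARMS is the list (const T&, const T&),
+   ARGS is (int, int) and NTPARMS is 1.  unify strips the REFERENCE_TYPE,
+   checks that the parm is at least as cv-qualified as the arg, sets
+   TARGS[0] to `int', and type_unification returns 0 for success.  */
+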
+int
+type_unification (tparms, targs, parms, args, nsubsts, subr)
+ tree tparms, *targs, parms, args;
+ int *nsubsts, subr;
+{
+ tree parm, arg;
+ int i;
+ int ntparms = TREE_VEC_LENGTH (tparms);
+
+ my_friendly_assert (TREE_CODE (tparms) == TREE_VEC, 289);
+ my_friendly_assert (TREE_CODE (parms) == TREE_LIST, 290);
+ /* ARGS could be NULL (via a call from parse.y to
+ build_x_function_call). */
+ if (args)
+ my_friendly_assert (TREE_CODE (args) == TREE_LIST, 291);
+ my_friendly_assert (ntparms > 0, 292);
+
+ if (!subr)
+ bzero (targs, sizeof (tree) * ntparms);
+
+ while (parms
+ && parms != void_list_node
+ && args
+ && args != void_list_node)
+ {
+ parm = TREE_VALUE (parms);
+ parms = TREE_CHAIN (parms);
+ arg = TREE_VALUE (args);
+ args = TREE_CHAIN (args);
+
+ if (arg == error_mark_node)
+ return 1;
+ if (arg == unknown_type_node)
+ return 1;
+#if 0
+ if (TREE_CODE (arg) == VAR_DECL)
+ arg = TREE_TYPE (arg);
+ else if (TREE_CODE_CLASS (TREE_CODE (arg)) == 'e')
+ arg = TREE_TYPE (arg);
+#else
+ if (TREE_CODE_CLASS (TREE_CODE (arg)) != 't')
+ {
+ my_friendly_assert (TREE_TYPE (arg) != NULL_TREE, 293);
+ arg = TREE_TYPE (arg);
+ }
+#endif
+ if (TREE_CODE (arg) == FUNCTION_TYPE
+ || TREE_CODE (arg) == METHOD_TYPE)
+ arg = build_pointer_type (arg);
+
+ switch (unify (tparms, targs, ntparms, parm, arg, nsubsts))
+ {
+ case 0:
+ break;
+ case 1:
+ return 1;
+ }
+ }
+ /* Fail if we've reached the end of the parm list, and more args
+ are present, and the parm list isn't variadic. */
+ if (args && args != void_list_node && parms == void_list_node)
+ return 1;
+ /* Fail if parms are left and they don't have default values. */
+ if (parms
+ && parms != void_list_node
+ && TREE_PURPOSE (parms) == NULL_TREE)
+ return 1;
+ if (!subr)
+ for (i = 0; i < ntparms; i++)
+ if (!targs[i])
+ {
+ error ("incomplete type unification");
+ return 2;
+ }
+ return 0;
+}
+
+/* Tail recursion is your friend. */
+static int
+unify (tparms, targs, ntparms, parm, arg, nsubsts)
+ tree tparms, *targs, parm, arg;
+ int *nsubsts, ntparms;
+{
+ int idx;
+
+ /* I don't think this will do the right thing with respect to types.
+ But the only case I've seen it in so far has been array bounds, where
+ signedness is the only information lost, and I think that will be
+ okay. */
+ while (TREE_CODE (parm) == NOP_EXPR)
+ parm = TREE_OPERAND (parm, 0);
+
+ if (arg == error_mark_node)
+ return 1;
+ if (arg == unknown_type_node)
+ return 1;
+ if (arg == parm)
+ return 0;
+
+ if (TREE_CODE (arg) == REFERENCE_TYPE)
+ arg = TREE_TYPE (arg);
+
+ switch (TREE_CODE (parm))
+ {
+ case TEMPLATE_TYPE_PARM:
+ (*nsubsts)++;
+ if (TEMPLATE_TYPE_TPARMLIST (parm) != tparms)
+ {
+ error ("mixed template headers?!");
+ my_friendly_abort (86);
+ return 1;
+ }
+ idx = TEMPLATE_TYPE_IDX (parm);
+ /* Simple cases: Value already set, does match or doesn't. */
+ if (targs[idx] == arg)
+ return 0;
+ else if (targs[idx])
+ {
+ if (TYPE_MAIN_VARIANT (targs[idx]) == TYPE_MAIN_VARIANT (arg))
+ /* allow different parms to have different cv-qualifiers */;
+ else
+ return 1;
+ }
+ /* Check for mixed types and values. */
+ if (TREE_CODE (TREE_VEC_ELT (tparms, idx)) != IDENTIFIER_NODE)
+ return 1;
+ /* Allow trivial conversions. */
+ if (TYPE_READONLY (parm) < TYPE_READONLY (arg)
+ || TYPE_VOLATILE (parm) < TYPE_VOLATILE (arg))
+ return 1;
+ targs[idx] = arg;
+ return 0;
+ case TEMPLATE_CONST_PARM:
+ (*nsubsts)++;
+ idx = TEMPLATE_CONST_IDX (parm);
+ if (targs[idx] == arg)
+ return 0;
+ else if (targs[idx])
+ {
+ tree t = targs[idx];
+ if (TREE_CODE (t) == TREE_CODE (arg))
+ switch (TREE_CODE (arg))
+ {
+ case INTEGER_CST:
+ if (tree_int_cst_equal (t, arg))
+ return 0;
+ break;
+ case REAL_CST:
+ if (REAL_VALUES_EQUAL (TREE_REAL_CST (t), TREE_REAL_CST (arg)))
+ return 0;
+ break;
+ /* STRING_CST values are not valid template const parms. */
+ default:
+ ;
+ }
+ my_friendly_abort (87);
+ return 1;
+ }
+/* else if (typeof arg != tparms[idx])
+ return 1;*/
+
+ targs[idx] = copy_to_permanent (arg);
+ return 0;
+
+ case POINTER_TYPE:
+ if (TREE_CODE (arg) != POINTER_TYPE)
+ return 1;
+ return unify (tparms, targs, ntparms, TREE_TYPE (parm), TREE_TYPE (arg),
+ nsubsts);
+
+ case REFERENCE_TYPE:
+ return unify (tparms, targs, ntparms, TREE_TYPE (parm), arg, nsubsts);
+
+ case ARRAY_TYPE:
+ if (TREE_CODE (arg) != ARRAY_TYPE)
+ return 1;
+ if (unify (tparms, targs, ntparms, TYPE_DOMAIN (parm), TYPE_DOMAIN (arg),
+ nsubsts) != 0)
+ return 1;
+ return unify (tparms, targs, ntparms, TREE_TYPE (parm), TREE_TYPE (arg),
+ nsubsts);
+
+ case REAL_TYPE:
+ case INTEGER_TYPE:
+ if (TREE_CODE (parm) == INTEGER_TYPE && TREE_CODE (arg) == INTEGER_TYPE)
+ {
+ if (TYPE_MIN_VALUE (parm) && TYPE_MIN_VALUE (arg)
+ && unify (tparms, targs, ntparms,
+ TYPE_MIN_VALUE (parm), TYPE_MIN_VALUE (arg), nsubsts))
+ return 1;
+ if (TYPE_MAX_VALUE (parm) && TYPE_MAX_VALUE (arg)
+ && unify (tparms, targs, ntparms,
+ TYPE_MAX_VALUE (parm), TYPE_MAX_VALUE (arg), nsubsts))
+ return 1;
+ }
+ /* As far as unification is concerned, this wins. Later checks
+ will invalidate it if necessary. */
+ return 0;
+
+      /* The tree codes INTEGER_CST and MINUS_EXPR can come from array bounds.  */
+ case INTEGER_CST:
+ if (TREE_CODE (arg) != INTEGER_CST)
+ return 1;
+ return !tree_int_cst_equal (parm, arg);
+
+ case MINUS_EXPR:
+ {
+ tree t1, t2;
+ t1 = TREE_OPERAND (parm, 0);
+ t2 = TREE_OPERAND (parm, 1);
+ if (TREE_CODE (t1) != TEMPLATE_CONST_PARM)
+ return 1;
+ return unify (tparms, targs, ntparms, t1,
+ fold (build (PLUS_EXPR, integer_type_node, arg, t2)),
+ nsubsts);
+ }
+
+ case TREE_VEC:
+ {
+ int i;
+ if (TREE_CODE (arg) != TREE_VEC)
+ return 1;
+ if (TREE_VEC_LENGTH (parm) != TREE_VEC_LENGTH (arg))
+ return 1;
+ for (i = TREE_VEC_LENGTH (parm) - 1; i >= 0; i--)
+ if (unify (tparms, targs, ntparms,
+ TREE_VEC_ELT (parm, i), TREE_VEC_ELT (arg, i),
+ nsubsts))
+ return 1;
+ return 0;
+ }
+
+ case UNINSTANTIATED_P_TYPE:
+ {
+ tree a;
+ /* Unification of something that is not a template fails. (mrs) */
+ if (TYPE_NAME (arg) == 0)
+ return 1;
+ a = IDENTIFIER_TEMPLATE (TYPE_IDENTIFIER (arg));
+ /* Unification of something that is not a template fails. (mrs) */
+ if (a == 0)
+ return 1;
+ if (UPT_TEMPLATE (parm) != TREE_PURPOSE (a))
+ /* different templates */
+ return 1;
+ return unify (tparms, targs, ntparms, UPT_PARMS (parm), TREE_VALUE (a),
+ nsubsts);
+ }
+
+ case RECORD_TYPE:
+ if (TYPE_PTRMEMFUNC_P (parm))
+ return unify (tparms, targs, ntparms, TYPE_PTRMEMFUNC_FN_TYPE (parm),
+ arg, nsubsts);
+
+ /* Allow trivial conversions. */
+ if (TYPE_MAIN_VARIANT (parm) != TYPE_MAIN_VARIANT (arg)
+ || TYPE_READONLY (parm) < TYPE_READONLY (arg)
+ || TYPE_VOLATILE (parm) < TYPE_VOLATILE (arg))
+ return 1;
+ return 0;
+
+ case METHOD_TYPE:
+ if (TREE_CODE (arg) != METHOD_TYPE)
+ return 1;
+ goto check_args;
+
+ case FUNCTION_TYPE:
+ if (TREE_CODE (arg) != FUNCTION_TYPE)
+ return 1;
+ check_args:
+ return type_unification (tparms, targs, TYPE_ARG_TYPES (parm),
+ TYPE_ARG_TYPES (arg), nsubsts, 1);
+
+ case OFFSET_TYPE:
+ if (TREE_CODE (arg) != OFFSET_TYPE)
+ return 1;
+ if (unify (tparms, targs, ntparms, TYPE_OFFSET_BASETYPE (parm),
+ TYPE_OFFSET_BASETYPE (arg), nsubsts))
+ return 1;
+ return unify (tparms, targs, ntparms, TREE_TYPE (parm),
+ TREE_TYPE (arg), nsubsts);
+
+ default:
+ sorry ("use of `%s' in template type unification",
+ tree_code_name [(int) TREE_CODE (parm)]);
+ return 1;
+ }
+}
+
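+/* E.g. (illustrative): unify (tparms, targs, 1, `T*', `int*', &n) takes
+   the POINTER_TYPE case above and tail-calls unify on `T' and `int',
+   which lands in the TEMPLATE_TYPE_PARM case and records targs[0] = int.  */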
+
+#undef DEBUG
+
+int
+do_pending_expansions ()
+{
+ struct pending_inline *i, *new_list = 0;
+
+ if (!pending_template_expansions)
+ return 0;
+
+#ifdef DEBUG
+ fprintf (stderr, "\n\n\t\t IN DO_PENDING_EXPANSIONS\n\n");
+#endif
+
+ i = pending_template_expansions;
+ while (i)
+ {
+ tree context;
+
+ struct pending_inline *next = i->next;
+ tree t = i->fndecl;
+
+ int decision = 0;
+#define DECIDE(N) do {decision=(N); goto decided;} while(0)
+
+ my_friendly_assert (TREE_CODE (t) == FUNCTION_DECL
+ || TREE_CODE (t) == VAR_DECL, 294);
+ if (TREE_ASM_WRITTEN (t))
+ DECIDE (0);
+
+ if (DECL_EXPLICIT_INSTANTIATION (t))
+ DECIDE (! DECL_EXTERNAL (t));
+ else if (! flag_implicit_templates)
+ DECIDE (0);
+
+ /* If it's a method, let the class type decide it.
+ @@ What if the method template is in a separate file?
+ Maybe both file contexts should be taken into account?
+ Maybe only do this if i->interface == 1 (unknown)? */
+ context = DECL_CONTEXT (t);
+ if (context != NULL_TREE
+ && TREE_CODE_CLASS (TREE_CODE (context)) == 't')
+ {
+ /* I'm interested in the context of this version of the function,
+ not the original virtual declaration. */
+ context = DECL_CLASS_CONTEXT (t);
+
+ /* If `unknown', we might want a static copy.
+ If `implementation', we want a global one.
+ If `interface', ext ref. */
+ if (CLASSTYPE_INTERFACE_KNOWN (context))
+ DECIDE (!CLASSTYPE_INTERFACE_ONLY (context));
+#if 0 /* This doesn't get us stuff needed only by the file initializer. */
+ DECIDE (TREE_USED (t));
+#else /* This compiles too much stuff, but that's probably better in
+ most cases than never compiling the stuff we need. */
+ DECIDE (1);
+#endif
+ }
+
+ if (i->interface == 1)
+ DECIDE (TREE_USED (t));
+ else
+ DECIDE (i->interface);
+
+ decided:
+#ifdef DEBUG
+ print_node_brief (stderr, decision ? "yes: " : "no: ", t, 0);
+ fprintf (stderr, "\t%s\n",
+ (DECL_ASSEMBLER_NAME (t)
+ ? IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (t))
+ : ""));
+#endif
+ if (decision)
+ {
+ i->next = pending_inlines;
+ pending_inlines = i;
+ }
+ else
+ {
+ i->next = new_list;
+ new_list = i;
+ }
+ i = next;
+ }
+ pending_template_expansions = new_list;
+ if (!pending_inlines)
+ return 0;
+ do_pending_inlines ();
+ return 1;
+}
+
+
+struct pending_template {
+ struct pending_template *next;
+ tree id;
+};
+
+static struct pending_template* pending_templates;
+
+void
+do_pending_templates ()
+{
+ struct pending_template* t;
+
+  for (t = pending_templates; t; t = t->next)
+ {
+ instantiate_class_template (t->id, 1);
+ }
+
+  for (t = pending_templates; t; t = pending_templates)
+    {
+      pending_templates = t->next;
+      free (t);
+ }
+}
+
+static void
+add_pending_template (pt)
+ tree pt;
+{
+ struct pending_template *p;
+
+ p = (struct pending_template *) malloc (sizeof (struct pending_template));
+ p->next = pending_templates;
+ pending_templates = p;
+ p->id = pt;
+}
+
+/* called from the parser. */
+void
+do_function_instantiation (declspecs, declarator, storage)
+ tree declspecs, declarator, storage;
+{
+ tree decl = grokdeclarator (declarator, declspecs, NORMAL, 0, 0);
+ tree name = DECL_NAME (decl);
+ tree fn = IDENTIFIER_GLOBAL_VALUE (name);
+ tree result = NULL_TREE;
+ if (fn)
+ {
+ for (fn = get_first_fn (fn); fn; fn = DECL_CHAIN (fn))
+ if (TREE_CODE (fn) == TEMPLATE_DECL)
+ {
+ int ntparms = TREE_VEC_LENGTH (DECL_TEMPLATE_PARMS (fn));
+ tree *targs = (tree *) malloc (sizeof (tree) * ntparms);
+ int i, dummy;
+ i = type_unification (DECL_TEMPLATE_PARMS (fn), targs,
+ TYPE_ARG_TYPES (TREE_TYPE (fn)),
+ TYPE_ARG_TYPES (TREE_TYPE (decl)),
+ &dummy, 0);
+ if (i == 0)
+ {
+ if (result)
+ cp_error ("ambiguous template instantiation for `%D' requested", decl);
+ else
+ result = instantiate_template (fn, targs);
+ }
+ }
+ }
+ if (! result)
+ cp_error ("no matching template for `%D' found", decl);
+
+ if (flag_external_templates)
+ return;
+
+ if (DECL_EXPLICIT_INSTANTIATION (result) && ! DECL_EXTERNAL (result))
+ return;
+
+ SET_DECL_EXPLICIT_INSTANTIATION (result);
+ TREE_PUBLIC (result) = 1;
+
+ if (storage == NULL_TREE)
+ DECL_EXTERNAL (result) = DECL_INLINE (result) && ! flag_implement_inlines;
+ else if (storage == ridpointers[(int) RID_EXTERN])
+ DECL_EXTERNAL (result) = 1;
+ else
+ cp_error ("storage class `%D' applied to template instantiation",
+ storage);
+}
+
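+/* A sketch of the flow above (illustrative): for an explicit instantiation
+   such as `template int min (const int&, const int&);', grokdeclarator
+   yields DECL, each TEMPLATE_DECL overload of the name is tried with
+   type_unification against DECL's argument types, and the unique match
+   is expanded via instantiate_template.  */
+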
+void
+do_type_instantiation (name, storage)
+ tree name, storage;
+{
+ tree t = TREE_TYPE (name);
+ int extern_p;
+
+ if (flag_external_templates)
+ return;
+
+ if (CLASSTYPE_EXPLICIT_INSTANTIATION (t) && ! CLASSTYPE_INTERFACE_ONLY (t))
+ return;
+
+ if (TYPE_SIZE (t) == NULL_TREE)
+ {
+ cp_error ("explicit instantiation of `%#T' before definition of template",
+ t);
+ return;
+ }
+
+ if (storage == NULL_TREE)
+ extern_p = 0;
+ else if (storage == ridpointers[(int) RID_EXTERN])
+ extern_p = 1;
+ else
+ {
+ cp_error ("storage class `%D' applied to template instantiation",
+ storage);
+ extern_p = 0;
+ }
+
+ SET_CLASSTYPE_EXPLICIT_INSTANTIATION (t);
+ CLASSTYPE_VTABLE_NEEDS_WRITING (t) = ! extern_p;
+ SET_CLASSTYPE_INTERFACE_KNOWN (t);
+ CLASSTYPE_INTERFACE_ONLY (t) = extern_p;
+ if (! extern_p)
+ {
+ CLASSTYPE_DEBUG_REQUESTED (t) = 1;
+ TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (t)) = 0;
+ rest_of_type_compilation (t, 1);
+ }
+
+ /* this should really be done by instantiate_member_templates */
+ {
+ tree method = TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (t), 0);
+ for (; method; method = TREE_CHAIN (method))
+ {
+ SET_DECL_EXPLICIT_INSTANTIATION (method);
+ TREE_PUBLIC (method) = 1;
+ DECL_EXTERNAL (method)
+ = (extern_p || (DECL_INLINE (method) && ! flag_implement_inlines));
+ }
+ }
+
+ /* and data member templates, too */
+}
+
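+/* Illustrative use: `template class A<int>;' reaches do_type_instantiation
+   with STORAGE == NULL_TREE, so the vtable is written out, debug output is
+   requested and every method becomes an explicit instantiation;
+   `extern template class A<int>;' instead marks the methods DECL_EXTERNAL
+   so nothing is emitted here.  */
+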
+tree
+create_nested_upt (scope, name)
+ tree scope, name;
+{
+ tree t = make_lang_type (UNINSTANTIATED_P_TYPE);
+ tree d = build_decl (TYPE_DECL, name, t);
+
+ TYPE_NAME (t) = d;
+ TYPE_VALUES (t) = TYPE_VALUES (scope);
+ TYPE_CONTEXT (t) = scope;
+
+ pushdecl (d);
+ return d;
+}
diff --git a/gnu/usr.bin/cc/cc1plus/ptree.c b/gnu/usr.bin/cc/cc1plus/ptree.c
new file mode 100644
index 0000000..38e3c77
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/ptree.c
@@ -0,0 +1,167 @@
+/* Prints out trees in human readable form.
+ Copyright (C) 1992, 1993 Free Software Foundation, Inc.
+ Hacked by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include "tree.h"
+#include <stdio.h>
+#include "cp-tree.h"
+
+void
+print_lang_decl (file, node, indent)
+ FILE *file;
+ tree node;
+ int indent;
+{
+ if (!DECL_LANG_SPECIFIC (node))
+ return;
+  /* A FIELD_DECL only has the flags structure, which we aren't displaying
+     anyway.  */
+ if (DECL_MUTABLE_P (node))
+ {
+ indent_to (file, indent + 3);
+ fprintf (file, " mutable ");
+ }
+ if (TREE_CODE (node) == FIELD_DECL)
+ return;
+ indent_to (file, indent + 3);
+ if (DECL_MAIN_VARIANT (node))
+ {
+ fprintf (file, " decl-main-variant ");
+ fprintf (file, HOST_PTR_PRINTF, DECL_MAIN_VARIANT (node));
+ }
+ if (DECL_PENDING_INLINE_INFO (node))
+ {
+ fprintf (file, " pending-inline-info ");
+ fprintf (file, HOST_PTR_PRINTF, DECL_PENDING_INLINE_INFO (node));
+ }
+ if (DECL_TEMPLATE_INFO (node))
+ {
+ fprintf (file, " template-info ");
+ fprintf (file, HOST_PTR_PRINTF, DECL_TEMPLATE_INFO (node));
+ }
+}
+
+void
+print_lang_type (file, node, indent)
+ FILE *file;
+ register tree node;
+ int indent;
+{
+ if (TREE_CODE (node) == TEMPLATE_TYPE_PARM)
+ {
+ print_node (file, "tinfo", TYPE_VALUES (node), indent + 4);
+ return;
+ }
+
+ if (TREE_CODE (node) == UNINSTANTIATED_P_TYPE)
+ {
+ print_node (file, "template", UPT_TEMPLATE (node), indent + 4);
+ print_node (file, "parameters", UPT_PARMS (node), indent + 4);
+ return;
+ }
+
+ if (! (TREE_CODE (node) == RECORD_TYPE
+ || TREE_CODE (node) == UNION_TYPE))
+ return;
+
+ if (!TYPE_LANG_SPECIFIC (node))
+ return;
+
+ indent_to (file, indent + 3);
+
+ if (TYPE_NEEDS_CONSTRUCTING (node))
+    fputs ("needs-constructor", file);
+ if (TYPE_NEEDS_DESTRUCTOR (node))
+ fputs (" needs-destructor", file);
+ if (TYPE_HAS_DESTRUCTOR (node))
+ fputs (" ~X()", file);
+ if (TYPE_HAS_DEFAULT_CONSTRUCTOR (node))
+ fputs (" X()", file);
+ if (TYPE_HAS_CONVERSION (node))
+ fputs (" has-type-conversion", file);
+ if (TYPE_HAS_INT_CONVERSION (node))
+ fputs (" has-int-conversion", file);
+ if (TYPE_HAS_REAL_CONVERSION (node))
+ fputs (" has-float-conversion", file);
+ if (TYPE_HAS_INIT_REF (node))
+ {
+ if (TYPE_HAS_CONST_INIT_REF (node))
+ fputs (" X(constX&)", file);
+ else
+ fputs (" X(X&)", file);
+ }
+ if (TYPE_GETS_NEW (node) & 1)
+ fputs (" new", file);
+ if (TYPE_GETS_NEW (node) & 2)
+ fputs (" new[]", file);
+ if (TYPE_GETS_DELETE (node) & 1)
+ fputs (" delete", file);
+ if (TYPE_GETS_DELETE (node) & 2)
+ fputs (" delete[]", file);
+ if (TYPE_HAS_ASSIGNMENT (node))
+ fputs (" has=", file);
+ if (TYPE_HAS_ASSIGN_REF (node))
+ fputs (" this=(X&)", file);
+ if (TYPE_OVERLOADS_METHOD_CALL_EXPR (node))
+ fputs (" op->()", file);
+ if (TYPE_GETS_INIT_AGGR (node))
+ fputs (" gets X(X, ...)", file);
+ if (TYPE_OVERLOADS_CALL_EXPR (node))
+ fputs (" op()", file);
+ if (TYPE_OVERLOADS_ARRAY_REF (node))
+ fputs (" op[]", file);
+ if (TYPE_OVERLOADS_ARROW (node))
+ fputs (" op->", file);
+ if (TYPE_USES_MULTIPLE_INHERITANCE (node))
+ fputs (" uses-multiple-inheritance", file);
+
+ if (TREE_CODE (node) == RECORD_TYPE)
+ {
+ fprintf (file, " n_parents %d n_ancestors %d",
+ CLASSTYPE_N_BASECLASSES (node),
+ CLASSTYPE_N_SUPERCLASSES (node));
+ fprintf (file, " use_template=%d", CLASSTYPE_USE_TEMPLATE (node));
+ if (CLASSTYPE_INTERFACE_ONLY (node))
+ fprintf (file, " interface-only");
+ if (CLASSTYPE_INTERFACE_UNKNOWN (node))
+ fprintf (file, " interface-unknown");
+ print_node (file, "member-functions", CLASSTYPE_METHOD_VEC (node),
+ indent + 4);
+ print_node (file, "baselinks",
+ TYPE_BINFO_BASETYPES (node) ? CLASSTYPE_BASELINK_VEC (node) : NULL_TREE,
+ indent + 4);
+ }
+}
+
+void
+print_lang_identifier (file, node, indent)
+ FILE *file;
+ tree node;
+ int indent;
+{
+ print_node (file, "global", IDENTIFIER_GLOBAL_VALUE (node), indent + 4);
+ print_node (file, "class", IDENTIFIER_CLASS_VALUE (node), indent + 4);
+ print_node (file, "local", IDENTIFIER_LOCAL_VALUE (node), indent + 4);
+ print_node (file, "label", IDENTIFIER_LABEL_VALUE (node), indent + 4);
+ print_node (file, "template", IDENTIFIER_TEMPLATE (node), indent + 4);
+ print_node (file, "implicit", IDENTIFIER_IMPLICIT_DECL (node), indent + 4);
+ print_node (file, "error locus", IDENTIFIER_ERROR_LOCUS (node), indent + 4);
+}
diff --git a/gnu/usr.bin/cc/cc1plus/search.c b/gnu/usr.bin/cc/cc1plus/search.c
new file mode 100644
index 0000000..c4c6a4e
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/search.c
@@ -0,0 +1,3199 @@
+/* Breadth-first and depth-first routines for
+ searching multiple-inheritance lattice for GNU C++.
+ Copyright (C) 1987, 1989, 1992, 1993 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* High-level class interface. */
+
+#include "config.h"
+#include "tree.h"
+#include <stdio.h>
+#include "cp-tree.h"
+#include "obstack.h"
+#include "flags.h"
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+void init_search ();
+extern struct obstack *current_obstack;
+
+#include "stack.h"
+
+/* Obstack used for remembering decision points of breadth-first. */
+static struct obstack search_obstack;
+
+/* Methods for pushing and popping objects to and from obstacks. */
+struct stack_level *
+push_stack_level (obstack, tp, size)
+ struct obstack *obstack;
+ char *tp; /* Sony NewsOS 5.0 compiler doesn't like void * here. */
+ int size;
+{
+ struct stack_level *stack;
+ obstack_grow (obstack, tp, size);
+ stack = (struct stack_level *) ((char*)obstack_next_free (obstack) - size);
+ obstack_finish (obstack);
+ stack->obstack = obstack;
+ stack->first = (tree *) obstack_base (obstack);
+ stack->limit = obstack_room (obstack) / sizeof (tree *);
+ return stack;
+}
+
+struct stack_level *
+pop_stack_level (stack)
+ struct stack_level *stack;
+{
+ struct stack_level *tem = stack;
+ struct obstack *obstack = tem->obstack;
+ stack = tem->prev;
+ obstack_free (obstack, tem);
+ return stack;
+}
+
+#define search_level stack_level
+static struct search_level *search_stack;
+
+static tree lookup_field_1 ();
+static int lookup_fnfields_1 ();
+static void dfs_walk ();
+static int markedp ();
+static void dfs_unmark ();
+static void dfs_init_vbase_pointers ();
+
+static tree vbase_types;
+static tree vbase_decl, vbase_decl_ptr;
+static tree vbase_decl_ptr_intermediate;
+static tree vbase_init_result;
+
+/* Allocate a level of searching. */
+static struct search_level *
+push_search_level (stack, obstack)
+ struct stack_level *stack;
+ struct obstack *obstack;
+{
+ struct search_level tem;
+
+ tem.prev = stack;
+ return push_stack_level (obstack, (char *)&tem, sizeof (tem));
+}
+
+/* Discard a level of search allocation. */
+static struct search_level *
+pop_search_level (obstack)
+ struct stack_level *obstack;
+{
+ register struct search_level *stack = pop_stack_level (obstack);
+
+ return stack;
+}
+
+/* Search memoization. */
+struct type_level
+{
+ struct stack_level base;
+
+ /* First object allocated in obstack of entries. */
+ char *entries;
+
+ /* Number of types memoized in this context. */
+ int len;
+
+ /* Type being memoized; save this if we are saving
+ memoized contexts. */
+ tree type;
+};
+
+/* Obstack used for memoizing member and member function lookup. */
+
+static struct obstack type_obstack, type_obstack_entries;
+static struct type_level *type_stack;
+static tree _vptr_name;
+
+/* Make things that look like tree nodes, but allocate them
+ on type_obstack_entries. */
+static int my_tree_node_counter;
+static tree my_tree_cons (), my_build_string ();
+
+extern int flag_memoize_lookups, flag_save_memoized_contexts;
+
+/* Variables for gathering statistics. */
+static int my_memoized_entry_counter;
+static int memoized_fast_finds[2], memoized_adds[2], memoized_fast_rejects[2];
+static int memoized_fields_searched[2];
+static int n_fields_searched;
+static int n_calls_lookup_field, n_calls_lookup_field_1;
+static int n_calls_lookup_fnfields, n_calls_lookup_fnfields_1;
+static int n_calls_get_base_type;
+static int n_outer_fields_searched;
+static int n_contexts_saved;
+
+/* Local variables to help save memoization contexts. */
+static tree prev_type_memoized;
+static struct type_level *prev_type_stack;
+
+/* This list is used by push_class_decls to know what decls need to
+ be pushed into class scope. */
+static tree closed_envelopes = NULL_TREE;
+
+/* Allocate a level of type memoization context. */
+static struct type_level *
+push_type_level (stack, obstack)
+ struct stack_level *stack;
+ struct obstack *obstack;
+{
+ struct type_level tem;
+
+ tem.base.prev = stack;
+
+ obstack_finish (&type_obstack_entries);
+ tem.entries = (char *) obstack_base (&type_obstack_entries);
+ tem.len = 0;
+ tem.type = NULL_TREE;
+
+ return (struct type_level *)push_stack_level (obstack, (char *)&tem, sizeof (tem));
+}
+
+/* Discard a level of type memoization context. */
+
+static struct type_level *
+pop_type_level (stack)
+ struct type_level *stack;
+{
+ obstack_free (&type_obstack_entries, stack->entries);
+ return (struct type_level *)pop_stack_level ((struct stack_level *)stack);
+}
+
+/* Make something that looks like a TREE_LIST, but
+ do it on the type_obstack_entries obstack. */
+static tree
+my_tree_cons (purpose, value, chain)
+ tree purpose, value, chain;
+{
+ tree p = (tree)obstack_alloc (&type_obstack_entries, sizeof (struct tree_list));
+ ++my_tree_node_counter;
+ TREE_TYPE (p) = NULL_TREE;
+ ((HOST_WIDE_INT *)p)[3] = 0;
+ TREE_SET_CODE (p, TREE_LIST);
+ TREE_PURPOSE (p) = purpose;
+ TREE_VALUE (p) = value;
+ TREE_CHAIN (p) = chain;
+ return p;
+}
+
+static tree
+my_build_string (str)
+ char *str;
+{
+ tree p = (tree)obstack_alloc (&type_obstack_entries, sizeof (struct tree_string));
+ ++my_tree_node_counter;
+ TREE_TYPE (p) = 0;
+ ((int *)p)[3] = 0;
+ TREE_SET_CODE (p, STRING_CST);
+ TREE_STRING_POINTER (p) = str;
+ TREE_STRING_LENGTH (p) = strlen (str);
+ return p;
+}
+
+/* Memoizing machinery to make searches for multiple inheritance
+ reasonably efficient. */
+#define MEMOIZE_HASHSIZE 8
+typedef struct memoized_entry
+{
+ struct memoized_entry *chain;
+ int uid;
+ tree data_members[MEMOIZE_HASHSIZE];
+ tree function_members[MEMOIZE_HASHSIZE];
+} *ME;
+
+#define MEMOIZED_CHAIN(ENTRY) (((ME)ENTRY)->chain)
+#define MEMOIZED_UID(ENTRY) (((ME)ENTRY)->uid)
+#define MEMOIZED_FIELDS(ENTRY,INDEX) (((ME)ENTRY)->data_members[INDEX])
+#define MEMOIZED_FNFIELDS(ENTRY,INDEX) (((ME)ENTRY)->function_members[INDEX])
+/* The following is probably a lousy hash function. */
+#define MEMOIZED_HASH_FN(NODE) (((long)(NODE)>>4)&(MEMOIZE_HASHSIZE - 1))
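+/* E.g., a node at (hypothetical) address 0x5f30 hashes to
+   ((0x5f30 >> 4) & 7) == 3.  */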
+
+static struct memoized_entry *
+my_new_memoized_entry (chain)
+ struct memoized_entry *chain;
+{
+ struct memoized_entry *p =
+ (struct memoized_entry *)obstack_alloc (&type_obstack_entries,
+ sizeof (struct memoized_entry));
+ bzero (p, sizeof (struct memoized_entry));
+ MEMOIZED_CHAIN (p) = chain;
+ MEMOIZED_UID (p) = ++my_memoized_entry_counter;
+ return p;
+}
+
+/* Make an entry in the memoized table for type TYPE
+ that the entry for NAME is FIELD. */
+
+tree
+make_memoized_table_entry (type, name, function_p)
+ tree type, name;
+ int function_p;
+{
+ int index = MEMOIZED_HASH_FN (name);
+ tree entry, *prev_entry;
+
+ memoized_adds[function_p] += 1;
+ if (CLASSTYPE_MTABLE_ENTRY (type) == 0)
+ {
+ obstack_ptr_grow (&type_obstack, type);
+ obstack_blank (&type_obstack, sizeof (struct memoized_entry *));
+ CLASSTYPE_MTABLE_ENTRY (type) = (char *)my_new_memoized_entry ((struct memoized_entry *)0);
+ type_stack->len++;
+ if (type_stack->len * 2 >= type_stack->base.limit)
+ my_friendly_abort (88);
+ }
+ if (function_p)
+ prev_entry = &MEMOIZED_FNFIELDS (CLASSTYPE_MTABLE_ENTRY (type), index);
+ else
+ prev_entry = &MEMOIZED_FIELDS (CLASSTYPE_MTABLE_ENTRY (type), index);
+
+ entry = my_tree_cons (name, NULL_TREE, *prev_entry);
+ *prev_entry = entry;
+
+ /* Don't know the error message to give yet. */
+ TREE_TYPE (entry) = error_mark_node;
+
+ return entry;
+}
+
+/* When a new function or class context is entered, we build
+ a table of types which have been searched for members.
+ The table is an array (obstack) of types. When a type is
+ entered into the obstack, its CLASSTYPE_MTABLE_ENTRY
+ field is set to point to a new record, of type struct memoized_entry.
+
+ A non-NULL TREE_TYPE of the entry contains an access control error message.
+
+ The slots for the data members are arrays of tree nodes.
+ These tree nodes are lists, with the TREE_PURPOSE
+ of this list the known member name, and the TREE_VALUE
+ as the FIELD_DECL for the member.
+
+ For member functions, the TREE_PURPOSE is again the
+ name of the member functions for that class,
+   and the TREE_VALUE of the list is a pair
+   whose TREE_PURPOSE is a member function of this name,
+ and whose TREE_VALUE is a list of known argument lists this
+ member function has been called with. The TREE_TYPE of the pair,
+ if non-NULL, is an error message to print. */
+
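+/* Concretely (illustrative): once `x' has been looked up in class A,
+   CLASSTYPE_MTABLE_ENTRY (A) points at a memoized_entry whose
+   data_members[MEMOIZED_HASH_FN (x)] slot holds a TREE_LIST with
+   TREE_PURPOSE the identifier `x' and TREE_VALUE the FIELD_DECL once the
+   search fills it in, so the next lookup of `x' in A is a hash probe
+   rather than a walk over the lattice.  */
+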
+/* Tell search machinery that we are entering a new context, and
+ to update tables appropriately.
+
+ TYPE is the type of the context we are entering, which can
+ be NULL_TREE if we are not in a class's scope.
+
+   USE_OLD, if nonzero, tries to reuse the previous context.  */
+void
+push_memoized_context (type, use_old)
+ tree type;
+ int use_old;
+{
+ int len;
+ tree *tem;
+
+ if (prev_type_stack)
+ {
+ if (use_old && prev_type_memoized == type)
+ {
+#ifdef GATHER_STATISTICS
+ n_contexts_saved++;
+#endif
+ type_stack = prev_type_stack;
+ prev_type_stack = 0;
+
+ tem = &type_stack->base.first[0];
+ len = type_stack->len;
+ while (len--)
+ CLASSTYPE_MTABLE_ENTRY (tem[len*2]) = (char *)tem[len*2+1];
+ return;
+ }
+ /* Otherwise, need to pop old stack here. */
+ type_stack = pop_type_level (prev_type_stack);
+ prev_type_memoized = 0;
+ prev_type_stack = 0;
+ }
+
+ type_stack = push_type_level ((struct stack_level *)type_stack,
+ &type_obstack);
+ type_stack->type = type;
+}
+
+/* Tell search machinery that we have left a context.
+ We do not currently save these contexts for later use.
+ If we wanted to, we could not use pop_search_level, since
+ poping that level allows the data we have collected to
+   popping that level allows the data we have collected to
+void
+pop_memoized_context (use_old)
+ int use_old;
+{
+ int len;
+ tree *tem = &type_stack->base.first[0];
+
+ if (! flag_save_memoized_contexts)
+ use_old = 0;
+ else if (use_old)
+ {
+ len = type_stack->len;
+ while (len--)
+ tem[len*2+1] = (tree)CLASSTYPE_MTABLE_ENTRY (tem[len*2]);
+
+ prev_type_stack = type_stack;
+ prev_type_memoized = type_stack->type;
+ }
+
+ if (flag_memoize_lookups)
+ {
+ len = type_stack->len;
+ while (len--)
+ CLASSTYPE_MTABLE_ENTRY (tem[len*2])
+ = (char *)MEMOIZED_CHAIN (CLASSTYPE_MTABLE_ENTRY (tem[len*2]));
+ }
+ if (! use_old)
+ type_stack = pop_type_level (type_stack);
+ else
+ type_stack = (struct type_level *)type_stack->base.prev;
+}
+
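+/* Usage sketch (illustrative): callers bracket work in a class scope with
+   push_memoized_context (type, 1) ... pop_memoized_context (1).  When
+   flag_save_memoized_contexts is set, the popped table is parked in
+   prev_type_stack, so re-entering the same type restores each saved
+   CLASSTYPE_MTABLE_ENTRY instead of rebuilding the memoized lookups.  */
+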
+#if 0 /* unused */
+/* This is the newer recursive depth first search routine. */
+/* Return non-zero if PARENT is directly derived from TYPE. By directly
+ we mean it's only one step up the inheritance lattice. We check this
+ by walking horizontally across the types that TYPE directly inherits
+ from, to see if PARENT is among them. This is used by get_binfo and
+ by compute_access. */
+static int
+immediately_derived (parent, type)
+ tree parent, type;
+{
+ if (TYPE_BINFO (type))
+ {
+ tree binfos = BINFO_BASETYPES (TYPE_BINFO (type));
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+
+ if (parent == BINFO_TYPE (base_binfo))
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif
+
+/* Check whether the type given in BINFO is derived from PARENT. If
+ it isn't, return 0. If it is, but the derivation is MI-ambiguous
+ AND protect != 0, emit an error message and return error_mark_node.
+
+ Otherwise, if TYPE is derived from PARENT, return the actual base
+   information, unless one of the protection violations below
+ occurs, in which case emit an error message and return error_mark_node.
+
+ If PROTECT is 1, then check if access to a public field of PARENT
+ would be private. Also check for ambiguity. */
+
+tree
+get_binfo (parent, binfo, protect)
+ register tree parent, binfo;
+ int protect;
+{
+ tree type;
+ int dist;
+ tree rval = NULL_TREE;
+
+ if (TREE_CODE (parent) == TREE_VEC)
+ parent = BINFO_TYPE (parent);
+ /* unions cannot participate in inheritance relationships */
+ else if (TREE_CODE (parent) == UNION_TYPE)
+ return NULL_TREE;
+ else if (TREE_CODE (parent) != RECORD_TYPE)
+ my_friendly_abort (89);
+
+ if (TREE_CODE (binfo) == TREE_VEC)
+ type = BINFO_TYPE (binfo);
+ else if (TREE_CODE (binfo) == RECORD_TYPE)
+ type = binfo;
+ else if (TREE_CODE (binfo) == UNION_TYPE)
+ return NULL_TREE;
+ else
+ my_friendly_abort (90);
+
+ dist = get_base_distance (parent, binfo, protect, &rval);
+
+ if (dist == -3)
+ {
+ cp_error ("fields of `%T' are inaccessible in `%T' due to private inheritance",
+ parent, type);
+ return error_mark_node;
+ }
+ else if (dist == -2 && protect)
+ {
+ cp_error ("type `%T' is ambiguous base class for type `%T'", parent,
+ type);
+ return error_mark_node;
+ }
+
+ return rval;
+}
+
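+/* Example (illustrative): with `struct A {}; struct B : private A {};',
+   get_binfo (A, B, 1) sees dist == -3 from get_base_distance, reports the
+   private-inheritance error and returns error_mark_node; with PROTECT == 0
+   it simply returns the binfo for A within B.  */
+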
+/* This is the newer depth first get_base_distance routine. */
+static int
+get_base_distance_recursive (binfo, depth, is_private, basetype_path, rval,
+ rval_private_ptr, new_binfo_ptr, parent, path_ptr,
+ protect, via_virtual_ptr, via_virtual)
+ tree binfo, basetype_path, *new_binfo_ptr, parent, *path_ptr;
+ int *rval_private_ptr, depth, is_private, rval, protect, *via_virtual_ptr,
+ via_virtual;
+{
+ tree binfos;
+ int i, n_baselinks;
+
+ if (BINFO_TYPE (binfo) == parent || binfo == parent)
+ {
+ if (rval == -1)
+ {
+ rval = depth;
+ *rval_private_ptr = is_private;
+ *new_binfo_ptr = binfo;
+ *via_virtual_ptr = via_virtual;
+ }
+ else
+ {
+ int same_object = (tree_int_cst_equal (BINFO_OFFSET (*new_binfo_ptr),
+ BINFO_OFFSET (binfo))
+ && *via_virtual_ptr && via_virtual);
+
+ if (*via_virtual_ptr && via_virtual==0)
+ {
+ *rval_private_ptr = is_private;
+ *new_binfo_ptr = binfo;
+ *via_virtual_ptr = via_virtual;
+ }
+ else if (same_object)
+ {
+ if (*rval_private_ptr && ! is_private)
+ {
+ *rval_private_ptr = is_private;
+ *new_binfo_ptr = binfo;
+ *via_virtual_ptr = via_virtual;
+ }
+ return rval;
+ }
+
+ rval = -2;
+ }
+ return rval;
+ }
+
+ binfos = BINFO_BASETYPES (binfo);
+ n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+ depth += 1;
+
+ /* Process base types. */
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+
+ /* Find any specific instance of a virtual base, when searching with
+ a binfo... */
+ if (BINFO_MARKED (base_binfo) == 0 || TREE_CODE (parent) == TREE_VEC)
+ {
+ int via_private
+ = (protect
+ && (is_private
+ || (!TREE_VIA_PUBLIC (base_binfo)
+ && !is_friend (BINFO_TYPE (binfo), current_scope ()))));
+ int this_virtual = via_virtual || TREE_VIA_VIRTUAL (base_binfo);
+ int was;
+
+ /* When searching for a non-virtual, we cannot mark
+ virtually found binfos. */
+ if (! this_virtual)
+ SET_BINFO_MARKED (base_binfo);
+
+#define WATCH_VALUES(rval, via_private) (rval == -1 ? 3 : via_private)
+
+ was = WATCH_VALUES (rval, *via_virtual_ptr);
+ rval = get_base_distance_recursive (base_binfo, depth, via_private,
+ binfo, rval, rval_private_ptr,
+ new_binfo_ptr, parent, path_ptr,
+ protect, via_virtual_ptr,
+ this_virtual);
+ /* watch for updates; only update if path is good. */
+ if (path_ptr && WATCH_VALUES (rval, *via_virtual_ptr) != was)
+ BINFO_INHERITANCE_CHAIN (base_binfo) = binfo;
+ if (rval == -2 && *via_virtual_ptr == 0)
+ return rval;
+
+#undef WATCH_VALUES
+
+ }
+ }
+
+ return rval;
+}
+
+/* Return the number of levels between type PARENT and the type given
+ in BINFO, following the leftmost path to PARENT not found along a
+   virtual path; if there are no real PARENTs (all come from virtual
+ base classes), then follow the leftmost path to PARENT.
+
+ Return -1 if TYPE is not derived from PARENT.
+ Return -2 if PARENT is an ambiguous base class of TYPE, and PROTECT is
+ non-negative.
+ Return -3 if PARENT is private to TYPE, and PROTECT is non-zero.
+
+ If PATH_PTR is non-NULL, then also build the list of types
+   from PARENT to TYPE, with TREE_VIA_VIRTUAL and TREE_VIA_PUBLIC
+ set.
+
+ PARENT can also be a binfo, in which case that exact parent is found
+ and no other. convert_pointer_to_real uses this functionality.
+
+ If BINFO is a binfo, its BINFO_INHERITANCE_CHAIN will be left alone. */
+
+int
+get_base_distance (parent, binfo, protect, path_ptr)
+ register tree parent, binfo;
+ int protect;
+ tree *path_ptr;
+{
+ int rval;
+ int rval_private = 0;
+ tree type;
+ tree new_binfo = NULL_TREE;
+ int via_virtual;
+ int watch_access = protect;
+
+ if (TREE_CODE (parent) != TREE_VEC)
+ parent = TYPE_MAIN_VARIANT (parent);
+
+ if (TREE_CODE (binfo) == TREE_VEC)
+ type = BINFO_TYPE (binfo);
+ else if (IS_AGGR_TYPE_CODE (TREE_CODE (binfo)))
+ {
+ type = binfo;
+ binfo = TYPE_BINFO (type);
+
+ if (path_ptr)
+ BINFO_INHERITANCE_CHAIN (binfo) = NULL_TREE;
+ }
+ else
+ my_friendly_abort (92);
+
+ if (parent == type || parent == binfo)
+ {
+ /* If the distance is 0, then we don't really need
+ a path pointer, but we shouldn't let garbage go back. */
+ if (path_ptr)
+ *path_ptr = binfo;
+ return 0;
+ }
+
+ if (path_ptr)
+ watch_access = 1;
+
+ rval = get_base_distance_recursive (binfo, 0, 0, NULL_TREE, -1,
+ &rval_private, &new_binfo, parent,
+ path_ptr, watch_access, &via_virtual, 0);
+
+ dfs_walk (binfo, dfs_unmark, markedp);
+
+ /* Access restrictions don't count if we found an ambiguous basetype. */
+ if (rval == -2 && protect >= 0)
+ rval_private = 0;
+
+ if (rval && protect && rval_private)
+ return -3;
+
+ /* find real virtual base classes. */
+ if (rval == -1 && TREE_CODE (parent) == TREE_VEC
+ && parent == binfo_member (BINFO_TYPE (parent),
+ CLASSTYPE_VBASECLASSES (type)))
+ {
+ BINFO_INHERITANCE_CHAIN (parent) = binfo;
+ new_binfo = parent;
+ rval = 1;
+ }
+
+ if (path_ptr)
+ *path_ptr = new_binfo;
+ return rval;
+}
+
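+/* Example (illustrative): for `struct A {}; struct B : A {};
+   struct C : B {};', get_base_distance (A, C, 0, 0) returns 2, the number
+   of derivation steps from C up to A; an ambiguous base reached through
+   two distinct non-virtual paths would yield -2 instead.  */
+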
+/* Search for a member with name NAME in a multiple inheritance lattice
+ specified by TYPE. If it does not exist, return NULL_TREE.
+ If the member is ambiguously referenced, return `error_mark_node'.
+ Otherwise, return the FIELD_DECL. */
+
+/* Do a 1-level search for NAME as a member of TYPE. The caller must
+ figure out whether it can access this field. (Since it is only one
+ level, this is reasonable.) */
+static tree
+lookup_field_1 (type, name)
+ tree type, name;
+{
+ register tree field = TYPE_FIELDS (type);
+
+#ifdef GATHER_STATISTICS
+ n_calls_lookup_field_1++;
+#endif
+ while (field)
+ {
+#ifdef GATHER_STATISTICS
+ n_fields_searched++;
+#endif
+ if (DECL_NAME (field) == NULL_TREE
+ && TREE_CODE (TREE_TYPE (field)) == UNION_TYPE)
+ {
+ tree temp = lookup_field_1 (TREE_TYPE (field), name);
+ if (temp)
+ return temp;
+ }
+ if (DECL_NAME (field) == name)
+ {
+ if ((TREE_CODE(field) == VAR_DECL || TREE_CODE(field) == CONST_DECL)
+ && DECL_ASSEMBLER_NAME (field) != NULL)
+ GNU_xref_ref(current_function_decl,
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (field)));
+ return field;
+ }
+ field = TREE_CHAIN (field);
+ }
+ /* Not found. */
+ if (name == _vptr_name)
+ {
+ /* Give the user what s/he thinks s/he wants. */
+ if (TYPE_VIRTUAL_P (type))
+ return CLASSTYPE_VFIELD (type);
+ }
+ return NULL_TREE;
+}
+
+/* There are a number of cases we need to be aware of here:
+ current_class_type current_function_decl
+ * global NULL NULL
+ * fn-local NULL SET
+ * class-local SET NULL
+ * class->fn SET SET
+ * fn->class SET SET
+
+ Those last two make life interesting. If we're in a function which is
+ itself inside a class, we need decls to go into the fn's decls (our
+ second case below). But if we're in a class and the class itself is
+ inside a function, we need decls to go into the decls for the class. To
+   achieve this last goal, we must see if, when both current_class_type and
+ current_function_decl are set, the class was declared inside that
+ function. If so, we know to put the decls into the class's scope. */
+
+tree
+current_scope ()
+{
+ if (current_function_decl == NULL_TREE)
+ return current_class_type;
+ if (current_class_type == NULL_TREE)
+ return current_function_decl;
+ if (DECL_CLASS_CONTEXT (current_function_decl) == current_class_type)
+ return current_function_decl;
+
+ return current_class_type;
+}
+
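+/* E.g. (illustrative): while compiling the body of `void S::f () { ... }',
+   current_class_type is S and current_function_decl is S::f; since
+   DECL_CLASS_CONTEXT (S::f) == S, this is the `class->fn' case above and
+   the FUNCTION_DECL is the current scope.  */
+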
+/* Compute the access of FIELD. This is done by computing
+ the access available to each type in BASETYPES (which comes
+ as a list of [via_public/basetype] in reverse order, namely base
+   class before derived class).  The first one which defines an
+ access defines the access for the field. Otherwise, the
+ access of the field is that which occurs normally.
+
+ Uses global variables CURRENT_CLASS_TYPE and
+ CURRENT_FUNCTION_DECL to use friend relationships
+ if necessary.
+
+ This will be static when lookup_fnfield comes into this file.
+
+ access_public means that the field can be accessed by the current lexical
+ scope.
+
+ access_protected means that the field cannot be accessed by the current
+ lexical scope because it is protected.
+
+ access_private means that the field cannot be accessed by the current
+ lexical scope because it is private. */
+
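+/* For instance (illustrative): with `class B { int i; };' (so B::i is
+   private) and a lookup of `i' in B from an unrelated scope, the field is
+   found immediately within the object, the friendship test fails,
+   TREE_PRIVATE (field) holds, and the result is access_private.  */
+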
+#if 0
+#define PUBLIC_RETURN return (DECL_PUBLIC (field) = 1), access_public
+#define PROTECTED_RETURN return (DECL_PROTECTED (field) = 1), access_protected
+#define PRIVATE_RETURN return (DECL_PRIVATE (field) = 1), access_private
+#else
+#define PUBLIC_RETURN return access_public
+#define PROTECTED_RETURN return access_protected
+#define PRIVATE_RETURN return access_private
+#endif
+
+#if 0
+/* Disabled with DECL_PUBLIC &c. */
+static tree previous_scope = NULL_TREE;
+#endif
+
+enum access_type
+compute_access (basetype_path, field)
+ tree basetype_path, field;
+{
+ enum access_type access;
+ tree types;
+ tree context;
+ int protected_ok, via_protected;
+#if 1
+ /* Replaces static decl above. */
+ tree previous_scope;
+#endif
+ int static_mem =
+ ((TREE_CODE (field) == FUNCTION_DECL && DECL_STATIC_FUNCTION_P (field))
+ || (TREE_CODE (field) != FUNCTION_DECL && TREE_STATIC (field)));
+
+ /* The field lives in the current class. */
+ if (BINFO_TYPE (basetype_path) == current_class_type)
+ return access_public;
+
+#if 0
+ /* Disabled until pushing function scope clears these out. If ever. */
+ /* Make these special cases fast. */
+ if (current_scope () == previous_scope)
+ {
+ if (DECL_PUBLIC (field))
+ return access_public;
+ if (DECL_PROTECTED (field))
+ return access_protected;
+ if (DECL_PRIVATE (field))
+ return access_private;
+ }
+#endif
+
+ previous_scope = current_scope ();
+
+ context = DECL_CLASS_CONTEXT (field);
+ if (context == NULL_TREE)
+ context = DECL_CONTEXT (field);
+
+ /* Fields coming from nested anonymous unions have their DECL_CLASS_CONTEXT
+ slot set to the union type rather than the record type containing
+ the anonymous union. In this case, DECL_FIELD_CONTEXT is correct. */
+ if (context && TREE_CODE (context) == UNION_TYPE
+ && ANON_AGGRNAME_P (TYPE_IDENTIFIER (context)))
+ context = DECL_FIELD_CONTEXT (field);
+
+ /* Virtual function tables are never private. But we should know that
+ we are looking for this, and not even try to hide it. */
+ if (DECL_NAME (field) && VFIELD_NAME_P (DECL_NAME (field)) == 1)
+ PUBLIC_RETURN;
+
+ /* Member found immediately within object. */
+ if (BINFO_INHERITANCE_CHAIN (basetype_path) == NULL_TREE)
+ {
+ /* Are we (or an enclosing scope) friends with the class that has
+ FIELD? */
+ if (is_friend (context, previous_scope))
+ PUBLIC_RETURN;
+
+ /* If it's private, it's private, you letch. */
+ if (TREE_PRIVATE (field))
+ PRIVATE_RETURN;
+
+ /* ARM $11.5. Member functions of a derived class can access the
+ non-static protected members of a base class only through a
+ pointer to the derived class, a reference to it, or an object
+ of it. Also any subsequently derived classes also have
+ access. */
+ else if (TREE_PROTECTED (field))
+ {
+ if (current_class_type
+ && static_mem
+ && ACCESSIBLY_DERIVED_FROM_P (context, current_class_type))
+ PUBLIC_RETURN;
+ else
+ PROTECTED_RETURN;
+ }
+ else
+ PUBLIC_RETURN;
+ }
+
+ /* must reverse more than one element */
+ basetype_path = reverse_path (basetype_path);
+ types = basetype_path;
+ via_protected = 0;
+ access = access_default;
+ protected_ok = static_mem && current_class_type
+ && ACCESSIBLY_DERIVED_FROM_P (BINFO_TYPE (types), current_class_type);
+
+ while (1)
+ {
+ tree member;
+ tree binfo = types;
+ tree type = BINFO_TYPE (binfo);
+ int private_ok = 0;
+
+ /* Friends of a class can see protected members of its bases.
+ Note that classes are their own friends. */
+ if (is_friend (type, previous_scope))
+ {
+ protected_ok = 1;
+ private_ok = 1;
+ }
+
+ member = purpose_member (type, DECL_ACCESS (field));
+ if (member)
+ {
+ access = (enum access_type) TREE_VALUE (member);
+ break;
+ }
+
+ types = BINFO_INHERITANCE_CHAIN (types);
+
+ /* If the next type was VIA_PROTECTED, then fields of all remaining
+ classes past that one are *at least* protected. */
+ if (types)
+ {
+ if (TREE_VIA_PROTECTED (types))
+ via_protected = 1;
+ else if (! TREE_VIA_PUBLIC (types) && ! private_ok)
+ {
+ access = access_private;
+ break;
+ }
+ }
+ else
+ break;
+ }
+ reverse_path (basetype_path);
+
+ /* No special visibilities apply. Use normal rules. */
+
+ if (access == access_default)
+ {
+ if (is_friend (context, previous_scope))
+ access = access_public;
+ else if (TREE_PRIVATE (field))
+ access = access_private;
+ else if (TREE_PROTECTED (field))
+ access = access_protected;
+ else
+ access = access_public;
+ }
+
+ if (access == access_public && via_protected)
+ access = access_protected;
+
+ if (access == access_protected && protected_ok)
+ access = access_public;
+
+#if 0
+ if (access == access_public)
+ DECL_PUBLIC (field) = 1;
+ else if (access == access_protected)
+ DECL_PROTECTED (field) = 1;
+ else if (access == access_private)
+ DECL_PRIVATE (field) = 1;
+ else my_friendly_abort (96);
+#endif
+ return access;
+}
+
+/* Routine to see if the sub-object denoted by the binfo PARENT can be
+ found as a base class and sub-object of the object denoted by
+ BINFO. This routine relies upon binfos not being shared, except
+ for binfos for virtual bases. */
+static int
+is_subobject_of_p (parent, binfo)
+ tree parent, binfo;
+{
+ tree binfos = BINFO_BASETYPES (binfo);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ if (parent == binfo)
+ return 1;
+
+ /* Process and/or queue base types. */
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ if (TREE_VIA_VIRTUAL (base_binfo))
+ base_binfo = TYPE_BINFO (BINFO_TYPE (base_binfo));
+ if (is_subobject_of_p (parent, base_binfo))
+ return 1;
+ }
+ return 0;
+}
+
+/* See if one FIELD_DECL hides another.  This routine is meant to
+ correspond to ANSI working paper Sept 17, 1992 10p4. The two
+ binfos given are the binfos corresponding to the particular places
+ the FIELD_DECLs are found. This routine relies upon binfos not
+ being shared, except for virtual bases. */
+static int
+hides (hider_binfo, hidee_binfo)
+ tree hider_binfo, hidee_binfo;
+{
+ /* hider hides hidee, if hider has hidee as a base class and
+ the instance of hidee is a sub-object of hider. The first
+     part is always true if the second part is true.
+
+ When hider and hidee are the same (two ways to get to the exact
+ same member) we consider either one as hiding the other. */
+ return is_subobject_of_p (hidee_binfo, hider_binfo);
+}
+
+/* Very similar to lookup_fnfields_1 but it ensures that at least one
+ function was declared inside the class given by TYPE. It really should
+ only return functions that match the given TYPE. */
+static int
+lookup_fnfields_here (type, name)
+ tree type, name;
+{
+ int index = lookup_fnfields_1 (type, name);
+ tree fndecls;
+
+ if (index <= 0)
+ return index;
+ fndecls = TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (type), index);
+ while (fndecls)
+ {
+ if (TYPE_MAIN_VARIANT (DECL_CLASS_CONTEXT (fndecls))
+ == TYPE_MAIN_VARIANT (type))
+ return index;
+ fndecls = TREE_CHAIN (fndecls);
+ }
+ return -1;
+}
+
+/* Look for a field named NAME in an inheritance lattice dominated by
+ XBASETYPE. PROTECT is zero if we can avoid computing access
+ information, otherwise it is 1. WANT_TYPE is 1 when we should only
+   return TYPE_DECLs; if no TYPE_DECL can be found, return NULL_TREE.
+
+   It was not clear what should happen if WANT_TYPE is set and an
+   ambiguity is found.  At least one caller (lookup_name) expects not
+   to see the error.  */
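+/* (A hypothetical call, for illustration: lookup_field (TYPE_BINFO (t),
+   get_identifier ("x"), 1, 0) looks up a member `x' in class t with
+   access checking, yielding the decl, NULL_TREE, or error_mark_node.)  */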
+tree
+lookup_field (xbasetype, name, protect, want_type)
+ register tree xbasetype, name;
+ int protect, want_type;
+{
+ int head = 0, tail = 0;
+ tree rval, rval_binfo = NULL_TREE, rval_binfo_h;
+ tree type, basetype_chain, basetype_path;
+ enum access_type this_v = access_default;
+ tree entry, binfo, binfo_h;
+ enum access_type own_access = access_default;
+ int vbase_name_p = VBASE_NAME_P (name);
+
+ /* rval_binfo is the binfo associated with the found member, note,
+ this can be set with useful information, even when rval is not
+ set, because it must deal with ALL members, not just non-function
+ members. It is used for ambiguity checking and the hidden
+ checks. Whereas rval is only set if a proper (not hidden)
+ non-function member is found. */
+
+ /* rval_binfo_h and binfo_h are binfo values used when we perform the
+ hiding checks, as virtual base classes may not be shared. The strategy
+     is that we always go into the binfo hierarchy owned by TYPE_BINFO of
+ virtual base classes, as we cross virtual base class lines. This way
+ we know that binfo of a virtual base class will always == itself when
+ found along any line. (mrs) */
+
+ char *errstr = 0;
+
+  /* Hash index for the memoized lookup tables.  */
+ int index = MEMOIZED_HASH_FN (name);
+
+ /* If we are looking for a constructor in a templated type, use the
+ unspecialized name, as that is how we store it. */
+ if (IDENTIFIER_TEMPLATE (name))
+ name = constructor_name (name);
+
+ if (TREE_CODE (xbasetype) == TREE_VEC)
+ {
+ type = BINFO_TYPE (xbasetype);
+ basetype_path = xbasetype;
+ }
+ else if (IS_AGGR_TYPE_CODE (TREE_CODE (xbasetype)))
+ {
+ type = xbasetype;
+ basetype_path = TYPE_BINFO (xbasetype);
+ BINFO_VIA_PUBLIC (basetype_path) = 1;
+ BINFO_INHERITANCE_CHAIN (basetype_path) = NULL_TREE;
+ }
+ else my_friendly_abort (97);
+
+ if (CLASSTYPE_MTABLE_ENTRY (type))
+ {
+ tree tem = MEMOIZED_FIELDS (CLASSTYPE_MTABLE_ENTRY (type), index);
+
+ while (tem && TREE_PURPOSE (tem) != name)
+ {
+ memoized_fields_searched[0]++;
+ tem = TREE_CHAIN (tem);
+ }
+ if (tem)
+ {
+ if (protect && TREE_TYPE (tem))
+ {
+ error (TREE_STRING_POINTER (TREE_TYPE (tem)),
+ IDENTIFIER_POINTER (name),
+ TYPE_NAME_STRING (DECL_FIELD_CONTEXT (TREE_VALUE (tem))));
+ return error_mark_node;
+ }
+ if (TREE_VALUE (tem) == NULL_TREE)
+ memoized_fast_rejects[0] += 1;
+ else
+ memoized_fast_finds[0] += 1;
+ return TREE_VALUE (tem);
+ }
+ }
+
+#ifdef GATHER_STATISTICS
+ n_calls_lookup_field++;
+#endif
+ if (protect && flag_memoize_lookups && ! global_bindings_p ())
+ entry = make_memoized_table_entry (type, name, 0);
+ else
+ entry = 0;
+
+ rval = lookup_field_1 (type, name);
+ if (rval || lookup_fnfields_here (type, name)>=0)
+ {
+ rval_binfo = basetype_path;
+ rval_binfo_h = rval_binfo;
+ }
+
+ if (rval && TREE_CODE (rval) != TYPE_DECL && want_type)
+ rval = NULL_TREE;
+
+ if (rval)
+ {
+ if (protect)
+ {
+ if (TREE_PRIVATE (rval) | TREE_PROTECTED (rval))
+ this_v = compute_access (basetype_path, rval);
+ if (TREE_CODE (rval) == CONST_DECL)
+ {
+ if (this_v == access_private)
+ errstr = "enum `%D' is a private value of class `%T'";
+ else if (this_v == access_protected)
+ errstr = "enum `%D' is a protected value of class `%T'";
+ }
+ else
+ {
+ if (this_v == access_private)
+ errstr = "member `%D' is a private member of class `%T'";
+ else if (this_v == access_protected)
+ errstr = "member `%D' is a protected member of class `%T'";
+ }
+ }
+
+ if (entry)
+ {
+ if (errstr)
+ {
+ /* This depends on behavior of lookup_field_1! */
+ tree error_string = my_build_string (errstr);
+ TREE_TYPE (entry) = error_string;
+ }
+ else
+ {
+ /* Let entry know there is no problem with this access. */
+ TREE_TYPE (entry) = NULL_TREE;
+ }
+ TREE_VALUE (entry) = rval;
+ }
+
+ if (errstr && protect)
+ {
+ cp_error (errstr, name, type);
+ return error_mark_node;
+ }
+ return rval;
+ }
+
+ basetype_chain = build_tree_list (NULL_TREE, basetype_path);
+ TREE_VIA_PUBLIC (basetype_chain) = TREE_VIA_PUBLIC (basetype_path);
+ TREE_VIA_PROTECTED (basetype_chain) = TREE_VIA_PROTECTED (basetype_path);
+ TREE_VIA_VIRTUAL (basetype_chain) = TREE_VIA_VIRTUAL (basetype_path);
+
+  /* The ambiguity check relies upon breadth-first searching.  */
+
+ search_stack = push_search_level (search_stack, &search_obstack);
+ binfo = basetype_path;
+ binfo_h = binfo;
+
+ while (1)
+ {
+ tree binfos = BINFO_BASETYPES (binfo);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+ tree nval;
+
+ /* Process and/or queue base types. */
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ if (BINFO_FIELDS_MARKED (base_binfo) == 0)
+ {
+ tree btypes;
+
+ SET_BINFO_FIELDS_MARKED (base_binfo);
+ btypes = my_tree_cons (NULL_TREE, base_binfo, basetype_chain);
+ TREE_VIA_PUBLIC (btypes) = TREE_VIA_PUBLIC (base_binfo);
+ TREE_VIA_PROTECTED (btypes) = TREE_VIA_PROTECTED (base_binfo);
+ TREE_VIA_VIRTUAL (btypes) = TREE_VIA_VIRTUAL (base_binfo);
+ if (TREE_VIA_VIRTUAL (base_binfo))
+ btypes = tree_cons (NULL_TREE,
+ TYPE_BINFO (BINFO_TYPE (TREE_VEC_ELT (BINFO_BASETYPES (binfo_h), i))),
+ btypes);
+ else
+ btypes = tree_cons (NULL_TREE,
+ TREE_VEC_ELT (BINFO_BASETYPES (binfo_h), i),
+ btypes);
+ obstack_ptr_grow (&search_obstack, btypes);
+ tail += 1;
+ if (tail >= search_stack->limit)
+ my_friendly_abort (98);
+ }
+ }
+
+ /* Process head of queue, if one exists. */
+ if (head >= tail)
+ break;
+
+ basetype_chain = search_stack->first[head++];
+ binfo_h = TREE_VALUE (basetype_chain);
+ basetype_chain = TREE_CHAIN (basetype_chain);
+ basetype_path = TREE_VALUE (basetype_chain);
+ if (TREE_CHAIN (basetype_chain))
+ BINFO_INHERITANCE_CHAIN (basetype_path) = TREE_VALUE (TREE_CHAIN (basetype_chain));
+ else
+ BINFO_INHERITANCE_CHAIN (basetype_path) = NULL_TREE;
+
+ binfo = basetype_path;
+ type = BINFO_TYPE (binfo);
+
+ /* See if we can find NAME in TYPE. If RVAL is nonzero,
+ and we do find NAME in TYPE, verify that such a second
+ sighting is in fact legal. */
+
+ nval = lookup_field_1 (type, name);
+
+ if (nval || lookup_fnfields_here (type, name)>=0)
+ {
+ if (nval && nval == rval && SHARED_MEMBER_P (nval))
+ {
+ /* This is ok, the member found is the same [class.ambig] */
+ }
+ else if (rval_binfo && hides (rval_binfo_h, binfo_h))
+ {
+ /* This is ok, the member found is in rval_binfo, not
+ here (binfo). */
+ }
+ else if (rval_binfo==NULL_TREE || hides (binfo_h, rval_binfo_h))
+ {
+ /* This is ok, the member found is here (binfo), not in
+ rval_binfo. */
+ if (nval)
+ {
+ rval = nval;
+ if (entry || protect)
+ this_v = compute_access (basetype_path, rval);
+ /* These may look ambiguous, but they really are not. */
+ if (vbase_name_p)
+ break;
+ }
+ else
+ {
+ /* Undo finding it before, as something else hides it. */
+ rval = NULL_TREE;
+ }
+ rval_binfo = binfo;
+ rval_binfo_h = binfo_h;
+ }
+ else
+ {
+ /* This is ambiguous. */
+ errstr = "request for member `%D' is ambiguous";
+ protect = 2;
+ break;
+ }
+ }
+ }
+ {
+ tree *tp = search_stack->first;
+ tree *search_tail = tp + tail;
+
+ if (entry)
+ TREE_VALUE (entry) = rval;
+
+ if (want_type && (rval == NULL_TREE || TREE_CODE (rval) != TYPE_DECL))
+ {
+ rval = NULL_TREE;
+ errstr = 0;
+ }
+
+ /* If this FIELD_DECL defines its own access level, deal with that. */
+ if (rval && errstr == 0
+ && ((protect&1) || entry)
+ && DECL_LANG_SPECIFIC (rval)
+ && DECL_ACCESS (rval))
+ {
+ while (tp < search_tail)
+ {
+	      /* It is possible for one of the derived types on the path to
+		 have defined special access for this field.  Look for such
+		 declarations and report an error if a conflict is found.
+		 Initialize NEW_V so OWN_ACCESS below never reads an
+		 undefined value.  */
+	      enum access_type new_v = access_default;
+
+ if (this_v != access_default)
+ new_v = compute_access (TREE_VALUE (TREE_CHAIN (*tp)), rval);
+ if (this_v != access_default && new_v != this_v)
+ {
+ errstr = "conflicting access to member `%D'";
+ this_v = access_default;
+ }
+ own_access = new_v;
+ CLEAR_BINFO_FIELDS_MARKED (TREE_VALUE (TREE_CHAIN (*tp)));
+ tp += 1;
+ }
+ }
+ else
+ {
+ while (tp < search_tail)
+ {
+ CLEAR_BINFO_FIELDS_MARKED (TREE_VALUE (TREE_CHAIN (*tp)));
+ tp += 1;
+ }
+ }
+ }
+ search_stack = pop_search_level (search_stack);
+
+ if (errstr == 0)
+ {
+ if (own_access == access_private)
+ errstr = "member `%D' declared private";
+ else if (own_access == access_protected)
+ errstr = "member `%D' declared protected";
+ else if (this_v == access_private)
+ errstr = TREE_PRIVATE (rval)
+ ? "member `%D' is private"
+ : "member `%D' is from private base class";
+ else if (this_v == access_protected)
+ errstr = TREE_PROTECTED (rval)
+ ? "member `%D' is protected"
+ : "member `%D' is from protected base class";
+ }
+
+ if (entry)
+ {
+ if (errstr)
+ {
+ tree error_string = my_build_string (errstr);
+ /* Save error message with entry. */
+ TREE_TYPE (entry) = error_string;
+ }
+ else
+ {
+ /* Mark entry as having no error string. */
+ TREE_TYPE (entry) = NULL_TREE;
+ }
+ }
+
+ if (errstr && protect)
+ {
+ cp_error (errstr, name, type);
+ rval = error_mark_node;
+ }
+ return rval;
+}
+
+/* Try to find NAME inside a nested class. */
+tree
+lookup_nested_field (name, complain)
+ tree name;
+ int complain;
+{
+ register tree t;
+
+ tree id = NULL_TREE;
+ if (TREE_CHAIN (current_class_type))
+ {
+ /* Climb our way up the nested ladder, seeing if we're trying to
+ modify a field in an enclosing class. If so, we should only
+ be able to modify if it's static. */
+ for (t = TREE_CHAIN (current_class_type);
+ t && DECL_CONTEXT (t);
+ t = TREE_CHAIN (DECL_CONTEXT (t)))
+ {
+ if (TREE_CODE (DECL_CONTEXT (t)) != RECORD_TYPE)
+ break;
+
+ /* N.B.: lookup_field will do the access checking for us */
+ id = lookup_field (DECL_CONTEXT (t), name, complain, 0);
+ if (id == error_mark_node)
+ {
+ id = NULL_TREE;
+ continue;
+ }
+
+ if (id != NULL_TREE)
+ {
+ if (TREE_CODE (id) == FIELD_DECL
+ && ! TREE_STATIC (id)
+ && TREE_TYPE (id) != error_mark_node)
+ {
+ if (complain)
+ {
+ /* At parse time, we don't want to give this error, since
+ we won't have enough state to make this kind of
+ decision properly. But there are times (e.g., with
+ enums in nested classes) when we do need to call
+ this fn at parse time. So, in those cases, we pass
+ complain as a 0 and just return a NULL_TREE. */
+ error ("assignment to non-static member `%s' of enclosing class `%s'",
+ lang_printable_name (id),
+ IDENTIFIER_POINTER (TYPE_IDENTIFIER
+ (DECL_CONTEXT (t))));
+ /* Mark this for do_identifier(). It would otherwise
+ claim that the variable was undeclared. */
+ TREE_TYPE (id) = error_mark_node;
+ }
+ else
+ {
+ id = NULL_TREE;
+ continue;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ return id;
+}
+
+/* TYPE is a class type.  Return the index within the method vector
+   of the fields with name NAME, or -1 if no such field exists.  */
+static int
+lookup_fnfields_1 (type, name)
+ tree type, name;
+{
+ register tree method_vec = CLASSTYPE_METHOD_VEC (type);
+
+ if (method_vec != 0)
+ {
+ register tree *methods = &TREE_VEC_ELT (method_vec, 0);
+ register tree *end = TREE_VEC_END (method_vec);
+
+#ifdef GATHER_STATISTICS
+ n_calls_lookup_fnfields_1++;
+#endif
+ if (*methods && name == constructor_name (type))
+ return 0;
+
+ while (++methods != end)
+ {
+#ifdef GATHER_STATISTICS
+ n_outer_fields_searched++;
+#endif
+ if (DECL_NAME (*methods) == name)
+ break;
+ }
+ if (methods != end)
+ return methods - &TREE_VEC_ELT (method_vec, 0);
+ }
+
+ return -1;
+}
+
+/* Starting from BASETYPE, return a TREE_BASELINK-like object
+ which gives the following information (in a list):
+
+   TREE_TYPE: list of basetypes needed to get to...
+   TREE_VALUE: list of all functions of the given type
+   which have name NAME.
+
+   No access information is computed by this function,
+   other than to adorn the list of basetypes with
+   TREE_VIA_PUBLIC.
+
+   If there are two ways to find a name (two members),
+   error_mark_node is returned; if COMPLAIN is non-zero, an error
+   message is printed as well.
+
+   As a special case, if COMPLAIN is -1, we don't complain, and we
+   don't return error_mark_node, but rather the complete list of
+   virtuals.  This is used by get_virtuals_named_this.  */
+tree
+lookup_fnfields (basetype_path, name, complain)
+ tree basetype_path, name;
+ int complain;
+{
+ int head = 0, tail = 0;
+ tree type, rval, rval_binfo = NULL_TREE, rvals = NULL_TREE, rval_binfo_h;
+ tree entry, binfo, basetype_chain, binfo_h;
+ int find_all = 0;
+
+ /* rval_binfo is the binfo associated with the found member, note,
+ this can be set with useful information, even when rval is not
+ set, because it must deal with ALL members, not just function
+ members. It is used for ambiguity checking and the hidden
+ checks. Whereas rval is only set if a proper (not hidden)
+ function member is found. */
+
+ /* rval_binfo_h and binfo_h are binfo values used when we perform the
+ hiding checks, as virtual base classes may not be shared. The strategy
+     is that we always go into the binfo hierarchy owned by TYPE_BINFO of
+ virtual base classes, as we cross virtual base class lines. This way
+ we know that binfo of a virtual base class will always == itself when
+ found along any line. (mrs) */
+
+ /* For now, don't try this. */
+ int protect = complain;
+
+ char *errstr = 0;
+
+  /* Hash index for the memoized lookup tables.  */
+ int index = MEMOIZED_HASH_FN (name);
+
+ if (complain == -1)
+ {
+ find_all = 1;
+ protect = complain = 0;
+ }
+
+ /* If we are looking for a constructor in a templated type, use the
+ unspecialized name, as that is how we store it. */
+ if (IDENTIFIER_TEMPLATE (name))
+ name = constructor_name (name);
+
+ binfo = basetype_path;
+ binfo_h = binfo;
+ type = BINFO_TYPE (basetype_path);
+
+ /* The memoization code is in need of maintenance. */
+ if (!find_all && CLASSTYPE_MTABLE_ENTRY (type))
+ {
+ tree tem = MEMOIZED_FNFIELDS (CLASSTYPE_MTABLE_ENTRY (type), index);
+
+ while (tem && TREE_PURPOSE (tem) != name)
+ {
+ memoized_fields_searched[1]++;
+ tem = TREE_CHAIN (tem);
+ }
+ if (tem)
+ {
+ if (protect && TREE_TYPE (tem))
+ {
+ error (TREE_STRING_POINTER (TREE_TYPE (tem)),
+ IDENTIFIER_POINTER (name),
+ TYPE_NAME_STRING (DECL_CLASS_CONTEXT (TREE_VALUE (TREE_VALUE (tem)))));
+ return error_mark_node;
+ }
+ if (TREE_VALUE (tem) == NULL_TREE)
+ {
+ memoized_fast_rejects[1] += 1;
+ return NULL_TREE;
+ }
+ else
+ {
+ /* Want to return this, but we must make sure
+ that access information is consistent. */
+ tree baselink = TREE_VALUE (tem);
+ tree memoized_basetypes = TREE_PURPOSE (baselink);
+ tree these_basetypes = basetype_path;
+ while (memoized_basetypes && these_basetypes)
+ {
+ memoized_fields_searched[1]++;
+ if (TREE_VALUE (memoized_basetypes) != these_basetypes)
+ break;
+ memoized_basetypes = TREE_CHAIN (memoized_basetypes);
+ these_basetypes = BINFO_INHERITANCE_CHAIN (these_basetypes);
+ }
+ /* The following statement is true only when both are NULL. */
+ if (memoized_basetypes == these_basetypes)
+ {
+ memoized_fast_finds[1] += 1;
+ return TREE_VALUE (tem);
+ }
+ /* else, we must re-find this field by hand. */
+ baselink = tree_cons (basetype_path, TREE_VALUE (baselink), TREE_CHAIN (baselink));
+ return baselink;
+ }
+ }
+ }
+
+#ifdef GATHER_STATISTICS
+ n_calls_lookup_fnfields++;
+#endif
+ if (protect && flag_memoize_lookups && ! global_bindings_p ())
+ entry = make_memoized_table_entry (type, name, 1);
+ else
+ entry = 0;
+
+ index = lookup_fnfields_here (type, name);
+ if (index >= 0 || lookup_field_1 (type, name))
+ {
+ rval_binfo = basetype_path;
+ rval_binfo_h = rval_binfo;
+ }
+
+ if (index >= 0)
+ {
+ rval = TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (type), index);
+ rvals = my_tree_cons (basetype_path, rval, rvals);
+ if (BINFO_BASETYPES (binfo) && CLASSTYPE_BASELINK_VEC (type))
+ TREE_TYPE (rvals) = TREE_VEC_ELT (CLASSTYPE_BASELINK_VEC (type), index);
+
+ if (entry)
+ {
+ TREE_VALUE (entry) = rvals;
+ TREE_TYPE (entry) = NULL_TREE;
+ }
+
+ return rvals;
+ }
+ rval = NULL_TREE;
+
+ if (basetype_path == TYPE_BINFO (type))
+ {
+ basetype_chain = CLASSTYPE_BINFO_AS_LIST (type);
+ TREE_VIA_PUBLIC (basetype_chain) = 1;
+ BINFO_VIA_PUBLIC (basetype_path) = 1;
+ BINFO_INHERITANCE_CHAIN (basetype_path) = NULL_TREE;
+ }
+ else
+ {
+ basetype_chain = build_tree_list (NULL_TREE, basetype_path);
+ TREE_VIA_PUBLIC (basetype_chain) = TREE_VIA_PUBLIC (basetype_path);
+ TREE_VIA_PROTECTED (basetype_chain) = TREE_VIA_PROTECTED (basetype_path);
+ TREE_VIA_VIRTUAL (basetype_chain) = TREE_VIA_VIRTUAL (basetype_path);
+ }
+
+  /* The ambiguity check relies upon breadth-first searching.  */
+
+ search_stack = push_search_level (search_stack, &search_obstack);
+ binfo = basetype_path;
+ binfo_h = binfo;
+
+ while (1)
+ {
+ tree binfos = BINFO_BASETYPES (binfo);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+ int index;
+
+ /* Process and/or queue base types. */
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ if (BINFO_FIELDS_MARKED (base_binfo) == 0)
+ {
+ tree btypes;
+
+ SET_BINFO_FIELDS_MARKED (base_binfo);
+ btypes = my_tree_cons (NULL_TREE, base_binfo, basetype_chain);
+ TREE_VIA_PUBLIC (btypes) = TREE_VIA_PUBLIC (base_binfo);
+ TREE_VIA_PROTECTED (btypes) = TREE_VIA_PROTECTED (base_binfo);
+ TREE_VIA_VIRTUAL (btypes) = TREE_VIA_VIRTUAL (base_binfo);
+ if (TREE_VIA_VIRTUAL (base_binfo))
+ btypes = tree_cons (NULL_TREE,
+ TYPE_BINFO (BINFO_TYPE (TREE_VEC_ELT (BINFO_BASETYPES (binfo_h), i))),
+ btypes);
+ else
+ btypes = tree_cons (NULL_TREE,
+ TREE_VEC_ELT (BINFO_BASETYPES (binfo_h), i),
+ btypes);
+ obstack_ptr_grow (&search_obstack, btypes);
+ tail += 1;
+ if (tail >= search_stack->limit)
+ my_friendly_abort (99);
+ }
+ }
+
+ /* Process head of queue, if one exists. */
+ if (head >= tail)
+ break;
+
+ basetype_chain = search_stack->first[head++];
+ binfo_h = TREE_VALUE (basetype_chain);
+ basetype_chain = TREE_CHAIN (basetype_chain);
+ basetype_path = TREE_VALUE (basetype_chain);
+ if (TREE_CHAIN (basetype_chain))
+ BINFO_INHERITANCE_CHAIN (basetype_path) = TREE_VALUE (TREE_CHAIN (basetype_chain));
+ else
+ BINFO_INHERITANCE_CHAIN (basetype_path) = NULL_TREE;
+
+ binfo = basetype_path;
+ type = BINFO_TYPE (binfo);
+
+ /* See if we can find NAME in TYPE. If RVAL is nonzero,
+ and we do find NAME in TYPE, verify that such a second
+ sighting is in fact legal. */
+
+ index = lookup_fnfields_here (type, name);
+
+ if (index >= 0 || (lookup_field_1 (type, name)!=NULL_TREE && !find_all))
+ {
+ if (rval_binfo && !find_all && hides (rval_binfo_h, binfo_h))
+ {
+ /* This is ok, the member found is in rval_binfo, not
+ here (binfo). */
+ }
+ else if (rval_binfo==NULL_TREE || find_all || hides (binfo_h, rval_binfo_h))
+ {
+ /* This is ok, the member found is here (binfo), not in
+ rval_binfo. */
+ if (index >= 0)
+ {
+ rval = TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (type), index);
+ /* Note, rvals can only be previously set if find_all is
+ true. */
+ rvals = my_tree_cons (basetype_path, rval, rvals);
+ if (TYPE_BINFO_BASETYPES (type)
+ && CLASSTYPE_BASELINK_VEC (type))
+ TREE_TYPE (rvals) = TREE_VEC_ELT (CLASSTYPE_BASELINK_VEC (type), index);
+ }
+ else
+ {
+ /* Undo finding it before, as something else hides it. */
+ rval = NULL_TREE;
+ rvals = NULL_TREE;
+ }
+ rval_binfo = binfo;
+ rval_binfo_h = binfo_h;
+ }
+ else
+ {
+ /* This is ambiguous. */
+ errstr = "request for method `%D' is ambiguous";
+ rvals = error_mark_node;
+ break;
+ }
+ }
+ }
+ {
+ tree *tp = search_stack->first;
+ tree *search_tail = tp + tail;
+
+ while (tp < search_tail)
+ {
+ CLEAR_BINFO_FIELDS_MARKED (TREE_VALUE (TREE_CHAIN (*tp)));
+ tp += 1;
+ }
+ }
+ search_stack = pop_search_level (search_stack);
+
+ if (entry)
+ {
+ if (errstr)
+ {
+ tree error_string = my_build_string (errstr);
+ /* Save error message with entry. */
+ TREE_TYPE (entry) = error_string;
+ }
+ else
+ {
+ /* Mark entry as having no error string. */
+ TREE_TYPE (entry) = NULL_TREE;
+ TREE_VALUE (entry) = rvals;
+ }
+ }
+
+ if (errstr && protect)
+ {
+ cp_error (errstr, name);
+ rvals = error_mark_node;
+ }
+
+ return rvals;
+}
+
+/* BREADTH-FIRST SEARCH ROUTINES. */
+
+/* Search a multiple inheritance hierarchy by breadth-first search.
+
+   BINFO is the binfo of an aggregate type, possibly in a
+   multiple-inheritance hierarchy.
+   TESTFN is a function which, if it returns nonzero, means that our
+   condition has been met; its return value is then returned.
+ QFN, if non-NULL, is a predicate dictating whether the type should
+ even be queued. */
+
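+/* For example, get_matching_virtual below calls this with
+   (pfi) get_virtual_destructor as TESTFN and tree_has_any_destructor_p
+   as QFN to find a virtual destructor anywhere in the lattice.  */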
+HOST_WIDE_INT
+breadth_first_search (binfo, testfn, qfn)
+ tree binfo;
+ int (*testfn)();
+ int (*qfn)();
+{
+ int head = 0, tail = 0;
+ int rval = 0;
+
+ search_stack = push_search_level (search_stack, &search_obstack);
+
+ while (1)
+ {
+ tree binfos = BINFO_BASETYPES (binfo);
+ int n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+ int i;
+
+ /* Process and/or queue base types. */
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+
+ if (BINFO_MARKED (base_binfo) == 0
+ && (qfn == 0 || (*qfn) (binfo, i)))
+ {
+ SET_BINFO_MARKED (base_binfo);
+ obstack_ptr_grow (&search_obstack, binfo);
+ obstack_ptr_grow (&search_obstack, (HOST_WIDE_INT) i);
+ tail += 2;
+ if (tail >= search_stack->limit)
+ my_friendly_abort (100);
+ }
+ }
+ /* Process head of queue, if one exists. */
+ if (head >= tail)
+ {
+ rval = 0;
+ break;
+ }
+
+ binfo = search_stack->first[head++];
+ i = (HOST_WIDE_INT) search_stack->first[head++];
+      if ((rval = (*testfn) (binfo, i)))
+ break;
+ binfo = BINFO_BASETYPE (binfo, i);
+ }
+ {
+ tree *tp = search_stack->first;
+ tree *search_tail = tp + tail;
+ while (tp < search_tail)
+ {
+ tree binfo = *tp++;
+ int i = (HOST_WIDE_INT)(*tp++);
+ CLEAR_BINFO_MARKED (BINFO_BASETYPE (binfo, i));
+ }
+ }
+
+ search_stack = pop_search_level (search_stack);
+ return rval;
+}
+
+/* Functions to use in breadth-first searches. */
+typedef tree (*pft)();
+typedef int (*pfi)();
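+/* These exist so that worker functions can be cast to the parameter
+   types breadth_first_search expects, as in the
+   (pfi) get_virtual_destructor call below.  */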
+
+int tree_needs_constructor_p (binfo, i)
+ tree binfo;
+ int i;
+{
+ tree basetype;
+ my_friendly_assert (i != 0, 296);
+ basetype = BINFO_TYPE (BINFO_BASETYPE (binfo, i));
+ return TYPE_NEEDS_CONSTRUCTING (basetype);
+}
+
+static tree declarator;
+
+static tree
+get_virtuals_named_this (binfo)
+ tree binfo;
+{
+ tree fields;
+
+ fields = lookup_fnfields (binfo, declarator, -1);
+ /* fields cannot be error_mark_node */
+
+ if (fields == 0)
+ return 0;
+
+ /* Get to the function decls, and return the first virtual function
+ with this name, if there is one. */
+ while (fields)
+ {
+ tree fndecl;
+
+ for (fndecl = TREE_VALUE (fields); fndecl; fndecl = DECL_CHAIN (fndecl))
+ if (DECL_VINDEX (fndecl))
+ return fields;
+ fields = next_baselink (fields);
+ }
+ return NULL_TREE;
+}
+
+static tree get_virtual_destructor (binfo, i)
+ tree binfo;
+ int i;
+{
+ tree type = BINFO_TYPE (binfo);
+ if (i >= 0)
+ type = BINFO_TYPE (TREE_VEC_ELT (BINFO_BASETYPES (binfo), i));
+ if (TYPE_HAS_DESTRUCTOR (type)
+ && DECL_VINDEX (TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (type), 0)))
+ return TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (type), 0);
+ return 0;
+}
+
+int tree_has_any_destructor_p (binfo, i)
+ tree binfo;
+ int i;
+{
+ tree type = BINFO_TYPE (binfo);
+ if (i >= 0)
+ type = BINFO_TYPE (TREE_VEC_ELT (BINFO_BASETYPES (binfo), i));
+ return TYPE_NEEDS_DESTRUCTOR (type);
+}
+
+/* Given a class type TYPE, and a function decl FNDECL, look for a
+ virtual function in TYPE's hierarchy which FNDECL could match as a
+ virtual function. It doesn't matter which one we find.
+
+ DTORP is nonzero if we are looking for a destructor. Destructors
+ need special treatment because they do not match by name. */
+tree
+get_matching_virtual (binfo, fndecl, dtorp)
+ tree binfo, fndecl;
+ int dtorp;
+{
+ tree tmp = NULL_TREE;
+
+ /* Breadth first search routines start searching basetypes
+ of TYPE, so we must perform first ply of search here. */
+ if (dtorp)
+ {
+ if (tree_has_any_destructor_p (binfo, -1))
+ tmp = get_virtual_destructor (binfo, -1);
+
+ if (tmp)
+ return tmp;
+
+ tmp = (tree) breadth_first_search (binfo,
+ (pfi) get_virtual_destructor,
+ tree_has_any_destructor_p);
+ return tmp;
+ }
+ else
+ {
+ tree drettype, dtypes, btypes, instptr_type;
+ tree basetype = DECL_CLASS_CONTEXT (fndecl);
+ tree baselink, best = NULL_TREE;
+ tree name = DECL_ASSEMBLER_NAME (fndecl);
+
+ declarator = DECL_NAME (fndecl);
+ if (IDENTIFIER_VIRTUAL_P (declarator) == 0)
+ return NULL_TREE;
+
+ drettype = TREE_TYPE (TREE_TYPE (fndecl));
+ dtypes = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
+ if (DECL_STATIC_FUNCTION_P (fndecl))
+ instptr_type = NULL_TREE;
+ else
+ instptr_type = TREE_TYPE (TREE_VALUE (dtypes));
+
+ for (baselink = get_virtuals_named_this (binfo);
+ baselink; baselink = next_baselink (baselink))
+ {
+ for (tmp = TREE_VALUE (baselink); tmp; tmp = DECL_CHAIN (tmp))
+ {
+ if (! DECL_VINDEX (tmp))
+ continue;
+
+ btypes = TYPE_ARG_TYPES (TREE_TYPE (tmp));
+ if (instptr_type == NULL_TREE)
+ {
+ if (compparms (TREE_CHAIN (btypes), dtypes, 3))
+ /* Caller knows to give error in this case. */
+ return tmp;
+ return NULL_TREE;
+ }
+
+ if ((TYPE_READONLY (TREE_TYPE (TREE_VALUE (btypes)))
+ == TYPE_READONLY (instptr_type))
+ && compparms (TREE_CHAIN (btypes), TREE_CHAIN (dtypes), 3))
+ {
+ if (IDENTIFIER_ERROR_LOCUS (name) == NULL_TREE
+ && ! comptypes (TREE_TYPE (TREE_TYPE (tmp)), drettype, 1))
+ {
+ cp_error ("conflicting return type specified for virtual function `%#D'", fndecl);
+ cp_error ("overriding definition as `%#D'", tmp);
+ SET_IDENTIFIER_ERROR_LOCUS (name, basetype);
+ }
+ break;
+ }
+ }
+ if (tmp)
+ {
+ best = tmp;
+ break;
+ }
+ }
+ if (best == NULL_TREE && warn_overloaded_virtual)
+ cp_warning_at ("conflicting specification deriving virtual function `%D'", fndecl);
+
+ return best;
+ }
+}
+
+/* Return the list of virtual functions which are abstract in type
+   TYPE that come from non-virtual base classes.  See
+ expand_direct_vtbls_init for the style of search we do. */
+static tree
+get_abstract_virtuals_1 (binfo, do_self, abstract_virtuals)
+ tree binfo, abstract_virtuals;
+ int do_self;
+{
+ tree binfos = BINFO_BASETYPES (binfo);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ int is_not_base_vtable =
+ i != CLASSTYPE_VFIELD_PARENT (BINFO_TYPE (binfo));
+ if (! TREE_VIA_VIRTUAL (base_binfo))
+ abstract_virtuals
+ = get_abstract_virtuals_1 (base_binfo, is_not_base_vtable,
+ abstract_virtuals);
+ }
+ /* Should we use something besides CLASSTYPE_VFIELDS? */
+ if (do_self && CLASSTYPE_VFIELDS (BINFO_TYPE (binfo)))
+ {
+ tree tmp = TREE_CHAIN (BINFO_VIRTUALS (binfo));
+
+ /* Get around dossier entry if there is one. */
+ if (flag_dossier)
+ tmp = TREE_CHAIN (tmp);
+
+ while (tmp)
+ {
+ tree base_pfn = FNADDR_FROM_VTABLE_ENTRY (TREE_VALUE (tmp));
+ tree base_fndecl = TREE_OPERAND (base_pfn, 0);
+ if (DECL_ABSTRACT_VIRTUAL_P (base_fndecl))
+ abstract_virtuals = tree_cons (NULL_TREE, base_fndecl, abstract_virtuals);
+ tmp = TREE_CHAIN (tmp);
+ }
+ }
+ return abstract_virtuals;
+}
+
+/* Return the list of virtual functions which are abstract in type TYPE.
+ This information is cached, and so must be built on a
+ non-temporary obstack. */
+tree
+get_abstract_virtuals (type)
+ tree type;
+{
+ tree vbases, tmp;
+ tree abstract_virtuals = CLASSTYPE_ABSTRACT_VIRTUALS (type);
+
+ /* First get all from non-virtual bases. */
+ abstract_virtuals
+ = get_abstract_virtuals_1 (TYPE_BINFO (type), 1, abstract_virtuals);
+
+ for (vbases = CLASSTYPE_VBASECLASSES (type); vbases; vbases = TREE_CHAIN (vbases))
+ {
+ if (! BINFO_VIRTUALS (vbases))
+ continue;
+
+ tmp = TREE_CHAIN (BINFO_VIRTUALS (vbases));
+ while (tmp)
+ {
+ tree base_pfn = FNADDR_FROM_VTABLE_ENTRY (TREE_VALUE (tmp));
+ tree base_fndecl = TREE_OPERAND (base_pfn, 0);
+ if (DECL_ABSTRACT_VIRTUAL_P (base_fndecl))
+ abstract_virtuals = tree_cons (NULL_TREE, base_fndecl, abstract_virtuals);
+ tmp = TREE_CHAIN (tmp);
+ }
+ }
+ return nreverse (abstract_virtuals);
+}
+
+/* For the type TYPE, return a list of member functions available from
+ base classes with name NAME. The TREE_VALUE of the list is a chain of
+ member functions with name NAME. The TREE_PURPOSE of the list is a
+ basetype, or a list of base types (in reverse order) which were
+ traversed to reach the chain of member functions. If we reach a base
+ type which provides a member function of name NAME, and which has at
+ most one base type itself, then we can terminate the search. */
+
+tree
+get_baselinks (type_as_binfo_list, type, name)
+ tree type_as_binfo_list;
+ tree type, name;
+{
+ int head = 0, tail = 0, index;
+ tree rval = 0, nval = 0;
+ tree basetypes = type_as_binfo_list;
+ tree binfo = TYPE_BINFO (type);
+
+ search_stack = push_search_level (search_stack, &search_obstack);
+
+ while (1)
+ {
+ tree binfos = BINFO_BASETYPES (binfo);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ /* Process and/or queue base types. */
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ tree btypes;
+
+ btypes = hash_tree_cons (TREE_VIA_PUBLIC (base_binfo),
+ TREE_VIA_VIRTUAL (base_binfo),
+ TREE_VIA_PROTECTED (base_binfo),
+ NULL_TREE, base_binfo,
+ basetypes);
+ obstack_ptr_grow (&search_obstack, btypes);
+ search_stack->first = (tree *)obstack_base (&search_obstack);
+ tail += 1;
+ }
+
+ dont_queue:
+ /* Process head of queue, if one exists. */
+ if (head >= tail)
+ break;
+
+ basetypes = search_stack->first[head++];
+ binfo = TREE_VALUE (basetypes);
+ type = BINFO_TYPE (binfo);
+ index = lookup_fnfields_1 (type, name);
+ if (index >= 0)
+ {
+ nval = TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (type), index);
+ rval = hash_tree_cons (0, 0, 0, basetypes, nval, rval);
+ if (TYPE_BINFO_BASETYPES (type) == 0)
+ goto dont_queue;
+ else if (TREE_VEC_LENGTH (TYPE_BINFO_BASETYPES (type)) == 1)
+ {
+ if (CLASSTYPE_BASELINK_VEC (type))
+ TREE_TYPE (rval) = TREE_VEC_ELT (CLASSTYPE_BASELINK_VEC (type), index);
+ goto dont_queue;
+ }
+ }
+ nval = NULL_TREE;
+ }
+
+ search_stack = pop_search_level (search_stack);
+ return rval;
+}
+
+tree
+next_baselink (baselink)
+ tree baselink;
+{
+ tree tmp = TREE_TYPE (baselink);
+ baselink = TREE_CHAIN (baselink);
+ while (tmp)
+ {
+ /* @@ does not yet add previous base types. */
+ baselink = tree_cons (TREE_PURPOSE (tmp), TREE_VALUE (tmp),
+ baselink);
+ TREE_TYPE (baselink) = TREE_TYPE (tmp);
+ tmp = TREE_CHAIN (tmp);
+ }
+ return baselink;
+}
+
+/* DEPTH-FIRST SEARCH ROUTINES. */
+
+/* Assign unique numbers to _CLASSTYPE members of the lattice
+ specified by TYPE. The root nodes are marked first; the nodes
+   are marked depth-first, left-to-right.  */
+
+static int cid;
+
+/* Matrix implementing a relation from CLASSTYPE X CLASSTYPE => INT.
+ Relation yields 1 if C1 <= C2, 0 otherwise. */
+typedef char mi_boolean;
+static mi_boolean *mi_matrix;
+
+/* Type for which this matrix is defined. */
+static tree mi_type;
+
+/* Size of the matrix for indexing purposes. */
+static int mi_size;
+
+/* Return nonzero if class C2 derives from class C1. */
+#define BINFO_DERIVES_FROM(C1, C2) \
+ ((mi_matrix+mi_size*(BINFO_CID (C1)-1))[BINFO_CID (C2)-1])
+#define TYPE_DERIVES_FROM(C1, C2) \
+ ((mi_matrix+mi_size*(CLASSTYPE_CID (C1)-1))[CLASSTYPE_CID (C2)-1])
+#define BINFO_DERIVES_FROM_STAR(C) \
+ (mi_matrix+(BINFO_CID (C)-1))
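+
+/* E.g., the byte recording that C2 derives from C1 lives at
+   mi_matrix[mi_size * (CID (C1) - 1) + (CID (C2) - 1)], where the CIDs
+   are the 1-based class ids assigned by dfs_number below.  */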
+
+/* This routine converts a pointer into a pointer to an immediate
+   base class.  The normal convert_pointer_to routine would diagnose
+   the conversion as ambiguous in MI code where the base class is
+   an ambiguous base class.  */
+static tree
+convert_pointer_to_single_level (to_type, expr)
+ tree to_type, expr;
+{
+ tree binfo_of_derived;
+ tree last;
+
+ binfo_of_derived = TYPE_BINFO (TREE_TYPE (TREE_TYPE (expr)));
+ last = get_binfo (to_type, TREE_TYPE (TREE_TYPE (expr)), 0);
+ BINFO_INHERITANCE_CHAIN (last) = binfo_of_derived;
+ BINFO_INHERITANCE_CHAIN (binfo_of_derived) = NULL_TREE;
+ return build_vbase_path (PLUS_EXPR, TYPE_POINTER_TO (to_type), expr, last, 1);
+}
+
+/* The main function which implements depth first search.
+
+ This routine has to remember the path it walked up, when
+ dfs_init_vbase_pointers is the work function, as otherwise there
+ would be no record. */
+static void
+dfs_walk (binfo, fn, qfn)
+ tree binfo;
+ void (*fn)();
+ int (*qfn)();
+{
+ tree binfos = BINFO_BASETYPES (binfo);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ for (i = 0; i < n_baselinks; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+
+ if ((*qfn)(base_binfo))
+ {
+ if (fn == dfs_init_vbase_pointers)
+ {
+ /* When traversing an arbitrary MI hierarchy, we need to keep
+ a record of the path we took to get down to the final base
+ type, as otherwise there would be no record of it, and just
+ trying to blindly convert at the bottom would be ambiguous.
+
+ The easiest way is to do the conversions one step at a time,
+ as we know we want the immediate base class at each step.
+
+ The only special trick to converting one step at a time,
+ is that when we hit the last virtual base class, we must
+ use the SLOT value for it, and not use the normal convert
+ routine. We use the last virtual base class, as in our
+ implementation, we have pointers to all virtual base
+ classes in the base object. */
+
+ tree saved_vbase_decl_ptr_intermediate
+ = vbase_decl_ptr_intermediate;
+
+ if (TREE_VIA_VIRTUAL (base_binfo))
+ {
+ /* No need for the conversion here, as we know it is the
+ right type. */
+ vbase_decl_ptr_intermediate
+ = (tree)CLASSTYPE_SEARCH_SLOT (BINFO_TYPE (base_binfo));
+ }
+ else
+ {
+ vbase_decl_ptr_intermediate
+ = convert_pointer_to_single_level (BINFO_TYPE (base_binfo),
+ vbase_decl_ptr_intermediate);
+ }
+
+ dfs_walk (base_binfo, fn, qfn);
+
+ vbase_decl_ptr_intermediate = saved_vbase_decl_ptr_intermediate;
+	    }
+	  else
+ dfs_walk (base_binfo, fn, qfn);
+ }
+ }
+
+ fn (binfo);
+}
+
+/* Predicate functions which serve for dfs_walk. */
+static int numberedp (binfo) tree binfo;
+{ return BINFO_CID (binfo); }
+static int unnumberedp (binfo) tree binfo;
+{ return BINFO_CID (binfo) == 0; }
+
+static int markedp (binfo) tree binfo;
+{ return BINFO_MARKED (binfo); }
+static int bfs_markedp (binfo, i) tree binfo; int i;
+{ return BINFO_MARKED (BINFO_BASETYPE (binfo, i)); }
+static int unmarkedp (binfo) tree binfo;
+{ return BINFO_MARKED (binfo) == 0; }
+static int bfs_unmarkedp (binfo, i) tree binfo; int i;
+{ return BINFO_MARKED (BINFO_BASETYPE (binfo, i)) == 0; }
+static int marked_vtable_pathp (binfo) tree binfo;
+{ return BINFO_VTABLE_PATH_MARKED (binfo); }
+static int bfs_marked_vtable_pathp (binfo, i) tree binfo; int i;
+{ return BINFO_VTABLE_PATH_MARKED (BINFO_BASETYPE (binfo, i)); }
+static int unmarked_vtable_pathp (binfo) tree binfo;
+{ return BINFO_VTABLE_PATH_MARKED (binfo) == 0; }
+static int bfs_unmarked_vtable_pathp (binfo, i) tree binfo; int i;
+{ return BINFO_VTABLE_PATH_MARKED (BINFO_BASETYPE (binfo, i)) == 0; }
+static int marked_new_vtablep (binfo) tree binfo;
+{ return BINFO_NEW_VTABLE_MARKED (binfo); }
+static int bfs_marked_new_vtablep (binfo, i) tree binfo; int i;
+{ return BINFO_NEW_VTABLE_MARKED (BINFO_BASETYPE (binfo, i)); }
+static int unmarked_new_vtablep (binfo) tree binfo;
+{ return BINFO_NEW_VTABLE_MARKED (binfo) == 0; }
+static int bfs_unmarked_new_vtablep (binfo, i) tree binfo; int i;
+{ return BINFO_NEW_VTABLE_MARKED (BINFO_BASETYPE (binfo, i)) == 0; }
+
+static int dfs_search_slot_nonempty_p (binfo) tree binfo;
+{ return CLASSTYPE_SEARCH_SLOT (BINFO_TYPE (binfo)) != 0; }
+
+static int dfs_debug_unmarkedp (binfo) tree binfo;
+{ return CLASSTYPE_DEBUG_REQUESTED (BINFO_TYPE (binfo)) == 0; }
+
+/* The worker functions for `dfs_walk'. These do not need to
+ test anything (vis a vis marking) if they are paired with
+ a predicate function (above). */
+
+/* Assign each type within the lattice a number which is unique
+ in the lattice. The first number assigned is 1. */
+
+static void
+dfs_number (binfo)
+ tree binfo;
+{
+ BINFO_CID (binfo) = ++cid;
+}
+
+static void
+dfs_unnumber (binfo)
+ tree binfo;
+{
+ BINFO_CID (binfo) = 0;
+}
+
+static void
+dfs_mark (binfo) tree binfo;
+{ SET_BINFO_MARKED (binfo); }
+
+static void
+dfs_unmark (binfo) tree binfo;
+{ CLEAR_BINFO_MARKED (binfo); }
+
+static void
+dfs_mark_vtable_path (binfo) tree binfo;
+{ SET_BINFO_VTABLE_PATH_MARKED (binfo); }
+
+static void
+dfs_unmark_vtable_path (binfo) tree binfo;
+{ CLEAR_BINFO_VTABLE_PATH_MARKED (binfo); }
+
+static void
+dfs_mark_new_vtable (binfo) tree binfo;
+{ SET_BINFO_NEW_VTABLE_MARKED (binfo); }
+
+static void
+dfs_unmark_new_vtable (binfo) tree binfo;
+{ CLEAR_BINFO_NEW_VTABLE_MARKED (binfo); }
+
+static void
+dfs_clear_search_slot (binfo) tree binfo;
+{ CLASSTYPE_SEARCH_SLOT (BINFO_TYPE (binfo)) = 0; }
+
+static void
+dfs_debug_mark (binfo)
+ tree binfo;
+{
+ tree t = BINFO_TYPE (binfo);
+
+ /* Use heuristic that if there are virtual functions,
+ ignore until we see a non-inline virtual function. */
+ tree methods = CLASSTYPE_METHOD_VEC (t);
+
+ CLASSTYPE_DEBUG_REQUESTED (t) = 1;
+
+ /* If interface info is known, the value of (?@@?) is correct. */
+ if (methods == 0
+ || CLASSTYPE_INTERFACE_KNOWN (t)
+ || (write_virtuals == 2 && TYPE_VIRTUAL_P (t)))
+ return;
+
+ /* If debug info is requested from this context for this type, supply it.
+ If debug info is requested from another context for this type,
+ see if some third context can supply it. */
+ if (current_function_decl == NULL_TREE
+ || DECL_CLASS_CONTEXT (current_function_decl) != t)
+ {
+ if (TREE_VEC_ELT (methods, 0))
+ methods = TREE_VEC_ELT (methods, 0);
+ else
+ methods = TREE_VEC_ELT (methods, 1);
+ while (methods)
+ {
+ if (DECL_VINDEX (methods)
+ && DECL_SAVED_INSNS (methods) == 0
+ && DECL_PENDING_INLINE_INFO (methods) == 0
+ && DECL_ABSTRACT_VIRTUAL_P (methods) == 0)
+ {
+ /* Somebody, somewhere is going to have to define this
+ virtual function. When they do, they will provide
+ the debugging info. */
+ return;
+ }
+ methods = TREE_CHAIN (methods);
+ }
+ }
+ /* We cannot rely on some alien method to solve our problems,
+ so we must write out the debug info ourselves. */
+ TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (t)) = 0;
+ rest_of_type_compilation (t, global_bindings_p ());
+}
+
+/* Attach to the type of the virtual base class, the pointer to the
+ virtual base class, given the global pointer vbase_decl_ptr.
+
+ We use the global vbase_types. ICK! */
+static void
+dfs_find_vbases (binfo)
+ tree binfo;
+{
+ tree binfos = BINFO_BASETYPES (binfo);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ for (i = n_baselinks-1; i >= 0; i--)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+
+ if (TREE_VIA_VIRTUAL (base_binfo)
+ && CLASSTYPE_SEARCH_SLOT (BINFO_TYPE (base_binfo)) == 0)
+ {
+ tree vbase = BINFO_TYPE (base_binfo);
+ tree binfo = binfo_member (vbase, vbase_types);
+
+ CLASSTYPE_SEARCH_SLOT (vbase)
+ = (char *) build (PLUS_EXPR, TYPE_POINTER_TO (vbase),
+ vbase_decl_ptr, BINFO_OFFSET (binfo));
+ }
+ }
+ SET_BINFO_VTABLE_PATH_MARKED (binfo);
+ SET_BINFO_NEW_VTABLE_MARKED (binfo);
+}
+
+static void
+dfs_init_vbase_pointers (binfo)
+ tree binfo;
+{
+ tree type = BINFO_TYPE (binfo);
+ tree fields = TYPE_FIELDS (type);
+ tree this_vbase_ptr;
+
+ CLEAR_BINFO_VTABLE_PATH_MARKED (binfo);
+
+ /* If there is a dossier, it is the first field, though perhaps from
+ the base class. Otherwise, the first fields are virtual base class
+ pointer fields. */
+ if (CLASSTYPE_DOSSIER (type) && VFIELD_NAME_P (DECL_NAME (fields)))
+ /* Get past vtable for the object. */
+ fields = TREE_CHAIN (fields);
+
+ if (fields == NULL_TREE
+ || DECL_NAME (fields) == NULL_TREE
+ || ! VBASE_NAME_P (DECL_NAME (fields)))
+ return;
+
+ this_vbase_ptr = vbase_decl_ptr_intermediate;
+
+ if (TYPE_POINTER_TO (type) != TYPE_MAIN_VARIANT (TREE_TYPE (this_vbase_ptr)))
+ my_friendly_abort (125);
+
+ while (fields && DECL_NAME (fields)
+ && VBASE_NAME_P (DECL_NAME (fields)))
+ {
+ tree ref = build (COMPONENT_REF, TREE_TYPE (fields),
+ build_indirect_ref (this_vbase_ptr, NULL_PTR), fields);
+ tree init = (tree)CLASSTYPE_SEARCH_SLOT (TREE_TYPE (TREE_TYPE (fields)));
+ vbase_init_result = tree_cons (binfo_member (TREE_TYPE (TREE_TYPE (fields)),
+ vbase_types),
+ build_modify_expr (ref, NOP_EXPR, init),
+ vbase_init_result);
+ fields = TREE_CHAIN (fields);
+ }
+}
+
+/* Sometimes this needs to clear both VTABLE_PATH and NEW_VTABLE; other
+   times, just NEW_VTABLE, but the optimizer should handle both with
+   equal efficiency (though it currently does not).  */
+static void
+dfs_clear_vbase_slots (binfo)
+ tree binfo;
+{
+ tree type = BINFO_TYPE (binfo);
+ CLASSTYPE_SEARCH_SLOT (type) = 0;
+ CLEAR_BINFO_VTABLE_PATH_MARKED (binfo);
+ CLEAR_BINFO_NEW_VTABLE_MARKED (binfo);
+}
+
+tree
+init_vbase_pointers (type, decl_ptr)
+ tree type;
+ tree decl_ptr;
+{
+ if (TYPE_USES_VIRTUAL_BASECLASSES (type))
+ {
+ int old_flag = flag_this_is_variable;
+ tree binfo = TYPE_BINFO (type);
+ flag_this_is_variable = -2;
+ vbase_types = CLASSTYPE_VBASECLASSES (type);
+ vbase_decl_ptr = decl_ptr;
+ vbase_decl = build_indirect_ref (decl_ptr, NULL_PTR);
+ vbase_decl_ptr_intermediate = vbase_decl_ptr;
+ vbase_init_result = NULL_TREE;
+ dfs_walk (binfo, dfs_find_vbases, unmarked_vtable_pathp);
+ dfs_walk (binfo, dfs_init_vbase_pointers, marked_vtable_pathp);
+ dfs_walk (binfo, dfs_clear_vbase_slots, marked_new_vtablep);
+ flag_this_is_variable = old_flag;
+ return vbase_init_result;
+ }
+ return 0;
+}
+
+/* Build a COMPOUND_EXPR which when expanded will generate the code
+   needed to initialize all the virtual function table slots of all
+   the virtual baseclasses.  BINFO is the binfo which determines the
+   virtual baseclasses to use; its type is the type of the object to
+   which the initialization applies.  TRUE_EXP is the true object we
+   are initializing, and DECL_PTR is the pointer to the sub-object we
+   are initializing.
+
+   When USE_COMPUTED_OFFSETS is non-zero, we can assume that the
+   object was laid out by a top-level constructor and the computed
+   offsets are valid to store vtables.  When zero, we must store new
+   vtables through virtual baseclass pointers.
+
+   We set up and use the globals: vbase_decl, vbase_decl_ptr, vbase_types.
+   ICK! */
+
+void
+expand_indirect_vtbls_init (binfo, true_exp, decl_ptr, use_computed_offsets)
+ tree binfo;
+ tree true_exp, decl_ptr;
+ int use_computed_offsets;
+{
+ tree type = BINFO_TYPE (binfo);
+ if (TYPE_USES_VIRTUAL_BASECLASSES (type))
+ {
+ int old_flag = flag_this_is_variable;
+ tree vbases = CLASSTYPE_VBASECLASSES (type);
+ vbase_types = vbases;
+ vbase_decl_ptr = true_exp ? build_unary_op (ADDR_EXPR, true_exp, 0) : decl_ptr;
+ vbase_decl = true_exp ? true_exp : build_indirect_ref (decl_ptr, NULL_PTR);
+
+ if (use_computed_offsets)
+ {
+ /* This is an object of type IN_TYPE, */
+ flag_this_is_variable = -2;
+ dfs_walk (binfo, dfs_find_vbases, unmarked_new_vtablep);
+ }
+
+ /* Initialized with vtables of type TYPE. */
+ for (; vbases; vbases = TREE_CHAIN (vbases))
+ {
+ tree addr;
+ if (use_computed_offsets)
+ addr = (tree)CLASSTYPE_SEARCH_SLOT (BINFO_TYPE (vbases));
+ else
+ {
+ tree vbinfo = get_binfo (TREE_TYPE (vbases),
+ TREE_TYPE (vbase_decl),
+ 0);
+
+	      /* See if we can get lucky.  */
+ if (TREE_VIA_VIRTUAL (vbinfo))
+ addr = convert_pointer_to_real (vbinfo, vbase_decl_ptr);
+ else
+ {
+ /* We go through all these contortions to avoid this
+ call, as it will fail when the virtual base type
+ is ambiguous from here. We don't yet have a way
+ to search for and find just an instance of the
+ virtual base class. Searching for the binfo in
+ vbases won't work, as we don't have the vbase
+ pointer field, for all vbases in the main class,
+ only direct vbases. */
+ addr = convert_pointer_to_real (TREE_TYPE (vbases),
+ vbase_decl_ptr);
+ if (addr == error_mark_node)
+ continue;
+ }
+ }
+
+ /* Do all vtables from this virtual base. */
+	  /* This assumes that virtual bases can never serve as parent
+	     binfos (in the CLASSTYPE_VFIELD_PARENT sense).  */
+ expand_direct_vtbls_init (vbases, TYPE_BINFO (BINFO_TYPE (vbases)),
+ 1, 0, addr);
+ }
+
+ dfs_walk (binfo, dfs_clear_vbase_slots, marked_new_vtablep);
+
+ flag_this_is_variable = old_flag;
+ }
+}
+
+void
+clear_search_slots (type)
+ tree type;
+{
+ dfs_walk (TYPE_BINFO (type),
+ dfs_clear_search_slot, dfs_search_slot_nonempty_p);
+}
+
+/* Get virtual base class types.
+   This adds the type to the vbase_types list in reverse dfs order.
+   Ordering is very important, so don't change it.  */
+
+static void
+dfs_get_vbase_types (binfo)
+ tree binfo;
+{
+ if (TREE_VIA_VIRTUAL (binfo) && ! BINFO_VBASE_MARKED (binfo))
+ {
+ vbase_types = make_binfo (integer_zero_node, binfo,
+ BINFO_VTABLE (binfo),
+ BINFO_VIRTUALS (binfo), vbase_types);
+ TREE_VIA_VIRTUAL (vbase_types) = 1;
+ SET_BINFO_VBASE_MARKED (binfo);
+ }
+ SET_BINFO_MARKED (binfo);
+}
+
+/* Get a list of virtual base classes in dfs order.  */
+tree
+get_vbase_types (type)
+ tree type;
+{
+ tree vbases;
+ tree binfo;
+
+ if (TREE_CODE (type) == TREE_VEC)
+ binfo = type;
+ else
+ binfo = TYPE_BINFO (type);
+
+ vbase_types = NULL_TREE;
+ dfs_walk (binfo, dfs_get_vbase_types, unmarkedp);
+ dfs_walk (binfo, dfs_unmark, markedp);
+ /* Rely upon the reverse dfs ordering from dfs_get_vbase_types, and now
+ reverse it so that we get normal dfs ordering. */
+ vbase_types = nreverse (vbase_types);
+
+  /* Unmark marked vbases.  */
+ for (vbases = vbase_types; vbases; vbases = TREE_CHAIN (vbases))
+ CLEAR_BINFO_VBASE_MARKED (vbases);
+
+ return vbase_types;
+}
+
+static void
+dfs_record_inheritance (binfo)
+ tree binfo;
+{
+ tree binfos = BINFO_BASETYPES (binfo);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+ mi_boolean *derived_row = BINFO_DERIVES_FROM_STAR (binfo);
+
+ for (i = n_baselinks-1; i >= 0; i--)
+ {
+ int j;
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ tree baseclass = BINFO_TYPE (base_binfo);
+ mi_boolean *base_row = BINFO_DERIVES_FROM_STAR (base_binfo);
+
+ /* Don't search if there's nothing there! MI_SIZE can be
+ zero as a result of parse errors. */
+ if (TYPE_BINFO_BASETYPES (baseclass) && mi_size > 0)
+ for (j = mi_size*(CLASSTYPE_CID (baseclass)-1); j >= 0; j -= mi_size)
+ derived_row[j] |= base_row[j];
+ TYPE_DERIVES_FROM (baseclass, BINFO_TYPE (binfo)) = 1;
+ }
+
+ SET_BINFO_MARKED (binfo);
+}
+
+/* Given a _CLASSTYPE node in a multiple inheritance lattice,
+ convert the lattice into a simple relation such that,
+   given two CIDs, C1 and C2, one can determine if C1 <= C2
+ or C2 <= C1 or C1 <> C2.
+
+   Once constructed, we walk the lattice depth first,
+ applying various functions to elements as they are encountered.
+
+ We use xmalloc here, in case we want to randomly free these tables. */
+
+#define SAVE_MI_MATRIX
+
+void
+build_mi_matrix (type)
+ tree type;
+{
+ tree binfo = TYPE_BINFO (type);
+ cid = 0;
+
+#ifdef SAVE_MI_MATRIX
+ if (CLASSTYPE_MI_MATRIX (type))
+ {
+ mi_size = CLASSTYPE_N_SUPERCLASSES (type) + CLASSTYPE_N_VBASECLASSES (type);
+ mi_matrix = CLASSTYPE_MI_MATRIX (type);
+ mi_type = type;
+ dfs_walk (binfo, dfs_number, unnumberedp);
+ return;
+ }
+#endif
+
+ mi_size = CLASSTYPE_N_SUPERCLASSES (type) + CLASSTYPE_N_VBASECLASSES (type);
+ mi_matrix = (char *)xmalloc ((mi_size + 1) * (mi_size + 1));
+ mi_type = type;
+ bzero (mi_matrix, (mi_size + 1) * (mi_size + 1));
+ dfs_walk (binfo, dfs_number, unnumberedp);
+ dfs_walk (binfo, dfs_record_inheritance, unmarkedp);
+ dfs_walk (binfo, dfs_unmark, markedp);
+}
+
+void
+free_mi_matrix ()
+{
+ dfs_walk (TYPE_BINFO (mi_type), dfs_unnumber, numberedp);
+
+#ifdef SAVE_MI_MATRIX
+ CLASSTYPE_MI_MATRIX (mi_type) = mi_matrix;
+#else
+ free (mi_matrix);
+ mi_size = 0;
+ cid = 0;
+#endif
+}
+
+/* If we want debug info for a type TYPE, make sure all its base types
+ are also marked as being potentially interesting. This avoids
+ the problem of not writing any debug info for intermediate basetypes
+ that have abstract virtual functions. */
+
+void
+note_debug_info_needed (type)
+ tree type;
+{
+ dfs_walk (TYPE_BINFO (type), dfs_debug_mark, dfs_debug_unmarkedp);
+}
+
+/* Subroutines of push_class_decls (). */
+
+/* Add the instance variables which this class contributed to the
+ current class binding contour. When a redefinition occurs,
+ if the redefinition is strictly within a single inheritance path,
+ we just overwrite (in the case of a data field) or
+ cons (in the case of a member function) the old declaration with
+ the new. If the fields are not within a single inheritance path,
+ we must cons them in either case.
+
+ In order to know what decls are new (stemming from the current
+ invocation of push_class_decls) we enclose them in an "envelope",
+ which is a TREE_LIST node where the TREE_PURPOSE slot contains the
+ new decl (or possibly a list of competing ones), the TREE_VALUE slot
+   points to the old value and the TREE_CHAIN slot chains together all
+   envelopes which need to be "opened" in push_class_decls.  Opening an
+   envelope means: push the old value onto the class_shadowed list,
+   install the new one and, if it's a TYPE_DECL, do the same to the
+   IDENTIFIER_TYPE_VALUE.  Such an envelope is recognized by seeing that
+   the TREE_PURPOSE slot is non-null and is not an identifier (if it
+   were, it could be a set of overloaded methods from an outer scope).  */
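+
+/* Schematically, each closed envelope is a TREE_LIST whose TREE_PURPOSE
+   holds the new decl (or a list of candidates), whose TREE_VALUE holds
+   the shadowed IDENTIFIER_CLASS_VALUE, and whose TREE_CHAIN points to
+   the next closed envelope.  */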
+
+static void
+dfs_pushdecls (binfo)
+ tree binfo;
+{
+ tree type = BINFO_TYPE (binfo);
+ tree fields, *methods, *end;
+ tree method_vec;
+
+ for (fields = TYPE_FIELDS (type); fields; fields = TREE_CHAIN (fields))
+ {
+ /* Unmark so that if we are in a constructor, and then find that
+ this field was initialized by a base initializer,
+ we can emit an error message. */
+ if (TREE_CODE (fields) == FIELD_DECL)
+ TREE_USED (fields) = 0;
+
+ /* Recurse into anonymous unions. */
+ if (DECL_NAME (fields) == NULL_TREE
+ && TREE_CODE (TREE_TYPE (fields)) == UNION_TYPE)
+ {
+ dfs_pushdecls (TYPE_BINFO (TREE_TYPE (fields)));
+ continue;
+ }
+
+#if 0
+ if (TREE_CODE (fields) != TYPE_DECL)
+ {
+ DECL_PUBLIC (fields) = 0;
+ DECL_PROTECTED (fields) = 0;
+ DECL_PRIVATE (fields) = 0;
+ }
+#endif
+
+ if (DECL_NAME (fields))
+ {
+ tree class_value = IDENTIFIER_CLASS_VALUE (DECL_NAME (fields));
+
+ /* If the class value is an envelope of the kind described in
+ the comment above, we try to rule out possible ambiguities.
+ If we can't do that, keep a TREE_LIST with possibly ambiguous
+ decls in there. */
+ if (class_value && TREE_CODE (class_value) == TREE_LIST
+ && TREE_PURPOSE (class_value) != NULL_TREE
+ && (TREE_CODE (TREE_PURPOSE (class_value))
+ != IDENTIFIER_NODE))
+ {
+ tree value = TREE_PURPOSE (class_value);
+ tree context;
+
+		  /* Possible ambiguity.  If its defining type(s)
+		     are all derived from us, there is no problem.  */
+ if (TREE_CODE (value) != TREE_LIST)
+ {
+ context = (TREE_CODE (value) == FUNCTION_DECL
+ && DECL_VIRTUAL_P (value))
+ ? DECL_CLASS_CONTEXT (value)
+ : DECL_CONTEXT (value);
+
+ if (context && (context == type
+ || TYPE_DERIVES_FROM (context, type)))
+ value = fields;
+ else
+ value = tree_cons (NULL_TREE, fields,
+ build_tree_list (NULL_TREE, value));
+ }
+ else
+ {
+ /* All children may derive from us, in which case
+ there is no problem. Otherwise, we have to
+ keep lists around of what the ambiguities might be. */
+ tree values;
+ int problem = 0;
+
+ for (values = value; values; values = TREE_CHAIN (values))
+ {
+ tree sub_values = TREE_VALUE (values);
+
+ if (TREE_CODE (sub_values) == TREE_LIST)
+ {
+ for (; sub_values; sub_values = TREE_CHAIN (sub_values))
+ {
+ register tree list_mbr = TREE_VALUE (sub_values);
+
+ context = (TREE_CODE (list_mbr) == FUNCTION_DECL
+ && DECL_VIRTUAL_P (list_mbr))
+ ? DECL_CLASS_CONTEXT (list_mbr)
+ : DECL_CONTEXT (list_mbr);
+
+ if (! TYPE_DERIVES_FROM (context, type))
+ {
+ value = tree_cons (NULL_TREE, TREE_VALUE (values), value);
+ problem = 1;
+ break;
+ }
+ }
+ }
+ else
+ {
+ context = (TREE_CODE (sub_values) == FUNCTION_DECL
+ && DECL_VIRTUAL_P (sub_values))
+ ? DECL_CLASS_CONTEXT (sub_values)
+ : DECL_CONTEXT (sub_values);
+
+ if (context && ! TYPE_DERIVES_FROM (context, type))
+ {
+ value = tree_cons (NULL_TREE, values, value);
+ problem = 1;
+ break;
+ }
+ }
+ }
+ if (! problem) value = fields;
+ }
+
+ /* Mark this as a potentially ambiguous member. */
+ if (TREE_CODE (value) == TREE_LIST)
+ {
+ /* Leaving TREE_TYPE blank is intentional.
+ We cannot use `error_mark_node' (lookup_name)
+ or `unknown_type_node' (all member functions use this). */
+ TREE_NONLOCAL_FLAG (value) = 1;
+ }
+
+ /* Put the new contents in our envelope. */
+ TREE_PURPOSE (class_value) = value;
+ }
+ else
+ {
+ /* See comment above for a description of envelopes. */
+ tree envelope = tree_cons (fields, class_value,
+ closed_envelopes);
+
+ closed_envelopes = envelope;
+ IDENTIFIER_CLASS_VALUE (DECL_NAME (fields)) = envelope;
+ }
+ }
+ }
+
+ method_vec = CLASSTYPE_METHOD_VEC (type);
+ if (method_vec != 0)
+ {
+ /* Farm out constructors and destructors. */
+ methods = &TREE_VEC_ELT (method_vec, 1);
+ end = TREE_VEC_END (method_vec);
+
+ /* This does not work for multiple inheritance yet. */
+ while (methods != end)
+ {
+ /* This will cause lookup_name to return a pointer
+ to the tree_list of possible methods of this name.
+ If the order is a problem, we can nreverse them. */
+ tree tmp;
+ tree class_value = IDENTIFIER_CLASS_VALUE (DECL_NAME (*methods));
+
+ if (class_value && TREE_CODE (class_value) == TREE_LIST
+ && TREE_PURPOSE (class_value) != NULL_TREE
+ && TREE_CODE (TREE_PURPOSE (class_value)) != IDENTIFIER_NODE)
+ {
+ tree old = TREE_PURPOSE (class_value);
+
+ maybe_push_cache_obstack ();
+ if (TREE_CODE (old) == TREE_LIST)
+ tmp = tree_cons (DECL_NAME (*methods), *methods, old);
+ else
+ {
+ /* Only complain if we shadow something we can access. */
+ if (old
+ && warn_shadow
+ && ((DECL_LANG_SPECIFIC (old)
+ && DECL_CLASS_CONTEXT (old) == current_class_type)
+ || ! TREE_PRIVATE (old)))
+ /* Should figure out access control more accurately. */
+ {
+ cp_warning_at ("member `%#D' is shadowed", old);
+ cp_warning_at ("by member function `%#D'", *methods);
+ warning ("in this context");
+ }
+ tmp = build_tree_list (DECL_NAME (*methods), *methods);
+ }
+ pop_obstacks ();
+
+ TREE_TYPE (tmp) = unknown_type_node;
+#if 0
+ TREE_OVERLOADED (tmp) = DECL_OVERLOADED (*methods);
+#endif
+ TREE_NONLOCAL_FLAG (tmp) = 1;
+
+ /* Put the new contents in our envelope. */
+ TREE_PURPOSE (class_value) = tmp;
+ }
+ else
+ {
+ maybe_push_cache_obstack ();
+ tmp = build_tree_list (DECL_NAME (*methods), *methods);
+ pop_obstacks ();
+
+ TREE_TYPE (tmp) = unknown_type_node;
+#if 0
+ TREE_OVERLOADED (tmp) = DECL_OVERLOADED (*methods);
+#endif
+ TREE_NONLOCAL_FLAG (tmp) = 1;
+
+ /* See comment above for a description of envelopes. */
+ closed_envelopes = tree_cons (tmp, class_value,
+ closed_envelopes);
+ IDENTIFIER_CLASS_VALUE (DECL_NAME (*methods)) = closed_envelopes;
+ }
+#if 0
+ tmp = *methods;
+ while (tmp != 0)
+ {
+ DECL_PUBLIC (tmp) = 0;
+ DECL_PROTECTED (tmp) = 0;
+ DECL_PRIVATE (tmp) = 0;
+ tmp = DECL_CHAIN (tmp);
+ }
+#endif
+
+ methods++;
+ }
+ }
+ SET_BINFO_MARKED (binfo);
+}
+
+/* Consolidate unique (by name) member functions. */
+static void
+dfs_compress_decls (binfo)
+ tree binfo;
+{
+ tree type = BINFO_TYPE (binfo);
+ tree method_vec = CLASSTYPE_METHOD_VEC (type);
+
+ if (method_vec != 0)
+ {
+ /* Farm out constructors and destructors. */
+ tree *methods = &TREE_VEC_ELT (method_vec, 1);
+ tree *end = TREE_VEC_END (method_vec);
+
+ for (; methods != end; methods++)
+ {
+ /* This is known to be an envelope of the kind described before
+ dfs_pushdecls. */
+ tree class_value = IDENTIFIER_CLASS_VALUE (DECL_NAME (*methods));
+ tree tmp = TREE_PURPOSE (class_value);
+
+ /* This was replaced in scope by somebody else. Just leave it
+ alone. */
+ if (TREE_CODE (tmp) != TREE_LIST)
+ continue;
+
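+	  /* Exactly one method of this name, and it is not overloaded:
+	     replace the envelope contents by the function decl itself. */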
+ if (TREE_CHAIN (tmp) == NULL_TREE
+ && TREE_VALUE (tmp)
+ && DECL_CHAIN (TREE_VALUE (tmp)) == NULL_TREE)
+ {
+ TREE_PURPOSE (class_value) = TREE_VALUE (tmp);
+ }
+ }
+ }
+ CLEAR_BINFO_MARKED (binfo);
+}
+
+/* When entering the scope of a class, we cache all of the
+ fields that that class provides within its inheritance
+ lattice. Where ambiguities result, we mark them
+ with `error_mark_node' so that if they are encountered
+ without explicit qualification, we can emit an error
+ message. */
+void
+push_class_decls (type)
+ tree type;
+{
+ tree id;
+ struct obstack *ambient_obstack = current_obstack;
+
+#if 0
+ tree tags = CLASSTYPE_TAGS (type);
+
+ while (tags)
+ {
+ tree code_type_node;
+ tree tag;
+
+ switch (TREE_CODE (TREE_VALUE (tags)))
+ {
+ case ENUMERAL_TYPE:
+ code_type_node = enum_type_node;
+ break;
+ case RECORD_TYPE:
+ code_type_node = record_type_node;
+ break;
+ case CLASS_TYPE:
+ code_type_node = class_type_node;
+ break;
+ case UNION_TYPE:
+ code_type_node = union_type_node;
+ break;
+ default:
+ my_friendly_abort (297);
+ }
+ tag = xref_tag (code_type_node, TREE_PURPOSE (tags),
+ TYPE_BINFO_BASETYPE (TREE_VALUE (tags), 0), 0);
+#if 0 /* not yet, should get fixed properly later */
+ pushdecl (make_type_decl (TREE_PURPOSE (tags), TREE_VALUE (tags)));
+#else
+ pushdecl (build_decl (TYPE_DECL, TREE_PURPOSE (tags), TREE_VALUE (tags)));
+#endif
+ }
+#endif
+
+ search_stack = push_search_level (search_stack, &search_obstack);
+
+ id = TYPE_IDENTIFIER (type);
+#if 0
+ if (IDENTIFIER_TEMPLATE (id) != 0)
+ {
+ tree tmpl = IDENTIFIER_TEMPLATE (id);
+ push_template_decls (DECL_ARGUMENTS (TREE_PURPOSE (tmpl)),
+ TREE_VALUE (tmpl), 1);
+ overload_template_name (id, 1);
+ }
+#endif
+
+ /* Push class fields into CLASS_VALUE scope, and mark. */
+ dfs_walk (TYPE_BINFO (type), dfs_pushdecls, unmarkedp);
+
+ /* Compress fields which have only a single entry
+ by a given name, and unmark. */
+ dfs_walk (TYPE_BINFO (type), dfs_compress_decls, markedp);
+
+ /* Open up all the closed envelopes and push the contained decls into
+ class scope. */
+ while (closed_envelopes)
+ {
+ tree new = TREE_PURPOSE (closed_envelopes);
+ tree id;
+
+ /* This is messy because the class value may be a *_DECL, or a
+ TREE_LIST of overloaded *_DECLs or even a TREE_LIST of ambiguous
+ *_DECLs. The name is stored at different places in these three
+ cases. */
+ if (TREE_CODE (new) == TREE_LIST)
+ {
+ if (TREE_PURPOSE (new) != NULL_TREE)
+ id = TREE_PURPOSE (new);
+ else
+ {
+ tree node = TREE_VALUE (new);
+
+ while (TREE_CODE (node) == TREE_LIST)
+ node = TREE_VALUE (node);
+ id = DECL_NAME (node);
+ }
+ }
+ else
+ id = DECL_NAME (new);
+
+ /* Install the original class value in order to make
+ pushdecl_class_level work correctly. */
+ IDENTIFIER_CLASS_VALUE (id) = TREE_VALUE (closed_envelopes);
+ if (TREE_CODE (new) == TREE_LIST)
+ push_class_level_binding (id, new);
+ else
+ pushdecl_class_level (new);
+ closed_envelopes = TREE_CHAIN (closed_envelopes);
+ }
+ current_obstack = ambient_obstack;
+}
+
+/* Here's a subroutine we need because C lacks lambdas. */
+static void
+dfs_unuse_fields (binfo)
+ tree binfo;
+{
+ tree type = TREE_TYPE (binfo);
+ tree fields;
+
+ for (fields = TYPE_FIELDS (type); fields; fields = TREE_CHAIN (fields))
+ {
+ if (TREE_CODE (fields) != FIELD_DECL)
+ continue;
+
+ TREE_USED (fields) = 0;
+ if (DECL_NAME (fields) == NULL_TREE
+ && TREE_CODE (TREE_TYPE (fields)) == UNION_TYPE)
+ unuse_fields (TREE_TYPE (fields));
+ }
+}
+
+void
+unuse_fields (type)
+ tree type;
+{
+ dfs_walk (TYPE_BINFO (type), dfs_unuse_fields, unmarkedp);
+}
+
+void
+pop_class_decls (type)
+ tree type;
+{
+ /* We haven't pushed a search level when dealing with cached classes,
+ so we'd better not try to pop it. */
+ if (search_stack)
+ search_stack = pop_search_level (search_stack);
+}
+
+void
+print_search_statistics ()
+{
+#ifdef GATHER_STATISTICS
+ if (flag_memoize_lookups)
+ {
+ fprintf (stderr, "%d memoized contexts saved\n",
+ n_contexts_saved);
+ fprintf (stderr, "%d local tree nodes made\n", my_tree_node_counter);
+ fprintf (stderr, "%d local hash nodes made\n", my_memoized_entry_counter);
+ fprintf (stderr, "fields statistics:\n");
+ fprintf (stderr, " memoized finds = %d; rejects = %d; (searches = %d)\n",
+ memoized_fast_finds[0], memoized_fast_rejects[0],
+ memoized_fields_searched[0]);
+ fprintf (stderr, " memoized_adds = %d\n", memoized_adds[0]);
+ fprintf (stderr, "fnfields statistics:\n");
+ fprintf (stderr, " memoized finds = %d; rejects = %d; (searches = %d)\n",
+ memoized_fast_finds[1], memoized_fast_rejects[1],
+ memoized_fields_searched[1]);
+ fprintf (stderr, " memoized_adds = %d\n", memoized_adds[1]);
+ }
+ fprintf (stderr, "%d fields searched in %d[%d] calls to lookup_field[_1]\n",
+ n_fields_searched, n_calls_lookup_field, n_calls_lookup_field_1);
+ fprintf (stderr, "%d fnfields searched in %d calls to lookup_fnfields\n",
+ n_outer_fields_searched, n_calls_lookup_fnfields);
+ fprintf (stderr, "%d calls to get_base_type\n", n_calls_get_base_type);
+#else
+ fprintf (stderr, "no search statistics\n");
+#endif
+}
+
+void
+init_search_processing ()
+{
+ gcc_obstack_init (&search_obstack);
+ gcc_obstack_init (&type_obstack);
+ gcc_obstack_init (&type_obstack_entries);
+
+ /* This gives us room to build our chains of basetypes,
+ whether or not we decide to memoize them. */
+ type_stack = push_type_level (0, &type_obstack);
+ _vptr_name = get_identifier ("_vptr");
+}
+
+void
+reinit_search_statistics ()
+{
+ my_memoized_entry_counter = 0;
+ memoized_fast_finds[0] = 0;
+ memoized_fast_finds[1] = 0;
+ memoized_adds[0] = 0;
+ memoized_adds[1] = 0;
+ memoized_fast_rejects[0] = 0;
+ memoized_fast_rejects[1] = 0;
+ memoized_fields_searched[0] = 0;
+ memoized_fields_searched[1] = 0;
+ n_fields_searched = 0;
+ n_calls_lookup_field = 0, n_calls_lookup_field_1 = 0;
+ n_calls_lookup_fnfields = 0, n_calls_lookup_fnfields_1 = 0;
+ n_calls_get_base_type = 0;
+ n_outer_fields_searched = 0;
+ n_contexts_saved = 0;
+}
diff --git a/gnu/usr.bin/cc/cc1plus/sig.c b/gnu/usr.bin/cc/cc1plus/sig.c
new file mode 100644
index 0000000..1426168
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/sig.c
@@ -0,0 +1,1023 @@
+/* Functions dealing with signatures and signature pointers/references.
+ Copyright (C) 1992 Free Software Foundation, Inc.
+ Contributed by Gerald Baumgartner (gb@cs.purdue.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include <stdio.h>
+#include "obstack.h"
+#include "tree.h"
+#include "cp-tree.h"
+#include "flags.h"
+#include "assert.h"
+
+extern struct obstack *current_obstack;
+extern struct obstack permanent_obstack;
+extern struct obstack *saveable_obstack;
+
+extern void error ();
+extern void sorry ();
+extern void compiler_error ();
+extern void make_decl_rtl PROTO((tree, char *, int));
+
+/* Used to help generate globally unique names for signature tables. */
+
+static int global_sigtable_name_counter;
+
+/* Build an identifier for a signature pointer or reference, so we
+   can use its name in function name mangling. */
+
+static tree
+build_signature_pointer_or_reference_name (to_type, constp, volatilep, refp)
+ tree to_type;
+ int constp, volatilep, refp;
+{
+ char * sig_name = TYPE_NAME_STRING (to_type);
+ int name_len = TYPE_NAME_LENGTH (to_type) + constp + volatilep;
+ char * name;
+
+ if (refp)
+ {
+      name = (char *) alloca (name_len + sizeof (SIGNATURE_REFERENCE_NAME) + 2);
+ sprintf (name, SIGNATURE_REFERENCE_NAME_FORMAT,
+ constp ? "C" : "", volatilep ? "V": "", sig_name);
+ }
+ else
+ {
+ name = (char *) alloca (name_len + sizeof (SIGNATURE_POINTER_NAME) + 2);
+ sprintf (name, SIGNATURE_POINTER_NAME_FORMAT,
+ constp ? "C" : "", volatilep ? "V": "", sig_name);
+ }
+ return get_identifier (name);
+}
+
+/* Build a DECL node for a signature pointer or reference, so we can
+ tell the debugger the structure of signature pointers/references.
+ This function is called at most eight times for a given signature,
+ once for each [const] [volatile] signature pointer/reference. */
+
+static void
+build_signature_pointer_or_reference_decl (type, name)
+ tree type, name;
+{
+ tree decl;
+
+ /* We don't enter this declaration in any sort of symbol table. */
+ decl = build_decl (TYPE_DECL, name, type);
+ TYPE_NAME (type) = decl;
+ TREE_CHAIN (type) = decl;
+}
+
+/* Construct, lay out and return the type of pointers or references
+ to signature TO_TYPE. If such a type has already been constructed,
+ reuse it. If CONSTP or VOLATILEP is specified, make the `optr' const
+ or volatile, respectively. If we are constructing a const/volatile
+ type variant and the main type variant doesn't exist yet, it is built
+ as well. If REFP is 1, we construct a signature reference, otherwise
+ a signature pointer is constructed.
+
+ This function is a subroutine of `build_signature_pointer_type' and
+ `build_signature_reference_type'. */
+
+static tree
+build_signature_pointer_or_reference_type (to_type, constp, volatilep, refp)
+ tree to_type;
+ int constp, volatilep, refp;
+{
+ register tree t, m;
+ register struct obstack *ambient_obstack = current_obstack;
+ register struct obstack *ambient_saveable_obstack = saveable_obstack;
+
+ m = refp ? SIGNATURE_REFERENCE_TO (to_type) : SIGNATURE_POINTER_TO (to_type);
+
+ /* If we don't have the main variant yet, construct it. */
+ if (m == NULL_TREE
+ && (constp || volatilep))
+ m = build_signature_pointer_or_reference_type (to_type, 0, 0, refp);
+
+ /* Treat any nonzero argument as 1. */
+ constp = !!constp;
+ volatilep = !!volatilep;
+ refp = !!refp;
+
+ /* If not generating auxiliary info, search the chain of variants to see
+ if there is already one there just like the one we need to have. If so,
+ use that existing one.
+
+ We don't do this in the case where we are generating aux info because
+     in that case we want each typedef name to get its own distinct type
+ node, even if the type of this new typedef is the same as some other
+ (existing) type. */
+
+ if (m && !flag_gen_aux_info)
+ for (t = m; t; t = TYPE_NEXT_VARIANT (t))
+ if (constp == TYPE_READONLY (TREE_TYPE (TREE_TYPE (TYPE_FIELDS (t))))
+ && volatilep == TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (TYPE_FIELDS (t)))))
+ return t;
+
+ /* We need a new one. If TO_TYPE is permanent, make this permanent too. */
+ if (TREE_PERMANENT (to_type))
+ {
+ current_obstack = &permanent_obstack;
+ saveable_obstack = &permanent_obstack;
+ }
+
+ /* A signature pointer or reference to a signature `s' looks like this:
+
+ struct {
+ void * optr;
+ const s * sptr;
+ vtbl_type_node * vptr;
+ };
+
+ A `const' signature pointer/reference is a
+
+ struct {
+ const void * optr;
+ const s * sptr;
+ vtbl_type_node * vptr;
+ };
+
+ Similarly, for `volatile' and `const volatile'.
+ */
+
+ t = make_lang_type (RECORD_TYPE);
+ {
+ tree obj_type = build_type_variant (void_type_node, constp, volatilep);
+ tree optr_type = build_pointer_type (obj_type);
+ tree optr, sptr, vptr;
+
+ optr = build_lang_field_decl (FIELD_DECL,
+ get_identifier (SIGNATURE_OPTR_NAME),
+ optr_type);
+ DECL_FIELD_CONTEXT (optr) = t;
+ DECL_CLASS_CONTEXT (optr) = t;
+
+ if (m)
+ {
+ /* We can share `sptr' and `vptr' among type variants. */
+ sptr = TREE_CHAIN (TYPE_FIELDS (m));
+ vptr = TREE_CHAIN (sptr);
+ }
+ else
+ {
+ tree sig_tbl_type = c_build_type_variant (to_type, 1, 0);
+
+ sptr = build_lang_field_decl (FIELD_DECL,
+ get_identifier (SIGNATURE_SPTR_NAME),
+ build_pointer_type (sig_tbl_type));
+ vptr = build_lang_field_decl (FIELD_DECL,
+ get_identifier (SIGNATURE_VPTR_NAME),
+ build_pointer_type (vtbl_type_node));
+ DECL_FIELD_CONTEXT (sptr) = t;
+ DECL_CLASS_CONTEXT (sptr) = t;
+ DECL_FIELD_CONTEXT (vptr) = t;
+ DECL_CLASS_CONTEXT (vptr) = t;
+ TREE_CHAIN (sptr) = vptr;
+ TREE_CHAIN (vptr) = NULL_TREE;
+ }
+
+ TREE_CHAIN (optr) = sptr;
+ TYPE_FIELDS (t) = optr;
+ /* To make `build_vfn_ref' work when building a signature method call. */
+ CLASSTYPE_VFIELD (t) = vptr;
+ DECL_FCONTEXT (CLASSTYPE_VFIELD (t)) = t;
+ TYPE_ALIGN (t) = TYPE_ALIGN (optr_type);
+ }
+
+ {
+ tree name = build_signature_pointer_or_reference_name (to_type, constp,
+ volatilep, refp);
+
+ /* Build a DECL node for this type, so the debugger has access to it. */
+ build_signature_pointer_or_reference_decl (t, name);
+ }
+
+ CLASSTYPE_GOT_SEMICOLON (t) = 1;
+ IS_SIGNATURE_POINTER (t) = ! refp;
+ IS_SIGNATURE_REFERENCE (t) = refp;
+ SIGNATURE_TYPE (t) = to_type;
+
+ if (m)
+ {
+ /* Add this type to the chain of variants of TYPE.
+ Every type has to be its own TYPE_MAIN_VARIANT. */
+ TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (m);
+ TYPE_NEXT_VARIANT (m) = t;
+ }
+ else if (refp)
+ /* Record this type as the reference to TO_TYPE. */
+ SIGNATURE_REFERENCE_TO (to_type) = t;
+ else
+ /* Record this type as the pointer to TO_TYPE. */
+ SIGNATURE_POINTER_TO (to_type) = t;
+
+ /* Lay out the type. This function has many callers that are concerned
+ with expression-construction, and this simplifies them all.
+ Also, it guarantees the TYPE_SIZE is permanent if the type is. */
+ layout_type (t);
+
+ current_obstack = ambient_obstack;
+ saveable_obstack = ambient_saveable_obstack;
+
+  /* Output debug information for this type. */
+ rest_of_type_compilation (t, 1);
+
+ return t;
+}
+
+/* Construct, lay out and return the type of pointers to signature TO_TYPE. */
+
+tree
+build_signature_pointer_type (to_type, constp, volatilep)
+ tree to_type;
+ int constp, volatilep;
+{
+ return
+ build_signature_pointer_or_reference_type (to_type, constp, volatilep, 0);
+}
+
+/* Construct, lay out and return the type of references to signature TO_TYPE. */
+
+tree
+build_signature_reference_type (to_type, constp, volatilep)
+ tree to_type;
+ int constp, volatilep;
+{
+ return
+ build_signature_pointer_or_reference_type (to_type, constp, volatilep, 1);
+}
+
+/* Return the name of the signature table (as an IDENTIFIER_NODE)
+ for the given signature type SIG_TYPE and rhs type RHS_TYPE. */
+
+static tree
+get_sigtable_name (sig_type, rhs_type)
+ tree sig_type, rhs_type;
+{
+ tree sig_type_id = build_typename_overload (sig_type);
+ tree rhs_type_id = build_typename_overload (rhs_type);
+ char *buf = (char *) alloca (sizeof (SIGTABLE_NAME_FORMAT_LONG)
+ + IDENTIFIER_LENGTH (sig_type_id)
+ + IDENTIFIER_LENGTH (rhs_type_id) + 20);
+ char *sig_ptr = IDENTIFIER_POINTER (sig_type_id);
+ char *rhs_ptr = IDENTIFIER_POINTER (rhs_type_id);
+ int i, j;
+
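+  /* Skip the operator-typename prefix shared by both identifiers, plus
+     the length digits that follow it, to reach the encoded type names
+     themselves. */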
+ for (i = 0; sig_ptr[i] == OPERATOR_TYPENAME_FORMAT[i]; i++)
+ /* do nothing */;
+ while (sig_ptr[i] >= '0' && sig_ptr[i] <= '9')
+ i += 1;
+
+ for (j = 0; rhs_ptr[j] == OPERATOR_TYPENAME_FORMAT[j]; j++)
+ /* do nothing */;
+ while (rhs_ptr[j] >= '0' && rhs_ptr[j] <= '9')
+ j += 1;
+
+ if (IS_SIGNATURE (rhs_type))
+ sprintf (buf, SIGTABLE_NAME_FORMAT_LONG, sig_ptr+i, rhs_ptr+j,
+ global_sigtable_name_counter++);
+ else
+ sprintf (buf, SIGTABLE_NAME_FORMAT, sig_ptr+i, rhs_ptr+j);
+ return get_identifier (buf);
+}
+
+/* Build a field decl that points to a signature member function. */
+
+static tree
+build_member_function_pointer (member)
+ tree member;
+{
+ char *namstr = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (member));
+ int namlen = IDENTIFIER_LENGTH (DECL_ASSEMBLER_NAME (member));
+ char *name;
+ tree entry;
+
+ name = (char *) alloca (namlen + sizeof (SIGNATURE_FIELD_NAME) + 2);
+ sprintf (name, SIGNATURE_FIELD_NAME_FORMAT, namstr);
+
+ /* @@ Do we really want to xref signature table fields? */
+ GNU_xref_ref (current_function_decl, name);
+
+ entry = build_lang_field_decl (FIELD_DECL, get_identifier (name),
+ TYPE_MAIN_VARIANT (sigtable_entry_type));
+ TREE_CONSTANT (entry) = 1;
+ TREE_READONLY (entry) = 1;
+
+ /* @@ Do we really want to xref signature table fields? */
+ GNU_xref_decl (current_function_decl, entry);
+
+ return entry;
+}
+
+/* For each FUNCTION_DECL in a signature we construct a member function
+ pointer of the appropriate type. We also need two flags to test
+ whether the member function pointer points to a virtual function or
+ to a default implementation. Those flags will be the two lower order
+ bits of the member function pointer (or the two higher order bits,
+ based on the configuration).
+
+ The new FIELD_DECLs are appended at the end of the last (and only)
+ sublist of `list_of_fieldlists.'
+
+ As a side effect, each member function in the signature gets the
+ `decl.ignored' bit turned on, so we don't output debug info for it. */
+
+void
+append_signature_fields (list_of_fieldlists)
+ tree list_of_fieldlists;
+{
+ tree l, x;
+ tree last_x = NULL_TREE;
+ tree mfptr;
+ tree last_mfptr;
+ tree mfptr_list = NULL_TREE;
+
+ /* For signatures it should actually be only a list with one element. */
+ for (l = list_of_fieldlists; l; l = TREE_CHAIN (l))
+ {
+ for (x = TREE_VALUE (l); x; x = TREE_CHAIN (x))
+ {
+ if (TREE_CODE (x) == FUNCTION_DECL)
+ {
+ mfptr = build_member_function_pointer (x);
+ DECL_MEMFUNC_POINTER_TO (x) = mfptr;
+ DECL_MEMFUNC_POINTING_TO (mfptr) = x;
+ DECL_IGNORED_P (x) = 1;
+ DECL_IN_AGGR_P (mfptr) = 1;
+ if (! mfptr_list)
+ mfptr_list = last_mfptr = mfptr;
+ else
+ {
+ TREE_CHAIN (last_mfptr) = mfptr;
+ last_mfptr = mfptr;
+ }
+ }
+ last_x = x;
+ }
+ }
+
+ /* Append the lists. */
+ if (last_x && mfptr_list)
+ {
+ TREE_CHAIN (last_x) = mfptr_list;
+ TREE_CHAIN (last_mfptr) = NULL_TREE;
+ }
+}
+
+/* Compare the types of a signature member function and a class member
+ function. Returns 1 if the types are in the C++ `<=' relationship.
+
+ If we have a signature pointer/reference as argument or return type
+ we don't want to do a recursive conformance check. The conformance
+ check only succeeds if both LHS and RHS refer to the same signature
+ pointer. Otherwise we need to keep information about parameter types
+ around at run time to initialize the signature table correctly. */
+
+static int
+match_method_types (sig_mtype, class_mtype)
+ tree sig_mtype, class_mtype;
+{
+ tree sig_return_type = TREE_TYPE (sig_mtype);
+ tree sig_arg_types = TYPE_ARG_TYPES (sig_mtype);
+ tree class_return_type = TREE_TYPE (class_mtype);
+ tree class_arg_types = TYPE_ARG_TYPES (class_mtype);
+
+ /* The return types have to be the same. */
+ if (! comptypes (sig_return_type, class_return_type, 1))
+ return 0;
+
+ /* Compare the first argument `this.' */
+ {
+ /* Get the type of what the `optr' is pointing to. */
+ tree sig_this =
+ TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_VALUE (sig_arg_types))));
+ tree class_this = TREE_VALUE (class_arg_types);
+
+ if (TREE_CODE (class_this) == RECORD_TYPE) /* Is `this' a sig ptr? */
+ class_this = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (class_this)));
+ else
+ class_this = TREE_TYPE (class_this);
+
+    /* If a signature method's `this' is const or volatile, the
+       corresponding class method's `this' must be too. */
+ if ((TYPE_READONLY (sig_this) && ! TYPE_READONLY (class_this))
+ || (TYPE_VOLATILE (sig_this) && ! TYPE_VOLATILE (class_this)))
+ return 0;
+ }
+
+ sig_arg_types = TREE_CHAIN (sig_arg_types);
+ class_arg_types = TREE_CHAIN (class_arg_types);
+
+ /* The number of arguments and the argument types have to be the same. */
+ return compparms (sig_arg_types, class_arg_types, 3);
+}
+
+/* Undo casts of opaque type variables to the RHS types. */
+static void
+undo_casts (sig_ty)
+ tree sig_ty;
+{
+ tree field = TYPE_FIELDS (sig_ty);
+
+ /* Since all the FIELD_DECLs for the signature table entries are at the end
+ of the chain (see `append_signature_fields'), we can do it this way. */
+ for (; field && TREE_CODE (field) != FIELD_DECL; field = TREE_CHAIN (field))
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (field)) == opaque_type_node)
+ TREE_TYPE (TREE_TYPE (field)) = TREE_TYPE (ptr_type_node);
+}
+
+/* Do the type checking necessary to see whether the `rhs' conforms to
+ the lhs's `sig_ty'. Depending on the type of `rhs' return a NULL_TREE,
+ an integer_zero_node, a constructor, or an expression offsetting the
+ `rhs' signature table. */
+
+static tree
+build_signature_table_constructor (sig_ty, rhs)
+ tree sig_ty, rhs;
+{
+ tree rhstype = TREE_TYPE (rhs);
+ tree sig_field = TYPE_FIELDS (sig_ty);
+ tree result = NULL_TREE;
+ tree first_rhs_field = NULL_TREE;
+ tree last_rhs_field;
+ int sig_ptr_p = IS_SIGNATURE (rhstype);
+ int offset_p = sig_ptr_p;
+
+ rhstype = sig_ptr_p ? rhstype : TREE_TYPE (rhstype);
+
+ if (CLASSTYPE_TAGS (sig_ty))
+ {
+ sorry ("conformance check with signature containing class declarations");
+ return error_mark_node;
+ }
+
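+  /* Walk the signature's fields: TYPE_DECLs are checked for opaque
+     types below, and every member function pointer field gets a
+     signature table entry built for it. */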
+ for (; sig_field; sig_field = TREE_CHAIN (sig_field))
+ {
+ tree basetype_path, baselink, basetypes;
+ tree sig_method, sig_mname, sig_mtype;
+ tree rhs_method, tbl_entry;
+
+ if (TREE_CODE (sig_field) == TYPE_DECL)
+ {
+ tree sig_field_type = TREE_TYPE (sig_field);
+
+ if (TYPE_MAIN_VARIANT (sig_field_type) == opaque_type_node)
+ {
+ /* We've got an opaque type here. */
+ tree oty_name = DECL_NAME (sig_field);
+ tree oty_type = lookup_field (rhstype, oty_name, 1, 1);
+
+ if (oty_type == NULL_TREE || oty_type == error_mark_node)
+ {
+ cp_error ("class `%T' does not contain type `%T'",
+ rhstype, oty_type);
+ undo_casts (sig_ty);
+ return error_mark_node;
+ }
+ oty_type = TREE_TYPE (oty_type);
+
+ /* Cast `sig_field' to be of type `oty_type'. This will be
+ undone in `undo_casts' by walking over all the TYPE_DECLs. */
+ TREE_TYPE (sig_field_type) = TREE_TYPE (oty_type);
+ }
+ /* If we don't have an opaque type, we can ignore the `typedef'. */
+ continue;
+ }
+
+ /* Find the signature method corresponding to `sig_field'. */
+ sig_method = DECL_MEMFUNC_POINTING_TO (sig_field);
+ sig_mname = DECL_NAME (sig_method);
+ sig_mtype = TREE_TYPE (sig_method);
+
+ basetype_path = TYPE_BINFO (rhstype);
+ baselink = lookup_fnfields (basetype_path, sig_mname, 0);
+ if (baselink == NULL_TREE || baselink == error_mark_node)
+ {
+ if (! IS_DEFAULT_IMPLEMENTATION (sig_method))
+ {
+ cp_error ("class `%T' does not contain method `%D'",
+ rhstype, sig_mname);
+ undo_casts (sig_ty);
+ return error_mark_node;
+ }
+ else
+ {
+ /* We use the signature's default implementation. */
+ rhs_method = sig_method;
+ }
+ }
+ else
+ {
+ /* Find the class method of the correct type. */
+
+ basetypes = TREE_PURPOSE (baselink);
+ if (TREE_CODE (basetypes) == TREE_LIST)
+ basetypes = TREE_VALUE (basetypes);
+
+ rhs_method = TREE_VALUE (baselink);
+ for (; rhs_method; rhs_method = TREE_CHAIN (rhs_method))
+ if (sig_mname == DECL_NAME (rhs_method)
+ && ! DECL_STATIC_FUNCTION_P (rhs_method)
+ && match_method_types (sig_mtype, TREE_TYPE (rhs_method)))
+ break;
+
+ if (rhs_method == NULL_TREE
+ || (compute_access (basetypes, rhs_method)
+ != access_public))
+ {
+ error ("class `%s' does not contain a method conforming to `%s'",
+ TYPE_NAME_STRING (rhstype),
+ fndecl_as_string (NULL, sig_method, 1));
+ undo_casts (sig_ty);
+ return error_mark_node;
+ }
+ }
+
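+      /* The rhs is itself a signature; reuse the entries of its table.
+	 Track whether the fields we use are contiguous starting at
+	 FIRST_RHS_FIELD, so the lhs sptr can simply point into the rhs
+	 signature table (see OFFSET_P). */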
+ if (sig_ptr_p && rhs_method != sig_method)
+ {
+ tree rhs_field = DECL_MEMFUNC_POINTER_TO (rhs_method);
+
+ if (first_rhs_field == NULL_TREE)
+ {
+ first_rhs_field = rhs_field;
+ last_rhs_field = rhs_field;
+ }
+ else if (TREE_CHAIN (last_rhs_field) == rhs_field)
+ last_rhs_field = rhs_field;
+ else
+ offset_p = 0;
+
+ tbl_entry = build_component_ref (rhs, DECL_NAME (rhs_field),
+ NULL_TREE, 1);
+ }
+ else
+ {
+ tree code, offset, pfn;
+
+ if (rhs_method == sig_method)
+ {
+ code = integer_two_node;
+ offset = integer_zero_node;
+ pfn = build_unary_op (ADDR_EXPR, rhs_method, 0);
+ TREE_TYPE (pfn) = ptr_type_node;
+ offset_p = 0; /* we can't offset the rhs sig table */
+ }
+ else if (DECL_VINDEX (rhs_method))
+ {
+ code = integer_one_node;
+ offset = DECL_VINDEX (rhs_method);
+ pfn = null_pointer_node;
+ }
+ else
+ {
+ code = integer_zero_node;
+ offset = integer_zero_node;
+ pfn = build_unary_op (ADDR_EXPR, rhs_method, 0);
+ TREE_TYPE (pfn) = ptr_type_node;
+ TREE_ADDRESSABLE (rhs_method) = 1;
+ }
+
+ tbl_entry = tree_cons (NULL_TREE, code,
+ tree_cons (NULL_TREE, offset,
+ build_tree_list (NULL_TREE, pfn)));
+ tbl_entry = build_nt (CONSTRUCTOR, NULL_TREE, tbl_entry);
+ TREE_HAS_CONSTRUCTOR (tbl_entry) = 1;
+ TREE_CONSTANT (tbl_entry) = 1;
+ }
+
+ /* Chain those function address expressions together. */
+ if (result)
+ result = tree_cons (NULL_TREE, tbl_entry, result);
+ else
+ result = build_tree_list (NULL_TREE, tbl_entry);
+ }
+
+ if (result == NULL_TREE)
+ {
+ /* The signature was empty, we don't need a signature table. */
+ undo_casts (sig_ty);
+ return NULL_TREE;
+ }
+
+ if (offset_p)
+ {
+ if (first_rhs_field == TYPE_FIELDS (rhstype))
+ {
+ /* The sptr field on the lhs can be copied from the rhs. */
+ undo_casts (sig_ty);
+ return integer_zero_node;
+ }
+ else
+ {
+ /* The sptr field on the lhs will point into the rhs sigtable. */
+ undo_casts (sig_ty);
+ return build_component_ref (rhs, DECL_NAME (first_rhs_field),
+ NULL_TREE, 0);
+ }
+ }
+
+ /* We need to construct a new signature table. */
+ result = build_nt (CONSTRUCTOR, NULL_TREE, nreverse (result));
+ TREE_HAS_CONSTRUCTOR (result) = 1;
+ TREE_CONSTANT (result) = !sig_ptr_p;
+
+ undo_casts (sig_ty);
+ return result;
+}
+
+/* Build a signature table declaration and initialize it or return an
+ existing one if we built one already. If we don't get a constructor
+ as initialization expression, we don't need a new signature table
+ variable and just hand back the init expression.
+
+ The declaration processing is done by hand instead of using `finish_decl'
+ so that we can make signature pointers global variables instead of
+ static ones. */
+
+static tree
+build_sigtable (sig_type, rhs_type, init_from)
+ tree sig_type, rhs_type, init_from;
+{
+ tree name = NULL_TREE;
+ tree decl = NULL_TREE;
+ tree init_expr;
+
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+
+ if (! IS_SIGNATURE (rhs_type))
+ {
+ name = get_sigtable_name (sig_type, rhs_type);
+ decl = IDENTIFIER_GLOBAL_VALUE (name);
+ }
+ if (decl == NULL_TREE)
+ {
+ tree init;
+
+ /* We allow only one signature table to be generated for signatures
+ with opaque types. Otherwise we create a loophole in the type
+	 system since we could cast data from one class's implementation
+ of the opaque type to that of another class. */
+ if (SIGNATURE_HAS_OPAQUE_TYPEDECLS (sig_type)
+ && SIGTABLE_HAS_BEEN_GENERATED (sig_type))
+ {
+ error ("signature with opaque type implemented by multiple classes");
+ return error_mark_node;
+ }
+ SIGTABLE_HAS_BEEN_GENERATED (sig_type) = 1;
+
+ init_expr = build_signature_table_constructor (sig_type, init_from);
+ if (init_expr == NULL_TREE || TREE_CODE (init_expr) != CONSTRUCTOR)
+ return init_expr;
+
+ if (name == NULL_TREE)
+ name = get_sigtable_name (sig_type, rhs_type);
+ {
+ tree context = current_function_decl;
+
+ /* Make the signature table global, not just static in whichever
+ function a signature pointer/ref is used for the first time. */
+ current_function_decl = NULL_TREE;
+ decl = pushdecl_top_level (build_decl (VAR_DECL, name, sig_type));
+ current_function_decl = context;
+ }
+ IDENTIFIER_GLOBAL_VALUE (name) = decl;
+ store_init_value (decl, init_expr);
+ if (IS_SIGNATURE (rhs_type))
+ {
+ init = DECL_INITIAL (decl);
+ DECL_INITIAL (decl) = error_mark_node;
+ }
+
+ DECL_ALIGN (decl) = MAX (TYPE_ALIGN (double_type_node),
+ DECL_ALIGN (decl));
+#if 0
+ /* GDB-4.7 doesn't find the initialization value of a signature table
+ when it is constant. */
+ TREE_READONLY (decl) = 1;
+#endif
+ TREE_STATIC (decl) = 1;
+ TREE_USED (decl) = 1;
+
+ make_decl_rtl (decl, NULL, 1);
+ if (IS_SIGNATURE (rhs_type))
+ expand_static_init (decl, init);
+ }
+
+ pop_obstacks ();
+
+ return decl;
+}
+
+/* Create a constructor or modify expression if the LHS of an assignment
+ is a signature pointer or a signature reference. If LHS is a record
+ type node, we build a constructor, otherwise a compound expression. */
+
+tree
+build_signature_pointer_constructor (lhs, rhs)
+ tree lhs, rhs;
+{
+ register struct obstack *ambient_obstack = current_obstack;
+ register struct obstack *ambient_saveable_obstack = saveable_obstack;
+ int initp = (TREE_CODE (lhs) == RECORD_TYPE);
+ tree lhstype = initp ? lhs : TREE_TYPE (lhs);
+ tree rhstype = TREE_TYPE (rhs);
+ tree sig_ty = SIGNATURE_TYPE (lhstype);
+ tree sig_tbl, sptr_expr, optr_expr, vptr_expr;
+ tree result;
+
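+  /* The rhs must be a pointer to a class or itself a signature
+     pointer or reference; nothing else can implement a signature. */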
+ if (! ((TREE_CODE (rhstype) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (rhstype)) == RECORD_TYPE)
+ || (TYPE_LANG_SPECIFIC (rhstype) &&
+ (IS_SIGNATURE_POINTER (rhstype)
+ || IS_SIGNATURE_REFERENCE (rhstype)))))
+ {
+ error ("invalid assignment to signature pointer or reference");
+ return error_mark_node;
+ }
+
+ if (TYPE_SIZE (sig_ty) == NULL_TREE)
+ {
+ cp_error ("undefined signature `%T' used in signature %s declaration",
+ sig_ty,
+ IS_SIGNATURE_POINTER (lhstype) ? "pointer" : "reference");
+ return error_mark_node;
+ }
+
+ /* If SIG_TY is permanent, make the signature table constructor and
+ the signature pointer/reference constructor permanent too. */
+ if (TREE_PERMANENT (sig_ty))
+ {
+ current_obstack = &permanent_obstack;
+ saveable_obstack = &permanent_obstack;
+ }
+
+ if (TYPE_LANG_SPECIFIC (rhstype) &&
+ (IS_SIGNATURE_POINTER (rhstype) || IS_SIGNATURE_REFERENCE (rhstype)))
+ {
+ if (SIGNATURE_TYPE (rhstype) == sig_ty)
+ {
+ /* LHS and RHS are signature pointers/refs of the same signature. */
+ optr_expr = build_optr_ref (rhs);
+ sptr_expr = build_sptr_ref (rhs);
+ vptr_expr = build_vptr_ref (rhs);
+ }
+ else
+ {
+ /* We need to create a new signature table and copy
+ elements from the rhs signature table. */
+ tree rhs_sptr_ref = build_sptr_ref (rhs);
+ tree rhs_tbl = build1 (INDIRECT_REF, SIGNATURE_TYPE (rhstype),
+ rhs_sptr_ref);
+
+ sig_tbl = build_sigtable (sig_ty, SIGNATURE_TYPE (rhstype), rhs_tbl);
+ if (sig_tbl == error_mark_node)
+ return error_mark_node;
+
+ optr_expr = build_optr_ref (rhs);
+ if (sig_tbl == NULL_TREE)
+ /* The signature was empty. The signature pointer is
+ pretty useless, but the user has been warned. */
+ sptr_expr = copy_node (null_pointer_node);
+ else if (sig_tbl == integer_zero_node)
+ sptr_expr = rhs_sptr_ref;
+ else
+ sptr_expr = build_unary_op (ADDR_EXPR, sig_tbl, 0);
+ TREE_TYPE (sptr_expr) = build_pointer_type (sig_ty);
+ vptr_expr = build_vptr_ref (rhs);
+ }
+ }
+ else
+ {
+ tree rhs_vptr;
+
+ if (TYPE_USES_COMPLEX_INHERITANCE (TREE_TYPE (rhstype)))
+ {
+ sorry ("class with multiple inheritance as implementation of signature");
+ return error_mark_node;
+ }
+
+ sig_tbl = build_sigtable (sig_ty, TREE_TYPE (rhstype), rhs);
+ if (sig_tbl == error_mark_node)
+ return error_mark_node;
+
+ optr_expr = rhs;
+ if (sig_tbl == NULL_TREE)
+ /* The signature was empty. The signature pointer is
+ pretty useless, but the user has been warned. */
+ {
+ sptr_expr = copy_node (null_pointer_node);
+ TREE_TYPE (sptr_expr) = build_pointer_type (sig_ty);
+ }
+ else
+ sptr_expr = build_unary_op (ADDR_EXPR, sig_tbl, 0);
+ if (CLASSTYPE_VFIELD (TREE_TYPE (rhstype)))
+ {
+ rhs_vptr = DECL_NAME (CLASSTYPE_VFIELD (TREE_TYPE (rhstype)));
+ vptr_expr = build_component_ref (build_indirect_ref (rhs, 0),
+ rhs_vptr, NULL_TREE, 0);
+ }
+ else
+ vptr_expr = copy_node (null_pointer_node);
+ TREE_TYPE (vptr_expr) = build_pointer_type (vtbl_type_node);
+ }
+
+ if (initp)
+ {
+ result = tree_cons (NULL_TREE, optr_expr,
+ tree_cons (NULL_TREE, sptr_expr,
+ build_tree_list (NULL_TREE, vptr_expr)));
+ result = build_nt (CONSTRUCTOR, NULL_TREE, result);
+ TREE_HAS_CONSTRUCTOR (result) = 1;
+ result = digest_init (lhstype, result, 0);
+ }
+ else
+ {
+ if (TREE_READONLY (lhs) || TYPE_READONLY (lhstype))
+ readonly_error (lhs, "assignment", 0);
+
+ optr_expr = build_modify_expr (build_optr_ref (lhs), NOP_EXPR,
+ optr_expr);
+ sptr_expr = build_modify_expr (build_sptr_ref (lhs), NOP_EXPR,
+ sptr_expr);
+ vptr_expr = build_modify_expr (build_vptr_ref (lhs), NOP_EXPR,
+ vptr_expr);
+
+ result = tree_cons (NULL_TREE, optr_expr,
+ tree_cons (NULL_TREE, sptr_expr,
+ tree_cons (NULL_TREE, vptr_expr,
+ build_tree_list (NULL_TREE,
+ lhs))));
+ result = build_compound_expr (result);
+ }
+
+ current_obstack = ambient_obstack;
+ saveable_obstack = ambient_saveable_obstack;
+ return result;
+}
+
+/* Build a temporary variable declaration for the instance of a signature
+ member function call if it isn't a declaration node already. Simply
+ using a SAVE_EXPR doesn't work since we need `this' in both branches
+ of a conditional expression. */
+
+static tree
+save_this (instance)
+ tree instance;
+{
+ tree decl;
+
+ if (TREE_CODE_CLASS (TREE_CODE (instance)) == 'd')
+ decl = instance;
+ else
+ {
+ decl = build_decl (VAR_DECL, NULL_TREE, TREE_TYPE (instance));
+ DECL_REGISTER (decl) = 1;
+ layout_decl (decl, 0);
+ expand_decl (decl);
+ }
+
+ return decl;
+}
+
+/* Build a signature member function call. Looks up the signature table
+ entry corresponding to FUNCTION. Depending on the value of the CODE
+ field, either call the function in PFN directly, or use OFFSET to
+ index INSTANCE's virtual function table. */
+
+tree
+build_signature_method_call (basetype, instance, function, parms)
+ tree basetype, instance, function, parms;
+{
+ tree saved_instance = save_this (instance); /* Create temp for `this'. */
+ tree signature_tbl_ptr = build_sptr_ref (saved_instance);
+ tree sig_field_name = DECL_NAME (DECL_MEMFUNC_POINTER_TO (function));
+ tree basetype_path = TYPE_BINFO (basetype);
+ tree tbl_entry = build_component_ref (build1 (INDIRECT_REF, basetype,
+ signature_tbl_ptr),
+ sig_field_name, basetype_path, 1);
+ tree code, offset, pfn, vfn;
+ tree deflt_call = NULL_TREE, direct_call, virtual_call, result;
+
+ code = build_component_ref (tbl_entry, get_identifier (SIGTABLE_CODE_NAME),
+ NULL_TREE, 1);
+ offset = build_component_ref (tbl_entry,
+ get_identifier (SIGTABLE_OFFSET_NAME),
+ NULL_TREE, 1);
+ pfn = build_component_ref (tbl_entry, get_identifier (SIGTABLE_PFN_NAME),
+ NULL_TREE, 1);
+ TREE_TYPE (pfn) = build_pointer_type (TREE_TYPE (function));
+
+ if (IS_DEFAULT_IMPLEMENTATION (function))
+ {
+ pfn = save_expr (pfn);
+ deflt_call = build_function_call (pfn,
+ tree_cons (NULL_TREE, saved_instance,
+ TREE_CHAIN (parms)));
+ }
+
+ {
+ /* Cast the signature method to have `this' of a normal pointer type. */
+ tree old_this = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (TREE_TYPE (pfn))));
+
+ TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (TREE_TYPE (pfn)))) =
+ build_type_variant (TYPE_POINTER_TO (basetype),
+ TYPE_READONLY (old_this),
+ TYPE_VOLATILE (old_this));
+
+ direct_call = build_function_call (pfn, parms);
+
+ vfn = build_vfn_ref (&TREE_VALUE (parms), saved_instance, offset);
+ TREE_TYPE (vfn) = build_pointer_type (TREE_TYPE (function));
+ virtual_call = build_function_call (vfn, parms);
+
+ /* Undo the cast, make `this' a signature pointer again. */
+ TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (TREE_TYPE (pfn)))) = old_this;
+ }
+
+ /* Once the function was found, there should be no reason why we
+ couldn't build the member function pointer call. */
+ if (!direct_call || direct_call == error_mark_node
+ || !virtual_call || virtual_call == error_mark_node
+ || (IS_DEFAULT_IMPLEMENTATION (function)
+ && (!deflt_call || deflt_call == error_mark_node)))
+ {
+ compiler_error ("cannot build call of signature member function `%s'",
+ fndecl_as_string (NULL, function, 1));
+ return error_mark_node;
+ }
+
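+  /* The CODE field selects the call: 0 means calling the function in
+     PFN directly, 1 a virtual call through the vtable at OFFSET, and
+     2 a call of the signature's default implementation. */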
+ if (IS_DEFAULT_IMPLEMENTATION (function))
+ {
+ tree test = build_binary_op_nodefault (EQ_EXPR, code, integer_one_node,
+ EQ_EXPR);
+ result = build_conditional_expr (code,
+ build_conditional_expr (test,
+ virtual_call,
+ deflt_call),
+ direct_call);
+ }
+ else
+ result = build_conditional_expr (code, virtual_call, direct_call);
+
+ /* If we created a temporary variable for `this', initialize it first. */
+ if (instance != saved_instance)
+ result = build (COMPOUND_EXPR, TREE_TYPE (result),
+ build_modify_expr (saved_instance, NOP_EXPR, instance),
+ result);
+
+ return result;
+}
+
+/* Create a COMPONENT_REF expression for referencing the OPTR field
+ of a signature pointer or reference. */
+
+tree
+build_optr_ref (instance)
+ tree instance;
+{
+ tree field = get_identifier (SIGNATURE_OPTR_NAME);
+
+ return build_component_ref (instance, field, NULL_TREE, 1);
+}
+
+/* Create a COMPONENT_REF expression for referencing the SPTR field
+ of a signature pointer or reference. */
+
+tree
+build_sptr_ref (instance)
+ tree instance;
+{
+ tree field = get_identifier (SIGNATURE_SPTR_NAME);
+
+ return build_component_ref (instance, field, NULL_TREE, 1);
+}
+
+/* Create a COMPONENT_REF expression for referencing the VPTR field
+ of a signature pointer or reference. */
+
+tree
+build_vptr_ref (instance)
+ tree instance;
+{
+ tree field = get_identifier (SIGNATURE_VPTR_NAME);
+
+ return build_component_ref (instance, field, NULL_TREE, 1);
+}
diff --git a/gnu/usr.bin/cc/cc1plus/spew.c b/gnu/usr.bin/cc/cc1plus/spew.c
new file mode 100644
index 0000000..ea00ba2
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/spew.c
@@ -0,0 +1,436 @@
+/* Type Analyzer for GNU C++.
+ Copyright (C) 1987, 1989, 1992, 1993 Free Software Foundation, Inc.
+ Hacked... nay, bludgeoned... by Mark Eichin (eichin@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file is the type analyzer for GNU C++. To debug it, define SPEW_DEBUG
+ when compiling parse.c and spew.c. */
+
+#include "config.h"
+#include <stdio.h>
+#include "input.h"
+#include "tree.h"
+#include "lex.h"
+#include "parse.h"
+#include "cp-tree.h"
+#include "flags.h"
+#include "obstack.h"
+
+/* This takes a token stream that hasn't decided much about types and
+ tries to figure out as much as it can, with excessive lookahead and
+ backtracking. */
+
+/* fifo of tokens recognized and available to parser. */
+struct token {
+ /* The values for YYCHAR will fit in a short. */
+ short yychar;
+ short end_of_file;
+ YYSTYPE yylval;
+};
+
+static int do_aggr ();
+
+/* From lex.c: */
+/* the declaration found for the last IDENTIFIER token read in.
+ yylex must look this up to detect typedefs, which get token type TYPENAME,
+ so it is left around in case the identifier is not a typedef but is
+ used in a context which makes it a reference to a variable. */
+extern tree lastiddecl; /* let our brains leak out here too */
+extern int yychar; /* the lookahead symbol */
+extern YYSTYPE yylval; /* the semantic value of the */
+ /* lookahead symbol */
+extern int end_of_file;
+
+struct obstack token_obstack;
+int first_token;
+
+#ifdef SPEW_DEBUG
+int spew_debug = 0;
+static unsigned int yylex_ctr = 0;
+static int debug_yychar ();
+#endif
+
+/* Initialize token_obstack. Called once, from init_lex. */
+void
+init_spew ()
+{
+ gcc_obstack_init(&token_obstack);
+}
+
+#ifdef SPEW_DEBUG
+/* Use functions for debugging... */
+
+/* Return the number of tokens available on the fifo. */
+static int
+num_tokens ()
+{
+ return (obstack_object_size(&token_obstack)/sizeof(struct token))
+ - first_token;
+}
+
+/* Fetch the token N down the line from the head of the fifo. */
+static struct token*
+nth_token (n)
+ int n;
+{
+ /* could just have this do slurp_ implicitly, but this way is easier
+ * to debug... */
+ my_friendly_assert (n < num_tokens(), 298);
+ return ((struct token*)obstack_base(&token_obstack))+n+first_token;
+}
+
+/* Add a token to the token fifo. */
+static void
+add_token (t)
+ struct token* t;
+{
+ obstack_grow(&token_obstack,t,sizeof (struct token));
+}
+
+/* Consume the next token out of the fifo. */
+static void
+consume_token()
+{
+ if (num_tokens() == 1)
+ {
+ obstack_free(&token_obstack, obstack_base (&token_obstack));
+ first_token = 0;
+ }
+ else
+ first_token++;
+}
+
+#else
+/* ...otherwise use macros. */
+
+#define num_tokens() \
+ ((obstack_object_size(&token_obstack)/sizeof(struct token)) - first_token)
+
+#define nth_token(N) \
+ (((struct token*)obstack_base(&token_obstack))+(N)+first_token)
+
+#define add_token(T) obstack_grow(&token_obstack, (T), sizeof (struct token))
+
+#define consume_token() \
+ (num_tokens() == 1 \
+ ? (obstack_free (&token_obstack, obstack_base (&token_obstack)), \
+ (first_token = 0)) \
+ : first_token++)
+#endif
+
+/* Pull in enough tokens from real_yylex that the queue is N long beyond
+ the current token. */
+
+static void
+scan_tokens (n)
+ int n;
+{
+ int i;
+ struct token *tmp;
+
+ /* We cannot read past certain tokens, so make sure we don't. */
+ i = num_tokens ();
+ if (i > n)
+ return;
+ while (i-- > 0)
+ {
+ tmp = nth_token (i);
+ /* Never read past these characters: they might separate
+ the current input stream from one we save away later. */
+ if (tmp->yychar == '{' || tmp->yychar == ':' || tmp->yychar == ';')
+ goto pad_tokens;
+ }
+
+ while (num_tokens() <= n)
+ {
+ obstack_blank(&token_obstack,sizeof (struct token));
+ tmp = ((struct token *)obstack_next_free (&token_obstack))-1;
+ tmp->yychar = real_yylex();
+ tmp->end_of_file = end_of_file;
+ tmp->yylval = yylval;
+ end_of_file = 0;
+ if (tmp->yychar == '{'
+ || tmp->yychar == ':'
+ || tmp->yychar == ';')
+ {
+ pad_tokens:
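+	  /* Don't read past the stopper; pad the queue with EMPTY
+	     placeholder tokens instead. */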
+ while (num_tokens () <= n)
+ {
+ obstack_blank(&token_obstack,sizeof (struct token));
+ tmp = ((struct token *)obstack_next_free (&token_obstack))-1;
+ tmp->yychar = EMPTY;
+ tmp->end_of_file = 0;
+ }
+ }
+ }
+}
+
+/* Create room for N tokens at the front of the fifo. This is used
+ to insert new tokens into the stream ahead of the current token. */
+
+static void
+shift_tokens (n)
+ int n;
+{
+ if (first_token >= n)
+ first_token -= n;
+ else
+ {
+ int old_token_count = num_tokens ();
+ char *tmp;
+
+ obstack_blank (&token_obstack, (n-first_token) * sizeof (struct token));
+ if (old_token_count)
+ {
+ tmp = (char *)alloca ((num_tokens () + (n-first_token))
+ * sizeof (struct token));
+ /* This move does not rely on the system being able to handle
+ overlapping moves. */
+ bcopy (nth_token (0), tmp, old_token_count * sizeof (struct token));
+ bcopy (tmp, nth_token (n), old_token_count * sizeof (struct token));
+ }
+ first_token = 0;
+ }
+}
+
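+/* Return nonzero if OBJ was allocated in one of the NLEVELS most
+   recently allocated chunks of obstack H. */
+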
+static int
+probe_obstack (h, obj, nlevels)
+ struct obstack *h;
+ tree obj;
+ unsigned int nlevels;
+{
+ register struct _obstack_chunk* lp; /* below addr of any objects in this chunk */
+ register struct _obstack_chunk* plp; /* point to previous chunk if any */
+
+ lp = (h)->chunk;
+ /* We use >= rather than > since the object cannot be exactly at
+ the beginning of the chunk but might be an empty object exactly
+ at the end of an adjacent chunk. */
+ for (; nlevels != 0 && lp != 0 && ((tree)lp >= obj || (tree)lp->limit < obj);
+ nlevels -= 1)
+ {
+ plp = lp->prev;
+ lp = plp;
+ }
+ return nlevels != 0 && lp != 0;
+}
+
+/* from lex.c: */
+/* Value is 1 (or 2) if we should try to make the next identifier look like
+ a typename (when it may be a local variable or a class variable).
+ Value is 0 if we treat this name in a default fashion. */
+extern int looking_for_typename;
+int looking_for_template;
+
+extern struct obstack *current_obstack, *saveable_obstack;
+tree got_scope;
+
+int
+yylex()
+{
+ struct token tmp_token;
+ tree trrr;
+
+ retry:
+#ifdef SPEW_DEBUG
+ if (spew_debug)
+ {
+ yylex_ctr ++;
+ fprintf(stderr, "\t\t## %d ##",yylex_ctr);
+ }
+#endif
+
+ /* if we've got tokens, send them */
+ if (num_tokens())
+ {
+ tmp_token= *nth_token(0);
+
+ /* TMP_TOKEN.YYLVAL.TTYPE may have been allocated on the wrong obstack.
+ If we don't find it in CURRENT_OBSTACK's current or immediately
+ previous chunk, assume it was and copy it to the current obstack. */
+ if ((tmp_token.yychar == CONSTANT
+ || tmp_token.yychar == STRING)
+ && ! TREE_PERMANENT (tmp_token.yylval.ttype)
+ && ! probe_obstack (current_obstack, tmp_token.yylval.ttype, 2)
+ && ! probe_obstack (saveable_obstack, tmp_token.yylval.ttype, 2))
+ tmp_token.yylval.ttype = copy_node (tmp_token.yylval.ttype);
+ }
+ else
+ {
+ /* if not, grab the next one and think about it */
+ tmp_token.yychar = real_yylex ();
+ tmp_token.yylval = yylval;
+ tmp_token.end_of_file = end_of_file;
+ add_token(&tmp_token);
+ }
+
+ /* many tokens just need to be returned. At first glance, all we
+ * have to do is send them back up, but some of them are needed to
+ * figure out local context. */
+ switch(tmp_token.yychar)
+ {
+ case EMPTY:
+ /* This is a lexical no-op. */
+ consume_token ();
+#ifdef SPEW_DEBUG
+ if (spew_debug)
+ debug_yychar (tmp_token.yychar);
+#endif
+ goto retry;
+
+ case IDENTIFIER:
+ scan_tokens (1);
+ if (nth_token (1)->yychar == SCOPE)
+ /* Don't interfere with the setting from an 'aggr' prefix. */
+ looking_for_typename++;
+ else if (nth_token (1)->yychar == '<')
+ looking_for_template = 1;
+
+ trrr = lookup_name (tmp_token.yylval.ttype, -2);
+
+ if (trrr)
+ {
+ tmp_token.yychar = identifier_type (trrr);
+ switch (tmp_token.yychar)
+ {
+ case TYPENAME:
+ lastiddecl = identifier_typedecl_value (tmp_token.yylval.ttype);
+ if (lastiddecl != trrr)
+ {
+ lastiddecl = trrr;
+ if (got_scope)
+ tmp_token.yylval.ttype = DECL_NESTED_TYPENAME (trrr);
+ }
+ break;
+ case IDENTIFIER:
+ lastiddecl = trrr;
+ break;
+ case PTYPENAME:
+ lastiddecl = NULL_TREE;
+ break;
+ default:
+ my_friendly_abort (101);
+ }
+ }
+ else
+ lastiddecl = trrr;
+ got_scope = NULL_TREE;
+ /* and fall through to... */
+ case TYPENAME:
+ case PTYPENAME:
+ consume_token ();
+ if (looking_for_typename > 0)
+ looking_for_typename--;
+ looking_for_template = 0;
+ break;
+
+ case SCSPEC:
+ /* do_aggr needs to check if the previous token was RID_FRIEND,
+ so just increment first_token instead of calling consume_token. */
+ first_token++;
+ break;
+ case TYPESPEC:
+ consume_token ();
+ break;
+
+ case AGGR:
+ *nth_token(0) = tmp_token;
+ do_aggr ();
+ /* fall through to output... */
+ case ENUM:
+ /* Set this again, in case we are rescanning. */
+ looking_for_typename = 1;
+ /* fall through... */
+ default:
+ consume_token();
+ }
+
+ yylval = tmp_token.yylval;
+ yychar = tmp_token.yychar;
+ end_of_file = tmp_token.end_of_file;
+#ifdef SPEW_DEBUG
+ if (spew_debug)
+ debug_yychar(yychar);
+#endif
+ return yychar;
+}
+
+/* token[0] == AGGR (struct/union/enum)
+ * Thus, token[1] is either a TYPENAME or a TYPENAME_DEFN.
+ * If token[2] == '{' or ':' then it's TYPENAME_DEFN.
+ * It's also a definition if it's a forward declaration (as in 'struct Foo;')
+ * which we can tell if token[2] == ';' *and* token[-1] != FRIEND.
+ */
+static int
+do_aggr ()
+{
+ int yc1, yc2;
+
+ scan_tokens (2);
+ yc1 = nth_token (1)->yychar;
+ if (yc1 != TYPENAME && yc1 != IDENTIFIER && yc1 != PTYPENAME)
+ return 0;
+ yc2 = nth_token (2)->yychar;
+ if (yc2 == ';')
+ {
+ /* It's a forward declaration iff we were not preceded by 'friend'. */
+ if (first_token > 0 && nth_token (-1)->yychar == SCSPEC
+ && nth_token (-1)->yylval.ttype == ridpointers[(int) RID_FRIEND])
+ return 0;
+ }
+ else if (yc2 != '{' && yc2 != ':')
+ return 0;
+
+ switch (yc1)
+ {
+ case TYPENAME:
+ nth_token (1)->yychar = TYPENAME_DEFN;
+ break;
+ case PTYPENAME:
+ nth_token (1)->yychar = PTYPENAME_DEFN;
+ break;
+ case IDENTIFIER:
+ nth_token (1)->yychar = IDENTIFIER_DEFN;
+ break;
+ default:
+ my_friendly_abort (102);
+ }
+ return 0;
+}
+
+#ifdef SPEW_DEBUG
+/* debug_yychar takes a yychar (token number) value and prints its name. */
+static int
+debug_yychar (yy)
+ int yy;
+{
+ /* In parse.y: */
+ extern char *debug_yytranslate ();
+
+  if (yy < 256) {
+ fprintf (stderr, "<%d: %c >\n", yy, yy);
+ return 0;
+ }
+ fprintf (stderr, "<%d:%s>\n", yy, debug_yytranslate (yy));
+ return 1;
+}
+
+#endif
diff --git a/gnu/usr.bin/cc/cc1plus/tree.c b/gnu/usr.bin/cc/cc1plus/tree.c
new file mode 100644
index 0000000..88466b8
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/tree.c
@@ -0,0 +1,1763 @@
+/* Language-dependent node constructors for parse phase of GNU compiler.
+ Copyright (C) 1987, 1988, 1992, 1993 Free Software Foundation, Inc.
+ Hacked by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include "config.h"
+#include <stdio.h>
+#include "obstack.h"
+#include "tree.h"
+#include "cp-tree.h"
+#include "flags.h"
+
+#define CEIL(x,y) (((x) + (y) - 1) / (y))
+
+/* Return nonzero if REF is an lvalue valid for this language.
+ Lvalues can be assigned, unless they have TREE_READONLY.
+ Lvalues can have their address taken, unless they have DECL_REGISTER. */
+
+int
+lvalue_p (ref)
+ tree ref;
+{
+ register enum tree_code code = TREE_CODE (ref);
+
+ if (language_lvalue_valid (ref))
+ {
+ if (TREE_CODE (TREE_TYPE (ref)) == REFERENCE_TYPE)
+ return 1;
+
+ switch (code)
+ {
+ /* preincrements and predecrements are valid lvals, provided
+ what they refer to are valid lvals. */
+ case PREINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ case COMPONENT_REF:
+ case SAVE_EXPR:
+ return lvalue_p (TREE_OPERAND (ref, 0));
+
+ case STRING_CST:
+ return 1;
+
+ case VAR_DECL:
+ if (TREE_READONLY (ref) && ! TREE_STATIC (ref)
+ && DECL_LANG_SPECIFIC (ref)
+ && DECL_IN_AGGR_P (ref))
+ return 0;
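+	  /* Fall through: other VAR_DECLs are handled like the
+	     reference cases below. */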
+ case INDIRECT_REF:
+ case ARRAY_REF:
+ case PARM_DECL:
+ case RESULT_DECL:
+ case ERROR_MARK:
+ if (TREE_CODE (TREE_TYPE (ref)) != FUNCTION_TYPE
+ && TREE_CODE (TREE_TYPE (ref)) != METHOD_TYPE)
+ return 1;
+ break;
+
+ case TARGET_EXPR:
+ case WITH_CLEANUP_EXPR:
+ return 1;
+
+ /* A currently unresolved scope ref. */
+ case SCOPE_REF:
+ my_friendly_abort (103);
+ case OFFSET_REF:
+ if (TREE_CODE (TREE_OPERAND (ref, 1)) == FUNCTION_DECL)
+ return 1;
+ return lvalue_p (TREE_OPERAND (ref, 0))
+ && lvalue_p (TREE_OPERAND (ref, 1));
+ break;
+
+ case COND_EXPR:
+ return (lvalue_p (TREE_OPERAND (ref, 1))
+ && lvalue_p (TREE_OPERAND (ref, 2)));
+
+ case MODIFY_EXPR:
+ return 1;
+
+ case COMPOUND_EXPR:
+ return lvalue_p (TREE_OPERAND (ref, 1));
+ }
+ }
+ return 0;
+}
+
+/* Return nonzero if REF is an lvalue valid for this language;
+ otherwise, print an error message and return zero. */
+
+int
+lvalue_or_else (ref, string)
+ tree ref;
+ char *string;
+{
+ int win = lvalue_p (ref);
+ if (! win)
+ error ("non-lvalue in %s", string);
+ return win;
+}
+
+/* INIT is a CALL_EXPR which needs info about its target.
+ TYPE is the type that this initialization should appear to have.
+
+ Build an encapsulation of the initialization to perform
+ and return it so that it can be processed by language-independent
+ and language-specific expression expanders.
+
+ If WITH_CLEANUP_P is nonzero, we build a cleanup for this expression.
+ Otherwise, cleanups are not built here. For example, when building
+ an initialization for a stack slot, since the called function handles
+ the cleanup, we would not want to do it here. */
+tree
+build_cplus_new (type, init, with_cleanup_p)
+ tree type;
+ tree init;
+ int with_cleanup_p;
+{
+ tree slot = build (VAR_DECL, type);
+ tree rval = build (NEW_EXPR, type,
+ TREE_OPERAND (init, 0), TREE_OPERAND (init, 1), slot);
+ TREE_SIDE_EFFECTS (rval) = 1;
+ TREE_ADDRESSABLE (rval) = 1;
+ rval = build (TARGET_EXPR, type, slot, rval, 0);
+ TREE_SIDE_EFFECTS (rval) = 1;
+ TREE_ADDRESSABLE (rval) = 1;
+
+#if 0
+ if (with_cleanup_p && TYPE_NEEDS_DESTRUCTOR (type))
+ {
+ TREE_OPERAND (rval, 2) = error_mark_node;
+ rval = build (WITH_CLEANUP_EXPR, type, rval, 0,
+ build_delete (TYPE_POINTER_TO (type),
+ build_unary_op (ADDR_EXPR, slot, 0),
+ integer_two_node,
+ LOOKUP_NORMAL|LOOKUP_DESTRUCTOR, 0));
+ TREE_SIDE_EFFECTS (rval) = 1;
+ TREE_ADDRESSABLE (rval) = 1;
+ }
+#endif
+ return rval;
+}
+
+/* Recursively search EXP for CALL_EXPRs that need cleanups and replace
+ these CALL_EXPRs with tree nodes that will perform the cleanups. */
+
+tree
+break_out_cleanups (exp)
+ tree exp;
+{
+ tree tmp = exp;
+
+ if (TREE_CODE (tmp) == CALL_EXPR
+ && TYPE_NEEDS_DESTRUCTOR (TREE_TYPE (tmp)))
+ return build_cplus_new (TREE_TYPE (tmp), tmp, 1);
+
+ while (TREE_CODE (tmp) == NOP_EXPR
+ || TREE_CODE (tmp) == CONVERT_EXPR
+ || TREE_CODE (tmp) == NON_LVALUE_EXPR)
+ {
+ if (TREE_CODE (TREE_OPERAND (tmp, 0)) == CALL_EXPR
+ && TYPE_NEEDS_DESTRUCTOR (TREE_TYPE (TREE_OPERAND (tmp, 0))))
+ {
+ TREE_OPERAND (tmp, 0)
+ = build_cplus_new (TREE_TYPE (TREE_OPERAND (tmp, 0)),
+ TREE_OPERAND (tmp, 0), 1);
+ break;
+ }
+ else
+ tmp = TREE_OPERAND (tmp, 0);
+ }
+ return exp;
+}
+
+/* Recursively perform a preorder search of EXP for CALL_EXPRs, making
+   copies where they are found. Returns a deep copy of all nodes
+   transitively containing CALL_EXPRs. */
+
+tree
+break_out_calls (exp)
+ tree exp;
+{
+ register tree t1, t2;
+ register enum tree_code code;
+ register int changed = 0;
+ register int i;
+
+ if (exp == NULL_TREE)
+ return exp;
+
+ code = TREE_CODE (exp);
+
+ if (code == CALL_EXPR)
+ return copy_node (exp);
+
+ /* Don't try and defeat a save_expr, as it should only be done once. */
+ if (code == SAVE_EXPR)
+ return exp;
+
+ switch (TREE_CODE_CLASS (code))
+ {
+ default:
+ abort ();
+
+ case 'c': /* a constant */
+ case 't': /* a type node */
+ case 'x': /* something random, like an identifier or an ERROR_MARK. */
+ return exp;
+
+ case 'd': /* A decl node */
+ t1 = break_out_calls (DECL_INITIAL (exp));
+ if (t1 != DECL_INITIAL (exp))
+ {
+ exp = copy_node (exp);
+ DECL_INITIAL (exp) = t1;
+ }
+ return exp;
+
+ case 'b': /* A block node */
+ {
+ /* Don't know how to handle these correctly yet. Must do a
+ break_out_calls on all DECL_INITIAL values for local variables,
+ and also break_out_calls on all sub-blocks and sub-statements. */
+ abort ();
+ }
+ return exp;
+
+ case 'e': /* an expression */
+ case 'r': /* a reference */
+ case 's': /* an expression with side effects */
+ for (i = tree_code_length[(int) code] - 1; i >= 0; i--)
+ {
+ t1 = break_out_calls (TREE_OPERAND (exp, i));
+ if (t1 != TREE_OPERAND (exp, i))
+ {
+ exp = copy_node (exp);
+ TREE_OPERAND (exp, i) = t1;
+ }
+ }
+ return exp;
+
+ case '<': /* a comparison expression */
+ case '2': /* a binary arithmetic expression */
+ t2 = break_out_calls (TREE_OPERAND (exp, 1));
+ if (t2 != TREE_OPERAND (exp, 1))
+ changed = 1;
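+      /* Fall through to handle the first operand as well. */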
+ case '1': /* a unary arithmetic expression */
+ t1 = break_out_calls (TREE_OPERAND (exp, 0));
+ if (t1 != TREE_OPERAND (exp, 0))
+ changed = 1;
+ if (changed)
+ {
+ if (tree_code_length[(int) code] == 1)
+ return build1 (code, TREE_TYPE (exp), t1);
+ else
+ return build (code, TREE_TYPE (exp), t1, t2);
+ }
+ return exp;
+ }
+
+}
+
+extern struct obstack *current_obstack;
+extern struct obstack permanent_obstack, class_obstack;
+extern struct obstack *saveable_obstack;
+
+/* Here is how primitive or already-canonicalized types' hash
+ codes are made. MUST BE CONSISTENT WITH tree.c !!! */
+#define TYPE_HASH(TYPE) ((HOST_WIDE_INT) (TYPE) & 0777777)
+
+/* Construct, lay out and return the type of methods belonging to class
+ BASETYPE and whose arguments are described by ARGTYPES and whose values
+ are described by RETTYPE. If each type exists already, reuse it. */
+tree
+build_cplus_method_type (basetype, rettype, argtypes)
+ tree basetype, rettype, argtypes;
+{
+ register tree t;
+ tree ptype;
+ int hashcode;
+
+ /* Make a node of the sort we want. */
+ t = make_node (METHOD_TYPE);
+
+ TYPE_METHOD_BASETYPE (t) = TYPE_MAIN_VARIANT (basetype);
+ TREE_TYPE (t) = rettype;
+ if (IS_SIGNATURE (basetype))
+ ptype = build_signature_pointer_type (TYPE_MAIN_VARIANT (basetype),
+ TYPE_READONLY (basetype),
+ TYPE_VOLATILE (basetype));
+ else
+ {
+ ptype = build_pointer_type (basetype);
+ ptype = build_type_variant (ptype, 1, 0);
+ }
+ /* The actual arglist for this function includes a "hidden" argument
+ which is "this". Put it into the list of argument types. */
+
+ argtypes = tree_cons (NULL_TREE, ptype, argtypes);
+ TYPE_ARG_TYPES (t) = argtypes;
+ TREE_SIDE_EFFECTS (argtypes) = 1; /* Mark first argtype as "artificial". */
+
+ /* If we already have such a type, use the old one and free this one.
+ Note that it also frees up the above cons cell if found. */
+ hashcode = TYPE_HASH (basetype) + TYPE_HASH (rettype) + type_hash_list (argtypes);
+ t = type_hash_canon (hashcode, t);
+
+ if (TYPE_SIZE (t) == 0)
+ layout_type (t);
+
+ return t;
+}
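+
+/* For example (a sketch of the intent): for a method declared as
+
+     int X::f (int);
+
+   the METHOD_TYPE built above has basetype X, return type int, and an
+   argument list that begins with the hidden `this' parameter, roughly
+   `(X *const, int)'; the const variant of the pointer comes from the
+   build_type_variant call.  */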
+
+tree
+build_cplus_staticfn_type (basetype, rettype, argtypes)
+ tree basetype, rettype, argtypes;
+{
+ register tree t;
+ int hashcode;
+
+ /* Make a node of the sort we want. */
+ t = make_node (FUNCTION_TYPE);
+
+ TYPE_METHOD_BASETYPE (t) = TYPE_MAIN_VARIANT (basetype);
+ TREE_TYPE (t) = rettype;
+
+ TYPE_ARG_TYPES (t) = argtypes;
+
+ /* If we already have such a type, use the old one and free this one.
+ Note that it also frees up the above cons cell if found. */
+ hashcode = TYPE_HASH (basetype) + TYPE_HASH (rettype) + type_hash_list (argtypes);
+ t = type_hash_canon (hashcode, t);
+
+ if (TYPE_SIZE (t) == 0)
+ layout_type (t);
+
+ return t;
+}
+
+tree
+build_cplus_array_type (elt_type, index_type)
+ tree elt_type;
+ tree index_type;
+{
+ register struct obstack *ambient_obstack = current_obstack;
+ register struct obstack *ambient_saveable_obstack = saveable_obstack;
+ tree t;
+
+ /* We need a new one. If both ELT_TYPE and INDEX_TYPE are permanent,
+ make this permanent too. */
+ if (TREE_PERMANENT (elt_type)
+ && (index_type == 0 || TREE_PERMANENT (index_type)))
+ {
+ current_obstack = &permanent_obstack;
+ saveable_obstack = &permanent_obstack;
+ }
+
+ t = build_array_type (elt_type, index_type);
+
+ /* Push these needs up so that initialization takes place
+ more easily. */
+ TYPE_NEEDS_CONSTRUCTING (t) = TYPE_NEEDS_CONSTRUCTING (TYPE_MAIN_VARIANT (elt_type));
+ TYPE_NEEDS_DESTRUCTOR (t) = TYPE_NEEDS_DESTRUCTOR (TYPE_MAIN_VARIANT (elt_type));
+ current_obstack = ambient_obstack;
+ saveable_obstack = ambient_saveable_obstack;
+ return t;
+}
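+
+/* E.g. (sketch): if X has a constructor and a destructor, then for
+
+     X a[10];
+
+   the array type built here inherits TYPE_NEEDS_CONSTRUCTING and
+   TYPE_NEEDS_DESTRUCTOR from X, so such arrays get element-wise
+   initialization and cleanup.  */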
+
+/* Add OFFSET to all base types of BINFO.
+
+   OFFSET, which is a type offset, is a number of bytes.
+
+ Note that we don't have to worry about having two paths to the
+ same base type, since this type owns its association list. */
+void
+propagate_binfo_offsets (binfo, offset)
+ tree binfo;
+ tree offset;
+{
+ tree binfos = BINFO_BASETYPES (binfo);
+ int i, n_baselinks = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ for (i = 0; i < n_baselinks; /* note increment is done in the loop. */)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+
+ if (TREE_VIA_VIRTUAL (base_binfo))
+ i += 1;
+ else
+ {
+ int j;
+ tree base_binfos = BINFO_BASETYPES (base_binfo);
+ tree delta;
+
+ for (j = i+1; j < n_baselinks; j++)
+ if (! TREE_VIA_VIRTUAL (TREE_VEC_ELT (binfos, j)))
+ {
+ /* The next basetype offset must take into account the space
+ between the classes, not just the size of each class. */
+ delta = size_binop (MINUS_EXPR,
+ BINFO_OFFSET (TREE_VEC_ELT (binfos, j)),
+ BINFO_OFFSET (base_binfo));
+ break;
+ }
+
+#if 0
+ if (BINFO_OFFSET_ZEROP (base_binfo))
+ BINFO_OFFSET (base_binfo) = offset;
+ else
+ BINFO_OFFSET (base_binfo)
+ = size_binop (PLUS_EXPR, BINFO_OFFSET (base_binfo), offset);
+#else
+ BINFO_OFFSET (base_binfo) = offset;
+#endif
+ if (base_binfos)
+ {
+ int k;
+ tree chain = NULL_TREE;
+
+ /* Now unshare the structure beneath BASE_BINFO. */
+ for (k = TREE_VEC_LENGTH (base_binfos)-1;
+ k >= 0; k--)
+ {
+ tree base_base_binfo = TREE_VEC_ELT (base_binfos, k);
+ if (! TREE_VIA_VIRTUAL (base_base_binfo))
+ TREE_VEC_ELT (base_binfos, k)
+ = make_binfo (BINFO_OFFSET (base_base_binfo),
+ base_base_binfo,
+ BINFO_VTABLE (base_base_binfo),
+ BINFO_VIRTUALS (base_base_binfo),
+ chain);
+ chain = TREE_VEC_ELT (base_binfos, k);
+ TREE_VIA_PUBLIC (chain) = TREE_VIA_PUBLIC (base_base_binfo);
+ TREE_VIA_PROTECTED (chain) = TREE_VIA_PROTECTED (base_base_binfo);
+ }
+ /* Now propagate the offset to the base types. */
+ propagate_binfo_offsets (base_binfo, offset);
+ }
+
+ /* Go to our next class that counts for offset propagation. */
+ i = j;
+ if (i < n_baselinks)
+ offset = size_binop (PLUS_EXPR, offset, delta);
+ }
+ }
+}
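+
+/* A worked example (sizes are illustrative assumptions): given
+
+     struct A { int a; };
+     struct B { int b; };
+     struct D : A, B { };
+
+   with A laid out at offset 0 of D and B at offset sizeof (A), this
+   routine records those offsets in the (freshly unshared) binfos for
+   A and B, using DELTA to step from one nonvirtual base to the next.  */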
+
+/* Compute the actual offsets that our virtual base classes
+ will have *for this type*. This must be performed after
+   the fields are laid out, since virtual baseclasses must be
+   laid down at the end of the record.
+
+ Returns the maximum number of virtual functions any of the virtual
+ baseclasses provide. */
+int
+layout_vbasetypes (rec, max)
+ tree rec;
+ int max;
+{
+ /* Get all the virtual base types that this type uses.
+ The TREE_VALUE slot holds the virtual baseclass type. */
+ tree vbase_types = get_vbase_types (rec);
+
+#ifdef STRUCTURE_SIZE_BOUNDARY
+ unsigned record_align = MAX (STRUCTURE_SIZE_BOUNDARY, TYPE_ALIGN (rec));
+#else
+ unsigned record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (rec));
+#endif
+ int desired_align;
+
+ /* Record size so far is CONST_SIZE + VAR_SIZE bits,
+ where CONST_SIZE is an integer
+ and VAR_SIZE is a tree expression.
+ If VAR_SIZE is null, the size is just CONST_SIZE.
+ Naturally we try to avoid using VAR_SIZE. */
+ register unsigned const_size = 0;
+ register tree var_size = 0;
+ int nonvirtual_const_size;
+ tree nonvirtual_var_size;
+
+ CLASSTYPE_VBASECLASSES (rec) = vbase_types;
+
+ if (TREE_CODE (TYPE_SIZE (rec)) == INTEGER_CST)
+ const_size = TREE_INT_CST_LOW (TYPE_SIZE (rec));
+ else
+ var_size = TYPE_SIZE (rec);
+
+ nonvirtual_const_size = const_size;
+ nonvirtual_var_size = var_size;
+
+ while (vbase_types)
+ {
+ tree basetype = BINFO_TYPE (vbase_types);
+ tree offset;
+
+ desired_align = TYPE_ALIGN (basetype);
+ record_align = MAX (record_align, desired_align);
+
+ if (const_size == 0)
+ offset = integer_zero_node;
+ else
+ {
+ /* Give each virtual base type the alignment it wants. */
+ const_size = CEIL (const_size, TYPE_ALIGN (basetype))
+ * TYPE_ALIGN (basetype);
+ offset = size_int (CEIL (const_size, BITS_PER_UNIT));
+ }
+
+ if (CLASSTYPE_VSIZE (basetype) > max)
+ max = CLASSTYPE_VSIZE (basetype);
+ BINFO_OFFSET (vbase_types) = offset;
+
+ if (TREE_CODE (TYPE_SIZE (basetype)) == INTEGER_CST)
+ const_size += MAX (BITS_PER_UNIT,
+ TREE_INT_CST_LOW (TYPE_SIZE (basetype))
+ - TREE_INT_CST_LOW (CLASSTYPE_VBASE_SIZE (basetype)));
+ else if (var_size == 0)
+ var_size = TYPE_SIZE (basetype);
+ else
+ var_size = size_binop (PLUS_EXPR, var_size, TYPE_SIZE (basetype));
+
+ vbase_types = TREE_CHAIN (vbase_types);
+ }
+
+ /* Set the alignment in the complete type. We don't set CLASSTYPE_ALIGN
+ here, as that is for this class, without any virtual base classes. */
+ TYPE_ALIGN (rec) = record_align;
+ if (const_size != nonvirtual_const_size)
+ {
+ CLASSTYPE_VBASE_SIZE (rec)
+ = size_int (const_size - nonvirtual_const_size);
+ TYPE_SIZE (rec) = size_int (const_size);
+ }
+
+ /* Now propagate offset information throughout the lattice
+ under the vbase type. */
+ for (vbase_types = CLASSTYPE_VBASECLASSES (rec); vbase_types;
+ vbase_types = TREE_CHAIN (vbase_types))
+ {
+ tree base_binfos = BINFO_BASETYPES (vbase_types);
+
+ if (base_binfos)
+ {
+ tree chain = NULL_TREE;
+ int j;
+ /* Now unshare the structure beneath BASE_BINFO. */
+
+ for (j = TREE_VEC_LENGTH (base_binfos)-1;
+ j >= 0; j--)
+ {
+ tree base_base_binfo = TREE_VEC_ELT (base_binfos, j);
+ if (! TREE_VIA_VIRTUAL (base_base_binfo))
+ TREE_VEC_ELT (base_binfos, j)
+ = make_binfo (BINFO_OFFSET (base_base_binfo),
+ base_base_binfo,
+ BINFO_VTABLE (base_base_binfo),
+ BINFO_VIRTUALS (base_base_binfo),
+ chain);
+ chain = TREE_VEC_ELT (base_binfos, j);
+ TREE_VIA_PUBLIC (chain) = TREE_VIA_PUBLIC (base_base_binfo);
+ TREE_VIA_PROTECTED (chain) = TREE_VIA_PROTECTED (base_base_binfo);
+ }
+
+ propagate_binfo_offsets (vbase_types, BINFO_OFFSET (vbase_types));
+ }
+ }
+
+ return max;
+}
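+
+/* Sketch of the effect: for
+
+     struct V { int v; };
+     struct A : virtual V { };
+     struct B : virtual V { };
+     struct D : A, B { };
+
+   the nonvirtual parts of A and B are laid out first, and the single
+   shared copy of V is placed once, suitably aligned, at the end of D;
+   its offset is then propagated through the binfo lattice.  */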
+
+/* Lay out the base types of a record type, REC.
+ Tentatively set the size and alignment of REC
+ according to the base types alone.
+
+ Offsets for immediate nonvirtual baseclasses are also computed here.
+
+ TYPE_BINFO (REC) should be NULL_TREE on entry, and this routine
+ creates a list of base_binfos in TYPE_BINFO (REC) from BINFOS.
+
+ Returns list of virtual base classes in a FIELD_DECL chain. */
+tree
+layout_basetypes (rec, binfos)
+ tree rec, binfos;
+{
+ /* Chain to hold all the new FIELD_DECLs which point at virtual
+ base classes. */
+ tree vbase_decls = NULL_TREE;
+
+#ifdef STRUCTURE_SIZE_BOUNDARY
+ unsigned record_align = MAX (STRUCTURE_SIZE_BOUNDARY, TYPE_ALIGN (rec));
+#else
+ unsigned record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (rec));
+#endif
+
+ /* Record size so far is CONST_SIZE + VAR_SIZE bits, where CONST_SIZE is
+ an integer and VAR_SIZE is a tree expression. If VAR_SIZE is null,
+ the size is just CONST_SIZE. Naturally we try to avoid using
+   VAR_SIZE.  And so far, we've been successful.  */
+#if 0
+ register tree var_size = 0;
+#endif
+
+ register unsigned const_size = 0;
+ int i, n_baseclasses = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+
+ /* Handle basetypes almost like fields, but record their
+ offsets differently. */
+
+ for (i = 0; i < n_baseclasses; i++)
+ {
+ int inc, desired_align, int_vbase_size;
+ register tree base_binfo = TREE_VEC_ELT (binfos, i);
+ register tree basetype = BINFO_TYPE (base_binfo);
+ tree decl, offset;
+
+ if (TYPE_SIZE (basetype) == 0)
+ {
+#if 0
+ /* This error is now reported in xref_tag, thus giving better
+ location information. */
+ error_with_aggr_type (base_binfo,
+ "base class `%s' has incomplete type");
+
+ TREE_VIA_PUBLIC (base_binfo) = 1;
+ TREE_VIA_PROTECTED (base_binfo) = 0;
+ TREE_VIA_VIRTUAL (base_binfo) = 0;
+
+ /* Should handle this better so that
+
+ class A;
+ class B: private A { virtual void F(); };
+
+ does not dump core when compiled. */
+ my_friendly_abort (121);
+#endif
+ continue;
+ }
+
+ /* All basetypes are recorded in the association list of the
+ derived type. */
+
+ if (TREE_VIA_VIRTUAL (base_binfo))
+ {
+ int j;
+ char *name = (char *)alloca (TYPE_NAME_LENGTH (basetype)
+ + sizeof (VBASE_NAME) + 1);
+
+ /* The offset for a virtual base class is only used in computing
+ virtual function tables and for initializing virtual base
+ pointers. It is built once `get_vbase_types' is called. */
+
+ /* If this basetype can come from another vbase pointer
+ without an additional indirection, we will share
+ that pointer. If an indirection is involved, we
+ make our own pointer. */
+ for (j = 0; j < n_baseclasses; j++)
+ {
+ tree other_base_binfo = TREE_VEC_ELT (binfos, j);
+ if (! TREE_VIA_VIRTUAL (other_base_binfo)
+ && binfo_member (basetype,
+ CLASSTYPE_VBASECLASSES (BINFO_TYPE (other_base_binfo))))
+ goto got_it;
+ }
+ sprintf (name, VBASE_NAME_FORMAT, TYPE_NAME_STRING (basetype));
+ decl = build_lang_decl (FIELD_DECL, get_identifier (name),
+ build_pointer_type (basetype));
+ /* If you change any of the below, take a look at all the
+ other VFIELD_BASEs and VTABLE_BASEs in the code, and change
+ them too. */
+ DECL_ASSEMBLER_NAME (decl) = get_identifier (VTABLE_BASE);
+ DECL_VIRTUAL_P (decl) = 1;
+ DECL_FIELD_CONTEXT (decl) = rec;
+ DECL_CLASS_CONTEXT (decl) = rec;
+ DECL_FCONTEXT (decl) = basetype;
+ DECL_FIELD_SIZE (decl) = 0;
+ DECL_ALIGN (decl) = TYPE_ALIGN (ptr_type_node);
+ TREE_CHAIN (decl) = vbase_decls;
+ BINFO_VPTR_FIELD (base_binfo) = decl;
+ vbase_decls = decl;
+
+ if (warn_nonvdtor && TYPE_HAS_DESTRUCTOR (basetype)
+ && DECL_VINDEX (TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (basetype), 0)) == NULL_TREE)
+ {
+ warning_with_decl (TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (basetype), 0),
+ "destructor `%s' non-virtual");
+ warning ("in inheritance relationship `%s: virtual %s'",
+ TYPE_NAME_STRING (rec),
+ TYPE_NAME_STRING (basetype));
+ }
+ got_it:
+ /* The space this decl occupies has already been accounted for. */
+ continue;
+ }
+
+ if (const_size == 0)
+ offset = integer_zero_node;
+ else
+ {
+ /* Give each base type the alignment it wants. */
+ const_size = CEIL (const_size, TYPE_ALIGN (basetype))
+ * TYPE_ALIGN (basetype);
+ offset = size_int ((const_size + BITS_PER_UNIT - 1) / BITS_PER_UNIT);
+
+#if 0
+ /* bpk: Disabled this check until someone is willing to
+ claim it as theirs and explain exactly what circumstances
+ warrant the warning. */
+ if (warn_nonvdtor && TYPE_HAS_DESTRUCTOR (basetype)
+ && DECL_VINDEX (TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (basetype), 0)) == NULL_TREE)
+ {
+ warning_with_decl (TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (basetype), 0),
+ "destructor `%s' non-virtual");
+ warning ("in inheritance relationship `%s:%s %s'",
+ TYPE_NAME_STRING (rec),
+ TREE_VIA_VIRTUAL (base_binfo) ? " virtual" : "",
+ TYPE_NAME_STRING (basetype));
+ }
+#endif
+ }
+ BINFO_OFFSET (base_binfo) = offset;
+ if (CLASSTYPE_VSIZE (basetype))
+ {
+ BINFO_VTABLE (base_binfo) = TYPE_BINFO_VTABLE (basetype);
+ BINFO_VIRTUALS (base_binfo) = TYPE_BINFO_VIRTUALS (basetype);
+ }
+ TREE_CHAIN (base_binfo) = TYPE_BINFO (rec);
+ TYPE_BINFO (rec) = base_binfo;
+
+ /* Add only the amount of storage not present in
+ the virtual baseclasses. */
+
+ int_vbase_size = TREE_INT_CST_LOW (CLASSTYPE_VBASE_SIZE (basetype));
+ if (TREE_INT_CST_LOW (TYPE_SIZE (basetype)) > int_vbase_size)
+ {
+ inc = MAX (record_align,
+ (TREE_INT_CST_LOW (TYPE_SIZE (basetype))
+ - int_vbase_size));
+
+ /* Record must have at least as much alignment as any field. */
+ desired_align = TYPE_ALIGN (basetype);
+ record_align = MAX (record_align, desired_align);
+
+ const_size += inc;
+ }
+ }
+
+ if (const_size)
+ CLASSTYPE_SIZE (rec) = size_int (const_size);
+ else
+ CLASSTYPE_SIZE (rec) = integer_zero_node;
+ CLASSTYPE_ALIGN (rec) = record_align;
+
+ return vbase_decls;
+}
+
+/* Hashing of lists so that we don't make duplicates.
+ The entry point is `list_hash_canon'. */
+
+/* Each hash table slot is a bucket containing a chain
+ of these structures. */
+
+struct list_hash
+{
+ struct list_hash *next; /* Next structure in the bucket. */
+ int hashcode; /* Hash code of this list. */
+ tree list; /* The list recorded here. */
+};
+
+/* Now here is the hash table. When recording a list, it is added
+ to the slot whose index is the hash code mod the table size.
+ Note that the hash table is used for several kinds of lists.
+ While all these live in the same table, they are completely independent,
+ and the hash code is computed differently for each of these. */
+
+#define TYPE_HASH_SIZE 59
+struct list_hash *list_hash_table[TYPE_HASH_SIZE];
+
+/* Compute a hash code for a list (chain of TREE_LIST nodes
+ with goodies in the TREE_PURPOSE, TREE_VALUE, and bits of the
+ TREE_COMMON slots), by adding the hash codes of the individual entries. */
+
+int
+list_hash (list)
+ tree list;
+{
+ register int hashcode = 0;
+
+ if (TREE_CHAIN (list))
+ hashcode += TYPE_HASH (TREE_CHAIN (list));
+
+ if (TREE_VALUE (list))
+ hashcode += TYPE_HASH (TREE_VALUE (list));
+ else
+ hashcode += 1007;
+ if (TREE_PURPOSE (list))
+ hashcode += TYPE_HASH (TREE_PURPOSE (list));
+ else
+ hashcode += 1009;
+ return hashcode;
+}
+
+/* Look in the list hash table for a list isomorphic to LIST.
+   If one is found, return it.  Otherwise return 0.  */
+
+tree
+list_hash_lookup (hashcode, list)
+ int hashcode;
+ tree list;
+{
+ register struct list_hash *h;
+ for (h = list_hash_table[hashcode % TYPE_HASH_SIZE]; h; h = h->next)
+ if (h->hashcode == hashcode
+ && TREE_VIA_VIRTUAL (h->list) == TREE_VIA_VIRTUAL (list)
+ && TREE_VIA_PUBLIC (h->list) == TREE_VIA_PUBLIC (list)
+ && TREE_VIA_PROTECTED (h->list) == TREE_VIA_PROTECTED (list)
+ && TREE_PURPOSE (h->list) == TREE_PURPOSE (list)
+ && TREE_VALUE (h->list) == TREE_VALUE (list)
+ && TREE_CHAIN (h->list) == TREE_CHAIN (list))
+ {
+ my_friendly_assert (TREE_TYPE (h->list) == TREE_TYPE (list), 299);
+ return h->list;
+ }
+ return 0;
+}
+
+/* Add an entry to the list-hash-table
+   for a list LIST whose hash code is HASHCODE.  */
+
+void
+list_hash_add (hashcode, list)
+ int hashcode;
+ tree list;
+{
+ register struct list_hash *h;
+
+ h = (struct list_hash *) obstack_alloc (&class_obstack, sizeof (struct list_hash));
+ h->hashcode = hashcode;
+ h->list = list;
+ h->next = list_hash_table[hashcode % TYPE_HASH_SIZE];
+ list_hash_table[hashcode % TYPE_HASH_SIZE] = h;
+}
+
+/* Given LIST, and HASHCODE its hash code, return the canonical
+   object for an identical list if one already exists.
+   Otherwise, return LIST, and record it as the canonical object
+   if it is a permanent object.
+
+ To use this function, first create a list of the sort you want.
+ Then compute its hash code from the fields of the list that
+ make it different from other similar lists.
+ Then call this function and use the value.
+ This function frees the list you pass in if it is a duplicate. */
+
+/* Set to 1 to debug without canonicalization. Never set by program. */
+static int debug_no_list_hash = 0;
+
+tree
+list_hash_canon (hashcode, list)
+ int hashcode;
+ tree list;
+{
+ tree t1;
+
+ if (debug_no_list_hash)
+ return list;
+
+ t1 = list_hash_lookup (hashcode, list);
+ if (t1 != 0)
+ {
+ obstack_free (&class_obstack, list);
+ return t1;
+ }
+
+ /* If this is a new list, record it for later reuse. */
+ list_hash_add (hashcode, list);
+
+ return list;
+}
+
+tree
+hash_tree_cons (via_public, via_virtual, via_protected, purpose, value, chain)
+ int via_public, via_virtual, via_protected;
+ tree purpose, value, chain;
+{
+ struct obstack *ambient_obstack = current_obstack;
+ tree t;
+ int hashcode;
+
+ current_obstack = &class_obstack;
+ t = tree_cons (purpose, value, chain);
+ TREE_VIA_PUBLIC (t) = via_public;
+ TREE_VIA_PROTECTED (t) = via_protected;
+ TREE_VIA_VIRTUAL (t) = via_virtual;
+ hashcode = list_hash (t);
+ t = list_hash_canon (hashcode, t);
+ current_obstack = ambient_obstack;
+ return t;
+}
+
+/* Constructor for hashed lists. */
+tree
+hash_tree_chain (value, chain)
+ tree value, chain;
+{
+ struct obstack *ambient_obstack = current_obstack;
+ tree t;
+ int hashcode;
+
+ current_obstack = &class_obstack;
+ t = tree_cons (NULL_TREE, value, chain);
+ hashcode = list_hash (t);
+ t = list_hash_canon (hashcode, t);
+ current_obstack = ambient_obstack;
+ return t;
+}
+
+/* Similar, but used for concatenating two lists. */
+tree
+hash_chainon (list1, list2)
+ tree list1, list2;
+{
+ if (list2 == 0)
+ return list1;
+ if (list1 == 0)
+ return list2;
+ if (TREE_CHAIN (list1) == NULL_TREE)
+ return hash_tree_chain (TREE_VALUE (list1), list2);
+ return hash_tree_chain (TREE_VALUE (list1),
+ hash_chainon (TREE_CHAIN (list1), list2));
+}
+
+static tree
+get_identifier_list (value)
+ tree value;
+{
+ tree list = IDENTIFIER_AS_LIST (value);
+ if (list != NULL_TREE
+ && (TREE_CODE (list) != TREE_LIST
+ || TREE_VALUE (list) != value))
+ list = NULL_TREE;
+ else if (IDENTIFIER_HAS_TYPE_VALUE (value)
+ && TREE_CODE (IDENTIFIER_TYPE_VALUE (value)) == RECORD_TYPE
+ && IDENTIFIER_TYPE_VALUE (value)
+ == TYPE_MAIN_VARIANT (IDENTIFIER_TYPE_VALUE (value)))
+ {
+ tree type = IDENTIFIER_TYPE_VALUE (value);
+
+ if (TYPE_PTRMEMFUNC_P (type))
+ list = NULL_TREE;
+ else if (type == current_class_type)
+ /* Don't mess up the constructor name. */
+ list = tree_cons (NULL_TREE, value, NULL_TREE);
+ else
+ {
+ register tree id;
+ /* This will return the correct thing for regular types,
+ nested types, and templates. Yay! */
+ if (TYPE_NESTED_NAME (type))
+ id = TYPE_NESTED_NAME (type);
+ else
+ id = TYPE_IDENTIFIER (type);
+
+ if (CLASSTYPE_ID_AS_LIST (type) == NULL_TREE)
+ CLASSTYPE_ID_AS_LIST (type)
+ = perm_tree_cons (NULL_TREE, id, NULL_TREE);
+ list = CLASSTYPE_ID_AS_LIST (type);
+ }
+ }
+ return list;
+}
+
+tree
+get_decl_list (value)
+ tree value;
+{
+ tree list = NULL_TREE;
+
+ if (TREE_CODE (value) == IDENTIFIER_NODE)
+ list = get_identifier_list (value);
+ else if (TREE_CODE (value) == RECORD_TYPE
+ && TYPE_LANG_SPECIFIC (value))
+ list = CLASSTYPE_AS_LIST (value);
+
+ if (list != NULL_TREE)
+ {
+ my_friendly_assert (TREE_CHAIN (list) == NULL_TREE, 301);
+ return list;
+ }
+
+ return build_decl_list (NULL_TREE, value);
+}
+
+/* Look in the list hash table for a list isomorphic to
+   `build_tree_list (NULL_TREE, VALUE)'.
+ If one is found, return it. Otherwise return 0. */
+
+tree
+list_hash_lookup_or_cons (value)
+ tree value;
+{
+ register int hashcode = TYPE_HASH (value);
+ register struct list_hash *h;
+ struct obstack *ambient_obstack;
+ tree list = NULL_TREE;
+
+ if (TREE_CODE (value) == IDENTIFIER_NODE)
+ list = get_identifier_list (value);
+ else if (TREE_CODE (value) == TYPE_DECL
+ && TREE_CODE (TREE_TYPE (value)) == RECORD_TYPE
+ && TYPE_LANG_SPECIFIC (TREE_TYPE (value)))
+ list = CLASSTYPE_ID_AS_LIST (TREE_TYPE (value));
+ else if (TREE_CODE (value) == RECORD_TYPE
+ && TYPE_LANG_SPECIFIC (value))
+ list = CLASSTYPE_AS_LIST (value);
+
+ if (list != NULL_TREE)
+ {
+ my_friendly_assert (TREE_CHAIN (list) == NULL_TREE, 302);
+ return list;
+ }
+
+ if (debug_no_list_hash)
+ return hash_tree_chain (value, NULL_TREE);
+
+ for (h = list_hash_table[hashcode % TYPE_HASH_SIZE]; h; h = h->next)
+ if (h->hashcode == hashcode
+ && TREE_VIA_VIRTUAL (h->list) == 0
+ && TREE_VIA_PUBLIC (h->list) == 0
+ && TREE_VIA_PROTECTED (h->list) == 0
+ && TREE_PURPOSE (h->list) == 0
+ && TREE_VALUE (h->list) == value)
+ {
+ my_friendly_assert (TREE_TYPE (h->list) == 0, 303);
+ my_friendly_assert (TREE_CHAIN (h->list) == 0, 304);
+ return h->list;
+ }
+
+ ambient_obstack = current_obstack;
+ current_obstack = &class_obstack;
+ list = build_tree_list (NULL_TREE, value);
+ list_hash_add (hashcode, list);
+ current_obstack = ambient_obstack;
+ return list;
+}
+
+/* Build an association between TYPE and some parameters:
+
+ OFFSET is the offset added to `this' to convert it to a pointer
+ of type `TYPE *'
+
+ BINFO is the base binfo to use, if we are deriving from one. This
+ is necessary, as we want specialized parent binfos from base
+ classes, so that the VTABLE_NAMEs of bases are for the most derived
+   type, instead of the simple type.
+
+ VTABLE is the virtual function table with which to initialize
+ sub-objects of type TYPE.
+
+ VIRTUALS are the virtual functions sitting in VTABLE.
+
+ CHAIN are more associations we must retain. */
+
+tree
+make_binfo (offset, binfo, vtable, virtuals, chain)
+ tree offset, binfo;
+ tree vtable, virtuals;
+ tree chain;
+{
+ tree new_binfo = make_tree_vec (6);
+ tree type;
+
+ if (TREE_CODE (binfo) == TREE_VEC)
+ type = BINFO_TYPE (binfo);
+ else
+ {
+ type = binfo;
+ binfo = TYPE_BINFO (binfo);
+ }
+
+ TREE_CHAIN (new_binfo) = chain;
+ if (chain)
+ TREE_USED (new_binfo) = TREE_USED (chain);
+
+ TREE_TYPE (new_binfo) = TYPE_MAIN_VARIANT (type);
+ BINFO_OFFSET (new_binfo) = offset;
+ BINFO_VTABLE (new_binfo) = vtable;
+ BINFO_VIRTUALS (new_binfo) = virtuals;
+ BINFO_VPTR_FIELD (new_binfo) = NULL_TREE;
+
+ if (binfo && BINFO_BASETYPES (binfo) != NULL_TREE)
+ BINFO_BASETYPES (new_binfo) = copy_node (BINFO_BASETYPES (binfo));
+ return new_binfo;
+}
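+
+/* So a binfo is a TREE_VEC whose TREE_TYPE is the base type and whose
+   slots record the offset within the derived object, the vtable and
+   virtuals, the vector of basetypes, and the vptr field; the BINFO_*
+   accessors hide the actual slot assignments.  */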
+
+tree
+copy_binfo (list)
+ tree list;
+{
+ tree binfo = copy_list (list);
+ tree rval = binfo;
+ while (binfo)
+ {
+ TREE_USED (binfo) = 0;
+ if (BINFO_BASETYPES (binfo))
+ BINFO_BASETYPES (binfo) = copy_node (BINFO_BASETYPES (binfo));
+ binfo = TREE_CHAIN (binfo);
+ }
+ return rval;
+}
+
+/* Return the binfo value for ELEM in TYPE. */
+
+tree
+binfo_value (elem, type)
+ tree elem;
+ tree type;
+{
+ if (get_base_distance (elem, type, 0, (tree *)0) == -2)
+ compiler_error ("base class `%s' ambiguous in binfo_value",
+ TYPE_NAME_STRING (elem));
+ if (elem == type)
+ return TYPE_BINFO (type);
+ if (TREE_CODE (elem) == RECORD_TYPE && TYPE_BINFO (elem) == type)
+ return type;
+ return get_binfo (elem, type, 0);
+}
+
+tree
+reverse_path (path)
+ tree path;
+{
+ register tree prev = 0, tmp, next;
+ for (tmp = path; tmp; tmp = next)
+ {
+ next = BINFO_INHERITANCE_CHAIN (tmp);
+ BINFO_INHERITANCE_CHAIN (tmp) = prev;
+ prev = tmp;
+ }
+ return prev;
+}
+
+tree
+virtual_member (elem, list)
+ tree elem;
+ tree list;
+{
+ tree t;
+ tree rval, nval;
+
+ for (t = list; t; t = TREE_CHAIN (t))
+ if (elem == BINFO_TYPE (t))
+ return t;
+ rval = 0;
+ for (t = list; t; t = TREE_CHAIN (t))
+ {
+ tree binfos = BINFO_BASETYPES (t);
+ int i;
+
+ if (binfos != NULL_TREE)
+ for (i = TREE_VEC_LENGTH (binfos)-1; i >= 0; i--)
+ {
+ nval = binfo_value (elem, BINFO_TYPE (TREE_VEC_ELT (binfos, i)));
+ if (nval)
+ {
+ if (rval && BINFO_OFFSET (nval) != BINFO_OFFSET (rval))
+ my_friendly_abort (104);
+ rval = nval;
+ }
+ }
+ }
+ return rval;
+}
+
+/* Return the offset (as an INTEGER_CST) for ELEM in LIST.
+ INITIAL_OFFSET is the value to add to the offset that ELEM's
+ binfo entry in LIST provides.
+
+   Returns NULL if ELEM does not have a binfo value in LIST.  */
+
+tree
+virtual_offset (elem, list, initial_offset)
+ tree elem;
+ tree list;
+ tree initial_offset;
+{
+ tree vb, offset;
+ tree rval, nval;
+
+ for (vb = list; vb; vb = TREE_CHAIN (vb))
+ if (elem == BINFO_TYPE (vb))
+ return size_binop (PLUS_EXPR, initial_offset, BINFO_OFFSET (vb));
+ rval = 0;
+ for (vb = list; vb; vb = TREE_CHAIN (vb))
+ {
+ tree binfos = BINFO_BASETYPES (vb);
+ int i;
+
+ if (binfos == NULL_TREE)
+ continue;
+
+ for (i = TREE_VEC_LENGTH (binfos)-1; i >= 0; i--)
+ {
+ nval = binfo_value (elem, BINFO_TYPE (TREE_VEC_ELT (binfos, i)));
+ if (nval)
+ {
+ if (rval && BINFO_OFFSET (nval) != BINFO_OFFSET (rval))
+ my_friendly_abort (105);
+ offset = BINFO_OFFSET (vb);
+ rval = nval;
+ }
+ }
+ }
+ if (rval == NULL_TREE)
+ return rval;
+ return size_binop (PLUS_EXPR, offset, BINFO_OFFSET (rval));
+}
+
+void
+debug_binfo (elem)
+ tree elem;
+{
+ int i;
+ tree virtuals;
+
+ fprintf (stderr, "type \"%s\"; offset = %d\n",
+ TYPE_NAME_STRING (BINFO_TYPE (elem)),
+ TREE_INT_CST_LOW (BINFO_OFFSET (elem)));
+ fprintf (stderr, "vtable type:\n");
+ debug_tree (BINFO_TYPE (elem));
+ if (BINFO_VTABLE (elem))
+ fprintf (stderr, "vtable decl \"%s\"\n", IDENTIFIER_POINTER (DECL_NAME (BINFO_VTABLE (elem))));
+ else
+ fprintf (stderr, "no vtable decl yet\n");
+ fprintf (stderr, "virtuals:\n");
+ virtuals = BINFO_VIRTUALS (elem);
+ if (virtuals != 0)
+ {
+ virtuals = TREE_CHAIN (virtuals);
+ if (flag_dossier)
+ virtuals = TREE_CHAIN (virtuals);
+ }
+ i = 1;
+ while (virtuals)
+ {
+ tree fndecl = TREE_OPERAND (FNADDR_FROM_VTABLE_ENTRY (TREE_VALUE (virtuals)), 0);
+ fprintf (stderr, "%s [%d =? %d]\n",
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (fndecl)),
+ i, TREE_INT_CST_LOW (DECL_VINDEX (fndecl)));
+ virtuals = TREE_CHAIN (virtuals);
+ i += 1;
+ }
+}
+
+/* Return the length of a chain of nodes chained through DECL_CHAIN.
+ We expect a null pointer to mark the end of the chain.
+ This is the Lisp primitive `length'. */
+
+int
+decl_list_length (t)
+ tree t;
+{
+ register tree tail;
+ register int len = 0;
+
+ my_friendly_assert (TREE_CODE (t) == FUNCTION_DECL
+ || TREE_CODE (t) == TEMPLATE_DECL, 300);
+ for (tail = t; tail; tail = DECL_CHAIN (tail))
+ len++;
+
+ return len;
+}
+
+int
+count_functions (t)
+ tree t;
+{
+ if (TREE_CODE (t) == FUNCTION_DECL)
+ return 1;
+ else if (TREE_CODE (t) == TREE_LIST)
+ return decl_list_length (TREE_VALUE (t));
+
+ my_friendly_abort (359);
+ return 0;
+}
+
+/* Like value_member, but for DECL_CHAINs. */
+tree
+decl_value_member (elem, list)
+ tree elem, list;
+{
+ while (list)
+ {
+ if (elem == list)
+ return list;
+ list = DECL_CHAIN (list);
+ }
+ return NULL_TREE;
+}
+
+int
+is_overloaded_fn (x)
+ tree x;
+{
+ if (TREE_CODE (x) == FUNCTION_DECL)
+ return 1;
+
+ if (TREE_CODE (x) == TREE_LIST
+ && (TREE_CODE (TREE_VALUE (x)) == FUNCTION_DECL
+ || TREE_CODE (TREE_VALUE (x)) == TEMPLATE_DECL))
+ return 1;
+
+ return 0;
+}
+
+int
+really_overloaded_fn (x)
+ tree x;
+{
+ if (TREE_CODE (x) == TREE_LIST
+ && (TREE_CODE (TREE_VALUE (x)) == FUNCTION_DECL
+ || TREE_CODE (TREE_VALUE (x)) == TEMPLATE_DECL))
+ return 1;
+
+ return 0;
+}
+
+tree
+get_first_fn (from)
+ tree from;
+{
+ if (TREE_CODE (from) == FUNCTION_DECL)
+ return from;
+
+ my_friendly_assert (TREE_CODE (from) == TREE_LIST, 9);
+
+ return TREE_VALUE (from);
+}
+
+tree
+fnaddr_from_vtable_entry (entry)
+ tree entry;
+{
+ if (flag_vtable_thunks)
+ {
+ tree func = entry;
+ if (TREE_CODE (func) == ADDR_EXPR)
+ func = TREE_OPERAND (func, 0);
+ if (TREE_CODE (func) == THUNK_DECL)
+ return DECL_INITIAL (func);
+ else
+ return entry;
+ }
+ else
+ return TREE_VALUE (TREE_CHAIN (TREE_CHAIN (CONSTRUCTOR_ELTS (entry))));
+}
+
+void
+set_fnaddr_from_vtable_entry (entry, value)
+ tree entry, value;
+{
+ if (flag_vtable_thunks)
+ abort ();
+ else
+ TREE_VALUE (TREE_CHAIN (TREE_CHAIN (CONSTRUCTOR_ELTS (entry)))) = value;
+}
+
+tree
+function_arg_chain (t)
+ tree t;
+{
+ return TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (t)));
+}
+
+int
+promotes_to_aggr_type (t, code)
+ tree t;
+ enum tree_code code;
+{
+ if (TREE_CODE (t) == code)
+ t = TREE_TYPE (t);
+ return IS_AGGR_TYPE (t);
+}
+
+int
+is_aggr_type_2 (t1, t2)
+ tree t1, t2;
+{
+ if (TREE_CODE (t1) != TREE_CODE (t2))
+ return 0;
+ return IS_AGGR_TYPE (t1) && IS_AGGR_TYPE (t2);
+}
+
+/* Give message using types TYPE1 and TYPE2 as arguments.
+ PFN is the function which will print the message;
+ S is the format string for PFN to use. */
+void
+message_2_types (pfn, s, type1, type2)
+ void (*pfn) ();
+ char *s;
+ tree type1, type2;
+{
+ tree name1 = TYPE_NAME (type1);
+ tree name2 = TYPE_NAME (type2);
+ if (TREE_CODE (name1) == TYPE_DECL)
+ name1 = DECL_NAME (name1);
+ if (TREE_CODE (name2) == TYPE_DECL)
+ name2 = DECL_NAME (name2);
+ (*pfn) (s, IDENTIFIER_POINTER (name1), IDENTIFIER_POINTER (name2));
+}
+
+#define PRINT_RING_SIZE 4
+
+char *
+lang_printable_name (decl)
+ tree decl;
+{
+ static tree decl_ring[PRINT_RING_SIZE];
+ static char *print_ring[PRINT_RING_SIZE];
+ static int ring_counter;
+ int i;
+
+ /* Only cache functions. */
+ if (TREE_CODE (decl) != FUNCTION_DECL
+ || DECL_LANG_SPECIFIC (decl) == 0)
+ return decl_as_string (decl, 1);
+
+ /* See if this print name is lying around. */
+ for (i = 0; i < PRINT_RING_SIZE; i++)
+ if (decl_ring[i] == decl)
+ /* yes, so return it. */
+ return print_ring[i];
+
+ if (++ring_counter == PRINT_RING_SIZE)
+ ring_counter = 0;
+
+ if (current_function_decl != NULL_TREE)
+ {
+ if (decl_ring[ring_counter] == current_function_decl)
+ ring_counter += 1;
+ if (ring_counter == PRINT_RING_SIZE)
+ ring_counter = 0;
+ if (decl_ring[ring_counter] == current_function_decl)
+ my_friendly_abort (106);
+ }
+
+ if (print_ring[ring_counter])
+ free (print_ring[ring_counter]);
+
+ {
+ int print_ret_type_p
+ = (!DECL_CONSTRUCTOR_P (decl)
+ && !DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (decl)));
+
+ char *name = (char *)decl_as_string (decl, print_ret_type_p);
+ print_ring[ring_counter] = (char *)malloc (strlen (name) + 1);
+ strcpy (print_ring[ring_counter], name);
+ decl_ring[ring_counter] = decl;
+ }
+ return print_ring[ring_counter];
+}
+
+/* Comparison function for sorting identifiers in RAISES lists.
+ Note that because IDENTIFIER_NODEs are unique, we can sort
+ them by address, saving an indirection. */
+static int
+id_cmp (p1, p2)
+ tree *p1, *p2;
+{
+ return (HOST_WIDE_INT)TREE_VALUE (*p1) - (HOST_WIDE_INT)TREE_VALUE (*p2);
+}
+
+/* Build the FUNCTION_TYPE or METHOD_TYPE which may raise exceptions
+ listed in RAISES. */
+tree
+build_exception_variant (ctype, type, raises)
+ tree ctype, type;
+ tree raises;
+{
+ int i;
+ tree v = TYPE_MAIN_VARIANT (type);
+ tree t, t2, cname;
+ tree *a = (tree *)alloca ((list_length (raises)+1) * sizeof (tree));
+ int constp = TYPE_READONLY (type);
+ int volatilep = TYPE_VOLATILE (type);
+
+ for (v = TYPE_NEXT_VARIANT (v); v; v = TYPE_NEXT_VARIANT (v))
+ {
+ if (TYPE_READONLY (v) != constp
+ || TYPE_VOLATILE (v) != volatilep)
+ continue;
+
+ t = raises;
+ t2 = TYPE_RAISES_EXCEPTIONS (v);
+ while (t && t2)
+ {
+ if (TREE_TYPE (t) == TREE_TYPE (t2))
+ {
+ t = TREE_CHAIN (t);
+ t2 = TREE_CHAIN (t2);
+ }
+ else break;
+ }
+ if (t || t2)
+ continue;
+ /* List of exceptions raised matches previously found list.
+
+ @@ Nice to free up storage used in consing up the
+ @@ list of exceptions raised. */
+ return v;
+ }
+
+ /* Need to build a new variant. */
+ v = copy_node (type);
+ TYPE_NEXT_VARIANT (v) = TYPE_NEXT_VARIANT (type);
+ TYPE_NEXT_VARIANT (type) = v;
+ if (raises && ! TREE_PERMANENT (raises))
+ {
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ raises = copy_list (raises);
+ pop_obstacks ();
+ }
+ TYPE_RAISES_EXCEPTIONS (v) = raises;
+ return v;
+}
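+
+/* Usage sketch: a declaration such as
+
+     void f () throw (X, Y);
+
+   reaches this function with RAISES listing X and Y; the new variant is
+   chained onto TYPE's variant list so that a later declaration with the
+   same exception list finds and reuses it in the loop above.  */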
+
+/* Subroutine of copy_to_permanent
+
+   Assuming T is a node built bottom-up, make it all exist on
+ permanent obstack, if it is not permanent already. */
+static tree
+make_deep_copy (t)
+ tree t;
+{
+ enum tree_code code;
+
+ if (t == NULL_TREE || TREE_PERMANENT (t))
+ return t;
+
+ switch (code = TREE_CODE (t))
+ {
+ case ERROR_MARK:
+ return error_mark_node;
+
+ case VAR_DECL:
+ case FUNCTION_DECL:
+ case CONST_DECL:
+ break;
+
+ case PARM_DECL:
+ {
+ tree chain = TREE_CHAIN (t);
+ t = copy_node (t);
+ TREE_CHAIN (t) = make_deep_copy (chain);
+ TREE_TYPE (t) = make_deep_copy (TREE_TYPE (t));
+ DECL_INITIAL (t) = make_deep_copy (DECL_INITIAL (t));
+ DECL_SIZE (t) = make_deep_copy (DECL_SIZE (t));
+ return t;
+ }
+
+ case TREE_LIST:
+ {
+ tree chain = TREE_CHAIN (t);
+ t = copy_node (t);
+ TREE_PURPOSE (t) = make_deep_copy (TREE_PURPOSE (t));
+ TREE_VALUE (t) = make_deep_copy (TREE_VALUE (t));
+ TREE_CHAIN (t) = make_deep_copy (chain);
+ return t;
+ }
+
+ case TREE_VEC:
+ {
+ int len = TREE_VEC_LENGTH (t);
+
+ t = copy_node (t);
+ while (len--)
+ TREE_VEC_ELT (t, len) = make_deep_copy (TREE_VEC_ELT (t, len));
+ return t;
+ }
+
+ case INTEGER_CST:
+ case REAL_CST:
+ case STRING_CST:
+ return copy_node (t);
+
+ case COND_EXPR:
+ case TARGET_EXPR:
+ case NEW_EXPR:
+ t = copy_node (t);
+ TREE_OPERAND (t, 0) = make_deep_copy (TREE_OPERAND (t, 0));
+ TREE_OPERAND (t, 1) = make_deep_copy (TREE_OPERAND (t, 1));
+ TREE_OPERAND (t, 2) = make_deep_copy (TREE_OPERAND (t, 2));
+ return t;
+
+ case SAVE_EXPR:
+ t = copy_node (t);
+ TREE_OPERAND (t, 0) = make_deep_copy (TREE_OPERAND (t, 0));
+ return t;
+
+ case MODIFY_EXPR:
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case MULT_EXPR:
+ case TRUNC_DIV_EXPR:
+ case TRUNC_MOD_EXPR:
+ case MIN_EXPR:
+ case MAX_EXPR:
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ case BIT_AND_EXPR:
+ case BIT_ANDTC_EXPR:
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ case LT_EXPR:
+ case LE_EXPR:
+ case GT_EXPR:
+ case GE_EXPR:
+ case EQ_EXPR:
+ case NE_EXPR:
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case CEIL_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ case ROUND_MOD_EXPR:
+ case COMPOUND_EXPR:
+ case PREDECREMENT_EXPR:
+ case PREINCREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ case CALL_EXPR:
+ t = copy_node (t);
+ TREE_OPERAND (t, 0) = make_deep_copy (TREE_OPERAND (t, 0));
+ TREE_OPERAND (t, 1) = make_deep_copy (TREE_OPERAND (t, 1));
+ return t;
+
+ case CONVERT_EXPR:
+ case ADDR_EXPR:
+ case INDIRECT_REF:
+ case NEGATE_EXPR:
+ case BIT_NOT_EXPR:
+ case TRUTH_NOT_EXPR:
+ case NOP_EXPR:
+ case COMPONENT_REF:
+ t = copy_node (t);
+ TREE_OPERAND (t, 0) = make_deep_copy (TREE_OPERAND (t, 0));
+ return t;
+
+ /* This list is incomplete, but should suffice for now.
+ It is very important that `sorry' does not call
+ `report_error_function'. That could cause an infinite loop. */
+ default:
+ sorry ("initializer contains unrecognized tree code");
+ return error_mark_node;
+
+ }
+ my_friendly_abort (107);
+ /* NOTREACHED */
+ return NULL_TREE;
+}
+
+/* Assuming T is a node built bottom-up, make it all exist on
+ permanent obstack, if it is not permanent already. */
+tree
+copy_to_permanent (t)
+ tree t;
+{
+ register struct obstack *ambient_obstack = current_obstack;
+ register struct obstack *ambient_saveable_obstack = saveable_obstack;
+
+ if (t == NULL_TREE || TREE_PERMANENT (t))
+ return t;
+
+ saveable_obstack = &permanent_obstack;
+ current_obstack = saveable_obstack;
+
+ t = make_deep_copy (t);
+
+ current_obstack = ambient_obstack;
+ saveable_obstack = ambient_saveable_obstack;
+
+ return t;
+}
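+
+/* Typical use (a sketch): an initializer or default-argument expression
+   parsed on a temporary obstack must sometimes outlive the current
+   function; copying it here moves the whole tree, bottom-up, onto the
+   permanent obstack.  */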
+
+void
+print_lang_statistics ()
+{
+ extern struct obstack maybepermanent_obstack;
+ print_obstack_statistics ("class_obstack", &class_obstack);
+ print_obstack_statistics ("permanent_obstack", &permanent_obstack);
+ print_obstack_statistics ("maybepermanent_obstack", &maybepermanent_obstack);
+ print_search_statistics ();
+ print_class_statistics ();
+}
+
+/* This is used by the `assert' macro. It is provided in libgcc.a,
+ which `cc' doesn't know how to link. Note that the C++ front-end
+ no longer actually uses the `assert' macro (instead, it calls
+ my_friendly_assert). But all of the back-end files still need this. */
+void
+__eprintf (string, expression, line, filename)
+#ifdef __STDC__
+ const char *string;
+ const char *expression;
+ unsigned line;
+ const char *filename;
+#else
+ char *string;
+ char *expression;
+ unsigned line;
+ char *filename;
+#endif
+{
+ fprintf (stderr, string, expression, line, filename);
+ fflush (stderr);
+ abort ();
+}
+
+/* Return, as an INTEGER_CST node, the number of elements for
+ TYPE (which is an ARRAY_TYPE). This counts only elements of the top array. */
+
+tree
+array_type_nelts_top (type)
+ tree type;
+{
+ return fold (build (PLUS_EXPR, integer_type_node,
+ array_type_nelts (type),
+ integer_one_node));
+}
+
+/* Return, as an INTEGER_CST node, the number of elements for
+ TYPE (which is an ARRAY_TYPE). This one is a recursive count of all
+ ARRAY_TYPEs that are clumped together. */
+
+tree
+array_type_nelts_total (type)
+ tree type;
+{
+ tree sz = array_type_nelts_top (type);
+ type = TREE_TYPE (type);
+ while (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ tree n = array_type_nelts_top (type);
+ sz = fold (build (MULT_EXPR, integer_type_node, sz, n));
+ type = TREE_TYPE (type);
+ }
+ return sz;
+}
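+
+/* E.g. (sketch): for the usual zero-based domains, array_type_nelts
+   yields the element count minus one, so for `int a[10]' the _top
+   version folds to 10, and for `int m[2][3]' the _total version folds
+   to 2 * 3 = 6.  */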
diff --git a/gnu/usr.bin/cc/cc1plus/tree.def b/gnu/usr.bin/cc/cc1plus/tree.def
new file mode 100644
index 0000000..4f33c9f
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/tree.def
@@ -0,0 +1,103 @@
+/* This file contains the definitions and documentation for the
+ additional tree codes used in the GNU C++ compiler (see tree.def
+ for the standard codes).
+ Copyright (C) 1987, 1988, 1990, 1993 Free Software Foundation, Inc.
+ Hacked by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Reference to the contents of an offset
+ (a value whose type is an OFFSET_TYPE).
+ Operand 0 is the object within which the offset is taken.
+ Operand 1 is the offset. The language independent OFFSET_REF
+ just won't work for us. */
+DEFTREECODE (CP_OFFSET_REF, "cp_offset_ref", "r", 2)
+
+/* For DELETE_EXPR, operand 0 is the store to be destroyed.
+ Operand 1 is the value to pass to the destroying function
+ saying whether the store should be deallocated as well. */
+DEFTREECODE (DELETE_EXPR, "dl_expr", "e", 2)
+DEFTREECODE (VEC_DELETE_EXPR, "vec_dl_expr", "e", 2)
+
+/* Value is reference to particular overloaded class method.
+ Operand 0 is the class name (an IDENTIFIER_NODE);
+ operand 1 is the field (also an IDENTIFIER_NODE).
+ The COMPLEXITY field holds the class level (usually 0). */
+DEFTREECODE (SCOPE_REF, "scope_ref", "r", 2)
+
+/* When composing an object with a member, this is the result.
+ Operand 0 is the object. Operand 1 is the member (usually
+ a dereferenced pointer to member). */
+DEFTREECODE (MEMBER_REF, "member_ref", "r", 2)
+
+/* Type conversion operator in C++. TREE_TYPE is type that this
+ operator converts to. Operand is expression to be converted. */
+DEFTREECODE (TYPE_EXPR, "type_expr", "e", 1)
+
+/* For NEW_EXPR, operand 0 is the function which performs initialization,
+ operand 1 is argument list to initialization function,
+ and operand 2 is the slot which was allocated for this expression. */
+DEFTREECODE (NEW_EXPR, "nw_expr", "e", 3)
+DEFTREECODE (VEC_NEW_EXPR, "vec_nw_expr", "e", 3)
+
+/* A throw expression.  Operand 0 is the expression, if there was one,
+ else it is NULL_TREE. */
+DEFTREECODE (THROW_EXPR, "throw_expr", "e", 1)
+
+/* Template definition. The following fields have the specified uses,
+ although there are other macros in cp-tree.h that should be used for
+ accessing this data.
+ DECL_ARGUMENTS template parm vector
+ DECL_TEMPLATE_INFO template text &c
+ DECL_VINDEX list of instantiations already produced;
+ only done for functions so far
+ For class template:
+ DECL_INITIAL associated templates (methods &c)
+ DECL_RESULT null
+ For non-class templates:
+ TREE_TYPE type of object to be constructed
+ DECL_RESULT decl for object to be created
+ (e.g., FUNCTION_DECL with tmpl parms used)
+ */
+DEFTREECODE (TEMPLATE_DECL, "template_decl", "d", 0)
+
+/* Index into a template parameter list. This parameter must be a type.
+ Use TYPE_FIELDS to find parmlist and index. */
+DEFTREECODE (TEMPLATE_TYPE_PARM, "template_type_parm", "t", 0)
+
+/* Index into a template parameter list. This parameter must not be a
+ type. */
+DEFTREECODE (TEMPLATE_CONST_PARM, "template_const_parm", "c", 2)
+
+/* For uninstantiated parameterized types.
+ TYPE_VALUES tree list:
+ TREE_PURPOSE template decl
+ TREE_VALUE parm vector
+ TREE_CHAIN null
+ Other useful fields to be defined later. */
+DEFTREECODE (UNINSTANTIATED_P_TYPE, "uninstantiated_p_type", "t", 0)
+
+/* A thunk is a stub function.
+
+ Thunks are used to implement multiple inheritance:
+ At run-time, such a thunk subtracts THUNK_DELTA (an int, not a tree)
+ from the this pointer, and then jumps to DECL_INITIAL
+ (which is an ADDR_EXPR whose operand is a FUNCTION_DECL).
+
+ Other kinds of thunks may be defined later. */
+DEFTREECODE (THUNK_DECL, "thunk_decl", "d", 0)
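+
+/* In pseudo-code (an illustrative sketch), a thunk for a target
+   function F with delta D behaves like
+
+     thunk (this, args...) { F (this - D, args...); }
+
+   except that control transfers with a jump rather than a call.  */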
diff --git a/gnu/usr.bin/cc/cc1plus/typeck.c b/gnu/usr.bin/cc/cc1plus/typeck.c
new file mode 100644
index 0000000..fe8e7ba
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/typeck.c
@@ -0,0 +1,7233 @@
+/* Build expressions with type checking for C++ compiler.
+ Copyright (C) 1987, 88, 89, 92, 93, 1994 Free Software Foundation, Inc.
+ Hacked by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file is part of the C++ front end.
+ It contains routines to build C++ expressions given their operands,
+ including computing the types of the result, C and C++ specific error
+ checks, and some optimization.
+
+ There are also routines to build RETURN_STMT nodes and CASE_STMT nodes,
+ and to process initializations in declarations (since they work
+ like a strange sort of assignment). */
+
+extern void error ();
+extern void warning ();
+
+#include "config.h"
+#include <stdio.h>
+#include "tree.h"
+#include "rtl.h"
+#include "cp-tree.h"
+#include "flags.h"
+
+int mark_addressable ();
+static tree convert_for_assignment ();
+/* static */ tree convert_for_initialization ();
+extern tree shorten_compare ();
+extern void binary_op_error ();
+static tree pointer_int_sum ();
+static tree pointer_diff ();
+static tree convert_sequence ();
+/* static */ tree unary_complex_lvalue ();
+
+extern rtx original_result_rtx;
+
+/* Return the target type of TYPE, which means return T for:
+   T*, T&, T[], T (...), and otherwise, just T.  */
+
+tree
+target_type (type)
+ tree type;
+{
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+ while (TREE_CODE (type) == POINTER_TYPE
+ || TREE_CODE (type) == ARRAY_TYPE
+ || TREE_CODE (type) == FUNCTION_TYPE
+ || TREE_CODE (type) == METHOD_TYPE
+ || TREE_CODE (type) == OFFSET_TYPE)
+ type = TREE_TYPE (type);
+ return type;
+}
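+
+/* E.g.: target_type of `int **' is int, target_type of `X &' is X, and
+   target_type of `int (*) (long)' is int; each level of pointer, array,
+   function, method, or offset type is stripped in the loop above.  */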
+
+/* Do `exp = require_complete_type (exp);' to make sure exp
+ does not have an incomplete type. (That includes void types.) */
+
+tree
+require_complete_type (value)
+ tree value;
+{
+ tree type = TREE_TYPE (value);
+
+ /* First, detect a valid value with a complete type. */
+ if (TYPE_SIZE (type) != 0
+ && type != void_type_node
+ && ! (TYPE_LANG_SPECIFIC (type)
+ && (IS_SIGNATURE_POINTER (type) || IS_SIGNATURE_REFERENCE (type))
+ && TYPE_SIZE (SIGNATURE_TYPE (type)) == 0))
+ return value;
+
+ /* If we see X::Y, we build an OFFSET_TYPE which has
+ not been laid out. Try to avoid an error by interpreting
+ it as this->X::Y, if reasonable. */
+ if (TREE_CODE (value) == OFFSET_REF
+ && C_C_D != 0
+ && TREE_OPERAND (value, 0) == C_C_D)
+ {
+ tree base, member = TREE_OPERAND (value, 1);
+ tree basetype = TYPE_OFFSET_BASETYPE (type);
+ my_friendly_assert (TREE_CODE (member) == FIELD_DECL, 305);
+ base = convert_pointer_to (basetype, current_class_decl);
+ value = build (COMPONENT_REF, TREE_TYPE (member),
+ build_indirect_ref (base, NULL_PTR), member);
+ return require_complete_type (value);
+ }
+
+ incomplete_type_error (value, type);
+ return error_mark_node;
+}
+
+/* Return truthvalue of whether type of EXP is instantiated. */
+int
+type_unknown_p (exp)
+ tree exp;
+{
+ return (TREE_CODE (exp) == TREE_LIST
+ || TREE_TYPE (exp) == unknown_type_node
+ || (TREE_CODE (TREE_TYPE (exp)) == OFFSET_TYPE
+ && TREE_TYPE (TREE_TYPE (exp)) == unknown_type_node));
+}
+
+/* Return truthvalue of whether T is function (or pfn) type. */
+int
+fntype_p (t)
+ tree t;
+{
+ return (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE
+ || (TREE_CODE (t) == POINTER_TYPE
+ && (TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (t)) == METHOD_TYPE)));
+}
+
+/* Do `exp = require_instantiated_type (type, exp);' to make sure EXP
+ does not have an uninstantiated type.
+ TYPE is type to instantiate with, if uninstantiated. */
+tree
+require_instantiated_type (type, exp, errval)
+ tree type, exp, errval;
+{
+ if (TREE_TYPE (exp) == NULL_TREE)
+ {
+ error ("argument list may not have an initializer list");
+ return errval;
+ }
+
+ if (TREE_TYPE (exp) == unknown_type_node
+ || (TREE_CODE (TREE_TYPE (exp)) == OFFSET_TYPE
+ && TREE_TYPE (TREE_TYPE (exp)) == unknown_type_node))
+ {
+ exp = instantiate_type (type, exp, 1);
+ if (TREE_TYPE (exp) == error_mark_node)
+ return errval;
+ }
+ return exp;
+}
+
+/* Return a variant of TYPE which has all the type qualifiers of LIKE
+ as well as those of TYPE. */
+
+static tree
+qualify_type (type, like)
+ tree type, like;
+{
+ int constflag = TYPE_READONLY (type) || TYPE_READONLY (like);
+ int volflag = TYPE_VOLATILE (type) || TYPE_VOLATILE (like);
+ /* @@ Must do member pointers here. */
+ return c_build_type_variant (type, constflag, volflag);
+}
+
+/* Return the common type of two parameter lists.
+ We assume that comptypes has already been done and returned 1;
+ if that isn't so, this may crash.
+
+ As an optimization, free the space we allocate if the parameter
+ lists are already common. */
+
+tree
+commonparms (p1, p2)
+ tree p1, p2;
+{
+ tree oldargs = p1, newargs, n;
+ int i, len;
+ int any_change = 0;
+ char *first_obj = (char *) oballoc (0);
+
+ len = list_length (p1);
+ newargs = tree_last (p1);
+
+ if (newargs == void_list_node)
+ i = 1;
+ else
+ {
+ i = 0;
+ newargs = 0;
+ }
+
+ for (; i < len; i++)
+ newargs = tree_cons (NULL_TREE, NULL_TREE, newargs);
+
+ n = newargs;
+
+ for (i = 0; p1;
+ p1 = TREE_CHAIN (p1), p2 = TREE_CHAIN (p2), n = TREE_CHAIN (n), i++)
+ {
+ if (TREE_PURPOSE (p1) && !TREE_PURPOSE (p2))
+ {
+ /* We used to give a warning here that advised about a default
+ argument being given in the prototype but not in the function's
+ declaration. It's best not to bother. */
+ TREE_PURPOSE (n) = TREE_PURPOSE (p1);
+ any_change = 1;
+ }
+ else if (! TREE_PURPOSE (p1))
+ {
+ if (TREE_PURPOSE (p2))
+ {
+ TREE_PURPOSE (n) = TREE_PURPOSE (p2);
+ any_change = 1;
+ }
+ }
+ else
+ {
+ int cmp = simple_cst_equal (TREE_PURPOSE (p1), TREE_PURPOSE (p2));
+ if (cmp < 0)
+ my_friendly_abort (111);
+ if (cmp == 0)
+ any_change = 1;
+ TREE_PURPOSE (n) = TREE_PURPOSE (p2);
+ }
+ if (TREE_VALUE (p1) != TREE_VALUE (p2))
+ {
+ any_change = 1;
+ TREE_VALUE (n) = common_type (TREE_VALUE (p1), TREE_VALUE (p2));
+ }
+ else
+ TREE_VALUE (n) = TREE_VALUE (p1);
+ }
+ if (! any_change)
+ {
+ obfree (first_obj);
+ return oldargs;
+ }
+
+ return newargs;
+}
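+
+/* Sketch: merging the parameter lists of
+
+     int f (int i, double d = 1.0);
+     int f (int i = 2, double d);
+
+   yields `(int i = 2, double d = 1.0)'; each position keeps whichever
+   declaration supplied a default argument (the second list's value when
+   both do).  */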
+
+/* Return the common type of two types.
+ We assume that comptypes has already been done and returned 1;
+ if that isn't so, this may crash.
+
+ This is the type for the result of most arithmetic operations
+ if the operands have the given two types.
+
+ We do not deal with enumeral types here because they have already been
+ converted to integer types. */
+
+tree
+common_type (t1, t2)
+ tree t1, t2;
+{
+ register enum tree_code code1;
+ register enum tree_code code2;
+ tree attributes;
+
+ /* Save time if the two types are the same. */
+
+ if (t1 == t2) return t1;
+
+ /* If one type is nonsense, use the other. */
+ if (t1 == error_mark_node)
+ return t2;
+ if (t2 == error_mark_node)
+ return t1;
+
+ /* Merge the attributes */
+
+ { register tree a1, a2;
+ a1 = TYPE_ATTRIBUTES (t1);
+ a2 = TYPE_ATTRIBUTES (t2);
+
+ /* Either one unset? Take the set one. */
+
+ if (!(attributes = a1))
+ attributes = a2;
+
+ /* One that completely contains the other? Take it. */
+
+ else if (a2 && !attribute_list_contained (a1, a2))
+ if (attribute_list_contained (a2, a1))
+ attributes = a2;
+ else
+ {
+ /* Pick the longest list, and hang on the other
+ list. */
+
+ if (list_length (a1) < list_length (a2))
+ attributes = a2, a2 = a1;
+
+ for (; a2; a2 = TREE_CHAIN (a2))
+ if (!value_member (attributes, a2))
+ {
+ a1 = copy_node (a2);
+ TREE_CHAIN (a1) = attributes;
+ attributes = a1;
+ }
+ }
+ }
+
+ /* Treat an enum type as the unsigned integer type of the same width. */
+
+ if (TREE_CODE (t1) == ENUMERAL_TYPE)
+ t1 = type_for_size (TYPE_PRECISION (t1), 1);
+ if (TREE_CODE (t2) == ENUMERAL_TYPE)
+ t2 = type_for_size (TYPE_PRECISION (t2), 1);
+
+ code1 = TREE_CODE (t1);
+ code2 = TREE_CODE (t2);
+
+ switch (code1)
+ {
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ /* If only one is real, use it as the result. */
+
+ if (code1 == REAL_TYPE && code2 != REAL_TYPE)
+ return build_type_attribute_variant (t1, attributes);
+
+ if (code2 == REAL_TYPE && code1 != REAL_TYPE)
+ return build_type_attribute_variant (t2, attributes);
+
+ /* Both real or both integers; use the one with greater precision. */
+
+ if (TYPE_PRECISION (t1) > TYPE_PRECISION (t2))
+ return build_type_attribute_variant (t1, attributes);
+ else if (TYPE_PRECISION (t2) > TYPE_PRECISION (t1))
+ return build_type_attribute_variant (t2, attributes);
+
+ /* Same precision. Prefer longs to ints even when same size. */
+
+ if (TYPE_MAIN_VARIANT (t1) == long_unsigned_type_node
+ || TYPE_MAIN_VARIANT (t2) == long_unsigned_type_node)
+ return build_type_attribute_variant (long_unsigned_type_node,
+ attributes);
+
+ if (TYPE_MAIN_VARIANT (t1) == long_integer_type_node
+ || TYPE_MAIN_VARIANT (t2) == long_integer_type_node)
+ {
+ /* But preserve unsignedness from the other type,
+ since long cannot hold all the values of an unsigned int. */
+ if (TREE_UNSIGNED (t1) || TREE_UNSIGNED (t2))
+ t1 = long_unsigned_type_node;
+ else
+ t1 = long_integer_type_node;
+ return build_type_attribute_variant (t1, attributes);
+ }
+
+ /* Otherwise prefer the unsigned one. */
+
+ if (TREE_UNSIGNED (t1))
+ return build_type_attribute_variant (t1, attributes);
+ else
+ return build_type_attribute_variant (t2, attributes);
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* For two pointers, do this recursively on the target type,
+ and combine the qualifiers of the two types' targets. */
+ /* This code was turned off; I don't know why.
+ But ANSI C++ specifies doing this with the qualifiers.
+ So I turned it on again. */
+ {
+ tree target = common_type (TYPE_MAIN_VARIANT (TREE_TYPE (t1)),
+ TYPE_MAIN_VARIANT (TREE_TYPE (t2)));
+ int constp
+ = TYPE_READONLY (TREE_TYPE (t1)) || TYPE_READONLY (TREE_TYPE (t2));
+ int volatilep
+ = TYPE_VOLATILE (TREE_TYPE (t1)) || TYPE_VOLATILE (TREE_TYPE (t2));
+ target = c_build_type_variant (target, constp, volatilep);
+ if (code1 == POINTER_TYPE)
+ t1 = build_pointer_type (target);
+ else
+ t1 = build_reference_type (target);
+ return build_type_attribute_variant (t1, attributes);
+ }
+#if 0
+ case POINTER_TYPE:
+ t1 = build_pointer_type (common_type (TREE_TYPE (t1), TREE_TYPE (t2)));
+ return build_type_attribute_variant (t1, attributes);
+
+ case REFERENCE_TYPE:
+ t1 = build_reference_type (common_type (TREE_TYPE (t1), TREE_TYPE (t2)));
+ return build_type_attribute_variant (t1, attributes);
+#endif
+
+ case ARRAY_TYPE:
+ {
+ tree elt = common_type (TREE_TYPE (t1), TREE_TYPE (t2));
+ /* Save space: see if the result is identical to one of the args. */
+ if (elt == TREE_TYPE (t1) && TYPE_DOMAIN (t1))
+ return build_type_attribute_variant (t1, attributes);
+ if (elt == TREE_TYPE (t2) && TYPE_DOMAIN (t2))
+ return build_type_attribute_variant (t2, attributes);
+ /* Merge the element types, and have a size if either arg has one. */
+ t1 = build_array_type (elt, TYPE_DOMAIN (TYPE_DOMAIN (t1) ? t1 : t2));
+ return build_type_attribute_variant (t1, attributes);
+ }
+
+ case FUNCTION_TYPE:
+ /* Function types: prefer the one that specified arg types.
+ If both do, merge the arg types. Also merge the return types. */
+ {
+ tree valtype = common_type (TREE_TYPE (t1), TREE_TYPE (t2));
+ tree p1 = TYPE_ARG_TYPES (t1);
+ tree p2 = TYPE_ARG_TYPES (t2);
+ tree rval, raises;
+
+ /* Save space: see if the result is identical to one of the args. */
+ if (valtype == TREE_TYPE (t1) && ! p2)
+ return build_type_attribute_variant (t1, attributes);
+ if (valtype == TREE_TYPE (t2) && ! p1)
+ return build_type_attribute_variant (t2, attributes);
+
+ /* Simple way if one arg fails to specify argument types. */
+ if (p1 == NULL_TREE || TREE_VALUE (p1) == void_type_node)
+ {
+ rval = build_function_type (valtype, p2);
+ if ((raises = TYPE_RAISES_EXCEPTIONS (t2)))
+ rval = build_exception_variant (NULL_TREE, rval, raises);
+ return build_type_attribute_variant (rval, attributes);
+ }
+ raises = TYPE_RAISES_EXCEPTIONS (t1);
+ if (p2 == NULL_TREE || TREE_VALUE (p2) == void_type_node)
+ {
+ rval = build_function_type (valtype, p1);
+ if (raises)
+ rval = build_exception_variant (NULL_TREE, rval, raises);
+ return build_type_attribute_variant (rval, attributes);
+ }
+
+ rval = build_function_type (valtype, commonparms (p1, p2));
+ rval = build_exception_variant (NULL_TREE, rval, raises);
+ return build_type_attribute_variant (rval, attributes);
+ }
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ my_friendly_assert (TYPE_MAIN_VARIANT (t1) == t1
+ && TYPE_MAIN_VARIANT (t2) == t2, 306);
+
+ if (! binfo_or_else (t1, t2))
+ compiler_error ("common_type called with uncommon aggregate types");
+ return build_type_attribute_variant (t1, attributes);
+
+ case METHOD_TYPE:
+ if (comptypes (TYPE_METHOD_BASETYPE (t1), TYPE_METHOD_BASETYPE (t2), 1)
+ && TREE_CODE (TREE_TYPE (t1)) == TREE_CODE (TREE_TYPE (t2)))
+ {
+ /* Get this value the long way, since TYPE_METHOD_BASETYPE
+ is just the main variant of this. */
+ tree basetype = TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (t1)));
+ tree raises, t3;
+
+ raises = TYPE_RAISES_EXCEPTIONS (t1);
+
+ /* If this was a member function type, get back to the
+ original function type (i.e., without the class instance
+ variable up front). */
+ t1 = build_function_type (TREE_TYPE (t1), TREE_CHAIN (TYPE_ARG_TYPES (t1)));
+ t2 = build_function_type (TREE_TYPE (t2), TREE_CHAIN (TYPE_ARG_TYPES (t2)));
+ t3 = common_type (t1, t2);
+ t3 = build_cplus_method_type (basetype, TREE_TYPE (t3), TYPE_ARG_TYPES (t3));
+ t1 = build_exception_variant (basetype, t3, raises);
+ }
+ else
+ compiler_error ("common_type called with uncommon method types");
+
+ return build_type_attribute_variant (t1, attributes);
+
+ case OFFSET_TYPE:
+ if (TYPE_OFFSET_BASETYPE (t1) == TYPE_OFFSET_BASETYPE (t2)
+ && TREE_CODE (TREE_TYPE (t1)) == TREE_CODE (TREE_TYPE (t2)))
+ {
+ tree basetype = TYPE_OFFSET_BASETYPE (t1);
+ t1 = build_offset_type (basetype,
+ common_type (TREE_TYPE (t1), TREE_TYPE (t2)));
+ }
+ else
+ compiler_error ("common_type called with uncommon member types");
+
+ /* ... falls through ... */
+
+ default:
+ return build_type_attribute_variant (t1, attributes);
+ }
+}
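+
+/* For illustration, a minimal sketch of the pointer case above,
+ assuming both pointees share the main variant `int':
+
+ const int *p; volatile int *q;
+
+ The qualifiers of the pointed-to types are merged with logical OR,
+ so their common type is `const volatile int *'. */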
+
+/* Return 1 if TYPE1 and TYPE2 raise the same exceptions. */
+int
+compexcepttypes (t1, t2, strict)
+ tree t1, t2;
+ int strict;
+{
+ return TYPE_RAISES_EXCEPTIONS (t1) == TYPE_RAISES_EXCEPTIONS (t2);
+}
+
+static int
+comp_array_types (cmp, t1, t2, strict)
+ register int (*cmp)();
+ tree t1, t2;
+ int strict;
+{
+ tree d1 = TYPE_DOMAIN (t1);
+ tree d2 = TYPE_DOMAIN (t2);
+
+ /* Target types must match incl. qualifiers. */
+ if (!(TREE_TYPE (t1) == TREE_TYPE (t2)
+ || (*cmp) (TREE_TYPE (t1), TREE_TYPE (t2), strict)))
+ return 0;
+
+ /* Sizes must match unless one is missing or variable. */
+ if (d1 == 0 || d2 == 0 || d1 == d2
+ || TREE_CODE (TYPE_MIN_VALUE (d1)) != INTEGER_CST
+ || TREE_CODE (TYPE_MIN_VALUE (d2)) != INTEGER_CST
+ || TREE_CODE (TYPE_MAX_VALUE (d1)) != INTEGER_CST
+ || TREE_CODE (TYPE_MAX_VALUE (d2)) != INTEGER_CST)
+ return 1;
+
+ return ((TREE_INT_CST_LOW (TYPE_MIN_VALUE (d1))
+ == TREE_INT_CST_LOW (TYPE_MIN_VALUE (d2)))
+ && (TREE_INT_CST_HIGH (TYPE_MIN_VALUE (d1))
+ == TREE_INT_CST_HIGH (TYPE_MIN_VALUE (d2)))
+ && (TREE_INT_CST_LOW (TYPE_MAX_VALUE (d1))
+ == TREE_INT_CST_LOW (TYPE_MAX_VALUE (d2)))
+ && (TREE_INT_CST_HIGH (TYPE_MAX_VALUE (d1))
+ == TREE_INT_CST_HIGH (TYPE_MAX_VALUE (d2))));
+}
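+
+/* For illustration, assuming CMP is comptypes and the element types
+ agree:
+
+ int a[10] vs. int b[] => 1 (one domain is missing)
+ int a[10] vs. int c[10] => 1 (equal constant bounds)
+ int a[10] vs. int d[20] => 0 (constant bounds differ) */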
+
+/* Return 1 if TYPE1 and TYPE2 are compatible types for assignment
+ or various other operations. This is what ANSI C++ speaks of as
+ "being the same".
+
+ For C++: argument STRICT says we should be strict about this
+ comparison:
+
+ 2 : strict, except that if one type is a reference and
+ the other is not, compare the target type of the
+ reference to the type that's not a reference (ARM, p308).
+ This is used for checking for illegal overloading.
+ 1 : strict (compared according to ANSI C)
+ This is used for checking whether two function decls match.
+ 0 : <= (compared according to C++)
+ -1: <= or >= (relaxed)
+
+ Otherwise, pointers involving base classes and derived classes
+ can be mixed as legal: i.e. a pointer to a base class may be assigned
+ to a pointer to one of its derived classes, as per C++. A pointer to
+ a derived class may be passed as a parameter to a function expecting a
+ pointer to a base classes. These allowances do not commute. In this
+ case, TYPE1 is assumed to be the base class, and TYPE2 is assumed to
+ be the derived class. */
+int
+comptypes (type1, type2, strict)
+ tree type1, type2;
+ int strict;
+{
+ register tree t1 = type1;
+ register tree t2 = type2;
+ int attrval, val;
+
+ /* Suppress errors caused by previously reported errors. */
+
+ if (t1 == t2)
+ return 1;
+
+ /* This should never happen. */
+ my_friendly_assert (t1 != error_mark_node, 307);
+
+ if (t2 == error_mark_node)
+ return 0;
+
+ if (strict < 0)
+ {
+ /* Treat an enum type as the unsigned integer type of the same width. */
+
+ if (TREE_CODE (t1) == ENUMERAL_TYPE)
+ t1 = type_for_size (TYPE_PRECISION (t1), 1);
+ if (TREE_CODE (t2) == ENUMERAL_TYPE)
+ t2 = type_for_size (TYPE_PRECISION (t2), 1);
+
+ if (t1 == t2)
+ return 1;
+ }
+
+ /* Different classes of types can't be compatible. */
+
+ if (TREE_CODE (t1) != TREE_CODE (t2))
+ {
+ if (strict == 2
+ && ((TREE_CODE (t1) == REFERENCE_TYPE)
+ ^ (TREE_CODE (t2) == REFERENCE_TYPE)))
+ {
+ if (TREE_CODE (t1) == REFERENCE_TYPE)
+ return comptypes (TREE_TYPE (t1), t2, 1);
+ return comptypes (t1, TREE_TYPE (t2), 1);
+ }
+
+ return 0;
+ }
+ if (strict > 1)
+ strict = 1;
+
+ /* Qualifiers must match. */
+
+ if (TYPE_READONLY (t1) != TYPE_READONLY (t2))
+ return 0;
+ if (TYPE_VOLATILE (t1) != TYPE_VOLATILE (t2))
+ return 0;
+
+ /* Allow for two different type nodes which have essentially the same
+ definition. Note that we already checked for equality of the
+ type qualifiers (just above). */
+
+ if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
+ return 1;
+
+#ifdef COMP_TYPE_ATTRIBUTES
+ if (! (attrval = COMP_TYPE_ATTRIBUTES (t1, t2)))
+ return 0;
+#else
+ /* 1 if no need for warning yet, 2 if warning cause has been seen. */
+ attrval = 1;
+#endif
+
+ /* VAL holds the result of the code-specific comparison below. */
+ val = 0;
+
+ switch (TREE_CODE (t1))
+ {
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ if (strict <= 0)
+ goto look_hard;
+ return 0;
+
+ case OFFSET_TYPE:
+ val = (comptypes (TYPE_POINTER_TO (TYPE_OFFSET_BASETYPE (t1)),
+ TYPE_POINTER_TO (TYPE_OFFSET_BASETYPE (t2)), strict)
+ && comptypes (TREE_TYPE (t1), TREE_TYPE (t2), strict));
+ break;
+
+ case METHOD_TYPE:
+ if (! compexcepttypes (t1, t2, strict))
+ return 0;
+
+ /* This case is anti-symmetrical!
+ One can pass a base member (or member function)
+ to something expecting a derived member (or member function),
+ but not vice-versa! */
+
+ val = (comptypes (TYPE_POINTER_TO (TYPE_METHOD_BASETYPE (t2)),
+ TYPE_POINTER_TO (TYPE_METHOD_BASETYPE (t1)), strict)
+ && comptypes (TREE_TYPE (t1), TREE_TYPE (t2), strict)
+ && compparms (TREE_CHAIN (TYPE_ARG_TYPES (t1)),
+ TREE_CHAIN (TYPE_ARG_TYPES (t2)), strict));
+ break;
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ t1 = TREE_TYPE (t1);
+ t2 = TREE_TYPE (t2);
+ if (t1 == t2)
+ {
+ val = 1;
+ break;
+ }
+ if (strict <= 0)
+ {
+ if (TREE_CODE (t1) == RECORD_TYPE && TREE_CODE (t2) == RECORD_TYPE)
+ {
+ int rval;
+ look_hard:
+ rval = t1 == t2 || UNIQUELY_DERIVED_FROM_P (t1, t2);
+
+ if (rval)
+ {
+ val = 1;
+ break;
+ }
+ if (strict < 0)
+ {
+ val = UNIQUELY_DERIVED_FROM_P (t2, t1);
+ break;
+ }
+ }
+ return 0;
+ }
+ else
+ val = comptypes (t1, t2, strict);
+ break;
+
+ case FUNCTION_TYPE:
+ if (! compexcepttypes (t1, t2, strict))
+ return 0;
+
+ val = ((TREE_TYPE (t1) == TREE_TYPE (t2)
+ || comptypes (TREE_TYPE (t1), TREE_TYPE (t2), strict))
+ && compparms (TYPE_ARG_TYPES (t1), TYPE_ARG_TYPES (t2), strict));
+ break;
+
+ case ARRAY_TYPE:
+ /* Target types must match incl. qualifiers. */
+ val = comp_array_types (comptypes, t1, t2, strict);
+ break;
+
+ case TEMPLATE_TYPE_PARM:
+ return 1;
+
+ case UNINSTANTIATED_P_TYPE:
+ return UPT_TEMPLATE (t1) == UPT_TEMPLATE (t2);
+ }
+ return attrval == 2 && val == 1 ? 2 : val;
+}
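+
+/* For illustration, a sketch of the STRICT == -1 relaxation above:
+ an enum is treated as the unsigned integer type of the same width,
+ so for an `enum color' with the precision of int,
+
+ comptypes (color_type, unsigned_type_node, -1) => 1
+
+ while the same call with STRICT == 1 returns 0, because the codes
+ ENUMERAL_TYPE and INTEGER_TYPE differ. */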
+
+/* Return 1 if TTL and TTR are pointers to types that are equivalent,
+ ignoring their qualifiers.
+
+ NPTRS is the number of pointers we can strip off and keep cool.
+ This is used to permit (for aggr A, aggr B) A, B* to convert to A*,
+ but to not permit B** to convert to A**. */
+
+int
+comp_target_types (ttl, ttr, nptrs)
+ tree ttl, ttr;
+ int nptrs;
+{
+ ttl = TYPE_MAIN_VARIANT (ttl);
+ ttr = TYPE_MAIN_VARIANT (ttr);
+ if (ttl == ttr)
+ return 1;
+ if (TREE_CODE (ttr) == TEMPLATE_TYPE_PARM)
+ return 1;
+
+ if (TREE_CODE (ttr) != TREE_CODE (ttl))
+ return 0;
+
+ if (TREE_CODE (ttr) == POINTER_TYPE)
+ {
+ if (TREE_CODE (TREE_TYPE (ttl)) == POINTER_TYPE
+ || TREE_CODE (TREE_TYPE (ttl)) == ARRAY_TYPE)
+ return comp_ptr_ttypes (TREE_TYPE (ttl), TREE_TYPE (ttr));
+ else
+ return comp_target_types (TREE_TYPE (ttl), TREE_TYPE (ttr), nptrs - 1);
+ }
+
+ if (TREE_CODE (ttr) == REFERENCE_TYPE)
+ return comp_target_types (TREE_TYPE (ttl), TREE_TYPE (ttr), nptrs);
+ if (TREE_CODE (ttr) == ARRAY_TYPE)
+ return comp_array_types (comp_target_types, ttl, ttr, 0);
+ else if (TREE_CODE (ttr) == FUNCTION_TYPE || TREE_CODE (ttr) == METHOD_TYPE)
+ if (comp_target_types (TREE_TYPE (ttl), TREE_TYPE (ttr), nptrs))
+ switch (comp_target_parms (TYPE_ARG_TYPES (ttl), TYPE_ARG_TYPES (ttr), 1))
+ {
+ case 0:
+ return 0;
+ case 1:
+ return 1;
+ case 2:
+ cp_pedwarn ("converting `%T' to `%T' is a contravariance violation",
+ ttr, ttl);
+ return 1;
+ default:
+ my_friendly_abort (112);
+ }
+ else
+ return 0;
+
+ /* for C++ */
+ else if (TREE_CODE (ttr) == OFFSET_TYPE)
+ {
+ /* Contravariance: we can assign a pointer to base member to a pointer
+ to derived member. Note difference from simple pointer case, where
+ we can pass a pointer to derived to a pointer to base. */
+ if (comptypes (TYPE_OFFSET_BASETYPE (ttr), TYPE_OFFSET_BASETYPE (ttl), 0))
+ return comp_target_types (TREE_TYPE (ttl), TREE_TYPE (ttr), nptrs);
+ else if (comptypes (TYPE_OFFSET_BASETYPE (ttl), TYPE_OFFSET_BASETYPE (ttr), 0)
+ && comp_target_types (TREE_TYPE (ttl), TREE_TYPE (ttr), nptrs))
+ {
+ cp_pedwarn ("converting `%T' to `%T' is a contravariance violation",
+ ttr, ttl);
+ return 1;
+ }
+ }
+ else if (IS_AGGR_TYPE (ttl))
+ {
+ if (nptrs < 0)
+ return 0;
+ return comptypes (TYPE_POINTER_TO (ttl), TYPE_POINTER_TO (ttr), 0);
+ }
+
+ return 0;
+}
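+
+/* For illustration, the OFFSET_TYPE (pointer to member) case above
+ is contravariant. Assuming `struct B {}; struct D : B {};':
+
+ int B::*pb; int D::*pd;
+ pd = pb; => allowed (base member to derived member)
+ pb = pd; => accepted with the contravariance pedwarn */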
+
+/* If two types share a common base type, return that basetype.
+ If there is not a unique most-derived base type, this function
+ returns ERROR_MARK_NODE. */
+tree
+common_base_type (tt1, tt2)
+ tree tt1, tt2;
+{
+ tree best = NULL_TREE, tmp;
+ int i;
+
+ /* If one is a baseclass of another, that's good enough. */
+ if (UNIQUELY_DERIVED_FROM_P (tt1, tt2))
+ return tt1;
+ if (UNIQUELY_DERIVED_FROM_P (tt2, tt1))
+ return tt2;
+
+ /* If they share a virtual baseclass, that's good enough. */
+ for (tmp = CLASSTYPE_VBASECLASSES (tt1); tmp; tmp = TREE_CHAIN (tmp))
+ {
+ if (binfo_member (BINFO_TYPE (tmp), CLASSTYPE_VBASECLASSES (tt2)))
+ return BINFO_TYPE (tmp);
+ }
+
+ /* Otherwise, try to find a unique baseclass of TT1
+ that is shared by TT2, and follow that down. */
+ for (i = CLASSTYPE_N_BASECLASSES (tt1)-1; i >= 0; i--)
+ {
+ tree basetype = TYPE_BINFO_BASETYPE (tt1, i);
+ tree trial = common_base_type (basetype, tt2);
+ if (trial)
+ {
+ if (trial == error_mark_node)
+ return trial;
+ if (best == NULL_TREE)
+ best = trial;
+ else if (best != trial)
+ return error_mark_node;
+ }
+ }
+
+ /* Same for TT2. */
+ for (i = CLASSTYPE_N_BASECLASSES (tt2)-1; i >= 0; i--)
+ {
+ tree basetype = TYPE_BINFO_BASETYPE (tt2, i);
+ tree trial = common_base_type (tt1, basetype);
+ if (trial)
+ {
+ if (trial == error_mark_node)
+ return trial;
+ if (best == NULL_TREE)
+ best = trial;
+ else if (best != trial)
+ return error_mark_node;
+ }
+ }
+ return best;
+}
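+
+/* For illustration, assuming `struct A {}; struct B : A {};
+ struct C : A {};':
+
+ common_base_type (B, C) => A (the unique shared baseclass)
+
+ If B and C instead shared two unrelated baseclasses, no choice
+ would be unique and error_mark_node would be returned. */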
+
+/* Subroutines of `comptypes'. */
+
+/* Return 1 if two parameter type lists PARMS1 and PARMS2
+ are equivalent in the sense that functions with those parameter types
+ can have equivalent types.
+ If either list is empty, we win.
+ Otherwise, the two lists must be equivalent, element by element.
+
+ C++: See comment above about TYPE1, TYPE2, STRICT.
+ If STRICT == 3, it means checking is strict, but do not compare
+ default parameter values. */
+int
+compparms (parms1, parms2, strict)
+ tree parms1, parms2;
+ int strict;
+{
+ register tree t1 = parms1, t2 = parms2;
+
+ /* An unspecified parmlist matches any specified parmlist
+ whose argument types don't need default promotions. */
+
+ if (strict <= 0 && t1 == 0)
+ return self_promoting_args_p (t2);
+ if (strict < 0 && t2 == 0)
+ return self_promoting_args_p (t1);
+
+ while (1)
+ {
+ if (t1 == 0 && t2 == 0)
+ return 1;
+ /* If one parmlist is shorter than the other,
+ they fail to match, unless STRICT is <= 0. */
+ if (t1 == 0 || t2 == 0)
+ {
+ if (strict > 0)
+ return 0;
+ if (strict < 0)
+ return 1;
+ if (strict == 0)
+ return t1 && TREE_PURPOSE (t1);
+ }
+ if (! comptypes (TREE_VALUE (t2), TREE_VALUE (t1), strict))
+ {
+ if (strict > 0)
+ return 0;
+ if (strict == 0)
+ return t2 == void_list_node && TREE_PURPOSE (t1);
+ return TREE_PURPOSE (t1) || TREE_PURPOSE (t2);
+ }
+ if (strict != 3 && TREE_PURPOSE (t1) && TREE_PURPOSE (t2))
+ {
+ int cmp = simple_cst_equal (TREE_PURPOSE (t1), TREE_PURPOSE (t2));
+ if (cmp < 0)
+ my_friendly_abort (113);
+ if (cmp == 0)
+ return 0;
+ }
+
+ t1 = TREE_CHAIN (t1);
+ t2 = TREE_CHAIN (t2);
+ }
+}
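+
+/* For illustration of the unspecified-parmlist rule above, with
+ STRICT <= 0 an empty parmlist matches only self-promoting
+ parameter types:
+
+ int f (); int f (int, double); => lists match
+ int g (); int g (char); => no match; char promotes to int */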
+
+/* This really wants to return whether or not parameter type lists
+ would make their owning functions assignment compatible. */
+int
+comp_target_parms (parms1, parms2, strict)
+ tree parms1, parms2;
+ int strict;
+{
+ register tree t1 = parms1, t2 = parms2;
+ int warn_contravariance = 0;
+
+ /* An unspecified parmlist matches any specified parmlist
+ whose argument types don't need default promotions.
+ @@@ see 13.3.3 for a counterexample... */
+
+ if (t1 == 0 && t2 != 0)
+ {
+ cp_pedwarn ("ANSI C++ prohibits conversion from `(%#T)' to `(...)'",
+ parms2);
+ return self_promoting_args_p (t2);
+ }
+ if (t2 == 0)
+ return self_promoting_args_p (t1);
+
+ for (; t1 || t2; t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2))
+ {
+ tree p1, p2;
+
+ /* If one parmlist is shorter than the other,
+ they fail to match, unless STRICT is <= 0. */
+ if (t1 == 0 || t2 == 0)
+ {
+ if (strict > 0)
+ return 0;
+ if (strict < 0)
+ return 1 + warn_contravariance;
+ return ((t1 && TREE_PURPOSE (t1)) + warn_contravariance);
+ }
+ p1 = TREE_VALUE (t1);
+ p2 = TREE_VALUE (t2);
+ if (p1 == p2)
+ continue;
+ if (TREE_CODE (p2) == TEMPLATE_TYPE_PARM)
+ continue;
+
+ if ((TREE_CODE (p1) == POINTER_TYPE && TREE_CODE (p2) == POINTER_TYPE)
+ || (TREE_CODE (p1) == REFERENCE_TYPE && TREE_CODE (p2) == REFERENCE_TYPE))
+ {
+ if (strict <= 0
+ && (TYPE_MAIN_VARIANT (TREE_TYPE (p1))
+ == TYPE_MAIN_VARIANT (TREE_TYPE (p2))))
+ continue;
+
+ if (TREE_CODE (TREE_TYPE (p2)) == TEMPLATE_TYPE_PARM)
+ continue;
+
+ /* The following is wrong for contravariance,
+ but many programs depend on it. */
+ if (TREE_TYPE (p1) == void_type_node)
+ continue;
+ if (TREE_TYPE (p2) == void_type_node)
+ {
+ warn_contravariance = 1;
+ continue;
+ }
+ if (IS_AGGR_TYPE (TREE_TYPE (p1)))
+ {
+ if (comptypes (p2, p1, 0) == 0)
+ {
+ if (comptypes (p1, p2, 0) != 0)
+ warn_contravariance = 1;
+ else
+ return 0;
+ }
+ continue;
+ }
+ }
+ /* Note backwards order due to contravariance. */
+ if (comp_target_types (p2, p1, 1) == 0)
+ {
+ if (comp_target_types (p1, p2, 1))
+ {
+ warn_contravariance = 1;
+ continue;
+ }
+ if (strict != 0)
+ return 0;
+#if 0
+ /* What good do these cases do? */
+ if (strict == 0)
+ return p2 == void_type_node && TREE_PURPOSE (t1);
+ return TREE_PURPOSE (t1) || TREE_PURPOSE (t2);
+#endif
+ }
+ /* Target types are compatible--just make sure that if
+ we use parameter lists, that they are ok as well. */
+ if (TREE_CODE (p1) == FUNCTION_TYPE || TREE_CODE (p1) == METHOD_TYPE)
+ switch (comp_target_parms (TYPE_ARG_TYPES (p1),
+ TYPE_ARG_TYPES (p2),
+ strict))
+ {
+ case 0:
+ return 0;
+ case 1:
+ break;
+ case 2:
+ warn_contravariance = 1;
+ }
+
+ if (TREE_PURPOSE (t1) && TREE_PURPOSE (t2))
+ {
+ int cmp = simple_cst_equal (TREE_PURPOSE (t1), TREE_PURPOSE (t2));
+ if (cmp < 0)
+ my_friendly_abort (114);
+ if (cmp == 0)
+ return 0;
+ }
+ }
+ return 1 + warn_contravariance;
+}
+
+/* Return 1 if PARMS specifies a fixed number of parameters
+ and none of their types is affected by default promotions. */
+
+int
+self_promoting_args_p (parms)
+ tree parms;
+{
+ register tree t;
+ for (t = parms; t; t = TREE_CHAIN (t))
+ {
+ register tree type = TREE_VALUE (t);
+
+ if (TREE_CHAIN (t) == 0 && type != void_type_node)
+ return 0;
+
+ if (TYPE_MAIN_VARIANT (type) == float_type_node)
+ return 0;
+
+ if (type == 0)
+ return 0;
+
+ if (C_PROMOTING_INTEGER_TYPE_P (type))
+ return 0;
+ }
+ return 1;
+}
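+
+/* For illustration: a parmlist qualifies only when it is terminated
+ by void_type_node (fixed arity) and no parameter type is changed by
+ the default promotions. So `(int, double)' qualifies, while
+ `(char)', `(short)', `(float)' and any `...' list do not. */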
+
+/* Return an unsigned type the same as TYPE in other respects.
+
+ C++: must make these work for type variants as well. */
+
+tree
+unsigned_type (type)
+ tree type;
+{
+ tree type1 = TYPE_MAIN_VARIANT (type);
+ if (type1 == signed_char_type_node || type1 == char_type_node)
+ return unsigned_char_type_node;
+ if (type1 == integer_type_node)
+ return unsigned_type_node;
+ if (type1 == short_integer_type_node)
+ return short_unsigned_type_node;
+ if (type1 == long_integer_type_node)
+ return long_unsigned_type_node;
+ if (type1 == long_long_integer_type_node)
+ return long_long_unsigned_type_node;
+ return type;
+}
+
+/* Return a signed type the same as TYPE in other respects. */
+
+tree
+signed_type (type)
+ tree type;
+{
+ tree type1 = TYPE_MAIN_VARIANT (type);
+ if (type1 == unsigned_char_type_node || type1 == char_type_node)
+ return signed_char_type_node;
+ if (type1 == unsigned_type_node)
+ return integer_type_node;
+ if (type1 == short_unsigned_type_node)
+ return short_integer_type_node;
+ if (type1 == long_unsigned_type_node)
+ return long_integer_type_node;
+ if (type1 == long_long_unsigned_type_node)
+ return long_long_integer_type_node;
+ return type;
+}
+
+/* Return a type the same as TYPE except unsigned or
+ signed according to UNSIGNEDP. */
+
+tree
+signed_or_unsigned_type (unsignedp, type)
+ int unsignedp;
+ tree type;
+{
+ if (! INTEGRAL_TYPE_P (type))
+ return type;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (signed_char_type_node))
+ return unsignedp ? unsigned_char_type_node : signed_char_type_node;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node))
+ return unsignedp ? unsigned_type_node : integer_type_node;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (short_integer_type_node))
+ return unsignedp ? short_unsigned_type_node : short_integer_type_node;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (long_integer_type_node))
+ return unsignedp ? long_unsigned_type_node : long_integer_type_node;
+ if (TYPE_PRECISION (type) == TYPE_PRECISION (long_long_integer_type_node))
+ return (unsignedp ? long_long_unsigned_type_node
+ : long_long_integer_type_node);
+ return type;
+}
+
+tree
+c_sizeof (type)
+ tree type;
+{
+ enum tree_code code = TREE_CODE (type);
+ tree t;
+
+ if (code == FUNCTION_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("ANSI C++ forbids taking the sizeof a function type");
+ return size_int (1);
+ }
+ if (code == METHOD_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("ANSI C++ forbids taking the sizeof a method type");
+ return size_int (1);
+ }
+ if (code == VOID_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("ANSI C++ forbids taking the sizeof a void type");
+ return size_int (1);
+ }
+ if (code == ERROR_MARK)
+ return size_int (1);
+
+ /* ARM $5.3.2: ``When applied to a reference, the result is the size of the
+ referenced object.'' */
+ if (code == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+
+ /* We couldn't find anything in the ARM or the draft standard that says,
+ one way or the other, if doing sizeof on something that doesn't have
+ an object associated with it is correct or incorrect. For example, if
+ you declare `struct S { char str[16]; };', and in your program do
+ a `sizeof (S::str)', should we flag that as an error or should we give
+ the size of it? Since it seems like a reasonable thing to do, we'll go
+ with giving the value. */
+ if (code == OFFSET_TYPE)
+ type = TREE_TYPE (type);
+
+ /* @@ This also produces an error for a signature ref.
+ In that case we should be able to do better. */
+ if (IS_SIGNATURE (type))
+ {
+ error ("`sizeof' applied to a signature type");
+ return size_int (0);
+ }
+
+ if (TYPE_SIZE (type) == 0)
+ {
+ error ("`sizeof' applied to an incomplete type");
+ return size_int (0);
+ }
+
+ /* Convert in case a char is more than one unit. */
+ t = size_binop (CEIL_DIV_EXPR, TYPE_SIZE (type),
+ size_int (TYPE_PRECISION (char_type_node)));
+ /* size_binop does not put the constant in range, so do it now. */
+ if (TREE_CODE (t) == INTEGER_CST && force_fit_type (t, 0))
+ TREE_CONSTANT_OVERFLOW (t) = TREE_OVERFLOW (t) = 1;
+ return t;
+}
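+
+/* For illustration, assuming `int i; int &r = i;':
+
+ sizeof (r) => sizeof (int), per the ARM rule above
+ sizeof (void) => 1, with a pedwarn under -pedantic or
+ -Wpointer-arith
+
+ The final size_binop division converts TYPE_SIZE, which is in bits,
+ into a count of chars. */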
+
+tree
+c_sizeof_nowarn (type)
+ tree type;
+{
+ enum tree_code code = TREE_CODE (type);
+ tree t;
+
+ if (code == FUNCTION_TYPE
+ || code == METHOD_TYPE
+ || code == VOID_TYPE
+ || code == ERROR_MARK)
+ return size_int (1);
+ if (code == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+
+ if (TYPE_SIZE (type) == 0)
+ {
+#if 0
+ /* ??? Tiemann, why have any diagnostic here?
+ There is none in the corresponding function for C. */
+ warning ("sizeof applied to an incomplete type");
+#endif
+ return size_int (0);
+ }
+
+ /* Convert in case a char is more than one unit. */
+ t = size_binop (CEIL_DIV_EXPR, TYPE_SIZE (type),
+ size_int (TYPE_PRECISION (char_type_node)));
+ force_fit_type (t, 0);
+ return t;
+}
+
+/* Implement the __alignof keyword: Return the minimum required
+ alignment of TYPE, measured in bytes. */
+
+tree
+c_alignof (type)
+ tree type;
+{
+ enum tree_code code = TREE_CODE (type);
+ tree t;
+
+ if (code == FUNCTION_TYPE || code == METHOD_TYPE)
+ return size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
+
+ if (code == VOID_TYPE || code == ERROR_MARK)
+ return size_int (1);
+
+ /* C++: this is really correct! */
+ if (code == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+
+ /* @@ This also produces an error for a signature ref.
+ In that case we should be able to do better. */
+ if (IS_SIGNATURE (type))
+ {
+ error ("`__alignof' applied to a signature type");
+ return size_int (1);
+ }
+
+ t = size_int (TYPE_ALIGN (type) / BITS_PER_UNIT);
+ force_fit_type (t, 0);
+ return t;
+}
+
+/* Perform default promotions for C data used in expressions.
+ Arrays and functions are converted to pointers;
+ enumeral types or short or char, to int.
+ In addition, manifest constant symbols are replaced by their values.
+
+ C++: this will automatically bash references to their target type. */
+
+tree
+default_conversion (exp)
+ tree exp;
+{
+ register tree type = TREE_TYPE (exp);
+ register enum tree_code code = TREE_CODE (type);
+
+ if (code == OFFSET_TYPE /* || TREE_CODE (exp) == OFFSET_REF */ )
+ {
+ if (TREE_CODE (exp) == OFFSET_REF)
+ return default_conversion (resolve_offset_ref (exp));
+
+ type = TREE_TYPE (type);
+ code = TREE_CODE (type);
+ }
+
+ if (code == REFERENCE_TYPE)
+ {
+ exp = convert_from_reference (exp);
+ type = TREE_TYPE (exp);
+ code = TREE_CODE (type);
+ }
+
+ /* Constants can be used directly unless they're not loadable. */
+ if (TREE_CODE (exp) == CONST_DECL)
+ exp = DECL_INITIAL (exp);
+ /* Replace a nonvolatile const static variable with its value. */
+ else if (TREE_READONLY_DECL_P (exp) && DECL_MODE (exp) != BLKmode)
+ {
+ exp = decl_constant_value (exp);
+ type = TREE_TYPE (exp);
+ }
+
+ /* build_c_cast puts on a NOP_EXPR to make the result not an lvalue.
+ Leave such NOP_EXPRs, since RHS is being used in non-lvalue context. */
+
+ if (INTEGRAL_CODE_P (code))
+ {
+ tree t = type_promotes_to (type);
+ if (t != TYPE_MAIN_VARIANT (type))
+ return convert (t, exp);
+ }
+ if (flag_traditional
+ && TYPE_MAIN_VARIANT (type) == float_type_node)
+ return convert (double_type_node, exp);
+ if (code == VOID_TYPE)
+ {
+ error ("void value not ignored as it ought to be");
+ return error_mark_node;
+ }
+ if (code == FUNCTION_TYPE)
+ {
+ return build_unary_op (ADDR_EXPR, exp, 0);
+ }
+ if (code == METHOD_TYPE)
+ {
+ if (TREE_CODE (exp) == OFFSET_REF)
+ {
+ my_friendly_assert (TREE_CODE (TREE_OPERAND (exp, 1)) == FUNCTION_DECL,
+ 308);
+ return build_unary_op (ADDR_EXPR, TREE_OPERAND (exp, 1), 0);
+ }
+ return build_unary_op (ADDR_EXPR, exp, 0);
+ }
+ if (code == ARRAY_TYPE)
+ {
+ register tree adr;
+ tree restype;
+ tree ptrtype;
+ int constp, volatilep;
+
+ if (TREE_CODE (exp) == INDIRECT_REF)
+ {
+ /* Stripping away the INDIRECT_REF is not the right
+ thing to do for references... */
+ tree inner = TREE_OPERAND (exp, 0);
+ if (TREE_CODE (TREE_TYPE (inner)) == REFERENCE_TYPE)
+ {
+ inner = build1 (CONVERT_EXPR,
+ build_pointer_type (TREE_TYPE (TREE_TYPE (inner))),
+ inner);
+ TREE_REFERENCE_EXPR (inner) = 1;
+ }
+ return convert (TYPE_POINTER_TO (TREE_TYPE (type)), inner);
+ }
+
+ if (TREE_CODE (exp) == COMPOUND_EXPR)
+ {
+ tree op1 = default_conversion (TREE_OPERAND (exp, 1));
+ return build (COMPOUND_EXPR, TREE_TYPE (op1),
+ TREE_OPERAND (exp, 0), op1);
+ }
+
+ if (!lvalue_p (exp)
+ && ! (TREE_CODE (exp) == CONSTRUCTOR && TREE_STATIC (exp)))
+ {
+ error ("invalid use of non-lvalue array");
+ return error_mark_node;
+ }
+
+ constp = volatilep = 0;
+ if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'r'
+ || TREE_CODE_CLASS (TREE_CODE (exp)) == 'd')
+ {
+ constp = TREE_READONLY (exp);
+ volatilep = TREE_THIS_VOLATILE (exp);
+ }
+
+ restype = TREE_TYPE (type);
+ if (TYPE_READONLY (type) || TYPE_VOLATILE (type)
+ || constp || volatilep)
+ restype = c_build_type_variant (restype,
+ TYPE_READONLY (type) || constp,
+ TYPE_VOLATILE (type) || volatilep);
+ ptrtype = build_pointer_type (restype);
+
+ if (TREE_CODE (exp) == VAR_DECL)
+ {
+ /* ??? This is not really quite correct
+ in that the type of the operand of ADDR_EXPR
+ is not the target type of the type of the ADDR_EXPR itself.
+ Question is, can this lossage be avoided? */
+ adr = build1 (ADDR_EXPR, ptrtype, exp);
+ if (mark_addressable (exp) == 0)
+ return error_mark_node;
+ TREE_CONSTANT (adr) = staticp (exp);
+ TREE_SIDE_EFFECTS (adr) = 0; /* Default would be, same as EXP. */
+ return adr;
+ }
+ /* This way is better for a COMPONENT_REF since it can
+ simplify the offset for a component. */
+ adr = build_unary_op (ADDR_EXPR, exp, 1);
+ return convert (ptrtype, adr);
+ }
+ return exp;
+}
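+
+/* For illustration, the conversions performed above on ordinary
+ declarations:
+
+ short s; => promoted to int, via type_promotes_to
+ int a[10]; => decays to int *, by taking its address
+ int f (); => decays to int (*) ()
+ int i; int &r = i; => r is first converted from reference form */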
+
+tree
+build_object_ref (datum, basetype, field)
+ tree datum, basetype, field;
+{
+ if (datum == error_mark_node)
+ return error_mark_node;
+ else if (IS_SIGNATURE (IDENTIFIER_TYPE_VALUE (basetype)))
+ {
+ warning ("signature name in scope resolution ignored");
+ return build_component_ref (datum, field, NULL_TREE, 1);
+ }
+ else if (is_aggr_typedef (basetype, 1))
+ {
+ tree real_basetype = IDENTIFIER_TYPE_VALUE (basetype);
+ tree binfo = binfo_or_else (real_basetype, TREE_TYPE (datum));
+ if (binfo)
+ return build_component_ref (build_scoped_ref (datum, basetype),
+ field, binfo, 1);
+ }
+ return error_mark_node;
+}
+
+/* Like `build_component_ref', but uses an already found field.
+ Must compute access for C_C_D. Otherwise, ok. */
+tree
+build_component_ref_1 (datum, field, protect)
+ tree datum, field;
+ int protect;
+{
+ register tree basetype = TREE_TYPE (datum);
+ register enum tree_code code = TREE_CODE (basetype);
+ register tree ref;
+
+ if (code == REFERENCE_TYPE)
+ {
+ datum = convert_from_reference (datum);
+ basetype = TREE_TYPE (datum);
+ code = TREE_CODE (basetype);
+ }
+
+ if (! IS_AGGR_TYPE_CODE (code))
+ {
+ if (code != ERROR_MARK)
+ cp_error ("request for member `%D' in `%E', which is of non-aggregate type `%T'",
+ field, datum, basetype);
+ return error_mark_node;
+ }
+
+ if (TYPE_SIZE (basetype) == 0)
+ {
+ incomplete_type_error (0, basetype);
+ return error_mark_node;
+ }
+
+ /* Look up component name in the structure type definition. */
+
+ if (field == error_mark_node)
+ my_friendly_abort (115);
+
+ if (TREE_STATIC (field))
+ return field;
+
+ if (datum == C_C_D)
+ {
+ enum access_type access
+ = compute_access (TYPE_BINFO (current_class_type), field);
+
+ if (access == access_private)
+ {
+ cp_error ("field `%D' is private", field);
+ return error_mark_node;
+ }
+ else if (access == access_protected)
+ {
+ cp_error ("field `%D' is protected", field);
+ return error_mark_node;
+ }
+ }
+
+ ref = build (COMPONENT_REF, TREE_TYPE (field), datum, field);
+
+ if (TREE_READONLY (datum) || TREE_READONLY (field))
+ TREE_READONLY (ref) = 1;
+ if (TREE_THIS_VOLATILE (datum) || TREE_THIS_VOLATILE (field))
+ TREE_THIS_VOLATILE (ref) = 1;
+ if (DECL_MUTABLE_P (field))
+ TREE_READONLY (ref) = 0;
+
+ return ref;
+}
+
+/* Given a COND_EXPR in T, return it in a form that we can, for
+ example, use as an lvalue. This code used to be in unary_complex_lvalue,
+ but we needed it to deal with `a = (d == c) ? b : c' expressions, where
+ we're dealing with aggregates. So, we now call this in unary_complex_lvalue,
+ and in build_modify_expr. The case (in particular) that led to this was
+ with CODE == ADDR_EXPR, since it's not an lvalue when we'd get it there. */
+static tree
+rationalize_conditional_expr (code, t)
+ enum tree_code code;
+ tree t;
+{
+ return
+ build_conditional_expr (TREE_OPERAND (t, 0),
+ build_unary_op (code, TREE_OPERAND (t, 1), 0),
+ build_unary_op (code, TREE_OPERAND (t, 2), 0));
+}
+
+tree
+build_component_ref (datum, component, basetype_path, protect)
+ tree datum, component, basetype_path;
+ int protect;
+{
+ register tree basetype = TREE_TYPE (datum);
+ register enum tree_code code = TREE_CODE (basetype);
+ register tree field = NULL;
+ register tree ref;
+
+ /* If DATUM is a COMPOUND_EXPR or COND_EXPR, move our reference inside
+ it, unless we must restrict ourselves to strictly ANSI behavior. */
+ switch (TREE_CODE (datum))
+ {
+ case COMPOUND_EXPR:
+ {
+ tree value = build_component_ref (TREE_OPERAND (datum, 1), component,
+ basetype_path, protect);
+ return build (COMPOUND_EXPR, TREE_TYPE (value),
+ TREE_OPERAND (datum, 0), value);
+ }
+ case COND_EXPR:
+ return build_conditional_expr
+ (TREE_OPERAND (datum, 0),
+ build_component_ref (TREE_OPERAND (datum, 1), component,
+ basetype_path, protect),
+ build_component_ref (TREE_OPERAND (datum, 2), component,
+ basetype_path, protect));
+ }
+
+ if (code == REFERENCE_TYPE)
+ {
+#if 0
+ /* TREE_REFERENCE_EXPRs are not converted by `convert_from_reference'.
+ @@ Maybe that is not right. */
+ if (TREE_REFERENCE_EXPR (datum))
+ datum = build1 (INDIRECT_REF, TREE_TYPE (basetype), datum);
+ else
+#endif
+ datum = convert_from_reference (datum);
+ basetype = TREE_TYPE (datum);
+ code = TREE_CODE (basetype);
+ }
+
+ /* First, see if there is a field or component with name COMPONENT. */
+ if (TREE_CODE (component) == TREE_LIST)
+ {
+ my_friendly_assert (!(TREE_CHAIN (component) == NULL_TREE
+ && DECL_CHAIN (TREE_VALUE (component)) == NULL_TREE), 309);
+ return build (COMPONENT_REF, TREE_TYPE (component), datum, component);
+ }
+#if 0
+ if (TREE_CODE (component) == TYPE_EXPR)
+ return build_component_type_expr (datum, component, NULL_TREE, protect);
+#endif
+
+ if (! IS_AGGR_TYPE_CODE (code))
+ {
+ if (code != ERROR_MARK)
+ cp_error ("request for member `%D' in `%E', which is of non-aggregate type `%T'",
+ component, datum, basetype);
+ return error_mark_node;
+ }
+
+ if (TYPE_SIZE (basetype) == 0)
+ {
+ incomplete_type_error (0, basetype);
+ return error_mark_node;
+ }
+
+ if (TREE_CODE (component) == BIT_NOT_EXPR)
+ {
+ if (TYPE_IDENTIFIER (basetype) != TREE_OPERAND (component, 0))
+ {
+ cp_error ("destructor specifier `%T::~%T' must have matching names",
+ basetype, TREE_OPERAND (component, 0));
+ return error_mark_node;
+ }
+ if (! TYPE_HAS_DESTRUCTOR (basetype))
+ {
+ cp_error ("type `%T' has no destructor", basetype);
+ return error_mark_node;
+ }
+ return TREE_VEC_ELT (CLASSTYPE_METHOD_VEC (basetype), 0);
+ }
+
+ /* Look up component name in the structure type definition. */
+ if (CLASSTYPE_VFIELD (basetype)
+ && DECL_NAME (CLASSTYPE_VFIELD (basetype)) == component)
+ /* Special-case this because if we use normal lookups in an ambiguous
+ hierarchy, the compiler will abort (because vptr lookups are
+ not supposed to be ambiguous). */
+ field = CLASSTYPE_VFIELD (basetype);
+ else
+ {
+ if (basetype_path == NULL_TREE)
+ basetype_path = TYPE_BINFO (basetype);
+ field = lookup_field (basetype_path, component,
+ protect && ! VFIELD_NAME_P (component), 0);
+ if (field == error_mark_node)
+ return error_mark_node;
+
+ if (field == NULL_TREE)
+ {
+ /* Not found as a data field, look for it as a method. If found,
+ then if this is the only possible one, return it, else
+ report ambiguity error. */
+ tree fndecls = lookup_fnfields (basetype_path, component, 1);
+ if (fndecls == error_mark_node)
+ return error_mark_node;
+ if (fndecls)
+ {
+ if (TREE_CHAIN (fndecls) == NULL_TREE
+ && DECL_CHAIN (TREE_VALUE (fndecls)) == NULL_TREE)
+ {
+ enum access_type access;
+ tree fndecl;
+
+ /* Unique, so use this one now. */
+ basetype = TREE_PURPOSE (fndecls);
+ fndecl = TREE_VALUE (fndecls);
+ access = compute_access (TREE_PURPOSE (fndecls), fndecl);
+ if (access == access_public)
+ {
+ if (DECL_VINDEX (fndecl)
+ && ! resolves_to_fixed_type_p (datum, 0))
+ {
+ tree addr = build_unary_op (ADDR_EXPR, datum, 0);
+ addr = convert_pointer_to (DECL_CONTEXT (fndecl), addr);
+ datum = build_indirect_ref (addr, NULL_PTR);
+ my_friendly_assert (datum != error_mark_node, 310);
+ fndecl = build_vfn_ref (&addr, datum, DECL_VINDEX (fndecl));
+ }
+ return fndecl;
+ }
+ if (access == access_protected)
+ cp_error ("member function `%D' is protected", fndecl);
+ else
+ cp_error ("member function `%D' is private", fndecl);
+ return error_mark_node;
+ }
+ else
+ return build (COMPONENT_REF, unknown_type_node, datum, fndecls);
+ }
+
+#if 0
+ if (component == ansi_opname[(int) TYPE_EXPR])
+ cp_error ("`%#T' has no such type conversion operator", basetype);
+ else
+#endif
+ cp_error ("`%#T' has no member named `%D'", basetype, component);
+ return error_mark_node;
+ }
+ else if (TREE_TYPE (field) == error_mark_node)
+ return error_mark_node;
+
+ if (TREE_CODE (field) != FIELD_DECL)
+ {
+ if (TREE_CODE (field) == TYPE_DECL)
+ {
+ cp_error ("invalid use of type decl `%#D' as expression", field);
+ return error_mark_node;
+ }
+ if (DECL_RTL (field) != 0)
+ assemble_external (field);
+ TREE_USED (field) = 1;
+ return field;
+ }
+ }
+
+ if (DECL_FIELD_CONTEXT (field) != basetype
+ && TYPE_USES_COMPLEX_INHERITANCE (basetype))
+ {
+ tree addr = build_unary_op (ADDR_EXPR, datum, 0);
+ if (integer_zerop (addr))
+ {
+ error ("invalid reference to NULL ptr, use ptr-to-member instead");
+ return error_mark_node;
+ }
+ addr = convert_pointer_to (DECL_FIELD_CONTEXT (field), addr);
+ datum = build_indirect_ref (addr, NULL_PTR);
+ my_friendly_assert (datum != error_mark_node, 311);
+ }
+ ref = build (COMPONENT_REF, TREE_TYPE (field), break_out_cleanups (datum), field);
+
+ if (TREE_READONLY (datum) || TREE_READONLY (field))
+ TREE_READONLY (ref) = 1;
+ if (TREE_THIS_VOLATILE (datum) || TREE_THIS_VOLATILE (field))
+ TREE_THIS_VOLATILE (ref) = 1;
+ if (DECL_MUTABLE_P (field))
+ TREE_READONLY (ref) = 0;
+
+ return ref;
+}
+
+/* Given an expression PTR for a pointer, return an expression
+ for the value pointed to.
+ ERRORSTRING is the name of the operator to appear in error messages.
+
+ This function may need to overload OPERATOR_FNNAME.
+ Must also handle REFERENCE_TYPEs for C++. */
+
+tree
+build_x_indirect_ref (ptr, errorstring)
+ tree ptr;
+ char *errorstring;
+{
+ tree rval = build_opfncall (INDIRECT_REF, LOOKUP_NORMAL, ptr, NULL_TREE, NULL_TREE);
+ if (rval)
+ return rval;
+ return build_indirect_ref (ptr, errorstring);
+}
+
+tree
+build_indirect_ref (ptr, errorstring)
+ tree ptr;
+ char *errorstring;
+{
+ register tree pointer = default_conversion (ptr);
+ register tree type = TREE_TYPE (pointer);
+
+ if (ptr == current_class_decl)
+ return C_C_D;
+
+ if (TREE_CODE (type) == POINTER_TYPE || TREE_CODE (type) == REFERENCE_TYPE)
+ {
+ if (TREE_CODE (pointer) == ADDR_EXPR
+ && (TREE_TYPE (TREE_OPERAND (pointer, 0))
+ == TREE_TYPE (type)))
+ return TREE_OPERAND (pointer, 0);
+ else
+ {
+ tree t = TREE_TYPE (type);
+ register tree ref = build1 (INDIRECT_REF,
+ TYPE_MAIN_VARIANT (t), pointer);
+
+ TREE_READONLY (ref) = TYPE_READONLY (t);
+ TREE_THIS_VOLATILE (ref) = TYPE_VOLATILE (t);
+ TREE_SIDE_EFFECTS (ref)
+ = TYPE_VOLATILE (t) || TREE_SIDE_EFFECTS (pointer);
+ return ref;
+ }
+ }
+ /* `pointer' won't be an error_mark_node if we were given a
+ pointer to member, so it's cool to check for this here. */
+ else if (TYPE_PTRMEMFUNC_P (type))
+ error ("invalid use of `%s' on pointer to member function", errorstring);
+ else if (TREE_CODE (type) == RECORD_TYPE
+ && (IS_SIGNATURE_POINTER (type) || IS_SIGNATURE_REFERENCE (type)))
+ error ("cannot dereference signature pointer/reference");
+ else if (pointer != error_mark_node)
+ {
+ if (errorstring)
+ error ("invalid type argument of `%s'", errorstring);
+ else
+ error ("invalid type argument");
+ }
+ return error_mark_node;
+}
+
+/* This handles expressions of the form "a[i]", which denotes
+ an array reference.
+
+ This is logically equivalent in C to *(a+i), but we may do it differently.
+ If A is a variable or a member, we generate a primitive ARRAY_REF.
+ This avoids forcing the array out of registers, and can work on
+ arrays that are not lvalues (for example, members of structures returned
+ by functions).
+
+ If INDEX is of some user-defined type, it must be converted to
+ integer type. Otherwise, to make a compatible PLUS_EXPR, it
+ will inherit the type of the array, which will be some pointer type. */
+
+tree
+build_x_array_ref (array, index)
+ tree array, index;
+{
+ tree rval = build_opfncall (ARRAY_REF, LOOKUP_NORMAL, array, index, NULL_TREE);
+ if (rval)
+ return rval;
+ return build_array_ref (array, index);
+}
+
+tree
+build_array_ref (array, idx)
+ tree array, idx;
+{
+ tree itype;
+
+ if (idx == 0)
+ {
+ error ("subscript missing in array reference");
+ return error_mark_node;
+ }
+
+ if (TREE_TYPE (array) == error_mark_node
+ || TREE_TYPE (idx) == error_mark_node)
+ return error_mark_node;
+
+ itype = TREE_TYPE (idx);
+ /* We must check here for the reference, so we can do the possible
+ conversions immediately afterwards. */
+ if (TREE_CODE (itype) == REFERENCE_TYPE)
+ {
+ idx = convert_from_reference (idx);
+ itype = TREE_TYPE (idx);
+ }
+
+ if (IS_AGGR_TYPE (itype))
+ {
+ if (TYPE_HAS_INT_CONVERSION (itype))
+ idx = build_type_conversion (CONVERT_EXPR,
+ integer_type_node, idx, 1);
+ else
+ {
+ error_with_aggr_type (itype,
+ "type `%s' requires integer conversion for array indexing");
+ return error_mark_node;
+ }
+ }
+
+ if (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE
+ && TREE_CODE (array) != INDIRECT_REF)
+ {
+ tree rval, type;
+
+ /* Subscripting with type char is likely to lose
+ on a machine where chars are signed.
+ So warn on any machine, but optionally.
+ Don't warn for unsigned char since that type is safe.
+ Don't warn for signed char because anyone who uses that
+ must have done so deliberately. */
+ if (warn_char_subscripts
+ && TYPE_MAIN_VARIANT (TREE_TYPE (idx)) == char_type_node)
+ warning ("array subscript has type `char'");
+
+ /* Apply default promotions *after* noticing character types. */
+ idx = default_conversion (idx);
+
+ if (TREE_CODE (TREE_TYPE (idx)) != INTEGER_TYPE)
+ {
+ error ("array subscript is not an integer");
+ return error_mark_node;
+ }
+
+ /* An array that is indexed by a non-constant
+ cannot be stored in a register; we must be able to do
+ address arithmetic on its address.
+ Likewise an array of elements of variable size. */
+ if (TREE_CODE (idx) != INTEGER_CST
+ || (TYPE_SIZE (TREE_TYPE (TREE_TYPE (array))) != 0
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (TREE_TYPE (array)))) != INTEGER_CST))
+ {
+ if (mark_addressable (array) == 0)
+ return error_mark_node;
+ }
+ /* An array that is indexed by a constant value which is not within
+ the array bounds cannot be stored in a register either; because we
+ would get a crash in store_bit_field/extract_bit_field when trying
+ to access a non-existent part of the register. */
+ if (TREE_CODE (idx) == INTEGER_CST
+ && TYPE_VALUES (TREE_TYPE (array))
+ && ! int_fits_type_p (idx, TYPE_VALUES (TREE_TYPE (array))))
+ {
+ if (mark_addressable (array) == 0)
+ return error_mark_node;
+ }
+
+ /* Note in C++ we don't bother warning about subscripting a
+ `register' array, since it's legal in C++ to take the address
+ of something with that storage specification. */
+ if (pedantic && !lvalue_p (array))
+ pedwarn ("ANSI C++ forbids subscripting non-lvalue array");
+
+ if (pedantic)
+ {
+ tree foo = array;
+ while (TREE_CODE (foo) == COMPONENT_REF)
+ foo = TREE_OPERAND (foo, 0);
+ if (TREE_CODE (foo) == VAR_DECL && DECL_REGISTER (foo))
+ pedwarn ("ANSI C++ forbids subscripting non-lvalue array");
+ }
+
+ type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (array)));
+ rval = build (ARRAY_REF, type, array, idx);
+ /* Array ref is const/volatile if the array elements are
+ or if the array is. */
+ TREE_READONLY (rval)
+ |= (TYPE_READONLY (TREE_TYPE (TREE_TYPE (array)))
+ | TREE_READONLY (array));
+ TREE_SIDE_EFFECTS (rval)
+ |= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array)))
+ | TREE_SIDE_EFFECTS (array));
+ TREE_THIS_VOLATILE (rval)
+ |= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array)))
+ /* This was added by rms on 16 Nov 91.
+ It fixes vol struct foo *a; a->elts[1]
+ in an inline function.
+ Hope it doesn't break something else. */
+ | TREE_THIS_VOLATILE (array));
+ return require_complete_type (fold (rval));
+ }
+
+ {
+ tree ar = default_conversion (array);
+ tree ind = default_conversion (idx);
+
+ /* Put the integer in IND to simplify error checking. */
+ if (TREE_CODE (TREE_TYPE (ar)) == INTEGER_TYPE)
+ {
+ tree temp = ar;
+ ar = ind;
+ ind = temp;
+ }
+
+ if (ar == error_mark_node)
+ return ar;
+
+ if (TREE_CODE (TREE_TYPE (ar)) != POINTER_TYPE)
+ {
+ error ("subscripted value is neither array nor pointer");
+ return error_mark_node;
+ }
+ if (TREE_CODE (TREE_TYPE (ind)) != INTEGER_TYPE)
+ {
+ error ("array subscript is not an integer");
+ return error_mark_node;
+ }
+
+ return build_indirect_ref (build_binary_op_nodefault (PLUS_EXPR, ar, ind, PLUS_EXPR),
+ "array indexing");
+ }
+}
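+
+/* For illustration: with `int v[10]; int *p; int i;', `v[i]' is
+ built as a primitive ARRAY_REF on the array itself, while `p[i]'
+ falls through to the pointer case and becomes *(p + i) via
+ build_indirect_ref. */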
+
+/* Build a function call to function FUNCTION with parameters PARAMS.
+ PARAMS is a list--a chain of TREE_LIST nodes--in which the
+ TREE_VALUE of each node is a parameter-expression.
+ FUNCTION's data type may be a function type or a pointer-to-function.
+
+ For C++: If FUNCTION's data type is a TREE_LIST, then the tree list
+ is the list of possible methods that FUNCTION could conceivably
+ be. If the list of methods comes from a class, then it will be
+ a list of lists (where each element is associated with the class
+ that produced it), otherwise it will be a simple list (for
+ functions overloaded in global scope).
+
+ In the first case, TREE_VALUE (function) is the head of one of those
+ lists, and TREE_PURPOSE is the name of the function.
+
+ In the second case, TREE_PURPOSE (function) is the function's
+ name directly.
+
+ DECL is the class instance variable, usually CURRENT_CLASS_DECL. */
+
+/*
+ * [eichin:19911015.1726EST] actually return a possibly incomplete
+ * type
+ */
+tree
+build_x_function_call (function, params, decl)
+ tree function, params, decl;
+{
+ tree type;
+ int is_method;
+
+ if (function == error_mark_node)
+ return error_mark_node;
+
+ type = TREE_TYPE (function);
+ is_method = ((TREE_CODE (function) == TREE_LIST
+ && current_class_type != NULL_TREE
+ && IDENTIFIER_CLASS_VALUE (TREE_PURPOSE (function)) == function)
+ || TREE_CODE (function) == IDENTIFIER_NODE
+ || TREE_CODE (type) == METHOD_TYPE
+ || TYPE_PTRMEMFUNC_P (type));
+
+ /* Handle methods, friends, and overloaded functions, respectively. */
+ if (is_method)
+ {
+ if (TREE_CODE (function) == FUNCTION_DECL)
+ {
+ if (DECL_NAME (function))
+ function = DECL_NAME (function);
+ else
+ function = TYPE_IDENTIFIER (DECL_CLASS_CONTEXT (function));
+ }
+ else if (TREE_CODE (function) == TREE_LIST)
+ {
+#if 0
+ if (TREE_CODE (TREE_VALUE (function)) == TREE_LIST)
+ function = TREE_PURPOSE (TREE_VALUE (function));
+ else
+ function = TREE_PURPOSE (function);
+#else
+ my_friendly_assert (TREE_CODE (TREE_VALUE (function)) == FUNCTION_DECL, 312);
+ function = TREE_PURPOSE (function);
+#endif
+ }
+ else if (TREE_CODE (function) != IDENTIFIER_NODE)
+ {
+ if (TREE_CODE (function) == OFFSET_REF)
+ {
+ if (TREE_OPERAND (function, 0))
+ decl = TREE_OPERAND (function, 0);
+ }
+ /* Call via a pointer to member function. */
+ if (decl == NULL_TREE)
+ {
+ error ("pointer to member function called, but not in class scope");
+ return error_mark_node;
+ }
+ /* What other type of POINTER_TYPE could this be? */
+ if (TREE_CODE (TREE_TYPE (function)) != POINTER_TYPE
+ && ! TYPE_PTRMEMFUNC_P (TREE_TYPE (function))
+ && TREE_CODE (function) != OFFSET_REF)
+ function = build (OFFSET_REF, TREE_TYPE (type), NULL_TREE, function);
+ goto do_x_function;
+ }
+
+ /* This is an abbreviated method call.
+ It must go through here in case it is a virtual function.
+ @@ Perhaps this could be optimized. */
+
+ if (decl == NULL_TREE)
+ {
+ if (current_class_type == NULL_TREE)
+ {
+ error ("object missing in call to method `%s'",
+ IDENTIFIER_POINTER (function));
+ return error_mark_node;
+ }
+ /* Yow: call from a static member function. */
+ decl = build1 (NOP_EXPR, TYPE_POINTER_TO (current_class_type),
+ error_mark_node);
+ decl = build_indirect_ref (decl, NULL_PTR);
+ }
+
+ return build_method_call (decl, function, params,
+ NULL_TREE, LOOKUP_NORMAL);
+ }
+ else if (TREE_CODE (function) == COMPONENT_REF
+ && type == unknown_type_node)
+ {
+ /* Should we undo what was done in build_component_ref? */
+ if (TREE_CODE (TREE_PURPOSE (TREE_OPERAND (function, 1))) == TREE_VEC)
+ /* Get the name that build_component_ref hid. */
+ function = DECL_NAME (TREE_VALUE (TREE_OPERAND (function, 1)));
+ else
+ function = TREE_PURPOSE (TREE_OPERAND (function, 1));
+ return build_method_call (decl, function, params,
+ NULL_TREE, LOOKUP_NORMAL);
+ }
+ else if (TREE_CODE (function) == TREE_LIST)
+ {
+ if (TREE_VALUE (function) == NULL_TREE)
+ {
+ cp_error ("function `%D' declared overloaded, but no definitions appear with which to resolve it?!?",
+ TREE_PURPOSE (function));
+ return error_mark_node;
+ }
+ else
+ {
+ tree val = TREE_VALUE (function);
+
+ if (TREE_CODE (val) == TEMPLATE_DECL)
+ return build_overload_call_maybe
+ (function, params, LOOKUP_COMPLAIN, (struct candidate *)0);
+ else if (DECL_CHAIN (val) != NULL_TREE)
+ return build_overload_call
+ (function, params, LOOKUP_COMPLAIN, (struct candidate *)0);
+ else
+ my_friendly_abort (360);
+ }
+ }
+
+ do_x_function:
+ if (TREE_CODE (function) == OFFSET_REF)
+ {
+ /* If the component is a data element (or a virtual function), we play
+ games here to make things work. */
+ tree decl_addr;
+
+ if (TREE_OPERAND (function, 0))
+ decl = TREE_OPERAND (function, 0);
+ else
+ decl = C_C_D;
+
+ decl_addr = build_unary_op (ADDR_EXPR, decl, 0);
+ function = get_member_function_from_ptrfunc (&decl_addr, decl,
+ TREE_OPERAND (function, 1));
+ params = tree_cons (NULL_TREE, decl_addr, params);
+ return build_function_call (function, params);
+ }
+
+ type = TREE_TYPE (function);
+ if (type != error_mark_node)
+ {
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+
+ if (TYPE_LANG_SPECIFIC (type) && TYPE_OVERLOADS_CALL_EXPR (type))
+ return build_opfncall (CALL_EXPR, LOOKUP_NORMAL, function, params, NULL_TREE);
+ }
+
+ if (is_method)
+ {
+ tree fntype = TREE_TYPE (function);
+ tree ctypeptr;
+
+ /* Explicitly named method? */
+ if (TREE_CODE (function) == FUNCTION_DECL)
+ ctypeptr = TYPE_POINTER_TO (DECL_CLASS_CONTEXT (function));
+ /* Expression with ptr-to-method type? It could either be a plain
+ usage, or it might be a case where the ptr-to-method is being
+ passed in as an argument. */
+ else if (TYPE_PTRMEMFUNC_P (fntype))
+ {
+ tree rec = TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (fntype)));
+ ctypeptr = TYPE_POINTER_TO (rec);
+ }
+ /* Unexpected node type? */
+ else
+ my_friendly_abort (116);
+ if (decl == NULL_TREE)
+ {
+ if (current_function_decl
+ && DECL_STATIC_FUNCTION_P (current_function_decl))
+ error ("invalid call to member function needing `this' in static member function scope");
+ else
+ error ("pointer to member function called, but not in class scope");
+ return error_mark_node;
+ }
+ if (TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE
+ && ! TYPE_PTRMEMFUNC_P (TREE_TYPE (decl)))
+ {
+ decl = build_unary_op (ADDR_EXPR, decl, 0);
+ decl = convert_pointer_to (TREE_TYPE (ctypeptr), decl);
+ }
+ else
+ decl = build_c_cast (ctypeptr, decl);
+ params = tree_cons (NULL_TREE, decl, params);
+ }
+
+ return build_function_call (function, params);
+}
+
+/* Resolve a pointer to member function. INSTANCE is the object
+ instance to use, if the member points to a virtual member. */
+
+tree
+get_member_function_from_ptrfunc (instance_ptrptr, instance, function)
+ tree *instance_ptrptr;
+ tree instance;
+ tree function;
+{
+ if (TREE_CODE (function) == OFFSET_REF)
+ {
+ function = TREE_OPERAND (function, 1);
+ }
+
+ if (TYPE_PTRMEMFUNC_P (TREE_TYPE (function)))
+ {
+ tree fntype = TYPE_PTRMEMFUNC_FN_TYPE (TREE_TYPE (function));
+ tree index = save_expr (convert (integer_type_node,
+ build_component_ref (function,
+ index_identifier,
+ 0, 0)));
+ tree e1 = build (GT_EXPR, integer_type_node, index, integer_zero_node);
+ tree delta = build_component_ref (function, delta_identifier, 0, 0);
+ tree delta2 = DELTA2_FROM_PTRMEMFUNC (function);
+ tree e2;
+ tree e3;
+ tree aref, vtbl;
+
+ /* Convert down to the right base before using the instance. */
+ instance = convert_pointer_to_real (TYPE_METHOD_BASETYPE (TREE_TYPE (fntype)),
+ build_unary_op (ADDR_EXPR, instance, 0));
+ if (instance == error_mark_node)
+ return instance;
+
+ vtbl = convert_pointer_to (ptr_type_node, instance);
+ vtbl
+ = build (PLUS_EXPR,
+ build_pointer_type (build_pointer_type (vtable_entry_type)),
+ vtbl, convert (sizetype, delta2));
+ vtbl = build_indirect_ref (vtbl, NULL_PTR);
+ aref = build_array_ref (vtbl, size_binop (MINUS_EXPR,
+ index,
+ integer_one_node));
+ if (! flag_vtable_thunks)
+ {
+ aref = save_expr (aref);
+
+ /* Save the intermediate result in a SAVE_EXPR so we don't have to
+ compute each component of the virtual function pointer twice. */
+ if (/* !building_cleanup && */ TREE_CODE (aref) == INDIRECT_REF)
+ TREE_OPERAND (aref, 0) = save_expr (TREE_OPERAND (aref, 0));
+
+ delta = build (PLUS_EXPR, integer_type_node,
+ build_conditional_expr (e1, build_component_ref (aref, delta_identifier, 0, 0), integer_zero_node),
+ delta);
+ }
+
+ *instance_ptrptr = build (PLUS_EXPR, TREE_TYPE (*instance_ptrptr),
+ *instance_ptrptr,
+ convert (integer_type_node, delta));
+ if (flag_vtable_thunks)
+ e2 = aref;
+ else
+ e2 = build_component_ref (aref, pfn_identifier, 0, 0);
+
+ e3 = PFN_FROM_PTRMEMFUNC (function);
+ TREE_TYPE (e2) = TREE_TYPE (e3);
+ function = build_conditional_expr (e1, e2, e3);
+ }
+ return function;
+}
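+
+/* For illustration (the non-thunk case): a pointer to member
+ function is a record holding index, delta and pfn/delta2 fields,
+ and the expansion built above for a call through PMF on an
+ instance is roughly
+
+ index > 0 ? (*(vtbl at instance + delta2))[index - 1].pfn
+ : pmf.pfn
+
+ with the instance pointer also displaced by delta before the call. */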
+
+tree
+build_function_call_real (function, params, require_complete, flags)
+ tree function, params;
+ int require_complete, flags;
+{
+ register tree fntype, fndecl;
+ register tree value_type;
+ register tree coerced_params;
+ tree name = NULL_TREE, assembler_name = NULL_TREE;
+ int is_method;
+
+ /* build_c_cast puts on a NOP_EXPR to make the result not an lvalue.
+ Strip such NOP_EXPRs, since FUNCTION is used in non-lvalue context. */
+ if (TREE_CODE (function) == NOP_EXPR
+ && TREE_TYPE (function) == TREE_TYPE (TREE_OPERAND (function, 0)))
+ function = TREE_OPERAND (function, 0);
+
+ if (TREE_CODE (function) == FUNCTION_DECL)
+ {
+ name = DECL_NAME (function);
+ assembler_name = DECL_ASSEMBLER_NAME (function);
+
+ GNU_xref_call (current_function_decl,
+ IDENTIFIER_POINTER (name ? name
+ : TYPE_IDENTIFIER (DECL_CLASS_CONTEXT (function))));
+ assemble_external (function);
+ fndecl = function;
+
+ /* ANSI C++ forbids calling `main' from within the program;
+ diagnose it when pedantic. */
+ if (pedantic
+ && name
+ && IDENTIFIER_LENGTH (name) == 4
+ && ! strcmp (IDENTIFIER_POINTER (name), "main")
+ && DECL_CONTEXT (function) == NULL_TREE)
+ {
+ pedwarn ("ANSI C++ forbids calling `main' from within program");
+ }
+
+ /* Differs from default_conversion by not setting TREE_ADDRESSABLE
+ (because calling an inline function does not mean the function
+ needs to be separately compiled). */
+
+ if (DECL_INLINE (function))
+ {
+ fntype = build_type_variant (TREE_TYPE (function),
+ TREE_READONLY (function),
+ TREE_THIS_VOLATILE (function));
+ function = build1 (ADDR_EXPR, build_pointer_type (fntype), function);
+ }
+ else
+ {
+ assemble_external (function);
+ TREE_USED (function) = 1;
+ function = default_conversion (function);
+ }
+ }
+ else
+ {
+ fndecl = NULL_TREE;
+
+ /* Convert anything with function type to a pointer-to-function. */
+ if (function == error_mark_node)
+ return error_mark_node;
+ function = default_conversion (function);
+ }
+
+ fntype = TREE_TYPE (function);
+
+ if (TYPE_PTRMEMFUNC_P (fntype))
+ {
+ tree instance_ptr = build_unary_op (ADDR_EXPR, C_C_D, 0);
+ fntype = TYPE_PTRMEMFUNC_FN_TYPE (fntype);
+ function = get_member_function_from_ptrfunc (&instance_ptr, C_C_D, function);
+ }
+
+ is_method = (TREE_CODE (fntype) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (fntype)) == METHOD_TYPE);
+
+ if (!((TREE_CODE (fntype) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (fntype)) == FUNCTION_TYPE)
+ || is_method))
+ {
+ error ("called object is not a function");
+ return error_mark_node;
+ }
+
+ /* fntype now gets the type of function pointed to. */
+ fntype = TREE_TYPE (fntype);
+
+ /* Convert the parameters to the types declared in the
+ function prototype, or apply default promotions. */
+
+ if (flags & LOOKUP_COMPLAIN)
+ coerced_params = convert_arguments (NULL_TREE, TYPE_ARG_TYPES (fntype),
+ params, fndecl, LOOKUP_NORMAL);
+ else
+ coerced_params = convert_arguments (NULL_TREE, TYPE_ARG_TYPES (fntype),
+ params, fndecl, 0);
+
+ /* Check for errors in format strings. */
+
+ if (warn_format && (name || assembler_name))
+ check_function_format (name, assembler_name, coerced_params);
+
+ /* Recognize certain built-in functions so we can make tree-codes
+ other than CALL_EXPR. We do this when it enables fold-const.c
+ to do something useful. */
+
+ if (TREE_CODE (function) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL
+ && DECL_BUILT_IN (TREE_OPERAND (function, 0)))
+ switch (DECL_FUNCTION_CODE (TREE_OPERAND (function, 0)))
+ {
+ case BUILT_IN_ABS:
+ case BUILT_IN_LABS:
+ case BUILT_IN_FABS:
+ if (coerced_params == 0)
+ return integer_zero_node;
+ return build_unary_op (ABS_EXPR, TREE_VALUE (coerced_params), 0);
+ }
+
+ /* C++ */
+ value_type = TREE_TYPE (fntype) ? TREE_TYPE (fntype) : void_type_node;
+ {
+ register tree result =
+ build (CALL_EXPR, value_type,
+ function, coerced_params, NULL_TREE);
+
+ TREE_SIDE_EFFECTS (result) = 1;
+ /* Remove this sometime. */
+ TREE_RAISES (result) |= !! TYPE_RAISES_EXCEPTIONS (fntype);
+ if (! require_complete)
+ return result;
+ if (value_type == void_type_node)
+ return result;
+ return require_complete_type (result);
+ }
+}
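+
+/* For illustration of the builtin handling above: a call such as
+ `abs (n)', where FUNCTION resolves to the builtin abs, is not
+ emitted as a CALL_EXPR at all but rebuilt as
+ build_unary_op (ABS_EXPR, n, 0), which fold-const.c can simplify. */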
+
+tree
+build_function_call (function, params)
+ tree function, params;
+{
+ return build_function_call_real (function, params, 1, LOOKUP_NORMAL);
+}
+
+tree
+build_function_call_maybe (function, params)
+ tree function, params;
+{
+ return build_function_call_real (function, params, 0, 0);
+}
+
+
+/* Convert the actual parameter expressions in the list VALUES
+ to the types in the list TYPELIST.
+ If TYPELIST is exhausted, or when an element has NULL as its type,
+ perform the default conversions.
+
+ RETURN_LOC is the location of the return value, if known, NULL_TREE
+ otherwise. Knowing it is useful because it lets us initialize the
+ return value directly instead of creating a temporary variable. If
+ we are not eliding constructors, we set this to NULL_TREE to disable
+ that optimization.
+
+ NAME is an IDENTIFIER_NODE or 0. It is used only for error messages.
+
+ This is also where warnings about wrong number of args are generated.
+
+ Return a list of expressions for the parameters as converted.
+
+ Both VALUES and the returned value are chains of TREE_LIST nodes
+ with the elements of the list in the TREE_VALUE slots of those nodes.
+
+ In C++, unspecified trailing parameters can be filled in with their
+ default arguments, if such were specified. Do so here. */
+
+tree
+convert_arguments (return_loc, typelist, values, fndecl, flags)
+ tree return_loc, typelist, values, fndecl;
+ int flags;
+{
+ extern tree gc_protect_fndecl;
+ register tree typetail, valtail;
+ register tree result = NULL_TREE;
+ char *called_thing;
+ int maybe_raises = 0;
+ int i = 0;
+
+ if (! flag_elide_constructors)
+ return_loc = 0;
+
+ if (fndecl)
+ {
+ if (TREE_CODE (TREE_TYPE (fndecl)) == METHOD_TYPE)
+ {
+ if (DECL_NAME (fndecl) == NULL_TREE
+ || IDENTIFIER_HAS_TYPE_VALUE (DECL_NAME (fndecl)))
+ called_thing = "constructor";
+ else
+ called_thing = "member function";
+ }
+ else
+ called_thing = "function";
+ }
+
+ for (valtail = values, typetail = typelist;
+ valtail;
+ valtail = TREE_CHAIN (valtail), i++)
+ {
+ register tree type = typetail ? TREE_VALUE (typetail) : 0;
+ register tree val = TREE_VALUE (valtail);
+
+ if (type == void_type_node)
+ {
+ if (fndecl)
+ {
+ char *buf = (char *)alloca (40 + strlen (called_thing));
+ sprintf (buf, "too many arguments to %s `%%s'", called_thing);
+ error_with_decl (fndecl, buf);
+ error ("at this point in file");
+ }
+ else
+ error ("too many arguments to function");
+ /* In case anybody wants to know if this argument
+ list is valid. */
+ if (result)
+ TREE_TYPE (tree_last (result)) = error_mark_node;
+ break;
+ }
+
+ /* The tree type of the parameter being passed may not yet be
+ known. In this case, its type is TYPE_UNKNOWN, and will
+ be instantiated by the type given by TYPE. If TYPE
+ is also NULL, the tree type of VAL is ERROR_MARK_NODE. */
+ if (type && type_unknown_p (val))
+ val = require_instantiated_type (type, val, integer_zero_node);
+ else if (type_unknown_p (val))
+ {
+ /* Strip the `&' from an overloaded FUNCTION_DECL. */
+ if (TREE_CODE (val) == ADDR_EXPR)
+ val = TREE_OPERAND (val, 0);
+ if (TREE_CODE (val) == TREE_LIST
+ && TREE_CHAIN (val) == NULL_TREE
+ && TREE_TYPE (TREE_VALUE (val)) != NULL_TREE
+ && (TREE_TYPE (val) == unknown_type_node
+ || DECL_CHAIN (TREE_VALUE (val)) == NULL_TREE))
+ /* Instantiates automatically. */
+ val = TREE_VALUE (val);
+ else
+ {
+ error ("insufficient type information in parameter list");
+ val = integer_zero_node;
+ }
+ }
+ else if (TREE_CODE (val) == OFFSET_REF
+ && TREE_CODE (TREE_TYPE (val)) == METHOD_TYPE)
+ {
+ /* This is unclean. Should be handled elsewhere. */
+ val = build_unary_op (ADDR_EXPR, val, 0);
+ }
+ else if (TREE_CODE (val) == OFFSET_REF)
+ val = resolve_offset_ref (val);
+
+ {
+#if 0
+ /* This code forces the assumption that if we have a ptr-to-func
+ type in an arglist, that every routine that wants to check
+ its validity has done so, and thus we need not do any
+ more conversion. I don't remember why this is necessary. */
+ else if (TREE_CODE (ttype) == FUNCTION_TYPE
+ && (type == NULL
+ || TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (type)) == VOID_TYPE))
+ {
+ type = build_pointer_type (ttype);
+ }
+#endif
+ }
+
+ /* build_c_cast puts on a NOP_EXPR to make the result not an lvalue.
+ Strip such NOP_EXPRs, since VAL is used in non-lvalue context. */
+ if (TREE_CODE (val) == NOP_EXPR
+ && TREE_TYPE (val) == TREE_TYPE (TREE_OPERAND (val, 0))
+ && (type == 0 || TREE_CODE (type) != REFERENCE_TYPE))
+ val = TREE_OPERAND (val, 0);
+
+ if ((type == 0 || TREE_CODE (type) != REFERENCE_TYPE)
+ && (TREE_CODE (TREE_TYPE (val)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (val)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (val)) == METHOD_TYPE))
+ val = default_conversion (val);
+
+ val = require_complete_type (val);
+
+ if (val == error_mark_node)
+ continue;
+
+ maybe_raises |= TREE_RAISES (val);
+
+ if (type != 0)
+ {
+ /* Formal parm type is specified by a function prototype. */
+ tree parmval;
+
+ if (TYPE_SIZE (type) == 0)
+ {
+ error ("parameter type of called function is incomplete");
+ parmval = val;
+ }
+ else
+ {
+#ifdef PROMOTE_PROTOTYPES
+ /* Rather than truncating and then reextending,
+ convert directly to int, if that's the type we will want. */
+ if (! flag_traditional
+ && (TREE_CODE (type) == INTEGER_TYPE
+ || TREE_CODE (type) == ENUMERAL_TYPE)
+ && (TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)))
+ type = integer_type_node;
+#endif
+ parmval = convert_for_initialization (return_loc, type, val, flags,
+ "argument passing", fndecl, i);
+#ifdef PROMOTE_PROTOTYPES
+ if ((TREE_CODE (type) == INTEGER_TYPE
+ || TREE_CODE (type) == ENUMERAL_TYPE)
+ && (TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)))
+ parmval = default_conversion (parmval);
+#endif
+ }
+ result = tree_cons (NULL_TREE, parmval, result);
+ }
+ else
+ {
+ if (TREE_CODE (TREE_TYPE (val)) == REFERENCE_TYPE)
+ val = convert_from_reference (val);
+
+ if (TREE_CODE (TREE_TYPE (val)) == REAL_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (val))
+ < TYPE_PRECISION (double_type_node)))
+ /* Convert `float' to `double'. */
+ result = tree_cons (NULL_TREE, convert (double_type_node, val), result);
+ else if (TYPE_LANG_SPECIFIC (TREE_TYPE (val))
+ && (TYPE_HAS_INIT_REF (TREE_TYPE (val))
+ || TYPE_HAS_ASSIGN_REF (TREE_TYPE (val))))
+ {
+ cp_warning ("cannot pass objects of type `%T' through `...'",
+ TREE_TYPE (val));
+ result = tree_cons (NULL_TREE, val, result);
+ }
+ else
+ /* Convert `short' and `char' to full-size `int'. */
+ result = tree_cons (NULL_TREE, default_conversion (val), result);
+ }
+
+ if (flag_gc
+ /* There are certain functions for which we don't need
+ to protect our arguments. GC_PROTECT_FNDECL is one. */
+ && fndecl != gc_protect_fndecl
+ && type_needs_gc_entry (TREE_TYPE (TREE_VALUE (result)))
+ && ! value_safe_from_gc (NULL_TREE, TREE_VALUE (result)))
+ /* This will build a temporary variable whose cleanup is
+ to clear the obstack entry. */
+ TREE_VALUE (result) = protect_value_from_gc (NULL_TREE,
+ TREE_VALUE (result));
+
+ if (typetail)
+ typetail = TREE_CHAIN (typetail);
+ }
+
+ if (typetail != 0 && typetail != void_list_node)
+ {
+ /* See if there are default arguments that can be used */
+ if (TREE_PURPOSE (typetail))
+ {
+ while (typetail != void_list_node)
+ {
+ tree type = TREE_VALUE (typetail);
+ tree val = TREE_PURPOSE (typetail);
+ tree parmval;
+
+ if (val == NULL_TREE)
+ parmval = error_mark_node;
+ else if (TREE_CODE (val) == CONSTRUCTOR)
+ {
+ parmval = digest_init (type, val, (tree *)0);
+ parmval = convert_for_initialization (return_loc, type, parmval, flags,
+ "default constructor", fndecl, i);
+ }
+ else
+ {
+ /* This could get clobbered by the following call. */
+ if (TREE_HAS_CONSTRUCTOR (val))
+ val = copy_node (val);
+
+ parmval = convert_for_initialization (return_loc, type, val, flags,
+ "default argument", fndecl, i);
+#ifdef PROMOTE_PROTOTYPES
+ if ((TREE_CODE (type) == INTEGER_TYPE
+ || TREE_CODE (type) == ENUMERAL_TYPE)
+ && (TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)))
+ parmval = default_conversion (parmval);
+#endif
+ }
+ maybe_raises |= TREE_RAISES (parmval);
+
+ if (flag_gc
+ && type_needs_gc_entry (TREE_TYPE (parmval))
+ && ! value_safe_from_gc (NULL_TREE, parmval))
+ parmval = protect_value_from_gc (NULL_TREE, parmval);
+
+ result = tree_cons (0, parmval, result);
+ typetail = TREE_CHAIN (typetail);
+ /* ends with `...'. */
+ if (typetail == NULL_TREE)
+ break;
+ }
+ }
+ else
+ {
+ if (fndecl)
+ {
+ char *buf = (char *)alloca (32 + strlen (called_thing));
+ sprintf (buf, "too few arguments to %s `%%#D'", called_thing);
+ cp_error_at (buf, fndecl);
+ error ("at this point in file");
+ }
+ else
+ error ("too few arguments to function");
+ return error_mark_list;
+ }
+ }
+ if (result)
+ TREE_RAISES (result) = maybe_raises;
+
+ return nreverse (result);
+}
+
+/* Build a binary-operation expression, after performing default
+ conversions on the operands. CODE is the kind of expression to build. */
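+
+ /* Added note (not in the original source): the LOOKUP_SPECULATIVELY
+ probe below checks whether a user-defined operator exists without
+ committing to it; only if none is found do we fall back to the
+ built-in operator via build_binary_op. */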
+
+tree
+build_x_binary_op (code, arg1, arg2)
+ enum tree_code code;
+ tree arg1, arg2;
+{
+ tree rval = build_opfncall (code, LOOKUP_SPECULATIVELY,
+ arg1, arg2, NULL_TREE);
+ if (rval)
+ return build_opfncall (code, LOOKUP_NORMAL, arg1, arg2, NULL_TREE);
+ if (code == MEMBER_REF)
+ return build_m_component_ref (build_indirect_ref (arg1, NULL_PTR),
+ arg2);
+ return build_binary_op (code, arg1, arg2, 1);
+}
+
+tree
+build_binary_op (code, arg1, arg2, convert_p)
+ enum tree_code code;
+ tree arg1, arg2;
+ int convert_p;
+{
+ tree type1, type2;
+ tree args[2];
+
+ args[0] = arg1;
+ args[1] = arg2;
+
+ if (convert_p)
+ {
+ args[0] = default_conversion (args[0]);
+ args[1] = default_conversion (args[1]);
+
+ if (type_unknown_p (args[0]))
+ {
+ args[0] = instantiate_type (TREE_TYPE (args[1]), args[0], 1);
+ args[0] = default_conversion (args[0]);
+ }
+ else if (type_unknown_p (args[1]))
+ {
+ args[1] = require_instantiated_type (TREE_TYPE (args[0]),
+ args[1],
+ error_mark_node);
+ args[1] = default_conversion (args[1]);
+ }
+
+ type1 = TREE_TYPE (args[0]);
+ type2 = TREE_TYPE (args[1]);
+
+ if (IS_AGGR_TYPE_2 (type1, type2) && ! TYPE_PTRMEMFUNC_P (type1))
+ {
+ /* Try to convert this to something reasonable. */
+ if (! build_default_binary_type_conversion(code, &args[0], &args[1]))
+ return error_mark_node;
+ }
+ else if ((IS_AGGR_TYPE (type1) && ! TYPE_PTRMEMFUNC_P (type1))
+ || (IS_AGGR_TYPE (type2) && ! TYPE_PTRMEMFUNC_P (type2)))
+ {
+ int convert_index = IS_AGGR_TYPE (type2);
+ /* Avoid being tripped up by things like (ARG1 != 0). */
+ tree types[2], try;
+
+ types[0] = type1; types[1] = type2;
+ if (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR)
+ try = build_type_conversion (code, bool_type_node,
+ args[convert_index], 1);
+ else
+ {
+ try = build_type_conversion (code, types[convert_index ^ 1],
+ args[convert_index], 1);
+
+ if (try == 0
+ && args[1] == integer_zero_node
+ && (code == NE_EXPR || code == EQ_EXPR))
+ try = build_type_conversion (code, ptr_type_node,
+ args[convert_index], 1);
+ }
+
+ if (try == 0)
+ {
+ cp_error ("no match for `%O(%#T, %#T)'", code,
+ TREE_TYPE (arg1), TREE_TYPE (arg2));
+ return error_mark_node;
+ }
+ if (try == error_mark_node)
+ error ("ambiguous pointer conversion");
+ args[convert_index] = try;
+ }
+ }
+ return build_binary_op_nodefault (code, args[0], args[1], code);
+}
+
+/* Build a binary-operation expression without default conversions.
+ CODE is the kind of expression to build.
+ This function differs from `build' in several ways:
+ the data type of the result is computed and recorded in it,
+ warnings are generated if arg data types are invalid,
+ special handling for addition and subtraction of pointers is known,
+ and some optimization is done (operations on narrow ints
+ are done in the narrower type when that gives the same result).
+ Constant folding is also done before the result is returned.
+
+ ERROR_CODE is the code that determines what to say in error messages.
+ It is usually, but not always, the same as CODE.
+
+ Note that the operands will never have enumeral types
+ because either they have just had the default conversions performed
+ or they have both just been converted to some other type in which
+ the arithmetic is to be done.
+
+ C++: must do special pointer arithmetic when implementing
+ multiple inheritance, and deal with pointer to member functions. */
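+
+ /* Example (added commentary, not in the original source): for
+ `short a, b;' the operands of `a < b' arrive here promoted to int;
+ SHORT_COMPARE lets shorten_compare compare the original shorts
+ when doing so cannot change the result. */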
+
+tree
+build_binary_op_nodefault (code, orig_op0, orig_op1, error_code)
+ enum tree_code code;
+ tree orig_op0, orig_op1;
+ enum tree_code error_code;
+{
+ tree op0, op1;
+ register enum tree_code code0, code1;
+ tree type0, type1;
+
+ /* Expression code to give to the expression when it is built.
+ Normally this is CODE, which is what the caller asked for,
+ but in some special cases we change it. */
+ register enum tree_code resultcode = code;
+
+ /* Data type in which the computation is to be performed.
+ In the simplest cases this is the common type of the arguments. */
+ register tree result_type = NULL;
+
+ /* Nonzero means operands have already been type-converted
+ in whatever way is necessary.
+ Zero means they need to be converted to RESULT_TYPE. */
+ int converted = 0;
+
+ /* Nonzero means after finally constructing the expression
+ give it this type. Otherwise, give it type RESULT_TYPE. */
+ tree final_type = 0;
+
+ /* Nonzero if this is an operation like MIN or MAX which can
+ safely be computed in short if both args are promoted shorts.
+ Also implies COMMON.
+ -1 indicates a bitwise operation; this makes a difference
+ in the exact conditions for when it is safe to do the operation
+ in a narrower mode. */
+ int shorten = 0;
+
+ /* Nonzero if this is a comparison operation;
+ if both args are promoted shorts, compare the original shorts.
+ Also implies COMMON. */
+ int short_compare = 0;
+
+ /* Nonzero if this is a right-shift operation, which can be computed on the
+ original short and then promoted if the operand is a promoted short. */
+ int short_shift = 0;
+
+ /* Nonzero means set RESULT_TYPE to the common type of the args. */
+ int common = 0;
+
+ /* Apply default conversions. */
+ op0 = default_conversion (orig_op0);
+ op1 = default_conversion (orig_op1);
+
+ type0 = TREE_TYPE (op0);
+ type1 = TREE_TYPE (op1);
+
+ /* The expression codes of the data types of the arguments tell us
+ whether the arguments are integers, floating, pointers, etc. */
+ code0 = TREE_CODE (type0);
+ code1 = TREE_CODE (type1);
+
+ /* Strip NON_LVALUE_EXPRs, etc., since we aren't using as an lvalue. */
+ STRIP_TYPE_NOPS (op0);
+ STRIP_TYPE_NOPS (op1);
+
+ /* If an error was already reported for one of the arguments,
+ avoid reporting another error. */
+
+ if (code0 == ERROR_MARK || code1 == ERROR_MARK)
+ return error_mark_node;
+
+ switch (code)
+ {
+ case PLUS_EXPR:
+ /* Handle the pointer + int case. */
+ if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ return pointer_int_sum (PLUS_EXPR, op0, op1);
+ else if (code1 == POINTER_TYPE && code0 == INTEGER_TYPE)
+ return pointer_int_sum (PLUS_EXPR, op1, op0);
+ else
+ common = 1;
+ break;
+
+ case MINUS_EXPR:
+ /* Subtraction of two similar pointers.
+ We must subtract them as integers, then divide by object size. */
+ if (code0 == POINTER_TYPE && code1 == POINTER_TYPE
+ && comp_target_types (type0, type1, 1))
+ return pointer_diff (op0, op1);
+ /* Handle pointer minus int. Just like pointer plus int. */
+ else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ return pointer_int_sum (MINUS_EXPR, op0, op1);
+ else
+ common = 1;
+ break;
+
+ case MULT_EXPR:
+ common = 1;
+ break;
+
+ case TRUNC_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == REAL_TYPE))
+ {
+ if (TREE_CODE (op1) == INTEGER_CST && integer_zerop (op1))
+ cp_warning ("division by zero in `%E / 0'", op0);
+ else if (TREE_CODE (op1) == REAL_CST && real_zerop (op1))
+ cp_warning ("division by zero in `%E / 0.'", op0);
+
+ if (!(code0 == INTEGER_TYPE && code1 == INTEGER_TYPE))
+ resultcode = RDIV_EXPR;
+ else
+ /* When dividing two signed integers, we have to promote to int,
+ unless we divide by a constant != -1. Note that default
+ conversion will have been performed on the operands at this
+ point, so we have to dig out the original type to find out if
+ it was unsigned. */
+ shorten = ((TREE_CODE (op0) == NOP_EXPR
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0))))
+ || (TREE_CODE (op1) == INTEGER_CST
+ && (TREE_INT_CST_LOW (op1) != -1
+ || TREE_INT_CST_HIGH (op1) != -1)));
+ common = 1;
+ }
+ break;
+
+ case BIT_AND_EXPR:
+ case BIT_ANDTC_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ shorten = -1;
+ /* If one operand is a constant, and the other is a short type
+ that has been converted to an int,
+ really do the work in the short type and then convert the
+ result to int. If we are lucky, the constant will be 0 or 1
+ in the short type, making the entire operation go away. */
+ if (TREE_CODE (op0) == INTEGER_CST
+ && TREE_CODE (op1) == NOP_EXPR
+ && TYPE_PRECISION (type1) > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op1, 0)))
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op1, 0))))
+ {
+ final_type = result_type;
+ op1 = TREE_OPERAND (op1, 0);
+ result_type = TREE_TYPE (op1);
+ }
+ if (TREE_CODE (op1) == INTEGER_CST
+ && TREE_CODE (op0) == NOP_EXPR
+ && TYPE_PRECISION (type0) > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op0, 0)))
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0))))
+ {
+ final_type = result_type;
+ op0 = TREE_OPERAND (op0, 0);
+ result_type = TREE_TYPE (op0);
+ }
+ break;
+
+ case TRUNC_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ if (code1 == INTEGER_TYPE && integer_zerop (op1))
+ cp_warning ("division by zero in `%E %% 0'", op0);
+ else if (code1 == REAL_TYPE && real_zerop (op1))
+ cp_warning ("division by zero in `%E %% 0.'", op0);
+
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ {
+ /* Although it would be tempting to shorten always here, that loses
+ on some targets, since the modulo instruction is undefined if the
+ quotient can't be represented in the computation mode. We shorten
+ only if unsigned or if dividing by something we know != -1. */
+ shorten = ((TREE_CODE (op0) == NOP_EXPR
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0))))
+ || (TREE_CODE (op1) == INTEGER_CST
+ && (TREE_INT_CST_LOW (op1) != -1
+ || TREE_INT_CST_HIGH (op1) != -1)));
+ common = 1;
+ }
+ break;
+
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ case TRUTH_AND_EXPR:
+ case TRUTH_OR_EXPR:
+ result_type = bool_type_node;
+ op0 = bool_truthvalue_conversion (op0);
+ op1 = bool_truthvalue_conversion (op1);
+ converted = 1;
+ break;
+
+ /* Shift operations: result has same type as first operand;
+ always convert second operand to int.
+ Also set SHORT_SHIFT if shifting rightward. */
+
+ case RSHIFT_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ {
+ result_type = type0;
+ if (TREE_CODE (op1) == INTEGER_CST)
+ {
+ if (tree_int_cst_lt (op1, integer_zero_node))
+ warning ("right shift count is negative");
+ else
+ {
+ if (TREE_INT_CST_LOW (op1) | TREE_INT_CST_HIGH (op1))
+ short_shift = 1;
+ if (TREE_INT_CST_HIGH (op1) != 0
+ || ((unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (op1)
+ >= TYPE_PRECISION (type0)))
+ warning ("right shift count >= width of type");
+ }
+ }
+ /* Convert the shift-count to an integer, regardless of
+ size of value being shifted. */
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
+ op1 = convert (integer_type_node, op1);
+ /* Avoid converting op1 to result_type later. */
+ converted = 1;
+ }
+ break;
+
+ case LSHIFT_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ {
+ result_type = type0;
+ if (TREE_CODE (op1) == INTEGER_CST)
+ {
+ if (tree_int_cst_lt (op1, integer_zero_node))
+ warning ("left shift count is negative");
+ else if (TREE_INT_CST_HIGH (op1) != 0
+ || ((unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (op1)
+ >= TYPE_PRECISION (type0)))
+ warning ("left shift count >= width of type");
+ }
+ /* Convert the shift-count to an integer, regardless of
+ size of value being shifted. */
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
+ op1 = convert (integer_type_node, op1);
+ /* Avoid converting op1 to result_type later. */
+ converted = 1;
+ }
+ break;
+
+ case RROTATE_EXPR:
+ case LROTATE_EXPR:
+ if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
+ {
+ result_type = type0;
+ if (TREE_CODE (op1) == INTEGER_CST)
+ {
+ if (tree_int_cst_lt (op1, integer_zero_node))
+ warning ("%s rotate count is negative",
+ (code == LROTATE_EXPR) ? "left" : "right");
+ else if (TREE_INT_CST_HIGH (op1) != 0
+ || ((unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (op1)
+ >= TYPE_PRECISION (type0)))
+ warning ("%s rotate count >= width of type",
+ (code == LROTATE_EXPR) ? "left" : "right");
+ }
+ /* Convert the shift-count to an integer, regardless of
+ size of value being shifted. */
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
+ op1 = convert (integer_type_node, op1);
+ }
+ break;
+
+ case EQ_EXPR:
+ case NE_EXPR:
+ result_type = bool_type_node;
+ converted = 1;
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == REAL_TYPE))
+ short_compare = 1;
+ else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
+ {
+ register tree tt0 = TYPE_MAIN_VARIANT (TREE_TYPE (type0));
+ register tree tt1 = TYPE_MAIN_VARIANT (TREE_TYPE (type1));
+ /* Anything compares with void *. void * compares with anything.
+ Otherwise, the targets must be the same. */
+ if (tt0 != tt1 && TREE_CODE (tt0) == RECORD_TYPE
+ && TREE_CODE (tt1) == RECORD_TYPE)
+ {
+ tree base = common_base_type (tt0, tt1);
+ if (base == NULL_TREE)
+ cp_warning ("comparison of distinct object pointer types `%T' and `%T'", type0, type1);
+ else if (base == error_mark_node)
+ {
+ cp_error ("comparison of pointer types `%T' and `%T' requires conversion to ambiguous supertype", type0, type1);
+ return error_mark_node;
+ }
+ else
+ {
+ if (integer_zerop (op0))
+ op0 = null_pointer_node;
+ else
+ op0 = convert_pointer_to (base, op0);
+ if (integer_zerop (op1))
+ op1 = null_pointer_node;
+ else
+ op1 = convert_pointer_to (base, op1);
+ }
+ }
+ else if (comp_target_types (type0, type1, 1))
+ ;
+ else if (tt0 == void_type_node)
+ {
+ if (pedantic && TREE_CODE (tt1) == FUNCTION_TYPE
+ && tree_int_cst_lt (TYPE_SIZE (type0), TYPE_SIZE (type1)))
+ pedwarn ("ANSI C++ forbids comparison of `void *' with function pointer");
+ }
+ else if (tt1 == void_type_node)
+ {
+ if (pedantic && TREE_CODE (tt0) == FUNCTION_TYPE
+ && tree_int_cst_lt (TYPE_SIZE (type1), TYPE_SIZE (type0)))
+ pedwarn ("ANSI C++ forbids comparison of `void *' with function pointer");
+ }
+ else if ((TYPE_SIZE (tt0) != 0) != (TYPE_SIZE (tt1) != 0))
+ cp_pedwarn ("comparison of %scomplete and %scomplete pointers `%T' and `%T'",
+ TYPE_SIZE (tt0) == 0 ? "in" : "",
+ TYPE_SIZE (tt1) == 0 ? "in" : "",
+ type0, type1);
+ else
+ cp_pedwarn ("comparison of distinct pointer types `%T' and `%T' lacks a cast",
+ type0, type1);
+ }
+ else if (code0 == POINTER_TYPE && TREE_CODE (op1) == INTEGER_CST
+ && integer_zerop (op1))
+ op1 = null_pointer_node;
+ else if (code1 == POINTER_TYPE && TREE_CODE (op0) == INTEGER_CST
+ && integer_zerop (op0))
+ op0 = null_pointer_node;
+ else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ {
+ error ("ANSI C++ forbids comparison between pointer and integer");
+ op1 = convert (TREE_TYPE (op0), op1);
+ }
+ else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE)
+ {
+ error ("ANSI C++ forbids comparison between pointer and integer");
+ op0 = convert (TREE_TYPE (op1), op0);
+ }
+ else if (TYPE_PTRMEMFUNC_P (type0) && TREE_CODE (op1) == INTEGER_CST
+ && integer_zerop (op1))
+ {
+ op0 = build_component_ref (op0, index_identifier, 0, 0);
+ op1 = integer_zero_node;
+ }
+ else if (TYPE_PTRMEMFUNC_P (type1) && TREE_CODE (op0) == INTEGER_CST
+ && integer_zerop (op0))
+ {
+ op0 = build_component_ref (op1, index_identifier, 0, 0);
+ op1 = integer_zero_node;
+ }
+ else if (TYPE_PTRMEMFUNC_P (type0) && TYPE_PTRMEMFUNC_P (type1)
+ && (TYPE_PTRMEMFUNC_FN_TYPE (type0)
+ == TYPE_PTRMEMFUNC_FN_TYPE (type1)))
+ {
+ /* The code we generate for the test is:
+
+ (op0.index == op1.index
+ && ((op1.index != -1 && op0.delta2 == op1.delta2)
+ || op0.pfn == op1.pfn)) */
+
+ tree index0 = build_component_ref (op0, index_identifier, 0, 0);
+ tree index1 = save_expr (build_component_ref (op1, index_identifier, 0, 0));
+ tree pfn0 = PFN_FROM_PTRMEMFUNC (op0);
+ tree pfn1 = PFN_FROM_PTRMEMFUNC (op1);
+ tree delta20 = DELTA2_FROM_PTRMEMFUNC (op0);
+ tree delta21 = DELTA2_FROM_PTRMEMFUNC (op1);
+ tree e1, e2, e3;
+ tree integer_neg_one_node
+ = size_binop (MINUS_EXPR, integer_zero_node, integer_one_node);
+ e1 = build_binary_op (EQ_EXPR, index0, index1, 1);
+ e2 = build_binary_op (NE_EXPR, index1, integer_neg_one_node, 1);
+ e2 = build_binary_op (TRUTH_ANDIF_EXPR, e2, build_binary_op (EQ_EXPR, delta20, delta21, 1), 1);
+ e3 = build_binary_op (EQ_EXPR, pfn0, pfn1, 1);
+ e2 = build_binary_op (TRUTH_ORIF_EXPR, e2, e3, 1);
+ e2 = build_binary_op (TRUTH_ANDIF_EXPR, e1, e2, 1);
+ if (code == EQ_EXPR)
+ return e2;
+ return build_binary_op (EQ_EXPR, e2, integer_zero_node, 1);
+ }
+ else if (TYPE_PTRMEMFUNC_P (type0)
+ && TYPE_PTRMEMFUNC_FN_TYPE (type0) == type1)
+ {
+ tree index0 = build_component_ref (op0, index_identifier, 0, 0);
+ tree index1;
+ tree pfn0 = PFN_FROM_PTRMEMFUNC (op0);
+ tree delta20 = DELTA2_FROM_PTRMEMFUNC (op0);
+ tree delta21 = integer_zero_node;
+ tree e1, e2, e3;
+ tree integer_neg_one_node
+ = size_binop (MINUS_EXPR, integer_zero_node, integer_one_node);
+ if (TREE_CODE (TREE_OPERAND (op1, 0)) == FUNCTION_DECL
+ && DECL_VINDEX (TREE_OPERAND (op1, 0)))
+ {
+ /* Map everything down one to make room for the null pointer to member. */
+ index1 = size_binop (PLUS_EXPR,
+ DECL_VINDEX (TREE_OPERAND (op1, 0)),
+ integer_one_node);
+ op1 = integer_zero_node;
+ delta21 = CLASSTYPE_VFIELD (TYPE_METHOD_BASETYPE (TREE_TYPE (type1)));
+ delta21 = DECL_FIELD_BITPOS (delta21);
+ delta21 = size_binop (FLOOR_DIV_EXPR, delta21, size_int (BITS_PER_UNIT));
+ }
+ else
+ index1 = integer_neg_one_node;
+ {
+ tree nop1 = build1 (NOP_EXPR, TYPE_PTRMEMFUNC_FN_TYPE (type0), op1);
+ TREE_CONSTANT (nop1) = TREE_CONSTANT (op1);
+ op1 = nop1;
+ }
+ e1 = build_binary_op (EQ_EXPR, index0, index1, 1);
+ e2 = build_binary_op (NE_EXPR, index1, integer_neg_one_node, 1);
+ e2 = build_binary_op (TRUTH_ANDIF_EXPR, e2, build_binary_op (EQ_EXPR, delta20, delta21, 1), 1);
+ e3 = build_binary_op (EQ_EXPR, pfn0, op1, 1);
+ e2 = build_binary_op (TRUTH_ORIF_EXPR, e2, e3, 1);
+ e2 = build_binary_op (TRUTH_ANDIF_EXPR, e1, e2, 1);
+ if (code == EQ_EXPR)
+ return e2;
+ return build_binary_op (EQ_EXPR, e2, integer_zero_node, 1);
+ }
+ else if (TYPE_PTRMEMFUNC_P (type1)
+ && TYPE_PTRMEMFUNC_FN_TYPE (type1) == type0)
+ {
+ return build_binary_op (code, op1, op0, 1);
+ }
+ else
+ /* If args are not valid, clear out RESULT_TYPE
+ to cause an error message later. */
+ result_type = 0;
+ break;
+
+ case MAX_EXPR:
+ case MIN_EXPR:
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == REAL_TYPE))
+ shorten = 1;
+ else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
+ {
+ if (! comp_target_types (type0, type1, 1))
+ cp_pedwarn ("comparison of distinct pointer types `%T' and `%T' lacks a cast",
+ type0, type1);
+ else if ((TYPE_SIZE (TREE_TYPE (type0)) != 0)
+ != (TYPE_SIZE (TREE_TYPE (type1)) != 0))
+ cp_pedwarn ("comparison of %scomplete and %scomplete pointers",
+ TYPE_SIZE (TREE_TYPE (type0)) == 0 ? "in" : "",
+ TYPE_SIZE (TREE_TYPE (type1)) == 0 ? "in" : "",
+ type0, type1);
+ else if (pedantic
+ && TREE_CODE (TREE_TYPE (type0)) == FUNCTION_TYPE)
+ pedwarn ("ANSI C++ forbids ordered comparisons of pointers to functions");
+ result_type = common_type (type0, type1);
+ }
+ break;
+
+ case LE_EXPR:
+ case GE_EXPR:
+ case LT_EXPR:
+ case GT_EXPR:
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == REAL_TYPE))
+ short_compare = 1;
+ else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
+ {
+ if (! comp_target_types (type0, type1, 1))
+ cp_pedwarn ("comparison of distinct pointer types `%T' and `%T' lacks a cast",
+ type0, type1);
+ else if ((TYPE_SIZE (TREE_TYPE (type0)) != 0)
+ != (TYPE_SIZE (TREE_TYPE (type1)) != 0))
+ cp_pedwarn ("comparison of %scomplete and %scomplete pointers",
+ TYPE_SIZE (TREE_TYPE (type0)) == 0 ? "in" : "",
+ TYPE_SIZE (TREE_TYPE (type1)) == 0 ? "in" : "",
+ type0, type1);
+ else if (pedantic
+ && TREE_CODE (TREE_TYPE (type0)) == FUNCTION_TYPE)
+ pedwarn ("ANSI C++ forbids ordered comparisons of pointers to functions");
+ }
+ else if (code0 == POINTER_TYPE && TREE_CODE (op1) == INTEGER_CST
+ && integer_zerop (op1))
+ {
+ op1 = null_pointer_node;
+ if (pedantic)
+ pedwarn ("ordered comparison of pointer with integer zero");
+ }
+ else if (code1 == POINTER_TYPE && TREE_CODE (op0) == INTEGER_CST
+ && integer_zerop (op0))
+ {
+ op0 = null_pointer_node;
+ if (pedantic)
+ pedwarn ("ANSI C++ forbids ordered comparison of pointer with integer zero");
+ }
+ else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ {
+ if (pedantic)
+ pedwarn ("ANSI C++ forbids comparison between pointer and integer");
+ else if (! flag_traditional)
+ warning ("comparison between pointer and integer");
+ op1 = convert (TREE_TYPE (op0), op1);
+ }
+ else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE)
+ {
+ if (pedantic)
+ pedwarn ("ANSI C++ forbids comparison between pointer and integer");
+ else if (! flag_traditional)
+ warning ("comparison between pointer and integer");
+ op0 = convert (TREE_TYPE (op1), op0);
+ }
+ result_type = bool_type_node;
+ converted = 1;
+ break;
+ }
+
+ if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE)
+ && (code1 == INTEGER_TYPE || code1 == REAL_TYPE))
+ {
+ if (shorten || common || short_compare)
+ result_type = common_type (type0, type1);
+
+ /* For certain operations (which identify themselves by shorten != 0)
+ if both args were extended from the same smaller type,
+ do the arithmetic in that type and then extend.
+
+ shorten == -1 indicates a bitwise operation.
+ For those, this optimization is safe only if
+ both args are zero-extended or both are sign-extended.
+ Otherwise, we might change the result.
+ E.g., (short)-1 | (unsigned short)-1 is (int)-1,
+ but calculated in (unsigned short) it would be (unsigned short)-1. */
+
+ if (shorten)
+ {
+ int unsigned0, unsigned1;
+ tree arg0 = get_narrower (op0, &unsigned0);
+ tree arg1 = get_narrower (op1, &unsigned1);
+ /* UNS is 1 if the operation to be done is an unsigned one. */
+ int uns = TREE_UNSIGNED (result_type);
+ tree type;
+
+ final_type = result_type;
+
+ /* Handle the case that OP0 does not *contain* a conversion
+ but it *requires* conversion to FINAL_TYPE. */
+
+ if (op0 == arg0 && TREE_TYPE (op0) != final_type)
+ unsigned0 = TREE_UNSIGNED (TREE_TYPE (op0));
+ if (op1 == arg1 && TREE_TYPE (op1) != final_type)
+ unsigned1 = TREE_UNSIGNED (TREE_TYPE (op1));
+
+ /* Now UNSIGNED0 is 1 if ARG0 zero-extends to FINAL_TYPE. */
+
+ /* For bitwise operations, signedness of nominal type
+ does not matter. Consider only how operands were extended. */
+ if (shorten == -1)
+ uns = unsigned0;
+
+ /* Note that in all three cases below we refrain from optimizing
+ an unsigned operation on sign-extended args.
+ That would not be valid. */
+
+ /* Both args variable: if both extended in same way
+ from same width, do it in that width.
+ Do it unsigned if args were zero-extended. */
+ if ((TYPE_PRECISION (TREE_TYPE (arg0))
+ < TYPE_PRECISION (result_type))
+ && (TYPE_PRECISION (TREE_TYPE (arg1))
+ == TYPE_PRECISION (TREE_TYPE (arg0)))
+ && unsigned0 == unsigned1
+ && (unsigned0 || !uns))
+ result_type
+ = signed_or_unsigned_type (unsigned0,
+ common_type (TREE_TYPE (arg0), TREE_TYPE (arg1)));
+ else if (TREE_CODE (arg0) == INTEGER_CST
+ && (unsigned1 || !uns)
+ && (TYPE_PRECISION (TREE_TYPE (arg1))
+ < TYPE_PRECISION (result_type))
+ && (type = signed_or_unsigned_type (unsigned1,
+ TREE_TYPE (arg1)),
+ int_fits_type_p (arg0, type)))
+ result_type = type;
+ else if (TREE_CODE (arg1) == INTEGER_CST
+ && (unsigned0 || !uns)
+ && (TYPE_PRECISION (TREE_TYPE (arg0))
+ < TYPE_PRECISION (result_type))
+ && (type = signed_or_unsigned_type (unsigned0,
+ TREE_TYPE (arg0)),
+ int_fits_type_p (arg1, type)))
+ result_type = type;
+ }
+
+ /* Shifts can be shortened if shifting right. */
+
+ if (short_shift)
+ {
+ int unsigned_arg;
+ tree arg0 = get_narrower (op0, &unsigned_arg);
+
+ final_type = result_type;
+
+ if (arg0 == op0 && final_type == TREE_TYPE (op0))
+ unsigned_arg = TREE_UNSIGNED (TREE_TYPE (op0));
+
+ if (TYPE_PRECISION (TREE_TYPE (arg0)) < TYPE_PRECISION (result_type)
+ /* If arg is sign-extended and then unsigned-shifted,
+ we can simulate this with a signed shift in arg's type
+ only if the extended result is at least twice as wide
+ as the arg. Otherwise, the shift could use up all the
+ ones made by sign-extension and bring in zeros.
+ We can't optimize that case at all, but in most machines
+ it never happens because available widths are 2**N. */
+ && (!TREE_UNSIGNED (final_type)
+ || unsigned_arg
+ || ((unsigned) 2 * TYPE_PRECISION (TREE_TYPE (arg0))
+ <= TYPE_PRECISION (result_type))))
+ {
+ /* Do an unsigned shift if the operand was zero-extended. */
+ result_type
+ = signed_or_unsigned_type (unsigned_arg,
+ TREE_TYPE (arg0));
+ /* Convert value-to-be-shifted to that type. */
+ if (TREE_TYPE (op0) != result_type)
+ op0 = convert (result_type, op0);
+ converted = 1;
+ }
+ }
+
+ /* Comparison operations are shortened too but differently.
+ They identify themselves by setting short_compare = 1. */
+
+ if (short_compare)
+ {
+ /* Don't write &op0, etc., because that would prevent op0
+ from being kept in a register.
+ Instead, make copies of our local variables and
+ pass the copies by reference, then copy them back afterward. */
+ tree xop0 = op0, xop1 = op1, xresult_type = result_type;
+ enum tree_code xresultcode = resultcode;
+ tree val
+ = shorten_compare (&xop0, &xop1, &xresult_type, &xresultcode);
+ if (val != 0)
+ return convert (bool_type_node, val);
+ op0 = xop0, op1 = xop1, result_type = bool_type_node;
+ resultcode = xresultcode;
+ }
+
+ if (short_compare && extra_warnings)
+ {
+ int unsignedp0, unsignedp1;
+ tree primop0 = get_narrower (op0, &unsignedp0);
+ tree primop1 = get_narrower (op1, &unsignedp1);
+
+ /* Warn if signed and unsigned are being compared in a size larger
+ than their original size, as this will always fail. */
+
+ if (unsignedp0 != unsignedp1
+ && (TYPE_PRECISION (TREE_TYPE (primop0))
+ < TYPE_PRECISION (result_type))
+ && (TYPE_PRECISION (TREE_TYPE (primop1))
+ < TYPE_PRECISION (result_type)))
+ warning ("comparison between promoted unsigned and signed");
+
+ /* Warn if two unsigned values are being compared in a size
+ larger than their original size, and one (and only one) is the
+ result of a `~' operator. This comparison will always fail.
+
+ Also warn if one operand is a constant, and the constant does not
+ have all bits set that are set in the ~ operand when it is
+ extended. */
+
+ else if (TREE_CODE (primop0) == BIT_NOT_EXPR
+ ^ TREE_CODE (primop1) == BIT_NOT_EXPR)
+ {
+ if (TREE_CODE (primop0) == BIT_NOT_EXPR)
+ primop0 = get_narrower (TREE_OPERAND (op0, 0), &unsignedp0);
+ if (TREE_CODE (primop1) == BIT_NOT_EXPR)
+ primop1 = get_narrower (TREE_OPERAND (op1, 0), &unsignedp1);
+
+ if (TREE_CODE (primop0) == INTEGER_CST
+ || TREE_CODE (primop1) == INTEGER_CST)
+ {
+ tree primop;
+ HOST_WIDE_INT constant, mask;
+ int unsignedp;
+ unsigned bits;
+
+ if (TREE_CODE (primop0) == INTEGER_CST)
+ {
+ primop = primop1;
+ unsignedp = unsignedp1;
+ constant = TREE_INT_CST_LOW (primop0);
+ }
+ else
+ {
+ primop = primop0;
+ unsignedp = unsignedp0;
+ constant = TREE_INT_CST_LOW (primop1);
+ }
+
+ bits = TYPE_PRECISION (TREE_TYPE (primop));
+ if (bits < TYPE_PRECISION (result_type)
+ && bits < HOST_BITS_PER_LONG && unsignedp)
+ {
+ mask = (~ (HOST_WIDE_INT) 0) << bits;
+ if ((mask & constant) != mask)
+ warning ("comparison of promoted ~unsigned with constant");
+ }
+ }
+ else if (unsignedp0 && unsignedp1
+ && (TYPE_PRECISION (TREE_TYPE (primop0))
+ < TYPE_PRECISION (result_type))
+ && (TYPE_PRECISION (TREE_TYPE (primop1))
+ < TYPE_PRECISION (result_type)))
+ warning ("comparison of promoted ~unsigned with unsigned");
+ }
+ }
+ }
+
+ /* At this point, RESULT_TYPE must be nonzero to avoid an error message.
+ If CONVERTED is zero, both args will be converted to type RESULT_TYPE.
+ Then the expression will be built.
+ It will be given type FINAL_TYPE if that is nonzero;
+ otherwise, it will be given type RESULT_TYPE. */
+
+ if (!result_type)
+ {
+ binary_op_error (error_code);
+ return error_mark_node;
+ }
+
+ if (! converted)
+ {
+ if (TREE_TYPE (op0) != result_type)
+ op0 = convert (result_type, op0);
+ if (TREE_TYPE (op1) != result_type)
+ op1 = convert (result_type, op1);
+ }
+
+ {
+ register tree result = build (resultcode, result_type, op0, op1);
+ register tree folded;
+
+ folded = fold (result);
+ if (folded == result)
+ TREE_CONSTANT (folded) = TREE_CONSTANT (op0) & TREE_CONSTANT (op1);
+ if (final_type != 0)
+ return convert (final_type, folded);
+ return folded;
+ }
+}
+
+/* Return a tree for the sum or difference (RESULTCODE says which)
+ of pointer PTROP and integer INTOP. */
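+
+ /* Example (added commentary, not in the original source): given
+ `long *p;', `p + n' is built as `p + n * sizeof (long)', with the
+ multiplication done in an integer type as wide as a pointer so it
+ cannot overflow spuriously. */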
+
+static tree
+pointer_int_sum (resultcode, ptrop, intop)
+ enum tree_code resultcode;
+ register tree ptrop, intop;
+{
+ tree size_exp;
+
+ register tree result;
+ register tree folded = fold (intop);
+
+ /* The result is a pointer of the same type that is being added. */
+
+ register tree result_type = TREE_TYPE (ptrop);
+
+ if (TREE_CODE (TREE_TYPE (result_type)) == VOID_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("ANSI C++ forbids using pointer of type `void *' in arithmetic");
+ size_exp = integer_one_node;
+ }
+ else if (TREE_CODE (TREE_TYPE (result_type)) == FUNCTION_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("ANSI C++ forbids using pointer to a function in arithmetic");
+ size_exp = integer_one_node;
+ }
+ else if (TREE_CODE (TREE_TYPE (result_type)) == METHOD_TYPE)
+ {
+ if (pedantic || warn_pointer_arith)
+ pedwarn ("ANSI C++ forbids using pointer to a method in arithmetic");
+ size_exp = integer_one_node;
+ }
+ else if (TREE_CODE (TREE_TYPE (result_type)) == OFFSET_TYPE)
+ {
+ if (pedantic)
+ pedwarn ("ANSI C++ forbids using pointer to a member in arithmetic");
+ size_exp = integer_one_node;
+ }
+ else
+ size_exp = size_in_bytes (TREE_TYPE (result_type));
+
+ /* Needed to make OOPS V2R3 work. */
+ intop = folded;
+ if (TREE_CODE (intop) == INTEGER_CST
+ && TREE_INT_CST_LOW (intop) == 0
+ && TREE_INT_CST_HIGH (intop) == 0)
+ return ptrop;
+
+ /* If what we are about to multiply by the size of the elements
+ contains a constant term, apply distributive law
+ and multiply that constant term separately.
+ This helps produce common subexpressions. */
+
+ if ((TREE_CODE (intop) == PLUS_EXPR || TREE_CODE (intop) == MINUS_EXPR)
+ && ! TREE_CONSTANT (intop)
+ && TREE_CONSTANT (TREE_OPERAND (intop, 1))
+ && TREE_CONSTANT (size_exp))
+ {
+ enum tree_code subcode = resultcode;
+ if (TREE_CODE (intop) == MINUS_EXPR)
+ subcode = (subcode == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR);
+ ptrop = build_binary_op (subcode, ptrop, TREE_OPERAND (intop, 1), 1);
+ intop = TREE_OPERAND (intop, 0);
+ }
+
+ /* Convert the integer argument to a type the same size as a pointer
+ so the multiply won't overflow spuriously. */
+
+ if (TYPE_PRECISION (TREE_TYPE (intop)) != POINTER_SIZE)
+ intop = convert (type_for_size (POINTER_SIZE, 0), intop);
+
+ /* Replace the integer argument with a suitable product by the object size.
+ Do this multiplication as signed, then convert to the appropriate
+ pointer type (actually unsigned integral). */
+
+ intop = convert (result_type,
+ build_binary_op (MULT_EXPR, intop,
+ convert (TREE_TYPE (intop), size_exp), 1));
+
+ /* Create the sum or difference. */
+
+ result = build (resultcode, result_type, ptrop, intop);
+
+ folded = fold (result);
+ if (folded == result)
+ TREE_CONSTANT (folded) = TREE_CONSTANT (ptrop) & TREE_CONSTANT (intop);
+ return folded;
+}
+
+/* Return a tree for the difference of pointers OP0 and OP1.
+ The resulting tree has type int. */
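+
+ /* Example (added commentary, not in the original source): given
+ `long *p, *q;', `p - q' is built as
+ `((ptrdiff_t) p - (ptrdiff_t) q) / sizeof (long)', using
+ EXACT_DIV_EXPR because the difference is a whole number of
+ elements. */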
+
+static tree
+pointer_diff (op0, op1)
+ register tree op0, op1;
+{
+ register tree result, folded;
+ tree restype = ptrdiff_type_node;
+ tree target_type = TREE_TYPE (TREE_TYPE (op0));
+
+ if (pedantic)
+ {
+ if (TREE_CODE (target_type) == VOID_TYPE)
+ pedwarn ("ANSI C++ forbids using pointer of type `void *' in subtraction");
+ if (TREE_CODE (target_type) == FUNCTION_TYPE)
+ pedwarn ("ANSI C++ forbids using pointer to a function in subtraction");
+ if (TREE_CODE (target_type) == METHOD_TYPE)
+ pedwarn ("ANSI C++ forbids using pointer to a method in subtraction");
+ if (TREE_CODE (target_type) == OFFSET_TYPE)
+ pedwarn ("ANSI C++ forbids using pointer to a member in subtraction");
+ }
+
+ /* First do the subtraction as integers;
+ then drop through to build the divide operator. */
+
+ op0 = build_binary_op (MINUS_EXPR,
+ convert (restype, op0), convert (restype, op1), 1);
+
+ /* This generates an error if op1 is a pointer to an incomplete type. */
+ if (TYPE_SIZE (TREE_TYPE (TREE_TYPE (op1))) == 0)
+ error ("arithmetic on pointer to an incomplete type");
+
+ op1 = ((TREE_CODE (target_type) == VOID_TYPE
+ || TREE_CODE (target_type) == FUNCTION_TYPE
+ || TREE_CODE (target_type) == METHOD_TYPE
+ || TREE_CODE (target_type) == OFFSET_TYPE)
+ ? integer_one_node
+ : size_in_bytes (target_type));
+
+ /* Do the division. */
+
+ result = build (EXACT_DIV_EXPR, restype, op0, convert (restype, op1));
+
+ folded = fold (result);
+ if (folded == result)
+ TREE_CONSTANT (folded) = TREE_CONSTANT (op0) & TREE_CONSTANT (op1);
+ return folded;
+}
+
+/* Handle the case of taking the address of a COMPONENT_REF.
+ Called by `build_unary_op' and `build_up_reference'.
+
+ ARG is the COMPONENT_REF whose address we want.
+ ARGTYPE is the pointer type that this address should have.
+ MSG is an error message to print if this COMPONENT_REF is not
+ addressable (such as a bitfield). */
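+
+ /* Example (added commentary, not in the original source): for
+ `struct S { int a, b; } s;', `&s.b' is built roughly as
+ `(int *) ((char *) &s + offset)', where the byte offset comes from
+ DECL_FIELD_BITPOS of the field divided by BITS_PER_UNIT. */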
+
+tree
+build_component_addr (arg, argtype, msg)
+ tree arg, argtype;
+ char *msg;
+{
+ tree field = TREE_OPERAND (arg, 1);
+ tree basetype = decl_type_context (field);
+ tree rval = build_unary_op (ADDR_EXPR, TREE_OPERAND (arg, 0), 0);
+
+ if (DECL_BIT_FIELD (field))
+ {
+ error (msg, IDENTIFIER_POINTER (DECL_NAME (field)));
+ return error_mark_node;
+ }
+
+ if (flag_gc)
+ cp_warning ("address of `%T::%D' taken", basetype, field);
+
+ if (TREE_CODE (field) == FIELD_DECL
+ && TYPE_USES_COMPLEX_INHERITANCE (basetype))
+ {
+ /* Can't convert directly to ARGTYPE, since that
+ may have the same pointer type as one of our
+ baseclasses. */
+ rval = build1 (NOP_EXPR, argtype,
+ convert_pointer_to (basetype, rval));
+ TREE_CONSTANT (rval) = TREE_CONSTANT (TREE_OPERAND (rval, 0));
+ }
+ else
+ /* This conversion is harmless. */
+ rval = convert (argtype, rval);
+
+ if (! integer_zerop (DECL_FIELD_BITPOS (field)))
+ {
+ tree offset = size_binop (EASY_DIV_EXPR, DECL_FIELD_BITPOS (field),
+ size_int (BITS_PER_UNIT));
+ int flag = TREE_CONSTANT (rval);
+ rval = fold (build (PLUS_EXPR, argtype,
+ rval, convert (argtype, offset)));
+ TREE_CONSTANT (rval) = flag;
+ }
+ return rval;
+}
+
+/* Construct and perhaps optimize a tree representation
+ for a unary operation. CODE, a tree_code, specifies the operation
+ and XARG is the operand. */
+
+tree
+build_x_unary_op (code, xarg)
+ enum tree_code code;
+ tree xarg;
+{
+ /* `&rec' on an incomplete RECORD_TYPE is the plain unary `&'
+ operator, not an error. */
+ if (code != ADDR_EXPR || TREE_CODE (TREE_TYPE (xarg)) != RECORD_TYPE
+ || TYPE_SIZE (TREE_TYPE (xarg)))
+ {
+ tree rval = build_opfncall (code, LOOKUP_SPECULATIVELY, xarg,
+ NULL_TREE, NULL_TREE);
+ if (rval)
+ return build_opfncall (code, LOOKUP_NORMAL, xarg,
+ NULL_TREE, NULL_TREE);
+ }
+ return build_unary_op (code, xarg, 0);
+}
+
+/* Just like truthvalue_conversion, but we want a BOOLEAN_TYPE */
+tree
+bool_truthvalue_conversion (expr)
+ tree expr;
+{
+ /* We really want to perform the optimizations in truthvalue_conversion,
+ but not this way. */
+ /* expr = truthvalue_conversion (expr); */
+ return convert (bool_type_node, expr);
+}
+
+/* C++: Must handle pointers to members.
+
+ Perhaps type instantiation should be extended to handle conversion
+ from aggregates to types we don't yet know we want? (Or are those
+ cases typically errors which should be reported?)
+
+ NOCONVERT nonzero suppresses the default promotions
+ (such as from short to int). */
+tree
+build_unary_op (code, xarg, noconvert)
+ enum tree_code code;
+ tree xarg;
+ int noconvert;
+{
+ /* No default_conversion here. It causes trouble for ADDR_EXPR. */
+ register tree arg = xarg;
+ register tree argtype = 0;
+ register enum tree_code typecode = TREE_CODE (TREE_TYPE (arg));
+ char *errstring = NULL;
+ tree val;
+ int isaggrtype;
+
+ if (typecode == ERROR_MARK)
+ return error_mark_node;
+
+ if (typecode == REFERENCE_TYPE && code != ADDR_EXPR && ! noconvert)
+ {
+ arg = convert_from_reference (arg);
+ typecode = TREE_CODE (TREE_TYPE (arg));
+ }
+
+ if (typecode == ENUMERAL_TYPE)
+ typecode = INTEGER_TYPE;
+
+ isaggrtype = IS_AGGR_TYPE_CODE (typecode);
+
+ switch (code)
+ {
+ case CONVERT_EXPR:
+ /* This is used for unary plus, because a CONVERT_EXPR
+ is enough to prevent anybody from looking inside for
+ associativity, but won't generate any code. */
+ if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE))
+ errstring = "wrong type argument to unary plus";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case NEGATE_EXPR:
+ if (isaggrtype)
+ {
+ if (!noconvert)
+ arg = default_conversion (arg);
+ else
+ {
+ cp_error ("type conversion for type `%T' not allowed",
+ TREE_TYPE (arg));
+ return error_mark_node;
+ }
+ typecode = TREE_CODE (TREE_TYPE (arg));
+ noconvert = 1;
+ }
+
+ if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE))
+ errstring = "wrong type argument to unary minus";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case BIT_NOT_EXPR:
+ if (isaggrtype)
+ {
+ if (!noconvert)
+ arg = default_conversion (arg);
+ else
+ {
+ cp_error ("type conversion for type `%T' not allowed",
+ TREE_TYPE (arg));
+ return error_mark_node;
+ }
+ typecode = TREE_CODE (TREE_TYPE (arg));
+ noconvert = 1;
+ }
+
+ if (typecode != INTEGER_TYPE)
+ errstring = "wrong type argument to bit-complement";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case ABS_EXPR:
+ if (isaggrtype)
+ {
+ if (!noconvert)
+ arg = default_conversion (arg);
+ else
+ {
+ cp_error ("type conversion for type `%T' not allowed",
+ TREE_TYPE (arg));
+ return error_mark_node;
+ }
+ typecode = TREE_CODE (TREE_TYPE (arg));
+ noconvert = 1;
+ }
+
+ if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE))
+ errstring = "wrong type argument to abs";
+ else if (!noconvert)
+ arg = default_conversion (arg);
+ break;
+
+ case TRUTH_NOT_EXPR:
+ arg = bool_truthvalue_conversion (arg);
+ val = invert_truthvalue (arg);
+ if (arg != error_mark_node)
+ return val;
+ errstring = "in argument to unary !";
+ break;
+
+ case NOP_EXPR:
+ break;
+
+ case PREINCREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ /* Handle complex lvalues (when permitted)
+ by reduction to simpler cases. */
+
+ val = unary_complex_lvalue (code, arg);
+ if (val != 0)
+ return val;
+
+ /* Report invalid types. */
+
+ if (isaggrtype)
+ {
+ arg = default_conversion (arg);
+ typecode = TREE_CODE (TREE_TYPE (arg));
+ }
+
+ if (typecode != POINTER_TYPE
+ && typecode != INTEGER_TYPE && typecode != REAL_TYPE)
+ {
+ if (code == PREINCREMENT_EXPR)
+ errstring = "no pre-increment operator for type";
+ else if (code == POSTINCREMENT_EXPR)
+ errstring = "no post-increment operator for type";
+ else if (code == PREDECREMENT_EXPR)
+ errstring = "no pre-decrement operator for type";
+ else
+ errstring = "no post-decrement operator for type";
+ break;
+ }
+
+ /* Report something read-only. */
+
+ if (TYPE_READONLY (TREE_TYPE (arg))
+ || TREE_READONLY (arg))
+ readonly_error (arg, ((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? "increment" : "decrement"),
+ 0);
+
+ {
+ register tree inc;
+ tree result_type = TREE_TYPE (arg);
+
+ arg = get_unwidened (arg, 0);
+ argtype = TREE_TYPE (arg);
+
+ /* ARM $5.2.5 last annotation says this should be forbidden. */
+ if (TREE_CODE (argtype) == ENUMERAL_TYPE)
+ pedwarn ("ANSI C++ forbids %sing an enum",
+ (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
+ ? "increment" : "decrement");
+
+ /* Compute the increment. */
+
+ if (typecode == POINTER_TYPE)
+ {
+ enum tree_code tmp = TREE_CODE (TREE_TYPE (argtype));
+ if (TYPE_SIZE (TREE_TYPE (argtype)) == 0)
+ cp_error ("cannot %s a pointer to incomplete type `%T'",
+ ((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? "increment" : "decrement"), TREE_TYPE (argtype));
+ else if (tmp == FUNCTION_TYPE || tmp == METHOD_TYPE
+ || tmp == VOID_TYPE || tmp == OFFSET_TYPE)
+ cp_pedwarn ("ANSI C++ forbids %sing a pointer of type `%T'",
+ ((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? "increment" : "decrement"), argtype);
+ inc = c_sizeof_nowarn (TREE_TYPE (argtype));
+ }
+ else
+ inc = integer_one_node;
+
+ inc = convert (argtype, inc);
+
+ /* Handle incrementing a cast-expression. */
+
+ switch (TREE_CODE (arg))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ {
+ tree incremented, modify, value;
+ if (! lvalue_p (arg) && pedantic)
+ pedwarn ("cast to non-reference type used as lvalue");
+ arg = stabilize_reference (arg);
+ if (code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR)
+ value = arg;
+ else
+ value = save_expr (arg);
+ incremented = build (((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? PLUS_EXPR : MINUS_EXPR),
+ argtype, value, inc);
+ TREE_SIDE_EFFECTS (incremented) = 1;
+ modify = build_modify_expr (arg, NOP_EXPR, incremented);
+ return build (COMPOUND_EXPR, TREE_TYPE (arg), modify, value);
+ }
+ }
+
+ if (TREE_CODE (arg) == OFFSET_REF)
+ arg = resolve_offset_ref (arg);
+
+ /* Complain about anything else that is not a true lvalue. */
+ if (!lvalue_or_else (arg, ((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? "increment" : "decrement")))
+ return error_mark_node;
+
+ val = build (code, TREE_TYPE (arg), arg, inc);
+ TREE_SIDE_EFFECTS (val) = 1;
+ return convert (result_type, val);
+ }
+
+ case ADDR_EXPR:
+ /* Note that this operation never does default_conversion
+ regardless of NOCONVERT. */
+
+ if (typecode == REFERENCE_TYPE)
+ {
+ arg = build1 (CONVERT_EXPR, build_pointer_type (TREE_TYPE (TREE_TYPE (arg))), arg);
+ TREE_REFERENCE_EXPR (arg) = 1;
+ return arg;
+ }
+ else if (pedantic
+ && TREE_CODE (arg) == FUNCTION_DECL
+ && DECL_NAME (arg)
+ && DECL_CONTEXT (arg) == NULL_TREE
+ && IDENTIFIER_LENGTH (DECL_NAME (arg)) == 4
+ && IDENTIFIER_POINTER (DECL_NAME (arg))[0] == 'm'
+ && ! strcmp (IDENTIFIER_POINTER (DECL_NAME (arg)), "main"))
+ /* ARM $3.4 */
+ pedwarn ("taking address of function `main'");
+
+ /* Let &* cancel out to simplify resulting code. */
+ if (TREE_CODE (arg) == INDIRECT_REF)
+ {
+ /* We don't need to have `current_class_decl' wrapped in a
+ NON_LVALUE_EXPR node. */
+ if (arg == C_C_D)
+ return current_class_decl;
+
+ /* Keep `default_conversion' from converting if
+ ARG is of REFERENCE_TYPE. */
+ arg = TREE_OPERAND (arg, 0);
+ if (TREE_CODE (TREE_TYPE (arg)) == REFERENCE_TYPE)
+ {
+ if (TREE_CODE (arg) == VAR_DECL && DECL_INITIAL (arg)
+ && !TREE_SIDE_EFFECTS (DECL_INITIAL (arg)))
+ arg = DECL_INITIAL (arg);
+ arg = build1 (CONVERT_EXPR, build_pointer_type (TREE_TYPE (TREE_TYPE (arg))), arg);
+ TREE_REFERENCE_EXPR (arg) = 1;
+ TREE_CONSTANT (arg) = TREE_CONSTANT (TREE_OPERAND (arg, 0));
+ }
+ else if (lvalue_p (arg))
+ /* Don't let this be an lvalue. */
+ return non_lvalue (arg);
+ return arg;
+ }
+
+ /* For &x[y], return x+y */
+ if (TREE_CODE (arg) == ARRAY_REF)
+ {
+ if (mark_addressable (TREE_OPERAND (arg, 0)) == 0)
+ return error_mark_node;
+ return build_binary_op (PLUS_EXPR, TREE_OPERAND (arg, 0),
+ TREE_OPERAND (arg, 1), 1);
+ }
+
+ /* For &(++foo), we are really taking the address of the variable
+ being acted upon by the increment/decrement operator. ARM $5.3.1
+ However, according to ARM $5.2.5, we don't allow postfix ++ and
+ --, since the prefix operators return lvalues, but the postfix
+ operators do not. */
+ if (TREE_CODE (arg) == PREINCREMENT_EXPR
+ || TREE_CODE (arg) == PREDECREMENT_EXPR)
+ arg = TREE_OPERAND (arg, 0);
+
+ /* Uninstantiated types are all functions. Taking the
+ address of a function is a no-op, so just return the
+ argument. */
+
+ if (TREE_CODE (arg) == IDENTIFIER_NODE
+ && IDENTIFIER_OPNAME_P (arg))
+ {
+ my_friendly_abort (117);
+ /* We don't know the type yet, so just work around the problem.
+ We know that this will resolve to an lvalue. */
+ return build1 (ADDR_EXPR, unknown_type_node, arg);
+ }
+
+ if (TREE_CODE (arg) == TREE_LIST)
+ {
+ /* Look at methods with only this name. */
+ if (TREE_CODE (TREE_VALUE (arg)) == FUNCTION_DECL)
+ {
+ tree targ = TREE_VALUE (arg);
+
+ /* If this function is unique, or it is a unique
+ constructor, we can take its address easily. */
+ if (DECL_CHAIN (targ) == NULL_TREE
+ || (DESTRUCTOR_NAME_P (DECL_ASSEMBLER_NAME (targ))
+ && DECL_CHAIN (DECL_CHAIN (targ)) == NULL_TREE))
+ {
+ if (DECL_CHAIN (targ))
+ targ = DECL_CHAIN (targ);
+ if (DECL_CLASS_CONTEXT (targ))
+ targ = build (OFFSET_REF, TREE_TYPE (targ), C_C_D, targ);
+
+ val = unary_complex_lvalue (ADDR_EXPR, targ);
+ if (val)
+ return val;
+ }
+
+ /* This possible setting of TREE_CONSTANT is what makes it possible
+ for an initializer list to emit the entire thing in the data
+ section, rather than initializing it at run time. */
+ arg = build1 (ADDR_EXPR, unknown_type_node, arg);
+ if (staticp (targ))
+ TREE_CONSTANT (arg) = 1;
+ return arg;
+ }
+ if (TREE_CHAIN (arg) == NULL_TREE
+ && TREE_CODE (TREE_VALUE (arg)) == TREE_LIST
+ && DECL_CHAIN (TREE_VALUE (TREE_VALUE (arg))) == NULL_TREE)
+ {
+ /* Unique overloaded member function. */
+ return build_unary_op (ADDR_EXPR, TREE_VALUE (TREE_VALUE (arg)), 0);
+ }
+ return build1 (ADDR_EXPR, unknown_type_node, arg);
+ }
+
+ /* Handle complex lvalues (when permitted)
+ by reduction to simpler cases. */
+ val = unary_complex_lvalue (code, arg);
+ if (val != 0)
+ return val;
+
+ switch (TREE_CODE (arg))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ if (! lvalue_p (arg) && pedantic)
+ pedwarn ("taking the address of a cast to non-reference type");
+ }
+
+ /* Allow the address of a constructor if all the elements
+ are constant. */
+ if (TREE_CODE (arg) == CONSTRUCTOR && TREE_CONSTANT (arg))
+ ;
+ /* Anything not already handled and not a true memory reference
+ is an error. */
+ else if (typecode != FUNCTION_TYPE
+ && typecode != METHOD_TYPE
+ && !lvalue_or_else (arg, "unary `&'"))
+ return error_mark_node;
+
+ /* Ordinary case; arg is a COMPONENT_REF or a decl. */
+ argtype = TREE_TYPE (arg);
+ /* If the lvalue is const or volatile,
+ merge that into the type that the address will point to. */
+ if (TREE_CODE_CLASS (TREE_CODE (arg)) == 'd'
+ || TREE_CODE_CLASS (TREE_CODE (arg)) == 'r')
+ {
+ if (TREE_READONLY (arg) || TREE_THIS_VOLATILE (arg))
+ argtype = c_build_type_variant (argtype,
+ TREE_READONLY (arg),
+ TREE_THIS_VOLATILE (arg));
+ }
+
+ argtype = build_pointer_type (argtype);
+
+ if (mark_addressable (arg) == 0)
+ return error_mark_node;
+
+ {
+ tree addr;
+
+ if (TREE_CODE (arg) == COMPONENT_REF)
+ addr = build_component_addr (arg, argtype,
+ "attempt to take address of bit-field structure member `%s'");
+ else
+ addr = build1 (code, argtype, arg);
+
+ /* Address of a static or external variable or
+ function counts as a constant */
+ if (staticp (arg))
+ TREE_CONSTANT (addr) = 1;
+ return addr;
+ }
+ }
+
+ if (!errstring)
+ {
+ if (argtype == 0)
+ argtype = TREE_TYPE (arg);
+ return fold (build1 (code, argtype, arg));
+ }
+
+ error (errstring);
+ return error_mark_node;
+}
+
+/* If CONVERSIONS is a conversion expression or a nested sequence of such,
+ convert ARG with the same conversions in the same order
+ and return the result. */
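+
+ /* Example (added commentary, not in the original source): if
+ CONVERSIONS is the expression `(int) (long) x', the result is
+ `(int) (long) ARG' -- the same conversions applied to ARG,
+ innermost first. */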
+
+static tree
+convert_sequence (conversions, arg)
+ tree conversions;
+ tree arg;
+{
+ switch (TREE_CODE (conversions))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ return convert (TREE_TYPE (conversions),
+ convert_sequence (TREE_OPERAND (conversions, 0),
+ arg));
+
+ default:
+ return arg;
+ }
+}
+
+/* Apply unary lvalue-demanding operator CODE to the expression ARG
+ for certain kinds of expressions which are not really lvalues
+ but which we can accept as lvalues.
+
+ If ARG is not a kind of expression we can handle, return zero. */
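+
+ /* Examples (added commentary, not in the original source):
+ `&(a, b)' is rewritten as `(a, &b)', and `&(a = b)' as
+ `((a = b), &a)', so the address taken is that of the expression's
+ underlying object. */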
+
+tree
+unary_complex_lvalue (code, arg)
+ enum tree_code code;
+ tree arg;
+{
+ /* Handle (a, b) used as an "lvalue". */
+ if (TREE_CODE (arg) == COMPOUND_EXPR)
+ {
+ tree real_result = build_unary_op (code, TREE_OPERAND (arg, 1), 0);
+ return build (COMPOUND_EXPR, TREE_TYPE (real_result),
+ TREE_OPERAND (arg, 0), real_result);
+ }
+
+ /* Handle (a ? b : c) used as an "lvalue". */
+ if (TREE_CODE (arg) == COND_EXPR)
+ return rationalize_conditional_expr (code, arg);
+
+ if (TREE_CODE (arg) == MODIFY_EXPR)
+ return unary_complex_lvalue
+ (code, build (COMPOUND_EXPR, TREE_TYPE (TREE_OPERAND (arg, 0)),
+ arg, TREE_OPERAND (arg, 0)));
+
+ if (code != ADDR_EXPR)
+ return 0;
+
+ /* Handle (a = b) used as an "lvalue" for `&'. */
+ if (TREE_CODE (arg) == MODIFY_EXPR
+ || TREE_CODE (arg) == INIT_EXPR)
+ {
+ tree real_result = build_unary_op (code, TREE_OPERAND (arg, 0), 0);
+ return build (COMPOUND_EXPR, TREE_TYPE (real_result), arg, real_result);
+ }
+
+ if (TREE_CODE (arg) == WITH_CLEANUP_EXPR)
+ {
+ tree real_result = build_unary_op (code, TREE_OPERAND (arg, 0), 0);
+ real_result = build (WITH_CLEANUP_EXPR, TREE_TYPE (real_result),
+ real_result, 0, TREE_OPERAND (arg, 2));
+ return real_result;
+ }
+
+ if (TREE_CODE (TREE_TYPE (arg)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (arg)) == METHOD_TYPE
+ || TREE_CODE (TREE_TYPE (arg)) == OFFSET_TYPE)
+ {
+ /* The representation of something of type OFFSET_TYPE
+ is really the representation of a pointer to it.
+ Here give the representation its true type. */
+ tree t;
+ tree offset;
+
+ my_friendly_assert (TREE_CODE (arg) != SCOPE_REF, 313);
+
+ if (TREE_CODE (arg) != OFFSET_REF)
+ return 0;
+
+ t = TREE_OPERAND (arg, 1);
+
+ if (TREE_CODE (t) == FUNCTION_DECL) /* Check all this code for right semantics. */
+ return build_unary_op (ADDR_EXPR, t, 0);
+ if (TREE_CODE (t) == VAR_DECL)
+ return build_unary_op (ADDR_EXPR, t, 0);
+ else
+ {
+ /* Can't build a pointer to member if the member must
+ go through virtual base classes. */
+ if (virtual_member (DECL_FIELD_CONTEXT (t),
+ CLASSTYPE_VBASECLASSES (TREE_TYPE (TREE_OPERAND (arg, 0)))))
+ {
+ sorry ("pointer to member via virtual baseclass");
+ return error_mark_node;
+ }
+
+ if (TREE_OPERAND (arg, 0)
+ && (TREE_CODE (TREE_OPERAND (arg, 0)) != NOP_EXPR
+ || TREE_OPERAND (TREE_OPERAND (arg, 0), 0) != error_mark_node))
+ {
+ /* Don't know if this should return address to just
+ _DECL, or actual address resolved in this expression. */
+ sorry ("address of bound pointer-to-member expression");
+ return error_mark_node;
+ }
+
+ return convert (build_pointer_type (TREE_TYPE (arg)),
+ size_binop (EASY_DIV_EXPR,
+ DECL_FIELD_BITPOS (t),
+ size_int (BITS_PER_UNIT)));
+ }
+ }
+
+ if (TREE_CODE (arg) == OFFSET_REF)
+ {
+ tree left = TREE_OPERAND (arg, 0), left_addr;
+ tree right_addr = build_unary_op (ADDR_EXPR, TREE_OPERAND (arg, 1), 0);
+
+ if (left == 0)
+ {
+ if (current_class_decl)
+ left_addr = current_class_decl;
+ else
+ {
+ error ("no `this' for pointer to member");
+ return error_mark_node;
+ }
+ }
+ else
+ left_addr = build_unary_op (ADDR_EXPR, left, 0);
+
+ return build (PLUS_EXPR, build_pointer_type (TREE_TYPE (arg)),
+ build1 (NOP_EXPR, integer_type_node, left_addr),
+ build1 (NOP_EXPR, integer_type_node, right_addr));
+ }
+
+ /* We permit the compiler to make function calls returning
+ objects of aggregate type look like lvalues. */
+ {
+ tree targ = arg;
+
+ if (TREE_CODE (targ) == SAVE_EXPR)
+ targ = TREE_OPERAND (targ, 0);
+
+ if (TREE_CODE (targ) == CALL_EXPR && IS_AGGR_TYPE (TREE_TYPE (targ)))
+ {
+ if (TREE_CODE (arg) == SAVE_EXPR)
+ targ = arg;
+ else
+ targ = build_cplus_new (TREE_TYPE (arg), arg, 1);
+ return build1 (ADDR_EXPR, TYPE_POINTER_TO (TREE_TYPE (arg)), targ);
+ }
+
+ if (TREE_CODE (arg) == SAVE_EXPR && TREE_CODE (targ) == INDIRECT_REF)
+ return build (SAVE_EXPR, TYPE_POINTER_TO (TREE_TYPE (arg)),
+ TREE_OPERAND (targ, 0), current_function_decl, NULL);
+
+ /* We shouldn't wrap WITH_CLEANUP_EXPRs inside SAVE_EXPRs, but in case
+ we do, here's how to handle it. */
+ if (TREE_CODE (arg) == SAVE_EXPR && TREE_CODE (targ) == WITH_CLEANUP_EXPR)
+ {
+#if 0
+ /* Not really a bug, but something to turn on when testing. */
+ compiler_error ("WITH_CLEANUP_EXPR wrapped in SAVE_EXPR");
+#endif
+ return unary_complex_lvalue (ADDR_EXPR, targ);
+ }
+ }
+
+ /* Don't let anything else be handled specially. */
+ return 0;
+}
+
+/* Mark EXP saying that we need to be able to take the
+ address of it; it should not be allocated in a register.
+ Value is 1 if successful.
+
+ C++: we do not allow `current_class_decl' to be addressable. */
+
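+/* Editorial sketch, not part of the original source: this rejects, e.g.,
+
+ struct S {
+ void f () { S **p = &this; } // error: address of `this' not available
+ };
+
+ while `&x' on an ordinary local or parameter simply forces X out of a
+ register and into memory. */
+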
+int
+mark_addressable (exp)
+ tree exp;
+{
+ register tree x = exp;
+
+ if (TREE_ADDRESSABLE (x) == 1)
+ return 1;
+
+ while (1)
+ switch (TREE_CODE (x))
+ {
+ case ADDR_EXPR:
+ case COMPONENT_REF:
+ case ARRAY_REF:
+ x = TREE_OPERAND (x, 0);
+ break;
+
+ case PARM_DECL:
+ if (x == current_class_decl)
+ {
+ error ("address of `this' not available");
+ TREE_ADDRESSABLE (x) = 1; /* so compiler doesn't die later */
+ put_var_into_stack (x);
+ return 1;
+ }
+ case VAR_DECL:
+ if (TREE_STATIC (x)
+ && TREE_READONLY (x)
+ && DECL_RTL (x) != 0
+ && ! decl_in_memory_p (x))
+ {
+ /* We thought this would make a good constant variable,
+ but we were wrong. */
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+
+ TREE_ASM_WRITTEN (x) = 0;
+ DECL_RTL (x) = 0;
+ rest_of_decl_compilation (x, 0, IDENTIFIER_LOCAL_VALUE (x) == 0, 0);
+ TREE_ADDRESSABLE (x) = 1;
+
+ pop_obstacks ();
+
+ return 1;
+ }
+ /* Caller should not be trying to mark initialized
+ constant fields addressable. */
+ my_friendly_assert (DECL_LANG_SPECIFIC (x) == 0
+ || DECL_IN_AGGR_P (x) == 0
+ || TREE_STATIC (x)
+ || DECL_EXTERNAL (x), 314);
+
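+ /* Fall through (editorial note): VAR_DECLs also reach the
+ put-into-stack code below. */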
+ case CONST_DECL:
+ case RESULT_DECL:
+ /* For C++, we don't warn about taking the address of a register
+ variable for CONST_DECLs; ARM p97 explicitly says it's okay. */
+ put_var_into_stack (x);
+ TREE_ADDRESSABLE (x) = 1;
+ return 1;
+
+ case FUNCTION_DECL:
+ /* We have to test both conditions here. The first may
+ be non-zero in the case of processing a default function.
+ The second may be non-zero in the case of a template function. */
+ x = DECL_MAIN_VARIANT (x);
+ if ((DECL_INLINE (x) || DECL_PENDING_INLINE_INFO (x))
+ && (DECL_CONTEXT (x) == NULL_TREE
+ || TREE_CODE_CLASS (TREE_CODE (DECL_CONTEXT (x))) != 't'
+ || ! CLASSTYPE_INTERFACE_ONLY (DECL_CONTEXT (x))))
+ {
+ mark_inline_for_output (x);
+ if (x == current_function_decl)
+ DECL_EXTERNAL (x) = 0;
+ }
+ TREE_ADDRESSABLE (x) = 1;
+ TREE_USED (x) = 1;
+ TREE_ADDRESSABLE (DECL_ASSEMBLER_NAME (x)) = 1;
+ return 1;
+
+ default:
+ return 1;
+ }
+}
+
+/* Build and return a conditional expression IFEXP ? OP1 : OP2. */
+
+tree
+build_x_conditional_expr (ifexp, op1, op2)
+ tree ifexp, op1, op2;
+{
+ tree rval = NULL_TREE;
+
+ /* See comments in `build_x_binary_op'. */
+ if (op1 != 0)
+ rval = build_opfncall (COND_EXPR, LOOKUP_SPECULATIVELY, ifexp, op1, op2);
+ if (rval)
+ return build_opfncall (COND_EXPR, LOOKUP_NORMAL, ifexp, op1, op2);
+
+ return build_conditional_expr (ifexp, op1, op2);
+}
+
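+/* Editorial sketch, not part of the original source: the reconciliation
+ rules below give, for example,
+
+ int i; char *p; void *q;
+ (i ? p : q); // result type `void *'
+ (i ? p : 0); // result type `char *'
+ (i ? 1 : 2.0); // result type `double'
+ */
+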
+tree
+build_conditional_expr (ifexp, op1, op2)
+ tree ifexp, op1, op2;
+{
+ register tree type1;
+ register tree type2;
+ register enum tree_code code1;
+ register enum tree_code code2;
+ register tree result_type = NULL_TREE;
+ tree orig_op1 = op1, orig_op2 = op2;
+
+ /* If second operand is omitted, it is the same as the first one;
+ make sure it is calculated only once. */
+ if (op1 == 0)
+ {
+ if (pedantic)
+ pedwarn ("ANSI C++ forbids omitting the middle term of a ?: expression");
+ ifexp = op1 = save_expr (ifexp);
+ }
+
+ ifexp = bool_truthvalue_conversion (default_conversion (ifexp));
+
+ if (TREE_CODE (ifexp) == ERROR_MARK)
+ return error_mark_node;
+
+ op1 = require_instantiated_type (TREE_TYPE (op2), op1, error_mark_node);
+ if (op1 == error_mark_node)
+ return error_mark_node;
+ op2 = require_instantiated_type (TREE_TYPE (op1), op2, error_mark_node);
+ if (op2 == error_mark_node)
+ return error_mark_node;
+
+ /* C++: REFERENCE_TYPES must be dereferenced. */
+ type1 = TREE_TYPE (op1);
+ code1 = TREE_CODE (type1);
+ type2 = TREE_TYPE (op2);
+ code2 = TREE_CODE (type2);
+
+ if (code1 == REFERENCE_TYPE)
+ {
+ op1 = convert_from_reference (op1);
+ type1 = TREE_TYPE (op1);
+ code1 = TREE_CODE (type1);
+ }
+ if (code2 == REFERENCE_TYPE)
+ {
+ op2 = convert_from_reference (op2);
+ type2 = TREE_TYPE (op2);
+ code2 = TREE_CODE (type2);
+ }
+
+#if 1 /* Produces wrong result if within sizeof. Sorry. */
+ /* Don't promote the operands separately if they promote
+ the same way. Return the unpromoted type and let the combined
+ value get promoted if necessary. */
+
+ if (TYPE_MAIN_VARIANT (type1) == TYPE_MAIN_VARIANT (type2)
+ && code2 != ARRAY_TYPE
+#if 0
+ /* For C++, let the enumeral type come through. */
+ && code2 != ENUMERAL_TYPE
+#endif
+ && code2 != FUNCTION_TYPE
+ && code2 != METHOD_TYPE)
+ {
+ tree result;
+
+ if (TREE_CONSTANT (ifexp)
+ && (TREE_CODE (ifexp) == INTEGER_CST
+ || TREE_CODE (ifexp) == ADDR_EXPR))
+ return (integer_zerop (ifexp) ? op2 : op1);
+
+ if (TREE_CODE (op1) == CONST_DECL)
+ op1 = DECL_INITIAL (op1);
+ else if (TREE_READONLY_DECL_P (op1))
+ op1 = decl_constant_value (op1);
+ if (TREE_CODE (op2) == CONST_DECL)
+ op2 = DECL_INITIAL (op2);
+ else if (TREE_READONLY_DECL_P (op2))
+ op2 = decl_constant_value (op2);
+ if (type1 != type2)
+ type1 = c_build_type_variant
+ (type1,
+ TREE_READONLY (op1) || TREE_READONLY (op2),
+ TREE_THIS_VOLATILE (op1) || TREE_THIS_VOLATILE (op2));
+ /* ??? This is a kludge to deal with the fact that
+ we don't sort out integers and enums properly, yet. */
+ result = fold (build (COND_EXPR, type1, ifexp, op1, op2));
+ if (TREE_TYPE (result) != type1)
+ result = build1 (NOP_EXPR, type1, result);
+ return result;
+ }
+#endif
+
+ /* They don't match; promote them both and then try to reconcile them.
+ But don't permit mismatching enum types. */
+ if (code1 == ENUMERAL_TYPE)
+ {
+ if (code2 == ENUMERAL_TYPE)
+ {
+ message_2_types (error, "enumeral mismatch in conditional expression: `%s' vs `%s'", type1, type2);
+ return error_mark_node;
+ }
+ else if (extra_warnings && ! IS_AGGR_TYPE_CODE (code2))
+ warning ("enumeral and non-enumeral type in conditional expression");
+ }
+ else if (extra_warnings
+ && code2 == ENUMERAL_TYPE && ! IS_AGGR_TYPE_CODE (code1))
+ warning ("enumeral and non-enumeral type in conditional expression");
+
+ if (code1 != VOID_TYPE)
+ {
+ op1 = default_conversion (op1);
+ type1 = TREE_TYPE (op1);
+ code1 = TREE_CODE (type1);
+ }
+ if (code2 != VOID_TYPE)
+ {
+ op2 = default_conversion (op2);
+ type2 = TREE_TYPE (op2);
+ code2 = TREE_CODE (type2);
+ }
+
+ /* Quickly detect the usual case where op1 and op2 have the same type
+ after promotion. */
+ if (TYPE_MAIN_VARIANT (type1) == TYPE_MAIN_VARIANT (type2))
+ {
+ if (type1 == type2)
+ result_type = type1;
+ else
+ result_type = c_build_type_variant
+ (type1,
+ TREE_READONLY (op1) || TREE_READONLY (op2),
+ TREE_THIS_VOLATILE (op1) || TREE_THIS_VOLATILE (op2));
+ }
+ else if ((code1 == INTEGER_TYPE || code1 == REAL_TYPE)
+ && (code2 == INTEGER_TYPE || code2 == REAL_TYPE))
+ {
+ result_type = common_type (type1, type2);
+ }
+ else if (code1 == VOID_TYPE || code2 == VOID_TYPE)
+ {
+ if (pedantic && (code1 != VOID_TYPE || code2 != VOID_TYPE))
+ pedwarn ("ANSI C++ forbids conditional expr with only one void side");
+ result_type = void_type_node;
+ }
+ else if (code1 == POINTER_TYPE && code2 == POINTER_TYPE)
+ {
+ if (comp_target_types (type1, type2, 1))
+ result_type = common_type (type1, type2);
+ else if (integer_zerop (op1) && TREE_TYPE (type1) == void_type_node
+ && TREE_CODE (orig_op1) != NOP_EXPR)
+ result_type = qualify_type (type2, type1);
+ else if (integer_zerop (op2) && TREE_TYPE (type2) == void_type_node
+ && TREE_CODE (orig_op2) != NOP_EXPR)
+ result_type = qualify_type (type1, type2);
+ else if (TYPE_MAIN_VARIANT (TREE_TYPE (type1)) == void_type_node)
+ {
+ if (pedantic && TREE_CODE (type2) == FUNCTION_TYPE)
+ pedwarn ("ANSI C++ forbids conditional expr between `void *' and function pointer");
+ result_type = qualify_type (type1, type2);
+ }
+ else if (TYPE_MAIN_VARIANT (TREE_TYPE (type2)) == void_type_node)
+ {
+ if (pedantic && TREE_CODE (type1) == FUNCTION_TYPE)
+ pedwarn ("ANSI C++ forbids conditional expr between `void *' and function pointer");
+ result_type = qualify_type (type2, type1);
+ }
+ /* C++ */
+ else if (comptypes (type2, type1, 0))
+ result_type = type2;
+ else if (IS_AGGR_TYPE (TREE_TYPE (type1))
+ && IS_AGGR_TYPE (TREE_TYPE (type2))
+ && (result_type = common_base_type (TREE_TYPE (type1), TREE_TYPE (type2))))
+ {
+ if (result_type == error_mark_node)
+ {
+ message_2_types (error, "common base type of types `%s' and `%s' is ambiguous",
+ TREE_TYPE (type1), TREE_TYPE (type2));
+ result_type = ptr_type_node;
+ }
+ else result_type = TYPE_POINTER_TO (result_type);
+ }
+ else
+ {
+ pedwarn ("pointer type mismatch in conditional expression");
+ result_type = ptr_type_node;
+ }
+ }
+ else if (code1 == POINTER_TYPE && code2 == INTEGER_TYPE)
+ {
+ if (!integer_zerop (op2))
+ pedwarn ("pointer/integer type mismatch in conditional expression");
+ else
+ {
+ op2 = null_pointer_node;
+#if 0 /* Sez who? */
+ if (pedantic && TREE_CODE (type1) == FUNCTION_TYPE)
+ pedwarn ("ANSI C++ forbids conditional expr between 0 and function pointer");
+#endif
+ }
+ result_type = type1;
+ }
+ else if (code2 == POINTER_TYPE && code1 == INTEGER_TYPE)
+ {
+ if (!integer_zerop (op1))
+ pedwarn ("pointer/integer type mismatch in conditional expression");
+ else
+ {
+ op1 = null_pointer_node;
+#if 0 /* Sez who? */
+ if (pedantic && TREE_CODE (type2) == FUNCTION_TYPE)
+ pedwarn ("ANSI C++ forbids conditional expr between 0 and function pointer");
+#endif
+ }
+ result_type = type2;
+ }
+
+ if (!result_type)
+ {
+ /* The match does not look good. If either is
+ an aggregate value, try converting to a scalar type. */
+ if (code1 == RECORD_TYPE && code2 == RECORD_TYPE)
+ {
+ message_2_types (error, "aggregate mismatch in conditional expression: `%s' vs `%s'", type1, type2);
+ return error_mark_node;
+ }
+ if (code1 == RECORD_TYPE && TYPE_HAS_CONVERSION (type1))
+ {
+ tree tmp = build_type_conversion (CONVERT_EXPR, type2, op1, 0);
+ if (tmp == NULL_TREE)
+ {
+ cp_error ("aggregate type `%T' could not convert on lhs of `:'", type1);
+ return error_mark_node;
+ }
+ if (tmp == error_mark_node)
+ error ("ambiguous pointer conversion");
+ result_type = type2;
+ op1 = tmp;
+ }
+ else if (code2 == RECORD_TYPE && TYPE_HAS_CONVERSION (type2))
+ {
+ tree tmp = build_type_conversion (CONVERT_EXPR, type1, op2, 0);
+ if (tmp == NULL_TREE)
+ {
+ cp_error ("aggregate type `%T' could not convert on rhs of `:'", type2);
+ return error_mark_node;
+ }
+ if (tmp == error_mark_node)
+ error ("ambiguous pointer conversion");
+ result_type = type1;
+ op2 = tmp;
+ }
+ else if (flag_cond_mismatch)
+ result_type = void_type_node;
+ else
+ {
+ error ("type mismatch in conditional expression");
+ return error_mark_node;
+ }
+ }
+
+ if (result_type != TREE_TYPE (op1))
+ op1 = convert_and_check (result_type, op1);
+ if (result_type != TREE_TYPE (op2))
+ op2 = convert_and_check (result_type, op2);
+
+#if 0
+ /* XXX delete me, I've been here for years. */
+ if (IS_AGGR_TYPE_CODE (code1))
+ {
+ result_type = TREE_TYPE (op1);
+ if (TREE_CONSTANT (ifexp))
+ return (integer_zerop (ifexp) ? op2 : op1);
+
+ if (TYPE_MODE (result_type) == BLKmode)
+ {
+ register tree tempvar
+ = build_decl (VAR_DECL, NULL_TREE, result_type);
+ register tree xop1 = build_modify_expr (tempvar, NOP_EXPR, op1);
+ register tree xop2 = build_modify_expr (tempvar, NOP_EXPR, op2);
+ register tree result = fold (build (COND_EXPR, result_type,
+ ifexp, xop1, xop2));
+
+ layout_decl (tempvar, 0);
+ /* No way to handle variable-sized objects here.
+ I fear that the entire handling of BLKmode conditional exprs
+ needs to be redone. */
+ my_friendly_assert (TREE_CONSTANT (DECL_SIZE (tempvar)), 315);
+ DECL_RTL (tempvar)
+ = assign_stack_local (DECL_MODE (tempvar),
+ (TREE_INT_CST_LOW (DECL_SIZE (tempvar))
+ + BITS_PER_UNIT - 1)
+ / BITS_PER_UNIT,
+ 0);
+
+ TREE_SIDE_EFFECTS (result)
+ = TREE_SIDE_EFFECTS (ifexp) | TREE_SIDE_EFFECTS (op1)
+ | TREE_SIDE_EFFECTS (op2);
+ return build (COMPOUND_EXPR, result_type, result, tempvar);
+ }
+ }
+#endif /* 0 */
+
+ if (TREE_CONSTANT (ifexp))
+ return integer_zerop (ifexp) ? op2 : op1;
+
+ return fold (build (COND_EXPR, result_type, ifexp, op1, op2));
+}
+
+/* Handle overloading of the ',' operator when needed. Otherwise,
+ this function just builds an expression list. */
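+/* Editorial sketch, not part of the original source: given, e.g.,
+
+ struct Seq { Seq &operator, (int); };
+ Seq s;
+ (s, 1, 2); // each `,' resolves to Seq::operator,
+
+ the overloaded operator is found via build_opfncall; otherwise the
+ operands simply form a COMPOUND_EXPR. */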
+tree
+build_x_compound_expr (list)
+ tree list;
+{
+ tree rest = TREE_CHAIN (list);
+ tree result;
+
+ if (rest == NULL_TREE)
+ return build_compound_expr (list);
+
+ result = build_opfncall (COMPOUND_EXPR, LOOKUP_NORMAL,
+ TREE_VALUE (list), TREE_VALUE (rest), NULL_TREE);
+ if (result)
+ return build_x_compound_expr (tree_cons (NULL_TREE, result, TREE_CHAIN (rest)));
+ return build_compound_expr (tree_cons (NULL_TREE, TREE_VALUE (list),
+ build_tree_list (NULL_TREE, build_x_compound_expr (rest))));
+}
+
+/* Given a list of expressions, return a compound expression
+ that performs them all and returns the value of the last of them. */
+
+tree
+build_compound_expr (list)
+ tree list;
+{
+ register tree rest;
+
+ if (TREE_READONLY_DECL_P (TREE_VALUE (list)))
+ TREE_VALUE (list) = decl_constant_value (TREE_VALUE (list));
+
+ if (TREE_CHAIN (list) == 0)
+ {
+ /* build_c_cast puts on a NOP_EXPR to make the result not an lvalue.
+ Strip such NOP_EXPRs, since LIST is used in non-lvalue context. */
+ if (TREE_CODE (TREE_VALUE (list)) == NOP_EXPR
+ && (TREE_TYPE (TREE_VALUE (list))
+ == TREE_TYPE (TREE_OPERAND (TREE_VALUE (list), 0))))
+ TREE_VALUE (list) = TREE_OPERAND (TREE_VALUE (list), 0);
+
+ /* Convert arrays to pointers. */
+ if (TREE_CODE (TREE_TYPE (TREE_VALUE (list))) == ARRAY_TYPE)
+ return default_conversion (TREE_VALUE (list));
+ else
+ return TREE_VALUE (list);
+ }
+
+ rest = build_compound_expr (TREE_CHAIN (list));
+
+ /* When pedantic, a compound expression can be neither an lvalue
+ nor an integer constant expression. */
+ if (! TREE_SIDE_EFFECTS (TREE_VALUE (list)) && ! pedantic)
+ return rest;
+
+ return build (COMPOUND_EXPR, TREE_TYPE (rest),
+ break_out_cleanups (TREE_VALUE (list)), rest);
+}
+
+tree
+build_static_cast (type, expr)
+ tree type, expr;
+{
+ return build_c_cast (type, expr);
+}
+
+tree
+build_reinterpret_cast (type, expr)
+ tree type, expr;
+{
+ return build_c_cast (type, expr);
+}
+
+tree
+build_const_cast (type, expr)
+ tree type, expr;
+{
+ return build_c_cast (type, expr);
+}
+
+/* Build an expression representing a cast to type TYPE of expression EXPR. */
+
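+/* Editorial sketch, not part of the original source: besides ordinary
+ casts this accepts the Cfront-compatible cast to an array type and
+ warns about qualifier-dropping pointer casts, e.g.
+
+ typedef char arr[4];
+ char *p;
+ (arr) p; // pedwarn; treated as (char *) p
+ const int *cp;
+ (int *) cp; // with -Wcast-qual: discards `const'
+ */
+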
+tree
+build_c_cast (type, expr)
+ register tree type;
+ tree expr;
+{
+ register tree value = expr;
+
+ if (type == error_mark_node || expr == error_mark_node)
+ return error_mark_node;
+
+ /* build_c_cast puts on a NOP_EXPR to make the result not an lvalue.
+ Strip such NOP_EXPRs, since VALUE is being used in non-lvalue context. */
+ if (TREE_CODE (value) == NOP_EXPR
+ && TREE_TYPE (value) == TREE_TYPE (TREE_OPERAND (value, 0)))
+ value = TREE_OPERAND (value, 0);
+
+ if (TREE_TYPE (expr)
+ && TREE_CODE (TREE_TYPE (expr)) == OFFSET_TYPE
+ && TREE_CODE (type) != OFFSET_TYPE)
+ value = resolve_offset_ref (value);
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ /* Allow casting from T1* to T2[] because Cfront allows it.
+ NIHCL uses it. It is not valid ANSI C, however, and hence not
+ valid ANSI C++. */
+ if (TREE_CODE (TREE_TYPE (expr)) == POINTER_TYPE)
+ {
+ if (pedantic)
+ pedwarn ("ANSI C++ forbids casting to an array type");
+ type = build_pointer_type (TREE_TYPE (type));
+ }
+ else
+ {
+ error ("ANSI C++ forbids casting to an array type");
+ return error_mark_node;
+ }
+ }
+
+ if (TREE_CODE (type) == FUNCTION_TYPE
+ || TREE_CODE (type) == METHOD_TYPE)
+ {
+ cp_error ("casting to function type `%T'", type);
+ return error_mark_node;
+ }
+
+ if (IS_SIGNATURE (type))
+ {
+ error ("cast specifies signature type");
+ return error_mark_node;
+ }
+
+ /* If there's only one function in the overloaded space,
+ just take it. */
+ if (TREE_CODE (value) == TREE_LIST
+ && TREE_CHAIN (value) == NULL_TREE)
+ value = TREE_VALUE (value);
+
+ if (TREE_CODE (type) == VOID_TYPE)
+ value = build1 (NOP_EXPR, type, value);
+ else if (TREE_TYPE (value) == NULL_TREE
+ || type_unknown_p (value))
+ {
+ value = instantiate_type (type, value, 1);
+ /* Did we lose? */
+ if (value == error_mark_node)
+ return error_mark_node;
+ }
+ else
+ {
+ tree otype, ovalue;
+
+ /* Convert functions and arrays to pointers and
+ convert references to their expanded types,
+ but don't convert any other types. */
+ if (TREE_CODE (TREE_TYPE (value)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (value)) == METHOD_TYPE
+ || TREE_CODE (TREE_TYPE (value)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (value)) == REFERENCE_TYPE)
+ value = default_conversion (value);
+ otype = TREE_TYPE (value);
+
+ /* Optionally warn about potentially worrisome casts. */
+
+ if (warn_cast_qual
+ && TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (otype) == POINTER_TYPE)
+ {
+ /* For C++ we make these regular warnings, rather than
+ softening them into pedwarns. */
+ if (TYPE_VOLATILE (TREE_TYPE (otype))
+ && ! TYPE_VOLATILE (TREE_TYPE (type)))
+ warning ("cast discards `volatile' from pointer target type");
+ if (TYPE_READONLY (TREE_TYPE (otype))
+ && ! TYPE_READONLY (TREE_TYPE (type)))
+ warning ("cast discards `const' from pointer target type");
+ }
+
+ /* Warn about possible alignment problems. */
+ if (STRICT_ALIGNMENT && warn_cast_align
+ && TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (otype) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (otype)) != VOID_TYPE
+ && TREE_CODE (TREE_TYPE (otype)) != FUNCTION_TYPE
+ && TYPE_ALIGN (TREE_TYPE (type)) > TYPE_ALIGN (TREE_TYPE (otype)))
+ warning ("cast increases required alignment of target type");
+
+#if 0
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && TREE_CODE (otype) == POINTER_TYPE
+ && TYPE_PRECISION (type) != TYPE_PRECISION (otype))
+ warning ("cast from pointer to integer of different size");
+
+ if (TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (otype) == INTEGER_TYPE
+ && TYPE_PRECISION (type) != TYPE_PRECISION (otype)
+ /* Don't warn about converting 0 to pointer,
+ provided the 0 was explicit--not cast or made by folding. */
+ && !(TREE_CODE (value) == INTEGER_CST && integer_zerop (value)))
+ warning ("cast to pointer from integer of different size");
+#endif
+
+ ovalue = value;
+ value = convert_force (type, value);
+
+ /* Ignore any integer overflow caused by the cast. */
+ if (TREE_CODE (value) == INTEGER_CST)
+ {
+ TREE_OVERFLOW (value) = TREE_OVERFLOW (ovalue);
+ TREE_CONSTANT_OVERFLOW (value) = TREE_CONSTANT_OVERFLOW (ovalue);
+ }
+ }
+
+ /* Always produce some operator for an explicit cast,
+ so we can tell (for -pedantic) that the cast is no lvalue.
+ Also, pedantically, don't let (void *) (FOO *) 0 be a null
+ pointer constant. */
+ if (value == expr
+ || (pedantic
+ && TREE_CODE (value) == INTEGER_CST
+ && TREE_CODE (expr) == INTEGER_CST
+ && TREE_CODE (TREE_TYPE (expr)) != INTEGER_TYPE))
+ {
+ tree nvalue = build1 (NOP_EXPR, type, value);
+ TREE_CONSTANT (nvalue) = TREE_CONSTANT (value);
+ return nvalue;
+ }
+
+ return value;
+}
+
+#if 0
+/* Build an assignment expression of lvalue LHS from value RHS.
+
+ In C++, if the left hand side of the assignment is a REFERENCE_TYPE,
+ that reference is dereferenced down to its base type. */
+
+/* Return a reference to the BASE_INDEX part of EXPR. TYPE is
+ the type to which BASE_INDEX applies. */
+static tree
+get_base_ref (type, base_index, expr)
+ tree type;
+ int base_index;
+ tree expr;
+{
+ tree binfos = TYPE_BINFO_BASETYPES (type);
+ tree base_binfo = TREE_VEC_ELT (binfos, base_index);
+ tree ref;
+
+ if (TREE_CODE (expr) == ARRAY_REF
+ || ! BINFO_OFFSET_ZEROP (base_binfo)
+ || TREE_VIA_VIRTUAL (base_binfo)
+ || TYPE_MODE (type) != TYPE_MODE (BINFO_TYPE (base_binfo)))
+ {
+ tree addr = build_unary_op (ADDR_EXPR, expr, 0);
+ ref = build_indirect_ref (convert_pointer_to (base_binfo, addr),
+ NULL_PTR);
+ }
+ else
+ {
+ ref = copy_node (expr);
+ TREE_TYPE (ref) = BINFO_TYPE (base_binfo);
+ }
+ return ref;
+}
+
+/* Build an assignment expression of lvalue LHS from value RHS.
+ MODIFYCODE is the code for a binary operator that we use
+ to combine the old value of LHS with RHS to get the new value.
+ Or else MODIFYCODE is NOP_EXPR meaning do a simple assignment.
+
+ C++: If MODIFYCODE is INIT_EXPR, then leave references unbashed.
+
+ `build_modify_expr_1' implements recursive part of memberwise
+ assignment operation. */
+static tree
+build_modify_expr_1 (lhs, modifycode, rhs, basetype_path)
+ tree lhs, rhs;
+ enum tree_code modifycode;
+ tree basetype_path;
+{
+ register tree result;
+ tree newrhs = rhs;
+ tree lhstype = TREE_TYPE (lhs);
+ tree olhstype = lhstype;
+
+ /* Avoid duplicate error messages from operands that had errors. */
+ if (TREE_CODE (lhs) == ERROR_MARK || TREE_CODE (rhs) == ERROR_MARK)
+ return error_mark_node;
+
+ /* If a binary op has been requested, combine the old LHS value with the RHS
+ producing the value we should actually store into the LHS. */
+
+ if (modifycode == INIT_EXPR)
+ ;
+ else if (modifycode == NOP_EXPR)
+ {
+ /* must deal with overloading of `operator=' here. */
+ if (TREE_CODE (lhstype) == REFERENCE_TYPE)
+ lhstype = TREE_TYPE (lhstype);
+ else
+ lhstype = olhstype;
+ }
+ else
+ {
+ lhs = stabilize_reference (lhs);
+ newrhs = build_binary_op (modifycode, lhs, rhs, 1);
+ modifycode = NOP_EXPR;
+ }
+
+ /* If storing into a structure or union member,
+ it has probably been given type `int'.
+ Compute the type that would go with
+ the actual amount of storage the member occupies. */
+
+ if (TREE_CODE (lhs) == COMPONENT_REF
+ && (TREE_CODE (lhstype) == INTEGER_TYPE
+ || TREE_CODE (lhstype) == REAL_TYPE
+ || TREE_CODE (lhstype) == ENUMERAL_TYPE))
+ lhstype = TREE_TYPE (get_unwidened (lhs, 0));
+
+ /* C++: The semantics of C++ differ from those of C when an
+ assignment of an aggregate is desired. Assignment in C++ is
+ now defined as memberwise assignment of non-static members
+ and base class objects. This rule applies recursively
+ until a member of a built-in type is found.
+
+ Also, we cannot do a bit-wise copy of aggregates which
+ contain virtual function table pointers. Those
+ pointer values must be preserved through the copy.
+ However, this is handled in expand_expr, and not here.
+ This is because much better code can be generated at
+ that stage than this one. */
+ if (TREE_CODE (lhstype) == RECORD_TYPE
+ && TYPE_LANG_SPECIFIC (lhstype)
+ && TYPE_MAIN_VARIANT (lhstype) == TYPE_MAIN_VARIANT (TREE_TYPE (newrhs)))
+ {
+ register tree elt;
+ int i;
+
+ /* Perform operation on object. */
+ if (modifycode == INIT_EXPR && TYPE_HAS_INIT_REF (lhstype))
+ {
+ result = build_method_call (lhs, constructor_name_full (lhstype),
+ build_tree_list (NULL_TREE, rhs),
+ basetype_path, LOOKUP_NORMAL);
+ return build_indirect_ref (result, NULL_PTR);
+ }
+ else if (modifycode == NOP_EXPR)
+ {
+ /* `operator=' is not an inheritable operator; see 13.4.3. */
+ if (TYPE_LANG_SPECIFIC (lhstype) && TYPE_HAS_ASSIGNMENT (lhstype))
+ {
+ result = build_opfncall (MODIFY_EXPR, LOOKUP_NORMAL,
+ lhs, rhs, make_node (NOP_EXPR));
+ if (result == NULL_TREE)
+ return error_mark_node;
+ return result;
+ }
+ }
+
+ if (TYPE_USES_VIRTUAL_BASECLASSES (lhstype)
+ || (modifycode == NOP_EXPR && TYPE_GETS_ASSIGNMENT (lhstype))
+ || (modifycode == INIT_EXPR && TYPE_GETS_INIT_REF (lhstype)))
+ {
+ tree binfos = BINFO_BASETYPES (TYPE_BINFO (lhstype));
+ result = NULL_TREE;
+
+ if (binfos != NULL_TREE)
+ /* Perform operation on each member, depth-first, left-right. */
+ for (i = 0; i <= TREE_VEC_LENGTH (binfos)-1; i++)
+ {
+ tree base_binfo = TREE_VEC_ELT (binfos, i);
+ tree base_lhs, base_rhs;
+ tree new_result;
+
+ /* Assignments from virtual baseclasses handled elsewhere. */
+ if (TREE_VIA_VIRTUAL (base_binfo))
+ continue;
+
+ base_lhs = get_base_ref (lhstype, i, lhs);
+ base_rhs = get_base_ref (lhstype, i, newrhs);
+
+ BINFO_INHERITANCE_CHAIN (base_binfo) = basetype_path;
+ new_result
+ = build_modify_expr_1 (base_lhs, modifycode, base_rhs,
+ base_binfo);
+
+ /* We either get back a compound stmt, or a simple one. */
+ if (new_result && TREE_CODE (new_result) == TREE_LIST)
+ new_result = build_compound_expr (new_result);
+ result = tree_cons (NULL_TREE, new_result, result);
+ }
+
+ for (elt = TYPE_FIELDS (lhstype); elt; elt = TREE_CHAIN (elt))
+ {
+ tree vbases = NULL_TREE;
+ tree elt_lhs, elt_rhs;
+
+ if (TREE_CODE (elt) != FIELD_DECL)
+ continue;
+ if (DECL_NAME (elt)
+ && (VFIELD_NAME_P (DECL_NAME (elt))
+ || VBASE_NAME_P (DECL_NAME (elt))))
+ continue;
+
+ if (TREE_READONLY (elt)
+ || TREE_CODE (TREE_TYPE (elt)) == REFERENCE_TYPE)
+ {
+ cp_error ("cannot generate default `%T::operator ='",
+ lhstype);
+ if (TREE_CODE (TREE_TYPE (elt)) == REFERENCE_TYPE)
+ cp_error_at ("because member `%#D' is a reference", elt);
+ else
+ cp_error_at ("because member `%#D' is const", elt);
+
+ return error_mark_node;
+ }
+
+ if (IS_AGGR_TYPE (TREE_TYPE (elt))
+ && TYPE_LANG_SPECIFIC (TREE_TYPE (elt)))
+ vbases = CLASSTYPE_VBASECLASSES (TREE_TYPE (elt));
+
+ elt_lhs = build (COMPONENT_REF, TREE_TYPE (elt), lhs, elt);
+ elt_rhs = build (COMPONENT_REF, TREE_TYPE (elt), newrhs, elt);
+ /* It is not always safe to go through `build_modify_expr_1'
+ when performing element-wise copying. This is because
+ an element may be of ARRAY_TYPE, which will not
+ be properly copied as a naked element. */
+ if (TREE_CODE (TREE_TYPE (elt)) == RECORD_TYPE
+ && TYPE_LANG_SPECIFIC (TREE_TYPE (elt)))
+ basetype_path = TYPE_BINFO (TREE_TYPE (elt));
+
+ while (vbases)
+ {
+ tree elt_lhs_addr = build_unary_op (ADDR_EXPR, elt_lhs, 0);
+ tree elt_rhs_addr = build_unary_op (ADDR_EXPR, elt_rhs, 0);
+
+ elt_lhs_addr = convert_pointer_to (vbases, elt_lhs_addr);
+ elt_rhs_addr = convert_pointer_to (vbases, elt_rhs_addr);
+ result
+ = tree_cons (NULL_TREE,
+ build_modify_expr_1
+ (build_indirect_ref (elt_lhs_addr, NULL_PTR),
+ modifycode,
+ build_indirect_ref (elt_rhs_addr, NULL_PTR),
+ basetype_path),
+ result);
+ if (TREE_VALUE (result) == error_mark_node)
+ return error_mark_node;
+ vbases = TREE_CHAIN (vbases);
+ }
+ elt_lhs = build_modify_expr_1 (elt_lhs, modifycode, elt_rhs,
+ basetype_path);
+ result = tree_cons (NULL_TREE, elt_lhs, result);
+ }
+
+ if (result)
+ return build_compound_expr (result);
+ /* No fields to move. */
+ return integer_zero_node;
+ }
+ else
+ {
+ result = build (modifycode == NOP_EXPR ? MODIFY_EXPR : INIT_EXPR,
+ void_type_node, lhs, rhs);
+ TREE_SIDE_EFFECTS (result) = 1;
+ return result;
+ }
+ }
+
+ result = build_modify_expr (lhs, modifycode, newrhs);
+ /* ARRAY_TYPEs cannot be converted to anything meaningful,
+ and leaving it there screws up `build_compound_expr' when
+ it tries to default-convert everything. */
+ if (TREE_CODE (TREE_TYPE (result)) == ARRAY_TYPE)
+ TREE_TYPE (result) = void_type_node;
+ return result;
+}
+#endif
+
+/* Taken from expr.c:
+ Subroutine of expand_expr:
+ record the non-copied parts (LIST) of an expr (LHS), and return a list
+ which specifies the initial values of these parts. */
+
+static tree
+init_noncopied_parts (lhs, list)
+ tree lhs;
+ tree list;
+{
+ tree tail;
+ tree parts = 0;
+
+ for (tail = list; tail; tail = TREE_CHAIN (tail))
+ if (TREE_CODE (TREE_VALUE (tail)) == TREE_LIST)
+ parts = chainon (parts, init_noncopied_parts (lhs, TREE_VALUE (tail)));
+ else
+ {
+ tree part = TREE_VALUE (tail);
+ tree part_type = TREE_TYPE (part);
+ tree to_be_initialized = build (COMPONENT_REF, part_type, lhs, part);
+ parts = tree_cons (TREE_PURPOSE (tail), to_be_initialized, parts);
+ }
+ return parts;
+}
+
+/* Build an assignment expression of lvalue LHS from value RHS.
+ MODIFYCODE is the code for a binary operator that we use
+ to combine the old value of LHS with RHS to get the new value.
+ Or else MODIFYCODE is NOP_EXPR meaning do a simple assignment.
+
+ C++: If MODIFYCODE is INIT_EXPR, then leave references unbashed.
+*/
+tree
+build_modify_expr (lhs, modifycode, rhs)
+ tree lhs;
+ enum tree_code modifycode;
+ tree rhs;
+{
+ register tree result;
+ tree newrhs = rhs;
+ tree lhstype = TREE_TYPE (lhs);
+ tree olhstype = lhstype;
+ tree olhs = lhs;
+
+ /* Types that aren't fully specified cannot be used in assignments. */
+ lhs = require_complete_type (lhs);
+
+ /* Avoid duplicate error messages from operands that had errors. */
+ if (TREE_CODE (lhs) == ERROR_MARK || TREE_CODE (rhs) == ERROR_MARK)
+ return error_mark_node;
+
+ /* Decide early if we are going to protect RHS from GC
+ before assigning it to LHS. */
+ if (type_needs_gc_entry (TREE_TYPE (rhs))
+ && ! value_safe_from_gc (lhs, rhs))
+ rhs = protect_value_from_gc (lhs, rhs);
+
+ newrhs = rhs;
+
+ /* Handle assignment to signature pointers/refs. */
+
+ if (TYPE_LANG_SPECIFIC (lhstype) &&
+ (IS_SIGNATURE_POINTER (lhstype) || IS_SIGNATURE_REFERENCE (lhstype)))
+ {
+ return build_signature_pointer_constructor (lhs, rhs);
+ }
+
+ /* Handle control structure constructs used as "lvalues". */
+
+ switch (TREE_CODE (lhs))
+ {
+ /* Handle --foo = 5; these are valid constructs in C++. */
+ case PREDECREMENT_EXPR:
+ case PREINCREMENT_EXPR:
+ if (TREE_SIDE_EFFECTS (TREE_OPERAND (lhs, 0)))
+ lhs = build (TREE_CODE (lhs), TREE_TYPE (lhs),
+ stabilize_reference (TREE_OPERAND (lhs, 0)));
+ return build (COMPOUND_EXPR, lhstype,
+ lhs,
+ build_modify_expr (TREE_OPERAND (lhs, 0),
+ modifycode, rhs));
+
+ /* Handle (a, b) used as an "lvalue". */
+ case COMPOUND_EXPR:
+ newrhs = build_modify_expr (TREE_OPERAND (lhs, 1),
+ modifycode, rhs);
+ if (TREE_CODE (newrhs) == ERROR_MARK)
+ return error_mark_node;
+ return build (COMPOUND_EXPR, lhstype,
+ TREE_OPERAND (lhs, 0), newrhs);
+
+ case MODIFY_EXPR:
+ newrhs = build_modify_expr (TREE_OPERAND (lhs, 0), modifycode, rhs);
+ if (TREE_CODE (newrhs) == ERROR_MARK)
+ return error_mark_node;
+ return build (COMPOUND_EXPR, lhstype, lhs, newrhs);
+
+ /* Handle (a ? b : c) used as an "lvalue". */
+ case COND_EXPR:
+ rhs = save_expr (rhs);
+ {
+ /* Produce (a ? (b = rhs) : (c = rhs))
+ except that the RHS goes through a save-expr
+ so the code to compute it is only emitted once. */
+ tree cond
+ = build_conditional_expr (TREE_OPERAND (lhs, 0),
+ build_modify_expr (TREE_OPERAND (lhs, 1),
+ modifycode, rhs),
+ build_modify_expr (TREE_OPERAND (lhs, 2),
+ modifycode, rhs));
+ if (TREE_CODE (cond) == ERROR_MARK)
+ return cond;
+ /* Make sure the code to compute the rhs comes out
+ before the split. */
+ return build (COMPOUND_EXPR, TREE_TYPE (lhs),
+ /* Cast to void to suppress warning
+ from warn_if_unused_value. */
+ convert (void_type_node, rhs), cond);
+ }
+ }
+
+ if (TREE_CODE (lhs) == OFFSET_REF)
+ {
+ if (TREE_OPERAND (lhs, 0) == NULL_TREE)
+ {
+ /* Static class member? */
+ tree member = TREE_OPERAND (lhs, 1);
+ if (TREE_CODE (member) == VAR_DECL)
+ lhs = member;
+ else
+ {
+ compiler_error ("invalid static class member");
+ return error_mark_node;
+ }
+ }
+ else
+ lhs = resolve_offset_ref (lhs);
+
+ olhstype = lhstype = TREE_TYPE (lhs);
+ }
+
+ if (TREE_CODE (lhstype) == REFERENCE_TYPE
+ && modifycode != INIT_EXPR)
+ {
+ lhs = convert_from_reference (lhs);
+ olhstype = lhstype = TREE_TYPE (lhs);
+ }
+
+ /* If a binary op has been requested, combine the old LHS value with the RHS
+ producing the value we should actually store into the LHS. */
+
+ if (modifycode == INIT_EXPR)
+ {
+ if (TYPE_LANG_SPECIFIC (lhstype) && TYPE_HAS_CONSTRUCTOR (lhstype))
+ {
+ result = build_method_call (lhs, constructor_name_full (lhstype),
+ build_tree_list (NULL_TREE, rhs),
+ NULL_TREE, LOOKUP_NORMAL);
+ if (result == NULL_TREE)
+ return error_mark_node;
+ return result;
+ }
+ }
+ else if (modifycode == NOP_EXPR)
+ {
+#if 1
+ /* `operator=' is not an inheritable operator. */
+ if (TYPE_LANG_SPECIFIC (lhstype) && TYPE_HAS_ASSIGNMENT (lhstype))
+ {
+ result = build_opfncall (MODIFY_EXPR, LOOKUP_NORMAL,
+ lhs, rhs, make_node (NOP_EXPR));
+ if (result == NULL_TREE)
+ return error_mark_node;
+ return result;
+ }
+#else
+ /* Treat `operator=' as an inheritable operator. */
+ if (TYPE_LANG_SPECIFIC (lhstype) && TYPE_GETS_ASSIGNMENT (lhstype))
+ {
+ tree orig_lhstype = lhstype;
+ while (! TYPE_HAS_ASSIGNMENT (lhstype))
+ {
+ int i, n_baseclasses = CLASSTYPE_N_BASECLASSES (lhstype);
+ tree basetype = NULL_TREE;
+ for (i = 0; i < n_baseclasses; i++)
+ if (TYPE_GETS_ASSIGNMENT (TYPE_BINFO_BASETYPE (lhstype, i)))
+ {
+ if (basetype != NULL_TREE)
+ {
+ message_2_types (error, "base classes `%s' and `%s' both have operator ='",
+ basetype,
+ TYPE_BINFO_BASETYPE (lhstype, i));
+ return error_mark_node;
+ }
+ basetype = TYPE_BINFO_BASETYPE (lhstype, i);
+ }
+ lhstype = basetype;
+ }
+ if (orig_lhstype != lhstype)
+ {
+ lhs = build_indirect_ref (convert_pointer_to (lhstype,
+ build_unary_op (ADDR_EXPR, lhs, 0)), NULL_PTR);
+ if (lhs == error_mark_node)
+ {
+ cp_error ("conversion to private basetype `%T'", lhstype);
+ return error_mark_node;
+ }
+ }
+ result = build_opfncall (MODIFY_EXPR, LOOKUP_NORMAL,
+ lhs, rhs, make_node (NOP_EXPR));
+ if (result == NULL_TREE)
+ return error_mark_node;
+ return result;
+ }
+#endif
+ lhstype = olhstype;
+ }
+ else if (PROMOTES_TO_AGGR_TYPE (lhstype, REFERENCE_TYPE))
+ {
+ /* This case must convert to some sort of lvalue that
+ can participate in an op= operation. */
+ tree lhs_tmp = lhs;
+ tree rhs_tmp = rhs;
+ if (build_default_binary_type_conversion (modifycode, &lhs_tmp, &rhs_tmp))
+ {
+ lhs = stabilize_reference (lhs_tmp);
+ /* Forget it was ever anything else. */
+ olhstype = lhstype = TREE_TYPE (lhs);
+ newrhs = build_binary_op (modifycode, lhs, rhs_tmp, 1);
+ }
+ else
+ return error_mark_node;
+ }
+ else
+ {
+ lhs = stabilize_reference (lhs);
+ newrhs = build_binary_op (modifycode, lhs, rhs, 1);
+ }
+
+ /* Handle a cast used as an "lvalue".
+ We have already performed any binary operator using the value as cast.
+ Now convert the result to the cast type of the lhs,
+ and then to the true type of the lhs and store it there;
+ then convert result back to the cast type to be the value
+ of the assignment. */
+
+ switch (TREE_CODE (lhs))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ if (TREE_CODE (TREE_TYPE (newrhs)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (newrhs)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (newrhs)) == METHOD_TYPE
+ || TREE_CODE (TREE_TYPE (newrhs)) == OFFSET_TYPE)
+ newrhs = default_conversion (newrhs);
+ {
+ tree inner_lhs = TREE_OPERAND (lhs, 0);
+ tree result;
+ if (! lvalue_p (lhs) && pedantic)
+ pedwarn ("cast to non-reference type used as lvalue");
+
+ result = build_modify_expr (inner_lhs, NOP_EXPR,
+ convert (TREE_TYPE (inner_lhs),
+ convert (lhstype, newrhs)));
+ if (TREE_CODE (result) == ERROR_MARK)
+ return result;
+ return convert (TREE_TYPE (lhs), result);
+ }
+ }
+
+ /* Now we have handled acceptable kinds of LHS that are not truly lvalues.
+ Reject anything strange now. */
+
+ if (!lvalue_or_else (lhs, "assignment"))
+ return error_mark_node;
+
+ GNU_xref_assign (lhs);
+
+ /* Warn about storing in something that is `const'. */
+ /* For C++, don't warn if this is initialization. */
+ if (modifycode != INIT_EXPR
+ /* For assignment to `const' signature pointer/reference fields,
+ don't warn either; we already printed a better message before. */
+ && ! (TREE_CODE (lhs) == COMPONENT_REF
+ && (IS_SIGNATURE_POINTER (TREE_TYPE (TREE_OPERAND (lhs, 0)))
+ || IS_SIGNATURE_REFERENCE (TREE_TYPE (TREE_OPERAND (lhs, 0)))))
+ && (TREE_READONLY (lhs) || TYPE_READONLY (lhstype)
+ || ((TREE_CODE (lhstype) == RECORD_TYPE
+ || TREE_CODE (lhstype) == UNION_TYPE)
+ && C_TYPE_FIELDS_READONLY (lhstype))
+ || (TREE_CODE (lhstype) == REFERENCE_TYPE
+ && TYPE_READONLY (TREE_TYPE (lhstype)))))
+ readonly_error (lhs, "assignment", 0);
+
+ /* If storing into a structure or union member,
+ it has probably been given type `int'.
+ Compute the type that would go with
+ the actual amount of storage the member occupies. */
+
+ if (TREE_CODE (lhs) == COMPONENT_REF
+ && (TREE_CODE (lhstype) == INTEGER_TYPE
+ || TREE_CODE (lhstype) == REAL_TYPE
+ || TREE_CODE (lhstype) == ENUMERAL_TYPE))
+ {
+ lhstype = TREE_TYPE (get_unwidened (lhs, 0));
+
+ /* If storing in a field that is in actuality a short or narrower
+ than one, we must store in the field in its actual type. */
+
+ if (lhstype != TREE_TYPE (lhs))
+ {
+ lhs = copy_node (lhs);
+ TREE_TYPE (lhs) = lhstype;
+ }
+ }
+
+ /* Check to see if there is an assignment to `this'. */
+ if (lhs == current_class_decl)
+ {
+ if (flag_this_is_variable > 0
+ && DECL_NAME (current_function_decl) != NULL_TREE
+ && current_class_name != DECL_NAME (current_function_decl))
+ warning ("assignment to `this' not in constructor or destructor");
+ current_function_just_assigned_this = 1;
+ }
+
+ /* The TREE_TYPE of RHS may be TYPE_UNKNOWN. This can happen
+ when the type of RHS is not yet known, i.e. its type
+ is inherited from LHS. */
+ rhs = require_instantiated_type (lhstype, newrhs, error_mark_node);
+ if (rhs == error_mark_node)
+ return error_mark_node;
+ newrhs = rhs;
+
+ if (modifycode != INIT_EXPR)
+ {
+ /* Make modifycode now either a NOP_EXPR or an INIT_EXPR. */
+ modifycode = NOP_EXPR;
+ /* Reference-bashing */
+ if (TREE_CODE (lhstype) == REFERENCE_TYPE)
+ {
+ tree tmp = convert_from_reference (lhs);
+ lhstype = TREE_TYPE (tmp);
+ if (TYPE_SIZE (lhstype) == 0)
+ {
+ incomplete_type_error (lhs, lhstype);
+ return error_mark_node;
+ }
+ lhs = tmp;
+ olhstype = lhstype;
+ }
+ if (TREE_CODE (TREE_TYPE (newrhs)) == REFERENCE_TYPE)
+ {
+ tree tmp = convert_from_reference (newrhs);
+ if (TYPE_SIZE (TREE_TYPE (tmp)) == 0)
+ {
+ incomplete_type_error (newrhs, TREE_TYPE (tmp));
+ return error_mark_node;
+ }
+ newrhs = tmp;
+ }
+ }
+
+ if (TREE_SIDE_EFFECTS (lhs))
+ lhs = stabilize_reference (lhs);
+ if (TREE_SIDE_EFFECTS (newrhs))
+ newrhs = stabilize_reference (newrhs);
+
+ /* C++: The semantics of C++ differ from those of C when an
+ assignment of an aggregate is desired. Assignment in C++ is
+ now defined as memberwise assignment of non-static members
+ and base class objects. This rule applies recursively
+ until a member of a built-in type is found.
+
+ Also, we cannot do a bit-wise copy of aggregates which
+ contain virtual function table pointers. Those
+ pointer values must be preserved through the copy.
+ However, this is handled in expand_expr, and not here.
+ This is because much better code can be generated at
+ that stage than this one. */
+ if (TREE_CODE (lhstype) == RECORD_TYPE
+ && ! TYPE_PTRMEMFUNC_P (lhstype)
+ && (TYPE_MAIN_VARIANT (lhstype) == TYPE_MAIN_VARIANT (TREE_TYPE (newrhs))
+ || (TREE_CODE (TREE_TYPE (newrhs)) == RECORD_TYPE
+ && UNIQUELY_DERIVED_FROM_P (lhstype, TREE_TYPE (newrhs)))))
+ {
+ /* This was decided in finish_struct. */
+ if (modifycode == INIT_EXPR)
+ cp_error ("can't generate default copy constructor for `%T'", lhstype);
+ else
+ cp_error ("can't generate default assignment operator for `%T'",
+ lhstype);
+#if 0
+ /* This is now done by generating X(X&) and operator=(X&). */
+ tree vbases = CLASSTYPE_VBASECLASSES (lhstype);
+ tree lhs_addr = build_unary_op (ADDR_EXPR, lhs, 0);
+ tree rhs_addr;
+
+ /* Memberwise assignment would cause NEWRHS to be
+ evaluated for every member that gets assigned.
+ By wrapping side-effecting exprs in a SAVE_EXPR,
+ NEWRHS will only be evaluated once. */
+ if (IS_AGGR_TYPE (TREE_TYPE (newrhs))
+ && TREE_SIDE_EFFECTS (newrhs)
+ /* These are things we don't have to save. */
+ && TREE_CODE (newrhs) != COND_EXPR
+ && TREE_CODE (newrhs) != TARGET_EXPR
+ && TREE_CODE (newrhs) != WITH_CLEANUP_EXPR)
+ /* Call `break_out_cleanups' on NEWRHS in case there are cleanups.
+ If NEWRHS is a CALL_EXPR that needs a cleanup, failure to do so
+ will result in expand_expr expanding the call without knowing
+ that it should run the cleanup. */
+ newrhs = save_expr (break_out_cleanups (newrhs));
+
+ if (TREE_CODE (newrhs) == COND_EXPR)
+ rhs_addr = rationalize_conditional_expr (ADDR_EXPR, newrhs);
+ else
+ rhs_addr = build_unary_op (ADDR_EXPR, newrhs, 0);
+
+ result = tree_cons (NULL_TREE,
+ convert (build_reference_type (lhstype), lhs),
+ NULL_TREE);
+
+ if (! comptypes (TREE_TYPE (lhs_addr), TREE_TYPE (rhs_addr), 1))
+ rhs_addr = convert_pointer_to (TREE_TYPE (TREE_TYPE (lhs_addr)), rhs_addr);
+ {
+ tree noncopied_parts = NULL_TREE;
+
+ if (TYPE_NONCOPIED_PARTS (lhstype) != 0)
+ noncopied_parts = init_noncopied_parts (lhs,
+ TYPE_NONCOPIED_PARTS (lhstype));
+ while (noncopied_parts != 0)
+ {
+ result = tree_cons (NULL_TREE,
+ build_modify_expr (convert (ptr_type_node, TREE_VALUE (noncopied_parts)),
+ NOP_EXPR,
+ TREE_PURPOSE (noncopied_parts)),
+ result);
+ noncopied_parts = TREE_CHAIN (noncopied_parts);
+ }
+ }
+ /* Once we have our hands on an address, we must change NEWRHS
+ to work from there. Otherwise we can get multiple evaluations
+ of NEWRHS. */
+ if (TREE_CODE (newrhs) != SAVE_EXPR)
+ newrhs = build_indirect_ref (rhs_addr, NULL_PTR);
+
+ while (vbases)
+ {
+ tree elt_lhs = convert_pointer_to (vbases, lhs_addr);
+ tree elt_rhs = convert_pointer_to (vbases, rhs_addr);
+ result
+ = tree_cons (NULL_TREE,
+ build_modify_expr_1 (build_indirect_ref (elt_lhs, NULL_PTR),
+ modifycode,
+ build_indirect_ref (elt_rhs, NULL_PTR),
+ TYPE_BINFO (lhstype)),
+ result);
+ if (TREE_VALUE (result) == error_mark_node)
+ return error_mark_node;
+ vbases = TREE_CHAIN (vbases);
+ }
+ result = tree_cons (NULL_TREE,
+ build_modify_expr_1 (lhs,
+ modifycode,
+ newrhs,
+ TYPE_BINFO (lhstype)),
+ result);
+ return build_compound_expr (result);
+#endif
+ }
+
+ /* Convert new value to destination type. */
+
+ if (TREE_CODE (lhstype) == ARRAY_TYPE)
+ {
+ /* Allow array assignment in compiler-generated code. */
+ if ((pedantic || flag_ansi)
+ && ! DECL_ARTIFICIAL (current_function_decl))
+ pedwarn ("ANSI C++ forbids assignment between arrays");
+
+ /* Have to wrap this in RTL_EXPR for two cases:
+ in base or member initialization and if we
+ are a branch of a ?: operator. Since we
+ can't easily know the latter, just do it always. */
+
+ result = make_node (RTL_EXPR);
+
+ TREE_TYPE (result) = void_type_node;
+ do_pending_stack_adjust ();
+ start_sequence_for_rtl_expr (result);
+
+ /* As a matter of principle, `start_sequence' should do this. */
+ emit_note (0, -1);
+
+ expand_vec_init (lhs, lhs, array_type_nelts (lhstype), newrhs,
+ 1 + (modifycode != INIT_EXPR));
+
+ do_pending_stack_adjust ();
+
+ TREE_SIDE_EFFECTS (result) = 1;
+ RTL_EXPR_SEQUENCE (result) = get_insns ();
+ RTL_EXPR_RTL (result) = const0_rtx;
+ end_sequence ();
+ return result;
+ }
+
+ if (modifycode == INIT_EXPR)
+ {
+ newrhs = convert_for_initialization (lhs, lhstype, newrhs, LOOKUP_NORMAL,
+ "assignment", NULL_TREE, 0);
+ if (lhs == DECL_RESULT (current_function_decl))
+ {
+ if (DECL_INITIAL (lhs))
+ warning ("return value from function receives multiple initializations");
+ DECL_INITIAL (lhs) = newrhs;
+ }
+ }
+ else
+ {
+ if (IS_AGGR_TYPE (lhstype))
+ {
+ if (result = build_opfncall (MODIFY_EXPR,
+ LOOKUP_NORMAL, lhs, newrhs,
+ make_node (NOP_EXPR)))
+ return result;
+ }
+ /* Avoid warnings on enum bit fields. */
+ if (TREE_CODE (olhstype) == ENUMERAL_TYPE
+ && TREE_CODE (lhstype) == INTEGER_TYPE)
+ {
+ newrhs = convert_for_assignment (olhstype, newrhs, "assignment",
+ NULL_TREE, 0);
+ newrhs = convert_force (lhstype, newrhs);
+ }
+ else
+ newrhs = convert_for_assignment (lhstype, newrhs, "assignment",
+ NULL_TREE, 0);
+ if (flag_elide_constructors == 0
+ && TREE_CODE (newrhs) == CALL_EXPR
+ && TREE_ADDRESSABLE (lhstype))
+ {
+ /* Can't initialize directly from a CALL_EXPR, since
+ we don't know what doesn't alias what. */
+
+ tree temp = get_temp_name (lhstype, 0);
+ newrhs = build (COMPOUND_EXPR, lhstype,
+ build_modify_expr (temp, INIT_EXPR, newrhs),
+ temp);
+ }
+ }
+
+ if (TREE_CODE (newrhs) == ERROR_MARK)
+ return error_mark_node;
+
+ if (TREE_CODE (newrhs) == COND_EXPR)
+ {
+ tree lhs1;
+ tree cond = TREE_OPERAND (newrhs, 0);
+
+ if (TREE_SIDE_EFFECTS (lhs))
+ cond = build_compound_expr (tree_cons
+ (NULL_TREE, lhs,
+ build_tree_list (NULL_TREE, cond)));
+
+ /* Cannot have two identical lhs on this one tree (result) as preexpand
+ calls will rip them out and fill in RTL for them, but when the
+ rtl is generated, the calls will only be in the first side of the
+ condition, not on both, or before the conditional jump! (mrs) */
+ lhs1 = break_out_calls (lhs);
+
+ if (lhs == lhs1)
+ /* If there's no change, the COND_EXPR behaves like any other rhs. */
+ result = build (modifycode == NOP_EXPR ? MODIFY_EXPR : INIT_EXPR,
+ lhstype, lhs, newrhs);
+ else
+ {
+ tree result_type = TREE_TYPE (newrhs);
+ /* We have to convert each arm to the proper type because the
+ types may have been munged by constant folding. */
+ result
+ = build (COND_EXPR, result_type, cond,
+ build_modify_expr (lhs, modifycode,
+ convert (result_type,
+ TREE_OPERAND (newrhs, 1))),
+ build_modify_expr (lhs1, modifycode,
+ convert (result_type,
+ TREE_OPERAND (newrhs, 2))));
+ }
+ }
+ else if (modifycode != INIT_EXPR && TREE_CODE (newrhs) == WITH_CLEANUP_EXPR)
+ {
+ tree cleanup = TREE_OPERAND (newrhs, 2);
+ tree slot;
+
+ /* Finish up by running cleanups and having the "value" of the lhs. */
+ tree exprlist = tree_cons (NULL_TREE, cleanup,
+ build_tree_list (NULL_TREE, lhs));
+ newrhs = TREE_OPERAND (newrhs, 0);
+ if (TREE_CODE (newrhs) == TARGET_EXPR)
+ slot = TREE_OPERAND (newrhs, 0);
+ else if (TREE_CODE (newrhs) == ADDR_EXPR)
+ {
+ /* Bad but legal. */
+ slot = newrhs;
+ warning ("address taken of temporary object");
+ }
+ else
+ my_friendly_abort (118);
+
+ /* Copy the value computed in SLOT into LHS. */
+ exprlist = tree_cons (NULL_TREE,
+ build_modify_expr (lhs, modifycode, slot),
+ exprlist);
+ /* Evaluate the expression that needs CLEANUP. This will
+ compute the value into SLOT. */
+ exprlist = tree_cons (NULL_TREE, newrhs, exprlist);
+ result = convert (lhstype, build_compound_expr (exprlist));
+ }
+ else
+ result = build (modifycode == NOP_EXPR ? MODIFY_EXPR : INIT_EXPR,
+ lhstype, lhs, newrhs);
+ TREE_SIDE_EFFECTS (result) = 1;
+
+ /* If we got the LHS in a different type for storing in,
+ convert the result back to the nominal type of LHS
+ so that the value we return always has the same type
+ as the LHS argument. */
+
+ if (olhstype == TREE_TYPE (result))
+ return result;
+ /* Avoid warnings converting integral types back into enums
+ for enum bit fields. */
+ if (TREE_CODE (TREE_TYPE (result)) == INTEGER_TYPE
+ && TREE_CODE (olhstype) == ENUMERAL_TYPE)
+ {
+ result = build (COMPOUND_EXPR, olhstype, result, olhs);
+ TREE_NO_UNUSED_WARNING (result) = 1;
+ return result;
+ }
+ return convert_for_assignment (olhstype, result, "assignment",
+ NULL_TREE, 0);
+}
+
+
+/* Return 0 if EXP is not a valid lvalue in this language
+ even though `lvalue_or_else' would accept it. */
+
+int
+language_lvalue_valid (exp)
+ tree exp;
+{
+ return 1;
+}
+
+/* Get the difference in deltas between two pointer-to-member-function
+ types. Return integer_zero_node if FROM cannot be converted to TO.
+ If FORCE is true, then allow reverse conversions as well. */
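+/* Editorial sketch, not part of the original source: with multiple
+ inheritance the delta is the offset of the base within the derived
+ class, e.g.
+
+ struct A { int a; };
+ struct B { int f (); };
+ struct C : A, B { };
+ int (B::*pb) () = &B::f;
+ int (C::*pc) () = pb; // delta == offset of B within C
+ */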
+static tree
+get_delta_difference (from, to, force)
+ tree from, to;
+ int force;
+{
+ tree delta = integer_zero_node;
+ tree binfo;
+
+ if (to == from)
+ return delta;
+
+ /* We should use get_base_distance here, so we can check whether anything
+ along the path is virtual, and we need to make sure we stay
+ inside the real binfos when going through virtual bases.
+ Maybe we should replace virtual bases with
+ binfo_member (...CLASSTYPE_VBASECLASSES...)... (mrs) */
+ binfo = get_binfo (from, to, 1);
+ if (binfo == error_mark_node)
+ {
+ error (" in pointer to member function conversion");
+ return delta;
+ }
+ if (binfo == 0)
+ {
+ if (!force)
+ {
+ error_not_base_type (from, to);
+ error (" in pointer to member function conversion");
+ return delta;
+ }
+ binfo = get_binfo (to, from, 1);
+ if (binfo == error_mark_node)
+ {
+ error (" in pointer to member function conversion");
+ return delta;
+ }
+ if (binfo == 0)
+ {
+ cp_error ("cannot convert pointer to member of type `%T' to unrelated pointer to member of type `%T'", from, to);
+ return delta;
+ }
+ if (TREE_VIA_VIRTUAL (binfo))
+ {
+ warning ("pointer to member conversion to virtual base class will only work if you're very careful");
+ }
+ return fold (size_binop (MINUS_EXPR,
+ integer_zero_node,
+ BINFO_OFFSET (binfo)));
+ }
+ if (TREE_VIA_VIRTUAL (binfo))
+ {
+ warning ("pointer to member conversion from virtual base class will only work if you're very careful");
+ }
+ return BINFO_OFFSET (binfo);
+}
+
+/* Build a constructor for a pointer to member function. It can be
+ used to initialize global variables or local variables, or used
+ as a value in expressions. TYPE is the POINTER to METHOD_TYPE we
+ want the result to have.
+
+ If FORCE is non-zero, then force this conversion, even if
+ we would rather not do it. Usually set when using an explicit
+ cast.
+
+ Return error_mark_node, if something goes wrong. */
+
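+/* Editorial sketch, not part of the original source: the CONSTRUCTORs
+ built below follow the { delta, index, pfn-or-delta2 } layout, e.g.
+
+ struct S { int f (); virtual int g (); };
+ int (S::*p) () = &S::f; // { 0, -1, { &S::f } }
+ int (S::*q) () = &S::g; // { 0, vindex of g + 1, { delta2 } }
+ */
+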
+tree
+build_ptrmemfunc (type, pfn, force)
+ tree type, pfn;
+ int force;
+{
+ tree index = integer_zero_node;
+ tree delta = integer_zero_node;
+ tree delta2 = integer_zero_node;
+ tree vfield_offset;
+ tree npfn;
+ tree u;
+
+ /* Handle multiple conversions of pointer to member functions. */
+ if (TYPE_PTRMEMFUNC_P (TREE_TYPE (pfn)))
+ {
+ tree ndelta, ndelta2, nindex;
+ /* Is it already the right type? */
+#if 0
+ /* Sorry, can't do this, the backend is too stupid. */
+ if (TYPE_METHOD_BASETYPE (TREE_TYPE (type))
+ == TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (TREE_TYPE (pfn)))))
+ {
+ if (type != TYPE_PTRMEMFUNC_FN_TYPE (TREE_TYPE (pfn)))
+ {
+ npfn = build1 (NOP_EXPR, TYPE_GET_PTRMEMFUNC_TYPE (type), pfn);
+ TREE_CONSTANT (npfn) = TREE_CONSTANT (pfn);
+ }
+ return pfn;
+ }
+#else
+ if (type == TYPE_PTRMEMFUNC_FN_TYPE (TREE_TYPE (pfn)))
+ return pfn;
+#endif
+
+ if (TREE_CODE (pfn) != CONSTRUCTOR)
+ {
+ tree e1, e2, e3;
+ ndelta = convert (sizetype, build_component_ref (pfn, delta_identifier, 0, 0));
+ ndelta2 = convert (sizetype, DELTA2_FROM_PTRMEMFUNC (pfn));
+ index = build_component_ref (pfn, index_identifier, 0, 0);
+ delta = get_delta_difference (TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (TREE_TYPE (pfn)))),
+ TYPE_METHOD_BASETYPE (TREE_TYPE (type)),
+ force);
+ delta = fold (size_binop (PLUS_EXPR, delta, ndelta));
+ delta2 = fold (size_binop (PLUS_EXPR, ndelta2, delta2));
+ e1 = fold (build (GT_EXPR, integer_type_node, index, integer_zero_node));
+
+ u = build_nt (CONSTRUCTOR, 0, tree_cons (delta2_identifier, delta2, NULL_TREE));
+ u = build_nt (CONSTRUCTOR, 0, tree_cons (NULL_TREE, delta,
+ tree_cons (NULL_TREE, index,
+ tree_cons (NULL_TREE, u, NULL_TREE))));
+ e2 = digest_init (TYPE_GET_PTRMEMFUNC_TYPE (type), u, (tree*)0);
+
+ pfn = PFN_FROM_PTRMEMFUNC (pfn);
+ npfn = build1 (NOP_EXPR, type, pfn);
+ TREE_CONSTANT (npfn) = TREE_CONSTANT (pfn);
+
+ u = build_nt (CONSTRUCTOR, 0, tree_cons (pfn_identifier, npfn, NULL_TREE));
+ u = build_nt (CONSTRUCTOR, 0, tree_cons (NULL_TREE, delta,
+ tree_cons (NULL_TREE, index,
+ tree_cons (NULL_TREE, u, NULL_TREE))));
+ e3 = digest_init (TYPE_GET_PTRMEMFUNC_TYPE (type), u, (tree*)0);
+ return build_conditional_expr (e1, e2, e3);
+ }
+
+ ndelta = TREE_VALUE (CONSTRUCTOR_ELTS (pfn));
+ nindex = TREE_VALUE (TREE_CHAIN (CONSTRUCTOR_ELTS (pfn)));
+ npfn = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (CONSTRUCTOR_ELTS (pfn))));
+ npfn = TREE_VALUE (CONSTRUCTOR_ELTS (npfn));
+ if (integer_zerop (nindex))
+ pfn = integer_zero_node;
+ else
+ {
+ sorry ("value casting of variable nonnull pointer to member functions not supported");
+ return error_mark_node;
+ }
+ }
+
+ /* Handle null pointer to member function conversions. */
+ if (integer_zerop (pfn))
+ {
+ pfn = build_c_cast (type, integer_zero_node);
+ u = build_nt (CONSTRUCTOR, 0, tree_cons (pfn_identifier, pfn, NULL_TREE));
+ u = build_nt (CONSTRUCTOR, 0, tree_cons (NULL_TREE, integer_zero_node,
+ tree_cons (NULL_TREE, integer_zero_node,
+ tree_cons (NULL_TREE, u, NULL_TREE))));
+ return digest_init (TYPE_GET_PTRMEMFUNC_TYPE (type), u, (tree*)0);
+ }
+
+ if (TREE_CODE (pfn) == TREE_LIST)
+ {
+ pfn = instantiate_type (type, pfn, 1);
+ if (pfn == error_mark_node)
+ return error_mark_node;
+ pfn = build_unary_op (ADDR_EXPR, pfn, 0);
+ }
+
+ /* Allow pointer to member conversions here. */
+ delta = get_delta_difference (TYPE_METHOD_BASETYPE (TREE_TYPE (TREE_TYPE (pfn))),
+ TYPE_METHOD_BASETYPE (TREE_TYPE (type)),
+ force);
+ delta2 = fold (size_binop (PLUS_EXPR, delta2, delta));
+
+ if (TREE_CODE (TREE_OPERAND (pfn, 0)) != FUNCTION_DECL)
+ warning ("assuming pointer to member function is non-virtual");
+
+ if (TREE_CODE (TREE_OPERAND (pfn, 0)) == FUNCTION_DECL
+ && DECL_VINDEX (TREE_OPERAND (pfn, 0)))
+ {
+ /* Find the offset to the vfield pointer in the object. */
+ vfield_offset = get_binfo (DECL_CONTEXT (TREE_OPERAND (pfn, 0)),
+ DECL_CLASS_CONTEXT (TREE_OPERAND (pfn, 0)),
+ 0);
+ vfield_offset = get_vfield_offset (vfield_offset);
+ delta2 = size_binop (PLUS_EXPR, vfield_offset, delta2);
+
+ /* Map everything down one to make room for the null pointer to member. */
+ index = size_binop (PLUS_EXPR,
+ DECL_VINDEX (TREE_OPERAND (pfn, 0)),
+ integer_one_node);
+ u = build_nt (CONSTRUCTOR, 0, tree_cons (delta2_identifier, delta2, NULL_TREE));
+ }
+ else
+ {
+ index = fold (size_binop (MINUS_EXPR, integer_zero_node, integer_one_node));
+
+ npfn = build1 (NOP_EXPR, type, pfn);
+ TREE_CONSTANT (npfn) = TREE_CONSTANT (pfn);
+
+ u = build_nt (CONSTRUCTOR, 0, tree_cons (pfn_identifier, npfn, NULL_TREE));
+ }
+
+ u = build_nt (CONSTRUCTOR, 0, tree_cons (NULL_TREE, delta,
+ tree_cons (NULL_TREE, index,
+ tree_cons (NULL_TREE, u, NULL_TREE))));
+ return digest_init (TYPE_GET_PTRMEMFUNC_TYPE (type), u, (tree*)0);
+}
+
+/* Convert value RHS to type TYPE as preparation for an assignment
+ to an lvalue of type TYPE.
+ The real work of conversion is done by `convert'.
+ The purpose of this function is to generate error messages
+ for assignments that are not allowed in C.
+ ERRTYPE is a string to use in error messages:
+ "assignment", "return", etc.
+
+ C++: attempts to allow `convert' to find conversions involving
+ implicit type conversion between aggregate and scalar types
+ as per 8.5.6 of C++ manual. Does not randomly dereference
+ pointers to aggregates! */
+
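+/* Editorial sketch, not part of the original source: typical diagnostics
+ issued below include
+
+ void f (unsigned u) { u = -1; } // warning: assignment of negative value
+ void g (int i, double d) { i = d; } // warning: assignment to `int' from `double'
+ */
+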
+static tree
+convert_for_assignment (type, rhs, errtype, fndecl, parmnum)
+ tree type, rhs;
+ char *errtype;
+ tree fndecl;
+ int parmnum;
+{
+ register enum tree_code codel = TREE_CODE (type);
+ register tree rhstype;
+ register enum tree_code coder = TREE_CODE (TREE_TYPE (rhs));
+
+ if (coder == UNKNOWN_TYPE)
+ rhs = instantiate_type (type, rhs, 1);
+
+ if (coder == ERROR_MARK)
+ return error_mark_node;
+
+ if (codel == OFFSET_TYPE)
+ {
+ type = TREE_TYPE (type);
+ codel = TREE_CODE (type);
+ }
+
+ /* Strip NON_LVALUE_EXPRs, since we aren't using RHS as an lvalue. */
+ if (TREE_CODE (rhs) == NON_LVALUE_EXPR)
+ rhs = TREE_OPERAND (rhs, 0);
+
+ if (rhs == error_mark_node)
+ return error_mark_node;
+
+ if (TREE_VALUE (rhs) == error_mark_node)
+ return error_mark_node;
+
+ if (TREE_CODE (TREE_TYPE (rhs)) == OFFSET_TYPE)
+ {
+ rhs = resolve_offset_ref (rhs);
+ if (rhs == error_mark_node)
+ return error_mark_node;
+ rhstype = TREE_TYPE (rhs);
+ coder = TREE_CODE (rhstype);
+ }
+
+ if (TREE_CODE (TREE_TYPE (rhs)) == ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (rhs)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (rhs)) == METHOD_TYPE)
+ rhs = default_conversion (rhs);
+ else if (TREE_CODE (TREE_TYPE (rhs)) == REFERENCE_TYPE)
+ rhs = convert_from_reference (rhs);
+
+ rhstype = TREE_TYPE (rhs);
+ coder = TREE_CODE (rhstype);
+
+ /* This should no longer change types on us. */
+ if (TREE_CODE (rhs) == CONST_DECL)
+ rhs = DECL_INITIAL (rhs);
+ else if (TREE_READONLY_DECL_P (rhs))
+ rhs = decl_constant_value (rhs);
+
+ if (type == rhstype)
+ {
+ overflow_warning (rhs);
+ return rhs;
+ }
+
+ if (coder == VOID_TYPE)
+ {
+ error ("void value not ignored as it ought to be");
+ return error_mark_node;
+ }
+ /* Arithmetic types all interconvert. */
+ if ((codel == INTEGER_TYPE || codel == REAL_TYPE || codel == BOOLEAN_TYPE)
+ && (coder == INTEGER_TYPE || coder == REAL_TYPE || coder == BOOLEAN_TYPE))
+ {
+ /* But we should warn if assigning REAL_TYPE to INTEGER_TYPE. */
+ if (coder == REAL_TYPE && codel == INTEGER_TYPE)
+ {
+ if (fndecl)
+ cp_warning ("`%T' used for argument %P of `%D'",
+ rhstype, parmnum, fndecl);
+ else
+ cp_warning ("%s to `%T' from `%T'", errtype, type, rhstype);
+ }
+ /* And we should warn if assigning a negative value to
+ an unsigned variable. */
+ else if (TREE_UNSIGNED (type) && codel != BOOLEAN_TYPE)
+ {
+ if (TREE_CODE (rhs) == INTEGER_CST
+ && TREE_NEGATED_INT (rhs))
+ {
+ if (fndecl)
+ cp_warning ("negative value `%E' passed as argument %P of `%D'",
+ rhs, parmnum, fndecl);
+ else
+ cp_warning ("%s of negative value `%E' to `%T'",
+ errtype, rhs, type);
+ }
+ overflow_warning (rhs);
+ if (TREE_CONSTANT (rhs))
+ rhs = fold (rhs);
+ }
+
+ return convert_and_check (type, rhs);
+ }
+ /* Conversions involving enums. */
+ else if ((codel == ENUMERAL_TYPE
+ && (coder == ENUMERAL_TYPE || coder == INTEGER_TYPE || coder == REAL_TYPE))
+ || (coder == ENUMERAL_TYPE
+ && (codel == ENUMERAL_TYPE || codel == INTEGER_TYPE || codel == REAL_TYPE)))
+ {
+ return convert (type, rhs);
+ }
+ /* Conversions among pointers */
+ else if (codel == POINTER_TYPE
+ && (coder == POINTER_TYPE
+ || (coder == RECORD_TYPE
+ && (IS_SIGNATURE_POINTER (rhstype)
+ || IS_SIGNATURE_REFERENCE (rhstype)))))
+ {
+ register tree ttl = TREE_TYPE (type);
+ register tree ttr;
+
+ if (coder == RECORD_TYPE)
+ {
+ rhs = build_optr_ref (rhs);
+ rhstype = TREE_TYPE (rhs);
+ }
+ ttr = TREE_TYPE (rhstype);
+
+ /* If both pointers are of aggregate type, then we
+ can give better error messages, and save some work
+ as well. */
+ if (TREE_CODE (ttl) == RECORD_TYPE && TREE_CODE (ttr) == RECORD_TYPE)
+ {
+ tree binfo;
+
+ if (TYPE_MAIN_VARIANT (ttl) == TYPE_MAIN_VARIANT (ttr)
+ || type == class_star_type_node
+ || rhstype == class_star_type_node)
+ binfo = TYPE_BINFO (ttl);
+ else
+ binfo = get_binfo (ttl, ttr, 1);
+
+ if (binfo == error_mark_node)
+ return error_mark_node;
+ if (binfo == 0)
+ return error_not_base_type (ttl, ttr);
+
+ if (! TYPE_READONLY (ttl) && TYPE_READONLY (ttr))
+ {
+ if (fndecl)
+ cp_pedwarn ("passing `%T' as argument %P of `%D' discards const",
+ rhstype, parmnum, fndecl);
+ else
+ cp_pedwarn ("%s to `%T' from `%T' discards const",
+ errtype, type, rhstype);
+ }
+ if (! TYPE_VOLATILE (ttl) && TYPE_VOLATILE (ttr))
+ {
+ if (fndecl)
+ cp_pedwarn ("passing `%T' as argument %P of `%D' discards volatile",
+ rhstype, parmnum, fndecl);
+ else
+ cp_pedwarn ("%s to `%T' from `%T' discards volatile",
+ errtype, type, rhstype);
+ }
+ }
+
+ /* Any non-function converts to a [const][volatile] void *
+ and vice versa; otherwise, targets must be the same.
+ Meanwhile, the lhs target must have all the qualifiers of the rhs. */
+ else if (TYPE_MAIN_VARIANT (ttl) == void_type_node
+ || TYPE_MAIN_VARIANT (ttr) == void_type_node
+ || comp_target_types (type, rhstype, 1)
+ || (unsigned_type (TYPE_MAIN_VARIANT (ttl))
+ == unsigned_type (TYPE_MAIN_VARIANT (ttr))))
+ {
+ /* ARM $4.8, commentary on p39. */
+ if (TYPE_MAIN_VARIANT (ttl) == void_type_node
+ && TREE_CODE (ttr) == OFFSET_TYPE)
+ {
+ error ("no standard conversion from pointer to member to `void *'");
+ return error_mark_node;
+ }
+
+ if (TYPE_MAIN_VARIANT (ttl) != void_type_node
+ && TYPE_MAIN_VARIANT (ttr) == void_type_node
+ && rhs != null_pointer_node)
+ {
+ if (coder == RECORD_TYPE)
+ pedwarn ("implicit conversion of signature pointer to type `%s'",
+ type_as_string (type, 0));
+ else
+ pedwarn ("ANSI C++ forbids implicit conversion from `void *' in %s",
+ errtype);
+ }
+ /* Const and volatile mean something different for function types,
+ so the usual warnings are not appropriate. */
+ else if ((TREE_CODE (ttr) != FUNCTION_TYPE && TREE_CODE (ttr) != METHOD_TYPE)
+ || (TREE_CODE (ttl) != FUNCTION_TYPE && TREE_CODE (ttl) != METHOD_TYPE))
+ {
+ if (TREE_CODE (ttl) == OFFSET_TYPE
+ && binfo_member (TYPE_OFFSET_BASETYPE (ttr),
+ CLASSTYPE_VBASECLASSES (TYPE_OFFSET_BASETYPE (ttl))))
+ {
+ sorry ("%s between pointer to members converting across virtual baseclasses", errtype);
+ return error_mark_node;
+ }
+ else if (! TYPE_READONLY (ttl) && TYPE_READONLY (ttr))
+ {
+ if (fndecl)
+ cp_pedwarn ("passing `%T' as argument %P of `%D' discards const",
+ rhstype, parmnum, fndecl);
+ else
+ cp_pedwarn ("%s to `%T' from `%T' discards const",
+ errtype, type, rhstype);
+ }
+ else if (! TYPE_VOLATILE (ttl) && TYPE_VOLATILE (ttr))
+ {
+ if (fndecl)
+ cp_pedwarn ("passing `%T' as argument %P of `%D' discards volatile",
+ rhstype, parmnum, fndecl);
+ else
+ cp_pedwarn ("%s to `%T' from `%T' discards volatile",
+ errtype, type, rhstype);
+ }
+ else if (TREE_CODE (ttl) == TREE_CODE (ttr)
+ && ! comp_target_types (type, rhstype, 1))
+ {
+ if (fndecl)
+ cp_pedwarn ("passing `%T' as argument %P of `%D' changes signedness",
+ rhstype, parmnum, fndecl);
+ else
+ cp_pedwarn ("%s to `%T' from `%T' changes signedness",
+ errtype, type, rhstype);
+ }
+ }
+ }
+ else if (TREE_CODE (ttr) == OFFSET_TYPE
+ && TREE_CODE (ttl) != OFFSET_TYPE)
+ {
+ /* Normally, pointers to different type codes (other
+ than void) are not compatible, but we perform
+ some type instantiation if that resolves the
+ ambiguity of (X Y::*) and (X *). */
+
+ if (current_class_decl)
+ {
+ if (TREE_CODE (rhs) == INTEGER_CST)
+ {
+ rhs = build (PLUS_EXPR, build_pointer_type (TREE_TYPE (ttr)),
+ current_class_decl, rhs);
+ return convert_for_assignment (type, rhs,
+ errtype, fndecl, parmnum);
+ }
+ }
+ if (TREE_CODE (ttl) == METHOD_TYPE)
+ error ("%s between pointer-to-method and pointer-to-member types",
+ errtype);
+ else
+ error ("%s between pointer and pointer-to-member types", errtype);
+ return error_mark_node;
+ }
+ else
+ {
+ int add_quals = 0, const_parity = 0, volatile_parity = 0;
+ int left_const = 1;
+ int unsigned_parity;
+ int nptrs = 0;
+
+ /* This code is basically a duplicate of comp_ptr_ttypes_real. */
+ for (; ; ttl = TREE_TYPE (ttl), ttr = TREE_TYPE (ttr))
+ {
+ nptrs -= 1;
+ const_parity |= TYPE_READONLY (ttl) < TYPE_READONLY (ttr);
+ volatile_parity |= TYPE_VOLATILE (ttl) < TYPE_VOLATILE (ttr);
+
+ if (! left_const
+ && (TYPE_READONLY (ttl) > TYPE_READONLY (ttr)
+ || TYPE_VOLATILE (ttl) > TYPE_VOLATILE (ttr)))
+ add_quals = 1;
+ left_const &= TYPE_READONLY (ttl);
+
+ if (TREE_CODE (ttl) != POINTER_TYPE)
+ break;
+ }
+ unsigned_parity = TREE_UNSIGNED (ttl) - TREE_UNSIGNED (ttr);
+ if (unsigned_parity)
+ {
+ if (TREE_UNSIGNED (ttl))
+ ttr = unsigned_type (ttr);
+ else
+ ttl = unsigned_type (ttl);
+ }
+
+ if (comp_target_types (ttl, ttr, nptrs))
+ {
+ if (add_quals)
+ {
+ if (fndecl)
+ cp_pedwarn ("passing `%T' as argument %P of `%D' adds cv-quals without intervening `const'",
+ rhstype, parmnum, fndecl);
+ else
+ cp_pedwarn ("%s to `%T' from `%T' adds cv-quals without intervening `const'",
+ errtype, type, rhstype);
+ }
+ if (const_parity)
+ {
+ if (fndecl)
+ cp_pedwarn ("passing `%T' as argument %P of `%D' discards const",
+ rhstype, parmnum, fndecl);
+ else
+ cp_pedwarn ("%s to `%T' from `%T' discards const",
+ errtype, type, rhstype);
+ }
+ if (volatile_parity)
+ {
+ if (fndecl)
+ cp_pedwarn ("passing `%T' as argument %P of `%D' discards volatile",
+ rhstype, parmnum, fndecl);
+ else
+ cp_pedwarn ("%s to `%T' from `%T' discards volatile",
+ errtype, type, rhstype);
+ }
+ if (unsigned_parity > 0)
+ {
+ if (fndecl)
+ cp_pedwarn ("passing `%T' as argument %P of `%D' changes signed to unsigned",
+ rhstype, parmnum, fndecl);
+ else
+ cp_pedwarn ("%s to `%T' from `%T' changes signed to unsigned",
+ errtype, type, rhstype);
+ }
+ else if (unsigned_parity < 0)
+ {
+ if (fndecl)
+ cp_pedwarn ("passing `%T' as argument %P of `%D' changes unsigned to signed",
+ rhstype, parmnum, fndecl);
+ else
+ cp_pedwarn ("%s to `%T' from `%T' changes unsigned to signed",
+ errtype, type, rhstype);
+ }
+
+ /* C++ is not as lenient as C about converting function and
+ member function pointers. Emit warnings here. */
+ if (TREE_CODE (ttl) == FUNCTION_TYPE
+ || TREE_CODE (ttl) == METHOD_TYPE)
+ if (! comptypes (ttl, ttr, 0))
+ {
+ warning ("conflicting function types in %s:", errtype);
+ cp_warning ("\t`%T' != `%T'", type, rhstype);
+ }
+ }
+ else if (TREE_CODE (TREE_TYPE (rhs)) == METHOD_TYPE)
+ {
+ /* When does this happen? */
+ my_friendly_abort (119);
+ /* Conversion of a pointer-to-member type to void *. */
+ rhs = build_unary_op (ADDR_EXPR, rhs, 0);
+ TREE_TYPE (rhs) = type;
+ return rhs;
+ }
+ else if (TREE_CODE (TREE_TYPE (rhs)) == OFFSET_TYPE)
+ {
+ /* When does this happen? */
+ my_friendly_abort (120);
+ /* Conversion of a pointer-to-member type to void *. */
+ rhs = build_unary_op (ADDR_EXPR, rhs, 0);
+ TREE_TYPE (rhs) = type;
+ return rhs;
+ }
+ else
+ {
+ if (fndecl)
+ cp_error ("passing `%T' as argument %P of `%D'",
+ rhstype, parmnum, fndecl);
+ else
+ cp_error ("%s to `%T' from `%T'", errtype, type, rhstype);
+ return error_mark_node;
+ }
+ }
+ return convert (type, rhs);
+ }
+ else if (codel == POINTER_TYPE && coder == INTEGER_TYPE)
+ {
+ /* An explicit constant 0 can convert to a pointer,
+ but not a 0 that results from casting or folding. */
+ if (! (TREE_CODE (rhs) == INTEGER_CST && integer_zerop (rhs)))
+ {
+ if (fndecl)
+ cp_pedwarn ("passing `%T' to argument %P of `%D' lacks a cast",
+ rhstype, parmnum, fndecl);
+ else
+ cp_pedwarn ("%s to `%T' from `%T' lacks a cast",
+ errtype, type, rhstype);
+ return convert (type, rhs);
+ }
+ return null_pointer_node;
+ }
+ else if (codel == INTEGER_TYPE
+ && (coder == POINTER_TYPE
+ || (coder == RECORD_TYPE
+ && (IS_SIGNATURE_POINTER (rhstype)
+ || IS_SIGNATURE_REFERENCE (rhstype)))))
+ {
+ if (fndecl)
+ cp_pedwarn ("passing `%T' to argument %P of `%D' lacks a cast",
+ rhstype, parmnum, fndecl);
+ else
+ cp_pedwarn ("%s to `%T' from `%T' lacks a cast",
+ errtype, type, rhstype);
+ return convert (type, rhs);
+ }
+
+ /* C++ */
+ else if (((coder == POINTER_TYPE && TREE_CODE (rhs) == ADDR_EXPR
+ && TREE_CODE (rhstype) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (rhstype)) == METHOD_TYPE)
+ || integer_zerop (rhs)
+ || TYPE_PTRMEMFUNC_P (TREE_TYPE (rhs)))
+ && TYPE_PTRMEMFUNC_P (type))
+ {
+ /* compatible pointer to member functions. */
+ return build_ptrmemfunc (TYPE_PTRMEMFUNC_FN_TYPE (type), rhs, 0);
+ }
+ else if (codel == ERROR_MARK || coder == ERROR_MARK)
+ return error_mark_node;
+
+ /* This should no longer happen. References are initialized via
+ `convert_for_initialization'. They should otherwise be
+ bashed before coming here. */
+ else if (codel == REFERENCE_TYPE)
+ /* Force an abort. */
+ my_friendly_assert (codel != REFERENCE_TYPE, 317);
+ else if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (rhs)))
+ {
+ tree nrhs = build1 (NOP_EXPR, type, rhs);
+ TREE_CONSTANT (nrhs) = TREE_CONSTANT (rhs);
+ return nrhs;
+ }
+ else if (TYPE_HAS_CONSTRUCTOR (type) || IS_AGGR_TYPE (TREE_TYPE (rhs)))
+ return convert (type, rhs);
+
+ cp_error ("%s to `%T' from `%T'", errtype, type, rhstype);
+ return error_mark_node;
+}
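+
+/* A sketch of what the checks above diagnose: given
+
+     void f (char *);
+     const char *s;
+
+   the call `f (s)' draws the "discards const" pedwarn, the literal
+   zero in `char *p = 0;' quietly becomes the null pointer, and an
+   assignment of, say, an `int *' to a `char *' falls through to the
+   hard error at the end.  */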
+
+/* Convert RHS to be of type TYPE. If EXP is non-zero,
+ it is the target of the initialization.
+ ERRTYPE is a string to use in error messages.
+
+ Two major differences between the behavior of
+ `convert_for_assignment' and `convert_for_initialization'
+ are that references are bashed in the former, while
+ copied in the latter, and aggregates are assigned in
+ the former (operator=) while initialized in the
+ latter (X(X&)).
+
+ If using a constructor, make sure no conversion operator exists; if
+ one does, there is an ambiguity. */
+tree
+convert_for_initialization (exp, type, rhs, flags, errtype, fndecl, parmnum)
+ tree exp, type, rhs;
+ int flags;
+ char *errtype;
+ tree fndecl;
+ int parmnum;
+{
+ register enum tree_code codel = TREE_CODE (type);
+ register tree rhstype;
+ register enum tree_code coder;
+
+ /* build_c_cast puts on a NOP_EXPR to make the result not an lvalue.
+ Strip such NOP_EXPRs, since RHS is used in non-lvalue context. */
+ if (TREE_CODE (rhs) == NOP_EXPR
+ && TREE_TYPE (rhs) == TREE_TYPE (TREE_OPERAND (rhs, 0))
+ && codel != REFERENCE_TYPE)
+ rhs = TREE_OPERAND (rhs, 0);
+
+ if (rhs == error_mark_node
+ || (TREE_CODE (rhs) == TREE_LIST && TREE_VALUE (rhs) == error_mark_node))
+ return error_mark_node;
+
+ if (TREE_CODE (TREE_TYPE (rhs)) == OFFSET_TYPE)
+ {
+ rhs = resolve_offset_ref (rhs);
+ if (rhs == error_mark_node)
+ return error_mark_node;
+ rhstype = TREE_TYPE (rhs);
+ coder = TREE_CODE (rhstype);
+ }
+
+ if ((TREE_CODE (TREE_TYPE (rhs)) == ARRAY_TYPE
+ && TREE_CODE (type) != ARRAY_TYPE && TREE_CODE (type) != REFERENCE_TYPE)
+ || TREE_CODE (TREE_TYPE (rhs)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (rhs)) == METHOD_TYPE)
+ rhs = default_conversion (rhs);
+
+ rhstype = TREE_TYPE (rhs);
+ coder = TREE_CODE (rhstype);
+
+ if (coder == UNKNOWN_TYPE)
+ {
+ rhs = instantiate_type (type, rhs, 1);
+ rhstype = TREE_TYPE (rhs);
+ coder = TREE_CODE (rhstype);
+ }
+
+ if (coder == ERROR_MARK)
+ return error_mark_node;
+
+#if 0
+ /* This is *not* the quick way out! It is the way to disaster. */
+ if (type == rhstype)
+ goto converted;
+#endif
+
+ /* We accept references to incomplete types, so we can
+ return here before checking if RHS is of complete type. */
+
+ if (codel == REFERENCE_TYPE)
+ {
+ /* This should eventually happen in convert_arguments. */
+ extern int warningcount, errorcount;
+ int savew, savee;
+
+ if (fndecl)
+ savew = warningcount, savee = errorcount;
+ rhs = convert_to_reference (type, rhs, CONV_IMPLICIT, flags,
+ exp ? exp : error_mark_node);
+ if (fndecl)
+ {
+ if (warningcount > savew)
+ cp_warning_at ("in passing argument %P of `%+D'", parmnum, fndecl);
+ else if (errorcount > savee)
+ cp_error_at ("in passing argument %P of `%+D'", parmnum, fndecl);
+ }
+ return rhs;
+ }
+
+ rhs = require_complete_type (rhs);
+ if (rhs == error_mark_node)
+ return error_mark_node;
+
+ if (exp != 0) exp = require_complete_type (exp);
+ if (exp == error_mark_node)
+ return error_mark_node;
+
+ if (TREE_CODE (rhstype) == REFERENCE_TYPE)
+ rhstype = TREE_TYPE (rhstype);
+
+ if (TYPE_LANG_SPECIFIC (type)
+ && (IS_SIGNATURE_POINTER (type) || IS_SIGNATURE_REFERENCE (type)))
+ return build_signature_pointer_constructor (type, rhs);
+
+ if (IS_AGGR_TYPE (type) && TYPE_NEEDS_CONSTRUCTING (type))
+ {
+ if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (rhstype))
+ {
+ /* This is sufficient to perform initialization. No need,
+ apparently, to go through X(X&) to do first-cut
+ initialization. Return through a TARGET_EXPR so that we get
+ cleanups if it is used. */
+ if (TREE_CODE (rhs) == CALL_EXPR)
+ {
+ rhs = build_cplus_new (type, rhs, 0);
+ return rhs;
+ }
+ /* Handle the case of default parameter initialization and
+ initialization of static variables. */
+ else if (TREE_CODE (rhs) == INDIRECT_REF && TREE_HAS_CONSTRUCTOR (rhs))
+ {
+ my_friendly_assert (TREE_CODE (TREE_OPERAND (rhs, 0)) == CALL_EXPR, 318);
+ if (exp)
+ {
+ my_friendly_assert (TREE_VALUE (TREE_OPERAND (TREE_OPERAND (rhs, 0), 1)) == NULL_TREE, 316);
+ TREE_VALUE (TREE_OPERAND (TREE_OPERAND (rhs, 0), 1))
+ = build_unary_op (ADDR_EXPR, exp, 0);
+ }
+ else
+ rhs = build_cplus_new (type, TREE_OPERAND (rhs, 0), 0);
+ return rhs;
+ }
+ }
+ if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (rhstype)
+ || (IS_AGGR_TYPE (rhstype) && UNIQUELY_DERIVED_FROM_P (type, rhstype)))
+ {
+ if (TYPE_HAS_INIT_REF (type))
+ {
+ tree init = build_method_call (exp, constructor_name_full (type),
+ build_tree_list (NULL_TREE, rhs),
+ TYPE_BINFO (type), LOOKUP_NORMAL);
+
+ if (init == error_mark_node)
+ return error_mark_node;
+
+ if (exp == 0)
+ {
+ exp = build_cplus_new (type, init, 0);
+ return exp;
+ }
+
+ return build (COMPOUND_EXPR, type, init, exp);
+ }
+
+ /* ??? The following warnings are turned off because
+ this is another place where the default X(X&) constructor
+ is implemented. */
+ if (TYPE_HAS_ASSIGNMENT (type))
+ cp_warning ("bitwise copy: `%T' defines operator=", type);
+
+ if (TREE_CODE (TREE_TYPE (rhs)) == REFERENCE_TYPE)
+ rhs = convert_from_reference (rhs);
+ if (type != rhstype)
+ {
+ tree nrhs = build1 (NOP_EXPR, type, rhs);
+ TREE_CONSTANT (nrhs) = TREE_CONSTANT (rhs);
+ rhs = nrhs;
+ }
+ return rhs;
+ }
+
+ return convert (type, rhs);
+ }
+
+ if (type == TREE_TYPE (rhs))
+ {
+ if (TREE_READONLY_DECL_P (rhs))
+ rhs = decl_constant_value (rhs);
+ return rhs;
+ }
+
+ return convert_for_assignment (type, rhs, errtype, fndecl, parmnum);
+}
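+
+/* Roughly, the distinction described above:
+
+     struct X { X (); X (X&); X& operator= (X&); };
+     X a;
+     X b = a;    initialization: built here, through X(X&)
+     b = a;      assignment: not handled here, goes through operator=
+
+   so the same source-level `=' takes two different paths.  */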
+
+/* Expand an ASM statement with operands, handling output operands
+ that are not variables or INDIRECT_REFS by transforming such
+ cases into cases that expand_asm_operands can handle.
+
+ Arguments are same as for expand_asm_operands.
+
+ We don't do default conversions on all inputs, because it can screw
+ up operands that are expected to be in memory. */
+
+void
+c_expand_asm_operands (string, outputs, inputs, clobbers, vol, filename, line)
+ tree string, outputs, inputs, clobbers;
+ int vol;
+ char *filename;
+ int line;
+{
+ int noutputs = list_length (outputs);
+ register int i;
+ /* o[I] is the place that output number I should be written. */
+ register tree *o = (tree *) alloca (noutputs * sizeof (tree));
+ register tree tail;
+
+ /* Record the contents of OUTPUTS before it is modified. */
+ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++)
+ o[i] = TREE_VALUE (tail);
+
+ /* Generate the ASM_OPERANDS insn;
+ store into the TREE_VALUEs of OUTPUTS some trees for
+ where the values were actually stored. */
+ expand_asm_operands (string, outputs, inputs, clobbers, vol, filename, line);
+
+ /* Copy all the intermediate outputs into the specified outputs. */
+ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++)
+ {
+ if (o[i] != TREE_VALUE (tail))
+ {
+ expand_expr (build_modify_expr (o[i], NOP_EXPR, TREE_VALUE (tail)),
+ const0_rtx, VOIDmode, 0);
+ free_temp_slots ();
+ }
+ /* Detect modification of read-only values.
+ (Otherwise done by build_modify_expr.) */
+ else
+ {
+ tree type = TREE_TYPE (o[i]);
+ if (TYPE_READONLY (type)
+ || ((TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE)
+ && C_TYPE_FIELDS_READONLY (type)))
+ readonly_error (o[i], "modification by `asm'", 1);
+ }
+ }
+
+ /* Those MODIFY_EXPRs could do autoincrements. */
+ emit_queue ();
+}
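+
+/* A sketch of the kind of output operand this exists for, e.g. a
+   bitfield, which is neither a variable nor an INDIRECT_REF:
+
+     struct { int flag : 1; } s;
+     asm ("..." : "=r" (s.flag));
+
+   expand_asm_operands leaves the result in an intermediate, and the
+   loop above copies it into `s.flag' with build_modify_expr, which is
+   also where stores into read-only operands get caught.  */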
+
+/* Expand a C `return' statement.
+ RETVAL is the expression for what to return,
+ or a null pointer for `return;' with no value.
+
+ C++: upon seeing a `return', we must call destructors on all
+ variables in scope which had constructors called on them.
+ This means that if in a destructor, the base class destructors
+ must be called before returning.
+
+ The RETURN statement in C++ has initialization semantics. */
+
+void
+c_expand_return (retval)
+ tree retval;
+{
+ extern struct nesting *cond_stack, *loop_stack, *case_stack;
+ extern tree dtor_label, ctor_label;
+ tree result = DECL_RESULT (current_function_decl);
+ tree valtype = TREE_TYPE (result);
+ register int use_temp = 0;
+ int returns_value = 1;
+
+ if (TREE_THIS_VOLATILE (current_function_decl))
+ warning ("function declared `noreturn' has a `return' statement");
+
+ if (retval == error_mark_node)
+ {
+ current_function_returns_null = 1;
+ return;
+ }
+
+ if (retval == NULL_TREE)
+ {
+ /* An unnamed return value does not count. */
+
+ /* Can't just return from a destructor. */
+ if (dtor_label)
+ {
+ expand_goto (dtor_label);
+ return;
+ }
+
+ if (DECL_CONSTRUCTOR_P (current_function_decl))
+ retval = current_class_decl;
+ else if (DECL_NAME (result) != NULL_TREE
+ && TREE_CODE (valtype) != VOID_TYPE)
+ retval = result;
+ else
+ {
+ current_function_returns_null = 1;
+
+ if (valtype != NULL_TREE && TREE_CODE (valtype) != VOID_TYPE)
+ {
+ if (DECL_NAME (DECL_RESULT (current_function_decl)) == NULL_TREE)
+ {
+ pedwarn ("`return' with no value, in function returning non-void");
+ /* Clear this, so finish_function won't say that we
+ reach the end of a non-void function (which we don't,
+ we gave a return!). */
+ current_function_returns_null = 0;
+ }
+ }
+
+ expand_null_return ();
+ return;
+ }
+ }
+ else if (DECL_CONSTRUCTOR_P (current_function_decl)
+ && retval != current_class_decl)
+ {
+ error ("return from a constructor: use `this = ...' instead");
+ retval = current_class_decl;
+ }
+
+ if (valtype == NULL_TREE || TREE_CODE (valtype) == VOID_TYPE)
+ {
+ current_function_returns_null = 1;
+ /* We do this here so we'll avoid a warning about how the function
+ "may or may not return a value" in finish_function. */
+ returns_value = 0;
+
+ if (retval)
+ pedwarn ("`return' with a value, in function returning void");
+ expand_return (retval);
+ }
+ /* Add some useful error checking for C++. */
+ else if (TREE_CODE (valtype) == REFERENCE_TYPE)
+ {
+ tree whats_returned;
+ tree tmp_result = result;
+
+ /* Don't initialize directly into a non-BLKmode retval, since that
+ could lose when being inlined by another caller. (GCC can't
+ read the function return register in an inline function when
+ the return value is being ignored). */
+ if (result && TYPE_MODE (TREE_TYPE (tmp_result)) != BLKmode)
+ tmp_result = 0;
+
+ /* Convert to reference now, so we can give an error if we
+ return a reference to a non-lvalue. */
+ retval = convert_for_initialization (tmp_result, valtype, retval,
+ LOOKUP_NORMAL, "return",
+ NULL_TREE, 0);
+
+ /* Sort through common things to see what it is
+ we are returning. */
+ whats_returned = retval;
+ if (TREE_CODE (whats_returned) == COMPOUND_EXPR)
+ {
+ whats_returned = TREE_OPERAND (whats_returned, 1);
+ if (TREE_CODE (whats_returned) == ADDR_EXPR)
+ whats_returned = TREE_OPERAND (whats_returned, 0);
+ }
+ if (TREE_CODE (whats_returned) == ADDR_EXPR)
+ {
+ whats_returned = TREE_OPERAND (whats_returned, 0);
+ while (TREE_CODE (whats_returned) == NEW_EXPR
+ || TREE_CODE (whats_returned) == TARGET_EXPR
+ || TREE_CODE (whats_returned) == WITH_CLEANUP_EXPR)
+ /* Get the target. */
+ whats_returned = TREE_OPERAND (whats_returned, 0);
+ }
+
+ if (TREE_CODE (whats_returned) == VAR_DECL && DECL_NAME (whats_returned))
+ {
+ if (TEMP_NAME_P (DECL_NAME (whats_returned)))
+ warning ("reference to non-lvalue returned");
+ else if (! TREE_STATIC (whats_returned)
+ && IDENTIFIER_LOCAL_VALUE (DECL_NAME (whats_returned)))
+ cp_warning_at ("reference to local variable `%D' returned", whats_returned);
+ }
+ }
+ else if (TREE_CODE (retval) == ADDR_EXPR)
+ {
+ tree whats_returned = TREE_OPERAND (retval, 0);
+
+ if (TREE_CODE (whats_returned) == VAR_DECL
+ && DECL_NAME (whats_returned)
+ && IDENTIFIER_LOCAL_VALUE (DECL_NAME (whats_returned))
+ && !TREE_STATIC (whats_returned))
+ cp_warning_at ("address of local variable `%D' returned", whats_returned);
+ }
+
+ /* Now deal with possible C++ hair:
+ (1) Compute the return value.
+ (2) If there are aggregate values with destructors which
+ must be cleaned up, clean them (taking care
+ not to clobber the return value).
+ (3) If an X(X&) constructor is defined, the return
+ value must be returned via that. */
+
+ if (retval == result
+ /* Watch out for constructors, which "return" aggregates
+ via initialization, but which otherwise "return" a pointer. */
+ || DECL_CONSTRUCTOR_P (current_function_decl))
+ {
+ /* This is just an error--it's already been reported. */
+ if (TYPE_SIZE (valtype) == NULL_TREE)
+ return;
+
+ if (TYPE_MODE (valtype) != BLKmode
+ && any_pending_cleanups (1))
+ {
+ retval = get_temp_regvar (valtype, retval);
+ use_temp = obey_regdecls;
+ }
+ }
+ else if (IS_AGGR_TYPE (valtype) && TYPE_NEEDS_CONSTRUCTING (valtype))
+ {
+ /* Throw away the cleanup that `build_functional_cast' gave us. */
+ if (TREE_CODE (retval) == WITH_CLEANUP_EXPR
+ && TREE_CODE (TREE_OPERAND (retval, 0)) == TARGET_EXPR)
+ retval = TREE_OPERAND (retval, 0);
+ expand_aggr_init (result, retval, 0);
+ DECL_INITIAL (result) = NULL_TREE;
+ retval = 0;
+ }
+ else
+ {
+ if (TYPE_MODE (valtype) == VOIDmode)
+ {
+ if (TYPE_MODE (TREE_TYPE (result)) != VOIDmode
+ && warn_return_type)
+ warning ("return of void value in function returning non-void");
+ expand_expr_stmt (retval);
+ retval = 0;
+ result = 0;
+ }
+ else if (TYPE_MODE (valtype) != BLKmode
+ && any_pending_cleanups (1))
+ {
+ retval = get_temp_regvar (valtype, retval);
+ use_temp = obey_regdecls;
+ result = 0;
+ }
+ else
+ {
+ retval = convert_for_initialization (result, valtype, retval,
+ LOOKUP_NORMAL,
+ "return", NULL_TREE, 0);
+ DECL_INITIAL (result) = NULL_TREE;
+ }
+ if (retval == error_mark_node)
+ return;
+ }
+
+ emit_queue ();
+
+ if (retval != NULL_TREE
+ && TREE_CODE_CLASS (TREE_CODE (retval)) == 'd'
+ && cond_stack == 0 && loop_stack == 0 && case_stack == 0)
+ current_function_return_value = retval;
+
+ if (result)
+ {
+ /* Everything's great--RETVAL is in RESULT. */
+ if (original_result_rtx)
+ store_expr (result, original_result_rtx, 0);
+ else if (retval && retval != result)
+ {
+ /* Clear this out so the later call to decl_function_context
+ won't end up bombing on us. */
+ if (DECL_CONTEXT (result) == error_mark_node)
+ DECL_CONTEXT (result) = NULL_TREE;
+ /* Here is where we finally get RETVAL into RESULT.
+ `expand_return' does the magic of protecting
+ RESULT from cleanups. */
+ retval = build (INIT_EXPR, TREE_TYPE (result), result, retval);
+ TREE_SIDE_EFFECTS (retval) = 1;
+ expand_return (retval);
+ }
+ else
+ expand_return (result);
+
+ use_variable (DECL_RTL (result));
+ if (ctor_label && TREE_CODE (ctor_label) != ERROR_MARK)
+ expand_goto (ctor_label);
+ else
+ expand_null_return ();
+ }
+ else
+ {
+ /* We may still need to put RETVAL into RESULT. */
+ result = DECL_RESULT (current_function_decl);
+ if (original_result_rtx)
+ {
+ /* Here we have a named return value that went
+ into memory. We can compute RETVAL into that. */
+ if (retval)
+ expand_assignment (result, retval, 0, 0);
+ else
+ store_expr (result, original_result_rtx, 0);
+ result = make_tree (TREE_TYPE (result), original_result_rtx);
+ }
+ else if (ctor_label && TREE_CODE (ctor_label) != ERROR_MARK)
+ {
+ /* Here RETVAL is CURRENT_CLASS_DECL, so there's nothing to do. */
+ expand_goto (ctor_label);
+ }
+ else if (retval)
+ {
+ /* Here is where we finally get RETVAL into RESULT.
+ `expand_return' does the magic of protecting
+ RESULT from cleanups. */
+ result = build (INIT_EXPR, TREE_TYPE (result), result, retval);
+ TREE_SIDE_EFFECTS (result) = 1;
+ expand_return (result);
+ }
+ else if (TYPE_MODE (TREE_TYPE (result)) != VOIDmode)
+ expand_return (result);
+ }
+
+ current_function_returns_value = returns_value;
+#if 0
+ /* These wind up after the BARRIER, which causes problems for
+ expand_end_binding. What purpose were they supposed to serve? */
+ if (original_result_rtx)
+ use_variable (original_result_rtx);
+ if (use_temp)
+ use_variable (DECL_RTL (DECL_RESULT (current_function_decl)));
+#endif
+
+ /* One way to clear out cleanups that EXPR might
+ generate. Note that this code will really be
+ dead code, but that is ok--cleanups that were
+ needed were handled by the magic of `return'. */
+ expand_cleanups_to (NULL_TREE);
+}
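+
+/* Two of the warnings above in miniature:
+
+     int &f () { int i = 0; return i; }    reference to local variable
+     int *g () { int i = 0; return &i; }   address of local variable
+
+   both are warnings only; the code is accepted as written.  */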
+
+/* Start a C switch statement, testing expression EXP.
+ Return EXP if it is valid, an error node otherwise. */
+
+tree
+c_expand_start_case (exp)
+ tree exp;
+{
+ tree type;
+ register enum tree_code code;
+
+ /* Convert from references, etc. */
+ exp = default_conversion (exp);
+ type = TREE_TYPE (exp);
+ code = TREE_CODE (type);
+
+ if (IS_AGGR_TYPE_CODE (code))
+ exp = build_type_conversion (CONVERT_EXPR, integer_type_node, exp, 1);
+
+ if (exp == NULL_TREE)
+ {
+ error ("switch quantity not an integer");
+ exp = error_mark_node;
+ }
+ type = TREE_TYPE (exp);
+ code = TREE_CODE (type);
+
+ if (code != INTEGER_TYPE && code != ENUMERAL_TYPE && code != ERROR_MARK)
+ {
+ error ("switch quantity not an integer");
+ exp = error_mark_node;
+ }
+ else
+ {
+ tree index;
+
+ exp = default_conversion (exp);
+ type = TREE_TYPE (exp);
+ index = get_unwidened (exp, 0);
+ /* We can't strip a conversion from a signed type to an unsigned,
+ because if we did, int_fits_type_p would do the wrong thing
+ when checking case values for being in range,
+ and it's too hard to do the right thing. */
+ if (TREE_UNSIGNED (TREE_TYPE (exp))
+ == TREE_UNSIGNED (TREE_TYPE (index)))
+ exp = index;
+ }
+
+ expand_start_case (1, exp, type, "switch statement");
+
+ return exp;
+}
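+
+/* For instance, assuming a user-defined conversion:
+
+     struct S { operator int (); };
+     S s;
+
+   `switch (s)' is accepted, converted via build_type_conversion
+   above, while `switch (1.5)' draws "switch quantity not an
+   integer".  */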
+
+/* CONSTP remembers whether or not all the intervening pointers in the `to'
+ type have been const. */
+int
+comp_ptr_ttypes_real (to, from, constp)
+ tree to, from;
+ int constp;
+{
+ for (; ; to = TREE_TYPE (to), from = TREE_TYPE (from))
+ {
+ if (TREE_CODE (to) != TREE_CODE (from))
+ return 0;
+
+ if (TYPE_READONLY (from) > TYPE_READONLY (to)
+ || TYPE_VOLATILE (from) > TYPE_VOLATILE (to))
+ return 0;
+
+ if (! constp
+ && (TYPE_READONLY (to) > TYPE_READONLY (from)
+ || TYPE_VOLATILE (to) > TYPE_VOLATILE (from)))
+ return 0;
+ constp &= TYPE_READONLY (to);
+
+ if (TREE_CODE (to) != POINTER_TYPE)
+ return comptypes (TYPE_MAIN_VARIANT (to), TYPE_MAIN_VARIANT (from), 1);
+ }
+}
+
+/* When comparing, say, char ** to char const **, this function takes the
+ 'char *' and 'char const *'. Do not pass non-pointer types to this
+ function. */
+int
+comp_ptr_ttypes (to, from)
+ tree to, from;
+{
+ return comp_ptr_ttypes_real (to, from, 1);
+}
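+
+/* So, for example, with `char **cpp', initializing a
+   `const char **' from it fails the check (const is added at a level
+   not protected by an intervening const), while initializing a
+   `const char *const *' passes, since every pointer on the way down
+   to the added const is itself const.  */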
diff --git a/gnu/usr.bin/cc/cc1plus/typeck2.c b/gnu/usr.bin/cc/cc1plus/typeck2.c
new file mode 100644
index 0000000..871173f
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/typeck2.c
@@ -0,0 +1,1607 @@
+/* Report error messages, build initializers, and perform
+ some front-end optimizations for the C++ compiler.
+ Copyright (C) '87, '88, '89, '92, 1993, 1994 Free Software Foundation, Inc.
+ Hacked by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file is part of the C++ front end.
+ It contains routines to build C++ expressions given their operands,
+ including computing the types of the result, C and C++ specific error
+ checks, and some optimization.
+
+ There are also routines to build RETURN_STMT nodes and CASE_STMT nodes,
+ and to process initializations in declarations (since they work
+ like a strange sort of assignment). */
+
+#include "config.h"
+#include <stdio.h>
+#include "tree.h"
+#include "cp-tree.h"
+#include "flags.h"
+
+static tree process_init_constructor ();
+extern void pedwarn (), error ();
+
+extern int errorcount;
+extern int sorrycount;
+
+/* Print an error message stemming from an attempt to use
+ BASETYPE as a base class for TYPE. */
+tree
+error_not_base_type (basetype, type)
+ tree basetype, type;
+{
+ if (TREE_CODE (basetype) == FUNCTION_DECL)
+ basetype = DECL_CLASS_CONTEXT (basetype);
+ cp_error ("type `%T' is not a base type for type `%T'", basetype, type);
+ return error_mark_node;
+}
+
+tree
+binfo_or_else (parent_or_type, type)
+ tree parent_or_type, type;
+{
+ tree binfo;
+ if (TYPE_MAIN_VARIANT (parent_or_type) == TYPE_MAIN_VARIANT (type))
+ return TYPE_BINFO (parent_or_type);
+ if ((binfo = get_binfo (parent_or_type, TYPE_MAIN_VARIANT (type), 0)))
+ {
+ if (binfo == error_mark_node)
+ return NULL_TREE;
+ return binfo;
+ }
+ error_not_base_type (parent_or_type, type);
+ return NULL_TREE;
+}
+
+/* Print an error message stemming from an invalid use of an
+ aggregate type.
+
+ TYPE is the type or binfo which draws the error.
+ MSG is the message to print.
+ ARG is an optional argument which may provide more information. */
+void
+error_with_aggr_type (type, msg, arg)
+ tree type;
+ char *msg;
+ HOST_WIDE_INT arg;
+{
+ tree name;
+
+ if (TREE_CODE (type) == TREE_VEC)
+ type = BINFO_TYPE (type);
+
+ name = TYPE_NAME (type);
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+ error (msg, IDENTIFIER_POINTER (name), arg);
+}
+
+/* According to ARM $7.1.6, "A `const' object may be initialized, but its
+ value may not be changed thereafter." Thus, we emit hard errors for these,
+ rather than just pedwarns. If `SOFT' is 1, then we just pedwarn. (For
+ example, conversions to references.) */
+void
+readonly_error (arg, string, soft)
+ tree arg;
+ char *string;
+ int soft;
+{
+ char *fmt;
+ void (*fn)();
+
+ if (soft)
+ fn = pedwarn;
+ else
+ fn = error;
+
+ if (TREE_CODE (arg) == COMPONENT_REF)
+ {
+ if (TYPE_READONLY (TREE_TYPE (TREE_OPERAND (arg, 0))))
+ fmt = "%s of member `%s' in read-only structure";
+ else
+ fmt = "%s of read-only member `%s'";
+ (*fn) (fmt, string, lang_printable_name (TREE_OPERAND (arg, 1)));
+ }
+ else if (TREE_CODE (arg) == VAR_DECL)
+ {
+ if (DECL_LANG_SPECIFIC (arg)
+ && DECL_IN_AGGR_P (arg)
+ && !TREE_STATIC (arg))
+ fmt = "%s of constant field `%s'";
+ else
+ fmt = "%s of read-only variable `%s'";
+ (*fn) (fmt, string, lang_printable_name (arg));
+ }
+ else if (TREE_CODE (arg) == PARM_DECL)
+ (*fn) ("%s of read-only parameter `%s'", string,
+ lang_printable_name (arg));
+ else if (TREE_CODE (arg) == INDIRECT_REF
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (arg, 0))) == REFERENCE_TYPE
+ && (TREE_CODE (TREE_OPERAND (arg, 0)) == VAR_DECL
+ || TREE_CODE (TREE_OPERAND (arg, 0)) == PARM_DECL))
+ (*fn) ("%s of read-only reference `%s'",
+ string, lang_printable_name (TREE_OPERAND (arg, 0)));
+ else if (TREE_CODE (arg) == RESULT_DECL)
+ (*fn) ("%s of read-only named return value `%s'",
+ string, lang_printable_name (arg));
+ else
+ (*fn) ("%s of read-only location", string);
+}
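+
+/* For example, given `const int k = 3;', the assignment `k = 4'
+   arrives here with STRING == "assignment" and produces the hard
+   error "assignment of read-only variable `k'", per the ARM rule
+   cited above.  */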
+
+/* Print an error message for invalid use of a type which declares
+ abstract virtual functions, and so cannot be instantiated. */
+void
+abstract_virtuals_error (decl, type)
+ tree decl;
+ tree type;
+{
+ tree u = CLASSTYPE_ABSTRACT_VIRTUALS (type);
+
+ if (decl)
+ {
+ if (TREE_CODE (decl) == RESULT_DECL)
+ return;
+
+ if (TREE_CODE (decl) == VAR_DECL)
+ cp_error ("cannot declare variable `%D' to be of type `%T'",
+ decl, type);
+ else if (TREE_CODE (decl) == PARM_DECL)
+ cp_error ("cannot declare parameter `%D' to be of type `%T'",
+ decl, type);
+ else if (TREE_CODE (decl) == FIELD_DECL)
+ cp_error ("cannot declare field `%D' to be of type `%T'",
+ decl, type);
+ else if (TREE_CODE (decl) == FUNCTION_DECL
+ && TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
+ cp_error ("invalid return type for method `%#D'", decl);
+ else if (TREE_CODE (decl) == FUNCTION_DECL)
+ cp_error ("invalid return type for function `%#D'", decl);
+ }
+ else cp_error ("cannot allocate an object of type `%T'", type);
+ /* Only go through this once. */
+ if (TREE_PURPOSE (u) == NULL_TREE)
+ {
+ error (" since the following virtual functions are abstract:");
+ TREE_PURPOSE (u) = error_mark_node;
+ while (u)
+ {
+ cp_error ("\t%#D", TREE_VALUE (u));
+ u = TREE_CHAIN (u);
+ }
+ }
+ else cp_error (" since type `%T' has abstract virtual functions", type);
+}
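+
+/* E.g., given
+
+     struct B { virtual void f () = 0; };
+     B b;
+
+   we emit "cannot declare variable `b' to be of type `B'", followed
+   (the first time only) by the list of abstract virtuals, here `f'.  */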
+
+/* Print an error message for invalid use of a signature type.
+ Signatures are treated similarly to abstract classes here; they
+ cannot be instantiated. */
+void
+signature_error (decl, type)
+ tree decl;
+ tree type;
+{
+ if (decl)
+ {
+ if (TREE_CODE (decl) == RESULT_DECL)
+ return;
+
+ if (TREE_CODE (decl) == VAR_DECL)
+ cp_error ("cannot declare variable `%D' to be of signature type `%T'",
+ decl, type);
+ else if (TREE_CODE (decl) == PARM_DECL)
+ cp_error ("cannot declare parameter `%D' to be of signature type `%T'",
+ decl, type);
+ else if (TREE_CODE (decl) == FIELD_DECL)
+ cp_error ("cannot declare field `%D' to be of signature type `%T'",
+ decl, type);
+ else if (TREE_CODE (decl) == FUNCTION_DECL
+ && TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
+ cp_error ("invalid return type for method `%#D'", decl);
+ else if (TREE_CODE (decl) == FUNCTION_DECL)
+ cp_error ("invalid return type for function `%#D'", decl);
+ }
+ else
+ cp_error ("cannot allocate an object of signature type `%T'", type);
+}
+
+/* Print an error message for invalid use of an incomplete type.
+ VALUE is the expression that was used (or 0 if that isn't known)
+ and TYPE is the type that was invalid. */
+
+void
+incomplete_type_error (value, type)
+ tree value;
+ tree type;
+{
+ char *errmsg;
+
+ /* Avoid duplicate error message. */
+ if (TREE_CODE (type) == ERROR_MARK)
+ return;
+
+ if (value != 0 && (TREE_CODE (value) == VAR_DECL
+ || TREE_CODE (value) == PARM_DECL))
+ error ("`%s' has an incomplete type",
+ IDENTIFIER_POINTER (DECL_NAME (value)));
+ else
+ {
+ retry:
+ /* We must print an error message. Be clever about what it says. */
+
+ switch (TREE_CODE (type))
+ {
+ case RECORD_TYPE:
+ errmsg = "invalid use of undefined type `struct %s'";
+ break;
+
+ case UNION_TYPE:
+ errmsg = "invalid use of undefined type `union %s'";
+ break;
+
+ case ENUMERAL_TYPE:
+ errmsg = "invalid use of undefined type `enum %s'";
+ break;
+
+ case VOID_TYPE:
+ error ("invalid use of void expression");
+ return;
+
+ case ARRAY_TYPE:
+ if (TYPE_DOMAIN (type))
+ {
+ type = TREE_TYPE (type);
+ goto retry;
+ }
+ error ("invalid use of array with unspecified bounds");
+ return;
+
+ case OFFSET_TYPE:
+ error ("invalid use of member type (did you forget the `&' ?)");
+ return;
+
+ default:
+ my_friendly_abort (108);
+ }
+
+ error_with_aggr_type (type, errmsg);
+ }
+}
+
+/* Like error(), but don't call report_error_function(). */
+static void
+ack (s, v, v2)
+ char *s;
+ HOST_WIDE_INT v;
+ HOST_WIDE_INT v2;
+{
+ extern char * progname;
+
+ if (input_filename)
+ fprintf (stderr, "%s:%d: ", input_filename, lineno);
+ else
+ fprintf (stderr, "%s: ", progname);
+
+ fprintf (stderr, s, v, v2);
+ fprintf (stderr, "\n");
+}
+
+/* There are times when the compiler can get very confused, confused
+ to the point of giving up by aborting, simply because of previous
+ input errors. It is much better to have the user go back and
+ correct those errors first, and see if that makes us happier, than it
+ is to abort on them. This is because when one has a 10,000 line
+ program, and the compiler comes back with ``core dump'', the user
+ is left not even knowing where to begin to fix things, and has no
+ way to even try to work around things.
+
+ The parameter is to uniquely identify the problem to the user, so
+ that they can say, I am having problem 59, and know that fix 7 will
+ probably solve their problem. Or, we can document what problem
+ 59 is, so they can understand how to work around it, should they
+ ever run into it.
+
+ Note, there will be no more calls in the C++ front end to abort,
+ because the C++ front end is still so unreliable. The C front end
+ can get away with calling abort because, for most of the calls to
+ abort on most machines, it can, I suspect, be proven that it is
+ impossible ever to reach them. The same is not yet true for C++;
+ one day, maybe it will be.
+
+ We used to tell people to "fix the above error[s] and try recompiling
+ the program" via a call to fatal, but that message tended to look
+ silly. So instead, we just do the equivalent of a call to fatal in the
+ same situation (call exit). */
+
+/* First used: 0 (reserved), Last used: 360. Free: */
+
+static int abortcount = 0;
+
+void
+my_friendly_abort (i)
+ int i;
+{
+ /* If the previous error came through here, i.e. report_error_function
+ ended up calling us again, don't just exit; we want a diagnostic of
+ some kind. */
+ if (abortcount == 1)
+ current_function_decl = NULL_TREE;
+ else if (errorcount > 0 || sorrycount > 0)
+ {
+ if (abortcount > 1)
+ {
+ if (i == 0)
+ ack ("Internal compiler error.");
+ else
+ ack ("Internal compiler error %d.", i);
+ ack ("Please submit a full bug report to `bug-g++@prep.ai.mit.edu'.");
+ }
+ else
+ error ("confused by earlier errors, bailing out");
+
+ exit (34);
+ }
+ ++abortcount;
+
+ if (i == 0)
+ error ("Internal compiler error.");
+ else
+ error ("Internal compiler error %d.", i);
+
+ fatal ("Please submit a full bug report to `bug-g++@prep.ai.mit.edu'.");
+}
+
+void
+my_friendly_assert (cond, where)
+ int cond, where;
+{
+ if (cond == 0)
+ my_friendly_abort (where);
+}
+
+/* Return nonzero if VALUE is a valid constant-valued expression
+ for use in initializing a static variable; one that can be an
+ element of a "constant" initializer.
+
+ Return 1 if the value is absolute; return 2 if it is relocatable.
+ We assume that VALUE has been folded as much as possible;
+ therefore, we do not need to check for such things as
+ arithmetic combinations of integers. */
+
+static int
+initializer_constant_valid_p (value)
+ tree value;
+{
+ switch (TREE_CODE (value))
+ {
+ case CONSTRUCTOR:
+ return TREE_STATIC (value);
+
+ case INTEGER_CST:
+ case REAL_CST:
+ case STRING_CST:
+ return 1;
+
+ case ADDR_EXPR:
+ return 2;
+
+ case CONVERT_EXPR:
+ case NOP_EXPR:
+ /* Allow conversions between types of the same kind. */
+ if (TREE_CODE (TREE_TYPE (value))
+ == TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))))
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0));
+ /* Allow (int) &foo provided int is as wide as a pointer. */
+ if (TREE_CODE (TREE_TYPE (value)) == INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (value, 0))) == POINTER_TYPE
+ && ! tree_int_cst_lt (TYPE_SIZE (TREE_TYPE (value)),
+ TYPE_SIZE (TREE_TYPE (TREE_OPERAND (value, 0)))))
+ return initializer_constant_valid_p (TREE_OPERAND (value, 0));
+ return 0;
+
+ case PLUS_EXPR:
+ {
+ int valid0 = initializer_constant_valid_p (TREE_OPERAND (value, 0));
+ int valid1 = initializer_constant_valid_p (TREE_OPERAND (value, 1));
+ if (valid0 == 1 && valid1 == 2)
+ return 2;
+ if (valid0 == 2 && valid1 == 1)
+ return 2;
+ return 0;
+ }
+
+ case MINUS_EXPR:
+ {
+ int valid0 = initializer_constant_valid_p (TREE_OPERAND (value, 0));
+ int valid1 = initializer_constant_valid_p (TREE_OPERAND (value, 1));
+ if (valid0 == 2 && valid1 == 1)
+ return 2;
+ return 0;
+ }
+
+ default:
+ return 0;
+ }
+}
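+
+/* A few sample static initializers and how the above classifies them:
+
+     static int i = 1 + 2;       folded to a constant; absolute (1)
+     static int *p = &i;         ADDR_EXPR; relocatable (2)
+     static int *q = &i + 1;     absolute plus relocatable; still 2
+     static long a = (long) &i;  ok only if `long' is at least as
+                                 wide as a pointer  */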
+
+/* Perform appropriate conversions on the initial value of a variable,
+ store it in the declaration DECL,
+ and print any error messages that are appropriate.
+ If the init is invalid, store an ERROR_MARK.
+
+ C++: Note that INIT might be a TREE_LIST, which would mean that it is
+ a base class initializer for some aggregate type, hopefully compatible
+ with DECL. If INIT is a single element, and DECL is an aggregate
+ type, we silently convert INIT into a TREE_LIST, allowing a constructor
+ to be called.
+
+ If INIT is a TREE_LIST and there is no constructor, turn INIT
+ into a CONSTRUCTOR and use standard initialization techniques.
+ Perhaps a warning should be generated?
+
+ Returns value of initializer if initialization could not be
+ performed for static variable. In that case, caller must do
+ the storing. */
+
+tree
+store_init_value (decl, init)
+ tree decl, init;
+{
+ register tree value, type;
+
+ /* If variable's type was invalidly declared, just ignore it. */
+
+ type = TREE_TYPE (decl);
+ if (TREE_CODE (type) == ERROR_MARK)
+ return NULL_TREE;
+
+ /* Take care of C++ business up here. */
+ type = TYPE_MAIN_VARIANT (type);
+
+ /* implicitly tests if IS_AGGR_TYPE. */
+ if (TYPE_NEEDS_CONSTRUCTING (type) && TREE_CODE (init) != CONSTRUCTOR)
+ my_friendly_abort (109);
+ else if (IS_AGGR_TYPE (type))
+ {
+ /* Although we are not allowed to declare variables of signature
+ type, we complain about a possible constructor call in such a
+ declaration as well. */
+ if (TREE_CODE (init) == TREE_LIST
+ && IS_SIGNATURE (type))
+ {
+ cp_error ("constructor syntax cannot be used with signature type `%T'",
+ type);
+ init = error_mark_node;
+ }
+ else if (TREE_CODE (init) == TREE_LIST)
+ {
+ cp_error ("constructor syntax used, but no constructor declared for type `%T'", type);
+ init = build_nt (CONSTRUCTOR, NULL_TREE, nreverse (init));
+ }
+#if 0
+ if (TREE_CODE (init) == CONSTRUCTOR)
+ {
+ tree field;
+ tree funcs;
+ int func;
+
+ /* Check that we're really an aggregate as ARM 8.4.1 defines it. */
+ if (CLASSTYPE_N_BASECLASSES (type))
+ cp_error_at ("initializer list construction illegal for derived class object `%D'", decl);
+ if (CLASSTYPE_VTBL_PTR (type))
+ cp_error_at ("initializer list construction illegal for polymorphic class object `%D'", decl);
+ if (TYPE_NEEDS_CONSTRUCTING (type))
+ {
+ cp_error_at ("initializer list construction illegal for `%D'", decl);
+ error ("due to the presence of a constructor");
+ }
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ if (TREE_PRIVATE (field) || TREE_PROTECTED (field))
+ {
+ cp_error_at ("initializer list construction illegal for `%D'", decl);
+ cp_error_at ("due to non-public access of member `%D'", field);
+ }
+ funcs = TYPE_METHODS (type);
+ if (funcs)
+ for (func = 0; func < TREE_VEC_LENGTH (funcs); func++)
+ {
+ field = TREE_VEC_ELT (funcs, func);
+ if (field && (TREE_PRIVATE (field) || TREE_PROTECTED (field)))
+ {
+ cp_error_at ("initializer list construction illegal for `%D'", decl);
+ cp_error_at ("due to non-public access of member `%D'", field);
+ }
+ }
+ }
+#endif
+ }
+ else if (TREE_CODE (init) == TREE_LIST
+ && TREE_TYPE (init) != unknown_type_node)
+ {
+ if (TREE_CODE (decl) == RESULT_DECL)
+ {
+ if (TREE_CHAIN (init))
+ {
+ warning ("comma expression used to initialize return value");
+ init = build_compound_expr (init);
+ }
+ else
+ init = TREE_VALUE (init);
+ }
+ else if (TREE_TYPE (init) != 0
+ && TREE_CODE (TREE_TYPE (init)) == OFFSET_TYPE)
+ {
+ /* Use the type of our variable to instantiate
+ the type of our initializer. */
+ init = instantiate_type (type, init, 1);
+ }
+ else if (TREE_CODE (init) == TREE_LIST
+ && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
+ {
+ error ("cannot initialize arrays using this syntax");
+ return NULL_TREE;
+ }
+ else
+ {
+ /* We get here with code like `int a (2);' */
+
+ if (TREE_CHAIN (init) != NULL_TREE)
+ {
+ pedwarn ("initializer list being treated as compound expression");
+ init = build_compound_expr (init);
+ }
+ else
+ init = TREE_VALUE (init);
+ }
+ }
+
+ /* End of special C++ code. */
+
+ /* Digest the specified initializer into an expression. */
+
+ value = digest_init (type, init, (tree *) 0);
+
+ /* Store the expression if valid; else report error. */
+
+ if (TREE_CODE (value) == ERROR_MARK)
+ ;
+ else if (TREE_STATIC (decl)
+ && (! TREE_CONSTANT (value)
+ || ! initializer_constant_valid_p (value)
+#if 0
+ /* A STATIC PUBLIC int variable doesn't have to be
+ run time inited when doing pic. (mrs) */
+ /* Since ctors and dtors are the only things that can
+ reference vtables, and they are always written down
+ in the vtable definition, we can leave the
+ vtables in initialized data space.
+ However, other initialized data cannot be initialized
+ this way. Instead a global file-level initializer
+ must do the job. */
+ || (flag_pic && !DECL_VIRTUAL_P (decl) && TREE_PUBLIC (decl))
+#endif
+ ))
+
+ return value;
+ else
+ {
+ if (pedantic && TREE_CODE (value) == CONSTRUCTOR
+ /* Don't complain about non-constant initializers of
+ signature tables and signature pointers/references. */
+ && ! (TYPE_LANG_SPECIFIC (type)
+ && (IS_SIGNATURE (type)
+ || IS_SIGNATURE_POINTER (type)
+ || IS_SIGNATURE_REFERENCE (type))))
+ {
+ if (! TREE_CONSTANT (value) || ! TREE_STATIC (value))
+ pedwarn ("ANSI C++ forbids non-constant aggregate initializer expressions");
+ }
+ }
+ DECL_INITIAL (decl) = value;
+ return NULL_TREE;
+}
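+
+/* The `int a (2);' case mentioned above, in brief: `int a (2);' ends
+   up exactly like `int a = 2;', while `int b (1, 2);' draws the
+   "compound expression" pedwarn and initializes `b' from `(1, 2)'.  */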
+
+/* Digest the parser output INIT as an initializer for type TYPE.
+ Return a C expression of type TYPE to represent the initial value.
+
+ If TAIL is nonzero, it points to a variable holding a list of elements
+ of which INIT is the first. We update the list stored there by
+ removing from the head all the elements that we use.
+ Normally this is only one; we use more than one element only if
+ TYPE is an aggregate and INIT is not a constructor. */
+
+tree
+digest_init (type, init, tail)
+ tree type, init, *tail;
+{
+ enum tree_code code = TREE_CODE (type);
+ tree element = 0;
+ tree old_tail_contents;
+ /* Nonzero if INIT is a braced grouping, which comes in as a CONSTRUCTOR
+ tree node which has no TREE_TYPE. */
+ int raw_constructor;
+
+ /* By default, assume we use one element from a list.
+ We correct this later in the sole case where it is not true. */
+
+ if (tail)
+ {
+ old_tail_contents = *tail;
+ *tail = TREE_CHAIN (*tail);
+ }
+
+ if (init == error_mark_node || (TREE_CODE (init) == TREE_LIST
+ && TREE_VALUE (init) == error_mark_node))
+ return error_mark_node;
+
+ /* Strip NON_LVALUE_EXPRs, since we aren't using INIT as an lvalue. */
+ if (TREE_CODE (init) == NON_LVALUE_EXPR)
+ init = TREE_OPERAND (init, 0);
+
+ if (init && TREE_TYPE (init) && TYPE_PTRMEMFUNC_P (type))
+ init = default_conversion (init);
+
+ if (init && TYPE_PTRMEMFUNC_P (type)
+ && ((TREE_CODE (init) == ADDR_EXPR
+ && TREE_CODE (TREE_TYPE (init)) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (init))) == METHOD_TYPE)
+ || TREE_CODE (init) == TREE_LIST
+ || integer_zerop (init)
+ || (TREE_TYPE (init) && TYPE_PTRMEMFUNC_P (TREE_TYPE (init)))))
+ {
+ return build_ptrmemfunc (TYPE_PTRMEMFUNC_FN_TYPE (type), init, 0);
+ }
+
+ raw_constructor = TREE_CODE (init) == CONSTRUCTOR && TREE_TYPE (init) == 0;
+
+ if (init && raw_constructor
+ && CONSTRUCTOR_ELTS (init) != 0
+ && TREE_CHAIN (CONSTRUCTOR_ELTS (init)) == 0)
+ {
+ element = TREE_VALUE (CONSTRUCTOR_ELTS (init));
+ /* Strip NON_LVALUE_EXPRs, since we aren't using ELEMENT as an lvalue. */
+ if (element && TREE_CODE (element) == NON_LVALUE_EXPR)
+ element = TREE_OPERAND (element, 0);
+ if (element == error_mark_node)
+ return element;
+ }
+
+ /* Any type can be initialized from an expression of the same type,
+ optionally with braces. */
+
+ if (init && TREE_TYPE (init)
+ && (TYPE_MAIN_VARIANT (TREE_TYPE (init)) == type
+ || (code == ARRAY_TYPE && comptypes (TREE_TYPE (init), type, 1))))
+ {
+ if (pedantic && code == ARRAY_TYPE
+ && TREE_CODE (init) != STRING_CST)
+ pedwarn ("ANSI C++ forbids initializing array from array expression");
+ if (TREE_CODE (init) == CONST_DECL)
+ init = DECL_INITIAL (init);
+ else if (TREE_READONLY_DECL_P (init))
+ init = decl_constant_value (init);
+ return init;
+ }
+
+ if (element && (TREE_TYPE (element) == type
+ || (code == ARRAY_TYPE && TREE_TYPE (element)
+ && comptypes (TREE_TYPE (element), type, 1))))
+ {
+ if (pedantic && code == ARRAY_TYPE)
+ pedwarn ("ANSI C++ forbids initializing array from array expression");
+ if (pedantic && (code == RECORD_TYPE || code == UNION_TYPE))
+ pedwarn ("ANSI C++ forbids single nonscalar initializer with braces");
+ if (TREE_CODE (element) == CONST_DECL)
+ element = DECL_INITIAL (element);
+ else if (TREE_READONLY_DECL_P (element))
+ element = decl_constant_value (element);
+ return element;
+ }
+
+ /* Initialization of an array of chars from a string constant
+ optionally enclosed in braces. */
+
+ if (code == ARRAY_TYPE)
+ {
+ tree typ1 = TYPE_MAIN_VARIANT (TREE_TYPE (type));
+ if ((typ1 == char_type_node
+ || typ1 == signed_char_type_node
+ || typ1 == unsigned_char_type_node
+ || typ1 == unsigned_wchar_type_node
+ || typ1 == signed_wchar_type_node)
+ && ((init && TREE_CODE (init) == STRING_CST)
+ || (element && TREE_CODE (element) == STRING_CST)))
+ {
+ tree string = element ? element : init;
+
+ if ((TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (string)))
+ != char_type_node)
+ && TYPE_PRECISION (typ1) == BITS_PER_UNIT)
+ {
+ error ("char-array initialized from wide string");
+ return error_mark_node;
+ }
+ if ((TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (string)))
+ == char_type_node)
+ && TYPE_PRECISION (typ1) != BITS_PER_UNIT)
+ {
+ error ("int-array initialized from non-wide string");
+ return error_mark_node;
+ }
+
+ if (pedantic
+ && typ1 != char_type_node
+ && typ1 != signed_char_type_node
+ && typ1 != unsigned_char_type_node)
+ pedwarn ("ANSI C++ forbids string initializer except for `char' elements");
+ TREE_TYPE (string) = type;
+ if (TYPE_DOMAIN (type) != 0
+ && TREE_CONSTANT (TYPE_SIZE (type)))
+ {
+ register int size
+ = TREE_INT_CST_LOW (TYPE_SIZE (type));
+ size = (size + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
+ /* In C it is ok to subtract 1 from the length of the string
+ because it's ok to ignore the terminating null char that is
+ counted in the length of the constant, but in C++ this would
+ be invalid. */
+ if (size < TREE_STRING_LENGTH (string))
+ pedwarn ("initializer-string for array of chars is too long");
+ }
+ return string;
+ }
+ }
+
+ /* Handle scalar types, including conversions,
+ and signature pointers and references. */
+
+ if (code == INTEGER_TYPE || code == REAL_TYPE || code == POINTER_TYPE
+ || code == ENUMERAL_TYPE || code == REFERENCE_TYPE
+ || code == BOOLEAN_TYPE
+ || (code == RECORD_TYPE && ! raw_constructor
+ && (IS_SIGNATURE_POINTER (type) || IS_SIGNATURE_REFERENCE (type))))
+ {
+ if (raw_constructor)
+ {
+ if (element == 0)
+ {
+ error ("initializer for scalar variable requires one element");
+ return error_mark_node;
+ }
+ init = element;
+ }
+
+ return convert_for_initialization (0, type, init, LOOKUP_NORMAL,
+ "initialization", NULL_TREE, 0);
+ }
+
+ /* Come here only for records and arrays (and unions with constructors). */
+
+ if (TYPE_SIZE (type) && ! TREE_CONSTANT (TYPE_SIZE (type)))
+ {
+ cp_error ("variable-sized object of type `%T' may not be initialized",
+ type);
+ return error_mark_node;
+ }
+
+ if (code == ARRAY_TYPE || code == RECORD_TYPE || code == UNION_TYPE)
+ {
+ if (raw_constructor)
+ return process_init_constructor (type, init, (tree *)0);
+ else if (TYPE_NEEDS_CONSTRUCTING (type))
+ {
+ /* This can only be reached when caller is initializing
+ ARRAY_TYPE. In that case, we don't want to convert
+ INIT to TYPE. We will let `expand_vec_init' do it. */
+ return init;
+ }
+ else if (tail != 0)
+ {
+ *tail = old_tail_contents;
+ return process_init_constructor (type, 0, tail);
+ }
+ else if (flag_traditional)
+ /* Traditionally one can say `char x[100] = 0;'. */
+ return process_init_constructor (type,
+ build_nt (CONSTRUCTOR, 0,
+ tree_cons (0, init, 0)),
+ 0);
+ if (code != ARRAY_TYPE)
+ return convert_for_initialization (0, type, init, LOOKUP_NORMAL,
+ "initialization", NULL_TREE, 0);
+ }
+
+ error ("invalid initializer");
+ return error_mark_node;
+}
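+
+/* The char-array case above, for example:
+
+     char ok[4] = "abc";     fits, counting the terminating null
+     char tight[3] = "abc";  pedwarn: initializer-string for array
+                             of chars is too long
+
+   in C the second form is valid (the null is simply dropped), which
+   is why this is a pedwarn rather than a hard error.  */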
+
+/* Process a constructor for a variable of type TYPE.
+ The constructor elements may be specified either with INIT or with ELTS,
+ only one of which should be non-null.
+
+ If INIT is specified, it is a CONSTRUCTOR node which is specifically
+ and solely for initializing this datum.
+
+ If ELTS is specified, it is the address of a variable containing
+ a list of expressions. We take as many elements as we need
+ from the head of the list and update the list.
+
+ In the resulting constructor, TREE_CONSTANT is set if all elts are
+ constant, and TREE_STATIC is set if, in addition, all elts are simple enough
+ constants that the assembler and linker can compute them. */
+
+static tree
+process_init_constructor (type, init, elts)
+ tree type, init, *elts;
+{
+ register tree tail;
+ /* List of the elements of the result constructor,
+ in reverse order. */
+ register tree members = NULL;
+ tree result;
+ int allconstant = 1;
+ int allsimple = 1;
+ int erroneous = 0;
+
+ /* Make TAIL be the list of elements to use for the initialization,
+ no matter how the data was given to us. */
+
+ if (elts)
+ {
+ if (warn_missing_braces)
+ warning ("aggregate has a partly bracketed initializer");
+ tail = *elts;
+ }
+ else
+ tail = CONSTRUCTOR_ELTS (init);
+
+ /* Gobble as many elements as needed, and make a constructor or initial value
+ for each element of this aggregate. Chain them together in result.
+ If there are too few, use 0 for each scalar ultimate component. */
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ tree domain = TYPE_DOMAIN (type);
+ register long len;
+ register int i;
+
+ if (domain)
+ len = (TREE_INT_CST_LOW (TYPE_MAX_VALUE (domain))
+ - TREE_INT_CST_LOW (TYPE_MIN_VALUE (domain))
+ + 1);
+ else
+ len = -1; /* Take as many as there are */
+
+ for (i = 0; (len < 0 || i < len) && tail != 0; i++)
+ {
+ register tree next1;
+
+ if (TREE_VALUE (tail) != 0)
+ {
+ tree tail1 = tail;
+ next1 = digest_init (TYPE_MAIN_VARIANT (TREE_TYPE (type)),
+ TREE_VALUE (tail), &tail1);
+ my_friendly_assert (tail1 == 0
+ || TREE_CODE (tail1) == TREE_LIST, 319);
+ if (tail == tail1 && len < 0)
+ {
+ error ("non-empty initializer for array of empty elements");
+ /* Just ignore what we were supposed to use. */
+ tail1 = 0;
+ }
+ tail = tail1;
+ }
+ else
+ {
+ next1 = error_mark_node;
+ tail = TREE_CHAIN (tail);
+ }
+
+ if (next1 == error_mark_node)
+ erroneous = 1;
+ else if (!TREE_CONSTANT (next1))
+ allconstant = 0;
+ else if (! initializer_constant_valid_p (next1))
+ allsimple = 0;
+ members = tree_cons (NULL_TREE, next1, members);
+ }
+ }
+ if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ register tree field;
+
+ if (tail)
+ {
+ if (TYPE_USES_VIRTUAL_BASECLASSES (type))
+ {
+ sorry ("initializer list for object of class with virtual baseclasses");
+ return error_mark_node;
+ }
+
+ if (TYPE_BINFO_BASETYPES (type))
+ {
+ sorry ("initializer list for object of class with baseclasses");
+ return error_mark_node;
+ }
+
+ if (TYPE_VIRTUAL_P (type))
+ {
+ sorry ("initializer list for object using virtual functions");
+ return error_mark_node;
+ }
+ }
+
+ for (field = TYPE_FIELDS (type); field && tail;
+ field = TREE_CHAIN (field))
+ {
+ register tree next1;
+
+ if (! DECL_NAME (field))
+ {
+ members = tree_cons (field, integer_zero_node, members);
+ continue;
+ }
+
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (TREE_VALUE (tail) != 0)
+ {
+ tree tail1 = tail;
+
+ next1 = digest_init (TREE_TYPE (field),
+ TREE_VALUE (tail), &tail1);
+ my_friendly_assert (tail1 == 0
+ || TREE_CODE (tail1) == TREE_LIST, 320);
+ tail = tail1;
+ }
+ else
+ {
+ next1 = error_mark_node;
+ tail = TREE_CHAIN (tail);
+ }
+
+ if (next1 == error_mark_node)
+ erroneous = 1;
+ else if (!TREE_CONSTANT (next1))
+ allconstant = 0;
+ else if (! initializer_constant_valid_p (next1))
+ allsimple = 0;
+ members = tree_cons (field, next1, members);
+ }
+ for (; field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ /* Does this field have a default initialization? */
+ if (DECL_INITIAL (field))
+ {
+ register tree next1 = DECL_INITIAL (field);
+ if (TREE_CODE (next1) == ERROR_MARK)
+ erroneous = 1;
+ else if (!TREE_CONSTANT (next1))
+ allconstant = 0;
+ else if (! initializer_constant_valid_p (next1))
+ allsimple = 0;
+ members = tree_cons (field, next1, members);
+ }
+ else if (TREE_READONLY (field))
+ error ("uninitialized const member `%s'",
+ IDENTIFIER_POINTER (DECL_NAME (field)));
+ else if (TYPE_LANG_SPECIFIC (TREE_TYPE (field))
+ && CLASSTYPE_READONLY_FIELDS_NEED_INIT (TREE_TYPE (field)))
+ error ("member `%s' with uninitialized const fields",
+ IDENTIFIER_POINTER (DECL_NAME (field)));
+ else if (TREE_CODE (TREE_TYPE (field)) == REFERENCE_TYPE)
+ error ("member `%s' is uninitialized reference",
+ IDENTIFIER_POINTER (DECL_NAME (field)));
+ }
+ }
+
+ if (TREE_CODE (type) == UNION_TYPE)
+ {
+ register tree field = TYPE_FIELDS (type);
+ register tree next1;
+
+ /* Find the first named field. ANSI decided in September 1990
+ that only named fields count here. */
+ while (field && DECL_NAME (field) == 0)
+ field = TREE_CHAIN (field);
+
+ /* If this element specifies a field, initialize via that field. */
+ if (TREE_PURPOSE (tail) != NULL_TREE)
+ {
+ int win = 0;
+
+ if (TREE_CODE (TREE_PURPOSE (tail)) == FIELD_DECL)
+ /* Handle the case of a call by build_c_cast. */
+ field = TREE_PURPOSE (tail), win = 1;
+ else if (TREE_CODE (TREE_PURPOSE (tail)) != IDENTIFIER_NODE)
+ error ("index value instead of field name in union initializer");
+ else
+ {
+ tree temp;
+ for (temp = TYPE_FIELDS (type);
+ temp;
+ temp = TREE_CHAIN (temp))
+ if (DECL_NAME (temp) == TREE_PURPOSE (tail))
+ break;
+ if (temp)
+ field = temp, win = 1;
+ else
+ error ("no field `%s' in union being initialized",
+ IDENTIFIER_POINTER (TREE_PURPOSE (tail)));
+ }
+ if (!win)
+ TREE_VALUE (tail) = error_mark_node;
+ }
+ else if (field == 0)
+ {
+ cp_error ("union `%T' with no named members cannot be initialized",
+ type);
+ TREE_VALUE (tail) = error_mark_node;
+ }
+
+ if (TREE_VALUE (tail) != 0)
+ {
+ tree tail1 = tail;
+
+ next1 = digest_init (TREE_TYPE (field),
+ TREE_VALUE (tail), &tail1);
+ if (tail1 != 0 && TREE_CODE (tail1) != TREE_LIST)
+ my_friendly_abort (357);
+ tail = tail1;
+ }
+ else
+ {
+ next1 = error_mark_node;
+ tail = TREE_CHAIN (tail);
+ }
+
+ if (next1 == error_mark_node)
+ erroneous = 1;
+ else if (!TREE_CONSTANT (next1))
+ allconstant = 0;
+ else if (initializer_constant_valid_p (next1) == 0)
+ allsimple = 0;
+ members = tree_cons (field, next1, members);
+ }
+
+ /* If arguments were specified as a list, just remove the ones we used. */
+ if (elts)
+ *elts = tail;
+ /* If arguments were specified as a constructor,
+ complain unless we used all the elements of the constructor. */
+ else if (tail)
+ pedwarn ("excess elements in aggregate initializer");
+
+ if (erroneous)
+ return error_mark_node;
+
+ result = build (CONSTRUCTOR, type, NULL_TREE, nreverse (members));
+ if (init)
+ TREE_HAS_CONSTRUCTOR (result) = TREE_HAS_CONSTRUCTOR (init);
+ if (allconstant) TREE_CONSTANT (result) = 1;
+ if (allconstant && allsimple) TREE_STATIC (result) = 1;
+ return result;
+}
+
+/* Given a structure or union value DATUM, construct and return
+ the structure or union component which results from narrowing
+ that value by the types specified in TYPES. For example, given the
+ hierarchy
+
+ class L { int ii; };
+ class A : L { ... };
+ class B : L { ... };
+ class C : A, B { ... };
+
+ and the declaration
+
+ C x;
+
+ then the expression
+
+ x::C::A::L::ii refers to the ii member of the L part of
+ the A part of the C object named by x. In this case,
+ DATUM would be x, and TYPES would be a SCOPE_REF consisting of
+
+ SCOPE_REF
+ SCOPE_REF
+ C A
+ L
+
+ The last entry in the SCOPE_REF is always an IDENTIFIER_NODE. */
+
+tree
+build_scoped_ref (datum, types)
+ tree datum;
+ tree types;
+{
+ tree ref;
+ tree type = TREE_TYPE (datum);
+
+ if (datum == error_mark_node)
+ return error_mark_node;
+
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+
+ type = TYPE_MAIN_VARIANT (type);
+
+ if (TREE_CODE (types) == SCOPE_REF)
+ {
+ /* We have some work to do. */
+ struct type_chain { tree type; struct type_chain *next; } *chain = 0, *head = 0, scratch;
+ ref = build_unary_op (ADDR_EXPR, datum, 0);
+ while (TREE_CODE (types) == SCOPE_REF)
+ {
+ tree t = TREE_OPERAND (types, 1);
+ if (is_aggr_typedef (t, 1))
+ {
+ head = (struct type_chain *)alloca (sizeof (struct type_chain));
+ head->type = IDENTIFIER_TYPE_VALUE (t);
+ head->next = chain;
+ chain = head;
+ types = TREE_OPERAND (types, 0);
+ }
+ else return error_mark_node;
+ }
+ if (! is_aggr_typedef (types, 1))
+ return error_mark_node;
+
+ head = &scratch;
+ head->type = IDENTIFIER_TYPE_VALUE (types);
+ head->next = chain;
+ chain = head;
+ while (chain)
+ {
+ tree binfo = chain->type;
+ type = TREE_TYPE (TREE_TYPE (ref));
+ if (binfo != TYPE_BINFO (type))
+ {
+ binfo = get_binfo (binfo, type, 1);
+ if (binfo == error_mark_node)
+ return error_mark_node;
+ if (binfo == 0)
+ return error_not_base_type (chain->type, type);
+ ref = convert_pointer_to (binfo, ref);
+ }
+ chain = chain->next;
+ }
+ return build_indirect_ref (ref, "(compiler error in build_scoped_ref)");
+ }
+
+ /* This is an easy conversion. */
+ if (is_aggr_typedef (types, 1))
+ {
+ tree binfo = TYPE_BINFO (IDENTIFIER_TYPE_VALUE (types));
+ if (binfo != TYPE_BINFO (type))
+ {
+ binfo = get_binfo (binfo, type, 1);
+ if (binfo == error_mark_node)
+ return error_mark_node;
+ if (binfo == 0)
+ return error_not_base_type (IDENTIFIER_TYPE_VALUE (types), type);
+ }
+
+ switch (TREE_CODE (datum))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ ref = convert_pointer_to (binfo,
+ build_unary_op (ADDR_EXPR, TREE_OPERAND (datum, 0), 0));
+ break;
+ default:
+ ref = convert_pointer_to (binfo,
+ build_unary_op (ADDR_EXPR, datum, 0));
+ }
+ return build_indirect_ref (ref, "(compiler error in build_scoped_ref)");
+ }
+ return error_mark_node;
+}
+
+/* Build a reference to an object specified by the C++ `->' operator.
+ Usually this just involves dereferencing the object, but if the
+ `->' operator is overloaded, then such overloads must be
+ performed until an object which does not have the `->' operator
+ overloaded is found. An error is reported when circular pointer
+ delegation is detected. */
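+/* For example, given
+
+	struct X { int m; };
+	struct Q { X *operator-> (); };
+	struct P { Q operator-> (); };
+	P p;
+
+   the expression `p->m' applies P::operator-> and then Q::operator->
+   before the resulting `X *' is dereferenced.  */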
+tree
+build_x_arrow (datum)
+ tree datum;
+{
+ tree types_memoized = NULL_TREE;
+ register tree rval = datum;
+ tree type = TREE_TYPE (rval);
+ tree last_rval;
+
+ if (type == error_mark_node)
+ return error_mark_node;
+
+ if (TREE_CODE (rval) == OFFSET_REF)
+ {
+ rval = resolve_offset_ref (datum);
+ type = TREE_TYPE (rval);
+ }
+
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ {
+ rval = convert_from_reference (rval);
+ type = TREE_TYPE (rval);
+ }
+
+ if (IS_AGGR_TYPE (type) && TYPE_OVERLOADS_ARROW (type))
+ {
+ while ((rval = build_opfncall (COMPONENT_REF, LOOKUP_NORMAL, rval, NULL_TREE, NULL_TREE)))
+ {
+ if (rval == error_mark_node)
+ return error_mark_node;
+
+ if (value_member (TREE_TYPE (rval), types_memoized))
+ {
+ error ("circular pointer delegation detected");
+ return error_mark_node;
+ }
+ else
+ {
+ types_memoized = tree_cons (NULL_TREE, TREE_TYPE (rval),
+ types_memoized);
+ }
+ last_rval = rval;
+ }
+ if (TREE_CODE (TREE_TYPE (last_rval)) == REFERENCE_TYPE)
+ last_rval = convert_from_reference (last_rval);
+ }
+ else
+ last_rval = default_conversion (rval);
+
+ /* Signature pointers are not dereferenced. */
+ if (TYPE_LANG_SPECIFIC (TREE_TYPE (last_rval))
+ && IS_SIGNATURE_POINTER (TREE_TYPE (last_rval)))
+ return last_rval;
+
+ if (TREE_CODE (TREE_TYPE (last_rval)) == POINTER_TYPE)
+ return build_indirect_ref (last_rval, NULL_PTR);
+
+ if (types_memoized)
+ error ("result of `operator->()' yields non-pointer result");
+ else
+ error ("base operand of `->' is not a pointer");
+ return error_mark_node;
+}
+
+/* Make an expression to refer to the COMPONENT field of
+ structure or union value DATUM. COMPONENT is an arbitrary
+ expression. DATUM has not already been checked out to be of
+ aggregate type.
+
+ For C++, COMPONENT may be a TREE_LIST. This happens when we must
+ return an object of member type to a method of the current class,
+ but there is not yet enough typing information to know which one.
+ As a special case, if there is only one method by that name,
+ it is returned. Otherwise we return an expression which other
+ routines will have to know how to deal with later. */
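+/* For example, given
+
+	struct X { int i; };
+	int X::*pmi = &X::i;
+	X x;
+
+   the expression `x.*pmi' is built here as an OFFSET_REF of type
+   `int' whose operands are `x' and the dereferenced member pointer.  */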
+tree
+build_m_component_ref (datum, component)
+ tree datum, component;
+{
+ tree type;
+ tree objtype = TREE_TYPE (datum);
+ tree rettype;
+
+ if (TYPE_PTRMEMFUNC_P (TREE_TYPE (component)))
+ {
+ type = TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (TREE_TYPE (component)));
+ rettype = type;
+ }
+ else
+ {
+ component = build_indirect_ref (component, NULL_PTR);
+ type = TREE_TYPE (component);
+ rettype = TREE_TYPE (TREE_TYPE (component));
+ }
+
+ if (datum == error_mark_node || component == error_mark_node)
+ return error_mark_node;
+
+ if (TREE_CODE (type) != OFFSET_TYPE && TREE_CODE (type) != METHOD_TYPE)
+ {
+ cp_error ("`%E' cannot be used as a member pointer, since it is of type `%T'", component, type);
+ return error_mark_node;
+ }
+
+ if (TREE_CODE (objtype) == REFERENCE_TYPE)
+ objtype = TREE_TYPE (objtype);
+
+ if (! IS_AGGR_TYPE (objtype))
+ {
+ cp_error ("cannot apply member pointer `%E' to `%E'", component, datum);
+ cp_error ("which is of non-aggregate type `%T'", objtype);
+ return error_mark_node;
+ }
+
+ if (! comptypes (TYPE_METHOD_BASETYPE (type), objtype, 0))
+ {
+ cp_error ("member type `%T::' incompatible with object type `%T'",
+ TYPE_METHOD_BASETYPE (type), objtype);
+ return error_mark_node;
+ }
+
+ return build (OFFSET_REF, rettype, datum, component);
+}
+
+/* Return a tree node for the expression TYPENAME '(' PARMS ')'.
+
+ Because we cannot tell whether this construct is really a call to a
+ constructor or a request for a type conversion, we try both, and
+ report any ambiguities we find. */
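+/* For example, `int (65)' is equivalent to the C cast `(int) 65',
+   while `X (1, 2)' for a class type X must invoke a constructor of X.  */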
+tree
+build_functional_cast (exp, parms)
+ tree exp;
+ tree parms;
+{
+ /* This is either a call to a constructor,
+ or a C cast in C++'s `functional' notation. */
+ tree type, name = NULL_TREE;
+ tree expr_as_ctor = NULL_TREE;
+
+ if (exp == error_mark_node || parms == error_mark_node)
+ return error_mark_node;
+
+ if (TREE_CODE (exp) == IDENTIFIER_NODE)
+ {
+ name = exp;
+
+ if (IDENTIFIER_HAS_TYPE_VALUE (exp))
+ /* Either an enum or an aggregate type. */
+ type = IDENTIFIER_TYPE_VALUE (exp);
+ else
+ {
+ type = lookup_name (exp, 1);
+ if (!type || TREE_CODE (type) != TYPE_DECL)
+ {
+ cp_error ("`%T' fails to be a typedef or built-in type", name);
+ return error_mark_node;
+ }
+ type = TREE_TYPE (type);
+ }
+ }
+ else
+ type = exp;
+
+ if (IS_SIGNATURE (type))
+ {
+ error ("signature type not allowed in cast or constructor expression");
+ return error_mark_node;
+ }
+
+ /* Prepare to evaluate as a call to a constructor. If this expression
+ is actually used, for example,
+
+ return X (arg1, arg2, ...);
+
+ then the slot being initialized will be filled in. */
+
+ if (name == NULL_TREE)
+ {
+ name = TYPE_NAME (type);
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+ }
+
+ if (! IS_AGGR_TYPE (type))
+ {
+ /* this must build a C cast */
+ if (parms == NULL_TREE)
+ return build1 (NOP_EXPR, type, integer_zero_node);
+ else if (TREE_CHAIN (parms) != NULL_TREE)
+ {
+ pedwarn ("initializer list being treated as compound expression");
+ parms = build_compound_expr (parms);
+ }
+ return build_c_cast (type, parms);
+ }
+
+ if (TYPE_SIZE (type) == NULL_TREE)
+ {
+ cp_error ("type `%T' is not yet defined", type);
+ return error_mark_node;
+ }
+
+ if (parms && TREE_CHAIN (parms) == NULL_TREE)
+ return build_c_cast (type, parms);
+
+ expr_as_ctor = build_method_call (NULL_TREE, name, parms,
+ NULL_TREE, LOOKUP_NORMAL);
+
+ if (expr_as_ctor == error_mark_node)
+ return error_mark_node;
+
+ if (current_function_decl)
+ return build_cplus_new (type, expr_as_ctor, 1);
+
+ {
+ register tree parm = TREE_OPERAND (expr_as_ctor, 1);
+
+ /* Initializers for static variables and parameters have
+ to handle doing the initialization and cleanup themselves. */
+ my_friendly_assert (TREE_CODE (expr_as_ctor) == CALL_EXPR, 322);
+#if 0
+ /* The following assertion fails in cases where we are initializing
+ a static member variable of a particular instance of a template
+ class with a call to a constructor of the given instance, as in:
+
+ TMPL<int> object = TMPL<int>();
+
+ Curiously, the assertion does not fail if we do the same thing
+ for a static member of a non-template class, as in:
+
+ T object = T();
+
+ I can't see why we should care here whether or not the initializer
+ expression involves a call to `new', so for the time being, it
+ seems best to just avoid doing this assertion. */
+ my_friendly_assert (TREE_CALLS_NEW (TREE_VALUE (parm)), 323);
+#endif
+ TREE_VALUE (parm) = NULL_TREE;
+ expr_as_ctor = build_indirect_ref (expr_as_ctor, NULL_PTR);
+ TREE_HAS_CONSTRUCTOR (expr_as_ctor) = 1;
+ }
+ return expr_as_ctor;
+}
+
+/* Return the character string for the name that encodes the
+ enumeral value VALUE in the domain TYPE. */
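+/* For example, given `enum color { RED = 1 };', the value 1 yields
+   "RED", while a value with no enumerator, such as 7, yields the
+   string "(enum color)7".  */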
+char *
+enum_name_string (value, type)
+ tree value;
+ tree type;
+{
+ register tree values = TYPE_VALUES (type);
+ register HOST_WIDE_INT intval = TREE_INT_CST_LOW (value);
+
+ my_friendly_assert (TREE_CODE (type) == ENUMERAL_TYPE, 324);
+ while (values
+ && TREE_INT_CST_LOW (TREE_VALUE (values)) != intval)
+ values = TREE_CHAIN (values);
+ if (values == NULL_TREE)
+ {
+ char *buf = (char *)oballoc (32 + TYPE_NAME_LENGTH (type));
+
+ /* Value must have been cast. */
+ sprintf (buf, "(enum %s)%d",
+ TYPE_NAME_STRING (type), (int) intval);
+ return buf;
+ }
+ return IDENTIFIER_POINTER (TREE_PURPOSE (values));
+}
+
+#if 0
+/* Print out a language-specific error message for
+ (Pascal) case or (C) switch statements.
+ CODE tells what sort of message to print.
+ TYPE is the type of the switch index expression.
+ NEW is the new value that we were trying to add.
+ OLD is the old value that stopped us from adding it. */
+void
+report_case_error (code, type, new_value, old_value)
+ int code;
+ tree type;
+ tree new_value, old_value;
+{
+ if (code == 1)
+ {
+ if (new_value)
+ error ("case label not within a switch statement");
+ else
+ error ("default label not within a switch statement");
+ }
+ else if (code == 2)
+ {
+ if (new_value == 0)
+ {
+ error ("multiple default labels in one switch");
+ return;
+ }
+ if (TREE_CODE (new_value) == RANGE_EXPR)
+ if (TREE_CODE (old_value) == RANGE_EXPR)
+ {
+ char *buf = (char *)alloca (4 * (8 + TYPE_NAME_LENGTH (type)));
+ if (TREE_CODE (type) == ENUMERAL_TYPE)
+ sprintf (buf, "overlapping ranges [%s..%s], [%s..%s] in case expression",
+ enum_name_string (TREE_OPERAND (new_value, 0), type),
+ enum_name_string (TREE_OPERAND (new_value, 1), type),
+ enum_name_string (TREE_OPERAND (old_value, 0), type),
+ enum_name_string (TREE_OPERAND (old_value, 1), type));
+ else
+ sprintf (buf, "overlapping ranges [%d..%d], [%d..%d] in case expression",
+ TREE_INT_CST_LOW (TREE_OPERAND (new_value, 0)),
+ TREE_INT_CST_LOW (TREE_OPERAND (new_value, 1)),
+ TREE_INT_CST_LOW (TREE_OPERAND (old_value, 0)),
+ TREE_INT_CST_LOW (TREE_OPERAND (old_value, 1)));
+ error (buf);
+ }
+ else
+ {
+ char *buf = (char *)alloca (4 * (8 + TYPE_NAME_LENGTH (type)));
+ if (TREE_CODE (type) == ENUMERAL_TYPE)
+ sprintf (buf, "range [%s..%s] includes element `%s' in case expression",
+ enum_name_string (TREE_OPERAND (new_value, 0), type),
+ enum_name_string (TREE_OPERAND (new_value, 1), type),
+ enum_name_string (old_value, type));
+ else
+ sprintf (buf, "range [%d..%d] includes (%d) in case expression",
+ TREE_INT_CST_LOW (TREE_OPERAND (new_value, 0)),
+ TREE_INT_CST_LOW (TREE_OPERAND (new_value, 1)),
+ TREE_INT_CST_LOW (old_value));
+ error (buf);
+ }
+ else if (TREE_CODE (old_value) == RANGE_EXPR)
+ {
+ char *buf = (char *)alloca (4 * (8 + TYPE_NAME_LENGTH (type)));
+ if (TREE_CODE (type) == ENUMERAL_TYPE)
+ sprintf (buf, "range [%s..%s] includes element `%s' in case expression",
+ enum_name_string (TREE_OPERAND (old_value, 0), type),
+ enum_name_string (TREE_OPERAND (old_value, 1), type),
+ enum_name_string (new_value, type));
+ else
+ sprintf (buf, "range [%d..%d] includes (%d) in case expression",
+ TREE_INT_CST_LOW (TREE_OPERAND (old_value, 0)),
+ TREE_INT_CST_LOW (TREE_OPERAND (old_value, 1)),
+ TREE_INT_CST_LOW (new_value));
+ error (buf);
+ }
+ else
+ {
+ if (TREE_CODE (type) == ENUMERAL_TYPE)
+ error ("duplicate label `%s' in switch statement",
+ enum_name_string (new_value, type));
+ else
+ error ("duplicate label (%d) in switch statement",
+ TREE_INT_CST_LOW (new_value));
+ }
+ }
+ else if (code == 3)
+ {
+ if (TREE_CODE (type) == ENUMERAL_TYPE)
+ warning ("case value out of range for enum %s",
+ TYPE_NAME_STRING (type));
+ else
+ warning ("case value out of range");
+ }
+ else if (code == 4)
+ {
+ if (TREE_CODE (type) == ENUMERAL_TYPE)
+ error ("range values `%s' and `%s' reversed",
+ enum_name_string (new_value, type),
+ enum_name_string (old_value, type));
+ else
+ error ("range values reversed");
+ }
+}
+#endif
diff --git a/gnu/usr.bin/cc/cc1plus/xref.c b/gnu/usr.bin/cc/cc1plus/xref.c
new file mode 100644
index 0000000..283771b
--- /dev/null
+++ b/gnu/usr.bin/cc/cc1plus/xref.c
@@ -0,0 +1,839 @@
+/* Code for handling XREF output from GNU C++.
+ Copyright (C) 1992, 1993 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include "tree.h"
+#include <stdio.h>
+#include "cp-tree.h"
+#include "input.h"
+
+#include <ctype.h>
+
+extern char *getpwd ();
+
+extern char *index ();
+extern char *rindex ();
+
+/* The character(s) used to join a directory specification (obtained with
+ getwd or equivalent) with a non-absolute file name. */
+
+#ifndef FILE_NAME_JOINER
+#define FILE_NAME_JOINER "/"
+#endif
+
+/* Nonzero if NAME as a file name is absolute. */
+#ifndef FILE_NAME_ABSOLUTE_P
+#define FILE_NAME_ABSOLUTE_P(NAME) (NAME[0] == '/')
+#endif
+
+/* For cross referencing. */
+
+int flag_gnu_xref;
+
+/************************************************************************/
+/* */
+/* Common definitions */
+/* */
+/************************************************************************/
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+#ifndef NULL
+#define NULL 0
+#endif
+
+#define PALLOC(typ) ((typ *) calloc(1,sizeof(typ)))
+
+
+/* Return a malloc'd copy of STR. */
+#define SALLOC(str) \
+ ((char *) ((str) == NULL ? NULL \
+ : (char *) strcpy ((char *) malloc (strlen ((str)) + 1), (str))))
+#define SFREE(str) (str != NULL && (free(str),0))
+
+#define STREQL(s1,s2) (strcmp((s1),(s2)) == 0)
+#define STRNEQ(s1,s2) (strcmp((s1),(s2)) != 0)
+#define STRLSS(s1,s2) (strcmp((s1),(s2)) < 0)
+#define STRLEQ(s1,s2) (strcmp((s1),(s2)) <= 0)
+#define STRGTR(s1,s2) (strcmp((s1),(s2)) > 0)
+#define STRGEQ(s1,s2) (strcmp((s1),(s2)) >= 0)
+
+/************************************************************************/
+/* */
+/* Type definitions */
+/* */
+/************************************************************************/
+
+
+typedef struct _XREF_FILE * XREF_FILE;
+typedef struct _XREF_SCOPE * XREF_SCOPE;
+
+typedef struct _XREF_FILE
+{
+ char *name;
+ char *outname;
+ XREF_FILE next;
+} XREF_FILE_INFO;
+
+typedef struct _XREF_SCOPE
+{
+ int gid;
+ int lid;
+ XREF_FILE file;
+ int start;
+ XREF_SCOPE outer;
+} XREF_SCOPE_INFO;
+
+/************************************************************************/
+/* */
+/* Local storage */
+/* */
+/************************************************************************/
+
+static char doing_xref = 0;
+static FILE * xref_file = NULL;
+static char xref_name[1024];
+static XREF_FILE all_files = NULL;
+static char * wd_name = NULL;
+static XREF_SCOPE cur_scope = NULL;
+static int scope_ctr = 0;
+static XREF_FILE last_file = NULL;
+static tree last_fndecl = NULL;
+
+/************************************************************************/
+/* */
+/* Forward definitions */
+/* */
+/************************************************************************/
+
+extern void GNU_xref_begin();
+extern void GNU_xref_end();
+extern void GNU_xref_file();
+extern void GNU_xref_start_scope();
+extern void GNU_xref_end_scope();
+extern void GNU_xref_ref();
+extern void GNU_xref_decl();
+extern void GNU_xref_call();
+extern void GNU_xref_function();
+extern void GNU_xref_assign();
+extern void GNU_xref_hier();
+extern void GNU_xref_member();
+
+static void gen_assign();
+static XREF_FILE find_file();
+static char * filename();
+static char * fctname();
+static char * declname();
+static void simplify_type();
+static char * fixname();
+static void open_xref_file();
+
+extern char * type_as_string();
+
+/* Start cross referencing. FILE is the name of the file we xref. */
+
+void
+GNU_xref_begin (file)
+ char *file;
+{
+ doing_xref = 1;
+
+ if (file != NULL && STRNEQ (file,"-"))
+ {
+ open_xref_file(file);
+ GNU_xref_file(file);
+ }
+}
+
+/* Finish cross-referencing. ECT is the number of errors
+ we encountered. */
+
+void
+GNU_xref_end (ect)
+ int ect;
+{
+ XREF_FILE xf;
+
+ if (!doing_xref) return;
+
+ xf = find_file (input_filename);
+ if (xf == NULL) return;
+
+ while (cur_scope != NULL)
+ GNU_xref_end_scope(cur_scope->gid,0,0,0,0);
+
+ doing_xref = 0;
+
+ if (xref_file == NULL) return;
+
+ fclose (xref_file);
+
+ xref_file = NULL;
+ all_files = NULL;
+
+ if (ect > 0) unlink (xref_name);
+}
+
+/* Write out xref for file named NAME. */
+
+void
+GNU_xref_file (name)
+ char *name;
+{
+ XREF_FILE xf;
+
+ if (!doing_xref || name == NULL) return;
+
+ if (xref_file == NULL)
+ {
+ open_xref_file (name);
+ if (!doing_xref) return;
+ }
+
+ if (all_files == NULL)
+ fprintf(xref_file,"SCP * 0 0 0 0 RESET\n");
+
+ xf = find_file (name);
+ if (xf != NULL) return;
+
+ xf = PALLOC (XREF_FILE_INFO);
+ xf->name = SALLOC (name);
+ xf->next = all_files;
+ all_files = xf;
+
+ if (wd_name == NULL)
+ wd_name = getpwd ();
+
+ if (FILE_NAME_ABSOLUTE_P (name) || ! wd_name)
+ xf->outname = xf->name;
+ else
+ {
+ char *nmbuf
+ = (char *) malloc (strlen (wd_name) + strlen (FILE_NAME_JOINER)
+ + strlen (name) + 1);
+ sprintf (nmbuf, "%s%s%s", wd_name, FILE_NAME_JOINER, name);
+ name = nmbuf;
+ xf->outname = nmbuf;
+ }
+
+ fprintf (xref_file, "FIL %s %s 0\n", name, wd_name);
+
+ filename (xf);
+ fctname (NULL);
+}
+
+/* Start a scope identified at level ID. */
+
+void
+GNU_xref_start_scope (id)
+ HOST_WIDE_INT id;
+{
+ XREF_SCOPE xs;
+ XREF_FILE xf;
+
+ if (!doing_xref) return;
+ xf = find_file (input_filename);
+
+ xs = PALLOC (XREF_SCOPE_INFO);
+ xs->file = xf;
+ xs->start = lineno;
+ if (xs->start <= 0) xs->start = 1;
+ xs->gid = id;
+ xs->lid = ++scope_ctr;
+ xs->outer = cur_scope;
+ cur_scope = xs;
+}
+
+/* Finish a scope at level ID.
+ INID is ???
+ PRM is ???
+ KEEP is nonzero iff this scope is retained (nonzero if it's
+ a compiler-generated invisible scope).
+ TRNS is ??? */
+
+void
+GNU_xref_end_scope (id,inid,prm,keep,trns)
+ HOST_WIDE_INT id;
+ HOST_WIDE_INT inid;
+ int prm,keep,trns;
+{
+ XREF_FILE xf;
+ XREF_SCOPE xs,lxs,oxs;
+ char *stype;
+
+ if (!doing_xref) return;
+ xf = find_file (input_filename);
+ if (xf == NULL) return;
+
+ lxs = NULL;
+ for (xs = cur_scope; xs != NULL; xs = xs->outer)
+ {
+ if (xs->gid == id) break;
+ lxs = xs;
+ }
+ if (xs == NULL) return;
+
+ if (inid != 0) {
+ for (oxs = cur_scope; oxs != NULL; oxs = oxs->outer) {
+ if (oxs->gid == inid) break;
+ }
+ if (oxs == NULL) return;
+ inid = oxs->lid;
+ }
+
+ if (prm == 2) stype = "SUE";
+ else if (prm != 0) stype = "ARGS";
+ else if (keep == 2 || inid != 0) stype = "INTERN";
+ else stype = "EXTERN";
+
+ fprintf (xref_file,"SCP %s %d %d %d %d %s\n",
+ filename (xf), xs->start, lineno,xs->lid, inid, stype);
+
+ if (lxs == NULL) cur_scope = xs->outer;
+ else lxs->outer = xs->outer;
+
+ free (xs);
+}
+
+/* Output a reference to NAME in FNDECL. */
+
+void
+GNU_xref_ref (fndecl,name)
+ tree fndecl;
+ char *name;
+{
+ XREF_FILE xf;
+
+ if (!doing_xref) return;
+ xf = find_file (input_filename);
+ if (xf == NULL) return;
+
+ fprintf (xref_file, "REF %s %d %s %s\n",
+ filename (xf), lineno, fctname (fndecl), name);
+}
+
+/* Output a reference to DECL in FNDECL. */
+
+void
+GNU_xref_decl (fndecl,decl)
+ tree fndecl;
+ tree decl;
+{
+ XREF_FILE xf,xf1;
+ char *cls;
+ char *name;
+ char buf[10240];
+ int uselin;
+
+ if (!doing_xref) return;
+ xf = find_file (input_filename);
+ if (xf == NULL) return;
+
+ uselin = FALSE;
+
+ if (TREE_CODE (decl) == TYPE_DECL) cls = "TYPEDEF";
+ else if (TREE_CODE (decl) == FIELD_DECL) cls = "FIELD";
+ else if (TREE_CODE (decl) == VAR_DECL)
+ {
+ if (fndecl == NULL && TREE_STATIC(decl)
+ && TREE_READONLY(decl) && DECL_INITIAL(decl) != 0
+ && !TREE_PUBLIC(decl) && !DECL_EXTERNAL(decl)
+ && DECL_MODE(decl) != BLKmode) cls = "CONST";
+ else if (DECL_EXTERNAL(decl)) cls = "EXTERN";
+ else if (TREE_PUBLIC(decl)) cls = "EXTDEF";
+ else if (TREE_STATIC(decl)) cls = "STATIC";
+ else if (DECL_REGISTER(decl)) cls = "REGISTER";
+ else cls = "AUTO";
+ }
+ else if (TREE_CODE (decl) == PARM_DECL) cls = "PARAM";
+ else if (TREE_CODE (decl) == FIELD_DECL) cls = "FIELD";
+ else if (TREE_CODE (decl) == CONST_DECL) cls = "CONST";
+ else if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ if (DECL_EXTERNAL (decl)) cls = "EXTERN";
+ else if (TREE_PUBLIC (decl)) cls = "EFUNCTION";
+ else cls = "SFUNCTION";
+ }
+ else if (TREE_CODE (decl) == LABEL_DECL) cls = "LABEL";
+ else if (TREE_CODE (decl) == UNION_TYPE)
+ {
+ cls = "UNIONID";
+ decl = TYPE_NAME (decl);
+ uselin = TRUE;
+ }
+ else if (TREE_CODE (decl) == RECORD_TYPE)
+ {
+ if (CLASSTYPE_DECLARED_CLASS (decl)) cls = "CLASSID";
+ else if (IS_SIGNATURE (decl)) cls = "SIGNATUREID";
+ else cls = "STRUCTID";
+ decl = TYPE_NAME (decl);
+ uselin = TRUE;
+ }
+ else if (TREE_CODE (decl) == ENUMERAL_TYPE)
+ {
+ cls = "ENUMID";
+ decl = TYPE_NAME (decl);
+ uselin = TRUE;
+ }
+ else if (TREE_CODE (decl) == TEMPLATE_DECL)
+ {
+ if (DECL_TEMPLATE_IS_CLASS (decl))
+ cls = "CLASSTEMP";
+ else if (TREE_CODE (DECL_RESULT (decl)) == FUNCTION_DECL)
+ cls = "FUNCTEMP";
+ else if (TREE_CODE (DECL_RESULT (decl)) == VAR_DECL)
+ cls = "VARTEMP";
+ else
+ my_friendly_abort (358);
+ uselin = TRUE;
+ }
+ else cls = "UNKNOWN";
+
+ if (decl == NULL || DECL_NAME (decl) == NULL) return;
+
+ if (uselin && decl->decl.linenum > 0 && decl->decl.filename != NULL)
+ {
+ xf1 = find_file (decl->decl.filename);
+ if (xf1 != NULL)
+ {
+ lineno = decl->decl.linenum;
+ xf = xf1;
+ }
+ }
+
+ if (DECL_ASSEMBLER_NAME (decl))
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ else
+ name = IDENTIFIER_POINTER (DECL_NAME (decl));
+
+ strcpy (buf, type_as_string (TREE_TYPE (decl), 0));
+ simplify_type (buf);
+
+ fprintf (xref_file, "DCL %s %d %s %d %s %s %s\n",
+ filename(xf), lineno, name,
+ (cur_scope != NULL ? cur_scope->lid : 0),
+ cls, fctname(fndecl), buf);
+
+ if (STREQL (cls, "STRUCTID") || STREQL (cls, "UNIONID")
+ || STREQL (cls, "SIGNATUREID"))
+ {
+ cls = "CLASSID";
+ fprintf (xref_file, "DCL %s %d %s %d %s %s %s\n",
+ filename(xf), lineno,name,
+ (cur_scope != NULL ? cur_scope->lid : 0),
+ cls, fctname(fndecl), buf);
+ }
+}
+
+/* Output a reference to a call to NAME in FNDECL. */
+
+void
+GNU_xref_call (fndecl, name)
+ tree fndecl;
+ char *name;
+{
+ XREF_FILE xf;
+ char buf[1024];
+ char *s;
+
+ if (!doing_xref) return;
+ xf = find_file (input_filename);
+ if (xf == NULL) return;
+ name = fixname (name, buf);
+
+ for (s = name; *s != 0; ++s)
+ if (*s == '_' && s[1] == '_') break;
+ if (*s != 0) GNU_xref_ref (fndecl, name);
+
+ fprintf (xref_file, "CAL %s %d %s %s\n",
+ filename (xf), lineno, name, fctname (fndecl));
+}
+
+/* Output cross-reference info about FNDECL. If non-NULL,
+ ARGS are the arguments for the function (i.e., before the FUNCTION_DECL
+ has been fully built). */
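+/* For instance, a definition like `int max (int a, int b)' at line 10
+   of foo.C might emit a record roughly of the form
+
+	PRC foo.C 10 max 0 2 a,b
+
+   giving the file, line, function, enclosing scope id, argument
+   count, and argument names.  */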
+
+void
+GNU_xref_function (fndecl, args)
+ tree fndecl;
+ tree args;
+{
+ XREF_FILE xf;
+ int ct;
+ char buf[1024];
+
+ if (!doing_xref) return;
+ xf = find_file (input_filename);
+ if (xf == NULL) return;
+
+ ct = 0;
+ buf[0] = 0;
+ if (args == NULL) args = DECL_ARGUMENTS (fndecl);
+
+ GNU_xref_decl (NULL, fndecl);
+
+ for ( ; args != NULL; args = TREE_CHAIN (args))
+ {
+ GNU_xref_decl (fndecl,args);
+ if (ct != 0) strcat (buf,",");
+ strcat (buf, declname (args));
+ ++ct;
+ }
+
+ fprintf (xref_file, "PRC %s %d %s %d %d %s\n",
+ filename(xf), lineno, declname(fndecl),
+ (cur_scope != NULL ? cur_scope->lid : 0),
+ ct, buf);
+}
+
+/* Output cross-reference info about an assignment to NAME. */
+
+void
+GNU_xref_assign(name)
+ tree name;
+{
+ XREF_FILE xf;
+
+ if (!doing_xref) return;
+ xf = find_file(input_filename);
+ if (xf == NULL) return;
+
+ gen_assign(xf, name);
+}
+
+static void
+gen_assign(xf, name)
+ XREF_FILE xf;
+ tree name;
+{
+ char *s;
+
+ s = NULL;
+
+ switch (TREE_CODE (name))
+ {
+ case IDENTIFIER_NODE :
+ s = IDENTIFIER_POINTER(name);
+ break;
+ case VAR_DECL :
+ s = declname(name);
+ break;
+ case COMPONENT_REF :
+ gen_assign(xf, TREE_OPERAND(name, 0));
+ gen_assign(xf, TREE_OPERAND(name, 1));
+ break;
+ case INDIRECT_REF :
+ case OFFSET_REF :
+ case ARRAY_REF :
+ case BUFFER_REF :
+ gen_assign(xf, TREE_OPERAND(name, 0));
+ break;
+ case COMPOUND_EXPR :
+ gen_assign(xf, TREE_OPERAND(name, 1));
+ break;
+ default :
+ break;
+ }
+
+ if (s != NULL)
+ fprintf(xref_file, "ASG %s %d %s\n", filename(xf), lineno, s);
+}
+
+/* Output cross-reference info about a class hierarchy.
+ CLS is the class type of interest. BASE is a baseclass
+ for CLS. PUB and VIRT give the access info about
+ the class derivation. FRND is nonzero iff BASE is a friend
+ of CLS.
+
+ ??? Needs to handle nested classes. */
+void
+GNU_xref_hier(cls, base, pub, virt, frnd)
+ char *cls;
+ char *base;
+ int pub;
+ int virt;
+ int frnd;
+{
+ XREF_FILE xf;
+
+ if (!doing_xref) return;
+ xf = find_file(input_filename);
+ if (xf == NULL) return;
+
+ fprintf(xref_file, "HIE %s %d %s %s %d %d %d\n",
+ filename(xf), lineno, cls, base, pub, virt, frnd);
+}
+
+/* Output cross-reference info about class members. CLS
+ is the containing type; FLD is the class member. */
+
+void
+GNU_xref_member(cls, fld)
+ tree cls;
+ tree fld;
+{
+ XREF_FILE xf;
+ char *prot;
+ int confg, pure;
+ char *d;
+ int i;
+ char buf[1024], bufa[1024];
+
+ if (!doing_xref) return;
+ xf = find_file(fld->decl.filename);
+ if (xf == NULL) return;
+
+ if (TREE_PRIVATE (fld)) prot = "PRIVATE";
+ else if (TREE_PROTECTED(fld)) prot = "PROTECTED";
+ else prot = "PUBLIC";
+
+ confg = 0;
+ if (TREE_CODE (fld) == FUNCTION_DECL && DECL_CONST_MEMFUNC_P(fld))
+ confg = 1;
+ else if (TREE_CODE (fld) == CONST_DECL)
+ confg = 1;
+
+ pure = 0;
+ if (TREE_CODE (fld) == FUNCTION_DECL && DECL_ABSTRACT_VIRTUAL_P(fld))
+ pure = 1;
+
+ d = IDENTIFIER_POINTER(cls);
+ sprintf(buf, "%d%s", strlen(d), d);
+ i = strlen(buf);
+ strcpy(bufa, declname(fld));
+
+#ifdef XREF_SHORT_MEMBER_NAMES
+ {
+ char *p;
+
+ for (p = &bufa[1]; *p != 0; ++p)
+ {
+ if (p[0] == '_' && p[1] == '_' && p[2] >= '0' && p[2] <= '9') {
+ if (strncmp(&p[2], buf, i) == 0) *p = 0;
+ break;
+ }
+ else if (p[0] == '_' && p[1] == '_' && p[2] == 'C' && p[3] >= '0' && p[3] <= '9') {
+ if (strncmp(&p[3], buf, i) == 0) *p = 0;
+ break;
+ }
+ }
+ }
+#endif
+
+ fprintf(xref_file, "MEM %s %d %s %s %s %d %d %d %d %d %d %d\n",
+ filename(xf), fld->decl.linenum, d, bufa, prot,
+ (TREE_CODE (fld) == FUNCTION_DECL ? 0 : 1),
+ (DECL_INLINE (fld) ? 1 : 0),
+ (DECL_FRIEND_P(fld) ? 1 : 0),
+ (DECL_VINDEX(fld) ? 1 : 0),
+ (TREE_STATIC(fld) ? 1 : 0),
+ pure, confg);
+}
+
+/* Find file entry given name. */
+
+static XREF_FILE
+find_file(name)
+ char *name;
+{
+ XREF_FILE xf;
+
+ for (xf = all_files; xf != NULL; xf = xf->next) {
+ if (STREQL(name, xf->name)) break;
+ }
+
+ return xf;
+}
+
+/* Return filename for output purposes. */
+
+static char *
+filename(xf)
+ XREF_FILE xf;
+{
+ if (xf == NULL) {
+ last_file = NULL;
+ return "*";
+ }
+
+ if (last_file == xf) return "*";
+
+ last_file = xf;
+
+ return xf->outname;
+}
+
+/* Return function name for output purposes. */
+
+static char *
+fctname(fndecl)
+ tree fndecl;
+{
+ static char fctbuf[1024];
+ char *s;
+
+ if (fndecl == NULL && last_fndecl == NULL) return "*";
+
+ if (fndecl == NULL)
+ {
+ last_fndecl = NULL;
+ return "*TOP*";
+ }
+
+ if (fndecl == last_fndecl) return "*";
+
+ last_fndecl = fndecl;
+
+ s = declname(fndecl);
+ s = fixname(s, fctbuf);
+
+ return s;
+}
+
+/* Return decl name for output purposes. */
+
+static char *
+declname(dcl)
+ tree dcl;
+{
+ if (DECL_NAME (dcl) == NULL) return "?";
+
+ if (DECL_ASSEMBLER_NAME (dcl))
+ return IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (dcl));
+ else
+ return IDENTIFIER_POINTER (DECL_NAME (dcl));
+}
+
+/* Simplify a type string by removing unneeded parentheses. */
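+/* For example, "int (*)(int, int)" is shortened to "int (*)()".  */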
+
+static void
+simplify_type(typ)
+ char *typ;
+{
+ char *s;
+ int lvl, i;
+
+ i = strlen(typ);
+ while (i > 0 && isspace(typ[i-1])) typ[--i] = 0;
+
+ if (i > 7 && STREQL(&typ[i-5], "const"))
+ {
+ typ[i-5] = 0;
+ i -= 5;
+ }
+
+ if (typ[i-1] != ')') return;
+
+ s = &typ[i-2];
+ lvl = 1;
+ while (*s != 0) {
+ if (*s == ')') ++lvl;
+ else if (*s == '(')
+ {
+ --lvl;
+ if (lvl == 0)
+ {
+ s[1] = ')';
+ s[2] = 0;
+ break;
+ }
+ }
+ --s;
+ }
+
+ if (*s != 0 && s[-1] == ')')
+ {
+ --s;
+ --s;
+ if (*s == '(') s[2] = 0;
+ else if (*s == ':') {
+ while (*s != '(') --s;
+ s[1] = ')';
+ s[2] = 0;
+ }
+ }
+}
+
+/* Fixup a function name (take care of embedded spaces). */
+
+static char *
+fixname(nam, buf)
+ char *nam;
+ char *buf;
+{
+ char *s, *t;
+ int fg;
+
+ s = nam;
+ t = buf;
+ fg = 0;
+
+ while (*s != 0)
+ {
+ if (*s == ' ')
+ {
+ *t++ = '\36';
+ ++fg;
+ }
+ else *t++ = *s;
+ ++s;
+ }
+ *t = 0;
+
+ if (fg == 0) return nam;
+
+ return buf;
+}
+
+/* Open file for xrefing. */
+
+static void
+open_xref_file(file)
+ char *file;
+{
+ char *s, *t;
+
+#ifdef XREF_FILE_NAME
+ XREF_FILE_NAME (xref_name, file);
+#else
+ s = rindex (file, '/');
+ if (s == NULL)
+ sprintf (xref_name, ".%s.gxref", file);
+ else
+ {
+ ++s;
+ strcpy (xref_name, file);
+ t = rindex (xref_name, '/');
+ ++t;
+ *t++ = '.';
+ strcpy (t, s);
+ strcat (t, ".gxref");
+ }
+#endif /* no XREF_FILE_NAME */
+
+ xref_file = fopen(xref_name, "w");
+
+ if (xref_file == NULL)
+ {
+ error("Can't create cross-reference file `%s'", xref_name);
+ doing_xref = 0;
+ }
+}
diff --git a/gnu/usr.bin/cc/cc_int/Makefile b/gnu/usr.bin/cc/cc_int/Makefile
new file mode 100644
index 0000000..794d760
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/Makefile
@@ -0,0 +1,12 @@
+#
+# $FreeBSD$
+#
+
+SRCS = aux-output.c bc-emit.c bc-optab.c c-common.c caller-save.c calls.c combine.c convert.c cse.c dbxout.c dwarfout.c emit-rtl.c explow.c expmed.c expr.c final.c flow.c fold-const.c function.c getpwd.c global.c insn-attrtab.c insn-emit.c insn-extract.c insn-opinit.c insn-output.c insn-peep.c insn-recog.c integrate.c jump.c local-alloc.c loop.c obstack.c optabs.c print-rtl.c print-tree.c real.c recog.c reg-stack.c regclass.c reload.c reload1.c reorg.c rtl.c rtlanal.c sched.c sdbout.c stmt.c stor-layout.c stupid.c toplev.c tree.c unroll.c varasm.c version.c xcoffout.c
+LIB = cc_int
+NOPROFILE= 1
+
+install:
+ @true
+
+.include <bsd.lib.mk>
diff --git a/gnu/usr.bin/cc/cc_int/aux-output.c b/gnu/usr.bin/cc/cc_int/aux-output.c
new file mode 100644
index 0000000..bc498e0
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/aux-output.c
@@ -0,0 +1,2138 @@
+/* Subroutines for insn-output.c for Intel 80386.
+ Copyright (C) 1988, 1992 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <stdio.h>
+#include <setjmp.h>
+#include "config.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "tree.h"
+#include "flags.h"
+#include "function.h"
+
+#ifdef EXTRA_CONSTRAINT
+/* If EXTRA_CONSTRAINT is defined, then the 'S'
+ constraint in REG_CLASS_FROM_LETTER will no longer work, and various
+ asm statements that need 'S' for class SIREG will break. */
+ error EXTRA_CONSTRAINT conflicts with S constraint letter
+/* The previous line used to be #error, but some compilers barf
+ even if the conditional was untrue. */
+#endif
+
+#define AT_BP(mode) (gen_rtx (MEM, (mode), frame_pointer_rtx))
+
+extern FILE *asm_out_file;
+extern char *strcat ();
+
+char *singlemove_string ();
+char *output_move_const_single ();
+char *output_fp_cc0_set ();
+
+char *hi_reg_name[] = HI_REGISTER_NAMES;
+char *qi_reg_name[] = QI_REGISTER_NAMES;
+char *qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
+
+/* Array of the smallest class containing reg number REGNO, indexed by
+ REGNO. Used by REGNO_REG_CLASS in i386.h. */
+
+enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
+{
+ /* ax, dx, cx, bx */
+ AREG, DREG, CREG, BREG,
+ /* si, di, bp, sp */
+ SIREG, DIREG, INDEX_REGS, GENERAL_REGS,
+ /* FP registers */
+ FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
+ FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
+ /* arg pointer */
+ INDEX_REGS
+};
+
+/* Test and compare insns in i386.md store the information needed to
+ generate branch and scc insns here. */
+
+struct rtx_def *i386_compare_op0, *i386_compare_op1;
+struct rtx_def *(*i386_compare_gen)(), *(*i386_compare_gen_eq)();
+
+/* Output an insn whose source is a 386 integer register. SRC is the
+ rtx for the register, and TEMPLATE is the op-code template. SRC may
+ be either SImode or DImode.
+
+ The template will be output with operands[0] as SRC, and operands[1]
+ as a pointer to the top of the 386 stack. So a call from floatsidf2
+ would look like this:
+
+ output_op_from_reg (operands[1], AS1 (fild%z0,%1));
+
+ where %z0 corresponds to the caller's operands[1], and is used to
+ emit the proper size suffix.
+
+ ??? Extend this to handle HImode - a 387 can load and store HImode
+ values directly. */
+
+void
+output_op_from_reg (src, template)
+ rtx src;
+ char *template;
+{
+ rtx xops[4];
+ int size = GET_MODE_SIZE (GET_MODE (src));
+
+ xops[0] = src;
+ xops[1] = AT_SP (Pmode);
+ xops[2] = GEN_INT (size);
+ xops[3] = stack_pointer_rtx;
+
+ if (size > UNITS_PER_WORD)
+ {
+ rtx high;
+ if (size > 2 * UNITS_PER_WORD)
+ {
+ high = gen_rtx (REG, SImode, REGNO (src) + 2);
+ output_asm_insn (AS1 (push%L0,%0), &high);
+ }
+ high = gen_rtx (REG, SImode, REGNO (src) + 1);
+ output_asm_insn (AS1 (push%L0,%0), &high);
+ }
+ output_asm_insn (AS1 (push%L0,%0), &src);
+
+ output_asm_insn (template, xops);
+
+ output_asm_insn (AS2 (add%L3,%2,%3), xops);
+}
+
+/* Output an insn to pop a value from the 387 top-of-stack to 386
+ register DEST. The 387 register stack is popped if DIES is true. If
+ the mode of DEST is an integer mode, a `fist' integer store is done,
+ otherwise a `fst' float store is done. */
+
+void
+output_to_reg (dest, dies)
+ rtx dest;
+ int dies;
+{
+ rtx xops[4];
+ int size = GET_MODE_SIZE (GET_MODE (dest));
+
+ xops[0] = AT_SP (Pmode);
+ xops[1] = stack_pointer_rtx;
+ xops[2] = GEN_INT (size);
+ xops[3] = dest;
+
+ output_asm_insn (AS2 (sub%L1,%2,%1), xops);
+
+ if (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
+ {
+ if (dies)
+ output_asm_insn (AS1 (fistp%z3,%y0), xops);
+ else
+ output_asm_insn (AS1 (fist%z3,%y0), xops);
+ }
+ else if (GET_MODE_CLASS (GET_MODE (dest)) == MODE_FLOAT)
+ {
+ if (dies)
+ output_asm_insn (AS1 (fstp%z3,%y0), xops);
+ else
+ {
+ if (GET_MODE (dest) == XFmode)
+ {
+ output_asm_insn (AS1 (fstp%z3,%y0), xops);
+ output_asm_insn (AS1 (fld%z3,%y0), xops);
+ }
+ else
+ output_asm_insn (AS1 (fst%z3,%y0), xops);
+ }
+ }
+ else
+ abort ();
+
+ output_asm_insn (AS1 (pop%L0,%0), &dest);
+
+ if (size > UNITS_PER_WORD)
+ {
+ dest = gen_rtx (REG, SImode, REGNO (dest) + 1);
+ output_asm_insn (AS1 (pop%L0,%0), &dest);
+ if (size > 2 * UNITS_PER_WORD)
+ {
+ dest = gen_rtx (REG, SImode, REGNO (dest) + 1);
+ output_asm_insn (AS1 (pop%L0,%0), &dest);
+ }
+ }
+}
+
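+/* Return the assembler template for moving operands[1] into
+   operands[0] as a single word; a memory-to-memory move goes
+   through the stack with a push/pop pair.  */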
+char *
+singlemove_string (operands)
+ rtx *operands;
+{
+ rtx x;
+ if (GET_CODE (operands[0]) == MEM
+ && GET_CODE (x = XEXP (operands[0], 0)) == PRE_DEC)
+ {
+ if (XEXP (x, 0) != stack_pointer_rtx)
+ abort ();
+ return "push%L1 %1";
+ }
+ else if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ return output_move_const_single (operands);
+ }
+ else if (GET_CODE (operands[0]) == REG || GET_CODE (operands[1]) == REG)
+ return AS2 (mov%L0,%1,%0);
+ else if (CONSTANT_P (operands[1]))
+ return AS2 (mov%L0,%1,%0);
+ else
+ {
+ output_asm_insn ("push%L1 %1", operands);
+ return "pop%L0 %0";
+ }
+}
+
+/* Return a REG that occurs in ADDR with coefficient 1.
+ ADDR can be effectively incremented by incrementing REG. */
+
+static rtx
+find_addr_reg (addr)
+ rtx addr;
+{
+ while (GET_CODE (addr) == PLUS)
+ {
+ if (GET_CODE (XEXP (addr, 0)) == REG)
+ addr = XEXP (addr, 0);
+ else if (GET_CODE (XEXP (addr, 1)) == REG)
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 0)))
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 1)))
+ addr = XEXP (addr, 0);
+ else
+ abort ();
+ }
+ if (GET_CODE (addr) == REG)
+ return addr;
+ abort ();
+}
+
+/* Output an insn to add the constant N to the register X. */
+
+static void
+asm_add (n, x)
+ int n;
+ rtx x;
+{
+ rtx xops[2];
+ xops[1] = x;
+ if (n < 0)
+ {
+ xops[0] = GEN_INT (-n);
+ output_asm_insn (AS2 (sub%L0,%0,%1), xops);
+ }
+ else if (n > 0)
+ {
+ xops[0] = GEN_INT (n);
+ output_asm_insn (AS2 (add%L0,%0,%1), xops);
+ }
+}
+
+/* Output assembler code to perform a doubleword move insn
+ with operands OPERANDS. */
+
+char *
+output_move_double (operands)
+ rtx *operands;
+{
+ enum {REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP } optype0, optype1;
+ rtx latehalf[2];
+ rtx middlehalf[2];
+ rtx xops[2];
+ rtx addreg0 = 0, addreg1 = 0;
+ int dest_overlapped_low = 0;
+ int size = GET_MODE_SIZE (GET_MODE (operands[1]));
+
+ middlehalf[0] = 0;
+ middlehalf[1] = 0;
+
+ /* First classify both operands. */
+
+ if (REG_P (operands[0]))
+ optype0 = REGOP;
+ else if (offsettable_memref_p (operands[0]))
+ optype0 = OFFSOP;
+ else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
+ optype0 = POPOP;
+ else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
+ optype0 = PUSHOP;
+ else if (GET_CODE (operands[0]) == MEM)
+ optype0 = MEMOP;
+ else
+ optype0 = RNDOP;
+
+ if (REG_P (operands[1]))
+ optype1 = REGOP;
+ else if (CONSTANT_P (operands[1]))
+ optype1 = CNSTOP;
+ else if (offsettable_memref_p (operands[1]))
+ optype1 = OFFSOP;
+ else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
+ optype1 = POPOP;
+ else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
+ optype1 = PUSHOP;
+ else if (GET_CODE (operands[1]) == MEM)
+ optype1 = MEMOP;
+ else
+ optype1 = RNDOP;
+
+ /* Check for the cases that the operand constraints are not
+ supposed to allow to happen. Abort if we get one,
+ because generating code for these cases is painful. */
+
+ if (optype0 == RNDOP || optype1 == RNDOP)
+ abort ();
+
+ /* If one operand is decrementing and one is incrementing
+ decrement the former register explicitly
+ and change that operand into ordinary indexing. */
+
+ if (optype0 == PUSHOP && optype1 == POPOP)
+ {
+ /* ??? Can this ever happen on i386? */
+ operands[0] = XEXP (XEXP (operands[0], 0), 0);
+ asm_add (-size, operands[0]);
+ if (GET_MODE (operands[1]) == XFmode)
+ operands[0] = gen_rtx (MEM, XFmode, operands[0]);
+ else if (GET_MODE (operands[0]) == DFmode)
+ operands[0] = gen_rtx (MEM, DFmode, operands[0]);
+ else
+ operands[0] = gen_rtx (MEM, DImode, operands[0]);
+ optype0 = OFFSOP;
+ }
+
+ if (optype0 == POPOP && optype1 == PUSHOP)
+ {
+ /* ??? Can this ever happen on i386? */
+ operands[1] = XEXP (XEXP (operands[1], 0), 0);
+ asm_add (-size, operands[1]);
+ if (GET_MODE (operands[1]) == XFmode)
+ operands[1] = gen_rtx (MEM, XFmode, operands[1]);
+ else if (GET_MODE (operands[1]) == DFmode)
+ operands[1] = gen_rtx (MEM, DFmode, operands[1]);
+ else
+ operands[1] = gen_rtx (MEM, DImode, operands[1]);
+ optype1 = OFFSOP;
+ }
+
+ /* If an operand is an unoffsettable memory ref, find a register
+ we can increment temporarily to make it refer to the second word. */
+
+ if (optype0 == MEMOP)
+ addreg0 = find_addr_reg (XEXP (operands[0], 0));
+
+ if (optype1 == MEMOP)
+ addreg1 = find_addr_reg (XEXP (operands[1], 0));
+
+ /* Ok, we can do one word at a time.
+ Normally we do the low-numbered word first,
+ but if either operand is autodecrementing then we
+ do the high-numbered word first.
+
+ In either case, set up in LATEHALF the operands to use
+ for the high-numbered word and in some cases alter the
+ operands in OPERANDS to be suitable for the low-numbered word. */
+
+ if (size == 12)
+ {
+ if (optype0 == REGOP)
+ {
+ middlehalf[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
+ latehalf[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 2);
+ }
+ else if (optype0 == OFFSOP)
+ {
+ middlehalf[0] = adj_offsettable_operand (operands[0], 4);
+ latehalf[0] = adj_offsettable_operand (operands[0], 8);
+ }
+ else
+ {
+ middlehalf[0] = operands[0];
+ latehalf[0] = operands[0];
+ }
+
+ if (optype1 == REGOP)
+ {
+ middlehalf[1] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1);
+ latehalf[1] = gen_rtx (REG, SImode, REGNO (operands[1]) + 2);
+ }
+ else if (optype1 == OFFSOP)
+ {
+ middlehalf[1] = adj_offsettable_operand (operands[1], 4);
+ latehalf[1] = adj_offsettable_operand (operands[1], 8);
+ }
+ else if (optype1 == CNSTOP)
+ {
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ REAL_VALUE_TYPE r; long l[3];
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
+ operands[1] = GEN_INT (l[0]);
+ middlehalf[1] = GEN_INT (l[1]);
+ latehalf[1] = GEN_INT (l[2]);
+ }
+ else if (CONSTANT_P (operands[1]))
+ /* No non-CONST_DOUBLE constant should ever appear here. */
+ abort ();
+ }
+ else
+ {
+ middlehalf[1] = operands[1];
+ latehalf[1] = operands[1];
+ }
+ }
+ else /* size is not 12: */
+ {
+ if (optype0 == REGOP)
+ latehalf[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
+ else if (optype0 == OFFSOP)
+ latehalf[0] = adj_offsettable_operand (operands[0], 4);
+ else
+ latehalf[0] = operands[0];
+
+ if (optype1 == REGOP)
+ latehalf[1] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1);
+ else if (optype1 == OFFSOP)
+ latehalf[1] = adj_offsettable_operand (operands[1], 4);
+ else if (optype1 == CNSTOP)
+ {
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ split_double (operands[1], &operands[1], &latehalf[1]);
+ else if (CONSTANT_P (operands[1]))
+ {
+ /* ??? jrv: Can this really happen? A DImode constant
+ that isn't a CONST_DOUBLE? */
+ if (GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) < 0)
+ latehalf[1] = constm1_rtx;
+ else
+ latehalf[1] = const0_rtx;
+ }
+ }
+ else
+ latehalf[1] = operands[1];
+ }
+
+ /* If insn is effectively movd N (sp),-(sp) then we will do the
+ high word first. We should use the adjusted operand 1
+ (which is N+4 (sp) or N+8 (sp))
+ for the low word and middle word as well,
+ to compensate for the first decrement of sp. */
+ if (optype0 == PUSHOP
+ && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
+ && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
+ middlehalf[1] = operands[1] = latehalf[1];
+
+ /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
+ if the upper part of reg N does not appear in the MEM, arrange to
+ emit the move late-half first. Otherwise, compute the MEM address
+ into the upper part of N and use that as a pointer to the memory
+ operand. */
+ if (optype0 == REGOP
+ && (optype1 == OFFSOP || optype1 == MEMOP))
+ {
+ if (reg_mentioned_p (operands[0], XEXP (operands[1], 0))
+ && reg_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
+ {
+ /* If both halves of dest are used in the src memory address,
+ compute the address into latehalf of dest. */
+compadr:
+ xops[0] = latehalf[0];
+ xops[1] = XEXP (operands[1], 0);
+ output_asm_insn (AS2 (lea%L0,%a1,%0), xops);
+ if( GET_MODE (operands[1]) == XFmode )
+ {
+/* abort (); */
+ operands[1] = gen_rtx (MEM, XFmode, latehalf[0]);
+ middlehalf[1] = adj_offsettable_operand (operands[1], size-8);
+ latehalf[1] = adj_offsettable_operand (operands[1], size-4);
+ }
+ else
+ {
+ operands[1] = gen_rtx (MEM, DImode, latehalf[0]);
+ latehalf[1] = adj_offsettable_operand (operands[1], size-4);
+ }
+ }
+ else if (size == 12
+ && reg_mentioned_p (middlehalf[0], XEXP (operands[1], 0)))
+ {
+ /* Check for two regs used by both source and dest. */
+ if (reg_mentioned_p (operands[0], XEXP (operands[1], 0))
+ || reg_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
+ goto compadr;
+
+ /* JRV says this can't happen: */
+ if (addreg0 || addreg1)
+ abort();
+
+ /* Only the middle reg conflicts; simply put it last. */
+ output_asm_insn (singlemove_string (operands), operands);
+ output_asm_insn (singlemove_string (latehalf), latehalf);
+ output_asm_insn (singlemove_string (middlehalf), middlehalf);
+ return "";
+ }
+ else if (reg_mentioned_p (operands[0], XEXP (operands[1], 0)))
+ /* If the low half of dest is mentioned in the source memory
+ address, then arrange to emit the move of the late half first. */
+ dest_overlapped_low = 1;
+ }
+
+ /* If one or both operands are autodecrementing,
+ do the two words, high-numbered first. */
+
+ /* Likewise, if the first move would clobber the source of the second one,
+ do them in the other order. This happens only for registers;
+ such overlap can't happen in memory unless the user explicitly
+ sets it up, and that is an undefined circumstance. */
+
+/*
+ if (optype0 == PUSHOP || optype1 == PUSHOP
+ || (optype0 == REGOP && optype1 == REGOP
+ && REGNO (operands[0]) == REGNO (latehalf[1]))
+ || dest_overlapped_low)
+*/
+ if (optype0 == PUSHOP || optype1 == PUSHOP
+ || (optype0 == REGOP && optype1 == REGOP
+ && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
+ || REGNO (operands[0]) == REGNO (latehalf[1])))
+ || dest_overlapped_low)
+ {
+ /* Make any unoffsettable addresses point at high-numbered word. */
+ if (addreg0)
+ asm_add (size-4, addreg0);
+ if (addreg1)
+ asm_add (size-4, addreg1);
+
+ /* Do that word. */
+ output_asm_insn (singlemove_string (latehalf), latehalf);
+
+ /* Undo the adds we just did. */
+ if (addreg0)
+ asm_add (-4, addreg0);
+ if (addreg1)
+ asm_add (-4, addreg1);
+
+ if (size == 12)
+ {
+ output_asm_insn (singlemove_string (middlehalf), middlehalf);
+ if (addreg0)
+ asm_add (-4, addreg0);
+ if (addreg1)
+ asm_add (-4, addreg1);
+ }
+
+ /* Do low-numbered word. */
+ return singlemove_string (operands);
+ }
+
+ /* Normal case: do the two words, low-numbered first. */
+
+ output_asm_insn (singlemove_string (operands), operands);
+
+ /* Do the middle one of the three words for long double */
+ if (size == 12)
+ {
+ if (addreg0)
+ asm_add (4, addreg0);
+ if (addreg1)
+ asm_add (4, addreg1);
+
+ output_asm_insn (singlemove_string (middlehalf), middlehalf);
+ }
+
+ /* Make any unoffsettable addresses point at high-numbered word. */
+ if (addreg0)
+ asm_add (4, addreg0);
+ if (addreg1)
+ asm_add (4, addreg1);
+
+ /* Do that word. */
+ output_asm_insn (singlemove_string (latehalf), latehalf);
+
+ /* Undo the adds we just did. */
+ if (addreg0)
+ asm_add (4-size, addreg0);
+ if (addreg1)
+ asm_add (4-size, addreg1);
+
+ return "";
+}
+
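+/* Return 1 if X is the floating point constant 0.0, 2 if it is 1.0,
+   and 0 otherwise; the first two can be loaded onto the 387 with the
+   `fldz' and `fld1' instructions.  */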
+int
+standard_80387_constant_p (x)
+ rtx x;
+{
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ REAL_VALUE_TYPE d;
+ jmp_buf handler;
+ int is0, is1;
+
+ if (setjmp (handler))
+ return 0;
+
+ set_float_handler (handler);
+ REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+ is0 = REAL_VALUES_EQUAL (d, dconst0);
+ is1 = REAL_VALUES_EQUAL (d, dconst1);
+ set_float_handler (NULL_PTR);
+
+ if (is0)
+ return 1;
+
+ if (is1)
+ return 2;
+
+ /* Note that on the 80387, other constants, such as pi,
+ are much slower to load as standard constants
+ than to load from doubles in memory! */
+#endif
+
+ return 0;
+}
+
+char *
+output_move_const_single (operands)
+ rtx *operands;
+{
+ if (FP_REG_P (operands[0]))
+ {
+ int conval = standard_80387_constant_p (operands[1]);
+
+ if (conval == 1)
+ return "fldz";
+
+ if (conval == 2)
+ return "fld1";
+ }
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ REAL_VALUE_TYPE r; long l;
+
+ if (GET_MODE (operands[1]) == XFmode)
+ abort ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (r, l);
+ operands[1] = GEN_INT (l);
+ }
+ return singlemove_string (operands);
+}
+
+/* Returns 1 if OP is either a symbol reference or a sum of a symbol
+ reference and a constant. */
+
+int
+symbolic_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ switch (GET_CODE (op))
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 1;
+ case CONST:
+ op = XEXP (op, 0);
+ return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (op, 0)) == LABEL_REF)
+ && GET_CODE (XEXP (op, 1)) == CONST_INT);
+ default:
+ return 0;
+ }
+}
+
+/* Test for a valid operand for a call instruction.
+ Don't allow the arg pointer register or virtual regs
+ since they may change into reg + const, which the patterns
+ can't handle yet. */
+
+int
+call_insn_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == MEM
+ && ((CONSTANT_ADDRESS_P (XEXP (op, 0))
+ /* This makes a difference for PIC. */
+ && general_operand (XEXP (op, 0), Pmode))
+ || (GET_CODE (XEXP (op, 0)) == REG
+ && XEXP (op, 0) != arg_pointer_rtx
+ && !(REGNO (XEXP (op, 0)) >= FIRST_PSEUDO_REGISTER
+ && REGNO (XEXP (op, 0)) <= LAST_VIRTUAL_REGISTER))))
+ return 1;
+ return 0;
+}
+
+/* Like call_insn_operand but allow (mem (symbol_ref ...))
+ even if pic. */
+
+int
+expander_call_insn_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == MEM
+ && (CONSTANT_ADDRESS_P (XEXP (op, 0))
+ || (GET_CODE (XEXP (op, 0)) == REG
+ && XEXP (op, 0) != arg_pointer_rtx
+ && !(REGNO (XEXP (op, 0)) >= FIRST_PSEUDO_REGISTER
+ && REGNO (XEXP (op, 0)) <= LAST_VIRTUAL_REGISTER))))
+ return 1;
+ return 0;
+}
+
+/* Returns 1 if OP contains a symbol reference */
+
+int
+symbolic_reference_mentioned_p (op)
+ rtx op;
+{
+ register char *fmt;
+ register int i;
+
+ if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (op));
+ for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (op, i) - 1; j >= 0; j--)
+ if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return a legitimate reference for ORIG (an address) using the
+ register REG. If REG is 0, a new pseudo is generated.
+
+ There are three types of references that must be handled:
+
+ 1. Global data references must load the address from the GOT, via
+ the PIC reg. An insn is emitted to do this load, and the reg is
+ returned.
+
+ 2. Static data references must compute the address as an offset
+ from the GOT, whose base is in the PIC reg. An insn is emitted to
+ compute the address into a reg, and the reg is returned. Static
+ data objects have SYMBOL_REF_FLAG set to differentiate them from
+ global data objects.
+
+   3. Constant pool addresses must be handled specially.  They are
+ considered legitimate addresses, but only if not used with regs.
+ When printed, the output routines know to print the reference with the
+ PIC reg, even though the PIC reg doesn't appear in the RTL.
+
+ GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
+ reg also appears in the address (except for constant pool references,
+ noted above).
+
+ "switch" statements also require special handling when generating
+ PIC code. See comments by the `casesi' insn in i386.md for details. */
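+
+/* Illustration (assumed AT&T syntax, ELF-style operands; not taken
+   verbatim from this file): with %ebx holding the PIC register, case 1
+   loads a global's address via the GOT with something like
+   `movl foo@GOT(%ebx),%eax'; case 2 computes a static object's address
+   with `leal bar@GOTOFF(%ebx),%eax'; and in case 3 a constant-pool
+   reference is printed directly as `LC0@GOTOFF(%ebx)', with no extra
+   insn.  */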
+
+rtx
+legitimize_pic_address (orig, reg)
+ rtx orig;
+ rtx reg;
+{
+ rtx addr = orig;
+ rtx new = orig;
+
+ if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
+ {
+ if (GET_CODE (addr) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (addr))
+ reg = new = orig;
+ else
+ {
+ if (reg == 0)
+ reg = gen_reg_rtx (Pmode);
+
+ if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_FLAG (addr))
+ new = gen_rtx (PLUS, Pmode, pic_offset_table_rtx, orig);
+ else
+ new = gen_rtx (MEM, Pmode,
+ gen_rtx (PLUS, Pmode,
+ pic_offset_table_rtx, orig));
+
+ emit_move_insn (reg, new);
+ }
+ current_function_uses_pic_offset_table = 1;
+ return reg;
+ }
+ else if (GET_CODE (addr) == CONST || GET_CODE (addr) == PLUS)
+ {
+ rtx base;
+
+ if (GET_CODE (addr) == CONST)
+ {
+ addr = XEXP (addr, 0);
+ if (GET_CODE (addr) != PLUS)
+ abort ();
+ }
+
+ if (XEXP (addr, 0) == pic_offset_table_rtx)
+ return orig;
+
+ if (reg == 0)
+ reg = gen_reg_rtx (Pmode);
+
+ base = legitimize_pic_address (XEXP (addr, 0), reg);
+ addr = legitimize_pic_address (XEXP (addr, 1),
+ base == reg ? NULL_RTX : reg);
+
+ if (GET_CODE (addr) == CONST_INT)
+ return plus_constant (base, INTVAL (addr));
+
+ if (GET_CODE (addr) == PLUS && CONSTANT_P (XEXP (addr, 1)))
+ {
+ base = gen_rtx (PLUS, Pmode, base, XEXP (addr, 0));
+ addr = XEXP (addr, 1);
+ }
+ return gen_rtx (PLUS, Pmode, base, addr);
+ }
+ return new;
+}
+
+/* Emit insns to move operands[1] into operands[0]. */
+
+void
+emit_pic_move (operands, mode)
+ rtx *operands;
+ enum machine_mode mode;
+{
+ rtx temp = reload_in_progress ? operands[0] : gen_reg_rtx (Pmode);
+
+ if (GET_CODE (operands[0]) == MEM && SYMBOLIC_CONST (operands[1]))
+ operands[1] = (rtx) force_reg (SImode, operands[1]);
+ else
+ operands[1] = legitimize_pic_address (operands[1], temp);
+}
+
+/* This function generates the assembly code for function entry.
+ FILE is an stdio stream to output the code to.
+ SIZE is an int: how many units of temporary storage to allocate. */
+
+void
+function_prologue (file, size)
+ FILE *file;
+ int size;
+{
+ register int regno;
+ int limit;
+ rtx xops[4];
+ int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
+ || current_function_uses_const_pool
+ || profile_flag || profile_block_flag);
+
+ xops[0] = stack_pointer_rtx;
+ xops[1] = frame_pointer_rtx;
+ xops[2] = GEN_INT (size);
+ if (frame_pointer_needed)
+ {
+ output_asm_insn ("push%L1 %1", xops);
+ output_asm_insn (AS2 (mov%L0,%0,%1), xops);
+ }
+
+ if (size)
+ output_asm_insn (AS2 (sub%L0,%2,%0), xops);
+
+  /* Note: if we used `enter' here, its operands would NOT be reversed;
+     `enter' takes them in the Intel order.  It is also thought to be
+     slower, and sdb doesn't like it.  But if you want it, the code is:
+     {
+       xops[3] = const0_rtx;
+       output_asm_insn ("enter %2,%3", xops);
+     }
+     */
+ limit = (frame_pointer_needed ? FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
+ for (regno = limit - 1; regno >= 0; regno--)
+ if ((regs_ever_live[regno] && ! call_used_regs[regno])
+ || (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
+ {
+ xops[0] = gen_rtx (REG, SImode, regno);
+ output_asm_insn ("push%L0 %0", xops);
+ }
+
+ if (pic_reg_used)
+ {
+ xops[0] = pic_offset_table_rtx;
+ xops[1] = (rtx) gen_label_rtx ();
+
+ output_asm_insn (AS1 (call,%P1), xops);
+ ASM_OUTPUT_INTERNAL_LABEL (file, "L", CODE_LABEL_NUMBER (xops[1]));
+ output_asm_insn (AS1 (pop%L0,%0), xops);
+ output_asm_insn ("addl $_GLOBAL_OFFSET_TABLE_+[.-%P1],%0", xops);
+ }
+}
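+
+/* The PIC sequence emitted above materializes the GOT pointer.  For an
+   internal label L1 it comes out as, roughly:
+
+	call L1
+   L1:	popl %ebx
+	addl $_GLOBAL_OFFSET_TABLE_+[.-L1],%ebx
+
+   The call pushes the address of L1, the pop retrieves it, and the add
+   applies the assembler-computed displacement from L1 to the GOT.  */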
+
+/* Return 1 if it is appropriate to emit `ret' instructions in the
+ body of a function. Do this only if the epilogue is simple, needing a
+ couple of insns. Prior to reloading, we can't tell how many registers
+ must be saved, so return 0 then.
+
+ If NON_SAVING_SETJMP is defined and true, then it is not possible
+ for the epilogue to be simple, so return 0. This is a special case
+ since NON_SAVING_SETJMP will not cause regs_ever_live to change until
+ final, but jump_optimize may need to know sooner if a `return' is OK. */
+
+int
+simple_386_epilogue ()
+{
+ int regno;
+ int nregs = 0;
+ int reglimit = (frame_pointer_needed
+ ? FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
+
+#ifdef FUNCTION_PROFILER_EPILOGUE
+ if (profile_flag)
+ return 0;
+#endif
+
+ if (flag_pic && (current_function_uses_pic_offset_table
+ || current_function_uses_const_pool
+ || profile_flag || profile_block_flag))
+ return 0;
+
+#ifdef NON_SAVING_SETJMP
+ if (NON_SAVING_SETJMP && current_function_calls_setjmp)
+ return 0;
+#endif
+
+ if (! reload_completed)
+ return 0;
+
+ for (regno = reglimit - 1; regno >= 0; regno--)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ nregs++;
+
+ return nregs == 0 || ! frame_pointer_needed;
+}
+
+/* This function generates the assembly code for function exit.
+ FILE is an stdio stream to output the code to.
+ SIZE is an int: how many units of temporary storage to deallocate. */
+
+void
+function_epilogue (file, size)
+ FILE *file;
+ int size;
+{
+ register int regno;
+ register int nregs, limit;
+ int offset;
+ rtx xops[3];
+ int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
+ || current_function_uses_const_pool);
+
+#ifdef FUNCTION_PROFILER_EPILOGUE
+ if (profile_flag)
+ FUNCTION_PROFILER_EPILOGUE (file);
+#endif
+
+ /* Compute the number of registers to pop */
+
+ limit = (frame_pointer_needed
+ ? FRAME_POINTER_REGNUM
+ : STACK_POINTER_REGNUM);
+
+ nregs = 0;
+
+ for (regno = limit - 1; regno >= 0; regno--)
+ if ((regs_ever_live[regno] && ! call_used_regs[regno])
+ || (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
+ nregs++;
+
+  /* sp is often unreliable, so we must restore registers relative to
+     the frame pointer.
+
+     In reality we may not care that sp is unreliable, because we could
+     restore each register relative to the frame pointer anyway.  In
+     theory that is faster, since each move is the same speed as a pop
+     and the leal becomes unnecessary.  For now, restore multiple
+     registers the old way.  */
+
+ offset = -size - (nregs * UNITS_PER_WORD);
+
+ xops[2] = stack_pointer_rtx;
+
+ if (nregs > 1 || ! frame_pointer_needed)
+ {
+ if (frame_pointer_needed)
+ {
+ xops[0] = adj_offsettable_operand (AT_BP (Pmode), offset);
+ output_asm_insn (AS2 (lea%L2,%0,%2), xops);
+ }
+
+ for (regno = 0; regno < limit; regno++)
+ if ((regs_ever_live[regno] && ! call_used_regs[regno])
+ || (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
+ {
+ xops[0] = gen_rtx (REG, SImode, regno);
+ output_asm_insn ("pop%L0 %0", xops);
+ }
+ }
+ else
+ for (regno = 0; regno < limit; regno++)
+ if ((regs_ever_live[regno] && ! call_used_regs[regno])
+ || (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
+ {
+ xops[0] = gen_rtx (REG, SImode, regno);
+ xops[1] = adj_offsettable_operand (AT_BP (Pmode), offset);
+ output_asm_insn (AS2 (mov%L0,%1,%0), xops);
+ offset += 4;
+ }
+
+ if (frame_pointer_needed)
+ {
+ /* On i486, mov & pop is faster than "leave". */
+
+ if (TARGET_486)
+ {
+ xops[0] = frame_pointer_rtx;
+ output_asm_insn (AS2 (mov%L2,%0,%2), xops);
+ output_asm_insn ("pop%L0 %0", xops);
+ }
+ else
+ output_asm_insn ("leave", xops);
+ }
+ else if (size)
+ {
+ /* If there is no frame pointer, we must still release the frame. */
+
+ xops[0] = GEN_INT (size);
+ output_asm_insn (AS2 (add%L2,%0,%2), xops);
+ }
+
+ if (current_function_pops_args && current_function_args_size)
+ {
+ xops[1] = GEN_INT (current_function_pops_args);
+
+      /* The immediate operand of `ret' is an unsigned 16-bit byte count,
+	 but this code conservatively limits itself to 32K.  If asked to
+	 pop more, pop the return address, do an explicit add, and jump
+	 indirectly to the caller.  */
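+
+      /* Sketch of the sequence emitted below for the large case, using
+	 reg 2 (%ecx under this port's register numbering):
+
+		popl %ecx
+		addl $N,%esp
+		jmp *%ecx
+
+	 where N is current_function_pops_args.  */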
+
+ if (current_function_pops_args >= 32768)
+ {
+ /* ??? Which register to use here? */
+ xops[0] = gen_rtx (REG, SImode, 2);
+ output_asm_insn ("pop%L0 %0", xops);
+ output_asm_insn (AS2 (add%L2,%1,%2), xops);
+ output_asm_insn ("jmp %*%0", xops);
+ }
+ else
+ output_asm_insn ("ret %1", xops);
+ }
+ else
+ output_asm_insn ("ret", xops);
+}
+
+/* Print an integer constant expression in assembler syntax. Addition
+ and subtraction are the only arithmetic that may appear in these
+ expressions. FILE is the stdio stream to write to, X is the rtx, and
+ CODE is the operand print code from the output string. */
+
+static void
+output_pic_addr_const (file, x, code)
+ FILE *file;
+ rtx x;
+ int code;
+{
+ char buf[256];
+
+ switch (GET_CODE (x))
+ {
+ case PC:
+ if (flag_pic)
+ putc ('.', file);
+ else
+ abort ();
+ break;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ if (GET_CODE (x) == SYMBOL_REF)
+ assemble_name (file, XSTR (x, 0));
+ else
+ {
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L",
+ CODE_LABEL_NUMBER (XEXP (x, 0)));
+ assemble_name (asm_out_file, buf);
+ }
+
+ if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
+ fprintf (file, "@GOTOFF(%%ebx)");
+ else if (code == 'P')
+ fprintf (file, "@PLT");
+ else if (GET_CODE (x) == LABEL_REF || ! SYMBOL_REF_FLAG (x))
+ fprintf (file, "@GOT");
+ else
+ fprintf (file, "@GOTOFF");
+
+ break;
+
+ case CODE_LABEL:
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
+ assemble_name (asm_out_file, buf);
+ break;
+
+ case CONST_INT:
+ fprintf (file, "%d", INTVAL (x));
+ break;
+
+ case CONST:
+ /* This used to output parentheses around the expression,
+ but that does not work on the 386 (either ATT or BSD assembler). */
+ output_pic_addr_const (file, XEXP (x, 0), code);
+ break;
+
+ case CONST_DOUBLE:
+ if (GET_MODE (x) == VOIDmode)
+ {
+ /* We can use %d if the number is <32 bits and positive. */
+ if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
+ fprintf (file, "0x%x%08x",
+ CONST_DOUBLE_HIGH (x), CONST_DOUBLE_LOW (x));
+ else
+ fprintf (file, "%d", CONST_DOUBLE_LOW (x));
+ }
+ else
+ /* We can't handle floating point constants;
+ PRINT_OPERAND must handle them. */
+ output_operand_lossage ("floating constant misused");
+ break;
+
+ case PLUS:
+      /* Some assemblers need integer constants to appear last (e.g. masm).  */
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT)
+ {
+ output_pic_addr_const (file, XEXP (x, 1), code);
+ if (INTVAL (XEXP (x, 0)) >= 0)
+ fprintf (file, "+");
+ output_pic_addr_const (file, XEXP (x, 0), code);
+ }
+ else
+ {
+ output_pic_addr_const (file, XEXP (x, 0), code);
+ if (INTVAL (XEXP (x, 1)) >= 0)
+ fprintf (file, "+");
+ output_pic_addr_const (file, XEXP (x, 1), code);
+ }
+ break;
+
+ case MINUS:
+ output_pic_addr_const (file, XEXP (x, 0), code);
+ fprintf (file, "-");
+ output_pic_addr_const (file, XEXP (x, 1), code);
+ break;
+
+ default:
+ output_operand_lossage ("invalid expression as operand");
+ }
+}
+
+/* Meaning of CODE:
+ f -- float insn (print a CONST_DOUBLE as a float rather than in hex).
+ D,L,W,B,Q,S -- print the opcode suffix for specified size of operand.
+ R -- print the prefix for register names.
+ z -- print the opcode suffix for the size of the current operand.
+ * -- print a star (in certain assembler syntax)
+ w -- print the operand as if it's a "word" (HImode) even if it isn't.
+ c -- don't print special prefixes before constant operands.
+*/
+
+void
+print_operand (file, x, code)
+ FILE *file;
+ rtx x;
+ int code;
+{
+ if (code)
+ {
+ switch (code)
+ {
+ case '*':
+ if (USE_STAR)
+ putc ('*', file);
+ return;
+
+ case 'L':
+ PUT_OP_SIZE (code, 'l', file);
+ return;
+
+ case 'W':
+ PUT_OP_SIZE (code, 'w', file);
+ return;
+
+ case 'B':
+ PUT_OP_SIZE (code, 'b', file);
+ return;
+
+ case 'Q':
+ PUT_OP_SIZE (code, 'l', file);
+ return;
+
+ case 'S':
+ PUT_OP_SIZE (code, 's', file);
+ return;
+
+ case 'T':
+ PUT_OP_SIZE (code, 't', file);
+ return;
+
+ case 'z':
+ /* 387 opcodes don't get size suffixes if the operands are
+ registers. */
+
+ if (STACK_REG_P (x))
+ return;
+
+	  /* Derive the opcode suffix from the size of the operand.  */
+ switch (GET_MODE_SIZE (GET_MODE (x)))
+ {
+ case 1:
+ PUT_OP_SIZE ('B', 'b', file);
+ return;
+
+ case 2:
+ PUT_OP_SIZE ('W', 'w', file);
+ return;
+
+ case 4:
+ if (GET_MODE (x) == SFmode)
+ {
+ PUT_OP_SIZE ('S', 's', file);
+ return;
+ }
+ else
+ PUT_OP_SIZE ('L', 'l', file);
+ return;
+
+ case 12:
+ PUT_OP_SIZE ('T', 't', file);
+ return;
+
+ case 8:
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+ {
+#ifdef GAS_MNEMONICS
+ PUT_OP_SIZE ('Q', 'q', file);
+ return;
+#else
+	      /* Fall through and emit a second `l': without GAS
+		 mnemonics the 64-bit integer suffix is written `ll'
+		 (e.g. `fildll' rather than `fildq').  */
+	      PUT_OP_SIZE ('Q', 'l', file);
+#endif
+ }
+
+ PUT_OP_SIZE ('Q', 'l', file);
+ return;
+ }
+
+ case 'b':
+ case 'w':
+ case 'k':
+ case 'h':
+ case 'y':
+ case 'P':
+ break;
+
+ default:
+ {
+ char str[50];
+
+ sprintf (str, "invalid operand code `%c'", code);
+ output_operand_lossage (str);
+ }
+ }
+ }
+ if (GET_CODE (x) == REG)
+ {
+ PRINT_REG (x, code, file);
+ }
+ else if (GET_CODE (x) == MEM)
+ {
+ PRINT_PTR (x, file);
+ if (CONSTANT_ADDRESS_P (XEXP (x, 0)))
+ {
+ if (flag_pic)
+ output_pic_addr_const (file, XEXP (x, 0), code);
+ else
+ output_addr_const (file, XEXP (x, 0));
+ }
+ else
+ output_address (XEXP (x, 0));
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
+ {
+ REAL_VALUE_TYPE r; long l;
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ REAL_VALUE_TO_TARGET_SINGLE (r, l);
+ PRINT_IMMED_PREFIX (file);
+ fprintf (file, "0x%x", l);
+ }
+ /* These float cases don't actually occur as immediate operands. */
+ else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
+ {
+ REAL_VALUE_TYPE r; char dstr[30];
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ REAL_VALUE_TO_DECIMAL (r, "%.22e", dstr);
+ fprintf (file, "%s", dstr);
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == XFmode)
+ {
+ REAL_VALUE_TYPE r; char dstr[30];
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ REAL_VALUE_TO_DECIMAL (r, "%.22e", dstr);
+ fprintf (file, "%s", dstr);
+ }
+ else
+ {
+ if (code != 'P')
+ {
+ if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
+ PRINT_IMMED_PREFIX (file);
+ else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
+ || GET_CODE (x) == LABEL_REF)
+ PRINT_OFFSET_PREFIX (file);
+ }
+ if (flag_pic)
+ output_pic_addr_const (file, x, code);
+ else
+ output_addr_const (file, x);
+ }
+}
+
+/* Print a memory operand whose address is ADDR. */
+
+void
+print_operand_address (file, addr)
+ FILE *file;
+ register rtx addr;
+{
+ register rtx reg1, reg2, breg, ireg;
+ rtx offset;
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ ADDR_BEG (file);
+ fprintf (file, "%se", RP);
+ fputs (hi_reg_name[REGNO (addr)], file);
+ ADDR_END (file);
+ break;
+
+ case PLUS:
+ reg1 = 0;
+ reg2 = 0;
+ ireg = 0;
+ breg = 0;
+ offset = 0;
+ if (CONSTANT_ADDRESS_P (XEXP (addr, 0)))
+ {
+ offset = XEXP (addr, 0);
+ addr = XEXP (addr, 1);
+ }
+ else if (CONSTANT_ADDRESS_P (XEXP (addr, 1)))
+ {
+ offset = XEXP (addr, 1);
+ addr = XEXP (addr, 0);
+ }
+ if (GET_CODE (addr) != PLUS) ;
+ else if (GET_CODE (XEXP (addr, 0)) == MULT)
+ {
+ reg1 = XEXP (addr, 0);
+ addr = XEXP (addr, 1);
+ }
+ else if (GET_CODE (XEXP (addr, 1)) == MULT)
+ {
+ reg1 = XEXP (addr, 1);
+ addr = XEXP (addr, 0);
+ }
+ else if (GET_CODE (XEXP (addr, 0)) == REG)
+ {
+ reg1 = XEXP (addr, 0);
+ addr = XEXP (addr, 1);
+ }
+ else if (GET_CODE (XEXP (addr, 1)) == REG)
+ {
+ reg1 = XEXP (addr, 1);
+ addr = XEXP (addr, 0);
+ }
+ if (GET_CODE (addr) == REG || GET_CODE (addr) == MULT)
+ {
+ if (reg1 == 0) reg1 = addr;
+ else reg2 = addr;
+ addr = 0;
+ }
+ if (offset != 0)
+ {
+ if (addr != 0) abort ();
+ addr = offset;
+ }
+ if ((reg1 && GET_CODE (reg1) == MULT)
+ || (reg2 != 0 && REGNO_OK_FOR_BASE_P (REGNO (reg2))))
+ {
+ breg = reg2;
+ ireg = reg1;
+ }
+ else if (reg1 != 0 && REGNO_OK_FOR_BASE_P (REGNO (reg1)))
+ {
+ breg = reg1;
+ ireg = reg2;
+ }
+
+ if (ireg != 0 || breg != 0)
+ {
+ int scale = 1;
+
+ if (addr != 0)
+ {
+ if (GET_CODE (addr) == LABEL_REF)
+ output_asm_label (addr);
+ else
+ {
+ if (flag_pic)
+ output_pic_addr_const (file, addr, 0);
+ else
+ output_addr_const (file, addr);
+ }
+ }
+
+ if (ireg != 0 && GET_CODE (ireg) == MULT)
+ {
+ scale = INTVAL (XEXP (ireg, 1));
+ ireg = XEXP (ireg, 0);
+ }
+
+ /* The stack pointer can only appear as a base register,
+ never an index register, so exchange the regs if it is wrong. */
+
+ if (scale == 1 && ireg && REGNO (ireg) == STACK_POINTER_REGNUM)
+ {
+ rtx tmp;
+
+ tmp = breg;
+ breg = ireg;
+ ireg = tmp;
+ }
+
+ /* output breg+ireg*scale */
+ PRINT_B_I_S (breg, ireg, scale, file);
+ break;
+ }
+
+ case MULT:
+ {
+ int scale;
+ if (GET_CODE (XEXP (addr, 0)) == CONST_INT)
+ {
+ scale = INTVAL (XEXP (addr, 0));
+ ireg = XEXP (addr, 1);
+ }
+ else
+ {
+ scale = INTVAL (XEXP (addr, 1));
+ ireg = XEXP (addr, 0);
+ }
+ output_addr_const (file, const0_rtx);
+ PRINT_B_I_S ((rtx) 0, ireg, scale, file);
+ }
+ break;
+
+ default:
+ if (GET_CODE (addr) == CONST_INT
+ && INTVAL (addr) < 0x8000
+ && INTVAL (addr) >= -0x8000)
+ fprintf (file, "%d", INTVAL (addr));
+ else
+ {
+ if (flag_pic)
+ output_pic_addr_const (file, addr, 0);
+ else
+ output_addr_const (file, addr);
+ }
+ }
+}
+
+/* Set the cc_status for the results of an insn whose pattern is EXP.
+ On the 80386, we assume that only test and compare insns, as well
+ as SI, HI, & DI mode ADD, SUB, NEG, AND, IOR, XOR, ASHIFT,
+ ASHIFTRT, and LSHIFTRT instructions set the condition codes usefully.
+ Also, we assume that jumps, moves and sCOND don't affect the condition
+ codes. All else clobbers the condition codes, by assumption.
+
+   We assume that ALL integer add, minus, etc. instructions affect the
+ condition codes. This MUST be consistent with i386.md.
+
+ We don't record any float test or compare - the redundant test &
+ compare check in final.c does not handle stack-like regs correctly. */
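+
+/* As a concrete illustration of this policy: after `addl %eax,%ebx' the
+   result and destination are recorded in cc_status, so the redundant
+   test & compare check in final.c can drop a following `testl %ebx,%ebx';
+   no such record is ever made for float compares, as noted above.  */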
+
+void
+notice_update_cc (exp)
+ rtx exp;
+{
+ if (GET_CODE (exp) == SET)
+ {
+ /* Jumps do not alter the cc's. */
+ if (SET_DEST (exp) == pc_rtx)
+ return;
+ /* Moving register or memory into a register:
+ it doesn't alter the cc's, but it might invalidate
+ the RTX's which we remember the cc's came from.
+ (Note that moving a constant 0 or 1 MAY set the cc's). */
+ if (REG_P (SET_DEST (exp))
+ && (REG_P (SET_SRC (exp)) || GET_CODE (SET_SRC (exp)) == MEM
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (exp))) == '<'))
+ {
+ if (cc_status.value1
+ && reg_overlap_mentioned_p (SET_DEST (exp), cc_status.value1))
+ cc_status.value1 = 0;
+ if (cc_status.value2
+ && reg_overlap_mentioned_p (SET_DEST (exp), cc_status.value2))
+ cc_status.value2 = 0;
+ return;
+ }
+ /* Moving register into memory doesn't alter the cc's.
+ It may invalidate the RTX's which we remember the cc's came from. */
+ if (GET_CODE (SET_DEST (exp)) == MEM
+ && (REG_P (SET_SRC (exp))
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (exp))) == '<'))
+ {
+ if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM)
+ cc_status.value1 = 0;
+ if (cc_status.value2 && GET_CODE (cc_status.value2) == MEM)
+ cc_status.value2 = 0;
+ return;
+ }
+ /* Function calls clobber the cc's. */
+ else if (GET_CODE (SET_SRC (exp)) == CALL)
+ {
+ CC_STATUS_INIT;
+ return;
+ }
+ /* Tests and compares set the cc's in predictable ways. */
+ else if (SET_DEST (exp) == cc0_rtx)
+ {
+ CC_STATUS_INIT;
+ cc_status.value1 = SET_SRC (exp);
+ return;
+ }
+      /* Certain instructions affect the condition codes.  */
+ else if (GET_MODE (SET_SRC (exp)) == SImode
+ || GET_MODE (SET_SRC (exp)) == HImode
+ || GET_MODE (SET_SRC (exp)) == QImode)
+ switch (GET_CODE (SET_SRC (exp)))
+ {
+ case ASHIFTRT: case LSHIFTRT:
+ case ASHIFT:
+ /* Shifts on the 386 don't set the condition codes if the
+ shift count is zero. */
+ if (GET_CODE (XEXP (SET_SRC (exp), 1)) != CONST_INT)
+ {
+ CC_STATUS_INIT;
+ break;
+ }
+ /* We assume that the CONST_INT is non-zero (this rtx would
+	       have been deleted if it were zero).  */
+
+ case PLUS: case MINUS: case NEG:
+ case AND: case IOR: case XOR:
+ cc_status.flags = CC_NO_OVERFLOW;
+ cc_status.value1 = SET_SRC (exp);
+ cc_status.value2 = SET_DEST (exp);
+ break;
+
+ default:
+ CC_STATUS_INIT;
+ }
+ else
+ {
+ CC_STATUS_INIT;
+ }
+ }
+ else if (GET_CODE (exp) == PARALLEL
+ && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
+ {
+ if (SET_DEST (XVECEXP (exp, 0, 0)) == pc_rtx)
+ return;
+ if (SET_DEST (XVECEXP (exp, 0, 0)) == cc0_rtx)
+ {
+ CC_STATUS_INIT;
+ if (stack_regs_mentioned_p (SET_SRC (XVECEXP (exp, 0, 0))))
+ cc_status.flags |= CC_IN_80387;
+ else
+ cc_status.value1 = SET_SRC (XVECEXP (exp, 0, 0));
+ return;
+ }
+ CC_STATUS_INIT;
+ }
+ else
+ {
+ CC_STATUS_INIT;
+ }
+}
+
+/* Split one or more DImode RTL references into pairs of SImode
+ references. The RTL can be REG, offsettable MEM, integer constant, or
+ CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
+ split and "num" is its length. lo_half and hi_half are output arrays
+ that parallel "operands". */
+
+void
+split_di (operands, num, lo_half, hi_half)
+ rtx operands[];
+ int num;
+ rtx lo_half[], hi_half[];
+{
+ while (num--)
+ {
+ if (GET_CODE (operands[num]) == REG)
+ {
+ lo_half[num] = gen_rtx (REG, SImode, REGNO (operands[num]));
+ hi_half[num] = gen_rtx (REG, SImode, REGNO (operands[num]) + 1);
+ }
+ else if (CONSTANT_P (operands[num]))
+ {
+ split_double (operands[num], &lo_half[num], &hi_half[num]);
+ }
+ else if (offsettable_memref_p (operands[num]))
+ {
+ lo_half[num] = operands[num];
+ hi_half[num] = adj_offsettable_operand (operands[num], 4);
+ }
+ else
+ abort();
+ }
+}
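+
+/* Usage sketch: for a single DImode register operand,
+
+	rtx lo[1], hi[1];
+	split_di (operands, 1, lo, hi);
+
+   leaves (reg:SI n) in lo[0] and (reg:SI n+1) in hi[0].  */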
+
+/* Return 1 if this is a valid binary operation on a 387.
+ OP is the expression matched, and MODE is its mode. */
+
+int
+binary_387_op (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (mode != VOIDmode && mode != GET_MODE (op))
+ return 0;
+
+ switch (GET_CODE (op))
+ {
+ case PLUS:
+ case MINUS:
+ case MULT:
+ case DIV:
+ return GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT;
+
+ default:
+ return 0;
+ }
+}
+
+/* Return 1 if this is a valid conversion operation on a 387.
+ OP is the expression matched, and MODE is its mode. */
+
+int
+convert_387_op (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (mode != VOIDmode && mode != GET_MODE (op))
+ return 0;
+
+ switch (GET_CODE (op))
+ {
+ case FLOAT:
+ return GET_MODE (XEXP (op, 0)) == SImode;
+
+ case FLOAT_EXTEND:
+ return ((mode == DFmode && GET_MODE (XEXP (op, 0)) == SFmode)
+ || (mode == XFmode && GET_MODE (XEXP (op, 0)) == DFmode)
+ || (mode == XFmode && GET_MODE (XEXP (op, 0)) == SFmode));
+
+ default:
+ return 0;
+ }
+}
+
+/* Return 1 if this is a valid shift or rotate operation on a 386.
+ OP is the expression matched, and MODE is its mode. */
+
+int
+shift_op (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ rtx operand = XEXP (op, 0);
+
+ if (mode != VOIDmode && mode != GET_MODE (op))
+ return 0;
+
+ if (GET_MODE (operand) != GET_MODE (op)
+ || GET_MODE_CLASS (GET_MODE (op)) != MODE_INT)
+ return 0;
+
+ return (GET_CODE (op) == ASHIFT
+ || GET_CODE (op) == ASHIFTRT
+ || GET_CODE (op) == LSHIFTRT
+ || GET_CODE (op) == ROTATE
+ || GET_CODE (op) == ROTATERT);
+}
+
+/* Return 1 if OP is COMPARE rtx with mode VOIDmode.
+ MODE is not used. */
+
+int
+VOIDmode_compare_op (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return GET_CODE (op) == COMPARE && GET_MODE (op) == VOIDmode;
+}
+
+/* Output code to perform a 387 binary operation in INSN, one of PLUS,
+ MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
+ is the expression of the binary operation. The output may either be
+ emitted here, or returned to the caller, like all output_* functions.
+
+ There is no guarantee that the operands are the same mode, as they
+ might be within FLOAT or FLOAT_EXTEND expressions. */
+
+char *
+output_387_binary_op (insn, operands)
+ rtx insn;
+ rtx *operands;
+{
+ rtx temp;
+ char *base_op;
+ static char buf[100];
+
+ switch (GET_CODE (operands[3]))
+ {
+ case PLUS:
+ if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
+ || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
+ base_op = "fiadd";
+ else
+ base_op = "fadd";
+ break;
+
+ case MINUS:
+ if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
+ || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
+ base_op = "fisub";
+ else
+ base_op = "fsub";
+ break;
+
+ case MULT:
+ if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
+ || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
+ base_op = "fimul";
+ else
+ base_op = "fmul";
+ break;
+
+ case DIV:
+ if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
+ || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
+ base_op = "fidiv";
+ else
+ base_op = "fdiv";
+ break;
+
+ default:
+ abort ();
+ }
+
+ strcpy (buf, base_op);
+
+ switch (GET_CODE (operands[3]))
+ {
+ case MULT:
+ case PLUS:
+ if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
+ {
+ temp = operands[2];
+ operands[2] = operands[1];
+ operands[1] = temp;
+ }
+
+ if (GET_CODE (operands[2]) == MEM)
+ return strcat (buf, AS1 (%z2,%2));
+
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], strcat (buf, AS1 (%z0,%1)));
+ RET;
+ }
+ else if (NON_STACK_REG_P (operands[2]))
+ {
+ output_op_from_reg (operands[2], strcat (buf, AS1 (%z0,%1)));
+ RET;
+ }
+
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
+ return strcat (buf, AS2 (p,%2,%0));
+
+ if (STACK_TOP_P (operands[0]))
+ return strcat (buf, AS2 (,%y2,%0));
+ else
+ return strcat (buf, AS2 (,%2,%0));
+
+ case MINUS:
+ case DIV:
+ if (GET_CODE (operands[1]) == MEM)
+ return strcat (buf, AS1 (r%z1,%1));
+
+ if (GET_CODE (operands[2]) == MEM)
+ return strcat (buf, AS1 (%z2,%2));
+
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], strcat (buf, AS1 (r%z0,%1)));
+ RET;
+ }
+ else if (NON_STACK_REG_P (operands[2]))
+ {
+ output_op_from_reg (operands[2], strcat (buf, AS1 (%z0,%1)));
+ RET;
+ }
+
+ if (! STACK_REG_P (operands[1]) || ! STACK_REG_P (operands[2]))
+ abort ();
+
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
+ return strcat (buf, AS2 (rp,%2,%0));
+
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return strcat (buf, AS2 (p,%1,%0));
+
+ if (STACK_TOP_P (operands[0]))
+ {
+ if (STACK_TOP_P (operands[1]))
+ return strcat (buf, AS2 (,%y2,%0));
+ else
+ return strcat (buf, AS2 (r,%y1,%0));
+ }
+ else if (STACK_TOP_P (operands[1]))
+ return strcat (buf, AS2 (,%1,%0));
+ else
+ return strcat (buf, AS2 (r,%2,%0));
+
+ default:
+ abort ();
+ }
+}
+
+/* Output code for INSN to convert a float to a signed int. OPERANDS
+   are the insn operands.  The input may be SFmode or DFmode and the
+   output operand may be SImode or DImode.  As a special case, make sure
+ that the 387 stack top dies if the output mode is DImode, because the
+ hardware requires this. */
+
+char *
+output_fix_trunc (insn, operands)
+ rtx insn;
+ rtx *operands;
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+ rtx xops[2];
+
+ if (! STACK_TOP_P (operands[1]) ||
+ (GET_MODE (operands[0]) == DImode && ! stack_top_dies))
+ abort ();
+
+ xops[0] = GEN_INT (12);
+ xops[1] = operands[4];
+
+ output_asm_insn (AS1 (fnstc%W2,%2), operands);
+ output_asm_insn (AS2 (mov%L2,%2,%4), operands);
+ output_asm_insn (AS2 (mov%B1,%0,%h1), xops);
+ output_asm_insn (AS2 (mov%L4,%4,%3), operands);
+ output_asm_insn (AS1 (fldc%W3,%3), operands);
+
+ if (NON_STACK_REG_P (operands[0]))
+ output_to_reg (operands[0], stack_top_dies);
+ else if (GET_CODE (operands[0]) == MEM)
+ {
+ if (stack_top_dies)
+ output_asm_insn (AS1 (fistp%z0,%0), operands);
+ else
+ output_asm_insn (AS1 (fist%z0,%0), operands);
+ }
+ else
+ abort ();
+
+ return AS1 (fldc%W2,%2);
+}
+
+/* Output code for INSN to compare OPERANDS. The two operands might
+ not have the same mode: one might be within a FLOAT or FLOAT_EXTEND
+ expression. If the compare is in mode CCFPEQmode, use an opcode that
+ will not fault if a qNaN is present. */
+
+char *
+output_float_compare (insn, operands)
+ rtx insn;
+ rtx *operands;
+{
+ int stack_top_dies;
+ rtx body = XVECEXP (PATTERN (insn), 0, 0);
+ int unordered_compare = GET_MODE (SET_SRC (body)) == CCFPEQmode;
+
+ if (! STACK_TOP_P (operands[0]))
+ abort ();
+
+ stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (STACK_REG_P (operands[1])
+ && stack_top_dies
+ && find_regno_note (insn, REG_DEAD, REGNO (operands[1]))
+ && REGNO (operands[1]) != FIRST_STACK_REG)
+ {
+	/* If the top of the 387 stack dies, and the other operand is
+	   also a stack register that dies, then this must be an
+	   `fcompp' float compare.  */
+
+ if (unordered_compare)
+ output_asm_insn ("fucompp", operands);
+ else
+ output_asm_insn ("fcompp", operands);
+ }
+ else
+ {
+ static char buf[100];
+
+ /* Decide if this is the integer or float compare opcode, or the
+ unordered float compare. */
+
+ if (unordered_compare)
+ strcpy (buf, "fucom");
+ else if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_FLOAT)
+ strcpy (buf, "fcom");
+ else
+ strcpy (buf, "ficom");
+
+ /* Modify the opcode if the 387 stack is to be popped. */
+
+ if (stack_top_dies)
+ strcat (buf, "p");
+
+ if (NON_STACK_REG_P (operands[1]))
+ output_op_from_reg (operands[1], strcat (buf, AS1 (%z0,%1)));
+ else
+ output_asm_insn (strcat (buf, AS1 (%z1,%y1)), operands);
+ }
+
+ /* Now retrieve the condition code. */
+
+ return output_fp_cc0_set (insn);
+}
+
+/* Output opcodes to transfer the results of FP compare or test INSN
+ from the FPU to the CPU flags. If TARGET_IEEE_FP, ensure that if the
+ result of the compare or test is unordered, no comparison operator
+ succeeds except NE. Return an output template, if any. */
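+
+/* Background for the masks used below (80387 behavior, summarized here
+   for the reader): after `fnstsw %ax' the condition bits appear in %ah
+   as C0 = 0x01, C1 = 0x02, C2 = 0x04 and C3 = 0x40, and an unordered
+   result sets C0, C2 and C3 all at once.  Hence 0x45 selects C3|C2|C0,
+   0x05 selects C2|C0, 0x44 selects C3|C2, and 0x40 selects C3 alone.  */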
+
+char *
+output_fp_cc0_set (insn)
+ rtx insn;
+{
+ rtx xops[3];
+ rtx unordered_label;
+ rtx next;
+ enum rtx_code code;
+
+ xops[0] = gen_rtx (REG, HImode, 0);
+ output_asm_insn (AS1 (fnsts%W0,%0), xops);
+
+ if (! TARGET_IEEE_FP)
+ return "sahf";
+
+ next = next_cc0_user (insn);
+ if (next == NULL_RTX)
+ abort ();
+
+ if (GET_CODE (next) == JUMP_INSN
+ && GET_CODE (PATTERN (next)) == SET
+ && SET_DEST (PATTERN (next)) == pc_rtx
+ && GET_CODE (SET_SRC (PATTERN (next))) == IF_THEN_ELSE)
+ {
+ code = GET_CODE (XEXP (SET_SRC (PATTERN (next)), 0));
+ }
+ else if (GET_CODE (PATTERN (next)) == SET)
+ {
+ code = GET_CODE (SET_SRC (PATTERN (next)));
+ }
+ else
+ abort ();
+
+ xops[0] = gen_rtx (REG, QImode, 0);
+
+ switch (code)
+ {
+ case GT:
+ xops[1] = GEN_INT (0x45);
+ output_asm_insn (AS2 (and%B0,%1,%h0), xops);
+ /* je label */
+ break;
+
+ case LT:
+ xops[1] = GEN_INT (0x45);
+ xops[2] = GEN_INT (0x01);
+ output_asm_insn (AS2 (and%B0,%1,%h0), xops);
+ output_asm_insn (AS2 (cmp%B0,%2,%h0), xops);
+ /* je label */
+ break;
+
+ case GE:
+ xops[1] = GEN_INT (0x05);
+ output_asm_insn (AS2 (and%B0,%1,%h0), xops);
+ /* je label */
+ break;
+
+ case LE:
+ xops[1] = GEN_INT (0x45);
+ xops[2] = GEN_INT (0x40);
+ output_asm_insn (AS2 (and%B0,%1,%h0), xops);
+ output_asm_insn (AS1 (dec%B0,%h0), xops);
+ output_asm_insn (AS2 (cmp%B0,%2,%h0), xops);
+ /* jb label */
+ break;
+
+ case EQ:
+ xops[1] = GEN_INT (0x45);
+ xops[2] = GEN_INT (0x40);
+ output_asm_insn (AS2 (and%B0,%1,%h0), xops);
+ output_asm_insn (AS2 (cmp%B0,%2,%h0), xops);
+ /* je label */
+ break;
+
+ case NE:
+ xops[1] = GEN_INT (0x44);
+ xops[2] = GEN_INT (0x40);
+ output_asm_insn (AS2 (and%B0,%1,%h0), xops);
+ output_asm_insn (AS2 (xor%B0,%2,%h0), xops);
+ /* jne label */
+ break;
+
+ case GTU:
+ case LTU:
+ case GEU:
+ case LEU:
+ default:
+ abort ();
+ }
+ RET;
+}
+
+#define MAX_386_STACK_LOCALS 2
+
+static rtx i386_stack_locals[(int) MAX_MACHINE_MODE][MAX_386_STACK_LOCALS];
+
+/* Define the structure for the machine field in struct function. */
+struct machine_function
+{
+ rtx i386_stack_locals[(int) MAX_MACHINE_MODE][MAX_386_STACK_LOCALS];
+};
+
+/* Functions to save and restore i386_stack_locals.
+ These will be called, via pointer variables,
+ from push_function_context and pop_function_context. */
+
+void
+save_386_machine_status (p)
+ struct function *p;
+{
+ p->machine = (struct machine_function *) xmalloc (sizeof i386_stack_locals);
+ bcopy (i386_stack_locals, p->machine->i386_stack_locals,
+ sizeof i386_stack_locals);
+}
+
+void
+restore_386_machine_status (p)
+ struct function *p;
+{
+ bcopy (p->machine->i386_stack_locals, i386_stack_locals,
+ sizeof i386_stack_locals);
+ free (p->machine);
+}
+
+/* Clear stack slot assignments remembered from previous functions.
+ This is called from INIT_EXPANDERS once before RTL is emitted for each
+ function. */
+
+void
+clear_386_stack_locals ()
+{
+ enum machine_mode mode;
+ int n;
+
+ for (mode = VOIDmode; (int) mode < (int) MAX_MACHINE_MODE;
+ mode = (enum machine_mode) ((int) mode + 1))
+ for (n = 0; n < MAX_386_STACK_LOCALS; n++)
+ i386_stack_locals[(int) mode][n] = NULL_RTX;
+
+ /* Arrange to save and restore i386_stack_locals around nested functions. */
+ save_machine_status = save_386_machine_status;
+ restore_machine_status = restore_386_machine_status;
+}
+
+/* Return a MEM corresponding to a stack slot with mode MODE.
+ Allocate a new slot if necessary.
+
+ The RTL for a function can have several slots available: N is
+ which slot to use. */
+
+rtx
+assign_386_stack_local (mode, n)
+ enum machine_mode mode;
+ int n;
+{
+ if (n < 0 || n >= MAX_386_STACK_LOCALS)
+ abort ();
+
+ if (i386_stack_locals[(int) mode][n] == NULL_RTX)
+ i386_stack_locals[(int) mode][n]
+ = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
+
+ return i386_stack_locals[(int) mode][n];
+}
diff --git a/gnu/usr.bin/cc/cc_int/bc-emit.c b/gnu/usr.bin/cc/cc_int/bc-emit.c
new file mode 100644
index 0000000..b5d0c6c
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/bc-emit.c
@@ -0,0 +1,991 @@
+/* Output bytecodes for GNU C-compiler.
+ Copyright (C) 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#ifdef __STDC__
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include "machmode.h"
+#include "rtl.h"
+#include "real.h"
+#include "obstack.h"
+#include "bytecode.h"
+#ifdef __GNUC__
+#include "bytetypes.h"
+#endif
+#include "bc-emit.h"
+#include "bc-opcode.h"
+#include "bc-typecd.h"
+#include "bi-run.h"
+
+#include <stdio.h>
+
+extern char *xmalloc (), *xrealloc ();
+extern void free ();
+
+extern struct obstack *rtl_obstack;
+
+/* Indexed by mode class, gives the narrowest mode for each class. */
+
+extern enum machine_mode class_narrowest_mode[(int) MAX_MODE_CLASS];
+
+/* Commonly used modes. */
+/* Mode whose width is BITS_PER_UNIT */
+extern enum machine_mode byte_mode;
+
+/* Mode whose width is BITS_PER_WORD */
+extern enum machine_mode word_mode;
+
+/* Vector indexed by opcode giving info about the args for each opcode. */
+static struct arityvec arityvec[] = {
+#include "bc-arity.h"
+};
+
+/* How to print a symbol name for the assembler. */
+static void
+prsym (file, s)
+ FILE *file;
+ char *s;
+{
+ if (*s == '*')
+ fprintf (file, "%s", s + 1);
+ else
+
+#ifdef NAMES_HAVE_UNDERSCORES
+ fprintf (file, "_%s", s);
+#else
+ fprintf (file, "%s", s);
+#endif
+
+}
+
+/* Maintain a bucket hash table for symbol names. */
+
+#define HASH_BITS 32
+#define HASH_SIZE 509
+
+static struct bc_sym *hashtab[HASH_SIZE];
+
+static unsigned int
+hash (name)
+ char *name;
+{
+ unsigned int hash = 0;
+
+ while (*name)
+ {
+      hash = (hash << 3) | (hash >> (HASH_BITS - 3));
+ hash += *name++;
+ }
+
+ return hash % HASH_SIZE;
+}
+
+
+/* Look up the named symbol, creating it if it doesn't exist. */
+struct bc_sym *
+sym_lookup (name)
+ char *name;
+{
+ int i;
+ struct bc_sym *s;
+
+ i = hash (name);
+ for (s = hashtab[i]; s; s = s->next)
+ if (!strcmp (s->name, name))
+ return s;
+
+ s = (struct bc_sym *) xmalloc (sizeof (struct bc_sym));
+ s->name = xmalloc (strlen (name) + 1);
+ strcpy (s->name, name);
+ s->defined = s->global = s->common = 0;
+ s->val = 0;
+ s->next = hashtab[i];
+ hashtab[i] = s;
+ return s;
+}
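+
+/* Usage sketch: repeated lookups of one name return the same node, so
+
+	struct bc_sym *s = sym_lookup ("main");
+	s->global = 1;
+
+   marks `main' global no matter how often it was referenced before.  */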
+
+
+/* Write out .globl and common symbols to the named file. */
+static void
+bc_sym_write (file)
+ FILE *file;
+{
+ int i;
+ struct bc_sym *s;
+
+ for (i = 0; i < HASH_SIZE; ++i)
+ for (s = hashtab[i]; s; s = s->next)
+ {
+ if (s->global)
+ {
+ fprintf (file, "\n\t.globl ");
+ prsym (file, s->name);
+ putc ('\n', file);
+ if (s->common)
+ {
+ fprintf (file, "\n\t.comm ");
+ prsym (file, s->name);
+ fprintf (file, ", %d\n", s->val);
+ }
+ }
+ else if (s->common)
+ {
+ fprintf (file, "\n\t.lcomm ");
+ prsym (file, s->name);
+ fprintf (file, ", %d\n", s->val);
+ }
+ }
+}
+
+
+
+
+/* Create and initialize a new segment. */
+static struct bc_seg *
+seg_create ()
+{
+ struct bc_seg *result;
+
+ result = (struct bc_seg *) xmalloc (sizeof (struct bc_seg));
+ result->alloc = 256;
+ result->data = xmalloc (result->alloc);
+ result->size = 0;
+ result->syms = 0;
+ result->relocs = 0;
+ return result;
+}
+
+
+/* Advance the segment index to the next alignment boundary. */
+static void
+seg_align (seg, log)
+ struct bc_seg *seg;
+ int log;
+{
+ unsigned int oldsize = seg->size;
+
+  seg->size = (seg->size + (1 << log) - 1) & ~((1 << log) - 1);
+ if (seg->size > seg->alloc)
+ {
+ while (seg->size > seg->alloc)
+ seg->alloc *= 2;
+ seg->data = xrealloc (seg->data, seg->alloc);
+ }
+ bzero (seg->data + oldsize, seg->size - oldsize);
+}
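+
+/* Worked example: with log == 3 (8-byte alignment), a segment of size 13
+   grows to (13 + 7) & ~7 == 16, and the three new pad bytes are zeroed
+   by the bzero above.  */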
+
+
+/* Append the given data to the given segment. */
+static void
+seg_data (seg, data, size)
+ struct bc_seg *seg;
+ char *data;
+ unsigned int size;
+{
+ if (seg->size + size > seg->alloc)
+ {
+ while (seg->size + size > seg->alloc)
+ seg->alloc *= 2;
+ seg->data = xrealloc (seg->data, seg->alloc);
+ }
+
+ bcopy (data, seg->data + seg->size, size);
+ seg->size += size;
+}
+
+
+/* Append a zero-filled skip to the given segment. */
+static void
+seg_skip (seg, size)
+ struct bc_seg *seg;
+ unsigned int size;
+{
+ if (seg->size + size > seg->alloc)
+ {
+ while (seg->size + size > seg->alloc)
+ seg->alloc *= 2;
+ seg->data = xrealloc (seg->data, seg->alloc);
+ }
+
+ memset (seg->data + seg->size, 0, size);
+ seg->size += size;
+}
+
+
+/* Define the given name as the current offset in the given segment. It
+ is an error if the name is already defined. Return 0 or 1 indicating
+ failure or success respectively. */
+static int
+seg_defsym (seg, name)
+ struct bc_seg *seg;
+ char *name;
+{
+ struct bc_sym *sym;
+ struct bc_segsym *segsym;
+
+ sym = sym_lookup (name);
+ if (sym->defined)
+ return 0;
+
+ sym->defined = 1;
+ sym->val = seg->size;
+ segsym = (struct bc_segsym *) xmalloc (sizeof (struct bc_segsym));
+ segsym->sym = sym;
+ segsym->next = seg->syms;
+ seg->syms = segsym;
+ return 1;
+}
+
+
+/* Generate in seg's data a reference to the given sym, adjusted by
+ the given offset. */
+static void
+seg_refsym (seg, name, offset)
+ struct bc_seg *seg;
+ char *name;
+ int offset;
+{
+ struct bc_sym *sym;
+ struct bc_segreloc *segreloc;
+
+ sym = sym_lookup (name);
+ segreloc = (struct bc_segreloc *) xmalloc (sizeof (struct bc_segreloc));
+ segreloc->offset = seg->size;
+ segreloc->sym = sym;
+ segreloc->next = seg->relocs;
+ seg->relocs = segreloc;
+ seg_data (seg, (char *) &offset, sizeof offset);
+}
+
+
+/* Concatenate the contents of given segments into the first argument. */
+static void
+seg_concat (result, seg)
+ struct bc_seg *result, *seg;
+{
+ unsigned int fix;
+ struct bc_segsym *segsym;
+ struct bc_segreloc *segreloc;
+
+ seg_align (result, MACHINE_SEG_ALIGN);
+ fix = result->size;
+ seg_data (result, seg->data, seg->size);
+ free (seg->data);
+
+ /* Go through the symbols and relocs of SEG, adjusting their offsets
+ for their new location in RESULT. */
+ if (seg->syms)
+ {
+ segsym = seg->syms;
+ do
+ segsym->sym->val += fix;
+ while (segsym->next && (segsym = segsym->next));
+ segsym->next = result->syms;
+ result->syms = seg->syms;
+ }
+ if (seg->relocs)
+ {
+ segreloc = seg->relocs;
+ do
+ segreloc->offset += fix;
+ while (segreloc->next && (segreloc = segreloc->next));
+ segreloc->next = result->relocs;
+ result->relocs = seg->relocs;
+ }
+
+ free ((char *) seg);
+}
+
+/* Write a segment to a file. */
+static void
+bc_seg_write (seg, file)
+ struct bc_seg *seg;
+ FILE *file;
+{
+ struct bc_segsym *segsym, *nsegsym, *psegsym;
+ struct bc_segreloc *segreloc, *nsegreloc, *psegreloc;
+ int i, offset, flag;
+
+ /* Reverse the list of symbols. */
+ for (psegsym = 0, segsym = seg->syms; segsym; segsym = nsegsym)
+ {
+ nsegsym = segsym->next;
+ segsym->next = psegsym;
+ psegsym = segsym;
+ }
+ seg->syms = psegsym;
+
+ /* Reverse the list of relocs. */
+ for (psegreloc = 0, segreloc = seg->relocs; segreloc; segreloc = nsegreloc)
+ {
+ nsegreloc = segreloc->next;
+ segreloc->next = psegreloc;
+ psegreloc = segreloc;
+ }
+ seg->relocs = psegreloc;
+
+ /* Output each byte of the segment. */
+ for (i = 0, segsym = seg->syms, segreloc = seg->relocs; i < seg->size; ++i)
+ {
+ while (segsym && segsym->sym->val == i)
+ {
+ if (i % 8 != 0)
+ putc ('\n', file);
+
+ BC_WRITE_SEGSYM (segsym, file);
+ segsym = segsym->next;
+ flag = 1;
+ }
+ if (segreloc && segreloc->offset == i)
+ {
+ if (i % 8 != 0)
+ putc ('\n', file);
+
+ bcopy (seg->data + i, (char *) &offset, sizeof (int));
+ i += sizeof (int) - 1;
+
+ BC_WRITE_RELOC_ENTRY (segreloc, file, offset);
+ segreloc = segreloc->next;
+ flag = 1;
+ }
+ else
+ {
+ if (i % 8 == 0 || flag)
+ BC_START_BYTECODE_LINE (file);
+
+ BC_WRITE_BYTECODE (i % 8 == 0 || flag ? ' ' : ',',
+ seg->data[i] & 0xFF,
+ file);
+ flag = 0;
+ if (i % 8 == 7)
+ putc ('\n', file);
+ }
+ }
+
+ /* Paranoia check--we should have visited all syms and relocs during
+ the output pass. */
+
+ if (segsym || segreloc)
+ abort ();
+}
+
+
+
+/* Text and data segments of the object file in the making.  */
+static struct bc_seg *bc_text_seg;
+static struct bc_seg *bc_data_seg;
+
+/* Called before anything else in this module. */
+void
+bc_initialize ()
+{
+ int min_class_size[(int) MAX_MODE_CLASS];
+ enum machine_mode mode;
+ int i;
+
+ bc_init_mode_to_code_map ();
+
+ bc_text_seg = seg_create ();
+ bc_data_seg = seg_create ();
+
+ dconst0 = REAL_VALUE_ATOF ("0", DFmode);
+ dconst1 = REAL_VALUE_ATOF ("1", DFmode);
+ dconst2 = REAL_VALUE_ATOF ("2", DFmode);
+ dconstm1 = REAL_VALUE_ATOF ("-1", DFmode);
+
+ /* Find the narrowest mode for each class and compute the word and byte
+ modes. */
+
+ for (i = 0; i < (int) MAX_MODE_CLASS; i++)
+ min_class_size[i] = 1000;
+
+ for (mode = VOIDmode; (int) mode < (int) MAX_MACHINE_MODE;
+ mode = (enum machine_mode) ((int) mode + 1))
+ {
+ if (GET_MODE_SIZE (mode) < min_class_size[(int) GET_MODE_CLASS (mode)])
+ {
+ class_narrowest_mode[(int) GET_MODE_CLASS (mode)] = mode;
+ min_class_size[(int) GET_MODE_CLASS (mode)] = GET_MODE_SIZE (mode);
+ }
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_BITSIZE (mode) == BITS_PER_UNIT)
+ byte_mode = mode;
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_BITSIZE (mode) == BITS_PER_WORD)
+ word_mode = mode;
+ }
+}
+
+
+/* External addresses referenced in a function.  Rather than trying to
+   work relocatable addresses directly into bytecoded functions (which
+   would require us to provide hairy location info and possibly obey
+   alignment rules imposed by the architecture) we build an auxiliary
+   table of pointer constants, and encode just offsets into this table
+   into the actual bytecode.  */
+static struct bc_seg *ptrconsts;
+
+/* Trampoline code for the function entry. */
+struct bc_seg *trampoline;
+
+/* Actual byte code of the function. */
+struct bc_seg *bytecode;
+
+/* List of labels defined in the function. */
+struct bc_label *labels;
+
+/* List of label references in the function. */
+struct bc_labelref *labelrefs;
+
+
+/* Add symbol to pointer table. Return offset into table where
+ pointer was stored. The offset usually goes into the bytecode
+ stream as a constP literal. */
+int
+bc_define_pointer (p)
+ char *p;
+{
+ int offset = ptrconsts->size;
+
+ seg_refsym (ptrconsts, p, 0);
+ return offset;
+}
+
+
+/* Begin a bytecoded function. */
+int
+bc_begin_function (name)
+ char *name;
+{
+ ptrconsts = seg_create ();
+ trampoline = seg_create ();
+ bytecode = seg_create ();
+ return seg_defsym (trampoline, name);
+}
+
+
+/* Force alignment in inline bytecode. */
+void
+bc_align_bytecode (align)
+ int align;
+{
+ seg_align (bytecode, align);
+}
+
+
+/* Emit data inline into bytecode. */
+void
+bc_emit_bytecode_const (data, size)
+ char *data;
+ unsigned int size;
+{
+ if (bytecode)
+ seg_data (bytecode, data, size);
+}
+
+
+/* Create a new "bytecode label", to have its value defined later.
+ Bytecode labels have nothing to do with the object file symbol table,
+ and are purely local to a given bytecoded function. */
+struct bc_label *
+bc_get_bytecode_label ()
+{
+ struct bc_label *result;
+
+ result = (struct bc_label *) xmalloc (sizeof (struct bc_label));
+ result->defined = 0;
+ result->next = labels;
+ result->uid = 0;
+ labels = result;
+ return result;
+}
+
+
+/* Define the given label with the current location counter. */
+int
+bc_emit_bytecode_labeldef (label)
+ struct bc_label *label;
+{
+ extern int bc_new_uid ();
+
+ if (!label || label->defined)
+ return 0;
+
+ label->offset = bytecode->size;
+ label->defined = 1;
+ label->uid = bc_new_uid ();
+
+#ifdef DEBUG_PRINT_CODE
+ fprintf (stderr, "$%lx:\n", label);
+#endif
+
+ return 1;
+}
+
+
+/* Generate a location-relative reference to the given bytecode label.
+ It need not be defined yet; label references will be backpatched later. */
+void
+bc_emit_bytecode_labelref (label)
+ struct bc_label *label;
+{
+ struct bc_labelref *labelref;
+ static int zero;
+
+ labelref = (struct bc_labelref *) xmalloc (sizeof (struct bc_labelref));
+ labelref->label = label;
+ labelref->offset = bytecode->size;
+ labelref->next = labelrefs;
+ labelrefs = labelref;
+
+#ifdef DEBUG_PRINT_CODE
+ fprintf (stderr, " $%lx", label);
+#endif
+
+ seg_data (bytecode, (char *) &zero, sizeof zero);
+}
+
+
+/* Emit a reference to an external address; generate the reference in the
+ ptrconst area, and emit an offset in the bytecode. */
+void
+bc_emit_code_labelref (name, offset)
+ char *name;
+ int offset;
+{
+ int ptroff;
+
+ ptroff = ptrconsts->size / sizeof (char *);
+ seg_data (bytecode, (char *) &ptroff, sizeof ptroff);
+ seg_refsym (ptrconsts, name, offset);
+
+#ifdef DEBUG_PRINT_CODE
+ fprintf (stderr, " [external <%x> %s]", ptroff, name);
+#endif
+}
+
+
+/* Backpatch label references in the byte code, and concatenate the bytecode
+ and pointer constant segments to the cumulative text for the object file.
+ Return a label name for the pointer constants region. */
+char *
+bc_end_function ()
+{
+ int addr;
+ struct bc_label *label, *next;
+ struct bc_labelref *ref, *nextref;
+ char ptrconsts_label[20];
+ static int nlab;
+
+ /* Backpatch bytecode label references. */
+ for (ref = labelrefs; ref; ref = ref->next)
+ if (ref->label->defined)
+ {
+ addr = ref->label->offset;
+ bcopy ((char *) &addr, bytecode->data + ref->offset, sizeof addr);
+ }
+
+ /* Free the chains of labelrefs and labeldefs. */
+ for (ref = labelrefs; ref; ref = nextref)
+ {
+ nextref = ref->next;
+ free ((char *) ref);
+ }
+
+ for (label = labels; label; label = next)
+ {
+ next = label->next;
+ free ((char *) label);
+ }
+
+ seg_concat (trampoline, bytecode);
+ seg_align (trampoline, MACHINE_SEG_ALIGN);
+ sprintf (ptrconsts_label, "*LP%d", nlab++);
+ seg_defsym (trampoline, ptrconsts_label);
+ seg_concat (trampoline, ptrconsts);
+ seg_concat (bc_text_seg, trampoline);
+
+ labels = 0;
+ labelrefs = 0;
+ trampoline = 0;
+ bytecode = 0;
+ ptrconsts = 0;
+
+ return sym_lookup (ptrconsts_label)->name;
+}
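+
+/* After bc_end_function, the text segment for one function looks,
+   schematically, like
+
+	<trampoline> <bytecode...> <pad> LPn: <pointer constants>
+
+   and the returned name `LPn' lets the caller refer to the pointer
+   constant block.  */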
+
+/* Force alignment in const data. */
+void
+bc_align_const (align)
+ int align;
+{
+ seg_align (bc_text_seg, align);
+}
+
+/* Emit const data. */
+void
+bc_emit_const (data, size)
+ char *data;
+ unsigned int size;
+{
+ seg_data (bc_text_seg, data, size);
+}
+
+/* Emit a zero-filled constant skip. */
+void
+bc_emit_const_skip (size)
+ unsigned int size;
+{
+ seg_skip (bc_text_seg, size);
+}
+
+/* Emit a label definition in const data. */
+int
+bc_emit_const_labeldef (name)
+ char *name;
+{
+ return seg_defsym (bc_text_seg, name);
+}
+
+/* Emit a label reference in const data. */
+void
+bc_emit_const_labelref (name, offset)
+ char *name;
+ int offset;
+{
+ seg_refsym (bc_text_seg, name, offset);
+}
+
+/* Force alignment in data. */
+void
+bc_align_data (align)
+ int align;
+{
+ seg_align (bc_data_seg, align);
+}
+
+/* Emit data. */
+void
+bc_emit_data (data, size)
+ char *data;
+ unsigned int size;
+{
+ seg_data (bc_data_seg, data, size);
+}
+
+/* Emit a zero-filled data skip. */
+void
+bc_emit_data_skip (size)
+ unsigned int size;
+{
+ seg_skip (bc_data_seg, size);
+}
+
+/* Emit label definition in data. */
+int
+bc_emit_data_labeldef (name)
+ char *name;
+{
+ return seg_defsym (bc_data_seg, name);
+}
+
+/* Emit label reference in data. */
+void
+bc_emit_data_labelref (name, offset)
+ char *name;
+ int offset;
+{
+ seg_refsym (bc_data_seg, name, offset);
+}
+
+/* Emit a common block of the given name and size.  Note that when
+   the .o file is actually written, non-global "common" blocks will
+   have to be turned into space in the data section.  */
+int
+bc_emit_common (name, size)
+ char *name;
+ unsigned int size;
+{
+ struct bc_sym *sym;
+
+ sym = sym_lookup (name);
+ if (sym->defined)
+ return 0;
+
+ sym->defined = 1;
+ sym->common = 1;
+ sym->val = size;
+ return 1;
+}
+
+/* Globalize the given label. */
+void
+bc_globalize_label (name)
+ char *name;
+{
+ struct bc_sym *sym;
+
+ sym = sym_lookup (name);
+ sym->global = 1;
+}
+
+static enum { in_text, in_data } section = in_text;
+
+void
+bc_text ()
+{
+ section = in_text;
+}
+
+void
+bc_data ()
+{
+ section = in_data;
+}
+
+void
+bc_align (align)
+ int align;
+{
+ if (section == in_text)
+ bc_align_const (align);
+ else
+ bc_align_data (align);
+}
+
+void
+bc_emit (data, size)
+ char *data;
+ unsigned int size;
+{
+ if (section == in_text)
+ bc_emit_const (data, size);
+ else
+ bc_emit_data (data, size);
+}
+
+void
+bc_emit_skip (size)
+ unsigned int size;
+{
+ if (section == in_text)
+ bc_emit_const_skip (size);
+ else
+ bc_emit_data_skip (size);
+}
+
+int
+bc_emit_labeldef (name)
+ char *name;
+{
+ if (section == in_text)
+ return bc_emit_const_labeldef (name);
+ else
+ return bc_emit_data_labeldef (name);
+}
+
+void
+bc_emit_labelref (name, offset)
+ char *name;
+ int offset;
+{
+ if (section == in_text)
+ bc_emit_const_labelref (name, offset);
+ else
+ bc_emit_data_labelref (name, offset);
+}
+
+void
+bc_write_file (file)
+ FILE *file;
+{
+ BC_WRITE_FILE (file);
+}
+
+
+/* Allocate a new bytecode rtx.
+ If you supply a null BC_LABEL, we generate one. */
+
+rtx
+bc_gen_rtx (label, offset, bc_label)
+ char *label;
+ int offset;
+ struct bc_label *bc_label;
+{
+ rtx r;
+
+ if (bc_label == 0)
+ bc_label = (struct bc_label *) xmalloc (sizeof (struct bc_label));
+
+ r = gen_rtx (CODE_LABEL, VOIDmode, label, bc_label);
+ bc_label->offset = offset;
+
+ return r;
+}
+
+
+/* Print bytecode rtx */
+void
+bc_print_rtl (fp, r)
+ FILE *fp;
+ rtx r;
+{
+#if 0 /* This needs to get fixed to really work again. */
+ /* BC_WRITE_RTL has a definition
+ that doesn't even make sense for this use. */
+ BC_WRITE_RTL (r, fp);
+#endif
+}
+
+
+/* Emit a bytecode, keeping a running tally of the stack depth. */
+void
+bc_emit_bytecode (bytecode)
+ enum bytecode_opcode bytecode;
+{
+ char byte;
+ static int prev_lineno = -1;
+
+ byte = bytecode;
+
+#ifdef BCDEBUG_PRINT_CODE
+ if (lineno != prev_lineno)
+ {
+ fprintf (stderr, "<line %d>\n", lineno);
+ prev_lineno = lineno;
+ }
+
+ fputs (opcode_name[(unsigned int) bytecode], stderr);
+#endif
+
+ /* Due to errors we are often requested to output bytecodes that
+     will cause an interpreter stack underflow when executed.  Instead of
+ dumping core on such occasions, we omit the bytecode. Erroneous code
+ should not be executed, regardless. This makes life much easier, since
+ we don't have to deceive ourselves about the known stack depth. */
+
+ bc_emit_bytecode_const (&byte, 1);
+
+ if ((stack_depth -= arityvec[(int) bytecode].ninputs) >= 0)
+ {
+ if ((stack_depth += arityvec[(int) bytecode].noutputs) > max_stack_depth)
+ max_stack_depth = stack_depth;
+ }
+
+#ifdef VALIDATE_STACK_FOR_BC
+ VALIDATE_STACK_FOR_BC ();
+#endif
+}
+
+
+#ifdef BCDEBUG_PRINT_CODE
+#define PRLIT(TYPE, PTR) fprintf (stderr, " [%x]", *(TYPE *) PTR)
+#else
+#define PRLIT(X,Y)
+#endif
+
+/* Emit a complete bytecode instruction, expecting the correct number
+ of literal values in the call. First argument is the instruction, the
+ remaining arguments are literals of size HOST_WIDE_INT or smaller. */
+void
+bc_emit_instruction VPROTO((enum bytecode_opcode opcode, ...))
+{
+#ifndef __STDC__
+ enum bytecode_opcode opcode;
+#endif
+ va_list arguments;
+ int nliteral, instruction;
+
+ VA_START (arguments, opcode);
+
+#ifndef __STDC__
+ opcode = va_arg (arguments, enum bytecode_opcode);
+#endif
+
+ /* Emit instruction bytecode */
+ bc_emit_bytecode (opcode);
+ instruction = (int) opcode;
+
+ /* Loop literals and emit as bytecode constants */
+ for (nliteral = 0; nliteral < arityvec[instruction].nliterals; nliteral++)
+ {
+ switch (arityvec[instruction].literals[nliteral])
+ {
+/* This conditional is a kludge, but it's necessary
+ because TYPE might be long long. */
+#ifdef __GNUC__
+ /* Expand definitions into case statements */
+#define DEFTYPECODE(CODE, NAME, MODE, TYPE) \
+ case CODE: \
+ { \
+ TYPE temp = va_arg (arguments, TYPE); \
+ bc_emit_bytecode_const ((void *) &temp, sizeof temp); \
+ PRLIT (TYPE, &temp); } \
+ break;
+
+#include "bc-typecd.def"
+
+#undef DEFTYPECODE
+#endif /* __GNUC__ */
+
+ default:
+ abort ();
+ }
+ }
+
+#ifdef BCDEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+}
+
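+/* Illustrative example (assuming the arity table lists one SIcode
+ literal for the constSI opcode; not taken from the imported source):
+ a typical call is
+
+      bc_emit_instruction (constSI, 42);
+
+ which emits the constSI opcode byte and then the literal 42, pulled
+ back out with va_arg at the type bc-typecd.def assigns to SIcode. */
+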
+/* Emit the machine-code interface trampoline at the beginning of a byte
+ coded function. The argument is a label name of the interpreter
+ bytecode callinfo structure; the return value is a label name for
+ the beginning of the actual bytecode. */
+char *
+bc_emit_trampoline (callinfo)
+ char *callinfo;
+{
+ char mylab[20];
+ static int n;
+
+ sprintf (mylab, "*LB%d", n++);
+
+ BC_EMIT_TRAMPOLINE (trampoline, callinfo);
+
+ seg_defsym (bytecode, mylab);
+ return sym_lookup (mylab)->name;
+}
+
+
+/* Simple strdup */
+char *
+bc_xstrdup (str)
+ char *str;
+{
+ char *tmp = xmalloc (strlen (str) + 1);
+
+ strcpy (tmp, str);
+ return tmp;
+}
diff --git a/gnu/usr.bin/cc/cc_int/bc-optab.c b/gnu/usr.bin/cc/cc_int/bc-optab.c
new file mode 100644
index 0000000..b8ac57d
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/bc-optab.c
@@ -0,0 +1,788 @@
+/* Bytecode conversion definitions for GNU C-compiler.
+ Copyright (C) 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include "tree.h"
+#include "rtl.h"
+#include "machmode.h"
+#include "obstack.h"
+#include "bytecode.h"
+#include "bc-typecd.h"
+#include "bc-opcode.h"
+#include "bc-optab.h"
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+extern char *xmalloc ();
+extern void free ();
+
+/* Table relating interpreter typecodes to machine modes. */
+#define GET_TYPECODE_MODE(CODE) (typecode_mode[((int) CODE)])
+enum machine_mode typecode_mode[] = {
+#define DEFTYPECODE(CODE, NAME, MODE, TYPE) MODE,
+#include "bc-typecd.def"
+#undef DEFTYPECODE
+};
+
+/* Machine mode to type code map */
+static enum typecode signed_mode_to_code_map[MAX_MACHINE_MODE+1];
+static enum typecode unsigned_mode_to_code_map[MAX_MACHINE_MODE+1];
+
+#define GET_TYPECODE_SIZE(CODE) GET_MODE_SIZE (GET_TYPECODE_MODE (CODE))
+
+#define BIG_ARBITRARY_NUMBER 100000
+
+/* Table of recipes for conversions among scalar types, to be filled
+ in as needed at run time. */
+static struct conversion_recipe
+{
+ unsigned char *opcodes; /* Bytecodes to emit in order. */
+ int nopcodes; /* Count of bytecodes. */
+ int cost; /* A rather arbitrary cost function. */
+} conversion_recipe[NUM_TYPECODES][NUM_TYPECODES];
+
+/* Binary operator tables. */
+struct binary_operator optab_plus_expr[] = {
+ { addSI, SIcode, SIcode, SIcode },
+ { addDI, DIcode, DIcode, DIcode },
+ { addSF, SFcode, SFcode, SFcode },
+ { addDF, DFcode, DFcode, DFcode },
+ { addXF, XFcode, XFcode, XFcode },
+ { addPSI, Pcode, Pcode, SIcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_minus_expr[] = {
+ { subSI, SIcode, SIcode, SIcode },
+ { subDI, DIcode, DIcode, DIcode },
+ { subSF, SFcode, SFcode, SFcode },
+ { subDF, DFcode, DFcode, DFcode },
+ { subXF, XFcode, XFcode, XFcode },
+ { subPP, SIcode, Pcode, Pcode },
+ { -1, -1, -1, -1 },
+};
+
+/* The ordering of the tables for multiplicative operators
+ is such that unsigned operations will be preferred to signed
+ operations when one argument is unsigned. */
+
+struct binary_operator optab_mult_expr[] = {
+ { mulSU, SUcode, SUcode, SUcode },
+ { mulDU, DUcode, DUcode, DUcode },
+ { mulSI, SIcode, SIcode, SIcode },
+ { mulDI, DIcode, DIcode, DIcode },
+ { mulSF, SFcode, SFcode, SFcode },
+ { mulDF, DFcode, DFcode, DFcode },
+ { mulXF, XFcode, XFcode, XFcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_trunc_div_expr[] = {
+ { divSU, SUcode, SUcode, SUcode },
+ { divDU, DUcode, DUcode, DUcode },
+ { divSI, SIcode, SIcode, SIcode },
+ { divDI, DIcode, DIcode, DIcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_trunc_mod_expr[] = {
+ { modSU, SUcode, SUcode, SUcode },
+ { modDU, DUcode, DUcode, DUcode },
+ { modSI, SIcode, SIcode, SIcode },
+ { modDI, DIcode, DIcode, DIcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_rdiv_expr[] = {
+ { divSF, SFcode, SFcode, SFcode },
+ { divDF, DFcode, DFcode, DFcode },
+ { divXF, XFcode, XFcode, XFcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_bit_and_expr[] = {
+ { andSI, SIcode, SIcode, SIcode },
+ { andDI, DIcode, DIcode, DIcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_bit_ior_expr[] = {
+ { iorSI, SIcode, SIcode, SIcode },
+ { iorDI, DIcode, DIcode, DIcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_bit_xor_expr[] = {
+ { xorSI, SIcode, SIcode, SIcode },
+ { xorDI, DIcode, DIcode, DIcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_lshift_expr[] = {
+ { lshiftSI, SIcode, SIcode, SIcode },
+ { lshiftSU, SUcode, SUcode, SIcode },
+ { lshiftDI, DIcode, DIcode, SIcode },
+ { lshiftDU, DUcode, DUcode, SIcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_rshift_expr[] = {
+ { rshiftSI, SIcode, SIcode, SIcode },
+ { rshiftSU, SUcode, SUcode, SIcode },
+ { rshiftDI, DIcode, DIcode, SIcode },
+ { rshiftDU, DUcode, DUcode, SIcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_truth_and_expr[] = {
+ { andSI, SIcode, Tcode, Tcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_truth_or_expr[] = {
+ { iorSI, SIcode, Tcode, Tcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_lt_expr[] = {
+ { ltSI, Tcode, SIcode, SIcode },
+ { ltSU, Tcode, SUcode, SUcode },
+ { ltDI, Tcode, DIcode, DIcode },
+ { ltDU, Tcode, DUcode, DUcode },
+ { ltSF, Tcode, SFcode, SFcode },
+ { ltDF, Tcode, DFcode, DFcode },
+ { ltXF, Tcode, XFcode, XFcode },
+ { ltP, Tcode, Pcode, Pcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_le_expr[] = {
+ { leSI, Tcode, SIcode, SIcode },
+ { leSU, Tcode, SUcode, SUcode },
+ { leDI, Tcode, DIcode, DIcode },
+ { leDU, Tcode, DUcode, DUcode },
+ { leSF, Tcode, SFcode, SFcode },
+ { leDF, Tcode, DFcode, DFcode },
+ { leXF, Tcode, XFcode, XFcode },
+ { leP, Tcode, Pcode, Pcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_ge_expr[] = {
+ { geSI, Tcode, SIcode, SIcode },
+ { geSU, Tcode, SUcode, SUcode },
+ { geDI, Tcode, DIcode, DIcode },
+ { geDU, Tcode, DUcode, DUcode },
+ { geSF, Tcode, SFcode, SFcode },
+ { geDF, Tcode, DFcode, DFcode },
+ { geXF, Tcode, XFcode, XFcode },
+ { geP, Tcode, Pcode, Pcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_gt_expr[] = {
+ { gtSI, Tcode, SIcode, SIcode },
+ { gtSU, Tcode, SUcode, SUcode },
+ { gtDI, Tcode, DIcode, DIcode },
+ { gtDU, Tcode, DUcode, DUcode },
+ { gtSF, Tcode, SFcode, SFcode },
+ { gtDF, Tcode, DFcode, DFcode },
+ { gtXF, Tcode, XFcode, XFcode },
+ { gtP, Tcode, Pcode, Pcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_eq_expr[] = {
+ { eqSI, Tcode, SIcode, SIcode },
+ { eqDI, Tcode, DIcode, DIcode },
+ { eqSF, Tcode, SFcode, SFcode },
+ { eqDF, Tcode, DFcode, DFcode },
+ { eqXF, Tcode, XFcode, XFcode },
+ { eqP, Tcode, Pcode, Pcode },
+ { -1, -1, -1, -1 },
+};
+
+struct binary_operator optab_ne_expr[] = {
+ { neSI, Tcode, SIcode, SIcode },
+ { neDI, Tcode, DIcode, DIcode },
+ { neSF, Tcode, SFcode, SFcode },
+ { neDF, Tcode, DFcode, DFcode },
+ { neXF, Tcode, XFcode, XFcode },
+ { neP, Tcode, Pcode, Pcode },
+ { -1, -1, -1, -1 },
+};
+
+/* Unary operator tables. */
+struct unary_operator optab_negate_expr[] = {
+ { negSI, SIcode, SIcode },
+ { negDI, DIcode, DIcode },
+ { negSF, SFcode, SFcode },
+ { negDF, DFcode, DFcode },
+ { negXF, XFcode, XFcode },
+ { -1, -1, -1 },
+};
+
+struct unary_operator optab_bit_not_expr[] = {
+ { notSI, SIcode, SIcode },
+ { notDI, DIcode, DIcode },
+ { -1, -1, -1 },
+};
+
+struct unary_operator optab_truth_not_expr[] = {
+ { notT, SIcode, SIcode },
+ { -1, -1, -1 },
+};
+
+/* Increment operator tables. */
+struct increment_operator optab_predecrement_expr[] = {
+ { predecQI, QIcode },
+ { predecQI, QUcode },
+ { predecHI, HIcode },
+ { predecHI, HUcode },
+ { predecSI, SIcode },
+ { predecSI, SUcode },
+ { predecDI, DIcode },
+ { predecDI, DUcode },
+ { predecP, Pcode },
+ { predecSF, SFcode },
+ { predecDF, DFcode },
+ { predecXF, XFcode },
+ { -1, -1 },
+};
+
+struct increment_operator optab_preincrement_expr[] = {
+ { preincQI, QIcode },
+ { preincQI, QUcode },
+ { preincHI, HIcode },
+ { preincHI, HUcode },
+ { preincSI, SIcode },
+ { preincSI, SUcode },
+ { preincDI, DIcode },
+ { preincDI, DUcode },
+ { preincP, Pcode },
+ { preincSF, SFcode },
+ { preincDF, DFcode },
+ { preincXF, XFcode },
+ { -1, -1 },
+};
+
+struct increment_operator optab_postdecrement_expr[] = {
+ { postdecQI, QIcode },
+ { postdecQI, QUcode },
+ { postdecHI, HIcode },
+ { postdecHI, HUcode },
+ { postdecSI, SIcode },
+ { postdecSI, SUcode },
+ { postdecDI, DIcode },
+ { postdecDI, DUcode },
+ { postdecP, Pcode },
+ { postdecSF, SFcode },
+ { postdecDF, DFcode },
+ { postdecXF, XFcode },
+ { -1, -1 },
+};
+
+struct increment_operator optab_postincrement_expr[] = {
+ { postincQI, QIcode },
+ { postincQI, QUcode },
+ { postincHI, HIcode },
+ { postincHI, HUcode },
+ { postincSI, SIcode },
+ { postincSI, SUcode },
+ { postincDI, DIcode },
+ { postincDI, DUcode },
+ { postincP, Pcode },
+ { postincSF, SFcode },
+ { postincDF, DFcode },
+ { postincXF, XFcode },
+ { -1, -1 },
+};
+
+/* Table of conversions supported by the interpreter. */
+static struct conversion_info
+{
+ enum bytecode_opcode opcode; /* -1 here indicates the conversion needs no opcode. */
+ enum typecode from;
+ enum typecode to;
+ int cost; /* 1 for no-op conversions, 2 for widening conversions,
+ 4 for int/float conversions, 8 for narrowing conversions. */
+} conversion_info[] = {
+ { -1, QIcode, QUcode, 1 },
+ { -1, HIcode, HUcode, 1 },
+ { -1, SIcode, SUcode, 1 },
+ { -1, DIcode, DUcode, 1 },
+ { -1, QUcode, QIcode, 1 },
+ { -1, HUcode, HIcode, 1 },
+ { -1, SUcode, SIcode, 1 },
+ { -1, DUcode, DIcode, 1 },
+ { -1, Tcode, SIcode, 1 },
+ { convertQIHI, QIcode, HIcode, 2 },
+ { convertQUHU, QUcode, HUcode, 2 },
+ { convertQUSU, QUcode, SUcode, 2 },
+ { convertHISI, HIcode, SIcode, 2 },
+ { convertHUSU, HUcode, SUcode, 2 },
+ { convertSIDI, SIcode, DIcode, 2 },
+ { convertSUDU, SUcode, DUcode, 2 },
+ { convertSFDF, SFcode, DFcode, 2 },
+ { convertDFXF, DFcode, XFcode, 2 },
+ { convertHIQI, HIcode, QIcode, 8 },
+ { convertSIQI, SIcode, QIcode, 8 },
+ { convertSIHI, SIcode, HIcode, 8 },
+ { convertSUQU, SUcode, QUcode, 8 },
+ { convertDISI, DIcode, SIcode, 8 },
+ { convertDFSF, DFcode, SFcode, 8 },
+ { convertXFDF, XFcode, DFcode, 8 },
+ { convertPSI, Pcode, SIcode, 2 },
+ { convertSIP, SIcode, Pcode, 2 },
+ { convertSIT, SIcode, Tcode, 2 },
+ { convertDIT, DIcode, Tcode, 2 },
+ { convertSFT, SFcode, Tcode, 2 },
+ { convertDFT, DFcode, Tcode, 2 },
+ { convertXFT, XFcode, Tcode, 2 },
+ { convertQISI, QIcode, SIcode, 2 },
+ { convertPT, Pcode, Tcode, 2 },
+ { convertSISF, SIcode, SFcode, 4 },
+ { convertSIDF, SIcode, DFcode, 4 },
+ { convertSIXF, SIcode, XFcode, 4 },
+ { convertSUSF, SUcode, SFcode, 4 },
+ { convertSUDF, SUcode, DFcode, 4 },
+ { convertSUXF, SUcode, XFcode, 4 },
+ { convertDISF, DIcode, SFcode, 4 },
+ { convertDIDF, DIcode, DFcode, 4 },
+ { convertDIXF, DIcode, XFcode, 4 },
+ { convertDUSF, DUcode, SFcode, 4 },
+ { convertDUDF, DUcode, DFcode, 4 },
+ { convertDUXF, DUcode, XFcode, 4 },
+ { convertSFSI, SFcode, SIcode, 4 },
+ { convertDFSI, DFcode, SIcode, 4 },
+ { convertXFSI, XFcode, SIcode, 4 },
+ { convertSFSU, SFcode, SUcode, 4 },
+ { convertDFSU, DFcode, SUcode, 4 },
+ { convertXFSU, XFcode, SUcode, 4 },
+ { convertSFDI, SFcode, DIcode, 4 },
+ { convertDFDI, DFcode, DIcode, 4 },
+ { convertXFDI, XFcode, DIcode, 4 },
+ { convertSFDU, SFcode, DUcode, 4 },
+ { convertDFDU, DFcode, DUcode, 4 },
+ { convertXFDU, XFcode, DUcode, 4 },
+ { convertSIQI, SIcode, QIcode, 8 },
+};
+
+#define NUM_CONVERSIONS (sizeof conversion_info / sizeof (struct conversion_info))
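+
+/* For instance, the table has no direct QIcode->DFcode entry, so
+ deduce_conversion below composes convertQISI (cost 2) with
+ convertSIDF (cost 4), yielding a recipe of two opcodes with a
+ total cost of 6. */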
+
+/* List form of a conversion recipe. */
+struct conversion_list
+{
+ enum bytecode_opcode opcode;
+ enum typecode to;
+ int cost;
+ struct conversion_list *prev;
+};
+
+/* Determine if it is "reasonable" to add a given conversion to
+ a given list of conversions. The following criteria define
+ "reasonable" conversion lists:
+ * No typecode appears more than once in the sequence (no loops).
+ * At most one conversion from integer to float or vice versa is present.
+ * Either sign extensions or zero extensions may be present, but not both.
+ * No widening conversions occur after a signed/unsigned conversion.
+ * The sequence of sizes must be strictly nonincreasing or strictly nondecreasing. */
+static int
+conversion_reasonable_p (conversion, list)
+ struct conversion_info *conversion;
+ struct conversion_list *list;
+{
+ struct conversion_list *curr;
+ int curr_size, prev_size;
+ int has_int_float, has_float_int;
+ int has_sign_extend, has_zero_extend;
+ int has_signed_unsigned, has_unsigned_signed;
+
+ has_int_float = 0;
+ has_float_int = 0;
+ has_sign_extend = 0;
+ has_zero_extend = 0;
+ has_signed_unsigned = 0;
+ has_unsigned_signed = 0;
+
+ /* Make sure the destination typecode doesn't already appear in
+ the list. */
+ for (curr = list; curr; curr = curr->prev)
+ if (conversion->to == curr->to)
+ return 0;
+
+ /* Check for certain kinds of conversions. */
+ if (TYPECODE_INTEGER_P (conversion->from)
+ && TYPECODE_FLOAT_P (conversion->to))
+ has_int_float = 1;
+ if (TYPECODE_FLOAT_P (conversion->from)
+ && TYPECODE_INTEGER_P (conversion->to))
+ has_float_int = 1;
+ if (TYPECODE_SIGNED_P (conversion->from)
+ && TYPECODE_SIGNED_P (conversion->to)
+ && GET_TYPECODE_SIZE (conversion->from)
+ < GET_TYPECODE_SIZE (conversion->to))
+ has_sign_extend = 1;
+ if (TYPECODE_UNSIGNED_P (conversion->from)
+ && TYPECODE_UNSIGNED_P (conversion->to)
+ && GET_TYPECODE_SIZE (conversion->from)
+ < GET_TYPECODE_SIZE (conversion->to))
+ has_zero_extend = 1;
+
+ for (curr = list; curr && curr->prev; curr = curr->prev)
+ {
+ if (TYPECODE_INTEGER_P (curr->prev->to)
+ && TYPECODE_FLOAT_P (curr->to))
+ has_int_float = 1;
+ if (TYPECODE_FLOAT_P (curr->prev->to)
+ && TYPECODE_INTEGER_P (curr->to))
+ has_float_int = 1;
+ if (TYPECODE_SIGNED_P (curr->prev->to)
+ && TYPECODE_SIGNED_P (curr->to)
+ && GET_TYPECODE_SIZE (curr->prev->to)
+ < GET_TYPECODE_SIZE (curr->to))
+ has_sign_extend = 1;
+ if (TYPECODE_UNSIGNED_P (curr->prev->to)
+ && TYPECODE_UNSIGNED_P (curr->to)
+ && GET_TYPECODE_SIZE (curr->prev->to)
+ < GET_TYPECODE_SIZE (curr->to))
+ has_zero_extend = 1;
+ if (TYPECODE_SIGNED_P (curr->prev->to)
+ && TYPECODE_UNSIGNED_P (curr->to))
+ has_signed_unsigned = 1;
+ if (TYPECODE_UNSIGNED_P (curr->prev->to)
+ && TYPECODE_SIGNED_P (curr->to))
+ has_unsigned_signed = 1;
+ }
+
+ if (TYPECODE_INTEGER_P (conversion->from)
+ && TYPECODE_INTEGER_P (conversion->to)
+ && GET_TYPECODE_SIZE (conversion->to)
+ > GET_TYPECODE_SIZE (conversion->from)
+ && (has_signed_unsigned || has_unsigned_signed))
+ return 0;
+
+ if ((has_float_int && has_int_float) || (has_sign_extend && has_zero_extend))
+ return 0;
+
+ /* Make sure the sequence of destination typecode sizes is
+ strictly nondecreasing or strictly nonincreasing. */
+ prev_size = GET_TYPECODE_SIZE (conversion->to);
+ for (curr = list; curr; curr = curr->prev)
+ {
+ curr_size = GET_TYPECODE_SIZE (curr->to);
+ if (curr_size != prev_size)
+ break;
+ }
+ if (!curr)
+ return 1;
+
+ if (curr_size < prev_size)
+ for (prev_size = curr_size; curr; curr = curr->prev)
+ {
+ curr_size = GET_TYPECODE_SIZE (curr->to);
+ if (curr_size > prev_size)
+ return 0;
+ prev_size = curr_size;
+ }
+ else
+ for (prev_size = curr_size; curr; curr = curr->prev)
+ {
+ curr_size = GET_TYPECODE_SIZE (curr->to);
+ if (curr_size < prev_size)
+ return 0;
+ prev_size = curr_size;
+ }
+ return 1;
+}
+
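+/* For instance, the candidate chain QIcode -> SIcode -> HIcode is
+ rejected by the size test above, because the destination sizes
+ (1, 4, 2) widen and then narrow; QIcode -> SIcode -> DFcode
+ survives, since its sizes never shrink. */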
+
+/* Exhaustively search all reasonable conversions to find one to
+ convert the given types. */
+static struct conversion_recipe
+deduce_conversion (from, to)
+ enum typecode from, to;
+{
+ struct rl
+ {
+ struct conversion_list *list;
+ struct rl *next;
+ } *prev, curr, *good, *temp;
+ struct conversion_list *conv, *best;
+ int i, cost, bestcost;
+ struct conversion_recipe result;
+ struct obstack recipe_obstack;
+
+
+ obstack_init (&recipe_obstack);
+ curr.next = (struct rl *) obstack_alloc (&recipe_obstack, sizeof (struct rl));
+ curr.next->list =
+ (struct conversion_list *) obstack_alloc (&recipe_obstack,
+ sizeof (struct conversion_list));
+ curr.next->list->opcode = -1;
+ curr.next->list->to = from;
+ curr.next->list->cost = 0;
+ curr.next->list->prev = 0;
+ curr.next->next = 0;
+ good = 0;
+
+ while (curr.next)
+ {
+ /* Remove successful conversions from further consideration. */
+ for (prev = &curr; prev; prev = prev->next)
+ if (prev->next && prev->next->list->to == to)
+ {
+ temp = prev->next->next;
+ prev->next->next = good;
+ good = prev->next;
+ prev->next = temp;
+ }
+
+ /* Go through each of the pending conversion chains, trying
+ all possible candidate conversions on them. */
+ for (prev = curr.next, curr.next = 0; prev; prev = prev->next)
+ for (i = 0; i < NUM_CONVERSIONS; ++i)
+ if (conversion_info[i].from == prev->list->to
+ && conversion_reasonable_p (&conversion_info[i], prev->list))
+ {
+ temp = (struct rl *) obstack_alloc (&recipe_obstack,
+ sizeof (struct rl));
+ temp->list = (struct conversion_list *)
+ obstack_alloc (&recipe_obstack,
+ sizeof (struct conversion_list));
+ temp->list->opcode = conversion_info[i].opcode;
+ temp->list->to = conversion_info[i].to;
+ temp->list->cost = conversion_info[i].cost;
+ temp->list->prev = prev->list;
+ temp->next = curr.next;
+ curr.next = temp;
+ }
+ }
+
+ bestcost = BIG_ARBITRARY_NUMBER;
+ best = 0;
+ for (temp = good; temp; temp = temp->next)
+ {
+ for (conv = temp->list, cost = 0; conv; conv = conv->prev)
+ cost += conv->cost;
+ if (cost < bestcost)
+ {
+ bestcost = cost;
+ best = temp->list;
+ }
+ }
+
+ if (!best)
+ abort ();
+
+ for (i = 0, conv = best; conv; conv = conv->prev)
+ if (conv->opcode != -1)
+ ++i;
+
+ result.opcodes = (unsigned char *) xmalloc (i);
+ result.nopcodes = i;
+ for (conv = best; conv; conv = conv->prev)
+ if (conv->opcode != -1)
+ result.opcodes[--i] = conv->opcode;
+ result.cost = bestcost;
+ obstack_free (&recipe_obstack, 0);
+ return result;
+}
+
+#define DEDUCE_CONVERSION(FROM, TO) \
+ (conversion_recipe[(int) FROM][(int) TO].opcodes ? 0 \
+ : (conversion_recipe[(int) FROM][(int) TO] \
+ = deduce_conversion (FROM, TO), 0))
+
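+/* The comma expression above memoizes deduce_conversion, so in, say,
+
+      DEDUCE_CONVERSION (QIcode, DFcode);
+      DEDUCE_CONVERSION (QIcode, DFcode);
+
+ only the first use runs the exhaustive search; the second finds a
+ non-null opcodes pointer in conversion_recipe and does nothing. */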
+
+/* Emit a conversion between the given scalar types. */
+void
+emit_typecode_conversion (from, to)
+ enum typecode from, to;
+{
+ int i;
+
+ DEDUCE_CONVERSION (from, to);
+ for (i = 0; i < conversion_recipe[(int) from][(int) to].nopcodes; ++i)
+ bc_emit_instruction (conversion_recipe[(int) from][(int) to].opcodes[i]);
+}
+
+
+/* Initialize signed_mode_to_code_map[] and unsigned_mode_to_code_map[]. */
+void
+bc_init_mode_to_code_map ()
+{
+ int mode;
+
+ for (mode = 0; mode < MAX_MACHINE_MODE + 1; mode++)
+ {
+ signed_mode_to_code_map[mode] =
+ unsigned_mode_to_code_map[mode] =
+ LAST_AND_UNUSED_TYPECODE;
+ }
+
+#define DEF_MODEMAP(SYM, CODE, UCODE, CONST, LOAD, STORE) \
+ { signed_mode_to_code_map[(int) SYM] = CODE; \
+ unsigned_mode_to_code_map[(int) SYM] = UCODE; }
+#include "modemap.def"
+#undef DEF_MODEMAP
+
+ /* Initialize opcode maps for const, load, and store */
+ bc_init_mode_to_opcode_maps ();
+}
+
+/* Given a machine mode return the preferred typecode. */
+enum typecode
+preferred_typecode (mode, unsignedp)
+ enum machine_mode mode;
+ int unsignedp;
+{
+ enum typecode code = (unsignedp
+ ? unsigned_mode_to_code_map
+ : signed_mode_to_code_map) [MIN ((int) mode,
+ (int) MAX_MACHINE_MODE)];
+
+ if (code == LAST_AND_UNUSED_TYPECODE)
+ abort ();
+
+ return code;
+}
+
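+/* Illustrative example (assuming modemap.def maps SImode to the pair
+ { SIcode, SUcode }; not taken from the imported source): the call
+
+      preferred_typecode (SImode, 1)
+
+ returns SUcode, while passing 0 for UNSIGNEDP returns SIcode. */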
+
+/* Expand a conversion between the given types. */
+void
+bc_expand_conversion (from, to)
+ tree from, to;
+{
+ enum typecode fcode, tcode;
+
+ fcode = preferred_typecode (TYPE_MODE (from), TREE_UNSIGNED (from));
+ tcode = preferred_typecode (TYPE_MODE (to), TREE_UNSIGNED (to));
+
+ emit_typecode_conversion (fcode, tcode);
+}
+
+/* Expand a conversion of the given type to a truth value. */
+void
+bc_expand_truth_conversion (from)
+ tree from;
+{
+ enum typecode fcode;
+
+ fcode = preferred_typecode (TYPE_MODE (from), TREE_UNSIGNED (from));
+ emit_typecode_conversion (fcode, Tcode);
+}
+
+/* Emit an appropriate binary operation. */
+void
+bc_expand_binary_operation (optab, resulttype, arg0, arg1)
+ struct binary_operator optab[];
+ tree resulttype, arg0, arg1;
+{
+ int i, besti, cost, bestcost;
+ enum typecode resultcode, arg0code, arg1code;
+
+ resultcode = preferred_typecode (TYPE_MODE (resulttype), TREE_UNSIGNED (resulttype));
+ arg0code = preferred_typecode (TYPE_MODE (TREE_TYPE (arg0)), TREE_UNSIGNED (resulttype));
+ arg1code = preferred_typecode (TYPE_MODE (TREE_TYPE (arg1)), TREE_UNSIGNED (resulttype));
+
+ besti = -1;
+ bestcost = BIG_ARBITRARY_NUMBER;
+
+ for (i = 0; optab[i].opcode != -1; ++i)
+ {
+ cost = 0;
+ DEDUCE_CONVERSION (arg0code, optab[i].arg0);
+ cost += conversion_recipe[(int) arg0code][(int) optab[i].arg0].cost;
+ DEDUCE_CONVERSION (arg1code, optab[i].arg1);
+ cost += conversion_recipe[(int) arg1code][(int) optab[i].arg1].cost;
+ if (cost < bestcost)
+ {
+ besti = i;
+ bestcost = cost;
+ }
+ }
+
+ if (besti == -1)
+ abort ();
+
+ expand_expr (arg1, 0, VOIDmode, 0);
+ emit_typecode_conversion (arg1code, optab[besti].arg1);
+ expand_expr (arg0, 0, VOIDmode, 0);
+ emit_typecode_conversion (arg0code, optab[besti].arg0);
+ bc_emit_instruction (optab[besti].opcode);
+ emit_typecode_conversion (optab[besti].result, resultcode);
+}
+
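+/* For instance, for an int + int sum both argument codes are already
+ SIcode, so the addSI row of optab_plus_expr wins with a total
+ conversion cost of 0 (identity recipes emit no opcodes), and the
+ emitted sequence is arg1, then arg0, then addSI. */
+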
+/* Emit an appropriate unary operation. */
+void
+bc_expand_unary_operation (optab, resulttype, arg0)
+ struct unary_operator optab[];
+ tree resulttype, arg0;
+{
+ int i, besti, cost, bestcost;
+ enum typecode resultcode, arg0code;
+
+ resultcode = preferred_typecode (TYPE_MODE (resulttype), TREE_UNSIGNED (resulttype));
+ arg0code = preferred_typecode (TYPE_MODE (TREE_TYPE (arg0)), TREE_UNSIGNED (TREE_TYPE (arg0)));
+
+ besti = -1;
+ bestcost = BIG_ARBITRARY_NUMBER;
+
+ for (i = 0; optab[i].opcode != -1; ++i)
+ {
+ DEDUCE_CONVERSION (arg0code, optab[i].arg0);
+ cost = conversion_recipe[(int) arg0code][(int) optab[i].arg0].cost;
+ if (cost < bestcost)
+ {
+ besti = i;
+ bestcost = cost;
+ }
+ }
+
+ if (besti == -1)
+ abort ();
+
+ expand_expr (arg0, 0, VOIDmode, 0);
+ emit_typecode_conversion (arg0code, optab[besti].arg0);
+ bc_emit_instruction (optab[besti].opcode);
+ emit_typecode_conversion (optab[besti].result, resultcode);
+}
+
+
+/* Emit an appropriate increment. */
+void
+bc_expand_increment (optab, type)
+ struct increment_operator optab[];
+ tree type;
+{
+ enum typecode code;
+ int i;
+
+ code = preferred_typecode (TYPE_MODE (type), TREE_UNSIGNED (type));
+ for (i = 0; (int) optab[i].opcode >= 0; ++i)
+ if (code == optab[i].arg)
+ {
+ bc_emit_instruction (optab[i].opcode);
+ return;
+ }
+ abort ();
+}
diff --git a/gnu/usr.bin/cc/cc_int/c-common.c b/gnu/usr.bin/cc/cc_int/c-common.c
new file mode 100644
index 0000000..f18c270
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/c-common.c
@@ -0,0 +1,1997 @@
+/* Subroutines shared by all languages that are variants of C.
+ Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include "config.h"
+#include "tree.h"
+#include "c-lex.h"
+#include "c-tree.h"
+#include "flags.h"
+#include "obstack.h"
+#include <stdio.h>
+#include <ctype.h>
+
+extern struct obstack permanent_obstack;
+
+static void declare_hidden_char_array PROTO((char *, char *));
+
+/* Make bindings for __FUNCTION__ and __PRETTY_FUNCTION__. */
+
+void
+declare_function_name ()
+{
+ char *name, *printable_name;
+
+ if (current_function_decl == NULL)
+ {
+ name = "";
+ printable_name = "top level";
+ }
+ else
+ {
+ char *kind = "function";
+ if (TREE_CODE (TREE_TYPE (current_function_decl)) == METHOD_TYPE)
+ kind = "method";
+ /* Allow functions to be nameless (such as artificial ones). */
+ if (DECL_NAME (current_function_decl))
+ name = IDENTIFIER_POINTER (DECL_NAME (current_function_decl));
+ else
+ name = "";
+ printable_name = (*decl_printable_name) (current_function_decl, &kind);
+ }
+
+ declare_hidden_char_array ("__FUNCTION__", name);
+ declare_hidden_char_array ("__PRETTY_FUNCTION__", printable_name);
+}
+
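+/* Illustrative example (not taken from the imported source): within
+
+      int foo () { ... }
+
+ the declarations made above bind __FUNCTION__ to "foo"; for a C++
+ method, __PRETTY_FUNCTION__ gets the full printable signature
+ returned by decl_printable_name. */
+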
+static void
+declare_hidden_char_array (name, value)
+ char *name, *value;
+{
+ tree decl, type, init;
+ int vlen;
+
+ /* If the default size of char arrays isn't big enough for the name,
+ or if we want to give warnings for large objects, make a bigger one. */
+ vlen = strlen (value) + 1;
+ type = char_array_type_node;
+ if (TREE_INT_CST_LOW (TYPE_MAX_VALUE (TREE_TYPE (type))) < vlen
+ || warn_larger_than)
+ type = build_array_type (char_type_node,
+ build_index_type (build_int_2 (vlen, 0)));
+ push_obstacks_nochange ();
+ decl = build_decl (VAR_DECL, get_identifier (name), type);
+ TREE_STATIC (decl) = 1;
+ TREE_READONLY (decl) = 1;
+ TREE_ASM_WRITTEN (decl) = 1;
+ DECL_SOURCE_LINE (decl) = 0;
+ DECL_IN_SYSTEM_HEADER (decl) = 1;
+ DECL_IGNORED_P (decl) = 1;
+ init = build_string (vlen, value);
+ TREE_TYPE (init) = type;
+ DECL_INITIAL (decl) = init;
+ finish_decl (pushdecl (decl), init, NULL_TREE);
+}
+
+/* Given a chain of STRING_CST nodes,
+ concatenate them into one STRING_CST
+ and give it a suitable array-of-chars data type. */
+
+tree
+combine_strings (strings)
+ tree strings;
+{
+ register tree value, t;
+ register int length = 1;
+ int wide_length = 0;
+ int wide_flag = 0;
+ int wchar_bytes = TYPE_PRECISION (wchar_type_node) / BITS_PER_UNIT;
+ int nchars;
+
+ if (TREE_CHAIN (strings))
+ {
+ /* More than one in the chain, so concatenate. */
+ register char *p, *q;
+
+ /* Don't include the \0 at the end of each substring,
+ except for the last one.
+ Count wide strings and ordinary strings separately. */
+ for (t = strings; t; t = TREE_CHAIN (t))
+ {
+ if (TREE_TYPE (t) == wchar_array_type_node)
+ {
+ wide_length += (TREE_STRING_LENGTH (t) - wchar_bytes);
+ wide_flag = 1;
+ }
+ else
+ length += (TREE_STRING_LENGTH (t) - 1);
+ }
+
+ /* If anything is wide, the non-wides will be converted,
+ which makes them take more space. */
+ if (wide_flag)
+ length = length * wchar_bytes + wide_length;
+
+ p = savealloc (length);
+
+ /* Copy the individual strings into the new combined string.
+ If the combined string is wide, convert the chars to ints
+ for any individual strings that are not wide. */
+
+ q = p;
+ for (t = strings; t; t = TREE_CHAIN (t))
+ {
+ int len = (TREE_STRING_LENGTH (t)
+ - ((TREE_TYPE (t) == wchar_array_type_node)
+ ? wchar_bytes : 1));
+ if ((TREE_TYPE (t) == wchar_array_type_node) == wide_flag)
+ {
+ bcopy (TREE_STRING_POINTER (t), q, len);
+ q += len;
+ }
+ else
+ {
+ int i;
+ for (i = 0; i < len; i++)
+ ((int *) q)[i] = TREE_STRING_POINTER (t)[i];
+ q += len * wchar_bytes;
+ }
+ }
+ if (wide_flag)
+ {
+ int i;
+ for (i = 0; i < wchar_bytes; i++)
+ *q++ = 0;
+ }
+ else
+ *q = 0;
+
+ value = make_node (STRING_CST);
+ TREE_STRING_POINTER (value) = p;
+ TREE_STRING_LENGTH (value) = length;
+ TREE_CONSTANT (value) = 1;
+ }
+ else
+ {
+ value = strings;
+ length = TREE_STRING_LENGTH (value);
+ if (TREE_TYPE (value) == wchar_array_type_node)
+ wide_flag = 1;
+ }
+
+ /* Compute the number of elements, for the array type. */
+ nchars = wide_flag ? length / wchar_bytes : length;
+
+ /* Create the array type for the string constant.
+ -Wwrite-strings says make the string constant an array of const char
+ so that copying it to a non-const pointer will get a warning. */
+ if (warn_write_strings
+ && (! flag_traditional && ! flag_writable_strings))
+ {
+ tree elements
+ = build_type_variant (wide_flag ? wchar_type_node : char_type_node,
+ 1, 0);
+ TREE_TYPE (value)
+ = build_array_type (elements,
+ build_index_type (build_int_2 (nchars - 1, 0)));
+ }
+ else
+ TREE_TYPE (value)
+ = build_array_type (wide_flag ? wchar_type_node : char_type_node,
+ build_index_type (build_int_2 (nchars - 1, 0)));
+ TREE_CONSTANT (value) = 1;
+ TREE_STATIC (value) = 1;
+ return value;
+}
+
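+/* For instance, given the chain for the source text
+
+      "foo" "bar"
+
+ the loop above copies three bytes from each node, appends one '\0',
+ and returns a single STRING_CST "foobar" of length 7, typed as an
+ array of 7 chars. */
+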
+/* Process the attributes listed in ATTRIBUTES
+ and install them in DECL. */
+
+void
+decl_attributes (decl, attributes)
+ tree decl, attributes;
+{
+ tree a, name, args, type, new_attr;
+
+ type = TREE_TYPE (decl);
+
+ new_attr = TYPE_ATTRIBUTES (type);
+
+ for (a = attributes; a; a = TREE_CHAIN (a))
+ if (!(name = TREE_VALUE (a)))
+ continue;
+ else if (name == get_identifier ("packed"))
+ {
+ if (TREE_CODE (decl) == FIELD_DECL)
+ DECL_PACKED (decl) = 1;
+ /* We can't set DECL_PACKED for a VAR_DECL, because the bit is
+ used for DECL_REGISTER. It wouldn't mean anything anyway. */
+ else
+ warning_with_decl (decl, "`packed' attribute ignore");
+
+ }
+ else if (TREE_VALUE (a) == get_identifier ("noreturn")
+ || TREE_VALUE (a) == get_identifier ("volatile"))
+ {
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ TREE_THIS_VOLATILE (decl) = 1;
+ else if (TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE)
+ TREE_TYPE (decl) = type
+ = build_pointer_type
+ (build_type_variant (TREE_TYPE (type),
+ TREE_READONLY (TREE_TYPE (type)), 1));
+ else
+ warning_with_decl (decl, "`%s' attribute ignored",
+ IDENTIFIER_POINTER (TREE_VALUE (a)));
+ }
+ else if (TREE_VALUE (a) == get_identifier ("const"))
+ {
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ TREE_READONLY (decl) = 1;
+ else if (TREE_CODE (type) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE)
+ TREE_TYPE (decl) = type
+ = build_pointer_type
+ (build_type_variant (TREE_TYPE (type), 1,
+ TREE_THIS_VOLATILE (TREE_TYPE (type))));
+ else
+ warning_with_decl (decl, "`const' attribute ignored");
+ }
+ else if (TREE_CODE (name) != TREE_LIST)
+ {
+#ifdef VALID_MACHINE_ATTRIBUTE
+ if (VALID_MACHINE_ATTRIBUTE (type, new_attr, name))
+ {
+ register tree atlist;
+
+ for (atlist = new_attr; atlist; atlist = TREE_CHAIN (atlist))
+ if (TREE_VALUE (atlist) == name)
+ goto found_attr;
+
+ new_attr = tree_cons (NULL_TREE, name, new_attr);
+found_attr:;
+ }
+ else
+#endif
+ warning ("`%s' attribute directive ignored",
+ IDENTIFIER_POINTER (name));
+ }
+ else if (args = TREE_CHAIN (name),
+ !strcmp (IDENTIFIER_POINTER (name = TREE_PURPOSE (name)), "mode")
+ && list_length (args) == 1
+ && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
+ {
+ int i;
+ char *specified_name
+ = IDENTIFIER_POINTER (TREE_VALUE (args));
+
+ /* Give this decl a type with the specified mode. */
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ if (!strcmp (specified_name, GET_MODE_NAME (i)))
+ {
+ tree typefm
+ = type_for_mode (i, TREE_UNSIGNED (type));
+ if (typefm != 0)
+ {
+ TREE_TYPE (decl) = type = typefm;
+ DECL_SIZE (decl) = 0;
+ layout_decl (decl, 0);
+ }
+ else
+ error ("no data type for mode `%s'", specified_name);
+ break;
+ }
+ if (i == NUM_MACHINE_MODES)
+ error_with_decl (decl, "unknown machine mode `%s'", specified_name);
+ }
+ else if (!strcmp (IDENTIFIER_POINTER (name), "section")
+ && list_length (args) == 1
+ && TREE_CODE (TREE_VALUE (args)) == STRING_CST)
+ {
+#ifdef ASM_OUTPUT_SECTION_NAME
+ if (TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == VAR_DECL)
+ {
+ if (TREE_CODE (decl) == VAR_DECL && current_function_decl != NULL_TREE)
+ error_with_decl (decl,
+ "section attribute cannot be specified for local variables");
+ /* The decl may have already been given a section attribute from
+ a previous declaration. Ensure they match. */
+ else if (DECL_SECTION_NAME (decl) != NULL_TREE
+ && strcmp (TREE_STRING_POINTER (DECL_SECTION_NAME (decl)),
+ TREE_STRING_POINTER (TREE_VALUE (args))) != 0)
+ error_with_decl (decl,
+ "section of `%s' conflicts with previous declaration");
+ else
+ DECL_SECTION_NAME (decl) = TREE_VALUE (args);
+ }
+ else
+ error_with_decl (decl,
+ "section attribute not allowed for `%s'");
+#else
+ error_with_decl (decl, "section attributes are not supported for this target");
+#endif
+ }
+ else if (!strcmp (IDENTIFIER_POINTER (name), "aligned")
+ && list_length (args) == 1
+ && TREE_CODE (TREE_VALUE (args)) == INTEGER_CST)
+ {
+ tree align_expr = TREE_VALUE (args);
+ int align;
+
+ /* Strip any NOPs of any kind. */
+ while (TREE_CODE (align_expr) == NOP_EXPR
+ || TREE_CODE (align_expr) == CONVERT_EXPR
+ || TREE_CODE (align_expr) == NON_LVALUE_EXPR)
+ align_expr = TREE_OPERAND (align_expr, 0);
+
+ if (TREE_CODE (align_expr) != INTEGER_CST)
+ {
+ error_with_decl (decl,
+ "requested alignment of `%s' is not a constant");
+ continue;
+ }
+
+ align = TREE_INT_CST_LOW (align_expr) * BITS_PER_UNIT;
+
+ if (exact_log2 (align) == -1)
+ error_with_decl (decl,
+ "requested alignment of `%s' is not a power of 2");
+ else if (TREE_CODE (decl) != VAR_DECL
+ && TREE_CODE (decl) != FIELD_DECL)
+ error_with_decl (decl,
+ "alignment specified for `%s'");
+ else
+ DECL_ALIGN (decl) = align;
+ }
+ else if (!strcmp (IDENTIFIER_POINTER (name), "format")
+ && list_length (args) == 3
+ && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE
+ && TREE_CODE (TREE_VALUE (TREE_CHAIN (args))) == INTEGER_CST
+ && TREE_CODE (TREE_VALUE (TREE_CHAIN (TREE_CHAIN (args)))) == INTEGER_CST )
+ {
+ tree format_type = TREE_VALUE (args);
+ tree format_num_expr = TREE_VALUE (TREE_CHAIN (args));
+ tree first_arg_num_expr = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (args)));
+ int format_num;
+ int first_arg_num;
+ int is_scan;
+ tree argument;
+ int arg_num;
+
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ {
+ error_with_decl (decl,
+ "argument format specified for non-function `%s'");
+ continue;
+ }
+
+ if (!strcmp (IDENTIFIER_POINTER (format_type), "printf"))
+ is_scan = 0;
+ else if (!strcmp (IDENTIFIER_POINTER (format_type), "scanf"))
+ is_scan = 1;
+ else
+ {
+ error_with_decl (decl, "unrecognized format specifier for `%s'");
+ continue;
+ }
+
+ /* Strip any conversions from the string index and first arg number
+ and verify they are constants. */
+ while (TREE_CODE (format_num_expr) == NOP_EXPR
+ || TREE_CODE (format_num_expr) == CONVERT_EXPR
+ || TREE_CODE (format_num_expr) == NON_LVALUE_EXPR)
+ format_num_expr = TREE_OPERAND (format_num_expr, 0);
+
+ while (TREE_CODE (first_arg_num_expr) == NOP_EXPR
+ || TREE_CODE (first_arg_num_expr) == CONVERT_EXPR
+ || TREE_CODE (first_arg_num_expr) == NON_LVALUE_EXPR)
+ first_arg_num_expr = TREE_OPERAND (first_arg_num_expr, 0);
+
+ if (TREE_CODE (format_num_expr) != INTEGER_CST
+ || TREE_CODE (first_arg_num_expr) != INTEGER_CST)
+ {
+ error_with_decl (decl,
+ "format string for `%s' has non-constant operand number");
+ continue;
+ }
+
+ format_num = TREE_INT_CST_LOW (format_num_expr);
+ first_arg_num = TREE_INT_CST_LOW (first_arg_num_expr);
+ if (first_arg_num != 0 && first_arg_num <= format_num)
+ {
+ error_with_decl (decl,
+ "format string arg follows the args to be formatted, for `%s'");
+ continue;
+ }
+
+ /* If a parameter list is specified, verify that the format_num
+ argument is actually a string, in case the format attribute
+ is in error. */
+ argument = TYPE_ARG_TYPES (type);
+ if (argument)
+ {
+ for (arg_num = 1; ; ++arg_num)
+ {
+ if (argument == 0 || arg_num == format_num)
+ break;
+ argument = TREE_CHAIN (argument);
+ }
+ if (! argument
+ || TREE_CODE (TREE_VALUE (argument)) != POINTER_TYPE
+ || (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_VALUE (argument)))
+ != char_type_node))
+ {
+ error_with_decl (decl,
+ "format string arg not a string type, for `%s'");
+ continue;
+ }
+ if (first_arg_num != 0)
+ {
+ /* Verify that first_arg_num points to the last arg, the ... */
+ while (argument)
+ arg_num++, argument = TREE_CHAIN (argument);
+ if (arg_num != first_arg_num)
+ {
+ error_with_decl (decl,
+ "args to be formatted is not ..., for `%s'");
+ continue;
+ }
+ }
+ }
+
+ record_function_format (DECL_NAME (decl), DECL_ASSEMBLER_NAME (decl),
+ is_scan, format_num, first_arg_num);
+ }
+ else
+ warning ("`%s' attribute directive ignored",
+ IDENTIFIER_POINTER (name));
+
+ TREE_TYPE (decl) = build_type_attribute_variant (type, new_attr);
+}
+
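+/* Illustrative example (not taken from the imported source): the
+ cases above handle declarations such as
+
+      int counter __attribute__ ((aligned (8)));
+      extern void fatal (char *, ...)
+        __attribute__ ((noreturn, format (printf, 1, 2)));
+
+ giving `counter' an alignment of 8 * BITS_PER_UNIT bits, marking
+ `fatal' volatile (noreturn), and registering it for printf-style
+ checking of argument 1 against the arguments from position 2 on. */
+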
+/* Check a printf/fprintf/sprintf/scanf/fscanf/sscanf format against
+ a parameter list. */
+
+#define T_I &integer_type_node
+#define T_L &long_integer_type_node
+#define T_S &short_integer_type_node
+#define T_UI &unsigned_type_node
+#define T_UL &long_unsigned_type_node
+#define T_US &short_unsigned_type_node
+#define T_F &float_type_node
+#define T_D &double_type_node
+#define T_LD &long_double_type_node
+#define T_C &char_type_node
+#define T_V &void_type_node
+#define T_W &wchar_type_node
+#define T_ST &sizetype
+
+typedef struct {
+ char *format_chars;
+ int pointer_count;
+ /* Type of argument if no length modifier is used. */
+ tree *nolen;
+ /* Type of argument if length modifier for shortening is used.
+ If NULL, then this modifier is not allowed. */
+ tree *hlen;
+ /* Type of argument if length modifier `l' is used.
+ If NULL, then this modifier is not allowed. */
+ tree *llen;
+ /* Type of argument if length modifier `L' is used.
+ If NULL, then this modifier is not allowed. */
+ tree *bigllen;
+ /* List of other modifier characters allowed with these options. */
+ char *flag_chars;
+} format_char_info;
+
+static format_char_info print_char_table[] = {
+ { "di", 0, T_I, T_I, T_L, NULL, "-wp0 +" },
+ { "oxX", 0, T_UI, T_UI, T_UL, NULL, "-wp0#" },
+ { "u", 0, T_UI, T_UI, T_UL, NULL, "-wp0" },
+/* Two GNU extensions. */
+ { "Z", 0, T_ST, NULL, NULL, NULL, "-wp0" },
+ { "m", 0, T_UI, T_UI, T_UL, NULL, "-wp" },
+ { "feEgG", 0, T_D, NULL, NULL, T_LD, "-wp0 +#" },
+ { "c", 0, T_I, NULL, T_W, NULL, "-w" },
+ { "C", 0, T_W, NULL, NULL, NULL, "-w" },
+ { "s", 1, T_C, NULL, T_W, NULL, "-wp" },
+ { "S", 1, T_W, NULL, NULL, NULL, "-wp" },
+ { "p", 1, T_V, NULL, NULL, NULL, "-w" },
+ { "n", 1, T_I, T_S, T_L, NULL, "" },
+ { NULL }
+};
+
+static format_char_info scan_char_table[] = {
+ { "di", 1, T_I, T_S, T_L, NULL, "*" },
+ { "ouxX", 1, T_UI, T_US, T_UL, NULL, "*" },
+ { "efgEG", 1, T_F, NULL, T_D, T_LD, "*" },
+ { "sc", 1, T_C, NULL, T_W, NULL, "*a" },
+ { "[", 1, T_C, NULL, NULL, NULL, "*a" },
+ { "C", 1, T_W, NULL, NULL, NULL, "*" },
+ { "S", 1, T_W, NULL, NULL, NULL, "*" },
+ { "p", 2, T_V, NULL, NULL, NULL, "*" },
+ { "n", 1, T_I, T_S, T_L, NULL, "" },
+ { NULL }
+};
+
+typedef struct function_format_info {
+ struct function_format_info *next; /* next structure on the list */
+ tree name; /* identifier such as "printf" */
+ tree assembler_name; /* optional mangled identifier (for C++) */
+ int is_scan; /* TRUE if *scanf */
+ int format_num; /* number of format argument */
+ int first_arg_num; /* number of first arg (zero for varargs) */
+} function_format_info;
+
+static function_format_info *function_format_list = NULL;
+
+static void check_format_info PROTO((function_format_info *, tree));
+
+/* Initialize the table of functions to perform format checking on.
+ The ANSI functions are always checked (whether <stdio.h> is
+ included or not), since it is common to call printf without
+ including <stdio.h>. There shouldn't be a problem with this,
+ since ANSI reserves these function names whether you include the
+ header file or not. In any case, the checking is harmless. */
+
+void
+init_function_format_info ()
+{
+ record_function_format (get_identifier ("printf"), NULL_TREE, 0, 1, 2);
+ record_function_format (get_identifier ("fprintf"), NULL_TREE, 0, 2, 3);
+ record_function_format (get_identifier ("sprintf"), NULL_TREE, 0, 2, 3);
+ record_function_format (get_identifier ("scanf"), NULL_TREE, 1, 1, 2);
+ record_function_format (get_identifier ("fscanf"), NULL_TREE, 1, 2, 3);
+ record_function_format (get_identifier ("sscanf"), NULL_TREE, 1, 2, 3);
+ record_function_format (get_identifier ("vprintf"), NULL_TREE, 0, 1, 0);
+ record_function_format (get_identifier ("vfprintf"), NULL_TREE, 0, 2, 0);
+ record_function_format (get_identifier ("vsprintf"), NULL_TREE, 0, 2, 0);
+}
+
+/* Record information for argument format checking. NAME is the
+ identifier node for the name of the function to check (its decl
+ need not exist yet); ASSEMBLER_NAME is its optional mangled name
+ (for C++). IS_SCAN is true for scanf-type format checking;
+ false indicates printf-style format checking. FORMAT_NUM is the number
+ of the argument which is the format control string (starting from 1).
+ FIRST_ARG_NUM is the number of the first actual argument to check
+ against the format string, or zero if no checking is to be done
+ (e.g. for varargs such as vfprintf). */
+
+void
+record_function_format (name, assembler_name, is_scan,
+ format_num, first_arg_num)
+ tree name;
+ tree assembler_name;
+ int is_scan;
+ int format_num;
+ int first_arg_num;
+{
+ function_format_info *info;
+
+ /* Re-use existing structure if it's there. */
+
+ for (info = function_format_list; info; info = info->next)
+ {
+ if (info->name == name && info->assembler_name == assembler_name)
+ break;
+ }
+ if (! info)
+ {
+ info = (function_format_info *) xmalloc (sizeof (function_format_info));
+ info->next = function_format_list;
+ function_format_list = info;
+
+ info->name = name;
+ info->assembler_name = assembler_name;
+ }
+
+ info->is_scan = is_scan;
+ info->format_num = format_num;
+ info->first_arg_num = first_arg_num;
+}
+
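+/* Illustrative example (hypothetical function name; not taken from
+ the imported source): a logging wrapper could be registered with
+
+      record_function_format (get_identifier ("log_printf"), NULL_TREE,
+                              0, 2, 3);
+
+ so that argument 2 of each log_printf call is checked as a printf
+ format string against the arguments from position 3 onward. */
+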
+static char tfaff[] = "too few arguments for format";
+
+/* Check the argument list of a call to printf, scanf, etc.
+ NAME is the function identifier.
+ ASSEMBLER_NAME is the function's assembler identifier.
+ (Either NAME or ASSEMBLER_NAME, but not both, may be NULL_TREE.)
+ PARAMS is the list of argument values. */
+
+void
+check_function_format (name, assembler_name, params)
+ tree name;
+ tree assembler_name;
+ tree params;
+{
+ function_format_info *info;
+
+ /* See if this function is a format function. */
+ for (info = function_format_list; info; info = info->next)
+ {
+ if (info->assembler_name
+ ? (info->assembler_name == assembler_name)
+ : (info->name == name))
+ {
+ /* Yup; check it. */
+ check_format_info (info, params);
+ break;
+ }
+ }
+}
+
+/* Check the argument list of a call to printf, scanf, etc.
+ INFO points to the function_format_info structure.
+ PARAMS is the list of argument values. */
+
+static void
+check_format_info (info, params)
+ function_format_info *info;
+ tree params;
+{
+ int i;
+ int arg_num;
+ int suppressed, wide, precise;
+ int length_char;
+ int format_char;
+ int format_length;
+ tree format_tree;
+ tree cur_param;
+ tree cur_type;
+ tree wanted_type;
+ tree first_fillin_param;
+ char *format_chars;
+ format_char_info *fci;
+ static char message[132];
+ char flag_chars[8];
+ int has_operand_number = 0;
+
+ /* Skip to format argument. If the argument isn't available, there's
+ no work for us to do; prototype checking will catch the problem. */
+ for (arg_num = 1; ; ++arg_num)
+ {
+ if (params == 0)
+ return;
+ if (arg_num == info->format_num)
+ break;
+ params = TREE_CHAIN (params);
+ }
+ format_tree = TREE_VALUE (params);
+ params = TREE_CHAIN (params);
+ if (format_tree == 0)
+ return;
+ /* We can only check the format if it's a string constant. */
+ while (TREE_CODE (format_tree) == NOP_EXPR)
+ format_tree = TREE_OPERAND (format_tree, 0); /* strip coercion */
+ if (format_tree == null_pointer_node)
+ {
+ warning ("null format string");
+ return;
+ }
+ if (TREE_CODE (format_tree) != ADDR_EXPR)
+ return;
+ format_tree = TREE_OPERAND (format_tree, 0);
+ if (TREE_CODE (format_tree) != STRING_CST)
+ return;
+ format_chars = TREE_STRING_POINTER (format_tree);
+ format_length = TREE_STRING_LENGTH (format_tree);
+ if (format_length <= 1)
+ warning ("zero-length format string");
+ if (format_chars[--format_length] != 0)
+ {
+ warning ("unterminated format string");
+ return;
+ }
+ /* Skip to first argument to check. */
+ while (arg_num + 1 < info->first_arg_num)
+ {
+ if (params == 0)
+ return;
+ params = TREE_CHAIN (params);
+ ++arg_num;
+ }
+
+ first_fillin_param = params;
+ while (1)
+ {
+ int aflag;
+ if (*format_chars == 0)
+ {
+ if (format_chars - TREE_STRING_POINTER (format_tree) != format_length)
+ warning ("embedded `\\0' in format");
+ if (info->first_arg_num != 0 && params != 0 && ! has_operand_number)
+ warning ("too many arguments for format");
+ return;
+ }
+ if (*format_chars++ != '%')
+ continue;
+ if (*format_chars == 0)
+ {
+ warning ("spurious trailing `%%' in format");
+ continue;
+ }
+ if (*format_chars == '%')
+ {
+ ++format_chars;
+ continue;
+ }
+ flag_chars[0] = 0;
+ suppressed = wide = precise = FALSE;
+ if (info->is_scan)
+ {
+ suppressed = *format_chars == '*';
+ if (suppressed)
+ ++format_chars;
+ while (isdigit (*format_chars))
+ ++format_chars;
+ }
+ else
+ {
+ /* See if we have a number followed by a dollar sign. If we do,
+ it is an operand number, so set PARAMS to that operand. */
+ if (*format_chars >= '0' && *format_chars <= '9')
+ {
+ char *p = format_chars;
+
+ while (*p >= '0' && *p++ <= '9')
+ ;
+
+ if (*p == '$')
+ {
+ int opnum = atoi (format_chars);
+
+ params = first_fillin_param;
+ format_chars = p + 1;
+ has_operand_number = 1;
+
+ for (i = 1; i < opnum && params != 0; i++)
+ params = TREE_CHAIN (params);
+
+ if (opnum == 0 || params == 0)
+ {
+ warning ("operand number out of range in format");
+ return;
+ }
+ }
+ }
+
+ while (*format_chars != 0 && index (" +#0-", *format_chars) != 0)
+ {
+ if (index (flag_chars, *format_chars) != 0)
+ {
+ sprintf (message, "repeated `%c' flag in format",
+ *format_chars);
+ warning (message);
+ }
+ i = strlen (flag_chars);
+ flag_chars[i++] = *format_chars++;
+ flag_chars[i] = 0;
+ }
+ /* "If the space and + flags both appear,
+ the space flag will be ignored." */
+ if (index (flag_chars, ' ') != 0
+ && index (flag_chars, '+') != 0)
+ warning ("use of both ` ' and `+' flags in format");
+ /* "If the 0 and - flags both appear,
+ the 0 flag will be ignored." */
+ if (index (flag_chars, '0') != 0
+ && index (flag_chars, '-') != 0)
+ warning ("use of both `0' and `-' flags in format");
+ if (*format_chars == '*')
+ {
+ wide = TRUE;
+ /* "...a field width...may be indicated by an asterisk.
+ In this case, an int argument supplies the field width..." */
+ ++format_chars;
+ if (params == 0)
+ {
+ warning (tfaff);
+ return;
+ }
+ if (info->first_arg_num != 0)
+ {
+ cur_param = TREE_VALUE (params);
+ params = TREE_CHAIN (params);
+ ++arg_num;
+ /* size_t is generally not valid here.
+ It will work on most machines, because size_t and int
+ have the same mode. But might as well warn anyway,
+ since it will fail on other machines. */
+ if ((TYPE_MAIN_VARIANT (TREE_TYPE (cur_param))
+ != integer_type_node)
+ &&
+ (TYPE_MAIN_VARIANT (TREE_TYPE (cur_param))
+ != unsigned_type_node))
+ {
+ sprintf (message,
+ "field width is not type int (arg %d)",
+ arg_num);
+ warning (message);
+ }
+ }
+ }
+ else
+ {
+ while (isdigit (*format_chars))
+ {
+ wide = TRUE;
+ ++format_chars;
+ }
+ }
+ if (*format_chars == '.')
+ {
+ precise = TRUE;
+ ++format_chars;
+ if (*format_chars != '*' && !isdigit (*format_chars))
+ warning ("`.' not followed by `*' or digit in format");
+ /* "...a...precision...may be indicated by an asterisk.
+ In this case, an int argument supplies the...precision." */
+ if (*format_chars == '*')
+ {
+ if (info->first_arg_num != 0)
+ {
+ ++format_chars;
+ if (params == 0)
+ {
+ warning (tfaff);
+ return;
+ }
+ cur_param = TREE_VALUE (params);
+ params = TREE_CHAIN (params);
+ ++arg_num;
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (cur_param))
+ != integer_type_node)
+ {
+ sprintf (message,
+ "field width is not type int (arg %d)",
+ arg_num);
+ warning (message);
+ }
+ }
+ }
+ else
+ {
+ while (isdigit (*format_chars))
+ ++format_chars;
+ }
+ }
+ }
+ if (*format_chars == 'h' || *format_chars == 'l' || *format_chars == 'L')
+ length_char = *format_chars++;
+ else
+ length_char = 0;
+ aflag = 0;
+ if (*format_chars == 'a')
+ {
+ aflag = 1;
+ format_chars++;
+ }
+ if (suppressed && length_char != 0)
+ {
+ sprintf (message,
+ "use of `*' and `%c' together in format",
+ length_char);
+ warning (message);
+ }
+ format_char = *format_chars;
+ if (format_char == 0)
+ {
+ warning ("conversion lacks type at end of format");
+ continue;
+ }
+ format_chars++;
+ fci = info->is_scan ? scan_char_table : print_char_table;
+ while (fci->format_chars != 0
+ && index (fci->format_chars, format_char) == 0)
+ ++fci;
+ if (fci->format_chars == 0)
+ {
+ if (format_char >= 040 && format_char < 0177)
+ sprintf (message,
+ "unknown conversion type character `%c' in format",
+ format_char);
+ else
+ sprintf (message,
+ "unknown conversion type character 0x%x in format",
+ format_char);
+ warning (message);
+ continue;
+ }
+ if (wide && index (fci->flag_chars, 'w') == 0)
+ {
+ sprintf (message, "width used with `%c' format",
+ format_char);
+ warning (message);
+ }
+ if (precise && index (fci->flag_chars, 'p') == 0)
+ {
+ sprintf (message, "precision used with `%c' format",
+ format_char);
+ warning (message);
+ }
+ if (aflag && index (fci->flag_chars, 'a') == 0)
+ {
+ sprintf (message, "`a' flag used with `%c' format",
+ format_char);
+ warning (message);
+ }
+ if (info->is_scan && format_char == '[')
+ {
+ /* Skip over scan set, in case it happens to have '%' in it. */
+ if (*format_chars == '^')
+ ++format_chars;
+ /* Find closing bracket; if one is hit immediately, then
+ it's part of the scan set rather than a terminator. */
+ if (*format_chars == ']')
+ ++format_chars;
+ while (*format_chars && *format_chars != ']')
+ ++format_chars;
+ if (*format_chars != ']')
+ /* The end of the format string was reached. */
+ warning ("no closing `]' for `%%[' format");
+ }
+ if (suppressed)
+ {
+ if (index (fci->flag_chars, '*') == 0)
+ {
+ sprintf (message,
+ "suppression of `%c' conversion in format",
+ format_char);
+ warning (message);
+ }
+ continue;
+ }
+ for (i = 0; flag_chars[i] != 0; ++i)
+ {
+ if (index (fci->flag_chars, flag_chars[i]) == 0)
+ {
+ sprintf (message, "flag `%c' used with type `%c'",
+ flag_chars[i], format_char);
+ warning (message);
+ }
+ }
+ if (precise && index (flag_chars, '0') != 0
+ && (format_char == 'd' || format_char == 'i'
+ || format_char == 'o' || format_char == 'u'
+ || format_char == 'x' || format_char == 'X'))
+ {
+ sprintf (message,
+ "precision and `0' flag not both allowed with `%c' format",
+ format_char);
+ warning (message);
+ }
+ switch (length_char)
+ {
+ default: wanted_type = fci->nolen ? *(fci->nolen) : 0; break;
+ case 'h': wanted_type = fci->hlen ? *(fci->hlen) : 0; break;
+ case 'l': wanted_type = fci->llen ? *(fci->llen) : 0; break;
+ case 'L': wanted_type = fci->bigllen ? *(fci->bigllen) : 0; break;
+ }
+ if (wanted_type == 0)
+ {
+ sprintf (message,
+ "use of `%c' length character with `%c' type character",
+ length_char, format_char);
+ warning (message);
+ }
+
+ /*
+ ** XXX -- should kvetch about stuff such as
+ ** {
+ ** const int i;
+ **
+ ** scanf ("%d", &i);
+ ** }
+ */
+
+ /* Finally. . .check type of argument against desired type! */
+ if (info->first_arg_num == 0)
+ continue;
+ if (params == 0)
+ {
+ warning (tfaff);
+ return;
+ }
+ cur_param = TREE_VALUE (params);
+ params = TREE_CHAIN (params);
+ ++arg_num;
+ cur_type = TREE_TYPE (cur_param);
+
+ /* Check the types of any additional pointer arguments
+ that precede the "real" argument. */
+ for (i = 0; i < fci->pointer_count; ++i)
+ {
+ if (TREE_CODE (cur_type) == POINTER_TYPE)
+ {
+ cur_type = TREE_TYPE (cur_type);
+ continue;
+ }
+ sprintf (message,
+ "format argument is not a %s (arg %d)",
+ ((fci->pointer_count == 1) ? "pointer" : "pointer to a pointer"),
+ arg_num);
+ warning (message);
+ break;
+ }
+
+ /* Check the type of the "real" argument, if there's a type we want. */
+ if (i == fci->pointer_count && wanted_type != 0
+ && wanted_type != TYPE_MAIN_VARIANT (cur_type)
+ /* If we want `void *', allow any pointer type.
+ (Anything else would already have got a warning.) */
+ && ! (wanted_type == void_type_node
+ && fci->pointer_count > 0)
+ /* Don't warn about differences merely in signedness. */
+ && !(TREE_CODE (wanted_type) == INTEGER_TYPE
+ && TREE_CODE (TYPE_MAIN_VARIANT (cur_type)) == INTEGER_TYPE
+ && (TREE_UNSIGNED (wanted_type)
+ ? wanted_type == (cur_type = unsigned_type (cur_type))
+ : wanted_type == (cur_type = signed_type (cur_type))))
+ /* Likewise, "signed char", "unsigned char" and "char" are
+ equivalent but the above test won't consider them equivalent. */
+ && ! (wanted_type == char_type_node
+ && (TYPE_MAIN_VARIANT (cur_type) == signed_char_type_node
+ || TYPE_MAIN_VARIANT (cur_type) == unsigned_char_type_node)))
+ {
+ register char *this;
+ register char *that;
+
+ this = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (wanted_type)));
+ that = 0;
+ if (TREE_CODE (cur_type) != ERROR_MARK
+ && TYPE_NAME (cur_type) != 0
+ && TREE_CODE (cur_type) != INTEGER_TYPE
+ && !(TREE_CODE (cur_type) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (cur_type)) == INTEGER_TYPE))
+ {
+ if (TREE_CODE (TYPE_NAME (cur_type)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (cur_type)) != 0)
+ that = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (cur_type)));
+ else
+ that = IDENTIFIER_POINTER (TYPE_NAME (cur_type));
+ }
+
+ /* A nameless type can't possibly match what the format wants.
+ So there will be a warning for it.
+ Make up a string to describe vaguely what it is. */
+ if (that == 0)
+ {
+ if (TREE_CODE (cur_type) == POINTER_TYPE)
+ that = "pointer";
+ else
+ that = "different type";
+ }
+
+ /* Make the warning better in case of mismatch of int vs long. */
+ if (TREE_CODE (cur_type) == INTEGER_TYPE
+ && TREE_CODE (wanted_type) == INTEGER_TYPE
+ && TYPE_PRECISION (cur_type) == TYPE_PRECISION (wanted_type)
+ && TYPE_NAME (cur_type) != 0
+ && TREE_CODE (TYPE_NAME (cur_type)) == TYPE_DECL)
+ that = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (cur_type)));
+
+ if (strcmp (this, that) != 0)
+ {
+ sprintf (message, "%s format, %s arg (arg %d)",
+ this, that, arg_num);
+ warning (message);
+ }
+ }
+ }
+}
+
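+/* For instance, with the checks above a call such as
+
+      printf ("%d", 3.0);
+
+ draws the warning "int format, double arg (arg 2)". */
+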
+/* Print a warning if a constant expression had overflow in folding.
+ Invoke this function on every expression that the language
+ requires to be a constant expression.
+ Note the ANSI C standard says it is erroneous for a
+ constant expression to overflow. */
+
+void
+constant_expression_warning (value)
+ tree value;
+{
+ if ((TREE_CODE (value) == INTEGER_CST || TREE_CODE (value) == REAL_CST
+ || TREE_CODE (value) == COMPLEX_CST)
+ && TREE_CONSTANT_OVERFLOW (value) && pedantic)
+ pedwarn ("overflow in constant expression");
+}
+
+/* Print a warning if an expression had overflow in folding.
+ Invoke this function on every expression that
+ (1) appears in the source code, and
+ (2) might be a constant expression that overflowed, and
+ (3) is not already checked by convert_and_check;
+ however, do not invoke this function on operands of explicit casts. */
+
+void
+overflow_warning (value)
+ tree value;
+{
+ if ((TREE_CODE (value) == INTEGER_CST
+ || (TREE_CODE (value) == COMPLEX_CST
+ && TREE_CODE (TREE_REALPART (value)) == INTEGER_CST))
+ && TREE_OVERFLOW (value))
+ {
+ TREE_OVERFLOW (value) = 0;
+ warning ("integer overflow in expression");
+ }
+ else if ((TREE_CODE (value) == REAL_CST
+ || (TREE_CODE (value) == COMPLEX_CST
+ && TREE_CODE (TREE_REALPART (value)) == REAL_CST))
+ && TREE_OVERFLOW (value))
+ {
+ TREE_OVERFLOW (value) = 0;
+ warning ("floating-pointer overflow in expression");
+ }
+}
+
+/* Print a warning if a large constant is truncated to unsigned,
+ or if -Wconversion is used and a constant < 0 is converted to unsigned.
+ Invoke this function on every expression that might be implicitly
+ converted to an unsigned type. */
+
+void
+unsigned_conversion_warning (result, operand)
+ tree result, operand;
+{
+ if (TREE_CODE (operand) == INTEGER_CST
+ && TREE_CODE (TREE_TYPE (result)) == INTEGER_TYPE
+ && TREE_UNSIGNED (TREE_TYPE (result))
+ && !int_fits_type_p (operand, TREE_TYPE (result)))
+ {
+ if (!int_fits_type_p (operand, signed_type (TREE_TYPE (result))))
+ /* This detects cases like converting -129 or 256 to unsigned char. */
+ warning ("large integer implicitly truncated to unsigned type");
+ else if (warn_conversion)
+ warning ("negative integer implicitly converted to unsigned type");
+ }
+}
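+
+/* For example, assuming 8-bit chars, `unsigned char c = 256;' draws
+   "large integer implicitly truncated to unsigned type", and
+   `unsigned char d = -1;' draws, under -Wconversion,
+   "negative integer implicitly converted to unsigned type".  */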
+
+/* Convert EXPR to TYPE, warning about conversion problems with constants.
+ Invoke this function on every expression that is converted implicitly,
+ i.e. because of language rules and not because of an explicit cast. */
+
+tree
+convert_and_check (type, expr)
+ tree type, expr;
+{
+ tree t = convert (type, expr);
+ if (TREE_CODE (t) == INTEGER_CST)
+ {
+ if (TREE_OVERFLOW (t))
+ {
+ TREE_OVERFLOW (t) = 0;
+
+ /* No warning for converting 0x80000000 to int. */
+ if (!(TREE_UNSIGNED (type) < TREE_UNSIGNED (TREE_TYPE (expr))
+ && TREE_CODE (TREE_TYPE (expr)) == INTEGER_TYPE
+ && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (expr))))
+ /* If EXPR fits in the unsigned version of TYPE,
+ don't warn unless pedantic. */
+ if (pedantic
+ || TREE_UNSIGNED (type)
+ || ! int_fits_type_p (expr, unsigned_type (type)))
+ warning ("overflow in implicit constant conversion");
+ }
+ else
+ unsigned_conversion_warning (t, expr);
+ }
+ return t;
+}
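+
+/* For example, assuming 8-bit chars, the implicit conversion in
+   `char c = 300;' draws "overflow in implicit constant conversion",
+   while converting 0x80000000 to int on a 32-bit target stays quiet
+   by the test above.  */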
+
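+/* Expand EXPR as an expression statement, performing the default
+   conversions where they are safe and possibly important, and
+   complaining if the expression's type is incomplete.  */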
+void
+c_expand_expr_stmt (expr)
+ tree expr;
+{
+  /* Do default conversion if safe and possibly important,
+     in case this statement appears within a ({...}) construct.  */
+ if ((TREE_CODE (TREE_TYPE (expr)) == ARRAY_TYPE && lvalue_p (expr))
+ || TREE_CODE (TREE_TYPE (expr)) == FUNCTION_TYPE)
+ expr = default_conversion (expr);
+
+ if (TREE_TYPE (expr) != error_mark_node
+ && TYPE_SIZE (TREE_TYPE (expr)) == 0
+ && TREE_CODE (TREE_TYPE (expr)) != ARRAY_TYPE)
+ error ("expression statement has incomplete type");
+
+ expand_expr_stmt (expr);
+}
+
+/* Validate the expression after `case' and apply default promotions. */
+
+tree
+check_case_value (value)
+ tree value;
+{
+ if (value == NULL_TREE)
+ return value;
+
+  /* Strip NON_LVALUE_EXPRs since we aren't using it as an lvalue.  */
+ STRIP_TYPE_NOPS (value);
+
+ if (TREE_CODE (value) != INTEGER_CST
+ && value != error_mark_node)
+ {
+ error ("case label does not reduce to an integer constant");
+ value = error_mark_node;
+ }
+ else
+ /* Promote char or short to int. */
+ value = default_conversion (value);
+
+ constant_expression_warning (value);
+
+ return value;
+}
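+
+/* For example, `case 'a':' is accepted and promoted to int here,
+   while `case n:' with non-constant N draws
+   "case label does not reduce to an integer constant".  */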
+
+/* Return an integer type with BITS bits of precision,
+ that is unsigned if UNSIGNEDP is nonzero, otherwise signed. */
+
+tree
+type_for_size (bits, unsignedp)
+ unsigned bits;
+ int unsignedp;
+{
+ if (bits == TYPE_PRECISION (signed_char_type_node))
+ return unsignedp ? unsigned_char_type_node : signed_char_type_node;
+
+ if (bits == TYPE_PRECISION (short_integer_type_node))
+ return unsignedp ? short_unsigned_type_node : short_integer_type_node;
+
+ if (bits == TYPE_PRECISION (integer_type_node))
+ return unsignedp ? unsigned_type_node : integer_type_node;
+
+ if (bits == TYPE_PRECISION (long_integer_type_node))
+ return unsignedp ? long_unsigned_type_node : long_integer_type_node;
+
+ if (bits == TYPE_PRECISION (long_long_integer_type_node))
+ return (unsignedp ? long_long_unsigned_type_node
+ : long_long_integer_type_node);
+
+ if (bits <= TYPE_PRECISION (intQI_type_node))
+ return unsignedp ? unsigned_intQI_type_node : intQI_type_node;
+
+ if (bits <= TYPE_PRECISION (intHI_type_node))
+ return unsignedp ? unsigned_intHI_type_node : intHI_type_node;
+
+ if (bits <= TYPE_PRECISION (intSI_type_node))
+ return unsignedp ? unsigned_intSI_type_node : intSI_type_node;
+
+ if (bits <= TYPE_PRECISION (intDI_type_node))
+ return unsignedp ? unsigned_intDI_type_node : intDI_type_node;
+
+ return 0;
+}
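+
+/* For example, type_for_size (TYPE_PRECISION (integer_type_node), 1)
+   typically yields unsigned_type_node; a width matching no standard C
+   type falls through to the intQI/intHI/intSI/intDI nodes.  */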
+
+/* Return a data type that has machine mode MODE.
+ If the mode is an integer,
+ then UNSIGNEDP selects between signed and unsigned types. */
+
+tree
+type_for_mode (mode, unsignedp)
+ enum machine_mode mode;
+ int unsignedp;
+{
+ if (mode == TYPE_MODE (signed_char_type_node))
+ return unsignedp ? unsigned_char_type_node : signed_char_type_node;
+
+ if (mode == TYPE_MODE (short_integer_type_node))
+ return unsignedp ? short_unsigned_type_node : short_integer_type_node;
+
+ if (mode == TYPE_MODE (integer_type_node))
+ return unsignedp ? unsigned_type_node : integer_type_node;
+
+ if (mode == TYPE_MODE (long_integer_type_node))
+ return unsignedp ? long_unsigned_type_node : long_integer_type_node;
+
+ if (mode == TYPE_MODE (long_long_integer_type_node))
+ return unsignedp ? long_long_unsigned_type_node : long_long_integer_type_node;
+
+ if (mode == TYPE_MODE (intQI_type_node))
+ return unsignedp ? unsigned_intQI_type_node : intQI_type_node;
+
+ if (mode == TYPE_MODE (intHI_type_node))
+ return unsignedp ? unsigned_intHI_type_node : intHI_type_node;
+
+ if (mode == TYPE_MODE (intSI_type_node))
+ return unsignedp ? unsigned_intSI_type_node : intSI_type_node;
+
+ if (mode == TYPE_MODE (intDI_type_node))
+ return unsignedp ? unsigned_intDI_type_node : intDI_type_node;
+
+ if (mode == TYPE_MODE (float_type_node))
+ return float_type_node;
+
+ if (mode == TYPE_MODE (double_type_node))
+ return double_type_node;
+
+ if (mode == TYPE_MODE (long_double_type_node))
+ return long_double_type_node;
+
+ if (mode == TYPE_MODE (build_pointer_type (char_type_node)))
+ return build_pointer_type (char_type_node);
+
+ if (mode == TYPE_MODE (build_pointer_type (integer_type_node)))
+ return build_pointer_type (integer_type_node);
+
+ return 0;
+}
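+
+/* For example, type_for_mode (SImode, 0) yields whichever signed
+   integer type the target gives SImode -- typically `int'.  */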
+
+/* Print an error message for invalid operands to arith operation CODE.
+ NOP_EXPR is used as a special case (see truthvalue_conversion). */
+
+void
+binary_op_error (code)
+ enum tree_code code;
+{
+ register char *opname = "unknown";
+
+ switch (code)
+ {
+ case NOP_EXPR:
+ error ("invalid truth-value expression");
+ return;
+
+ case PLUS_EXPR:
+ opname = "+"; break;
+ case MINUS_EXPR:
+ opname = "-"; break;
+ case MULT_EXPR:
+ opname = "*"; break;
+ case MAX_EXPR:
+ opname = "max"; break;
+ case MIN_EXPR:
+ opname = "min"; break;
+ case EQ_EXPR:
+ opname = "=="; break;
+ case NE_EXPR:
+ opname = "!="; break;
+ case LE_EXPR:
+ opname = "<="; break;
+ case GE_EXPR:
+ opname = ">="; break;
+ case LT_EXPR:
+ opname = "<"; break;
+ case GT_EXPR:
+ opname = ">"; break;
+ case LSHIFT_EXPR:
+ opname = "<<"; break;
+ case RSHIFT_EXPR:
+ opname = ">>"; break;
+ case TRUNC_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ opname = "%"; break;
+ case TRUNC_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ opname = "/"; break;
+ case BIT_AND_EXPR:
+ opname = "&"; break;
+ case BIT_IOR_EXPR:
+ opname = "|"; break;
+ case TRUTH_ANDIF_EXPR:
+ opname = "&&"; break;
+ case TRUTH_ORIF_EXPR:
+ opname = "||"; break;
+ case BIT_XOR_EXPR:
+ opname = "^"; break;
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ opname = "rotate"; break;
+ }
+ error ("invalid operands to binary %s", opname);
+}
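+
+/* For example, applying `+' to two structure operands reaches here
+   with code == PLUS_EXPR and draws "invalid operands to binary +".  */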
+
+/* Subroutine of build_binary_op, used for comparison operations.
+ See if the operands have both been converted from subword integer types
+ and, if so, perhaps change them both back to their original type.
+ This function is also responsible for converting the two operands
+ to the proper common type for comparison.
+
+ The arguments of this function are all pointers to local variables
+ of build_binary_op: OP0_PTR is &OP0, OP1_PTR is &OP1,
+ RESTYPE_PTR is &RESULT_TYPE and RESCODE_PTR is &RESULTCODE.
+
+ If this function returns nonzero, it means that the comparison has
+ a constant value. What this function returns is an expression for
+ that value. */
+
+tree
+shorten_compare (op0_ptr, op1_ptr, restype_ptr, rescode_ptr)
+ tree *op0_ptr, *op1_ptr;
+ tree *restype_ptr;
+ enum tree_code *rescode_ptr;
+{
+ register tree type;
+ tree op0 = *op0_ptr;
+ tree op1 = *op1_ptr;
+ int unsignedp0, unsignedp1;
+ int real1, real2;
+ tree primop0, primop1;
+ enum tree_code code = *rescode_ptr;
+
+ /* Throw away any conversions to wider types
+ already present in the operands. */
+
+ primop0 = get_narrower (op0, &unsignedp0);
+ primop1 = get_narrower (op1, &unsignedp1);
+
+ /* Handle the case that OP0 does not *contain* a conversion
+ but it *requires* conversion to FINAL_TYPE. */
+
+ if (op0 == primop0 && TREE_TYPE (op0) != *restype_ptr)
+ unsignedp0 = TREE_UNSIGNED (TREE_TYPE (op0));
+ if (op1 == primop1 && TREE_TYPE (op1) != *restype_ptr)
+ unsignedp1 = TREE_UNSIGNED (TREE_TYPE (op1));
+
+ /* If one of the operands must be floated, we cannot optimize. */
+ real1 = TREE_CODE (TREE_TYPE (primop0)) == REAL_TYPE;
+ real2 = TREE_CODE (TREE_TYPE (primop1)) == REAL_TYPE;
+
+ /* If first arg is constant, swap the args (changing operation
+ so value is preserved), for canonicalization. */
+
+ if (TREE_CONSTANT (primop0))
+ {
+ register tree tem = primop0;
+ register int temi = unsignedp0;
+ primop0 = primop1;
+ primop1 = tem;
+ tem = op0;
+ op0 = op1;
+ op1 = tem;
+ *op0_ptr = op0;
+ *op1_ptr = op1;
+ unsignedp0 = unsignedp1;
+ unsignedp1 = temi;
+ temi = real1;
+ real1 = real2;
+ real2 = temi;
+
+ switch (code)
+ {
+ case LT_EXPR:
+ code = GT_EXPR;
+ break;
+ case GT_EXPR:
+ code = LT_EXPR;
+ break;
+ case LE_EXPR:
+ code = GE_EXPR;
+ break;
+ case GE_EXPR:
+ code = LE_EXPR;
+ break;
+ }
+ *rescode_ptr = code;
+ }
+
+ /* If comparing an integer against a constant more bits wide,
+ maybe we can deduce a value of 1 or 0 independent of the data.
+ Or else truncate the constant now
+ rather than extend the variable at run time.
+
+ This is only interesting if the constant is the wider arg.
+ Also, it is not safe if the constant is unsigned and the
+ variable arg is signed, since in this case the variable
+ would be sign-extended and then regarded as unsigned.
+ Our technique fails in this case because the lowest/highest
+ possible unsigned results don't follow naturally from the
+ lowest/highest possible values of the variable operand.
+ For just EQ_EXPR and NE_EXPR there is another technique that
+ could be used: see if the constant can be faithfully represented
+ in the other operand's type, by truncating it and reextending it
+ and see if that preserves the constant's value. */
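+
+  /* For example, if X has type unsigned char (range [0, 255]), the
+     comparison `x == 300' is folded to 0 below and `x < 300' to 1.  */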
+
+ if (!real1 && !real2
+ && TREE_CODE (primop1) == INTEGER_CST
+ && TYPE_PRECISION (TREE_TYPE (primop0)) < TYPE_PRECISION (*restype_ptr))
+ {
+ int min_gt, max_gt, min_lt, max_lt;
+ tree maxval, minval;
+ /* 1 if comparison is nominally unsigned. */
+ int unsignedp = TREE_UNSIGNED (*restype_ptr);
+ tree val;
+
+ type = signed_or_unsigned_type (unsignedp0, TREE_TYPE (primop0));
+
+ maxval = TYPE_MAX_VALUE (type);
+ minval = TYPE_MIN_VALUE (type);
+
+ if (unsignedp && !unsignedp0)
+ *restype_ptr = signed_type (*restype_ptr);
+
+ if (TREE_TYPE (primop1) != *restype_ptr)
+ primop1 = convert (*restype_ptr, primop1);
+ if (type != *restype_ptr)
+ {
+ minval = convert (*restype_ptr, minval);
+ maxval = convert (*restype_ptr, maxval);
+ }
+
+ if (unsignedp && unsignedp0)
+ {
+ min_gt = INT_CST_LT_UNSIGNED (primop1, minval);
+ max_gt = INT_CST_LT_UNSIGNED (primop1, maxval);
+ min_lt = INT_CST_LT_UNSIGNED (minval, primop1);
+ max_lt = INT_CST_LT_UNSIGNED (maxval, primop1);
+ }
+ else
+ {
+ min_gt = INT_CST_LT (primop1, minval);
+ max_gt = INT_CST_LT (primop1, maxval);
+ min_lt = INT_CST_LT (minval, primop1);
+ max_lt = INT_CST_LT (maxval, primop1);
+ }
+
+ val = 0;
+      /* This used to be a switch, but the Genix compiler can't handle that.  */
+ if (code == NE_EXPR)
+ {
+ if (max_lt || min_gt)
+ val = integer_one_node;
+ }
+ else if (code == EQ_EXPR)
+ {
+ if (max_lt || min_gt)
+ val = integer_zero_node;
+ }
+ else if (code == LT_EXPR)
+ {
+ if (max_lt)
+ val = integer_one_node;
+ if (!min_lt)
+ val = integer_zero_node;
+ }
+ else if (code == GT_EXPR)
+ {
+ if (min_gt)
+ val = integer_one_node;
+ if (!max_gt)
+ val = integer_zero_node;
+ }
+ else if (code == LE_EXPR)
+ {
+ if (!max_gt)
+ val = integer_one_node;
+ if (min_gt)
+ val = integer_zero_node;
+ }
+ else if (code == GE_EXPR)
+ {
+ if (!min_lt)
+ val = integer_one_node;
+ if (max_lt)
+ val = integer_zero_node;
+ }
+
+      /* If primop0 was sign-extended and an unsigned comparison was specified,
+ we did a signed comparison above using the signed type bounds.
+ But the comparison we output must be unsigned.
+
+ Also, for inequalities, VAL is no good; but if the signed
+ comparison had *any* fixed result, it follows that the
+ unsigned comparison just tests the sign in reverse
+ (positive values are LE, negative ones GE).
+ So we can generate an unsigned comparison
+ against an extreme value of the signed type. */
+
+ if (unsignedp && !unsignedp0)
+ {
+ if (val != 0)
+ switch (code)
+ {
+ case LT_EXPR:
+ case GE_EXPR:
+ primop1 = TYPE_MIN_VALUE (type);
+ val = 0;
+ break;
+
+ case LE_EXPR:
+ case GT_EXPR:
+ primop1 = TYPE_MAX_VALUE (type);
+ val = 0;
+ break;
+ }
+ type = unsigned_type (type);
+ }
+
+ if (!max_gt && !unsignedp0 && TREE_CODE (primop0) != INTEGER_CST)
+ {
+ /* This is the case of (char)x >?< 0x80, which people used to use
+ expecting old C compilers to change the 0x80 into -0x80. */
+ if (val == integer_zero_node)
+ warning ("comparison is always 0 due to limited range of data type");
+ if (val == integer_one_node)
+ warning ("comparison is always 1 due to limited range of data type");
+ }
+
+ if (!min_lt && unsignedp0 && TREE_CODE (primop0) != INTEGER_CST)
+ {
+ /* This is the case of (unsigned char)x >?< -1 or < 0. */
+ if (val == integer_zero_node)
+ warning ("comparison is always 0 due to limited range of data type");
+ if (val == integer_one_node)
+ warning ("comparison is always 1 due to limited range of data type");
+ }
+
+ if (val != 0)
+ {
+ /* Don't forget to evaluate PRIMOP0 if it has side effects. */
+ if (TREE_SIDE_EFFECTS (primop0))
+ return build (COMPOUND_EXPR, TREE_TYPE (val), primop0, val);
+ return val;
+ }
+
+ /* Value is not predetermined, but do the comparison
+ in the type of the operand that is not constant.
+ TYPE is already properly set. */
+ }
+ else if (real1 && real2
+ && (TYPE_PRECISION (TREE_TYPE (primop0))
+ == TYPE_PRECISION (TREE_TYPE (primop1))))
+ type = TREE_TYPE (primop0);
+
+ /* If args' natural types are both narrower than nominal type
+ and both extend in the same manner, compare them
+ in the type of the wider arg.
+     Otherwise we must actually extend both to the nominal
+     common type lest different ways of extending
+     alter the result.
+     (E.g., (short)-1 == (unsigned short)-1 should be 0.)  */
+
+ else if (unsignedp0 == unsignedp1 && real1 == real2
+ && TYPE_PRECISION (TREE_TYPE (primop0)) < TYPE_PRECISION (*restype_ptr)
+ && TYPE_PRECISION (TREE_TYPE (primop1)) < TYPE_PRECISION (*restype_ptr))
+ {
+ type = common_type (TREE_TYPE (primop0), TREE_TYPE (primop1));
+ type = signed_or_unsigned_type (unsignedp0
+ || TREE_UNSIGNED (*restype_ptr),
+ type);
+ /* Make sure shorter operand is extended the right way
+ to match the longer operand. */
+ primop0 = convert (signed_or_unsigned_type (unsignedp0, TREE_TYPE (primop0)),
+ primop0);
+ primop1 = convert (signed_or_unsigned_type (unsignedp1, TREE_TYPE (primop1)),
+ primop1);
+ }
+ else
+ {
+ /* Here we must do the comparison on the nominal type
+ using the args exactly as we received them. */
+ type = *restype_ptr;
+ primop0 = op0;
+ primop1 = op1;
+
+ if (!real1 && !real2 && integer_zerop (primop1)
+ && TREE_UNSIGNED (*restype_ptr))
+ {
+ tree value = 0;
+ switch (code)
+ {
+ case GE_EXPR:
+ if (extra_warnings)
+ warning ("unsigned value >= 0 is always 1");
+ value = integer_one_node;
+ break;
+
+ case LT_EXPR:
+ if (extra_warnings)
+ warning ("unsigned value < 0 is always 0");
+ value = integer_zero_node;
+ }
+
+ if (value != 0)
+ {
+ /* Don't forget to evaluate PRIMOP0 if it has side effects. */
+ if (TREE_SIDE_EFFECTS (primop0))
+ return build (COMPOUND_EXPR, TREE_TYPE (value),
+ primop0, value);
+ return value;
+ }
+ }
+ }
+
+ *op0_ptr = convert (type, primop0);
+ *op1_ptr = convert (type, primop1);
+
+ *restype_ptr = integer_type_node;
+
+ return 0;
+}
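+
+/* For example, given `unsigned u;', the test `u >= 0' is folded to 1
+   above, drawing "unsigned value >= 0 is always 1" under -W.  */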
+
+/* Prepare expr to be an argument of a TRUTH_NOT_EXPR,
+   or validate its data type for an `if' or `while' statement or `?:' expression.
+
+ This preparation consists of taking the ordinary
+ representation of an expression expr and producing a valid tree
+ boolean expression describing whether expr is nonzero. We could
+ simply always do build_binary_op (NE_EXPR, expr, integer_zero_node, 1),
+ but we optimize comparisons, &&, ||, and !.
+
+ The resulting type should always be `integer_type_node'. */
+
+tree
+truthvalue_conversion (expr)
+ tree expr;
+{
+ if (TREE_CODE (expr) == ERROR_MARK)
+ return expr;
+
+#if 0 /* This appears to be wrong for C++. */
+ /* These really should return error_mark_node after 2.4 is stable.
+ But not all callers handle ERROR_MARK properly. */
+ switch (TREE_CODE (TREE_TYPE (expr)))
+ {
+ case RECORD_TYPE:
+ error ("struct type value used where scalar is required");
+ return integer_zero_node;
+
+ case UNION_TYPE:
+ error ("union type value used where scalar is required");
+ return integer_zero_node;
+
+ case ARRAY_TYPE:
+ error ("array type value used where scalar is required");
+ return integer_zero_node;
+
+ default:
+ break;
+ }
+#endif /* 0 */
+
+ switch (TREE_CODE (expr))
+ {
+ /* It is simpler and generates better code to have only TRUTH_*_EXPR
+ or comparison expressions as truth values at this level. */
+#if 0
+ case COMPONENT_REF:
+ /* A one-bit unsigned bit-field is already acceptable. */
+ if (1 == TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (expr, 1)))
+ && TREE_UNSIGNED (TREE_OPERAND (expr, 1)))
+ return expr;
+ break;
+#endif
+
+ case EQ_EXPR:
+ /* It is simpler and generates better code to have only TRUTH_*_EXPR
+ or comparison expressions as truth values at this level. */
+#if 0
+ if (integer_zerop (TREE_OPERAND (expr, 1)))
+ return build_unary_op (TRUTH_NOT_EXPR, TREE_OPERAND (expr, 0), 0);
+#endif
+ case NE_EXPR: case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR:
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ case TRUTH_AND_EXPR:
+ case TRUTH_OR_EXPR:
+ case TRUTH_XOR_EXPR:
+ return convert (integer_type_node, expr);
+
+ case ERROR_MARK:
+ return expr;
+
+ case INTEGER_CST:
+ return integer_zerop (expr) ? integer_zero_node : integer_one_node;
+
+ case REAL_CST:
+ return real_zerop (expr) ? integer_zero_node : integer_one_node;
+
+ case ADDR_EXPR:
+ if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 0)))
+ return build (COMPOUND_EXPR, integer_type_node,
+ TREE_OPERAND (expr, 0), integer_one_node);
+ else
+ return integer_one_node;
+
+ case COMPLEX_EXPR:
+ return build_binary_op ((TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 1))
+ ? TRUTH_OR_EXPR : TRUTH_ORIF_EXPR),
+ truthvalue_conversion (TREE_OPERAND (expr, 0)),
+ truthvalue_conversion (TREE_OPERAND (expr, 1)),
+ 0);
+
+ case NEGATE_EXPR:
+ case ABS_EXPR:
+ case FLOAT_EXPR:
+ case FFS_EXPR:
+ /* These don't change whether an object is non-zero or zero. */
+ return truthvalue_conversion (TREE_OPERAND (expr, 0));
+
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ /* These don't change whether an object is zero or non-zero, but
+ we can't ignore them if their second arg has side-effects. */
+ if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 1)))
+ return build (COMPOUND_EXPR, integer_type_node, TREE_OPERAND (expr, 1),
+ truthvalue_conversion (TREE_OPERAND (expr, 0)));
+ else
+ return truthvalue_conversion (TREE_OPERAND (expr, 0));
+
+ case COND_EXPR:
+ /* Distribute the conversion into the arms of a COND_EXPR. */
+ return fold (build (COND_EXPR, integer_type_node, TREE_OPERAND (expr, 0),
+ truthvalue_conversion (TREE_OPERAND (expr, 1)),
+ truthvalue_conversion (TREE_OPERAND (expr, 2))));
+
+ case CONVERT_EXPR:
+ /* Don't cancel the effect of a CONVERT_EXPR from a REFERENCE_TYPE,
+ since that affects how `default_conversion' will behave. */
+ if (TREE_CODE (TREE_TYPE (expr)) == REFERENCE_TYPE
+ || TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == REFERENCE_TYPE)
+ break;
+ /* fall through... */
+ case NOP_EXPR:
+ /* If this is widening the argument, we can ignore it. */
+ if (TYPE_PRECISION (TREE_TYPE (expr))
+ >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (expr, 0))))
+ return truthvalue_conversion (TREE_OPERAND (expr, 0));
+ break;
+
+ case MINUS_EXPR:
+ /* With IEEE arithmetic, x - x may not equal 0, so we can't optimize
+ this case. */
+ if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ && TREE_CODE (TREE_TYPE (expr)) == REAL_TYPE)
+ break;
+ /* fall through... */
+ case BIT_XOR_EXPR:
+ /* This and MINUS_EXPR can be changed into a comparison of the
+ two objects. */
+ if (TREE_TYPE (TREE_OPERAND (expr, 0))
+ == TREE_TYPE (TREE_OPERAND (expr, 1)))
+ return build_binary_op (NE_EXPR, TREE_OPERAND (expr, 0),
+ TREE_OPERAND (expr, 1), 1);
+ return build_binary_op (NE_EXPR, TREE_OPERAND (expr, 0),
+ fold (build1 (NOP_EXPR,
+ TREE_TYPE (TREE_OPERAND (expr, 0)),
+ TREE_OPERAND (expr, 1))), 1);
+
+ case BIT_AND_EXPR:
+ if (integer_onep (TREE_OPERAND (expr, 1)))
+ return expr;
+
+ case MODIFY_EXPR:
+ if (warn_parentheses && C_EXP_ORIGINAL_CODE (expr) == MODIFY_EXPR)
+ warning ("suggest parentheses around assignment used as truth value");
+ break;
+ }
+
+ if (TREE_CODE (TREE_TYPE (expr)) == COMPLEX_TYPE)
+ return (build_binary_op
+ ((TREE_SIDE_EFFECTS (expr)
+ ? TRUTH_OR_EXPR : TRUTH_ORIF_EXPR),
+ truthvalue_conversion (build_unary_op (REALPART_EXPR, expr, 0)),
+ truthvalue_conversion (build_unary_op (IMAGPART_EXPR, expr, 0)),
+ 0));
+
+ return build_binary_op (NE_EXPR, expr, integer_zero_node, 1);
+}
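+
+/* For example, `if (x = 0)' reaches the MODIFY_EXPR case above and,
+   under -Wparentheses, draws "suggest parentheses around assignment
+   used as truth value", while a complex operand is lowered to the
+   equivalent of `realpart != 0 || imagpart != 0'.  */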
+
+/* Read the rest of a #-directive from input stream FINPUT.
+ In normal use, the directive name and the white space after it
+ have already been read, so they won't be included in the result.
+ We allow for the fact that the directive line may contain
+ a newline embedded within a character or string literal which forms
+ a part of the directive.
+
+ The value is a string in a reusable buffer. It remains valid
+ only until the next time this function is called. */
+
+char *
+get_directive_line (finput)
+ register FILE *finput;
+{
+ static char *directive_buffer = NULL;
+ static unsigned buffer_length = 0;
+ register char *p;
+ register char *buffer_limit;
+ register int looking_for = 0;
+ register int char_escaped = 0;
+
+ if (buffer_length == 0)
+ {
+ directive_buffer = (char *)xmalloc (128);
+ buffer_length = 128;
+ }
+
+ buffer_limit = &directive_buffer[buffer_length];
+
+ for (p = directive_buffer; ; )
+ {
+ int c;
+
+ /* Make buffer bigger if it is full. */
+ if (p >= buffer_limit)
+ {
+ register unsigned bytes_used = (p - directive_buffer);
+
+ buffer_length *= 2;
+ directive_buffer
+ = (char *)xrealloc (directive_buffer, buffer_length);
+ p = &directive_buffer[bytes_used];
+ buffer_limit = &directive_buffer[buffer_length];
+ }
+
+ c = getc (finput);
+
+ /* Discard initial whitespace. */
+ if ((c == ' ' || c == '\t') && p == directive_buffer)
+ continue;
+
+ /* Detect the end of the directive. */
+ if (c == '\n' && looking_for == 0)
+ {
+ ungetc (c, finput);
+ c = '\0';
+ }
+
+ *p++ = c;
+
+ if (c == 0)
+ return directive_buffer;
+
+ /* Handle string and character constant syntax. */
+ if (looking_for)
+ {
+ if (looking_for == c && !char_escaped)
+ looking_for = 0; /* Found terminator... stop looking. */
+ }
+ else
+ if (c == '\'' || c == '"')
+	      looking_for = c;	/* Don't stop buffering until we see
+					   another one of these (or an EOF).  */
+
+ /* Handle backslash. */
+ char_escaped = (c == '\\' && ! char_escaped);
+ }
+}
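+
+/* For example, in `#pragma foo "a\"b"' the escaped quote does not end
+   the string, so the whole literal is buffered and only the final
+   unquoted newline terminates the directive.  */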
+
+/* Make a variant type in the proper way for C/C++, propagating qualifiers
+ down to the element type of an array. */
+
+tree
+c_build_type_variant (type, constp, volatilep)
+ tree type;
+ int constp, volatilep;
+{
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ tree real_main_variant = TYPE_MAIN_VARIANT (type);
+
+ push_obstacks (TYPE_OBSTACK (real_main_variant),
+ TYPE_OBSTACK (real_main_variant));
+ type = build_array_type (c_build_type_variant (TREE_TYPE (type),
+ constp, volatilep),
+ TYPE_DOMAIN (type));
+
+ /* TYPE must be on same obstack as REAL_MAIN_VARIANT. If not,
+ make a copy. (TYPE might have come from the hash table and
+ REAL_MAIN_VARIANT might be in some function's obstack.) */
+
+ if (TYPE_OBSTACK (type) != TYPE_OBSTACK (real_main_variant))
+ {
+ type = copy_node (type);
+ TYPE_POINTER_TO (type) = TYPE_REFERENCE_TO (type) = 0;
+ }
+
+ TYPE_MAIN_VARIANT (type) = real_main_variant;
+ pop_obstacks ();
+ }
+ return build_type_variant (type, constp, volatilep);
+}
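+
+/* For example, applying this to `int [10]' with CONSTP set yields an
+   array of `const int', matching the C rule that qualifiers on an
+   array type apply to the element type.  */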
diff --git a/gnu/usr.bin/cc/cc_int/caller-save.c b/gnu/usr.bin/cc/cc_int/caller-save.c
new file mode 100644
index 0000000..5b09606
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/caller-save.c
@@ -0,0 +1,762 @@
+/* Save and restore call-clobbered registers which are live across a call.
+ Copyright (C) 1989, 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include "config.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "flags.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "recog.h"
+#include "basic-block.h"
+#include "reload.h"
+#include "expr.h"
+
+#ifndef MAX_MOVE_MAX
+#define MAX_MOVE_MAX MOVE_MAX
+#endif
+
+#ifndef MAX_UNITS_PER_WORD
+#define MAX_UNITS_PER_WORD UNITS_PER_WORD
+#endif
+
+/* Modes for each hard register that we can save. The smallest mode is wide
+ enough to save the entire contents of the register. When saving the
+ register because it is live we first try to save in multi-register modes.
+ If that is not possible the save is done one register at a time. */
+
+static enum machine_mode
+ regno_save_mode[FIRST_PSEUDO_REGISTER][MAX_MOVE_MAX / MAX_UNITS_PER_WORD + 1];
+
+/* For each hard register, a place on the stack where it can be saved,
+ if needed. */
+
+static rtx
+ regno_save_mem[FIRST_PSEUDO_REGISTER][MAX_MOVE_MAX / MAX_UNITS_PER_WORD + 1];
+
+/* We will only make a register eligible for caller-save if it can be
+ saved in its widest mode with a simple SET insn as long as the memory
+   address is valid.  We record the INSN_CODE of those insns here since
+ when we emit them, the addresses might not be valid, so they might not
+ be recognized. */
+
+static enum insn_code
+ reg_save_code[FIRST_PSEUDO_REGISTER][MAX_MOVE_MAX / MAX_UNITS_PER_WORD + 1];
+static enum insn_code
+ reg_restore_code[FIRST_PSEUDO_REGISTER][MAX_MOVE_MAX / MAX_UNITS_PER_WORD + 1];
+
+/* Set of hard regs currently live (during scan of all insns). */
+
+static HARD_REG_SET hard_regs_live;
+
+/* Set of hard regs currently residing in save area (during insn scan). */
+
+static HARD_REG_SET hard_regs_saved;
+
+/* Set of hard regs which need to be restored before referenced. */
+
+static HARD_REG_SET hard_regs_need_restore;
+
+/* Number of registers currently in hard_regs_saved. */
+
+int n_regs_saved;
+
+static void set_reg_live PROTO((rtx, rtx));
+static void clear_reg_live PROTO((rtx));
+static void restore_referenced_regs PROTO((rtx, rtx, enum machine_mode));
+static int insert_save_restore PROTO((rtx, int, int,
+ enum machine_mode, int));
+
+/* Initialize for caller-save.
+
+   Look at all the hard registers that are used by a call and that
+   regclass.c has not already excluded from being used across a call.
+
+ Ensure that we can find a mode to save the register and that there is a
+ simple insn to save and restore the register. This latter check avoids
+ problems that would occur if we tried to save the MQ register of some
+ machines directly into memory. */
+
+void
+init_caller_save ()
+{
+ char *first_obj = (char *) oballoc (0);
+ rtx addr_reg;
+ int offset;
+ rtx address;
+ int i, j;
+
+ /* First find all the registers that we need to deal with and all
+ the modes that they can have. If we can't find a mode to use,
+ we can't have the register live over calls. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (call_used_regs[i] && ! call_fixed_regs[i])
+ {
+ for (j = 1; j <= MOVE_MAX / UNITS_PER_WORD; j++)
+ {
+ regno_save_mode[i][j] = choose_hard_reg_mode (i, j);
+ if (regno_save_mode[i][j] == VOIDmode && j == 1)
+ {
+ call_fixed_regs[i] = 1;
+ SET_HARD_REG_BIT (call_fixed_reg_set, i);
+ }
+ }
+ }
+ else
+ regno_save_mode[i][1] = VOIDmode;
+ }
+
+ /* The following code tries to approximate the conditions under which
+ we can easily save and restore a register without scratch registers or
+ other complexities. It will usually work, except under conditions where
+ the validity of an insn operand is dependent on the address offset.
+ No such cases are currently known.
+
+ We first find a typical offset from some BASE_REG_CLASS register.
+ This address is chosen by finding the first register in the class
+ and by finding the smallest power of two that is a valid offset from
+ that register in every mode we will use to save registers. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int) BASE_REG_CLASS], i))
+ break;
+
+ if (i == FIRST_PSEUDO_REGISTER)
+ abort ();
+
+ addr_reg = gen_rtx (REG, Pmode, i);
+
+ for (offset = 1 << (HOST_BITS_PER_INT / 2); offset; offset >>= 1)
+ {
+ address = gen_rtx (PLUS, Pmode, addr_reg, GEN_INT (offset));
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regno_save_mode[i][1] != VOIDmode
+ && ! strict_memory_address_p (regno_save_mode[i][1], address))
+ break;
+
+ if (i == FIRST_PSEUDO_REGISTER)
+ break;
+ }
+
+ /* If we didn't find a valid address, we must use register indirect. */
+ if (offset == 0)
+ address = addr_reg;
+
+ /* Next we try to form an insn to save and restore the register. We
+ see if such an insn is recognized and meets its constraints. */
+
+ start_sequence ();
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ for (j = 1; j <= MOVE_MAX / UNITS_PER_WORD; j++)
+ if (regno_save_mode[i][j] != VOIDmode)
+ {
+ rtx mem = gen_rtx (MEM, regno_save_mode[i][j], address);
+ rtx reg = gen_rtx (REG, regno_save_mode[i][j], i);
+ rtx savepat = gen_rtx (SET, VOIDmode, mem, reg);
+ rtx restpat = gen_rtx (SET, VOIDmode, reg, mem);
+ rtx saveinsn = emit_insn (savepat);
+ rtx restinsn = emit_insn (restpat);
+ int ok;
+
+ reg_save_code[i][j] = recog_memoized (saveinsn);
+ reg_restore_code[i][j] = recog_memoized (restinsn);
+
+ /* Now extract both insns and see if we can meet their constraints. */
+ ok = (reg_save_code[i][j] != -1 && reg_restore_code[i][j] != -1);
+ if (ok)
+ {
+ insn_extract (saveinsn);
+ ok = constrain_operands (reg_save_code[i][j], 1);
+ insn_extract (restinsn);
+ ok &= constrain_operands (reg_restore_code[i][j], 1);
+ }
+
+ if (! ok)
+ {
+ regno_save_mode[i][j] = VOIDmode;
+ if (j == 1)
+ {
+ call_fixed_regs[i] = 1;
+ SET_HARD_REG_BIT (call_fixed_reg_set, i);
+ }
+ }
+ }
+
+ end_sequence ();
+
+ obfree (first_obj);
+}
+
+/* Initialize save areas by showing that we haven't allocated any yet. */
+
+void
+init_save_areas ()
+{
+ int i, j;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ for (j = 1; j <= MOVE_MAX / UNITS_PER_WORD; j++)
+ regno_save_mem[i][j] = 0;
+}
+
+/* Allocate save areas for any hard registers that might need saving.
+ We take a conservative approach here and look for call-clobbered hard
+ registers that are assigned to pseudos that cross calls. This may
+ overestimate slightly (especially if some of these registers are later
+ used as spill registers), but it should not be significant.
+
+ Then perform register elimination in the addresses of the save area
+ locations; return 1 if all eliminated addresses are strictly valid.
+ We assume that our caller has set up the elimination table to the
+ worst (largest) possible offsets.
+
+ Set *PCHANGED to 1 if we had to allocate some memory for the save area.
+
+ Future work:
+
+ In the fallback case we should iterate backwards across all possible
+ modes for the save, choosing the largest available one instead of
+   falling back to the smallest mode immediately (e.g. TF -> DF -> SF).
+
+ We do not try to use "move multiple" instructions that exist
+   on some machines (such as the 68k moveml).  It could be a win to try
+   to use them when possible.  The hard part is doing it in a way that is
+ machine independent since they might be saving non-consecutive
+ registers. (imagine caller-saving d0,d1,a0,a1 on the 68k) */
+
+int
+setup_save_areas (pchanged)
+ int *pchanged;
+{
+ int i, j, k;
+ HARD_REG_SET hard_regs_used;
+ int ok = 1;
+
+ /* Allocate space in the save area for the largest multi-register
+ pseudos first, then work backwards to single register
+ pseudos. */
+
+ /* Find and record all call-used hard-registers in this function. */
+ CLEAR_HARD_REG_SET (hard_regs_used);
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_renumber[i] >= 0 && reg_n_calls_crossed[i] > 0)
+ {
+ int regno = reg_renumber[i];
+ int endregno
+ = regno + HARD_REGNO_NREGS (regno, GET_MODE (regno_reg_rtx[i]));
+ int nregs = endregno - regno;
+
+ for (j = 0; j < nregs; j++)
+ {
+ if (call_used_regs[regno+j])
+ SET_HARD_REG_BIT (hard_regs_used, regno+j);
+ }
+ }
+
+ /* Now run through all the call-used hard-registers and allocate
+ space for them in the caller-save area. Try to allocate space
+ in a manner which allows multi-register saves/restores to be done. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ for (j = MOVE_MAX / UNITS_PER_WORD; j > 0; j--)
+ {
+ int ok = 1;
+ int do_save;
+
+	/* If no mode exists for this size, try another.  Also skip this
+	   register if we have already allocated its save area.  */
+ if (regno_save_mode[i][j] == VOIDmode || regno_save_mem[i][1] != 0)
+ continue;
+
+ /* See if any register in this group has been saved. */
+ do_save = 1;
+ for (k = 0; k < j; k++)
+ if (regno_save_mem[i + k][1])
+ {
+ do_save = 0;
+ break;
+ }
+ if (! do_save)
+ continue;
+
+ for (k = 0; k < j; k++)
+ {
+ int regno = i + k;
+ ok &= (TEST_HARD_REG_BIT (hard_regs_used, regno) != 0);
+ }
+
+ /* We have found an acceptable mode to store in. */
+ if (ok)
+ {
+
+ regno_save_mem[i][j]
+ = assign_stack_local (regno_save_mode[i][j],
+ GET_MODE_SIZE (regno_save_mode[i][j]), 0);
+
+	  /* Set up a single-word save area just in case...  */
+ for (k = 0; k < j; k++)
+ {
+ /* This should not depend on WORDS_BIG_ENDIAN.
+ The order of words in regs is the same as in memory. */
+ rtx temp = gen_rtx (MEM, regno_save_mode[i+k][1],
+ XEXP (regno_save_mem[i][j], 0));
+
+ regno_save_mem[i+k][1]
+ = adj_offsettable_operand (temp, k * UNITS_PER_WORD);
+ }
+ *pchanged = 1;
+ }
+ }
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ for (j = 1; j <= MOVE_MAX / UNITS_PER_WORD; j++)
+ if (regno_save_mem[i][j] != 0)
+ ok &= strict_memory_address_p (GET_MODE (regno_save_mem[i][j]),
+ XEXP (eliminate_regs (regno_save_mem[i][j], 0, NULL_RTX), 0));
+
+ return ok;
+}
+
+/* Find the places where hard regs are live across calls and save them.
+
+ INSN_MODE is the mode to assign to any insns that we add. This is used
+ by reload to determine whether or not reloads or register eliminations
+ need be done on these insns. */
+
+void
+save_call_clobbered_regs (insn_mode)
+ enum machine_mode insn_mode;
+{
+ rtx insn;
+ int b;
+
+ for (b = 0; b < n_basic_blocks; b++)
+ {
+ regset regs_live = basic_block_live_at_start[b];
+ rtx prev_block_last = PREV_INSN (basic_block_head[b]);
+ REGSET_ELT_TYPE bit;
+ int offset, i, j;
+ int regno;
+
+ /* Compute hard regs live at start of block -- this is the
+ real hard regs marked live, plus live pseudo regs that
+ have been renumbered to hard regs. No registers have yet been
+ saved because we restore all of them before the end of the basic
+ block. */
+
+#ifdef HARD_REG_SET
+ hard_regs_live = *regs_live;
+#else
+ COPY_HARD_REG_SET (hard_regs_live, regs_live);
+#endif
+
+ CLEAR_HARD_REG_SET (hard_regs_saved);
+ CLEAR_HARD_REG_SET (hard_regs_need_restore);
+ n_regs_saved = 0;
+
+ for (offset = 0, i = 0; offset < regset_size; offset++)
+ {
+ if (regs_live[offset] == 0)
+ i += REGSET_ELT_BITS;
+ else
+ for (bit = 1; bit && i < max_regno; bit <<= 1, i++)
+ if ((regs_live[offset] & bit)
+ && (regno = reg_renumber[i]) >= 0)
+ for (j = regno;
+ j < regno + HARD_REGNO_NREGS (regno,
+ PSEUDO_REGNO_MODE (i));
+ j++)
+ SET_HARD_REG_BIT (hard_regs_live, j);
+
+ }
+
+ /* Now scan the insns in the block, keeping track of what hard
+ regs are live as we go. When we see a call, save the live
+ call-clobbered hard regs. */
+
+ for (insn = basic_block_head[b]; ; insn = NEXT_INSN (insn))
+ {
+ RTX_CODE code = GET_CODE (insn);
+
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ rtx link;
+
+ /* If some registers have been saved, see if INSN references
+ any of them. We must restore them before the insn if so. */
+
+ if (n_regs_saved)
+ restore_referenced_regs (PATTERN (insn), insn, insn_mode);
+
+ /* NB: the normal procedure is to first enliven any
+ registers set by insn, then deaden any registers that
+ had their last use at insn. This is incorrect now,
+ since multiple pseudos may have been mapped to the
+ same hard reg, and the death notes are ambiguous. So
+ it must be done in the other, safe, order. */
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_DEAD)
+ clear_reg_live (XEXP (link, 0));
+
+ /* When we reach a call, we need to save all registers that are
+ live, call-used, not fixed, and not already saved. We must
+ test at this point because registers that die in a CALL_INSN
+ are not live across the call and likewise for registers that
+ are born in the CALL_INSN.
+
+ If registers are filled with parameters for this function,
+ and some of these are also being set by this function, then
+	     they will not appear to die (no REG_DEAD note for them).
+	     To check whether in fact they do, collect the set registers in
+ hard_regs_live first. */
+
+ if (code == CALL_INSN)
+ {
+ HARD_REG_SET this_call_sets;
+ {
+ HARD_REG_SET old_hard_regs_live;
+
+ /* Save the hard_regs_live information. */
+ COPY_HARD_REG_SET (old_hard_regs_live, hard_regs_live);
+
+ /* Now calculate hard_regs_live for this CALL_INSN
+ only. */
+ CLEAR_HARD_REG_SET (hard_regs_live);
+ note_stores (PATTERN (insn), set_reg_live);
+ COPY_HARD_REG_SET (this_call_sets, hard_regs_live);
+
+ /* Restore the hard_regs_live information. */
+ COPY_HARD_REG_SET (hard_regs_live, old_hard_regs_live);
+ }
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (call_used_regs[regno] && ! call_fixed_regs[regno]
+ && TEST_HARD_REG_BIT (hard_regs_live, regno)
+ /* It must not be set by this instruction. */
+ && ! TEST_HARD_REG_BIT (this_call_sets, regno)
+ && ! TEST_HARD_REG_BIT (hard_regs_saved, regno))
+ regno += insert_save_restore (insn, 1, regno,
+ insn_mode, 0);
+
+ /* Put the information for this CALL_INSN on top of what
+ we already had. */
+ IOR_HARD_REG_SET (hard_regs_live, this_call_sets);
+ COPY_HARD_REG_SET (hard_regs_need_restore, hard_regs_saved);
+
+ /* Must recompute n_regs_saved. */
+ n_regs_saved = 0;
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (TEST_HARD_REG_BIT (hard_regs_saved, regno))
+ n_regs_saved++;
+ }
+ else
+ note_stores (PATTERN (insn), set_reg_live);
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_UNUSED)
+ clear_reg_live (XEXP (link, 0));
+ }
+
+ if (insn == basic_block_end[b])
+ break;
+ }
+
+ /* At the end of the basic block, we must restore any registers that
+ remain saved. If the last insn in the block is a JUMP_INSN, put
+ the restore before the insn, otherwise, put it after the insn. */
+
+ if (n_regs_saved)
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (TEST_HARD_REG_BIT (hard_regs_need_restore, regno))
+ regno += insert_save_restore ((GET_CODE (insn) == JUMP_INSN
+ ? insn : NEXT_INSN (insn)), 0,
+ regno, insn_mode, MOVE_MAX / UNITS_PER_WORD);
+
+ /* If we added any insns at the start of the block, update the start
+ of the block to point at those insns. */
+ basic_block_head[b] = NEXT_INSN (prev_block_last);
+ }
+}
+
+/* Here from note_stores when an insn stores a value in a register.
+ Set the proper bit or bits in hard_regs_live. All pseudos that have
+ been assigned hard regs have had their register number changed already,
+ so we can ignore pseudos. */
+
+static void
+set_reg_live (reg, setter)
+ rtx reg, setter;
+{
+ register int regno, endregno, i;
+ enum machine_mode mode = GET_MODE (reg);
+ int word = 0;
+
+ if (GET_CODE (reg) == SUBREG)
+ {
+ word = SUBREG_WORD (reg);
+ reg = SUBREG_REG (reg);
+ }
+
+ if (GET_CODE (reg) != REG || REGNO (reg) >= FIRST_PSEUDO_REGISTER)
+ return;
+
+ regno = REGNO (reg) + word;
+ endregno = regno + HARD_REGNO_NREGS (regno, mode);
+
+ for (i = regno; i < endregno; i++)
+ {
+ SET_HARD_REG_BIT (hard_regs_live, i);
+ CLEAR_HARD_REG_BIT (hard_regs_saved, i);
+ CLEAR_HARD_REG_BIT (hard_regs_need_restore, i);
+ }
+}
+
+/* Here when a REG_DEAD note records the last use of a reg. Clear
+ the appropriate bit or bits in hard_regs_live. Again we can ignore
+ pseudos. */
+
+static void
+clear_reg_live (reg)
+ rtx reg;
+{
+ register int regno, endregno, i;
+
+ if (GET_CODE (reg) != REG || REGNO (reg) >= FIRST_PSEUDO_REGISTER)
+ return;
+
+ regno = REGNO (reg);
+ endregno= regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));
+
+ for (i = regno; i < endregno; i++)
+ {
+ CLEAR_HARD_REG_BIT (hard_regs_live, i);
+ CLEAR_HARD_REG_BIT (hard_regs_need_restore, i);
+ CLEAR_HARD_REG_BIT (hard_regs_saved, i);
+ }
+}
+
+/* If any register currently residing in the save area is referenced in X,
+ which is part of INSN, emit code to restore the register in front of INSN.
+ INSN_MODE is the mode to assign to any insns that we add. */
+
+static void
+restore_referenced_regs (x, insn, insn_mode)
+ rtx x;
+ rtx insn;
+ enum machine_mode insn_mode;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt;
+ int i, j;
+
+ if (code == CLOBBER)
+ return;
+
+ if (code == REG)
+ {
+ int regno = REGNO (x);
+
+ /* If this is a pseudo, scan its memory location, since it might
+ involve the use of another register, which might be saved. */
+
+ if (regno >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_mem[regno] != 0)
+ restore_referenced_regs (XEXP (reg_equiv_mem[regno], 0),
+ insn, insn_mode);
+ else if (regno >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_address[regno] != 0)
+ restore_referenced_regs (reg_equiv_address[regno],
+ insn, insn_mode);
+
+ /* Otherwise if this is a hard register, restore any piece of it that
+ is currently saved. */
+
+ else if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int numregs = HARD_REGNO_NREGS (regno, GET_MODE (x));
+	  /* Restore at most SAVEREGS registers at a time.  This cannot exceed
+	     MOVE_MAX / UNITS_PER_WORD, or insert_save_restore would fail.  */
+ int saveregs = MIN (numregs, MOVE_MAX / UNITS_PER_WORD);
+ int endregno = regno + numregs;
+
+ for (i = regno; i < endregno; i++)
+ if (TEST_HARD_REG_BIT (hard_regs_need_restore, i))
+ i += insert_save_restore (insn, 0, i, insn_mode, saveregs);
+ }
+
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ restore_referenced_regs (XEXP (x, i), insn, insn_mode);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ restore_referenced_regs (XVECEXP (x, i, j), insn, insn_mode);
+ }
+}
+
+/* Insert a sequence of insns to save or restore register REGNO;
+   SAVE_P says which.  Place these insns in front of INSN.  INSN_MODE
+   is the mode to assign to these insns.  MAXRESTORE is the maximum
+   number of registers which should be restored during this call
+   (when SAVE_P == 0).  It should never be less than 1 since we only
+   work with entire registers.
+
+ Note that we have verified in init_caller_save that we can do this
+   with a simple SET, so use it.  Set INSN_CODE to the code we recorded there,
+   since the address might not be valid, so the insn might not be recognized.
+ These insns will be reloaded and have register elimination done by
+ find_reload, so we need not worry about that here.
+
+ Return the extra number of registers saved. */
+
+static int
+insert_save_restore (insn, save_p, regno, insn_mode, maxrestore)
+ rtx insn;
+ int save_p;
+ int regno;
+ enum machine_mode insn_mode;
+ int maxrestore;
+{
+ rtx pat;
+ enum insn_code code;
+ int i, numregs;
+
+ /* A common failure mode if register status is not correct in the RTL
+ is for this routine to be called with a REGNO we didn't expect to
+ save. That will cause us to write an insn with a (nil) SET_DEST
+ or SET_SRC. Instead of doing so and causing a crash later, check
+ for this common case and abort here instead. This will remove one
+ step in debugging such problems. */
+
+ if (regno_save_mem[regno][1] == 0)
+ abort ();
+
+#ifdef HAVE_cc0
+ /* If INSN references CC0, put our insns in front of the insn that sets
+ CC0. This is always safe, since the only way we could be passed an
+ insn that references CC0 is for a restore, and doing a restore earlier
+ isn't a problem. We do, however, assume here that CALL_INSNs don't
+ reference CC0. Guard against non-INSN's like CODE_LABEL. */
+
+ if ((GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ && reg_referenced_p (cc0_rtx, PATTERN (insn)))
+ insn = prev_nonnote_insn (insn);
+#endif
+
+ /* Get the pattern to emit and update our status. */
+ if (save_p)
+ {
+ int i, j, k;
+ int ok;
+
+ /* See if we can save several registers with a single instruction.
+ Work backwards to the single register case. */
+ for (i = MOVE_MAX / UNITS_PER_WORD; i > 0; i--)
+ {
+ ok = 1;
+ if (regno_save_mem[regno][i] != 0)
+ for (j = 0; j < i; j++)
+ {
+ if (! call_used_regs[regno + j] || call_fixed_regs[regno + j]
+ || ! TEST_HARD_REG_BIT (hard_regs_live, regno + j)
+ || TEST_HARD_REG_BIT (hard_regs_saved, regno + j))
+ ok = 0;
+ }
+ else
+ continue;
+
+	  /* Must do this one save at a time.  */
+ if (! ok)
+ continue;
+
+ pat = gen_rtx (SET, VOIDmode, regno_save_mem[regno][i],
+ gen_rtx (REG, GET_MODE (regno_save_mem[regno][i]), regno));
+ code = reg_save_code[regno][i];
+
+ /* Set hard_regs_saved for all the registers we saved. */
+ for (k = 0; k < i; k++)
+ {
+ SET_HARD_REG_BIT (hard_regs_saved, regno + k);
+ SET_HARD_REG_BIT (hard_regs_need_restore, regno + k);
+ n_regs_saved++;
+ }
+
+ numregs = i;
+ break;
+ }
+ }
+ else
+ {
+ int i, j, k;
+ int ok;
+
+ /* See if we can restore `maxrestore' registers at once. Work
+ backwards to the single register case. */
+ for (i = maxrestore; i > 0; i--)
+ {
+ ok = 1;
+ if (regno_save_mem[regno][i])
+ for (j = 0; j < i; j++)
+ {
+ if (! TEST_HARD_REG_BIT (hard_regs_need_restore, regno + j))
+ ok = 0;
+ }
+ else
+ continue;
+
+	  /* Must do this one restore at a time.  */
+ if (! ok)
+ continue;
+
+ pat = gen_rtx (SET, VOIDmode,
+ gen_rtx (REG, GET_MODE (regno_save_mem[regno][i]),
+ regno),
+ regno_save_mem[regno][i]);
+ code = reg_restore_code[regno][i];
+
+ /* Clear status for all registers we restored. */
+ for (k = 0; k < i; k++)
+ {
+ CLEAR_HARD_REG_BIT (hard_regs_need_restore, regno + k);
+ n_regs_saved--;
+ }
+
+ numregs = i;
+ break;
+ }
+ }
+ /* Emit the insn and set the code and mode. */
+
+ insn = emit_insn_before (pat, insn);
+ PUT_MODE (insn, insn_mode);
+ INSN_CODE (insn) = code;
+
+  /* Tell our callers how many extra registers we saved/restored.  */
+ return numregs - 1;
+}
diff --git a/gnu/usr.bin/cc/cc_int/calls.c b/gnu/usr.bin/cc/cc_int/calls.c
new file mode 100644
index 0000000..f035079
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/calls.c
@@ -0,0 +1,3061 @@
+/* Convert function calls to rtl insns, for GNU C compiler.
+ Copyright (C) 1989, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include "config.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "expr.h"
+#ifdef __STDC__
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include "insn-flags.h"
+
+/* Decide whether a function's arguments should be processed
+ from first to last or from last to first.
+
+ They should if the stack and args grow in opposite directions, but
+ only if we have push insns. */
+
+#ifdef PUSH_ROUNDING
+
+#if defined (STACK_GROWS_DOWNWARD) != defined (ARGS_GROW_DOWNWARD)
+#define PUSH_ARGS_REVERSED /* If it's last to first */
+#endif
+
+#endif
+
+/* Like STACK_BOUNDARY but in units of bytes, not bits. */
+#define STACK_BYTES (STACK_BOUNDARY / BITS_PER_UNIT)
+
+/* Data structure and subroutines used within expand_call. */
+
+struct arg_data
+{
+ /* Tree node for this argument. */
+ tree tree_value;
+ /* Mode for value; TYPE_MODE unless promoted. */
+ enum machine_mode mode;
+ /* Current RTL value for argument, or 0 if it isn't precomputed. */
+ rtx value;
+  /* Initially-computed RTL value for argument; only for const functions.  */
+ rtx initial_value;
+ /* Register to pass this argument in, 0 if passed on stack, or an
+ EXPR_LIST if the arg is to be copied into multiple different
+ registers. */
+ rtx reg;
+ /* If REG was promoted from the actual mode of the argument expression,
+ indicates whether the promotion is sign- or zero-extended. */
+ int unsignedp;
+ /* Number of registers to use. 0 means put the whole arg in registers.
+ Also 0 if not passed in registers. */
+ int partial;
+ /* Non-zero if argument must be passed on stack.
+ Note that some arguments may be passed on the stack
+ even though pass_on_stack is zero, just because FUNCTION_ARG says so.
+ pass_on_stack identifies arguments that *cannot* go in registers. */
+ int pass_on_stack;
+ /* Offset of this argument from beginning of stack-args. */
+ struct args_size offset;
+ /* Similar, but offset to the start of the stack slot. Different from
+ OFFSET if this arg pads downward. */
+ struct args_size slot_offset;
+  /* Size of this argument on the stack, rounded up for any padding it gets;
+     parts of the argument passed in registers do not count.
+ If REG_PARM_STACK_SPACE is defined, then register parms
+ are counted here as well. */
+ struct args_size size;
+ /* Location on the stack at which parameter should be stored. The store
+ has already been done if STACK == VALUE. */
+ rtx stack;
+ /* Location on the stack of the start of this argument slot. This can
+ differ from STACK if this arg pads downward. This location is known
+ to be aligned to FUNCTION_ARG_BOUNDARY. */
+ rtx stack_slot;
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* Place that this stack area has been saved, if needed. */
+ rtx save_area;
+#endif
+#ifdef STRICT_ALIGNMENT
+ /* If an argument's alignment does not permit direct copying into registers,
+ copy in smaller-sized pieces into pseudos. These are stored in a
+ block pointed to by this field. The next field says how many
+ word-sized pseudos we made. */
+ rtx *aligned_regs;
+ int n_aligned_regs;
+#endif
+};
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+/* A vector of one char per byte of stack space.  A byte is non-zero if
+ the corresponding stack location has been used.
+ This vector is used to prevent a function call within an argument from
+ clobbering any stack already set up. */
+static char *stack_usage_map;
+
+/* Size of STACK_USAGE_MAP. */
+static int highest_outgoing_arg_in_use;
+
+/* stack_arg_under_construction is nonzero when an argument may be
+ initialized with a constructor call (including a C function that
+ returns a BLKmode struct) and expand_call must take special action
+ to make sure the object being constructed does not overlap the
+ argument list for the constructor call. */
+int stack_arg_under_construction;
+#endif
+
+static int calls_function PROTO((tree, int));
+static int calls_function_1 PROTO((tree, int));
+static void emit_call_1 PROTO((rtx, tree, int, int, rtx, rtx, int,
+ rtx, int));
+static void store_one_arg PROTO ((struct arg_data *, rtx, int, int,
+ tree, int));
+
+/* If WHICH is 1, return 1 if EXP contains a call to the built-in function
+ `alloca'.
+
+ If WHICH is 0, return 1 if EXP contains a call to any function.
+   Actually, we only need to return 1 if evaluating EXP would require pushing
+ arguments on the stack, but that is too difficult to compute, so we just
+ assume any function call might require the stack. */
+
+static tree calls_function_save_exprs;
+
+static int
+calls_function (exp, which)
+ tree exp;
+ int which;
+{
+ int val;
+ calls_function_save_exprs = 0;
+ val = calls_function_1 (exp, which);
+ calls_function_save_exprs = 0;
+ return val;
+}
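+
+/* For example, calls_function (exp, 1) is nonzero for `f (alloca (n))'
+   but zero for `f (n)', while calls_function (exp, 0) is nonzero for
+   both.  */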
+
+static int
+calls_function_1 (exp, which)
+ tree exp;
+ int which;
+{
+ register int i;
+ enum tree_code code = TREE_CODE (exp);
+ int type = TREE_CODE_CLASS (code);
+ int length = tree_code_length[(int) code];
+
+  /* If this code is language-specific, we don't know what it will do.  */
+ if ((int) code >= NUM_TREE_CODES)
+ return 1;
+
+ /* Only expressions and references can contain calls. */
+ if (type != 'e' && type != '<' && type != '1' && type != '2' && type != 'r'
+ && type != 'b')
+ return 0;
+
+ switch (code)
+ {
+ case CALL_EXPR:
+ if (which == 0)
+ return 1;
+ else if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
+ && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+ == FUNCTION_DECL))
+ {
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+
+ if ((DECL_BUILT_IN (fndecl)
+ && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA)
+ || (DECL_SAVED_INSNS (fndecl)
+ && (FUNCTION_FLAGS (DECL_SAVED_INSNS (fndecl))
+ & FUNCTION_FLAGS_CALLS_ALLOCA)))
+ return 1;
+ }
+
+ /* Third operand is RTL. */
+ length = 2;
+ break;
+
+ case SAVE_EXPR:
+ if (SAVE_EXPR_RTL (exp) != 0)
+ return 0;
+ if (value_member (exp, calls_function_save_exprs))
+ return 0;
+ calls_function_save_exprs = tree_cons (NULL_TREE, exp,
+ calls_function_save_exprs);
+ return (TREE_OPERAND (exp, 0) != 0
+ && calls_function_1 (TREE_OPERAND (exp, 0), which));
+
+ case BLOCK:
+ {
+ register tree local;
+
+ for (local = BLOCK_VARS (exp); local; local = TREE_CHAIN (local))
+ if (DECL_INITIAL (local) != 0
+ && calls_function_1 (DECL_INITIAL (local), which))
+ return 1;
+ }
+ {
+ register tree subblock;
+
+ for (subblock = BLOCK_SUBBLOCKS (exp);
+ subblock;
+ subblock = TREE_CHAIN (subblock))
+ if (calls_function_1 (subblock, which))
+ return 1;
+ }
+ return 0;
+
+ case METHOD_CALL_EXPR:
+ length = 3;
+ break;
+
+ case WITH_CLEANUP_EXPR:
+ length = 1;
+ break;
+
+ case RTL_EXPR:
+ return 0;
+ }
+
+ for (i = 0; i < length; i++)
+ if (TREE_OPERAND (exp, i) != 0
+ && calls_function_1 (TREE_OPERAND (exp, i), which))
+ return 1;
+
+ return 0;
+}
+
+/* Force FUNEXP into a form suitable for the address of a CALL,
+ and return that as an rtx. Also load the static chain register
+ if FNDECL is a nested function.
+
+ CALL_FUSAGE points to a variable holding the prospective
+ CALL_INSN_FUNCTION_USAGE information. */
+
+rtx
+prepare_call_address (funexp, fndecl, call_fusage, reg_parm_seen)
+ rtx funexp;
+ tree fndecl;
+ rtx *call_fusage;
+ int reg_parm_seen;
+{
+ rtx static_chain_value = 0;
+
+ funexp = protect_from_queue (funexp, 0);
+
+ if (fndecl != 0)
+ /* Get possible static chain value for nested function in C. */
+ static_chain_value = lookup_static_chain (fndecl);
+
+ /* Make a valid memory address and copy constants thru pseudo-regs,
+ but not for a constant address if -fno-function-cse. */
+ if (GET_CODE (funexp) != SYMBOL_REF)
+ funexp =
+#ifdef SMALL_REGISTER_CLASSES
+ /* If we are using registers for parameters, force the
+ function address into a register now. */
+ reg_parm_seen ? force_not_mem (memory_address (FUNCTION_MODE, funexp))
+ :
+#endif
+ memory_address (FUNCTION_MODE, funexp);
+ else
+ {
+#ifndef NO_FUNCTION_CSE
+ if (optimize && ! flag_no_function_cse)
+#ifdef NO_RECURSIVE_FUNCTION_CSE
+ if (fndecl != current_function_decl)
+#endif
+ funexp = force_reg (Pmode, funexp);
+#endif
+ }
+
+ if (static_chain_value != 0)
+ {
+ emit_move_insn (static_chain_rtx, static_chain_value);
+
+ use_reg (call_fusage, static_chain_rtx);
+ }
+
+ return funexp;
+}
+
+/* Generate instructions to call function FUNEXP,
+ and optionally pop the results.
+ The CALL_INSN is the first insn generated.
+
+ FUNTYPE is the data type of the function, or, for a library call,
+ the identifier for the name of the call. This is given to the
+ macro RETURN_POPS_ARGS to determine whether this function pops its own args.
+
+ STACK_SIZE is the number of bytes of arguments on the stack,
+ rounded up to STACK_BOUNDARY; zero if the size is variable.
+ This is both to put into the call insn and
+ to generate explicit popping code if necessary.
+
+ STRUCT_VALUE_SIZE is the number of bytes wanted in a structure value.
+ It is zero if this call doesn't want a structure value.
+
+ NEXT_ARG_REG is the rtx that results from executing
+ FUNCTION_ARG (args_so_far, VOIDmode, void_type_node, 1)
+ just after all the args have had their registers assigned.
+ This could be whatever you like, but normally it is the first
+ arg-register beyond those used for args in this call,
+ or 0 if all the arg-registers are used in this call.
+ It is passed on to `gen_call' so you can put this info in the call insn.
+
+ VALREG is a hard register in which a value is returned,
+ or 0 if the call does not return a value.
+
+ OLD_INHIBIT_DEFER_POP is the value that `inhibit_defer_pop' had before
+ the args to this call were processed.
+ We restore `inhibit_defer_pop' to that value.
+
+ CALL_FUSAGE is either empty or an EXPR_LIST of USE expressions that
+ denote registers used by the called function.
+
+ IS_CONST is true if this is a `const' call. */
+
+static void
+emit_call_1 (funexp, funtype, stack_size, struct_value_size, next_arg_reg,
+ valreg, old_inhibit_defer_pop, call_fusage, is_const)
+ rtx funexp;
+ tree funtype;
+ int stack_size;
+ int struct_value_size;
+ rtx next_arg_reg;
+ rtx valreg;
+ int old_inhibit_defer_pop;
+ rtx call_fusage;
+ int is_const;
+{
+ rtx stack_size_rtx = GEN_INT (stack_size);
+ rtx struct_value_size_rtx = GEN_INT (struct_value_size);
+ rtx call_insn;
+ int already_popped = 0;
+
+ /* Ensure address is valid. SYMBOL_REF is already valid, so no need,
+ and we don't want to load it into a register as an optimization,
+ because prepare_call_address already did it if it should be done. */
+ if (GET_CODE (funexp) != SYMBOL_REF)
+ funexp = memory_address (FUNCTION_MODE, funexp);
+
+#ifndef ACCUMULATE_OUTGOING_ARGS
+#if defined (HAVE_call_pop) && defined (HAVE_call_value_pop)
+ if (HAVE_call_pop && HAVE_call_value_pop
+ && (RETURN_POPS_ARGS (funtype, stack_size) > 0 || stack_size == 0))
+ {
+ rtx n_pop = GEN_INT (RETURN_POPS_ARGS (funtype, stack_size));
+ rtx pat;
+
+ /* If this subroutine pops its own args, record that in the call insn
+ if possible, for the sake of frame pointer elimination. */
+ if (valreg)
+ pat = gen_call_value_pop (valreg,
+ gen_rtx (MEM, FUNCTION_MODE, funexp),
+ stack_size_rtx, next_arg_reg, n_pop);
+ else
+ pat = gen_call_pop (gen_rtx (MEM, FUNCTION_MODE, funexp),
+ stack_size_rtx, next_arg_reg, n_pop);
+
+ emit_call_insn (pat);
+ already_popped = 1;
+ }
+ else
+#endif
+#endif
+
+#if defined (HAVE_call) && defined (HAVE_call_value)
+ if (HAVE_call && HAVE_call_value)
+ {
+ if (valreg)
+ emit_call_insn (gen_call_value (valreg,
+ gen_rtx (MEM, FUNCTION_MODE, funexp),
+ stack_size_rtx, next_arg_reg,
+ NULL_RTX));
+ else
+ emit_call_insn (gen_call (gen_rtx (MEM, FUNCTION_MODE, funexp),
+ stack_size_rtx, next_arg_reg,
+ struct_value_size_rtx));
+ }
+ else
+#endif
+ abort ();
+
+ /* Find the CALL insn we just emitted. */
+ for (call_insn = get_last_insn ();
+ call_insn && GET_CODE (call_insn) != CALL_INSN;
+ call_insn = PREV_INSN (call_insn))
+ ;
+
+ if (! call_insn)
+ abort ();
+
+ /* Put the register usage information on the CALL. If there is already
+ some usage information, put ours at the end. */
+ if (CALL_INSN_FUNCTION_USAGE (call_insn))
+ {
+ rtx link;
+
+ for (link = CALL_INSN_FUNCTION_USAGE (call_insn); XEXP (link, 1) != 0;
+ link = XEXP (link, 1))
+ ;
+
+ XEXP (link, 1) = call_fusage;
+ }
+ else
+ CALL_INSN_FUNCTION_USAGE (call_insn) = call_fusage;
+
+ /* If this is a const call, then set the insn's unchanging bit. */
+ if (is_const)
+ CONST_CALL_P (call_insn) = 1;
+
+ /* Restore this now, so that we do defer pops for this call's args
+ if the context of the call as a whole permits. */
+ inhibit_defer_pop = old_inhibit_defer_pop;
+
+#ifndef ACCUMULATE_OUTGOING_ARGS
+ /* If returning from the subroutine does not automatically pop the args,
+ we need an instruction to pop them sooner or later.
+ Perhaps do it now; perhaps just record how much space to pop later.
+
+ If returning from the subroutine does pop the args, indicate that the
+ stack pointer will be changed. */
+
+ if (stack_size != 0 && RETURN_POPS_ARGS (funtype, stack_size) > 0)
+ {
+ if (!already_popped)
+ CALL_INSN_FUNCTION_USAGE (call_insn) =
+ gen_rtx (EXPR_LIST, CLOBBER, stack_pointer_rtx,
+ CALL_INSN_FUNCTION_USAGE (call_insn));
+ stack_size -= RETURN_POPS_ARGS (funtype, stack_size);
+ stack_size_rtx = GEN_INT (stack_size);
+ }
+
+ if (stack_size != 0)
+ {
+ if (flag_defer_pop && inhibit_defer_pop == 0 && !is_const)
+ pending_stack_adjust += stack_size;
+ else
+ adjust_stack (stack_size_rtx);
+ }
+#endif
+}
+
+/* Generate all the code for a function call
+ and return an rtx for its value.
+ Store the value in TARGET (specified as an rtx) if convenient.
+ If the value is stored in TARGET then TARGET is returned.
+ If IGNORE is nonzero, then we ignore the value of the function call. */
+
+rtx
+expand_call (exp, target, ignore)
+ tree exp;
+ rtx target;
+ int ignore;
+{
+ /* List of actual parameters. */
+ tree actparms = TREE_OPERAND (exp, 1);
+ /* RTX for the function to be called. */
+ rtx funexp;
+ /* Tree node for the function to be called (not the address!). */
+ tree funtree;
+ /* Data type of the function. */
+ tree funtype;
+ /* Declaration of the function being called,
+ or 0 if the function is computed (not known by name). */
+ tree fndecl = 0;
+ char *name = 0;
+
+ /* Register in which non-BLKmode value will be returned,
+ or 0 if no value or if value is BLKmode. */
+ rtx valreg;
+ /* Address where we should return a BLKmode value;
+ 0 if value not BLKmode. */
+ rtx structure_value_addr = 0;
+ /* Nonzero if that address is being passed by treating it as
+ an extra, implicit first parameter. Otherwise,
+ it is passed by being copied directly into struct_value_rtx. */
+ int structure_value_addr_parm = 0;
+ /* Size of aggregate value wanted, or zero if none wanted
+ or if we are using the non-reentrant PCC calling convention
+ or expecting the value in registers. */
+ int struct_value_size = 0;
+ /* Nonzero if called function returns an aggregate in memory PCC style,
+ by returning the address of where to find it. */
+ int pcc_struct_value = 0;
+
+ /* Number of actual parameters in this call, including struct value addr. */
+ int num_actuals;
+ /* Number of named args. Args after this are anonymous ones
+ and they must all go on the stack. */
+ int n_named_args;
+ /* Count arg position in order args appear. */
+ int argpos;
+
+ /* Vector of information about each argument.
+ Arguments are numbered in the order they will be pushed,
+ not the order they are written. */
+ struct arg_data *args;
+
+ /* Total size in bytes of all the stack-parms scanned so far. */
+ struct args_size args_size;
+ /* Size of arguments before any adjustments (such as rounding). */
+ struct args_size original_args_size;
+ /* Data on reg parms scanned so far. */
+ CUMULATIVE_ARGS args_so_far;
+ /* Nonzero if a reg parm has been scanned. */
+ int reg_parm_seen;
+ /* Nonzero if this is an indirect function call. */
+ int current_call_is_indirect = 0;
+
+ /* Nonzero if we must avoid push-insns in the args for this call.
+ If stack space is allocated for register parameters, but not by the
+ caller, then it is preallocated in the fixed part of the stack frame.
+ So the entire argument block must then be preallocated (i.e., we
+ ignore PUSH_ROUNDING in that case). */
+
+#if defined(REG_PARM_STACK_SPACE) && ! defined(OUTGOING_REG_PARM_STACK_SPACE)
+ int must_preallocate = 1;
+#else
+#ifdef PUSH_ROUNDING
+ int must_preallocate = 0;
+#else
+ int must_preallocate = 1;
+#endif
+#endif
+
+ /* Size of the stack reserved for parameter registers. */
+ int reg_parm_stack_space = 0;
+
+ /* 1 if scanning parms front to back, -1 if scanning back to front. */
+ int inc;
+ /* Address of space preallocated for stack parms
+ (on machines that lack push insns), or 0 if space not preallocated. */
+ rtx argblock = 0;
+
+ /* Nonzero if it is plausible that this is a call to alloca. */
+ int may_be_alloca;
+ /* Nonzero if this is a call to setjmp or a related function. */
+ int returns_twice;
+ /* Nonzero if this is a call to `longjmp'. */
+ int is_longjmp;
+ /* Nonzero if this is a call to an inline function. */
+ int is_integrable = 0;
+ /* Nonzero if this is a call to a `const' function.
+ Note that only explicitly named functions are handled as `const' here. */
+ int is_const = 0;
+ /* Nonzero if this is a call to a `volatile' function. */
+ int is_volatile = 0;
+#if defined(ACCUMULATE_OUTGOING_ARGS) && defined(REG_PARM_STACK_SPACE)
+ /* Define the boundary of the register parm stack space that needs to be
+     saved, if any.  */
+ int low_to_save = -1, high_to_save;
+ rtx save_area = 0; /* Place that it is saved */
+#endif
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ int initial_highest_arg_in_use = highest_outgoing_arg_in_use;
+ char *initial_stack_usage_map = stack_usage_map;
+#endif
+
+ rtx old_stack_level = 0;
+ int old_pending_adj = 0;
+ int old_stack_arg_under_construction;
+ int old_inhibit_defer_pop = inhibit_defer_pop;
+ tree old_cleanups = cleanups_this_call;
+ rtx call_fusage = 0;
+ register tree p;
+ register int i, j;
+
+ /* See if we can find a DECL-node for the actual function.
+ As a result, decide whether this is a call to an integrable function. */
+
+ p = TREE_OPERAND (exp, 0);
+ if (TREE_CODE (p) == ADDR_EXPR)
+ {
+ fndecl = TREE_OPERAND (p, 0);
+ if (TREE_CODE (fndecl) != FUNCTION_DECL)
+ fndecl = 0;
+ else
+ {
+ if (!flag_no_inline
+ && fndecl != current_function_decl
+ && DECL_SAVED_INSNS (fndecl))
+ is_integrable = 1;
+ else if (! TREE_ADDRESSABLE (fndecl))
+ {
+ /* In case this function later becomes inlinable,
+ record that there was already a non-inline call to it.
+
+ Use abstraction instead of setting TREE_ADDRESSABLE
+ directly. */
+ if (DECL_INLINE (fndecl) && warn_inline && !flag_no_inline)
+ warning_with_decl (fndecl, "can't inline call to `%s'");
+ mark_addressable (fndecl);
+ }
+
+ if (TREE_READONLY (fndecl) && ! TREE_THIS_VOLATILE (fndecl)
+ && TYPE_MODE (TREE_TYPE (exp)) != VOIDmode)
+ is_const = 1;
+
+ if (TREE_THIS_VOLATILE (fndecl))
+ is_volatile = 1;
+ }
+ }
+
+  /* If we don't have a specific function to call, see if we have a
+ constant or `noreturn' function from the type. */
+ if (fndecl == 0)
+ {
+ is_const = TREE_READONLY (TREE_TYPE (TREE_TYPE (p)));
+ is_volatile = TREE_THIS_VOLATILE (TREE_TYPE (TREE_TYPE (p)));
+ }
+
+#ifdef REG_PARM_STACK_SPACE
+#ifdef MAYBE_REG_PARM_STACK_SPACE
+ reg_parm_stack_space = MAYBE_REG_PARM_STACK_SPACE;
+#else
+ reg_parm_stack_space = REG_PARM_STACK_SPACE (fndecl);
+#endif
+#endif
+
+ /* Warn if this value is an aggregate type,
+ regardless of which calling convention we are using for it. */
+ if (warn_aggregate_return && AGGREGATE_TYPE_P (TREE_TYPE (exp)))
+ warning ("function call has aggregate value");
+
+ /* Set up a place to return a structure. */
+
+ /* Cater to broken compilers. */
+ if (aggregate_value_p (exp))
+ {
+ /* This call returns a big structure. */
+ is_const = 0;
+
+#ifdef PCC_STATIC_STRUCT_RETURN
+ {
+ pcc_struct_value = 1;
+ /* Easier than making that case work right. */
+ if (is_integrable)
+ {
+ /* In case this is a static function, note that it has been
+ used. */
+ if (! TREE_ADDRESSABLE (fndecl))
+ mark_addressable (fndecl);
+ is_integrable = 0;
+ }
+ }
+#else /* not PCC_STATIC_STRUCT_RETURN */
+ {
+ struct_value_size = int_size_in_bytes (TREE_TYPE (exp));
+
+ if (struct_value_size < 0)
+ abort ();
+
+ if (target && GET_CODE (target) == MEM)
+ structure_value_addr = XEXP (target, 0);
+ else
+ {
+ /* Assign a temporary on the stack to hold the value. */
+
+ /* For variable-sized objects, we must be called with a target
+ specified. If we were to allocate space on the stack here,
+ we would have no way of knowing when to free it. */
+
+ structure_value_addr
+ = XEXP (assign_stack_temp (BLKmode, struct_value_size, 1), 0);
+ target = 0;
+ }
+ }
+#endif /* not PCC_STATIC_STRUCT_RETURN */
+ }
+
+ /* If called function is inline, try to integrate it. */
+
+ if (is_integrable)
+ {
+ rtx temp;
+ rtx before_call = get_last_insn ();
+
+ temp = expand_inline_function (fndecl, actparms, target,
+ ignore, TREE_TYPE (exp),
+ structure_value_addr);
+
+ /* If inlining succeeded, return. */
+ if ((HOST_WIDE_INT) temp != -1)
+ {
+ if (flag_short_temps)
+ {
+ /* Perform all cleanups needed for the arguments of this
+ call (i.e. destructors in C++). It is ok if these
+ destructors clobber RETURN_VALUE_REG, because the
+ only time we care about this is when TARGET is that
+ register. But in C++, we take care to never return
+ that register directly. */
+ expand_cleanups_to (old_cleanups);
+ }
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* If the outgoing argument list must be preserved, push
+ the stack before executing the inlined function if it
+ makes any calls. */
+
+ for (i = reg_parm_stack_space - 1; i >= 0; i--)
+ if (i < highest_outgoing_arg_in_use && stack_usage_map[i] != 0)
+ break;
+
+ if (stack_arg_under_construction || i >= 0)
+ {
+ rtx insn = NEXT_INSN (before_call), seq;
+
+ /* Look for a call in the inline function code.
+ If OUTGOING_ARGS_SIZE (DECL_SAVED_INSNS (fndecl)) is
+ nonzero then there is a call and it is not necessary
+ to scan the insns. */
+
+ if (OUTGOING_ARGS_SIZE (DECL_SAVED_INSNS (fndecl)) == 0)
+ for (; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == CALL_INSN)
+ break;
+
+ if (insn)
+ {
+ /* Reserve enough stack space so that the largest
+ argument list of any function call in the inline
+ function does not overlap the argument list being
+ evaluated. This is usually an overestimate because
+ allocate_dynamic_stack_space reserves space for an
+ outgoing argument list in addition to the requested
+ space, but there is no way to ask for stack space such
+ that an argument list of a certain length can be
+ safely constructed. */
+
+ int adjust = OUTGOING_ARGS_SIZE (DECL_SAVED_INSNS (fndecl));
+#ifdef REG_PARM_STACK_SPACE
+ /* Add the stack space reserved for register arguments
+ in the inline function. What is really needed is the
+ largest value of reg_parm_stack_space in the inline
+ function, but that is not available. Using the current
+ value of reg_parm_stack_space is wrong, but gives
+ correct results on all supported machines. */
+ adjust += reg_parm_stack_space;
+#endif
+ start_sequence ();
+ emit_stack_save (SAVE_BLOCK, &old_stack_level, NULL_RTX);
+ allocate_dynamic_stack_space (GEN_INT (adjust),
+ NULL_RTX, BITS_PER_UNIT);
+ seq = get_insns ();
+ end_sequence ();
+ emit_insns_before (seq, NEXT_INSN (before_call));
+ emit_stack_restore (SAVE_BLOCK, old_stack_level, NULL_RTX);
+ }
+ }
+#endif
+
+ /* If the result is equivalent to TARGET, return TARGET to simplify
+ checks in store_expr. They can be equivalent but not equal in the
+ case of a function that returns BLKmode. */
+ if (temp != target && rtx_equal_p (temp, target))
+ return target;
+ return temp;
+ }
+
+ /* If inlining failed, mark FNDECL as needing to be compiled
+ separately after all. If function was declared inline,
+ give a warning. */
+ if (DECL_INLINE (fndecl) && warn_inline && !flag_no_inline
+ && ! TREE_ADDRESSABLE (fndecl))
+ warning_with_decl (fndecl, "can't inline call to `%s'");
+ mark_addressable (fndecl);
+ }
+
+ /* When calling a const function, we must pop the stack args right away,
+ so that the pop is deleted or moved with the call. */
+ if (is_const)
+ NO_DEFER_POP;
+
+ function_call_count++;
+
+ if (fndecl && DECL_NAME (fndecl))
+ name = IDENTIFIER_POINTER (DECL_NAME (fndecl));
+
+ /* On some machines (such as the PA) indirect calls have a different
+ calling convention than normal calls. FUNCTION_ARG in the target
+ description can look at current_call_is_indirect to determine which
+ calling convention to use. */
+ current_call_is_indirect = (fndecl == 0);
+#if 0
+ = TREE_CODE (TREE_OPERAND (exp, 0)) == NON_LVALUE_EXPR ? 1 : 0;
+#endif
+
+#if 0
+ /* Unless it's a call to a specific function that isn't alloca,
+ if it has one argument, we must assume it might be alloca. */
+
+ may_be_alloca =
+ (!(fndecl != 0 && strcmp (name, "alloca"))
+ && actparms != 0
+ && TREE_CHAIN (actparms) == 0);
+#else
+ /* We assume that alloca will always be called by name. It
+ makes no sense to pass it as a pointer-to-function to
+ anything that does not understand its behavior. */
+ may_be_alloca =
+ (name && ((IDENTIFIER_LENGTH (DECL_NAME (fndecl)) == 6
+ && name[0] == 'a'
+ && ! strcmp (name, "alloca"))
+ || (IDENTIFIER_LENGTH (DECL_NAME (fndecl)) == 16
+ && name[0] == '_'
+ && ! strcmp (name, "__builtin_alloca"))));
+#endif
+
+ /* See if this is a call to a function that can return more than once
+ or a call to longjmp. */
+
+ returns_twice = 0;
+ is_longjmp = 0;
+
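+  /* All the names tested below are short, so a long name cannot match
+     any of them.  */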
+ if (name != 0 && IDENTIFIER_LENGTH (DECL_NAME (fndecl)) <= 15)
+ {
+ char *tname = name;
+
+ /* Disregard prefix _, __ or __x. */
+ if (name[0] == '_')
+ {
+ if (name[1] == '_' && name[2] == 'x')
+ tname += 3;
+ else if (name[1] == '_')
+ tname += 2;
+ else
+ tname += 1;
+ }
+
+ if (tname[0] == 's')
+ {
+ returns_twice
+ = ((tname[1] == 'e'
+ && (! strcmp (tname, "setjmp")
+ || ! strcmp (tname, "setjmp_syscall")))
+ || (tname[1] == 'i'
+ && ! strcmp (tname, "sigsetjmp"))
+ || (tname[1] == 'a'
+ && ! strcmp (tname, "savectx")));
+ if (tname[1] == 'i'
+ && ! strcmp (tname, "siglongjmp"))
+ is_longjmp = 1;
+ }
+ else if ((tname[0] == 'q' && tname[1] == 's'
+ && ! strcmp (tname, "qsetjmp"))
+ || (tname[0] == 'v' && tname[1] == 'f'
+ && ! strcmp (tname, "vfork")))
+ returns_twice = 1;
+
+ else if (tname[0] == 'l' && tname[1] == 'o'
+ && ! strcmp (tname, "longjmp"))
+ is_longjmp = 1;
+ }
+
+ if (may_be_alloca)
+ current_function_calls_alloca = 1;
+
+ /* Don't let pending stack adjusts add up to too much.
+ Also, do all pending adjustments now
+ if there is any chance this might be a call to alloca. */
+
+ if (pending_stack_adjust >= 32
+ || (pending_stack_adjust > 0 && may_be_alloca))
+ do_pending_stack_adjust ();
+
+ /* Operand 0 is a pointer-to-function; get the type of the function. */
+ funtype = TREE_TYPE (TREE_OPERAND (exp, 0));
+ if (TREE_CODE (funtype) != POINTER_TYPE)
+ abort ();
+ funtype = TREE_TYPE (funtype);
+
+ /* Push the temporary stack slot level so that we can free any temporaries
+ we make. */
+ push_temp_slots ();
+
+ /* Start updating where the next arg would go. */
+ INIT_CUMULATIVE_ARGS (args_so_far, funtype, NULL_RTX);
+
+ /* If struct_value_rtx is 0, it means pass the address
+ as if it were an extra parameter. */
+ if (structure_value_addr && struct_value_rtx == 0)
+ {
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* If the stack will be adjusted, make sure the structure address
+ does not refer to virtual_outgoing_args_rtx. */
+ rtx temp = (stack_arg_under_construction
+ ? copy_addr_to_reg (structure_value_addr)
+ : force_reg (Pmode, structure_value_addr));
+#else
+ rtx temp = force_reg (Pmode, structure_value_addr);
+#endif
+
+ actparms
+ = tree_cons (error_mark_node,
+ make_tree (build_pointer_type (TREE_TYPE (funtype)),
+ temp),
+ actparms);
+ structure_value_addr_parm = 1;
+ }
+
+ /* Count the arguments and set NUM_ACTUALS. */
+ for (p = actparms, i = 0; p; p = TREE_CHAIN (p)) i++;
+ num_actuals = i;
+
+ /* Compute number of named args.
+ Normally, don't include the last named arg if anonymous args follow.
+ (If no anonymous args follow, the result of list_length
+ is actually one too large.)
+
+ If SETUP_INCOMING_VARARGS is defined, this machine will be able to
+ place unnamed args that were passed in registers into the stack. So
+     treat all args as named.  This allows the insns emitted for a specific
+ argument list to be independent of the function declaration.
+
+ If SETUP_INCOMING_VARARGS is not defined, we do not have any reliable
+ way to pass unnamed args in registers, so we must force them into
+ memory. */
+#ifndef SETUP_INCOMING_VARARGS
+ if (TYPE_ARG_TYPES (funtype) != 0)
+ n_named_args
+ = list_length (TYPE_ARG_TYPES (funtype)) - 1
+ /* Count the struct value address, if it is passed as a parm. */
+ + structure_value_addr_parm;
+ else
+#endif
+ /* If we know nothing, treat all args as named. */
+ n_named_args = num_actuals;
+
+ /* Make a vector to hold all the information about each arg. */
+ args = (struct arg_data *) alloca (num_actuals * sizeof (struct arg_data));
+ bzero ((char *) args, num_actuals * sizeof (struct arg_data));
+
+ args_size.constant = 0;
+ args_size.var = 0;
+
+ /* In this loop, we consider args in the order they are written.
+     We fill up ARGS from the front or from the back if necessary
+ so that in any case the first arg to be pushed ends up at the front. */
+
+#ifdef PUSH_ARGS_REVERSED
+ i = num_actuals - 1, inc = -1;
+ /* In this case, must reverse order of args
+ so that we compute and push the last arg first. */
+#else
+ i = 0, inc = 1;
+#endif
+
+ /* I counts args in order (to be) pushed; ARGPOS counts in order written. */
+ for (p = actparms, argpos = 0; p; p = TREE_CHAIN (p), i += inc, argpos++)
+ {
+ tree type = TREE_TYPE (TREE_VALUE (p));
+ int unsignedp;
+ enum machine_mode mode;
+
+ args[i].tree_value = TREE_VALUE (p);
+
+ /* Replace erroneous argument with constant zero. */
+ if (type == error_mark_node || TYPE_SIZE (type) == 0)
+ args[i].tree_value = integer_zero_node, type = integer_type_node;
+
+ /* Decide where to pass this arg.
+
+ args[i].reg is nonzero if all or part is passed in registers.
+
+ args[i].partial is nonzero if part but not all is passed in registers,
+ and the exact value says how many words are passed in registers.
+
+ args[i].pass_on_stack is nonzero if the argument must at least be
+ computed on the stack. It may then be loaded back into registers
+ if args[i].reg is nonzero.
+
+ These decisions are driven by the FUNCTION_... macros and must agree
+ with those made by function.c. */
+
+ /* See if this argument should be passed by invisible reference. */
+ if ((TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
+ && contains_placeholder_p (TYPE_SIZE (type)))
+ || TYPE_NEEDS_CONSTRUCTING (type)
+#ifdef FUNCTION_ARG_PASS_BY_REFERENCE
+ || FUNCTION_ARG_PASS_BY_REFERENCE (args_so_far, TYPE_MODE (type),
+ type, argpos < n_named_args)
+#endif
+ )
+ {
+#ifdef FUNCTION_ARG_CALLEE_COPIES
+ if (FUNCTION_ARG_CALLEE_COPIES (args_so_far, TYPE_MODE (type), type,
+ argpos < n_named_args)
+ /* If it's in a register, we must make a copy of it too. */
+ /* ??? Is this a sufficient test? Is there a better one? */
+ && !(TREE_CODE (args[i].tree_value) == VAR_DECL
+ && REG_P (DECL_RTL (args[i].tree_value))))
+ {
+ args[i].tree_value = build1 (ADDR_EXPR,
+ build_pointer_type (type),
+ args[i].tree_value);
+ type = build_pointer_type (type);
+ }
+ else
+#endif
+ {
+ /* We make a copy of the object and pass the address to the
+ function being called. */
+ rtx copy;
+
+ if (TYPE_SIZE (type) == 0
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ {
+ /* This is a variable-sized object. Make space on the stack
+ for it. */
+ rtx size_rtx = expr_size (TREE_VALUE (p));
+
+ if (old_stack_level == 0)
+ {
+ emit_stack_save (SAVE_BLOCK, &old_stack_level, NULL_RTX);
+ old_pending_adj = pending_stack_adjust;
+ pending_stack_adjust = 0;
+ }
+
+ copy = gen_rtx (MEM, BLKmode,
+ allocate_dynamic_stack_space (size_rtx,
+ NULL_RTX,
+ TYPE_ALIGN (type)));
+ }
+ else
+ {
+ int size = int_size_in_bytes (type);
+ copy = assign_stack_temp (TYPE_MODE (type), size, 1);
+ }
+
+ MEM_IN_STRUCT_P (copy) = AGGREGATE_TYPE_P (type);
+
+ store_expr (args[i].tree_value, copy, 0);
+
+ args[i].tree_value = build1 (ADDR_EXPR,
+ build_pointer_type (type),
+ make_tree (type, copy));
+ type = build_pointer_type (type);
+ }
+ }
+
+ mode = TYPE_MODE (type);
+ unsignedp = TREE_UNSIGNED (type);
+
+#ifdef PROMOTE_FUNCTION_ARGS
+ mode = promote_mode (type, mode, &unsignedp, 1);
+#endif
+
+ args[i].unsignedp = unsignedp;
+ args[i].mode = mode;
+ args[i].reg = FUNCTION_ARG (args_so_far, mode, type,
+ argpos < n_named_args);
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+ if (args[i].reg)
+ args[i].partial
+ = FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode, type,
+ argpos < n_named_args);
+#endif
+
+ args[i].pass_on_stack = MUST_PASS_IN_STACK (mode, type);
+
+ /* If FUNCTION_ARG returned an (expr_list (nil) FOO), it means that
+ we are to pass this arg in the register(s) designated by FOO, but
+ also to pass it in the stack. */
+ if (args[i].reg && GET_CODE (args[i].reg) == EXPR_LIST
+ && XEXP (args[i].reg, 0) == 0)
+ args[i].pass_on_stack = 1, args[i].reg = XEXP (args[i].reg, 1);
+
+ /* If this is an addressable type, we must preallocate the stack
+ since we must evaluate the object into its final location.
+
+ If this is to be passed in both registers and the stack, it is simpler
+ to preallocate. */
+ if (TREE_ADDRESSABLE (type)
+ || (args[i].pass_on_stack && args[i].reg != 0))
+ must_preallocate = 1;
+
+ /* If this is an addressable type, we cannot pre-evaluate it. Thus,
+ we cannot consider this function call constant. */
+ if (TREE_ADDRESSABLE (type))
+ is_const = 0;
+
+ /* Compute the stack-size of this argument. */
+ if (args[i].reg == 0 || args[i].partial != 0
+#ifdef REG_PARM_STACK_SPACE
+ || reg_parm_stack_space > 0
+#endif
+ || args[i].pass_on_stack)
+ locate_and_pad_parm (mode, type,
+#ifdef STACK_PARMS_IN_REG_PARM_AREA
+ 1,
+#else
+ args[i].reg != 0,
+#endif
+ fndecl, &args_size, &args[i].offset,
+ &args[i].size);
+
+#ifndef ARGS_GROW_DOWNWARD
+ args[i].slot_offset = args_size;
+#endif
+
+#ifndef REG_PARM_STACK_SPACE
+ /* If a part of the arg was put into registers,
+ don't include that part in the amount pushed. */
+ if (! args[i].pass_on_stack)
+ args[i].size.constant -= ((args[i].partial * UNITS_PER_WORD)
+ / (PARM_BOUNDARY / BITS_PER_UNIT)
+ * (PARM_BOUNDARY / BITS_PER_UNIT));
+#endif
+
+ /* Update ARGS_SIZE, the total stack space for args so far. */
+
+ args_size.constant += args[i].size.constant;
+ if (args[i].size.var)
+ {
+ ADD_PARM_SIZE (args_size, args[i].size.var);
+ }
+
+ /* Since the slot offset points to the bottom of the slot,
+ we must record it after incrementing if the args grow down. */
+#ifdef ARGS_GROW_DOWNWARD
+ args[i].slot_offset = args_size;
+
+ args[i].slot_offset.constant = -args_size.constant;
+ if (args_size.var)
+ {
+ SUB_PARM_SIZE (args[i].slot_offset, args_size.var);
+ }
+#endif
+
+ /* Increment ARGS_SO_FAR, which has info about which arg-registers
+ have been used, etc. */
+
+ FUNCTION_ARG_ADVANCE (args_so_far, TYPE_MODE (type), type,
+ argpos < n_named_args);
+ }
+
+#ifdef FINAL_REG_PARM_STACK_SPACE
+ reg_parm_stack_space = FINAL_REG_PARM_STACK_SPACE (args_size.constant,
+ args_size.var);
+#endif
+
+ /* Compute the actual size of the argument block required. The variable
+ and constant sizes must be combined, the size may have to be rounded,
+ and there may be a minimum required size. */
+
+ original_args_size = args_size;
+ if (args_size.var)
+ {
+ /* If this function requires a variable-sized argument list, don't try to
+ make a cse'able block for this call. We may be able to do this
+ eventually, but it is too complicated to keep track of what insns go
+ in the cse'able block and which don't. */
+
+ is_const = 0;
+ must_preallocate = 1;
+
+ args_size.var = ARGS_SIZE_TREE (args_size);
+ args_size.constant = 0;
+
+#ifdef STACK_BOUNDARY
+ if (STACK_BOUNDARY != BITS_PER_UNIT)
+ args_size.var = round_up (args_size.var, STACK_BYTES);
+#endif
+
+#ifdef REG_PARM_STACK_SPACE
+ if (reg_parm_stack_space > 0)
+ {
+ args_size.var
+ = size_binop (MAX_EXPR, args_size.var,
+ size_int (REG_PARM_STACK_SPACE (fndecl)));
+
+#ifndef OUTGOING_REG_PARM_STACK_SPACE
+ /* The area corresponding to register parameters is not to count in
+ the size of the block we need. So make the adjustment. */
+ args_size.var
+ = size_binop (MINUS_EXPR, args_size.var,
+ size_int (reg_parm_stack_space));
+#endif
+ }
+#endif
+ }
+ else
+ {
+#ifdef STACK_BOUNDARY
+ args_size.constant = (((args_size.constant + (STACK_BYTES - 1))
+ / STACK_BYTES) * STACK_BYTES);
+#endif
+
+#ifdef REG_PARM_STACK_SPACE
+ args_size.constant = MAX (args_size.constant,
+ reg_parm_stack_space);
+#ifdef MAYBE_REG_PARM_STACK_SPACE
+ if (reg_parm_stack_space == 0)
+ args_size.constant = 0;
+#endif
+#ifndef OUTGOING_REG_PARM_STACK_SPACE
+ args_size.constant -= reg_parm_stack_space;
+#endif
+#endif
+ }
+
+ /* See if we have or want to preallocate stack space.
+
+ If we would have to push a partially-in-regs parm
+ before other stack parms, preallocate stack space instead.
+
+ If the size of some parm is not a multiple of the required stack
+ alignment, we must preallocate.
+
+ If the total size of arguments that would otherwise create a copy in
+ a temporary (such as a CALL) is more than half the total argument list
+ size, preallocation is faster.
+
+ Another reason to preallocate is if we have a machine (like the m88k)
+ where stack alignment is required to be maintained between every
+ pair of insns, not just when the call is made. However, we assume here
+ that such machines either do not have push insns (and hence preallocation
+ would occur anyway) or the problem is taken care of with
+ PUSH_ROUNDING. */
+
+ if (! must_preallocate)
+ {
+ int partial_seen = 0;
+ int copy_to_evaluate_size = 0;
+
+ for (i = 0; i < num_actuals && ! must_preallocate; i++)
+ {
+ if (args[i].partial > 0 && ! args[i].pass_on_stack)
+ partial_seen = 1;
+ else if (partial_seen && args[i].reg == 0)
+ must_preallocate = 1;
+
+ if (TYPE_MODE (TREE_TYPE (args[i].tree_value)) == BLKmode
+ && (TREE_CODE (args[i].tree_value) == CALL_EXPR
+ || TREE_CODE (args[i].tree_value) == TARGET_EXPR
+ || TREE_CODE (args[i].tree_value) == COND_EXPR
+ || TREE_ADDRESSABLE (TREE_TYPE (args[i].tree_value))))
+ copy_to_evaluate_size
+ += int_size_in_bytes (TREE_TYPE (args[i].tree_value));
+ }
+
+ if (copy_to_evaluate_size * 2 >= args_size.constant
+ && args_size.constant > 0)
+ must_preallocate = 1;
+ }
+
+ /* If the structure value address will reference the stack pointer, we must
+ stabilize it. We don't need to do this if we know that we are not going
+ to adjust the stack pointer in processing this call. */
+
+ if (structure_value_addr
+ && (reg_mentioned_p (virtual_stack_dynamic_rtx, structure_value_addr)
+ || reg_mentioned_p (virtual_outgoing_args_rtx, structure_value_addr))
+ && (args_size.var
+#ifndef ACCUMULATE_OUTGOING_ARGS
+ || args_size.constant
+#endif
+ ))
+ structure_value_addr = copy_to_reg (structure_value_addr);
+
+ /* If this function call is cse'able, precompute all the parameters.
+ Note that if the parameter is constructed into a temporary, this will
+ cause an additional copy because the parameter will be constructed
+ into a temporary location and then copied into the outgoing arguments.
+ If a parameter contains a call to alloca and this function uses the
+ stack, precompute the parameter. */
+
+ /* If we preallocated the stack space, and some arguments must be passed
+ on the stack, then we must precompute any parameter which contains a
+ function call which will store arguments on the stack.
+ Otherwise, evaluating the parameter may clobber previous parameters
+ which have already been stored into the stack. */
+
+ for (i = 0; i < num_actuals; i++)
+ if (is_const
+ || ((args_size.var != 0 || args_size.constant != 0)
+ && calls_function (args[i].tree_value, 1))
+ || (must_preallocate && (args_size.var != 0 || args_size.constant != 0)
+ && calls_function (args[i].tree_value, 0)))
+ {
+ push_temp_slots ();
+
+ args[i].initial_value = args[i].value
+ = expand_expr (args[i].tree_value, NULL_RTX, VOIDmode, 0);
+
+ if (TYPE_MODE (TREE_TYPE (args[i].tree_value)) != args[i].mode)
+ args[i].value
+ = convert_modes (args[i].mode,
+ TYPE_MODE (TREE_TYPE (args[i].tree_value)),
+ args[i].value, args[i].unsignedp);
+
+ preserve_temp_slots (args[i].value);
+ pop_temp_slots ();
+
+ /* ANSI doesn't require a sequence point here,
+ but PCC has one, so this will avoid some problems. */
+ emit_queue ();
+ }
+
+ /* Now we are about to start emitting insns that can be deleted
+ if a libcall is deleted. */
+ if (is_const)
+ start_sequence ();
+
+ /* If we have no actual push instructions, or shouldn't use them,
+ make space for all args right now. */
+
+ if (args_size.var != 0)
+ {
+ if (old_stack_level == 0)
+ {
+ emit_stack_save (SAVE_BLOCK, &old_stack_level, NULL_RTX);
+ old_pending_adj = pending_stack_adjust;
+ pending_stack_adjust = 0;
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* stack_arg_under_construction says whether a stack arg is
+ being constructed at the old stack level. Pushing the stack
+ gets a clean outgoing argument block. */
+ old_stack_arg_under_construction = stack_arg_under_construction;
+ stack_arg_under_construction = 0;
+#endif
+ }
+ argblock = push_block (ARGS_SIZE_RTX (args_size), 0, 0);
+ }
+ else if (must_preallocate)
+ {
+ /* Note that we must go through the motions of allocating an argument
+ block even if the size is zero because we may be storing args
+ in the area reserved for register arguments, which may be part of
+ the stack frame. */
+ int needed = args_size.constant;
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* Store the maximum argument space used. It will be pushed by the
+ prologue.
+
+ Since the stack pointer will never be pushed, it is possible for
+ the evaluation of a parm to clobber something we have already
+ written to the stack. Since most function calls on RISC machines
+ do not use the stack, this is uncommon, but must work correctly.
+
+ Therefore, we save any area of the stack that was already written
+ and that we are using. Here we set up to do this by making a new
+ stack usage map from the old one. The actual save will be done
+ by store_one_arg.
+
+ Another approach might be to try to reorder the argument
+ evaluations to avoid this conflicting stack usage. */
+
+ if (needed > current_function_outgoing_args_size)
+ current_function_outgoing_args_size = needed;
+
+#if defined(REG_PARM_STACK_SPACE) && ! defined(OUTGOING_REG_PARM_STACK_SPACE)
+ /* Since we will be writing into the entire argument area, the
+ map must be allocated for its entire size, not just the part that
+ is the responsibility of the caller. */
+ needed += reg_parm_stack_space;
+#endif
+
+#ifdef ARGS_GROW_DOWNWARD
+ highest_outgoing_arg_in_use = MAX (initial_highest_arg_in_use,
+ needed + 1);
+#else
+ highest_outgoing_arg_in_use = MAX (initial_highest_arg_in_use, needed);
+#endif
+ stack_usage_map = (char *) alloca (highest_outgoing_arg_in_use);
+
+ if (initial_highest_arg_in_use)
+ bcopy (initial_stack_usage_map, stack_usage_map,
+ initial_highest_arg_in_use);
+
+ if (initial_highest_arg_in_use != highest_outgoing_arg_in_use)
+ bzero (&stack_usage_map[initial_highest_arg_in_use],
+ highest_outgoing_arg_in_use - initial_highest_arg_in_use);
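+      /* The space just recorded will be allocated by the prologue,
+	 so nothing needs to be pushed here.  */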
+ needed = 0;
+
+ /* The address of the outgoing argument list must not be copied to a
+ register here, because argblock would be left pointing to the
+ wrong place after the call to allocate_dynamic_stack_space below. */
+
+ argblock = virtual_outgoing_args_rtx;
+
+#else /* not ACCUMULATE_OUTGOING_ARGS */
+ if (inhibit_defer_pop == 0)
+ {
+ /* Try to reuse some or all of the pending_stack_adjust
+ to get this space. Maybe we can avoid any pushing. */
+ if (needed > pending_stack_adjust)
+ {
+ needed -= pending_stack_adjust;
+ pending_stack_adjust = 0;
+ }
+ else
+ {
+ pending_stack_adjust -= needed;
+ needed = 0;
+ }
+ }
+ /* Special case this because overhead of `push_block' in this
+ case is non-trivial. */
+ if (needed == 0)
+ argblock = virtual_outgoing_args_rtx;
+ else
+ argblock = push_block (GEN_INT (needed), 0, 0);
+
+ /* We only really need to call `copy_to_reg' in the case where push
+ insns are going to be used to pass ARGBLOCK to a function
+ call in ARGS. In that case, the stack pointer changes value
+ from the allocation point to the call point, and hence
+ the value of VIRTUAL_OUTGOING_ARGS_RTX changes as well.
+ But might as well always do it. */
+ argblock = copy_to_reg (argblock);
+#endif /* not ACCUMULATE_OUTGOING_ARGS */
+ }
+
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* The save/restore code in store_one_arg handles all cases except one:
+ a constructor call (including a C function returning a BLKmode struct)
+ to initialize an argument. */
+ if (stack_arg_under_construction)
+ {
+#if defined(REG_PARM_STACK_SPACE) && ! defined(OUTGOING_REG_PARM_STACK_SPACE)
+ rtx push_size = GEN_INT (reg_parm_stack_space + args_size.constant);
+#else
+ rtx push_size = GEN_INT (args_size.constant);
+#endif
+ if (old_stack_level == 0)
+ {
+ emit_stack_save (SAVE_BLOCK, &old_stack_level, NULL_RTX);
+ old_pending_adj = pending_stack_adjust;
+ pending_stack_adjust = 0;
+ /* stack_arg_under_construction says whether a stack arg is
+ being constructed at the old stack level. Pushing the stack
+ gets a clean outgoing argument block. */
+ old_stack_arg_under_construction = stack_arg_under_construction;
+ stack_arg_under_construction = 0;
+ /* Make a new map for the new argument list. */
+ stack_usage_map = (char *)alloca (highest_outgoing_arg_in_use);
+ bzero (stack_usage_map, highest_outgoing_arg_in_use);
+ highest_outgoing_arg_in_use = 0;
+ }
+ allocate_dynamic_stack_space (push_size, NULL_RTX, BITS_PER_UNIT);
+ }
+ /* If argument evaluation might modify the stack pointer, copy the
+ address of the argument list to a register. */
+ for (i = 0; i < num_actuals; i++)
+ if (args[i].pass_on_stack)
+ {
+ argblock = copy_addr_to_reg (argblock);
+ break;
+ }
+#endif
+
+
+ /* If we preallocated stack space, compute the address of each argument.
+ We need not ensure it is a valid memory address here; it will be
+ validized when it is used. */
+ if (argblock)
+ {
+ rtx arg_reg = argblock;
+ int arg_offset = 0;
+
+ if (GET_CODE (argblock) == PLUS)
+ arg_reg = XEXP (argblock, 0), arg_offset = INTVAL (XEXP (argblock, 1));
+
+ for (i = 0; i < num_actuals; i++)
+ {
+ rtx offset = ARGS_SIZE_RTX (args[i].offset);
+ rtx slot_offset = ARGS_SIZE_RTX (args[i].slot_offset);
+ rtx addr;
+
+ /* Skip this parm if it will not be passed on the stack. */
+ if (! args[i].pass_on_stack && args[i].reg != 0)
+ continue;
+
+ if (GET_CODE (offset) == CONST_INT)
+ addr = plus_constant (arg_reg, INTVAL (offset));
+ else
+ addr = gen_rtx (PLUS, Pmode, arg_reg, offset);
+
+ addr = plus_constant (addr, arg_offset);
+ args[i].stack = gen_rtx (MEM, args[i].mode, addr);
+ MEM_IN_STRUCT_P (args[i].stack)
+ = AGGREGATE_TYPE_P (TREE_TYPE (args[i].tree_value));
+
+ if (GET_CODE (slot_offset) == CONST_INT)
+ addr = plus_constant (arg_reg, INTVAL (slot_offset));
+ else
+ addr = gen_rtx (PLUS, Pmode, arg_reg, slot_offset);
+
+ addr = plus_constant (addr, arg_offset);
+ args[i].stack_slot = gen_rtx (MEM, args[i].mode, addr);
+ }
+ }
+
+#ifdef PUSH_ARGS_REVERSED
+#ifdef STACK_BOUNDARY
+ /* If we push args individually in reverse order, perform stack alignment
+ before the first push (the last arg). */
+ if (argblock == 0)
+ anti_adjust_stack (GEN_INT (args_size.constant
+ - original_args_size.constant));
+#endif
+#endif
+
+ /* Don't try to defer pops if preallocating, not even from the first arg,
+ since ARGBLOCK probably refers to the SP. */
+ if (argblock)
+ NO_DEFER_POP;
+
+ /* Get the function to call, in the form of RTL. */
+ if (fndecl)
+ {
+ /* If this is the first use of the function, see if we need to
+ make an external definition for it. */
+ if (! TREE_USED (fndecl))
+ {
+ assemble_external (fndecl);
+ TREE_USED (fndecl) = 1;
+ }
+
+ /* Get a SYMBOL_REF rtx for the function address. */
+ funexp = XEXP (DECL_RTL (fndecl), 0);
+ }
+ else
+ /* Generate an rtx (probably a pseudo-register) for the address. */
+ {
+ push_temp_slots ();
+ funexp = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0);
+ pop_temp_slots (); /* FUNEXP can't be BLKmode */
+ emit_queue ();
+ }
+
+ /* Figure out the register where the value, if any, will come back. */
+ valreg = 0;
+ if (TYPE_MODE (TREE_TYPE (exp)) != VOIDmode
+ && ! structure_value_addr)
+ {
+ if (pcc_struct_value)
+ valreg = hard_function_value (build_pointer_type (TREE_TYPE (exp)),
+ fndecl);
+ else
+ valreg = hard_function_value (TREE_TYPE (exp), fndecl);
+ }
+
+ /* Precompute all register parameters. It isn't safe to compute anything
+ once we have started filling any specific hard regs. */
+ reg_parm_seen = 0;
+ for (i = 0; i < num_actuals; i++)
+ if (args[i].reg != 0 && ! args[i].pass_on_stack)
+ {
+ reg_parm_seen = 1;
+
+ if (args[i].value == 0)
+ {
+ push_temp_slots ();
+ args[i].value = expand_expr (args[i].tree_value, NULL_RTX,
+ VOIDmode, 0);
+ preserve_temp_slots (args[i].value);
+ pop_temp_slots ();
+
+ /* ANSI doesn't require a sequence point here,
+ but PCC has one, so this will avoid some problems. */
+ emit_queue ();
+ }
+
+ /* If we are to promote the function arg to a wider mode,
+ do it now. */
+
+ if (args[i].mode != TYPE_MODE (TREE_TYPE (args[i].tree_value)))
+ args[i].value
+ = convert_modes (args[i].mode,
+ TYPE_MODE (TREE_TYPE (args[i].tree_value)),
+ args[i].value, args[i].unsignedp);
+
+ /* If the value is expensive, and we are inside an appropriately
+ short loop, put the value into a pseudo and then put the pseudo
+ into the hard reg.
+
+ For small register classes, also do this if this call uses
+ register parameters. This is to avoid reload conflicts while
+	 loading the parameter registers.  */
+
+ if ((! (GET_CODE (args[i].value) == REG
+ || (GET_CODE (args[i].value) == SUBREG
+ && GET_CODE (SUBREG_REG (args[i].value)) == REG)))
+ && args[i].mode != BLKmode
+ && rtx_cost (args[i].value, SET) > 2
+#ifdef SMALL_REGISTER_CLASSES
+ && (reg_parm_seen || preserve_subexpressions_p ()))
+#else
+ && preserve_subexpressions_p ())
+#endif
+ args[i].value = copy_to_mode_reg (args[i].mode, args[i].value);
+ }
+
+#if defined(ACCUMULATE_OUTGOING_ARGS) && defined(REG_PARM_STACK_SPACE)
+ /* The argument list is the property of the called routine and it
+ may clobber it. If the fixed area has been used for previous
+ parameters, we must save and restore it.
+
+     Here we compute the boundary of the area that needs to be saved, if any.  */
+
+#ifdef ARGS_GROW_DOWNWARD
+ for (i = 0; i < reg_parm_stack_space + 1; i++)
+#else
+ for (i = 0; i < reg_parm_stack_space; i++)
+#endif
+ {
+ if (i >= highest_outgoing_arg_in_use
+ || stack_usage_map[i] == 0)
+ continue;
+
+ if (low_to_save == -1)
+ low_to_save = i;
+
+ high_to_save = i;
+ }
+
+ if (low_to_save >= 0)
+ {
+ int num_to_save = high_to_save - low_to_save + 1;
+ enum machine_mode save_mode
+ = mode_for_size (num_to_save * BITS_PER_UNIT, MODE_INT, 1);
+ rtx stack_area;
+
+ /* If we don't have the required alignment, must do this in BLKmode. */
+ if ((low_to_save & (MIN (GET_MODE_SIZE (save_mode),
+ BIGGEST_ALIGNMENT / UNITS_PER_WORD) - 1)))
+ save_mode = BLKmode;
+
+ stack_area = gen_rtx (MEM, save_mode,
+ memory_address (save_mode,
+#ifdef ARGS_GROW_DOWNWARD
+ plus_constant (argblock,
+ - high_to_save)
+#else
+ plus_constant (argblock,
+ low_to_save)
+#endif
+ ));
+ if (save_mode == BLKmode)
+ {
+ save_area = assign_stack_temp (BLKmode, num_to_save, 1);
+ emit_block_move (validize_mem (save_area), stack_area,
+ GEN_INT (num_to_save),
+ PARM_BOUNDARY / BITS_PER_UNIT);
+ }
+ else
+ {
+ save_area = gen_reg_rtx (save_mode);
+ emit_move_insn (save_area, stack_area);
+ }
+ }
+#endif
+
+
+ /* Now store (and compute if necessary) all non-register parms.
+ These come before register parms, since they can require block-moves,
+ which could clobber the registers used for register parms.
+ Parms which have partial registers are not stored here,
+ but we do preallocate space here if they want that. */
+
+ for (i = 0; i < num_actuals; i++)
+ if (args[i].reg == 0 || args[i].pass_on_stack)
+ store_one_arg (&args[i], argblock, may_be_alloca,
+ args_size.var != 0, fndecl, reg_parm_stack_space);
+
+#ifdef STRICT_ALIGNMENT
+ /* If we have a parm that is passed in registers but not in memory
+ and whose alignment does not permit a direct copy into registers,
+ make a group of pseudos that correspond to each register that we
+ will later fill. */
+
+ for (i = 0; i < num_actuals; i++)
+ if (args[i].reg != 0 && ! args[i].pass_on_stack
+ && args[i].mode == BLKmode
+ && (TYPE_ALIGN (TREE_TYPE (args[i].tree_value))
+ < MIN (BIGGEST_ALIGNMENT, BITS_PER_WORD)))
+ {
+ int bytes = int_size_in_bytes (TREE_TYPE (args[i].tree_value));
+ int big_endian_correction = 0;
+
+ args[i].n_aligned_regs
+ = args[i].partial ? args[i].partial
+ : (bytes + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
+
+ args[i].aligned_regs = (rtx *) alloca (sizeof (rtx)
+ * args[i].n_aligned_regs);
+
+	  /* Structures smaller than a word are aligned to the least significant
+ byte (to the right). On a BYTES_BIG_ENDIAN machine, this means we
+ must skip the empty high order bytes when calculating the bit
+ offset. */
+ if (BYTES_BIG_ENDIAN && bytes < UNITS_PER_WORD)
+ big_endian_correction = (BITS_PER_WORD - (bytes * BITS_PER_UNIT));
+
+ for (j = 0; j < args[i].n_aligned_regs; j++)
+ {
+ rtx reg = gen_reg_rtx (word_mode);
+ rtx word = operand_subword_force (args[i].value, j, BLKmode);
+ int bitsize = TYPE_ALIGN (TREE_TYPE (args[i].tree_value));
+ int bitpos;
+
+ args[i].aligned_regs[j] = reg;
+
+ /* Clobber REG and move each partword into it. Ensure we don't
+ go past the end of the structure. Note that the loop below
+ works because we've already verified that padding
+ and endianness are compatible. */
+
+ emit_insn (gen_rtx (CLOBBER, VOIDmode, reg));
+
+ for (bitpos = 0;
+ bitpos < BITS_PER_WORD && bytes > 0;
+ bitpos += bitsize, bytes -= bitsize / BITS_PER_UNIT)
+ {
+ int xbitpos = bitpos + big_endian_correction;
+
+ store_bit_field (reg, bitsize, xbitpos, word_mode,
+ extract_bit_field (word, bitsize, bitpos, 1,
+ NULL_RTX, word_mode,
+ word_mode,
+ bitsize / BITS_PER_UNIT,
+ BITS_PER_WORD),
+ bitsize / BITS_PER_UNIT, BITS_PER_WORD);
+ }
+ }
+ }
+#endif
+
+ /* Now store any partially-in-registers parm.
+ This is the last place a block-move can happen. */
+ if (reg_parm_seen)
+ for (i = 0; i < num_actuals; i++)
+ if (args[i].partial != 0 && ! args[i].pass_on_stack)
+ store_one_arg (&args[i], argblock, may_be_alloca,
+ args_size.var != 0, fndecl, reg_parm_stack_space);
+
+#ifndef PUSH_ARGS_REVERSED
+#ifdef STACK_BOUNDARY
+ /* If we pushed args in forward order, perform stack alignment
+ after pushing the last arg. */
+ if (argblock == 0)
+ anti_adjust_stack (GEN_INT (args_size.constant
+ - original_args_size.constant));
+#endif
+#endif
+
+ /* If register arguments require space on the stack and stack space
+ was not preallocated, allocate stack space here for arguments
+ passed in registers. */
+#if ! defined(ACCUMULATE_OUTGOING_ARGS) && defined(OUTGOING_REG_PARM_STACK_SPACE)
+ if (must_preallocate == 0 && reg_parm_stack_space > 0)
+ anti_adjust_stack (GEN_INT (reg_parm_stack_space));
+#endif
+
+ /* Pass the function the address in which to return a structure value. */
+ if (structure_value_addr && ! structure_value_addr_parm)
+ {
+ emit_move_insn (struct_value_rtx,
+ force_reg (Pmode,
+ force_operand (structure_value_addr,
+ NULL_RTX)));
+ if (GET_CODE (struct_value_rtx) == REG)
+ use_reg (&call_fusage, struct_value_rtx);
+ }
+
+ funexp = prepare_call_address (funexp, fndecl, &call_fusage, reg_parm_seen);
+
+ /* Now do the register loads required for any wholly-register parms or any
+ parms which are passed both on the stack and in a register. Their
+ expressions were already evaluated.
+
+ Mark all register-parms as living through the call, putting these USE
+ insns in the CALL_INSN_FUNCTION_USAGE field. */
+
+ for (i = 0; i < num_actuals; i++)
+ {
+ rtx list = args[i].reg;
+ int partial = args[i].partial;
+
+ while (list)
+ {
+ rtx reg;
+ int nregs;
+
+ /* Process each register that needs to get this arg. */
+ if (GET_CODE (list) == EXPR_LIST)
+ reg = XEXP (list, 0), list = XEXP (list, 1);
+ else
+ reg = list, list = 0;
+
+ /* Set to non-negative if must move a word at a time, even if just
+	     one word (e.g., partial == 1 && mode == DFmode).  Set to -1 if
+ we just use a normal move insn. This value can be zero if the
+ argument is a zero size structure with no fields. */
+ nregs = (partial ? partial
+ : (TYPE_MODE (TREE_TYPE (args[i].tree_value)) == BLKmode
+ ? ((int_size_in_bytes (TREE_TYPE (args[i].tree_value))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+ : -1));
+
+ /* If simple case, just do move. If normal partial, store_one_arg
+ has already loaded the register for us. In all other cases,
+ load the register(s) from memory. */
+
+ if (nregs == -1)
+ emit_move_insn (reg, args[i].value);
+
+#ifdef STRICT_ALIGNMENT
+ /* If we have pre-computed the values to put in the registers in
+ the case of non-aligned structures, copy them in now. */
+
+ else if (args[i].n_aligned_regs != 0)
+ for (j = 0; j < args[i].n_aligned_regs; j++)
+ emit_move_insn (gen_rtx (REG, word_mode, REGNO (reg) + j),
+ args[i].aligned_regs[j]);
+#endif
+
+ else if (args[i].partial == 0 || args[i].pass_on_stack)
+ move_block_to_reg (REGNO (reg),
+ validize_mem (args[i].value), nregs,
+ args[i].mode);
+
+ if (nregs == -1)
+ use_reg (&call_fusage, reg);
+ else
+ use_regs (&call_fusage, REGNO (reg), nregs == 0 ? 1 : nregs);
+
+ /* PARTIAL referred only to the first register, so clear it for the
+ next time. */
+ partial = 0;
+ }
+ }
+
+ /* Perform postincrements before actually calling the function. */
+ emit_queue ();
+
+ /* All arguments and registers used for the call must be set up by now! */
+
+ /* Generate the actual call instruction. */
+ emit_call_1 (funexp, funtype, args_size.constant, struct_value_size,
+ FUNCTION_ARG (args_so_far, VOIDmode, void_type_node, 1),
+ valreg, old_inhibit_defer_pop, call_fusage, is_const);
+
+ /* If call is cse'able, make appropriate pair of reg-notes around it.
+ Test valreg so we don't crash; may safely ignore `const'
+ if return type is void. */
+ if (is_const && valreg != 0)
+ {
+ rtx note = 0;
+ rtx temp = gen_reg_rtx (GET_MODE (valreg));
+ rtx insns;
+
+ /* Construct an "equal form" for the value which mentions all the
+ arguments in order as well as the function name. */
+#ifdef PUSH_ARGS_REVERSED
+ for (i = 0; i < num_actuals; i++)
+ note = gen_rtx (EXPR_LIST, VOIDmode, args[i].initial_value, note);
+#else
+ for (i = num_actuals - 1; i >= 0; i--)
+ note = gen_rtx (EXPR_LIST, VOIDmode, args[i].initial_value, note);
+#endif
+ note = gen_rtx (EXPR_LIST, VOIDmode, funexp, note);
+
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_libcall_block (insns, temp, valreg, note);
+
+ valreg = temp;
+ }
+
+ /* For calls to `setjmp', etc., inform flow.c it should complain
+ if nonvolatile values are live. */
+
+ if (returns_twice)
+ {
+ emit_note (name, NOTE_INSN_SETJMP);
+ current_function_calls_setjmp = 1;
+ }
+
+ if (is_longjmp)
+ current_function_calls_longjmp = 1;
+
+ /* Notice functions that cannot return.
+ If optimizing, insns emitted below will be dead.
+ If not optimizing, they will exist, which is useful
+ if the user uses the `return' command in the debugger. */
+
+ if (is_volatile || is_longjmp)
+ emit_barrier ();
+
+ /* If value type not void, return an rtx for the value. */
+
+ /* If there are cleanups to be called, don't use a hard reg as target. */
+ if (cleanups_this_call != old_cleanups
+ && target && REG_P (target)
+ && REGNO (target) < FIRST_PSEUDO_REGISTER)
+ target = 0;
+
+ if (TYPE_MODE (TREE_TYPE (exp)) == VOIDmode
+ || ignore)
+ {
+ target = const0_rtx;
+ }
+ else if (structure_value_addr)
+ {
+ if (target == 0 || GET_CODE (target) != MEM)
+ {
+ target = gen_rtx (MEM, TYPE_MODE (TREE_TYPE (exp)),
+ memory_address (TYPE_MODE (TREE_TYPE (exp)),
+ structure_value_addr));
+ MEM_IN_STRUCT_P (target) = AGGREGATE_TYPE_P (TREE_TYPE (exp));
+ }
+ }
+ else if (pcc_struct_value)
+ {
+ if (target == 0)
+ {
+	     /* We used to leave the value in the location that it is
+ returned in, but that causes problems if it is used more
+ than once in one expression. Rather than trying to track
+ when a copy is required, we always copy when TARGET is
+ not specified. This calling sequence is only used on
+ a few machines and TARGET is usually nonzero. */
+ if (TYPE_MODE (TREE_TYPE (exp)) == BLKmode)
+ {
+ target = assign_stack_temp (BLKmode,
+ int_size_in_bytes (TREE_TYPE (exp)),
+ 0);
+
+ MEM_IN_STRUCT_P (target) = AGGREGATE_TYPE_P (TREE_TYPE (exp));
+
+ /* Save this temp slot around the pop below. */
+ preserve_temp_slots (target);
+ }
+ else
+ target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
+ }
+
+ if (TYPE_MODE (TREE_TYPE (exp)) != BLKmode)
+ emit_move_insn (target, gen_rtx (MEM, TYPE_MODE (TREE_TYPE (exp)),
+ copy_to_reg (valreg)));
+ else
+ emit_block_move (target, gen_rtx (MEM, BLKmode, copy_to_reg (valreg)),
+ expr_size (exp),
+ TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+ }
+ else if (target && GET_MODE (target) == TYPE_MODE (TREE_TYPE (exp))
+ && GET_MODE (target) == GET_MODE (valreg))
+ /* TARGET and VALREG cannot be equal at this point because the latter
+ would not have REG_FUNCTION_VALUE_P true, while the former would if
+ it were referring to the same register.
+
+ If they refer to the same register, this move will be a no-op, except
+ when function inlining is being done. */
+ emit_move_insn (target, valreg);
+ else
+ target = copy_to_reg (valreg);
+
+#ifdef PROMOTE_FUNCTION_RETURN
+ /* If we promoted this return value, make the proper SUBREG. TARGET
+ might be const0_rtx here, so be careful. */
+ if (GET_CODE (target) == REG
+ && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp)))
+ {
+ tree type = TREE_TYPE (exp);
+ int unsignedp = TREE_UNSIGNED (type);
+
+ /* If we don't promote as expected, something is wrong. */
+ if (GET_MODE (target)
+ != promote_mode (type, TYPE_MODE (type), &unsignedp, 1))
+ abort ();
+
+ target = gen_rtx (SUBREG, TYPE_MODE (type), target, 0);
+ SUBREG_PROMOTED_VAR_P (target) = 1;
+ SUBREG_PROMOTED_UNSIGNED_P (target) = unsignedp;
+ }
+#endif
+
+ if (flag_short_temps)
+ {
+ /* Perform all cleanups needed for the arguments of this call
+ (i.e. destructors in C++). */
+ expand_cleanups_to (old_cleanups);
+ }
+
+ /* If size of args is variable or this was a constructor call for a stack
+ argument, restore saved stack-pointer value. */
+
+ if (old_stack_level)
+ {
+ emit_stack_restore (SAVE_BLOCK, old_stack_level, NULL_RTX);
+ pending_stack_adjust = old_pending_adj;
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ stack_arg_under_construction = old_stack_arg_under_construction;
+ highest_outgoing_arg_in_use = initial_highest_arg_in_use;
+ stack_usage_map = initial_stack_usage_map;
+#endif
+ }
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ else
+ {
+#ifdef REG_PARM_STACK_SPACE
+ if (save_area)
+ {
+ enum machine_mode save_mode = GET_MODE (save_area);
+ rtx stack_area
+ = gen_rtx (MEM, save_mode,
+ memory_address (save_mode,
+#ifdef ARGS_GROW_DOWNWARD
+ plus_constant (argblock, - high_to_save)
+#else
+ plus_constant (argblock, low_to_save)
+#endif
+ ));
+
+ if (save_mode != BLKmode)
+ emit_move_insn (stack_area, save_area);
+ else
+ emit_block_move (stack_area, validize_mem (save_area),
+ GEN_INT (high_to_save - low_to_save + 1),
+ PARM_BOUNDARY / BITS_PER_UNIT);
+ }
+#endif
+
+ /* If we saved any argument areas, restore them. */
+ for (i = 0; i < num_actuals; i++)
+ if (args[i].save_area)
+ {
+ enum machine_mode save_mode = GET_MODE (args[i].save_area);
+ rtx stack_area
+ = gen_rtx (MEM, save_mode,
+ memory_address (save_mode,
+ XEXP (args[i].stack_slot, 0)));
+
+ if (save_mode != BLKmode)
+ emit_move_insn (stack_area, args[i].save_area);
+ else
+ emit_block_move (stack_area, validize_mem (args[i].save_area),
+ GEN_INT (args[i].size.constant),
+ PARM_BOUNDARY / BITS_PER_UNIT);
+ }
+
+ highest_outgoing_arg_in_use = initial_highest_arg_in_use;
+ stack_usage_map = initial_stack_usage_map;
+ }
+#endif
+
+ /* If this was alloca, record the new stack level for nonlocal gotos.
+ Check for the handler slots since we might not have a save area
+ for non-local gotos. */
+
+ if (may_be_alloca && nonlocal_goto_handler_slot != 0)
+ emit_stack_save (SAVE_NONLOCAL, &nonlocal_goto_stack_level, NULL_RTX);
+
+ pop_temp_slots ();
+
+ return target;
+}
+
+/* Output a library call to function FUN (a SYMBOL_REF rtx)
+ (emitting the queue unless NO_QUEUE is nonzero),
+ for a value of mode OUTMODE,
+ with NARGS different arguments, passed as alternating rtx values
+ and machine_modes to convert them to.
+ The rtx values should have been passed through protect_from_queue already.
+
+ NO_QUEUE will be true if and only if the library call is a `const' call
+ which will be enclosed in REG_LIBCALL/REG_RETVAL notes; it is equivalent
+ to the variable is_const in expand_call.
+
+ NO_QUEUE must be true for const calls, because if it isn't, then
+ any pending increment will be emitted between REG_LIBCALL/REG_RETVAL notes,
+ and will be lost if the libcall sequence is optimized away.
+
+ NO_QUEUE must be false for non-const calls, because if it isn't, the
+ call insn will have its CONST_CALL_P bit set, and it will be incorrectly
+ optimized. For instance, the instruction scheduler may incorrectly
+ move memory references across the non-const call. */
+
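+/* As an illustrative sketch (not the only usage pattern), expr.c emits
+   a block-move libcall roughly like
+
+     emit_library_call (memcpy_libfunc, 0, VOIDmode, 3,
+                        dst_addr, Pmode, src_addr, Pmode,
+                        size_rtx, TYPE_MODE (sizetype));
+
+   where dst_addr, src_addr and size_rtx are hypothetical names for rtx
+   operands already passed through protect_from_queue.  */
+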
+void
+emit_library_call VPROTO((rtx orgfun, int no_queue, enum machine_mode outmode,
+ int nargs, ...))
+{
+#ifndef __STDC__
+ rtx orgfun;
+ int no_queue;
+ enum machine_mode outmode;
+ int nargs;
+#endif
+ va_list p;
+ /* Total size in bytes of all the stack-parms scanned so far. */
+ struct args_size args_size;
+ /* Size of arguments before any adjustments (such as rounding). */
+ struct args_size original_args_size;
+ register int argnum;
+ rtx fun;
+ int inc;
+ int count;
+ rtx argblock = 0;
+ CUMULATIVE_ARGS args_so_far;
+ struct arg { rtx value; enum machine_mode mode; rtx reg; int partial;
+ struct args_size offset; struct args_size size; };
+ struct arg *argvec;
+ int old_inhibit_defer_pop = inhibit_defer_pop;
+ rtx call_fusage = 0;
+ /* Library calls are never indirect calls. */
+ int current_call_is_indirect = 0;
+
+ VA_START (p, nargs);
+
+#ifndef __STDC__
+ orgfun = va_arg (p, rtx);
+ no_queue = va_arg (p, int);
+ outmode = va_arg (p, enum machine_mode);
+ nargs = va_arg (p, int);
+#endif
+
+ fun = orgfun;
+
+ /* Copy all the libcall-arguments out of the varargs data
+ and into a vector ARGVEC.
+
+ Compute how to pass each argument. We only support a very small subset
+ of the full argument passing conventions to limit complexity here since
+ library functions shouldn't have many args. */
+
+ argvec = (struct arg *) alloca (nargs * sizeof (struct arg));
+
+ INIT_CUMULATIVE_ARGS (args_so_far, NULL_TREE, fun);
+
+ args_size.constant = 0;
+ args_size.var = 0;
+
+ push_temp_slots ();
+
+ for (count = 0; count < nargs; count++)
+ {
+ rtx val = va_arg (p, rtx);
+ enum machine_mode mode = va_arg (p, enum machine_mode);
+
+ /* We cannot convert the arg value to the mode the library wants here;
+ must do it earlier where we know the signedness of the arg. */
+ if (mode == BLKmode
+ || (GET_MODE (val) != mode && GET_MODE (val) != VOIDmode))
+ abort ();
+
+ /* On some machines, there's no way to pass a float to a library fcn.
+ Pass it as a double instead. */
+#ifdef LIBGCC_NEEDS_DOUBLE
+ if (LIBGCC_NEEDS_DOUBLE && mode == SFmode)
+ val = convert_modes (DFmode, SFmode, val, 0), mode = DFmode;
+#endif
+
+ /* There's no need to call protect_from_queue, because
+ either emit_move_insn or emit_push_insn will do that. */
+
+ /* Make sure it is a reasonable operand for a move or push insn. */
+ if (GET_CODE (val) != REG && GET_CODE (val) != MEM
+ && ! (CONSTANT_P (val) && LEGITIMATE_CONSTANT_P (val)))
+ val = force_operand (val, NULL_RTX);
+
+#ifdef FUNCTION_ARG_PASS_BY_REFERENCE
+ if (FUNCTION_ARG_PASS_BY_REFERENCE (args_so_far, mode, NULL_TREE, 1))
+ {
+ /* We do not support FUNCTION_ARG_CALLEE_COPIES here since it can
+ be viewed as just an efficiency improvement. */
+ rtx slot = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ emit_move_insn (slot, val);
+ val = XEXP (slot, 0);
+ mode = Pmode;
+ }
+#endif
+
+ argvec[count].value = val;
+ argvec[count].mode = mode;
+
+ argvec[count].reg = FUNCTION_ARG (args_so_far, mode, NULL_TREE, 1);
+ if (argvec[count].reg && GET_CODE (argvec[count].reg) == EXPR_LIST)
+ abort ();
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+ argvec[count].partial
+ = FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode, NULL_TREE, 1);
+#else
+ argvec[count].partial = 0;
+#endif
+
+ locate_and_pad_parm (mode, NULL_TREE,
+ argvec[count].reg && argvec[count].partial == 0,
+ NULL_TREE, &args_size, &argvec[count].offset,
+ &argvec[count].size);
+
+ if (argvec[count].size.var)
+ abort ();
+
+#ifndef REG_PARM_STACK_SPACE
+ if (argvec[count].partial)
+ argvec[count].size.constant -= argvec[count].partial * UNITS_PER_WORD;
+#endif
+
+ if (argvec[count].reg == 0 || argvec[count].partial != 0
+#ifdef REG_PARM_STACK_SPACE
+ || 1
+#endif
+ )
+ args_size.constant += argvec[count].size.constant;
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* If this arg is actually passed on the stack, it might be
+ clobbering something we already put there (this library call might
+ be inside the evaluation of an argument to a function whose call
+ requires the stack). This will only occur when the library call
+ has sufficient args to run out of argument registers. Abort in
+ this case; if this ever occurs, code must be added to save and
+ restore the arg slot. */
+
+ if (argvec[count].reg == 0 || argvec[count].partial != 0)
+ abort ();
+#endif
+
+ FUNCTION_ARG_ADVANCE (args_so_far, mode, (tree)0, 1);
+ }
+ va_end (p);
+
+ /* If this machine requires an external definition for library
+ functions, write one out. */
+ assemble_external_libcall (fun);
+
+ original_args_size = args_size;
+#ifdef STACK_BOUNDARY
+ args_size.constant = (((args_size.constant + (STACK_BYTES - 1))
+ / STACK_BYTES) * STACK_BYTES);
+#endif
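+  /* E.g. (illustrative): with STACK_BYTES == 8, a 13-byte argument
+     block is rounded up to 16 bytes by the computation above.  */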
+
+#ifdef REG_PARM_STACK_SPACE
+ args_size.constant = MAX (args_size.constant,
+ REG_PARM_STACK_SPACE (NULL_TREE));
+#ifndef OUTGOING_REG_PARM_STACK_SPACE
+ args_size.constant -= REG_PARM_STACK_SPACE (NULL_TREE);
+#endif
+#endif
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ if (args_size.constant > current_function_outgoing_args_size)
+ current_function_outgoing_args_size = args_size.constant;
+ args_size.constant = 0;
+#endif
+
+#ifndef PUSH_ROUNDING
+ argblock = push_block (GEN_INT (args_size.constant), 0, 0);
+#endif
+
+#ifdef PUSH_ARGS_REVERSED
+#ifdef STACK_BOUNDARY
+ /* If we push args individually in reverse order, perform stack alignment
+ before the first push (the last arg). */
+ if (argblock == 0)
+ anti_adjust_stack (GEN_INT (args_size.constant
+ - original_args_size.constant));
+#endif
+#endif
+
+#ifdef PUSH_ARGS_REVERSED
+ inc = -1;
+ argnum = nargs - 1;
+#else
+ inc = 1;
+ argnum = 0;
+#endif
+
+ /* Push the args that need to be pushed. */
+
+ for (count = 0; count < nargs; count++, argnum += inc)
+ {
+ register enum machine_mode mode = argvec[argnum].mode;
+ register rtx val = argvec[argnum].value;
+ rtx reg = argvec[argnum].reg;
+ int partial = argvec[argnum].partial;
+
+ if (! (reg != 0 && partial == 0))
+ emit_push_insn (val, mode, NULL_TREE, NULL_RTX, 0, partial, reg, 0,
+ argblock, GEN_INT (argvec[argnum].offset.constant));
+ NO_DEFER_POP;
+ }
+
+#ifndef PUSH_ARGS_REVERSED
+#ifdef STACK_BOUNDARY
+ /* If we pushed args in forward order, perform stack alignment
+ after pushing the last arg. */
+ if (argblock == 0)
+ anti_adjust_stack (GEN_INT (args_size.constant
+ - original_args_size.constant));
+#endif
+#endif
+
+#ifdef PUSH_ARGS_REVERSED
+ argnum = nargs - 1;
+#else
+ argnum = 0;
+#endif
+
+ fun = prepare_call_address (fun, NULL_TREE, &call_fusage, 0);
+
+ /* Now load any reg parms into their regs. */
+
+ for (count = 0; count < nargs; count++, argnum += inc)
+ {
+ register enum machine_mode mode = argvec[argnum].mode;
+ register rtx val = argvec[argnum].value;
+ rtx reg = argvec[argnum].reg;
+ int partial = argvec[argnum].partial;
+
+ if (reg != 0 && partial == 0)
+ emit_move_insn (reg, val);
+ NO_DEFER_POP;
+ }
+
+ /* For version 1.37, try deleting this entirely. */
+ if (! no_queue)
+ emit_queue ();
+
+ /* Any regs containing parms remain in use through the call. */
+ for (count = 0; count < nargs; count++)
+ if (argvec[count].reg != 0)
+ use_reg (&call_fusage, argvec[count].reg);
+
+ /* Don't allow popping to be deferred, since then
+ cse'ing of library calls could delete a call and leave the pop. */
+ NO_DEFER_POP;
+
+ /* We pass the old value of inhibit_defer_pop + 1 to emit_call_1, which
+ will set inhibit_defer_pop to that value. */
+
+ emit_call_1 (fun, get_identifier (XSTR (orgfun, 0)), args_size.constant, 0,
+ FUNCTION_ARG (args_so_far, VOIDmode, void_type_node, 1),
+ outmode != VOIDmode ? hard_libcall_value (outmode) : NULL_RTX,
+ old_inhibit_defer_pop + 1, call_fusage, no_queue);
+
+ pop_temp_slots ();
+
+ /* Now restore inhibit_defer_pop to its actual original value. */
+ OK_DEFER_POP;
+}
+
+/* Like emit_library_call except that an extra argument, VALUE,
+ comes second and says where to store the result.
+ (If VALUE is zero, this function chooses a convenient way
+ to return the value.
+
+ This function returns an rtx for where the value is to be found.
+ If VALUE is nonzero, VALUE is returned. */
+
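+/* As an illustrative sketch (argument names hypothetical), a soft-float
+   binary operation can be expanded with
+
+     value = emit_library_call_value (libfunc, NULL_RTX, 1, mode, 2,
+                                      op0, mode, op1, mode);
+
+   passing NULL_RTX for VALUE so that this function picks where the
+   result comes back.  */
+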
+rtx
+emit_library_call_value VPROTO((rtx orgfun, rtx value, int no_queue,
+ enum machine_mode outmode, int nargs, ...))
+{
+#ifndef __STDC__
+ rtx orgfun;
+ rtx value;
+ int no_queue;
+ enum machine_mode outmode;
+ int nargs;
+#endif
+ va_list p;
+ /* Total size in bytes of all the stack-parms scanned so far. */
+ struct args_size args_size;
+ /* Size of arguments before any adjustments (such as rounding). */
+ struct args_size original_args_size;
+ register int argnum;
+ rtx fun;
+ int inc;
+ int count;
+ rtx argblock = 0;
+ CUMULATIVE_ARGS args_so_far;
+ struct arg { rtx value; enum machine_mode mode; rtx reg; int partial;
+ struct args_size offset; struct args_size size; };
+ struct arg *argvec;
+ int old_inhibit_defer_pop = inhibit_defer_pop;
+ rtx call_fusage = 0;
+ rtx mem_value = 0;
+ int pcc_struct_value = 0;
+ int struct_value_size = 0;
+ /* Library calls are never indirect calls. */
+ int current_call_is_indirect = 0;
+ int is_const;
+
+ VA_START (p, nargs);
+
+#ifndef __STDC__
+ orgfun = va_arg (p, rtx);
+ value = va_arg (p, rtx);
+ no_queue = va_arg (p, int);
+ outmode = va_arg (p, enum machine_mode);
+ nargs = va_arg (p, int);
+#endif
+
+ is_const = no_queue;
+ fun = orgfun;
+
+ /* If this kind of value comes back in memory,
+ decide where in memory it should come back. */
+ if (aggregate_value_p (type_for_mode (outmode, 0)))
+ {
+#ifdef PCC_STATIC_STRUCT_RETURN
+ rtx pointer_reg
+ = hard_function_value (build_pointer_type (type_for_mode (outmode, 0)),
+ 0);
+ mem_value = gen_rtx (MEM, outmode, pointer_reg);
+ pcc_struct_value = 1;
+ if (value == 0)
+ value = gen_reg_rtx (outmode);
+#else /* not PCC_STATIC_STRUCT_RETURN */
+ struct_value_size = GET_MODE_SIZE (outmode);
+ if (value != 0 && GET_CODE (value) == MEM)
+ mem_value = value;
+ else
+ mem_value = assign_stack_temp (outmode, GET_MODE_SIZE (outmode), 0);
+#endif
+
+ /* This call returns a big structure. */
+ is_const = 0;
+ }
+
+ /* ??? Unfinished: must pass the memory address as an argument. */
+
+ /* Copy all the libcall-arguments out of the varargs data
+ and into a vector ARGVEC.
+
+ Compute how to pass each argument. We only support a very small subset
+ of the full argument passing conventions to limit complexity here since
+ library functions shouldn't have many args. */
+
+ argvec = (struct arg *) alloca ((nargs + 1) * sizeof (struct arg));
+
+ INIT_CUMULATIVE_ARGS (args_so_far, NULL_TREE, fun);
+
+ args_size.constant = 0;
+ args_size.var = 0;
+
+ count = 0;
+
+ push_temp_slots ();
+
+ /* If there's a structure value address to be passed,
+ either pass it in the special place, or pass it as an extra argument. */
+ if (mem_value && struct_value_rtx == 0 && ! pcc_struct_value)
+ {
+ rtx addr = XEXP (mem_value, 0);
+ nargs++;
+
+ /* Make sure it is a reasonable operand for a move or push insn. */
+ if (GET_CODE (addr) != REG && GET_CODE (addr) != MEM
+ && ! (CONSTANT_P (addr) && LEGITIMATE_CONSTANT_P (addr)))
+ addr = force_operand (addr, NULL_RTX);
+
+ argvec[count].value = addr;
+ argvec[count].mode = Pmode;
+ argvec[count].partial = 0;
+
+ argvec[count].reg = FUNCTION_ARG (args_so_far, Pmode, NULL_TREE, 1);
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+ if (FUNCTION_ARG_PARTIAL_NREGS (args_so_far, Pmode, NULL_TREE, 1))
+ abort ();
+#endif
+
+ locate_and_pad_parm (Pmode, NULL_TREE,
+ argvec[count].reg && argvec[count].partial == 0,
+ NULL_TREE, &args_size, &argvec[count].offset,
+ &argvec[count].size);
+
+ if (argvec[count].reg == 0 || argvec[count].partial != 0
+#ifdef REG_PARM_STACK_SPACE
+ || 1
+#endif
+ )
+ args_size.constant += argvec[count].size.constant;
+
+ FUNCTION_ARG_ADVANCE (args_so_far, Pmode, (tree)0, 1);
+
+ count++;
+ }
+
+ for (; count < nargs; count++)
+ {
+ rtx val = va_arg (p, rtx);
+ enum machine_mode mode = va_arg (p, enum machine_mode);
+
+ /* We cannot convert the arg value to the mode the library wants here;
+ must do it earlier where we know the signedness of the arg. */
+ if (mode == BLKmode
+ || (GET_MODE (val) != mode && GET_MODE (val) != VOIDmode))
+ abort ();
+
+ /* On some machines, there's no way to pass a float to a library fcn.
+ Pass it as a double instead. */
+#ifdef LIBGCC_NEEDS_DOUBLE
+ if (LIBGCC_NEEDS_DOUBLE && mode == SFmode)
+ val = convert_modes (DFmode, SFmode, val, 0), mode = DFmode;
+#endif
+
+ /* There's no need to call protect_from_queue, because
+ either emit_move_insn or emit_push_insn will do that. */
+
+ /* Make sure it is a reasonable operand for a move or push insn. */
+ if (GET_CODE (val) != REG && GET_CODE (val) != MEM
+ && ! (CONSTANT_P (val) && LEGITIMATE_CONSTANT_P (val)))
+ val = force_operand (val, NULL_RTX);
+
+#ifdef FUNCTION_ARG_PASS_BY_REFERENCE
+ if (FUNCTION_ARG_PASS_BY_REFERENCE (args_so_far, mode, NULL_TREE, 1))
+ {
+ /* We do not support FUNCTION_ARG_CALLEE_COPIES here since it can
+ be viewed as just an efficiency improvement. */
+ rtx slot = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ emit_move_insn (slot, val);
+ val = XEXP (slot, 0);
+ mode = Pmode;
+ }
+#endif
+
+ argvec[count].value = val;
+ argvec[count].mode = mode;
+
+ argvec[count].reg = FUNCTION_ARG (args_so_far, mode, NULL_TREE, 1);
+ if (argvec[count].reg && GET_CODE (argvec[count].reg) == EXPR_LIST)
+ abort ();
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+ argvec[count].partial
+ = FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode, NULL_TREE, 1);
+#else
+ argvec[count].partial = 0;
+#endif
+
+ locate_and_pad_parm (mode, NULL_TREE,
+ argvec[count].reg && argvec[count].partial == 0,
+ NULL_TREE, &args_size, &argvec[count].offset,
+ &argvec[count].size);
+
+ if (argvec[count].size.var)
+ abort ();
+
+#ifndef REG_PARM_STACK_SPACE
+ if (argvec[count].partial)
+ argvec[count].size.constant -= argvec[count].partial * UNITS_PER_WORD;
+#endif
+
+ if (argvec[count].reg == 0 || argvec[count].partial != 0
+#ifdef REG_PARM_STACK_SPACE
+ || 1
+#endif
+ )
+ args_size.constant += argvec[count].size.constant;
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* If this arg is actually passed on the stack, it might be
+ clobbering something we already put there (this library call might
+ be inside the evaluation of an argument to a function whose call
+ requires the stack). This will only occur when the library call
+ has sufficient args to run out of argument registers. Abort in
+ this case; if this ever occurs, code must be added to save and
+ restore the arg slot. */
+
+ if (argvec[count].reg == 0 || argvec[count].partial != 0)
+ abort ();
+#endif
+
+ FUNCTION_ARG_ADVANCE (args_so_far, mode, (tree)0, 1);
+ }
+ va_end (p);
+
+ /* If this machine requires an external definition for library
+ functions, write one out. */
+ assemble_external_libcall (fun);
+
+ original_args_size = args_size;
+#ifdef STACK_BOUNDARY
+ args_size.constant = (((args_size.constant + (STACK_BYTES - 1))
+ / STACK_BYTES) * STACK_BYTES);
+#endif
+
+#ifdef REG_PARM_STACK_SPACE
+ args_size.constant = MAX (args_size.constant,
+ REG_PARM_STACK_SPACE (NULL_TREE));
+#ifndef OUTGOING_REG_PARM_STACK_SPACE
+ args_size.constant -= REG_PARM_STACK_SPACE (NULL_TREE);
+#endif
+#endif
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ if (args_size.constant > current_function_outgoing_args_size)
+ current_function_outgoing_args_size = args_size.constant;
+ args_size.constant = 0;
+#endif
+
+#ifndef PUSH_ROUNDING
+ argblock = push_block (GEN_INT (args_size.constant), 0, 0);
+#endif
+
+#ifdef PUSH_ARGS_REVERSED
+#ifdef STACK_BOUNDARY
+ /* If we push args individually in reverse order, perform stack alignment
+ before the first push (the last arg). */
+ if (argblock == 0)
+ anti_adjust_stack (GEN_INT (args_size.constant
+ - original_args_size.constant));
+#endif
+#endif
+
+#ifdef PUSH_ARGS_REVERSED
+ inc = -1;
+ argnum = nargs - 1;
+#else
+ inc = 1;
+ argnum = 0;
+#endif
+
+ /* Push the args that need to be pushed. */
+
+ for (count = 0; count < nargs; count++, argnum += inc)
+ {
+ register enum machine_mode mode = argvec[argnum].mode;
+ register rtx val = argvec[argnum].value;
+ rtx reg = argvec[argnum].reg;
+ int partial = argvec[argnum].partial;
+
+ if (! (reg != 0 && partial == 0))
+ emit_push_insn (val, mode, NULL_TREE, NULL_RTX, 0, partial, reg, 0,
+ argblock, GEN_INT (argvec[argnum].offset.constant));
+ NO_DEFER_POP;
+ }
+
+#ifndef PUSH_ARGS_REVERSED
+#ifdef STACK_BOUNDARY
+ /* If we pushed args in forward order, perform stack alignment
+ after pushing the last arg. */
+ if (argblock == 0)
+ anti_adjust_stack (GEN_INT (args_size.constant
+ - original_args_size.constant));
+#endif
+#endif
+
+#ifdef PUSH_ARGS_REVERSED
+ argnum = nargs - 1;
+#else
+ argnum = 0;
+#endif
+
+ fun = prepare_call_address (fun, NULL_TREE, &call_fusage, 0);
+
+ /* Now load any reg parms into their regs. */
+
+ for (count = 0; count < nargs; count++, argnum += inc)
+ {
+ register enum machine_mode mode = argvec[argnum].mode;
+ register rtx val = argvec[argnum].value;
+ rtx reg = argvec[argnum].reg;
+ int partial = argvec[argnum].partial;
+
+ if (reg != 0 && partial == 0)
+ emit_move_insn (reg, val);
+ NO_DEFER_POP;
+ }
+
+#if 0
+ /* For version 1.37, try deleting this entirely. */
+ if (! no_queue)
+ emit_queue ();
+#endif
+
+ /* Any regs containing parms remain in use through the call. */
+ for (count = 0; count < nargs; count++)
+ if (argvec[count].reg != 0)
+ use_reg (&call_fusage, argvec[count].reg);
+
+ /* Pass the function the address in which to return a structure value. */
+ if (mem_value != 0 && struct_value_rtx != 0 && ! pcc_struct_value)
+ {
+ emit_move_insn (struct_value_rtx,
+ force_reg (Pmode,
+ force_operand (XEXP (mem_value, 0),
+ NULL_RTX)));
+ if (GET_CODE (struct_value_rtx) == REG)
+ use_reg (&call_fusage, struct_value_rtx);
+ }
+
+ /* Don't allow popping to be deferred, since then
+ cse'ing of library calls could delete a call and leave the pop. */
+ NO_DEFER_POP;
+
+ /* We pass the old value of inhibit_defer_pop + 1 to emit_call_1, which
+ will set inhibit_defer_pop to that value. */
+
+ emit_call_1 (fun, get_identifier (XSTR (orgfun, 0)), args_size.constant,
+ struct_value_size,
+ FUNCTION_ARG (args_so_far, VOIDmode, void_type_node, 1),
+ (outmode != VOIDmode && mem_value == 0
+ ? hard_libcall_value (outmode) : NULL_RTX),
+ old_inhibit_defer_pop + 1, call_fusage, is_const);
+
+ /* Now restore inhibit_defer_pop to its actual original value. */
+ OK_DEFER_POP;
+
+ pop_temp_slots ();
+
+ /* Copy the value to the right place. */
+ if (outmode != VOIDmode)
+ {
+ if (mem_value)
+ {
+ if (value == 0)
+ value = mem_value;
+ if (value != mem_value)
+ emit_move_insn (value, mem_value);
+ }
+ else if (value != 0)
+ emit_move_insn (value, hard_libcall_value (outmode));
+ else
+ value = hard_libcall_value (outmode);
+ }
+
+ return value;
+}
+
+#if 0
+/* Return an rtx which represents a suitable home on the stack
+ given TYPE, the type of the argument looking for a home.
+ This is called only for BLKmode arguments.
+
+ SIZE is the size needed for this target.
+ ARGS_ADDR is the address of the bottom of the argument block for this call.
+ OFFSET describes this parameter's offset into ARGS_ADDR. It is meaningless
+ if this machine uses push insns. */
+
+static rtx
+target_for_arg (type, size, args_addr, offset)
+ tree type;
+ rtx size;
+ rtx args_addr;
+ struct args_size offset;
+{
+ rtx target;
+ rtx offset_rtx = ARGS_SIZE_RTX (offset);
+
+ /* We avoid calling memory_address when we can,
+ because we want to address as close to the stack
+ as possible. For non-variable-sized arguments,
+ this will be stack-pointer relative addressing. */
+ if (GET_CODE (offset_rtx) == CONST_INT)
+ target = plus_constant (args_addr, INTVAL (offset_rtx));
+ else
+ {
+ /* I have no idea how to guarantee that this
+ will work in the presence of register parameters. */
+ target = gen_rtx (PLUS, Pmode, args_addr, offset_rtx);
+ target = memory_address (QImode, target);
+ }
+
+ return gen_rtx (MEM, BLKmode, target);
+}
+#endif
+
+/* Store a single argument for a function call
+ into the register or memory area where it must be passed.
+ *ARG describes the argument value and where to pass it.
+
+ ARGBLOCK is the address of the stack-block for all the arguments,
+ or 0 on a machine where arguments are pushed individually.
+
+ MAY_BE_ALLOCA nonzero says this could be a call to `alloca',
+ so we must be careful about how the stack is used.
+
+ VARIABLE_SIZE nonzero says that the outgoing argument area is of
+ variable size. Under ACCUMULATE_OUTGOING_ARGS this indicates that
+ we need not worry about saving and restoring the stack.
+
+ FNDECL is the declaration of the function we are calling.
+
+ REG_PARM_STACK_SPACE is the size of the area of the stack frame
+ reserved for register parameters; slots within it have already
+ been saved and need not be saved again here. */
+
+static void
+store_one_arg (arg, argblock, may_be_alloca, variable_size, fndecl,
+ reg_parm_stack_space)
+ struct arg_data *arg;
+ rtx argblock;
+ int may_be_alloca;
+ int variable_size;
+ tree fndecl;
+ int reg_parm_stack_space;
+{
+ register tree pval = arg->tree_value;
+ rtx reg = 0;
+ int partial = 0;
+ int used = 0;
+ int i, lower_bound, upper_bound;
+
+ if (TREE_CODE (pval) == ERROR_MARK)
+ return;
+
+ /* Push a new temporary level for any temporaries we make for
+ this argument. */
+ push_temp_slots ();
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* If this is being stored into a pre-allocated, fixed-size, stack area,
+ save any previous data at that location. */
+ if (argblock && ! variable_size && arg->stack)
+ {
+#ifdef ARGS_GROW_DOWNWARD
+ /* stack_slot is negative, but we want to index stack_usage_map
+ with positive values. */
+ if (GET_CODE (XEXP (arg->stack_slot, 0)) == PLUS)
+ upper_bound = -INTVAL (XEXP (XEXP (arg->stack_slot, 0), 1)) + 1;
+ else
+ abort ();
+
+ lower_bound = upper_bound - arg->size.constant;
+#else
+ if (GET_CODE (XEXP (arg->stack_slot, 0)) == PLUS)
+ lower_bound = INTVAL (XEXP (XEXP (arg->stack_slot, 0), 1));
+ else
+ lower_bound = 0;
+
+ upper_bound = lower_bound + arg->size.constant;
+#endif
+
+ for (i = lower_bound; i < upper_bound; i++)
+ if (stack_usage_map[i]
+#ifdef REG_PARM_STACK_SPACE
+ /* Don't store things in the fixed argument area at this point;
+ it has already been saved. */
+ && i > reg_parm_stack_space
+#endif
+ )
+ break;
+
+ if (i != upper_bound)
+ {
+ /* We need to make a save area. See what mode we can make it. */
+ enum machine_mode save_mode
+ = mode_for_size (arg->size.constant * BITS_PER_UNIT, MODE_INT, 1);
+ rtx stack_area
+ = gen_rtx (MEM, save_mode,
+ memory_address (save_mode, XEXP (arg->stack_slot, 0)));
+
+ if (save_mode == BLKmode)
+ {
+ arg->save_area = assign_stack_temp (BLKmode,
+ arg->size.constant, 1);
+ preserve_temp_slots (arg->save_area);
+ emit_block_move (validize_mem (arg->save_area), stack_area,
+ GEN_INT (arg->size.constant),
+ PARM_BOUNDARY / BITS_PER_UNIT);
+ }
+ else
+ {
+ arg->save_area = gen_reg_rtx (save_mode);
+ emit_move_insn (arg->save_area, stack_area);
+ }
+ }
+ }
+#endif
+
+ /* If this isn't going to be placed on both the stack and in registers,
+ set up the register and number of words. */
+ if (! arg->pass_on_stack)
+ reg = arg->reg, partial = arg->partial;
+
+ if (reg != 0 && partial == 0)
+ /* Being passed entirely in a register. We shouldn't be called in
+ this case. */
+ abort ();
+
+#ifdef STRICT_ALIGNMENT
+ /* If this arg needs special alignment, don't load the registers
+ here. */
+ if (arg->n_aligned_regs != 0)
+ reg = 0;
+#endif
+
+ /* If this is being partially passed in a register, but multiple locations
+ are specified, we assume that the one partially used is the one that is
+ listed first. */
+ if (reg && GET_CODE (reg) == EXPR_LIST)
+ reg = XEXP (reg, 0);
+
+ /* If this is being passed partially in a register, we can't evaluate
+ it directly into its stack slot. Otherwise, we can. */
+ if (arg->value == 0)
+ {
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* stack_arg_under_construction is nonzero if a function argument is
+ being evaluated directly into the outgoing argument list and
+ expand_call must take special action to preserve the argument list
+ if it is called recursively.
+
+ For scalar function arguments stack_usage_map is sufficient to
+ determine which stack slots must be saved and restored. Scalar
+ arguments in general have pass_on_stack == 0.
+
+ If this argument is initialized by a function which takes the
+ address of the argument (a C++ constructor or a C function
+ returning a BLKmode structure), then stack_usage_map is
+ insufficient and expand_call must push the stack around the
+ function call. Such arguments have pass_on_stack == 1.
+
+ Note that it is always safe to set stack_arg_under_construction,
+ but this generates suboptimal code if set when not needed. */
+
+ if (arg->pass_on_stack)
+ stack_arg_under_construction++;
+#endif
+ arg->value = expand_expr (pval,
+ (partial
+ || TYPE_MODE (TREE_TYPE (pval)) != arg->mode)
+ ? NULL_RTX : arg->stack,
+ VOIDmode, 0);
+
+ /* If the mode doesn't agree (because we are promoting the object,
+ or for any other reason), convert the mode. */
+
+ if (arg->mode != TYPE_MODE (TREE_TYPE (pval)))
+ arg->value = convert_modes (arg->mode, TYPE_MODE (TREE_TYPE (pval)),
+ arg->value, arg->unsignedp);
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ if (arg->pass_on_stack)
+ stack_arg_under_construction--;
+#endif
+ }
+
+ /* Don't allow anything left on stack from computation
+ of argument to alloca. */
+ if (may_be_alloca)
+ do_pending_stack_adjust ();
+
+ if (arg->value == arg->stack)
+ /* If the value is already in the stack slot, we are done. */
+ ;
+ else if (arg->mode != BLKmode)
+ {
+ register int size;
+
+ /* Argument is a scalar, not entirely passed in registers.
+ (If part is passed in registers, arg->partial says how much
+ and emit_push_insn will take care of putting it there.)
+
+ Push it, and if its size is less than the
+ amount of space allocated to it,
+ also bump the stack pointer by the additional space.
+ Note that in C the default argument promotions
+ will prevent such mismatches. */
+
+ size = GET_MODE_SIZE (arg->mode);
+ /* Compute how much space the push instruction will push.
+ On many machines, pushing a byte will advance the stack
+ pointer by a halfword. */
+#ifdef PUSH_ROUNDING
+ size = PUSH_ROUNDING (size);
+#endif
+ used = size;
+
+ /* Compute how much space the argument should get:
+ round up to a multiple of the alignment for arguments. */
+ if (none != FUNCTION_ARG_PADDING (arg->mode, TREE_TYPE (pval)))
+ used = (((size + PARM_BOUNDARY / BITS_PER_UNIT - 1)
+ / (PARM_BOUNDARY / BITS_PER_UNIT))
+ * (PARM_BOUNDARY / BITS_PER_UNIT));
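+      /* E.g. (illustrative): with a 32-bit PARM_BOUNDARY, a 2-byte
+	 scalar gets USED == 4; emit_push_insn is then asked to skip
+	 the extra USED - SIZE == 2 bytes.  */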
+
+ /* This isn't already where we want it on the stack, so put it there.
+ This can either be done with push or copy insns. */
+ emit_push_insn (arg->value, arg->mode, TREE_TYPE (pval), NULL_RTX,
+ 0, partial, reg, used - size,
+ argblock, ARGS_SIZE_RTX (arg->offset));
+ }
+ else
+ {
+ /* BLKmode, at least partly to be pushed. */
+
+ register int excess;
+ rtx size_rtx;
+
+ /* Pushing a nonscalar.
+ If part is passed in registers, PARTIAL says how much
+ and emit_push_insn will take care of putting it there. */
+
+ /* Round its size up to a multiple
+ of the allocation unit for arguments. */
+
+ if (arg->size.var != 0)
+ {
+ excess = 0;
+ size_rtx = ARGS_SIZE_RTX (arg->size);
+ }
+ else
+ {
+ /* PUSH_ROUNDING has no effect on us, because
+ emit_push_insn for BLKmode is careful to avoid it. */
+ excess = (arg->size.constant - int_size_in_bytes (TREE_TYPE (pval))
+ + partial * UNITS_PER_WORD);
+ size_rtx = expr_size (pval);
+ }
+
+ emit_push_insn (arg->value, arg->mode, TREE_TYPE (pval), size_rtx,
+ TYPE_ALIGN (TREE_TYPE (pval)) / BITS_PER_UNIT, partial,
+ reg, excess, argblock, ARGS_SIZE_RTX (arg->offset));
+ }
+
+ /* Unless this is a partially-in-register argument, the argument is now
+ in the stack.
+
+ ??? Note that this can change arg->value from arg->stack to
+ arg->stack_slot and it matters when they are not the same.
+ It isn't totally clear that this is correct in all cases. */
+ if (partial == 0)
+ arg->value = arg->stack_slot;
+
+ /* Once we have pushed something, pops can't safely
+ be deferred during the rest of the arguments. */
+ NO_DEFER_POP;
+
+ /* ANSI doesn't require a sequence point here,
+ but PCC has one, so this will avoid some problems. */
+ emit_queue ();
+
+ /* Free any temporary slots made in processing this argument. */
+ free_temp_slots ();
+ pop_temp_slots ();
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+ /* Now mark the segment we just used. */
+ if (argblock && ! variable_size && arg->stack)
+ for (i = lower_bound; i < upper_bound; i++)
+ stack_usage_map[i] = 1;
+#endif
+}
diff --git a/gnu/usr.bin/cc/cc_int/combine.c b/gnu/usr.bin/cc/cc_int/combine.c
new file mode 100644
index 0000000..990fa4d
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/combine.c
@@ -0,0 +1,10790 @@
+/* Optimize by combining instructions for GNU compiler.
+ Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This module is essentially the "combiner" phase of the U. of Arizona
+ Portable Optimizer, but redone to work on our list-structured
+ representation for RTL instead of their string representation.
+
+ The LOG_LINKS of each insn identify the most recent assignment
+ to each REG used in the insn. It is a list of previous insns,
+ each of which contains a SET for a REG that is used in this insn
+ and not used or set in between. LOG_LINKs never cross basic blocks.
+ They were set up by the preceding pass (lifetime analysis).
+
+ We try to combine each pair of insns joined by a logical link.
+ We also try to combine triples of insns A, B and C when
+ C has a link back to B and B has a link back to A.
+
+ LOG_LINKS does not have links for uses of CC0. None are needed,
+ because the insn that sets CC0 is always immediately before the
+ insn that tests it. So we always regard a branch insn as having
+ a logical link to the preceding insn. The same is true for an
+ insn explicitly using CC0.
+
+ We check (with use_crosses_set_p) to avoid combining in such a way
+ as to move a computation to a place where its value would be different.
+
+ Combination is done by mathematically substituting the previous
+ insn(s) values for the regs they set into the expressions in
+ the later insns that refer to these regs. If the result is a valid insn
+ for our target machine, according to the machine description,
+ we install it, delete the earlier insns, and update the data flow
+ information (LOG_LINKS and REG_NOTES) for what we did.
+
+ There are a few exceptions where the dataflow information created by
+ flow.c isn't completely updated:
+
+ - reg_live_length is not updated
+ - reg_n_refs is not adjusted in the rare case when a register is
+ no longer required in a computation
+ - there are extremely rare cases (see distribute_regnotes) when a
+ REG_DEAD note is lost
+ - a LOG_LINKS entry that refers to an insn with multiple SETs may be
+ removed because there is no way to know which register it was
+ linking
+
+ To simplify substitution, we combine only when the earlier insn(s)
+ consist of only a single assignment. To simplify updating afterward,
+ we never combine when a subroutine call appears in the middle.
+
+ Since we do not represent assignments to CC0 explicitly except when that
+ is all an insn does, there is no LOG_LINKS entry in an insn that uses
+ the condition code for the insn that set the condition code.
+ Fortunately, these two insns must be consecutive.
+ Therefore, every JUMP_INSN is taken to have an implicit logical link
+ to the preceding insn. This is not quite right, since non-jumps can
+ also use the condition code; but in practice such insns would not
+ combine anyway. */
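+
+/* A minimal illustration (hypothetical pseudo-register numbers): given
+
+     (set (reg 64) (plus (reg 62) (const_int 4)))
+     (set (reg 65) (mem (reg 64)))
+
+   and a LOG_LINKS entry from the second insn back to the first,
+   substitution produces
+
+     (set (reg 65) (mem (plus (reg 62) (const_int 4))))
+
+   which replaces the second insn, and the first is deleted, only if
+   the machine description recognizes the result as a valid insn.  */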
+
+#include "config.h"
+#ifdef __STDC__
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+
+/* Must precede rtl.h for FFS. */
+#include <stdio.h>
+
+#include "rtl.h"
+#include "flags.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "expr.h"
+#include "basic-block.h"
+#include "insn-config.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+#include "insn-attr.h"
+#include "recog.h"
+#include "real.h"
+
+/* It is not safe to use ordinary gen_lowpart in combine.
+ Use gen_lowpart_for_combine instead. See comments there. */
+#define gen_lowpart dont_use_gen_lowpart_you_dummy
+
+/* Number of attempts to combine instructions in this function. */
+
+static int combine_attempts;
+
+/* Number of attempts that got as far as substitution in this function. */
+
+static int combine_merges;
+
+/* Number of instructions combined with added SETs in this function. */
+
+static int combine_extras;
+
+/* Number of instructions combined in this function. */
+
+static int combine_successes;
+
+/* Totals over entire compilation. */
+
+static int total_attempts, total_merges, total_extras, total_successes;
+
+/* Define a default value for REVERSIBLE_CC_MODE.
+ We can never assume that a condition code mode is safe to reverse unless
+ the md tells us so. */
+#ifndef REVERSIBLE_CC_MODE
+#define REVERSIBLE_CC_MODE(MODE) 0
+#endif
+
+/* Vector mapping INSN_UIDs to cuids.
+ The cuids are like uids but always increase monotonically.
+ Combine always uses cuids so that it can compare them.
+ But actually renumbering the uids, which we used to do,
+ proves to be a bad idea because it makes it hard to compare
+ the dumps produced by earlier passes with those from later passes. */
+
+static int *uid_cuid;
+
+/* Get the cuid of an insn. */
+
+#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
+
+/* Maximum register number, which is the size of the tables below. */
+
+static int combine_max_regno;
+
+/* Record last point of death of (hard or pseudo) register n. */
+
+static rtx *reg_last_death;
+
+/* Record last point of modification of (hard or pseudo) register n. */
+
+static rtx *reg_last_set;
+
+/* Record the cuid of the last insn that invalidated memory
+ (anything that writes memory, and subroutine calls, but not pushes). */
+
+static int mem_last_set;
+
+/* Record the cuid of the last CALL_INSN
+ so we can tell whether a potential combination crosses any calls. */
+
+static int last_call_cuid;
+
+/* When `subst' is called, this is the insn that is being modified
+ (by combining in a previous insn). The PATTERN of this insn
+ is still the old pattern partially modified and it should not be
+ looked at, but this may be used to examine the successors of the insn
+ to judge whether a simplification is valid. */
+
+static rtx subst_insn;
+
+/* If nonzero, this is the insn that should be presumed to be
+ immediately in front of `subst_insn'. */
+
+static rtx subst_prev_insn;
+
+/* This is the lowest CUID that `subst' is currently dealing with.
+ get_last_value will not return a value if the register was set at or
+ after this CUID. If not for this mechanism, we could get confused if
+ I2 or I1 in try_combine were an insn that used the old value of a register
+ to obtain a new value. In that case, we might erroneously get the
+ new value of the register when we wanted the old one. */
+
+static int subst_low_cuid;
+
+/* This contains any hard registers that are used in newpat; reg_dead_at_p
+ must consider all these registers to be always live. */
+
+static HARD_REG_SET newpat_used_regs;
+
+/* This is an insn to which a LOG_LINKS entry has been added. If this
+ insn is earlier than I2 or I3, combine should rescan starting at
+ that location. */
+
+static rtx added_links_insn;
+
+/* This is the value of undobuf.num_undo when we started processing this
+ substitution. This will prevent gen_rtx_combine from re-using a piece
+ from the previous expression. Doing so can produce circular rtl
+ structures. */
+
+static int previous_num_undos;
+
+/* Basic block number of the block in which we are performing combines. */
+static int this_basic_block;
+
+/* The next group of arrays allows the recording of the last value assigned
+ to (hard or pseudo) register n. We use this information to see if an
+ operation being processed is redundant given a prior operation performed
+ on the register. For example, an `and' with a constant is redundant if
+ all the zero bits are already known to be turned off.
+
+ We use an approach similar to that used by cse, but change it in the
+ following ways:
+
+ (1) We do not want to reinitialize at each label.
+ (2) It is useful, but not critical, to know the actual value assigned
+ to a register. Often just its form is helpful.
+
+ Therefore, we maintain the following arrays:
+
+ reg_last_set_value the last value assigned
+ reg_last_set_label records the value of label_tick when the
+ register was assigned
+ reg_last_set_table_tick records the value of label_tick when a
+ value using the register is assigned
+ reg_last_set_invalid set to non-zero when it is not valid
+ to use the value of this register in some
+ register's value
+
+ To understand the usage of these tables, it is important to understand
+ the distinction between the value in reg_last_set_value being valid
+ and the register being validly contained in some other expression in the
+ table.
+
+ Entry I in reg_last_set_value is valid if it is non-zero, and either
+ reg_n_sets[i] is 1 or reg_last_set_label[i] == label_tick.
+
+ Register I may validly appear in any expression returned for the value
+ of another register if reg_n_sets[i] is 1. It may also appear in the
+ value for register J if reg_last_set_label[i] < reg_last_set_label[j] or
+ reg_last_set_invalid[j] is zero.
+
+ If an expression is found in the table containing a register which may
+ not validly appear in an expression, the register is replaced by
+ something that won't match, (clobber (const_int 0)).
+
+ reg_last_set_invalid[i] is set non-zero when register I is being assigned
+ to and reg_last_set_table_tick[i] == label_tick. */
+
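+/* A small illustration (hypothetical register numbers): if pseudo 66 is
+   set exactly once in the function (reg_n_sets[66] == 1), its entry in
+   reg_last_set_value stays usable everywhere; if it is set a second
+   time in a later basic block, the entry is only usable while
+   reg_last_set_label[66] still equals the current label_tick.  */
+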
+/* Record last value assigned to (hard or pseudo) register n. */
+
+static rtx *reg_last_set_value;
+
+/* Record the value of label_tick when the value for register n is placed in
+ reg_last_set_value[n]. */
+
+static int *reg_last_set_label;
+
+/* Record the value of label_tick when an expression involving register n
+ is placed in reg_last_set_value. */
+
+static int *reg_last_set_table_tick;
+
+/* Set non-zero if references to register n in expressions should not be
+ used. */
+
+static char *reg_last_set_invalid;
+
+/* Incremented for each label. */
+
+static int label_tick;
+
+/* Some registers that are set more than once and used in more than one
+ basic block are nevertheless always set in similar ways. For example,
+ a QImode register may be loaded from memory in two places on a machine
+ where byte loads zero extend.
+
+ We record in the following array what we know about the nonzero
+ bits of a register, specifically which bits are known to be zero.
+
+ If an entry is zero, it means that we don't know anything special. */
+
+static unsigned HOST_WIDE_INT *reg_nonzero_bits;
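+
+/* E.g. (illustrative): on a machine whose byte loads zero extend, a
+   QImode pseudo loaded only from memory would have 0xff recorded here,
+   meaning every bit above the low byte is known to be zero.  */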
+
+/* Mode used to compute the nonzero bits in reg_nonzero_bits. It is the largest
+ integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
+
+static enum machine_mode nonzero_bits_mode;
+
+/* Nonzero if we know that a register has some leading bits that are always
+ equal to the sign bit. */
+
+static char *reg_sign_bit_copies;
+
+/* Nonzero when reg_nonzero_bits and reg_sign_bit_copies can be safely used.
+ It is zero while computing them and after combine has completed. The
+ former test prevents propagating values based on previously set values,
+ which can be incorrect if a variable is modified in a loop. */
+
+static int nonzero_sign_valid;
+
+/* These arrays are maintained in parallel with reg_last_set_value
+ and are used to store the mode in which the register was last set,
+ the bits that were known to be zero when it was last set, and the
+ number of sign bits copies it was known to have when it was last set. */
+
+static enum machine_mode *reg_last_set_mode;
+static unsigned HOST_WIDE_INT *reg_last_set_nonzero_bits;
+static char *reg_last_set_sign_bit_copies;
+
+/* Record one modification to rtl structure
+ to be undone by storing old_contents into *where.
+ is_int is 1 if the contents are an int. */
+
+struct undo
+{
+ int is_int;
+ union {rtx r; int i;} old_contents;
+ union {rtx *r; int *i;} where;
+};
+
+/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
+ num_undo says how many are currently recorded.
+
+ storage is nonzero if we must undo the allocation of new storage.
+ The value of storage is what to pass to obfree.
+
+ other_insn is nonzero if we have modified some other insn in the process
+ of working on subst_insn. It must be verified too. */
+
+#define MAX_UNDO 50
+
+struct undobuf
+{
+ int num_undo;
+ char *storage;
+ struct undo undo[MAX_UNDO];
+ rtx other_insn;
+};
+
+static struct undobuf undobuf;
+
+/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
+ insn. The substitution can be undone by undo_all. If INTO is already
+ set to NEWVAL, do not record this change. Because computing NEWVAL might
+ also call SUBST, we have to compute it before we put anything into
+ the undo table. */
+
+#define SUBST(INTO, NEWVAL) \
+ do { rtx _new = (NEWVAL); \
+ if (undobuf.num_undo < MAX_UNDO) \
+ { \
+ undobuf.undo[undobuf.num_undo].is_int = 0; \
+ undobuf.undo[undobuf.num_undo].where.r = &INTO; \
+ undobuf.undo[undobuf.num_undo].old_contents.r = INTO; \
+ INTO = _new; \
+ if (undobuf.undo[undobuf.num_undo].old_contents.r != INTO) \
+ undobuf.num_undo++; \
+ } \
+ } while (0)
+
+/* Similar to SUBST, but NEWVAL is an int. INTO will normally be an XINT
+ expression.
+ Note that substitution for the value of a CONST_INT is not safe. */
+
+#define SUBST_INT(INTO, NEWVAL) \
+ do { if (undobuf.num_undo < MAX_UNDO) \
+  { \
+ undobuf.undo[undobuf.num_undo].is_int = 1; \
+ undobuf.undo[undobuf.num_undo].where.i = (int *) &INTO; \
+ undobuf.undo[undobuf.num_undo].old_contents.i = INTO; \
+ INTO = NEWVAL; \
+ if (undobuf.undo[undobuf.num_undo].old_contents.i != INTO) \
+ undobuf.num_undo++; \
+ } \
+ } while (0)
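+
+/* Illustrative use (operand names hypothetical): a simplification that
+   replaces the first operand of X is written
+
+     SUBST (XEXP (x, 0), new_operand);
+
+   so undo_all can restore the old operand if the combination is
+   abandoned.  */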
+
+/* Number of times the pseudo being substituted for
+ was found and replaced. */
+
+static int n_occurrences;
+
+static void init_reg_last_arrays PROTO(());
+static void setup_incoming_promotions PROTO(());
+static void set_nonzero_bits_and_sign_copies PROTO((rtx, rtx));
+static int can_combine_p PROTO((rtx, rtx, rtx, rtx, rtx *, rtx *));
+static int combinable_i3pat PROTO((rtx, rtx *, rtx, rtx, int, rtx *));
+static rtx try_combine PROTO((rtx, rtx, rtx));
+static void undo_all PROTO((void));
+static rtx *find_split_point PROTO((rtx *, rtx));
+static rtx subst PROTO((rtx, rtx, rtx, int, int));
+static rtx simplify_rtx PROTO((rtx, enum machine_mode, int, int));
+static rtx simplify_if_then_else PROTO((rtx));
+static rtx simplify_set PROTO((rtx));
+static rtx simplify_logical PROTO((rtx, int));
+static rtx expand_compound_operation PROTO((rtx));
+static rtx expand_field_assignment PROTO((rtx));
+static rtx make_extraction PROTO((enum machine_mode, rtx, int, rtx, int,
+ int, int, int));
+static rtx extract_left_shift PROTO((rtx, int));
+static rtx make_compound_operation PROTO((rtx, enum rtx_code));
+static int get_pos_from_mask PROTO((unsigned HOST_WIDE_INT, int *));
+static rtx force_to_mode PROTO((rtx, enum machine_mode,
+ unsigned HOST_WIDE_INT, rtx, int));
+static rtx if_then_else_cond PROTO((rtx, rtx *, rtx *));
+static rtx known_cond PROTO((rtx, enum rtx_code, rtx, rtx));
+static rtx make_field_assignment PROTO((rtx));
+static rtx apply_distributive_law PROTO((rtx));
+static rtx simplify_and_const_int PROTO((rtx, enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT));
+static unsigned HOST_WIDE_INT nonzero_bits PROTO((rtx, enum machine_mode));
+static int num_sign_bit_copies PROTO((rtx, enum machine_mode));
+static int merge_outer_ops PROTO((enum rtx_code *, HOST_WIDE_INT *,
+ enum rtx_code, HOST_WIDE_INT,
+ enum machine_mode, int *));
+static rtx simplify_shift_const PROTO((rtx, enum rtx_code, enum machine_mode,
+ rtx, int));
+static int recog_for_combine PROTO((rtx *, rtx, rtx *));
+static rtx gen_lowpart_for_combine PROTO((enum machine_mode, rtx));
+static rtx gen_rtx_combine PVPROTO((enum rtx_code code, enum machine_mode mode,
+ ...));
+static rtx gen_binary PROTO((enum rtx_code, enum machine_mode,
+ rtx, rtx));
+static rtx gen_unary PROTO((enum rtx_code, enum machine_mode,
+ enum machine_mode, rtx));
+static enum rtx_code simplify_comparison PROTO((enum rtx_code, rtx *, rtx *));
+static int reversible_comparison_p PROTO((rtx));
+static void update_table_tick PROTO((rtx));
+static void record_value_for_reg PROTO((rtx, rtx, rtx));
+static void record_dead_and_set_regs_1 PROTO((rtx, rtx));
+static void record_dead_and_set_regs PROTO((rtx));
+static int get_last_value_validate PROTO((rtx *, int, int));
+static rtx get_last_value PROTO((rtx));
+static int use_crosses_set_p PROTO((rtx, int));
+static void reg_dead_at_p_1 PROTO((rtx, rtx));
+static int reg_dead_at_p PROTO((rtx, rtx));
+static void move_deaths PROTO((rtx, int, rtx, rtx *));
+static int reg_bitfield_target_p PROTO((rtx, rtx));
+static void distribute_notes PROTO((rtx, rtx, rtx, rtx, rtx, rtx));
+static void distribute_links PROTO((rtx));
+static void mark_used_regs_combine PROTO((rtx));
+
+/* Main entry point for combiner. F is the first insn of the function.
+ NREGS is the first unused pseudo-reg number. */
+
+void
+combine_instructions (f, nregs)
+ rtx f;
+ int nregs;
+{
+ register rtx insn, next, prev;
+ register int i;
+ register rtx links, nextlinks;
+
+ combine_attempts = 0;
+ combine_merges = 0;
+ combine_extras = 0;
+ combine_successes = 0;
+ undobuf.num_undo = previous_num_undos = 0;
+
+ combine_max_regno = nregs;
+
+ reg_nonzero_bits
+ = (unsigned HOST_WIDE_INT *) alloca (nregs * sizeof (HOST_WIDE_INT));
+ reg_sign_bit_copies = (char *) alloca (nregs * sizeof (char));
+
+ bzero ((char *) reg_nonzero_bits, nregs * sizeof (HOST_WIDE_INT));
+ bzero (reg_sign_bit_copies, nregs * sizeof (char));
+
+ reg_last_death = (rtx *) alloca (nregs * sizeof (rtx));
+ reg_last_set = (rtx *) alloca (nregs * sizeof (rtx));
+ reg_last_set_value = (rtx *) alloca (nregs * sizeof (rtx));
+ reg_last_set_table_tick = (int *) alloca (nregs * sizeof (int));
+ reg_last_set_label = (int *) alloca (nregs * sizeof (int));
+ reg_last_set_invalid = (char *) alloca (nregs * sizeof (char));
+ reg_last_set_mode
+ = (enum machine_mode *) alloca (nregs * sizeof (enum machine_mode));
+ reg_last_set_nonzero_bits
+ = (unsigned HOST_WIDE_INT *) alloca (nregs * sizeof (HOST_WIDE_INT));
+ reg_last_set_sign_bit_copies
+ = (char *) alloca (nregs * sizeof (char));
+
+ init_reg_last_arrays ();
+
+ init_recog_no_volatile ();
+
+ /* Compute maximum uid value so uid_cuid can be allocated. */
+
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ if (INSN_UID (insn) > i)
+ i = INSN_UID (insn);
+
+ uid_cuid = (int *) alloca ((i + 1) * sizeof (int));
+
+ nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
+
+ /* Don't use reg_nonzero_bits when computing it. This can cause problems
+ when, for example, we have j <<= 1 in a loop. */
+
+ nonzero_sign_valid = 0;
+
+ /* Compute the mapping from uids to cuids.
+ Cuids are numbers assigned to insns, like uids,
+ except that cuids increase monotonically through the code.
+
+ Scan all SETs and see if we can deduce anything about what
+ bits are known to be zero for some registers and how many copies
+ of the sign bit are known to exist for those registers.
+
+ Also set any known values so that we can use them while searching
+ for what bits are known to be set. */
+
+ label_tick = 1;
+
+ setup_incoming_promotions ();
+
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ {
+ INSN_CUID (insn) = ++i;
+ subst_low_cuid = i;
+ subst_insn = insn;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies);
+ record_dead_and_set_regs (insn);
+ }
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ label_tick++;
+ }
+
+ nonzero_sign_valid = 1;
+
+ /* Now scan all the insns in forward order. */
+
+ this_basic_block = -1;
+ label_tick = 1;
+ last_call_cuid = 0;
+ mem_last_set = 0;
+ init_reg_last_arrays ();
+ setup_incoming_promotions ();
+
+ for (insn = f; insn; insn = next ? next : NEXT_INSN (insn))
+ {
+ next = 0;
+
+ /* If INSN starts a new basic block, update our basic block number. */
+ if (this_basic_block + 1 < n_basic_blocks
+ && basic_block_head[this_basic_block + 1] == insn)
+ this_basic_block++;
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ label_tick++;
+
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ /* Try this insn with each insn it links back to. */
+
+ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
+ if ((next = try_combine (insn, XEXP (links, 0), NULL_RTX)) != 0)
+ goto retry;
+
+ /* Try each sequence of three linked insns ending with this one. */
+
+ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
+ for (nextlinks = LOG_LINKS (XEXP (links, 0)); nextlinks;
+ nextlinks = XEXP (nextlinks, 1))
+ if ((next = try_combine (insn, XEXP (links, 0),
+ XEXP (nextlinks, 0))) != 0)
+ goto retry;
+
+#ifdef HAVE_cc0
+ /* Try to combine a jump insn that uses CC0
+ with a preceding insn that sets CC0, and maybe with its
+ logical predecessor as well.
+ This is how we make decrement-and-branch insns.
+ We need this special code because data flow connections
+ via CC0 do not get entered in LOG_LINKS. */
+
+ if (GET_CODE (insn) == JUMP_INSN
+ && (prev = prev_nonnote_insn (insn)) != 0
+ && GET_CODE (prev) == INSN
+ && sets_cc0_p (PATTERN (prev)))
+ {
+ if ((next = try_combine (insn, prev, NULL_RTX)) != 0)
+ goto retry;
+
+ for (nextlinks = LOG_LINKS (prev); nextlinks;
+ nextlinks = XEXP (nextlinks, 1))
+ if ((next = try_combine (insn, prev,
+ XEXP (nextlinks, 0))) != 0)
+ goto retry;
+ }
+
+ /* Do the same for an insn that explicitly references CC0. */
+ if (GET_CODE (insn) == INSN
+ && (prev = prev_nonnote_insn (insn)) != 0
+ && GET_CODE (prev) == INSN
+ && sets_cc0_p (PATTERN (prev))
+ && GET_CODE (PATTERN (insn)) == SET
+ && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
+ {
+ if ((next = try_combine (insn, prev, NULL_RTX)) != 0)
+ goto retry;
+
+ for (nextlinks = LOG_LINKS (prev); nextlinks;
+ nextlinks = XEXP (nextlinks, 1))
+ if ((next = try_combine (insn, prev,
+ XEXP (nextlinks, 0))) != 0)
+ goto retry;
+ }
+
+ /* Finally, see if any of the insns that this insn links to
+ explicitly references CC0. If so, try this insn, that insn,
+ and its predecessor if it sets CC0. */
+ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
+ if (GET_CODE (XEXP (links, 0)) == INSN
+ && GET_CODE (PATTERN (XEXP (links, 0))) == SET
+ && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (XEXP (links, 0))))
+ && (prev = prev_nonnote_insn (XEXP (links, 0))) != 0
+ && GET_CODE (prev) == INSN
+ && sets_cc0_p (PATTERN (prev))
+ && (next = try_combine (insn, XEXP (links, 0), prev)) != 0)
+ goto retry;
+#endif
+
+ /* Try combining an insn with two different insns whose results it
+ uses. */
+ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
+ for (nextlinks = XEXP (links, 1); nextlinks;
+ nextlinks = XEXP (nextlinks, 1))
+ if ((next = try_combine (insn, XEXP (links, 0),
+ XEXP (nextlinks, 0))) != 0)
+ goto retry;
+
+ if (GET_CODE (insn) != NOTE)
+ record_dead_and_set_regs (insn);
+
+ retry:
+ ;
+ }
+ }
+
+ total_attempts += combine_attempts;
+ total_merges += combine_merges;
+ total_extras += combine_extras;
+ total_successes += combine_successes;
+
+ nonzero_sign_valid = 0;
+}
+
+/* Wipe the reg_last_xxx arrays in preparation for another pass. */
+
+static void
+init_reg_last_arrays ()
+{
+ int nregs = combine_max_regno;
+
+ bzero ((char *) reg_last_death, nregs * sizeof (rtx));
+ bzero ((char *) reg_last_set, nregs * sizeof (rtx));
+ bzero ((char *) reg_last_set_value, nregs * sizeof (rtx));
+ bzero ((char *) reg_last_set_table_tick, nregs * sizeof (int));
+ bzero ((char *) reg_last_set_label, nregs * sizeof (int));
+ bzero (reg_last_set_invalid, nregs * sizeof (char));
+ bzero ((char *) reg_last_set_mode, nregs * sizeof (enum machine_mode));
+ bzero ((char *) reg_last_set_nonzero_bits, nregs * sizeof (HOST_WIDE_INT));
+ bzero (reg_last_set_sign_bit_copies, nregs * sizeof (char));
+}
+
+/* Set up any promoted values for incoming argument registers. */
+
+static void
+setup_incoming_promotions ()
+{
+#ifdef PROMOTE_FUNCTION_ARGS
+ int regno;
+ rtx reg;
+ enum machine_mode mode;
+ int unsignedp;
+ rtx first = get_insns ();
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (FUNCTION_ARG_REGNO_P (regno)
+ && (reg = promoted_input_arg (regno, &mode, &unsignedp)) != 0)
+ record_value_for_reg (reg, first,
+ gen_rtx (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
+ GET_MODE (reg),
+ gen_rtx (CLOBBER, mode, const0_rtx)));
+#endif
+}
+
+/* Called via note_stores. If X is a pseudo that is used in more than
+ one basic block, is narrower than HOST_BITS_PER_WIDE_INT, and is being
+ set, record what bits are known zero. If we are clobbering X,
+ ignore this "set" because the clobbered value won't be used.
+
+ If we are setting only a portion of X and we can't figure out what
+ portion, assume all bits will be used since we don't know what will
+ be happening.
+
+ Similarly, set how many bits of X are known to be copies of the sign bit
+ at all locations in the function. This is the smallest number implied
+ by any set of X. */
+
+static void
+set_nonzero_bits_and_sign_copies (x, set)
+ rtx x;
+ rtx set;
+{
+ int num;
+
+ if (GET_CODE (x) == REG
+ && REGNO (x) >= FIRST_PSEUDO_REGISTER
+ && reg_n_sets[REGNO (x)] > 1
+ && reg_basic_block[REGNO (x)] < 0
+ /* If this register is undefined at the start of the file, we can't
+ say what its contents were. */
+ && ! (basic_block_live_at_start[0][REGNO (x) / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1 << (REGNO (x) % REGSET_ELT_BITS)))
+ && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
+ {
+ if (GET_CODE (set) == CLOBBER)
+ {
+ reg_nonzero_bits[REGNO (x)] = GET_MODE_MASK (GET_MODE (x));
+ reg_sign_bit_copies[REGNO (x)] = 0;
+ return;
+ }
+
+ /* If this is a complex assignment, see if we can convert it into a
+ simple assignment. */
+ set = expand_field_assignment (set);
+
+ /* If this is a simple assignment, or we have a paradoxical SUBREG,
+ set what we know about X. */
+
+ if (SET_DEST (set) == x
+ || (GET_CODE (SET_DEST (set)) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (set)))))
+ && SUBREG_REG (SET_DEST (set)) == x))
+ {
+ rtx src = SET_SRC (set);
+
+#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
+ /* If X is narrower than a word and SRC is a non-negative
+ constant that would appear negative in the mode of X,
+ sign-extend it for use in reg_nonzero_bits because some
+ machines (maybe most) will actually do the sign-extension
+ and this is the conservative approach.
+
+ ??? For 2.5, try to tighten up the MD files in this regard
+ instead of this kludge. */
+
+ if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
+ && GET_CODE (src) == CONST_INT
+ && INTVAL (src) > 0
+ && 0 != (INTVAL (src)
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
+ src = GEN_INT (INTVAL (src)
+ | ((HOST_WIDE_INT) (-1)
+ << GET_MODE_BITSIZE (GET_MODE (x))));
+#endif
+
+ reg_nonzero_bits[REGNO (x)]
+ |= nonzero_bits (src, nonzero_bits_mode);
+ num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
+ if (reg_sign_bit_copies[REGNO (x)] == 0
+ || reg_sign_bit_copies[REGNO (x)] > num)
+ reg_sign_bit_copies[REGNO (x)] = num;
+ }
+ else
+ {
+ reg_nonzero_bits[REGNO (x)] = GET_MODE_MASK (GET_MODE (x));
+ reg_sign_bit_copies[REGNO (x)] = 0;
+ }
+ }
+}
+
+/* See if INSN can be combined into I3. PRED and SUCC, if nonzero, are
+ insns that were previously combined into I3 or that will be combined
+ into the merger of INSN and I3.
+
+ Return 0 if the combination is not allowed for any reason.
+
+ If the combination is allowed, *PDEST will be set to the single
+ destination of INSN and *PSRC to the single source, and this function
+ will return 1. */
+
+static int
+can_combine_p (insn, i3, pred, succ, pdest, psrc)
+ rtx insn;
+ rtx i3;
+ rtx pred, succ;
+ rtx *pdest, *psrc;
+{
+ int i;
+ rtx set = 0, src, dest;
+ rtx p, link;
+ int all_adjacent = (succ ? (next_active_insn (insn) == succ
+ && next_active_insn (succ) == i3)
+ : next_active_insn (insn) == i3);
+
+ /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
+ or a PARALLEL consisting of such a SET and CLOBBERs.
+
+ If INSN has CLOBBER parallel parts, ignore them for our processing.
+ By definition, these happen during the execution of the insn. When it
+ is merged with another insn, all bets are off. If they are, in fact,
+ needed and aren't also supplied in I3, they may be added by
+ recog_for_combine. Otherwise, it won't match.
+
+ We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
+ note.
+
+ Get the source and destination of INSN. If more than one, can't
+ combine. */
+
+ if (GET_CODE (PATTERN (insn)) == SET)
+ set = PATTERN (insn);
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
+ {
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ {
+ rtx elt = XVECEXP (PATTERN (insn), 0, i);
+
+ switch (GET_CODE (elt))
+ {
+ /* We can ignore CLOBBERs. */
+ case CLOBBER:
+ break;
+
+ case SET:
+ /* Ignore SETs whose result isn't used but not those that
+ have side-effects. */
+ if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
+ && ! side_effects_p (elt))
+ break;
+
+ /* If we have already found a SET, this is a second one and
+ so we cannot combine with this insn. */
+ if (set)
+ return 0;
+
+ set = elt;
+ break;
+
+ default:
+ /* Anything else means we can't combine. */
+ return 0;
+ }
+ }
+
+ if (set == 0
+ /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
+ so don't do anything with it. */
+ || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
+ return 0;
+ }
+ else
+ return 0;
+
+ if (set == 0)
+ return 0;
+
+ set = expand_field_assignment (set);
+ src = SET_SRC (set), dest = SET_DEST (set);
+
+ /* Don't eliminate a store in the stack pointer. */
+ if (dest == stack_pointer_rtx
+ /* If we couldn't eliminate a field assignment, we can't combine. */
+ || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == STRICT_LOW_PART
+ /* Don't combine with an insn that sets a register to itself if it has
+ a REG_EQUAL note. This may be part of a REG_NO_CONFLICT sequence. */
+ || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
+ /* Can't merge a function call. */
+ || GET_CODE (src) == CALL
+ /* Don't eliminate a function call argument. */
+ || (GET_CODE (i3) == CALL_INSN && find_reg_fusage (i3, USE, dest))
+ /* Don't substitute into an incremented register. */
+ || FIND_REG_INC_NOTE (i3, dest)
+ || (succ && FIND_REG_INC_NOTE (succ, dest))
+ /* Don't combine the end of a libcall into anything. */
+ || find_reg_note (insn, REG_RETVAL, NULL_RTX)
+ /* Make sure that DEST is not used after SUCC but before I3. */
+ || (succ && ! all_adjacent
+ && reg_used_between_p (dest, succ, i3))
+ /* Make sure that the value that is to be substituted for the register
+ does not use any registers whose values alter in between. However,
+ if the insns are adjacent, a use can't cross a set even though we
+ think it might (this can happen for a sequence of insns each setting
+ the same destination; reg_last_set of that register might point to
+ a NOTE). If INSN has a REG_EQUIV note, the register is always
+ equivalent to the memory so the substitution is valid even if there
+ are intervening stores. Also, don't move a volatile asm or
+ UNSPEC_VOLATILE across any other insns. */
+ || (! all_adjacent
+ && (((GET_CODE (src) != MEM
+ || ! find_reg_note (insn, REG_EQUIV, src))
+ && use_crosses_set_p (src, INSN_CUID (insn)))
+ || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
+ || GET_CODE (src) == UNSPEC_VOLATILE))
+ /* If there is a REG_NO_CONFLICT note for DEST in I3 or SUCC, we get
+ better register allocation by not doing the combine. */
+ || find_reg_note (i3, REG_NO_CONFLICT, dest)
+ || (succ && find_reg_note (succ, REG_NO_CONFLICT, dest))
+ /* Don't combine across a CALL_INSN, because that would possibly
+ change whether the life span of some REGs crosses calls or not,
+ and it is a pain to update that information.
+ Exception: if source is a constant, moving it later can't hurt.
+ Accept that special case, because it helps -fforce-addr a lot. */
+ || (INSN_CUID (insn) < last_call_cuid && ! CONSTANT_P (src)))
+ return 0;
+
+ /* DEST must either be a REG or CC0. */
+ if (GET_CODE (dest) == REG)
+ {
+ /* If register alignment is being enforced for multi-word items in all
+ cases except for parameters, it is possible to have a register copy
+ insn referencing a hard register that is not allowed to contain the
+ mode being copied and which would not be valid as an operand of most
+ insns. Eliminate this problem by not combining with such an insn.
+
+ Also, on some machines we don't want to extend the life of a hard
+ register. */
+
+ if (GET_CODE (src) == REG
+ && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
+ && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
+ /* Don't extend the life of a hard register unless it is
+ a user variable (if we have few registers) or it can't
+ fit into the desired register (meaning something special
+ is going on). */
+ || (REGNO (src) < FIRST_PSEUDO_REGISTER
+ && (! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src))
+#ifdef SMALL_REGISTER_CLASSES
+ || ! REG_USERVAR_P (src)
+#endif
+ ))))
+ return 0;
+ }
+ else if (GET_CODE (dest) != CC0)
+ return 0;
+
+ /* Don't substitute for a register intended as a clobberable operand.
+ Similarly, don't substitute an expression containing a register that
+ will be clobbered in I3. */
+ if (GET_CODE (PATTERN (i3)) == PARALLEL)
+ for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER
+ && (reg_overlap_mentioned_p (XEXP (XVECEXP (PATTERN (i3), 0, i), 0),
+ src)
+ || rtx_equal_p (XEXP (XVECEXP (PATTERN (i3), 0, i), 0), dest)))
+ return 0;
+
+ /* If INSN contains anything volatile, or is an `asm' (whether volatile
+ or not), reject, unless nothing volatile comes between it and I3,
+ with the exception of SUCC. */
+
+ if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
+ for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && p != succ && volatile_refs_p (PATTERN (p)))
+ return 0;
+
+ /* If there are any volatile insns between INSN and I3, reject, because
+ they might affect machine state. */
+
+ for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && p != succ && volatile_insn_p (PATTERN (p)))
+ return 0;
+
+ /* If INSN or I2 contains an autoincrement or autodecrement,
+ make sure that register is not used between there and I3,
+ and not already used in I3 either.
+ Also insist that I3 not be a jump; if it were one
+ and the incremented register were spilled, we would lose. */
+
+#ifdef AUTO_INC_DEC
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_INC
+ && (GET_CODE (i3) == JUMP_INSN
+ || reg_used_between_p (XEXP (link, 0), insn, i3)
+ || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
+ return 0;
+#endif
+
+#ifdef HAVE_cc0
+ /* Don't combine an insn that follows a CC0-setting insn.
+ An insn that uses CC0 must not be separated from the one that sets it.
+ We do, however, allow I2 to follow a CC0-setting insn if that insn
+ is passed as I1; in that case it will be deleted also.
+ We also allow combining in this case if all the insns are adjacent
+ because that would leave the two CC0 insns adjacent as well.
+ It would be more logical to test whether CC0 occurs inside I1 or I2,
+ but that would be much slower, and this ought to be equivalent. */
+
+ p = prev_nonnote_insn (insn);
+ if (p && p != pred && GET_CODE (p) == INSN && sets_cc0_p (PATTERN (p))
+ && ! all_adjacent)
+ return 0;
+#endif
+
+ /* If we get here, we have passed all the tests and the combination is
+ to be allowed. */
+
+ *pdest = dest;
+ *psrc = src;
+
+ return 1;
+}
+
+/* LOC is the location within I3 that contains its pattern or the component
+ of a PARALLEL of the pattern. We validate that it is valid for combining.
+
+ One problem is that if I3 modifies its output, as opposed to replacing it
+ entirely, we can't allow the output to contain I2DEST or I1DEST, as doing
+ so would produce an insn that is not equivalent to the original insns.
+
+ Consider:
+
+ (set (reg:DI 101) (reg:DI 100))
+ (set (subreg:SI (reg:DI 101) 0) <foo>)
+
+ This is NOT equivalent to:
+
+ (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
+ (set (reg:DI 101) (reg:DI 100))])
+
+ Not only does this modify 100 (in which case it might still be valid
+ if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
+
+ We can also run into a problem if I2 sets a register that I1
+ uses and I1 gets directly substituted into I3 (not via I2). In that
+ case, we would be getting the wrong value of I2DEST into I3, so we
+ must reject the combination. This case occurs when I2 and I1 both
+ feed into I3, rather than when I1 feeds into I2, which feeds into I3.
+ If I1_NOT_IN_SRC is non-zero, it means that finding I1DEST in the source
+ of a SET must prevent combination from occurring.
+
+ On machines where SMALL_REGISTER_CLASSES is defined, we don't combine
+ if the destination of a SET is a hard register that isn't a user
+ variable.
+
+ Before doing the above check, we first try to expand a field assignment
+ into a set of logical operations.
+
+ If PI3DEST_KILLED is non-zero, it is a pointer to a location in which
+ we place a register that is both set and used within I3. If more than one
+ such register is detected, we fail.
+
+ Return 1 if the combination is valid, zero otherwise. */
+
+static int
+combinable_i3pat (i3, loc, i2dest, i1dest, i1_not_in_src, pi3dest_killed)
+ rtx i3;
+ rtx *loc;
+ rtx i2dest;
+ rtx i1dest;
+ int i1_not_in_src;
+ rtx *pi3dest_killed;
+{
+ rtx x = *loc;
+
+ if (GET_CODE (x) == SET)
+ {
+ rtx set = expand_field_assignment (x);
+ rtx dest = SET_DEST (set);
+ rtx src = SET_SRC (set);
+ rtx inner_dest = dest, inner_src = src;
+
+ SUBST (*loc, set);
+
+ while (GET_CODE (inner_dest) == STRICT_LOW_PART
+ || GET_CODE (inner_dest) == SUBREG
+ || GET_CODE (inner_dest) == ZERO_EXTRACT)
+ inner_dest = XEXP (inner_dest, 0);
+
+ /* We probably don't need this any more now that LIMIT_RELOAD_CLASS
+ was added. */
+#if 0
+ while (GET_CODE (inner_src) == STRICT_LOW_PART
+ || GET_CODE (inner_src) == SUBREG
+ || GET_CODE (inner_src) == ZERO_EXTRACT)
+ inner_src = XEXP (inner_src, 0);
+
+ /* If it is better that two different modes keep two different pseudos,
+ avoid combining them. This avoids producing the following pattern
+ on a 386:
+ (set (subreg:SI (reg/v:QI 21) 0)
+ (lshiftrt:SI (reg/v:SI 20)
+ (const_int 24)))
+ If that were made, reload could not handle the pair of
+ reg 20/21, since it would try to get any GENERAL_REGS
+ but some of them don't handle QImode. */
+
+ if (rtx_equal_p (inner_src, i2dest)
+ && GET_CODE (inner_dest) == REG
+ && ! MODES_TIEABLE_P (GET_MODE (i2dest), GET_MODE (inner_dest)))
+ return 0;
+#endif
+
+ /* Check for the case where I3 modifies its output, as
+ discussed above. */
+ if ((inner_dest != dest
+ && (reg_overlap_mentioned_p (i2dest, inner_dest)
+ || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))))
+ /* This is the same test done in can_combine_p except that we
+ allow a hard register with SMALL_REGISTER_CLASSES if SRC is a
+ CALL operation. */
+ || (GET_CODE (inner_dest) == REG
+ && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
+ && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
+ GET_MODE (inner_dest))
+#ifdef SMALL_REGISTER_CLASSES
+ || (GET_CODE (src) != CALL && ! REG_USERVAR_P (inner_dest))
+#endif
+ ))
+ || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src)))
+ return 0;
+
+ /* If DEST is used in I3, it is being killed in this insn,
+ so record that for later.
+ Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
+ STACK_POINTER_REGNUM, since these are always considered to be
+ live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
+ if (pi3dest_killed && GET_CODE (dest) == REG
+ && reg_referenced_p (dest, PATTERN (i3))
+ && REGNO (dest) != FRAME_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && REGNO (dest) != HARD_FRAME_POINTER_REGNUM
+#endif
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && (REGNO (dest) != ARG_POINTER_REGNUM
+ || ! fixed_regs [REGNO (dest)])
+#endif
+ && REGNO (dest) != STACK_POINTER_REGNUM)
+ {
+ if (*pi3dest_killed)
+ return 0;
+
+ *pi3dest_killed = dest;
+ }
+ }
+
+ else if (GET_CODE (x) == PARALLEL)
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (x, 0); i++)
+ if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest,
+ i1_not_in_src, pi3dest_killed))
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Try to combine the insns I1 and I2 into I3.
+ Here I1 and I2 appear earlier than I3.
+ I1 can be zero; then we combine just I2 into I3.
+
+ If we are combining three insns and the resulting insn is not recognized,
+ try splitting it into two insns. If that happens, I2 and I3 are retained
+ and I1 is pseudo-deleted by turning it into a NOTE. Otherwise, I1 and I2
+ are pseudo-deleted.
+
+ Return 0 if the combination does not work. Then nothing is changed.
+ If we did the combination, return the insn at which combine should
+ resume scanning. */
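+
+ /* A minimal sketch of the common case (insns invented for illustration):
+
+ I2: (set (reg:SI 65) (plus:SI (reg:SI 64) (const_int 4)))
+ I3: (set (mem:SI (reg:SI 65)) (const_int 0))
+
+ becomes, when reg 65 dies in I3 and the target recognizes the result,
+
+ I3: (set (mem:SI (plus:SI (reg:SI 64) (const_int 4))) (const_int 0))
+
+ with I2 pseudo-deleted by turning it into a NOTE. */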
+
+static rtx
+try_combine (i3, i2, i1)
+ register rtx i3, i2, i1;
+{
+ /* New patterns for I3 and I2, respectively. */
+ rtx newpat, newi2pat = 0;
+ /* Indicates need to preserve SET in I1 or I2 in I3 if it is not dead. */
+ int added_sets_1, added_sets_2;
+ /* Total number of SETs to put into I3. */
+ int total_sets;
+ /* Nonzero if I2's body now appears in I3. */
+ int i2_is_used;
+ /* INSN_CODEs for new I3, new I2, and user of condition code. */
+ int insn_code_number, i2_code_number, other_code_number;
+ /* Contains I3 if the destination of I3 is used in its source, which means
+ that the old life of I3 is being killed. If that usage is placed into
+ I2 and not in I3, a REG_DEAD note must be made. */
+ rtx i3dest_killed = 0;
+ /* SET_DEST and SET_SRC of I2 and I1. */
+ rtx i2dest, i2src, i1dest = 0, i1src = 0;
+ /* PATTERN (I2), or a copy of it in certain cases. */
+ rtx i2pat;
+ /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
+ int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
+ int i1_feeds_i3 = 0;
+ /* Notes that must be added to REG_NOTES in I3 and I2. */
+ rtx new_i3_notes, new_i2_notes;
+ /* Nonzero if we substituted I3 into I2 instead of the normal case. */
+ int i3_subst_into_i2 = 0;
+
+ int maxreg;
+ rtx temp;
+ register rtx link;
+ int i;
+
+ /* If any of I1, I2, and I3 isn't really an insn, we can't do anything.
+ This can occur when flow deletes an insn that it has merged into an
+ auto-increment address. We also can't do anything if I3 has a
+ REG_LIBCALL note since we don't want to disrupt the contiguity of a
+ libcall. */
+
+ if (GET_RTX_CLASS (GET_CODE (i3)) != 'i'
+ || GET_RTX_CLASS (GET_CODE (i2)) != 'i'
+ || (i1 && GET_RTX_CLASS (GET_CODE (i1)) != 'i')
+ || find_reg_note (i3, REG_LIBCALL, NULL_RTX))
+ return 0;
+
+ combine_attempts++;
+
+ undobuf.num_undo = previous_num_undos = 0;
+ undobuf.other_insn = 0;
+
+ /* Save the current high-water-mark so we can free storage if we didn't
+ accept this combination. */
+ undobuf.storage = (char *) oballoc (0);
+
+ /* Reset the hard register usage information. */
+ CLEAR_HARD_REG_SET (newpat_used_regs);
+
+ /* If I1 and I2 both feed I3, they can be in any order. To simplify the
+ code below, set I1 to be the earlier of the two insns. */
+ if (i1 && INSN_CUID (i1) > INSN_CUID (i2))
+ temp = i1, i1 = i2, i2 = temp;
+
+ subst_prev_insn = 0;
+ added_links_insn = 0;
+
+ /* First check for one important special-case that the code below will
+ not handle. Namely, the case where I1 is zero, I2 has multiple sets,
+ and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
+ we may be able to replace that destination with the destination of I3.
+ This occurs in the common code where we compute both a quotient and
+ remainder into a structure, in which case we want to do the computation
+ directly into the structure to avoid register-register copies.
+
+ We make very conservative checks below and only try to handle the
+ most common cases of this. For example, we only handle the case
+ where I2 and I3 are adjacent to avoid making difficult register
+ usage tests. */
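+
+ /* For instance (a made-up divmod example), with
+
+ I2: (parallel [(set (reg:SI 70) (div:SI A B))
+ (set (reg:SI 71) (mod:SI A B))])
+ I3: (set (reg/v:SI 72) (reg:SI 71))
+
+ we substitute (reg/v:SI 72) for (reg:SI 71) as the destination inside
+ I2 and make that PARALLEL the new pattern for I3, eliminating the
+ copy. */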
+
+ if (i1 == 0 && GET_CODE (i3) == INSN && GET_CODE (PATTERN (i3)) == SET
+ && GET_CODE (SET_SRC (PATTERN (i3))) == REG
+ && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
+#ifdef SMALL_REGISTER_CLASSES
+ && (GET_CODE (SET_DEST (PATTERN (i3))) != REG
+ || REGNO (SET_DEST (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
+ || REG_USERVAR_P (SET_DEST (PATTERN (i3))))
+#endif
+ && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
+ && GET_CODE (PATTERN (i2)) == PARALLEL
+ && ! side_effects_p (SET_DEST (PATTERN (i3)))
+ /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
+ below would need to check what is inside (and reg_overlap_mentioned_p
+ doesn't support those codes anyway). Don't allow those destinations;
+ the resulting insn isn't likely to be recognized anyway. */
+ && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
+ && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
+ && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
+ SET_DEST (PATTERN (i3)))
+ && next_real_insn (i2) == i3)
+ {
+ rtx p2 = PATTERN (i2);
+
+ /* Make sure that the destination of I3,
+ which we are going to substitute into one output of I2,
+ is not used within another output of I2. We must avoid making this:
+ (parallel [(set (mem (reg 69)) ...)
+ (set (reg 69) ...)])
+ which is not well-defined as to order of actions.
+ (Besides, reload can't handle output reloads for this.)
+
+ The problem can also happen if the dest of I3 is a memory ref,
+ if another dest in I2 is an indirect memory ref. */
+ for (i = 0; i < XVECLEN (p2, 0); i++)
+ if (GET_CODE (XVECEXP (p2, 0, i)) == SET
+ && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
+ SET_DEST (XVECEXP (p2, 0, i))))
+ break;
+
+ if (i == XVECLEN (p2, 0))
+ for (i = 0; i < XVECLEN (p2, 0); i++)
+ if (SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
+ {
+ combine_merges++;
+
+ subst_insn = i3;
+ subst_low_cuid = INSN_CUID (i2);
+
+ added_sets_2 = added_sets_1 = 0;
+ i2dest = SET_SRC (PATTERN (i3));
+
+ /* Replace the dest in I2 with our dest and make the resulting
+ insn the new pattern for I3. Then skip to where we
+ validate the pattern. Everything was set up above. */
+ SUBST (SET_DEST (XVECEXP (p2, 0, i)),
+ SET_DEST (PATTERN (i3)));
+
+ newpat = p2;
+ i3_subst_into_i2 = 1;
+ goto validate_replacement;
+ }
+ }
+
+#ifndef HAVE_cc0
+ /* If we have no I1 and I2 looks like:
+ (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
+ (set Y OP)])
+ make up a dummy I1 that is
+ (set Y OP)
+ and change I2 to be
+ (set (reg:CC X) (compare:CC Y (const_int 0)))
+
+ (We can ignore any trailing CLOBBERs.)
+
+ This undoes a previous combination and allows us to match a branch-and-
+ decrement insn. */
+
+ if (i1 == 0 && GET_CODE (PATTERN (i2)) == PARALLEL
+ && XVECLEN (PATTERN (i2), 0) >= 2
+ && GET_CODE (XVECEXP (PATTERN (i2), 0, 0)) == SET
+ && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
+ == MODE_CC)
+ && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
+ && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
+ && GET_CODE (XVECEXP (PATTERN (i2), 0, 1)) == SET
+ && GET_CODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 1))) == REG
+ && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
+ SET_SRC (XVECEXP (PATTERN (i2), 0, 1))))
+ {
+ for (i = XVECLEN (PATTERN (i2), 0) - 1; i >= 2; i--)
+ if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != CLOBBER)
+ break;
+
+ if (i == 1)
+ {
+ /* We make I1 with the same INSN_UID as I2. This gives it
+ the same INSN_CUID for value tracking. Our fake I1 will
+ never appear in the insn stream so giving it the same INSN_UID
+ as I2 will not cause a problem. */
+
+ subst_prev_insn = i1
+ = gen_rtx (INSN, VOIDmode, INSN_UID (i2), 0, i2,
+ XVECEXP (PATTERN (i2), 0, 1), -1, 0, 0);
+
+ SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
+ SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
+ SET_DEST (PATTERN (i1)));
+ }
+ }
+#endif
+
+ /* Verify that I2 and I1 are valid for combining. */
+ if (! can_combine_p (i2, i3, i1, NULL_RTX, &i2dest, &i2src)
+ || (i1 && ! can_combine_p (i1, i3, NULL_RTX, i2, &i1dest, &i1src)))
+ {
+ undo_all ();
+ return 0;
+ }
+
+ /* Record whether I2DEST is used in I2SRC and similarly for the other
+ cases. Knowing this will help in register status updating below. */
+ i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
+ i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
+ i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
+
+ /* See if I1 directly feeds into I3. It does if I1DEST is not used
+ in I2SRC. */
+ i1_feeds_i3 = i1 && ! reg_overlap_mentioned_p (i1dest, i2src);
+
+ /* Ensure that I3's pattern can be the destination of combines. */
+ if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest,
+ i1 && i2dest_in_i1src && i1_feeds_i3,
+ &i3dest_killed))
+ {
+ undo_all ();
+ return 0;
+ }
+
+ /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
+ We used to do this EXCEPT in one case: I3 has a post-inc in an
+ output operand. However, that exception can give rise to insns like
+ mov r3,(r3)+
+ which is a famous insn on the PDP-11 where the value of r3 used as the
+ source was model-dependent. Avoid this sort of thing. */
+
+#if 0
+ if (!(GET_CODE (PATTERN (i3)) == SET
+ && GET_CODE (SET_SRC (PATTERN (i3))) == REG
+ && GET_CODE (SET_DEST (PATTERN (i3))) == MEM
+ && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
+ || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
+ /* It's not the exception. */
+#endif
+#ifdef AUTO_INC_DEC
+ for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_INC
+ && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
+ || (i1 != 0
+ && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
+ {
+ undo_all ();
+ return 0;
+ }
+#endif
+
+ /* See if the SETs in I1 or I2 need to be kept around in the merged
+ instruction: whenever the value set there is still needed past I3.
+ For the SETs in I2, this is easy: we see if I2DEST dies or is set in I3.
+
+ For the SET in I1, we have two cases: If I1 and I2 independently
+ feed into I3, the set in I1 needs to be kept around if I1DEST dies
+ or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
+ in I1 needs to be kept around unless I1DEST dies or is set in either
+ I2 or I3. We can distinguish these cases by seeing if I2SRC mentions
+ I1DEST. If so, we know I1 feeds into I2. */
+
+ added_sets_2 = ! dead_or_set_p (i3, i2dest);
+
+ added_sets_1
+ = i1 && ! (i1_feeds_i3 ? dead_or_set_p (i3, i1dest)
+ : (dead_or_set_p (i3, i1dest) || dead_or_set_p (i2, i1dest)));
+
+ /* If the set in I2 needs to be kept around, we must make a copy of
+ PATTERN (I2), so that when we substitute I1SRC for I1DEST in
+ PATTERN (I2), we are only substituting for the original I1DEST, not into
+ an already-substituted copy. This also prevents making self-referential
+ rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
+ I2DEST. */
+
+ i2pat = (GET_CODE (PATTERN (i2)) == PARALLEL
+ ? gen_rtx (SET, VOIDmode, i2dest, i2src)
+ : PATTERN (i2));
+
+ if (added_sets_2)
+ i2pat = copy_rtx (i2pat);
+
+ combine_merges++;
+
+ /* Substitute in the latest insn for the regs set by the earlier ones. */
+
+ maxreg = max_reg_num ();
+
+ subst_insn = i3;
+
+ /* It is possible that the source of I2 or I1 may be performing an
+ unneeded operation, such as a ZERO_EXTEND of something that is known
+ to have the high part zero. Handle that case by letting subst look at
+ the innermost one of them.
+
+ Another way to do this would be to have a function that tries to
+ simplify a single insn instead of merging two or more insns. We don't
+ do this because of the potential of infinite loops and because
+ of the potential extra memory required. However, doing it the way
+ we are is a bit of a kludge and doesn't catch all cases.
+
+ But only do this if -fexpensive-optimizations since it slows things down
+ and doesn't usually win. */
+
+ if (flag_expensive_optimizations)
+ {
+ /* Pass pc_rtx so no substitutions are done, just simplifications.
+ The cases that we are interested in here do not involve the few
+ cases where is_replaced is checked. */
+ if (i1)
+ {
+ subst_low_cuid = INSN_CUID (i1);
+ i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0);
+ }
+ else
+ {
+ subst_low_cuid = INSN_CUID (i2);
+ i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0);
+ }
+
+ previous_num_undos = undobuf.num_undo;
+ }
+
+#ifndef HAVE_cc0
+ /* Many machines that don't use CC0 have insns that can both perform an
+ arithmetic operation and set the condition code. These operations will
+ be represented as a PARALLEL with the first element of the vector
+ being a COMPARE of an arithmetic operation with the constant zero.
+ The second element of the vector will set some pseudo to the result
+ of the same arithmetic operation. If we simplify the COMPARE, we won't
+ match such a pattern and so will generate an extra insn. Here we test
+ for this case, where both the comparison and the operation result are
+ needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
+ I2SRC. Later we will make the PARALLEL that contains I2. */
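+
+ /* Schematically (operands invented), with
+
+ I2: (set (reg:SI 71) (plus:SI A B))
+ I3: (set (reg:CC 70) (compare:CC (reg:SI 71) (const_int 0)))
+
+ we substitute the PLUS into the COMPARE now; the PARALLEL that also
+ performs the addition is built further down. */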
+
+ if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
+ && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
+ && XEXP (SET_SRC (PATTERN (i3)), 1) == const0_rtx
+ && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
+ {
+ rtx *cc_use;
+ enum machine_mode compare_mode;
+
+ newpat = PATTERN (i3);
+ SUBST (XEXP (SET_SRC (newpat), 0), i2src);
+
+ i2_is_used = 1;
+
+#ifdef EXTRA_CC_MODES
+ /* See if a COMPARE with the operand we substituted in should be done
+ with the mode that is currently being used. If not, do the same
+ processing we do in `subst' for a SET; namely, if the destination
+ is used only once, try to replace it with a register of the proper
+ mode and also replace the COMPARE. */
+ if (undobuf.other_insn == 0
+ && (cc_use = find_single_use (SET_DEST (newpat), i3,
+ &undobuf.other_insn))
+ && ((compare_mode = SELECT_CC_MODE (GET_CODE (*cc_use),
+ i2src, const0_rtx))
+ != GET_MODE (SET_DEST (newpat))))
+ {
+ int regno = REGNO (SET_DEST (newpat));
+ rtx new_dest = gen_rtx (REG, compare_mode, regno);
+
+ if (regno < FIRST_PSEUDO_REGISTER
+ || (reg_n_sets[regno] == 1 && ! added_sets_2
+ && ! REG_USERVAR_P (SET_DEST (newpat))))
+ {
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ SUBST (regno_reg_rtx[regno], new_dest);
+
+ SUBST (SET_DEST (newpat), new_dest);
+ SUBST (XEXP (*cc_use, 0), new_dest);
+ SUBST (SET_SRC (newpat),
+ gen_rtx_combine (COMPARE, compare_mode,
+ i2src, const0_rtx));
+ }
+ else
+ undobuf.other_insn = 0;
+ }
+#endif
+ }
+ else
+#endif
+ {
+ n_occurrences = 0; /* `subst' counts here */
+
+ /* If I1 feeds into I2 (not into I3) and I1DEST is in I1SRC, we
+ need to make a unique copy of I2SRC each time we substitute it
+ to avoid self-referential rtl. */
+
+ subst_low_cuid = INSN_CUID (i2);
+ newpat = subst (PATTERN (i3), i2dest, i2src, 0,
+ ! i1_feeds_i3 && i1dest_in_i1src);
+ previous_num_undos = undobuf.num_undo;
+
+ /* Record whether I2's body now appears within I3's body. */
+ i2_is_used = n_occurrences;
+ }
+
+ /* If we already got a failure, don't try to do more. Otherwise,
+ try to substitute in I1 if we have it. */
+
+ if (i1 && GET_CODE (newpat) != CLOBBER)
+ {
+ /* Before we can do this substitution, we must redo the test done
+ above (see detailed comments there) that ensures that I1DEST
+ isn't mentioned in any SETs in NEWPAT that are field assignments. */
+
+ if (! combinable_i3pat (NULL_RTX, &newpat, i1dest, NULL_RTX,
+ 0, NULL_PTR))
+ {
+ undo_all ();
+ return 0;
+ }
+
+ n_occurrences = 0;
+ subst_low_cuid = INSN_CUID (i1);
+ newpat = subst (newpat, i1dest, i1src, 0, 0);
+ previous_num_undos = undobuf.num_undo;
+ }
+
+ /* Fail if an autoincrement side-effect has been duplicated. Be careful
+ to count all the ways that I2SRC and I1SRC can be used. */
+ if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
+ && i2_is_used + added_sets_2 > 1)
+ || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
+ && (n_occurrences + added_sets_1 + (added_sets_2 && ! i1_feeds_i3)
+ > 1))
+ /* Fail if we tried to make a new register (we used to abort, but there's
+ really no reason to). */
+ || max_reg_num () != maxreg
+ /* Fail if we couldn't do something and have a CLOBBER. */
+ || GET_CODE (newpat) == CLOBBER)
+ {
+ undo_all ();
+ return 0;
+ }
+
+ /* If the actions of the earlier insns must be kept
+ in addition to substituting them into the latest one,
+ we must make a new PARALLEL for the latest insn
+ to hold the additional SETs. */
+
+ if (added_sets_1 || added_sets_2)
+ {
+ combine_extras++;
+
+ if (GET_CODE (newpat) == PARALLEL)
+ {
+ rtvec old = XVEC (newpat, 0);
+ total_sets = XVECLEN (newpat, 0) + added_sets_1 + added_sets_2;
+ newpat = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (total_sets));
+ bcopy ((char *) &old->elem[0], (char *) &XVECEXP (newpat, 0, 0),
+ sizeof (old->elem[0]) * old->num_elem);
+ }
+ else
+ {
+ rtx old = newpat;
+ total_sets = 1 + added_sets_1 + added_sets_2;
+ newpat = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (total_sets));
+ XVECEXP (newpat, 0, 0) = old;
+ }
+
+ if (added_sets_1)
+ XVECEXP (newpat, 0, --total_sets)
+ = (GET_CODE (PATTERN (i1)) == PARALLEL
+ ? gen_rtx (SET, VOIDmode, i1dest, i1src) : PATTERN (i1));
+
+ if (added_sets_2)
+ {
+ /* If there is no I1, use I2's body as is. We used to also not do
+ the subst call below if I2 was substituted into I3,
+ but that could lose a simplification. */
+ if (i1 == 0)
+ XVECEXP (newpat, 0, --total_sets) = i2pat;
+ else
+ /* See comment where i2pat is assigned. */
+ XVECEXP (newpat, 0, --total_sets)
+ = subst (i2pat, i1dest, i1src, 0, 0);
+ }
+ }
+
+ /* We come here when we are replacing a destination in I2 with the
+ destination of I3. */
+ validate_replacement:
+
+ /* Note which hard regs this insn has as inputs. */
+ mark_used_regs_combine (newpat);
+
+ /* Is the result of combination a valid instruction? */
+ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+
+ /* If the result isn't valid, see if it is a PARALLEL of two SETs where
+ the second SET's destination is a register that is unused. In that case,
+ we just need the first SET. This can occur when simplifying a divmod
+ insn. We *must* test for this case here because the code below that
+ splits two independent SETs doesn't handle this case correctly when it
+ updates the register status. Also check the case where the first
+ SET's destination is unused. That would not cause incorrect code, but
+ does cause an unneeded insn to remain. */
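+
+ /* Sketch of the shape handled here (a hypothetical divmod result):
+
+ (parallel [(set (reg:SI 70) (div:SI A B))
+ (set (reg:SI 71) (mod:SI A B))])
+
+ where I3 has a REG_UNUSED note for reg 71: the PARALLEL is not
+ recognized, but the first SET alone may well be. */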
+
+ if (insn_code_number < 0 && GET_CODE (newpat) == PARALLEL
+ && XVECLEN (newpat, 0) == 2
+ && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
+ && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == REG
+ && find_reg_note (i3, REG_UNUSED, SET_DEST (XVECEXP (newpat, 0, 1)))
+ && ! side_effects_p (SET_SRC (XVECEXP (newpat, 0, 1)))
+ && asm_noperands (newpat) < 0)
+ {
+ newpat = XVECEXP (newpat, 0, 0);
+ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+ }
+
+ else if (insn_code_number < 0 && GET_CODE (newpat) == PARALLEL
+ && XVECLEN (newpat, 0) == 2
+ && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
+ && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) == REG
+ && find_reg_note (i3, REG_UNUSED, SET_DEST (XVECEXP (newpat, 0, 0)))
+ && ! side_effects_p (SET_SRC (XVECEXP (newpat, 0, 0)))
+ && asm_noperands (newpat) < 0)
+ {
+ newpat = XVECEXP (newpat, 0, 1);
+ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+ }
+
+ /* If we were combining three insns and the result is a simple SET
+ with no ASM_OPERANDS that wasn't recognized, try to split it into two
+ insns. There are two ways to do this. It can be split using a
+ machine-specific method (like when you have an addition of a large
+ constant) or by combine in the function find_split_point. */
+
+ if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
+ && asm_noperands (newpat) < 0)
+ {
+ rtx m_split, *split;
+ rtx ni2dest = i2dest;
+
+ /* See if the MD file can split NEWPAT. If it can't, see if letting it
+ use I2DEST as a scratch register will help. In the latter case,
+ convert I2DEST to the mode of the source of NEWPAT if we can. */
+
+ m_split = split_insns (newpat, i3);
+
+ /* We can only use I2DEST as a scratch reg if it doesn't overlap any
+ inputs of NEWPAT. */
+
+ /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
+ possible to try that as a scratch reg. This would require adding
+ more code to make it work though. */
+
+ if (m_split == 0 && ! reg_overlap_mentioned_p (ni2dest, newpat))
+ {
+ /* If I2DEST is a hard register or the only use of a pseudo,
+ we can change its mode. */
+ if (GET_MODE (SET_DEST (newpat)) != GET_MODE (i2dest)
+ && GET_MODE (SET_DEST (newpat)) != VOIDmode
+ && GET_CODE (i2dest) == REG
+ && (REGNO (i2dest) < FIRST_PSEUDO_REGISTER
+ || (reg_n_sets[REGNO (i2dest)] == 1 && ! added_sets_2
+ && ! REG_USERVAR_P (i2dest))))
+ ni2dest = gen_rtx (REG, GET_MODE (SET_DEST (newpat)),
+ REGNO (i2dest));
+
+ m_split = split_insns (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2, newpat,
+ gen_rtx (CLOBBER,
+ VOIDmode,
+ ni2dest))),
+ i3);
+ }
+
+ if (m_split && GET_CODE (m_split) == SEQUENCE
+ && XVECLEN (m_split, 0) == 2
+ && (next_real_insn (i2) == i3
+ || ! use_crosses_set_p (PATTERN (XVECEXP (m_split, 0, 0)),
+ INSN_CUID (i2))))
+ {
+ rtx i2set, i3set;
+ rtx newi3pat = PATTERN (XVECEXP (m_split, 0, 1));
+ newi2pat = PATTERN (XVECEXP (m_split, 0, 0));
+
+ i3set = single_set (XVECEXP (m_split, 0, 1));
+ i2set = single_set (XVECEXP (m_split, 0, 0));
+
+ /* In case we changed the mode of I2DEST, replace it in the
+ pseudo-register table here. We can't do it above in case this
+ code doesn't get executed and we do a split the other way. */
+
+ if (REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
+ SUBST (regno_reg_rtx[REGNO (i2dest)], ni2dest);
+
+ i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
+
+ /* If I2 or I3 has multiple SETs, we won't know how to track
+ register status, so don't use these insns. */
+
+ if (i2_code_number >= 0 && i2set && i3set)
+ insn_code_number = recog_for_combine (&newi3pat, i3,
+ &new_i3_notes);
+
+ if (insn_code_number >= 0)
+ newpat = newi3pat;
+
+ /* It is possible that both insns now set the destination of I3.
+ If so, we must show an extra use of it. */
+
+ if (insn_code_number >= 0 && GET_CODE (SET_DEST (i3set)) == REG
+ && GET_CODE (SET_DEST (i2set)) == REG
+ && REGNO (SET_DEST (i3set)) == REGNO (SET_DEST (i2set)))
+ reg_n_sets[REGNO (SET_DEST (i2set))]++;
+ }
+
+ /* If we can split it and use I2DEST, go ahead and see if that
+ helps things be recognized. Verify that none of the registers
+ are set between I2 and I3. */
+ if (insn_code_number < 0 && (split = find_split_point (&newpat, i3)) != 0
+#ifdef HAVE_cc0
+ && GET_CODE (i2dest) == REG
+#endif
+ /* We need I2DEST in the proper mode. If it is a hard register
+ or the only use of a pseudo, we can change its mode. */
+ && (GET_MODE (*split) == GET_MODE (i2dest)
+ || GET_MODE (*split) == VOIDmode
+ || REGNO (i2dest) < FIRST_PSEUDO_REGISTER
+ || (reg_n_sets[REGNO (i2dest)] == 1 && ! added_sets_2
+ && ! REG_USERVAR_P (i2dest)))
+ && (next_real_insn (i2) == i3
+ || ! use_crosses_set_p (*split, INSN_CUID (i2)))
+ /* We can't overwrite I2DEST if its value is still used by
+ NEWPAT. */
+ && ! reg_referenced_p (i2dest, newpat))
+ {
+ rtx newdest = i2dest;
+
+ /* Get NEWDEST as a register in the proper mode. We have already
+ validated that we can do this. */
+ if (GET_MODE (i2dest) != GET_MODE (*split)
+ && GET_MODE (*split) != VOIDmode)
+ {
+ newdest = gen_rtx (REG, GET_MODE (*split), REGNO (i2dest));
+
+ if (REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
+ SUBST (regno_reg_rtx[REGNO (i2dest)], newdest);
+ }
+
+ /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
+ an ASHIFT. This can occur if it was inside a PLUS and hence
+ appeared to be a memory address. This is a kludge. */
+ if (GET_CODE (*split) == MULT
+ && GET_CODE (XEXP (*split, 1)) == CONST_INT
+ && (i = exact_log2 (INTVAL (XEXP (*split, 1)))) >= 0)
+ SUBST (*split, gen_rtx_combine (ASHIFT, GET_MODE (*split),
+ XEXP (*split, 0), GEN_INT (i)));
+
+#ifdef INSN_SCHEDULING
+ /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
+ be written as a ZERO_EXTEND. */
+ if (GET_CODE (*split) == SUBREG
+ && GET_CODE (SUBREG_REG (*split)) == MEM)
+ SUBST (*split, gen_rtx_combine (ZERO_EXTEND, GET_MODE (*split),
+ XEXP (*split, 0)));
+#endif
+
+ newi2pat = gen_rtx_combine (SET, VOIDmode, newdest, *split);
+ SUBST (*split, newdest);
+ i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
+ if (i2_code_number >= 0)
+ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+ }
+ }
+
+ /* Check for a case where we loaded from memory in a narrow mode and
+ then sign extended it, but we need both registers. In that case,
+ we have a PARALLEL with both loads from the same memory location.
+ We can split this into a load from memory followed by a register-register
+ copy. This saves at least one insn, more if register allocation can
+ eliminate the copy.
+
+ We cannot do this if the destination of the second assignment is
+ a register that we have already assumed is zero-extended. Similarly
+ for a SUBREG of such a register. */
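+
+ /* In RTL terms (an invented example), the unrecognized PARALLEL
+
+ (parallel [(set (reg:SI 70) (sign_extend:SI (mem:HI M)))
+ (set (reg:HI 71) (mem:HI M))])
+
+ becomes I2 = the sign-extending load and I3 = a copy of the low part
+ of reg 70 into reg 71. */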
+
+ else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
+ && GET_CODE (newpat) == PARALLEL
+ && XVECLEN (newpat, 0) == 2
+ && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
+ && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
+ && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
+ XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
+ && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
+ INSN_CUID (i2))
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
+ && ! (temp = SET_DEST (XVECEXP (newpat, 0, 1)),
+ (GET_CODE (temp) == REG
+ && reg_nonzero_bits[REGNO (temp)] != 0
+ && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
+ && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
+ && (reg_nonzero_bits[REGNO (temp)]
+ != GET_MODE_MASK (word_mode))))
+ && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
+ && (temp = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
+ (GET_CODE (temp) == REG
+ && reg_nonzero_bits[REGNO (temp)] != 0
+ && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
+ && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
+ && (reg_nonzero_bits[REGNO (temp)]
+ != GET_MODE_MASK (word_mode)))))
+ && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
+ SET_SRC (XVECEXP (newpat, 0, 1)))
+ && ! find_reg_note (i3, REG_UNUSED,
+ SET_DEST (XVECEXP (newpat, 0, 0))))
+ {
+ rtx ni2dest;
+
+ newi2pat = XVECEXP (newpat, 0, 0);
+ ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
+ newpat = XVECEXP (newpat, 0, 1);
+ SUBST (SET_SRC (newpat),
+ gen_lowpart_for_combine (GET_MODE (SET_SRC (newpat)), ni2dest));
+ i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
+ if (i2_code_number >= 0)
+ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+
+ if (insn_code_number >= 0)
+ {
+ rtx insn;
+ rtx link;
+
+ /* If we will be able to accept this, we have made a change to the
+ destination of I3. This can invalidate a LOG_LINKS pointing
+ to I3. No other part of combine.c makes such a transformation.
+
+ The new I3 will have a destination that was previously the
+ destination of I1 or I2 and which was used in I2 or I3. Call
+ distribute_links to make a LOG_LINK from the next use of
+ that destination. */
+
+ PATTERN (i3) = newpat;
+ distribute_links (gen_rtx (INSN_LIST, VOIDmode, i3, NULL_RTX));
+
+ /* I3 now uses what used to be its destination and which is
+ now I2's destination. That means we need a LOG_LINK from
+ I3 to I2. But we used to have one, so we still will.
+
+ However, some later insn might be using I2's dest and have
+ a LOG_LINK pointing at I3. We must remove this link.
+ The simplest way to remove the link is to point it at I1,
+ which we know will be a NOTE. */
+
+ for (insn = NEXT_INSN (i3);
+ insn && (this_basic_block == n_basic_blocks - 1
+ || insn != basic_block_head[this_basic_block + 1]);
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_referenced_p (ni2dest, PATTERN (insn)))
+ {
+ for (link = LOG_LINKS (insn); link;
+ link = XEXP (link, 1))
+ if (XEXP (link, 0) == i3)
+ XEXP (link, 0) = i1;
+
+ break;
+ }
+ }
+ }
+ }
+
+ /* Similarly, check for a case where we have a PARALLEL of two independent
+ SETs but we started with three insns. In this case, we can do the sets
+ as two separate insns. This case occurs when some SET allows two
+ other insns to combine, but the destination of that SET is still live. */
+
+ else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
+ && GET_CODE (newpat) == PARALLEL
+ && XVECLEN (newpat, 0) == 2
+ && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
+ && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
+ && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
+ INSN_CUID (i2))
+ /* Don't pass sets with (USE (MEM ...)) dests to the following. */
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != USE
+ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != USE
+ && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
+ XVECEXP (newpat, 0, 0))
+ && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
+ XVECEXP (newpat, 0, 1)))
+ {
+ newi2pat = XVECEXP (newpat, 0, 1);
+ newpat = XVECEXP (newpat, 0, 0);
+
+ i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
+ if (i2_code_number >= 0)
+ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
+ }
+
+ /* If it still isn't recognized, fail and change things back the way they
+ were. */
+ if ((insn_code_number < 0
+ /* Is the result a reasonable ASM_OPERANDS? */
+ && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
+ {
+ undo_all ();
+ return 0;
+ }
+
+ /* If we had to change another insn, make sure it is valid also. */
+ if (undobuf.other_insn)
+ {
+ rtx other_pat = PATTERN (undobuf.other_insn);
+ rtx new_other_notes;
+ rtx note, next;
+
+ CLEAR_HARD_REG_SET (newpat_used_regs);
+
+ other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
+ &new_other_notes);
+
+ if (other_code_number < 0 && ! check_asm_operands (other_pat))
+ {
+ undo_all ();
+ return 0;
+ }
+
+ PATTERN (undobuf.other_insn) = other_pat;
+
+ /* If any of the notes in OTHER_INSN were REG_UNUSED, ensure that they
+ are still valid. Then add any non-duplicate notes added by
+ recog_for_combine. */
+ for (note = REG_NOTES (undobuf.other_insn); note; note = next)
+ {
+ next = XEXP (note, 1);
+
+ if (REG_NOTE_KIND (note) == REG_UNUSED
+ && ! reg_set_p (XEXP (note, 0), PATTERN (undobuf.other_insn)))
+ {
+ if (GET_CODE (XEXP (note, 0)) == REG)
+ reg_n_deaths[REGNO (XEXP (note, 0))]--;
+
+ remove_note (undobuf.other_insn, note);
+ }
+ }
+
+ for (note = new_other_notes; note; note = XEXP (note, 1))
+ if (GET_CODE (XEXP (note, 0)) == REG)
+ reg_n_deaths[REGNO (XEXP (note, 0))]++;
+
+ distribute_notes (new_other_notes, undobuf.other_insn,
+ undobuf.other_insn, NULL_RTX, NULL_RTX, NULL_RTX);
+ }
+
+ /* We now know that we can do this combination. Merge the insns and
+ update the status of registers and LOG_LINKS. */
+
+ {
+ rtx i3notes, i2notes, i1notes = 0;
+ rtx i3links, i2links, i1links = 0;
+ rtx midnotes = 0;
+ register int regno;
+ /* Compute which registers we expect to eliminate. */
+ rtx elim_i2 = (newi2pat || i2dest_in_i2src || i2dest_in_i1src
+ ? 0 : i2dest);
+ rtx elim_i1 = i1 == 0 || i1dest_in_i1src ? 0 : i1dest;
+
+ /* Get the old REG_NOTES and LOG_LINKS from all our insns and
+ clear them. */
+ i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
+ i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
+ if (i1)
+ i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
+
+ /* Ensure that we do not have something that should not be shared but
+ occurs multiple times in the new insns. Check this by first
+ resetting all the `used' flags and then copying anything that is shared. */
+
+ reset_used_flags (i3notes);
+ reset_used_flags (i2notes);
+ reset_used_flags (i1notes);
+ reset_used_flags (newpat);
+ reset_used_flags (newi2pat);
+ if (undobuf.other_insn)
+ reset_used_flags (PATTERN (undobuf.other_insn));
+
+ i3notes = copy_rtx_if_shared (i3notes);
+ i2notes = copy_rtx_if_shared (i2notes);
+ i1notes = copy_rtx_if_shared (i1notes);
+ newpat = copy_rtx_if_shared (newpat);
+ newi2pat = copy_rtx_if_shared (newi2pat);
+ if (undobuf.other_insn)
+ PATTERN (undobuf.other_insn) = copy_rtx_if_shared (PATTERN (undobuf.other_insn));
+
+ INSN_CODE (i3) = insn_code_number;
+ PATTERN (i3) = newpat;
+ if (undobuf.other_insn)
+ INSN_CODE (undobuf.other_insn) = other_code_number;
+
+ /* We had one special case above where I2 had more than one set and
+ we replaced a destination of one of those sets with the destination
+ of I3. In that case, we have to update LOG_LINKS of insns later
+ in this basic block. Note that this (expensive) case is rare.
+
+ Also, in this case, we must pretend that all REG_NOTEs for I2
+ actually came from I3, so that REG_UNUSED notes from I2 will be
+ properly handled. */
+
+ if (i3_subst_into_i2)
+ {
+ for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
+ if (GET_CODE (SET_DEST (XVECEXP (PATTERN (i2), 0, i))) == REG
+ && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
+ && ! find_reg_note (i2, REG_UNUSED,
+ SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
+ for (temp = NEXT_INSN (i2);
+ temp && (this_basic_block == n_basic_blocks - 1
+ || basic_block_head[this_basic_block + 1] != temp);
+ temp = NEXT_INSN (temp))
+ if (temp != i3 && GET_RTX_CLASS (GET_CODE (temp)) == 'i')
+ for (link = LOG_LINKS (temp); link; link = XEXP (link, 1))
+ if (XEXP (link, 0) == i2)
+ XEXP (link, 0) = i3;
+
+ if (i3notes)
+ {
+ rtx link = i3notes;
+ while (XEXP (link, 1))
+ link = XEXP (link, 1);
+ XEXP (link, 1) = i2notes;
+ }
+ else
+ i3notes = i2notes;
+ i2notes = 0;
+ }
+
+ LOG_LINKS (i3) = 0;
+ REG_NOTES (i3) = 0;
+ LOG_LINKS (i2) = 0;
+ REG_NOTES (i2) = 0;
+
+ if (newi2pat)
+ {
+ INSN_CODE (i2) = i2_code_number;
+ PATTERN (i2) = newi2pat;
+ }
+ else
+ {
+ PUT_CODE (i2, NOTE);
+ NOTE_LINE_NUMBER (i2) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (i2) = 0;
+ }
+
+ if (i1)
+ {
+ LOG_LINKS (i1) = 0;
+ REG_NOTES (i1) = 0;
+ PUT_CODE (i1, NOTE);
+ NOTE_LINE_NUMBER (i1) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (i1) = 0;
+ }
+
+ /* Get death notes for everything that is now used in either I3 or
+ I2 and used to die in a previous insn. */
+
+ move_deaths (newpat, i1 ? INSN_CUID (i1) : INSN_CUID (i2), i3, &midnotes);
+ if (newi2pat)
+ move_deaths (newi2pat, INSN_CUID (i1), i2, &midnotes);
+
+ /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
+ if (i3notes)
+ distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
+ if (i2notes)
+ distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
+ if (i1notes)
+ distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
+ if (midnotes)
+ distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ elim_i2, elim_i1);
+
+ /* Distribute any notes added to I2 or I3 by recog_for_combine. We
+ know these are REG_UNUSED and want them to go to the desired insn,
+ so we always pass it as I3. We have not counted the notes in
+ reg_n_deaths yet, so we need to do so now. */
+
+ if (newi2pat && new_i2_notes)
+ {
+ for (temp = new_i2_notes; temp; temp = XEXP (temp, 1))
+ if (GET_CODE (XEXP (temp, 0)) == REG)
+ reg_n_deaths[REGNO (XEXP (temp, 0))]++;
+
+ distribute_notes (new_i2_notes, i2, i2, NULL_RTX, NULL_RTX, NULL_RTX);
+ }
+
+ if (new_i3_notes)
+ {
+ for (temp = new_i3_notes; temp; temp = XEXP (temp, 1))
+ if (GET_CODE (XEXP (temp, 0)) == REG)
+ reg_n_deaths[REGNO (XEXP (temp, 0))]++;
+
+ distribute_notes (new_i3_notes, i3, i3, NULL_RTX, NULL_RTX, NULL_RTX);
+ }
+
+ /* If I3DEST was used in I3SRC, it really died in I3. We may need to
+ put a REG_DEAD note for it somewhere. Similarly for I2 and I1.
+ Show an additional death due to the REG_DEAD note we make here. If
+ we discard it in distribute_notes, we will decrement it again. */
+
+ if (i3dest_killed)
+ {
+ if (GET_CODE (i3dest_killed) == REG)
+ reg_n_deaths[REGNO (i3dest_killed)]++;
+
+ distribute_notes (gen_rtx (EXPR_LIST, REG_DEAD, i3dest_killed,
+ NULL_RTX),
+ NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ NULL_RTX, NULL_RTX);
+ }
+
+ /* For I2 and I1, we have to be careful. If NEWI2PAT exists and sets
+ I2DEST or I1DEST, the death must be somewhere before I2, not I3. If
+ we passed I3 in that case, it might delete I2. */
+
+ if (i2dest_in_i2src)
+ {
+ if (GET_CODE (i2dest) == REG)
+ reg_n_deaths[REGNO (i2dest)]++;
+
+ if (newi2pat && reg_set_p (i2dest, newi2pat))
+ distribute_notes (gen_rtx (EXPR_LIST, REG_DEAD, i2dest, NULL_RTX),
+ NULL_RTX, i2, NULL_RTX, NULL_RTX, NULL_RTX);
+ else
+ distribute_notes (gen_rtx (EXPR_LIST, REG_DEAD, i2dest, NULL_RTX),
+ NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ NULL_RTX, NULL_RTX);
+ }
+
+ if (i1dest_in_i1src)
+ {
+ if (GET_CODE (i1dest) == REG)
+ reg_n_deaths[REGNO (i1dest)]++;
+
+ if (newi2pat && reg_set_p (i1dest, newi2pat))
+ distribute_notes (gen_rtx (EXPR_LIST, REG_DEAD, i1dest, NULL_RTX),
+ NULL_RTX, i2, NULL_RTX, NULL_RTX, NULL_RTX);
+ else
+ distribute_notes (gen_rtx (EXPR_LIST, REG_DEAD, i1dest, NULL_RTX),
+ NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
+ NULL_RTX, NULL_RTX);
+ }
+
+ distribute_links (i3links);
+ distribute_links (i2links);
+ distribute_links (i1links);
+
+ if (GET_CODE (i2dest) == REG)
+ {
+ rtx link;
+ rtx i2_insn = 0, i2_val = 0, set;
+
+ /* The insn that used to set this register doesn't exist, and
+ this life of the register may not exist either. See if one of
+ I3's links points to an insn that sets I2DEST. If it does,
+ that is now the last known value for I2DEST. If we don't update
+ this and I2 set the register to a value that depended on its old
+ contents, we will get confused. If this insn is used, things
+ will be set correctly in combine_instructions. */
+
+ for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
+ if ((set = single_set (XEXP (link, 0))) != 0
+ && rtx_equal_p (i2dest, SET_DEST (set)))
+ i2_insn = XEXP (link, 0), i2_val = SET_SRC (set);
+
+ record_value_for_reg (i2dest, i2_insn, i2_val);
+
+ /* If the reg formerly set in I2 died only once and that was in I3,
+ zero its use count so it won't make `reload' do any work. */
+ if (! added_sets_2 && newi2pat == 0 && ! i2dest_in_i2src)
+ {
+ regno = REGNO (i2dest);
+ reg_n_sets[regno]--;
+ if (reg_n_sets[regno] == 0
+ && ! (basic_block_live_at_start[0][regno / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS))))
+ reg_n_refs[regno] = 0;
+ }
+ }
+
+ if (i1 && GET_CODE (i1dest) == REG)
+ {
+ rtx link;
+ rtx i1_insn = 0, i1_val = 0, set;
+
+ for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
+ if ((set = single_set (XEXP (link, 0))) != 0
+ && rtx_equal_p (i1dest, SET_DEST (set)))
+ i1_insn = XEXP (link, 0), i1_val = SET_SRC (set);
+
+ record_value_for_reg (i1dest, i1_insn, i1_val);
+
+ regno = REGNO (i1dest);
+ if (! added_sets_1 && ! i1dest_in_i1src)
+ {
+ reg_n_sets[regno]--;
+ if (reg_n_sets[regno] == 0
+ && ! (basic_block_live_at_start[0][regno / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS))))
+ reg_n_refs[regno] = 0;
+ }
+ }
+
+ /* Update reg_nonzero_bits et al for any changes that may have been made
+ to this insn. */
+
+ note_stores (newpat, set_nonzero_bits_and_sign_copies);
+ if (newi2pat)
+ note_stores (newi2pat, set_nonzero_bits_and_sign_copies);
+
+ /* If I3 is now an unconditional jump, ensure that it has a
+ BARRIER following it since it may have initially been a
+ conditional jump. It may also be the last nonnote insn. */
+
+ if ((GET_CODE (newpat) == RETURN || simplejump_p (i3))
+ && ((temp = next_nonnote_insn (i3)) == NULL_RTX
+ || GET_CODE (temp) != BARRIER))
+ emit_barrier_after (i3);
+ }
+
+ combine_successes++;
+
+ if (added_links_insn
+ && (newi2pat == 0 || INSN_CUID (added_links_insn) < INSN_CUID (i2))
+ && INSN_CUID (added_links_insn) < INSN_CUID (i3))
+ return added_links_insn;
+ else
+ return newi2pat ? i2 : i3;
+}
+
+/* Undo all the modifications recorded in undobuf. */
+
+static void
+undo_all ()
+{
+ register int i;
+ if (undobuf.num_undo > MAX_UNDO)
+ undobuf.num_undo = MAX_UNDO;
+ for (i = undobuf.num_undo - 1; i >= 0; i--)
+ {
+ if (undobuf.undo[i].is_int)
+ *undobuf.undo[i].where.i = undobuf.undo[i].old_contents.i;
+ else
+ *undobuf.undo[i].where.r = undobuf.undo[i].old_contents.r;
+
+ }
+
+ obfree (undobuf.storage);
+ undobuf.num_undo = 0;
+}
+
+/* Find the innermost point within the rtx at LOC, possibly LOC itself,
+ where we have an arithmetic expression and return that point. LOC will
+ be inside INSN.
+
+ try_combine will call this function to see if an insn can be split into
+ two insns. */
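+
+ /* For example (illustrative), a memory address such as
+
+ (mem:SI (plus:SI (reg:SI 71) (const_int 123456)))
+
+ that is not a valid address may be split below using the machine's
+ way of building large constants, so that the address is computed by
+ separate insns feeding a valid (mem ...). */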
+
+static rtx *
+find_split_point (loc, insn)
+ rtx *loc;
+ rtx insn;
+{
+ rtx x = *loc;
+ enum rtx_code code = GET_CODE (x);
+ rtx *split;
+ int len = 0, pos, unsignedp;
+ rtx inner;
+
+ /* First special-case some codes. */
+ switch (code)
+ {
+ case SUBREG:
+#ifdef INSN_SCHEDULING
+ /* If we are making a paradoxical SUBREG invalid, it becomes a split
+ point. */
+ if (GET_CODE (SUBREG_REG (x)) == MEM)
+ return loc;
+#endif
+ return find_split_point (&SUBREG_REG (x), insn);
+
+ case MEM:
+#ifdef HAVE_lo_sum
+ /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
+ using LO_SUM and HIGH. */
+ if (GET_CODE (XEXP (x, 0)) == CONST
+ || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
+ {
+ SUBST (XEXP (x, 0),
+ gen_rtx_combine (LO_SUM, Pmode,
+ gen_rtx_combine (HIGH, Pmode, XEXP (x, 0)),
+ XEXP (x, 0)));
+ return &XEXP (XEXP (x, 0), 0);
+ }
+#endif
+
+ /* If we have a PLUS whose second operand is a constant and the
+ address is not valid, perhaps we can split it up using
+ the machine-specific way to split large constants. We use
+ the first pseudo-reg (one of the virtual regs) as a placeholder;
+ it will not remain in the result. */
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && ! memory_address_p (GET_MODE (x), XEXP (x, 0)))
+ {
+ rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
+ rtx seq = split_insns (gen_rtx (SET, VOIDmode, reg, XEXP (x, 0)),
+ subst_insn);
+
+ /* This should have produced two insns, each of which sets our
+ placeholder. If the source of the second is a valid address,
+ we can put both sources together and make a split point
+ in the middle. */
+
+ if (seq && XVECLEN (seq, 0) == 2
+ && GET_CODE (XVECEXP (seq, 0, 0)) == INSN
+ && GET_CODE (PATTERN (XVECEXP (seq, 0, 0))) == SET
+ && SET_DEST (PATTERN (XVECEXP (seq, 0, 0))) == reg
+ && ! reg_mentioned_p (reg,
+ SET_SRC (PATTERN (XVECEXP (seq, 0, 0))))
+ && GET_CODE (XVECEXP (seq, 0, 1)) == INSN
+ && GET_CODE (PATTERN (XVECEXP (seq, 0, 1))) == SET
+ && SET_DEST (PATTERN (XVECEXP (seq, 0, 1))) == reg
+ && memory_address_p (GET_MODE (x),
+ SET_SRC (PATTERN (XVECEXP (seq, 0, 1)))))
+ {
+ rtx src1 = SET_SRC (PATTERN (XVECEXP (seq, 0, 0)));
+ rtx src2 = SET_SRC (PATTERN (XVECEXP (seq, 0, 1)));
+
+ /* Replace the placeholder in SRC2 with SRC1. If we can
+ find where in SRC2 it was placed, that can become our
+ split point and we can replace this address with SRC2.
+ Just try two obvious places. */
+
+ src2 = replace_rtx (src2, reg, src1);
+ split = 0;
+ if (XEXP (src2, 0) == src1)
+ split = &XEXP (src2, 0);
+ else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
+ && XEXP (XEXP (src2, 0), 0) == src1)
+ split = &XEXP (XEXP (src2, 0), 0);
+
+ if (split)
+ {
+ SUBST (XEXP (x, 0), src2);
+ return split;
+ }
+ }
+
+ /* If that didn't work, perhaps the first operand is complex and
+ needs to be computed separately, so make a split point there.
+ This will occur on machines that just support REG + CONST
+ and have a constant moved through some previous computation. */
+
+ else if (GET_RTX_CLASS (GET_CODE (XEXP (XEXP (x, 0), 0))) != 'o'
+ && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
+ && (GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (XEXP (x, 0), 0))))
+ == 'o')))
+ return &XEXP (XEXP (x, 0), 0);
+ }
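+ /* For example, on a machine whose addresses allow only REG plus a
+ small constant, (mem (plus (mult (reg X) (const_int 4)) (const_int C)))
+ with C too large is split at the MULT, so the scaled index gets
+ computed into a register by a separate insn. */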
+ break;
+
+ case SET:
+#ifdef HAVE_cc0
+ /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
+ ZERO_EXTRACT, the most likely reason why this doesn't match is that
+ we need to put the operand into a register. So split at that
+ point. */
+
+ if (SET_DEST (x) == cc0_rtx
+ && GET_CODE (SET_SRC (x)) != COMPARE
+ && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
+ && GET_RTX_CLASS (GET_CODE (SET_SRC (x))) != 'o'
+ && ! (GET_CODE (SET_SRC (x)) == SUBREG
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (SET_SRC (x)))) == 'o'))
+ return &SET_SRC (x);
+#endif
+
+ /* See if we can split SET_SRC as it stands. */
+ split = find_split_point (&SET_SRC (x), insn);
+ if (split && split != &SET_SRC (x))
+ return split;
+
+ /* See if this is a bitfield assignment with everything constant. If
+ so, this is an IOR of an AND, so split it into that. */
+ if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
+ && (GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && GET_CODE (XEXP (SET_DEST (x), 1)) == CONST_INT
+ && GET_CODE (XEXP (SET_DEST (x), 2)) == CONST_INT
+ && GET_CODE (SET_SRC (x)) == CONST_INT
+ && ((INTVAL (XEXP (SET_DEST (x), 1))
+ + INTVAL (XEXP (SET_DEST (x), 2)))
+ <= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))))
+ && ! side_effects_p (XEXP (SET_DEST (x), 0)))
+ {
+ int pos = INTVAL (XEXP (SET_DEST (x), 2));
+ int len = INTVAL (XEXP (SET_DEST (x), 1));
+ int src = INTVAL (SET_SRC (x));
+ rtx dest = XEXP (SET_DEST (x), 0);
+ enum machine_mode mode = GET_MODE (dest);
+ unsigned HOST_WIDE_INT mask = ((HOST_WIDE_INT) 1 << len) - 1;
+
+#if BITS_BIG_ENDIAN
+ pos = GET_MODE_BITSIZE (mode) - len - pos;
+#endif
+
+ if (src == mask)
+ SUBST (SET_SRC (x),
+ gen_binary (IOR, mode, dest, GEN_INT (src << pos)));
+ else
+ SUBST (SET_SRC (x),
+ gen_binary (IOR, mode,
+ gen_binary (AND, mode, dest,
+ GEN_INT (~ (mask << pos)
+ & GET_MODE_MASK (mode))),
+ GEN_INT (src << pos)));
+
+ SUBST (SET_DEST (x), dest);
+
+ split = find_split_point (&SET_SRC (x), insn);
+ if (split && split != &SET_SRC (x))
+ return split;
+ }
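+ /* For example (ignoring BITS_BIG_ENDIAN), storing 5 into a 3-bit field
+ at position 4 turns (set (zero_extract D 3 4) 5) into
+ (set D (ior (and D ~(7 << 4)) (5 << 4))). */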
+
+ /* Otherwise, see if this is an operation that we can split into two.
+ If so, try to split that. */
+ code = GET_CODE (SET_SRC (x));
+
+ switch (code)
+ {
+ case AND:
+ /* If we are AND'ing with a large constant that is only a single
+ bit and the result is only being used in a context where we
+ need to know if it is zero or non-zero, replace it with a bit
+ extraction. This will avoid the large constant, which might
+ have taken more than one insn to make. If the constant were
+ not a valid argument to the AND but took only one insn to make,
+ this is no worse, but if it took more than one insn, it will
+ be better. */
+
+ if (GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT
+ && GET_CODE (XEXP (SET_SRC (x), 0)) == REG
+ && (pos = exact_log2 (INTVAL (XEXP (SET_SRC (x), 1)))) >= 7
+ && GET_CODE (SET_DEST (x)) == REG
+ && (split = find_single_use (SET_DEST (x), insn, NULL_PTR)) != 0
+ && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
+ && XEXP (*split, 0) == SET_DEST (x)
+ && XEXP (*split, 1) == const0_rtx)
+ {
+ SUBST (SET_SRC (x),
+ make_extraction (GET_MODE (SET_DEST (x)),
+ XEXP (SET_SRC (x), 0),
+ pos, NULL_RTX, 1, 1, 0, 0));
+ return find_split_point (loc, insn);
+ }
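+ /* For example, if (set D (and X (const_int 0x8000))) feeds only
+ (ne D (const_int 0)), the AND is replaced by an extraction of bit 15
+ of X, avoiding the large constant. */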
+ break;
+
+ case SIGN_EXTEND:
+ inner = XEXP (SET_SRC (x), 0);
+ pos = 0;
+ len = GET_MODE_BITSIZE (GET_MODE (inner));
+ unsignedp = 0;
+ break;
+
+ case SIGN_EXTRACT:
+ case ZERO_EXTRACT:
+ if (GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT
+ && GET_CODE (XEXP (SET_SRC (x), 2)) == CONST_INT)
+ {
+ inner = XEXP (SET_SRC (x), 0);
+ len = INTVAL (XEXP (SET_SRC (x), 1));
+ pos = INTVAL (XEXP (SET_SRC (x), 2));
+
+#if BITS_BIG_ENDIAN
+ pos = GET_MODE_BITSIZE (GET_MODE (inner)) - len - pos;
+#endif
+ unsignedp = (code == ZERO_EXTRACT);
+ }
+ break;
+ }
+
+ if (len && pos >= 0 && pos + len <= GET_MODE_BITSIZE (GET_MODE (inner)))
+ {
+ enum machine_mode mode = GET_MODE (SET_SRC (x));
+
+ /* For unsigned, we have a choice of a shift followed by an
+ AND or two shifts. Use two shifts for field sizes where the
+ constant might be too large. We assume here that we can
+ always at least get 8-bit constants in an AND insn, which is
+ true for every current RISC. */
+
+ if (unsignedp && len <= 8)
+ {
+ SUBST (SET_SRC (x),
+ gen_rtx_combine
+ (AND, mode,
+ gen_rtx_combine (LSHIFTRT, mode,
+ gen_lowpart_for_combine (mode, inner),
+ GEN_INT (pos)),
+ GEN_INT (((HOST_WIDE_INT) 1 << len) - 1)));
+
+ split = find_split_point (&SET_SRC (x), insn);
+ if (split && split != &SET_SRC (x))
+ return split;
+ }
+ else
+ {
+ SUBST (SET_SRC (x),
+ gen_rtx_combine
+ (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
+ gen_rtx_combine (ASHIFT, mode,
+ gen_lowpart_for_combine (mode, inner),
+ GEN_INT (GET_MODE_BITSIZE (mode)
+ - len - pos)),
+ GEN_INT (GET_MODE_BITSIZE (mode) - len)));
+
+ split = find_split_point (&SET_SRC (x), insn);
+ if (split && split != &SET_SRC (x))
+ return split;
+ }
+ }
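+ /* For example, for an unsigned 4-bit field at bit 8 of INNER in SImode
+ this yields (and (lshiftrt INNER 8) 15); for a signed field it yields
+ (ashiftrt (ashift INNER 20) 28). */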
+
+ /* See if this is a simple operation with a constant as the second
+ operand. It might be that this constant is out of range and hence
+ could be used as a split point. */
+ if ((GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == '2'
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == 'c'
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == '<')
+ && CONSTANT_P (XEXP (SET_SRC (x), 1))
+ && (GET_RTX_CLASS (GET_CODE (XEXP (SET_SRC (x), 0))) == 'o'
+ || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
+ && (GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (SET_SRC (x), 0))))
+ == 'o'))))
+ return &XEXP (SET_SRC (x), 1);
+
+ /* Finally, see if this is a simple operation with its first operand
+ not in a register. The operation might require this operand in a
+ register, so return it as a split point. We can always do this
+ because if the first operand were another operation, we would have
+ already found it as a split point. */
+ if ((GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == '2'
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == 'c'
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == '<'
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (x))) == '1')
+ && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
+ return &XEXP (SET_SRC (x), 0);
+
+ return 0;
+
+ case AND:
+ case IOR:
+ /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
+ it is better to write this as (not (ior A B)) so we can split it.
+ Similarly for IOR. */
+ if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
+ {
+ SUBST (*loc,
+ gen_rtx_combine (NOT, GET_MODE (x),
+ gen_rtx_combine (code == IOR ? AND : IOR,
+ GET_MODE (x),
+ XEXP (XEXP (x, 0), 0),
+ XEXP (XEXP (x, 1), 0))));
+ return find_split_point (loc, insn);
+ }
+
+ /* Many RISC machines have a large set of logical insns. If the
+ second operand is a NOT, put it first so we will try to split the
+ other operand first. */
+ if (GET_CODE (XEXP (x, 1)) == NOT)
+ {
+ rtx tem = XEXP (x, 0);
+ SUBST (XEXP (x, 0), XEXP (x, 1));
+ SUBST (XEXP (x, 1), tem);
+ }
+ break;
+ }
+
+ /* Otherwise, select our actions depending on our rtx class. */
+ switch (GET_RTX_CLASS (code))
+ {
+ case 'b': /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
+ case '3':
+ split = find_split_point (&XEXP (x, 2), insn);
+ if (split)
+ return split;
+ /* ... fall through ... */
+ case '2':
+ case 'c':
+ case '<':
+ split = find_split_point (&XEXP (x, 1), insn);
+ if (split)
+ return split;
+ /* ... fall through ... */
+ case '1':
+ /* Some machines have (and (shift ...) ...) insns. If X is not
+ an AND, but XEXP (X, 0) is, use it as our split point. */
+ if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
+ return &XEXP (x, 0);
+
+ split = find_split_point (&XEXP (x, 0), insn);
+ if (split)
+ return split;
+ return loc;
+ }
+
+ /* Otherwise, we don't have a split point. */
+ return 0;
+}
+
+/* Throughout X, replace FROM with TO, and return the result.
+ The result is TO if X is FROM;
+ otherwise the result is X, but its contents may have been modified.
+ If they were modified, a record was made in undobuf so that
+ undo_all will (among other things) return X to its original state.
+
+ If the number of changes necessary is too great to record for undoing,
+ the excess changes are not made, so the result is invalid.
+ The changes already made can still be undone.
+ undobuf.num_undo is incremented for such changes, so by testing that,
+ the caller can tell whether the result is valid.
+
+ `n_occurrences' is incremented each time FROM is replaced.
+
+ IN_DEST is non-zero if we are processing the SET_DEST of a SET.
+
+ UNIQUE_COPY is non-zero if each substitution must be unique. We do this
+ by copying if `n_occurrences' is non-zero. */
+
+static rtx
+subst (x, from, to, in_dest, unique_copy)
+ register rtx x, from, to;
+ int in_dest;
+ int unique_copy;
+{
+ register enum rtx_code code = GET_CODE (x);
+ enum machine_mode op0_mode = VOIDmode;
+ register char *fmt;
+ register int len, i;
+ rtx new;
+
+/* Two expressions are equal if they are identical copies of a shared
+ RTX or if they are both registers with the same register number
+ and mode. */
+
+#define COMBINE_RTX_EQUAL_P(X,Y) \
+ ((X) == (Y) \
+ || (GET_CODE (X) == REG && GET_CODE (Y) == REG \
+ && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
+
+ if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
+ {
+ n_occurrences++;
+ return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
+ }
+
+ /* If X and FROM are the same register but different modes, they will
+ not have been seen as equal above. However, flow.c will make a
+ LOG_LINKS entry for that case. If we do nothing, we will try to
+ rerecognize our original insn and, when it succeeds, we will
+ delete the feeding insn, which is incorrect.
+
+ So force this insn not to match in this (rare) case. */
+ if (! in_dest && code == REG && GET_CODE (from) == REG
+ && REGNO (x) == REGNO (from))
+ return gen_rtx (CLOBBER, GET_MODE (x), const0_rtx);
+
+ /* If this is an object, we are done unless it is a MEM or LO_SUM, both
+ of which may contain things that can be combined. */
+ if (code != MEM && code != LO_SUM && GET_RTX_CLASS (code) == 'o')
+ return x;
+
+ /* It is possible to have a subexpression appear twice in the insn.
+ Suppose that FROM is a register that appears within TO.
+ Then, after that subexpression has been scanned once by `subst',
+ the second time it is scanned, TO may be found. If we were
+ to scan TO here, we would find FROM within it and create a
+ self-referential rtl structure, which is completely wrong. */
+ if (COMBINE_RTX_EQUAL_P (x, to))
+ return to;
+
+ len = GET_RTX_LENGTH (code);
+ fmt = GET_RTX_FORMAT (code);
+
+ /* We don't need to process a SET_DEST that is a register, CC0, or PC, so
+ set up to skip this common case. All other cases where we want to
+ suppress replacing something inside a SET_SRC are handled via the
+ IN_DEST operand. */
+ if (code == SET
+ && (GET_CODE (SET_DEST (x)) == REG
+ || GET_CODE (SET_DEST (x)) == CC0
+ || GET_CODE (SET_DEST (x)) == PC))
+ fmt = "ie";
+
+ /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a constant. */
+ if (fmt[0] == 'e')
+ op0_mode = GET_MODE (XEXP (x, 0));
+
+ for (i = 0; i < len; i++)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ {
+ if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
+ {
+ new = (unique_copy && n_occurrences ? copy_rtx (to) : to);
+ n_occurrences++;
+ }
+ else
+ {
+ new = subst (XVECEXP (x, i, j), from, to, 0, unique_copy);
+
+ /* If this substitution failed, this whole thing fails. */
+ if (GET_CODE (new) == CLOBBER && XEXP (new, 0) == const0_rtx)
+ return new;
+ }
+
+ SUBST (XVECEXP (x, i, j), new);
+ }
+ }
+ else if (fmt[i] == 'e')
+ {
+ if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
+ {
+ /* In general, don't install a subreg involving two modes not
+ tieable. It can worsen register allocation, and can even
+ make invalid reload insns, since the reg inside may need to
+ be copied from in the outside mode, and that may be invalid
+ if it is an fp reg copied in integer mode.
+
+ We allow two exceptions to this: It is valid if it is inside
+ another SUBREG and the mode of that SUBREG and the mode of
+ the inside of TO is tieable and it is valid if X is a SET
+ that copies FROM to CC0. */
+ if (GET_CODE (to) == SUBREG
+ && ! MODES_TIEABLE_P (GET_MODE (to),
+ GET_MODE (SUBREG_REG (to)))
+ && ! (code == SUBREG
+ && MODES_TIEABLE_P (GET_MODE (x),
+ GET_MODE (SUBREG_REG (to))))
+#ifdef HAVE_cc0
+ && ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx)
+#endif
+ )
+ return gen_rtx (CLOBBER, VOIDmode, const0_rtx);
+
+ new = (unique_copy && n_occurrences ? copy_rtx (to) : to);
+ n_occurrences++;
+ }
+ else
+ /* If we are in a SET_DEST, suppress most cases unless we
+ have gone inside a MEM, in which case we want to
+ simplify the address. We assume here that things that
+ are actually part of the destination have their inner
+ parts in the first expression. This is true for SUBREG,
+ STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
+ things aside from REG and MEM that should appear in a
+ SET_DEST. */
+ new = subst (XEXP (x, i), from, to,
+ (((in_dest
+ && (code == SUBREG || code == STRICT_LOW_PART
+ || code == ZERO_EXTRACT))
+ || code == SET)
+ && i == 0), unique_copy);
+
+ /* If we found that we will have to reject this combination,
+ indicate that by returning the CLOBBER ourselves, rather than
+ an expression containing it. This will speed things up as
+ well as prevent accidents where two CLOBBERs are considered
+ to be equal, thus producing an incorrect simplification. */
+
+ if (GET_CODE (new) == CLOBBER && XEXP (new, 0) == const0_rtx)
+ return new;
+
+ SUBST (XEXP (x, i), new);
+ }
+ }
+
+ /* Try to simplify X. If the simplification changed the code, it is likely
+ that further simplification will help, so loop, but limit the number
+ of repetitions that will be performed. */
+
+ for (i = 0; i < 4; i++)
+ {
+ /* If X is sufficiently simple, don't bother trying to do anything
+ with it. */
+ if (code != CONST_INT && code != REG && code != CLOBBER)
+ x = simplify_rtx (x, op0_mode, i == 3, in_dest);
+
+ if (GET_CODE (x) == code)
+ break;
+
+ code = GET_CODE (x);
+
+ /* We no longer know the original mode of operand 0 since we
+ have changed the form of X. */
+ op0_mode = VOIDmode;
+ }
+
+ return x;
+}
+
+/* Simplify X, a piece of RTL. We just operate on the expression at the
+ outer level; call `subst' to simplify recursively. Return the new
+ expression.
+
+ OP0_MODE is the original mode of XEXP (x, 0); LAST is nonzero if this
+ will be the last iteration, even if an expression with a code different
+ from X's is returned; IN_DEST is nonzero if we are inside a SET_DEST. */
+
+static rtx
+simplify_rtx (x, op0_mode, last, in_dest)
+ rtx x;
+ enum machine_mode op0_mode;
+ int last;
+ int in_dest;
+{
+ enum rtx_code code = GET_CODE (x);
+ enum machine_mode mode = GET_MODE (x);
+ rtx temp;
+ int i;
+
+ /* If this is a commutative operation, put a constant last and a complex
+ expression first. We don't need to do this for comparisons here. */
+ if (GET_RTX_CLASS (code) == 'c'
+ && ((CONSTANT_P (XEXP (x, 0)) && GET_CODE (XEXP (x, 1)) != CONST_INT)
+ || (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == 'o'
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) != 'o')
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (x, 0)))) == 'o'
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) != 'o')))
+ {
+ temp = XEXP (x, 0);
+ SUBST (XEXP (x, 0), XEXP (x, 1));
+ SUBST (XEXP (x, 1), temp);
+ }
+
+ /* If this is a PLUS, MINUS, or MULT, and the first operand is the
+ sign extension of a PLUS with a constant, reverse the order of the sign
+ extension and the addition. Note that this is not the same as the original
+ code, but overflow is undefined for signed values. Also note that the
+ PLUS will have been partially moved "inside" the sign-extension, so that
+ the first operand of X will really look like:
+ (ashiftrt (plus (ashift A C4) C5) C4).
+ We convert this to
+ (plus (ashiftrt (ashift A C4) C4) (ashiftrt C5 C4))
+ and replace the first operand of X with that expression. Later parts
+ of this function may simplify the expression further.
+
+ For example, if we start with (mult (sign_extend (plus A C1)) C2),
+ we swap the SIGN_EXTEND and PLUS. Later code will apply the
+ distributive law to produce (plus (mult (sign_extend A) C2) C3),
+ where C3 == C1 * C2.
+
+ We do this to simplify address expressions. */
+
+ if ((code == PLUS || code == MINUS || code == MULT)
+ && GET_CODE (XEXP (x, 0)) == ASHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ASHIFT
+ && GET_CODE (XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 1) == XEXP (XEXP (x, 0), 1)
+ && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
+ && (temp = simplify_binary_operation (ASHIFTRT, mode,
+ XEXP (XEXP (XEXP (x, 0), 0), 1),
+ XEXP (XEXP (x, 0), 1))) != 0)
+ {
+ rtx new
+ = simplify_shift_const (NULL_RTX, ASHIFT, mode,
+ XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 0),
+ INTVAL (XEXP (XEXP (x, 0), 1)));
+
+ new = simplify_shift_const (NULL_RTX, ASHIFTRT, mode, new,
+ INTVAL (XEXP (XEXP (x, 0), 1)));
+
+ SUBST (XEXP (x, 0), gen_binary (PLUS, mode, new, temp));
+ }
+
+ /* If this is a simple operation applied to an IF_THEN_ELSE, try
+ applying it to the arms of the IF_THEN_ELSE. This often simplifies
+ things. Check for cases where both arms are testing the same
+ condition.
+
+ Don't do anything if all operands are very simple. */
+
+ if (((GET_RTX_CLASS (code) == '2' || GET_RTX_CLASS (code) == 'c'
+ || GET_RTX_CLASS (code) == '<')
+ && ((GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) != 'o'
+ && ! (GET_CODE (XEXP (x, 0)) == SUBREG
+ && (GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (x, 0))))
+ == 'o')))
+ || (GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) != 'o'
+ && ! (GET_CODE (XEXP (x, 1)) == SUBREG
+ && (GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (x, 1))))
+ == 'o')))))
+ || (GET_RTX_CLASS (code) == '1'
+ && ((GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) != 'o'
+ && ! (GET_CODE (XEXP (x, 0)) == SUBREG
+ && (GET_RTX_CLASS (GET_CODE (SUBREG_REG (XEXP (x, 0))))
+ == 'o'))))))
+ {
+ rtx cond, true, false;
+
+ cond = if_then_else_cond (x, &true, &false);
+ if (cond != 0)
+ {
+ rtx cop1 = const0_rtx;
+ enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
+
+ /* Simplify the alternative arms; this may collapse the true and
+ false arms to store-flag values. */
+ true = subst (true, pc_rtx, pc_rtx, 0, 0);
+ false = subst (false, pc_rtx, pc_rtx, 0, 0);
+
+ /* Restarting if we generate a store-flag expression will cause
+ us to loop. Just drop through in this case. */
+
+ /* If the result values are STORE_FLAG_VALUE and zero, we can
+ just make the comparison operation. */
+ if (true == const_true_rtx && false == const0_rtx)
+ x = gen_binary (cond_code, mode, cond, cop1);
+ else if (true == const0_rtx && false == const_true_rtx)
+ x = gen_binary (reverse_condition (cond_code), mode, cond, cop1);
+
+ /* Likewise, we can make the negate of a comparison operation
+ if the result values are - STORE_FLAG_VALUE and zero. */
+ else if (GET_CODE (true) == CONST_INT
+ && INTVAL (true) == - STORE_FLAG_VALUE
+ && false == const0_rtx)
+ x = gen_unary (NEG, mode, mode,
+ gen_binary (cond_code, mode, cond, cop1));
+ else if (GET_CODE (false) == CONST_INT
+ && INTVAL (false) == - STORE_FLAG_VALUE
+ && true == const0_rtx)
+ x = gen_unary (NEG, mode, mode,
+ gen_binary (reverse_condition (cond_code),
+ mode, cond, cop1));
+ else
+ return gen_rtx (IF_THEN_ELSE, mode,
+ gen_binary (cond_code, VOIDmode, cond, cop1),
+ true, false);
+
+ code = GET_CODE (x);
+ op0_mode = VOIDmode;
+ }
+ }
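+ /* For example, if X is (plus (if_then_else (ne A 0) B C) (const_int 1)),
+ the condition is (ne A 0) with arms (plus B 1) and (plus C 1); unless
+ the arms collapse to store-flag constants, X is rewritten as
+ (if_then_else (ne A 0) (plus B 1) (plus C 1)). */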
+
+ /* Try to fold this expression in case we have constants that weren't
+ present before. */
+ temp = 0;
+ switch (GET_RTX_CLASS (code))
+ {
+ case '1':
+ temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
+ break;
+ case '<':
+ temp = simplify_relational_operation (code, op0_mode,
+ XEXP (x, 0), XEXP (x, 1));
+#ifdef FLOAT_STORE_FLAG_VALUE
+ if (temp != 0 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ temp = ((temp == const0_rtx) ? CONST0_RTX (GET_MODE (x))
+ : immed_real_const_1 (FLOAT_STORE_FLAG_VALUE, GET_MODE (x)));
+#endif
+ break;
+ case 'c':
+ case '2':
+ temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
+ break;
+ case 'b':
+ case '3':
+ temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
+ XEXP (x, 1), XEXP (x, 2));
+ break;
+ }
+
+ if (temp)
+ x = temp, code = GET_CODE (temp);
+
+ /* First see if we can apply the inverse distributive law. */
+ if (code == PLUS || code == MINUS
+ || code == AND || code == IOR || code == XOR)
+ {
+ x = apply_distributive_law (x);
+ code = GET_CODE (x);
+ }
+
+ /* If CODE is an associative operation not otherwise handled, see if we
+ can associate some operands. This can win if they are constants or
+ if they are logically related (e.g. (a & b) & a). */
+ if ((code == PLUS || code == MINUS
+ || code == MULT || code == AND || code == IOR || code == XOR
+ || code == DIV || code == UDIV
+ || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
+ && INTEGRAL_MODE_P (mode))
+ {
+ if (GET_CODE (XEXP (x, 0)) == code)
+ {
+ rtx other = XEXP (XEXP (x, 0), 0);
+ rtx inner_op0 = XEXP (XEXP (x, 0), 1);
+ rtx inner_op1 = XEXP (x, 1);
+ rtx inner;
+
+ /* Make sure we pass the constant operand if any as the second
+ one if this is a commutative operation. */
+ if (CONSTANT_P (inner_op0) && GET_RTX_CLASS (code) == 'c')
+ {
+ rtx tem = inner_op0;
+ inner_op0 = inner_op1;
+ inner_op1 = tem;
+ }
+ inner = simplify_binary_operation (code == MINUS ? PLUS
+ : code == DIV ? MULT
+ : code == UDIV ? MULT
+ : code,
+ mode, inner_op0, inner_op1);
+
+ /* For commutative operations, try the other pair if that one
+ didn't simplify. */
+ if (inner == 0 && GET_RTX_CLASS (code) == 'c')
+ {
+ other = XEXP (XEXP (x, 0), 1);
+ inner = simplify_binary_operation (code, mode,
+ XEXP (XEXP (x, 0), 0),
+ XEXP (x, 1));
+ }
+
+ if (inner)
+ return gen_binary (code, mode, other, inner);
+ }
+ }
+
+ /* A little bit of algebraic simplification here. */
+ switch (code)
+ {
+ case MEM:
+ /* Ensure that our address has any ASHIFTs converted to MULT in case
+ address-recognizing predicates are called later. */
+ temp = make_compound_operation (XEXP (x, 0), MEM);
+ SUBST (XEXP (x, 0), temp);
+ break;
+
+ case SUBREG:
+ /* (subreg:A (mem:B X) N) becomes a modified MEM unless the SUBREG
+ is paradoxical. If we can't do that safely, then it becomes
+ something nonsensical so that this combination won't take place. */
+
+ if (GET_CODE (SUBREG_REG (x)) == MEM
+ && (GET_MODE_SIZE (mode)
+ <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
+ {
+ rtx inner = SUBREG_REG (x);
+ int endian_offset = 0;
+ /* Don't change the mode of the MEM
+ if that would change the meaning of the address. */
+ if (MEM_VOLATILE_P (SUBREG_REG (x))
+ || mode_dependent_address_p (XEXP (inner, 0)))
+ return gen_rtx (CLOBBER, mode, const0_rtx);
+
+#if BYTES_BIG_ENDIAN
+ if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ endian_offset += UNITS_PER_WORD - GET_MODE_SIZE (mode);
+ if (GET_MODE_SIZE (GET_MODE (inner)) < UNITS_PER_WORD)
+ endian_offset -= UNITS_PER_WORD - GET_MODE_SIZE (GET_MODE (inner));
+#endif
+ /* Note if the plus_constant doesn't make a valid address
+ then this combination won't be accepted. */
+ x = gen_rtx (MEM, mode,
+ plus_constant (XEXP (inner, 0),
+ (SUBREG_WORD (x) * UNITS_PER_WORD
+ + endian_offset)));
+ MEM_VOLATILE_P (x) = MEM_VOLATILE_P (inner);
+ RTX_UNCHANGING_P (x) = RTX_UNCHANGING_P (inner);
+ MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (inner);
+ return x;
+ }
+
+ /* If we are in a SET_DEST, these other cases can't apply. */
+ if (in_dest)
+ return x;
+
+ /* Changing mode twice with SUBREG => just change it once,
+ or not at all if changing back to starting mode. */
+ if (GET_CODE (SUBREG_REG (x)) == SUBREG)
+ {
+ if (mode == GET_MODE (SUBREG_REG (SUBREG_REG (x)))
+ && SUBREG_WORD (x) == 0 && SUBREG_WORD (SUBREG_REG (x)) == 0)
+ return SUBREG_REG (SUBREG_REG (x));
+
+ SUBST_INT (SUBREG_WORD (x),
+ SUBREG_WORD (x) + SUBREG_WORD (SUBREG_REG (x)));
+ SUBST (SUBREG_REG (x), SUBREG_REG (SUBREG_REG (x)));
+ }
+
+ /* SUBREG of a hard register => just change the register number
+ and/or mode. If the hard register is not valid in that mode,
+ suppress this combination. If the hard register is the stack,
+ frame, or argument pointer, leave this as a SUBREG. */
+
+ if (GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER
+ && REGNO (SUBREG_REG (x)) != FRAME_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && REGNO (SUBREG_REG (x)) != HARD_FRAME_POINTER_REGNUM
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && REGNO (SUBREG_REG (x)) != ARG_POINTER_REGNUM
+#endif
+ && REGNO (SUBREG_REG (x)) != STACK_POINTER_REGNUM)
+ {
+ if (HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (x)) + SUBREG_WORD (x),
+ mode))
+ return gen_rtx (REG, mode,
+ REGNO (SUBREG_REG (x)) + SUBREG_WORD (x));
+ else
+ return gen_rtx (CLOBBER, mode, const0_rtx);
+ }
+
+ /* For a constant, try to pick up the part we want. Handle a full
+ word and low-order part. Only do this if we are narrowing
+ the constant; if it is being widened, we have no idea what
+ the extra bits will have been set to. */
+
+ if (CONSTANT_P (SUBREG_REG (x)) && op0_mode != VOIDmode
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD
+ && GET_MODE_SIZE (op0_mode) > UNITS_PER_WORD
+ && GET_MODE_CLASS (mode) == MODE_INT)
+ {
+ temp = operand_subword (SUBREG_REG (x), SUBREG_WORD (x),
+ 0, op0_mode);
+ if (temp)
+ return temp;
+ }
+
+ /* If we want a subreg of a constant, at offset 0,
+ take the low bits. On a little-endian machine, that's
+ always valid. On a big-endian machine, it's valid
+ only if the constant's mode fits in one word. */
+ if (CONSTANT_P (SUBREG_REG (x)) && subreg_lowpart_p (x)
+ && GET_MODE_SIZE (mode) < GET_MODE_SIZE (op0_mode)
+#if WORDS_BIG_ENDIAN
+ && GET_MODE_BITSIZE (op0_mode) <= BITS_PER_WORD
+#endif
+ )
+ return gen_lowpart_for_combine (mode, SUBREG_REG (x));
+
+ /* A paradoxical SUBREG of a VOIDmode constant is the same constant,
+ since we are saying that the high bits don't matter. */
+ if (CONSTANT_P (SUBREG_REG (x)) && GET_MODE (SUBREG_REG (x)) == VOIDmode
+ && GET_MODE_SIZE (mode) > GET_MODE_SIZE (op0_mode))
+ return SUBREG_REG (x);
+
+ /* Note that we cannot do any narrowing for non-constants since
+ we might have been counting on using the fact that some bits were
+ zero. We now do this in the SET. */
+
+ break;
+
+ case NOT:
+ /* (not (plus X -1)) can become (neg X). */
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && XEXP (XEXP (x, 0), 1) == constm1_rtx)
+ return gen_rtx_combine (NEG, mode, XEXP (XEXP (x, 0), 0));
+
+ /* Similarly, (not (neg X)) is (plus X -1). */
+ if (GET_CODE (XEXP (x, 0)) == NEG)
+ return gen_rtx_combine (PLUS, mode, XEXP (XEXP (x, 0), 0),
+ constm1_rtx);
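+ /* Both follow from the two's-complement identity ~A == -A - 1. */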
+
+ /* (not (xor X C)) for C constant is (xor X D) with D = ~ C. */
+ if (GET_CODE (XEXP (x, 0)) == XOR
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && (temp = simplify_unary_operation (NOT, mode,
+ XEXP (XEXP (x, 0), 1),
+ mode)) != 0)
+ {
+ SUBST (XEXP (XEXP (x, 0), 1), temp);
+ return XEXP (x, 0);
+ }
+
+ /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for operands
+ other than 1, but that is not valid. We could do a similar
+ simplification for (not (lshiftrt C X)) where C is just the sign bit,
+ but this doesn't seem common enough to bother with. */
+ if (GET_CODE (XEXP (x, 0)) == ASHIFT
+ && XEXP (XEXP (x, 0), 0) == const1_rtx)
+ return gen_rtx (ROTATE, mode, gen_unary (NOT, mode, mode, const1_rtx),
+ XEXP (XEXP (x, 0), 1));
+
+ if (GET_CODE (XEXP (x, 0)) == SUBREG
+ && subreg_lowpart_p (XEXP (x, 0))
+ && (GET_MODE_SIZE (GET_MODE (XEXP (x, 0)))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (x, 0)))))
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == ASHIFT
+ && XEXP (SUBREG_REG (XEXP (x, 0)), 0) == const1_rtx)
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (XEXP (x, 0)));
+
+ x = gen_rtx (ROTATE, inner_mode,
+ gen_unary (NOT, inner_mode, inner_mode, const1_rtx),
+ XEXP (SUBREG_REG (XEXP (x, 0)), 1));
+ return gen_lowpart_for_combine (mode, x);
+ }
+
+#if STORE_FLAG_VALUE == -1
+ /* (not (comparison foo bar)) can be done by reversing the comparison
+ code if valid. */
+ if (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
+ && reversible_comparison_p (XEXP (x, 0)))
+ return gen_rtx_combine (reverse_condition (GET_CODE (XEXP (x, 0))),
+ mode, XEXP (XEXP (x, 0), 0),
+ XEXP (XEXP (x, 0), 1));
+
+ /* (ashiftrt foo C) where C is the number of bits in FOO minus 1
+ is (lt foo (const_int 0)), so we can perform the above
+ simplification. */
+
+ if (GET_CODE (XEXP (x, 0)) == ASHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) == GET_MODE_BITSIZE (mode) - 1)
+ return gen_rtx_combine (GE, mode, XEXP (XEXP (x, 0), 0), const0_rtx);
+#endif
+
+ /* Apply De Morgan's laws to reduce number of patterns for machines
+ with negating logical insns (and-not, nand, etc.). If result has
+ only one NOT, put it first, since that is how the patterns are
+ coded. */
+
+ if (GET_CODE (XEXP (x, 0)) == IOR || GET_CODE (XEXP (x, 0)) == AND)
+ {
+ rtx in1 = XEXP (XEXP (x, 0), 0), in2 = XEXP (XEXP (x, 0), 1);
+
+ if (GET_CODE (in1) == NOT)
+ in1 = XEXP (in1, 0);
+ else
+ in1 = gen_rtx_combine (NOT, GET_MODE (in1), in1);
+
+ if (GET_CODE (in2) == NOT)
+ in2 = XEXP (in2, 0);
+ else if (GET_CODE (in2) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ in2 = GEN_INT (GET_MODE_MASK (mode) & ~ INTVAL (in2));
+ else
+ in2 = gen_rtx_combine (NOT, GET_MODE (in2), in2);
+
+ if (GET_CODE (in2) == NOT)
+ {
+ rtx tem = in2;
+ in2 = in1; in1 = tem;
+ }
+
+ return gen_rtx_combine (GET_CODE (XEXP (x, 0)) == IOR ? AND : IOR,
+ mode, in1, in2);
+ }
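+ /* For example, (not (ior A (const_int 4))) in QImode becomes
+ (and (not A) (const_int 251)), folding the NOT into the constant. */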
+ break;
+
+ case NEG:
+ /* (neg (plus X 1)) can become (not X). */
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && XEXP (XEXP (x, 0), 1) == const1_rtx)
+ return gen_rtx_combine (NOT, mode, XEXP (XEXP (x, 0), 0));
+
+ /* Similarly, (neg (not X)) is (plus X 1). */
+ if (GET_CODE (XEXP (x, 0)) == NOT)
+ return plus_constant (XEXP (XEXP (x, 0), 0), 1);
+
+ /* (neg (minus X Y)) can become (minus Y X). */
+ if (GET_CODE (XEXP (x, 0)) == MINUS
+ && (! FLOAT_MODE_P (mode)
+ /* x-y != -(y-x) with IEEE floating point. */
+ || TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || flag_fast_math))
+ return gen_binary (MINUS, mode, XEXP (XEXP (x, 0), 1),
+ XEXP (XEXP (x, 0), 0));
+
+ /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
+ if (GET_CODE (XEXP (x, 0)) == XOR && XEXP (XEXP (x, 0), 1) == const1_rtx
+ && nonzero_bits (XEXP (XEXP (x, 0), 0), mode) == 1)
+ return gen_binary (PLUS, mode, XEXP (XEXP (x, 0), 0), constm1_rtx);
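+ /* When A is 0 or 1, (xor A 1) is 1 - A, so its negation is A - 1. */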
+
+ /* NEG commutes with ASHIFT since it is multiplication. Only do this
+ if we can then eliminate the NEG (e.g.,
+ if the operand is a constant). */
+
+ if (GET_CODE (XEXP (x, 0)) == ASHIFT)
+ {
+ temp = simplify_unary_operation (NEG, mode,
+ XEXP (XEXP (x, 0), 0), mode);
+ if (temp)
+ {
+ SUBST (XEXP (XEXP (x, 0), 0), temp);
+ return XEXP (x, 0);
+ }
+ }
+
+ temp = expand_compound_operation (XEXP (x, 0));
+
+ /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
+ replaced by (lshiftrt X C). This will convert
+ (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
+
+ if (GET_CODE (temp) == ASHIFTRT
+ && GET_CODE (XEXP (temp, 1)) == CONST_INT
+ && INTVAL (XEXP (temp, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ return simplify_shift_const (temp, LSHIFTRT, mode, XEXP (temp, 0),
+ INTVAL (XEXP (temp, 1)));
+
+ /* If X has only a single bit that might be nonzero, say, bit I, convert
+ (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
+ MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
+ (sign_extract X 1 Y). But only do this if TEMP isn't a register
+ or a SUBREG of one since we'd be making the expression more
+ complex if it was just a register. */
+
+ if (GET_CODE (temp) != REG
+ && ! (GET_CODE (temp) == SUBREG
+ && GET_CODE (SUBREG_REG (temp)) == REG)
+ && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
+ {
+ rtx temp1 = simplify_shift_const
+ (NULL_RTX, ASHIFTRT, mode,
+ simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
+ GET_MODE_BITSIZE (mode) - 1 - i),
+ GET_MODE_BITSIZE (mode) - 1 - i);
+
+ /* If all we did was surround TEMP with the two shifts, we
+ haven't improved anything, so don't use it. Otherwise,
+ we are better off with TEMP1. */
+ if (GET_CODE (temp1) != ASHIFTRT
+ || GET_CODE (XEXP (temp1, 0)) != ASHIFT
+ || XEXP (XEXP (temp1, 0), 0) != temp)
+ return temp1;
+ }
+ break;
+
+ case FLOAT_TRUNCATE:
+ /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
+ if (GET_CODE (XEXP (x, 0)) == FLOAT_EXTEND
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode)
+ return XEXP (XEXP (x, 0), 0);
+
+ /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
+ (OP:SF foo:SF) if OP is NEG or ABS. */
+ if ((GET_CODE (XEXP (x, 0)) == ABS
+ || GET_CODE (XEXP (x, 0)) == NEG)
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == FLOAT_EXTEND
+ && GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == mode)
+ return gen_unary (GET_CODE (XEXP (x, 0)), mode, mode,
+ XEXP (XEXP (XEXP (x, 0), 0), 0));
+ break;
+
+#ifdef HAVE_cc0
+ case COMPARE:
+ /* Convert (compare FOO (const_int 0)) to FOO. (When not using cc0
+ we would want to leave it as a COMPARE so that it can be
+ distinguished from a register-register copy.) */
+ if (XEXP (x, 1) == const0_rtx)
+ return XEXP (x, 0);
+
+ /* In IEEE floating point, x-0 is not the same as x. */
+ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
+ || flag_fast_math)
+ && XEXP (x, 1) == CONST0_RTX (GET_MODE (XEXP (x, 0))))
+ return XEXP (x, 0);
+ break;
+#endif
+
+ case CONST:
+ /* (const (const X)) can become (const X). Do it this way rather than
+ returning the inner CONST since CONST can be shared with a
+ REG_EQUAL note. */
+ if (GET_CODE (XEXP (x, 0)) == CONST)
+ SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
+ break;
+
+#ifdef HAVE_lo_sum
+ case LO_SUM:
+ /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
+ can add in an offset. find_split_point will split this address up
+ again if it doesn't match. */
+ if (GET_CODE (XEXP (x, 0)) == HIGH
+ && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
+ return XEXP (x, 1);
+ break;
+#endif
+
+ case PLUS:
+ /* If we have (plus (plus (A const) B)), associate it so that CONST is
+ outermost. That's because that's the way indexed addresses are
+ supposed to appear. This code used to check many more cases, but
+ they are now checked elsewhere. */
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && CONSTANT_ADDRESS_P (XEXP (XEXP (x, 0), 1)))
+ return gen_binary (PLUS, mode,
+ gen_binary (PLUS, mode, XEXP (XEXP (x, 0), 0),
+ XEXP (x, 1)),
+ XEXP (XEXP (x, 0), 1));
+
+ /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
+ when c is (const_int pow2 / 2) is a sign extension of a
+ bit-field and can be replaced by either a sign_extend or a
+ sign_extract. The `and' may be a zero_extend. */
+ if (GET_CODE (XEXP (x, 0)) == XOR
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == - INTVAL (XEXP (XEXP (x, 0), 1))
+ && (i = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
+ && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
+ && (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
+ == ((HOST_WIDE_INT) 1 << (i + 1)) - 1))
+ || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
+ && (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
+ == i + 1))))
+ return simplify_shift_const
+ (NULL_RTX, ASHIFTRT, mode,
+ simplify_shift_const (NULL_RTX, ASHIFT, mode,
+ XEXP (XEXP (XEXP (x, 0), 0), 0),
+ GET_MODE_BITSIZE (mode) - (i + 1)),
+ GET_MODE_BITSIZE (mode) - (i + 1));
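+ /* For example, in SImode (plus (xor (and X 255) 128) -128), where
+ i == 7, is the sign extension of the low byte of X and becomes
+ (ashiftrt (ashift X 24) 24). */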
+
+ /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
+ C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
+ is 1. This produces better code than the alternative immediately
+ below. */
+ if (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
+ && reversible_comparison_p (XEXP (x, 0))
+ && ((STORE_FLAG_VALUE == -1 && XEXP (x, 1) == const1_rtx)
+ || (STORE_FLAG_VALUE == 1 && XEXP (x, 1) == constm1_rtx)))
+ return
+ gen_unary (NEG, mode, mode,
+ gen_binary (reverse_condition (GET_CODE (XEXP (x, 0))),
+ mode, XEXP (XEXP (x, 0), 0),
+ XEXP (XEXP (x, 0), 1)));
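+ /* E.g. with STORE_FLAG_VALUE == -1, (plus (lt A B) (const_int 1))
+ is 0 when A < B and 1 otherwise, which is (neg (ge A B)). */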
+
+ /* If only the low-order bit of X is possibly nonzero, (plus x -1)
+ can become (ashiftrt (ashift (xor x 1) C) C) where C is
+ the bitsize of the mode - 1. This allows simplification of
+ "a = (b & 8) == 0;" */
+ if (XEXP (x, 1) == constm1_rtx
+ && GET_CODE (XEXP (x, 0)) != REG
+ && ! (GET_CODE (XEXP (x,0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG)
+ && nonzero_bits (XEXP (x, 0), mode) == 1)
+ return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
+ simplify_shift_const (NULL_RTX, ASHIFT, mode,
+ gen_rtx_combine (XOR, mode,
+ XEXP (x, 0), const1_rtx),
+ GET_MODE_BITSIZE (mode) - 1),
+ GET_MODE_BITSIZE (mode) - 1);
+
+ /* If we are adding two things that have no bits in common, convert
+ the addition into an IOR. This will often be further simplified,
+ for example in cases like ((a & 1) + (a & 2)), which can
+ become a & 3. */
+
+ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (XEXP (x, 0), mode)
+ & nonzero_bits (XEXP (x, 1), mode)) == 0)
+ return gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
+ break;
+
+ case MINUS:
+#if STORE_FLAG_VALUE == 1
+ /* (minus 1 (comparison foo bar)) can be done by reversing the comparison
+ code if valid. */
+ if (XEXP (x, 0) == const1_rtx
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<'
+ && reversible_comparison_p (XEXP (x, 1)))
+ return gen_binary (reverse_condition (GET_CODE (XEXP (x, 1))),
+ mode, XEXP (XEXP (x, 1), 0),
+ XEXP (XEXP (x, 1), 1));
+#endif
+
+ /* (minus <foo> (and <foo> (const_int -pow2))) becomes
+ (and <foo> (const_int pow2-1)) */
+ if (GET_CODE (XEXP (x, 1)) == AND
+ && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
+ && exact_log2 (- INTVAL (XEXP (XEXP (x, 1), 1))) >= 0
+ && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
+ return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
+ - INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
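+ /* For example, (minus X (and X (const_int -8))) becomes
+ (and X (const_int 7)), the remainder of X modulo 8. */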
+
+ /* Canonicalize (minus A (plus B C)) to (minus (minus A B) C) for
+ integers. */
+ if (GET_CODE (XEXP (x, 1)) == PLUS && INTEGRAL_MODE_P (mode))
+ return gen_binary (MINUS, mode,
+ gen_binary (MINUS, mode, XEXP (x, 0),
+ XEXP (XEXP (x, 1), 0)),
+ XEXP (XEXP (x, 1), 1));
+ break;
+
+ case MULT:
+ /* If we have (mult (plus A B) C), apply the distributive law and then
+ the inverse distributive law to see if things simplify. This
+ occurs mostly in addresses, often when unrolling loops. */
+
+ if (GET_CODE (XEXP (x, 0)) == PLUS)
+ {
+ x = apply_distributive_law
+ (gen_binary (PLUS, mode,
+ gen_binary (MULT, mode,
+ XEXP (XEXP (x, 0), 0), XEXP (x, 1)),
+ gen_binary (MULT, mode,
+ XEXP (XEXP (x, 0), 1), XEXP (x, 1))));
+
+ if (GET_CODE (x) != MULT)
+ return x;
+ }
+ break;
+
+ case UDIV:
+ /* If this is a divide by a power of two, treat it as a shift if
+ its first operand is a shift. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (i = exact_log2 (INTVAL (XEXP (x, 1)))) >= 0
+ && (GET_CODE (XEXP (x, 0)) == ASHIFT
+ || GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ || GET_CODE (XEXP (x, 0)) == ASHIFTRT
+ || GET_CODE (XEXP (x, 0)) == ROTATE
+ || GET_CODE (XEXP (x, 0)) == ROTATERT))
+ return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
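+ /* E.g. (udiv (lshiftrt X (const_int 3)) (const_int 4)) becomes
+ (lshiftrt X (const_int 5)), the two shifts being combined. */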
+ break;
+
+ case EQ: case NE:
+ case GT: case GTU: case GE: case GEU:
+ case LT: case LTU: case LE: case LEU:
+ /* If the first operand is a condition code, we can't do anything
+ with it. */
+ if (GET_CODE (XEXP (x, 0)) == COMPARE
+ || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
+#ifdef HAVE_cc0
+ && XEXP (x, 0) != cc0_rtx
+#endif
+ ))
+ {
+ rtx op0 = XEXP (x, 0);
+ rtx op1 = XEXP (x, 1);
+ enum rtx_code new_code;
+
+ if (GET_CODE (op0) == COMPARE)
+ op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
+
+ /* Simplify our comparison, if possible. */
+ new_code = simplify_comparison (code, &op0, &op1);
+
+#if STORE_FLAG_VALUE == 1
+ /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
+ if only the low-order bit is possibly nonzero in X (such as when
+ X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
+ (xor X 1) or (minus 1 X); we use the former. Finally, if X is
+ known to be either 0 or -1, NE becomes a NEG and EQ becomes
+ (plus X 1).
+
+ Remove any ZERO_EXTRACT we made when thinking this was a
+ comparison. It may now be simpler to use, e.g., an AND. If a
+ ZERO_EXTRACT is indeed appropriate, it will be placed back by
+ the call to make_compound_operation in the SET case. */
+
+ if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && nonzero_bits (op0, mode) == 1)
+ return gen_lowpart_for_combine (mode,
+ expand_compound_operation (op0));
+
+ else if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && (num_sign_bit_copies (op0, mode)
+ == GET_MODE_BITSIZE (mode)))
+ {
+ op0 = expand_compound_operation (op0);
+ return gen_unary (NEG, mode, mode,
+ gen_lowpart_for_combine (mode, op0));
+ }
+
+ else if (new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && nonzero_bits (op0, mode) == 1)
+ {
+ op0 = expand_compound_operation (op0);
+ return gen_binary (XOR, mode,
+ gen_lowpart_for_combine (mode, op0),
+ const1_rtx);
+ }
+
+ else if (new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && (num_sign_bit_copies (op0, mode)
+ == GET_MODE_BITSIZE (mode)))
+ {
+ op0 = expand_compound_operation (op0);
+ return plus_constant (gen_lowpart_for_combine (mode, op0), 1);
+ }
+#endif
+
+#if STORE_FLAG_VALUE == -1
+ /* If STORE_FLAG_VALUE is -1, we have cases similar to
+ those above. */
+ if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && (num_sign_bit_copies (op0, mode)
+ == GET_MODE_BITSIZE (mode)))
+ return gen_lowpart_for_combine (mode,
+ expand_compound_operation (op0));
+
+ else if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && nonzero_bits (op0, mode) == 1)
+ {
+ op0 = expand_compound_operation (op0);
+ return gen_unary (NEG, mode, mode,
+ gen_lowpart_for_combine (mode, op0));
+ }
+
+ else if (new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && (num_sign_bit_copies (op0, mode)
+ == GET_MODE_BITSIZE (mode)))
+ {
+ op0 = expand_compound_operation (op0);
+ return gen_unary (NOT, mode, mode,
+ gen_lowpart_for_combine (mode, op0));
+ }
+
+ /* If X is 0/1, (eq X 0) is X-1. */
+ else if (new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
+ && op1 == const0_rtx
+ && nonzero_bits (op0, mode) == 1)
+ {
+ op0 = expand_compound_operation (op0);
+ return plus_constant (gen_lowpart_for_combine (mode, op0), -1);
+ }
+#endif
+
+ /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
+ one bit that might be nonzero, we can convert (ne x 0) to
+ (ashift x c) where C puts the bit in the sign bit. Remove any
+ AND with STORE_FLAG_VALUE when we are done, since we are only
+ going to test the sign bit. */
+ if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (STORE_FLAG_VALUE
+ == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
+ && op1 == const0_rtx
+ && mode == GET_MODE (op0)
+ && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
+ {
+ x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
+ expand_compound_operation (op0),
+ GET_MODE_BITSIZE (mode) - 1 - i);
+ if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
+ return XEXP (x, 0);
+ else
+ return x;
+ }
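+ /* E.g. in SImode with STORE_FLAG_VALUE the sign bit, if only bit 3
+ of X can be nonzero, (ne X (const_int 0)) becomes
+ (ashift X (const_int 28)), moving bit 3 into the sign bit. */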
+
+ /* If the code changed, return a whole new comparison. */
+ if (new_code != code)
+ return gen_rtx_combine (new_code, mode, op0, op1);
+
+ /* Otherwise, keep this operation, but maybe change its operands.
+ This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
+ SUBST (XEXP (x, 0), op0);
+ SUBST (XEXP (x, 1), op1);
+ }
+ break;
+
+ case IF_THEN_ELSE:
+ return simplify_if_then_else (x);
+
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ case ZERO_EXTEND:
+ case SIGN_EXTEND:
+ /* If we are processing SET_DEST, we are done. */
+ if (in_dest)
+ return x;
+
+ return expand_compound_operation (x);
+
+ case SET:
+ return simplify_set (x);
+
+ case AND:
+ case IOR:
+ case XOR:
+ return simplify_logical (x, last);
+
+ case ABS:
+ /* (abs (neg <foo>)) -> (abs <foo>) */
+ if (GET_CODE (XEXP (x, 0)) == NEG)
+ SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
+
+ /* If operand is something known to be positive, ignore the ABS. */
+ if (GET_CODE (XEXP (x, 0)) == FFS || GET_CODE (XEXP (x, 0)) == ABS
+ || ((GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1)))
+ == 0)))
+ return XEXP (x, 0);
+
+ /* If operand is known to be only -1 or 0, convert ABS to NEG. */
+ if (num_sign_bit_copies (XEXP (x, 0), mode) == GET_MODE_BITSIZE (mode))
+ return gen_rtx_combine (NEG, mode, XEXP (x, 0));
+
+ break;
+
+ case FFS:
+ /* (ffs (*_extend <X>)) = (ffs <X>) */
+ if (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+ || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
+ SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
+ break;
+
+ case FLOAT:
+ /* (float (sign_extend <X>)) = (float <X>). */
+ if (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
+ SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
+ break;
+
+ case ASHIFT:
+ case LSHIFTRT:
+ case ASHIFTRT:
+ case ROTATE:
+ case ROTATERT:
+ /* If this is a shift by a constant amount, simplify it. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return simplify_shift_const (x, code, mode, XEXP (x, 0),
+ INTVAL (XEXP (x, 1)));
+
+#ifdef SHIFT_COUNT_TRUNCATED
+ else if (SHIFT_COUNT_TRUNCATED && GET_CODE (XEXP (x, 1)) != REG)
+ SUBST (XEXP (x, 1),
+ force_to_mode (XEXP (x, 1), GET_MODE (x),
+ ((HOST_WIDE_INT) 1
+ << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
+ - 1,
+ NULL_RTX, 0));
+#endif
+
+ break;
+ }
+
+ return x;
+}
+
+/* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
+
+static rtx
+simplify_if_then_else (x)
+ rtx x;
+{
+ enum machine_mode mode = GET_MODE (x);
+ rtx cond = XEXP (x, 0);
+ rtx true = XEXP (x, 1);
+ rtx false = XEXP (x, 2);
+ enum rtx_code true_code = GET_CODE (cond);
+ int comparison_p = GET_RTX_CLASS (true_code) == '<';
+ rtx temp;
+ int i;
+
+ /* Simplify storing of the truth value. */
+ if (comparison_p && true == const_true_rtx && false == const0_rtx)
+ return gen_binary (true_code, mode, XEXP (cond, 0), XEXP (cond, 1));
+
+ /* Also when the truth value has to be reversed. */
+ if (comparison_p && reversible_comparison_p (cond)
+ && true == const0_rtx && false == const_true_rtx)
+ return gen_binary (reverse_condition (true_code),
+ mode, XEXP (cond, 0), XEXP (cond, 1));
+
+ /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
+ in it is being compared against certain values. Get the true and false
+ comparisons and see if that says anything about the value of each arm. */
+
+ if (comparison_p && reversible_comparison_p (cond)
+ && GET_CODE (XEXP (cond, 0)) == REG)
+ {
+ HOST_WIDE_INT nzb;
+ rtx from = XEXP (cond, 0);
+ enum rtx_code false_code = reverse_condition (true_code);
+ rtx true_val = XEXP (cond, 1);
+ rtx false_val = true_val;
+ int swapped = 0;
+
+ /* If FALSE_CODE is EQ, swap the codes and arms. */
+
+ if (false_code == EQ)
+ {
+ swapped = 1, true_code = EQ, false_code = NE;
+ temp = true, true = false, false = temp;
+ }
+
+ /* If we are comparing against zero and the expression being tested has
+ only a single bit that might be nonzero, that is its value when it is
+ not equal to zero. Similarly if it is known to be -1 or 0. */
+
+ if (true_code == EQ && true_val == const0_rtx
+ && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
+ false_code = EQ, false_val = GEN_INT (nzb);
+ else if (true_code == EQ && true_val == const0_rtx
+ && (num_sign_bit_copies (from, GET_MODE (from))
+ == GET_MODE_BITSIZE (GET_MODE (from))))
+ false_code = EQ, false_val = constm1_rtx;
+
+ /* Now simplify an arm if we know the value of the register in the
+ branch and it is used in the arm. Be careful due to the potential
+ of locally-shared RTL. */
+
+ if (reg_mentioned_p (from, true))
+ true = subst (known_cond (copy_rtx (true), true_code, from, true_val),
+ pc_rtx, pc_rtx, 0, 0);
+ if (reg_mentioned_p (from, false))
+ false = subst (known_cond (copy_rtx (false), false_code,
+ from, false_val),
+ pc_rtx, pc_rtx, 0, 0);
+
+ SUBST (XEXP (x, 1), swapped ? false : true);
+ SUBST (XEXP (x, 2), swapped ? true : false);
+
+ true = XEXP (x, 1), false = XEXP (x, 2), true_code = GET_CODE (cond);
+ }
+
+ /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
+ reversed, do so to avoid needing two sets of patterns for
+ subtract-and-branch insns. Similarly if we have a constant in the true
+ arm, the false arm is the same as the first operand of the comparison, or
+ the false arm is more complicated than the true arm. */
+
+ if (comparison_p && reversible_comparison_p (cond)
+ && (true == pc_rtx
+ || (CONSTANT_P (true)
+ && GET_CODE (false) != CONST_INT && false != pc_rtx)
+ || true == const0_rtx
+ || (GET_RTX_CLASS (GET_CODE (true)) == 'o'
+ && GET_RTX_CLASS (GET_CODE (false)) != 'o')
+ || (GET_CODE (true) == SUBREG
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (true))) == 'o'
+ && GET_RTX_CLASS (GET_CODE (false)) != 'o')
+ || reg_mentioned_p (true, false)
+ || rtx_equal_p (false, XEXP (cond, 0))))
+ {
+ true_code = reverse_condition (true_code);
+ SUBST (XEXP (x, 0),
+ gen_binary (true_code, GET_MODE (cond), XEXP (cond, 0),
+ XEXP (cond, 1)));
+
+ SUBST (XEXP (x, 1), false);
+ SUBST (XEXP (x, 2), true);
+
+ temp = true, true = false, false = temp, cond = XEXP (x, 0);
+ }
+
+ /* If the two arms are identical, we don't need the comparison. */
+
+ if (rtx_equal_p (true, false) && ! side_effects_p (cond))
+ return true;
+
+ /* Look for cases where we have (abs x) or (neg (abs X)). */
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_CODE (false) == NEG
+ && rtx_equal_p (true, XEXP (false, 0))
+ && comparison_p
+ && rtx_equal_p (true, XEXP (cond, 0))
+ && ! side_effects_p (true))
+ switch (true_code)
+ {
+ case GT:
+ case GE:
+ return gen_unary (ABS, mode, mode, true);
+ case LT:
+ case LE:
+ return gen_unary (NEG, mode, mode, gen_unary (ABS, mode, mode, true));
+ }
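+ /* E.g. (if_then_else (ge A (const_int 0)) A (neg A)) becomes (abs A). */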
+
+ /* Look for MIN or MAX. */
+
+ if ((! FLOAT_MODE_P (mode) || flag_fast_math)
+ && comparison_p
+ && rtx_equal_p (XEXP (cond, 0), true)
+ && rtx_equal_p (XEXP (cond, 1), false)
+ && ! side_effects_p (cond))
+ switch (true_code)
+ {
+ case GE:
+ case GT:
+ return gen_binary (SMAX, mode, true, false);
+ case LE:
+ case LT:
+ return gen_binary (SMIN, mode, true, false);
+ case GEU:
+ case GTU:
+ return gen_binary (UMAX, mode, true, false);
+ case LEU:
+ case LTU:
+ return gen_binary (UMIN, mode, true, false);
+ }
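+ /* E.g. (if_then_else (gt A B) A B) becomes (smax A B); the unsigned
+ comparisons map to UMAX and UMIN likewise. */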
+
+#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
+
+ /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
+ second operand is zero, this can be done as (OP Z (mult COND C2)) where
+ C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
+ SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
+ We can do this kind of thing in some cases when STORE_FLAG_VALUE is
+ neither of the above, but it isn't worth checking for. */
+
+ if (comparison_p && mode != VOIDmode && ! side_effects_p (x))
+ {
+ rtx t = make_compound_operation (true, SET);
+ rtx f = make_compound_operation (false, SET);
+ rtx cond_op0 = XEXP (cond, 0);
+ rtx cond_op1 = XEXP (cond, 1);
+ enum rtx_code op, extend_op = NIL;
+ enum machine_mode m = mode;
+ rtx z = 0, c1;
+
+ if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
+ || GET_CODE (t) == IOR || GET_CODE (t) == XOR
+ || GET_CODE (t) == ASHIFT
+ || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
+ && rtx_equal_p (XEXP (t, 0), f))
+ c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
+
+ /* If an identity-zero op is commutative, check whether there
+ would be a match if we swapped the operands. */
+ else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
+ || GET_CODE (t) == XOR)
+ && rtx_equal_p (XEXP (t, 1), f))
+ c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
+ else if (GET_CODE (t) == SIGN_EXTEND
+ && (GET_CODE (XEXP (t, 0)) == PLUS
+ || GET_CODE (XEXP (t, 0)) == MINUS
+ || GET_CODE (XEXP (t, 0)) == IOR
+ || GET_CODE (XEXP (t, 0)) == XOR
+ || GET_CODE (XEXP (t, 0)) == ASHIFT
+ || GET_CODE (XEXP (t, 0)) == LSHIFTRT
+ || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
+ && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
+ && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
+ && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
+ && (num_sign_bit_copies (f, GET_MODE (f))
+ > (GET_MODE_BITSIZE (mode)
+ - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 0))))))
+ {
+ c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
+ extend_op = SIGN_EXTEND;
+ m = GET_MODE (XEXP (t, 0));
+ }
+ else if (GET_CODE (t) == SIGN_EXTEND
+ && (GET_CODE (XEXP (t, 0)) == PLUS
+ || GET_CODE (XEXP (t, 0)) == IOR
+ || GET_CODE (XEXP (t, 0)) == XOR)
+ && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
+ && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
+ && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
+ && (num_sign_bit_copies (f, GET_MODE (f))
+ > (GET_MODE_BITSIZE (mode)
+ - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 1))))))
+ {
+ c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
+ extend_op = SIGN_EXTEND;
+ m = GET_MODE (XEXP (t, 0));
+ }
+ else if (GET_CODE (t) == ZERO_EXTEND
+ && (GET_CODE (XEXP (t, 0)) == PLUS
+ || GET_CODE (XEXP (t, 0)) == MINUS
+ || GET_CODE (XEXP (t, 0)) == IOR
+ || GET_CODE (XEXP (t, 0)) == XOR
+ || GET_CODE (XEXP (t, 0)) == ASHIFT
+ || GET_CODE (XEXP (t, 0)) == LSHIFTRT
+ || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
+ && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
+ && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
+ && ((nonzero_bits (f, GET_MODE (f))
+ & ~ GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
+ == 0))
+ {
+ c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
+ extend_op = ZERO_EXTEND;
+ m = GET_MODE (XEXP (t, 0));
+ }
+ else if (GET_CODE (t) == ZERO_EXTEND
+ && (GET_CODE (XEXP (t, 0)) == PLUS
+ || GET_CODE (XEXP (t, 0)) == IOR
+ || GET_CODE (XEXP (t, 0)) == XOR)
+ && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
+ && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
+ && ((nonzero_bits (f, GET_MODE (f))
+ & ~ GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
+ == 0))
+ {
+ c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
+ extend_op = ZERO_EXTEND;
+ m = GET_MODE (XEXP (t, 0));
+ }
+
+ if (z)
+ {
+ temp = subst (gen_binary (true_code, m, cond_op0, cond_op1),
+ pc_rtx, pc_rtx, 0, 0);
+ temp = gen_binary (MULT, m, temp,
+ gen_binary (MULT, m, c1, const_true_rtx));
+ temp = subst (temp, pc_rtx, pc_rtx, 0, 0);
+ temp = gen_binary (op, m, gen_lowpart_for_combine (m, z), temp);
+
+ if (extend_op != NIL)
+ temp = gen_unary (extend_op, mode, m, temp);
+
+ return temp;
+ }
+ }
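+ /* For example, with STORE_FLAG_VALUE == 1,
+ (if_then_else (ne X 0) (plus Z (const_int 4)) Z) becomes
+ (plus Z (mult (ne X 0) (const_int 4))). */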
+#endif
+
+ /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
+ 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
+ negation of a single bit, we can convert this operation to a shift. We
+ can actually do this more generally, but it doesn't seem worth it. */
+
+ if (true_code == NE && XEXP (cond, 1) == const0_rtx
+ && false == const0_rtx && GET_CODE (true) == CONST_INT
+ && ((1 == nonzero_bits (XEXP (cond, 0), mode)
+ && (i = exact_log2 (INTVAL (true))) >= 0)
+ || ((num_sign_bit_copies (XEXP (cond, 0), mode)
+ == GET_MODE_BITSIZE (mode))
+ && (i = exact_log2 (- INTVAL (true))) >= 0)))
+ return
+ simplify_shift_const (NULL_RTX, ASHIFT, mode,
+ gen_lowpart_for_combine (mode, XEXP (cond, 0)), i);
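+ /* E.g. if A is known to be 0 or 1, (if_then_else (ne A 0) (const_int 8)
+ (const_int 0)) becomes (ashift A (const_int 3)). */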
+
+ return x;
+}
+
+/* Simplify X, a SET expression. Return the new expression. */
+
+static rtx
+simplify_set (x)
+ rtx x;
+{
+ rtx src = SET_SRC (x);
+ rtx dest = SET_DEST (x);
+ enum machine_mode mode
+ = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
+ rtx other_insn;
+ rtx *cc_use;
+
+ /* (set (pc) (return)) gets written as (return). */
+ if (GET_CODE (dest) == PC && GET_CODE (src) == RETURN)
+ return src;
+
+ /* Now that we know for sure which bits of SRC we are using, see if we can
+ simplify the expression for the object knowing that we only need the
+ low-order bits. */
+
+ if (GET_MODE_CLASS (mode) == MODE_INT)
+ src = force_to_mode (src, mode, GET_MODE_MASK (mode), NULL_RTX, 0);
+
+ /* If we are setting CC0 or if the source is a COMPARE, look for the use of
+ the comparison result and try to simplify it unless we already have used
+ undobuf.other_insn. */
+ if ((GET_CODE (src) == COMPARE
+#ifdef HAVE_cc0
+ || dest == cc0_rtx
+#endif
+ )
+ && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
+ && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
+ && GET_RTX_CLASS (GET_CODE (*cc_use)) == '<'
+ && rtx_equal_p (XEXP (*cc_use, 0), dest))
+ {
+ enum rtx_code old_code = GET_CODE (*cc_use);
+ enum rtx_code new_code;
+ rtx op0, op1;
+ int other_changed = 0;
+ enum machine_mode compare_mode = GET_MODE (dest);
+
+ if (GET_CODE (src) == COMPARE)
+ op0 = XEXP (src, 0), op1 = XEXP (src, 1);
+ else
+ op0 = src, op1 = const0_rtx;
+
+ /* Simplify our comparison, if possible. */
+ new_code = simplify_comparison (old_code, &op0, &op1);
+
+#ifdef EXTRA_CC_MODES
+ /* If this machine has CC modes other than CCmode, check to see if we
+ need to use a different CC mode here. */
+ compare_mode = SELECT_CC_MODE (new_code, op0, op1);
+#endif /* EXTRA_CC_MODES */
+
+#if !defined (HAVE_cc0) && defined (EXTRA_CC_MODES)
+ /* If the mode changed, we have to change SET_DEST, the mode in the
+ compare, and the mode in the place SET_DEST is used. If SET_DEST is
+ a hard register, just build new versions with the proper mode. If it
+	 is a pseudo, we lose unless this is the only time we set the pseudo, in
+ which case we can safely change its mode. */
+ if (compare_mode != GET_MODE (dest))
+ {
+ int regno = REGNO (dest);
+ rtx new_dest = gen_rtx (REG, compare_mode, regno);
+
+ if (regno < FIRST_PSEUDO_REGISTER
+ || (reg_n_sets[regno] == 1 && ! REG_USERVAR_P (dest)))
+ {
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ SUBST (regno_reg_rtx[regno], new_dest);
+
+ SUBST (SET_DEST (x), new_dest);
+ SUBST (XEXP (*cc_use, 0), new_dest);
+ other_changed = 1;
+
+ dest = new_dest;
+ }
+ }
+#endif
+
+ /* If the code changed, we have to build a new comparison in
+ undobuf.other_insn. */
+ if (new_code != old_code)
+ {
+ unsigned HOST_WIDE_INT mask;
+
+ SUBST (*cc_use, gen_rtx_combine (new_code, GET_MODE (*cc_use),
+ dest, const0_rtx));
+
+ /* If the only change we made was to change an EQ into an NE or
+ vice versa, OP0 has only one bit that might be nonzero, and OP1
+ is zero, check if changing the user of the condition code will
+ produce a valid insn. If it won't, we can keep the original code
+ in that insn by surrounding our operation with an XOR. */
+
+ if (((old_code == NE && new_code == EQ)
+ || (old_code == EQ && new_code == NE))
+ && ! other_changed && op1 == const0_rtx
+ && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
+ && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
+ {
+ rtx pat = PATTERN (other_insn), note = 0;
+
+ if ((recog_for_combine (&pat, other_insn, &note) < 0
+ && ! check_asm_operands (pat)))
+ {
+ PUT_CODE (*cc_use, old_code);
+ other_insn = 0;
+
+ op0 = gen_binary (XOR, GET_MODE (op0), op0, GEN_INT (mask));
+ }
+ }
+
+ other_changed = 1;
+ }
+
+ if (other_changed)
+ undobuf.other_insn = other_insn;
+
+#ifdef HAVE_cc0
+ /* If we are now comparing against zero, change our source if
+ needed. If we do not use cc0, we always have a COMPARE. */
+ if (op1 == const0_rtx && dest == cc0_rtx)
+ {
+ SUBST (SET_SRC (x), op0);
+ src = op0;
+ }
+ else
+#endif
+
+ /* Otherwise, if we didn't previously have a COMPARE in the
+ correct mode, we need one. */
+ if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode)
+ {
+ SUBST (SET_SRC (x),
+ gen_rtx_combine (COMPARE, compare_mode, op0, op1));
+ src = SET_SRC (x);
+ }
+ else
+ {
+ /* Otherwise, update the COMPARE if needed. */
+ SUBST (XEXP (src, 0), op0);
+ SUBST (XEXP (src, 1), op1);
+ }
+ }
+ else
+ {
+ /* Get SET_SRC in a form where we have placed back any
+ compound expressions. Then do the checks below. */
+ src = make_compound_operation (src, SET);
+ SUBST (SET_SRC (x), src);
+ }
+
+ /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
+ and X being a REG or (subreg (reg)), we may be able to convert this to
+ (set (subreg:m2 x) (op)).
+
+ We can always do this if M1 is narrower than M2 because that means that
+ we only care about the low bits of the result.
+
+ However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
+   perform a narrower operation than requested since the high-order bits will
+   be undefined.  On machines where it is defined, this transformation is safe
+ as long as M1 and M2 have the same number of words. */
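+
+  /* For example, if QImode and SImode each occupy one word,
+     (set X:QI (subreg:QI (plus:SI A B) 0)) can become
+     (set (subreg:SI X 0) (plus:SI A B)), since only the low-order
+     bits of the PLUS are needed.  */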
+
+ if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (src))) != 'o'
+ && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
+ / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
+#ifndef WORD_REGISTER_OPERATIONS
+ && (GET_MODE_SIZE (GET_MODE (src))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
+#endif
+ && (GET_CODE (dest) == REG
+ || (GET_CODE (dest) == SUBREG
+ && GET_CODE (SUBREG_REG (dest)) == REG)))
+ {
+ SUBST (SET_DEST (x),
+ gen_lowpart_for_combine (GET_MODE (SUBREG_REG (src)),
+ dest));
+ SUBST (SET_SRC (x), SUBREG_REG (src));
+
+ src = SET_SRC (x), dest = SET_DEST (x);
+ }
+
+#ifdef LOAD_EXTEND_OP
+ /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
+ would require a paradoxical subreg. Replace the subreg with a
+ zero_extend to avoid the reload that would otherwise be required. */
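+
+  /* For example, on a machine where LOAD_EXTEND_OP (HImode) is
+     ZERO_EXTEND, (set FOO (subreg:SI (mem:HI BAR) 0)) becomes
+     (set FOO (zero_extend:SI (mem:HI BAR))).  */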
+
+ if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
+ && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != NIL
+ && SUBREG_WORD (src) == 0
+ && (GET_MODE_SIZE (GET_MODE (src))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
+ && GET_CODE (SUBREG_REG (src)) == MEM)
+ {
+ SUBST (SET_SRC (x),
+ gen_rtx_combine (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
+ GET_MODE (src), XEXP (src, 0)));
+
+ src = SET_SRC (x);
+ }
+#endif
+
+ /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
+ are comparing an item known to be 0 or -1 against 0, use a logical
+ operation instead. Check for one of the arms being an IOR of the other
+ arm with some value. We compute three terms to be IOR'ed together. In
+ practice, at most two will be nonzero. Then we do the IOR's. */
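+
+  /* In the simplest case, with A known to be all zeros or all ones,
+     (set D (if_then_else (ne A 0) B C)) becomes
+     (set D (ior (and A B) (and (not A) C))).  */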
+
+ if (GET_CODE (dest) != PC
+ && GET_CODE (src) == IF_THEN_ELSE
+#ifdef HAVE_conditional_move
+ && ! HAVE_conditional_move
+#endif
+ && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
+ && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
+ && XEXP (XEXP (src, 0), 1) == const0_rtx
+ && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
+ GET_MODE (XEXP (XEXP (src, 0), 0)))
+ == GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (src, 0), 0))))
+ && ! side_effects_p (src))
+ {
+ rtx true = (GET_CODE (XEXP (src, 0)) == NE
+ ? XEXP (src, 1) : XEXP (src, 2));
+ rtx false = (GET_CODE (XEXP (src, 0)) == NE
+ ? XEXP (src, 2) : XEXP (src, 1));
+ rtx term1 = const0_rtx, term2, term3;
+
+ if (GET_CODE (true) == IOR && rtx_equal_p (XEXP (true, 0), false))
+ term1 = false, true = XEXP (true, 1), false = const0_rtx;
+ else if (GET_CODE (true) == IOR
+ && rtx_equal_p (XEXP (true, 1), false))
+ term1 = false, true = XEXP (true, 0), false = const0_rtx;
+ else if (GET_CODE (false) == IOR
+ && rtx_equal_p (XEXP (false, 0), true))
+ term1 = true, false = XEXP (false, 1), true = const0_rtx;
+ else if (GET_CODE (false) == IOR
+ && rtx_equal_p (XEXP (false, 1), true))
+ term1 = true, false = XEXP (false, 0), true = const0_rtx;
+
+ term2 = gen_binary (AND, GET_MODE (src), XEXP (XEXP (src, 0), 0), true);
+ term3 = gen_binary (AND, GET_MODE (src),
+ gen_unary (NOT, GET_MODE (src), GET_MODE (src),
+ XEXP (XEXP (src, 0), 0)),
+ false);
+
+ SUBST (SET_SRC (x),
+ gen_binary (IOR, GET_MODE (src),
+ gen_binary (IOR, GET_MODE (src), term1, term2),
+ term3));
+
+ src = SET_SRC (x);
+ }
+
+ /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
+ whole thing fail. */
+ if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
+ return src;
+ else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
+ return dest;
+ else
+ /* Convert this into a field assignment operation, if possible. */
+ return make_field_assignment (x);
+}
+
+/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
+ result. LAST is nonzero if this is the last retry. */
+
+static rtx
+simplify_logical (x, last)
+ rtx x;
+ int last;
+{
+ enum machine_mode mode = GET_MODE (x);
+ rtx op0 = XEXP (x, 0);
+ rtx op1 = XEXP (x, 1);
+
+ switch (GET_CODE (x))
+ {
+ case AND:
+ /* Convert (A ^ B) & A to A & (~ B) since the latter is often a single
+ insn (and may simplify more). */
+ if (GET_CODE (op0) == XOR
+ && rtx_equal_p (XEXP (op0, 0), op1)
+ && ! side_effects_p (op1))
+ x = gen_binary (AND, mode,
+ gen_unary (NOT, mode, mode, XEXP (op0, 1)), op1);
+
+ if (GET_CODE (op0) == XOR
+ && rtx_equal_p (XEXP (op0, 1), op1)
+ && ! side_effects_p (op1))
+ x = gen_binary (AND, mode,
+ gen_unary (NOT, mode, mode, XEXP (op0, 0)), op1);
+
+ /* Similarly for (~ (A ^ B)) & A. */
+ if (GET_CODE (op0) == NOT
+ && GET_CODE (XEXP (op0, 0)) == XOR
+ && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
+ && ! side_effects_p (op1))
+ x = gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
+
+ if (GET_CODE (op0) == NOT
+ && GET_CODE (XEXP (op0, 0)) == XOR
+ && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
+ && ! side_effects_p (op1))
+ x = gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
+
+ if (GET_CODE (op1) == CONST_INT)
+ {
+ x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
+
+	  /* If we have (ior (and X C1) C2) and the next restart would be
+ the last, simplify this by making C1 as small as possible
+ and then exit. */
+ if (last
+ && GET_CODE (x) == IOR && GET_CODE (op0) == AND
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (op1) == CONST_INT)
+ return gen_binary (IOR, mode,
+ gen_binary (AND, mode, XEXP (op0, 0),
+ GEN_INT (INTVAL (XEXP (op0, 1))
+ & ~ INTVAL (op1))), op1);
+
+ if (GET_CODE (x) != AND)
+ return x;
+ }
+
+ /* Convert (A | B) & A to A. */
+ if (GET_CODE (op0) == IOR
+ && (rtx_equal_p (XEXP (op0, 0), op1)
+ || rtx_equal_p (XEXP (op0, 1), op1))
+ && ! side_effects_p (XEXP (op0, 0))
+ && ! side_effects_p (XEXP (op0, 1)))
+ return op1;
+
+ /* In the following group of tests (and those in case IOR below),
+ we start with some combination of logical operations and apply
+ the distributive law followed by the inverse distributive law.
+ Most of the time, this results in no change. However, if some of
+ the operands are the same or inverses of each other, simplifications
+ will result.
+
+ For example, (and (ior A B) (not B)) can occur as the result of
+ expanding a bit field assignment. When we apply the distributive
+	 law to this, we get (ior (and A (not B)) (and B (not B))),
+	 which then simplifies to (and A (not B)).
+
+ If we have (and (ior A B) C), apply the distributive law and then
+ the inverse distributive law to see if things simplify. */
+
+ if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
+ {
+ x = apply_distributive_law
+ (gen_binary (GET_CODE (op0), mode,
+ gen_binary (AND, mode, XEXP (op0, 0), op1),
+ gen_binary (AND, mode, XEXP (op0, 1), op1)));
+ if (GET_CODE (x) != AND)
+ return x;
+ }
+
+ if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
+ return apply_distributive_law
+ (gen_binary (GET_CODE (op1), mode,
+ gen_binary (AND, mode, XEXP (op1, 0), op0),
+ gen_binary (AND, mode, XEXP (op1, 1), op0)));
+
+ /* Similarly, taking advantage of the fact that
+ (and (not A) (xor B C)) == (xor (ior A B) (ior A C)) */
+
+ if (GET_CODE (op0) == NOT && GET_CODE (op1) == XOR)
+ return apply_distributive_law
+ (gen_binary (XOR, mode,
+ gen_binary (IOR, mode, XEXP (op0, 0), XEXP (op1, 0)),
+ gen_binary (IOR, mode, XEXP (op0, 0), XEXP (op1, 1))));
+
+ else if (GET_CODE (op1) == NOT && GET_CODE (op0) == XOR)
+ return apply_distributive_law
+ (gen_binary (XOR, mode,
+ gen_binary (IOR, mode, XEXP (op1, 0), XEXP (op0, 0)),
+ gen_binary (IOR, mode, XEXP (op1, 0), XEXP (op0, 1))));
+ break;
+
+ case IOR:
+ /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
+ if (GET_CODE (op1) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (op0, mode) & ~ INTVAL (op1)) == 0)
+ return op1;
+
+ /* Convert (A & B) | A to A. */
+ if (GET_CODE (op0) == AND
+ && (rtx_equal_p (XEXP (op0, 0), op1)
+ || rtx_equal_p (XEXP (op0, 1), op1))
+ && ! side_effects_p (XEXP (op0, 0))
+ && ! side_effects_p (XEXP (op0, 1)))
+ return op1;
+
+ /* If we have (ior (and A B) C), apply the distributive law and then
+ the inverse distributive law to see if things simplify. */
+
+ if (GET_CODE (op0) == AND)
+ {
+ x = apply_distributive_law
+ (gen_binary (AND, mode,
+ gen_binary (IOR, mode, XEXP (op0, 0), op1),
+ gen_binary (IOR, mode, XEXP (op0, 1), op1)));
+
+ if (GET_CODE (x) != IOR)
+ return x;
+ }
+
+ if (GET_CODE (op1) == AND)
+ {
+ x = apply_distributive_law
+ (gen_binary (AND, mode,
+ gen_binary (IOR, mode, XEXP (op1, 0), op0),
+ gen_binary (IOR, mode, XEXP (op1, 1), op0)));
+
+ if (GET_CODE (x) != IOR)
+ return x;
+ }
+
+ /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
+ mode size to (rotate A CX). */
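+
+      /* For example, in SImode,
+	 (ior (ashift A (const_int 3)) (lshiftrt A (const_int 29)))
+	 becomes (rotate A (const_int 3)).  */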
+
+ if (((GET_CODE (op0) == ASHIFT && GET_CODE (op1) == LSHIFTRT)
+ || (GET_CODE (op1) == ASHIFT && GET_CODE (op0) == LSHIFTRT))
+ && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (XEXP (op1, 1)) == CONST_INT
+ && (INTVAL (XEXP (op0, 1)) + INTVAL (XEXP (op1, 1))
+ == GET_MODE_BITSIZE (mode)))
+ return gen_rtx (ROTATE, mode, XEXP (op0, 0),
+ (GET_CODE (op0) == ASHIFT
+ ? XEXP (op0, 1) : XEXP (op1, 1)));
+
+      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
+	 a (sign_extend (plus ...)).  If so, and if OP1 is a CONST_INT whose
+	 bits are unaffected by the PLUS, the IOR can really be done as a
+	 PLUS and we can associate.  We check this by seeing if OP1 can be
+	 safely shifted left C bits.  */
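+      /* For example, ((X + 0x100) >> 4) | 3 can become (X + 0x130) >> 4
+	 when bits 4 and 5 of X + 0x100 are known to be zero, since the
+	 OR of 3 << 4 == 0x30 then cannot carry into the rest of the sum.  */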
+ if (GET_CODE (op1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
+ && GET_CODE (XEXP (op0, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ int count = INTVAL (XEXP (op0, 1));
+ HOST_WIDE_INT mask = INTVAL (op1) << count;
+
+ if (mask >> count == INTVAL (op1)
+ && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
+ {
+ SUBST (XEXP (XEXP (op0, 0), 1),
+ GEN_INT (INTVAL (XEXP (XEXP (op0, 0), 1)) | mask));
+ return op0;
+ }
+ }
+ break;
+
+ case XOR:
+ /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
+ Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
+ (NOT y). */
+ {
+ int num_negated = 0;
+
+ if (GET_CODE (op0) == NOT)
+ num_negated++, op0 = XEXP (op0, 0);
+ if (GET_CODE (op1) == NOT)
+ num_negated++, op1 = XEXP (op1, 0);
+
+ if (num_negated == 2)
+ {
+ SUBST (XEXP (x, 0), op0);
+ SUBST (XEXP (x, 1), op1);
+ }
+ else if (num_negated == 1)
+ return gen_unary (NOT, mode, mode, gen_binary (XOR, mode, op0, op1));
+ }
+
+ /* Convert (xor (and A B) B) to (and (not A) B). The latter may
+ correspond to a machine insn or result in further simplifications
+ if B is a constant. */
+
+ if (GET_CODE (op0) == AND
+ && rtx_equal_p (XEXP (op0, 1), op1)
+ && ! side_effects_p (op1))
+ return gen_binary (AND, mode,
+ gen_unary (NOT, mode, mode, XEXP (op0, 0)),
+ op1);
+
+ else if (GET_CODE (op0) == AND
+ && rtx_equal_p (XEXP (op0, 0), op1)
+ && ! side_effects_p (op1))
+ return gen_binary (AND, mode,
+ gen_unary (NOT, mode, mode, XEXP (op0, 1)),
+ op1);
+
+#if STORE_FLAG_VALUE == 1
+ /* (xor (comparison foo bar) (const_int 1)) can become the reversed
+ comparison. */
+ if (op1 == const1_rtx
+ && GET_RTX_CLASS (GET_CODE (op0)) == '<'
+ && reversible_comparison_p (op0))
+ return gen_rtx_combine (reverse_condition (GET_CODE (op0)),
+ mode, XEXP (op0, 0), XEXP (op0, 1));
+
+ /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
+ is (lt foo (const_int 0)), so we can perform the above
+ simplification. */
+
+ if (op1 == const1_rtx
+ && GET_CODE (op0) == LSHIFTRT
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ return gen_rtx_combine (GE, mode, XEXP (op0, 0), const0_rtx);
+#endif
+
+ /* (xor (comparison foo bar) (const_int sign-bit))
+ when STORE_FLAG_VALUE is the sign bit. */
+ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (STORE_FLAG_VALUE
+ == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
+ && op1 == const_true_rtx
+ && GET_RTX_CLASS (GET_CODE (op0)) == '<'
+ && reversible_comparison_p (op0))
+ return gen_rtx_combine (reverse_condition (GET_CODE (op0)),
+ mode, XEXP (op0, 0), XEXP (op0, 1));
+ break;
+ }
+
+ return x;
+}
+
+/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
+ operations" because they can be replaced with two more basic operations.
+ ZERO_EXTEND is also considered "compound" because it can be replaced with
+ an AND operation, which is simpler, though only one operation.
+
+ The function expand_compound_operation is called with an rtx expression
+ and will convert it to the appropriate shifts and AND operations,
+ simplifying at each stage.
+
+ The function make_compound_operation is called to convert an expression
+ consisting of shifts and ANDs into the equivalent compound expression.
+ It is the inverse of this function, loosely speaking. */
+
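+/* For example, expand_compound_operation conceptually turns
+   (sign_extend:SI (reg:HI R)) into
+   (ashiftrt:SI (ashift:SI R (const_int 16)) (const_int 16)), while
+   make_compound_operation recognizes such a pair of shifts and
+   rebuilds an equivalent sign extension or extraction.  */
+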
+static rtx
+expand_compound_operation (x)
+ rtx x;
+{
+ int pos = 0, len;
+ int unsignedp = 0;
+ int modewidth;
+ rtx tem;
+
+ switch (GET_CODE (x))
+ {
+ case ZERO_EXTEND:
+ unsignedp = 1;
+ case SIGN_EXTEND:
+ /* We can't necessarily use a const_int for a multiword mode;
+ it depends on implicitly extending the value.
+ Since we don't know the right way to extend it,
+ we can't tell whether the implicit way is right.
+
+ Even for a mode that is no wider than a const_int,
+ we can't win, because we need to sign extend one of its bits through
+ the rest of it, and we don't know which bit. */
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT)
+ return x;
+
+ /* Return if (subreg:MODE FROM 0) is not a safe replacement for
+ (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
+ because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
+ reloaded. If not for that, MEM's would very rarely be safe.
+
+ Reject MODEs bigger than a word, because we might not be able
+ to reference a two-register group starting with an arbitrary register
+ (and currently gen_lowpart might crash for a SUBREG). */
+
+ if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
+ return x;
+
+ len = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)));
+ /* If the inner object has VOIDmode (the only way this can happen
+	 is if it is an ASM_OPERANDS), we can't do anything since we don't
+ know how much masking to do. */
+ if (len == 0)
+ return x;
+
+ break;
+
+ case ZERO_EXTRACT:
+ unsignedp = 1;
+ case SIGN_EXTRACT:
+ /* If the operand is a CLOBBER, just return it. */
+ if (GET_CODE (XEXP (x, 0)) == CLOBBER)
+ return XEXP (x, 0);
+
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT
+ || GET_CODE (XEXP (x, 2)) != CONST_INT
+ || GET_MODE (XEXP (x, 0)) == VOIDmode)
+ return x;
+
+ len = INTVAL (XEXP (x, 1));
+ pos = INTVAL (XEXP (x, 2));
+
+ /* If this goes outside the object being extracted, replace the object
+ with a (use (mem ...)) construct that only combine understands
+ and is used only for this purpose. */
+ if (len + pos > GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
+ SUBST (XEXP (x, 0), gen_rtx (USE, GET_MODE (x), XEXP (x, 0)));
+
+#if BITS_BIG_ENDIAN
+ pos = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - len - pos;
+#endif
+ break;
+
+ default:
+ return x;
+ }
+
+ /* If we reach here, we want to return a pair of shifts. The inner
+ shift is a left shift of BITSIZE - POS - LEN bits. The outer
+ shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
+ logical depending on the value of UNSIGNEDP.
+
+ If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
+ converted into an AND of a shift.
+
+ We must check for the case where the left shift would have a negative
+ count. This can happen in a case like (x >> 31) & 255 on machines
+ that can't shift by a constant. On those machines, we would first
+ combine the shift with the AND to produce a variable-position
+ extraction. Then the constant of 31 would be substituted in to produce
+     such a position.  */
+
+ modewidth = GET_MODE_BITSIZE (GET_MODE (x));
+ if (modewidth >= pos - len)
+ tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
+ GET_MODE (x),
+ simplify_shift_const (NULL_RTX, ASHIFT,
+ GET_MODE (x),
+ XEXP (x, 0),
+ modewidth - pos - len),
+ modewidth - len);
+
+ else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
+ tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
+ simplify_shift_const (NULL_RTX, LSHIFTRT,
+ GET_MODE (x),
+ XEXP (x, 0), pos),
+ ((HOST_WIDE_INT) 1 << len) - 1);
+ else
+ /* Any other cases we can't handle. */
+ return x;
+
+
+ /* If we couldn't do this for some reason, return the original
+ expression. */
+ if (GET_CODE (tem) == CLOBBER)
+ return x;
+
+ return tem;
+}
+
+/* X is a SET which contains an assignment of one object into
+ a part of another (such as a bit-field assignment, STRICT_LOW_PART,
+ or certain SUBREGS). If possible, convert it into a series of
+ logical operations.
+
+ We half-heartedly support variable positions, but do not at all
+ support variable lengths. */
+
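+/* For example, with little-endian bit numbering,
+   (set (zero_extract:SI X (const_int 8) (const_int 4)) Y) expands to
+   (set X (ior (and (not (ashift (const_int 255) (const_int 4))) X)
+	       (ashift (and Y (const_int 255)) (const_int 4)))).  */
+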
+static rtx
+expand_field_assignment (x)
+ rtx x;
+{
+ rtx inner;
+ rtx pos; /* Always counts from low bit. */
+ int len;
+ rtx mask;
+ enum machine_mode compute_mode;
+
+ /* Loop until we find something we can't simplify. */
+ while (1)
+ {
+ if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
+ && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
+ {
+ inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
+ len = GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)));
+ pos = const0_rtx;
+ }
+ else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
+ && GET_CODE (XEXP (SET_DEST (x), 1)) == CONST_INT)
+ {
+ inner = XEXP (SET_DEST (x), 0);
+ len = INTVAL (XEXP (SET_DEST (x), 1));
+ pos = XEXP (SET_DEST (x), 2);
+
+ /* If the position is constant and spans the width of INNER,
+ surround INNER with a USE to indicate this. */
+ if (GET_CODE (pos) == CONST_INT
+ && INTVAL (pos) + len > GET_MODE_BITSIZE (GET_MODE (inner)))
+ inner = gen_rtx (USE, GET_MODE (SET_DEST (x)), inner);
+
+#if BITS_BIG_ENDIAN
+ if (GET_CODE (pos) == CONST_INT)
+ pos = GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner)) - len
+ - INTVAL (pos));
+ else if (GET_CODE (pos) == MINUS
+ && GET_CODE (XEXP (pos, 1)) == CONST_INT
+ && (INTVAL (XEXP (pos, 1))
+ == GET_MODE_BITSIZE (GET_MODE (inner)) - len))
+ /* If position is ADJUST - X, new position is X. */
+ pos = XEXP (pos, 0);
+ else
+ pos = gen_binary (MINUS, GET_MODE (pos),
+ GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner))
+ - len),
+ pos);
+#endif
+ }
+
+      /* A SUBREG between two modes that occupy the same number of words
+ can be done by moving the SUBREG to the source. */
+ else if (GET_CODE (SET_DEST (x)) == SUBREG
+ && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
+ {
+ x = gen_rtx (SET, VOIDmode, SUBREG_REG (SET_DEST (x)),
+ gen_lowpart_for_combine (GET_MODE (SUBREG_REG (SET_DEST (x))),
+ SET_SRC (x)));
+ continue;
+ }
+ else
+ break;
+
+ while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
+ inner = SUBREG_REG (inner);
+
+ compute_mode = GET_MODE (inner);
+
+ /* Compute a mask of LEN bits, if we can do this on the host machine. */
+ if (len < HOST_BITS_PER_WIDE_INT)
+ mask = GEN_INT (((HOST_WIDE_INT) 1 << len) - 1);
+ else
+ break;
+
+ /* Now compute the equivalent expression. Make a copy of INNER
+ for the SET_DEST in case it is a MEM into which we will substitute;
+ we don't want shared RTL in that case. */
+ x = gen_rtx (SET, VOIDmode, copy_rtx (inner),
+ gen_binary (IOR, compute_mode,
+ gen_binary (AND, compute_mode,
+ gen_unary (NOT, compute_mode,
+ compute_mode,
+ gen_binary (ASHIFT,
+ compute_mode,
+ mask, pos)),
+ inner),
+ gen_binary (ASHIFT, compute_mode,
+ gen_binary (AND, compute_mode,
+ gen_lowpart_for_combine
+ (compute_mode,
+ SET_SRC (x)),
+ mask),
+ pos)));
+ }
+
+ return x;
+}
+
+/* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
+ it is an RTX that represents a variable starting position; otherwise,
+ POS is the (constant) starting bit position (counted from the LSB).
+
+ INNER may be a USE. This will occur when we started with a bitfield
+ that went outside the boundary of the object in memory, which is
+ allowed on most machines. To isolate this case, we produce a USE
+ whose mode is wide enough and surround the MEM with it. The only
+ code that understands the USE is this routine. If it is not removed,
+ it will cause the resulting insn not to match.
+
+ UNSIGNEDP is non-zero for an unsigned reference and zero for a
+ signed reference.
+
+ IN_DEST is non-zero if this is a reference in the destination of a
+ SET. This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If non-zero,
+ a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
+ be used.
+
+ IN_COMPARE is non-zero if we are in a COMPARE. This means that a
+ ZERO_EXTRACT should be built even for bits starting at bit 0.
+
+ MODE is the desired mode of the result (if IN_DEST == 0). */
+
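+/* For example, assuming little-endian bit numbering,
+   make_extraction (SImode, R, 4, NULL_RTX, 8, 1, 0, 0) requests bits 4
+   through 11 of R zero-extended into SImode; when no simpler form
+   applies, the result is
+   (zero_extract:SI R (const_int 8) (const_int 4)).  */
+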
+static rtx
+make_extraction (mode, inner, pos, pos_rtx, len,
+ unsignedp, in_dest, in_compare)
+ enum machine_mode mode;
+ rtx inner;
+ int pos;
+ rtx pos_rtx;
+ int len;
+ int unsignedp;
+ int in_dest, in_compare;
+{
+ /* This mode describes the size of the storage area
+ to fetch the overall value from. Within that, we
+ ignore the POS lowest bits, etc. */
+ enum machine_mode is_mode = GET_MODE (inner);
+ enum machine_mode inner_mode;
+ enum machine_mode wanted_mem_mode = byte_mode;
+ enum machine_mode pos_mode = word_mode;
+ enum machine_mode extraction_mode = word_mode;
+ enum machine_mode tmode = mode_for_size (len, MODE_INT, 1);
+ int spans_byte = 0;
+ rtx new = 0;
+ rtx orig_pos_rtx = pos_rtx;
+ int orig_pos;
+
+ /* Get some information about INNER and get the innermost object. */
+ if (GET_CODE (inner) == USE)
+ /* (use:SI (mem:QI foo)) stands for (mem:SI foo). */
+ /* We don't need to adjust the position because we set up the USE
+ to pretend that it was a full-word object. */
+ spans_byte = 1, inner = XEXP (inner, 0);
+ else if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
+ {
+ /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
+ consider just the QI as the memory to extract from.
+ The subreg adds or removes high bits; its mode is
+ irrelevant to the meaning of this extraction,
+ since POS and LEN count from the lsb. */
+ if (GET_CODE (SUBREG_REG (inner)) == MEM)
+ is_mode = GET_MODE (SUBREG_REG (inner));
+ inner = SUBREG_REG (inner);
+ }
+
+ inner_mode = GET_MODE (inner);
+
+ if (pos_rtx && GET_CODE (pos_rtx) == CONST_INT)
+ pos = INTVAL (pos_rtx), pos_rtx = 0;
+
+ /* See if this can be done without an extraction. We never can if the
+ width of the field is not the same as that of some integer mode. For
+ registers, we can only avoid the extraction if the position is at the
+ low-order bit and this is either not in the destination or we have the
+ appropriate STRICT_LOW_PART operation available.
+
+ For MEM, we can avoid an extract if the field starts on an appropriate
+ boundary and we can change the mode of the memory reference. However,
+ we cannot directly access the MEM if we have a USE and the underlying
+ MEM is not TMODE. This combination means that MEM was being used in a
+ context where bits outside its mode were being referenced; that is only
+ valid in bit-field insns. */
+
+ if (tmode != BLKmode
+ && ! (spans_byte && inner_mode != tmode)
+ && ((pos_rtx == 0 && pos == 0 && GET_CODE (inner) != MEM
+ && (! in_dest
+ || (GET_CODE (inner) == REG
+ && (movstrict_optab->handlers[(int) tmode].insn_code
+ != CODE_FOR_nothing))))
+ || (GET_CODE (inner) == MEM && pos_rtx == 0
+ && (pos
+ % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
+ : BITS_PER_UNIT)) == 0
+ /* We can't do this if we are widening INNER_MODE (it
+ may not be aligned, for one thing). */
+ && GET_MODE_BITSIZE (inner_mode) >= GET_MODE_BITSIZE (tmode)
+ && (inner_mode == tmode
+ || (! mode_dependent_address_p (XEXP (inner, 0))
+ && ! MEM_VOLATILE_P (inner))))))
+ {
+ /* If INNER is a MEM, make a new MEM that encompasses just the desired
+ field. If the original and current mode are the same, we need not
+	 adjust the offset.  Otherwise, we do if bytes are big-endian.
+
+	 If INNER is not a MEM, get a piece consisting of just the field
+ of interest (in this case POS must be 0). */
+
+ if (GET_CODE (inner) == MEM)
+ {
+ int offset;
+ /* POS counts from lsb, but make OFFSET count in memory order. */
+ if (BYTES_BIG_ENDIAN)
+ offset = (GET_MODE_BITSIZE (is_mode) - len - pos) / BITS_PER_UNIT;
+ else
+ offset = pos / BITS_PER_UNIT;
+
+ new = gen_rtx (MEM, tmode, plus_constant (XEXP (inner, 0), offset));
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (inner);
+ MEM_VOLATILE_P (new) = MEM_VOLATILE_P (inner);
+ MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (inner);
+ }
+ else if (GET_CODE (inner) == REG)
+ {
+ /* We can't call gen_lowpart_for_combine here since we always want
+ a SUBREG and it would sometimes return a new hard register. */
+ if (tmode != inner_mode)
+ new = gen_rtx (SUBREG, tmode, inner,
+ (WORDS_BIG_ENDIAN
+ && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD
+ ? ((GET_MODE_SIZE (inner_mode)
+ - GET_MODE_SIZE (tmode))
+ / UNITS_PER_WORD)
+ : 0));
+ else
+ new = inner;
+ }
+ else
+ new = force_to_mode (inner, tmode,
+ len >= HOST_BITS_PER_WIDE_INT
+ ? GET_MODE_MASK (tmode)
+ : ((HOST_WIDE_INT) 1 << len) - 1,
+ NULL_RTX, 0);
+
+ /* If this extraction is going into the destination of a SET,
+ make a STRICT_LOW_PART unless we made a MEM. */
+
+ if (in_dest)
+ return (GET_CODE (new) == MEM ? new
+ : (GET_CODE (new) != SUBREG
+ ? gen_rtx (CLOBBER, tmode, const0_rtx)
+ : gen_rtx_combine (STRICT_LOW_PART, VOIDmode, new)));
+
+ /* Otherwise, sign- or zero-extend unless we already are in the
+ proper mode. */
+
+ return (mode == tmode ? new
+ : gen_rtx_combine (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
+ mode, new));
+ }
+
+ /* Unless this is a COMPARE or we have a funny memory reference,
+ don't do anything with zero-extending field extracts starting at
+ the low-order bit since they are simple AND operations. */
+ if (pos_rtx == 0 && pos == 0 && ! in_dest
+ && ! in_compare && ! spans_byte && unsignedp)
+ return 0;
+
+ /* Unless we are allowed to span bytes, reject this if we would be
+ spanning bytes or if the position is not a constant and the length
+ is not 1. In all other cases, we would only be going outside
+     our object in cases when an original shift would have been
+ undefined. */
+ if (! spans_byte
+ && ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode))
+ || (pos_rtx != 0 && len != 1)))
+ return 0;
+
+ /* Get the mode to use should INNER be a MEM, the mode for the position,
+ and the mode for the result. */
+#ifdef HAVE_insv
+ if (in_dest)
+ {
+ wanted_mem_mode = insn_operand_mode[(int) CODE_FOR_insv][0];
+ pos_mode = insn_operand_mode[(int) CODE_FOR_insv][2];
+ extraction_mode = insn_operand_mode[(int) CODE_FOR_insv][3];
+ }
+#endif
+
+#ifdef HAVE_extzv
+ if (! in_dest && unsignedp)
+ {
+ wanted_mem_mode = insn_operand_mode[(int) CODE_FOR_extzv][1];
+ pos_mode = insn_operand_mode[(int) CODE_FOR_extzv][3];
+ extraction_mode = insn_operand_mode[(int) CODE_FOR_extzv][0];
+ }
+#endif
+
+#ifdef HAVE_extv
+ if (! in_dest && ! unsignedp)
+ {
+ wanted_mem_mode = insn_operand_mode[(int) CODE_FOR_extv][1];
+ pos_mode = insn_operand_mode[(int) CODE_FOR_extv][3];
+ extraction_mode = insn_operand_mode[(int) CODE_FOR_extv][0];
+ }
+#endif
+
+ /* Never narrow an object, since that might not be safe. */
+
+ if (mode != VOIDmode
+ && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
+ extraction_mode = mode;
+
+ if (pos_rtx && GET_MODE (pos_rtx) != VOIDmode
+ && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
+ pos_mode = GET_MODE (pos_rtx);
+
+ /* If this is not from memory or we have to change the mode of memory and
+ cannot, the desired mode is EXTRACTION_MODE. */
+ if (GET_CODE (inner) != MEM
+ || (inner_mode != wanted_mem_mode
+ && (mode_dependent_address_p (XEXP (inner, 0))
+ || MEM_VOLATILE_P (inner))))
+ wanted_mem_mode = extraction_mode;
+
+ orig_pos = pos;
+
+#if BITS_BIG_ENDIAN
+ /* If position is constant, compute new position. Otherwise, build
+ subtraction. */
+ if (pos_rtx == 0)
+ pos = (MAX (GET_MODE_BITSIZE (is_mode), GET_MODE_BITSIZE (wanted_mem_mode))
+ - len - pos);
+ else
+ pos_rtx
+ = gen_rtx_combine (MINUS, GET_MODE (pos_rtx),
+ GEN_INT (MAX (GET_MODE_BITSIZE (is_mode),
+ GET_MODE_BITSIZE (wanted_mem_mode))
+ - len),
+ pos_rtx);
+#endif
+
+ /* If INNER has a wider mode, make it smaller. If this is a constant
+     extract, try to adjust the address to point to the byte containing
+ the value. */
+ if (wanted_mem_mode != VOIDmode
+ && GET_MODE_SIZE (wanted_mem_mode) < GET_MODE_SIZE (is_mode)
+ && ((GET_CODE (inner) == MEM
+ && (inner_mode == wanted_mem_mode
+ || (! mode_dependent_address_p (XEXP (inner, 0))
+ && ! MEM_VOLATILE_P (inner))))))
+ {
+ int offset = 0;
+
+ /* The computations below will be correct if the machine is big
+ endian in both bits and bytes or little endian in bits and bytes.
+ If it is mixed, we must adjust. */
+
+ /* If bytes are big endian and we had a paradoxical SUBREG, we must
+ adjust OFFSET to compensate. */
+#if BYTES_BIG_ENDIAN
+ if (! spans_byte
+ && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
+ offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
+#endif
+
+ /* If this is a constant position, we can move to the desired byte. */
+ if (pos_rtx == 0)
+ {
+ offset += pos / BITS_PER_UNIT;
+ pos %= GET_MODE_BITSIZE (wanted_mem_mode);
+ }
+
+#if BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
+ if (! spans_byte && is_mode != wanted_mem_mode)
+ offset = (GET_MODE_SIZE (is_mode)
+ - GET_MODE_SIZE (wanted_mem_mode) - offset);
+#endif
+
+ if (offset != 0 || inner_mode != wanted_mem_mode)
+ {
+ rtx newmem = gen_rtx (MEM, wanted_mem_mode,
+ plus_constant (XEXP (inner, 0), offset));
+ RTX_UNCHANGING_P (newmem) = RTX_UNCHANGING_P (inner);
+ MEM_VOLATILE_P (newmem) = MEM_VOLATILE_P (inner);
+ MEM_IN_STRUCT_P (newmem) = MEM_IN_STRUCT_P (inner);
+ inner = newmem;
+ }
+ }
+
+ /* If INNER is not memory, we can always get it into the proper mode. */
+ else if (GET_CODE (inner) != MEM)
+ inner = force_to_mode (inner, extraction_mode,
+ pos_rtx || len + orig_pos >= HOST_BITS_PER_WIDE_INT
+ ? GET_MODE_MASK (extraction_mode)
+ : (((HOST_WIDE_INT) 1 << len) - 1) << orig_pos,
+ NULL_RTX, 0);
+
+ /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
+ have to zero extend. Otherwise, we can just use a SUBREG. */
+ if (pos_rtx != 0
+ && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
+ pos_rtx = gen_rtx_combine (ZERO_EXTEND, pos_mode, pos_rtx);
+ else if (pos_rtx != 0
+ && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
+ pos_rtx = gen_lowpart_for_combine (pos_mode, pos_rtx);
+
+ /* Make POS_RTX unless we already have it and it is correct. If we don't
+ have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
+ be a CONST_INT. */
+ if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
+ pos_rtx = orig_pos_rtx;
+
+ else if (pos_rtx == 0)
+ pos_rtx = GEN_INT (pos);
+
+ /* Make the required operation. See if we can use existing rtx. */
+ new = gen_rtx_combine (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
+ extraction_mode, inner, GEN_INT (len), pos_rtx);
+ if (! in_dest)
+ new = gen_lowpart_for_combine (mode, new);
+
+ return new;
+}
+
+/* See if X contains an ASHIFT of COUNT or more bits that can be commuted
+ with any other operations in X. Return X without that shift if so. */
+
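+/* For example, with COUNT == 3,
+   (plus (ashift X (const_int 3)) (const_int 8)) yields
+   (plus X (const_int 1)), since shifting that result left by 3
+   recreates the original expression.  */
+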
+static rtx
+extract_left_shift (x, count)
+ rtx x;
+ int count;
+{
+ enum rtx_code code = GET_CODE (x);
+ enum machine_mode mode = GET_MODE (x);
+ rtx tem;
+
+ switch (code)
+ {
+ case ASHIFT:
+ /* This is the shift itself. If it is wide enough, we will return
+ either the value being shifted if the shift count is equal to
+ COUNT or a shift for the difference. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= count)
+ return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
+ INTVAL (XEXP (x, 1)) - count);
+ break;
+
+ case NEG: case NOT:
+ if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
+ return gen_unary (code, mode, mode, tem);
+
+ break;
+
+ case PLUS: case IOR: case XOR: case AND:
+ /* If we can safely shift this constant and we find the inner shift,
+ make a new operation. */
+      if (GET_CODE (XEXP (x, 1)) == CONST_INT
+	  && (INTVAL (XEXP (x, 1)) & (((HOST_WIDE_INT) 1 << count) - 1)) == 0
+ && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
+ return gen_binary (code, mode, tem,
+ GEN_INT (INTVAL (XEXP (x, 1)) >> count));
+
+ break;
+ }
+
+ return 0;
+}
+
+/* Look at the expression rooted at X. Look for expressions
+ equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
+ Form these expressions.
+
+ Return the new rtx, usually just X.
+
+ Also, for machines like the Vax that don't have logical shift insns,
+ try to convert logical to arithmetic shift operations in cases where
+ they are equivalent. This undoes the canonicalizations to logical
+ shifts done elsewhere.
+
+ We try, as much as possible, to re-use rtl expressions to save memory.
+
+ IN_CODE says what kind of expression we are processing. Normally, it is
+   SET.  In a memory address (inside a MEM, PLUS or MINUS, the latter two
+ being kludges), it is MEM. When processing the arguments of a comparison
+ or a COMPARE against zero, it is COMPARE. */
+
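+/* For example, (and (lshiftrt X (const_int 4)) (const_int 255)) is
+   typically rebuilt as (zero_extract X (const_int 8) (const_int 4)),
+   inverting what expand_compound_operation would produce.  */
+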
+static rtx
+make_compound_operation (x, in_code)
+ rtx x;
+ enum rtx_code in_code;
+{
+ enum rtx_code code = GET_CODE (x);
+ enum machine_mode mode = GET_MODE (x);
+ int mode_width = GET_MODE_BITSIZE (mode);
+ rtx rhs, lhs;
+ enum rtx_code next_code;
+ int i;
+ rtx new = 0;
+ rtx tem;
+ char *fmt;
+
+ /* Select the code to be used in recursive calls. Once we are inside an
+ address, we stay there. If we have a comparison, set to COMPARE,
+ but once inside, go back to our default of SET. */
+
+ next_code = (code == MEM || code == PLUS || code == MINUS ? MEM
+ : ((code == COMPARE || GET_RTX_CLASS (code) == '<')
+ && XEXP (x, 1) == const0_rtx) ? COMPARE
+ : in_code == COMPARE ? SET : in_code);
+
+ /* Process depending on the code of this operation. If NEW is set
+ non-zero, it will be returned. */
+
+ switch (code)
+ {
+ case ASHIFT:
+ /* Convert shifts by constants into multiplications if inside
+ an address. */
+ if (in_code == MEM && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
+ && INTVAL (XEXP (x, 1)) >= 0)
+ {
+ new = make_compound_operation (XEXP (x, 0), next_code);
+ new = gen_rtx_combine (MULT, mode, new,
+ GEN_INT ((HOST_WIDE_INT) 1
+ << INTVAL (XEXP (x, 1))));
+ }
+ break;
+
+ case AND:
+ /* If the second operand is not a constant, we can't do anything
+ with it. */
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ break;
+
+ /* If the constant is a power of two minus one and the first operand
+ is a logical right shift, make an extraction. */
+ if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0)
+ {
+ new = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
+ new = make_extraction (mode, new, 0, XEXP (XEXP (x, 0), 1), i, 1,
+ 0, in_code == COMPARE);
+ }
+
+ /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
+ else if (GET_CODE (XEXP (x, 0)) == SUBREG
+ && subreg_lowpart_p (XEXP (x, 0))
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
+ && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0)
+ {
+ new = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
+ next_code);
+ new = make_extraction (mode, new, 0,
+ XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
+ 0, in_code == COMPARE);
+ }
+ /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
+ else if ((GET_CODE (XEXP (x, 0)) == XOR
+ || GET_CODE (XEXP (x, 0)) == IOR)
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
+ && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0)
+ {
+ /* Apply the distributive law, and then try to make extractions. */
+ new = gen_rtx_combine (GET_CODE (XEXP (x, 0)), mode,
+ gen_rtx (AND, mode, XEXP (XEXP (x, 0), 0),
+ XEXP (x, 1)),
+ gen_rtx (AND, mode, XEXP (XEXP (x, 0), 1),
+ XEXP (x, 1)));
+ new = make_compound_operation (new, in_code);
+ }
+
+      /* If we have (and (rotate X C) M) and C is larger than the number
+ of bits in M, this is an extraction. */
+
+ else if (GET_CODE (XEXP (x, 0)) == ROTATE
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0
+ && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
+ {
+ new = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
+ new = make_extraction (mode, new,
+ (GET_MODE_BITSIZE (mode)
+ - INTVAL (XEXP (XEXP (x, 0), 1))),
+ NULL_RTX, i, 1, 0, in_code == COMPARE);
+ }
+
+ /* On machines without logical shifts, if the operand of the AND is
+ a logical shift and our mask turns off all the propagated sign
+ bits, we can replace the logical shift with an arithmetic shift. */
+ else if (ashr_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
+ && (lshr_optab->handlers[(int) mode].insn_code
+ == CODE_FOR_nothing)
+ && GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
+ && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
+
+ mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
+ if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
+ SUBST (XEXP (x, 0),
+ gen_rtx_combine (ASHIFTRT, mode,
+ make_compound_operation (XEXP (XEXP (x, 0), 0),
+ next_code),
+ XEXP (XEXP (x, 0), 1)));
+ }
+
+ /* If the constant is one less than a power of two, this might be
+ representable by an extraction even if no shift is present.
+ If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
+ we are in a COMPARE. */
+ else if ((i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0)
+ new = make_extraction (mode,
+ make_compound_operation (XEXP (x, 0),
+ next_code),
+ 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
+
+ /* If we are in a comparison and this is an AND with a power of two,
+ convert this into the appropriate bit extract. */
+ else if (in_code == COMPARE
+ && (i = exact_log2 (INTVAL (XEXP (x, 1)))) >= 0)
+ new = make_extraction (mode,
+ make_compound_operation (XEXP (x, 0),
+ next_code),
+ i, NULL_RTX, 1, 1, 0, 1);
+
+ break;
+
+ case LSHIFTRT:
+ /* If the sign bit is known to be zero, replace this with an
+ arithmetic shift. */
+ if (ashr_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing
+ && lshr_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+	  && (nonzero_bits (XEXP (x, 0), mode)
+	      & ((HOST_WIDE_INT) 1 << (mode_width - 1))) == 0)
+ {
+ new = gen_rtx_combine (ASHIFTRT, mode,
+ make_compound_operation (XEXP (x, 0),
+ next_code),
+ XEXP (x, 1));
+ break;
+ }
+
+ /* ... fall through ... */
+
+ case ASHIFTRT:
+ lhs = XEXP (x, 0);
+ rhs = XEXP (x, 1);
+
+ /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
+ this is a SIGN_EXTRACT. */
+ if (GET_CODE (rhs) == CONST_INT
+ && GET_CODE (lhs) == ASHIFT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+ && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1)))
+ {
+ new = make_compound_operation (XEXP (lhs, 0), next_code);
+ new = make_extraction (mode, new,
+ INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
+ NULL_RTX, mode_width - INTVAL (rhs),
+ code == LSHIFTRT, 0, in_code == COMPARE);
+ }
+
+ /* See if we have operations between an ASHIFTRT and an ASHIFT.
+ If so, try to merge the shifts into a SIGN_EXTEND. We could
+ also do this for some cases of SIGN_EXTRACT, but it doesn't
+ seem worth the effort; the case checked for occurs on Alpha. */
+
+ if (GET_RTX_CLASS (GET_CODE (lhs)) != 'o'
+ && ! (GET_CODE (lhs) == SUBREG
+ && (GET_RTX_CLASS (GET_CODE (SUBREG_REG (lhs))) == 'o'))
+ && GET_CODE (rhs) == CONST_INT
+ && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
+ && (new = extract_left_shift (lhs, INTVAL (rhs))) != 0)
+ new = make_extraction (mode, make_compound_operation (new, next_code),
+ 0, NULL_RTX, mode_width - INTVAL (rhs),
+ code == LSHIFTRT, 0, in_code == COMPARE);
+
+ break;
+
+ case SUBREG:
+ /* Call ourselves recursively on the inner expression. If we are
+ narrowing the object and it has a different RTL code from
+ what it originally did, do this SUBREG as a force_to_mode. */
+
+ tem = make_compound_operation (SUBREG_REG (x), in_code);
+ if (GET_CODE (tem) != GET_CODE (SUBREG_REG (x))
+ && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (tem))
+ && subreg_lowpart_p (x))
+ {
+ rtx newer = force_to_mode (tem, mode,
+ GET_MODE_MASK (mode), NULL_RTX, 0);
+
+ /* If we have something other than a SUBREG, we might have
+	     done an expansion, so rerun ourselves.  */
+ if (GET_CODE (newer) != SUBREG)
+ newer = make_compound_operation (newer, in_code);
+
+ return newer;
+ }
+ }
+
+ if (new)
+ {
+ x = gen_lowpart_for_combine (mode, new);
+ code = GET_CODE (x);
+ }
+
+ /* Now recursively process each operand of this operation. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ if (fmt[i] == 'e')
+ {
+ new = make_compound_operation (XEXP (x, i), next_code);
+ SUBST (XEXP (x, i), new);
+ }
+
+ return x;
+}
+
+/* Given M, see if it is a value that would select a field of bits
+ within an item, but not the entire word. Return -1 if not.
+ Otherwise, return the starting position of the field, where 0 is the
+ low-order bit.
+
+ *PLEN is set to the length of the field. */
+
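+/* For example, M == 0x78 (binary 1111000) returns 3 with *PLEN set
+   to 4, while M == 0x50 (binary 1010000) returns -1 because its set
+   bits are not contiguous.  */
+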
+static int
+get_pos_from_mask (m, plen)
+ unsigned HOST_WIDE_INT m;
+ int *plen;
+{
+ /* Get the bit number of the first 1 bit from the right, -1 if none. */
+ int pos = exact_log2 (m & - m);
+
+ if (pos < 0)
+ return -1;
+
+ /* Now shift off the low-order zero bits and see if we have a power of
+ two minus 1. */
+ *plen = exact_log2 ((m >> pos) + 1);
+
+ if (*plen <= 0)
+ return -1;
+
+ return pos;
+}
+
+/* See if X can be simplified knowing that we will only refer to it in
+ MODE and will only refer to those bits that are nonzero in MASK.
+ If other bits are being computed or if masking operations are done
+ that select a superset of the bits in MASK, they can sometimes be
+ ignored.
+
+ Return a possibly simplified expression, but always convert X to
+ MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
+
+ Also, if REG is non-zero and X is a register equal in value to REG,
+ replace X with REG.
+
+ If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
+ are all off in X. This is used when X will be complemented, by either
+ NOT, NEG, or XOR. */
+
+static rtx
+force_to_mode (x, mode, mask, reg, just_select)
+ rtx x;
+ enum machine_mode mode;
+ unsigned HOST_WIDE_INT mask;
+ rtx reg;
+ int just_select;
+{
+ enum rtx_code code = GET_CODE (x);
+ int next_select = just_select || code == XOR || code == NOT || code == NEG;
+ enum machine_mode op_mode;
+ unsigned HOST_WIDE_INT fuller_mask, nonzero;
+ rtx op0, op1, temp;
+
+ /* If this is a CALL, don't do anything. Some of the code below
+ will do the wrong thing since the mode of a CALL is VOIDmode. */
+ if (code == CALL)
+ return x;
+
+  /* We want to perform the operation in its present mode unless we know
+ that the operation is valid in MODE, in which case we do the operation
+ in MODE. */
+ op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
+ && code_to_optab[(int) code] != 0
+ && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing))
+ ? mode : GET_MODE (x));
+
+ /* It is not valid to do a right-shift in a narrower mode
+ than the one it came in with. */
+ if ((code == LSHIFTRT || code == ASHIFTRT)
+ && GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (GET_MODE (x)))
+ op_mode = GET_MODE (x);
+
+ /* Truncate MASK to fit OP_MODE. */
+ if (op_mode)
+ mask &= GET_MODE_MASK (op_mode);
+
+ /* When we have an arithmetic operation, or a shift whose count we
+     do not know, we need to assume that all bits up to the highest-order
+ bit in MASK will be needed. This is how we form such a mask. */
+ if (op_mode)
+ fuller_mask = (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT
+ ? GET_MODE_MASK (op_mode)
+ : ((HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1)) - 1);
+ else
+ fuller_mask = ~ (HOST_WIDE_INT) 0;
+
+ /* Determine what bits of X are guaranteed to be (non)zero. */
+ nonzero = nonzero_bits (x, mode);
+
+ /* If none of the bits in X are needed, return a zero. */
+ if (! just_select && (nonzero & mask) == 0)
+ return const0_rtx;
+
+ /* If X is a CONST_INT, return a new one. Do this here since the
+ test below will fail. */
+ if (GET_CODE (x) == CONST_INT)
+ {
+ HOST_WIDE_INT cval = INTVAL (x) & mask;
+ int width = GET_MODE_BITSIZE (mode);
+
+      /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
+ number, sign extend it. */
+ if (width > 0 && width < HOST_BITS_PER_WIDE_INT
+ && (cval & ((HOST_WIDE_INT) 1 << (width - 1))) != 0)
+ cval |= (HOST_WIDE_INT) -1 << width;
+
+ return GEN_INT (cval);
+ }
+
+ /* If X is narrower than MODE and we want all the bits in X's mode, just
+ get X in the proper mode. */
+ if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
+ && (GET_MODE_MASK (GET_MODE (x)) & ~ mask) == 0)
+ return gen_lowpart_for_combine (mode, x);
+
+ /* If we aren't changing the mode, X is not a SUBREG, and all zero bits in
+ MASK are already known to be zero in X, we need not do anything. */
+ if (GET_MODE (x) == mode && code != SUBREG && (~ mask & nonzero) == 0)
+ return x;
+
+ switch (code)
+ {
+ case CLOBBER:
+ /* If X is a (clobber (const_int)), return it since we know we are
+ generating something that won't match. */
+ return x;
+
+#if ! BITS_BIG_ENDIAN
+ case USE:
+ /* X is a (use (mem ..)) that was made from a bit-field extraction that
+ spanned the boundary of the MEM. If we are now masking so it is
+ within that boundary, we don't need the USE any more. */
+ if ((mask & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
+ return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select);
+#endif
+
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ x = expand_compound_operation (x);
+ if (GET_CODE (x) != code)
+ return force_to_mode (x, mode, mask, reg, next_select);
+ break;
+
+ case REG:
+ if (reg != 0 && (rtx_equal_p (get_last_value (reg), x)
+ || rtx_equal_p (reg, get_last_value (x))))
+ x = reg;
+ break;
+
+ case SUBREG:
+ if (subreg_lowpart_p (x)
+ /* We can ignore the effect of this SUBREG if it narrows the mode or
+ if the constant masks to zero all the bits the mode doesn't
+ have. */
+ && ((GET_MODE_SIZE (GET_MODE (x))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ || (0 == (mask
+ & GET_MODE_MASK (GET_MODE (x))
+ & ~ GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
+ return force_to_mode (SUBREG_REG (x), mode, mask, reg, next_select);
+ break;
+
+ case AND:
+ /* If this is an AND with a constant, convert it into an AND
+ whose constant is the AND of that constant with MASK. If it
+ remains an AND of MASK, delete it since it is redundant. */
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
+ {
+ x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
+ mask & INTVAL (XEXP (x, 1)));
+
+ /* If X is still an AND, see if it is an AND with a mask that
+ is just some low-order bits. If so, and it is MASK, we don't
+ need it. */
+
+ if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == mask)
+ x = XEXP (x, 0);
+
+ /* If it remains an AND, try making another AND with the bits
+ in the mode mask that aren't in MASK turned on. If the
+ constant in the AND is wide enough, this might make a
+ cheaper constant. */
+
+ if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && GET_MODE_MASK (GET_MODE (x)) != mask)
+ {
+ HOST_WIDE_INT cval = (INTVAL (XEXP (x, 1))
+ | (GET_MODE_MASK (GET_MODE (x)) & ~ mask));
+ int width = GET_MODE_BITSIZE (GET_MODE (x));
+ rtx y;
+
+	    /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
+ number, sign extend it. */
+ if (width > 0 && width < HOST_BITS_PER_WIDE_INT
+ && (cval & ((HOST_WIDE_INT) 1 << (width - 1))) != 0)
+ cval |= (HOST_WIDE_INT) -1 << width;
+
+ y = gen_binary (AND, GET_MODE (x), XEXP (x, 0), GEN_INT (cval));
+ if (rtx_cost (y, SET) < rtx_cost (x, SET))
+ x = y;
+ }
+
+ break;
+ }
+
+ goto binop;
+
+ case PLUS:
+ /* In (and (plus FOO C1) M), if M is a mask that just turns off
+ low-order bits (as in an alignment operation) and FOO is already
+ aligned to that boundary, mask C1 to that boundary as well.
+ This may eliminate that PLUS and, later, the AND. */
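+      /* For example, if X is known to be a multiple of 8,
+	 (and (plus X (const_int 12)) (const_int -8)) lets us mask C1
+	 down to 8, giving (plus X (const_int 8)) and making the AND
+	 redundant.  */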
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && exact_log2 (- mask) >= 0
+ && (nonzero_bits (XEXP (x, 0), mode) & ~ mask) == 0
+ && (INTVAL (XEXP (x, 1)) & ~ mask) != 0)
+ return force_to_mode (plus_constant (XEXP (x, 0),
+ INTVAL (XEXP (x, 1)) & mask),
+ mode, mask, reg, next_select);
+
+ /* ... fall through ... */
+
+ case MINUS:
+ case MULT:
+ /* For PLUS, MINUS and MULT, we need any bits less significant than the
+ most significant bit in MASK since carries from those bits will
+ affect the bits we are interested in. */
+ mask = fuller_mask;
+ goto binop;
+
+ case IOR:
+ case XOR:
+ /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
+ LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
+ operation which may be a bitfield extraction. Ensure that the
+ constant we form is not wider than the mode of X. */
+
+ if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
+ && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && ((INTVAL (XEXP (XEXP (x, 0), 1))
+ + floor_log2 (INTVAL (XEXP (x, 1))))
+ < GET_MODE_BITSIZE (GET_MODE (x)))
+	  && ((INTVAL (XEXP (x, 1))
+	       & ~ nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0))
+ {
+ temp = GEN_INT ((INTVAL (XEXP (x, 1)) & mask)
+ << INTVAL (XEXP (XEXP (x, 0), 1)));
+ temp = gen_binary (GET_CODE (x), GET_MODE (x),
+ XEXP (XEXP (x, 0), 0), temp);
+ x = gen_binary (LSHIFTRT, GET_MODE (x), temp, XEXP (x, 1));
+ return force_to_mode (x, mode, mask, reg, next_select);
+ }
+
+ binop:
+ /* For most binary operations, just propagate into the operation and
+ change the mode if we have an operation of that mode. */
+
+ op0 = gen_lowpart_for_combine (op_mode,
+ force_to_mode (XEXP (x, 0), mode, mask,
+ reg, next_select));
+ op1 = gen_lowpart_for_combine (op_mode,
+ force_to_mode (XEXP (x, 1), mode, mask,
+ reg, next_select));
+
+ /* If OP1 is a CONST_INT and X is an IOR or XOR, clear bits outside
+ MASK since OP1 might have been sign-extended but we never want
+ to turn on extra bits, since combine might have previously relied
+ on them being off. */
+ if (GET_CODE (op1) == CONST_INT && (code == IOR || code == XOR)
+ && (INTVAL (op1) & mask) != 0)
+ op1 = GEN_INT (INTVAL (op1) & mask);
+
+ if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
+ x = gen_binary (code, op_mode, op0, op1);
+ break;
+
+ case ASHIFT:
+ /* For left shifts, do the same, but just for the first operand.
+ However, we cannot do anything with shifts where we cannot
+ guarantee that the counts are smaller than the size of the mode
+ because such a count will have a different meaning in a
+ wider mode. */
+
+ if (! (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (mode))
+ && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
+ && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
+ < (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode))))
+ break;
+
+ /* If the shift count is a constant and we can do arithmetic in
+ the mode of the shift, refine which bits we need. Otherwise, use the
+ conservative form of the mask. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (op_mode)
+ && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
+ mask >>= INTVAL (XEXP (x, 1));
+ else
+ mask = fuller_mask;
+
+ op0 = gen_lowpart_for_combine (op_mode,
+ force_to_mode (XEXP (x, 0), op_mode,
+ mask, reg, next_select));
+
+ if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
+ x = gen_binary (code, op_mode, op0, XEXP (x, 1));
+ break;
+
+ case LSHIFTRT:
+      /* Here we can only do something if the shift count is a constant
+	 that is valid for the host, and we can do arithmetic in
+	 OP_MODE.  */
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
+ && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
+ {
+ rtx inner = XEXP (x, 0);
+
+ /* Select the mask of the bits we need for the shift operand. */
+ mask <<= INTVAL (XEXP (x, 1));
+
+ /* We can only change the mode of the shift if we can do arithmetic
+ in the mode of the shift and MASK is no wider than the width of
+ OP_MODE. */
+ if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT
+ || (mask & ~ GET_MODE_MASK (op_mode)) != 0)
+ op_mode = GET_MODE (x);
+
+ inner = force_to_mode (inner, op_mode, mask, reg, next_select);
+
+ if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
+ x = gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
+ }
+
+ /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
+ shift and AND produces only copies of the sign bit (C2 is one less
+ than a power of two), we can do this with just a shift. */
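+      /* For example, if FOO is a 32-bit value with 28 sign bit copies,
+	 (and (lshiftrt FOO 5) 15) is 0 or 15 according to the sign of
+	 FOO, exactly as (lshiftrt FOO 28) is.  */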
+
+ if (GET_CODE (x) == LSHIFTRT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && ((INTVAL (XEXP (x, 1))
+ + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
+ >= GET_MODE_BITSIZE (GET_MODE (x)))
+ && exact_log2 (mask + 1) >= 0
+ && (num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
+ >= exact_log2 (mask + 1)))
+ x = gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
+ GEN_INT (GET_MODE_BITSIZE (GET_MODE (x))
+ - exact_log2 (mask + 1)));
+ break;
+
+ case ASHIFTRT:
+ /* If we are just looking for the sign bit, we don't need this shift at
+ all, even if it has a variable count. */
+ if (mask == ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (x)) - 1)))
+ return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select);
+
+ /* If this is a shift by a constant, get a mask that contains those bits
+ that are not copies of the sign bit. We then have two cases: If
+ MASK only includes those bits, this can be a logical shift, which may
+ allow simplifications. If MASK is a single-bit field not within
+ those bits, we are requesting a copy of the sign bit and hence can
+ shift the sign bit to the appropriate location. */
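+      /* For example, for (ashiftrt FOO 24) in SImode, a MASK of 0xff
+	 allows a logical shift, while a MASK of just the bit 0x100
+	 requests a copy of the sign bit there, obtained by
+	 (lshiftrt FOO 23).  */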
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ int i = -1;
+
+ nonzero = GET_MODE_MASK (GET_MODE (x));
+ nonzero >>= INTVAL (XEXP (x, 1));
+
+ if ((mask & ~ nonzero) == 0
+ || (i = exact_log2 (mask)) >= 0)
+ {
+ x = simplify_shift_const
+ (x, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
+ i < 0 ? INTVAL (XEXP (x, 1))
+ : GET_MODE_BITSIZE (GET_MODE (x)) - 1 - i);
+
+ if (GET_CODE (x) != ASHIFTRT)
+ return force_to_mode (x, mode, mask, reg, next_select);
+ }
+ }
+
+      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
+ even if the shift count isn't a constant. */
+ if (mask == 1)
+ x = gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0), XEXP (x, 1));
+
+ /* If this is a sign-extension operation that just affects bits
+ we don't care about, remove it. Be sure the call above returned
+ something that is still a shift. */
+
+ if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && (INTVAL (XEXP (x, 1))
+ <= GET_MODE_BITSIZE (GET_MODE (x)) - (floor_log2 (mask) + 1))
+ && GET_CODE (XEXP (x, 0)) == ASHIFT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) == INTVAL (XEXP (x, 1)))
+ return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
+ reg, next_select);
+
+ break;
+
+ case ROTATE:
+ case ROTATERT:
+ /* If the shift count is constant and we can do computations
+ in the mode of X, compute where the bits we care about are.
+ Otherwise, we can't do anything. Don't change the mode of
+ the shift or propagate MODE into the shift, though. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0)
+ {
+ temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
+ GET_MODE (x), GEN_INT (mask),
+ XEXP (x, 1));
+ if (temp && GET_CODE(temp) == CONST_INT)
+ SUBST (XEXP (x, 0),
+ force_to_mode (XEXP (x, 0), GET_MODE (x),
+ INTVAL (temp), reg, next_select));
+ }
+ break;
+
+ case NEG:
+ /* If we just want the low-order bit, the NEG isn't needed since it
+ won't change the low-order bit. */
+ if (mask == 1)
+ return force_to_mode (XEXP (x, 0), mode, mask, reg, just_select);
+
+ /* We need any bits less significant than the most significant bit in
+ MASK since carries from those bits will affect the bits we are
+ interested in. */
+ mask = fuller_mask;
+ goto unop;
+
+ case NOT:
+ /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
+ same as the XOR case above. Ensure that the constant we form is not
+ wider than the mode of X. */
+
+ if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
+ && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
+ < GET_MODE_BITSIZE (GET_MODE (x)))
+ && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ temp = GEN_INT (mask << INTVAL (XEXP (XEXP (x, 0), 1)));
+ temp = gen_binary (XOR, GET_MODE (x), XEXP (XEXP (x, 0), 0), temp);
+ x = gen_binary (LSHIFTRT, GET_MODE (x), temp, XEXP (XEXP (x, 0), 1));
+
+ return force_to_mode (x, mode, mask, reg, next_select);
+ }
+
+ unop:
+ op0 = gen_lowpart_for_combine (op_mode,
+ force_to_mode (XEXP (x, 0), mode, mask,
+ reg, next_select));
+ if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
+ x = gen_unary (code, op_mode, op_mode, op0);
+ break;
+
+ case NE:
+ /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
+ in STORE_FLAG_VALUE and FOO has no bits that might be nonzero not
+ in CONST. */
+      if ((mask & ~ STORE_FLAG_VALUE) == 0 && XEXP (x, 1) == const0_rtx
+ && (nonzero_bits (XEXP (x, 0), mode) & ~ mask) == 0)
+ return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select);
+
+ break;
+
+ case IF_THEN_ELSE:
+ /* We have no way of knowing if the IF_THEN_ELSE can itself be
+ written in a narrower mode. We play it safe and do not do so. */
+
+ SUBST (XEXP (x, 1),
+ gen_lowpart_for_combine (GET_MODE (x),
+ force_to_mode (XEXP (x, 1), mode,
+ mask, reg, next_select)));
+ SUBST (XEXP (x, 2),
+ gen_lowpart_for_combine (GET_MODE (x),
+ force_to_mode (XEXP (x, 2), mode,
+					       mask, reg, next_select)));
+ break;
+ }
+
+ /* Ensure we return a value of the proper mode. */
+ return gen_lowpart_for_combine (mode, x);
+}
+
+/* Return nonzero if X is an expression that has one of two values depending on
+ whether some other value is zero or nonzero. In that case, we return the
+   value that is being tested, *PTRUE is set to the value X takes when the
+   rtx being returned is nonzero, and *PFALSE is set to the other alternative.
+
+ If we return zero, we set *PTRUE and *PFALSE to X. */
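+/* For example, given (if_then_else (ne A (const_int 0)) (const_int 5)
+   (const_int 9)), we return A, set *PTRUE to (const_int 5), and set
+   *PFALSE to (const_int 9).  */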
+
+static rtx
+if_then_else_cond (x, ptrue, pfalse)
+ rtx x;
+ rtx *ptrue, *pfalse;
+{
+ enum machine_mode mode = GET_MODE (x);
+ enum rtx_code code = GET_CODE (x);
+ int size = GET_MODE_BITSIZE (mode);
+ rtx cond0, cond1, true0, true1, false0, false1;
+ unsigned HOST_WIDE_INT nz;
+
+ /* If this is a unary operation whose operand has one of two values, apply
+ our opcode to compute those values. */
+ if (GET_RTX_CLASS (code) == '1'
+ && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
+ {
+ *ptrue = gen_unary (code, mode, GET_MODE (XEXP (x, 0)), true0);
+ *pfalse = gen_unary (code, mode, GET_MODE (XEXP (x, 0)), false0);
+ return cond0;
+ }
+
+ /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
+     make can't possibly match and would suppress other optimizations.  */
+ else if (code == COMPARE)
+ ;
+
+ /* If this is a binary operation, see if either side has only one of two
+ values. If either one does or if both do and they are conditional on
+ the same value, compute the new true and false values. */
+ else if (GET_RTX_CLASS (code) == 'c' || GET_RTX_CLASS (code) == '2'
+ || GET_RTX_CLASS (code) == '<')
+ {
+ cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
+ cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);
+
+ if ((cond0 != 0 || cond1 != 0)
+ && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
+ {
+ *ptrue = gen_binary (code, mode, true0, true1);
+ *pfalse = gen_binary (code, mode, false0, false1);
+ return cond0 ? cond0 : cond1;
+ }
+
+#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
+
+ /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
+ operands is zero when the other is non-zero, and vice-versa. */
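+      /* For example, (plus (mult (eq A B) C) (mult (ne A B) D)) is
+	 C when A == B and D otherwise, so the condition is (eq A B)
+	 and the two values are C and D times const_true_rtx.  */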
+
+ if ((code == PLUS || code == IOR || code == XOR || code == MINUS
+ || code == UMAX)
+ && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
+ {
+ rtx op0 = XEXP (XEXP (x, 0), 1);
+ rtx op1 = XEXP (XEXP (x, 1), 1);
+
+ cond0 = XEXP (XEXP (x, 0), 0);
+ cond1 = XEXP (XEXP (x, 1), 0);
+
+ if (GET_RTX_CLASS (GET_CODE (cond0)) == '<'
+ && GET_RTX_CLASS (GET_CODE (cond1)) == '<'
+ && reversible_comparison_p (cond1)
+ && ((GET_CODE (cond0) == reverse_condition (GET_CODE (cond1))
+ && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
+ && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
+ || ((swap_condition (GET_CODE (cond0))
+ == reverse_condition (GET_CODE (cond1)))
+ && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
+ && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
+ && ! side_effects_p (x))
+ {
+ *ptrue = gen_binary (MULT, mode, op0, const_true_rtx);
+ *pfalse = gen_binary (MULT, mode,
+ (code == MINUS
+ ? gen_unary (NEG, mode, mode, op1) : op1),
+ const_true_rtx);
+ return cond0;
+ }
+ }
+
+      /* Similarly for MULT, AND and UMIN, except that for these the result
+ is always zero. */
+ if ((code == MULT || code == AND || code == UMIN)
+ && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
+ {
+ cond0 = XEXP (XEXP (x, 0), 0);
+ cond1 = XEXP (XEXP (x, 1), 0);
+
+ if (GET_RTX_CLASS (GET_CODE (cond0)) == '<'
+ && GET_RTX_CLASS (GET_CODE (cond1)) == '<'
+ && reversible_comparison_p (cond1)
+ && ((GET_CODE (cond0) == reverse_condition (GET_CODE (cond1))
+ && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
+ && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
+ || ((swap_condition (GET_CODE (cond0))
+ == reverse_condition (GET_CODE (cond1)))
+ && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
+ && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
+ && ! side_effects_p (x))
+ {
+ *ptrue = *pfalse = const0_rtx;
+ return cond0;
+ }
+ }
+#endif
+ }
+
+ else if (code == IF_THEN_ELSE)
+ {
+ /* If we have IF_THEN_ELSE already, extract the condition and
+ canonicalize it if it is NE or EQ. */
+ cond0 = XEXP (x, 0);
+ *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
+ if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
+ return XEXP (cond0, 0);
+ else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
+ {
+ *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
+ return XEXP (cond0, 0);
+ }
+ else
+ return cond0;
+ }
+
+ /* If X is a normal SUBREG with both inner and outer modes integral,
+ we can narrow both the true and false values of the inner expression,
+ if there is a condition. */
+ else if (code == SUBREG && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_INT
+ && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
+ && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
+ &true0, &false0)))
+ {
+ *ptrue = force_to_mode (true0, mode, GET_MODE_MASK (mode), NULL_RTX, 0);
+ *pfalse
+ = force_to_mode (false0, mode, GET_MODE_MASK (mode), NULL_RTX, 0);
+
+ return cond0;
+ }
+
+  /* If X is a constant, this isn't special and will cause confusion
+ if we treat it as such. Likewise if it is equivalent to a constant. */
+ else if (CONSTANT_P (x)
+ || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
+ ;
+
+ /* If X is known to be either 0 or -1, those are the true and
+ false values when testing X. */
+ else if (num_sign_bit_copies (x, mode) == size)
+ {
+ *ptrue = constm1_rtx, *pfalse = const0_rtx;
+ return x;
+ }
+
+ /* Likewise for 0 or a single bit. */
+ else if (exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
+ {
+ *ptrue = GEN_INT (nz), *pfalse = const0_rtx;
+ return x;
+ }
+
+ /* Otherwise fail; show no condition with true and false values the same. */
+ *ptrue = *pfalse = x;
+ return 0;
+}
+
+/* Return the value of expression X given the fact that condition COND
+ is known to be true when applied to REG as its first operand and VAL
+ as its second. X is known to not be shared and so can be modified in
+ place.
+
+ We only handle the simplest cases, and specifically those cases that
+ arise with IF_THEN_ELSE expressions. */
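+/* For example, if COND is GE, REG is R, and VAL is (const_int 0), so
+   that R >= 0 is known, then (smax R (const_int 0)) reduces to R.  */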
+
+static rtx
+known_cond (x, cond, reg, val)
+ rtx x;
+ enum rtx_code cond;
+ rtx reg, val;
+{
+ enum rtx_code code = GET_CODE (x);
+ rtx temp;
+ char *fmt;
+ int i, j;
+
+ if (side_effects_p (x))
+ return x;
+
+ if (cond == EQ && rtx_equal_p (x, reg))
+ return val;
+
+ /* If X is (abs REG) and we know something about REG's relationship
+ with zero, we may be able to simplify this. */
+
+ if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
+ switch (cond)
+ {
+ case GE: case GT: case EQ:
+ return XEXP (x, 0);
+ case LT: case LE:
+ return gen_unary (NEG, GET_MODE (XEXP (x, 0)), GET_MODE (XEXP (x, 0)),
+ XEXP (x, 0));
+ }
+
+ /* The only other cases we handle are MIN, MAX, and comparisons if the
+ operands are the same as REG and VAL. */
+
+ else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == 'c')
+ {
+ if (rtx_equal_p (XEXP (x, 0), val))
+ cond = swap_condition (cond), temp = val, val = reg, reg = temp;
+
+ if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
+ {
+ if (GET_RTX_CLASS (code) == '<')
+ return (comparison_dominates_p (cond, code) ? const_true_rtx
+ : (comparison_dominates_p (cond,
+ reverse_condition (code))
+ ? const0_rtx : x));
+
+ else if (code == SMAX || code == SMIN
+ || code == UMIN || code == UMAX)
+ {
+ int unsignedp = (code == UMIN || code == UMAX);
+
+ if (code == SMAX || code == UMAX)
+ cond = reverse_condition (cond);
+
+ switch (cond)
+ {
+ case GE: case GT:
+ return unsignedp ? x : XEXP (x, 1);
+ case LE: case LT:
+ return unsignedp ? x : XEXP (x, 0);
+ case GEU: case GTU:
+ return unsignedp ? XEXP (x, 1) : x;
+ case LEU: case LTU:
+ return unsignedp ? XEXP (x, 0) : x;
+ }
+ }
+ }
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
+ cond, reg, val));
+ }
+
+ return x;
+}
+
+/* See if X, a SET operation, can be rewritten as a bit-field assignment.
+ Return that assignment if so.
+
+ We only handle the most common cases. */
+
+static rtx
+make_field_assignment (x)
+ rtx x;
+{
+ rtx dest = SET_DEST (x);
+ rtx src = SET_SRC (x);
+ rtx assign;
+ HOST_WIDE_INT c1;
+ int pos, len;
+ rtx other;
+ enum machine_mode mode;
+
+ /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
+ a clear of a one-bit field. We will have changed it to
+ (and (rotate (const_int -2) POS) DEST), so check for that. Also check
+ for a SUBREG. */
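+  /* For example, with POS == 3 the AND clears bit 3 of DEST, which we
+     rewrite as (set (zero_extract DEST 1 3) (const_int 0)).  */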
+
+ if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
+ && GET_CODE (XEXP (XEXP (src, 0), 0)) == CONST_INT
+ && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
+ && (rtx_equal_p (dest, XEXP (src, 1))
+ || rtx_equal_p (dest, get_last_value (XEXP (src, 1)))
+ || rtx_equal_p (get_last_value (dest), XEXP (src, 1))))
+ {
+ assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
+ 1, 1, 1, 0);
+ return gen_rtx (SET, VOIDmode, assign, const0_rtx);
+ }
+
+ else if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
+ && subreg_lowpart_p (XEXP (src, 0))
+ && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
+ && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
+ && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
+ && (rtx_equal_p (dest, XEXP (src, 1))
+ || rtx_equal_p (dest, get_last_value (XEXP (src, 1)))
+ || rtx_equal_p (get_last_value (dest), XEXP (src, 1))))
+ {
+ assign = make_extraction (VOIDmode, dest, 0,
+ XEXP (SUBREG_REG (XEXP (src, 0)), 1),
+ 1, 1, 1, 0);
+ return gen_rtx (SET, VOIDmode, assign, const0_rtx);
+ }
+
+  /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
+ one-bit field. */
+ else if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
+ && XEXP (XEXP (src, 0), 0) == const1_rtx
+ && (rtx_equal_p (dest, XEXP (src, 1))
+ || rtx_equal_p (dest, get_last_value (XEXP (src, 1)))
+ || rtx_equal_p (get_last_value (dest), XEXP (src, 1))))
+ {
+ assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
+ 1, 1, 1, 0);
+ return gen_rtx (SET, VOIDmode, assign, const1_rtx);
+ }
+
+ /* The other case we handle is assignments into a constant-position
+ field. They look like (ior (and DEST C1) OTHER). If C1 represents
+ a mask that has all one bits except for a group of zero bits and
+ OTHER is known to have zeros where C1 has ones, this is such an
+ assignment. Compute the position and length from C1. Shift OTHER
+ to the appropriate position, force it to the required mode, and
+ make the extraction. Check for the AND in both operands. */
+
+ if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == AND
+ && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
+ && (rtx_equal_p (XEXP (XEXP (src, 0), 0), dest)
+ || rtx_equal_p (XEXP (XEXP (src, 0), 0), get_last_value (dest))
+	  || rtx_equal_p (get_last_value (XEXP (XEXP (src, 0), 0)), dest)))
+ c1 = INTVAL (XEXP (XEXP (src, 0), 1)), other = XEXP (src, 1);
+ else if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 1)) == AND
+ && GET_CODE (XEXP (XEXP (src, 1), 1)) == CONST_INT
+ && (rtx_equal_p (XEXP (XEXP (src, 1), 0), dest)
+ || rtx_equal_p (XEXP (XEXP (src, 1), 0), get_last_value (dest))
+ || rtx_equal_p (get_last_value (XEXP (XEXP (src, 1), 0)),
+ dest)))
+ c1 = INTVAL (XEXP (XEXP (src, 1), 1)), other = XEXP (src, 0);
+ else
+ return x;
+
+ pos = get_pos_from_mask (c1 ^ GET_MODE_MASK (GET_MODE (dest)), &len);
+ if (pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (dest))
+ || (GET_MODE_BITSIZE (GET_MODE (other)) <= HOST_BITS_PER_WIDE_INT
+ && (c1 & nonzero_bits (other, GET_MODE (other))) != 0))
+ return x;
+
+ assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
+
+ /* The mode to use for the source is the mode of the assignment, or of
+ what is inside a possible STRICT_LOW_PART. */
+ mode = (GET_CODE (assign) == STRICT_LOW_PART
+ ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
+
+ /* Shift OTHER right POS places and make it the source, restricting it
+ to the proper length and mode. */
+
+ src = force_to_mode (simplify_shift_const (NULL_RTX, LSHIFTRT,
+ GET_MODE (src), other, pos),
+ mode,
+ GET_MODE_BITSIZE (mode) >= HOST_BITS_PER_WIDE_INT
+ ? GET_MODE_MASK (mode)
+ : ((HOST_WIDE_INT) 1 << len) - 1,
+ dest, 0);
+
+ return gen_rtx_combine (SET, VOIDmode, assign, src);
+}
+
+/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
+ if so. */
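+/* For example, (ior (and A C) (and B C)) becomes (and (ior A B) C).  */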
+
+static rtx
+apply_distributive_law (x)
+ rtx x;
+{
+ enum rtx_code code = GET_CODE (x);
+ rtx lhs, rhs, other;
+ rtx tem;
+ enum rtx_code inner_code;
+
+ /* Distributivity is not true for floating point.
+ It can change the value. So don't do it.
+ -- rms and moshier@world.std.com. */
+ if (FLOAT_MODE_P (GET_MODE (x)))
+ return x;
+
+ /* The outer operation can only be one of the following: */
+ if (code != IOR && code != AND && code != XOR
+ && code != PLUS && code != MINUS)
+ return x;
+
+ lhs = XEXP (x, 0), rhs = XEXP (x, 1);
+
+ /* If either operand is a primitive we can't do anything, so get out fast. */
+ if (GET_RTX_CLASS (GET_CODE (lhs)) == 'o'
+ || GET_RTX_CLASS (GET_CODE (rhs)) == 'o')
+ return x;
+
+ lhs = expand_compound_operation (lhs);
+ rhs = expand_compound_operation (rhs);
+ inner_code = GET_CODE (lhs);
+ if (inner_code != GET_CODE (rhs))
+ return x;
+
+ /* See if the inner and outer operations distribute. */
+ switch (inner_code)
+ {
+ case LSHIFTRT:
+ case ASHIFTRT:
+ case AND:
+ case IOR:
+ /* These all distribute except over PLUS. */
+ if (code == PLUS || code == MINUS)
+ return x;
+ break;
+
+ case MULT:
+ if (code != PLUS && code != MINUS)
+ return x;
+ break;
+
+ case ASHIFT:
+ /* This is also a multiply, so it distributes over everything. */
+ break;
+
+ case SUBREG:
+      /* Non-paradoxical SUBREGs distribute over all operations, provided
+ the inner modes and word numbers are the same, this is an extraction
+ of a low-order part, we don't convert an fp operation to int or
+ vice versa, and we would not be converting a single-word
+ operation into a multi-word operation. The latter test is not
+ required, but it prevents generating unneeded multi-word operations.
+ Some of the previous tests are redundant given the latter test, but
+ are retained because they are required for correctness.
+
+ We produce the result slightly differently in this case. */
+
+ if (GET_MODE (SUBREG_REG (lhs)) != GET_MODE (SUBREG_REG (rhs))
+ || SUBREG_WORD (lhs) != SUBREG_WORD (rhs)
+ || ! subreg_lowpart_p (lhs)
+ || (GET_MODE_CLASS (GET_MODE (lhs))
+ != GET_MODE_CLASS (GET_MODE (SUBREG_REG (lhs))))
+ || (GET_MODE_SIZE (GET_MODE (lhs))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))))
+ || GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))) > UNITS_PER_WORD)
+ return x;
+
+ tem = gen_binary (code, GET_MODE (SUBREG_REG (lhs)),
+ SUBREG_REG (lhs), SUBREG_REG (rhs));
+ return gen_lowpart_for_combine (GET_MODE (x), tem);
+
+ default:
+ return x;
+ }
+
+ /* Set LHS and RHS to the inner operands (A and B in the example
+ above) and set OTHER to the common operand (C in the example).
+     There is only one way to do this unless the inner operation is
+ commutative. */
+ if (GET_RTX_CLASS (inner_code) == 'c'
+ && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
+ other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
+ else if (GET_RTX_CLASS (inner_code) == 'c'
+ && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
+ other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
+ else if (GET_RTX_CLASS (inner_code) == 'c'
+ && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
+ other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
+ else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
+ other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
+ else
+ return x;
+
+ /* Form the new inner operation, seeing if it simplifies first. */
+ tem = gen_binary (code, GET_MODE (x), lhs, rhs);
+
+ /* There is one exception to the general way of distributing:
+     (a | b) ^ (a | c) -> (~a) & (b ^ c)  */
+ if (code == XOR && inner_code == IOR)
+ {
+ inner_code = AND;
+ other = gen_unary (NOT, GET_MODE (x), GET_MODE (x), other);
+ }
+
+  /* We may be able to continue distributing the result, so call
+ ourselves recursively on the inner operation before forming the
+ outer operation, which we return. */
+ return gen_binary (inner_code, GET_MODE (x),
+ apply_distributive_law (tem), other);
+}
+
+/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
+ in MODE.
+
+ Return an equivalent form, if different from X. Otherwise, return X. If
+ X is zero, we are to always construct the equivalent form. */
+
+static rtx
+simplify_and_const_int (x, mode, varop, constop)
+ rtx x;
+ enum machine_mode mode;
+ rtx varop;
+ unsigned HOST_WIDE_INT constop;
+{
+ unsigned HOST_WIDE_INT nonzero;
+ int i;
+
+ /* Simplify VAROP knowing that we will be only looking at some of the
+ bits in it. */
+ varop = force_to_mode (varop, mode, constop, NULL_RTX, 0);
+
+ /* If VAROP is a CLOBBER, we will fail so return it; if it is a
+ CONST_INT, we are done. */
+ if (GET_CODE (varop) == CLOBBER || GET_CODE (varop) == CONST_INT)
+ return varop;
+
+ /* See what bits may be nonzero in VAROP. Unlike the general case of
+ a call to nonzero_bits, here we don't care about bits outside
+ MODE. */
+
+ nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
+
+ /* Turn off all bits in the constant that are known to already be zero.
+ Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
+ which is tested below. */
+
+ constop &= nonzero;
+
+ /* If we don't have any bits left, return zero. */
+ if (constop == 0)
+ return const0_rtx;
+
+ /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
+     a power of two, we can replace this with an ASHIFT.  */
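+  /* For example, if X is known to be 0 or 1, (and (neg X) 8) is 0 or 8,
+     which is (ashift X 3), since (neg X) is either 0 or all ones.  */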
+ if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
+ && (i = exact_log2 (constop)) >= 0)
+ return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
+
+ /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
+ or XOR, then try to apply the distributive law. This may eliminate
+ operations if either branch can be simplified because of the AND.
+ It may also make some cases more complex, but those cases probably
+ won't match a pattern either with or without this. */
+
+ if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
+ return
+ gen_lowpart_for_combine
+ (mode,
+ apply_distributive_law
+ (gen_binary (GET_CODE (varop), GET_MODE (varop),
+ simplify_and_const_int (NULL_RTX, GET_MODE (varop),
+ XEXP (varop, 0), constop),
+ simplify_and_const_int (NULL_RTX, GET_MODE (varop),
+ XEXP (varop, 1), constop))));
+
+ /* Get VAROP in MODE. Try to get a SUBREG if not. Don't make a new SUBREG
+ if we already had one (just check for the simplest cases). */
+ if (x && GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_MODE (XEXP (x, 0)) == mode
+ && SUBREG_REG (XEXP (x, 0)) == varop)
+ varop = XEXP (x, 0);
+ else
+ varop = gen_lowpart_for_combine (mode, varop);
+
+ /* If we can't make the SUBREG, try to return what we were given. */
+ if (GET_CODE (varop) == CLOBBER)
+ return x ? x : varop;
+
+ /* If we are only masking insignificant bits, return VAROP. */
+ if (constop == nonzero)
+ x = varop;
+
+ /* Otherwise, return an AND. See how much, if any, of X we can use. */
+ else if (x == 0 || GET_CODE (x) != AND || GET_MODE (x) != mode)
+ x = gen_binary (AND, mode, varop, GEN_INT (constop));
+
+ else
+ {
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT
+ || INTVAL (XEXP (x, 1)) != constop)
+ SUBST (XEXP (x, 1), GEN_INT (constop));
+
+ SUBST (XEXP (x, 0), varop);
+ }
+
+ return x;
+}
+
+/* Given an expression, X, compute which bits in X can be non-zero.
+ We don't care about bits outside of those defined in MODE.
+
+   For most X this is simply GET_MODE_MASK (MODE), but if X is
+ a shift, AND, or zero_extract, we can do better. */
+
+static unsigned HOST_WIDE_INT
+nonzero_bits (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
+ unsigned HOST_WIDE_INT inner_nz;
+ enum rtx_code code;
+ int mode_width = GET_MODE_BITSIZE (mode);
+ rtx tem;
+
+ /* For floating-point values, assume all bits are needed. */
+ if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode))
+ return nonzero;
+
+ /* If X is wider than MODE, use its mode instead. */
+ if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
+ {
+ mode = GET_MODE (x);
+ nonzero = GET_MODE_MASK (mode);
+ mode_width = GET_MODE_BITSIZE (mode);
+ }
+
+ if (mode_width > HOST_BITS_PER_WIDE_INT)
+ /* Our only callers in this case look for single bit values. So
+ just return the mode mask. Those tests will then be false. */
+ return nonzero;
+
+#ifndef WORD_REGISTER_OPERATIONS
+ /* If MODE is wider than X, but both are a single word for both the host
+ and target machines, we can compute this from which bits of the
+ object might be nonzero in its own mode, taking into account the fact
+ that on many CISC machines, accessing an object in a wider mode
+ causes the high-order bits to become undefined. So they are
+ not known to be zero. */
+
+ if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
+ && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
+ && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
+ {
+ nonzero &= nonzero_bits (x, GET_MODE (x));
+ nonzero |= GET_MODE_MASK (mode) & ~ GET_MODE_MASK (GET_MODE (x));
+ return nonzero;
+ }
+#endif
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case REG:
+#ifdef STACK_BOUNDARY
+ /* If this is the stack pointer, we may know something about its
+ alignment. If PUSH_ROUNDING is defined, it is possible for the
+ stack to be momentarily aligned only to that amount, so we pick
+ the least alignment. */
+
+ if (x == stack_pointer_rtx)
+ {
+ int sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
+
+#ifdef PUSH_ROUNDING
+ sp_alignment = MIN (PUSH_ROUNDING (1), sp_alignment);
+#endif
+
+ return nonzero & ~ (sp_alignment - 1);
+ }
+#endif
+
+ /* If X is a register whose nonzero bits value is current, use it.
+ Otherwise, if X is a register whose value we can find, use that
+ value. Otherwise, use the previously-computed global nonzero bits
+ for this register. */
+
+ if (reg_last_set_value[REGNO (x)] != 0
+ && reg_last_set_mode[REGNO (x)] == mode
+ && (reg_n_sets[REGNO (x)] == 1
+ || reg_last_set_label[REGNO (x)] == label_tick)
+ && INSN_CUID (reg_last_set[REGNO (x)]) < subst_low_cuid)
+ return reg_last_set_nonzero_bits[REGNO (x)];
+
+ tem = get_last_value (x);
+
+ if (tem)
+ {
+#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
+ /* If X is narrower than MODE and TEM is a non-negative
+ constant that would appear negative in the mode of X,
+ sign-extend it for use in reg_nonzero_bits because some
+ machines (maybe most) will actually do the sign-extension
+ and this is the conservative approach.
+
+ ??? For 2.5, try to tighten up the MD files in this regard
+ instead of this kludge. */
+
+ if (GET_MODE_BITSIZE (GET_MODE (x)) < mode_width
+ && GET_CODE (tem) == CONST_INT
+ && INTVAL (tem) > 0
+ && 0 != (INTVAL (tem)
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
+ tem = GEN_INT (INTVAL (tem)
+ | ((HOST_WIDE_INT) (-1)
+ << GET_MODE_BITSIZE (GET_MODE (x))));
+#endif
+ return nonzero_bits (tem, mode);
+ }
+ else if (nonzero_sign_valid && reg_nonzero_bits[REGNO (x)])
+ return reg_nonzero_bits[REGNO (x)] & nonzero;
+ else
+ return nonzero;
+
+ case CONST_INT:
+#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
+ /* If X is negative in MODE, sign-extend the value. */
+ if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD
+ && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1))))
+ return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width));
+#endif
+
+ return INTVAL (x);
+
+ case MEM:
+#ifdef LOAD_EXTEND_OP
+ /* In many, if not most, RISC machines, reading a byte from memory
+ zeros the rest of the register. Noticing that fact saves a lot
+ of extra zero-extends. */
+ if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
+ nonzero &= GET_MODE_MASK (GET_MODE (x));
+#endif
+ break;
+
+ case EQ: case NE:
+ case GT: case GTU:
+ case LT: case LTU:
+ case GE: case GEU:
+ case LE: case LEU:
+
+ /* If this produces an integer result, we know which bits are set.
+ Code here used to clear bits outside the mode of X, but that is
+ now done above. */
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT)
+ nonzero = STORE_FLAG_VALUE;
+ break;
+
+ case NEG:
+ if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
+ == GET_MODE_BITSIZE (GET_MODE (x)))
+ nonzero = 1;
+
+ if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
+ nonzero |= (GET_MODE_MASK (mode) & ~ GET_MODE_MASK (GET_MODE (x)));
+ break;
+
+ case ABS:
+ if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
+ == GET_MODE_BITSIZE (GET_MODE (x)))
+ nonzero = 1;
+ break;
+
+ case TRUNCATE:
+ nonzero &= (nonzero_bits (XEXP (x, 0), mode) & GET_MODE_MASK (mode));
+ break;
+
+ case ZERO_EXTEND:
+ nonzero &= nonzero_bits (XEXP (x, 0), mode);
+ if (GET_MODE (XEXP (x, 0)) != VOIDmode)
+ nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
+ break;
+
+ case SIGN_EXTEND:
+ /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
+	 Otherwise, show that all the bits in the outer mode but not in
+	 the inner mode may be non-zero.  */
+ inner_nz = nonzero_bits (XEXP (x, 0), mode);
+ if (GET_MODE (XEXP (x, 0)) != VOIDmode)
+ {
+ inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
+ if (inner_nz &
+ (((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
+ inner_nz |= (GET_MODE_MASK (mode)
+ & ~ GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
+ }
+
+ nonzero &= inner_nz;
+ break;
+
+ case AND:
+ nonzero &= (nonzero_bits (XEXP (x, 0), mode)
+ & nonzero_bits (XEXP (x, 1), mode));
+ break;
+
+ case XOR: case IOR:
+ case UMIN: case UMAX: case SMIN: case SMAX:
+ nonzero &= (nonzero_bits (XEXP (x, 0), mode)
+ | nonzero_bits (XEXP (x, 1), mode));
+ break;
+
+ case PLUS: case MINUS:
+ case MULT:
+ case DIV: case UDIV:
+ case MOD: case UMOD:
+ /* We can apply the rules of arithmetic to compute the number of
+ high- and low-order zero bits of these operations. We start by
+ computing the width (position of the highest-order non-zero bit)
+ and the number of low-order zero bits for each value. */
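+      /* For example, if the two operands of a PLUS have nonzero bits
+	 0xf and 0x7 (widths 4 and 3), the sum has width at most 5, so
+	 its nonzero bits lie within 0x1f.  */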
+ {
+ unsigned HOST_WIDE_INT nz0 = nonzero_bits (XEXP (x, 0), mode);
+ unsigned HOST_WIDE_INT nz1 = nonzero_bits (XEXP (x, 1), mode);
+ int width0 = floor_log2 (nz0) + 1;
+ int width1 = floor_log2 (nz1) + 1;
+ int low0 = floor_log2 (nz0 & -nz0);
+ int low1 = floor_log2 (nz1 & -nz1);
+ int op0_maybe_minusp = (nz0 & ((HOST_WIDE_INT) 1 << (mode_width - 1)));
+ int op1_maybe_minusp = (nz1 & ((HOST_WIDE_INT) 1 << (mode_width - 1)));
+ int result_width = mode_width;
+ int result_low = 0;
+
+ switch (code)
+ {
+ case PLUS:
+ result_width = MAX (width0, width1) + 1;
+ result_low = MIN (low0, low1);
+ break;
+ case MINUS:
+ result_low = MIN (low0, low1);
+ break;
+ case MULT:
+ result_width = width0 + width1;
+ result_low = low0 + low1;
+ break;
+ case DIV:
+ if (! op0_maybe_minusp && ! op1_maybe_minusp)
+ result_width = width0;
+ break;
+ case UDIV:
+ result_width = width0;
+ break;
+ case MOD:
+ if (! op0_maybe_minusp && ! op1_maybe_minusp)
+ result_width = MIN (width0, width1);
+ result_low = MIN (low0, low1);
+ break;
+ case UMOD:
+ result_width = MIN (width0, width1);
+ result_low = MIN (low0, low1);
+ break;
+ }
+
+ if (result_width < mode_width)
+ nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;
+
+ if (result_low > 0)
+ nonzero &= ~ (((HOST_WIDE_INT) 1 << result_low) - 1);
+ }
+ break;
+
+ case ZERO_EXTRACT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
+ nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
+ break;
+
+ case SUBREG:
+ /* If this is a SUBREG formed for a promoted variable that has
+ been zero-extended, we know that at least the high-order bits
+ are zero, though others might be too. */
+
+ if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
+ nonzero = (GET_MODE_MASK (GET_MODE (x))
+ & nonzero_bits (SUBREG_REG (x), GET_MODE (x)));
+
+ /* If the inner mode is a single word for both the host and target
+ machines, we can compute this from which bits of the inner
+ object might be nonzero. */
+ if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
+ && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
+ <= HOST_BITS_PER_WIDE_INT))
+ {
+ nonzero &= nonzero_bits (SUBREG_REG (x), mode);
+
+#ifndef WORD_REGISTER_OPERATIONS
+ /* On many CISC machines, accessing an object in a wider mode
+ causes the high-order bits to become undefined. So they are
+ not known to be zero. */
+ if (GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ nonzero |= (GET_MODE_MASK (GET_MODE (x))
+ & ~ GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
+#endif
+ }
+ break;
+
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ASHIFT:
+ case ROTATE:
+ /* The nonzero bits are in two classes: any bits within MODE
+ that aren't in GET_MODE (x) are always significant. The rest of the
+ nonzero bits are those that are significant in the operand of
+ the shift when shifted the appropriate number of bits. This
+ shows that high-order bits are cleared by the right shift and
+ low-order bits by left shifts. */
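+      /* For example, if the operand of (lshiftrt X 4) has nonzero bits
+	 0xff00, the result has nonzero bits 0xff0.  */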
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ enum machine_mode inner_mode = GET_MODE (x);
+ int width = GET_MODE_BITSIZE (inner_mode);
+ int count = INTVAL (XEXP (x, 1));
+ unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
+ unsigned HOST_WIDE_INT op_nonzero = nonzero_bits (XEXP (x, 0), mode);
+ unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
+ unsigned HOST_WIDE_INT outer = 0;
+
+ if (mode_width > width)
+ outer = (op_nonzero & nonzero & ~ mode_mask);
+
+ if (code == LSHIFTRT)
+ inner >>= count;
+ else if (code == ASHIFTRT)
+ {
+ inner >>= count;
+
+ /* If the sign bit may have been nonzero before the shift, we
+ need to mark all the places it could have been copied to
+ by the shift as possibly nonzero. */
+ if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count)))
+ inner |= (((HOST_WIDE_INT) 1 << count) - 1) << (width - count);
+ }
+ else if (code == ASHIFT)
+ inner <<= count;
+ else
+ inner = ((inner << (count % width)
+ | (inner >> (width - (count % width)))) & mode_mask);
+
+ nonzero &= (outer | inner);
+ }
+ break;
+
+ case FFS:
+ /* This is at most the number of bits in the mode. */
+ nonzero = ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width) + 1)) - 1;
+ break;
+
+ case IF_THEN_ELSE:
+ nonzero &= (nonzero_bits (XEXP (x, 1), mode)
+ | nonzero_bits (XEXP (x, 2), mode));
+ break;
+ }
+
+ return nonzero;
+}
+
+/* Return the number of bits at the high-order end of X that are known to
+ be equal to the sign bit. X will be used in mode MODE; if MODE is
+ VOIDmode, X will be used in its own mode. The returned value will always
+ be between 1 and the number of bits in MODE. */
+
+static int
+num_sign_bit_copies (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (x);
+ int bitwidth;
+ int num0, num1, result;
+ unsigned HOST_WIDE_INT nonzero;
+ rtx tem;
+
+ /* If we weren't given a mode, use the mode of X. If the mode is still
+ VOIDmode, we don't know anything. Likewise if one of the modes is
+ floating-point. */
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (x);
+
+ if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x)))
+ return 1;
+
+ bitwidth = GET_MODE_BITSIZE (mode);
+
+ /* For a smaller object, just ignore the high bits. */
+ if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
+ return MAX (1, (num_sign_bit_copies (x, GET_MODE (x))
+ - (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth)));
+
+#ifndef WORD_REGISTER_OPERATIONS
+ /* If this machine does not do all register operations on the entire
+ register and MODE is wider than the mode of X, we can say nothing
+ at all about the high-order bits. */
+ if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
+ return 1;
+#endif
+
+ switch (code)
+ {
+ case REG:
+
+ if (reg_last_set_value[REGNO (x)] != 0
+ && reg_last_set_mode[REGNO (x)] == mode
+ && (reg_n_sets[REGNO (x)] == 1
+ || reg_last_set_label[REGNO (x)] == label_tick)
+ && INSN_CUID (reg_last_set[REGNO (x)]) < subst_low_cuid)
+ return reg_last_set_sign_bit_copies[REGNO (x)];
+
+ tem = get_last_value (x);
+ if (tem != 0)
+ return num_sign_bit_copies (tem, mode);
+
+ if (nonzero_sign_valid && reg_sign_bit_copies[REGNO (x)] != 0)
+ return reg_sign_bit_copies[REGNO (x)];
+ break;
+
+ case MEM:
+#ifdef LOAD_EXTEND_OP
+ /* Some RISC machines sign-extend all loads of smaller than a word. */
+ if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
+ return MAX (1, bitwidth - GET_MODE_BITSIZE (GET_MODE (x)) + 1);
+#endif
+ break;
+
+ case CONST_INT:
+ /* If the constant is negative, take its 1's complement and remask.
+ Then see how many zero bits we have. */
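+      /* For example, the QImode constant -4 is 11111100; its complement
+	 00000011 has floor_log2 of 1, giving 8 - 1 - 1 == 6 copies
+	 (bits 7 through 2 all equal the sign bit).  */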
+ nonzero = INTVAL (x) & GET_MODE_MASK (mode);
+ if (bitwidth <= HOST_BITS_PER_WIDE_INT
+ && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+ nonzero = (~ nonzero) & GET_MODE_MASK (mode);
+
+ return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
+
+ case SUBREG:
+ /* If this is a SUBREG for a promoted object that is sign-extended
+     and we are looking at it in a wider mode, we know that at least
+     the high-order bits are sign bit copies.  */
+
+ if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
+ return MAX (bitwidth - GET_MODE_BITSIZE (GET_MODE (x)) + 1,
+ num_sign_bit_copies (SUBREG_REG (x), mode));
+
+ /* For a smaller object, just ignore the high bits. */
+ if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
+ {
+ num0 = num_sign_bit_copies (SUBREG_REG (x), VOIDmode);
+ return MAX (1, (num0
+ - (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
+ - bitwidth)));
+ }
+
+#ifdef WORD_REGISTER_OPERATIONS
+ /* For paradoxical SUBREGs on machines where all register operations
+ affect the entire register, just look inside. Note that we are
+ passing MODE to the recursive call, so the number of sign bit copies
+ will remain relative to that mode, not the inner mode. */
+
+ if (GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ return num_sign_bit_copies (SUBREG_REG (x), mode);
+#endif
+ break;
+
+ case SIGN_EXTRACT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return MAX (1, bitwidth - INTVAL (XEXP (x, 1)));
+ break;
+
+ case SIGN_EXTEND:
+ return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ + num_sign_bit_copies (XEXP (x, 0), VOIDmode));
+
+ case TRUNCATE:
+ /* For a smaller object, just ignore the high bits. */
+ num0 = num_sign_bit_copies (XEXP (x, 0), VOIDmode);
+ return MAX (1, (num0 - (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ - bitwidth)));
+
+ case NOT:
+ return num_sign_bit_copies (XEXP (x, 0), mode);
+
+ case ROTATE: case ROTATERT:
+ /* If we are rotating left by a number of bits less than the number
+ of sign bit copies, we can just subtract that amount from the
+ number. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0 && INTVAL (XEXP (x, 1)) < bitwidth)
+ {
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
+ : bitwidth - INTVAL (XEXP (x, 1))));
+ }
+ break;
+
+ case NEG:
+ /* In general, this subtracts one sign bit copy. But if the value
+ is known to be positive, the number of sign bit copies is the
+ same as that of the input. Finally, if the input has just one bit
+ that might be nonzero, all the bits are copies of the sign bit. */
+ nonzero = nonzero_bits (XEXP (x, 0), mode);
+ if (nonzero == 1)
+ return bitwidth;
+
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ if (num0 > 1
+ && bitwidth <= HOST_BITS_PER_WIDE_INT
+ && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
+ num0--;
+
+ return num0;
+
+ case IOR: case AND: case XOR:
+ case SMIN: case SMAX: case UMIN: case UMAX:
+ /* Logical operations will preserve the number of sign-bit copies.
+ MIN and MAX operations always return one of the operands. */
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ num1 = num_sign_bit_copies (XEXP (x, 1), mode);
+ return MIN (num0, num1);
+
+ case PLUS: case MINUS:
+ /* For addition and subtraction, we can have a 1-bit carry. However,
+ if we are subtracting 1 from a positive number, there will not
+ be such a carry. Furthermore, if the positive number is known to
+ be 0 or 1, we know the result is either -1 or 0. */
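+      /* For example, if X is known to be 0 or 1, (plus X -1) is either
+	 0 or -1, so every bit is a copy of the sign bit.  */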
+
+ if (code == PLUS && XEXP (x, 1) == constm1_rtx
+ && bitwidth <= HOST_BITS_PER_WIDE_INT)
+ {
+ nonzero = nonzero_bits (XEXP (x, 0), mode);
+ if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
+ return (nonzero == 1 || nonzero == 0 ? bitwidth
+ : bitwidth - floor_log2 (nonzero) - 1);
+ }
+
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ num1 = num_sign_bit_copies (XEXP (x, 1), mode);
+ return MAX (1, MIN (num0, num1) - 1);
+
+ case MULT:
+ /* The number of bits of the product is the sum of the number of
+       bits of both terms.  However, unless one of the terms is known
+ to be positive, we must allow for an additional bit since negating
+ a negative number can remove one sign bit copy. */
+
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ num1 = num_sign_bit_copies (XEXP (x, 1), mode);
+
+ result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
+ if (result > 0
+ && bitwidth <= HOST_BITS_PER_WIDE_INT
+ && ((nonzero_bits (XEXP (x, 0), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+	  && ((nonzero_bits (XEXP (x, 1), mode)
+	       & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
+ result--;
+
+ return MAX (1, result);
+
+ case UDIV:
+ /* The result must be <= the first operand. */
+ return num_sign_bit_copies (XEXP (x, 0), mode);
+
+ case UMOD:
+      /* The result must be <= the second operand.  */
+ return num_sign_bit_copies (XEXP (x, 1), mode);
+
+ case DIV:
+ /* Similar to unsigned division, except that we have to worry about
+ the case where the divisor is negative, in which case we have
+ to add 1. */
+ result = num_sign_bit_copies (XEXP (x, 0), mode);
+ if (result > 1
+ && bitwidth <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (XEXP (x, 1), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+	result--;
+
+ return result;
+
+ case MOD:
+ result = num_sign_bit_copies (XEXP (x, 1), mode);
+ if (result > 1
+ && bitwidth <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (XEXP (x, 1), mode)
+ & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
+	result--;
+
+ return result;
+
+ case ASHIFTRT:
+      /* A right shift by a constant increases the number of bits equal
+	 to the sign bit by the shift count.  */
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) > 0)
+ num0 = MIN (bitwidth, num0 + INTVAL (XEXP (x, 1)));
+
+ return num0;
+
+ case ASHIFT:
+ /* Left shifts destroy copies. */
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT
+ || INTVAL (XEXP (x, 1)) < 0
+ || INTVAL (XEXP (x, 1)) >= bitwidth)
+ return 1;
+
+ num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+ return MAX (1, num0 - INTVAL (XEXP (x, 1)));
+
+ case IF_THEN_ELSE:
+ num0 = num_sign_bit_copies (XEXP (x, 1), mode);
+ num1 = num_sign_bit_copies (XEXP (x, 2), mode);
+ return MIN (num0, num1);
+
+#if STORE_FLAG_VALUE == -1
+ case EQ: case NE: case GE: case GT: case LE: case LT:
+ case GEU: case GTU: case LEU: case LTU:
+ return bitwidth;
+#endif
+ }
+
+ /* If we haven't been able to figure it out by one of the above rules,
+ see if some of the high-order bits are known to be zero. If so,
+ count those bits and return one less than that amount. If we can't
+ safely compute the mask for this mode, always return BITWIDTH. */
+
+ if (bitwidth > HOST_BITS_PER_WIDE_INT)
+ return 1;
+
+ nonzero = nonzero_bits (x, mode);
+ return (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))
+ ? 1 : bitwidth - floor_log2 (nonzero) - 1);
+}
+
+/* Return the number of "extended" bits there are in X, when interpreted
+ as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
+ unsigned quantities, this is the number of high-order zero bits.
+ For signed quantities, this is the number of copies of the sign bit
+   minus 1.  In both cases, this function returns the number of "spare"
+ bits. For example, if two quantities for which this function returns
+ at least 1 are added, the addition is known not to overflow.
+
+ This function will always return 0 unless called during combine, which
+ implies that it must be called from a define_split. */
+
+int
+extended_count (x, mode, unsignedp)
+ rtx x;
+ enum machine_mode mode;
+ int unsignedp;
+{
+ if (nonzero_sign_valid == 0)
+ return 0;
+
+ return (unsignedp
+ ? (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	     ? (GET_MODE_BITSIZE (mode) - 1
+		- floor_log2 (nonzero_bits (x, mode)))
+	     : 0)
+ : num_sign_bit_copies (x, mode) - 1);
+}
+
+/* This function is called from `simplify_shift_const' to merge two
+ outer operations. Specifically, we have already found that we need
+ to perform operation *POP0 with constant *PCONST0 at the outermost
+ position. We would now like to also perform OP1 with constant CONST1
+ (with *POP0 being done last).
+
+ Return 1 if we can do the operation and update *POP0 and *PCONST0 with
+ the resulting operation. *PCOMP_P is set to 1 if we would need to
+ complement the innermost operand, otherwise it is unchanged.
+
+ MODE is the mode in which the operation will be done. No bits outside
+ the width of this mode matter. It is assumed that the width of this mode
+ is smaller than or equal to HOST_BITS_PER_WIDE_INT.
+
+   If *POP0 or OP1 is NIL, it means no operation is required.  Only NEG, PLUS,
+ IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
+ result is simply *PCONST0.
+
+ If the resulting operation cannot be expressed as one operation, we
+ return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
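+/* For example, if *POP0 is XOR with *PCONST0 == C and OP1 is XOR with
+   CONST1 == D, the merged result is a single XOR with constant C ^ D.  */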
+
+static int
+merge_outer_ops (pop0, pconst0, op1, const1, mode, pcomp_p)
+ enum rtx_code *pop0;
+ HOST_WIDE_INT *pconst0;
+ enum rtx_code op1;
+ HOST_WIDE_INT const1;
+ enum machine_mode mode;
+ int *pcomp_p;
+{
+ enum rtx_code op0 = *pop0;
+ HOST_WIDE_INT const0 = *pconst0;
+
+ const0 &= GET_MODE_MASK (mode);
+ const1 &= GET_MODE_MASK (mode);
+
+ /* If OP0 is an AND, clear unimportant bits in CONST1. */
+ if (op0 == AND)
+ const1 &= const0;
+
+ /* If OP0 or OP1 is NIL, this is easy. Similarly if they are the same or
+ if OP0 is SET. */
+
+ if (op1 == NIL || op0 == SET)
+ return 1;
+
+ else if (op0 == NIL)
+ op0 = op1, const0 = const1;
+
+ else if (op0 == op1)
+ {
+ switch (op0)
+ {
+ case AND:
+ const0 &= const1;
+ break;
+ case IOR:
+ const0 |= const1;
+ break;
+ case XOR:
+ const0 ^= const1;
+ break;
+ case PLUS:
+ const0 += const1;
+ break;
+ case NEG:
+ op0 = NIL;
+ break;
+ }
+ }
+
+ /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
+ else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
+ return 0;
+
+ /* If the two constants aren't the same, we can't do anything. The
+ remaining six cases can all be done. */
+ else if (const0 != const1)
+ return 0;
+
+ else
+ switch (op0)
+ {
+ case IOR:
+ if (op1 == AND)
+ /* (a & b) | b == b */
+ op0 = SET;
+ else /* op1 == XOR */
+ /* (a ^ b) | b == a | b */
+ ;
+ break;
+
+ case XOR:
+ if (op1 == AND)
+ /* (a & b) ^ b == (~a) & b */
+ op0 = AND, *pcomp_p = 1;
+ else /* op1 == IOR */
+ /* (a | b) ^ b == a & ~b */
+ op0 = AND, *pconst0 = ~ const0;
+ break;
+
+ case AND:
+ if (op1 == IOR)
+ /* (a | b) & b == b */
+ op0 = SET;
+ else /* op1 == XOR */
+	    /* (a ^ b) & b == (~a) & b */
+ *pcomp_p = 1;
+ break;
+ }
+
+ /* Check for NO-OP cases. */
+ const0 &= GET_MODE_MASK (mode);
+ if (const0 == 0
+ && (op0 == IOR || op0 == XOR || op0 == PLUS))
+ op0 = NIL;
+ else if (const0 == 0 && op0 == AND)
+ op0 = SET;
+ else if (const0 == GET_MODE_MASK (mode) && op0 == AND)
+ op0 = NIL;
+
+ *pop0 = op0;
+ *pconst0 = const0;
+
+ return 1;
+}
+
+/* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
+ The result of the shift is RESULT_MODE. X, if non-zero, is an expression
+ that we started with.
+
+ The shift is normally computed in the widest mode we find in VAROP, as
+ long as it isn't a different number of words than RESULT_MODE. Exceptions
+   are ASHIFTRT and ROTATE, which are always done in their original mode.  */
+
+static rtx
+simplify_shift_const (x, code, result_mode, varop, count)
+ rtx x;
+ enum rtx_code code;
+ enum machine_mode result_mode;
+ rtx varop;
+ int count;
+{
+ enum rtx_code orig_code = code;
+ int orig_count = count;
+ enum machine_mode mode = result_mode;
+ enum machine_mode shift_mode, tmode;
+ int mode_words
+ = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
+ /* We form (outer_op (code varop count) (outer_const)). */
+ enum rtx_code outer_op = NIL;
+ HOST_WIDE_INT outer_const = 0;
+ rtx const_rtx;
+ int complement_p = 0;
+ rtx new;
+
+ /* If we were given an invalid count, don't do anything except exactly
+ what was requested. */
+
+ if (count < 0 || count > GET_MODE_BITSIZE (mode))
+ {
+ if (x)
+ return x;
+
+ return gen_rtx (code, mode, varop, GEN_INT (count));
+ }
+
+ /* Unless one of the branches of the `if' in this loop does a `continue',
+ we will `break' the loop after the `if'. */
+
+ while (count != 0)
+ {
+ /* If we have an operand of (clobber (const_int 0)), just return that
+ value. */
+ if (GET_CODE (varop) == CLOBBER)
+ return varop;
+
+ /* If we discovered we had to complement VAROP, leave. Making a NOT
+ here would cause an infinite loop. */
+ if (complement_p)
+ break;
+
+      /* Convert ROTATERT to ROTATE.  */
+ if (code == ROTATERT)
+ code = ROTATE, count = GET_MODE_BITSIZE (result_mode) - count;
+
+ /* We need to determine what mode we will do the shift in. If the
+	 shift is an ASHIFTRT or ROTATE, we must always do it in the mode it
+ was originally done in. Otherwise, we can do it in MODE, the widest
+ mode encountered. */
+ shift_mode = (code == ASHIFTRT || code == ROTATE ? result_mode : mode);
+
+ /* Handle cases where the count is greater than the size of the mode
+ minus 1. For ASHIFT, use the size minus one as the count (this can
+ occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
+ take the count modulo the size. For other shifts, the result is
+ zero.
+
+ Since these shifts are being produced by the compiler by combining
+ multiple operations, each of which are defined, we know what the
+ result is supposed to be. */
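+      /* For example, combining two SImode logical right shifts of 16
+	 and 20 yields a shift count of 36, whose result must be zero.  */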
+
+ if (count > GET_MODE_BITSIZE (shift_mode) - 1)
+ {
+ if (code == ASHIFTRT)
+ count = GET_MODE_BITSIZE (shift_mode) - 1;
+ else if (code == ROTATE || code == ROTATERT)
+ count %= GET_MODE_BITSIZE (shift_mode);
+ else
+ {
+ /* We can't simply return zero because there may be an
+ outer op. */
+ varop = const0_rtx;
+ count = 0;
+ break;
+ }
+ }
+
+ /* Negative counts are invalid and should not have been made (a
+ programmer-specified negative count should have been handled
+ above). */
+ else if (count < 0)
+ abort ();
+
+ /* An arithmetic right shift of a quantity known to be -1 or 0
+ is a no-op. */
+ if (code == ASHIFTRT
+ && (num_sign_bit_copies (varop, shift_mode)
+ == GET_MODE_BITSIZE (shift_mode)))
+ {
+ count = 0;
+ break;
+ }
+
+ /* If we are doing an arithmetic right shift and discarding all but
+ the sign bit copies, this is equivalent to doing a shift by the
+ bitsize minus one. Convert it into that shift because it will often
+ allow other simplifications. */
+
+ if (code == ASHIFTRT
+ && (count + num_sign_bit_copies (varop, shift_mode)
+ >= GET_MODE_BITSIZE (shift_mode)))
+ count = GET_MODE_BITSIZE (shift_mode) - 1;
+
+ /* We simplify the tests below and elsewhere by converting
+ ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
+	 `make_compound_operation' will convert it to an ASHIFTRT for
+	 those machines (such as the VAX) that don't have an LSHIFTRT.  */
+ if (GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
+ && code == ASHIFTRT
+ && ((nonzero_bits (varop, shift_mode)
+ & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (shift_mode) - 1)))
+ == 0))
+ code = LSHIFTRT;
+
+ switch (GET_CODE (varop))
+ {
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ case SIGN_EXTRACT:
+ case ZERO_EXTRACT:
+ new = expand_compound_operation (varop);
+ if (new != varop)
+ {
+ varop = new;
+ continue;
+ }
+ break;
+
+ case MEM:
+ /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
+ minus the width of a smaller mode, we can do this with a
+ SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
+ if ((code == ASHIFTRT || code == LSHIFTRT)
+ && ! mode_dependent_address_p (XEXP (varop, 0))
+ && ! MEM_VOLATILE_P (varop)
+ && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
+ MODE_INT, 1)) != BLKmode)
+ {
+#if BYTES_BIG_ENDIAN
+ new = gen_rtx (MEM, tmode, XEXP (varop, 0));
+#else
+ new = gen_rtx (MEM, tmode,
+ plus_constant (XEXP (varop, 0),
+ count / BITS_PER_UNIT));
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (varop);
+ MEM_VOLATILE_P (new) = MEM_VOLATILE_P (varop);
+ MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (varop);
+#endif
+ varop = gen_rtx_combine (code == ASHIFTRT ? SIGN_EXTEND
+ : ZERO_EXTEND, mode, new);
+ count = 0;
+ continue;
+ }
+ break;
+
+ case USE:
+ /* Similar to the case above, except that we can only do this if
+ the resulting mode is the same as that of the underlying
+ MEM and adjust the address depending on the *bits* endianness
+ because of the way that bit-field extract insns are defined. */
+ if ((code == ASHIFTRT || code == LSHIFTRT)
+ && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
+ MODE_INT, 1)) != BLKmode
+ && tmode == GET_MODE (XEXP (varop, 0)))
+ {
+#if BITS_BIG_ENDIAN
+ new = XEXP (varop, 0);
+#else
+ new = copy_rtx (XEXP (varop, 0));
+ SUBST (XEXP (new, 0),
+ plus_constant (XEXP (new, 0),
+ count / BITS_PER_UNIT));
+#endif
+
+ varop = gen_rtx_combine (code == ASHIFTRT ? SIGN_EXTEND
+ : ZERO_EXTEND, mode, new);
+ count = 0;
+ continue;
+ }
+ break;
+
+ case SUBREG:
+ /* If VAROP is a SUBREG, strip it as long as the inner operand has
+ the same number of words as what we've seen so far. Then store
+ the widest mode in MODE. */
+ if (subreg_lowpart_p (varop)
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
+ > GET_MODE_SIZE (GET_MODE (varop)))
+ && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+ == mode_words))
+ {
+ varop = SUBREG_REG (varop);
+ if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
+ mode = GET_MODE (varop);
+ continue;
+ }
+ break;
+
+ case MULT:
+ /* Some machines use MULT instead of ASHIFT because MULT
+ is cheaper. But it is still better on those machines to
+ merge two shifts into one. */
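+ /* For example, (mult X 8) becomes (ashift X 3); the loop then
+ retries, and the nested-shift cases below can merge it with
+ the outer shift. */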
+ if (GET_CODE (XEXP (varop, 1)) == CONST_INT
+ && exact_log2 (INTVAL (XEXP (varop, 1))) >= 0)
+ {
+ varop = gen_binary (ASHIFT, GET_MODE (varop), XEXP (varop, 0),
+ GEN_INT (exact_log2 (INTVAL (XEXP (varop, 1)))));
+ continue;
+ }
+ break;
+
+ case UDIV:
+ /* Similar, for when divides are cheaper. */
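+ /* For example, (udiv X 16) becomes (lshiftrt X 4). */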
+ if (GET_CODE (XEXP (varop, 1)) == CONST_INT
+ && exact_log2 (INTVAL (XEXP (varop, 1))) >= 0)
+ {
+ varop = gen_binary (LSHIFTRT, GET_MODE (varop), XEXP (varop, 0),
+ GEN_INT (exact_log2 (INTVAL (XEXP (varop, 1)))));
+ continue;
+ }
+ break;
+
+ case ASHIFTRT:
+ /* If we are extracting just the sign bit of an arithmetic right
+ shift, that shift is not needed. */
+ if (code == LSHIFTRT && count == GET_MODE_BITSIZE (result_mode) - 1)
+ {
+ varop = XEXP (varop, 0);
+ continue;
+ }
+
+ /* ... fall through ... */
+
+ case LSHIFTRT:
+ case ASHIFT:
+ case ROTATE:
+ /* Here we have two nested shifts. The result is usually the
+ AND of a new shift with a mask. We compute the result below. */
+ if (GET_CODE (XEXP (varop, 1)) == CONST_INT
+ && INTVAL (XEXP (varop, 1)) >= 0
+ && INTVAL (XEXP (varop, 1)) < GET_MODE_BITSIZE (GET_MODE (varop))
+ && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ {
+ enum rtx_code first_code = GET_CODE (varop);
+ int first_count = INTVAL (XEXP (varop, 1));
+ unsigned HOST_WIDE_INT mask;
+ rtx mask_rtx;
+
+ /* We have one common special case. We can't do any merging if
+ the inner code is an ASHIFTRT of a smaller mode. However, if
+ we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
+ with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
+ we can convert it to
+ (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
+ This simplifies certain SIGN_EXTEND operations. */
+ if (code == ASHIFT && first_code == ASHIFTRT
+ && (GET_MODE_BITSIZE (result_mode)
+ - GET_MODE_BITSIZE (GET_MODE (varop))) == count)
+ {
+ /* C3 has the low-order C1 bits zero. */
+
+ mask = (GET_MODE_MASK (mode)
+ & ~ (((HOST_WIDE_INT) 1 << first_count) - 1));
+
+ varop = simplify_and_const_int (NULL_RTX, result_mode,
+ XEXP (varop, 0), mask);
+ varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
+ varop, count);
+ count = first_count;
+ code = ASHIFTRT;
+ continue;
+ }
+
+ /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
+ than C1 high-order bits equal to the sign bit, we can convert
+ this to either an ASHIFT or an ASHIFTRT depending on the
+ two counts.
+
+ We cannot do this if VAROP's mode is not SHIFT_MODE. */
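+
+ /* For example, if FOO:SI has at least three sign-bit copies,
+ (ashiftrt:SI (ashift:SI FOO 2) 5) simplifies to
+ (ashiftrt:SI FOO 3). */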
+
+ if (code == ASHIFTRT && first_code == ASHIFT
+ && GET_MODE (varop) == shift_mode
+ && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
+ > first_count))
+ {
+ count -= first_count;
+ if (count < 0)
+ count = - count, code = ASHIFT;
+ varop = XEXP (varop, 0);
+ continue;
+ }
+
+ /* There are some cases we can't do. If CODE is ASHIFTRT,
+ we can only do this if FIRST_CODE is also ASHIFTRT.
+
+ We can't do the case when CODE is ROTATE and FIRST_CODE is
+ ASHIFTRT.
+
+ If the mode of this shift is not the mode of the outer shift,
+ we can't do this if either shift is ASHIFTRT or ROTATE.
+
+ Finally, we can't do any of these if the mode is too wide
+ unless the codes are the same.
+
+ Handle the case where the shift codes are the same
+ first. */
+
+ if (code == first_code)
+ {
+ if (GET_MODE (varop) != result_mode
+ && (code == ASHIFTRT || code == ROTATE))
+ break;
+
+ count += first_count;
+ varop = XEXP (varop, 0);
+ continue;
+ }
+
+ if (code == ASHIFTRT
+ || (code == ROTATE && first_code == ASHIFTRT)
+ || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
+ || (GET_MODE (varop) != result_mode
+ && (first_code == ASHIFTRT || first_code == ROTATE
+ || code == ROTATE)))
+ break;
+
+ /* To compute the mask to apply after the shift, shift the
+ nonzero bits of the inner shift the same way the
+ outer shift will. */
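+
+ /* For example, for (lshiftrt:SI (ashift:SI X 4) 2) the nonzero
+ bits of the inner shift are 0xfffffff0; shifted right by 2 they
+ give the mask 0x3ffffffc, and the result becomes
+ (and:SI (ashift:SI X 2) 0x3ffffffc). */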
+
+ mask_rtx = GEN_INT (nonzero_bits (varop, GET_MODE (varop)));
+
+ mask_rtx
+ = simplify_binary_operation (code, result_mode, mask_rtx,
+ GEN_INT (count));
+
+ /* Give up if we can't compute an outer operation to use. */
+ if (mask_rtx == 0
+ || GET_CODE (mask_rtx) != CONST_INT
+ || ! merge_outer_ops (&outer_op, &outer_const, AND,
+ INTVAL (mask_rtx),
+ result_mode, &complement_p))
+ break;
+
+ /* If the shifts are in the same direction, we add the
+ counts. Otherwise, we subtract them. */
+ if ((code == ASHIFTRT || code == LSHIFTRT)
+ == (first_code == ASHIFTRT || first_code == LSHIFTRT))
+ count += first_count;
+ else
+ count -= first_count;
+
+ /* If COUNT is positive, the new shift is usually CODE,
+ except for the two exceptions below, in which case it is
+ FIRST_CODE. If the count is negative, FIRST_CODE should
+ always be used. */
+ if (count > 0
+ && ((first_code == ROTATE && code == ASHIFT)
+ || (first_code == ASHIFTRT && code == LSHIFTRT)))
+ code = first_code;
+ else if (count < 0)
+ code = first_code, count = - count;
+
+ varop = XEXP (varop, 0);
+ continue;
+ }
+
+ /* If we have (A << B << C) for any shift, we can convert this to
+ (A << C << B). This wins if A is a constant. Only try this if
+ B is not a constant. */
+
+ else if (GET_CODE (varop) == code
+ && GET_CODE (XEXP (varop, 1)) != CONST_INT
+ && 0 != (new
+ = simplify_binary_operation (code, mode,
+ XEXP (varop, 0),
+ GEN_INT (count))))
+ {
+ varop = gen_rtx_combine (code, mode, new, XEXP (varop, 1));
+ count = 0;
+ continue;
+ }
+ break;
+
+ case NOT:
+ /* Make this fit the case below. */
+ varop = gen_rtx_combine (XOR, mode, XEXP (varop, 0),
+ GEN_INT (GET_MODE_MASK (mode)));
+ continue;
+
+ case IOR:
+ case AND:
+ case XOR:
+ /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
+ with C the size of VAROP - 1 and the shift is logical if
+ STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
+ we have an (le X 0) operation. If we have an arithmetic shift
+ and STORE_FLAG_VALUE is 1 or we have a logical shift with
+ STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
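+
+ /* For example, with STORE_FLAG_VALUE == 1,
+ (lshiftrt:SI (ior (plus X -1) X) 31) tests the sign bit of
+ (X - 1) | X, which is set exactly when X <= 0, so the whole
+ expression is (le X 0). */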
+
+ if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
+ && XEXP (XEXP (varop, 0), 1) == constm1_rtx
+ && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
+ && (code == LSHIFTRT || code == ASHIFTRT)
+ && count == GET_MODE_BITSIZE (GET_MODE (varop)) - 1
+ && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
+ {
+ count = 0;
+ varop = gen_rtx_combine (LE, GET_MODE (varop), XEXP (varop, 1),
+ const0_rtx);
+
+ if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
+ varop = gen_rtx_combine (NEG, GET_MODE (varop), varop);
+
+ continue;
+ }
+
+ /* If we have (shift (logical)), move the logical to the outside
+ to allow it to possibly combine with another logical and the
+ shift to combine with another shift. This also canonicalizes to
+ what a ZERO_EXTRACT looks like. Also, some machines have
+ (and (shift)) insns. */
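+
+ /* For example, (lshiftrt (and X 0xff00) 8) becomes
+ (lshiftrt X 8) with an outer (and ... 0xff). */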
+
+ if (GET_CODE (XEXP (varop, 1)) == CONST_INT
+ && (new = simplify_binary_operation (code, result_mode,
+ XEXP (varop, 1),
+ GEN_INT (count))) != 0
+ && GET_CODE (new) == CONST_INT
+ && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
+ INTVAL (new), result_mode, &complement_p))
+ {
+ varop = XEXP (varop, 0);
+ continue;
+ }
+
+ /* If we can't do that, try to simplify the shift in each arm of the
+ logical expression, make a new logical expression, and apply
+ the inverse distributive law. */
+ {
+ rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
+ XEXP (varop, 0), count);
+ rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
+ XEXP (varop, 1), count);
+
+ varop = gen_binary (GET_CODE (varop), shift_mode, lhs, rhs);
+ varop = apply_distributive_law (varop);
+
+ count = 0;
+ }
+ break;
+
+ case EQ:
+ /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
+ says that the sign bit can be tested, FOO has mode MODE, C is
+ GET_MODE_BITSIZE (MODE) - 1, and FOO has only its low-order bit
+ that may be nonzero. */
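+
+ /* For example, if STORE_FLAG_VALUE is -1 and FOO is known to be
+ 0 or 1, (lshiftrt:SI (eq FOO 0) 31) is 1 when FOO == 0 and 0
+ when FOO == 1, i.e. (xor FOO 1). */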
+ if (code == LSHIFTRT
+ && XEXP (varop, 1) == const0_rtx
+ && GET_MODE (XEXP (varop, 0)) == result_mode
+ && count == GET_MODE_BITSIZE (result_mode) - 1
+ && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
+ && ((STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
+ && nonzero_bits (XEXP (varop, 0), result_mode) == 1
+ && merge_outer_ops (&outer_op, &outer_const, XOR,
+ (HOST_WIDE_INT) 1, result_mode,
+ &complement_p))
+ {
+ varop = XEXP (varop, 0);
+ count = 0;
+ continue;
+ }
+ break;
+
+ case NEG:
+ /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
+ than the number of bits in the mode is equivalent to A. */
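+ /* For example, (lshiftrt:SI (neg A) 31) is 1 when A is 1 (the
+ negation is -1) and 0 when A is 0, so the result is just A. */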
+ if (code == LSHIFTRT && count == GET_MODE_BITSIZE (result_mode) - 1
+ && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
+ {
+ varop = XEXP (varop, 0);
+ count = 0;
+ continue;
+ }
+
+ /* NEG commutes with ASHIFT since it is multiplication. Move the
+ NEG outside to allow shifts to combine. */
+ if (code == ASHIFT
+ && merge_outer_ops (&outer_op, &outer_const, NEG,
+ (HOST_WIDE_INT) 0, result_mode,
+ &complement_p))
+ {
+ varop = XEXP (varop, 0);
+ continue;
+ }
+ break;
+
+ case PLUS:
+ /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
+ is one less than the number of bits in the mode is
+ equivalent to (xor A 1). */
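+ /* For example, (lshiftrt:SI (plus A -1) 31) is 1 for A == 0 (the
+ sum is -1) and 0 for A == 1, which is (xor A 1). */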
+ if (code == LSHIFTRT && count == GET_MODE_BITSIZE (result_mode) - 1
+ && XEXP (varop, 1) == constm1_rtx
+ && nonzero_bits (XEXP (varop, 0), result_mode) == 1
+ && merge_outer_ops (&outer_op, &outer_const, XOR,
+ (HOST_WIDE_INT) 1, result_mode,
+ &complement_p))
+ {
+ count = 0;
+ varop = XEXP (varop, 0);
+ continue;
+ }
+
+ /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
+ that might be nonzero in BAR are those being shifted out and those
+ bits are known zero in FOO, we can replace the PLUS with FOO.
+ Similarly in the other operand order. This code occurs when
+ we are computing the size of a variable-size array. */
+
+ if ((code == ASHIFTRT || code == LSHIFTRT)
+ && count < HOST_BITS_PER_WIDE_INT
+ && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
+ && (nonzero_bits (XEXP (varop, 1), result_mode)
+ & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
+ {
+ varop = XEXP (varop, 0);
+ continue;
+ }
+ else if ((code == ASHIFTRT || code == LSHIFTRT)
+ && count < HOST_BITS_PER_WIDE_INT
+ && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
+ && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
+ >> count)
+ && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
+ & nonzero_bits (XEXP (varop, 1),
+ result_mode)))
+ {
+ varop = XEXP (varop, 1);
+ continue;
+ }
+
+ /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
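+ /* For example, (ashift (plus X 3) 2) is (plus (ashift X 2) 12). */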
+ if (code == ASHIFT
+ && GET_CODE (XEXP (varop, 1)) == CONST_INT
+ && (new = simplify_binary_operation (ASHIFT, result_mode,
+ XEXP (varop, 1),
+ GEN_INT (count))) != 0
+ && GET_CODE (new) == CONST_INT
+ && merge_outer_ops (&outer_op, &outer_const, PLUS,
+ INTVAL (new), result_mode, &complement_p))
+ {
+ varop = XEXP (varop, 0);
+ continue;
+ }
+ break;
+
+ case MINUS:
+ /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
+ with C the size of VAROP - 1 and the shift is logical if
+ STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
+ we have a (gt X 0) operation. If the shift is arithmetic with
+ STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
+ we have a (neg (gt X 0)) operation. */
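+
+ /* For example, with STORE_FLAG_VALUE == 1,
+ (lshiftrt:SI (minus (ashiftrt X 31) X) 31) tests the sign bit
+ of (X >> 31) - X, which is set exactly when X > 0, so the whole
+ expression is (gt X 0). */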
+
+ if (GET_CODE (XEXP (varop, 0)) == ASHIFTRT
+ && count == GET_MODE_BITSIZE (GET_MODE (varop)) - 1
+ && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
+ && (code == LSHIFTRT || code == ASHIFTRT)
+ && GET_CODE (XEXP (XEXP (varop, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
+ && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
+ {
+ count = 0;
+ varop = gen_rtx_combine (GT, GET_MODE (varop), XEXP (varop, 1),
+ const0_rtx);
+
+ if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
+ varop = gen_rtx_combine (NEG, GET_MODE (varop), varop);
+
+ continue;
+ }
+ break;
+ }
+
+ break;
+ }
+
+ /* We need to determine what mode to do the shift in. If the shift is
+ an ASHIFTRT or ROTATE, we must always do it in the mode it was originally
+ done in. Otherwise, we can do it in MODE, the widest mode encountered.
+ The code we care about is that of the shift that will actually be done,
+ not the shift that was originally requested. */
+ shift_mode = (code == ASHIFTRT || code == ROTATE ? result_mode : mode);
+
+ /* We have now finished analyzing the shift. The result should be
+ a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
+ OUTER_OP is non-NIL, it is an operation that needs to be applied
+ to the result of the shift. OUTER_CONST is the relevant constant,
+ but we must turn off all bits turned off in the shift.
+
+ If we were passed a value for X, see if we can use any pieces of
+ it. If not, make new rtx. */
+
+ if (x && GET_RTX_CLASS (GET_CODE (x)) == '2'
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == count)
+ const_rtx = XEXP (x, 1);
+ else
+ const_rtx = GEN_INT (count);
+
+ if (x && GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_MODE (XEXP (x, 0)) == shift_mode
+ && SUBREG_REG (XEXP (x, 0)) == varop)
+ varop = XEXP (x, 0);
+ else if (GET_MODE (varop) != shift_mode)
+ varop = gen_lowpart_for_combine (shift_mode, varop);
+
+ /* If we can't make the SUBREG, try to return what we were given. */
+ if (GET_CODE (varop) == CLOBBER)
+ return x ? x : varop;
+
+ new = simplify_binary_operation (code, shift_mode, varop, const_rtx);
+ if (new != 0)
+ x = new;
+ else
+ {
+ if (x == 0 || GET_CODE (x) != code || GET_MODE (x) != shift_mode)
+ x = gen_rtx_combine (code, shift_mode, varop, const_rtx);
+
+ SUBST (XEXP (x, 0), varop);
+ SUBST (XEXP (x, 1), const_rtx);
+ }
+
+ /* If we have an outer operation and we just made a shift, it is
+ possible that we could have simplified the shift were it not
+ for the outer operation. So try to do the simplification
+ recursively. */
+
+ if (outer_op != NIL && GET_CODE (x) == code
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ x = simplify_shift_const (x, code, shift_mode, XEXP (x, 0),
+ INTVAL (XEXP (x, 1)));
+
+ /* If we were doing a LSHIFTRT in a wider mode than it was originally,
+ turn off all the bits that the shift would have turned off. */
+ if (orig_code == LSHIFTRT && result_mode != shift_mode)
+ x = simplify_and_const_int (NULL_RTX, shift_mode, x,
+ GET_MODE_MASK (result_mode) >> orig_count);
+
+ /* Do the remainder of the processing in RESULT_MODE. */
+ x = gen_lowpart_for_combine (result_mode, x);
+
+ /* If COMPLEMENT_P is set, we have to complement X before doing the outer
+ operation. */
+ if (complement_p)
+ x = gen_unary (NOT, result_mode, result_mode, x);
+
+ if (outer_op != NIL)
+ {
+ if (GET_MODE_BITSIZE (result_mode) < HOST_BITS_PER_WIDE_INT)
+ outer_const &= GET_MODE_MASK (result_mode);
+
+ if (outer_op == AND)
+ x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
+ else if (outer_op == SET)
+ /* This means that we have determined that the result is
+ equivalent to a constant. This should be rare. */
+ x = GEN_INT (outer_const);
+ else if (GET_RTX_CLASS (outer_op) == '1')
+ x = gen_unary (outer_op, result_mode, result_mode, x);
+ else
+ x = gen_binary (outer_op, result_mode, x, GEN_INT (outer_const));
+ }
+
+ return x;
+}
+
+/* Like recog, but we receive the address of a pointer to a new pattern.
+ We try to match the rtx that the pointer points to.
+ If that fails, we may try to modify or replace the pattern,
+ storing the replacement into the same pointer object.
+
+ Modifications include deletion or addition of CLOBBERs.
+
+ PNOTES is a pointer to a location where any REG_UNUSED notes added for
+ the CLOBBERs are placed.
+
+ The value is the final insn code from the pattern ultimately matched,
+ or -1. */
+
+static int
+recog_for_combine (pnewpat, insn, pnotes)
+ rtx *pnewpat;
+ rtx insn;
+ rtx *pnotes;
+{
+ register rtx pat = *pnewpat;
+ int insn_code_number;
+ int num_clobbers_to_add = 0;
+ int i;
+ rtx notes = 0;
+
+ /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
+ we use to indicate that something didn't match. If we find such a
+ thing, force rejection. */
+ if (GET_CODE (pat) == PARALLEL)
+ for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
+ && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
+ return -1;
+
+ /* Is the result of combination a valid instruction? */
+ insn_code_number = recog (pat, insn, &num_clobbers_to_add);
+
+ /* If it isn't, there is the possibility that we previously had an insn
+ that clobbered some register as a side effect, but the combined
+ insn doesn't need to do that. So try once more without the clobbers
+ unless this represents an ASM insn. */
+
+ if (insn_code_number < 0 && ! check_asm_operands (pat)
+ && GET_CODE (pat) == PARALLEL)
+ {
+ int pos;
+
+ for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
+ if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
+ {
+ if (i != pos)
+ SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
+ pos++;
+ }
+
+ SUBST_INT (XVECLEN (pat, 0), pos);
+
+ if (pos == 1)
+ pat = XVECEXP (pat, 0, 0);
+
+ insn_code_number = recog (pat, insn, &num_clobbers_to_add);
+ }
+
+ /* If we had any clobbers to add, make a new pattern that contains
+ them. Then check to make sure that all of them are dead. */
+ if (num_clobbers_to_add)
+ {
+ rtx newpat = gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (GET_CODE (pat) == PARALLEL
+ ? XVECLEN (pat, 0) + num_clobbers_to_add
+ : num_clobbers_to_add + 1));
+
+ if (GET_CODE (pat) == PARALLEL)
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
+ else
+ XVECEXP (newpat, 0, 0) = pat;
+
+ add_clobbers (newpat, insn_code_number);
+
+ for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
+ i < XVECLEN (newpat, 0); i++)
+ {
+ if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) == REG
+ && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
+ return -1;
+ notes = gen_rtx (EXPR_LIST, REG_UNUSED,
+ XEXP (XVECEXP (newpat, 0, i), 0), notes);
+ }
+ pat = newpat;
+ }
+
+ *pnewpat = pat;
+ *pnotes = notes;
+
+ return insn_code_number;
+}
+
+/* Like gen_lowpart but for use by combine. In combine it is not possible
+ to create any new pseudoregs. However, it is safe to create
+ invalid memory addresses, because combine will try to recognize
+ them and all they will do is make the combine attempt fail.
+
+ If for some reason this cannot do its job, an rtx
+ (clobber (const_int 0)) is returned.
+ An insn containing that will not be recognized. */
+
+#undef gen_lowpart
+
+static rtx
+gen_lowpart_for_combine (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ rtx result;
+
+ if (GET_MODE (x) == mode)
+ return x;
+
+ /* We can only support MODE being wider than a word if X is a
+ constant integer or has a mode the same size. */
+
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && ! ((GET_MODE (x) == VOIDmode
+ && (GET_CODE (x) == CONST_INT
+ || GET_CODE (x) == CONST_DOUBLE))
+ || GET_MODE_SIZE (GET_MODE (x)) == GET_MODE_SIZE (mode)))
+ return gen_rtx (CLOBBER, GET_MODE (x), const0_rtx);
+
+ /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
+ won't know what to do. So we will strip off the SUBREG here and
+ process normally. */
+ if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == MEM)
+ {
+ x = SUBREG_REG (x);
+ if (GET_MODE (x) == mode)
+ return x;
+ }
+
+ result = gen_lowpart_common (mode, x);
+ if (result)
+ return result;
+
+ if (GET_CODE (x) == MEM)
+ {
+ register int offset = 0;
+ rtx new;
+
+ /* Refuse to work on a volatile memory ref or one with a mode-dependent
+ address. */
+ if (MEM_VOLATILE_P (x) || mode_dependent_address_p (XEXP (x, 0)))
+ return gen_rtx (CLOBBER, GET_MODE (x), const0_rtx);
+
+ /* If we want to refer to something bigger than the original memref,
+ generate a perverse subreg instead. That will force a reload
+ of the original memref X. */
+ if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode))
+ return gen_rtx (SUBREG, mode, x, 0);
+
+#if WORDS_BIG_ENDIAN
+ offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
+#endif
+#if BYTES_BIG_ENDIAN
+ /* Adjust the address so that the address-after-the-data
+ is unchanged. */
+ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
+#endif
+ new = gen_rtx (MEM, mode, plus_constant (XEXP (x, 0), offset));
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
+ MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x);
+ MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x);
+ return new;
+ }
+
+ /* If X is a comparison operator, rewrite it in a new mode. This
+ probably won't match, but may allow further simplifications. */
+ else if (GET_RTX_CLASS (GET_CODE (x)) == '<')
+ return gen_rtx_combine (GET_CODE (x), mode, XEXP (x, 0), XEXP (x, 1));
+
+ /* If we couldn't simplify X any other way, just enclose it in a
+ SUBREG. Normally, this SUBREG won't match, but some patterns may
+ include an explicit SUBREG or we may simplify it further in combine. */
+ else
+ {
+ int word = 0;
+
+ if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
+ word = ((GET_MODE_SIZE (GET_MODE (x))
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD))
+ / UNITS_PER_WORD);
+ return gen_rtx (SUBREG, mode, x, word);
+ }
+}
+
+/* Make an rtx expression. This is a subset of gen_rtx and only supports
+ expressions of 1, 2, or 3 operands, each of which are rtx expressions.
+
+ If the identical expression was previously in the insn (in the undobuf),
+ it will be returned. Only if it is not found will a new expression
+ be made. */
+
+/*VARARGS2*/
+static rtx
+gen_rtx_combine VPROTO((enum rtx_code code, enum machine_mode mode, ...))
+{
+#ifndef __STDC__
+ enum rtx_code code;
+ enum machine_mode mode;
+#endif
+ va_list p;
+ int n_args;
+ rtx args[3];
+ int i, j;
+ char *fmt;
+ rtx rt;
+
+ VA_START (p, mode);
+
+#ifndef __STDC__
+ code = va_arg (p, enum rtx_code);
+ mode = va_arg (p, enum machine_mode);
+#endif
+
+ n_args = GET_RTX_LENGTH (code);
+ fmt = GET_RTX_FORMAT (code);
+
+ if (n_args == 0 || n_args > 3)
+ abort ();
+
+ /* Get each arg and verify that it is supposed to be an expression. */
+ for (j = 0; j < n_args; j++)
+ {
+ if (*fmt++ != 'e')
+ abort ();
+
+ args[j] = va_arg (p, rtx);
+ }
+
+ /* See if this is in undobuf. Be sure we don't use objects that came
+ from another insn; this could produce circular rtl structures. */
+
+ for (i = previous_num_undos; i < undobuf.num_undo; i++)
+ if (!undobuf.undo[i].is_int
+ && GET_CODE (undobuf.undo[i].old_contents.r) == code
+ && GET_MODE (undobuf.undo[i].old_contents.r) == mode)
+ {
+ for (j = 0; j < n_args; j++)
+ if (XEXP (undobuf.undo[i].old_contents.r, j) != args[j])
+ break;
+
+ if (j == n_args)
+ return undobuf.undo[i].old_contents.r;
+ }
+
+ /* Otherwise make a new rtx. We know we have 1, 2, or 3 args.
+ Use rtx_alloc instead of gen_rtx because it's faster on RISC. */
+ rt = rtx_alloc (code);
+ PUT_MODE (rt, mode);
+ XEXP (rt, 0) = args[0];
+ if (n_args > 1)
+ {
+ XEXP (rt, 1) = args[1];
+ if (n_args > 2)
+ XEXP (rt, 2) = args[2];
+ }
+ return rt;
+}
+
+/* These routines make binary and unary operations by first seeing if they
+ fold; if not, a new expression is allocated. */
+
+static rtx
+gen_binary (code, mode, op0, op1)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op0, op1;
+{
+ rtx result;
+ rtx tem;
+
+ if (GET_RTX_CLASS (code) == 'c'
+ && (GET_CODE (op0) == CONST_INT
+ || (CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)))
+ tem = op0, op0 = op1, op1 = tem;
+
+ if (GET_RTX_CLASS (code) == '<')
+ {
+ enum machine_mode op_mode = GET_MODE (op0);
+
+ /* Strip the COMPARE from (REL_OP (compare X Y) 0) to get
+ just (REL_OP X Y). */
+ if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
+ {
+ op1 = XEXP (op0, 1);
+ op0 = XEXP (op0, 0);
+ op_mode = GET_MODE (op0);
+ }
+
+ if (op_mode == VOIDmode)
+ op_mode = GET_MODE (op1);
+ result = simplify_relational_operation (code, op_mode, op0, op1);
+ }
+ else
+ result = simplify_binary_operation (code, mode, op0, op1);
+
+ if (result)
+ return result;
+
+ /* Put complex operands first and constants second. */
+ if (GET_RTX_CLASS (code) == 'c'
+ && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
+ || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
+ && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
+ || (GET_CODE (op0) == SUBREG
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
+ && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
+ return gen_rtx_combine (code, mode, op1, op0);
+
+ return gen_rtx_combine (code, mode, op0, op1);
+}
+
+static rtx
+gen_unary (code, mode, op0_mode, op0)
+ enum rtx_code code;
+ enum machine_mode mode, op0_mode;
+ rtx op0;
+{
+ rtx result = simplify_unary_operation (code, mode, op0, op0_mode);
+
+ if (result)
+ return result;
+
+ return gen_rtx_combine (code, mode, op0);
+}
+
+/* Simplify a comparison between *POP0 and *POP1 where CODE is the
+ comparison code that will be tested.
+
+ The result is a possibly different comparison code to use. *POP0 and
+ *POP1 may be updated.
+
+ It is possible that we might detect that a comparison is either always
+ true or always false. However, we do not perform general constant
+ folding in combine, so this knowledge isn't useful. Such tautologies
+ should have been detected earlier. Hence we ignore all such cases. */
+
+static enum rtx_code
+simplify_comparison (code, pop0, pop1)
+ enum rtx_code code;
+ rtx *pop0;
+ rtx *pop1;
+{
+ rtx op0 = *pop0;
+ rtx op1 = *pop1;
+ rtx tem, tem1;
+ int i;
+ enum machine_mode mode, tmode;
+
+ /* Try a few ways of applying the same transformation to both operands. */
+ while (1)
+ {
+#ifndef WORD_REGISTER_OPERATIONS
+ /* The test below this one won't handle SIGN_EXTENDs on these machines,
+ so check specially. */
+ if (code != GTU && code != GEU && code != LTU && code != LEU
+ && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
+ && GET_CODE (XEXP (op0, 0)) == ASHIFT
+ && GET_CODE (XEXP (op1, 0)) == ASHIFT
+ && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
+ && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
+ && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
+ == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (XEXP (op1, 1)) == CONST_INT
+ && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) == INTVAL (XEXP (op1, 1))
+ && INTVAL (XEXP (op0, 1)) == INTVAL (XEXP (XEXP (op0, 0), 1))
+ && INTVAL (XEXP (op0, 1)) == INTVAL (XEXP (XEXP (op1, 0), 1))
+ && (INTVAL (XEXP (op0, 1))
+ == (GET_MODE_BITSIZE (GET_MODE (op0))
+ - (GET_MODE_BITSIZE
+ (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
+ {
+ op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
+ op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
+ }
+#endif
+
+ /* If both operands are the same constant shift, see if we can ignore the
+ shift. We can if the shift is a rotate or if the bits shifted out of
+ this shift are known to be zero for both inputs and if the type of
+ comparison is compatible with the shift. */
+ if (GET_CODE (op0) == GET_CODE (op1)
+ && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
+ && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
+ || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
+ && (code != GT && code != LT && code != GE && code != LE))
+ || (GET_CODE (op0) == ASHIFTRT
+ && (code != GTU && code != LTU
+ && code != GEU && code != LEU)))
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) >= 0
+ && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
+ && XEXP (op0, 1) == XEXP (op1, 1))
+ {
+ enum machine_mode mode = GET_MODE (op0);
+ unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
+ int shift_count = INTVAL (XEXP (op0, 1));
+
+ if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
+ mask &= (mask >> shift_count) << shift_count;
+ else if (GET_CODE (op0) == ASHIFT)
+ mask = (mask & (mask << shift_count)) >> shift_count;
+
+ if ((nonzero_bits (XEXP (op0, 0), mode) & ~ mask) == 0
+ && (nonzero_bits (XEXP (op1, 0), mode) & ~ mask) == 0)
+ op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
+ else
+ break;
+ }
+
+ /* If both operands are AND's of a paradoxical SUBREG by constant, the
+ SUBREGs are of the same mode, and, in both cases, the AND would
+ be redundant if the comparison was done in the narrower mode,
+ do the comparison in the narrower mode (e.g., we are AND'ing with 1
+ and the operand's possibly nonzero bits are 0xffffff01; in that case
+ if we only care about QImode, we don't need the AND). This case
+ occurs if the output mode of an scc insn is not SImode and
+ STORE_FLAG_VALUE == 1 (e.g., the 386).
+
+ Similarly, check for a case where the AND's are ZERO_EXTEND
+ operations from some narrower mode even though a SUBREG is not
+ present. */
+
+ else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (XEXP (op1, 1)) == CONST_INT)
+ {
+ rtx inner_op0 = XEXP (op0, 0);
+ rtx inner_op1 = XEXP (op1, 0);
+ HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
+ HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
+ int changed = 0;
+
+ if (GET_CODE (inner_op0) == SUBREG && GET_CODE (inner_op1) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (inner_op0))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner_op0))))
+ && (GET_MODE (SUBREG_REG (inner_op0))
+ == GET_MODE (SUBREG_REG (inner_op1)))
+ && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
+ GET_MODE (SUBREG_REG (op0)))))
+ && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
+ GET_MODE (SUBREG_REG (inner_op1))))))
+ {
+ op0 = SUBREG_REG (inner_op0);
+ op1 = SUBREG_REG (inner_op1);
+
+ /* The resulting comparison is always unsigned since we masked
+ off the original sign bit. */
+ code = unsigned_condition (code);
+
+ changed = 1;
+ }
+
+ else if (c0 == c1)
+ for (tmode = GET_CLASS_NARROWEST_MODE
+ (GET_MODE_CLASS (GET_MODE (op0)));
+ tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
+ if (c0 == GET_MODE_MASK (tmode))
+ {
+ op0 = gen_lowpart_for_combine (tmode, inner_op0);
+ op1 = gen_lowpart_for_combine (tmode, inner_op1);
+ changed = 1;
+ break;
+ }
+
+ if (! changed)
+ break;
+ }
+
+ /* If both operands are NOT, we can strip off the outer operation
+ and adjust the comparison code for swapped operands; similarly for
+ NEG, except that this must be an equality comparison. */
+ else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
+ || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
+ && (code == EQ || code == NE)))
+ op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
+
+ else
+ break;
+ }
+
+ /* If the first operand is a constant, swap the operands and adjust the
+ comparison code appropriately. */
+ if (CONSTANT_P (op0))
+ {
+ tem = op0, op0 = op1, op1 = tem;
+ code = swap_condition (code);
+ }
+
+ /* We now enter a loop during which we will try to simplify the comparison.
+ For the most part, we are only concerned with comparisons with zero,
+ but some things may really be comparisons with zero but not start
+ out looking that way. */
+
+ while (GET_CODE (op1) == CONST_INT)
+ {
+ enum machine_mode mode = GET_MODE (op0);
+ int mode_width = GET_MODE_BITSIZE (mode);
+ unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
+ int equality_comparison_p;
+ int sign_bit_comparison_p;
+ int unsigned_comparison_p;
+ HOST_WIDE_INT const_op;
+
+ /* We only want to handle integral modes. This catches VOIDmode,
+ CCmode, and the floating-point modes. An exception is that we
+ can handle VOIDmode if OP0 is a COMPARE or a comparison
+ operation. */
+
+ if (GET_MODE_CLASS (mode) != MODE_INT
+ && ! (mode == VOIDmode
+ && (GET_CODE (op0) == COMPARE
+ || GET_RTX_CLASS (GET_CODE (op0)) == '<')))
+ break;
+
+ /* Get the constant we are comparing against and turn off all bits
+ not on in our mode. */
+ const_op = INTVAL (op1);
+ if (mode_width <= HOST_BITS_PER_WIDE_INT)
+ const_op &= mask;
+
+ /* If we are comparing against a constant power of two and the value
+ being compared can only have that single bit nonzero (e.g., it was
+ `and'ed with that bit), we can replace this with a comparison
+ with zero. */
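+ /* For example, (eq (and X 8) 8) becomes (ne (and X 8) 0). */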
+ if (const_op
+ && (code == EQ || code == NE || code == GE || code == GEU
+ || code == LT || code == LTU)
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && exact_log2 (const_op) >= 0
+ && nonzero_bits (op0, mode) == const_op)
+ {
+ code = (code == EQ || code == GE || code == GEU ? NE : EQ);
+ op1 = const0_rtx, const_op = 0;
+ }
+
+ /* Similarly, if we are comparing a value known to be either -1 or
+ 0 with -1, change it to the opposite comparison against zero. */
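+ /* For example, if X is known to be 0 or -1, (gt X -1) becomes
+ (eq X 0). */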
+
+ if (const_op == -1
+ && (code == EQ || code == NE || code == GT || code == LE
+ || code == GEU || code == LTU)
+ && num_sign_bit_copies (op0, mode) == mode_width)
+ {
+ code = (code == EQ || code == LE || code == GEU ? NE : EQ);
+ op1 = const0_rtx, const_op = 0;
+ }
+
+ /* Do some canonicalizations based on the comparison code. We prefer
+ comparisons against zero and then prefer equality comparisons.
+ If we can reduce the size of a constant, we will do that too. */
+
+ switch (code)
+ {
+ case LT:
+ /* < C is equivalent to <= (C - 1) */
+ if (const_op > 0)
+ {
+ const_op -= 1;
+ op1 = GEN_INT (const_op);
+ code = LE;
+ /* ... fall through to LE case below. */
+ }
+ else
+ break;
+
+ case LE:
+ /* <= C is equivalent to < (C + 1); we do this for C < 0 */
+ if (const_op < 0)
+ {
+ const_op += 1;
+ op1 = GEN_INT (const_op);
+ code = LT;
+ }
+
+ /* If we are doing a <= 0 comparison on a value known to have
+ a zero sign bit, we can replace this with == 0. */
+ else if (const_op == 0
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (op0, mode)
+ & ((HOST_WIDE_INT) 1 << (mode_width - 1))) == 0)
+ code = EQ;
+ break;
+
+ case GE:
+ /* >= C is equivalent to > (C - 1). */
+ if (const_op > 0)
+ {
+ const_op -= 1;
+ op1 = GEN_INT (const_op);
+ code = GT;
+ /* ... fall through to GT below. */
+ }
+ else
+ break;
+
+ case GT:
+ /* > C is equivalent to >= (C + 1); we do this for C < 0 */
+ if (const_op < 0)
+ {
+ const_op += 1;
+ op1 = GEN_INT (const_op);
+ code = GE;
+ }
+
+ /* If we are doing a > 0 comparison on a value known to have
+ a zero sign bit, we can replace this with != 0. */
+ else if (const_op == 0
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (op0, mode)
+ & ((HOST_WIDE_INT) 1 << (mode_width - 1))) == 0)
+ code = NE;
+ break;
+
+ case LTU:
+ /* < C is equivalent to <= (C - 1). */
+ if (const_op > 0)
+ {
+ const_op -= 1;
+ op1 = GEN_INT (const_op);
+ code = LEU;
+ /* ... fall through ... */
+ }
+
+ /* (unsigned) < 0x80000000 is equivalent to >= 0. */
+ else if (const_op == (HOST_WIDE_INT) 1 << (mode_width - 1))
+ {
+ const_op = 0, op1 = const0_rtx;
+ code = GE;
+ break;
+ }
+ else
+ break;
+
+ case LEU:
+ /* unsigned <= 0 is equivalent to == 0 */
+ if (const_op == 0)
+ code = EQ;
+
+ /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
+ else if (const_op == ((HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
+ {
+ const_op = 0, op1 = const0_rtx;
+ code = GE;
+ }
+ break;
+
+ case GEU:
+ /* >= C is equivalent to > (C - 1). */
+ if (const_op > 1)
+ {
+ const_op -= 1;
+ op1 = GEN_INT (const_op);
+ code = GTU;
+ /* ... fall through ... */
+ }
+
+ /* (unsigned) >= 0x80000000 is equivalent to < 0. */
+ else if (const_op == (HOST_WIDE_INT) 1 << (mode_width - 1))
+ {
+ const_op = 0, op1 = const0_rtx;
+ code = LT;
+ }
+ else
+ break;
+
+ case GTU:
+ /* unsigned > 0 is equivalent to != 0 */
+ if (const_op == 0)
+ code = NE;
+
+ /* (unsigned) > 0x7fffffff is equivalent to < 0. */
+ else if (const_op == ((HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
+ {
+ const_op = 0, op1 = const0_rtx;
+ code = LT;
+ }
+ break;
+ }
+
+ /* Compute some predicates to simplify code below. */
+
+ equality_comparison_p = (code == EQ || code == NE);
+ sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
+ unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
+ || code == GEU);
+
+ /* If this is a sign bit comparison and we can do arithmetic in
+ MODE, say that we will only be needing the sign bit of OP0. */
+ if (sign_bit_comparison_p
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ op0 = force_to_mode (op0, mode,
+ ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (mode) - 1)),
+ NULL_RTX, 0);
+
+ /* Now try cases based on the opcode of OP0. If none of the cases
+ does a "continue", we exit this loop immediately after the
+ switch. */
+
+ switch (GET_CODE (op0))
+ {
+ case ZERO_EXTRACT:
+ /* If we are extracting a single bit from a variable position in
+ a constant that has only a single bit set and are comparing it
+ with zero, we can convert this into an equality comparison
+ between the position and the location of the single bit. We can't
+ do this if bits are big-endian and we don't have an extzv since we then
+ can't know what mode to use for the endianness adjustment. */
+
+#if ! BITS_BIG_ENDIAN || defined (HAVE_extzv)
+ if (GET_CODE (XEXP (op0, 0)) == CONST_INT
+ && XEXP (op0, 1) == const1_rtx
+ && equality_comparison_p && const_op == 0
+ && (i = exact_log2 (INTVAL (XEXP (op0, 0)))) >= 0)
+ {
+#if BITS_BIG_ENDIAN
+ i = (GET_MODE_BITSIZE
+ (insn_operand_mode[(int) CODE_FOR_extzv][1]) - 1 - i);
+#endif
+
+ op0 = XEXP (op0, 2);
+ op1 = GEN_INT (i);
+ const_op = i;
+
+ /* Result is nonzero iff shift count is equal to I. */
+ code = reverse_condition (code);
+ continue;
+ }
+#endif
+
+ /* ... fall through ... */
+
+ case SIGN_EXTRACT:
+ tem = expand_compound_operation (op0);
+ if (tem != op0)
+ {
+ op0 = tem;
+ continue;
+ }
+ break;
+
+ case NOT:
+ /* If testing for equality, we can take the NOT of the constant. */
+ if (equality_comparison_p
+ && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
+ {
+ op0 = XEXP (op0, 0);
+ op1 = tem;
+ continue;
+ }
+
+ /* If just looking at the sign bit, reverse the sense of the
+ comparison. */
+ if (sign_bit_comparison_p)
+ {
+ op0 = XEXP (op0, 0);
+ code = (code == GE ? LT : GE);
+ continue;
+ }
+ break;
+
+ case NEG:
+ /* If testing for equality, we can take the NEG of the constant. */
+ if (equality_comparison_p
+ && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
+ {
+ op0 = XEXP (op0, 0);
+ op1 = tem;
+ continue;
+ }
+
+ /* The remaining cases only apply to comparisons with zero. */
+ if (const_op != 0)
+ break;
+
+ /* When X is ABS or is known positive,
+ (neg X) is < 0 if and only if X != 0. */
+
+ if (sign_bit_comparison_p
+ && (GET_CODE (XEXP (op0, 0)) == ABS
+ || (mode_width <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (XEXP (op0, 0), mode)
+ & ((HOST_WIDE_INT) 1 << (mode_width - 1))) == 0)))
+ {
+ op0 = XEXP (op0, 0);
+ code = (code == LT ? NE : EQ);
+ continue;
+ }
+
+ /* If we have NEG of something whose two high-order bits are the
+ same, we know that "(-a) < 0" is equivalent to "a > 0". */
+ if (num_sign_bit_copies (op0, mode) >= 2)
+ {
+ op0 = XEXP (op0, 0);
+ code = swap_condition (code);
+ continue;
+ }
+ break;
+
+ case ROTATE:
+ /* If we are testing equality and our count is a constant, we
+ can perform the inverse operation on our RHS. */
+ if (equality_comparison_p && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (tem = simplify_binary_operation (ROTATERT, mode,
+ op1, XEXP (op0, 1))) != 0)
+ {
+ op0 = XEXP (op0, 0);
+ op1 = tem;
+ continue;
+ }
+
+ /* If we are doing a < 0 or >= 0 comparison, it means we are testing
+ a particular bit. Convert it to an AND with a constant having
+ only that bit set; this will be converted into a ZERO_EXTRACT. */
+ if (const_op == 0 && sign_bit_comparison_p
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT)
+ {
+ op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
+ ((HOST_WIDE_INT) 1
+ << (mode_width - 1
+ - INTVAL (XEXP (op0, 1)))));
+ code = (code == LT ? NE : EQ);
+ continue;
+ }
+
+ /* ... fall through ... */
+
+ case ABS:
+ /* ABS is ignorable inside an equality comparison with zero. */
+ if (const_op == 0 && equality_comparison_p)
+ {
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+ break;
+
+
+ case SIGN_EXTEND:
+ /* Can simplify (compare (zero/sign_extend FOO) CONST)
+ to (compare FOO CONST) if CONST fits in FOO's mode and we
+ are either testing inequality or have an unsigned comparison
+ with ZERO_EXTEND or a signed comparison with SIGN_EXTEND. */
+ if (! unsigned_comparison_p
+ && (GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && ((unsigned HOST_WIDE_INT) const_op
+ < (((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))) - 1)))))
+ {
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+ break;
+
+ case SUBREG:
+ /* Check for the case where we are comparing A - C1 with C2,
+ both constants are smaller than 1/2 the maximum positive
+ value in MODE, and the comparison is equality or unsigned.
+ In that case, if A is either zero-extended to MODE or has
+ sufficient sign bits so that the high-order bit in MODE
+ is a copy of the sign in the inner mode, we can prove that it is
+ safe to do the operation in the wider mode. This simplifies
+ many range checks. */
+
+ if (mode_width <= HOST_BITS_PER_WIDE_INT
+ && subreg_lowpart_p (op0)
+ && GET_CODE (SUBREG_REG (op0)) == PLUS
+ && GET_CODE (XEXP (SUBREG_REG (op0), 1)) == CONST_INT
+ && INTVAL (XEXP (SUBREG_REG (op0), 1)) < 0
+ && (- INTVAL (XEXP (SUBREG_REG (op0), 1))
+ < GET_MODE_MASK (mode) / 2)
+ && (unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode) / 2
+ && (0 == (nonzero_bits (XEXP (SUBREG_REG (op0), 0),
+ GET_MODE (SUBREG_REG (op0)))
+ & ~ GET_MODE_MASK (mode))
+ || (num_sign_bit_copies (XEXP (SUBREG_REG (op0), 0),
+ GET_MODE (SUBREG_REG (op0)))
+ > (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
+ - GET_MODE_BITSIZE (mode)))))
+ {
+ op0 = SUBREG_REG (op0);
+ continue;
+ }
+
+ /* If the inner mode is narrower and we are extracting the low part,
+ we can treat the SUBREG as if it were a ZERO_EXTEND. */
+ if (subreg_lowpart_p (op0)
+ && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) < mode_width)
+ /* Fall through */ ;
+ else
+ break;
+
+ /* ... fall through ... */
+
+ case ZERO_EXTEND:
+ if ((unsigned_comparison_p || equality_comparison_p)
+ && (GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && ((unsigned HOST_WIDE_INT) const_op
+ < GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))))
+ {
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+ break;
+
+ case PLUS:
+ /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
+ this for equality comparisons due to pathological cases involving
+ overflows. */
+ if (equality_comparison_p
+ && 0 != (tem = simplify_binary_operation (MINUS, mode,
+ op1, XEXP (op0, 1))))
+ {
+ op0 = XEXP (op0, 0);
+ op1 = tem;
+ continue;
+ }
+
+ /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
+ if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
+ && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
+ {
+ op0 = XEXP (XEXP (op0, 0), 0);
+ code = (code == LT ? EQ : NE);
+ continue;
+ }
+ break;
+
+ case MINUS:
+ /* (eq (minus A B) C) -> (eq A (plus B C)) or
+ (eq B (minus A C)), whichever simplifies. We can only do
+ this for equality comparisons due to pathological cases involving
+ overflows. */
+ if (equality_comparison_p
+ && 0 != (tem = simplify_binary_operation (PLUS, mode,
+ XEXP (op0, 1), op1)))
+ {
+ op0 = XEXP (op0, 0);
+ op1 = tem;
+ continue;
+ }
+
+ if (equality_comparison_p
+ && 0 != (tem = simplify_binary_operation (MINUS, mode,
+ XEXP (op0, 0), op1)))
+ {
+ op0 = XEXP (op0, 1);
+ op1 = tem;
+ continue;
+ }
+
+ /* The sign bit of (minus (ashiftrt X C) X), where C is the number
+ of bits in X minus 1, is one iff X > 0. */
+ if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
+ && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
+ && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
+ {
+ op0 = XEXP (op0, 1);
+ code = (code == GE ? LE : GT);
+ continue;
+ }
+ break;
+
+ case XOR:
+ /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
+ if C is zero or B is a constant. */
+ if (equality_comparison_p
+ && 0 != (tem = simplify_binary_operation (XOR, mode,
+ XEXP (op0, 1), op1)))
+ {
+ op0 = XEXP (op0, 0);
+ op1 = tem;
+ continue;
+ }
+ break;
+
+ case EQ: case NE:
+ case LT: case LTU: case LE: case LEU:
+ case GT: case GTU: case GE: case GEU:
+ /* We can't do anything if OP0 is a condition code value, rather
+ than an actual data value. */
+ if (const_op != 0
+#ifdef HAVE_cc0
+ || XEXP (op0, 0) == cc0_rtx
+#endif
+ || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
+ break;
+
+ /* Get the two operands being compared. */
+ if (GET_CODE (XEXP (op0, 0)) == COMPARE)
+ tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
+ else
+ tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
+
+ /* Check for the cases where we simply want the result of the
+ earlier test or the opposite of that result. */
+ if (code == NE
+ || (code == EQ && reversible_comparison_p (op0))
+ || (GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
+ && (STORE_FLAG_VALUE
+ & (((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
+ && (code == LT
+ || (code == GE && reversible_comparison_p (op0)))))
+ {
+ code = (code == LT || code == NE
+ ? GET_CODE (op0) : reverse_condition (GET_CODE (op0)));
+ op0 = tem, op1 = tem1;
+ continue;
+ }
+ break;
+
+ case IOR:
+ /* The sign bit of (ior (plus X (const_int -1)) X) is non-zero
+ iff X <= 0. */
+ if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
+ && XEXP (XEXP (op0, 0), 1) == constm1_rtx
+ && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
+ {
+ op0 = XEXP (op0, 1);
+ code = (code == GE ? GT : LE);
+ continue;
+ }
+ break;
+
+ case AND:
+ /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
+ will be converted to a ZERO_EXTRACT later. */
+ if (const_op == 0 && equality_comparison_p
+ && GET_CODE (XEXP (op0, 0)) == ASHIFT
+ && XEXP (XEXP (op0, 0), 0) == const1_rtx)
+ {
+ op0 = simplify_and_const_int
+ (op0, mode, gen_rtx_combine (LSHIFTRT, mode,
+ XEXP (op0, 1),
+ XEXP (XEXP (op0, 0), 1)),
+ (HOST_WIDE_INT) 1);
+ continue;
+ }
+
+ /* If we are comparing (and (lshiftrt X C1) C2) for equality with
+ zero and X is a comparison and C1 and C2 describe only bits set
+ in STORE_FLAG_VALUE, we can compare with X. */
+ if (const_op == 0 && equality_comparison_p
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
+ && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
+ << INTVAL (XEXP (XEXP (op0, 0), 1)));
+ if ((~ STORE_FLAG_VALUE & mask) == 0
+ && (GET_RTX_CLASS (GET_CODE (XEXP (XEXP (op0, 0), 0))) == '<'
+ || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
+ && GET_RTX_CLASS (GET_CODE (tem)) == '<')))
+ {
+ op0 = XEXP (XEXP (op0, 0), 0);
+ continue;
+ }
+ }
+
+ /* If we are doing an equality comparison of an AND of a bit equal
+ to the sign bit, replace this with a LT or GE comparison of
+ the underlying value. */
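+ /* For example, (eq (and:SI X 0x80000000) 0) becomes (ge X 0). */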
+ if (equality_comparison_p
+ && const_op == 0
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
+ == (HOST_WIDE_INT) 1 << (mode_width - 1)))
+ {
+ op0 = XEXP (op0, 0);
+ code = (code == EQ ? GE : LT);
+ continue;
+ }
+
+ /* If this AND operation is really a ZERO_EXTEND from a narrower
+ mode, the constant fits within that mode, and this is either an
+ equality or unsigned comparison, try to do this comparison in
+ the narrower mode. */
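+ /* For example, (ltu (and:SI X 0xff) 10) can be done as a QImode
+ comparison of the low part of X against 10. */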
+ if ((equality_comparison_p || unsigned_comparison_p)
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (i = exact_log2 ((INTVAL (XEXP (op0, 1))
+ & GET_MODE_MASK (mode))
+ + 1)) >= 0
+ && const_op >> i == 0
+ && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode)
+ {
+ op0 = gen_lowpart_for_combine (tmode, XEXP (op0, 0));
+ continue;
+ }
+ break;
+
+ case ASHIFT:
+ /* If we have (compare (ashift FOO N) (const_int C)) and
+ the high order N bits of FOO (N+1 if an inequality comparison)
+ are known to be zero, we can do this by comparing FOO with C
+ shifted right N bits so long as the low-order N bits of C are
+ zero. */
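+ /* For example, (eq (ashift X 2) 20) becomes (eq X 5) when the
+ two high-order bits of X are known to be zero. */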
+ if (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) >= 0
+ && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
+ < HOST_BITS_PER_WIDE_INT)
+ && ((const_op
+ & (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1)) == 0)
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (XEXP (op0, 0), mode)
+ & ~ (mask >> (INTVAL (XEXP (op0, 1))
+ + ! equality_comparison_p))) == 0)
+ {
+ const_op >>= INTVAL (XEXP (op0, 1));
+ op1 = GEN_INT (const_op);
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+
+ /* If we are doing a sign bit comparison, it means we are testing
+ a particular bit. Convert it to the appropriate AND. */
+ if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT)
+ {
+ op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
+ ((HOST_WIDE_INT) 1
+ << (mode_width - 1
+ - INTVAL (XEXP (op0, 1)))));
+ code = (code == LT ? NE : EQ);
+ continue;
+ }
+
+ /* If this is an equality comparison with zero and we are shifting
+ the low bit to the sign bit, we can convert this to an AND of the
+ low-order bit. */
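+ /* For example, (eq (ashift X 31) 0) in SImode becomes
+ (eq (and X 1) 0). */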
+ if (const_op == 0 && equality_comparison_p
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) == mode_width - 1)
+ {
+ op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
+ (HOST_WIDE_INT) 1);
+ continue;
+ }
+ break;
+
+ case ASHIFTRT:
+ /* If this is an equality comparison with zero, we can do this
+ as a logical shift, which might be much simpler. */
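+ /* For example, (eq (ashiftrt X 24) 0) can be tested as
+ (eq (lshiftrt X 24) 0); both are zero exactly when the top
+ eight bits of X:SI are zero. */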
+ if (equality_comparison_p && const_op == 0
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+ {
+ op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
+ XEXP (op0, 0),
+ INTVAL (XEXP (op0, 1)));
+ continue;
+ }
+
+ /* If OP0 is a sign extension and CODE is not an unsigned comparison,
+ do the comparison in a narrower mode. */
+ if (! unsigned_comparison_p
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && GET_CODE (XEXP (op0, 0)) == ASHIFT
+ && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
+ && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
+ MODE_INT, 1)) != BLKmode
+ && ((unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (tmode)
+ || ((unsigned HOST_WIDE_INT) - const_op
+ <= GET_MODE_MASK (tmode))))
+ {
+ op0 = gen_lowpart_for_combine (tmode, XEXP (XEXP (op0, 0), 0));
+ continue;
+ }
+
+ /* ... fall through ... */
+ case LSHIFTRT:
+ /* If we have (compare (xshiftrt FOO N) (const_int C)) and
+ the low order N bits of FOO are known to be zero, we can do this
+ by comparing FOO with C shifted left N bits so long as no
+ overflow occurs. */
+ if (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) >= 0
+ && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
+ && mode_width <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (XEXP (op0, 0), mode)
+ & (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1)) == 0
+ && (const_op == 0
+ || (floor_log2 (const_op) + INTVAL (XEXP (op0, 1))
+ < mode_width)))
+ {
+ const_op <<= INTVAL (XEXP (op0, 1));
+ op1 = GEN_INT (const_op);
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+
+ /* If we are using this shift to extract just the sign bit, we
+ can replace this with an LT or GE comparison. */
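+ /* For example, (ne (lshiftrt X 31) 0) in SImode becomes
+ (lt X 0). */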
+ if (const_op == 0
+ && (equality_comparison_p || sign_bit_comparison_p)
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && INTVAL (XEXP (op0, 1)) == mode_width - 1)
+ {
+ op0 = XEXP (op0, 0);
+ code = (code == NE || code == GT ? LT : GE);
+ continue;
+ }
+ break;
+ }
+
+ break;
+ }
+
+ /* Now make any compound operations involved in this comparison. Then,
+ check for an outermost SUBREG on OP0 that isn't doing anything or is
+ paradoxical. The latter case can only occur when it is known that the
+ "extra" bits will be zero. Therefore, it is safe to remove the SUBREG.
+ We can never remove a SUBREG for a non-equality comparison because the
+ sign bit is in a different place in the underlying object. */
+
+ op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
+ op1 = make_compound_operation (op1, SET);
+
+ if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
+ && (code == NE || code == EQ)
+ && ((GET_MODE_SIZE (GET_MODE (op0))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))))
+ {
+ op0 = SUBREG_REG (op0);
+ op1 = gen_lowpart_for_combine (GET_MODE (op0), op1);
+ }
+
+ else if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
+ && (code == NE || code == EQ)
+ && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
+ <= HOST_BITS_PER_WIDE_INT)
+ && (nonzero_bits (SUBREG_REG (op0), GET_MODE (SUBREG_REG (op0)))
+ & ~ GET_MODE_MASK (GET_MODE (op0))) == 0
+ && (tem = gen_lowpart_for_combine (GET_MODE (SUBREG_REG (op0)),
+ op1),
+ (nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
+ & ~ GET_MODE_MASK (GET_MODE (op0))) == 0))
+ op0 = SUBREG_REG (op0), op1 = tem;
+
+ /* We now do the opposite procedure: Some machines don't have compare
+ insns in all modes. If OP0's mode is an integer mode smaller than a
+ word and we can't do a compare in that mode, see if there is a larger
+ mode for which we can do the compare. There are a number of cases in
+ which we can use the wider mode. */
+
+ mode = GET_MODE (op0);
+ if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) < UNITS_PER_WORD
+ && cmp_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing)
+ for (tmode = GET_MODE_WIDER_MODE (mode);
+ (tmode != VOIDmode
+ && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT);
+ tmode = GET_MODE_WIDER_MODE (tmode))
+ if (cmp_optab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)
+ {
+ /* If the only nonzero bits in OP0 and OP1 are those in the
+ narrower mode and this is an equality or unsigned comparison,
+ we can use the wider mode. Similarly for sign-extended
+ values, in which case it is true for all comparisons. */
+ if (((code == EQ || code == NE
+ || code == GEU || code == GTU || code == LEU || code == LTU)
+ && (nonzero_bits (op0, tmode) & ~ GET_MODE_MASK (mode)) == 0
+ && (nonzero_bits (op1, tmode) & ~ GET_MODE_MASK (mode)) == 0)
+ || ((num_sign_bit_copies (op0, tmode)
+ > GET_MODE_BITSIZE (tmode) - GET_MODE_BITSIZE (mode))
+ && (num_sign_bit_copies (op1, tmode)
+ > GET_MODE_BITSIZE (tmode) - GET_MODE_BITSIZE (mode))))
+ {
+ op0 = gen_lowpart_for_combine (tmode, op0);
+ op1 = gen_lowpart_for_combine (tmode, op1);
+ break;
+ }
+
+ /* If this is a test for negative, we can make an explicit
+ test of the sign bit. */
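+
+ /* For example, a QImode test of X < 0 can be done in SImode as
+ (ne (and:SI (subreg:SI X 0) 0x80) 0). */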
+
+ if (op1 == const0_rtx && (code == LT || code == GE)
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ {
+ op0 = gen_binary (AND, tmode,
+ gen_lowpart_for_combine (tmode, op0),
+ GEN_INT ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (mode) - 1)));
+ code = (code == LT) ? NE : EQ;
+ break;
+ }
+ }
+
+#ifdef CANONICALIZE_COMPARISON
+ /* If this machine only supports a subset of valid comparisons, see if we
+ can convert an unsupported one into a supported one. */
+ CANONICALIZE_COMPARISON (code, op0, op1);
+#endif
+
+ *pop0 = op0;
+ *pop1 = op1;
+
+ return code;
+}
+
+/* Return 1 if we know that X, a comparison operation, is not operating
+ on a floating-point value or is EQ or NE, meaning that we can safely
+ reverse it. */
+
+static int
+reversible_comparison_p (x)
+ rtx x;
+{
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || flag_fast_math
+ || GET_CODE (x) == NE || GET_CODE (x) == EQ)
+ return 1;
+
+ switch (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))))
+ {
+ case MODE_INT:
+ case MODE_PARTIAL_INT:
+ case MODE_COMPLEX_INT:
+ return 1;
+
+ case MODE_CC:
+ /* If the mode of the condition codes tells us that this is safe,
+ we need look no further. */
+ if (REVERSIBLE_CC_MODE (GET_MODE (XEXP (x, 0))))
+ return 1;
+
+ /* Otherwise try and find where the condition codes were last set and
+ use that. */
+ x = get_last_value (XEXP (x, 0));
+ return (x && GET_CODE (x) == COMPARE
+ && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0))));
+ }
+
+ return 0;
+}
+
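+/* Illustrative sketch, not part of the imported source: the reason
+   only EQ and NE are safely reversible for IEEE floating point is
+   NaN.  With a NaN operand, "a < b" and "a >= b" are both false, so
+   negating one comparison does not yield the other:  */
+
+static int
+float_reversal_unsafe_sketch (a, b)
+     double a, b;
+{
+  /* Nonzero exactly when !(a < b) and (a >= b) disagree, i.e. when
+     either operand is a NaN.  */
+  return (!(a < b)) != (a >= b);
+}
+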
+/* Utility function for the following routine.  Called when X is part of a value
+ being stored into reg_last_set_value. Sets reg_last_set_table_tick
+   for each register mentioned.  Similar to mention_regs in cse.c.  */
+
+static void
+update_table_tick (x)
+ rtx x;
+{
+ register enum rtx_code code = GET_CODE (x);
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ if (code == REG)
+ {
+ int regno = REGNO (x);
+ int endregno = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+
+ for (i = regno; i < endregno; i++)
+ reg_last_set_table_tick[i] = label_tick;
+
+ return;
+ }
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ /* Note that we can't have an "E" in values stored; see
+ get_last_value_validate. */
+ if (fmt[i] == 'e')
+ update_table_tick (XEXP (x, i));
+}
+
+/* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
+ are saying that the register is clobbered and we no longer know its
+ value. If INSN is zero, don't update reg_last_set; this is only permitted
+ with VALUE also zero and is used to invalidate the register. */
+
+static void
+record_value_for_reg (reg, insn, value)
+ rtx reg;
+ rtx insn;
+ rtx value;
+{
+ int regno = REGNO (reg);
+ int endregno = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (reg)) : 1);
+ int i;
+
+ /* If VALUE contains REG and we have a previous value for REG, substitute
+ the previous value. */
+ if (value && insn && reg_overlap_mentioned_p (reg, value))
+ {
+ rtx tem;
+
+ /* Set things up so get_last_value is allowed to see anything set up to
+ our insn. */
+ subst_low_cuid = INSN_CUID (insn);
+ tem = get_last_value (reg);
+
+ if (tem)
+ value = replace_rtx (copy_rtx (value), reg, tem);
+ }
+
+ /* For each register modified, show we don't know its value, that
+ we don't know about its bitwise content, that its value has been
+ updated, and that we don't know the location of the death of the
+ register. */
+ for (i = regno; i < endregno; i ++)
+ {
+ if (insn)
+ reg_last_set[i] = insn;
+ reg_last_set_value[i] = 0;
+ reg_last_set_mode[i] = 0;
+ reg_last_set_nonzero_bits[i] = 0;
+ reg_last_set_sign_bit_copies[i] = 0;
+ reg_last_death[i] = 0;
+ }
+
+ /* Mark registers that are being referenced in this value. */
+ if (value)
+ update_table_tick (value);
+
+  /* Now update the status of each register being set.
+     If someone is already using this register in this block, mark the
+     register as invalid, since we would otherwise confuse the two lives
+     in this basic block; every use of this register is then treated as
+     invalid.  In cse, we scan the table to invalidate all entries using
+     this register, but that is too much work for us.  */
+
+ for (i = regno; i < endregno; i++)
+ {
+ reg_last_set_label[i] = label_tick;
+ if (value && reg_last_set_table_tick[i] == label_tick)
+ reg_last_set_invalid[i] = 1;
+ else
+ reg_last_set_invalid[i] = 0;
+ }
+
+ /* The value being assigned might refer to X (like in "x++;"). In that
+ case, we must replace it with (clobber (const_int 0)) to prevent
+ infinite loops. */
+ if (value && ! get_last_value_validate (&value,
+ reg_last_set_label[regno], 0))
+ {
+ value = copy_rtx (value);
+ if (! get_last_value_validate (&value, reg_last_set_label[regno], 1))
+ value = 0;
+ }
+
+ /* For the main register being modified, update the value, the mode, the
+ nonzero bits, and the number of sign bit copies. */
+
+ reg_last_set_value[regno] = value;
+
+ if (value)
+ {
+ subst_low_cuid = INSN_CUID (insn);
+ reg_last_set_mode[regno] = GET_MODE (reg);
+ reg_last_set_nonzero_bits[regno] = nonzero_bits (value, GET_MODE (reg));
+ reg_last_set_sign_bit_copies[regno]
+ = num_sign_bit_copies (value, GET_MODE (reg));
+ }
+}
+
+/* Used for communication between the following two routines. */
+static rtx record_dead_insn;
+
+/* Called via note_stores from record_dead_and_set_regs to handle one
+ SET or CLOBBER in an insn. */
+
+static void
+record_dead_and_set_regs_1 (dest, setter)
+ rtx dest, setter;
+{
+ if (GET_CODE (dest) == REG)
+ {
+ /* If we are setting the whole register, we know its value. Otherwise
+ show that we don't know the value. We can handle SUBREG in
+ some cases. */
+ if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
+ record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
+ else if (GET_CODE (setter) == SET
+ && GET_CODE (SET_DEST (setter)) == SUBREG
+ && SUBREG_REG (SET_DEST (setter)) == dest
+ && GET_MODE_BITSIZE (GET_MODE (dest)) <= BITS_PER_WORD
+ && subreg_lowpart_p (SET_DEST (setter)))
+ record_value_for_reg (dest, record_dead_insn,
+ gen_lowpart_for_combine (GET_MODE (dest),
+ SET_SRC (setter)));
+ else
+ record_value_for_reg (dest, record_dead_insn, NULL_RTX);
+ }
+ else if (GET_CODE (dest) == MEM
+ /* Ignore pushes, they clobber nothing. */
+ && ! push_operand (dest, GET_MODE (dest)))
+ mem_last_set = INSN_CUID (record_dead_insn);
+}
+
+/* Update the records of when each REG was most recently set or killed
+ for the things done by INSN. This is the last thing done in processing
+ INSN in the combiner loop.
+
+ We update reg_last_set, reg_last_set_value, reg_last_set_mode,
+ reg_last_set_nonzero_bits, reg_last_set_sign_bit_copies, reg_last_death,
+ and also the similar information mem_last_set (which insn most recently
+ modified memory) and last_call_cuid (which insn was the most recent
+ subroutine call). */
+
+static void
+record_dead_and_set_regs (insn)
+ rtx insn;
+{
+ register rtx link;
+ int i;
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ {
+ if (REG_NOTE_KIND (link) == REG_DEAD
+ && GET_CODE (XEXP (link, 0)) == REG)
+ {
+ int regno = REGNO (XEXP (link, 0));
+ int endregno
+ = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (XEXP (link, 0)))
+ : 1);
+
+ for (i = regno; i < endregno; i++)
+ reg_last_death[i] = insn;
+ }
+ else if (REG_NOTE_KIND (link) == REG_INC)
+ record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
+ }
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i])
+ {
+ reg_last_set_value[i] = 0;
+ reg_last_set_mode[i] = 0;
+ reg_last_set_nonzero_bits[i] = 0;
+ reg_last_set_sign_bit_copies[i] = 0;
+ reg_last_death[i] = 0;
+ }
+
+ last_call_cuid = mem_last_set = INSN_CUID (insn);
+ }
+
+ record_dead_insn = insn;
+ note_stores (PATTERN (insn), record_dead_and_set_regs_1);
+}
+
+/* Utility routine for the following function. Verify that all the registers
+ mentioned in *LOC are valid when *LOC was part of a value set when
+ label_tick == TICK. Return 0 if some are not.
+
+ If REPLACE is non-zero, replace the invalid reference with
+ (clobber (const_int 0)) and return 1. This replacement is useful because
+ we often can get useful information about the form of a value (e.g., if
+ it was produced by a shift that always produces -1 or 0) even though
+ we don't know exactly what registers it was produced from. */
+
+static int
+get_last_value_validate (loc, tick, replace)
+ rtx *loc;
+ int tick;
+ int replace;
+{
+ rtx x = *loc;
+ char *fmt = GET_RTX_FORMAT (GET_CODE (x));
+ int len = GET_RTX_LENGTH (GET_CODE (x));
+ int i;
+
+ if (GET_CODE (x) == REG)
+ {
+ int regno = REGNO (x);
+ int endregno = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+ int j;
+
+ for (j = regno; j < endregno; j++)
+ if (reg_last_set_invalid[j]
+ /* If this is a pseudo-register that was only set once, it is
+ always valid. */
+ || (! (regno >= FIRST_PSEUDO_REGISTER && reg_n_sets[regno] == 1)
+ && reg_last_set_label[j] > tick))
+ {
+ if (replace)
+ *loc = gen_rtx (CLOBBER, GET_MODE (x), const0_rtx);
+ return replace;
+ }
+
+ return 1;
+ }
+
+ for (i = 0; i < len; i++)
+ if ((fmt[i] == 'e'
+ && get_last_value_validate (&XEXP (x, i), tick, replace) == 0)
+ /* Don't bother with these. They shouldn't occur anyway. */
+ || fmt[i] == 'E')
+ return 0;
+
+ /* If we haven't found a reason for it to be invalid, it is valid. */
+ return 1;
+}
+
+/* Get the last value assigned to X, if known. Some registers
+ in the value may be replaced with (clobber (const_int 0)) if their value
+   is no longer known reliably.  */
+
+static rtx
+get_last_value (x)
+ rtx x;
+{
+ int regno;
+ rtx value;
+
+ /* If this is a non-paradoxical SUBREG, get the value of its operand and
+ then convert it to the desired mode. If this is a paradoxical SUBREG,
+ we cannot predict what values the "extra" bits might have. */
+ if (GET_CODE (x) == SUBREG
+ && subreg_lowpart_p (x)
+ && (GET_MODE_SIZE (GET_MODE (x))
+ <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ && (value = get_last_value (SUBREG_REG (x))) != 0)
+ return gen_lowpart_for_combine (GET_MODE (x), value);
+
+ if (GET_CODE (x) != REG)
+ return 0;
+
+ regno = REGNO (x);
+ value = reg_last_set_value[regno];
+
+ /* If we don't have a value or if it isn't for this basic block, return 0. */
+
+ if (value == 0
+ || (reg_n_sets[regno] != 1
+ && reg_last_set_label[regno] != label_tick))
+ return 0;
+
+  /* If the value was set in a later insn than the ones we are processing,
+ we can't use it even if the register was only set once, but make a quick
+ check to see if the previous insn set it to something. This is commonly
+ the case when the same pseudo is used by repeated insns. */
+
+ if (INSN_CUID (reg_last_set[regno]) >= subst_low_cuid)
+ {
+ rtx insn, set;
+
+ /* If there is an insn that is supposed to be immediately
+ in front of subst_insn, use it. */
+ if (subst_prev_insn != 0)
+ insn = subst_prev_insn;
+ else
+ for (insn = prev_nonnote_insn (subst_insn);
+ insn && INSN_CUID (insn) >= subst_low_cuid;
+ insn = prev_nonnote_insn (insn))
+ ;
+
+ if (insn
+ && (set = single_set (insn)) != 0
+ && rtx_equal_p (SET_DEST (set), x))
+ {
+ value = SET_SRC (set);
+
+ /* Make sure that VALUE doesn't reference X. Replace any
+	     explicit references with a CLOBBER.  If there are any remaining
+ references (rare), don't use the value. */
+
+ if (reg_mentioned_p (x, value))
+ value = replace_rtx (copy_rtx (value), x,
+ gen_rtx (CLOBBER, GET_MODE (x), const0_rtx));
+
+ if (reg_overlap_mentioned_p (x, value))
+ return 0;
+ }
+ else
+ return 0;
+ }
+
+ /* If the value has all its registers valid, return it. */
+ if (get_last_value_validate (&value, reg_last_set_label[regno], 0))
+ return value;
+
+ /* Otherwise, make a copy and replace any invalid register with
+ (clobber (const_int 0)). If that fails for some reason, return 0. */
+
+ value = copy_rtx (value);
+ if (get_last_value_validate (&value, reg_last_set_label[regno], 1))
+ return value;
+
+ return 0;
+}
+
+/* Return nonzero if expression X refers to a REG or to memory
+ that is set in an instruction more recent than FROM_CUID. */
+
+static int
+use_crosses_set_p (x, from_cuid)
+ register rtx x;
+ int from_cuid;
+{
+ register char *fmt;
+ register int i;
+ register enum rtx_code code = GET_CODE (x);
+
+ if (code == REG)
+ {
+ register int regno = REGNO (x);
+ int endreg = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+
+#ifdef PUSH_ROUNDING
+ /* Don't allow uses of the stack pointer to be moved,
+ because we don't know whether the move crosses a push insn. */
+ if (regno == STACK_POINTER_REGNUM)
+ return 1;
+#endif
+ for (;regno < endreg; regno++)
+ if (reg_last_set[regno]
+ && INSN_CUID (reg_last_set[regno]) > from_cuid)
+ return 1;
+ return 0;
+ }
+
+ if (code == MEM && mem_last_set > from_cuid)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (code);
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (use_crosses_set_p (XVECEXP (x, i, j), from_cuid))
+ return 1;
+ }
+ else if (fmt[i] == 'e'
+ && use_crosses_set_p (XEXP (x, i), from_cuid))
+ return 1;
+ }
+ return 0;
+}
+
+/* Define three variables used for communication between the following
+ routines. */
+
+static int reg_dead_regno, reg_dead_endregno;
+static int reg_dead_flag;
+
+/* Function called via note_stores from reg_dead_at_p.
+
+   If DEST is within [reg_dead_regno, reg_dead_endregno), set
+   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */
+
+static void
+reg_dead_at_p_1 (dest, x)
+ rtx dest;
+ rtx x;
+{
+ int regno, endregno;
+
+ if (GET_CODE (dest) != REG)
+ return;
+
+ regno = REGNO (dest);
+ endregno = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (dest)) : 1);
+
+ if (reg_dead_endregno > regno && reg_dead_regno < endregno)
+ reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
+}
+
+/* Return non-zero if REG is known to be dead at INSN.
+
+ We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
+ referencing REG, it is dead. If we hit a SET referencing REG, it is
+ live. Otherwise, see if it is live or dead at the start of the basic
+ block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
+ must be assumed to be always live. */
+
+static int
+reg_dead_at_p (reg, insn)
+ rtx reg;
+ rtx insn;
+{
+ int block, i;
+
+ /* Set variables for reg_dead_at_p_1. */
+ reg_dead_regno = REGNO (reg);
+ reg_dead_endregno = reg_dead_regno + (reg_dead_regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (reg_dead_regno,
+ GET_MODE (reg))
+ : 1);
+
+ reg_dead_flag = 0;
+
+ /* Check that reg isn't mentioned in NEWPAT_USED_REGS. */
+ if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
+ {
+ for (i = reg_dead_regno; i < reg_dead_endregno; i++)
+ if (TEST_HARD_REG_BIT (newpat_used_regs, i))
+ return 0;
+ }
+
+ /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, label, or
+ beginning of function. */
+ for (; insn && GET_CODE (insn) != CODE_LABEL;
+ insn = prev_nonnote_insn (insn))
+ {
+ note_stores (PATTERN (insn), reg_dead_at_p_1);
+ if (reg_dead_flag)
+ return reg_dead_flag == 1 ? 1 : 0;
+
+ if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
+ return 1;
+ }
+
+ /* Get the basic block number that we were in. */
+ if (insn == 0)
+ block = 0;
+ else
+ {
+ for (block = 0; block < n_basic_blocks; block++)
+ if (insn == basic_block_head[block])
+ break;
+
+ if (block == n_basic_blocks)
+ return 0;
+ }
+
+ for (i = reg_dead_regno; i < reg_dead_endregno; i++)
+ if (basic_block_live_at_start[block][i / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS)))
+ return 0;
+
+ return 1;
+}
+
+/* Note hard registers in X that are used. This code is similar to
+ that in flow.c, but much simpler since we don't care about pseudos. */
+
+static void
+mark_used_regs_combine (x)
+ rtx x;
+{
+ register RTX_CODE code = GET_CODE (x);
+ register int regno;
+ int i;
+
+ switch (code)
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case PC:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case ASM_INPUT:
+#ifdef HAVE_cc0
+ /* CC0 must die in the insn after it is set, so we don't need to take
+ special note of it here. */
+ case CC0:
+#endif
+ return;
+
+ case CLOBBER:
+ /* If we are clobbering a MEM, mark any hard registers inside the
+ address as used. */
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
+ return;
+
+ case REG:
+ regno = REGNO (x);
+ /* A hard reg in a wide mode may really be multiple registers.
+ If so, mark all of them just like the first. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ /* None of this applies to the stack, frame or arg pointers */
+ if (regno == STACK_POINTER_REGNUM
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ || regno == HARD_FRAME_POINTER_REGNUM
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+ || regno == FRAME_POINTER_REGNUM)
+ return;
+
+ i = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (i-- > 0)
+ SET_HARD_REG_BIT (newpat_used_regs, regno + i);
+ }
+ return;
+
+ case SET:
+ {
+ /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
+ the address. */
+ register rtx testreg = SET_DEST (x);
+
+ while (GET_CODE (testreg) == SUBREG
+ || GET_CODE (testreg) == ZERO_EXTRACT
+ || GET_CODE (testreg) == SIGN_EXTRACT
+ || GET_CODE (testreg) == STRICT_LOW_PART)
+ testreg = XEXP (testreg, 0);
+
+ if (GET_CODE (testreg) == MEM)
+ mark_used_regs_combine (XEXP (testreg, 0));
+
+ mark_used_regs_combine (SET_SRC (x));
+ return;
+ }
+ }
+
+ /* Recursively scan the operands of this expression. */
+
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ mark_used_regs_combine (XEXP (x, i));
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_used_regs_combine (XVECEXP (x, i, j));
+ }
+ }
+ }
+}
+
+
+/* Remove register number REGNO from the dead registers list of INSN.
+
+ Return the note used to record the death, if there was one. */
+
+rtx
+remove_death (regno, insn)
+ int regno;
+ rtx insn;
+{
+ register rtx note = find_regno_note (insn, REG_DEAD, regno);
+
+ if (note)
+ {
+ reg_n_deaths[regno]--;
+ remove_note (insn, note);
+ }
+
+ return note;
+}
+
+/* For each register (hardware or pseudo) used within expression X, if its
+ death is in an instruction with cuid between FROM_CUID (inclusive) and
+ TO_INSN (exclusive), put a REG_DEAD note for that register in the
+ list headed by PNOTES.
+
+ This is done when X is being merged by combination into TO_INSN. These
+ notes will then be distributed as needed. */
+
+static void
+move_deaths (x, from_cuid, to_insn, pnotes)
+ rtx x;
+ int from_cuid;
+ rtx to_insn;
+ rtx *pnotes;
+{
+ register char *fmt;
+ register int len, i;
+ register enum rtx_code code = GET_CODE (x);
+
+ if (code == REG)
+ {
+ register int regno = REGNO (x);
+ register rtx where_dead = reg_last_death[regno];
+
+ if (where_dead && INSN_CUID (where_dead) >= from_cuid
+ && INSN_CUID (where_dead) < INSN_CUID (to_insn))
+ {
+ rtx note = remove_death (regno, where_dead);
+
+ /* It is possible for the call above to return 0. This can occur
+ when reg_last_death points to I2 or I1 that we combined with.
+ In that case make a new note.
+
+ We must also check for the case where X is a hard register
+ and NOTE is a death note for a range of hard registers
+ including X. In that case, we must put REG_DEAD notes for
+ the remaining registers in place of NOTE. */
+
+ if (note != 0 && regno < FIRST_PSEUDO_REGISTER
+ && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
+ != GET_MODE_SIZE (GET_MODE (x))))
+ {
+ int deadregno = REGNO (XEXP (note, 0));
+ int deadend
+ = (deadregno + HARD_REGNO_NREGS (deadregno,
+ GET_MODE (XEXP (note, 0))));
+ int ourend = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+ int i;
+
+ for (i = deadregno; i < deadend; i++)
+ if (i < regno || i >= ourend)
+ REG_NOTES (where_dead)
+ = gen_rtx (EXPR_LIST, REG_DEAD,
+ gen_rtx (REG, reg_raw_mode[i], i),
+ REG_NOTES (where_dead));
+ }
+
+ if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
+ {
+ XEXP (note, 1) = *pnotes;
+ *pnotes = note;
+ }
+ else
+ *pnotes = gen_rtx (EXPR_LIST, REG_DEAD, x, *pnotes);
+
+ reg_n_deaths[regno]++;
+ }
+
+ return;
+ }
+
+ else if (GET_CODE (x) == SET)
+ {
+ rtx dest = SET_DEST (x);
+
+ move_deaths (SET_SRC (x), from_cuid, to_insn, pnotes);
+
+ /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
+ that accesses one word of a multi-word item, some
+	 piece of every register in the expression is used by
+ this insn, so remove any old death. */
+
+ if (GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART
+ || (GET_CODE (dest) == SUBREG
+ && (((GET_MODE_SIZE (GET_MODE (dest))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
+ {
+ move_deaths (dest, from_cuid, to_insn, pnotes);
+ return;
+ }
+
+ /* If this is some other SUBREG, we know it replaces the entire
+ value, so use that as the destination. */
+ if (GET_CODE (dest) == SUBREG)
+ dest = SUBREG_REG (dest);
+
+ /* If this is a MEM, adjust deaths of anything used in the address.
+ For a REG (the only other possibility), the entire value is
+ being replaced so the old value is not used in this insn. */
+
+ if (GET_CODE (dest) == MEM)
+ move_deaths (XEXP (dest, 0), from_cuid, to_insn, pnotes);
+ return;
+ }
+
+ else if (GET_CODE (x) == CLOBBER)
+ return;
+
+ len = GET_RTX_LENGTH (code);
+ fmt = GET_RTX_FORMAT (code);
+
+ for (i = 0; i < len; i++)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ move_deaths (XVECEXP (x, i, j), from_cuid, to_insn, pnotes);
+ }
+ else if (fmt[i] == 'e')
+ move_deaths (XEXP (x, i), from_cuid, to_insn, pnotes);
+ }
+}
+
+/* Return 1 if X is the target of a bit-field assignment in BODY, the
+ pattern of an insn. X must be a REG. */
+
+static int
+reg_bitfield_target_p (x, body)
+ rtx x;
+ rtx body;
+{
+ int i;
+
+ if (GET_CODE (body) == SET)
+ {
+ rtx dest = SET_DEST (body);
+ rtx target;
+ int regno, tregno, endregno, endtregno;
+
+ if (GET_CODE (dest) == ZERO_EXTRACT)
+ target = XEXP (dest, 0);
+ else if (GET_CODE (dest) == STRICT_LOW_PART)
+ target = SUBREG_REG (XEXP (dest, 0));
+ else
+ return 0;
+
+ if (GET_CODE (target) == SUBREG)
+ target = SUBREG_REG (target);
+
+ if (GET_CODE (target) != REG)
+ return 0;
+
+ tregno = REGNO (target), regno = REGNO (x);
+ if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
+ return target == x;
+
+ endtregno = tregno + HARD_REGNO_NREGS (tregno, GET_MODE (target));
+ endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+
+ return endregno > tregno && regno < endtregno;
+ }
+
+ else if (GET_CODE (body) == PARALLEL)
+ for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
+ if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
+ return 1;
+
+ return 0;
+}
+
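+/* Illustrative sketch, not part of the imported source: the final test
+   in the function above is the usual half-open interval overlap check.
+   Two hard-register ranges [S1, E1) and [S2, E2) intersect exactly
+   when each begins before the other ends:  */
+
+static int
+regno_ranges_overlap_sketch (s1, e1, s2, e2)
+     int s1, e1, s2, e2;
+{
+  return e1 > s2 && s1 < e2;
+}
+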
+/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
+ as appropriate. I3 and I2 are the insns resulting from the combination
+ insns including FROM (I2 may be zero).
+
+ ELIM_I2 and ELIM_I1 are either zero or registers that we know will
+ not need REG_DEAD notes because they are being substituted for. This
+ saves searching in the most common cases.
+
+ Each note in the list is either ignored or placed on some insns, depending
+ on the type of note. */
+
+static void
+distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
+ rtx notes;
+ rtx from_insn;
+ rtx i3, i2;
+ rtx elim_i2, elim_i1;
+{
+ rtx note, next_note;
+ rtx tem;
+
+ for (note = notes; note; note = next_note)
+ {
+ rtx place = 0, place2 = 0;
+
+ /* If this NOTE references a pseudo register, ensure it references
+ the latest copy of that register. */
+ if (XEXP (note, 0) && GET_CODE (XEXP (note, 0)) == REG
+ && REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER)
+ XEXP (note, 0) = regno_reg_rtx[REGNO (XEXP (note, 0))];
+
+ next_note = XEXP (note, 1);
+ switch (REG_NOTE_KIND (note))
+ {
+ case REG_UNUSED:
+ /* If this note is from any insn other than i3, then we have no
+ use for it, and must ignore it.
+
+ Any clobbers for i3 may still exist, and so we must process
+ REG_UNUSED notes from that insn.
+
+ Any clobbers from i2 or i1 can only exist if they were added by
+ recog_for_combine. In that case, recog_for_combine created the
+ necessary REG_UNUSED notes. Trying to keep any original
+ REG_UNUSED notes from these insns can cause incorrect output
+ if it is for the same register as the original i3 dest.
+ In that case, we will notice that the register is set in i3,
+ and then add a REG_UNUSED note for the destination of i3, which
+ is wrong. */
+ if (from_insn != i3)
+ break;
+
+ /* If this register is set or clobbered in I3, put the note there
+ unless there is one already. */
+ else if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
+ {
+ if (! (GET_CODE (XEXP (note, 0)) == REG
+ ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
+ : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
+ place = i3;
+ }
+ /* Otherwise, if this register is used by I3, then this register
+ now dies here, so we must put a REG_DEAD note here unless there
+ is one already. */
+ else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
+ && ! (GET_CODE (XEXP (note, 0)) == REG
+ ? find_regno_note (i3, REG_DEAD, REGNO (XEXP (note, 0)))
+ : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
+ {
+ PUT_REG_NOTE_KIND (note, REG_DEAD);
+ place = i3;
+ }
+ break;
+
+ case REG_EQUAL:
+ case REG_EQUIV:
+ case REG_NONNEG:
+ /* These notes say something about results of an insn. We can
+ only support them if they used to be on I3 in which case they
+ remain on I3. Otherwise they are ignored.
+
+ If the note refers to an expression that is not a constant, we
+ must also ignore the note since we cannot tell whether the
+ equivalence is still true. It might be possible to do
+ slightly better than this (we only have a problem if I2DEST
+ or I1DEST is present in the expression), but it doesn't
+ seem worth the trouble. */
+
+ if (from_insn == i3
+ && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
+ place = i3;
+ break;
+
+ case REG_INC:
+ case REG_NO_CONFLICT:
+ case REG_LABEL:
+ /* These notes say something about how a register is used. They must
+ be present on any use of the register in I2 or I3. */
+ if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
+ place = i3;
+
+ if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
+ {
+ if (place)
+ place2 = i2;
+ else
+ place = i2;
+ }
+ break;
+
+ case REG_WAS_0:
+ /* It is too much trouble to try to see if this note is still
+ correct in all situations. It is better to simply delete it. */
+ break;
+
+ case REG_RETVAL:
+ /* If the insn previously containing this note still exists,
+ put it back where it was. Otherwise move it to the previous
+ insn. Adjust the corresponding REG_LIBCALL note. */
+ if (GET_CODE (from_insn) != NOTE)
+ place = from_insn;
+ else
+ {
+ tem = find_reg_note (XEXP (note, 0), REG_LIBCALL, NULL_RTX);
+ place = prev_real_insn (from_insn);
+ if (tem && place)
+ XEXP (tem, 0) = place;
+ }
+ break;
+
+ case REG_LIBCALL:
+ /* This is handled similarly to REG_RETVAL. */
+ if (GET_CODE (from_insn) != NOTE)
+ place = from_insn;
+ else
+ {
+ tem = find_reg_note (XEXP (note, 0), REG_RETVAL, NULL_RTX);
+ place = next_real_insn (from_insn);
+ if (tem && place)
+ XEXP (tem, 0) = place;
+ }
+ break;
+
+ case REG_DEAD:
+ /* If the register is used as an input in I3, it dies there.
+ Similarly for I2, if it is non-zero and adjacent to I3.
+
+ If the register is not used as an input in either I3 or I2
+ and it is not one of the registers we were supposed to eliminate,
+ there are two possibilities. We might have a non-adjacent I2
+ or we might have somehow eliminated an additional register
+ from a computation. For example, we might have had A & B where
+ we discover that B will always be zero. In this case we will
+ eliminate the reference to A.
+
+ In both cases, we must search to see if we can find a previous
+ use of A and put the death note there. */
+
+ if (from_insn
+ && GET_CODE (from_insn) == CALL_INSN
+ && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
+ place = from_insn;
+ else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
+ place = i3;
+ else if (i2 != 0 && next_nonnote_insn (i2) == i3
+ && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
+ place = i2;
+
+ if (XEXP (note, 0) == elim_i2 || XEXP (note, 0) == elim_i1)
+ break;
+
+ /* If the register is used in both I2 and I3 and it dies in I3,
+ we might have added another reference to it. If reg_n_refs
+ was 2, bump it to 3. This has to be correct since the
+ register must have been set somewhere. The reason this is
+ done is because local-alloc.c treats 2 references as a
+ special case. */
+
+ if (place == i3 && i2 != 0 && GET_CODE (XEXP (note, 0)) == REG
+ && reg_n_refs[REGNO (XEXP (note, 0))]== 2
+ && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
+ reg_n_refs[REGNO (XEXP (note, 0))] = 3;
+
+ if (place == 0)
+ for (tem = prev_nonnote_insn (i3);
+ tem && (GET_CODE (tem) == INSN
+ || GET_CODE (tem) == CALL_INSN);
+ tem = prev_nonnote_insn (tem))
+ {
+ /* If the register is being set at TEM, see if that is all
+ TEM is doing. If so, delete TEM. Otherwise, make this
+ into a REG_UNUSED note instead. */
+ if (reg_set_p (XEXP (note, 0), PATTERN (tem)))
+ {
+ rtx set = single_set (tem);
+
+ /* Verify that it was the set, and not a clobber that
+ modified the register. */
+
+ if (set != 0 && ! side_effects_p (SET_SRC (set))
+ && rtx_equal_p (XEXP (note, 0), SET_DEST (set)))
+ {
+ /* Move the notes and links of TEM elsewhere.
+ This might delete other dead insns recursively.
+ First set the pattern to something that won't use
+ any register. */
+
+ PATTERN (tem) = pc_rtx;
+
+ distribute_notes (REG_NOTES (tem), tem, tem,
+ NULL_RTX, NULL_RTX, NULL_RTX);
+ distribute_links (LOG_LINKS (tem));
+
+ PUT_CODE (tem, NOTE);
+ NOTE_LINE_NUMBER (tem) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (tem) = 0;
+ }
+ else
+ {
+ PUT_REG_NOTE_KIND (note, REG_UNUSED);
+
+ /* If there isn't already a REG_UNUSED note, put one
+ here. */
+ if (! find_regno_note (tem, REG_UNUSED,
+ REGNO (XEXP (note, 0))))
+ place = tem;
+ break;
+ }
+ }
+ else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem))
+ || (GET_CODE (tem) == CALL_INSN
+ && find_reg_fusage (tem, USE, XEXP (note, 0))))
+ {
+ place = tem;
+ break;
+ }
+ }
+
+ /* If the register is set or already dead at PLACE, we needn't do
+ anything with this note if it is still a REG_DEAD note.
+
+ Note that we cannot use just `dead_or_set_p' here since we can
+ convert an assignment to a register into a bit-field assignment.
+ Therefore, we must also omit the note if the register is the
+ target of a bitfield assignment. */
+
+ if (place && REG_NOTE_KIND (note) == REG_DEAD)
+ {
+ int regno = REGNO (XEXP (note, 0));
+
+ if (dead_or_set_p (place, XEXP (note, 0))
+ || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
+ {
+ /* Unless the register previously died in PLACE, clear
+ reg_last_death. [I no longer understand why this is
+ being done.] */
+ if (reg_last_death[regno] != place)
+ reg_last_death[regno] = 0;
+ place = 0;
+ }
+ else
+ reg_last_death[regno] = place;
+
+ /* If this is a death note for a hard reg that is occupying
+ multiple registers, ensure that we are still using all
+ parts of the object. If we find a piece of the object
+ that is unused, we must add a USE for that piece before
+ PLACE and put the appropriate REG_DEAD note on it.
+
+ An alternative would be to put a REG_UNUSED for the pieces
+ on the insn that set the register, but that can't be done if
+ it is not in the same block. It is simpler, though less
+ efficient, to add the USE insns. */
+
+ if (place && regno < FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_NREGS (regno, GET_MODE (XEXP (note, 0))) > 1)
+ {
+ int endregno
+ = regno + HARD_REGNO_NREGS (regno,
+ GET_MODE (XEXP (note, 0)));
+ int all_used = 1;
+ int i;
+
+ for (i = regno; i < endregno; i++)
+ if (! refers_to_regno_p (i, i + 1, PATTERN (place), 0)
+ && ! find_regno_fusage (place, USE, i))
+ {
+ rtx piece = gen_rtx (REG, reg_raw_mode[i], i);
+ rtx p;
+
+ /* See if we already placed a USE note for this
+ register in front of PLACE. */
+ for (p = place;
+ GET_CODE (PREV_INSN (p)) == INSN
+ && GET_CODE (PATTERN (PREV_INSN (p))) == USE;
+ p = PREV_INSN (p))
+ if (rtx_equal_p (piece,
+ XEXP (PATTERN (PREV_INSN (p)), 0)))
+ {
+ p = 0;
+ break;
+ }
+
+ if (p)
+ {
+ rtx use_insn
+ = emit_insn_before (gen_rtx (USE, VOIDmode,
+ piece),
+ p);
+ REG_NOTES (use_insn)
+ = gen_rtx (EXPR_LIST, REG_DEAD, piece,
+ REG_NOTES (use_insn));
+ }
+
+ all_used = 0;
+ }
+
+ /* Check for the case where the register dying partially
+ overlaps the register set by this insn. */
+ if (all_used)
+ for (i = regno; i < endregno; i++)
+ if (dead_or_set_regno_p (place, i))
+ {
+ all_used = 0;
+ break;
+ }
+
+ if (! all_used)
+ {
+ /* Put only REG_DEAD notes for pieces that are
+ still used and that are not already dead or set. */
+
+ for (i = regno; i < endregno; i++)
+ {
+ rtx piece = gen_rtx (REG, reg_raw_mode[i], i);
+
+ if (reg_referenced_p (piece, PATTERN (place))
+ && ! dead_or_set_p (place, piece)
+ && ! reg_bitfield_target_p (piece,
+ PATTERN (place)))
+ REG_NOTES (place) = gen_rtx (EXPR_LIST, REG_DEAD,
+ piece,
+ REG_NOTES (place));
+ }
+
+ place = 0;
+ }
+ }
+ }
+ break;
+
+ default:
+ /* Any other notes should not be present at this point in the
+ compilation. */
+ abort ();
+ }
+
+ if (place)
+ {
+ XEXP (note, 1) = REG_NOTES (place);
+ REG_NOTES (place) = note;
+ }
+ else if ((REG_NOTE_KIND (note) == REG_DEAD
+ || REG_NOTE_KIND (note) == REG_UNUSED)
+ && GET_CODE (XEXP (note, 0)) == REG)
+ reg_n_deaths[REGNO (XEXP (note, 0))]--;
+
+ if (place2)
+ {
+ if ((REG_NOTE_KIND (note) == REG_DEAD
+ || REG_NOTE_KIND (note) == REG_UNUSED)
+ && GET_CODE (XEXP (note, 0)) == REG)
+ reg_n_deaths[REGNO (XEXP (note, 0))]++;
+
+ REG_NOTES (place2) = gen_rtx (GET_CODE (note), REG_NOTE_KIND (note),
+ XEXP (note, 0), REG_NOTES (place2));
+ }
+ }
+}
+
+/* Similarly to above, distribute the LOG_LINKS that used to be present on
+ I3, I2, and I1 to new locations. This is also called in one case to
+ add a link pointing at I3 when I3's destination is changed. */
+
+static void
+distribute_links (links)
+ rtx links;
+{
+ rtx link, next_link;
+
+ for (link = links; link; link = next_link)
+ {
+ rtx place = 0;
+ rtx insn;
+ rtx set, reg;
+
+ next_link = XEXP (link, 1);
+
+ /* If the insn that this link points to is a NOTE or isn't a single
+ set, ignore it. In the latter case, it isn't clear what we
+ can do other than ignore the link, since we can't tell which
+ register it was for. Such links wouldn't be used by combine
+ anyway.
+
+ It is not possible for the destination of the target of the link to
+	 have been changed by combine.  The only way that could happen is if we
+ replace I3, I2, and I1 by I3 and I2. But in that case the
+ destination of I2 also remains unchanged. */
+
+ if (GET_CODE (XEXP (link, 0)) == NOTE
+ || (set = single_set (XEXP (link, 0))) == 0)
+ continue;
+
+ reg = SET_DEST (set);
+ while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
+ || GET_CODE (reg) == SIGN_EXTRACT
+ || GET_CODE (reg) == STRICT_LOW_PART)
+ reg = XEXP (reg, 0);
+
+ /* A LOG_LINK is defined as being placed on the first insn that uses
+ a register and points to the insn that sets the register. Start
+ searching at the next insn after the target of the link and stop
+ when we reach a set of the register or the end of the basic block.
+
+ Note that this correctly handles the link that used to point from
+ I3 to I2. Also note that not much searching is typically done here
+ since most links don't point very far away. */
+
+ for (insn = NEXT_INSN (XEXP (link, 0));
+ (insn && (this_basic_block == n_basic_blocks - 1
+ || basic_block_head[this_basic_block + 1] != insn));
+ insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_overlap_mentioned_p (reg, PATTERN (insn)))
+ {
+ if (reg_referenced_p (reg, PATTERN (insn)))
+ place = insn;
+ break;
+ }
+ else if (GET_CODE (insn) == CALL_INSN
+ && find_reg_fusage (insn, USE, reg))
+ {
+ place = insn;
+ break;
+ }
+
+ /* If we found a place to put the link, place it there unless there
+ is already a link to the same insn as LINK at that point. */
+
+ if (place)
+ {
+ rtx link2;
+
+ for (link2 = LOG_LINKS (place); link2; link2 = XEXP (link2, 1))
+ if (XEXP (link2, 0) == XEXP (link, 0))
+ break;
+
+ if (link2 == 0)
+ {
+ XEXP (link, 1) = LOG_LINKS (place);
+ LOG_LINKS (place) = link;
+
+ /* Set added_links_insn to the earliest insn we added a
+ link to. */
+ if (added_links_insn == 0
+ || INSN_CUID (added_links_insn) > INSN_CUID (place))
+ added_links_insn = place;
+ }
+ }
+ }
+}
+
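+/* Illustrative sketch, not part of the imported source: placing a
+   LOG_LINK amounts to scanning forward from the setting insn for the
+   first insn in the block that uses the register.  Over a hypothetical
+   per-insn array of "uses the register" flags:  */
+
+static int
+first_use_after_sketch (uses, n_insns, setter)
+     char *uses;		/* uses[i] nonzero iff insn i uses the reg */
+     int n_insns;
+     int setter;
+{
+  int i;
+
+  for (i = setter + 1; i < n_insns; i++)
+    if (uses[i])
+      return i;			/* the link would be placed here */
+  return -1;			/* no use within the block */
+}
+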
+void
+dump_combine_stats (file)
+ FILE *file;
+{
+ fprintf
+ (file,
+ ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
+ combine_attempts, combine_merges, combine_extras, combine_successes);
+}
+
+void
+dump_combine_total_stats (file)
+ FILE *file;
+{
+ fprintf
+ (file,
+ "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
+ total_attempts, total_merges, total_extras, total_successes);
+}
diff --git a/gnu/usr.bin/cc/cc_int/convert.c b/gnu/usr.bin/cc/cc_int/convert.c
new file mode 100644
index 0000000..2cb5990
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/convert.c
@@ -0,0 +1,460 @@
+/* Utility routines for data type conversion for GNU C.
+ Copyright (C) 1987, 1988, 1991, 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU C.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* These routines are somewhat language-independent utility functions
+ intended to be called by the language-specific convert () functions. */
+
+#include "config.h"
+#include "tree.h"
+#include "flags.h"
+#include "convert.h"
+
+/* Convert EXPR to some pointer type TYPE.
+
+ EXPR must be pointer, integer, enumeral, or literal zero;
+ in other cases error is called. */
+
+tree
+convert_to_pointer (type, expr)
+ tree type, expr;
+{
+ register tree intype = TREE_TYPE (expr);
+ register enum tree_code form = TREE_CODE (intype);
+
+ if (integer_zerop (expr))
+ {
+ if (type == TREE_TYPE (null_pointer_node))
+ return null_pointer_node;
+ expr = build_int_2 (0, 0);
+ TREE_TYPE (expr) = type;
+ return expr;
+ }
+
+ if (form == POINTER_TYPE)
+ return build1 (NOP_EXPR, type, expr);
+
+
+ if (form == INTEGER_TYPE || form == ENUMERAL_TYPE)
+ {
+ if (type_precision (intype) == POINTER_SIZE)
+ return build1 (CONVERT_EXPR, type, expr);
+ expr = convert (type_for_size (POINTER_SIZE, 0), expr);
+ /* Modes may be different but sizes should be the same. */
+ if (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (expr)))
+ != GET_MODE_SIZE (TYPE_MODE (type)))
+ /* There is supposed to be some integral type
+ that is the same width as a pointer. */
+ abort ();
+ return convert_to_pointer (type, expr);
+ }
+
+ error ("cannot convert to a pointer type");
+
+ return null_pointer_node;
+}
+
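+/* Illustrative sketch, not part of the imported source: the function
+   above converts an integer to a pointer by first bringing it to
+   exactly POINTER_SIZE bits.  The same two-step shape in plain C,
+   assuming (as is true on the hosts this import targets, though the
+   standard does not guarantee it) that "unsigned long" is the
+   pointer-sized integer type:  */
+
+static char *
+int_to_pointer_sketch (i)
+     int i;
+{
+  /* Widen to the pointer-sized integer type first, then reinterpret
+     the result as a pointer.  */
+  unsigned long widened = (unsigned long) i;
+
+  return (char *) widened;
+}
+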
+/* Convert EXPR to some floating-point type TYPE.
+
+ EXPR must be float, integer, or enumeral;
+ in other cases error is called. */
+
+tree
+convert_to_real (type, expr)
+ tree type, expr;
+{
+ register enum tree_code form = TREE_CODE (TREE_TYPE (expr));
+
+ if (form == REAL_TYPE)
+ return build1 (flag_float_store ? CONVERT_EXPR : NOP_EXPR,
+ type, expr);
+
+ if (INTEGRAL_TYPE_P (TREE_TYPE (expr)))
+ return build1 (FLOAT_EXPR, type, expr);
+
+ if (form == COMPLEX_TYPE)
+ return convert (type, fold (build1 (REALPART_EXPR,
+ TREE_TYPE (TREE_TYPE (expr)), expr)));
+
+ if (form == POINTER_TYPE)
+ error ("pointer value used where a floating point value was expected");
+ else
+ error ("aggregate value used where a float was expected");
+
+ {
+ register tree tem = make_node (REAL_CST);
+ TREE_TYPE (tem) = type;
+ TREE_REAL_CST (tem) = REAL_VALUE_ATOF ("0.0", TYPE_MODE (type));
+ return tem;
+ }
+}
+
+/* Convert EXPR to some integer (or enum) type TYPE.
+
+ EXPR must be pointer, integer, discrete (enum, char, or bool), or float;
+ in other cases error is called.
+
+ The result of this is always supposed to be a newly created tree node
+ not in use in any existing structure. */
+
+tree
+convert_to_integer (type, expr)
+ tree type, expr;
+{
+ register tree intype = TREE_TYPE (expr);
+ register enum tree_code form = TREE_CODE (intype);
+
+ if (form == POINTER_TYPE)
+ {
+ if (integer_zerop (expr))
+ expr = integer_zero_node;
+ else
+ expr = fold (build1 (CONVERT_EXPR,
+ type_for_size (POINTER_SIZE, 0), expr));
+ intype = TREE_TYPE (expr);
+ form = TREE_CODE (intype);
+ if (intype == type)
+ return expr;
+ }
+
+ if (form == INTEGER_TYPE || form == ENUMERAL_TYPE
+ || form == BOOLEAN_TYPE || form == CHAR_TYPE)
+ {
+ register unsigned outprec = TYPE_PRECISION (type);
+ register unsigned inprec = TYPE_PRECISION (intype);
+ register enum tree_code ex_form = TREE_CODE (expr);
+
+ /* If we are widening the type, put in an explicit conversion.
+ Similarly if we are not changing the width. However, if this is
+ a logical operation that just returns 0 or 1, we can change the
+ type of the expression. For logical operations, we must
+ also change the types of the operands to maintain type
+ correctness. */
+
+ if (TREE_CODE_CLASS (ex_form) == '<')
+ {
+ TREE_TYPE (expr) = type;
+ return expr;
+ }
+ else if (ex_form == TRUTH_AND_EXPR || ex_form == TRUTH_ANDIF_EXPR
+ || ex_form == TRUTH_OR_EXPR || ex_form == TRUTH_ORIF_EXPR
+ || ex_form == TRUTH_XOR_EXPR)
+ {
+ TREE_OPERAND (expr, 0) = convert (type, TREE_OPERAND (expr, 0));
+ TREE_OPERAND (expr, 1) = convert (type, TREE_OPERAND (expr, 1));
+ TREE_TYPE (expr) = type;
+ return expr;
+ }
+ else if (ex_form == TRUTH_NOT_EXPR)
+ {
+ TREE_OPERAND (expr, 0) = convert (type, TREE_OPERAND (expr, 0));
+ TREE_TYPE (expr) = type;
+ return expr;
+ }
+ else if (outprec >= inprec)
+ return build1 (NOP_EXPR, type, expr);
+
+      /* Here we detect when we can distribute the truncation down past some
+ arithmetic. For example, if adding two longs and converting to an
+ int, we can equally well convert both to ints and then add.
+ For the operations handled here, such truncation distribution
+ is always safe.
+ It is desirable in these cases:
+	 1) when truncating down to full-word from a larger size.
+ 2) when truncating takes no work.
+ 3) when at least one operand of the arithmetic has been extended
+ (as by C's default conversions). In this case we need two conversions
+ if we do the arithmetic as already requested, so we might as well
+ truncate both and then combine. Perhaps that way we need only one.
+
+ Note that in general we cannot do the arithmetic in a type
+ shorter than the desired result of conversion, even if the operands
+ are both extended from a shorter type, because they might overflow
+ if combined in that type. The exceptions to this--the times when
+ two narrow values can be combined in their narrow type even to
+ make a wider result--are handled by "shorten" in build_binary_op. */
+
+ switch (ex_form)
+ {
+ case RSHIFT_EXPR:
+ /* We can pass truncation down through right shifting
+ when the shift count is a nonpositive constant. */
+ if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
+ && tree_int_cst_lt (TREE_OPERAND (expr, 1),
+ convert (TREE_TYPE (TREE_OPERAND (expr, 1)),
+ integer_one_node)))
+ goto trunc1;
+ break;
+
+ case LSHIFT_EXPR:
+ /* We can pass truncation down through left shifting
+ when the shift count is a nonnegative constant. */
+ if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
+ && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
+ {
+ /* If shift count is less than the width of the truncated type,
+ really shift. */
+ if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
+ /* In this case, shifting is like multiplication. */
+ goto trunc1;
+ else
+ {
+ /* If it is >= that width, result is zero.
+ Handling this with trunc1 would give the wrong result:
+ (int) ((long long) a << 32) is well defined (as 0)
+ but (int) a << 32 is undefined and would get a
+ warning. */
+
+ tree t = convert_to_integer (type, integer_zero_node);
+
+ /* If the original expression had side-effects, we must
+ preserve it. */
+ if (TREE_SIDE_EFFECTS (expr))
+ return build (COMPOUND_EXPR, type, expr, t);
+ else
+ return t;
+ }
+ }
+ break;
+
+ case MAX_EXPR:
+ case MIN_EXPR:
+ case MULT_EXPR:
+ {
+ tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
+ tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
+
+ /* Don't distribute unless the output precision is at least as big
+ as the actual inputs. Otherwise, the comparison of the
+ truncated values will be wrong. */
+ if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
+ && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
+ /* If signedness of arg0 and arg1 don't match,
+ we can't necessarily find a type to compare them in. */
+ && (TREE_UNSIGNED (TREE_TYPE (arg0))
+ == TREE_UNSIGNED (TREE_TYPE (arg1))))
+ goto trunc1;
+ break;
+ }
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case BIT_AND_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ case BIT_ANDTC_EXPR:
+ trunc1:
+ {
+ tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
+ tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
+
+ if (outprec >= BITS_PER_WORD
+ || TRULY_NOOP_TRUNCATION (outprec, inprec)
+ || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
+ || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
+ {
+ /* Do the arithmetic in type TYPEX,
+ then convert result to TYPE. */
+ register tree typex = type;
+
+ /* Can't do arithmetic in enumeral types
+ so use an integer type that will hold the values. */
+ if (TREE_CODE (typex) == ENUMERAL_TYPE)
+ typex = type_for_size (TYPE_PRECISION (typex),
+ TREE_UNSIGNED (typex));
+
+ /* But now perhaps TYPEX is as wide as INPREC.
+ In that case, do nothing special here.
+	     (Otherwise we would recurse infinitely in convert.)  */
+ if (TYPE_PRECISION (typex) != inprec)
+ {
+ /* Don't do unsigned arithmetic where signed was wanted,
+ or vice versa.
+ Exception: if either of the original operands were
+ unsigned then can safely do the work as unsigned.
+ And we may need to do it as unsigned
+ if we truncate to the original size. */
+ typex = ((TREE_UNSIGNED (TREE_TYPE (expr))
+ || TREE_UNSIGNED (TREE_TYPE (arg0))
+ || TREE_UNSIGNED (TREE_TYPE (arg1)))
+ ? unsigned_type (typex) : signed_type (typex));
+ return convert (type,
+ fold (build (ex_form, typex,
+ convert (typex, arg0),
+ convert (typex, arg1),
+ 0)));
+ }
+ }
+ }
+ break;
+
+ case NEGATE_EXPR:
+ case BIT_NOT_EXPR:
+ /* This is not correct for ABS_EXPR,
+ since we must test the sign before truncation. */
+ {
+ register tree typex = type;
+
+ /* Can't do arithmetic in enumeral types
+ so use an integer type that will hold the values. */
+ if (TREE_CODE (typex) == ENUMERAL_TYPE)
+ typex = type_for_size (TYPE_PRECISION (typex),
+ TREE_UNSIGNED (typex));
+
+ /* But now perhaps TYPEX is as wide as INPREC.
+ In that case, do nothing special here.
+	   (Otherwise we would recurse infinitely in convert.)  */
+ if (TYPE_PRECISION (typex) != inprec)
+ {
+ /* Don't do unsigned arithmetic where signed was wanted,
+ or vice versa. */
+ typex = (TREE_UNSIGNED (TREE_TYPE (expr))
+ ? unsigned_type (typex) : signed_type (typex));
+ return convert (type,
+ fold (build1 (ex_form, typex,
+ convert (typex,
+ TREE_OPERAND (expr, 0)))));
+ }
+ }
+
+ case NOP_EXPR:
+ /* If truncating after truncating, might as well do all at once.
+ If truncating after extending, we may get rid of wasted work. */
+ return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));
+
+ case COND_EXPR:
+ /* Can treat the two alternative values like the operands
+ of an arithmetic expression. */
+ {
+ tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
+ tree arg2 = get_unwidened (TREE_OPERAND (expr, 2), type);
+
+ if (outprec >= BITS_PER_WORD
+ || TRULY_NOOP_TRUNCATION (outprec, inprec)
+ || inprec > TYPE_PRECISION (TREE_TYPE (arg1))
+ || inprec > TYPE_PRECISION (TREE_TYPE (arg2)))
+ {
+ /* Do the arithmetic in type TYPEX,
+ then convert result to TYPE. */
+ register tree typex = type;
+
+ /* Can't do arithmetic in enumeral types
+ so use an integer type that will hold the values. */
+ if (TREE_CODE (typex) == ENUMERAL_TYPE)
+ typex = type_for_size (TYPE_PRECISION (typex),
+ TREE_UNSIGNED (typex));
+
+ /* But now perhaps TYPEX is as wide as INPREC.
+ In that case, do nothing special here.
+	       (Otherwise we would recurse infinitely in convert.)  */
+ if (TYPE_PRECISION (typex) != inprec)
+ {
+ /* Don't do unsigned arithmetic where signed was wanted,
+ or vice versa. */
+ typex = (TREE_UNSIGNED (TREE_TYPE (expr))
+ ? unsigned_type (typex) : signed_type (typex));
+ return convert (type,
+ fold (build (COND_EXPR, typex,
+ TREE_OPERAND (expr, 0),
+ convert (typex, arg1),
+ convert (typex, arg2))));
+ }
+ else
+ /* It is sometimes worthwhile
+ to push the narrowing down through the conditional. */
+ return fold (build (COND_EXPR, type,
+ TREE_OPERAND (expr, 0),
+ convert (type, TREE_OPERAND (expr, 1)),
+ convert (type, TREE_OPERAND (expr, 2))));
+ }
+ }
+
+ }
+
+ return build1 (NOP_EXPR, type, expr);
+ }
+
+ if (form == REAL_TYPE)
+ return build1 (FIX_TRUNC_EXPR, type, expr);
+
+ if (form == COMPLEX_TYPE)
+ return convert (type, fold (build1 (REALPART_EXPR,
+ TREE_TYPE (TREE_TYPE (expr)), expr)));
+
+ error ("aggregate value used where an integer was expected");
+
+ {
+ register tree tem = build_int_2 (0, 0);
+ TREE_TYPE (tem) = type;
+ return tem;
+ }
+}
+
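+/* Illustrative sketch, not part of the imported source: the
+   truncation-distribution logic above relies on addition, subtraction,
+   and the bitwise operations commuting with truncation modulo 2**N.
+   A concrete instance with unsigned arithmetic, where wraparound is
+   well defined:  */
+
+static unsigned short
+truncation_distributes_sketch (a, b)
+     unsigned long a, b;
+{
+  unsigned short narrow_of_sum = (unsigned short) (a + b);
+  unsigned short sum_of_narrow
+    = (unsigned short) ((unsigned short) a + (unsigned short) b);
+
+  /* The two computations always agree, so either may be returned.  */
+  return narrow_of_sum == sum_of_narrow ? narrow_of_sum : 0;
+}
+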
+/* Convert EXPR to the complex type TYPE in the usual ways. */
+
+tree
+convert_to_complex (type, expr)
+ tree type, expr;
+{
+ register enum tree_code form = TREE_CODE (TREE_TYPE (expr));
+ tree subtype = TREE_TYPE (type);
+
+ if (form == REAL_TYPE || form == INTEGER_TYPE || form == ENUMERAL_TYPE)
+ {
+ expr = convert (subtype, expr);
+ return build (COMPLEX_EXPR, type, expr,
+ convert (subtype, integer_zero_node));
+ }
+
+ if (form == COMPLEX_TYPE)
+ {
+ tree elt_type = TREE_TYPE (TREE_TYPE (expr));
+ if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
+ return expr;
+ else if (TREE_CODE (expr) == COMPLEX_EXPR)
+ return fold (build (COMPLEX_EXPR,
+ type,
+ convert (subtype, TREE_OPERAND (expr, 0)),
+ convert (subtype, TREE_OPERAND (expr, 1))));
+ else
+ {
+ expr = save_expr (expr);
+ return fold (build (COMPLEX_EXPR,
+ type,
+ convert (subtype,
+ fold (build1 (REALPART_EXPR,
+ TREE_TYPE (TREE_TYPE (expr)),
+ expr))),
+ convert (subtype,
+ fold (build1 (IMAGPART_EXPR,
+ TREE_TYPE (TREE_TYPE (expr)),
+ expr)))));
+ }
+ }
+
+ if (form == POINTER_TYPE)
+ error ("pointer value used where a complex was expected");
+ else
+ error ("aggregate value used where a complex was expected");
+
+ return build (COMPLEX_EXPR, type,
+ convert (subtype, integer_zero_node),
+ convert (subtype, integer_zero_node));
+}
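+
+/* Illustrative sketch, not part of the imported source: converting a
+   real value to a complex type pairs the converted real part with a
+   zero imaginary part, as the function above does.  With a
+   hypothetical struct representation of a complex double:  */
+
+struct complex_double_sketch { double re, im; };
+
+static struct complex_double_sketch
+real_to_complex_sketch (x)
+     double x;
+{
+  struct complex_double_sketch c;
+
+  c.re = x;
+  c.im = 0.0;
+  return c;
+}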
diff --git a/gnu/usr.bin/cc/cc_int/cse.c b/gnu/usr.bin/cc/cc_int/cse.c
new file mode 100644
index 0000000..b4947d0
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/cse.c
@@ -0,0 +1,8546 @@
+/* Common subexpression elimination for GNU compiler.
+ Copyright (C) 1987, 88, 89, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+/* Must precede rtl.h for FFS. */
+#include <stdio.h>
+
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "real.h"
+#include "insn-config.h"
+#include "recog.h"
+
+#include <setjmp.h>
+
+/* The basic idea of common subexpression elimination is to go
+ through the code, keeping a record of expressions that would
+ have the same value at the current scan point, and replacing
+ expressions encountered with the cheapest equivalent expression.
+
+ It is too complicated to keep track of the different possibilities
+ when control paths merge; so, at each label, we forget all that is
+ known and start fresh. This can be described as processing each
+ basic block separately. Note, however, that these are not quite
+ the same as the basic blocks found by a later pass and used for
+ data flow analysis and register packing. We do not need to start fresh
+ after a conditional jump instruction if there is no label there.
+
+ We use two data structures to record the equivalent expressions:
+ a hash table for most expressions, and several vectors together
+ with "quantity numbers" to record equivalent (pseudo) registers.
+
+ The use of the special data structure for registers is desirable
+   because it is faster.  It is possible because register references
+ contain a fairly small number, the register number, taken from
+ a contiguously allocated series, and two register references are
+ identical if they have the same number. General expressions
+ do not have any such thing, so the only way to retrieve the
+ information recorded on an expression other than a register
+ is to keep it in a hash table.
+
+Registers and "quantity numbers":
+
+ At the start of each basic block, all of the (hardware and pseudo)
+ registers used in the function are given distinct quantity
+ numbers to indicate their contents. During scan, when the code
+ copies one register into another, we copy the quantity number.
+ When a register is loaded in any other way, we allocate a new
+ quantity number to describe the value generated by this operation.
+ `reg_qty' records what quantity a register is currently thought
+ of as containing.
+
+ All real quantity numbers are greater than or equal to `max_reg'.
+ If register N has not been assigned a quantity, reg_qty[N] will equal N.
+
+ Quantity numbers below `max_reg' do not exist and none of the `qty_...'
+ variables should be referenced with an index below `max_reg'.
+
+ We also maintain a bidirectional chain of registers for each
+ quantity number. `qty_first_reg', `qty_last_reg',
+ `reg_next_eqv' and `reg_prev_eqv' hold these chains.
+
+ The first register in a chain is the one whose lifespan is least local.
+ Among equals, it is the one that was seen first.
+ We replace any equivalent register with that one.
+
+   If two registers have the same quantity number, it must be true that
+   REG expressions with mode `qty_mode' are in the hash table for both
+   registers and are in the same class.
+
+ The converse is not true. Since hard registers may be referenced in
+ any mode, two REG expressions might be equivalent in the hash table
+ but not have the same quantity number if the quantity number of one
+ of the registers is not the same mode as those expressions.
+
+Constants and quantity numbers:
+
+ When a quantity has a known constant value, that value is stored
+ in the appropriate element of qty_const. This is in addition to
+ putting the constant in the hash table as is usual for non-regs.
+
+ Whether a reg or a constant is preferred is determined by the configuration
+ macro CONST_COSTS and will often depend on the constant value. In any
+ event, expressions containing constants can be simplified, by fold_rtx.
+
+ When a quantity has a known nearly constant value (such as an address
+ of a stack slot), that value is stored in the appropriate element
+ of qty_const.
+
+ Integer constants don't have a machine mode. However, cse
+ determines the intended machine mode from the destination
+ of the instruction that moves the constant. The machine mode
+ is recorded in the hash table along with the actual RTL
+ constant expression so that different modes are kept separate.
+
+Other expressions:
+
+ To record known equivalences among expressions in general
+ we use a hash table called `table'. It has a fixed number of buckets
+ that contain chains of `struct table_elt' elements for expressions.
+ These chains connect the elements whose expressions have the same
+ hash codes.
+
+ Other chains through the same elements connect the elements which
+ currently have equivalent values.
+
+ Register references in an expression are canonicalized before hashing
+ the expression. This is done using `reg_qty' and `qty_first_reg'.
+ The hash code of a register reference is computed using the quantity
+ number, not the register number.
+
+ When the value of an expression changes, it is necessary to remove from the
+ hash table not just that expression but all expressions whose values
+ could be different as a result.
+
+ 1. If the value changing is in memory, except in special cases
+ ANYTHING referring to memory could be changed. That is because
+ nobody knows where a pointer does not point.
+ The function `invalidate_memory' removes what is necessary.
+
+ The special cases are when the address is constant or is
+ a constant plus a fixed register such as the frame pointer
+ or a static chain pointer. When such addresses are stored in,
+ we can tell exactly which other such addresses must be invalidated
+ due to overlap. `invalidate' does this.
+ All expressions that refer to non-constant
+ memory addresses are also invalidated. `invalidate_memory' does this.
+
+ 2. If the value changing is a register, all expressions
+ containing references to that register, and only those,
+ must be removed.
+
+ Because searching the entire hash table for expressions that contain
+ a register is very slow, we try to figure out when it isn't necessary.
+ Precisely, this is necessary only when expressions have been
+ entered in the hash table using this register, and then the value has
+ changed, and then another expression wants to be added to refer to
+ the register's new value. This sequence of circumstances is rare
+ within any one basic block.
+
+ The vectors `reg_tick' and `reg_in_table' are used to detect this case.
+ reg_tick[i] is incremented whenever a value is stored in register i.
+ reg_in_table[i] holds -1 if no references to register i have been
+ entered in the table; otherwise, it contains the value reg_tick[i] had
+ when the references were entered. If we want to enter a reference
+ and reg_in_table[i] != reg_tick[i], we must scan and remove old references.
+   Until we want to enter a new entry, the mere fact that the two vectors
+   don't match causes the stale entries to be ignored by anyone who tries
+   to match them.
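+
+   For illustration (editor's addition): if reg_tick[5] == 3 when an
+   expression mentioning register 5 is entered, reg_in_table[5] becomes 3.
+   A later store to register 5 makes reg_tick[5] == 4; from then on the
+   stale entries are ignored, and they are actually scanned and removed
+   only if a new entry mentioning register 5 is about to go in.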
+
+ Registers themselves are entered in the hash table as well as in
+ the equivalent-register chains. However, the vectors `reg_tick'
+ and `reg_in_table' do not apply to expressions which are simple
+ register references. These expressions are removed from the table
+ immediately when they become invalid, and this can be done even if
+ we do not immediately search for all the expressions that refer to
+ the register.
+
+ A CLOBBER rtx in an instruction invalidates its operand for further
+ reuse. A CLOBBER or SET rtx whose operand is a MEM:BLK
+ invalidates everything that resides in memory.
+
+Related expressions:
+
+ Constant expressions that differ only by an additive integer
+ are called related. When a constant expression is put in
+ the table, the related expression with no constant term
+ is also entered. These are made to point at each other
+ so that it is possible to find out if there exists any
+ register equivalent to an expression related to a given expression. */
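+
+/* An illustrative example of a related pair (editor's addition, not in
+   the original commentary): the constant
+
+       (const (plus (symbol_ref "table") (const_int 8)))
+
+   is related to (symbol_ref "table"); if some register is known to be
+   equivalent to the latter, use_related_value can rewrite the former
+   as that register plus 8.  */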
+
+/* One plus largest register number used in this function. */
+
+static int max_reg;
+
+/* Length of vectors indexed by quantity number.
+ We know in advance we will not need a quantity number this big. */
+
+static int max_qty;
+
+/* Next quantity number to be allocated.
+ This is 1 + the largest number needed so far. */
+
+static int next_qty;
+
+/* Indexed by quantity number, gives the first (or last) (pseudo) register
+ in the chain of registers that currently contain this quantity. */
+
+static int *qty_first_reg;
+static int *qty_last_reg;
+
+/* Indexed by quantity number, gives the mode of the quantity. */
+
+static enum machine_mode *qty_mode;
+
+/* Indexed by quantity number, gives the rtx of the constant value of the
+ quantity, or zero if it does not have a known value.
+ A sum of the frame pointer (or arg pointer) plus a constant
+ can also be entered here. */
+
+static rtx *qty_const;
+
+/* Indexed by qty number, gives the insn that stored the constant value
+ recorded in `qty_const'. */
+
+static rtx *qty_const_insn;
+
+/* The next three variables are used to track when a comparison between a
+ quantity and some constant or register has been passed. In that case, we
+ know the results of the comparison in case we see it again. These variables
+ record a comparison that is known to be true. */
+
+/* Indexed by qty number, gives the rtx code of a comparison with a known
+ result involving this quantity. If none, it is UNKNOWN. */
+static enum rtx_code *qty_comparison_code;
+
+/* Indexed by qty number, gives the constant being compared against in a
+ comparison of known result. If no such comparison, it is undefined.
+ If the comparison is not with a constant, it is zero. */
+
+static rtx *qty_comparison_const;
+
+/* Indexed by qty number, gives the quantity being compared against in a
+   comparison of known result.  If no such comparison, it is undefined.
+ If the comparison is not with a register, it is -1. */
+
+static int *qty_comparison_qty;
+
+#ifdef HAVE_cc0
+/* For machines that have a CC0, we do not record its value in the hash
+ table since its use is guaranteed to be the insn immediately following
+ its definition and any other insn is presumed to invalidate it.
+
+ Instead, we store below the value last assigned to CC0. If it should
+ happen to be a constant, it is stored in preference to the actual
+ assigned value. In case it is a constant, we store the mode in which
+ the constant should be interpreted. */
+
+static rtx prev_insn_cc0;
+static enum machine_mode prev_insn_cc0_mode;
+#endif
+
+/* Previous actual insn. 0 if at first insn of basic block. */
+
+static rtx prev_insn;
+
+/* Insn being scanned. */
+
+static rtx this_insn;
+
+/* Indexed by (pseudo) register number, gives the quantity number
+ of the register's current contents. */
+
+static int *reg_qty;
+
+/* Indexed by (pseudo) register number, gives the number of the next (or
+ previous) (pseudo) register in the chain of registers sharing the same
+ value.
+
+ Or -1 if this register is at the end of the chain.
+
+ If reg_qty[N] == N, reg_next_eqv[N] is undefined. */
+
+static int *reg_next_eqv;
+static int *reg_prev_eqv;
+
+/* Indexed by (pseudo) register number, gives the number of times
+ that register has been altered in the current basic block. */
+
+static int *reg_tick;
+
+/* Indexed by (pseudo) register number, gives the reg_tick value at which
+ rtx's containing this register are valid in the hash table.
+ If this does not equal the current reg_tick value, such expressions
+ existing in the hash table are invalid.
+ If this is -1, no expressions containing this register have been
+ entered in the table. */
+
+static int *reg_in_table;
+
+/* A HARD_REG_SET containing all the hard registers for which there is
+ currently a REG expression in the hash table. Note the difference
+ from the above variables, which indicate if the REG is mentioned in some
+ expression in the table. */
+
+static HARD_REG_SET hard_regs_in_table;
+
+/* A HARD_REG_SET containing all the hard registers that are invalidated
+ by a CALL_INSN. */
+
+static HARD_REG_SET regs_invalidated_by_call;
+
+/* Two vectors of ints: one of max_reg elements, all -1; the other of
+   max_reg + 500 elements (an approximation for max_qty), where element
+   i contains i.  These are used to initialize various other vectors
+   quickly (see e.g. new_basic_block). */
+
+static int *all_minus_one;
+static int *consec_ints;
+
+/* CUID of insn that starts the basic block currently being cse-processed. */
+
+static int cse_basic_block_start;
+
+/* CUID of insn that ends the basic block currently being cse-processed. */
+
+static int cse_basic_block_end;
+
+/* Vector mapping INSN_UIDs to cuids.
+ The cuids are like uids but increase monotonically always.
+ We use them to see whether a reg is used outside a given basic block. */
+
+static int *uid_cuid;
+
+/* Highest UID in UID_CUID. */
+static int max_uid;
+
+/* Get the cuid of an insn. */
+
+#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
+
+/* Nonzero if cse has altered conditional jump insns
+ in such a way that jump optimization should be redone. */
+
+static int cse_jumps_altered;
+
+/* canon_hash stores 1 in do_not_record
+ if it notices a reference to CC0, PC, or some other volatile
+ subexpression. */
+
+static int do_not_record;
+
+#ifdef LOAD_EXTEND_OP
+
+/* Scratch rtl used when looking for load-extended copy of a MEM. */
+static rtx memory_extend_rtx;
+#endif
+
+/* canon_hash stores 1 in hash_arg_in_memory
+ if it notices a reference to memory within the expression being hashed. */
+
+static int hash_arg_in_memory;
+
+/* canon_hash stores 1 in hash_arg_in_struct
+ if it notices a reference to memory that's part of a structure. */
+
+static int hash_arg_in_struct;
+
+/* The hash table contains buckets which are chains of `struct table_elt's,
+ each recording one expression's information.
+ That expression is in the `exp' field.
+
+ Those elements with the same hash code are chained in both directions
+ through the `next_same_hash' and `prev_same_hash' fields.
+
+ Each set of expressions with equivalent values
+ are on a two-way chain through the `next_same_value'
+ and `prev_same_value' fields, and all point with
+ the `first_same_value' field at the first element in
+ that chain. The chain is in order of increasing cost.
+ Each element's cost value is in its `cost' field.
+
+ The `in_memory' field is nonzero for elements that
+ involve any reference to memory. These elements are removed
+ whenever a write is done to an unidentified location in memory.
+ To be safe, we assume that a memory address is unidentified unless
+ the address is either a symbol constant or a constant plus
+ the frame pointer or argument pointer.
+
+ The `in_struct' field is nonzero for elements that
+ involve any reference to memory inside a structure or array.
+
+ The `related_value' field is used to connect related expressions
+ (that differ by adding an integer).
+ The related expressions are chained in a circular fashion.
+ `related_value' is zero for expressions for which this
+ chain is not useful.
+
+ The `cost' field stores the cost of this element's expression.
+
+ The `is_const' flag is set if the element is a constant (including
+ a fixed address).
+
+ The `flag' field is used as a temporary during some search routines.
+
+ The `mode' field is usually the same as GET_MODE (`exp'), but
+ if `exp' is a CONST_INT and has no machine mode then the `mode'
+ field is the mode it was being used as. Each constant is
+ recorded separately for each mode it is used with. */
+
+
+struct table_elt
+{
+ rtx exp;
+ struct table_elt *next_same_hash;
+ struct table_elt *prev_same_hash;
+ struct table_elt *next_same_value;
+ struct table_elt *prev_same_value;
+ struct table_elt *first_same_value;
+ struct table_elt *related_value;
+ int cost;
+ enum machine_mode mode;
+ char in_memory;
+ char in_struct;
+ char is_const;
+ char flag;
+};
+
+/* We don't want a lot of buckets, because we rarely have very many
+ things stored in the hash table, and a lot of buckets slows
+ down a lot of loops that happen frequently. */
+#define NBUCKETS 31
+
+/* Compute hash code of X in mode M.  Special-case the case where X is a
+   pseudo register (hard registers may require `do_not_record' to be set). */
+
+#define HASH(X, M) \
+ (GET_CODE (X) == REG && REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ ? (((unsigned) REG << 7) + (unsigned) reg_qty[REGNO (X)]) % NBUCKETS \
+ : canon_hash (X, M) % NBUCKETS)
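+
+/* Editor's note, for illustration: because HASH uses the quantity
+   number rather than the register number for pseudos, two pseudo
+   registers that have been made equivalent hash into the same bucket,
+   which is what lets a lookup of one find entries made for the other. */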
+
+/* Determine whether register number N is considered a fixed register for CSE.
+ It is desirable to replace other regs with fixed regs, to reduce need for
+ non-fixed hard regs.
+ A reg wins if it is either the frame pointer or designated as fixed,
+ but not if it is an overlapping register. */
+#ifdef OVERLAPPING_REGNO_P
+#define FIXED_REGNO_P(N) \
+ (((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
+ || fixed_regs[N] || global_regs[N]) \
+ && ! OVERLAPPING_REGNO_P ((N)))
+#else
+#define FIXED_REGNO_P(N) \
+ ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
+ || fixed_regs[N] || global_regs[N])
+#endif
+
+/* Compute cost of X, as stored in the `cost' field of a table_elt.  Fixed
+   hard registers and pointers into the frame are the cheapest with a cost
+   of 0.  Next come pseudos with a cost of 1 and other hard registers with
+   a cost of 2.  Aside from these special cases, call `rtx_cost'. */
+
+#define CHEAP_REGNO(N) \
+ ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
+ || (N) == STACK_POINTER_REGNUM || (N) == ARG_POINTER_REGNUM \
+ || ((N) >= FIRST_VIRTUAL_REGISTER && (N) <= LAST_VIRTUAL_REGISTER) \
+ || ((N) < FIRST_PSEUDO_REGISTER \
+ && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS))
+
+/* A register is cheap if it is a user variable assigned to the register
+ or if its register number always corresponds to a cheap register. */
+
+#define CHEAP_REG(N) \
+ ((REG_USERVAR_P (N) && REGNO (N) < FIRST_PSEUDO_REGISTER) \
+ || CHEAP_REGNO (REGNO (N)))
+
+#define COST(X) \
+ (GET_CODE (X) == REG \
+ ? (CHEAP_REG (X) ? 0 \
+ : REGNO (X) >= FIRST_PSEUDO_REGISTER ? 1 \
+ : 2) \
+ : rtx_cost (X, SET) * 2)
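+
+/* Worked examples (editor's addition): by the macro above, the frame
+   pointer costs 0, an ordinary pseudo costs 1, and a non-fixed hard
+   register costs 2.  A non-REG rtx costs twice its rtx_cost; e.g. for
+   (mult (reg) (reg)) with the defaults in rtx_cost below, that is
+   2 * (COSTS_N_INSNS (5) + operand costs). */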
+
+/* Determine if the quantity number for register X represents a valid index
+ into the `qty_...' variables. */
+
+#define REGNO_QTY_VALID_P(N) (reg_qty[N] != (N))
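+
+/* (Editor's note: recall that reg_qty[N] is initialized to N and that
+   real quantity numbers are >= max_reg, so reg_qty[N] != N exactly when
+   register N has been assigned a quantity.) */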
+
+static struct table_elt *table[NBUCKETS];
+
+/* Chain of `struct table_elt's made so far for this function
+ but currently removed from the table. */
+
+static struct table_elt *free_element_chain;
+
+/* Number of `struct table_elt' structures made so far for this function. */
+
+static int n_elements_made;
+
+/* Maximum value `n_elements_made' has had so far in this compilation
+ for functions previously processed. */
+
+static int max_elements_made;
+
+/* Surviving equivalence class when two equivalence classes are merged
+ by recording the effects of a jump in the last insn. Zero if the
+ last insn was not a conditional jump. */
+
+static struct table_elt *last_jump_equiv_class;
+
+/* Set to the cost of a constant pool reference if one was found for a
+ symbolic constant. If this was found, it means we should try to
+ convert constants into constant pool entries if they don't fit in
+ the insn. */
+
+static int constant_pool_entries_cost;
+
+/* Bits describing what kind of values in memory must be invalidated
+ for a particular instruction. If all three bits are zero,
+ no memory refs need to be invalidated. Each bit is more powerful
+ than the preceding ones, and if a bit is set then the preceding
+ bits are also set.
+
+ Here is how the bits are set:
+ Pushing onto the stack invalidates only the stack pointer,
+ writing at a fixed address invalidates only variable addresses,
+ writing in a structure element at variable address
+ invalidates all but scalar variables,
+ and writing in anything else at variable address invalidates everything. */
+
+struct write_data
+{
+ int sp : 1; /* Invalidate stack pointer. */
+ int var : 1; /* Invalidate variable addresses. */
+ int nonscalar : 1; /* Invalidate all but scalar variables. */
+ int all : 1; /* Invalidate all memory refs. */
+};
+
+/* Define maximum length of a branch path. */
+
+#define PATHLENGTH 10
+
+/* This data describes a block that will be processed by cse_basic_block. */
+
+struct cse_basic_block_data {
+ /* Lowest CUID value of insns in block. */
+ int low_cuid;
+ /* Highest CUID value of insns in block. */
+ int high_cuid;
+ /* Total number of SETs in block. */
+ int nsets;
+ /* Last insn in the block. */
+ rtx last;
+ /* Size of current branch path, if any. */
+ int path_size;
+ /* Current branch path, indicating which branches will be taken. */
+ struct branch_path {
+ /* The branch insn. */
+ rtx branch;
+ /* Whether it should be taken or not. AROUND is the same as taken
+ except that it is used when the destination label is not preceded
+ by a BARRIER. */
+ enum taken {TAKEN, NOT_TAKEN, AROUND} status;
+ } path[PATHLENGTH];
+};
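+
+/* Editor's note, for illustration: path[0].status == TAKEN means the
+   scan is to follow the branch at path[0].branch into its destination,
+   so one cse "basic block" may extend across up to PATHLENGTH branches
+   that are known (or assumed) to be taken. */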
+
+/* Nonzero if X has the form (PLUS frame-pointer integer). We check for
+ virtual regs here because the simplify_*_operation routines are called
+ by integrate.c, which is called before virtual register instantiation. */
+
+#define FIXED_BASE_PLUS_P(X) \
+ ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
+ || (X) == arg_pointer_rtx \
+ || (X) == virtual_stack_vars_rtx \
+ || (X) == virtual_incoming_args_rtx \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (XEXP (X, 0) == frame_pointer_rtx \
+ || XEXP (X, 0) == hard_frame_pointer_rtx \
+ || XEXP (X, 0) == arg_pointer_rtx \
+ || XEXP (X, 0) == virtual_stack_vars_rtx \
+ || XEXP (X, 0) == virtual_incoming_args_rtx)))
+
+/* Similar, but also allows reference to the stack pointer.
+
+   This used to include FIXED_BASE_PLUS_P; however, we can't assume that
+   arg_pointer_rtx by itself is nonzero, because on at least one machine,
+   the i960, the arg pointer is zero when it is unused. */
+
+#define NONZERO_BASE_PLUS_P(X) \
+ ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
+ || (X) == virtual_stack_vars_rtx \
+ || (X) == virtual_incoming_args_rtx \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (XEXP (X, 0) == frame_pointer_rtx \
+ || XEXP (X, 0) == hard_frame_pointer_rtx \
+ || XEXP (X, 0) == arg_pointer_rtx \
+ || XEXP (X, 0) == virtual_stack_vars_rtx \
+ || XEXP (X, 0) == virtual_incoming_args_rtx)) \
+ || (X) == stack_pointer_rtx \
+ || (X) == virtual_stack_dynamic_rtx \
+ || (X) == virtual_outgoing_args_rtx \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (XEXP (X, 0) == stack_pointer_rtx \
+ || XEXP (X, 0) == virtual_stack_dynamic_rtx \
+ || XEXP (X, 0) == virtual_outgoing_args_rtx)))
+
+static void new_basic_block PROTO((void));
+static void make_new_qty PROTO((int));
+static void make_regs_eqv PROTO((int, int));
+static void delete_reg_equiv PROTO((int));
+static int mention_regs PROTO((rtx));
+static int insert_regs PROTO((rtx, struct table_elt *, int));
+static void free_element PROTO((struct table_elt *));
+static void remove_from_table PROTO((struct table_elt *, unsigned));
+static struct table_elt *get_element PROTO((void));
+static struct table_elt *lookup PROTO((rtx, unsigned, enum machine_mode)),
+ *lookup_for_remove PROTO((rtx, unsigned, enum machine_mode));
+static rtx lookup_as_function PROTO((rtx, enum rtx_code));
+static struct table_elt *insert PROTO((rtx, struct table_elt *, unsigned,
+ enum machine_mode));
+static void merge_equiv_classes PROTO((struct table_elt *,
+ struct table_elt *));
+static void invalidate PROTO((rtx));
+static void remove_invalid_refs PROTO((int));
+static void rehash_using_reg PROTO((rtx));
+static void invalidate_memory PROTO((struct write_data *));
+static void invalidate_for_call PROTO((void));
+static rtx use_related_value PROTO((rtx, struct table_elt *));
+static unsigned canon_hash PROTO((rtx, enum machine_mode));
+static unsigned safe_hash PROTO((rtx, enum machine_mode));
+static int exp_equiv_p PROTO((rtx, rtx, int, int));
+static void set_nonvarying_address_components PROTO((rtx, int, rtx *,
+ HOST_WIDE_INT *,
+ HOST_WIDE_INT *));
+static int refers_to_p PROTO((rtx, rtx));
+static int refers_to_mem_p PROTO((rtx, rtx, HOST_WIDE_INT,
+ HOST_WIDE_INT));
+static int cse_rtx_addr_varies_p PROTO((rtx));
+static rtx canon_reg PROTO((rtx, rtx));
+static void find_best_addr PROTO((rtx, rtx *));
+static enum rtx_code find_comparison_args PROTO((enum rtx_code, rtx *, rtx *,
+ enum machine_mode *,
+ enum machine_mode *));
+static rtx cse_gen_binary PROTO((enum rtx_code, enum machine_mode,
+ rtx, rtx));
+static rtx simplify_plus_minus PROTO((enum rtx_code, enum machine_mode,
+ rtx, rtx));
+static rtx fold_rtx PROTO((rtx, rtx));
+static rtx equiv_constant PROTO((rtx));
+static void record_jump_equiv PROTO((rtx, int));
+static void record_jump_cond PROTO((enum rtx_code, enum machine_mode,
+ rtx, rtx, int));
+static void cse_insn PROTO((rtx, int));
+static void note_mem_written PROTO((rtx, struct write_data *));
+static void invalidate_from_clobbers PROTO((struct write_data *, rtx));
+static rtx cse_process_notes PROTO((rtx, rtx));
+static void cse_around_loop PROTO((rtx));
+static void invalidate_skipped_set PROTO((rtx, rtx));
+static void invalidate_skipped_block PROTO((rtx));
+static void cse_check_loop_start PROTO((rtx, rtx));
+static void cse_set_around_loop PROTO((rtx, rtx, rtx));
+static rtx cse_basic_block PROTO((rtx, rtx, struct branch_path *, int));
+static void count_reg_usage PROTO((rtx, int *, rtx, int));
+
+extern int rtx_equal_function_value_matters;
+
+/* Return an estimate of the cost of computing rtx X.
+ One use is in cse, to decide which expression to keep in the hash table.
+ Another is in rtl generation, to pick the cheapest way to multiply.
+ Other uses like the latter are expected in the future. */
+
+/* Return the right cost to give to an operation
+ to make the cost of the corresponding register-to-register instruction
+ N times that of a fast register-to-register instruction. */
+
+#define COSTS_N_INSNS(N) ((N) * 4 - 2)
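+
+/* (Editor's note: so COSTS_N_INSNS (1) == 2, matching the default cost
+   of a simple operation in rtx_cost below, and COSTS_N_INSNS (5) == 18.) */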
+
+int
+rtx_cost (x, outer_code)
+ rtx x;
+ enum rtx_code outer_code;
+{
+ register int i, j;
+ register enum rtx_code code;
+ register char *fmt;
+ register int total;
+
+ if (x == 0)
+ return 0;
+
+ /* Compute the default costs of certain things.
+ Note that RTX_COSTS can override the defaults. */
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case MULT:
+ /* Count multiplication by 2**n as a shift,
+ because if we are considering it, we would output it as a shift. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
+ total = 2;
+ else
+ total = COSTS_N_INSNS (5);
+ break;
+ case DIV:
+ case UDIV:
+ case MOD:
+ case UMOD:
+ total = COSTS_N_INSNS (7);
+ break;
+ case USE:
+ /* Used in loop.c and combine.c as a marker. */
+ total = 0;
+ break;
+ case ASM_OPERANDS:
+ /* We don't want these to be used in substitutions because
+ we have no way of validating the resulting insn. So assign
+ anything containing an ASM_OPERANDS a very high cost. */
+ total = 1000;
+ break;
+ default:
+ total = 2;
+ }
+
+ switch (code)
+ {
+ case REG:
+ return ! CHEAP_REG (x);
+
+ case SUBREG:
+ /* If we can't tie these modes, make this expensive. The larger
+ the mode, the more expensive it is. */
+ if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
+ return COSTS_N_INSNS (2
+ + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
+ return 2;
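+
+      /* Editor's note: RTX_COSTS and CONST_COSTS are machine-description
+	 macros that expand to `case' labels (each arm ending in a
+	 `return') inside this switch, which is why they appear here with
+	 no `case' of their own. */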
+#ifdef RTX_COSTS
+ RTX_COSTS (x, code, outer_code);
+#endif
+ CONST_COSTS (x, code, outer_code);
+ }
+
+ /* Sum the costs of the sub-rtx's, plus cost of this operation,
+ which is already in total. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ total += rtx_cost (XEXP (x, i), code);
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ total += rtx_cost (XVECEXP (x, i, j), code);
+
+ return total;
+}
+
+/* Clear the hash table and initialize each register with its own quantity,
+ for a new basic block. */
+
+static void
+new_basic_block ()
+{
+ register int i;
+
+ next_qty = max_reg;
+
+ bzero ((char *) reg_tick, max_reg * sizeof (int));
+
+ bcopy ((char *) all_minus_one, (char *) reg_in_table,
+ max_reg * sizeof (int));
+ bcopy ((char *) consec_ints, (char *) reg_qty, max_reg * sizeof (int));
+ CLEAR_HARD_REG_SET (hard_regs_in_table);
+
+ /* The per-quantity values used to be initialized here, but it is
+ much faster to initialize each as it is made in `make_new_qty'. */
+
+ for (i = 0; i < NBUCKETS; i++)
+ {
+ register struct table_elt *this, *next;
+ for (this = table[i]; this; this = next)
+ {
+ next = this->next_same_hash;
+ free_element (this);
+ }
+ }
+
+ bzero ((char *) table, sizeof table);
+
+ prev_insn = 0;
+
+#ifdef HAVE_cc0
+ prev_insn_cc0 = 0;
+#endif
+}
+
+/* Say that register REG contains a quantity not previously held in any
+   register, and initialize that quantity. */
+
+static void
+make_new_qty (reg)
+ register int reg;
+{
+ register int q;
+
+ if (next_qty >= max_qty)
+ abort ();
+
+ q = reg_qty[reg] = next_qty++;
+ qty_first_reg[q] = reg;
+ qty_last_reg[q] = reg;
+ qty_const[q] = qty_const_insn[q] = 0;
+ qty_comparison_code[q] = UNKNOWN;
+
+ reg_next_eqv[reg] = reg_prev_eqv[reg] = -1;
+}
+
+/* Make reg NEW equivalent to reg OLD.
+ OLD is not changing; NEW is. */
+
+static void
+make_regs_eqv (new, old)
+ register int new, old;
+{
+ register int lastr, firstr;
+ register int q = reg_qty[old];
+
+ /* Nothing should become eqv until it has a "non-invalid" qty number. */
+ if (! REGNO_QTY_VALID_P (old))
+ abort ();
+
+ reg_qty[new] = q;
+ firstr = qty_first_reg[q];
+ lastr = qty_last_reg[q];
+
+ /* Prefer fixed hard registers to anything. Prefer pseudo regs to other
+ hard regs. Among pseudos, if NEW will live longer than any other reg
+ of the same qty, and that is beyond the current basic block,
+ make it the new canonical replacement for this qty. */
+ if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr))
+ /* Certain fixed registers might be of the class NO_REGS. This means
+ that not only can they not be allocated by the compiler, but
+ they cannot be used in substitutions or canonicalizations
+ either. */
+ && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS)
+ && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new))
+ || (new >= FIRST_PSEUDO_REGISTER
+ && (firstr < FIRST_PSEUDO_REGISTER
+ || ((uid_cuid[regno_last_uid[new]] > cse_basic_block_end
+ || (uid_cuid[regno_first_uid[new]]
+ < cse_basic_block_start))
+ && (uid_cuid[regno_last_uid[new]]
+ > uid_cuid[regno_last_uid[firstr]]))))))
+ {
+ reg_prev_eqv[firstr] = new;
+ reg_next_eqv[new] = firstr;
+ reg_prev_eqv[new] = -1;
+ qty_first_reg[q] = new;
+ }
+ else
+ {
+ /* If NEW is a hard reg (known to be non-fixed), insert at end.
+ Otherwise, insert before any non-fixed hard regs that are at the
+ end. Registers of class NO_REGS cannot be used as an
+ equivalent for anything. */
+ while (lastr < FIRST_PSEUDO_REGISTER && reg_prev_eqv[lastr] >= 0
+ && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
+ && new >= FIRST_PSEUDO_REGISTER)
+ lastr = reg_prev_eqv[lastr];
+ reg_next_eqv[new] = reg_next_eqv[lastr];
+ if (reg_next_eqv[lastr] >= 0)
+ reg_prev_eqv[reg_next_eqv[lastr]] = new;
+ else
+ qty_last_reg[q] = new;
+ reg_next_eqv[lastr] = new;
+ reg_prev_eqv[new] = lastr;
+ }
+}
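+
+/* Illustration (editor's sketch, with made-up numbers): if quantity Q's
+   chain is just pseudo 66 and make_regs_eqv (70, 66) is called for a
+   pseudo 70 that does not outlive 66, reg 70 is appended at the end:
+   qty_first_reg[Q] == 66, qty_last_reg[Q] == 70, reg_next_eqv[66] == 70,
+   and reg_prev_eqv[70] == 66. */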
+
+/* Remove REG from its equivalence class. */
+
+static void
+delete_reg_equiv (reg)
+ register int reg;
+{
+ register int q = reg_qty[reg];
+ register int p, n;
+
+ /* If invalid, do nothing. */
+ if (q == reg)
+ return;
+
+ p = reg_prev_eqv[reg];
+ n = reg_next_eqv[reg];
+
+ if (n != -1)
+ reg_prev_eqv[n] = p;
+ else
+ qty_last_reg[q] = p;
+ if (p != -1)
+ reg_next_eqv[p] = n;
+ else
+ qty_first_reg[q] = n;
+
+ reg_qty[reg] = reg;
+}
+
+/* Remove any invalid expressions from the hash table
+ that refer to any of the registers contained in expression X.
+
+ Make sure that newly inserted references to those registers
+ as subexpressions will be considered valid.
+
+ mention_regs is not called when a register itself
+ is being stored in the table.
+
+ Return 1 if we have done something that may have changed the hash code
+ of X. */
+
+static int
+mention_regs (x)
+ rtx x;
+{
+ register enum rtx_code code;
+ register int i, j;
+ register char *fmt;
+ register int changed = 0;
+
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+ if (code == REG)
+ {
+ register int regno = REGNO (x);
+ register int endregno
+ = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (regno, GET_MODE (x)));
+ int i;
+
+ for (i = regno; i < endregno; i++)
+ {
+ if (reg_in_table[i] >= 0 && reg_in_table[i] != reg_tick[i])
+ remove_invalid_refs (i);
+
+ reg_in_table[i] = reg_tick[i];
+ }
+
+ return 0;
+ }
+
+ /* If X is a comparison or a COMPARE and either operand is a register
+ that does not have a quantity, give it one. This is so that a later
+ call to record_jump_equiv won't cause X to be assigned a different
+ hash code and not found in the table after that call.
+
+ It is not necessary to do this here, since rehash_using_reg can
+ fix up the table later, but doing this here eliminates the need to
+ call that expensive function in the most common case where the only
+ use of the register is in the comparison. */
+
+ if (code == COMPARE || GET_RTX_CLASS (code) == '<')
+ {
+ if (GET_CODE (XEXP (x, 0)) == REG
+ && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
+ if (insert_regs (XEXP (x, 0), NULL_PTR, 0))
+ {
+ rehash_using_reg (XEXP (x, 0));
+ changed = 1;
+ }
+
+ if (GET_CODE (XEXP (x, 1)) == REG
+ && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
+ if (insert_regs (XEXP (x, 1), NULL_PTR, 0))
+ {
+ rehash_using_reg (XEXP (x, 1));
+ changed = 1;
+ }
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ changed |= mention_regs (XEXP (x, i));
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ changed |= mention_regs (XVECEXP (x, i, j));
+
+ return changed;
+}
+
+/* Update the register quantities for inserting X into the hash table
+ with a value equivalent to CLASSP.
+ (If the class does not contain a REG, it is irrelevant.)
+ If MODIFIED is nonzero, X is a destination; it is being modified.
+ Note that delete_reg_equiv should be called on a register
+ before insert_regs is done on that register with MODIFIED != 0.
+
+ Nonzero value means that elements of reg_qty have changed
+ so X's hash code may be different. */
+
+static int
+insert_regs (x, classp, modified)
+ rtx x;
+ struct table_elt *classp;
+ int modified;
+{
+ if (GET_CODE (x) == REG)
+ {
+ register int regno = REGNO (x);
+
+ /* If REGNO is in the equivalence table already but is of the
+ wrong mode for that equivalence, don't do anything here. */
+
+ if (REGNO_QTY_VALID_P (regno)
+ && qty_mode[reg_qty[regno]] != GET_MODE (x))
+ return 0;
+
+ if (modified || ! REGNO_QTY_VALID_P (regno))
+ {
+ if (classp)
+ for (classp = classp->first_same_value;
+ classp != 0;
+ classp = classp->next_same_value)
+ if (GET_CODE (classp->exp) == REG
+ && GET_MODE (classp->exp) == GET_MODE (x))
+ {
+ make_regs_eqv (regno, REGNO (classp->exp));
+ return 1;
+ }
+
+ make_new_qty (regno);
+ qty_mode[reg_qty[regno]] = GET_MODE (x);
+ return 1;
+ }
+
+ return 0;
+ }
+
+ /* If X is a SUBREG, we will likely be inserting the inner register in the
+ table. If that register doesn't have an assigned quantity number at
+ this point but does later, the insertion that we will be doing now will
+ not be accessible because its hash code will have changed. So assign
+ a quantity number now. */
+
+ else if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
+ && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
+ {
+ insert_regs (SUBREG_REG (x), NULL_PTR, 0);
+ mention_regs (SUBREG_REG (x));
+ return 1;
+ }
+ else
+ return mention_regs (x);
+}
+
+/* Look in or update the hash table. */
+
+/* Put the element ELT on the list of free elements. */
+
+static void
+free_element (elt)
+ struct table_elt *elt;
+{
+ elt->next_same_hash = free_element_chain;
+ free_element_chain = elt;
+}
+
+/* Return an element that is free for use. */
+
+static struct table_elt *
+get_element ()
+{
+ struct table_elt *elt = free_element_chain;
+ if (elt)
+ {
+ free_element_chain = elt->next_same_hash;
+ return elt;
+ }
+ n_elements_made++;
+ return (struct table_elt *) oballoc (sizeof (struct table_elt));
+}
+
+/* Remove table element ELT from use in the table.
+ HASH is its hash code, made using the HASH macro.
+ It's an argument because often that is known in advance
+ and we save much time not recomputing it. */
+
+static void
+remove_from_table (elt, hash)
+ register struct table_elt *elt;
+ unsigned hash;
+{
+ if (elt == 0)
+ return;
+
+ /* Mark this element as removed. See cse_insn. */
+ elt->first_same_value = 0;
+
+ /* Remove the table element from its equivalence class. */
+
+ {
+ register struct table_elt *prev = elt->prev_same_value;
+ register struct table_elt *next = elt->next_same_value;
+
+ if (next) next->prev_same_value = prev;
+
+ if (prev)
+ prev->next_same_value = next;
+ else
+ {
+ register struct table_elt *newfirst = next;
+ while (next)
+ {
+ next->first_same_value = newfirst;
+ next = next->next_same_value;
+ }
+ }
+ }
+
+ /* Remove the table element from its hash bucket. */
+
+ {
+ register struct table_elt *prev = elt->prev_same_hash;
+ register struct table_elt *next = elt->next_same_hash;
+
+ if (next) next->prev_same_hash = prev;
+
+ if (prev)
+ prev->next_same_hash = next;
+ else if (table[hash] == elt)
+ table[hash] = next;
+ else
+ {
+ /* This entry is not in the proper hash bucket. This can happen
+ when two classes were merged by `merge_equiv_classes'. Search
+ for the hash bucket that it heads. This happens only very
+ rarely, so the cost is acceptable. */
+ for (hash = 0; hash < NBUCKETS; hash++)
+ if (table[hash] == elt)
+ table[hash] = next;
+ }
+ }
+
+ /* Remove the table element from its related-value circular chain. */
+
+ if (elt->related_value != 0 && elt->related_value != elt)
+ {
+ register struct table_elt *p = elt->related_value;
+ while (p->related_value != elt)
+ p = p->related_value;
+ p->related_value = elt->related_value;
+ if (p->related_value == p)
+ p->related_value = 0;
+ }
+
+ free_element (elt);
+}
+
+/* Look up X in the hash table and return its table element,
+ or 0 if X is not in the table.
+
+ MODE is the machine-mode of X, or if X is an integer constant
+ with VOIDmode then MODE is the mode with which X will be used.
+
+ Here we are satisfied to find an expression whose tree structure
+ looks like X. */
+
+static struct table_elt *
+lookup (x, hash, mode)
+ rtx x;
+ unsigned hash;
+ enum machine_mode mode;
+{
+ register struct table_elt *p;
+
+ for (p = table[hash]; p; p = p->next_same_hash)
+ if (mode == p->mode && ((x == p->exp && GET_CODE (x) == REG)
+ || exp_equiv_p (x, p->exp, GET_CODE (x) != REG, 0)))
+ return p;
+
+ return 0;
+}
+
+/* Like `lookup' but don't care whether the table element uses invalid regs.
+ Also ignore discrepancies in the machine mode of a register. */
+
+static struct table_elt *
+lookup_for_remove (x, hash, mode)
+ rtx x;
+ unsigned hash;
+ enum machine_mode mode;
+{
+ register struct table_elt *p;
+
+ if (GET_CODE (x) == REG)
+ {
+ int regno = REGNO (x);
+ /* Don't check the machine mode when comparing registers;
+ invalidating (REG:SI 0) also invalidates (REG:DF 0). */
+ for (p = table[hash]; p; p = p->next_same_hash)
+ if (GET_CODE (p->exp) == REG
+ && REGNO (p->exp) == regno)
+ return p;
+ }
+ else
+ {
+ for (p = table[hash]; p; p = p->next_same_hash)
+ if (mode == p->mode && (x == p->exp || exp_equiv_p (x, p->exp, 0, 0)))
+ return p;
+ }
+
+ return 0;
+}
+
+/* Look for an expression equivalent to X and with code CODE.
+ If one is found, return that expression. */
+
+static rtx
+lookup_as_function (x, code)
+ rtx x;
+ enum rtx_code code;
+{
+ register struct table_elt *p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS,
+ GET_MODE (x));
+ if (p == 0)
+ return 0;
+
+ for (p = p->first_same_value; p; p = p->next_same_value)
+ {
+ if (GET_CODE (p->exp) == code
+ /* Make sure this is a valid entry in the table. */
+ && exp_equiv_p (p->exp, p->exp, 1, 0))
+ return p->exp;
+ }
+
+ return 0;
+}
+
+/* Insert X in the hash table, assuming HASH is its hash code
+ and CLASSP is an element of the class it should go in
+ (or 0 if a new class should be made).
+ It is inserted at the proper position to keep the class in
+ the order cheapest first.
+
+ MODE is the machine-mode of X, or if X is an integer constant
+ with VOIDmode then MODE is the mode with which X will be used.
+
+ For elements of equal cheapness, the most recent one
+ goes in front, except that the first element in the list
+ remains first unless a cheaper element is added. The order of
+ pseudo-registers does not matter, as canon_reg will be called to
+ find the cheapest when a register is retrieved from the table.
+
+ The in_memory field in the hash table element is set to 0.
+ The caller must set it nonzero if appropriate.
+
+ You should call insert_regs (X, CLASSP, MODIFY) before calling here,
+ and if insert_regs returns a nonzero value
+ you must then recompute its hash code before calling here.
+
+ If necessary, update table showing constant values of quantities. */
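+
+/* A typical calling sequence, as used by merge_equiv_classes below
+   (editor's sketch of the convention just described):
+
+       if (insert_regs (x, classp, 0))
+	 hash = HASH (x, mode);
+       elt = insert (x, classp, hash, mode);
+
+   The hash is recomputed whenever insert_regs changes reg_qty, since
+   the old value may no longer index the right bucket. */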
+
+#define CHEAPER(X,Y) ((X)->cost < (Y)->cost)
+
+static struct table_elt *
+insert (x, classp, hash, mode)
+ register rtx x;
+ register struct table_elt *classp;
+ unsigned hash;
+ enum machine_mode mode;
+{
+ register struct table_elt *elt;
+
+ /* If X is a register and we haven't made a quantity for it,
+ something is wrong. */
+ if (GET_CODE (x) == REG && ! REGNO_QTY_VALID_P (REGNO (x)))
+ abort ();
+
+ /* If X is a hard register, show it is being put in the table. */
+ if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ {
+ int regno = REGNO (x);
+ int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+ int i;
+
+ for (i = regno; i < endregno; i++)
+ SET_HARD_REG_BIT (hard_regs_in_table, i);
+ }
+
+
+ /* Put an element for X into the right hash bucket. */
+
+ elt = get_element ();
+ elt->exp = x;
+ elt->cost = COST (x);
+ elt->next_same_value = 0;
+ elt->prev_same_value = 0;
+ elt->next_same_hash = table[hash];
+ elt->prev_same_hash = 0;
+ elt->related_value = 0;
+ elt->in_memory = 0;
+ elt->mode = mode;
+ elt->is_const = (CONSTANT_P (x)
+ /* GNU C++ takes advantage of this for `this'
+ (and other const values). */
+ || (RTX_UNCHANGING_P (x)
+ && GET_CODE (x) == REG
+ && REGNO (x) >= FIRST_PSEUDO_REGISTER)
+ || FIXED_BASE_PLUS_P (x));
+
+ if (table[hash])
+ table[hash]->prev_same_hash = elt;
+ table[hash] = elt;
+
+ /* Put it into the proper value-class. */
+ if (classp)
+ {
+ classp = classp->first_same_value;
+ if (CHEAPER (elt, classp))
+ /* Insert at the head of the class */
+ {
+ register struct table_elt *p;
+ elt->next_same_value = classp;
+ classp->prev_same_value = elt;
+ elt->first_same_value = elt;
+
+ for (p = classp; p; p = p->next_same_value)
+ p->first_same_value = elt;
+ }
+ else
+ {
+ /* Insert not at head of the class. */
+ /* Put it after the last element cheaper than X. */
+ register struct table_elt *p, *next;
+ for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt);
+ p = next);
+ /* Put it after P and before NEXT. */
+ elt->next_same_value = next;
+ if (next)
+ next->prev_same_value = elt;
+ elt->prev_same_value = p;
+ p->next_same_value = elt;
+ elt->first_same_value = classp;
+ }
+ }
+ else
+ elt->first_same_value = elt;
+
+ /* If this is a constant being set equivalent to a register or a register
+ being set equivalent to a constant, note the constant equivalence.
+
+ If this is a constant, it cannot be equivalent to a different constant,
+ and a constant is the only thing that can be cheaper than a register. So
+ we know the register is the head of the class (before the constant was
+ inserted).
+
+ If this is a register that is not already known equivalent to a
+ constant, we must check the entire class.
+
+   If this is a register that is already known equivalent to a constant,
+   update `qty_const_insn' to show that `this_insn' is the latest
+   insn making that quantity equivalent to the constant. */
+
+ if (elt->is_const && classp && GET_CODE (classp->exp) == REG)
+ {
+ qty_const[reg_qty[REGNO (classp->exp)]]
+ = gen_lowpart_if_possible (qty_mode[reg_qty[REGNO (classp->exp)]], x);
+ qty_const_insn[reg_qty[REGNO (classp->exp)]] = this_insn;
+ }
+
+ else if (GET_CODE (x) == REG && classp && ! qty_const[reg_qty[REGNO (x)]])
+ {
+ register struct table_elt *p;
+
+ for (p = classp; p != 0; p = p->next_same_value)
+ {
+ if (p->is_const)
+ {
+ qty_const[reg_qty[REGNO (x)]]
+ = gen_lowpart_if_possible (GET_MODE (x), p->exp);
+ qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
+ break;
+ }
+ }
+ }
+
+ else if (GET_CODE (x) == REG && qty_const[reg_qty[REGNO (x)]]
+ && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]])
+ qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
+
+ /* If this is a constant with symbolic value,
+ and it has a term with an explicit integer value,
+ link it up with related expressions. */
+ if (GET_CODE (x) == CONST)
+ {
+ rtx subexp = get_related_value (x);
+ unsigned subhash;
+ struct table_elt *subelt, *subelt_prev;
+
+ if (subexp != 0)
+ {
+ /* Get the integer-free subexpression in the hash table. */
+ subhash = safe_hash (subexp, mode) % NBUCKETS;
+ subelt = lookup (subexp, subhash, mode);
+ if (subelt == 0)
+ subelt = insert (subexp, NULL_PTR, subhash, mode);
+ /* Initialize SUBELT's circular chain if it has none. */
+ if (subelt->related_value == 0)
+ subelt->related_value = subelt;
+ /* Find the element in the circular chain that precedes SUBELT. */
+ subelt_prev = subelt;
+ while (subelt_prev->related_value != subelt)
+ subelt_prev = subelt_prev->related_value;
+ /* Put new ELT into SUBELT's circular chain just before SUBELT.
+ This way the element that follows SUBELT is the oldest one. */
+ elt->related_value = subelt_prev->related_value;
+ subelt_prev->related_value = elt;
+ }
+ }
+
+ return elt;
+}
+
+/* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from
+ CLASS2 into CLASS1. This is done when we have reached an insn which makes
+ the two classes equivalent.
+
+ CLASS1 will be the surviving class; CLASS2 should not be used after this
+ call.
+
+ Any invalid entries in CLASS2 will not be copied. */
+
+static void
+merge_equiv_classes (class1, class2)
+ struct table_elt *class1, *class2;
+{
+ struct table_elt *elt, *next, *new;
+
+ /* Ensure we start with the head of the classes. */
+ class1 = class1->first_same_value;
+ class2 = class2->first_same_value;
+
+ /* If they were already equal, forget it. */
+ if (class1 == class2)
+ return;
+
+ for (elt = class2; elt; elt = next)
+ {
+ unsigned hash;
+ rtx exp = elt->exp;
+ enum machine_mode mode = elt->mode;
+
+ next = elt->next_same_value;
+
+ /* Remove old entry, make a new one in CLASS1's class.
+ Don't do this for invalid entries as we cannot find their
+ hash code (it also isn't necessary). */
+ if (GET_CODE (exp) == REG || exp_equiv_p (exp, exp, 1, 0))
+ {
+ hash_arg_in_memory = 0;
+ hash_arg_in_struct = 0;
+ hash = HASH (exp, mode);
+
+ if (GET_CODE (exp) == REG)
+ delete_reg_equiv (REGNO (exp));
+
+ remove_from_table (elt, hash);
+
+ if (insert_regs (exp, class1, 0))
+ hash = HASH (exp, mode);
+ new = insert (exp, class1, hash, mode);
+ new->in_memory = hash_arg_in_memory;
+ new->in_struct = hash_arg_in_struct;
+ }
+ }
+}
+
+/* Remove from the hash table, or mark as invalid,
+ all expressions whose values could be altered by storing in X.
+ X is a register, a subreg, or a memory reference with nonvarying address
+ (because, when a memory reference with a varying address is stored in,
+ all memory references are removed by invalidate_memory
+ so specific invalidation is superfluous).
+
+ A nonvarying address may be just a register or just
+ a symbol reference, or it may be either of those plus
+ a numeric offset. */
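+
+/* For instance (editor's illustration): storing through
+   (mem:SI (plus (reg fp) (const_int 8))) need only remove entries that
+   could overlap those bytes, whereas a store through an arbitrary
+   pseudo-register address is handled by invalidate_memory, which wipes
+   every memory entry. */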
+
+static void
+invalidate (x)
+ rtx x;
+{
+ register int i;
+ register struct table_elt *p;
+ rtx base;
+ HOST_WIDE_INT start, end;
+
+ /* If X is a register, dependencies on its contents
+ are recorded through the qty number mechanism.
+ Just change the qty number of the register,
+ mark it as invalid for expressions that refer to it,
+ and remove it itself. */
+
+ if (GET_CODE (x) == REG)
+ {
+ register int regno = REGNO (x);
+ register unsigned hash = HASH (x, GET_MODE (x));
+
+ /* Remove REGNO from any quantity list it might be on and indicate
+	 that its value might have changed.  If it is a pseudo, remove its
+ entry from the hash table.
+
+ For a hard register, we do the first two actions above for any
+ additional hard registers corresponding to X. Then, if any of these
+ registers are in the table, we must remove any REG entries that
+ overlap these registers. */
+
+ delete_reg_equiv (regno);
+ reg_tick[regno]++;
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ remove_from_table (lookup_for_remove (x, hash, GET_MODE (x)), hash);
+ else
+ {
+ HOST_WIDE_INT in_table
+ = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
+ int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+ int tregno, tendregno;
+ register struct table_elt *p, *next;
+
+ CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);
+
+ for (i = regno + 1; i < endregno; i++)
+ {
+ in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, i);
+ CLEAR_HARD_REG_BIT (hard_regs_in_table, i);
+ delete_reg_equiv (i);
+ reg_tick[i]++;
+ }
+
+ if (in_table)
+ for (hash = 0; hash < NBUCKETS; hash++)
+ for (p = table[hash]; p; p = next)
+ {
+ next = p->next_same_hash;
+
+ if (GET_CODE (p->exp) != REG
+ || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
+ continue;
+
+ tregno = REGNO (p->exp);
+ tendregno
+ = tregno + HARD_REGNO_NREGS (tregno, GET_MODE (p->exp));
+ if (tendregno > regno && tregno < endregno)
+ remove_from_table (p, hash);
+ }
+ }
+
+ return;
+ }
+
+ if (GET_CODE (x) == SUBREG)
+ {
+ if (GET_CODE (SUBREG_REG (x)) != REG)
+ abort ();
+ invalidate (SUBREG_REG (x));
+ return;
+ }
+
+ /* X is not a register; it must be a memory reference with
+ a nonvarying address. Remove all hash table elements
+ that refer to overlapping pieces of memory. */
+
+ if (GET_CODE (x) != MEM)
+ abort ();
+
+ set_nonvarying_address_components (XEXP (x, 0), GET_MODE_SIZE (GET_MODE (x)),
+ &base, &start, &end);
+
+ for (i = 0; i < NBUCKETS; i++)
+ {
+ register struct table_elt *next;
+ for (p = table[i]; p; p = next)
+ {
+ next = p->next_same_hash;
+ if (refers_to_mem_p (p->exp, base, start, end))
+ remove_from_table (p, i);
+ }
+ }
+}
+
+/* Remove all expressions that refer to register REGNO,
+ since they are already invalid, and we are about to
+ mark that register valid again and don't want the old
+ expressions to reappear as valid. */
+
+static void
+remove_invalid_refs (regno)
+ int regno;
+{
+ register int i;
+ register struct table_elt *p, *next;
+
+ for (i = 0; i < NBUCKETS; i++)
+ for (p = table[i]; p; p = next)
+ {
+ next = p->next_same_hash;
+ if (GET_CODE (p->exp) != REG
+ && refers_to_regno_p (regno, regno + 1, p->exp, NULL_PTR))
+ remove_from_table (p, i);
+ }
+}
+
+/* Recompute the hash codes of any valid entries in the hash table that
+ reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG.
+
+ This is called when we make a jump equivalence. */
+
+static void
+rehash_using_reg (x)
+ rtx x;
+{
+ int i;
+ struct table_elt *p, *next;
+ unsigned hash;
+
+ if (GET_CODE (x) == SUBREG)
+ x = SUBREG_REG (x);
+
+ /* If X is not a register or if the register is known not to be in any
+ valid entries in the table, we have no work to do. */
+
+ if (GET_CODE (x) != REG
+ || reg_in_table[REGNO (x)] < 0
+ || reg_in_table[REGNO (x)] != reg_tick[REGNO (x)])
+ return;
+
+ /* Scan all hash chains looking for valid entries that mention X.
+ If we find one and it is in the wrong hash chain, move it. We can skip
+ objects that are registers, since they are handled specially. */
+
+ for (i = 0; i < NBUCKETS; i++)
+ for (p = table[i]; p; p = next)
+ {
+ next = p->next_same_hash;
+ if (GET_CODE (p->exp) != REG && reg_mentioned_p (x, p->exp)
+ && exp_equiv_p (p->exp, p->exp, 1, 0)
+ && i != (hash = safe_hash (p->exp, p->mode) % NBUCKETS))
+ {
+ if (p->next_same_hash)
+ p->next_same_hash->prev_same_hash = p->prev_same_hash;
+
+ if (p->prev_same_hash)
+ p->prev_same_hash->next_same_hash = p->next_same_hash;
+ else
+ table[i] = p->next_same_hash;
+
+ p->next_same_hash = table[hash];
+ p->prev_same_hash = 0;
+ if (table[hash])
+ table[hash]->prev_same_hash = p;
+ table[hash] = p;
+ }
+ }
+}
+
+/* Remove from the hash table all expressions that reference memory,
+ or some of them as specified by *WRITES. */
+
+static void
+invalidate_memory (writes)
+ struct write_data *writes;
+{
+ register int i;
+ register struct table_elt *p, *next;
+ int all = writes->all;
+ int nonscalar = writes->nonscalar;
+
+ for (i = 0; i < NBUCKETS; i++)
+ for (p = table[i]; p; p = next)
+ {
+ next = p->next_same_hash;
+ if (p->in_memory
+ && (all
+ || (nonscalar && p->in_struct)
+ || cse_rtx_addr_varies_p (p->exp)))
+ remove_from_table (p, i);
+ }
+}
+
+/* Remove from the hash table any expression that is a call-clobbered
+ register. Also update their TICK values. */
+
+static void
+invalidate_for_call ()
+{
+ int regno, endregno;
+ int i;
+ unsigned hash;
+ struct table_elt *p, *next;
+ int in_table = 0;
+
+ /* Go through all the hard registers. For each that is clobbered in
+ a CALL_INSN, remove the register from quantity chains and update
+ reg_tick if defined. Also see if any of these registers is currently
+ in the table. */
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
+ {
+ delete_reg_equiv (regno);
+ if (reg_tick[regno] >= 0)
+ reg_tick[regno]++;
+
+ in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, regno);
+ }
+
+ /* In the case where we have no call-clobbered hard registers in the
+ table, we are done. Otherwise, scan the table and remove any
+ entry that overlaps a call-clobbered register. */
+
+ if (in_table)
+ for (hash = 0; hash < NBUCKETS; hash++)
+ for (p = table[hash]; p; p = next)
+ {
+ next = p->next_same_hash;
+
+ if (GET_CODE (p->exp) != REG
+ || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
+ continue;
+
+ regno = REGNO (p->exp);
+ endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (p->exp));
+
+ for (i = regno; i < endregno; i++)
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
+ {
+ remove_from_table (p, hash);
+ break;
+ }
+ }
+}
+
+/* Given an expression X of type CONST,
+ and ELT which is its table entry (or 0 if it
+ is not in the hash table),
+ return an alternate expression for X as a register plus integer.
+ If none can be found, return 0. */
+
+static rtx
+use_related_value (x, elt)
+ rtx x;
+ struct table_elt *elt;
+{
+ register struct table_elt *relt = 0;
+ register struct table_elt *p, *q;
+ HOST_WIDE_INT offset;
+
+  /* First, is there anything related known?
+     If we have a table element, we can tell from that.
+     Otherwise, we must look it up. */
+
+ if (elt != 0 && elt->related_value != 0)
+ relt = elt;
+ else if (elt == 0 && GET_CODE (x) == CONST)
+ {
+ rtx subexp = get_related_value (x);
+ if (subexp != 0)
+ relt = lookup (subexp,
+ safe_hash (subexp, GET_MODE (subexp)) % NBUCKETS,
+ GET_MODE (subexp));
+ }
+
+ if (relt == 0)
+ return 0;
+
+ /* Search all related table entries for one that has an
+ equivalent register. */
+
+ p = relt;
+ while (1)
+ {
+ /* This loop is strange in that it is executed in two different cases.
+ The first is when X is already in the table. Then it is searching
+ the RELATED_VALUE list of X's class (RELT). The second case is when
+ X is not in the table. Then RELT points to a class for the related
+ value.
+
+	 Ensure that, whatever case we are in, we ignore classes that have
+	 the same value as X. */
+
+ if (rtx_equal_p (x, p->exp))
+ q = 0;
+ else
+ for (q = p->first_same_value; q; q = q->next_same_value)
+ if (GET_CODE (q->exp) == REG)
+ break;
+
+ if (q)
+ break;
+
+ p = p->related_value;
+
+ /* We went all the way around, so there is nothing to be found.
+ Alternatively, perhaps RELT was in the table for some other reason
+ and it has no related values recorded. */
+ if (p == relt || p == 0)
+ break;
+ }
+
+ if (q == 0)
+ return 0;
+
+ offset = (get_integer_term (x) - get_integer_term (p->exp));
+  /* Note: OFFSET may be 0 if P->exp and X are related by commutativity. */
+ return plus_constant (q->exp, offset);
+}
+
+/* Hash an rtx. We are careful to make sure the value is never negative.
+ Equivalent registers hash identically.
+ MODE is used in hashing for CONST_INTs only;
+ otherwise the mode of X is used.
+
+ Store 1 in do_not_record if any subexpression is volatile.
+
+ Store 1 in hash_arg_in_memory if X contains a MEM rtx
+ which does not have the RTX_UNCHANGING_P bit set.
+ In this case, also store 1 in hash_arg_in_struct
+ if there is a MEM rtx which has the MEM_IN_STRUCT_P bit set.
+
+ Note that cse_insn knows that the hash code of a MEM expression
+ is just (int) MEM plus the hash code of the address. */
+
+static unsigned
+canon_hash (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ register int i, j;
+ register unsigned hash = 0;
+ register enum rtx_code code;
+ register char *fmt;
+
+ /* repeat is used to turn tail-recursion into iteration. */
+ repeat:
+ if (x == 0)
+ return hash;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case REG:
+ {
+ register int regno = REGNO (x);
+
+ /* On some machines, we can't record any non-fixed hard register,
+ because extending its life will cause reload problems. We
+ consider ap, fp, and sp to be fixed for this purpose.
+ On all machines, we can't record any global registers. */
+
+ if (regno < FIRST_PSEUDO_REGISTER
+ && (global_regs[regno]
+#ifdef SMALL_REGISTER_CLASSES
+ || (! fixed_regs[regno]
+ && regno != FRAME_POINTER_REGNUM
+ && regno != HARD_FRAME_POINTER_REGNUM
+ && regno != ARG_POINTER_REGNUM
+ && regno != STACK_POINTER_REGNUM)
+#endif
+ ))
+ {
+ do_not_record = 1;
+ return 0;
+ }
+ hash += ((unsigned) REG << 7) + (unsigned) reg_qty[regno];
+ return hash;
+ }
+
+ case CONST_INT:
+ {
+ unsigned HOST_WIDE_INT tem = INTVAL (x);
+ hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + tem;
+ return hash;
+ }
+
+ case CONST_DOUBLE:
+ /* This is like the general case, except that it only counts
+ the integers representing the constant. */
+ hash += (unsigned) code + (unsigned) GET_MODE (x);
+ for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
+ {
+ unsigned tem = XINT (x, i);
+ hash += tem;
+ }
+ return hash;
+
+ /* Assume there is only one rtx object for any given label. */
+ case LABEL_REF:
+ hash
+ += ((unsigned) LABEL_REF << 7) + (unsigned HOST_WIDE_INT) XEXP (x, 0);
+ return hash;
+
+ case SYMBOL_REF:
+ hash
+ += ((unsigned) SYMBOL_REF << 7) + (unsigned HOST_WIDE_INT) XSTR (x, 0);
+ return hash;
+
+ case MEM:
+ if (MEM_VOLATILE_P (x))
+ {
+ do_not_record = 1;
+ return 0;
+ }
+ if (! RTX_UNCHANGING_P (x))
+ {
+ hash_arg_in_memory = 1;
+ if (MEM_IN_STRUCT_P (x)) hash_arg_in_struct = 1;
+ }
+ /* Now that we have already found this special case,
+ might as well speed it up as much as possible. */
+ hash += (unsigned) MEM;
+ x = XEXP (x, 0);
+ goto repeat;
+
+ case PRE_DEC:
+ case PRE_INC:
+ case POST_DEC:
+ case POST_INC:
+ case PC:
+ case CC0:
+ case CALL:
+ case UNSPEC_VOLATILE:
+ do_not_record = 1;
+ return 0;
+
+ case ASM_OPERANDS:
+ if (MEM_VOLATILE_P (x))
+ {
+ do_not_record = 1;
+ return 0;
+ }
+ }
+
+ i = GET_RTX_LENGTH (code) - 1;
+ hash += (unsigned) code + (unsigned) GET_MODE (x);
+ fmt = GET_RTX_FORMAT (code);
+ for (; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ rtx tem = XEXP (x, i);
+ rtx tem1;
+
+ /* If the operand is a REG that is equivalent to a constant, hash
+ as if we were hashing the constant, since we will be comparing
+ that way. */
+ if (tem != 0 && GET_CODE (tem) == REG
+ && REGNO_QTY_VALID_P (REGNO (tem))
+ && qty_mode[reg_qty[REGNO (tem)]] == GET_MODE (tem)
+ && (tem1 = qty_const[reg_qty[REGNO (tem)]]) != 0
+ && CONSTANT_P (tem1))
+ tem = tem1;
+
+ /* If we are about to do the last recursive call
+ needed at this level, change it into iteration.
+ This function is called enough to be worth it. */
+ if (i == 0)
+ {
+ x = tem;
+ goto repeat;
+ }
+ hash += canon_hash (tem, 0);
+ }
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ hash += canon_hash (XVECEXP (x, i, j), 0);
+ else if (fmt[i] == 's')
+ {
+ register unsigned char *p = (unsigned char *) XSTR (x, i);
+ if (p)
+ while (*p)
+ hash += *p++;
+ }
+ else if (fmt[i] == 'i')
+ {
+ register unsigned tem = XINT (x, i);
+ hash += tem;
+ }
+ else
+ abort ();
+ }
+ return hash;
+}
+
+/* Like canon_hash but with no side effects. */
+
+static unsigned
+safe_hash (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ int save_do_not_record = do_not_record;
+ int save_hash_arg_in_memory = hash_arg_in_memory;
+ int save_hash_arg_in_struct = hash_arg_in_struct;
+ unsigned hash = canon_hash (x, mode);
+ hash_arg_in_memory = save_hash_arg_in_memory;
+ hash_arg_in_struct = save_hash_arg_in_struct;
+ do_not_record = save_do_not_record;
+ return hash;
+}
+
+/* Return 1 iff X and Y would canonicalize into the same thing,
+ without actually constructing the canonicalization of either one.
+ If VALIDATE is nonzero,
+ we assume X is an expression being processed from the rtl
+ and Y was found in the hash table. We check register refs
+ in Y for being marked as valid.
+
+ If EQUAL_VALUES is nonzero, we allow a register to match a constant value
+ that is known to be in the register. Ordinarily, we don't allow them
+ to match, because letting them match would cause unpredictable results
+ in all the places that search a hash table chain for an equivalent
+ for a given value. A possible equivalent that has different structure
+ has its hash code computed from different data. Whether the hash code
+   is the same as that of the given value is pure luck. */
+
+static int
+exp_equiv_p (x, y, validate, equal_values)
+ rtx x, y;
+ int validate;
+ int equal_values;
+{
+ register int i, j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ /* Note: it is incorrect to assume an expression is equivalent to itself
+ if VALIDATE is nonzero. */
+ if (x == y && !validate)
+ return 1;
+ if (x == 0 || y == 0)
+ return x == y;
+
+ code = GET_CODE (x);
+ if (code != GET_CODE (y))
+ {
+ if (!equal_values)
+ return 0;
+
+ /* If X is a constant and Y is a register or vice versa, they may be
+ equivalent. We only have to validate if Y is a register. */
+ if (CONSTANT_P (x) && GET_CODE (y) == REG
+ && REGNO_QTY_VALID_P (REGNO (y))
+ && GET_MODE (y) == qty_mode[reg_qty[REGNO (y)]]
+ && rtx_equal_p (x, qty_const[reg_qty[REGNO (y)]])
+ && (! validate || reg_in_table[REGNO (y)] == reg_tick[REGNO (y)]))
+ return 1;
+
+ if (CONSTANT_P (y) && code == REG
+ && REGNO_QTY_VALID_P (REGNO (x))
+ && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]]
+ && rtx_equal_p (y, qty_const[reg_qty[REGNO (x)]]))
+ return 1;
+
+ return 0;
+ }
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ return x == y;
+
+ case CONST_INT:
+ return INTVAL (x) == INTVAL (y);
+
+ case LABEL_REF:
+ return XEXP (x, 0) == XEXP (y, 0);
+
+ case SYMBOL_REF:
+ return XSTR (x, 0) == XSTR (y, 0);
+
+ case REG:
+ {
+ int regno = REGNO (y);
+ int endregno
+ = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (regno, GET_MODE (y)));
+ int i;
+
+ /* If the quantities are not the same, the expressions are not
+	 equivalent.  If they are and we are not to validate, they
+ are equivalent. Otherwise, ensure all regs are up-to-date. */
+
+ if (reg_qty[REGNO (x)] != reg_qty[regno])
+ return 0;
+
+ if (! validate)
+ return 1;
+
+ for (i = regno; i < endregno; i++)
+ if (reg_in_table[i] != reg_tick[i])
+ return 0;
+
+ return 1;
+ }
+
+ /* For commutative operations, check both orders. */
+ case PLUS:
+ case MULT:
+ case AND:
+ case IOR:
+ case XOR:
+ case NE:
+ case EQ:
+ return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0), validate, equal_values)
+ && exp_equiv_p (XEXP (x, 1), XEXP (y, 1),
+ validate, equal_values))
+ || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1),
+ validate, equal_values)
+ && exp_equiv_p (XEXP (x, 1), XEXP (y, 0),
+ validate, equal_values)));
+ }
+
+  /* Compare the elements.  If any pair of corresponding elements
+     fails to match, return 0 for the whole thing.  */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'e':
+ if (! exp_equiv_p (XEXP (x, i), XEXP (y, i), validate, equal_values))
+ return 0;
+ break;
+
+ case 'E':
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
+ validate, equal_values))
+ return 0;
+ break;
+
+ case 's':
+ if (strcmp (XSTR (x, i), XSTR (y, i)))
+ return 0;
+ break;
+
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case '0':
+ break;
+
+ default:
+ abort ();
+ }
+ }
+
+ return 1;
+}
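+
+/* For example, with VALIDATE and EQUAL_VALUES both zero,
+
+	(plus:SI (reg:SI 70) (const_int 4))
+	(plus:SI (const_int 4) (reg:SI 70))
+
+   compare equal, since PLUS is one of the commutative codes checked in
+   both operand orders above; the same operands under MINUS would only
+   be compared element by element and so would not match.  */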
+
+/* Return 1 iff any subexpression of X matches Y.
+   Here we do not require that the registers referred to in X or Y be
+   valid for the hash table; VALIDATE is passed as zero.  */
+
+static int
+refers_to_p (x, y)
+ rtx x, y;
+{
+ register int i;
+ register enum rtx_code code;
+ register char *fmt;
+
+ repeat:
+ if (x == y)
+ return 1;
+ if (x == 0 || y == 0)
+ return 0;
+
+ code = GET_CODE (x);
+ /* If X as a whole has the same code as Y, they may match.
+ If so, return 1. */
+ if (code == GET_CODE (y))
+ {
+ if (exp_equiv_p (x, y, 0, 1))
+ return 1;
+ }
+
+ /* X does not match, so try its subexpressions. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ {
+ if (i == 0)
+ {
+ x = XEXP (x, 0);
+ goto repeat;
+ }
+ else
+ if (refers_to_p (XEXP (x, i), y))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (refers_to_p (XVECEXP (x, i, j), y))
+ return 1;
+ }
+
+ return 0;
+}
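+
+/* E.g. refers_to_p returns 1 for
+
+	X = (set (reg:SI 65) (plus:SI (reg:SI 66) (const_int 1)))
+	Y = (reg:SI 66)
+
+   because the recursion reaches the inner (reg:SI 66), whose code
+   matches Y and which is equivalent to it under exp_equiv_p.  */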
+
+/* Given ADDR and SIZE (a memory address, and the size of the memory reference),
+ set PBASE, PSTART, and PEND which correspond to the base of the address,
+ the starting offset, and ending offset respectively.
+
+ ADDR is known to be a nonvarying address.
+
+   cse_rtx_addr_varies_p returns zero for nonvarying addresses.  */
+
+static void
+set_nonvarying_address_components (addr, size, pbase, pstart, pend)
+ rtx addr;
+ int size;
+ rtx *pbase;
+ HOST_WIDE_INT *pstart, *pend;
+{
+ rtx base;
+ HOST_WIDE_INT start, end;
+
+ base = addr;
+ start = 0;
+ end = 0;
+
+ /* Registers with nonvarying addresses usually have constant equivalents;
+ but the frame pointer register is also possible. */
+ if (GET_CODE (base) == REG
+ && qty_const != 0
+ && REGNO_QTY_VALID_P (REGNO (base))
+ && qty_mode[reg_qty[REGNO (base)]] == GET_MODE (base)
+ && qty_const[reg_qty[REGNO (base)]] != 0)
+ base = qty_const[reg_qty[REGNO (base)]];
+ else if (GET_CODE (base) == PLUS
+ && GET_CODE (XEXP (base, 1)) == CONST_INT
+ && GET_CODE (XEXP (base, 0)) == REG
+ && qty_const != 0
+ && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
+ && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
+ == GET_MODE (XEXP (base, 0)))
+ && qty_const[reg_qty[REGNO (XEXP (base, 0))]])
+ {
+ start = INTVAL (XEXP (base, 1));
+ base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
+ }
+
+ /* Handle everything that we can find inside an address that has been
+ viewed as constant. */
+
+ while (1)
+ {
+ /* If no part of this switch does a "continue", the code outside
+ will exit this loop. */
+
+ switch (GET_CODE (base))
+ {
+ case LO_SUM:
+ /* By definition, operand1 of a LO_SUM is the associated constant
+ address. Use the associated constant address as the base
+ instead. */
+ base = XEXP (base, 1);
+ continue;
+
+ case CONST:
+ /* Strip off CONST. */
+ base = XEXP (base, 0);
+ continue;
+
+ case PLUS:
+ if (GET_CODE (XEXP (base, 1)) == CONST_INT)
+ {
+ start += INTVAL (XEXP (base, 1));
+ base = XEXP (base, 0);
+ continue;
+ }
+ break;
+
+ case AND:
+ /* Handle the case of an AND which is the negative of a power of
+ two. This is used to represent unaligned memory operations. */
+ if (GET_CODE (XEXP (base, 1)) == CONST_INT
+ && exact_log2 (- INTVAL (XEXP (base, 1))) > 0)
+ {
+ set_nonvarying_address_components (XEXP (base, 0), size,
+ pbase, pstart, pend);
+
+ /* Assume the worst misalignment. START is affected, but not
+		 END, so compensate by adjusting SIZE.  Don't lose any
+ constant we already had. */
+
+ size = *pend - *pstart - INTVAL (XEXP (base, 1)) - 1;
+ start += *pstart - INTVAL (XEXP (base, 1)) - 1;
+ base = *pbase;
+ }
+ break;
+ }
+
+ break;
+ }
+
+ end = start + size;
+
+ /* Set the return values. */
+ *pbase = base;
+ *pstart = start;
+ *pend = end;
+}
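+
+/* A worked example: suppose ADDR is (plus (reg 70) (const_int 8)),
+   SIZE is 4, and reg 70 is known to be equivalent to the constant
+   address (symbol_ref "buf").  The code above then produces
+
+	*pbase  = (symbol_ref "buf")
+	*pstart = 8
+	*pend   = 12
+
+   i.e. the half-open byte range [8, 12) relative to the base.  */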
+
+/* Return 1 iff any subexpression of X refers to memory
+ at an address of BASE plus some offset
+ such that any of the bytes' offsets fall between START (inclusive)
+ and END (exclusive).
+
+ The value is undefined if X is a varying address (as determined by
+ cse_rtx_addr_varies_p). This function is not used in such cases.
+
+ When used in the cse pass, `qty_const' is nonzero, and it is used
+ to treat an address that is a register with a known constant value
+ as if it were that constant value.
+ In the loop pass, `qty_const' is zero, so this is not done. */
+
+static int
+refers_to_mem_p (x, base, start, end)
+ rtx x, base;
+ HOST_WIDE_INT start, end;
+{
+ register HOST_WIDE_INT i;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (GET_CODE (base) == CONST_INT)
+ {
+ start += INTVAL (base);
+ end += INTVAL (base);
+ base = const0_rtx;
+ }
+
+ repeat:
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+ if (code == MEM)
+ {
+ register rtx addr = XEXP (x, 0); /* Get the address. */
+ rtx mybase;
+ HOST_WIDE_INT mystart, myend;
+
+ set_nonvarying_address_components (addr, GET_MODE_SIZE (GET_MODE (x)),
+ &mybase, &mystart, &myend);
+
+ /* refers_to_mem_p is never called with varying addresses.
+ If the base addresses are not equal, there is no chance
+ of the memory addresses conflicting. */
+ if (! rtx_equal_p (mybase, base))
+ return 0;
+
+ return myend > start && mystart < end;
+ }
+
+ /* X does not match, so try its subexpressions. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ {
+ if (i == 0)
+ {
+ x = XEXP (x, 0);
+ goto repeat;
+ }
+ else
+ if (refers_to_mem_p (XEXP (x, i), base, start, end))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (refers_to_mem_p (XVECEXP (x, i, j), base, start, end))
+ return 1;
+ }
+
+ return 0;
+}
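+
+/* The return expression above is the standard half-open interval
+   intersection test: [MYSTART, MYEND) overlaps [START, END) iff
+   myend > start and mystart < end.  For example, a 4-byte reference at
+   offset 8 (range [8, 12)) conflicts with one at offset 10 (range
+   [10, 14)) but not with one at offset 12 (range [12, 16)).  */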
+
+/* Nonzero if X refers to memory at a varying address;
+ except that a register which has at the moment a known constant value
+ isn't considered variable. */
+
+static int
+cse_rtx_addr_varies_p (x)
+ rtx x;
+{
+ /* We need not check for X and the equivalence class being of the same
+ mode because if X is equivalent to a constant in some mode, it
+ doesn't vary in any mode. */
+
+ if (GET_CODE (x) == MEM
+ && GET_CODE (XEXP (x, 0)) == REG
+ && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
+ && GET_MODE (XEXP (x, 0)) == qty_mode[reg_qty[REGNO (XEXP (x, 0))]]
+ && qty_const[reg_qty[REGNO (XEXP (x, 0))]] != 0)
+ return 0;
+
+ if (GET_CODE (x) == MEM
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
+ && (GET_MODE (XEXP (XEXP (x, 0), 0))
+ == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
+ && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
+ return 0;
+
+ return rtx_addr_varies_p (x);
+}
+
+/* Canonicalize an expression:
+ replace each register reference inside it
+ with the "oldest" equivalent register.
+
+ If INSN is non-zero and we are replacing a pseudo with a hard register
+ or vice versa, validate_change is used to ensure that INSN remains valid
+ after we make our substitution. The calls are made with IN_GROUP non-zero
+ so apply_change_group must be called upon the outermost return from this
+ function (unless INSN is zero). The result of apply_change_group can
+ generally be discarded since the changes we are making are optional. */
+
+static rtx
+canon_reg (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register int i;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == 0)
+ return x;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return x;
+
+ case REG:
+ {
+ register int first;
+
+ /* Never replace a hard reg, because hard regs can appear
+ in more than one machine mode, and we must preserve the mode
+ of each occurrence. Also, some hard regs appear in
+ MEMs that are shared and mustn't be altered. Don't try to
+ replace any reg that maps to a reg of class NO_REGS. */
+ if (REGNO (x) < FIRST_PSEUDO_REGISTER
+ || ! REGNO_QTY_VALID_P (REGNO (x)))
+ return x;
+
+ first = qty_first_reg[reg_qty[REGNO (x)]];
+ return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
+ : REGNO_REG_CLASS (first) == NO_REGS ? x
+ : gen_rtx (REG, qty_mode[reg_qty[REGNO (x)]], first));
+ }
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ register int j;
+
+ if (fmt[i] == 'e')
+ {
+ rtx new = canon_reg (XEXP (x, i), insn);
+
+ /* If replacing pseudo with hard reg or vice versa, ensure the
+ insn remains valid. Likewise if the insn has MATCH_DUPs. */
+ if (insn != 0 && new != 0
+ && GET_CODE (new) == REG && GET_CODE (XEXP (x, i)) == REG
+ && (((REGNO (new) < FIRST_PSEUDO_REGISTER)
+ != (REGNO (XEXP (x, i)) < FIRST_PSEUDO_REGISTER))
+ || insn_n_dups[recog_memoized (insn)] > 0))
+ validate_change (insn, &XEXP (x, i), new, 1);
+ else
+ XEXP (x, i) = new;
+ }
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j) = canon_reg (XVECEXP (x, i, j), insn);
+ }
+
+ return x;
+}
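+
+/* For example, if pseudos 66 and 70 currently share a quantity and 66
+   is the oldest member of the class, canon_reg rewrites
+
+	(plus:SI (reg:SI 70) (const_int 4))
+   into
+	(plus:SI (reg:SI 66) (const_int 4))
+
+   going through validate_change only when a hard register is involved
+   or the insn has MATCH_DUPs, as described above.  */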
+
+/* LOC is a location with INSN that is an operand address (the contents of
+ a MEM). Find the best equivalent address to use that is valid for this
+ insn.
+
+ On most CISC machines, complicated address modes are costly, and rtx_cost
+ is a good approximation for that cost. However, most RISC machines have
+ only a few (usually only one) memory reference formats. If an address is
+ valid at all, it is often just as cheap as any other address. Hence, for
+ RISC machines, we use the configuration macro `ADDRESS_COST' to compare the
+ costs of various addresses. For two addresses of equal cost, choose the one
+ with the highest `rtx_cost' value as that has the potential of eliminating
+   the most insns.  If those are equal too, we choose the first in the equivalence
+ class. Note that we ignore the fact that pseudo registers are cheaper
+ than hard registers here because we would also prefer the pseudo registers.
+ */
+
+static void
+find_best_addr (insn, loc)
+ rtx insn;
+ rtx *loc;
+{
+ struct table_elt *elt, *p;
+ rtx addr = *loc;
+ int our_cost;
+ int found_better = 1;
+ int save_do_not_record = do_not_record;
+ int save_hash_arg_in_memory = hash_arg_in_memory;
+ int save_hash_arg_in_struct = hash_arg_in_struct;
+ int addr_volatile;
+ int regno;
+ unsigned hash;
+
+ /* Do not try to replace constant addresses or addresses of local and
+ argument slots. These MEM expressions are made only once and inserted
+ in many instructions, as well as being used to control symbol table
+ output. It is not safe to clobber them.
+
+ There are some uncommon cases where the address is already in a register
+ for some reason, but we cannot take advantage of that because we have
+ no easy way to unshare the MEM. In addition, looking up all stack
+ addresses is costly. */
+ if ((GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 0)) == REG
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT
+ && (regno = REGNO (XEXP (addr, 0)),
+ regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM))
+ || (GET_CODE (addr) == REG
+ && (regno = REGNO (addr), regno == FRAME_POINTER_REGNUM
+ || regno == HARD_FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM))
+ || CONSTANT_ADDRESS_P (addr))
+ return;
+
+ /* If this address is not simply a register, try to fold it. This will
+ sometimes simplify the expression. Many simplifications
+ will not be valid, but some, usually applying the associative rule, will
+ be valid and produce better code. */
+ if (GET_CODE (addr) != REG
+ && validate_change (insn, loc, fold_rtx (addr, insn), 0))
+ addr = *loc;
+
+ /* If this address is not in the hash table, we can't look for equivalences
+ of the whole address. Also, ignore if volatile. */
+
+ do_not_record = 0;
+ hash = HASH (addr, Pmode);
+ addr_volatile = do_not_record;
+ do_not_record = save_do_not_record;
+ hash_arg_in_memory = save_hash_arg_in_memory;
+ hash_arg_in_struct = save_hash_arg_in_struct;
+
+ if (addr_volatile)
+ return;
+
+ elt = lookup (addr, hash, Pmode);
+
+#ifndef ADDRESS_COST
+ if (elt)
+ {
+ our_cost = elt->cost;
+
+ /* Find the lowest cost below ours that works. */
+ for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
+ if (elt->cost < our_cost
+ && (GET_CODE (elt->exp) == REG
+ || exp_equiv_p (elt->exp, elt->exp, 1, 0))
+ && validate_change (insn, loc,
+ canon_reg (copy_rtx (elt->exp), NULL_RTX), 0))
+ return;
+ }
+#else
+
+ if (elt)
+ {
+ /* We need to find the best (under the criteria documented above) entry
+ in the class that is valid. We use the `flag' field to indicate
+ choices that were invalid and iterate until we can't find a better
+ one that hasn't already been tried. */
+
+ for (p = elt->first_same_value; p; p = p->next_same_value)
+ p->flag = 0;
+
+ while (found_better)
+ {
+ int best_addr_cost = ADDRESS_COST (*loc);
+ int best_rtx_cost = (elt->cost + 1) >> 1;
+ struct table_elt *best_elt = elt;
+
+ found_better = 0;
+ for (p = elt->first_same_value; p; p = p->next_same_value)
+ if (! p->flag
+ && (GET_CODE (p->exp) == REG
+ || exp_equiv_p (p->exp, p->exp, 1, 0))
+ && (ADDRESS_COST (p->exp) < best_addr_cost
+ || (ADDRESS_COST (p->exp) == best_addr_cost
+ && (p->cost + 1) >> 1 > best_rtx_cost)))
+ {
+ found_better = 1;
+ best_addr_cost = ADDRESS_COST (p->exp);
+ best_rtx_cost = (p->cost + 1) >> 1;
+ best_elt = p;
+ }
+
+ if (found_better)
+ {
+ if (validate_change (insn, loc,
+ canon_reg (copy_rtx (best_elt->exp),
+ NULL_RTX), 0))
+ return;
+ else
+ best_elt->flag = 1;
+ }
+ }
+ }
+
+ /* If the address is a binary operation with the first operand a register
+ and the second a constant, do the same as above, but looking for
+ equivalences of the register. Then try to simplify before checking for
+ the best address to use. This catches a few cases: First is when we
+ have REG+const and the register is another REG+const. We can often merge
+ the constants and eliminate one insn and one register. It may also be
+ that a machine has a cheap REG+REG+const. Finally, this improves the
+ code on the Alpha for unaligned byte stores. */
+
+ if (flag_expensive_optimizations
+ && (GET_RTX_CLASS (GET_CODE (*loc)) == '2'
+ || GET_RTX_CLASS (GET_CODE (*loc)) == 'c')
+ && GET_CODE (XEXP (*loc, 0)) == REG
+ && GET_CODE (XEXP (*loc, 1)) == CONST_INT)
+ {
+ rtx c = XEXP (*loc, 1);
+
+ do_not_record = 0;
+ hash = HASH (XEXP (*loc, 0), Pmode);
+ do_not_record = save_do_not_record;
+ hash_arg_in_memory = save_hash_arg_in_memory;
+ hash_arg_in_struct = save_hash_arg_in_struct;
+
+ elt = lookup (XEXP (*loc, 0), hash, Pmode);
+ if (elt == 0)
+ return;
+
+ /* We need to find the best (under the criteria documented above) entry
+ in the class that is valid. We use the `flag' field to indicate
+ choices that were invalid and iterate until we can't find a better
+ one that hasn't already been tried. */
+
+ for (p = elt->first_same_value; p; p = p->next_same_value)
+ p->flag = 0;
+
+ while (found_better)
+ {
+ int best_addr_cost = ADDRESS_COST (*loc);
+ int best_rtx_cost = (COST (*loc) + 1) >> 1;
+ struct table_elt *best_elt = elt;
+ rtx best_rtx = *loc;
+ int count;
+
+ /* This is at worst case an O(n^2) algorithm, so limit our search
+ to the first 32 elements on the list. This avoids trouble
+ compiling code with very long basic blocks that can easily
+ call cse_gen_binary so many times that we run out of memory. */
+
+ found_better = 0;
+ for (p = elt->first_same_value, count = 0;
+ p && count < 32;
+ p = p->next_same_value, count++)
+ if (! p->flag
+ && (GET_CODE (p->exp) == REG
+ || exp_equiv_p (p->exp, p->exp, 1, 0)))
+ {
+ rtx new = cse_gen_binary (GET_CODE (*loc), Pmode, p->exp, c);
+
+ if ((ADDRESS_COST (new) < best_addr_cost
+ || (ADDRESS_COST (new) == best_addr_cost
+ && (COST (new) + 1) >> 1 > best_rtx_cost)))
+ {
+ found_better = 1;
+ best_addr_cost = ADDRESS_COST (new);
+ best_rtx_cost = (COST (new) + 1) >> 1;
+ best_elt = p;
+ best_rtx = new;
+ }
+ }
+
+ if (found_better)
+ {
+ if (validate_change (insn, loc,
+ canon_reg (copy_rtx (best_rtx),
+ NULL_RTX), 0))
+ return;
+ else
+ best_elt->flag = 1;
+ }
+ }
+ }
+#endif
+}
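+
+/* A concrete instance of the REG+const case above: if *LOC is
+   (plus (reg 70) (const_int 4)) and the class of reg 70 contains
+   (plus (reg 65) (const_int 8)), then cse_gen_binary folds the sum to
+
+	(plus (reg 65) (const_int 12))
+
+   and, if that is a cheaper valid address, it replaces *LOC; the insn
+   computing reg 70 may then become dead and be deleted later.  */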
+
+/* Given an operation (CODE, *PARG1, *PARG2), where CODE is a comparison
+   operation (EQ, NE, GT, etc.), follow it back through the hash table and
+   find what values are being compared.
+
+ *PARG1 and *PARG2 are updated to contain the rtx representing the values
+ actually being compared. For example, if *PARG1 was (cc0) and *PARG2
+ was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were
+ compared to produce cc0.
+
+   The return value is the comparison operator: either CODE itself or the
+   code corresponding to the inverse of the comparison.  */
+
+static enum rtx_code
+find_comparison_args (code, parg1, parg2, pmode1, pmode2)
+ enum rtx_code code;
+ rtx *parg1, *parg2;
+ enum machine_mode *pmode1, *pmode2;
+{
+ rtx arg1, arg2;
+
+ arg1 = *parg1, arg2 = *parg2;
+
+ /* If ARG2 is const0_rtx, see what ARG1 is equivalent to. */
+
+ while (arg2 == CONST0_RTX (GET_MODE (arg1)))
+ {
+ /* Set non-zero when we find something of interest. */
+ rtx x = 0;
+ int reverse_code = 0;
+ struct table_elt *p = 0;
+
+ /* If arg1 is a COMPARE, extract the comparison arguments from it.
+ On machines with CC0, this is the only case that can occur, since
+ fold_rtx will return the COMPARE or item being compared with zero
+ when given CC0. */
+
+ if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx)
+ x = arg1;
+
+ /* If ARG1 is a comparison operator and CODE is testing for
+ STORE_FLAG_VALUE, get the inner arguments. */
+
+ else if (GET_RTX_CLASS (GET_CODE (arg1)) == '<')
+ {
+ if (code == NE
+ || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
+ && code == LT && STORE_FLAG_VALUE == -1)
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ )
+ x = arg1;
+ else if (code == EQ
+ || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
+ && code == GE && STORE_FLAG_VALUE == -1)
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ )
+ x = arg1, reverse_code = 1;
+ }
+
+ /* ??? We could also check for
+
+ (ne (and (eq (...) (const_int 1))) (const_int 0))
+
+ and related forms, but let's wait until we see them occurring. */
+
+ if (x == 0)
+ /* Look up ARG1 in the hash table and see if it has an equivalence
+ that lets us see what is being compared. */
+ p = lookup (arg1, safe_hash (arg1, GET_MODE (arg1)) % NBUCKETS,
+ GET_MODE (arg1));
+ if (p) p = p->first_same_value;
+
+ for (; p; p = p->next_same_value)
+ {
+ enum machine_mode inner_mode = GET_MODE (p->exp);
+
+ /* If the entry isn't valid, skip it. */
+ if (! exp_equiv_p (p->exp, p->exp, 1, 0))
+ continue;
+
+ if (GET_CODE (p->exp) == COMPARE
+ /* Another possibility is that this machine has a compare insn
+ that includes the comparison code. In that case, ARG1 would
+ be equivalent to a comparison operation that would set ARG1 to
+ either STORE_FLAG_VALUE or zero. If this is an NE operation,
+ ORIG_CODE is the actual comparison being done; if it is an EQ,
+		 we must reverse ORIG_CODE.  On machines with a negative value
+ for STORE_FLAG_VALUE, also look at LT and GE operations. */
+ || ((code == NE
+ || (code == LT
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == LT
+ && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ )
+ && GET_RTX_CLASS (GET_CODE (p->exp)) == '<'))
+ {
+ x = p->exp;
+ break;
+ }
+ else if ((code == EQ
+ || (code == GE
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == GE
+ && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ )
+ && GET_RTX_CLASS (GET_CODE (p->exp)) == '<')
+ {
+ reverse_code = 1;
+ x = p->exp;
+ break;
+ }
+
+ /* If this is fp + constant, the equivalent is a better operand since
+ it may let us predict the value of the comparison. */
+ else if (NONZERO_BASE_PLUS_P (p->exp))
+ {
+ arg1 = p->exp;
+ continue;
+ }
+ }
+
+ /* If we didn't find a useful equivalence for ARG1, we are done.
+ Otherwise, set up for the next iteration. */
+ if (x == 0)
+ break;
+
+ arg1 = XEXP (x, 0), arg2 = XEXP (x, 1);
+ if (GET_RTX_CLASS (GET_CODE (x)) == '<')
+ code = GET_CODE (x);
+
+ if (reverse_code)
+ code = reverse_condition (code);
+ }
+
+  /* Return our results.  Return the modes from before fold_rtx
+     because fold_rtx might produce a CONST_INT, which has VOIDmode,
+     losing the original mode.  */
+ *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2);
+ *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0);
+
+ return code;
+}
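+
+/* Example: called with CODE = EQ, *PARG1 = (compare (reg 70)
+   (const_int 10)) and *PARG2 = (const_int 0), the first case above
+   strips the COMPARE, leaving *PARG1 = (reg 70) and *PARG2 =
+   (const_int 10), and EQ is returned unchanged.  Had ARG1 instead been
+   equivalent to a store-flag comparison (one that sets it to
+   STORE_FLAG_VALUE or zero), an EQ test against zero would return the
+   reversed comparison code.  */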
+
+/* Try to simplify a unary operation CODE whose output mode is to be
+ MODE with input operand OP whose mode was originally OP_MODE.
+ Return zero if no simplification can be made. */
+
+rtx
+simplify_unary_operation (code, mode, op, op_mode)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op;
+ enum machine_mode op_mode;
+{
+ register int width = GET_MODE_BITSIZE (mode);
+
+ /* The order of these tests is critical so that, for example, we don't
+ check the wrong mode (input vs. output) for a conversion operation,
+ such as FIX. At some point, this should be simplified. */
+
+#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
+
+ if (code == FLOAT && GET_MODE (op) == VOIDmode
+ && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
+ {
+ HOST_WIDE_INT hv, lv;
+ REAL_VALUE_TYPE d;
+
+ if (GET_CODE (op) == CONST_INT)
+ lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
+ else
+ lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
+
+#ifdef REAL_ARITHMETIC
+ REAL_VALUE_FROM_INT (d, lv, hv);
+#else
+ if (hv < 0)
+ {
+ d = (double) (~ hv);
+ d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
+ * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
+ d += (double) (unsigned HOST_WIDE_INT) (~ lv);
+ d = (- d - 1.0);
+ }
+ else
+ {
+ d = (double) hv;
+ d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
+ * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
+ d += (double) (unsigned HOST_WIDE_INT) lv;
+ }
+#endif /* REAL_ARITHMETIC */
+
+ return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
+ }
+ else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
+ && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
+ {
+ HOST_WIDE_INT hv, lv;
+ REAL_VALUE_TYPE d;
+
+ if (GET_CODE (op) == CONST_INT)
+ lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
+ else
+ lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
+
+ if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
+ ;
+ else
+ hv = 0, lv &= GET_MODE_MASK (op_mode);
+
+#ifdef REAL_ARITHMETIC
+ REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv);
+#else
+
+ d = (double) (unsigned HOST_WIDE_INT) hv;
+ d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
+ * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
+ d += (double) (unsigned HOST_WIDE_INT) lv;
+#endif /* REAL_ARITHMETIC */
+
+ return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
+ }
+#endif
+
+ if (GET_CODE (op) == CONST_INT
+ && width <= HOST_BITS_PER_WIDE_INT && width > 0)
+ {
+ register HOST_WIDE_INT arg0 = INTVAL (op);
+ register HOST_WIDE_INT val;
+
+ switch (code)
+ {
+ case NOT:
+ val = ~ arg0;
+ break;
+
+ case NEG:
+ val = - arg0;
+ break;
+
+ case ABS:
+ val = (arg0 >= 0 ? arg0 : - arg0);
+ break;
+
+ case FFS:
+	  /* Don't use ffs here.  Instead, get the low-order bit and then its
+ number. If arg0 is zero, this will return 0, as desired. */
+ arg0 &= GET_MODE_MASK (mode);
+ val = exact_log2 (arg0 & (- arg0)) + 1;
+ break;
+
+ case TRUNCATE:
+ val = arg0;
+ break;
+
+ case ZERO_EXTEND:
+ if (op_mode == VOIDmode)
+ op_mode = mode;
+ if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
+ {
+ /* If we were really extending the mode,
+ we would have to distinguish between zero-extension
+ and sign-extension. */
+ if (width != GET_MODE_BITSIZE (op_mode))
+ abort ();
+ val = arg0;
+ }
+ else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
+ val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
+ else
+ return 0;
+ break;
+
+ case SIGN_EXTEND:
+ if (op_mode == VOIDmode)
+ op_mode = mode;
+ if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
+ {
+ /* If we were really extending the mode,
+ we would have to distinguish between zero-extension
+ and sign-extension. */
+ if (width != GET_MODE_BITSIZE (op_mode))
+ abort ();
+ val = arg0;
+ }
+ else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
+ {
+ val
+ = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
+ if (val
+ & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
+ val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+ }
+ else
+ return 0;
+ break;
+
+ case SQRT:
+ return 0;
+
+ default:
+ abort ();
+ }
+
+ /* Clear the bits that don't belong in our mode,
+ unless they and our sign bit are all one.
+ So we get either a reasonable negative value or a reasonable
+ unsigned value for this mode. */
+ if (width < HOST_BITS_PER_WIDE_INT
+ && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
+ != ((HOST_WIDE_INT) (-1) << (width - 1))))
+	val &= ((HOST_WIDE_INT) 1 << width) - 1;
+
+ return GEN_INT (val);
+ }
+
+ /* We can do some operations on integer CONST_DOUBLEs. Also allow
+ for a DImode operation on a CONST_INT. */
+ else if (GET_MODE (op) == VOIDmode && width == HOST_BITS_PER_INT * 2
+ && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
+ {
+ HOST_WIDE_INT l1, h1, lv, hv;
+
+ if (GET_CODE (op) == CONST_DOUBLE)
+ l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
+ else
+ l1 = INTVAL (op), h1 = l1 < 0 ? -1 : 0;
+
+ switch (code)
+ {
+ case NOT:
+ lv = ~ l1;
+ hv = ~ h1;
+ break;
+
+ case NEG:
+ neg_double (l1, h1, &lv, &hv);
+ break;
+
+ case ABS:
+ if (h1 < 0)
+ neg_double (l1, h1, &lv, &hv);
+ else
+ lv = l1, hv = h1;
+ break;
+
+ case FFS:
+ hv = 0;
+ if (l1 == 0)
+ lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
+ else
+ lv = exact_log2 (l1 & (-l1)) + 1;
+ break;
+
+ case TRUNCATE:
+ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ return GEN_INT (l1 & GET_MODE_MASK (mode));
+ else
+ return 0;
+ break;
+
+ case ZERO_EXTEND:
+ if (op_mode == VOIDmode
+ || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
+ return 0;
+
+ hv = 0;
+ lv = l1 & GET_MODE_MASK (op_mode);
+ break;
+
+ case SIGN_EXTEND:
+ if (op_mode == VOIDmode
+ || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
+ return 0;
+ else
+ {
+ lv = l1 & GET_MODE_MASK (op_mode);
+ if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
+ && (lv & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
+ lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
+
+ hv = (lv < 0) ? ~ (HOST_WIDE_INT) 0 : 0;
+ }
+ break;
+
+ case SQRT:
+ return 0;
+
+ default:
+ return 0;
+ }
+
+ return immed_double_const (lv, hv, mode);
+ }
+
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ else if (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ REAL_VALUE_TYPE d;
+ jmp_buf handler;
+ rtx x;
+
+ if (setjmp (handler))
+ /* There used to be a warning here, but that is inadvisable.
+ People may want to cause traps, and the natural way
+ to do it should not get a warning. */
+ return 0;
+
+ set_float_handler (handler);
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, op);
+
+ switch (code)
+ {
+ case NEG:
+ d = REAL_VALUE_NEGATE (d);
+ break;
+
+ case ABS:
+ if (REAL_VALUE_NEGATIVE (d))
+ d = REAL_VALUE_NEGATE (d);
+ break;
+
+ case FLOAT_TRUNCATE:
+ d = real_value_truncate (mode, d);
+ break;
+
+ case FLOAT_EXTEND:
+ /* All this does is change the mode. */
+ break;
+
+ case FIX:
+ d = REAL_VALUE_RNDZINT (d);
+ break;
+
+ case UNSIGNED_FIX:
+ d = REAL_VALUE_UNSIGNED_RNDZINT (d);
+ break;
+
+ case SQRT:
+ return 0;
+
+ default:
+ abort ();
+ }
+
+ x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
+ set_float_handler (NULL_PTR);
+ return x;
+ }
+ else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE_CLASS (mode) == MODE_INT
+ && width <= HOST_BITS_PER_WIDE_INT && width > 0)
+ {
+ REAL_VALUE_TYPE d;
+ jmp_buf handler;
+ HOST_WIDE_INT val;
+
+ if (setjmp (handler))
+ return 0;
+
+ set_float_handler (handler);
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, op);
+
+ switch (code)
+ {
+ case FIX:
+ val = REAL_VALUE_FIX (d);
+ break;
+
+ case UNSIGNED_FIX:
+ val = REAL_VALUE_UNSIGNED_FIX (d);
+ break;
+
+ default:
+ abort ();
+ }
+
+ set_float_handler (NULL_PTR);
+
+ /* Clear the bits that don't belong in our mode,
+ unless they and our sign bit are all one.
+ So we get either a reasonable negative value or a reasonable
+ unsigned value for this mode. */
+ if (width < HOST_BITS_PER_WIDE_INT
+ && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
+ != ((HOST_WIDE_INT) (-1) << (width - 1))))
+ val &= ((HOST_WIDE_INT) 1 << width) - 1;
+
+ return GEN_INT (val);
+ }
+#endif
+ /* This was formerly used only for non-IEEE float.
+ eggert@twinsun.com says it is safe for IEEE also. */
+ else
+ {
+ /* There are some simplifications we can do even if the operands
+ aren't constant. */
+ switch (code)
+ {
+ case NEG:
+ case NOT:
+ /* (not (not X)) == X, similarly for NEG. */
+ if (GET_CODE (op) == code)
+ return XEXP (op, 0);
+ break;
+
+ case SIGN_EXTEND:
+ /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
+ becomes just the MINUS if its mode is MODE. This allows
+ folding switch statements on machines using casesi (such as
+ the Vax). */
+ if (GET_CODE (op) == TRUNCATE
+ && GET_MODE (XEXP (op, 0)) == mode
+ && GET_CODE (XEXP (op, 0)) == MINUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
+ return XEXP (op, 0);
+ break;
+ }
+
+ return 0;
+ }
+}
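+
+/* A worked instance of the SIGN_EXTEND arithmetic above: extending the
+   QImode value 0xff (OP_MODE bitsize 8) first masks, giving
+   val = 0xff; bit 7 is set, so (HOST_WIDE_INT) 1 << 8 is subtracted,
+   giving val = -1, the correct sign extension.  */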
+
+/* Simplify a binary operation CODE with result mode MODE, operating on OP0
+ and OP1. Return 0 if no simplification is possible.
+
+ Don't use this for relational operations such as EQ or LT.
+ Use simplify_relational_operation instead. */
+
+rtx
+simplify_binary_operation (code, mode, op0, op1)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op0, op1;
+{
+ register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
+ HOST_WIDE_INT val;
+ int width = GET_MODE_BITSIZE (mode);
+ rtx tem;
+
+ /* Relational operations don't work here. We must know the mode
+ of the operands in order to do the comparison correctly.
+ Assuming a full word can give incorrect results.
+ Consider comparing 128 with -128 in QImode. */
+
+ if (GET_RTX_CLASS (code) == '<')
+ abort ();
+
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
+ && mode == GET_MODE (op0) && mode == GET_MODE (op1))
+ {
+ REAL_VALUE_TYPE f0, f1, value;
+ jmp_buf handler;
+
+ if (setjmp (handler))
+ return 0;
+
+ set_float_handler (handler);
+
+ REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
+ REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
+ f0 = real_value_truncate (mode, f0);
+ f1 = real_value_truncate (mode, f1);
+
+#ifdef REAL_ARITHMETIC
+ REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
+#else
+ switch (code)
+ {
+ case PLUS:
+ value = f0 + f1;
+ break;
+ case MINUS:
+ value = f0 - f1;
+ break;
+ case MULT:
+ value = f0 * f1;
+ break;
+ case DIV:
+#ifndef REAL_INFINITY
+ if (f1 == 0)
+ return 0;
+#endif
+ value = f0 / f1;
+ break;
+ case SMIN:
+ value = MIN (f0, f1);
+ break;
+ case SMAX:
+ value = MAX (f0, f1);
+ break;
+ default:
+ abort ();
+ }
+#endif
+
+ value = real_value_truncate (mode, value);
+ set_float_handler (NULL_PTR);
+ return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
+ }
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+
+ /* We can fold some multi-word operations. */
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && width == HOST_BITS_PER_WIDE_INT * 2
+ && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
+ && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
+ {
+ HOST_WIDE_INT l1, l2, h1, h2, lv, hv;
+
+ if (GET_CODE (op0) == CONST_DOUBLE)
+ l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
+ else
+ l1 = INTVAL (op0), h1 = l1 < 0 ? -1 : 0;
+
+ if (GET_CODE (op1) == CONST_DOUBLE)
+ l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
+ else
+ l2 = INTVAL (op1), h2 = l2 < 0 ? -1 : 0;
+
+ switch (code)
+ {
+ case MINUS:
+ /* A - B == A + (-B). */
+ neg_double (l2, h2, &lv, &hv);
+ l2 = lv, h2 = hv;
+
+	      /* ... fall through ... */
+
+ case PLUS:
+ add_double (l1, h1, l2, h2, &lv, &hv);
+ break;
+
+ case MULT:
+ mul_double (l1, h1, l2, h2, &lv, &hv);
+ break;
+
+ case DIV: case MOD: case UDIV: case UMOD:
+ /* We'd need to include tree.h to do this and it doesn't seem worth
+ it. */
+ return 0;
+
+ case AND:
+ lv = l1 & l2, hv = h1 & h2;
+ break;
+
+ case IOR:
+ lv = l1 | l2, hv = h1 | h2;
+ break;
+
+ case XOR:
+ lv = l1 ^ l2, hv = h1 ^ h2;
+ break;
+
+ case SMIN:
+ if (h1 < h2
+ || (h1 == h2
+ && ((unsigned HOST_WIDE_INT) l1
+ < (unsigned HOST_WIDE_INT) l2)))
+ lv = l1, hv = h1;
+ else
+ lv = l2, hv = h2;
+ break;
+
+ case SMAX:
+ if (h1 > h2
+ || (h1 == h2
+ && ((unsigned HOST_WIDE_INT) l1
+ > (unsigned HOST_WIDE_INT) l2)))
+ lv = l1, hv = h1;
+ else
+ lv = l2, hv = h2;
+ break;
+
+ case UMIN:
+ if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
+ || (h1 == h2
+ && ((unsigned HOST_WIDE_INT) l1
+ < (unsigned HOST_WIDE_INT) l2)))
+ lv = l1, hv = h1;
+ else
+ lv = l2, hv = h2;
+ break;
+
+ case UMAX:
+ if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
+ || (h1 == h2
+ && ((unsigned HOST_WIDE_INT) l1
+ > (unsigned HOST_WIDE_INT) l2)))
+ lv = l1, hv = h1;
+ else
+ lv = l2, hv = h2;
+ break;
+
+ case LSHIFTRT: case ASHIFTRT:
+ case ASHIFT:
+ case ROTATE: case ROTATERT:
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
+#endif
+
+ if (h2 != 0 || l2 < 0 || l2 >= GET_MODE_BITSIZE (mode))
+ return 0;
+
+ if (code == LSHIFTRT || code == ASHIFTRT)
+ rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
+ code == ASHIFTRT);
+ else if (code == ASHIFT)
+ lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
+ else if (code == ROTATE)
+ lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
+ else /* code == ROTATERT */
+ rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return immed_double_const (lv, hv, mode);
+ }
+
+ if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
+ || width > HOST_BITS_PER_WIDE_INT || width == 0)
+ {
+ /* Even if we can't compute a constant result,
+ there are some cases worth simplifying. */
+
+ switch (code)
+ {
+ case PLUS:
+ /* In IEEE floating point, x+0 is not the same as x. Similarly
+ for the other optimizations below. */
+ if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ && FLOAT_MODE_P (mode) && ! flag_fast_math)
+ break;
+
+ if (op1 == CONST0_RTX (mode))
+ return op0;
+
+ /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
+ if (GET_CODE (op0) == NEG)
+ return cse_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
+ else if (GET_CODE (op1) == NEG)
+ return cse_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
+
+ /* Handle both-operands-constant cases. We can only add
+ CONST_INTs to constants since the sum of relocatable symbols
+ can't be handled by most assemblers. Don't add CONST_INT
+ to CONST_INT since overflow won't be computed properly if wider
+ than HOST_BITS_PER_WIDE_INT. */
+
+ if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
+ && GET_CODE (op1) == CONST_INT)
+ return plus_constant (op0, INTVAL (op1));
+ else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
+ && GET_CODE (op0) == CONST_INT)
+ return plus_constant (op1, INTVAL (op0));
+
+ /* See if this is something like X * C - X or vice versa or
+ if the multiplication is written as a shift. If so, we can
+ distribute and make a new multiply, shift, or maybe just
+ have X (if C is 2 in the example above). But don't make
+	     a real multiply if we didn't have one before.  */
+
+ if (! FLOAT_MODE_P (mode))
+ {
+ HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
+ rtx lhs = op0, rhs = op1;
+ int had_mult = 0;
+
+ if (GET_CODE (lhs) == NEG)
+ coeff0 = -1, lhs = XEXP (lhs, 0);
+ else if (GET_CODE (lhs) == MULT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+ {
+ coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
+ had_mult = 1;
+ }
+ else if (GET_CODE (lhs) == ASHIFT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+ && INTVAL (XEXP (lhs, 1)) >= 0
+ && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
+ lhs = XEXP (lhs, 0);
+ }
+
+ if (GET_CODE (rhs) == NEG)
+ coeff1 = -1, rhs = XEXP (rhs, 0);
+ else if (GET_CODE (rhs) == MULT
+ && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+ {
+ coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
+ had_mult = 1;
+ }
+ else if (GET_CODE (rhs) == ASHIFT
+ && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+ && INTVAL (XEXP (rhs, 1)) >= 0
+ && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
+ rhs = XEXP (rhs, 0);
+ }
+
+ if (rtx_equal_p (lhs, rhs))
+ {
+ tem = cse_gen_binary (MULT, mode, lhs,
+ GEN_INT (coeff0 + coeff1));
+ return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
+ }
+ }
+
+ /* If one of the operands is a PLUS or a MINUS, see if we can
+ simplify this by the associative law.
+ Don't use the associative law for floating point.
+ The inaccuracy makes it nonassociative,
+ and subtle programs can break if operations are associated. */
+
+ if (INTEGRAL_MODE_P (mode)
+ && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
+ || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
+ && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
+ return tem;
+ break;
+
+ case COMPARE:
+#ifdef HAVE_cc0
+ /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
+ using cc0, in which case we want to leave it as a COMPARE
+ so we can distinguish it from a register-register-copy.
+
+ In IEEE floating point, x-0 is not the same as x. */
+
+ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_MODE_P (mode) || flag_fast_math)
+ && op1 == CONST0_RTX (mode))
+ return op0;
+#else
+ /* Do nothing here. */
+#endif
+ break;
+
+ case MINUS:
+ /* None of these optimizations can be done for IEEE
+ floating point. */
+ if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ && FLOAT_MODE_P (mode) && ! flag_fast_math)
+ break;
+
+ /* We can't assume x-x is 0 even with non-IEEE floating point,
+ but since it is zero except in very strange circumstances, we
+ will treat it as zero with -ffast-math. */
+ if (rtx_equal_p (op0, op1)
+ && ! side_effects_p (op0)
+ && (! FLOAT_MODE_P (mode) || flag_fast_math))
+ return CONST0_RTX (mode);
+
+ /* Change subtraction from zero into negation. */
+ if (op0 == CONST0_RTX (mode))
+ return gen_rtx (NEG, mode, op1);
+
+ /* (-1 - a) is ~a. */
+ if (op0 == constm1_rtx)
+ return gen_rtx (NOT, mode, op1);
+
+ /* Subtracting 0 has no effect. */
+ if (op1 == CONST0_RTX (mode))
+ return op0;
+
+ /* See if this is something like X * C - X or vice versa or
+ if the multiplication is written as a shift. If so, we can
+ distribute and make a new multiply, shift, or maybe just
+ have X (if C is 2 in the example above). But don't make
+	     a real multiply if we didn't have one before.  */
+
+ if (! FLOAT_MODE_P (mode))
+ {
+ HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
+ rtx lhs = op0, rhs = op1;
+ int had_mult = 0;
+
+ if (GET_CODE (lhs) == NEG)
+ coeff0 = -1, lhs = XEXP (lhs, 0);
+ else if (GET_CODE (lhs) == MULT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+ {
+ coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
+ had_mult = 1;
+ }
+ else if (GET_CODE (lhs) == ASHIFT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+ && INTVAL (XEXP (lhs, 1)) >= 0
+ && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
+ lhs = XEXP (lhs, 0);
+ }
+
+ if (GET_CODE (rhs) == NEG)
+ coeff1 = - 1, rhs = XEXP (rhs, 0);
+ else if (GET_CODE (rhs) == MULT
+ && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+ {
+ coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
+ had_mult = 1;
+ }
+ else if (GET_CODE (rhs) == ASHIFT
+ && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+ && INTVAL (XEXP (rhs, 1)) >= 0
+ && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
+ rhs = XEXP (rhs, 0);
+ }
+
+ if (rtx_equal_p (lhs, rhs))
+ {
+ tem = cse_gen_binary (MULT, mode, lhs,
+ GEN_INT (coeff0 - coeff1));
+ return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
+ }
+ }
+
+ /* (a - (-b)) -> (a + b). */
+ if (GET_CODE (op1) == NEG)
+ return cse_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
+
+ /* If one of the operands is a PLUS or a MINUS, see if we can
+ simplify this by the associative law.
+ Don't use the associative law for floating point.
+ The inaccuracy makes it nonassociative,
+ and subtle programs can break if operations are associated. */
+
+ if (INTEGRAL_MODE_P (mode)
+ && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
+ || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
+ && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
+ return tem;
+
+ /* Don't let a relocatable value get a negative coeff. */
+ if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
+ return plus_constant (op0, - INTVAL (op1));
+ break;
+
+ case MULT:
+ if (op1 == constm1_rtx)
+ {
+ tem = simplify_unary_operation (NEG, mode, op0, mode);
+
+ return tem ? tem : gen_rtx (NEG, mode, op0);
+ }
+
+ /* In IEEE floating point, x*0 is not always 0. */
+ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_MODE_P (mode) || flag_fast_math)
+ && op1 == CONST0_RTX (mode)
+ && ! side_effects_p (op0))
+ return op1;
+
+ /* In IEEE floating point, x*1 is not equivalent to x for nans.
+ However, ANSI says we can drop signals,
+ so we can do this anyway. */
+ if (op1 == CONST1_RTX (mode))
+ return op0;
+
+ /* Convert multiply by constant power of two into shift unless
+ we are still generating RTL. This test is a kludge. */
+ if (GET_CODE (op1) == CONST_INT
+ && (val = exact_log2 (INTVAL (op1))) >= 0
+ && ! rtx_equal_function_value_matters)
+ return gen_rtx (ASHIFT, mode, op0, GEN_INT (val));
+
+ if (GET_CODE (op1) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
+ {
+ REAL_VALUE_TYPE d;
+ jmp_buf handler;
+ int op1is2, op1ism1;
+
+ if (setjmp (handler))
+ return 0;
+
+ set_float_handler (handler);
+ REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
+ op1is2 = REAL_VALUES_EQUAL (d, dconst2);
+ op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
+ set_float_handler (NULL_PTR);
+
+ /* x*2 is x+x and x*(-1) is -x */
+ if (op1is2 && GET_MODE (op0) == mode)
+ return gen_rtx (PLUS, mode, op0, copy_rtx (op0));
+
+ else if (op1ism1 && GET_MODE (op0) == mode)
+ return gen_rtx (NEG, mode, op0);
+ }
+ break;
+
+ case IOR:
+ if (op1 == const0_rtx)
+ return op0;
+ if (GET_CODE (op1) == CONST_INT
+ && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
+ return op1;
+ if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
+ return op0;
+ /* A | (~A) -> -1 */
+ if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
+ || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
+ && ! side_effects_p (op0)
+ && GET_MODE_CLASS (mode) != MODE_CC)
+ return constm1_rtx;
+ break;
+
+ case XOR:
+ if (op1 == const0_rtx)
+ return op0;
+ if (GET_CODE (op1) == CONST_INT
+ && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
+ return gen_rtx (NOT, mode, op0);
+ if (op0 == op1 && ! side_effects_p (op0)
+ && GET_MODE_CLASS (mode) != MODE_CC)
+ return const0_rtx;
+ break;
+
+ case AND:
+ if (op1 == const0_rtx && ! side_effects_p (op0))
+ return const0_rtx;
+ if (GET_CODE (op1) == CONST_INT
+ && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
+ return op0;
+ if (op0 == op1 && ! side_effects_p (op0)
+ && GET_MODE_CLASS (mode) != MODE_CC)
+ return op0;
+ /* A & (~A) -> 0 */
+ if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
+ || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
+ && ! side_effects_p (op0)
+ && GET_MODE_CLASS (mode) != MODE_CC)
+ return const0_rtx;
+ break;
+
+ case UDIV:
+ /* Convert divide by power of two into shift (divide by 1 handled
+ below). */
+ if (GET_CODE (op1) == CONST_INT
+ && (arg1 = exact_log2 (INTVAL (op1))) > 0)
+ return gen_rtx (LSHIFTRT, mode, op0, GEN_INT (arg1));
+
+ /* ... fall through ... */
+
+ case DIV:
+ if (op1 == CONST1_RTX (mode))
+ return op0;
+
+ /* In IEEE floating point, 0/x is not always 0. */
+ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_MODE_P (mode) || flag_fast_math)
+ && op0 == CONST0_RTX (mode)
+ && ! side_effects_p (op1))
+ return op0;
+
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ /* Change division by a constant into multiplication. Only do
+ this with -ffast-math until an expert says it is safe in
+ general. */
+ else if (GET_CODE (op1) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
+ && op1 != CONST0_RTX (mode)
+ && flag_fast_math)
+ {
+ REAL_VALUE_TYPE d;
+ REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
+
+ if (! REAL_VALUES_EQUAL (d, dconst0))
+ {
+#if defined (REAL_ARITHMETIC)
+ REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
+ return gen_rtx (MULT, mode, op0,
+ CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
+#else
+ return gen_rtx (MULT, mode, op0,
+ CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
+#endif
+ }
+ }
+#endif
+ break;
+
+ case UMOD:
+ /* Handle modulus by power of two (mod with 1 handled below). */
+ if (GET_CODE (op1) == CONST_INT
+ && exact_log2 (INTVAL (op1)) > 0)
+ return gen_rtx (AND, mode, op0, GEN_INT (INTVAL (op1) - 1));
+
+ /* ... fall through ... */
+
+ case MOD:
+ if ((op0 == const0_rtx || op1 == const1_rtx)
+ && ! side_effects_p (op0) && ! side_effects_p (op1))
+ return const0_rtx;
+ break;
+
+ case ROTATERT:
+ case ROTATE:
+ /* Rotating ~0 always results in ~0. */
+ if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
+ && INTVAL (op0) == GET_MODE_MASK (mode)
+ && ! side_effects_p (op1))
+ return op0;
+
+ /* ... fall through ... */
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ if (op1 == const0_rtx)
+ return op0;
+ if (op0 == const0_rtx && ! side_effects_p (op1))
+ return op0;
+ break;
+
+ case SMIN:
+ if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
+	  && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
+ && ! side_effects_p (op0))
+ return op1;
+ else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
+ return op0;
+ break;
+
+ case SMAX:
+ if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
+ && (INTVAL (op1)
+ == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
+ && ! side_effects_p (op0))
+ return op1;
+ else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
+ return op0;
+ break;
+
+ case UMIN:
+ if (op1 == const0_rtx && ! side_effects_p (op0))
+ return op1;
+ else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
+ return op0;
+ break;
+
+ case UMAX:
+ if (op1 == constm1_rtx && ! side_effects_p (op0))
+ return op1;
+ else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
+ return op0;
+ break;
+
+ default:
+ abort ();
+ }
+
+ return 0;
+ }
+
+ /* Get the integer argument values in two forms:
+ zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
+
+ arg0 = INTVAL (op0);
+ arg1 = INTVAL (op1);
+
+ if (width < HOST_BITS_PER_WIDE_INT)
+ {
+ arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
+ arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
+
+ arg0s = arg0;
+ if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
+ arg0s |= ((HOST_WIDE_INT) (-1) << width);
+
+ arg1s = arg1;
+ if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
+ arg1s |= ((HOST_WIDE_INT) (-1) << width);
+ }
+ else
+ {
+ arg0s = arg0;
+ arg1s = arg1;
+ }
+
+ /* Compute the value of the arithmetic. */
+
+ switch (code)
+ {
+ case PLUS:
+ val = arg0s + arg1s;
+ break;
+
+ case MINUS:
+ val = arg0s - arg1s;
+ break;
+
+ case MULT:
+ val = arg0s * arg1s;
+ break;
+
+ case DIV:
+ if (arg1s == 0)
+ return 0;
+ val = arg0s / arg1s;
+ break;
+
+ case MOD:
+ if (arg1s == 0)
+ return 0;
+ val = arg0s % arg1s;
+ break;
+
+ case UDIV:
+ if (arg1 == 0)
+ return 0;
+ val = (unsigned HOST_WIDE_INT) arg0 / arg1;
+ break;
+
+ case UMOD:
+ if (arg1 == 0)
+ return 0;
+ val = (unsigned HOST_WIDE_INT) arg0 % arg1;
+ break;
+
+ case AND:
+ val = arg0 & arg1;
+ break;
+
+ case IOR:
+ val = arg0 | arg1;
+ break;
+
+ case XOR:
+ val = arg0 ^ arg1;
+ break;
+
+ case LSHIFTRT:
+ /* If shift count is undefined, don't fold it; let the machine do
+ what it wants. But truncate it if the machine will do that. */
+ if (arg1 < 0)
+ return 0;
+
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ arg1 %= width;
+#endif
+
+ val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
+ break;
+
+ case ASHIFT:
+ if (arg1 < 0)
+ return 0;
+
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ arg1 %= width;
+#endif
+
+ val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
+ break;
+
+ case ASHIFTRT:
+ if (arg1 < 0)
+ return 0;
+
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ arg1 %= width;
+#endif
+
+ val = arg0s >> arg1;
+
+ /* Bootstrap compiler may not have sign extended the right shift.
+	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
+ if (arg0s < 0 && arg1 > 0)
+ val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
+
+ break;
+
+ case ROTATERT:
+ if (arg1 < 0)
+ return 0;
+
+ arg1 %= width;
+ val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
+ | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
+ break;
+
+ case ROTATE:
+ if (arg1 < 0)
+ return 0;
+
+ arg1 %= width;
+ val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
+ | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
+ break;
+
+ case COMPARE:
+ /* Do nothing here. */
+ return 0;
+
+ case SMIN:
+ val = arg0s <= arg1s ? arg0s : arg1s;
+ break;
+
+ case UMIN:
+ val = ((unsigned HOST_WIDE_INT) arg0
+ <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
+ break;
+
+ case SMAX:
+ val = arg0s > arg1s ? arg0s : arg1s;
+ break;
+
+ case UMAX:
+ val = ((unsigned HOST_WIDE_INT) arg0
+ > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Clear the bits that don't belong in our mode, unless they and our sign
+ bit are all one. So we get either a reasonable negative value or a
+ reasonable unsigned value for this mode. */
+ if (width < HOST_BITS_PER_WIDE_INT
+ && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
+ != ((HOST_WIDE_INT) (-1) << (width - 1))))
+ val &= ((HOST_WIDE_INT) 1 << width) - 1;
+
+ return GEN_INT (val);
+}
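+
+/* Worked instances of the final masking step: multiplying
+   (const_int 16) by itself in QImode (width 8) gives val = 256; the
+   bits from the sign bit upward are not all one, so val is masked to
+   the low 8 bits, yielding (const_int 0), which is 256 mod 256.
+   Adding (const_int -1) and (const_int 0) gives val = -1, whose bits
+   from the sign bit upward are all one, so it is returned unmasked as
+   a "reasonable negative value".  */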
+
+/* Simplify a PLUS or MINUS, at least one of whose operands may be another
+ PLUS or MINUS.
+
+   Rather than test for specific cases, we do this by a brute-force method
+ and do all possible simplifications until no more changes occur. Then
+ we rebuild the operation. */
+
+static rtx
+simplify_plus_minus (code, mode, op0, op1)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op0, op1;
+{
+ rtx ops[8];
+ int negs[8];
+ rtx result, tem;
+ int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
+ int first = 1, negate = 0, changed;
+ int i, j;
+
+ bzero ((char *) ops, sizeof ops);
+
+ /* Set up the two operands and then expand them until nothing has been
+ changed. If we run out of room in our array, give up; this should
+ almost never happen. */
+
+ ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
+
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+
+ for (i = 0; i < n_ops; i++)
+ switch (GET_CODE (ops[i]))
+ {
+ case PLUS:
+ case MINUS:
+ if (n_ops == 7)
+ return 0;
+
+ ops[n_ops] = XEXP (ops[i], 1);
+ negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
+ ops[i] = XEXP (ops[i], 0);
+ input_ops++;
+ changed = 1;
+ break;
+
+ case NEG:
+ ops[i] = XEXP (ops[i], 0);
+ negs[i] = ! negs[i];
+ changed = 1;
+ break;
+
+ case CONST:
+ ops[i] = XEXP (ops[i], 0);
+ input_consts++;
+ changed = 1;
+ break;
+
+ case NOT:
+ /* ~a -> (-a - 1) */
+ if (n_ops != 7)
+ {
+ ops[n_ops] = constm1_rtx;
+ negs[n_ops++] = negs[i];
+ ops[i] = XEXP (ops[i], 0);
+ negs[i] = ! negs[i];
+ changed = 1;
+ }
+ break;
+
+ case CONST_INT:
+ if (negs[i])
+ ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
+ break;
+ }
+ }
+
+ /* If we only have two operands, we can't do anything. */
+ if (n_ops <= 2)
+ return 0;
+
+ /* Now simplify each pair of operands until nothing changes. The first
+ time through just simplify constants against each other. */
+
+ changed = 1;
+ while (changed)
+ {
+ changed = first;
+
+ for (i = 0; i < n_ops - 1; i++)
+ for (j = i + 1; j < n_ops; j++)
+ if (ops[i] != 0 && ops[j] != 0
+ && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
+ {
+ rtx lhs = ops[i], rhs = ops[j];
+ enum rtx_code ncode = PLUS;
+
+ if (negs[i] && ! negs[j])
+ lhs = ops[j], rhs = ops[i], ncode = MINUS;
+ else if (! negs[i] && negs[j])
+ ncode = MINUS;
+
+ tem = simplify_binary_operation (ncode, mode, lhs, rhs);
+ if (tem)
+ {
+ ops[i] = tem, ops[j] = 0;
+ negs[i] = negs[i] && negs[j];
+ if (GET_CODE (tem) == NEG)
+ ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
+
+ if (GET_CODE (ops[i]) == CONST_INT && negs[i])
+ ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
+ changed = 1;
+ }
+ }
+
+ first = 0;
+ }
+
+ /* Pack all the operands to the lower-numbered entries and give up if
+ we didn't reduce the number of operands we had. Make sure we
+ count a CONST as two operands. If we have the same number of
+ operands, but have made more CONSTs than we had, this is also
+ an improvement, so accept it. */
+
+ for (i = 0, j = 0; j < n_ops; j++)
+ if (ops[j] != 0)
+ {
+ ops[i] = ops[j], negs[i++] = negs[j];
+ if (GET_CODE (ops[j]) == CONST)
+ n_consts++;
+ }
+
+ if (i + n_consts > input_ops
+ || (i + n_consts == input_ops && n_consts <= input_consts))
+ return 0;
+
+ n_ops = i;
+
+ /* If we have a CONST_INT, put it last. */
+ for (i = 0; i < n_ops - 1; i++)
+ if (GET_CODE (ops[i]) == CONST_INT)
+ {
+ tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
+ j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
+ }
+
+ /* Put a non-negated operand first. If there aren't any, make all
+ operands positive and negate the whole thing later. */
+ for (i = 0; i < n_ops && negs[i]; i++)
+ ;
+
+ if (i == n_ops)
+ {
+ for (i = 0; i < n_ops; i++)
+ negs[i] = 0;
+ negate = 1;
+ }
+ else if (i != 0)
+ {
+ tem = ops[0], ops[0] = ops[i], ops[i] = tem;
+ j = negs[0], negs[0] = negs[i], negs[i] = j;
+ }
+
+ /* Now make the result by performing the requested operations. */
+ result = ops[0];
+ for (i = 1; i < n_ops; i++)
+ result = cse_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
+
+ return negate ? gen_rtx (NEG, mode, result) : result;
+}
+
+/* Make a binary operation by properly ordering the operands and
+ seeing if the expression folds. */
+
+static rtx
+cse_gen_binary (code, mode, op0, op1)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op0, op1;
+{
+ rtx tem;
+
+ /* Put complex operands first and constants second if commutative. */
+ if (GET_RTX_CLASS (code) == 'c'
+ && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
+ || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
+ && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
+ || (GET_CODE (op0) == SUBREG
+ && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
+ && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
+ tem = op0, op0 = op1, op1 = tem;
+
+ /* If this simplifies, do it. */
+ tem = simplify_binary_operation (code, mode, op0, op1);
+
+ if (tem)
+ return tem;
+
+ /* Handle addition and subtraction of CONST_INT specially. Otherwise,
+ just form the operation. */
+
+ if (code == PLUS && GET_CODE (op1) == CONST_INT
+ && GET_MODE (op0) != VOIDmode)
+ return plus_constant (op0, INTVAL (op1));
+ else if (code == MINUS && GET_CODE (op1) == CONST_INT
+ && GET_MODE (op0) != VOIDmode)
+ return plus_constant (op0, - INTVAL (op1));
+ else
+ return gen_rtx (code, mode, op0, op1);
+}
+
+/* Like simplify_binary_operation except used for relational operators.
+ MODE is the mode of the operands, not that of the result. If MODE
+ is VOIDmode, both operands must also be VOIDmode and we compare the
+ operands in "infinite precision".
+
+ If no simplification is possible, this function returns zero. Otherwise,
+ it returns either const_true_rtx or const0_rtx. */
+
+rtx
+simplify_relational_operation (code, mode, op0, op1)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op0, op1;
+{
+ int equal, op0lt, op0ltu, op1lt, op1ltu;
+ rtx tem;
+
+ /* If op0 is a compare, extract the comparison arguments from it. */
+ if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
+ op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
+
+ /* We can't simplify MODE_CC values since we don't know what the
+ actual comparison is. */
+ if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
+#ifdef HAVE_cc0
+ || op0 == cc0_rtx
+#endif
+ )
+ return 0;
+
+ /* For integer comparisons of A and B, we may be able to simplify A - B
+ and then simplify a comparison of that with zero. If A and B are both either
+ a register or a CONST_INT, this can't help; testing for these cases will
+ prevent infinite recursion here and speed things up.
+
+ If CODE is an unsigned comparison, we can only do this if A - B is a
+ constant integer, and then we have to compare that integer with zero as a
+ signed comparison. Note that this will give the incorrect result from
+ comparisons that overflow. Since these are undefined, this is probably
+ OK. If it causes a problem, we can check for A or B being an address
+ (fp + const or SYMBOL_REF) and only do it in that case. */
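+
+ /* (For instance, hypothetically, comparing (plus (reg) (const_int 4))
+ with (plus (reg) (const_int 1)) lets the MINUS fold to (const_int 3),
+ so the recursive call below reduces this to comparing 3 with zero.)  */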
+
+ if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
+ && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
+ && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
+ && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
+ && (GET_CODE (tem) == CONST_INT
+ || (code != GTU && code != GEU &&
+ code != LTU && code != LEU)))
+ return simplify_relational_operation (signed_condition (code),
+ mode, tem, const0_rtx);
+
+ /* For non-IEEE floating-point, if the two operands are equal, we know the
+ result. */
+ if (rtx_equal_p (op0, op1)
+ && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
+ equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
+
+ /* If the operands are floating-point constants, see if we can fold
+ the result. */
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
+ {
+ REAL_VALUE_TYPE d0, d1;
+ jmp_buf handler;
+
+ if (setjmp (handler))
+ return 0;
+
+ set_float_handler (handler);
+ REAL_VALUE_FROM_CONST_DOUBLE (d0, op0);
+ REAL_VALUE_FROM_CONST_DOUBLE (d1, op1);
+ equal = REAL_VALUES_EQUAL (d0, d1);
+ op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
+ op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
+ set_float_handler (NULL_PTR);
+ }
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+
+ /* Otherwise, see if the operands are both integers. */
+ else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
+ && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
+ && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
+ {
+ int width = GET_MODE_BITSIZE (mode);
+ HOST_WIDE_INT l0s, h0s, l1s, h1s;
+ unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
+
+ /* Get the two words comprising each integer constant. */
+ if (GET_CODE (op0) == CONST_DOUBLE)
+ {
+ l0u = l0s = CONST_DOUBLE_LOW (op0);
+ h0u = h0s = CONST_DOUBLE_HIGH (op0);
+ }
+ else
+ {
+ l0u = l0s = INTVAL (op0);
+ h0u = 0, h0s = l0s < 0 ? -1 : 0;
+ }
+
+ if (GET_CODE (op1) == CONST_DOUBLE)
+ {
+ l1u = l1s = CONST_DOUBLE_LOW (op1);
+ h1u = h1s = CONST_DOUBLE_HIGH (op1);
+ }
+ else
+ {
+ l1u = l1s = INTVAL (op1);
+ h1u = 0, h1s = l1s < 0 ? -1 : 0;
+ }
+
+ /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
+ we have to sign or zero-extend the values. */
+ if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
+ h0u = h1u = 0, h0s = l0s < 0 ? -1 : 0, h1s = l1s < 0 ? -1 : 0;
+
+ if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
+ {
+ l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
+ l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
+
+ if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
+ l0s |= ((HOST_WIDE_INT) (-1) << width);
+
+ if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
+ l1s |= ((HOST_WIDE_INT) (-1) << width);
+ }
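+
+ /* (Hypothetical example, with an 8-bit mode and a 32-bit
+ HOST_WIDE_INT: an operand of 0xff becomes l0u == 0xff but
+ l0s == -1, so it compares above zero unsigned and below zero
+ signed.)  */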
+
+ equal = (h0u == h1u && l0u == l1u);
+ op0lt = (h0s < h1s || (h0s == h1s && l0s < l1s));
+ op1lt = (h1s < h0s || (h1s == h0s && l1s < l0s));
+ op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
+ op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
+ }
+
+ /* Otherwise, there are some code-specific tests we can make. */
+ else
+ {
+ switch (code)
+ {
+ case EQ:
+ /* References to the frame plus a constant or labels cannot
+ be zero, but a SYMBOL_REF can due to #pragma weak. */
+ if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
+ || GET_CODE (op0) == LABEL_REF)
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ /* On some machines, the ap reg can be 0 sometimes. */
+ && op0 != arg_pointer_rtx
+#endif
+ )
+ return const0_rtx;
+ break;
+
+ case NE:
+ if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
+ || GET_CODE (op0) == LABEL_REF)
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && op0 != arg_pointer_rtx
+#endif
+ )
+ return const_true_rtx;
+ break;
+
+ case GEU:
+ /* Unsigned values are never negative. */
+ if (op1 == const0_rtx)
+ return const_true_rtx;
+ break;
+
+ case LTU:
+ if (op1 == const0_rtx)
+ return const0_rtx;
+ break;
+
+ case LEU:
+ /* Unsigned values are never greater than the largest
+ unsigned value. */
+ if (GET_CODE (op1) == CONST_INT
+ && INTVAL (op1) == GET_MODE_MASK (mode)
+ && INTEGRAL_MODE_P (mode))
+ return const_true_rtx;
+ break;
+
+ case GTU:
+ if (GET_CODE (op1) == CONST_INT
+ && INTVAL (op1) == GET_MODE_MASK (mode)
+ && INTEGRAL_MODE_P (mode))
+ return const0_rtx;
+ break;
+ }
+
+ return 0;
+ }
+
+ /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
+ as appropriate. */
+ switch (code)
+ {
+ case EQ:
+ return equal ? const_true_rtx : const0_rtx;
+ case NE:
+ return ! equal ? const_true_rtx : const0_rtx;
+ case LT:
+ return op0lt ? const_true_rtx : const0_rtx;
+ case GT:
+ return op1lt ? const_true_rtx : const0_rtx;
+ case LTU:
+ return op0ltu ? const_true_rtx : const0_rtx;
+ case GTU:
+ return op1ltu ? const_true_rtx : const0_rtx;
+ case LE:
+ return equal || op0lt ? const_true_rtx : const0_rtx;
+ case GE:
+ return equal || op1lt ? const_true_rtx : const0_rtx;
+ case LEU:
+ return equal || op0ltu ? const_true_rtx : const0_rtx;
+ case GEU:
+ return equal || op1ltu ? const_true_rtx : const0_rtx;
+ }
+
+ abort ();
+}
+
+/* Simplify CODE, an operation with result mode MODE and three operands,
+ OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
+ a constant. Return 0 if no simplification is possible. */
+
+rtx
+simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
+ enum rtx_code code;
+ enum machine_mode mode, op0_mode;
+ rtx op0, op1, op2;
+{
+ int width = GET_MODE_BITSIZE (mode);
+
+ /* VOIDmode means "infinite" precision. */
+ if (width == 0)
+ width = HOST_BITS_PER_WIDE_INT;
+
+ switch (code)
+ {
+ case SIGN_EXTRACT:
+ case ZERO_EXTRACT:
+ if (GET_CODE (op0) == CONST_INT
+ && GET_CODE (op1) == CONST_INT
+ && GET_CODE (op2) == CONST_INT
+ && INTVAL (op1) + INTVAL (op2) <= GET_MODE_BITSIZE (op0_mode)
+ && width <= HOST_BITS_PER_WIDE_INT)
+ {
+ /* Extracting a bit-field from a constant */
+ HOST_WIDE_INT val = INTVAL (op0);
+
+#if BITS_BIG_ENDIAN
+ val >>= (GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1));
+#else
+ val >>= INTVAL (op2);
+#endif
+ if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
+ {
+ /* First zero-extend. */
+ val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
+ /* If desired, propagate sign bit. */
+ if (code == SIGN_EXTRACT
+ && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
+ val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
+ }
+
+ /* Clear the bits that don't belong in our mode,
+ unless they and our sign bit are all one.
+ So we get either a reasonable negative value or a reasonable
+ unsigned value for this mode. */
+ if (width < HOST_BITS_PER_WIDE_INT
+ && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
+ != ((HOST_WIDE_INT) (-1) << (width - 1))))
+ val &= ((HOST_WIDE_INT) 1 << width) - 1;
+
+ return GEN_INT (val);
+ }
+ break;
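+
+ /* (Hypothetically, with BITS_BIG_ENDIAN == 0,
+ (sign_extract (const_int 10) (const_int 4) (const_int 0)) folds
+ here to (const_int -6): the low four bits 1010 are kept and the
+ field's sign bit is propagated.)  */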
+
+ case IF_THEN_ELSE:
+ if (GET_CODE (op0) == CONST_INT)
+ return op0 != const0_rtx ? op1 : op2;
+ break;
+
+ default:
+ abort ();
+ }
+
+ return 0;
+}
+
+/* If X is a nontrivial arithmetic operation on an argument
+ for which a constant value can be determined, return
+ the result of operating on that value, as a constant.
+ Otherwise, return X, possibly with one or more operands
+ modified by recursive calls to this function.
+
+ If X is a register whose contents are known, we do NOT
+ return those contents here. equiv_constant is called to
+ perform that task.
+
+ INSN is the insn that we may be modifying. If it is 0, make a copy
+ of X before modifying it. */
+
+static rtx
+fold_rtx (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register enum rtx_code code;
+ register enum machine_mode mode;
+ register char *fmt;
+ register int i;
+ rtx new = 0;
+ int copied = 0;
+ int must_swap = 0;
+
+ /* Folded equivalents of first two operands of X. */
+ rtx folded_arg0;
+ rtx folded_arg1;
+
+ /* Constant equivalents of first three operands of X;
+ 0 when no such equivalent is known. */
+ rtx const_arg0;
+ rtx const_arg1;
+ rtx const_arg2;
+
+ /* The mode of the first operand of X. We need this for sign and zero
+ extends. */
+ enum machine_mode mode_arg0;
+
+ if (x == 0)
+ return x;
+
+ mode = GET_MODE (x);
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case REG:
+ /* No use simplifying an EXPR_LIST
+ since they are used only for lists of args
+ in a function call's REG_EQUAL note. */
+ case EXPR_LIST:
+ return x;
+
+#ifdef HAVE_cc0
+ case CC0:
+ return prev_insn_cc0;
+#endif
+
+ case PC:
+ /* If the next insn is a CODE_LABEL followed by a jump table,
+ PC's value is a LABEL_REF pointing to that label. That
+ lets us fold switch statements on the Vax. */
+ if (insn && GET_CODE (insn) == JUMP_INSN)
+ {
+ rtx next = next_nonnote_insn (insn);
+
+ if (next && GET_CODE (next) == CODE_LABEL
+ && NEXT_INSN (next) != 0
+ && GET_CODE (NEXT_INSN (next)) == JUMP_INSN
+ && (GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_VEC
+ || GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_DIFF_VEC))
+ return gen_rtx (LABEL_REF, Pmode, next);
+ }
+ break;
+
+ case SUBREG:
+ /* See if we previously assigned a constant value to this SUBREG. */
+ if ((new = lookup_as_function (x, CONST_INT)) != 0
+ || (new = lookup_as_function (x, CONST_DOUBLE)) != 0)
+ return new;
+
+ /* If this is a paradoxical SUBREG, we have no idea what value the
+ extra bits would have. However, if the operand is equivalent
+ to a SUBREG whose operand is the same as our mode, and all the
+ modes are within a word, we can just use the inner operand
+ because these SUBREGs just say how to treat the register.
+
+ Similarly if we find an integer constant. */
+
+ if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ {
+ enum machine_mode imode = GET_MODE (SUBREG_REG (x));
+ struct table_elt *elt;
+
+ if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
+ && GET_MODE_SIZE (imode) <= UNITS_PER_WORD
+ && (elt = lookup (SUBREG_REG (x), HASH (SUBREG_REG (x), imode),
+ imode)) != 0)
+ for (elt = elt->first_same_value;
+ elt; elt = elt->next_same_value)
+ {
+ if (CONSTANT_P (elt->exp)
+ && GET_MODE (elt->exp) == VOIDmode)
+ return elt->exp;
+
+ if (GET_CODE (elt->exp) == SUBREG
+ && GET_MODE (SUBREG_REG (elt->exp)) == mode
+ && exp_equiv_p (elt->exp, elt->exp, 1, 0))
+ return copy_rtx (SUBREG_REG (elt->exp));
+ }
+
+ return x;
+ }
+
+ /* Fold SUBREG_REG. If it changed, see if we can simplify the SUBREG.
+ We might be able to if the SUBREG is extracting a single word in an
+ integral mode or extracting the low part. */
+
+ folded_arg0 = fold_rtx (SUBREG_REG (x), insn);
+ const_arg0 = equiv_constant (folded_arg0);
+ if (const_arg0)
+ folded_arg0 = const_arg0;
+
+ if (folded_arg0 != SUBREG_REG (x))
+ {
+ new = 0;
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD
+ && GET_MODE (SUBREG_REG (x)) != VOIDmode)
+ new = operand_subword (folded_arg0, SUBREG_WORD (x), 0,
+ GET_MODE (SUBREG_REG (x)));
+ if (new == 0 && subreg_lowpart_p (x))
+ new = gen_lowpart_if_possible (mode, folded_arg0);
+ if (new)
+ return new;
+ }
+
+ /* If this is a narrowing SUBREG and our operand is a REG, see if
+ we can find an equivalence for REG that is an arithmetic operation
+ in a wider mode where both operands are paradoxical SUBREGs
+ from objects of our result mode. In that case, we couldn't report
+ an equivalent value for that operation, since we don't know what the
+ extra bits will be. But we can find an equivalence for this SUBREG
+ by folding that operation in the narrow mode. This allows us to
+ fold arithmetic in narrow modes when the machine only supports
+ word-sized arithmetic.
+
+ Also look for a case where we have a SUBREG whose operand is the
+ same as our result. If both modes are smaller than a word, we
+ are simply interpreting a register in different modes and we
+ can use the inner value. */
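+
+ /* (A hypothetical instance: if (reg:SI R) is known equivalent to
+ (plus:SI (subreg:SI (reg:QI A) 0) (subreg:SI (reg:QI B) 0)) and
+ A and B have known constant values, (subreg:QI (reg:SI R) 0)
+ folds below to the QImode sum of those constants.)  */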
+
+ if (GET_CODE (folded_arg0) == REG
+ && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (folded_arg0))
+ && subreg_lowpart_p (x))
+ {
+ struct table_elt *elt;
+
+ /* We can use HASH here since we know that canon_hash won't be
+ called. */
+ elt = lookup (folded_arg0,
+ HASH (folded_arg0, GET_MODE (folded_arg0)),
+ GET_MODE (folded_arg0));
+
+ if (elt)
+ elt = elt->first_same_value;
+
+ for (; elt; elt = elt->next_same_value)
+ {
+ enum rtx_code eltcode = GET_CODE (elt->exp);
+
+ /* Just check for unary and binary operations. */
+ if (GET_RTX_CLASS (GET_CODE (elt->exp)) == '1'
+ && GET_CODE (elt->exp) != SIGN_EXTEND
+ && GET_CODE (elt->exp) != ZERO_EXTEND
+ && GET_CODE (XEXP (elt->exp, 0)) == SUBREG
+ && GET_MODE (SUBREG_REG (XEXP (elt->exp, 0))) == mode)
+ {
+ rtx op0 = SUBREG_REG (XEXP (elt->exp, 0));
+
+ if (GET_CODE (op0) != REG && ! CONSTANT_P (op0))
+ op0 = fold_rtx (op0, NULL_RTX);
+
+ op0 = equiv_constant (op0);
+ if (op0)
+ new = simplify_unary_operation (GET_CODE (elt->exp), mode,
+ op0, mode);
+ }
+ else if ((GET_RTX_CLASS (GET_CODE (elt->exp)) == '2'
+ || GET_RTX_CLASS (GET_CODE (elt->exp)) == 'c')
+ && eltcode != DIV && eltcode != MOD
+ && eltcode != UDIV && eltcode != UMOD
+ && eltcode != ASHIFTRT && eltcode != LSHIFTRT
+ && eltcode != ROTATE && eltcode != ROTATERT
+ && ((GET_CODE (XEXP (elt->exp, 0)) == SUBREG
+ && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 0)))
+ == mode))
+ || CONSTANT_P (XEXP (elt->exp, 0)))
+ && ((GET_CODE (XEXP (elt->exp, 1)) == SUBREG
+ && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 1)))
+ == mode))
+ || CONSTANT_P (XEXP (elt->exp, 1))))
+ {
+ rtx op0 = gen_lowpart_common (mode, XEXP (elt->exp, 0));
+ rtx op1 = gen_lowpart_common (mode, XEXP (elt->exp, 1));
+
+ if (op0 && GET_CODE (op0) != REG && ! CONSTANT_P (op0))
+ op0 = fold_rtx (op0, NULL_RTX);
+
+ if (op0)
+ op0 = equiv_constant (op0);
+
+ if (op1 && GET_CODE (op1) != REG && ! CONSTANT_P (op1))
+ op1 = fold_rtx (op1, NULL_RTX);
+
+ if (op1)
+ op1 = equiv_constant (op1);
+
+ /* If we are looking for the low SImode part of
+ (ashift:DI c (const_int 32)), it doesn't work
+ to compute that in SImode, because a 32-bit shift
+ in SImode is unpredictable. We know the value is 0. */
+ if (op0 && op1
+ && GET_CODE (elt->exp) == ASHIFT
+ && GET_CODE (op1) == CONST_INT
+ && INTVAL (op1) >= GET_MODE_BITSIZE (mode))
+ {
+ if (INTVAL (op1) < GET_MODE_BITSIZE (GET_MODE (elt->exp)))
+
+ /* If the count fits in the inner mode's width,
+ but exceeds the outer mode's width,
+ the value will get truncated to 0
+ by the subreg. */
+ new = const0_rtx;
+ else
+ /* If the count exceeds even the inner mode's width,
+ don't fold this expression. */
+ new = 0;
+ }
+ else if (op0 && op1)
+ new = simplify_binary_operation (GET_CODE (elt->exp), mode,
+ op0, op1);
+ }
+
+ else if (GET_CODE (elt->exp) == SUBREG
+ && GET_MODE (SUBREG_REG (elt->exp)) == mode
+ && (GET_MODE_SIZE (GET_MODE (folded_arg0))
+ <= UNITS_PER_WORD)
+ && exp_equiv_p (elt->exp, elt->exp, 1, 0))
+ new = copy_rtx (SUBREG_REG (elt->exp));
+
+ if (new)
+ return new;
+ }
+ }
+
+ return x;
+
+ case NOT:
+ case NEG:
+ /* If we have (NOT Y), see if Y is known to be (NOT Z).
+ If so, (NOT Y) simplifies to Z. Similarly for NEG. */
+ new = lookup_as_function (XEXP (x, 0), code);
+ if (new)
+ return fold_rtx (copy_rtx (XEXP (new, 0)), insn);
+ break;
+
+ case MEM:
+ /* If we are not actually processing an insn, don't try to find the
+ best address. Not only don't we care, but we could modify the
+ MEM in an invalid way since we have no insn to validate against. */
+ if (insn != 0)
+ find_best_addr (insn, &XEXP (x, 0));
+
+ {
+ /* Even if we don't fold in the insn itself,
+ we can safely do so here, in hopes of getting a constant. */
+ rtx addr = fold_rtx (XEXP (x, 0), NULL_RTX);
+ rtx base = 0;
+ HOST_WIDE_INT offset = 0;
+
+ if (GET_CODE (addr) == REG
+ && REGNO_QTY_VALID_P (REGNO (addr))
+ && GET_MODE (addr) == qty_mode[reg_qty[REGNO (addr)]]
+ && qty_const[reg_qty[REGNO (addr)]] != 0)
+ addr = qty_const[reg_qty[REGNO (addr)]];
+
+ /* If address is constant, split it into a base and integer offset. */
+ if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
+ base = addr;
+ else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
+ {
+ base = XEXP (XEXP (addr, 0), 0);
+ offset = INTVAL (XEXP (XEXP (addr, 0), 1));
+ }
+ else if (GET_CODE (addr) == LO_SUM
+ && GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
+ base = XEXP (addr, 1);
+
+ /* If this is a constant pool reference, we can fold it into its
+ constant to allow better value tracking. */
+ if (base && GET_CODE (base) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (base))
+ {
+ rtx constant = get_pool_constant (base);
+ enum machine_mode const_mode = get_pool_mode (base);
+ rtx new;
+
+ if (CONSTANT_P (constant) && GET_CODE (constant) != CONST_INT)
+ constant_pool_entries_cost = COST (constant);
+
+ /* If we are loading the full constant, we have an equivalence. */
+ if (offset == 0 && mode == const_mode)
+ return constant;
+
+ /* If this actually isn't a constant (weird!), we can't do
+ anything. Otherwise, handle the two most common cases:
+ extracting a word from a multi-word constant, and extracting
+ the low-order bits. Other cases don't seem common enough to
+ worry about. */
+ if (! CONSTANT_P (constant))
+ return x;
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD
+ && offset % UNITS_PER_WORD == 0
+ && (new = operand_subword (constant,
+ offset / UNITS_PER_WORD,
+ 0, const_mode)) != 0)
+ return new;
+
+ if (((BYTES_BIG_ENDIAN
+ && offset == GET_MODE_SIZE (GET_MODE (constant)) - 1)
+ || (! BYTES_BIG_ENDIAN && offset == 0))
+ && (new = gen_lowpart_if_possible (mode, constant)) != 0)
+ return new;
+ }
+
+ /* If this is a reference to a label at a known position in a jump
+ table, we also know its value. */
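+ /* (E.g., hypothetically: with 4-byte table entries, an address of
+ LABEL_REF plus 8 selects element 2 of an ADDR_VEC below.)  */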
+ if (base && GET_CODE (base) == LABEL_REF)
+ {
+ rtx label = XEXP (base, 0);
+ rtx table_insn = NEXT_INSN (label);
+
+ if (table_insn && GET_CODE (table_insn) == JUMP_INSN
+ && GET_CODE (PATTERN (table_insn)) == ADDR_VEC)
+ {
+ rtx table = PATTERN (table_insn);
+
+ if (offset >= 0
+ && (offset / GET_MODE_SIZE (GET_MODE (table))
+ < XVECLEN (table, 0)))
+ return XVECEXP (table, 0,
+ offset / GET_MODE_SIZE (GET_MODE (table)));
+ }
+ if (table_insn && GET_CODE (table_insn) == JUMP_INSN
+ && GET_CODE (PATTERN (table_insn)) == ADDR_DIFF_VEC)
+ {
+ rtx table = PATTERN (table_insn);
+
+ if (offset >= 0
+ && (offset / GET_MODE_SIZE (GET_MODE (table))
+ < XVECLEN (table, 1)))
+ {
+ offset /= GET_MODE_SIZE (GET_MODE (table));
+ new = gen_rtx (MINUS, Pmode, XVECEXP (table, 1, offset),
+ XEXP (table, 0));
+
+ if (GET_MODE (table) != Pmode)
+ new = gen_rtx (TRUNCATE, GET_MODE (table), new);
+
+ return new;
+ }
+ }
+ }
+
+ return x;
+ }
+ }
+
+ const_arg0 = 0;
+ const_arg1 = 0;
+ const_arg2 = 0;
+ mode_arg0 = VOIDmode;
+
+ /* Try folding our operands.
+ Then see which ones have constant values known. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ {
+ rtx arg = XEXP (x, i);
+ rtx folded_arg = arg, const_arg = 0;
+ enum machine_mode mode_arg = GET_MODE (arg);
+ rtx cheap_arg, expensive_arg;
+ rtx replacements[2];
+ int j;
+
+ /* Most arguments are cheap, so handle them specially. */
+ switch (GET_CODE (arg))
+ {
+ case REG:
+ /* This is the same as calling equiv_constant; it is duplicated
+ here for speed. */
+ if (REGNO_QTY_VALID_P (REGNO (arg))
+ && qty_const[reg_qty[REGNO (arg)]] != 0
+ && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != REG
+ && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != PLUS)
+ const_arg
+ = gen_lowpart_if_possible (GET_MODE (arg),
+ qty_const[reg_qty[REGNO (arg)]]);
+ break;
+
+ case CONST:
+ case CONST_INT:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ const_arg = arg;
+ break;
+
+#ifdef HAVE_cc0
+ case CC0:
+ folded_arg = prev_insn_cc0;
+ mode_arg = prev_insn_cc0_mode;
+ const_arg = equiv_constant (folded_arg);
+ break;
+#endif
+
+ default:
+ folded_arg = fold_rtx (arg, insn);
+ const_arg = equiv_constant (folded_arg);
+ }
+
+ /* For the first three operands, see if the operand
+ is constant or equivalent to a constant. */
+ switch (i)
+ {
+ case 0:
+ folded_arg0 = folded_arg;
+ const_arg0 = const_arg;
+ mode_arg0 = mode_arg;
+ break;
+ case 1:
+ folded_arg1 = folded_arg;
+ const_arg1 = const_arg;
+ break;
+ case 2:
+ const_arg2 = const_arg;
+ break;
+ }
+
+ /* Pick the least expensive of the folded argument and an
+ equivalent constant argument. */
+ if (const_arg == 0 || const_arg == folded_arg
+ || COST (const_arg) > COST (folded_arg))
+ cheap_arg = folded_arg, expensive_arg = const_arg;
+ else
+ cheap_arg = const_arg, expensive_arg = folded_arg;
+
+ /* Try to replace the operand with the cheapest of the two
+ possibilities. If it doesn't work and this is either of the first
+ two operands of a commutative operation, try swapping them.
+ If THAT fails, try the more expensive, provided it is cheaper
+ than what is already there. */
+
+ if (cheap_arg == XEXP (x, i))
+ continue;
+
+ if (insn == 0 && ! copied)
+ {
+ x = copy_rtx (x);
+ copied = 1;
+ }
+
+ replacements[0] = cheap_arg, replacements[1] = expensive_arg;
+ for (j = 0;
+ j < 2 && replacements[j]
+ && COST (replacements[j]) < COST (XEXP (x, i));
+ j++)
+ {
+ if (validate_change (insn, &XEXP (x, i), replacements[j], 0))
+ break;
+
+ if (code == NE || code == EQ || GET_RTX_CLASS (code) == 'c')
+ {
+ validate_change (insn, &XEXP (x, i), XEXP (x, 1 - i), 1);
+ validate_change (insn, &XEXP (x, 1 - i), replacements[j], 1);
+
+ if (apply_change_group ())
+ {
+ /* Swap them back to be invalid so that this loop can
+ continue and flag them to be swapped back later. */
+ rtx tem;
+
+ tem = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1);
+ XEXP (x, 1) = tem;
+ must_swap = 1;
+ break;
+ }
+ }
+ }
+ }
+
+ else if (fmt[i] == 'E')
+ /* Don't try to fold inside of a vector of expressions.
+ Doing nothing is harmless. */
+ ;
+
+ /* If a commutative operation, place a constant integer as the second
+ operand unless the first operand is also a constant integer. Otherwise,
+ place any constant second unless the first operand is also a constant. */
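+
+ /* (E.g., (plus (const_int 4) (reg X)) is rewritten here as
+ (plus (reg X) (const_int 4)) so that later lookups see one
+ canonical form; illustration only.)  */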
+
+ if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
+ {
+ if (must_swap || (const_arg0
+ && (const_arg1 == 0
+ || (GET_CODE (const_arg0) == CONST_INT
+ && GET_CODE (const_arg1) != CONST_INT))))
+ {
+ register rtx tem = XEXP (x, 0);
+
+ if (insn == 0 && ! copied)
+ {
+ x = copy_rtx (x);
+ copied = 1;
+ }
+
+ validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1);
+ validate_change (insn, &XEXP (x, 1), tem, 1);
+ if (apply_change_group ())
+ {
+ tem = const_arg0, const_arg0 = const_arg1, const_arg1 = tem;
+ tem = folded_arg0, folded_arg0 = folded_arg1, folded_arg1 = tem;
+ }
+ }
+ }
+
+ /* If X is an arithmetic operation, see if we can simplify it. */
+
+ switch (GET_RTX_CLASS (code))
+ {
+ case '1':
+ /* We can't simplify extension ops unless we know the original mode. */
+ if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
+ && mode_arg0 == VOIDmode)
+ break;
+ new = simplify_unary_operation (code, mode,
+ const_arg0 ? const_arg0 : folded_arg0,
+ mode_arg0);
+ break;
+
+ case '<':
+ /* See what items are actually being compared and set FOLDED_ARG[01]
+ to those values and CODE to the actual comparison code. If any are
+ constant, set CONST_ARG0 and CONST_ARG1 appropriately. We needn't
+ do anything if both operands are already known to be constant. */
+
+ if (const_arg0 == 0 || const_arg1 == 0)
+ {
+ struct table_elt *p0, *p1;
+ rtx true = const_true_rtx, false = const0_rtx;
+ enum machine_mode mode_arg1;
+
+#ifdef FLOAT_STORE_FLAG_VALUE
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
+ mode);
+ false = CONST0_RTX (mode);
+ }
+#endif
+
+ code = find_comparison_args (code, &folded_arg0, &folded_arg1,
+ &mode_arg0, &mode_arg1);
+ const_arg0 = equiv_constant (folded_arg0);
+ const_arg1 = equiv_constant (folded_arg1);
+
+ /* If the mode is VOIDmode or a MODE_CC mode, we don't know
+ what kinds of things are being compared, so we can't do
+ anything with this comparison. */
+
+ if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC)
+ break;
+
+ /* If we do not now have two constants being compared, see if we
+ can nevertheless deduce some things about the comparison. */
+ if (const_arg0 == 0 || const_arg1 == 0)
+ {
+ /* Is FOLDED_ARG0 frame-pointer plus a constant? Or a non-explicit
+ constant? These aren't zero, but we don't know their sign. */
+ if (const_arg1 == const0_rtx
+ && (NONZERO_BASE_PLUS_P (folded_arg0)
+#if 0 /* Sad to say, on sysvr4, #pragma weak can make a symbol address
+ come out as 0. */
+ || GET_CODE (folded_arg0) == SYMBOL_REF
+#endif
+ || GET_CODE (folded_arg0) == LABEL_REF
+ || GET_CODE (folded_arg0) == CONST))
+ {
+ if (code == EQ)
+ return false;
+ else if (code == NE)
+ return true;
+ }
+
+ /* See if the two operands are the same. We don't do this
+ for IEEE floating-point since we can't assume x == x
+ since x might be a NaN. */
+
+ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_MODE_P (mode_arg0) || flag_fast_math)
+ && (folded_arg0 == folded_arg1
+ || (GET_CODE (folded_arg0) == REG
+ && GET_CODE (folded_arg1) == REG
+ && (reg_qty[REGNO (folded_arg0)]
+ == reg_qty[REGNO (folded_arg1)]))
+ || ((p0 = lookup (folded_arg0,
+ (safe_hash (folded_arg0, mode_arg0)
+ % NBUCKETS), mode_arg0))
+ && (p1 = lookup (folded_arg1,
+ (safe_hash (folded_arg1, mode_arg0)
+ % NBUCKETS), mode_arg0))
+ && p0->first_same_value == p1->first_same_value)))
+ return ((code == EQ || code == LE || code == GE
+ || code == LEU || code == GEU)
+ ? true : false);
+
+ /* If FOLDED_ARG0 is a register, see if the comparison we are
+ doing now is either the same as we did before or the reverse
+ (we only check the reverse if not floating-point). */
+ else if (GET_CODE (folded_arg0) == REG)
+ {
+ int qty = reg_qty[REGNO (folded_arg0)];
+
+ if (REGNO_QTY_VALID_P (REGNO (folded_arg0))
+ && (comparison_dominates_p (qty_comparison_code[qty], code)
+ || (comparison_dominates_p (qty_comparison_code[qty],
+ reverse_condition (code))
+ && ! FLOAT_MODE_P (mode_arg0)))
+ && (rtx_equal_p (qty_comparison_const[qty], folded_arg1)
+ || (const_arg1
+ && rtx_equal_p (qty_comparison_const[qty],
+ const_arg1))
+ || (GET_CODE (folded_arg1) == REG
+ && (reg_qty[REGNO (folded_arg1)]
+ == qty_comparison_qty[qty]))))
+ return (comparison_dominates_p (qty_comparison_code[qty],
+ code)
+ ? true : false);
+ }
+ }
+ }
+
+ /* If we are comparing against zero, see if the first operand is
+ equivalent to an IOR with a constant. If so, we may be able to
+ determine the result of this comparison. */
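+
+ /* (Hypothetical case: if FOLDED_ARG0 is known equivalent to
+ (ior (reg Y) (const_int 4)), it cannot be zero, so EQ folds to
+ false and NE to true; were the known-set bit the sign bit, the
+ signed orderings would fold as well.)  */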
+
+ if (const_arg1 == const0_rtx)
+ {
+ rtx y = lookup_as_function (folded_arg0, IOR);
+ rtx inner_const;
+
+ if (y != 0
+ && (inner_const = equiv_constant (XEXP (y, 1))) != 0
+ && GET_CODE (inner_const) == CONST_INT
+ && INTVAL (inner_const) != 0)
+ {
+ int sign_bitnum = GET_MODE_BITSIZE (mode_arg0) - 1;
+ int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
+ && (INTVAL (inner_const)
+ & ((HOST_WIDE_INT) 1 << sign_bitnum)));
+ rtx true = const_true_rtx, false = const0_rtx;
+
+#ifdef FLOAT_STORE_FLAG_VALUE
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
+ mode);
+ false = CONST0_RTX (mode);
+ }
+#endif
+
+ switch (code)
+ {
+ case EQ:
+ return false;
+ case NE:
+ return true;
+ case LT: case LE:
+ if (has_sign)
+ return true;
+ break;
+ case GT: case GE:
+ if (has_sign)
+ return false;
+ break;
+ }
+ }
+ }
+
+ new = simplify_relational_operation (code, mode_arg0,
+ const_arg0 ? const_arg0 : folded_arg0,
+ const_arg1 ? const_arg1 : folded_arg1);
+#ifdef FLOAT_STORE_FLAG_VALUE
+ if (new != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ new = ((new == const0_rtx) ? CONST0_RTX (mode)
+ : CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE, mode));
+#endif
+ break;
+
+ case '2':
+ case 'c':
+ switch (code)
+ {
+ case PLUS:
+ /* If the second operand is a LABEL_REF, see if the first is a MINUS
+ with that LABEL_REF as its second operand. If so, the result is
+ the first operand of that MINUS. This handles switches with an
+ ADDR_DIFF_VEC table. */
+ if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF)
+ {
+ rtx y = lookup_as_function (folded_arg0, MINUS);
+
+ if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
+ && XEXP (XEXP (y, 1), 0) == XEXP (const_arg1, 0))
+ return XEXP (y, 0);
+ }
+
+ /* If second operand is a register equivalent to a negative
+ CONST_INT, see if we can find a register equivalent to the
+ positive constant. Make a MINUS if so. Don't do this for
+ a negative constant since we might then alternate between
+ choosing positive and negative constants. Having the positive
+ constant previously-used is the more common case. */
+ if (const_arg1 && GET_CODE (const_arg1) == CONST_INT
+ && INTVAL (const_arg1) < 0 && GET_CODE (folded_arg1) == REG)
+ {
+ rtx new_const = GEN_INT (- INTVAL (const_arg1));
+ struct table_elt *p
+ = lookup (new_const, safe_hash (new_const, mode) % NBUCKETS,
+ mode);
+
+ if (p)
+ for (p = p->first_same_value; p; p = p->next_same_value)
+ if (GET_CODE (p->exp) == REG)
+ return cse_gen_binary (MINUS, mode, folded_arg0,
+ canon_reg (p->exp, NULL_RTX));
+ }
+ goto from_plus;
+
+ case MINUS:
+ /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
+ If so, produce (PLUS Z C2-C). */
+ if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT)
+ {
+ rtx y = lookup_as_function (XEXP (x, 0), PLUS);
+ if (y && GET_CODE (XEXP (y, 1)) == CONST_INT)
+ return fold_rtx (plus_constant (copy_rtx (y),
+ -INTVAL (const_arg1)),
+ NULL_RTX);
+ }
+
+ /* ... fall through ... */
+
+ from_plus:
+ case SMIN: case SMAX: case UMIN: case UMAX:
+ case IOR: case AND: case XOR:
+ case MULT: case DIV: case UDIV:
+ case ASHIFT: case LSHIFTRT: case ASHIFTRT:
+ /* If we have (<op> <reg> <const_int>) for an associative OP and REG
+ is known to be of similar form, we may be able to replace the
+ operation with a combined operation. This may eliminate the
+ intermediate operation if every use is simplified in this way.
+ Note that the similar optimization done by combine.c only works
+ if the intermediate operation's result has only one reference. */
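+
+ /* (E.g., hypothetically: if REG is known to hold
+ (ashift (reg X) (const_int 2)), then (ashift REG (const_int 3))
+ becomes (ashift (reg X) (const_int 5)), since shift counts
+ associate by addition.)  */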
+
+ if (GET_CODE (folded_arg0) == REG
+ && const_arg1 && GET_CODE (const_arg1) == CONST_INT)
+ {
+ int is_shift
+ = (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
+ rtx y = lookup_as_function (folded_arg0, code);
+ rtx inner_const;
+ enum rtx_code associate_code;
+ rtx new_const;
+
+ if (y == 0
+ || 0 == (inner_const
+ = equiv_constant (fold_rtx (XEXP (y, 1), 0)))
+ || GET_CODE (inner_const) != CONST_INT
+ /* If we have compiled a statement like
+ "if (x == (x & mask1))", and now are looking at
+ "x & mask2", we will have a case where the first operand
+ of Y is the same as our first operand. Unless we detect
+ this case, an infinite loop will result. */
+ || XEXP (y, 0) == folded_arg0)
+ break;
+
+ /* Don't associate these operations if they are a PLUS with the
+ same constant and it is a power of two. These might be doable
+ with a pre- or post-increment. Similarly for two subtracts of
+ identical powers of two with post decrement. */
+
+ if (code == PLUS && INTVAL (const_arg1) == INTVAL (inner_const)
+ && (0
+#if defined(HAVE_PRE_INCREMENT) || defined(HAVE_POST_INCREMENT)
+ || exact_log2 (INTVAL (const_arg1)) >= 0
+#endif
+#if defined(HAVE_PRE_DECREMENT) || defined(HAVE_POST_DECREMENT)
+ || exact_log2 (- INTVAL (const_arg1)) >= 0
+#endif
+ ))
+ break;
+
+ /* Compute the code used to compose the constants. For example,
+ A/C1/C2 is A/(C1 * C2), so if CODE == DIV, we want MULT. */
+
+ associate_code
+ = (code == MULT || code == DIV || code == UDIV ? MULT
+ : is_shift || code == PLUS || code == MINUS ? PLUS : code);
+
+ new_const = simplify_binary_operation (associate_code, mode,
+ const_arg1, inner_const);
+
+ if (new_const == 0)
+ break;
+
+ /* If we are associating shift operations, don't let this
+ produce a shift of the size of the object or larger.
+ This could occur when we follow a sign-extend by a right
+ shift on a machine that does a sign-extend as a pair
+ of shifts. */
+
+ if (is_shift && GET_CODE (new_const) == CONST_INT
+ && INTVAL (new_const) >= GET_MODE_BITSIZE (mode))
+ {
+ /* As an exception, we can turn an ASHIFTRT of this
+ form into a shift of the number of bits - 1. */
+ if (code == ASHIFTRT)
+ new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1);
+ else
+ break;
+ }
+
+ y = copy_rtx (XEXP (y, 0));
+
+ /* If Y contains our first operand (the most common way this
+ can happen is if Y is a MEM), we would go into an infinite
+ loop if we tried to fold it. So don't in that case. */
+
+ if (! reg_mentioned_p (folded_arg0, y))
+ y = fold_rtx (y, insn);
+
+ return cse_gen_binary (code, mode, y, new_const);
+ }
+ }
+
+ new = simplify_binary_operation (code, mode,
+ const_arg0 ? const_arg0 : folded_arg0,
+ const_arg1 ? const_arg1 : folded_arg1);
+ break;
+
+ case 'o':
+ /* (lo_sum (high X) X) is simply X. */
+ if (code == LO_SUM && const_arg0 != 0
+ && GET_CODE (const_arg0) == HIGH
+ && rtx_equal_p (XEXP (const_arg0, 0), const_arg1))
+ return const_arg1;
+ break;
+
+ case '3':
+ case 'b':
+ new = simplify_ternary_operation (code, mode, mode_arg0,
+ const_arg0 ? const_arg0 : folded_arg0,
+ const_arg1 ? const_arg1 : folded_arg1,
+ const_arg2 ? const_arg2 : XEXP (x, 2));
+ break;
+ }
+
+ return new ? new : x;
+}
+
+/* Return a constant value currently equivalent to X.
+ Return 0 if we don't know one. */
+
+static rtx
+equiv_constant (x)
+ rtx x;
+{
+ if (GET_CODE (x) == REG
+ && REGNO_QTY_VALID_P (REGNO (x))
+ && qty_const[reg_qty[REGNO (x)]])
+ x = gen_lowpart_if_possible (GET_MODE (x), qty_const[reg_qty[REGNO (x)]]);
+
+ if (x != 0 && CONSTANT_P (x))
+ return x;
+
+ /* If X is a MEM, try to fold it outside the context of any insn to see if
+ it might be equivalent to a constant. That handles the case where it
+ is a constant-pool reference. Then try to look it up in the hash table
+ in case it is something whose value we have seen before. */
+
+ if (GET_CODE (x) == MEM)
+ {
+ struct table_elt *elt;
+
+ x = fold_rtx (x, NULL_RTX);
+ if (CONSTANT_P (x))
+ return x;
+
+ elt = lookup (x, safe_hash (x, GET_MODE (x)) % NBUCKETS, GET_MODE (x));
+ if (elt == 0)
+ return 0;
+
+ for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
+ if (elt->is_const && CONSTANT_P (elt->exp))
+ return elt->exp;
+ }
+
+ return 0;
+}
+
+/* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a fixed-point
+ number, return an rtx (MEM, SUBREG, or CONST_INT) that refers to the
+ least-significant part of X.
+ MODE specifies how big a part of X to return.
+
+ If the requested operation cannot be done, 0 is returned.
+
+ This is similar to gen_lowpart in emit-rtl.c. */
+
+rtx
+gen_lowpart_if_possible (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ rtx result = gen_lowpart_common (mode, x);
+
+ if (result)
+ return result;
+ else if (GET_CODE (x) == MEM)
+ {
+ /* This is the only other case we handle. */
+ register int offset = 0;
+ rtx new;
+
+#if WORDS_BIG_ENDIAN
+ offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
+#endif
+#if BYTES_BIG_ENDIAN
+ /* Adjust the address so that the address-after-the-data
+ is unchanged. */
+ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
+#endif
+ new = gen_rtx (MEM, mode, plus_constant (XEXP (x, 0), offset));
+ if (! memory_address_p (mode, XEXP (new, 0)))
+ return 0;
+ MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
+ MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x);
+ return new;
+ }
+ else
+ return 0;
+}
+
+/* Given INSN, a jump insn, TAKEN indicates if we are following the "taken"
+ branch. It will be zero if not.
+
+ In certain cases, this can cause us to add an equivalence. For example,
+ if we are following the taken case of
+ if (i == 2)
+ we can add the fact that `i' and `2' are now equivalent.
+
+ In any case, we can record that this comparison was passed. If the same
+ comparison is seen later, we will know its value. */
+
+static void
+record_jump_equiv (insn, taken)
+ rtx insn;
+ int taken;
+{
+ int cond_known_true;
+ rtx op0, op1;
+ enum machine_mode mode, mode0, mode1;
+ int reversed_nonequality = 0;
+ enum rtx_code code;
+
+ /* Ensure this is the right kind of insn. */
+ if (! condjump_p (insn) || simplejump_p (insn))
+ return;
+
+ /* See if this jump condition is known true or false. */
+ if (taken)
+ cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 2) == pc_rtx);
+ else
+ cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx);
+
+ /* Get the type of comparison being done and the operands being compared.
+ If we had to reverse a non-equality condition, record that fact so we
+ know that it isn't valid for floating-point. */
+ code = GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0));
+ op0 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0), insn);
+ op1 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1), insn);
+
+ code = find_comparison_args (code, &op0, &op1, &mode0, &mode1);
+ if (! cond_known_true)
+ {
+ reversed_nonequality = (code != EQ && code != NE);
+ code = reverse_condition (code);
+ }
+
+ /* The mode is the mode of the non-constant. */
+ mode = mode0;
+ if (mode1 != VOIDmode)
+ mode = mode1;
+
+ record_jump_cond (code, mode, op0, op1, reversed_nonequality);
+}
+
+/* We know that comparison CODE applied to OP0 and OP1 in MODE is true.
+ REVERSED_NONEQUALITY is nonzero if CODE had to be reversed.
+ Make any useful entries we can with that information. Called from
+ above function and called recursively. */
+
+static void
+record_jump_cond (code, mode, op0, op1, reversed_nonequality)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx op0, op1;
+ int reversed_nonequality;
+{
+ unsigned op0_hash, op1_hash;
+ int op0_in_memory, op0_in_struct, op1_in_memory, op1_in_struct;
+ struct table_elt *op0_elt, *op1_elt;
+
+ /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG,
+ we know that they are also equal in the smaller mode (this is also
+ true for all smaller modes whether or not there is a SUBREG, but
+ is not worth testing for with no SUBREG). */
+
+ /* Note that GET_MODE (op0) may not equal MODE. */
+ if (code == EQ && GET_CODE (op0) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (op0))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
+ rtx tem = gen_lowpart_if_possible (inner_mode, op1);
+
+ record_jump_cond (code, mode, SUBREG_REG (op0),
+ tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
+ reversed_nonequality);
+ }
+
+ if (code == EQ && GET_CODE (op1) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (op1))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
+ rtx tem = gen_lowpart_if_possible (inner_mode, op0);
+
+ record_jump_cond (code, mode, SUBREG_REG (op1),
+ tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
+ reversed_nonequality);
+ }
+
+ /* Similarly, if this is an NE comparison, and either is a SUBREG
+ making a smaller mode, we know the whole thing is also NE. */
+
+ /* Note that GET_MODE (op0) may not equal MODE;
+ if we test MODE instead, we can get an infinite recursion
+ alternating between two modes each wider than MODE. */
+
+ if (code == NE && GET_CODE (op0) == SUBREG
+ && subreg_lowpart_p (op0)
+ && (GET_MODE_SIZE (GET_MODE (op0))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
+ rtx tem = gen_lowpart_if_possible (inner_mode, op1);
+
+ record_jump_cond (code, mode, SUBREG_REG (op0),
+ tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
+ reversed_nonequality);
+ }
+
+ if (code == NE && GET_CODE (op1) == SUBREG
+ && subreg_lowpart_p (op1)
+ && (GET_MODE_SIZE (GET_MODE (op1))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
+ rtx tem = gen_lowpart_if_possible (inner_mode, op0);
+
+ record_jump_cond (code, mode, SUBREG_REG (op1),
+ tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
+ reversed_nonequality);
+ }
+
+ /* Hash both operands. */
+
+ do_not_record = 0;
+ hash_arg_in_memory = 0;
+ hash_arg_in_struct = 0;
+ op0_hash = HASH (op0, mode);
+ op0_in_memory = hash_arg_in_memory;
+ op0_in_struct = hash_arg_in_struct;
+
+ if (do_not_record)
+ return;
+
+ do_not_record = 0;
+ hash_arg_in_memory = 0;
+ hash_arg_in_struct = 0;
+ op1_hash = HASH (op1, mode);
+ op1_in_memory = hash_arg_in_memory;
+ op1_in_struct = hash_arg_in_struct;
+
+ if (do_not_record)
+ return;
+
+ /* Look up both operands. */
+ op0_elt = lookup (op0, op0_hash, mode);
+ op1_elt = lookup (op1, op1_hash, mode);
+
+ /* If we aren't setting two things equal, all we can do is save this
+ comparison. Similarly if this is floating-point. In the latter
+ case, OP1 might be zero and both -0.0 and 0.0 are equal to it.
+ If we record the equality, we might inadvertently delete code
+ whose intent was to change -0 to +0. */
+
+ if (code != EQ || FLOAT_MODE_P (GET_MODE (op0)))
+ {
+ /* If we reversed a floating-point comparison, if OP0 is not a
+ register, or if OP1 is neither a register nor a constant, we can't
+ do anything. */
+
+ if (GET_CODE (op1) != REG)
+ op1 = equiv_constant (op1);
+
+ if ((reversed_nonequality && FLOAT_MODE_P (mode))
+ || GET_CODE (op0) != REG || op1 == 0)
+ return;
+
+ /* Put OP0 in the hash table if it isn't already. This gives it a
+ new quantity number. */
+ if (op0_elt == 0)
+ {
+ if (insert_regs (op0, NULL_PTR, 0))
+ {
+ rehash_using_reg (op0);
+ op0_hash = HASH (op0, mode);
+
+ /* If OP0 is contained in OP1, this changes its hash code
+ as well. Faster to rehash than to check, except
+ for the simple case of a constant. */
+ if (! CONSTANT_P (op1))
+ op1_hash = HASH (op1, mode);
+ }
+
+ op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
+ op0_elt->in_memory = op0_in_memory;
+ op0_elt->in_struct = op0_in_struct;
+ }
+
+ qty_comparison_code[reg_qty[REGNO (op0)]] = code;
+ if (GET_CODE (op1) == REG)
+ {
+ /* Look it up again--in case op0 and op1 are the same. */
+ op1_elt = lookup (op1, op1_hash, mode);
+
+ /* Put OP1 in the hash table so it gets a new quantity number. */
+ if (op1_elt == 0)
+ {
+ if (insert_regs (op1, NULL_PTR, 0))
+ {
+ rehash_using_reg (op1);
+ op1_hash = HASH (op1, mode);
+ }
+
+ op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
+ op1_elt->in_memory = op1_in_memory;
+ op1_elt->in_struct = op1_in_struct;
+ }
+
+ qty_comparison_qty[reg_qty[REGNO (op0)]] = reg_qty[REGNO (op1)];
+ qty_comparison_const[reg_qty[REGNO (op0)]] = 0;
+ }
+ else
+ {
+ qty_comparison_qty[reg_qty[REGNO (op0)]] = -1;
+ qty_comparison_const[reg_qty[REGNO (op0)]] = op1;
+ }
+
+ return;
+ }
+
+ /* If either side is still missing an equivalence, make it now,
+ then merge the equivalences. */
+
+ if (op0_elt == 0)
+ {
+ if (insert_regs (op0, NULL_PTR, 0))
+ {
+ rehash_using_reg (op0);
+ op0_hash = HASH (op0, mode);
+ }
+
+ op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
+ op0_elt->in_memory = op0_in_memory;
+ op0_elt->in_struct = op0_in_struct;
+ }
+
+ if (op1_elt == 0)
+ {
+ if (insert_regs (op1, NULL_PTR, 0))
+ {
+ rehash_using_reg (op1);
+ op1_hash = HASH (op1, mode);
+ }
+
+ op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
+ op1_elt->in_memory = op1_in_memory;
+ op1_elt->in_struct = op1_in_struct;
+ }
+
+ merge_equiv_classes (op0_elt, op1_elt);
+ last_jump_equiv_class = op0_elt;
+}
+
+/* CSE processing for one instruction.
+ First simplify sources and addresses of all assignments
+ in the instruction, using previously-computed equivalent values.
+ Then install the new sources and destinations in the table
+ of available values.
+
+ If IN_LIBCALL_BLOCK is nonzero, don't record any equivalence made in
+ the insn. */
+
+/* Data on one SET contained in the instruction. */
+
+struct set
+{
+ /* The SET rtx itself. */
+ rtx rtl;
+ /* The SET_SRC of the rtx (the original value, if it is changing). */
+ rtx src;
+ /* The hash-table element for the SET_SRC of the SET. */
+ struct table_elt *src_elt;
+ /* Hash value for the SET_SRC. */
+ unsigned src_hash;
+ /* Hash value for the SET_DEST. */
+ unsigned dest_hash;
+ /* The SET_DEST, with SUBREG, etc., stripped. */
+ rtx inner_dest;
+ /* Place where the pointer to the INNER_DEST was found. */
+ rtx *inner_dest_loc;
+ /* Nonzero if the SET_SRC is in memory. */
+ char src_in_memory;
+ /* Nonzero if the SET_SRC is in a structure. */
+ char src_in_struct;
+ /* Nonzero if the SET_SRC contains something
+ whose value cannot be predicted and understood. */
+ char src_volatile;
+ /* Original machine mode, in case it becomes a CONST_INT. */
+ enum machine_mode mode;
+ /* A constant equivalent for SET_SRC, if any. */
+ rtx src_const;
+ /* Hash value of constant equivalent for SET_SRC. */
+ unsigned src_const_hash;
+ /* Table entry for constant equivalent for SET_SRC, if any. */
+ struct table_elt *src_const_elt;
+};
+
+static void
+cse_insn (insn, in_libcall_block)
+ rtx insn;
+ int in_libcall_block;
+{
+ register rtx x = PATTERN (insn);
+ register int i;
+ rtx tem;
+ register int n_sets = 0;
+
+ /* Records what this insn does to set CC0. */
+ rtx this_insn_cc0 = 0;
+ enum machine_mode this_insn_cc0_mode;
+ struct write_data writes_memory;
+ static struct write_data init = {0, 0, 0, 0};
+
+ rtx src_eqv = 0;
+ struct table_elt *src_eqv_elt = 0;
+ int src_eqv_volatile;
+ int src_eqv_in_memory;
+ int src_eqv_in_struct;
+ unsigned src_eqv_hash;
+
+ struct set *sets;
+
+ this_insn = insn;
+ writes_memory = init;
+
+ /* Find all the SETs and CLOBBERs in this instruction.
+ Record all the SETs in the array `set' and count them.
+ Also determine whether there is a CLOBBER that invalidates
+ all memory references, or all references at varying addresses. */
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
+ if (GET_CODE (XEXP (tem, 0)) == CLOBBER)
+ invalidate (SET_DEST (XEXP (tem, 0)));
+ }
+
+ if (GET_CODE (x) == SET)
+ {
+ sets = (struct set *) alloca (sizeof (struct set));
+ sets[0].rtl = x;
+
+ /* Ignore SETs that are unconditional jumps.
+ They never need cse processing, so this does not hurt.
+ The reason is not efficiency but rather
+ so that we can test at the end for instructions
+ that have been simplified to unconditional jumps
+ and not be misled by unchanged instructions
+ that were unconditional jumps to begin with. */
+ if (SET_DEST (x) == pc_rtx
+ && GET_CODE (SET_SRC (x)) == LABEL_REF)
+ ;
+
+ /* Don't count call-insns, (set (reg 0) (call ...)), as a set.
+ The hard function value register is used only once, to copy to
+ someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)!
+ Ensure we invalidate the destination register. On the 80386 no
+ other code would invalidate it since it is a fixed_reg.
+ We need not check the return of apply_change_group; see canon_reg. */
+
+ else if (GET_CODE (SET_SRC (x)) == CALL)
+ {
+ canon_reg (SET_SRC (x), insn);
+ apply_change_group ();
+ fold_rtx (SET_SRC (x), insn);
+ invalidate (SET_DEST (x));
+ }
+ else
+ n_sets = 1;
+ }
+ else if (GET_CODE (x) == PARALLEL)
+ {
+ register int lim = XVECLEN (x, 0);
+
+ sets = (struct set *) alloca (lim * sizeof (struct set));
+
+ /* Find all regs explicitly clobbered in this insn,
+ and ensure they are not replaced with any other regs
+ elsewhere in this insn.
+ When a reg that is clobbered is also used for input,
+ we should presume that that is for a reason,
+ and we should not substitute some other register
+ which is not supposed to be clobbered.
+ Therefore, this loop cannot be merged into the one below
+ because a CALL may precede a CLOBBER and refer to the
+ value clobbered. We must not let a canonicalization do
+ anything in that case. */
+ for (i = 0; i < lim; i++)
+ {
+ register rtx y = XVECEXP (x, 0, i);
+ if (GET_CODE (y) == CLOBBER)
+ {
+ rtx clobbered = XEXP (y, 0);
+
+ if (GET_CODE (clobbered) == REG
+ || GET_CODE (clobbered) == SUBREG)
+ invalidate (clobbered);
+ else if (GET_CODE (clobbered) == STRICT_LOW_PART
+ || GET_CODE (clobbered) == ZERO_EXTRACT)
+ invalidate (XEXP (clobbered, 0));
+ }
+ }
+
+ for (i = 0; i < lim; i++)
+ {
+ register rtx y = XVECEXP (x, 0, i);
+ if (GET_CODE (y) == SET)
+ {
+ /* As above, we ignore unconditional jumps and call-insns and
+ ignore the result of apply_change_group. */
+ if (GET_CODE (SET_SRC (y)) == CALL)
+ {
+ canon_reg (SET_SRC (y), insn);
+ apply_change_group ();
+ fold_rtx (SET_SRC (y), insn);
+ invalidate (SET_DEST (y));
+ }
+ else if (SET_DEST (y) == pc_rtx
+ && GET_CODE (SET_SRC (y)) == LABEL_REF)
+ ;
+ else
+ sets[n_sets++].rtl = y;
+ }
+ else if (GET_CODE (y) == CLOBBER)
+ {
+ /* If we clobber memory, take note of that,
+ and canon the address.
+ This does nothing when a register is clobbered
+ because we have already invalidated the reg. */
+ if (GET_CODE (XEXP (y, 0)) == MEM)
+ {
+ canon_reg (XEXP (y, 0), NULL_RTX);
+ note_mem_written (XEXP (y, 0), &writes_memory);
+ }
+ }
+ else if (GET_CODE (y) == USE
+ && ! (GET_CODE (XEXP (y, 0)) == REG
+ && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
+ canon_reg (y, NULL_RTX);
+ else if (GET_CODE (y) == CALL)
+ {
+ /* The result of apply_change_group can be ignored; see
+ canon_reg. */
+ canon_reg (y, insn);
+ apply_change_group ();
+ fold_rtx (y, insn);
+ }
+ }
+ }
+ else if (GET_CODE (x) == CLOBBER)
+ {
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ {
+ canon_reg (XEXP (x, 0), NULL_RTX);
+ note_mem_written (XEXP (x, 0), &writes_memory);
+ }
+ }
+
+ /* Canonicalize a USE of a pseudo register or memory location. */
+ else if (GET_CODE (x) == USE
+ && ! (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER))
+ canon_reg (XEXP (x, 0), NULL_RTX);
+ else if (GET_CODE (x) == CALL)
+ {
+ /* The result of apply_change_group can be ignored; see canon_reg. */
+ canon_reg (x, insn);
+ apply_change_group ();
+ fold_rtx (x, insn);
+ }
+
+ /* Store the equivalent value in SRC_EQV, if different, or if the DEST
+ is a STRICT_LOW_PART. The latter condition is necessary because SRC_EQV
+ is handled specially for this case, and if it isn't set, then there will
+ be no equivalence for the destination. */
+ if (n_sets == 1 && REG_NOTES (insn) != 0
+ && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0
+ && (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl))
+ || GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART))
+ src_eqv = canon_reg (XEXP (tem, 0), NULL_RTX);
+
+ /* Canonicalize sources and addresses of destinations.
+ We do this in a separate pass to avoid problems when a MATCH_DUP is
+ present in the insn pattern. In that case, we want to ensure that
+ we don't break the duplicate nature of the pattern. So we will replace
+ both operands at the same time. Otherwise, we would fail to find an
+ equivalent substitution in the loop calling validate_change below.
+
+ We used to suppress canonicalization of DEST if it appears in SRC,
+ but we don't do this any more. */
+
+ for (i = 0; i < n_sets; i++)
+ {
+ rtx dest = SET_DEST (sets[i].rtl);
+ rtx src = SET_SRC (sets[i].rtl);
+ rtx new = canon_reg (src, insn);
+
+ if ((GET_CODE (new) == REG && GET_CODE (src) == REG
+ && ((REGNO (new) < FIRST_PSEUDO_REGISTER)
+ != (REGNO (src) < FIRST_PSEUDO_REGISTER)))
+ || insn_n_dups[recog_memoized (insn)] > 0)
+ validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
+ else
+ SET_SRC (sets[i].rtl) = new;
+
+ if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
+ {
+ validate_change (insn, &XEXP (dest, 1),
+ canon_reg (XEXP (dest, 1), insn), 1);
+ validate_change (insn, &XEXP (dest, 2),
+ canon_reg (XEXP (dest, 2), insn), 1);
+ }
+
+ while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (dest) == MEM)
+ canon_reg (dest, insn);
+ }
+
+ /* Now that we have done all the replacements, we can apply the change
+ group and see if they all work. Note that this will cause some
+ canonicalizations that would have worked individually not to be applied
+ because some other canonicalization didn't work, but this should not
+ occur often.
+
+ The result of apply_change_group can be ignored; see canon_reg. */
+
+ apply_change_group ();
+
+ /* Set sets[i].src_elt to the class each source belongs to.
+ Detect assignments from or to volatile things
+ and set set[i] to zero so they will be ignored
+ in the rest of this function.
+
+ Nothing in this loop changes the hash table or the register chains. */
+
+ for (i = 0; i < n_sets; i++)
+ {
+ register rtx src, dest;
+ register rtx src_folded;
+ register struct table_elt *elt = 0, *p;
+ enum machine_mode mode;
+ rtx src_eqv_here;
+ rtx src_const = 0;
+ rtx src_related = 0;
+ struct table_elt *src_const_elt = 0;
+ int src_cost = 10000, src_eqv_cost = 10000, src_folded_cost = 10000;
+ int src_related_cost = 10000, src_elt_cost = 10000;
+ /* Set non-zero if we need to call force_const_mem on the
+ contents of src_folded before using it. */
+ int src_folded_force_flag = 0;
+
+ dest = SET_DEST (sets[i].rtl);
+ src = SET_SRC (sets[i].rtl);
+
+ /* If SRC is a constant that has no machine mode,
+ hash it with the destination's machine mode.
+ This way we can keep different modes separate. */
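+
+ /* For example, in (set (reg:SI 100) (const_int 4)) the CONST_INT has
+ VOIDmode, so it is hashed here as an SImode value; the same constant
+ stored through a DImode destination hashes separately. (Register
+ number illustrative.) */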
+
+ mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
+ sets[i].mode = mode;
+
+ if (src_eqv)
+ {
+ enum machine_mode eqvmode = mode;
+ if (GET_CODE (dest) == STRICT_LOW_PART)
+ eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
+ do_not_record = 0;
+ hash_arg_in_memory = 0;
+ hash_arg_in_struct = 0;
+ src_eqv = fold_rtx (src_eqv, insn);
+ src_eqv_hash = HASH (src_eqv, eqvmode);
+
+ /* Find the equivalence class for the equivalent expression. */
+
+ if (!do_not_record)
+ src_eqv_elt = lookup (src_eqv, src_eqv_hash, eqvmode);
+
+ src_eqv_volatile = do_not_record;
+ src_eqv_in_memory = hash_arg_in_memory;
+ src_eqv_in_struct = hash_arg_in_struct;
+ }
+
+ /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the
+ value of the INNER register, not the destination. So it is not
+ a legal substitution for the source. But save it for later. */
+ if (GET_CODE (dest) == STRICT_LOW_PART)
+ src_eqv_here = 0;
+ else
+ src_eqv_here = src_eqv;
+
+ /* Simplify any foldable subexpressions in SRC. Then get the fully-
+ simplified result, which may not necessarily be valid. */
+ src_folded = fold_rtx (src, insn);
+
+ /* If storing a constant in a bitfield, pre-truncate the constant
+ so we will be able to record it later. */
+ if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
+ || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
+ {
+ rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
+
+ if (GET_CODE (src) == CONST_INT
+ && GET_CODE (width) == CONST_INT
+ && INTVAL (width) < HOST_BITS_PER_WIDE_INT
+ && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
+ src_folded
+ = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1
+ << INTVAL (width)) - 1));
+ }
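+
+ /* A worked instance of the truncation above: storing (const_int 0x13)
+ into a 4-bit ZERO_EXTRACT masks it with (1 << 4) - 1 = 0xf, so we
+ record 0x3, the value the field will actually hold. */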
+
+ /* Compute SRC's hash code, and also notice if it
+ should not be recorded at all. In that case,
+ prevent any further processing of this assignment. */
+ do_not_record = 0;
+ hash_arg_in_memory = 0;
+ hash_arg_in_struct = 0;
+
+ sets[i].src = src;
+ sets[i].src_hash = HASH (src, mode);
+ sets[i].src_volatile = do_not_record;
+ sets[i].src_in_memory = hash_arg_in_memory;
+ sets[i].src_in_struct = hash_arg_in_struct;
+
+#if 0
+ /* It is no longer clear why we used to do this, but it doesn't
+ appear to still be needed. So let's try without it since this
+ code hurts cse'ing widened ops. */
+ /* If source is a perverse subreg (such as QI treated as an SI),
+ treat it as volatile. It may do the work of an SI in one context
+ where the extra bits are not being used, but cannot replace an SI
+ in general. */
+ if (GET_CODE (src) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (src))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
+ sets[i].src_volatile = 1;
+#endif
+
+ /* Locate all possible equivalent forms for SRC. Try to replace
+ SRC in the insn with each cheaper equivalent.
+
+ We have the following types of equivalents: SRC itself, a folded
+ version, a value given in a REG_EQUAL note, or a value related
+ to a constant.
+
+ Each of these equivalents may be part of an additional class
+ of equivalents (if more than one is in the table, they must be in
+ the same class; we check for this).
+
+ If the source is volatile, we don't do any table lookups.
+
+ We note any constant equivalent for possible later use in a
+ REG_NOTE. */
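+
+ /* As an illustration: for (set (reg 100) (plus (reg 101) (const_int 0)))
+ SRC is the PLUS, the folded form may simply be (reg 101), a REG_EQUAL
+ note may supply a known constant, and a related value could be a
+ register already holding a nearby constant. (All names illustrative.) */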
+
+ if (!sets[i].src_volatile)
+ elt = lookup (src, sets[i].src_hash, mode);
+
+ sets[i].src_elt = elt;
+
+ if (elt && src_eqv_here && src_eqv_elt)
+ {
+ if (elt->first_same_value != src_eqv_elt->first_same_value)
+ {
+ /* The REG_EQUAL is indicating that two formerly distinct
+ classes are now equivalent. So merge them. */
+ merge_equiv_classes (elt, src_eqv_elt);
+ src_eqv_hash = HASH (src_eqv, elt->mode);
+ src_eqv_elt = lookup (src_eqv, src_eqv_hash, elt->mode);
+ }
+
+ src_eqv_here = 0;
+ }
+
+ else if (src_eqv_elt)
+ elt = src_eqv_elt;
+
+ /* Try to find a constant somewhere and record it in `src_const'.
+ Record its table element, if any, in `src_const_elt'. Look in
+ any known equivalences first. (If the constant is not in the
+ table, also set `sets[i].src_const_hash'). */
+ if (elt)
+ for (p = elt->first_same_value; p; p = p->next_same_value)
+ if (p->is_const)
+ {
+ src_const = p->exp;
+ src_const_elt = elt;
+ break;
+ }
+
+ if (src_const == 0
+ && (CONSTANT_P (src_folded)
+ /* Consider (minus (label_ref L1) (label_ref L2)) as
+ "constant" here so we will record it. This allows us
+ to fold switch statements when an ADDR_DIFF_VEC is used. */
+ || (GET_CODE (src_folded) == MINUS
+ && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF
+ && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF)))
+ src_const = src_folded, src_const_elt = elt;
+ else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here))
+ src_const = src_eqv_here, src_const_elt = src_eqv_elt;
+
+ /* If we don't know if the constant is in the table, get its
+ hash code and look it up. */
+ if (src_const && src_const_elt == 0)
+ {
+ sets[i].src_const_hash = HASH (src_const, mode);
+ src_const_elt = lookup (src_const, sets[i].src_const_hash, mode);
+ }
+
+ sets[i].src_const = src_const;
+ sets[i].src_const_elt = src_const_elt;
+
+ /* If the constant and our source are both in the table, mark them as
+ equivalent. Otherwise, if a constant is in the table but the source
+ isn't, set ELT to it. */
+ if (src_const_elt && elt
+ && src_const_elt->first_same_value != elt->first_same_value)
+ merge_equiv_classes (elt, src_const_elt);
+ else if (src_const_elt && elt == 0)
+ elt = src_const_elt;
+
+ /* See if there is a register linearly related to a constant
+ equivalent of SRC. */
+ if (src_const
+ && (GET_CODE (src_const) == CONST
+ || (src_const_elt && src_const_elt->related_value != 0)))
+ {
+ src_related = use_related_value (src_const, src_const_elt);
+ if (src_related)
+ {
+ struct table_elt *src_related_elt
+ = lookup (src_related, HASH (src_related, mode), mode);
+ if (src_related_elt && elt)
+ {
+ if (elt->first_same_value
+ != src_related_elt->first_same_value)
+ /* This can occur when we previously saw a CONST
+ involving a SYMBOL_REF and then see the SYMBOL_REF
+ twice. Merge the involved classes. */
+ merge_equiv_classes (elt, src_related_elt);
+
+ src_related = 0;
+ src_related_elt = 0;
+ }
+ else if (src_related_elt && elt == 0)
+ elt = src_related_elt;
+ }
+ }
+
+ /* See if we have a CONST_INT that is already in a register in a
+ wider mode. */
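+
+ /* For instance, on a machine with 64-bit words, if (const_int 5) is
+ already known to live in (reg:DI 100), an SImode use of the constant
+ can take the low part of that register instead of reloading 5;
+ gen_lowpart_if_possible below builds that low-part reference.
+ (Register number illustrative.) */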
+
+ if (src_const && src_related == 0 && GET_CODE (src_const) == CONST_INT
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
+ {
+ enum machine_mode wider_mode;
+
+ for (wider_mode = GET_MODE_WIDER_MODE (mode);
+ GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
+ && src_related == 0;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ struct table_elt *const_elt
+ = lookup (src_const, HASH (src_const, wider_mode), wider_mode);
+
+ if (const_elt == 0)
+ continue;
+
+ for (const_elt = const_elt->first_same_value;
+ const_elt; const_elt = const_elt->next_same_value)
+ if (GET_CODE (const_elt->exp) == REG)
+ {
+ src_related = gen_lowpart_if_possible (mode,
+ const_elt->exp);
+ break;
+ }
+ }
+ }
+
+ /* Another possibility is that we have an AND with a constant in
+ a mode narrower than a word. If so, it might have been generated
+ as part of an "if" which would narrow the AND. If we already
+ have done the AND in a wider mode, we can use a SUBREG of that
+ value. */
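+
+ /* Sketch of the case handled here, on a machine with 32-bit words:
+ needing (and:HI X (const_int 255)) when (and:SI X' (const_int 255))
+ was already computed, where X' is the SImode version of X, lets us
+ take the HImode low part of the earlier result instead of redoing
+ the AND. */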
+
+ if (flag_expensive_optimizations && ! src_related
+ && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT
+ && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ {
+ enum machine_mode tmode;
+ rtx new_and = gen_rtx (AND, VOIDmode, NULL_RTX, XEXP (src, 1));
+
+ for (tmode = GET_MODE_WIDER_MODE (mode);
+ GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
+ tmode = GET_MODE_WIDER_MODE (tmode))
+ {
+ rtx inner = gen_lowpart_if_possible (tmode, XEXP (src, 0));
+ struct table_elt *larger_elt;
+
+ if (inner)
+ {
+ PUT_MODE (new_and, tmode);
+ XEXP (new_and, 0) = inner;
+ larger_elt = lookup (new_and, HASH (new_and, tmode), tmode);
+ if (larger_elt == 0)
+ continue;
+
+ for (larger_elt = larger_elt->first_same_value;
+ larger_elt; larger_elt = larger_elt->next_same_value)
+ if (GET_CODE (larger_elt->exp) == REG)
+ {
+ src_related
+ = gen_lowpart_if_possible (mode, larger_elt->exp);
+ break;
+ }
+
+ if (src_related)
+ break;
+ }
+ }
+ }
+
+#ifdef LOAD_EXTEND_OP
+ /* See if a MEM has already been loaded with a widening operation;
+ if it has, we can use a subreg of that. Many CISC machines
+ also have such operations, but this is only likely to be
+ beneficial on these machines. */
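+
+ /* For example, on a machine whose loads sign-extend, so that
+ LOAD_EXTEND_OP (HImode) is SIGN_EXTEND, a previously recorded
+ (sign_extend:SI (mem:HI ...)) means some register already holds the
+ extended value, and the lookup below can reuse its low part. */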
+
+ if (flag_expensive_optimizations && src_related == 0
+ && (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_CODE (src) == MEM && ! do_not_record
+ && LOAD_EXTEND_OP (mode) != NIL)
+ {
+ enum machine_mode tmode;
+
+ /* Set what we are trying to extend and the operation it might
+ have been extended with. */
+ PUT_CODE (memory_extend_rtx, LOAD_EXTEND_OP (mode));
+ XEXP (memory_extend_rtx, 0) = src;
+
+ for (tmode = GET_MODE_WIDER_MODE (mode);
+ GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
+ tmode = GET_MODE_WIDER_MODE (tmode))
+ {
+ struct table_elt *larger_elt;
+
+ PUT_MODE (memory_extend_rtx, tmode);
+ larger_elt = lookup (memory_extend_rtx,
+ HASH (memory_extend_rtx, tmode), tmode);
+ if (larger_elt == 0)
+ continue;
+
+ for (larger_elt = larger_elt->first_same_value;
+ larger_elt; larger_elt = larger_elt->next_same_value)
+ if (GET_CODE (larger_elt->exp) == REG)
+ {
+ src_related = gen_lowpart_if_possible (mode,
+ larger_elt->exp);
+ break;
+ }
+
+ if (src_related)
+ break;
+ }
+ }
+#endif /* LOAD_EXTEND_OP */
+
+ if (src == src_folded)
+ src_folded = 0;
+
+ /* At this point, ELT, if non-zero, points to a class of expressions
+ equivalent to the source of this SET and SRC, SRC_EQV, SRC_FOLDED,
+ and SRC_RELATED, if non-zero, each contain additional equivalent
+ expressions. Prune these latter expressions by deleting expressions
+ already in the equivalence class.
+
+ Check for an equivalent identical to the destination. If found,
+ this is the preferred equivalent since it will likely lead to
+ elimination of the insn. Indicate this by placing it in
+ `src_related'. */
+
+ if (elt) elt = elt->first_same_value;
+ for (p = elt; p; p = p->next_same_value)
+ {
+ enum rtx_code code = GET_CODE (p->exp);
+
+ /* If the expression is not valid, ignore it. Then we do not
+ have to check for validity below. In most cases, we can use
+ `rtx_equal_p', since canonicalization has already been done. */
+ if (code != REG && ! exp_equiv_p (p->exp, p->exp, 1, 0))
+ continue;
+
+ if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp))
+ src = 0;
+ else if (src_folded && GET_CODE (src_folded) == code
+ && rtx_equal_p (src_folded, p->exp))
+ src_folded = 0;
+ else if (src_eqv_here && GET_CODE (src_eqv_here) == code
+ && rtx_equal_p (src_eqv_here, p->exp))
+ src_eqv_here = 0;
+ else if (src_related && GET_CODE (src_related) == code
+ && rtx_equal_p (src_related, p->exp))
+ src_related = 0;
+
+ /* If this is the same as the destination of the insn, we want
+ to prefer it. Copy it to src_related. The code below will
+ then give it a negative cost. */
+ if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest))
+ src_related = dest;
+
+ }
+
+ /* Find the cheapest valid equivalent, trying all the available
+ possibilities. Prefer items not in the hash table to ones
+ that are when they are equal cost. Note that we can never
+ worsen an insn as the current contents will also succeed.
+ If we find an equivalent identical to the destination, use it as best,
+ since this insn will probably be eliminated in that case. */
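+
+ /* The -1 costs below act as sentinels: an equivalent identical to DEST
+ always compares cheaper than any real rtx cost, so it wins the
+ selection loop and the insn can become a no-op set that later code
+ recognizes. */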
+ if (src)
+ {
+ if (rtx_equal_p (src, dest))
+ src_cost = -1;
+ else
+ src_cost = COST (src);
+ }
+
+ if (src_eqv_here)
+ {
+ if (rtx_equal_p (src_eqv_here, dest))
+ src_eqv_cost = -1;
+ else
+ src_eqv_cost = COST (src_eqv_here);
+ }
+
+ if (src_folded)
+ {
+ if (rtx_equal_p (src_folded, dest))
+ src_folded_cost = -1;
+ else
+ src_folded_cost = COST (src_folded);
+ }
+
+ if (src_related)
+ {
+ if (rtx_equal_p (src_related, dest))
+ src_related_cost = -1;
+ else
+ src_related_cost = COST (src_related);
+ }
+
+ /* If this was an indirect jump insn, a known label will really be
+ cheaper even though it looks more expensive. */
+ if (dest == pc_rtx && src_const && GET_CODE (src_const) == LABEL_REF)
+ src_folded = src_const, src_folded_cost = -1;
+
+ /* Terminate loop when replacement made. This must terminate since
+ the current contents will be tested and will always be valid. */
+ while (1)
+ {
+ rtx trial;
+
+ /* Skip invalid entries. */
+ while (elt && GET_CODE (elt->exp) != REG
+ && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
+ elt = elt->next_same_value;
+
+ if (elt) src_elt_cost = elt->cost;
+
+ /* Find cheapest and skip it for the next time. For items
+ of equal cost, use this order:
+ src_folded, src, src_eqv, src_related and hash table entry. */
+ if (src_folded_cost <= src_cost
+ && src_folded_cost <= src_eqv_cost
+ && src_folded_cost <= src_related_cost
+ && src_folded_cost <= src_elt_cost)
+ {
+ trial = src_folded, src_folded_cost = 10000;
+ if (src_folded_force_flag)
+ trial = force_const_mem (mode, trial);
+ }
+ else if (src_cost <= src_eqv_cost
+ && src_cost <= src_related_cost
+ && src_cost <= src_elt_cost)
+ trial = src, src_cost = 10000;
+ else if (src_eqv_cost <= src_related_cost
+ && src_eqv_cost <= src_elt_cost)
+ trial = copy_rtx (src_eqv_here), src_eqv_cost = 10000;
+ else if (src_related_cost <= src_elt_cost)
+ trial = copy_rtx (src_related), src_related_cost = 10000;
+ else
+ {
+ trial = copy_rtx (elt->exp);
+ elt = elt->next_same_value;
+ src_elt_cost = 10000;
+ }
+
+ /* We don't normally have an insn matching (set (pc) (pc)), so
+ check for this separately here. We will delete such an
+ insn below.
+
+ Tablejump insns contain a USE of the table, so simply replacing
+ the operand with the constant won't match. This is simply an
+ unconditional branch, however, and is therefore valid. Just
+ insert the substitution here and we will delete and re-emit
+ the insn later. */
+
+ if (n_sets == 1 && dest == pc_rtx
+ && (trial == pc_rtx
+ || (GET_CODE (trial) == LABEL_REF
+ && ! condjump_p (insn))))
+ {
+ /* If TRIAL is a label in front of a jump table, we are
+ really falling through the switch (this is how casesi
+ insns work), so we must branch around the table. */
+ if (GET_CODE (trial) == CODE_LABEL
+ && NEXT_INSN (trial) != 0
+ && GET_CODE (NEXT_INSN (trial)) == JUMP_INSN
+ && (GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_VEC))
+
+ trial = gen_rtx (LABEL_REF, Pmode, get_label_after (trial));
+
+ SET_SRC (sets[i].rtl) = trial;
+ cse_jumps_altered = 1;
+ break;
+ }
+
+ /* Look for a substitution that makes a valid insn. */
+ else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0))
+ {
+ /* The result of apply_change_group can be ignored; see
+ canon_reg. */
+
+ validate_change (insn, &SET_SRC (sets[i].rtl),
+ canon_reg (SET_SRC (sets[i].rtl), insn),
+ 1);
+ apply_change_group ();
+ break;
+ }
+
+ /* If we previously found constant pool entries for
+ constants and this is a constant, try making a
+ pool entry. Put it in src_folded unless we already have done
+ this since that is where it likely came from. */
+
+ else if (constant_pool_entries_cost
+ && CONSTANT_P (trial)
+ && (src_folded == 0 || GET_CODE (src_folded) != MEM)
+ && GET_MODE_CLASS (mode) != MODE_CC)
+ {
+ src_folded_force_flag = 1;
+ src_folded = trial;
+ src_folded_cost = constant_pool_entries_cost;
+ }
+ }
+
+ src = SET_SRC (sets[i].rtl);
+
+ /* In general, it is good to have a SET with SET_SRC == SET_DEST.
+ However, there is an important exception: If both are registers
+ that are not the head of their equivalence class, replace SET_SRC
+ with the head of the class. If we do not do this, we will have
+ both registers live over a portion of the basic block. This way,
+ their lifetimes will likely abut instead of overlapping. */
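+
+ /* Concretely, with illustrative pseudos: if the insn has become
+ (set (reg 101) (reg 101)) but pseudo 100 heads the equivalence class,
+ we emit (set (reg 101) (reg 100)) instead, so only pseudo 100 need
+ stay live up to this point. */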
+ if (GET_CODE (dest) == REG
+ && REGNO_QTY_VALID_P (REGNO (dest))
+ && qty_mode[reg_qty[REGNO (dest)]] == GET_MODE (dest)
+ && qty_first_reg[reg_qty[REGNO (dest)]] != REGNO (dest)
+ && GET_CODE (src) == REG && REGNO (src) == REGNO (dest)
+ /* Don't do this if the original insn had a hard reg as
+ SET_SRC. */
+ && (GET_CODE (sets[i].src) != REG
+ || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER))
+ /* We can't call canon_reg here because it won't do anything if
+ SRC is a hard register. */
+ {
+ int first = qty_first_reg[reg_qty[REGNO (src)]];
+
+ src = SET_SRC (sets[i].rtl)
+ = first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
+ : gen_rtx (REG, GET_MODE (src), first);
+
+ /* If we had a constant that is cheaper than what we are now
+ setting SRC to, use that constant. We ignored it when we
+ thought we could make this into a no-op. */
+ if (src_const && COST (src_const) < COST (src)
+ && validate_change (insn, &SET_SRC (sets[i].rtl), src_const, 0))
+ src = src_const;
+ }
+
+ /* If we made a change, recompute SRC values. */
+ if (src != sets[i].src)
+ {
+ do_not_record = 0;
+ hash_arg_in_memory = 0;
+ hash_arg_in_struct = 0;
+ sets[i].src = src;
+ sets[i].src_hash = HASH (src, mode);
+ sets[i].src_volatile = do_not_record;
+ sets[i].src_in_memory = hash_arg_in_memory;
+ sets[i].src_in_struct = hash_arg_in_struct;
+ sets[i].src_elt = lookup (src, sets[i].src_hash, mode);
+ }
+
+ /* If this is a single SET, we are setting a register, and we have an
+ equivalent constant, we want to add a REG_NOTE. We don't want
+ to write a REG_EQUAL note for a constant pseudo since verifying that
+ that pseudo hasn't been eliminated is a pain. Such a note also
+ won't help anything. */
+ if (n_sets == 1 && src_const && GET_CODE (dest) == REG
+ && GET_CODE (src_const) != REG)
+ {
+ tem = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+
+ /* Record the actual constant value in a REG_EQUAL note, making
+ a new one if one does not already exist. */
+ if (tem)
+ XEXP (tem, 0) = src_const;
+ else
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL,
+ src_const, REG_NOTES (insn));
+
+ /* If storing a constant value in a register that
+ previously held the constant value 0,
+ record this fact with a REG_WAS_0 note on this insn.
+
+ Note that the *register* is required to have previously held 0,
+ not just any register in the quantity and we must point to the
+ insn that set that register to zero.
+
+ Rather than track each register individually, we just see if
+ the last set for this quantity was for this register. */
+
+ if (REGNO_QTY_VALID_P (REGNO (dest))
+ && qty_const[reg_qty[REGNO (dest)]] == const0_rtx)
+ {
+ /* See if we previously had a REG_WAS_0 note. */
+ rtx note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
+ rtx const_insn = qty_const_insn[reg_qty[REGNO (dest)]];
+
+ if ((tem = single_set (const_insn)) != 0
+ && rtx_equal_p (SET_DEST (tem), dest))
+ {
+ if (note)
+ XEXP (note, 0) = const_insn;
+ else
+ REG_NOTES (insn) = gen_rtx (INSN_LIST, REG_WAS_0,
+ const_insn, REG_NOTES (insn));
+ }
+ }
+ }
+
+ /* Now deal with the destination. */
+ do_not_record = 0;
+ sets[i].inner_dest_loc = &SET_DEST (sets[i].rtl);
+
+ /* Look within any SIGN_EXTRACT or ZERO_EXTRACT
+ to the MEM or REG within it. */
+ while (GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ {
+ sets[i].inner_dest_loc = &XEXP (dest, 0);
+ dest = XEXP (dest, 0);
+ }
+
+ sets[i].inner_dest = dest;
+
+ if (GET_CODE (dest) == MEM)
+ {
+ dest = fold_rtx (dest, insn);
+
+ /* Decide whether we invalidate everything in memory,
+ or just things at non-fixed places.
+ Writing a large aggregate must invalidate everything
+ because we don't know how long it is. */
+ note_mem_written (dest, &writes_memory);
+ }
+
+ /* Compute the hash code of the destination now,
+ before the effects of this instruction are recorded,
+ since the register values used in the address computation
+ are those before this instruction. */
+ sets[i].dest_hash = HASH (dest, mode);
+
+ /* Don't enter a bit-field in the hash table
+ because the value in it after the store
+ may not equal what was stored, due to truncation. */
+
+ if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
+ || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
+ {
+ rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
+
+ if (src_const != 0 && GET_CODE (src_const) == CONST_INT
+ && GET_CODE (width) == CONST_INT
+ && INTVAL (width) < HOST_BITS_PER_WIDE_INT
+ && ! (INTVAL (src_const)
+ & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
+ /* Exception: if the value is constant,
+ and it won't be truncated, record it. */
+ ;
+ else
+ {
+ /* This is chosen so that the destination will be invalidated
+ but no new value will be recorded.
+ We must invalidate because sometimes constant
+ values can be recorded for bitfields. */
+ sets[i].src_elt = 0;
+ sets[i].src_volatile = 1;
+ src_eqv = 0;
+ src_eqv_elt = 0;
+ }
+ }
+
+ /* If only one set in a JUMP_INSN and it is now a no-op, we can delete
+ the insn. */
+ else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ cse_jumps_altered = 1;
+ /* One less use of the label this insn used to jump to. */
+ --LABEL_NUSES (JUMP_LABEL (insn));
+ /* No more processing for this set. */
+ sets[i].rtl = 0;
+ }
+
+ /* If this SET is now setting PC to a label, we know it used to
+ be a conditional or computed branch. So we see if we can follow
+ it. If it was a computed branch, delete it and re-emit. */
+ else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF)
+ {
+ rtx p;
+
+ /* If this is not in the format for a simple branch and
+ this is the only SET in the insn, re-emit it. */
+ if (! simplejump_p (insn) && n_sets == 1)
+ {
+ rtx new = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
+ JUMP_LABEL (new) = XEXP (src, 0);
+ LABEL_NUSES (XEXP (src, 0))++;
+ delete_insn (insn);
+ insn = new;
+ }
+ else
+ /* Otherwise, force rerecognition, since it probably had
+ a different pattern before.
+ This shouldn't really be necessary, since whatever
+ changed the source value above should have done this.
+ Until the right place is found, might as well do this here. */
+ INSN_CODE (insn) = -1;
+
+ /* Now that we've converted this jump to an unconditional jump,
+ there is dead code after it. Delete the dead code until we
+ reach a BARRIER, the end of the function, or a label. Do
+ not delete NOTEs except for NOTE_INSN_DELETED since later
+ phases assume these notes are retained. */
+
+ p = insn;
+
+ while (NEXT_INSN (p) != 0
+ && GET_CODE (NEXT_INSN (p)) != BARRIER
+ && GET_CODE (NEXT_INSN (p)) != CODE_LABEL)
+ {
+ if (GET_CODE (NEXT_INSN (p)) != NOTE
+ || NOTE_LINE_NUMBER (NEXT_INSN (p)) == NOTE_INSN_DELETED)
+ delete_insn (NEXT_INSN (p));
+ else
+ p = NEXT_INSN (p);
+ }
+
+ /* If we don't have a BARRIER immediately after INSN, put one there.
+ Much code assumes that there are no NOTEs between a JUMP_INSN and
+ BARRIER. */
+
+ if (NEXT_INSN (insn) == 0
+ || GET_CODE (NEXT_INSN (insn)) != BARRIER)
+ emit_barrier_after (insn);
+
+ /* We might have two BARRIERs separated by notes. Delete the second
+ one if so. */
+
+ if (p != insn && NEXT_INSN (p) != 0
+ && GET_CODE (NEXT_INSN (p)) == BARRIER)
+ delete_insn (NEXT_INSN (p));
+
+ cse_jumps_altered = 1;
+ sets[i].rtl = 0;
+ }
+
+ /* If destination is volatile, invalidate it and then do no further
+ processing for this assignment. */
+
+ else if (do_not_record)
+ {
+ if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == MEM)
+ invalidate (dest);
+ else if (GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTRACT)
+ invalidate (XEXP (dest, 0));
+ sets[i].rtl = 0;
+ }
+
+ if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl))
+ sets[i].dest_hash = HASH (SET_DEST (sets[i].rtl), mode);
+
+#ifdef HAVE_cc0
+ /* If setting CC0, record what it was set to, or a constant, if it
+ is equivalent to a constant. If it is being set to a floating-point
+ value, make a COMPARE with the appropriate constant of 0. If we
+ don't do this, later code can interpret this as a test against
+ const0_rtx, which can cause problems if we try to put it into an
+ insn as a floating-point operand. */
+ if (dest == cc0_rtx)
+ {
+ this_insn_cc0 = src_const && mode != VOIDmode ? src_const : src;
+ this_insn_cc0_mode = mode;
+ if (FLOAT_MODE_P (mode))
+ this_insn_cc0 = gen_rtx (COMPARE, VOIDmode, this_insn_cc0,
+ CONST0_RTX (mode));
+ }
+#endif
+ }
+
+ /* Now enter all non-volatile source expressions in the hash table
+ if they are not already present.
+ Record their equivalence classes in src_elt.
+ This way we can insert the corresponding destinations into
+ the same classes even if the actual sources are no longer in them
+ (having been invalidated). */
+
+ if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0 && ! src_eqv_volatile
+ && ! rtx_equal_p (src_eqv, SET_DEST (sets[0].rtl)))
+ {
+ register struct table_elt *elt;
+ register struct table_elt *classp = sets[0].src_elt;
+ rtx dest = SET_DEST (sets[0].rtl);
+ enum machine_mode eqvmode = GET_MODE (dest);
+
+ if (GET_CODE (dest) == STRICT_LOW_PART)
+ {
+ eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
+ classp = 0;
+ }
+ if (insert_regs (src_eqv, classp, 0))
+ src_eqv_hash = HASH (src_eqv, eqvmode);
+ elt = insert (src_eqv, classp, src_eqv_hash, eqvmode);
+ elt->in_memory = src_eqv_in_memory;
+ elt->in_struct = src_eqv_in_struct;
+ src_eqv_elt = elt;
+
+ /* Check to see if src_eqv_elt is the same as a set source which
+ does not yet have an elt, and if so set the elt of the set source
+ to src_eqv_elt. */
+ for (i = 0; i < n_sets; i++)
+ if (sets[i].rtl && sets[i].src_elt == 0
+ && rtx_equal_p (SET_SRC (sets[i].rtl), src_eqv))
+ sets[i].src_elt = src_eqv_elt;
+ }
+
+ for (i = 0; i < n_sets; i++)
+ if (sets[i].rtl && ! sets[i].src_volatile
+ && ! rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl)))
+ {
+ if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART)
+ {
+ /* REG_EQUAL in setting a STRICT_LOW_PART
+ gives an equivalent for the entire destination register,
+ not just for the subreg being stored in now.
+ This is a more interesting equivalence, so we arrange later
+ to treat the entire reg as the destination. */
+ sets[i].src_elt = src_eqv_elt;
+ sets[i].src_hash = src_eqv_hash;
+ }
+ else
+ {
+ /* Insert source and constant equivalent into hash table, if not
+ already present. */
+ register struct table_elt *classp = src_eqv_elt;
+ register rtx src = sets[i].src;
+ register rtx dest = SET_DEST (sets[i].rtl);
+ enum machine_mode mode
+ = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
+
+ if (sets[i].src_elt == 0)
+ {
+ register struct table_elt *elt;
+
+ /* Note that these insert_regs calls cannot remove
+ any of the src_elt's, because they would have failed to
+ match if not still valid. */
+ if (insert_regs (src, classp, 0))
+ sets[i].src_hash = HASH (src, mode);
+ elt = insert (src, classp, sets[i].src_hash, mode);
+ elt->in_memory = sets[i].src_in_memory;
+ elt->in_struct = sets[i].src_in_struct;
+ sets[i].src_elt = classp = elt;
+ }
+
+ if (sets[i].src_const && sets[i].src_const_elt == 0
+ && src != sets[i].src_const
+ && ! rtx_equal_p (sets[i].src_const, src))
+ sets[i].src_elt = insert (sets[i].src_const, classp,
+ sets[i].src_const_hash, mode);
+ }
+ }
+ else if (sets[i].src_elt == 0)
+ /* If we did not insert the source into the hash table (e.g., it was
+ volatile), note the equivalence class for the REG_EQUAL value, if any,
+ so that the destination goes into that class. */
+ sets[i].src_elt = src_eqv_elt;
+
+ invalidate_from_clobbers (&writes_memory, x);
+
+ /* Some registers are invalidated by subroutine calls. Memory is
+ invalidated by non-constant calls. */
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ static struct write_data everything = {0, 1, 1, 1};
+
+ if (! CONST_CALL_P (insn))
+ invalidate_memory (&everything);
+ invalidate_for_call ();
+ }
+
+ /* Now invalidate everything set by this instruction.
+ If a SUBREG or other funny destination is being set,
+ sets[i].rtl is still nonzero, so here we invalidate the reg
+ a part of which is being set. */
+
+ for (i = 0; i < n_sets; i++)
+ if (sets[i].rtl)
+ {
+ register rtx dest = sets[i].inner_dest;
+
+ /* Needed for registers to remove the register from its
+ previous quantity's chain.
+ Needed for memory if this is a nonvarying address, unless
+ we have just done an invalidate_memory that covers even those. */
+ if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
+ || (! writes_memory.all && ! cse_rtx_addr_varies_p (dest)))
+ invalidate (dest);
+ else if (GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTRACT)
+ invalidate (XEXP (dest, 0));
+ }
+
+ /* Make sure registers mentioned in destinations
+ are safe for use in an expression to be inserted.
+ This removes from the hash table
+ any invalid entry that refers to one of these registers.
+
+ We don't care about the return value from mention_regs because
+ we are going to hash the SET_DEST values unconditionally. */
+
+ for (i = 0; i < n_sets; i++)
+ if (sets[i].rtl && GET_CODE (SET_DEST (sets[i].rtl)) != REG)
+ mention_regs (SET_DEST (sets[i].rtl));
+
+ /* We may have just removed some of the src_elt's from the hash table.
+ So replace each one with the current head of the same class. */
+
+ for (i = 0; i < n_sets; i++)
+ if (sets[i].rtl)
+ {
+ if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
+ /* If elt was removed, find current head of same class,
+ or 0 if nothing remains of that class. */
+ {
+ register struct table_elt *elt = sets[i].src_elt;
+
+ while (elt && elt->prev_same_value)
+ elt = elt->prev_same_value;
+
+ while (elt && elt->first_same_value == 0)
+ elt = elt->next_same_value;
+ sets[i].src_elt = elt ? elt->first_same_value : 0;
+ }
+ }
+
+ /* Now insert the destinations into their equivalence classes. */
+
+ for (i = 0; i < n_sets; i++)
+ if (sets[i].rtl)
+ {
+ register rtx dest = SET_DEST (sets[i].rtl);
+ register struct table_elt *elt;
+
+ /* Don't record value if we are not supposed to risk allocating
+ floating-point values in registers that might be wider than
+ memory. */
+ if ((flag_float_store
+ && GET_CODE (dest) == MEM
+ && FLOAT_MODE_P (GET_MODE (dest)))
+ /* Don't record values of destinations set inside a libcall block
+ since we might delete the libcall. Things should have been set
+ up so we won't want to reuse such a value, but we play it safe
+ here. */
+ || in_libcall_block
+ /* If we didn't put a REG_EQUAL value or a source into the hash
+ table, there is no point in recording DEST. */
+ || sets[i].src_elt == 0)
+ continue;
+
+ /* STRICT_LOW_PART isn't part of the value BEING set,
+ and neither is the SUBREG inside it.
+ Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT. */
+ if (GET_CODE (dest) == STRICT_LOW_PART)
+ dest = SUBREG_REG (XEXP (dest, 0));
+
+ if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG)
+ /* Registers must also be inserted into chains for quantities. */
+ if (insert_regs (dest, sets[i].src_elt, 1))
+ /* If `insert_regs' changes something, the hash code must be
+ recalculated. */
+ sets[i].dest_hash = HASH (dest, GET_MODE (dest));
+
+ elt = insert (dest, sets[i].src_elt,
+ sets[i].dest_hash, GET_MODE (dest));
+ elt->in_memory = GET_CODE (sets[i].inner_dest) == MEM;
+ if (elt->in_memory)
+ {
+ /* This implicitly assumes a whole struct
+ need not have MEM_IN_STRUCT_P.
+ But a whole struct is *supposed* to have MEM_IN_STRUCT_P. */
+ elt->in_struct = (MEM_IN_STRUCT_P (sets[i].inner_dest)
+ || sets[i].inner_dest != SET_DEST (sets[i].rtl));
+ }
+
+ /* If we have (set (subreg:m1 (reg:m2 foo) 0) (bar:m1)), M1 is no
+ narrower than M2, and both M1 and M2 are the same number of words,
+ we are also doing (set (reg:m2 foo) (subreg:m2 (bar:m1) 0)) so
+ make that equivalence as well.
+
+ However, BAR may have equivalences for which gen_lowpart_if_possible
+ will produce a simpler value than gen_lowpart_if_possible applied to
+ BAR (e.g., if BAR was ZERO_EXTENDed from M2), so we will scan all
+ BAR's equivalences. If we don't get a simplified form, make
+ the SUBREG. It will not be used in an equivalence, but will
+ cause two similar assignments to be detected.
+
+ Note the loop below will find SUBREG_REG (DEST) since we have
+ already entered SRC and DEST of the SET in the table. */
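+
+ /* An illustrative instance on a 32-bit target: from
+ (set (subreg:SI (reg:HI 100) 0) (reg:SI 101)) we may also record
+ (set (reg:HI 100) (subreg:HI (reg:SI 101) 0)), since both modes occupy
+ one word and the SImode store determines the whole HImode register. */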
+
+ if (GET_CODE (dest) == SUBREG
+ && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1)
+ / UNITS_PER_WORD)
+ == (GET_MODE_SIZE (GET_MODE (dest)) - 1)/ UNITS_PER_WORD)
+ && (GET_MODE_SIZE (GET_MODE (dest))
+ >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
+ && sets[i].src_elt != 0)
+ {
+ enum machine_mode new_mode = GET_MODE (SUBREG_REG (dest));
+ struct table_elt *elt, *classp = 0;
+
+ for (elt = sets[i].src_elt->first_same_value; elt;
+ elt = elt->next_same_value)
+ {
+ rtx new_src = 0;
+ unsigned src_hash;
+ struct table_elt *src_elt;
+
+ /* Ignore invalid entries. */
+ if (GET_CODE (elt->exp) != REG
+ && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
+ continue;
+
+ new_src = gen_lowpart_if_possible (new_mode, elt->exp);
+ if (new_src == 0)
+ new_src = gen_rtx (SUBREG, new_mode, elt->exp, 0);
+
+ src_hash = HASH (new_src, new_mode);
+ src_elt = lookup (new_src, src_hash, new_mode);
+
+ /* Put the new source in the hash table if it isn't
+ there already. */
+ if (src_elt == 0)
+ {
+ if (insert_regs (new_src, classp, 0))
+ src_hash = HASH (new_src, new_mode);
+ src_elt = insert (new_src, classp, src_hash, new_mode);
+ src_elt->in_memory = elt->in_memory;
+ src_elt->in_struct = elt->in_struct;
+ }
+ else if (classp && classp != src_elt->first_same_value)
+ /* Show that two things that we've seen before are
+ actually the same. */
+ merge_equiv_classes (src_elt, classp);
+
+ classp = src_elt->first_same_value;
+ }
+ }
+ }
+
+ /* Special handling for (set REG0 REG1)
+ where REG0 is the "cheapest", cheaper than REG1.
+ After cse, REG1 will probably not be used in the sequel,
+ so (if easily done) change this insn to (set REG1 REG0) and
+ replace REG1 with REG0 in the previous insn that computed their value.
+ Then REG1 will become a dead store and won't cloud the situation
+ for later optimizations.
+
+ Do not make this change if REG1 is a hard register, because it will
+ then be used in the sequel and we may be changing a two-operand insn
+ into a three-operand insn.
+
+ Also do not do this if we are operating on a copy of INSN. */
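+
+ /* A sketch with illustrative pseudos: given
+ (set (reg 101) (expr)) followed by (set (reg 100) (reg 101)),
+ where reg 100 is the cheaper, head-of-class register, the pair is
+ rewritten as (set (reg 100) (expr)) followed by
+ (set (reg 101) (reg 100)), so the second insn becomes a
+ probably-dead store of reg 101. */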
+
+ if (n_sets == 1 && sets[0].rtl && GET_CODE (SET_DEST (sets[0].rtl)) == REG
+ && NEXT_INSN (PREV_INSN (insn)) == insn
+ && GET_CODE (SET_SRC (sets[0].rtl)) == REG
+ && REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER
+ && REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl)))
+ && (qty_first_reg[reg_qty[REGNO (SET_SRC (sets[0].rtl))]]
+ == REGNO (SET_DEST (sets[0].rtl))))
+ {
+ rtx prev = PREV_INSN (insn);
+ while (prev && GET_CODE (prev) == NOTE)
+ prev = PREV_INSN (prev);
+
+ if (prev && GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SET
+ && SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl))
+ {
+ rtx dest = SET_DEST (sets[0].rtl);
+ rtx note = find_reg_note (prev, REG_EQUIV, NULL_RTX);
+
+ validate_change (prev, & SET_DEST (PATTERN (prev)), dest, 1);
+ validate_change (insn, & SET_DEST (sets[0].rtl),
+ SET_SRC (sets[0].rtl), 1);
+ validate_change (insn, & SET_SRC (sets[0].rtl), dest, 1);
+ apply_change_group ();
+
+ /* If REG1 was equivalent to a constant, REG0 is not. */
+ if (note)
+ PUT_REG_NOTE_KIND (note, REG_EQUAL);
+
+ /* If there was a REG_WAS_0 note on PREV, remove it. Move
+ any REG_WAS_0 note on INSN to PREV. */
+ note = find_reg_note (prev, REG_WAS_0, NULL_RTX);
+ if (note)
+ remove_note (prev, note);
+
+ note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
+ if (note)
+ {
+ remove_note (insn, note);
+ XEXP (note, 1) = REG_NOTES (prev);
+ REG_NOTES (prev) = note;
+ }
+ }
+ }
+
+ /* If this is a conditional jump insn, record any known equivalences due to
+ the condition being tested. */
+
+ last_jump_equiv_class = 0;
+ if (GET_CODE (insn) == JUMP_INSN
+ && n_sets == 1 && GET_CODE (x) == SET
+ && GET_CODE (SET_SRC (x)) == IF_THEN_ELSE)
+ record_jump_equiv (insn, 0);
+
+#ifdef HAVE_cc0
+ /* If the previous insn set CC0 and this insn no longer references CC0,
+ delete the previous insn. Here we use the fact that nothing expects CC0
+ to be valid over an insn, which is true until the final pass. */
+ if (prev_insn && GET_CODE (prev_insn) == INSN
+ && (tem = single_set (prev_insn)) != 0
+ && SET_DEST (tem) == cc0_rtx
+ && ! reg_mentioned_p (cc0_rtx, x))
+ {
+ PUT_CODE (prev_insn, NOTE);
+ NOTE_LINE_NUMBER (prev_insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (prev_insn) = 0;
+ }
+
+ prev_insn_cc0 = this_insn_cc0;
+ prev_insn_cc0_mode = this_insn_cc0_mode;
+#endif
+
+ prev_insn = insn;
+}
+
+/* Store 1 in *WRITES_PTR for those categories of memory ref
+ that must be invalidated when the expression WRITTEN is stored in.
+ If WRITTEN is null, say everything must be invalidated. */
+
+static void
+note_mem_written (written, writes_ptr)
+ rtx written;
+ struct write_data *writes_ptr;
+{
+ static struct write_data everything = {0, 1, 1, 1};
+
+ if (written == 0)
+ *writes_ptr = everything;
+ else if (GET_CODE (written) == MEM)
+ {
+ /* Pushing or popping the stack invalidates just the stack pointer. */
+ rtx addr = XEXP (written, 0);
+ if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
+ || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
+ && GET_CODE (XEXP (addr, 0)) == REG
+ && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
+ {
+ writes_ptr->sp = 1;
+ return;
+ }
+ else if (GET_MODE (written) == BLKmode)
+ *writes_ptr = everything;
+ /* (mem (scratch)) means clobber everything. */
+ else if (GET_CODE (addr) == SCRATCH)
+ *writes_ptr = everything;
+ else if (cse_rtx_addr_varies_p (written))
+ {
+ /* A varying address that is a sum indicates an array element,
+ and that's just as good as a structure element
+ in implying that we need not invalidate scalar variables.
+ However, we must allow QImode aliasing of scalars, because the
+ ANSI C standard allows character pointers to alias anything. */
+ if (! ((MEM_IN_STRUCT_P (written)
+ || GET_CODE (XEXP (written, 0)) == PLUS)
+ && GET_MODE (written) != QImode))
+ writes_ptr->all = 1;
+ writes_ptr->nonscalar = 1;
+ }
+ writes_ptr->var = 1;
+ }
+}
+
+/* Perform invalidation on the basis of everything about an insn
+ except for invalidating the actual places that are SET in it.
+ This includes the places CLOBBERed, and anything that might
+ alias with something that is SET or CLOBBERed.
+
+ W points to the writes_memory for this insn, a struct write_data
+ saying which kinds of memory references must be invalidated.
+ X is the pattern of the insn. */
+
+static void
+invalidate_from_clobbers (w, x)
+ struct write_data *w;
+ rtx x;
+{
+ /* If W->var is not set, W specifies no action.
+ If W->all is set, this step gets all memory refs
+ so they can be ignored in the rest of this function. */
+ if (w->var)
+ invalidate_memory (w);
+
+ if (w->sp)
+ {
+ if (reg_tick[STACK_POINTER_REGNUM] >= 0)
+ reg_tick[STACK_POINTER_REGNUM]++;
+
+ /* This should be *very* rare. */
+ if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
+ invalidate (stack_pointer_rtx);
+ }
+
+ if (GET_CODE (x) == CLOBBER)
+ {
+ rtx ref = XEXP (x, 0);
+ if (ref)
+ {
+ if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
+ || (GET_CODE (ref) == MEM && ! w->all))
+ invalidate (ref);
+ else if (GET_CODE (ref) == STRICT_LOW_PART
+ || GET_CODE (ref) == ZERO_EXTRACT)
+ invalidate (XEXP (ref, 0));
+ }
+ }
+ else if (GET_CODE (x) == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ register rtx y = XVECEXP (x, 0, i);
+ if (GET_CODE (y) == CLOBBER)
+ {
+ rtx ref = XEXP (y, 0);
+ if (ref)
+ {
+ if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
+ || (GET_CODE (ref) == MEM && !w->all))
+ invalidate (ref);
+ else if (GET_CODE (ref) == STRICT_LOW_PART
+ || GET_CODE (ref) == ZERO_EXTRACT)
+ invalidate (XEXP (ref, 0));
+ }
+ }
+ }
+ }
+}
+
+/* Process X, part of the REG_NOTES of an insn. Look at any REG_EQUAL notes
+ and replace any registers in them with either an equivalent constant
+ or the canonical form of the register. If we are inside an address,
+ only do this if the address remains valid.
+
+ OBJECT is 0 except when within a MEM in which case it is the MEM.
+
+ Return the replacement for X. */
+
+static rtx
+cse_process_notes (x, object)
+ rtx x;
+ rtx object;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt = GET_RTX_FORMAT (code);
+ int i;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ case PC:
+ case CC0:
+ case LO_SUM:
+ return x;
+
+ case MEM:
+ XEXP (x, 0) = cse_process_notes (XEXP (x, 0), x);
+ return x;
+
+ case EXPR_LIST:
+ case INSN_LIST:
+ if (REG_NOTE_KIND (x) == REG_EQUAL)
+ XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX);
+ if (XEXP (x, 1))
+ XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX);
+ return x;
+
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ {
+ rtx new = cse_process_notes (XEXP (x, 0), object);
+ /* We don't substitute VOIDmode constants into these rtx,
+ since they would impede folding. */
+ if (GET_MODE (new) != VOIDmode)
+ validate_change (object, &XEXP (x, 0), new, 0);
+ return x;
+ }
+
+ case REG:
+ i = reg_qty[REGNO (x)];
+
+ /* Return a constant or a constant register. */
+ if (REGNO_QTY_VALID_P (REGNO (x))
+ && qty_const[i] != 0
+ && (CONSTANT_P (qty_const[i])
+ || GET_CODE (qty_const[i]) == REG))
+ {
+ rtx new = gen_lowpart_if_possible (GET_MODE (x), qty_const[i]);
+ if (new)
+ return new;
+ }
+
+ /* Otherwise, canonicalize this register. */
+ return canon_reg (x, NULL_RTX);
+ }
+
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ if (fmt[i] == 'e')
+ validate_change (object, &XEXP (x, i),
+ cse_process_notes (XEXP (x, i), object), 0);
+
+ return x;
+}
+
+/* Find common subexpressions between the end test of a loop and the beginning
+ of the loop. LOOP_START is the CODE_LABEL at the start of a loop.
+
+ Often we have a loop where an expression in the exit test is used
+ in the body of the loop. For example "while (*p) *q++ = *p++;".
+ Because of the way we duplicate the loop exit test in front of the loop,
+ however, we don't detect that common subexpression. This will be caught
+ when global cse is implemented, but this is quite a common case.
+
+ This function handles the most common cases of these common expressions.
+ It is called after we have processed the basic block ending with the
+ NOTE_INSN_LOOP_END note that ends a loop and the previous JUMP_INSN
+ jumps to a label used only once. */
+
+static void
+cse_around_loop (loop_start)
+ rtx loop_start;
+{
+ rtx insn;
+ int i;
+ struct table_elt *p;
+
+ /* If the jump at the end of the loop doesn't go to the start, we don't
+ do anything. */
+ for (insn = PREV_INSN (loop_start);
+ insn && (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0);
+ insn = PREV_INSN (insn))
+ ;
+
+ if (insn == 0
+ || GET_CODE (insn) != NOTE
+ || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG)
+ return;
+
+ /* If the last insn of the loop (the end test) was an NE comparison,
+ we will interpret it as an EQ comparison, since we fell through
+ the loop. Any equivalences resulting from that comparison are
+ therefore not valid and must be invalidated. */
+ if (last_jump_equiv_class)
+ for (p = last_jump_equiv_class->first_same_value; p;
+ p = p->next_same_value)
+ if (GET_CODE (p->exp) == MEM || GET_CODE (p->exp) == REG
+ || GET_CODE (p->exp) == SUBREG)
+ invalidate (p->exp);
+ else if (GET_CODE (p->exp) == STRICT_LOW_PART
+ || GET_CODE (p->exp) == ZERO_EXTRACT)
+ invalidate (XEXP (p->exp, 0));
+
+ /* Process insns starting after LOOP_START until we hit a CALL_INSN or
+ a CODE_LABEL (we could handle a CALL_INSN, but it isn't worth it).
+
+ The only thing we do with SET_DEST is invalidate entries, so we
+ can safely process each SET in order. It is slightly less efficient
+ to do so, but we only want to handle the most common cases. */
+
+ for (insn = NEXT_INSN (loop_start);
+ GET_CODE (insn) != CALL_INSN && GET_CODE (insn) != CODE_LABEL
+ && ! (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END);
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (GET_CODE (PATTERN (insn)) == SET
+ || GET_CODE (PATTERN (insn)) == CLOBBER))
+ cse_set_around_loop (PATTERN (insn), insn, loop_start);
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && GET_CODE (PATTERN (insn)) == PARALLEL)
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET
+ || GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == CLOBBER)
+ cse_set_around_loop (XVECEXP (PATTERN (insn), 0, i), insn,
+ loop_start);
+ }
+}
+
+/* Variable used for communications between the next two routines. */
+
+static struct write_data skipped_writes_memory;
+
+/* Process one SET of an insn that was skipped. We ignore CLOBBERs
+ since they are done elsewhere. This function is called via note_stores. */
+
+static void
+invalidate_skipped_set (dest, set)
+ rtx set;
+ rtx dest;
+{
+ if (GET_CODE (set) == CLOBBER
+#ifdef HAVE_cc0
+ || dest == cc0_rtx
+#endif
+ || dest == pc_rtx)
+ return;
+
+ if (GET_CODE (dest) == MEM)
+ note_mem_written (dest, &skipped_writes_memory);
+
+ /* There are times when an address can appear varying and be a PLUS
+ during this scan when it would be a fixed address were we to know
+ the proper equivalences. So promote "nonscalar" to be "all". */
+ if (skipped_writes_memory.nonscalar)
+ skipped_writes_memory.all = 1;
+
+ if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
+ || (! skipped_writes_memory.all && ! cse_rtx_addr_varies_p (dest)))
+ invalidate (dest);
+ else if (GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTRACT)
+ invalidate (XEXP (dest, 0));
+}
+
+/* Invalidate all insns from START up to the end of the function or the
+ next label. This is called when we wish to CSE around a block that is
+ conditionally executed. */
+
+static void
+invalidate_skipped_block (start)
+ rtx start;
+{
+ rtx insn;
+ static struct write_data init = {0, 0, 0, 0};
+ static struct write_data everything = {0, 1, 1, 1};
+
+ for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ continue;
+
+ skipped_writes_memory = init;
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ invalidate_for_call ();
+ skipped_writes_memory = everything;
+ }
+
+ note_stores (PATTERN (insn), invalidate_skipped_set);
+ invalidate_from_clobbers (&skipped_writes_memory, PATTERN (insn));
+ }
+}
+
+/* Used for communication between the following two routines; contains a
+ value to be checked for modification. */
+
+static rtx cse_check_loop_start_value;
+
+/* If modifying X will modify the value in CSE_CHECK_LOOP_START_VALUE,
+ indicate that fact by setting CSE_CHECK_LOOP_START_VALUE to 0. */
+
+static void
+cse_check_loop_start (x, set)
+ rtx x;
+ rtx set;
+{
+ if (cse_check_loop_start_value == 0
+ || GET_CODE (x) == CC0 || GET_CODE (x) == PC)
+ return;
+
+ if ((GET_CODE (x) == MEM && GET_CODE (cse_check_loop_start_value) == MEM)
+ || reg_overlap_mentioned_p (x, cse_check_loop_start_value))
+ cse_check_loop_start_value = 0;
+}
+
+/* X is a SET or CLOBBER contained in INSN that was found near the start of
+ a loop that starts with the label at LOOP_START.
+
+ If X is a SET, we see if its SET_SRC is currently in our hash table.
+ If so, we see if it has a value equal to some register used only in the
+ loop exit code (as marked by jump.c).
+
+ If those two conditions are true, we search backwards from the start of
+ the loop to see if that same value was loaded into a register that still
+ retains its value at the start of the loop.
+
+ If so, we insert an insn after the load to copy the destination of that
+ load into the equivalent register and (try to) replace our SET_SRC with that
+ register.
+
+ In any event, we invalidate whatever this SET or CLOBBER modifies. */
+
+static void
+cse_set_around_loop (x, insn, loop_start)
+ rtx x;
+ rtx insn;
+ rtx loop_start;
+{
+ struct table_elt *src_elt;
+ static struct write_data init = {0, 0, 0, 0};
+ struct write_data writes_memory;
+
+ writes_memory = init;
+
+ /* If this is a SET, see if we can replace SET_SRC, but ignore SETs that
+ are setting PC or CC0 or whose SET_SRC is already a register. */
+ if (GET_CODE (x) == SET
+ && GET_CODE (SET_DEST (x)) != PC && GET_CODE (SET_DEST (x)) != CC0
+ && GET_CODE (SET_SRC (x)) != REG)
+ {
+ src_elt = lookup (SET_SRC (x),
+ HASH (SET_SRC (x), GET_MODE (SET_DEST (x))),
+ GET_MODE (SET_DEST (x)));
+
+ if (src_elt)
+ for (src_elt = src_elt->first_same_value; src_elt;
+ src_elt = src_elt->next_same_value)
+ if (GET_CODE (src_elt->exp) == REG && REG_LOOP_TEST_P (src_elt->exp)
+ && COST (src_elt->exp) < COST (SET_SRC (x)))
+ {
+ rtx p, set;
+
+ /* Look for an insn in front of LOOP_START that sets
+ something in the desired mode to SET_SRC (x) before we hit
+ a label or CALL_INSN. */
+
+ for (p = prev_nonnote_insn (loop_start);
+ p && GET_CODE (p) != CALL_INSN
+ && GET_CODE (p) != CODE_LABEL;
+ p = prev_nonnote_insn (p))
+ if ((set = single_set (p)) != 0
+ && GET_CODE (SET_DEST (set)) == REG
+ && GET_MODE (SET_DEST (set)) == src_elt->mode
+ && rtx_equal_p (SET_SRC (set), SET_SRC (x)))
+ {
+ /* We now have to ensure that nothing between P
+ and LOOP_START modified anything referenced in
+ SET_SRC (x). We know that nothing within the loop
+ can modify it, or we would have invalidated it in
+ the hash table. */
+ rtx q;
+
+ cse_check_loop_start_value = SET_SRC (x);
+ for (q = p; q != loop_start; q = NEXT_INSN (q))
+ if (GET_RTX_CLASS (GET_CODE (q)) == 'i')
+ note_stores (PATTERN (q), cse_check_loop_start);
+
+ /* If nothing was changed and we can replace our
+ SET_SRC, add an insn after P to copy its destination
+ to what we will be replacing SET_SRC with. */
+ if (cse_check_loop_start_value
+ && validate_change (insn, &SET_SRC (x),
+ src_elt->exp, 0))
+ emit_insn_after (gen_move_insn (src_elt->exp,
+ SET_DEST (set)),
+ p);
+ break;
+ }
+ }
+ }
+
+ /* Now invalidate anything modified by X. */
+ note_mem_written (SET_DEST (x), &writes_memory);
+
+ if (writes_memory.var)
+ invalidate_memory (&writes_memory);
+
+ /* See comment on similar code in cse_insn for explanation of these tests. */
+ if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG
+ || (GET_CODE (SET_DEST (x)) == MEM && ! writes_memory.all
+ && ! cse_rtx_addr_varies_p (SET_DEST (x))))
+ invalidate (SET_DEST (x));
+ else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
+ || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
+ invalidate (XEXP (SET_DEST (x), 0));
+}
+
+/* Find the end of INSN's basic block and return its range,
+ the total number of SETs in all the insns of the block, the last insn of the
+ block, and the branch path.
+
+ The branch path indicates which branches should be followed. If a non-zero
+ path size is specified, the block should be rescanned and a different set
+ of branches will be taken. The branch path is only used if
+ FLAG_CSE_FOLLOW_JUMPS or FLAG_CSE_SKIP_BLOCKS is non-zero.
+
+ DATA is a pointer to a struct cse_basic_block_data, defined below, that is
+ used to describe the block. It is filled in with the information about
+ the current block. The incoming structure's branch path, if any, is used
+ to construct the output branch path. */
+
+void
+cse_end_of_basic_block (insn, data, follow_jumps, after_loop, skip_blocks)
+ rtx insn;
+ struct cse_basic_block_data *data;
+ int follow_jumps;
+ int after_loop;
+ int skip_blocks;
+{
+ rtx p = insn, q;
+ int nsets = 0;
+ int low_cuid = INSN_CUID (insn), high_cuid = INSN_CUID (insn);
+ rtx next = GET_RTX_CLASS (GET_CODE (insn)) == 'i' ? insn : next_real_insn (insn);
+ int path_size = data->path_size;
+ int path_entry = 0;
+ int i;
+
+ /* Update the previous branch path, if any. If the last branch was
+ previously TAKEN, mark it NOT_TAKEN. If it was previously NOT_TAKEN,
+ shorten the path by one and look at the previous branch. We know that
+ at least one branch must have been taken if PATH_SIZE is non-zero. */
+ while (path_size > 0)
+ {
+ if (data->path[path_size - 1].status != NOT_TAKEN)
+ {
+ data->path[path_size - 1].status = NOT_TAKEN;
+ break;
+ }
+ else
+ path_size--;
+ }
+
+ /* Scan to end of this basic block. */
+ while (p && GET_CODE (p) != CODE_LABEL)
+ {
+ /* Don't cse out the end of a loop. This makes a difference
+ only for the unusual loops that always execute at least once;
+ all other loops have labels there so we will stop in any case.
+ Cse'ing out the end of the loop is dangerous because it
+ might cause an invariant expression inside the loop
+ to be reused after the end of the loop. This would make it
+ hard to move the expression out of the loop in loop.c,
+ especially if it is one of several equivalent expressions
+ and loop.c would like to eliminate it.
+
+ If we are running after loop.c has finished, we can ignore
+ the NOTE_INSN_LOOP_END. */
+
+ if (! after_loop && GET_CODE (p) == NOTE
+ && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
+ break;
+
+ /* Don't cse over a call to setjmp; on some machines (eg vax)
+ the regs restored by the longjmp come from
+ a later time than the setjmp. */
+ if (GET_CODE (p) == NOTE
+ && NOTE_LINE_NUMBER (p) == NOTE_INSN_SETJMP)
+ break;
+
+ /* A PARALLEL can have lots of SETs in it,
+ especially if it is really an ASM_OPERANDS. */
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && GET_CODE (PATTERN (p)) == PARALLEL)
+ nsets += XVECLEN (PATTERN (p), 0);
+ else if (GET_CODE (p) != NOTE)
+ nsets += 1;
+
+ /* Ignore insns made by CSE; they cannot affect the boundaries of
+ the basic block. */
+
+ if (INSN_UID (p) <= max_uid && INSN_CUID (p) > high_cuid)
+ high_cuid = INSN_CUID (p);
+ if (INSN_UID (p) <= max_uid && INSN_CUID (p) < low_cuid)
+ low_cuid = INSN_CUID (p);
+
+ /* See if this insn is in our branch path. If it is and we are to
+ take it, do so. */
+ if (path_entry < path_size && data->path[path_entry].branch == p)
+ {
+ if (data->path[path_entry].status != NOT_TAKEN)
+ p = JUMP_LABEL (p);
+
+ /* Point to next entry in path, if any. */
+ path_entry++;
+ }
+
+ /* If this is a conditional jump, we can follow it if -fcse-follow-jumps
+ was specified, we haven't reached our maximum path length, there are
+ insns following the target of the jump, this is the only use of the
+ jump label, and the target label is preceded by a BARRIER.
+
+ Alternatively, we can follow the jump if it branches around a
+ block of code and there are no other branches into the block.
+ In this case invalidate_skipped_block will be called to invalidate any
+ registers set in the block when following the jump. */
+
+ else if ((follow_jumps || skip_blocks) && path_size < PATHLENGTH - 1
+ && GET_CODE (p) == JUMP_INSN
+ && GET_CODE (PATTERN (p)) == SET
+ && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE
+ && LABEL_NUSES (JUMP_LABEL (p)) == 1
+ && NEXT_INSN (JUMP_LABEL (p)) != 0)
+ {
+ for (q = PREV_INSN (JUMP_LABEL (p)); q; q = PREV_INSN (q))
+ if ((GET_CODE (q) != NOTE
+ || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END
+ || NOTE_LINE_NUMBER (q) == NOTE_INSN_SETJMP)
+ && (GET_CODE (q) != CODE_LABEL || LABEL_NUSES (q) != 0))
+ break;
+
+ /* If we ran into a BARRIER, this code is an extension of the
+ basic block when the branch is taken. */
+ if (follow_jumps && q != 0 && GET_CODE (q) == BARRIER)
+ {
+ /* Don't allow ourselves to keep walking around an
+ always-executed loop. */
+ if (next_real_insn (q) == next)
+ {
+ p = NEXT_INSN (p);
+ continue;
+ }
+
+ /* Similarly, don't put a branch in our path more than once. */
+ for (i = 0; i < path_entry; i++)
+ if (data->path[i].branch == p)
+ break;
+
+ if (i != path_entry)
+ break;
+
+ data->path[path_entry].branch = p;
+ data->path[path_entry++].status = TAKEN;
+
+ /* This branch now ends our path. It was possible that we
+ didn't see this branch the last time around (when the
+ insn in front of the target was a JUMP_INSN that was
+ turned into a no-op). */
+ path_size = path_entry;
+
+ p = JUMP_LABEL (p);
+ /* Mark block so we won't scan it again later. */
+ PUT_MODE (NEXT_INSN (p), QImode);
+ }
+ /* Detect a branch around a block of code. */
+ else if (skip_blocks && q != 0 && GET_CODE (q) != CODE_LABEL)
+ {
+ register rtx tmp;
+
+ if (next_real_insn (q) == next)
+ {
+ p = NEXT_INSN (p);
+ continue;
+ }
+
+ for (i = 0; i < path_entry; i++)
+ if (data->path[i].branch == p)
+ break;
+
+ if (i != path_entry)
+ break;
+
+ /* This is no_labels_between_p (p, q) with an added check for
+ reaching the end of a function (in case Q precedes P). */
+ for (tmp = NEXT_INSN (p); tmp && tmp != q; tmp = NEXT_INSN (tmp))
+ if (GET_CODE (tmp) == CODE_LABEL)
+ break;
+
+ if (tmp == q)
+ {
+ data->path[path_entry].branch = p;
+ data->path[path_entry++].status = AROUND;
+
+ path_size = path_entry;
+
+ p = JUMP_LABEL (p);
+ /* Mark block so we won't scan it again later. */
+ PUT_MODE (NEXT_INSN (p), QImode);
+ }
+ }
+ }
+ p = NEXT_INSN (p);
+ }
+
+ data->low_cuid = low_cuid;
+ data->high_cuid = high_cuid;
+ data->nsets = nsets;
+ data->last = p;
+
+  /* If none of the jumps in the path were taken, set our path length to
+     zero so a rescan won't be done.  */
+ for (i = path_size - 1; i >= 0; i--)
+ if (data->path[i].status != NOT_TAKEN)
+ break;
+
+ if (i == -1)
+ data->path_size = 0;
+ else
+ data->path_size = path_size;
+
+ /* End the current branch path. */
+ data->path[path_size].branch = 0;
+}
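+
+/* As a hypothetical illustration of the path recorded above: given a
+   conditional jump B whose target label L1 follows a BARRIER and is used
+   only by B, this function records data->path[0].branch = B with status
+   TAKEN, and the block is extended through the code at L1.  A
+   conditional jump around a block of code with no other entries into it
+   is recorded with status AROUND instead, and invalidate_skipped_block
+   handles the skipped insns when the path is replayed.  */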
+
+/* Perform cse on the instructions of a function.
+ F is the first instruction.
+   NREGS is one plus the highest pseudo-reg number used in the function.
+
+ AFTER_LOOP is 1 if this is the cse call done after loop optimization
+ (only if -frerun-cse-after-loop).
+
+ Returns 1 if jump_optimize should be redone due to simplifications
+ in conditional jump instructions. */
+
+int
+cse_main (f, nregs, after_loop, file)
+ rtx f;
+ int nregs;
+ int after_loop;
+ FILE *file;
+{
+ struct cse_basic_block_data val;
+ register rtx insn = f;
+ register int i;
+
+ cse_jumps_altered = 0;
+ constant_pool_entries_cost = 0;
+ val.path_size = 0;
+
+ init_recog ();
+
+ max_reg = nregs;
+
+ all_minus_one = (int *) alloca (nregs * sizeof (int));
+ consec_ints = (int *) alloca (nregs * sizeof (int));
+
+ for (i = 0; i < nregs; i++)
+ {
+ all_minus_one[i] = -1;
+ consec_ints[i] = i;
+ }
+
+ reg_next_eqv = (int *) alloca (nregs * sizeof (int));
+ reg_prev_eqv = (int *) alloca (nregs * sizeof (int));
+ reg_qty = (int *) alloca (nregs * sizeof (int));
+ reg_in_table = (int *) alloca (nregs * sizeof (int));
+ reg_tick = (int *) alloca (nregs * sizeof (int));
+
+#ifdef LOAD_EXTEND_OP
+
+ /* Allocate scratch rtl here. cse_insn will fill in the memory reference
+ and change the code and mode as appropriate. */
+ memory_extend_rtx = gen_rtx (ZERO_EXTEND, VOIDmode, 0);
+#endif
+
+ /* Discard all the free elements of the previous function
+     since they are allocated on the temporary obstack.  */
+ bzero ((char *) table, sizeof table);
+ free_element_chain = 0;
+ n_elements_made = 0;
+
+ /* Find the largest uid. */
+
+ max_uid = get_max_uid ();
+ uid_cuid = (int *) alloca ((max_uid + 1) * sizeof (int));
+ bzero ((char *) uid_cuid, (max_uid + 1) * sizeof (int));
+
+ /* Compute the mapping from uids to cuids.
+ CUIDs are numbers assigned to insns, like uids,
+ except that cuids increase monotonically through the code.
+ Don't assign cuids to line-number NOTEs, so that the distance in cuids
+ between two insns is not affected by -g. */
+
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) != NOTE
+ || NOTE_LINE_NUMBER (insn) < 0)
+ INSN_CUID (insn) = ++i;
+ else
+ /* Give a line number note the same cuid as preceding insn. */
+ INSN_CUID (insn) = i;
+ }
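+
+  /* For example, in a hypothetical stream consisting of insn A, a
+     line-number note, and insn B, the note receives the same cuid as A,
+     so the cuid distance from A to B is 1 whether or not -g emitted the
+     note.  */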
+
+ /* Initialize which registers are clobbered by calls. */
+
+ CLEAR_HARD_REG_SET (regs_invalidated_by_call);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if ((call_used_regs[i]
+ /* Used to check !fixed_regs[i] here, but that isn't safe;
+ fixed regs are still call-clobbered, and sched can get
+ confused if they can "live across calls".
+
+ The frame pointer is always preserved across calls. The arg
+ pointer is if it is fixed. The stack pointer usually is, unless
+ RETURN_POPS_ARGS, in which case an explicit CLOBBER
+ will be present. If we are generating PIC code, the PIC offset
+ table register is preserved across calls. */
+
+ && i != STACK_POINTER_REGNUM
+ && i != FRAME_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && i != HARD_FRAME_POINTER_REGNUM
+#endif
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && ! (i == ARG_POINTER_REGNUM && fixed_regs[i])
+#endif
+#if defined (PIC_OFFSET_TABLE_REGNUM) && !defined (PIC_OFFSET_TABLE_REG_CALL_CLOBBERED)
+ && ! (i == PIC_OFFSET_TABLE_REGNUM && flag_pic)
+#endif
+ )
+ || global_regs[i])
+ SET_HARD_REG_BIT (regs_invalidated_by_call, i);
+
+ /* Loop over basic blocks.
+ Compute the maximum number of qty's needed for each basic block
+ (which is 2 for each SET). */
+ insn = f;
+ while (insn)
+ {
+ cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps, after_loop,
+ flag_cse_skip_blocks);
+
+ /* If this basic block was already processed or has no sets, skip it. */
+ if (val.nsets == 0 || GET_MODE (insn) == QImode)
+ {
+ PUT_MODE (insn, VOIDmode);
+ insn = (val.last ? NEXT_INSN (val.last) : 0);
+ val.path_size = 0;
+ continue;
+ }
+
+ cse_basic_block_start = val.low_cuid;
+ cse_basic_block_end = val.high_cuid;
+ max_qty = val.nsets * 2;
+
+ if (file)
+ fprintf (file, ";; Processing block from %d to %d, %d sets.\n",
+ INSN_UID (insn), val.last ? INSN_UID (val.last) : 0,
+ val.nsets);
+
+ /* Make MAX_QTY bigger to give us room to optimize
+ past the end of this basic block, if that should prove useful. */
+ if (max_qty < 500)
+ max_qty = 500;
+
+ max_qty += max_reg;
+
+ /* If this basic block is being extended by following certain jumps,
+ (see `cse_end_of_basic_block'), we reprocess the code from the start.
+ Otherwise, we start after this basic block. */
+ if (val.path_size > 0)
+ cse_basic_block (insn, val.last, val.path, 0);
+ else
+ {
+ int old_cse_jumps_altered = cse_jumps_altered;
+ rtx temp;
+
+ /* When cse changes a conditional jump to an unconditional
+ jump, we want to reprocess the block, since it will give
+ us a new branch path to investigate. */
+ cse_jumps_altered = 0;
+ temp = cse_basic_block (insn, val.last, val.path, ! after_loop);
+ if (cse_jumps_altered == 0
+ || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
+ insn = temp;
+
+ cse_jumps_altered |= old_cse_jumps_altered;
+ }
+
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ }
+
+ /* Tell refers_to_mem_p that qty_const info is not available. */
+ qty_const = 0;
+
+ if (max_elements_made < n_elements_made)
+ max_elements_made = n_elements_made;
+
+ return cse_jumps_altered;
+}
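+
+/* A minimal sketch of a driver for this pass (hypothetical; toplev.c
+   makes the equivalent calls).  A nonzero return from cse_main asks for
+   the jump optimizer to be rerun:
+
+	extern rtx get_insns ();
+	extern int max_reg_num ();
+
+	if (cse_main (get_insns (), max_reg_num (), 0, dump_file))
+	  jump_optimize (get_insns (), 0, 0, 0);
+*/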
+
+/* Process a single basic block.  FROM and TO are the limits of the basic
+ block. NEXT_BRANCH points to the branch path when following jumps or
+ a null path when not following jumps.
+
+ AROUND_LOOP is non-zero if we are to try to cse around to the start of a
+ loop. This is true when we are being called for the last time on a
+ block and this CSE pass is before loop.c. */
+
+static rtx
+cse_basic_block (from, to, next_branch, around_loop)
+ register rtx from, to;
+ struct branch_path *next_branch;
+ int around_loop;
+{
+ register rtx insn;
+ int to_usage = 0;
+ int in_libcall_block = 0;
+
+ /* Each of these arrays is undefined before max_reg, so only allocate
+ the space actually needed and adjust the start below. */
+
+ qty_first_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
+ qty_last_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
+  qty_mode = (enum machine_mode *) alloca ((max_qty - max_reg)
+					    * sizeof (enum machine_mode));
+ qty_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
+ qty_const_insn = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
+ qty_comparison_code
+ = (enum rtx_code *) alloca ((max_qty - max_reg) * sizeof (enum rtx_code));
+ qty_comparison_qty = (int *) alloca ((max_qty - max_reg) * sizeof (int));
+ qty_comparison_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
+
+ qty_first_reg -= max_reg;
+ qty_last_reg -= max_reg;
+ qty_mode -= max_reg;
+ qty_const -= max_reg;
+ qty_const_insn -= max_reg;
+ qty_comparison_code -= max_reg;
+ qty_comparison_qty -= max_reg;
+ qty_comparison_const -= max_reg;
+
+ new_basic_block ();
+
+ /* TO might be a label. If so, protect it from being deleted. */
+ if (to != 0 && GET_CODE (to) == CODE_LABEL)
+ ++LABEL_NUSES (to);
+
+ for (insn = from; insn != to; insn = NEXT_INSN (insn))
+ {
+ register enum rtx_code code;
+
+ /* See if this is a branch that is part of the path. If so, and it is
+ to be taken, do so. */
+ if (next_branch->branch == insn)
+ {
+ enum taken status = next_branch++->status;
+ if (status != NOT_TAKEN)
+ {
+ if (status == TAKEN)
+ record_jump_equiv (insn, 1);
+ else
+ invalidate_skipped_block (NEXT_INSN (insn));
+
+ /* Set the last insn as the jump insn; it doesn't affect cc0.
+ Then follow this branch. */
+#ifdef HAVE_cc0
+ prev_insn_cc0 = 0;
+#endif
+ prev_insn = insn;
+ insn = JUMP_LABEL (insn);
+ continue;
+ }
+ }
+
+ code = GET_CODE (insn);
+ if (GET_MODE (insn) == QImode)
+ PUT_MODE (insn, VOIDmode);
+
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ /* Process notes first so we have all notes in canonical forms when
+ looking for duplicate operations. */
+
+ if (REG_NOTES (insn))
+ REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), NULL_RTX);
+
+	  /* Track when we are inside a LIBCALL block.  Inside such a block,
+ we do not want to record destinations. The last insn of a
+ LIBCALL block is not considered to be part of the block, since
+ its destination is the result of the block and hence should be
+ recorded. */
+
+ if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
+ in_libcall_block = 1;
+ else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
+ in_libcall_block = 0;
+
+ cse_insn (insn, in_libcall_block);
+ }
+
+ /* If INSN is now an unconditional jump, skip to the end of our
+ basic block by pretending that we just did the last insn in the
+ basic block. If we are jumping to the end of our block, show
+ that we can have one usage of TO. */
+
+ if (simplejump_p (insn))
+ {
+ if (to == 0)
+ return 0;
+
+ if (JUMP_LABEL (insn) == to)
+ to_usage = 1;
+
+ /* Maybe TO was deleted because the jump is unconditional.
+ If so, there is nothing left in this basic block. */
+ /* ??? Perhaps it would be smarter to set TO
+ to whatever follows this insn,
+ and pretend the basic block had always ended here. */
+ if (INSN_DELETED_P (to))
+ break;
+
+ insn = PREV_INSN (to);
+ }
+
+ /* See if it is ok to keep on going past the label
+ which used to end our basic block. Remember that we incremented
+ the count of that label, so we decrement it here. If we made
+ a jump unconditional, TO_USAGE will be one; in that case, we don't
+ want to count the use in that jump. */
+
+ if (to != 0 && NEXT_INSN (insn) == to
+ && GET_CODE (to) == CODE_LABEL && --LABEL_NUSES (to) == to_usage)
+ {
+ struct cse_basic_block_data val;
+
+ insn = NEXT_INSN (to);
+
+ if (LABEL_NUSES (to) == 0)
+ delete_insn (to);
+
+ /* Find the end of the following block. Note that we won't be
+ following branches in this case. If TO was the last insn
+ in the function, we are done. Similarly, if we deleted the
+ insn after TO, it must have been because it was preceded by
+ a BARRIER. In that case, we are done with this block because it
+ has no continuation. */
+
+ if (insn == 0 || INSN_DELETED_P (insn))
+ return 0;
+
+ to_usage = 0;
+ val.path_size = 0;
+ cse_end_of_basic_block (insn, &val, 0, 0, 0);
+
+ /* If the tables we allocated have enough space left
+ to handle all the SETs in the next basic block,
+ continue through it. Otherwise, return,
+ and that block will be scanned individually. */
+ if (val.nsets * 2 + next_qty > max_qty)
+ break;
+
+ cse_basic_block_start = val.low_cuid;
+ cse_basic_block_end = val.high_cuid;
+ to = val.last;
+
+ /* Prevent TO from being deleted if it is a label. */
+ if (to != 0 && GET_CODE (to) == CODE_LABEL)
+ ++LABEL_NUSES (to);
+
+ /* Back up so we process the first insn in the extension. */
+ insn = PREV_INSN (insn);
+ }
+ }
+
+ if (next_qty > max_qty)
+ abort ();
+
+ /* If we are running before loop.c, we stopped on a NOTE_INSN_LOOP_END, and
+ the previous insn is the only insn that branches to the head of a loop,
+ we can cse into the loop. Don't do this if we changed the jump
+ structure of a loop unless we aren't going to be following jumps. */
+
+ if ((cse_jumps_altered == 0
+ || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
+ && around_loop && to != 0
+ && GET_CODE (to) == NOTE && NOTE_LINE_NUMBER (to) == NOTE_INSN_LOOP_END
+ && GET_CODE (PREV_INSN (to)) == JUMP_INSN
+ && JUMP_LABEL (PREV_INSN (to)) != 0
+ && LABEL_NUSES (JUMP_LABEL (PREV_INSN (to))) == 1)
+ cse_around_loop (JUMP_LABEL (PREV_INSN (to)));
+
+ return to ? NEXT_INSN (to) : 0;
+}
+
+/* Count the number of times registers are used (not set) in X.
+ COUNTS is an array in which we accumulate the count, INCR is how much
+ we count each register usage.
+
+ Don't count a usage of DEST, which is the SET_DEST of a SET which
+ contains X in its SET_SRC. This is because such a SET does not
+ modify the liveness of DEST. */
+
+static void
+count_reg_usage (x, counts, dest, incr)
+ rtx x;
+ int *counts;
+ rtx dest;
+ int incr;
+{
+ enum rtx_code code;
+ char *fmt;
+ int i, j;
+
+ if (x == 0)
+ return;
+
+ switch (code = GET_CODE (x))
+ {
+ case REG:
+ if (x != dest)
+ counts[REGNO (x)] += incr;
+ return;
+
+ case PC:
+ case CC0:
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CLOBBER:
+ return;
+
+ case SET:
+ /* Unless we are setting a REG, count everything in SET_DEST. */
+ if (GET_CODE (SET_DEST (x)) != REG)
+ count_reg_usage (SET_DEST (x), counts, NULL_RTX, incr);
+
+ /* If SRC has side-effects, then we can't delete this insn, so the
+ usage of SET_DEST inside SRC counts.
+
+ ??? Strictly-speaking, we might be preserving this insn
+ because some other SET has side-effects, but that's hard
+ to do and can't happen now. */
+ count_reg_usage (SET_SRC (x), counts,
+ side_effects_p (SET_SRC (x)) ? NULL_RTX : SET_DEST (x),
+ incr);
+ return;
+
+ case CALL_INSN:
+ count_reg_usage (CALL_INSN_FUNCTION_USAGE (x), counts, NULL_RTX, incr);
+
+ /* ... falls through ... */
+ case INSN:
+ case JUMP_INSN:
+ count_reg_usage (PATTERN (x), counts, NULL_RTX, incr);
+
+ /* Things used in a REG_EQUAL note aren't dead since loop may try to
+ use them. */
+
+ count_reg_usage (REG_NOTES (x), counts, NULL_RTX, incr);
+ return;
+
+ case EXPR_LIST:
+ case INSN_LIST:
+ if (REG_NOTE_KIND (x) == REG_EQUAL
+ || GET_CODE (XEXP (x,0)) == USE)
+ count_reg_usage (XEXP (x, 0), counts, NULL_RTX, incr);
+ count_reg_usage (XEXP (x, 1), counts, NULL_RTX, incr);
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ count_reg_usage (XEXP (x, i), counts, dest, incr);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ count_reg_usage (XVECEXP (x, i, j), counts, dest, incr);
+ }
+}
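+
+/* For a hypothetical insn whose pattern is
+	(set (reg 100) (plus (reg 100) (reg 101)))
+   only counts[101] is incremented: the SET case above passes (reg 100)
+   as DEST while scanning the SET_SRC, so the use of reg 100 there is
+   not counted.  */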
+
+/* Scan all the insns and delete any that are dead; i.e., they store a register
+ that is never used or they copy a register to itself.
+
+ This is used to remove insns made obviously dead by cse. It improves the
+ heuristics in loop since it won't try to move dead invariants out of loops
+ or make givs for dead quantities. The remaining passes of the compilation
+ are also sped up. */
+
+void
+delete_dead_from_cse (insns, nreg)
+ rtx insns;
+ int nreg;
+{
+ int *counts = (int *) alloca (nreg * sizeof (int));
+ rtx insn, prev;
+ rtx tem;
+ int i;
+ int in_libcall = 0;
+
+ /* First count the number of times each register is used. */
+ bzero ((char *) counts, sizeof (int) * nreg);
+ for (insn = next_real_insn (insns); insn; insn = next_real_insn (insn))
+ count_reg_usage (insn, counts, NULL_RTX, 1);
+
+ /* Go from the last insn to the first and delete insns that only set unused
+ registers or copy a register to itself. As we delete an insn, remove
+ usage counts for registers it uses. */
+ for (insn = prev_real_insn (get_last_insn ()); insn; insn = prev)
+ {
+ int live_insn = 0;
+
+ prev = prev_real_insn (insn);
+
+ /* Don't delete any insns that are part of a libcall block.
+ Flow or loop might get confused if we did that. Remember
+ that we are scanning backwards. */
+ if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
+ in_libcall = 1;
+
+ if (in_libcall)
+ live_insn = 1;
+ else if (GET_CODE (PATTERN (insn)) == SET)
+ {
+ if (GET_CODE (SET_DEST (PATTERN (insn))) == REG
+ && SET_DEST (PATTERN (insn)) == SET_SRC (PATTERN (insn)))
+ ;
+
+#ifdef HAVE_cc0
+ else if (GET_CODE (SET_DEST (PATTERN (insn))) == CC0
+ && ! side_effects_p (SET_SRC (PATTERN (insn)))
+ && ((tem = next_nonnote_insn (insn)) == 0
+ || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
+ || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
+ ;
+#endif
+ else if (GET_CODE (SET_DEST (PATTERN (insn))) != REG
+ || REGNO (SET_DEST (PATTERN (insn))) < FIRST_PSEUDO_REGISTER
+ || counts[REGNO (SET_DEST (PATTERN (insn)))] != 0
+ || side_effects_p (SET_SRC (PATTERN (insn))))
+ live_insn = 1;
+ }
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ {
+ rtx elt = XVECEXP (PATTERN (insn), 0, i);
+
+ if (GET_CODE (elt) == SET)
+ {
+ if (GET_CODE (SET_DEST (elt)) == REG
+ && SET_DEST (elt) == SET_SRC (elt))
+ ;
+
+#ifdef HAVE_cc0
+ else if (GET_CODE (SET_DEST (elt)) == CC0
+ && ! side_effects_p (SET_SRC (elt))
+ && ((tem = next_nonnote_insn (insn)) == 0
+ || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
+ || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
+ ;
+#endif
+ else if (GET_CODE (SET_DEST (elt)) != REG
+ || REGNO (SET_DEST (elt)) < FIRST_PSEUDO_REGISTER
+ || counts[REGNO (SET_DEST (elt))] != 0
+ || side_effects_p (SET_SRC (elt)))
+ live_insn = 1;
+ }
+ else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE)
+ live_insn = 1;
+ }
+ else
+ live_insn = 1;
+
+ /* If this is a dead insn, delete it and show registers in it aren't
+ being used. */
+
+ if (! live_insn)
+ {
+ count_reg_usage (insn, counts, NULL_RTX, -1);
+ delete_insn (insn);
+ }
+
+ if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
+ in_libcall = 0;
+ }
+}
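+
+/* For instance, the hypothetical insns
+	(set (reg 100) (reg 100))
+	(set (reg 101) (const_int 0))	; reg 101 otherwise unused
+   are both deleted: the first copies a register to itself, and the
+   second sets a pseudo register whose usage count is zero.  A set of a
+   hard register (REGNO below FIRST_PSEUDO_REGISTER) is always kept.  */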
diff --git a/gnu/usr.bin/cc/cc_int/dbxout.c b/gnu/usr.bin/cc/cc_int/dbxout.c
new file mode 100644
index 0000000..d34497d
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/dbxout.c
@@ -0,0 +1,2585 @@
+/* Output dbx-format symbol table information from GNU compiler.
+ Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Output dbx-format symbol table data.
+ This consists of many symbol table entries, each of them
+ a .stabs assembler pseudo-op with four operands:
+ a "name" which is really a description of one symbol and its type,
+ a "code", which is a symbol defined in stab.h whose name starts with N_,
+ an unused operand always 0,
+ and a "value" which is an address or an offset.
+ The name is enclosed in doublequote characters.
+
+ Each function, variable, typedef, and structure tag
+ has a symbol table entry to define it.
+ The beginning and end of each level of name scoping within
+ a function are also marked by special symbol table entries.
+
+ The "name" consists of the symbol name, a colon, a kind-of-symbol letter,
+ and a data type number. The data type number may be followed by
+ "=" and a type definition; normally this will happen the first time
+ the type number is mentioned. The type definition may refer to
+ other types by number, and those type numbers may be followed
+ by "=" and nested definitions.
+
+ This can make the "name" quite long.
+ When a name is more than 80 characters, we split the .stabs pseudo-op
+ into two .stabs pseudo-ops, both sharing the same "code" and "value".
+ The first one is marked as continued with a double-backslash at the
+ end of its "name".
+
+   The kind-of-symbol letter distinguishes function names from global
+   variables, file-scope variables, parameters, auto variables in
+   memory, typedef names, and register variables.
+   See `dbxout_symbol'.
+
+ The "code" is mostly redundant with the kind-of-symbol letter
+ that goes in the "name", but not entirely: for symbols located
+ in static storage, the "code" says which segment the address is in,
+ which controls how it is relocated.
+
+ The "value" for a symbol in static storage
+ is the core address of the symbol (actually, the assembler
+ label for the symbol). For a symbol located in a stack slot
+ it is the stack offset; for one in a register, the register number.
+ For a typedef symbol, it is zero.
+
+ If DEBUG_SYMS_TEXT is defined, all debugging symbols must be
+ output while in the text section.
+
+ For more on data type definitions, see `dbxout_type'. */
+
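+/* As a hypothetical illustration of the format described above, a global
+   `int counter' and a function `main' might produce (type numbers and
+   label spelling vary by target):
+
+	.stabs "counter:G1",32,0,0,0
+	.stabs "main:F1",36,0,0,_main
+
+   32 is N_GSYM and 36 is N_FUN; `G' and `F' are kind-of-symbol letters,
+   and `1' is the dbx type number assigned to `int'.  */
+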
+/* Include these first, because they may define MIN and MAX. */
+#include <stdio.h>
+#include <errno.h>
+
+#include "config.h"
+#include "tree.h"
+#include "rtl.h"
+#include "flags.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "reload.h"
+#include "defaults.h"
+#include "output.h" /* ASM_OUTPUT_SOURCE_LINE may refer to sdb functions. */
+
+#ifndef errno
+extern int errno;
+#endif
+
+#ifdef XCOFF_DEBUGGING_INFO
+#include "xcoffout.h"
+#endif
+
+#ifndef ASM_STABS_OP
+#define ASM_STABS_OP ".stabs"
+#endif
+
+#ifndef ASM_STABN_OP
+#define ASM_STABN_OP ".stabn"
+#endif
+
+#ifndef DBX_TYPE_DECL_STABS_CODE
+#define DBX_TYPE_DECL_STABS_CODE N_LSYM
+#endif
+
+#ifndef DBX_STATIC_CONST_VAR_CODE
+#define DBX_STATIC_CONST_VAR_CODE N_FUN
+#endif
+
+#ifndef DBX_REGPARM_STABS_CODE
+#define DBX_REGPARM_STABS_CODE N_RSYM
+#endif
+
+#ifndef DBX_REGPARM_STABS_LETTER
+#define DBX_REGPARM_STABS_LETTER 'P'
+#endif
+
+#ifndef DBX_MEMPARM_STABS_LETTER
+#define DBX_MEMPARM_STABS_LETTER 'p'
+#endif
+
+#ifndef FILE_NAME_JOINER
+#define FILE_NAME_JOINER "/"
+#endif
+
+/* Nonzero means if the type has methods, only output debugging
+ information if methods are actually written to the asm file. */
+
+static int flag_minimal_debug = 1;
+
+/* Nonzero if we have actually used any of the GDB extensions
+ to the debugging format. The idea is that we use them for the
+ first time only if there's a strong reason, but once we have done that,
+ we use them whenever convenient. */
+
+static int have_used_extensions = 0;
+
+/* Number for the next N_SOL filename stabs label. The number 0 is reserved
+ for the N_SO filename stabs label. */
+
+static int source_label_number = 1;
+
+char *getpwd ();
+
+/* Typical USG systems don't have stab.h, and they also have
+ no use for DBX-format debugging info. */
+
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+
+#ifdef DEBUG_SYMS_TEXT
+#define FORCE_TEXT text_section ();
+#else
+#define FORCE_TEXT
+#endif
+
+#if defined (USG) || defined (NO_STAB_H)
+#include "gstab.h" /* If doing DBX on sysV, use our own stab.h. */
+#else
+#include <stab.h> /* On BSD, use the system's stab.h. */
+
+/* This is a GNU extension we need to reference in this file. */
+#ifndef N_CATCH
+#define N_CATCH 0x54
+#endif
+#endif /* not USG */
+
+#ifdef __GNU_STAB__
+#define STAB_CODE_TYPE enum __stab_debug_code
+#else
+#define STAB_CODE_TYPE int
+#endif
+
+/* 1 if PARM is passed to this function in memory. */
+
+#define PARM_PASSED_IN_MEMORY(PARM) \
+ (GET_CODE (DECL_INCOMING_RTL (PARM)) == MEM)
+
+/* A C expression for the integer offset value of an automatic variable
+ (N_LSYM) having address X (an RTX). */
+#ifndef DEBUGGER_AUTO_OFFSET
+#define DEBUGGER_AUTO_OFFSET(X) \
+ (GET_CODE (X) == PLUS ? INTVAL (XEXP (X, 1)) : 0)
+#endif
+
+/* A C expression for the integer offset value of an argument (N_PSYM)
+ having address X (an RTX). The nominal offset is OFFSET. */
+#ifndef DEBUGGER_ARG_OFFSET
+#define DEBUGGER_ARG_OFFSET(OFFSET, X) (OFFSET)
+#endif
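+
+/* Under the default definitions above, a local variable at address
+   (plus (reg fp) (const_int -8)) has a DEBUGGER_AUTO_OFFSET of -8, and
+   an argument's offset is just the nominal OFFSET (hypothetical
+   examples).  */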
+
+/* Stream for writing to assembler file. */
+
+static FILE *asmfile;
+
+/* Last source file name mentioned in a NOTE insn. */
+
+static char *lastfile;
+
+/* Current working directory. */
+
+static char *cwd;
+
+enum typestatus {TYPE_UNSEEN, TYPE_XREF, TYPE_DEFINED};
+
+/* Vector recording the status of describing C data types.
+ When we first notice a data type (a tree node),
+ we assign it a number using next_type_number.
+ That is its index in this vector.
+ The vector element says whether we have yet output
+ the definition of the type. TYPE_XREF says we have
+ output it as a cross-reference only. */
+
+enum typestatus *typevec;
+
+/* Number of elements of space allocated in `typevec'. */
+
+static int typevec_len;
+
+/* In dbx output, each type gets a unique number.
+ This is the number for the next type output.
+ The number, once assigned, is in the TYPE_SYMTAB_ADDRESS field. */
+
+static int next_type_number;
+
+/* In dbx output, we must assign symbol-blocks id numbers
+ in the order in which their beginnings are encountered.
+ We output debugging info that refers to the beginning and
+ end of the ranges of code in each block
+ with assembler labels LBBn and LBEn, where n is the block number.
+ The labels are generated in final, which assigns numbers to the
+ blocks in the same way. */
+
+static int next_block_number;
+
+/* These variables are for dbxout_symbol to communicate to
+ dbxout_finish_symbol.
+ current_sym_code is the symbol-type-code, a symbol N_... define in stab.h.
+ current_sym_value and current_sym_addr are two ways to address the
+ value to store in the symtab entry.
+ current_sym_addr if nonzero represents the value as an rtx.
+ If that is zero, current_sym_value is used. This is used
+ when the value is an offset (such as for auto variables,
+ register variables and parms). */
+
+static STAB_CODE_TYPE current_sym_code;
+static int current_sym_value;
+static rtx current_sym_addr;
+
+/* Number of chars of symbol-description generated so far for the
+ current symbol. Used by CHARS and CONTIN. */
+
+static int current_sym_nchars;
+
+/* Report having output N chars of the current symbol-description. */
+
+#define CHARS(N) (current_sym_nchars += (N))
+
+/* Break the current symbol-description, generating a continuation,
+ if it has become long. */
+
+#ifndef DBX_CONTIN_LENGTH
+#define DBX_CONTIN_LENGTH 80
+#endif
+
+#if DBX_CONTIN_LENGTH > 0
+#define CONTIN \
+ do {if (current_sym_nchars > DBX_CONTIN_LENGTH) dbxout_continue ();} while (0)
+#else
+#define CONTIN
+#endif
+
+void dbxout_types ();
+void dbxout_args ();
+void dbxout_symbol ();
+static void dbxout_type_name ();
+static void dbxout_type ();
+static void dbxout_typedefs ();
+static void dbxout_symbol_name ();
+static void dbxout_symbol_location ();
+static void dbxout_prepare_symbol ();
+static void dbxout_finish_symbol ();
+static void dbxout_continue ();
+static void print_int_cst_octal ();
+static void print_octal ();
+
+#if 0 /* Not clear we will actually need this. */
+
+/* Return the absolutized filename for the given relative
+ filename. Note that if that filename is already absolute, it may
+ still be returned in a modified form because this routine also
+ eliminates redundant slashes and single dots and eliminates double
+ dots to get a shortest possible filename from the given input
+ filename. The absolutization of relative filenames is made by
+ assuming that the given filename is to be taken as relative to
+ the first argument (cwd) or to the current directory if cwd is
+ NULL. */
+
+static char *
+abspath (rel_filename)
+ char *rel_filename;
+{
+  /* Set up the current working directory as needed.  */
+ char *abs_buffer
+ = (char *) alloca (strlen (cwd) + strlen (rel_filename) + 1);
+ char *endp = abs_buffer;
+ char *outp, *inp;
+ char *value;
+
+ /* Copy the filename (possibly preceded by the current working
+ directory name) into the absolutization buffer. */
+
+ {
+ char *src_p;
+
+ if (rel_filename[0] != '/')
+ {
+ src_p = cwd;
+ while (*endp++ = *src_p++)
+ continue;
+ *(endp-1) = '/'; /* overwrite null */
+ }
+ src_p = rel_filename;
+ while (*endp++ = *src_p++)
+ continue;
+ if (endp[-1] == '/')
+ *endp = '\0';
+
+    /* Now compact abs_buffer in place, shortening the filename
+       (by taking out slashes and dots) as we go.  */
+
+ outp = inp = abs_buffer;
+ *outp++ = *inp++; /* copy first slash */
+ for (;;)
+ {
+ if (!inp[0])
+ break;
+ else if (inp[0] == '/' && outp[-1] == '/')
+ {
+ inp++;
+ continue;
+ }
+ else if (inp[0] == '.' && outp[-1] == '/')
+ {
+ if (!inp[1])
+ break;
+ else if (inp[1] == '/')
+ {
+ inp += 2;
+ continue;
+ }
+ else if ((inp[1] == '.') && (inp[2] == 0 || inp[2] == '/'))
+ {
+ inp += (inp[2] == '/') ? 3 : 2;
+ outp -= 2;
+ while (outp >= abs_buffer && *outp != '/')
+ outp--;
+ if (outp < abs_buffer)
+ {
+ /* Catch cases like /.. where we try to backup to a
+ point above the absolute root of the logical file
+ system. */
+
+ fprintf (stderr, "%s: invalid file name: %s\n",
+ pname, rel_filename);
+ exit (1);
+ }
+ *++outp = '\0';
+ continue;
+ }
+ }
+ *outp++ = *inp++;
+ }
+
+ /* On exit, make sure that there is a trailing null, and make sure that
+ the last character of the returned string is *not* a slash. */
+
+ *outp = '\0';
+ if (outp[-1] == '/')
+ *--outp = '\0';
+
+ /* Make a copy (in the heap) of the stuff left in the absolutization
+ buffer and return a pointer to the copy. */
+
+ value = (char *) oballoc (strlen (abs_buffer) + 1);
+ strcpy (value, abs_buffer);
+ return value;
+}
+#endif /* 0 */
+
+/* At the beginning of compilation, start writing the symbol table.
+ Initialize `typevec' and output the standard data types of C. */
+
+void
+dbxout_init (asm_file, input_file_name, syms)
+ FILE *asm_file;
+ char *input_file_name;
+ tree syms;
+{
+ char ltext_label_name[100];
+
+ asmfile = asm_file;
+
+ typevec_len = 100;
+ typevec = (enum typestatus *) xmalloc (typevec_len * sizeof typevec[0]);
+ bzero ((char *) typevec, typevec_len * sizeof typevec[0]);
+
+ /* Convert Ltext into the appropriate format for local labels in case
+ the system doesn't insert underscores in front of user generated
+ labels. */
+ ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
+
+ /* Put the current working directory in an N_SO symbol. */
+#ifndef DBX_WORKING_DIRECTORY /* Only some versions of DBX want this,
+ but GDB always does. */
+ if (use_gnu_debug_info_extensions)
+#endif
+ {
+ if (!cwd && (cwd = getpwd ()) && (!*cwd || cwd[strlen (cwd) - 1] != '/'))
+ {
+ char *wdslash = xmalloc (strlen (cwd) + sizeof (FILE_NAME_JOINER));
+ sprintf (wdslash, "%s%s", cwd, FILE_NAME_JOINER);
+ cwd = wdslash;
+ }
+ if (cwd)
+ {
+#ifdef DBX_OUTPUT_MAIN_SOURCE_DIRECTORY
+ DBX_OUTPUT_MAIN_SOURCE_DIRECTORY (asmfile, cwd);
+#else /* no DBX_OUTPUT_MAIN_SOURCE_DIRECTORY */
+ fprintf (asmfile, "%s ", ASM_STABS_OP);
+ output_quoted_string (asmfile, cwd);
+ fprintf (asmfile, ",%d,0,0,%s\n", N_SO, &ltext_label_name[1]);
+#endif /* no DBX_OUTPUT_MAIN_SOURCE_DIRECTORY */
+ }
+ }
+
+#ifdef DBX_OUTPUT_MAIN_SOURCE_FILENAME
+ /* This should NOT be DBX_OUTPUT_SOURCE_FILENAME. That
+ would give us an N_SOL, and we want an N_SO. */
+ DBX_OUTPUT_MAIN_SOURCE_FILENAME (asmfile, input_file_name);
+#else /* no DBX_OUTPUT_MAIN_SOURCE_FILENAME */
+ /* We include outputting `Ltext:' here,
+ because that gives you a way to override it. */
+ /* Used to put `Ltext:' before the reference, but that loses on sun 4. */
+ fprintf (asmfile, "%s ", ASM_STABS_OP);
+ output_quoted_string (asmfile, input_file_name);
+ fprintf (asmfile, ",%d,0,0,%s\n",
+ N_SO, &ltext_label_name[1]);
+ text_section ();
+ ASM_OUTPUT_INTERNAL_LABEL (asmfile, "Ltext", 0);
+#endif /* no DBX_OUTPUT_MAIN_SOURCE_FILENAME */
+
+ /* Possibly output something to inform GDB that this compilation was by
+ GCC. It's easier for GDB to parse it when after the N_SO's. This
+ is used in Solaris 2. */
+#ifdef ASM_IDENTIFY_GCC_AFTER_SOURCE
+ ASM_IDENTIFY_GCC_AFTER_SOURCE (asmfile);
+#endif
+
+ lastfile = input_file_name;
+
+ next_type_number = 1;
+ next_block_number = 2;
+
+ /* Make sure that types `int' and `char' have numbers 1 and 2.
+ Definitions of other integer types will refer to those numbers.
+ (Actually it should no longer matter what their numbers are.
+ Also, if any types with tags have been defined, dbxout_symbol
+ will output them first, so the numbers won't be 1 and 2. That
+ happens in C++. So it's a good thing it should no longer matter). */
+
+#ifdef DBX_OUTPUT_STANDARD_TYPES
+ DBX_OUTPUT_STANDARD_TYPES (syms);
+#else
+ dbxout_symbol (TYPE_NAME (integer_type_node), 0);
+ dbxout_symbol (TYPE_NAME (char_type_node), 0);
+#endif
+
+ /* Get all permanent types that have typedef names,
+ and output them all, except for those already output. */
+
+ dbxout_typedefs (syms);
+}
+
+/* Output any typedef names for types described by TYPE_DECLs in SYMS,
+ in the reverse order from that which is found in SYMS. */
+
+static void
+dbxout_typedefs (syms)
+ tree syms;
+{
+ if (syms)
+ {
+ dbxout_typedefs (TREE_CHAIN (syms));
+ if (TREE_CODE (syms) == TYPE_DECL)
+ {
+ tree type = TREE_TYPE (syms);
+ if (TYPE_NAME (type)
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && ! TREE_ASM_WRITTEN (TYPE_NAME (type)))
+ dbxout_symbol (TYPE_NAME (type), 0);
+ }
+ }
+}
+
+/* Output debugging info to FILE to switch to sourcefile FILENAME. */
+
+void
+dbxout_source_file (file, filename)
+ FILE *file;
+ char *filename;
+{
+ char ltext_label_name[100];
+
+ if (filename && (lastfile == 0 || strcmp (filename, lastfile)))
+ {
+#ifdef DBX_OUTPUT_SOURCE_FILENAME
+ DBX_OUTPUT_SOURCE_FILENAME (file, filename);
+#else
+ ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext",
+ source_label_number);
+ fprintf (file, "%s ", ASM_STABS_OP);
+ output_quoted_string (file, filename);
+ fprintf (file, ",%d,0,0,%s\n", N_SOL, &ltext_label_name[1]);
+ text_section ();
+ ASM_OUTPUT_INTERNAL_LABEL (asmfile, "Ltext", source_label_number);
+ source_label_number++;
+#endif
+ lastfile = filename;
+ }
+}
+
+/* Output a line number symbol entry into output stream FILE,
+ for source file FILENAME and line number LINENO. */
+
+void
+dbxout_source_line (file, filename, lineno)
+ FILE *file;
+ char *filename;
+ int lineno;
+{
+ dbxout_source_file (file, filename);
+
+#ifdef ASM_OUTPUT_SOURCE_LINE
+ ASM_OUTPUT_SOURCE_LINE (file, lineno);
+#else
+ fprintf (file, "\t%s %d,0,%d\n", ASM_STABD_OP, N_SLINE, lineno);
+#endif
+}
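+
+/* With the default above, line 99 of the current source file comes out
+   as, e.g.,
+	.stabd 68,0,99
+   where 68 is N_SLINE (a hypothetical line number, for illustration).  */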
+
+/* At the end of compilation, finish writing the symbol table.
+ Unless you define DBX_OUTPUT_MAIN_SOURCE_FILE_END, the default is
+ to do nothing. */
+
+void
+dbxout_finish (file, filename)
+ FILE *file;
+ char *filename;
+{
+#ifdef DBX_OUTPUT_MAIN_SOURCE_FILE_END
+ DBX_OUTPUT_MAIN_SOURCE_FILE_END (file, filename);
+#endif /* DBX_OUTPUT_MAIN_SOURCE_FILE_END */
+}
+
+/* Continue a symbol-description that gets too big.
+ End one symbol table entry with a double-backslash
+ and start a new one, eventually producing something like
+ .stabs "start......\\",code,0,value
+ .stabs "...rest",code,0,value */
+
+static void
+dbxout_continue ()
+{
+#ifdef DBX_CONTIN_CHAR
+ fprintf (asmfile, "%c", DBX_CONTIN_CHAR);
+#else
+ fprintf (asmfile, "\\\\");
+#endif
+ dbxout_finish_symbol (NULL_TREE);
+ fprintf (asmfile, "%s \"", ASM_STABS_OP);
+ current_sym_nchars = 0;
+}
+
+/* Subroutine of `dbxout_type'. Output the type fields of TYPE.
+ This must be a separate function because anonymous unions require
+ recursive calls. */
+
+static void
+dbxout_type_fields (type)
+ tree type;
+{
+ tree tem;
+  /* Output the name, type, position (in bits), and size (in bits) of
+     each field.  */
+ for (tem = TYPE_FIELDS (type); tem; tem = TREE_CHAIN (tem))
+ {
+ /* Omit here local type decls until we know how to support them. */
+ if (TREE_CODE (tem) == TYPE_DECL)
+ continue;
+ /* Omit fields whose position or size are variable. */
+ else if (TREE_CODE (tem) == FIELD_DECL
+ && (TREE_CODE (DECL_FIELD_BITPOS (tem)) != INTEGER_CST
+ || TREE_CODE (DECL_SIZE (tem)) != INTEGER_CST))
+ continue;
+ /* Omit here the nameless fields that are used to skip bits. */
+ else if (TREE_CODE (tem) != CONST_DECL)
+ {
+ /* Continue the line if necessary,
+ but not before the first field. */
+ if (tem != TYPE_FIELDS (type))
+ CONTIN;
+
+ if (use_gnu_debug_info_extensions
+ && flag_minimal_debug
+ && TREE_CODE (tem) == FIELD_DECL
+ && DECL_VIRTUAL_P (tem)
+ && DECL_ASSEMBLER_NAME (tem))
+ {
+ have_used_extensions = 1;
+ CHARS (3 + IDENTIFIER_LENGTH (DECL_NAME (TYPE_NAME (DECL_FCONTEXT (tem)))));
+ fputs (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (tem)), asmfile);
+ dbxout_type (DECL_FCONTEXT (tem), 0, 0);
+ fprintf (asmfile, ":");
+ dbxout_type (TREE_TYPE (tem), 0, 0);
+ fprintf (asmfile, ",%d;",
+ TREE_INT_CST_LOW (DECL_FIELD_BITPOS (tem)));
+ continue;
+ }
+
+ if (DECL_NAME (tem))
+ {
+ fprintf (asmfile, "%s:", IDENTIFIER_POINTER (DECL_NAME (tem)));
+ CHARS (2 + IDENTIFIER_LENGTH (DECL_NAME (tem)));
+ }
+ else
+ {
+ fprintf (asmfile, ":");
+ CHARS (2);
+ }
+
+ if (use_gnu_debug_info_extensions
+ && (TREE_PRIVATE (tem) || TREE_PROTECTED (tem)
+ || TREE_CODE (tem) != FIELD_DECL))
+ {
+ have_used_extensions = 1;
+ putc ('/', asmfile);
+ putc ((TREE_PRIVATE (tem) ? '0'
+ : TREE_PROTECTED (tem) ? '1' : '2'),
+ asmfile);
+ CHARS (2);
+ }
+
+ dbxout_type ((TREE_CODE (tem) == FIELD_DECL
+ && DECL_BIT_FIELD_TYPE (tem))
+ ? DECL_BIT_FIELD_TYPE (tem)
+ : TREE_TYPE (tem), 0, 0);
+
+ if (TREE_CODE (tem) == VAR_DECL)
+ {
+ if (TREE_STATIC (tem) && use_gnu_debug_info_extensions)
+ {
+ char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (tem));
+ have_used_extensions = 1;
+ fprintf (asmfile, ":%s;", name);
+ CHARS (strlen (name));
+ }
+ else
+ {
+ /* If TEM is non-static, GDB won't understand it. */
+ fprintf (asmfile, ",0,0;");
+ }
+ }
+ else if (TREE_CODE (DECL_FIELD_BITPOS (tem)) == INTEGER_CST)
+ {
+ fprintf (asmfile, ",%d,%d;",
+ TREE_INT_CST_LOW (DECL_FIELD_BITPOS (tem)),
+ TREE_INT_CST_LOW (DECL_SIZE (tem)));
+ }
+ CHARS (23);
+ }
+ }
+}
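+
+/* For a hypothetical `struct s {int a; char b;}' on a machine with
+   32-bit ints, and with `int' and `char' as type numbers 1 and 2, the
+   field list comes out as `a:1,0,32;b:2,32,8;': name, type, bit
+   position, and bit size for each field.  */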
+
+/* Subroutine of `dbxout_type_methods'. Output debug info about the
+ method described DECL. DEBUG_NAME is an encoding of the method's
+   method described by DECL.  DEBUG_NAME is an encoding of the method's
+ now. */
+
+static void
+dbxout_type_method_1 (decl, debug_name)
+ tree decl;
+ char *debug_name;
+{
+ tree firstarg = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (decl)));
+ char c1 = 'A', c2;
+
+ if (TREE_CODE (TREE_TYPE (decl)) == FUNCTION_TYPE)
+ c2 = '?';
+ else /* it's a METHOD_TYPE. */
+ {
+ /* A for normal functions.
+ B for `const' member functions.
+ C for `volatile' member functions.
+ D for `const volatile' member functions. */
+ if (TYPE_READONLY (TREE_TYPE (firstarg)))
+ c1 += 1;
+ if (TYPE_VOLATILE (TREE_TYPE (firstarg)))
+ c1 += 2;
+
+ if (DECL_VINDEX (decl))
+ c2 = '*';
+ else
+ c2 = '.';
+ }
+
+ fprintf (asmfile, ":%s;%c%c%c", debug_name,
+ TREE_PRIVATE (decl) ? '0' : TREE_PROTECTED (decl) ? '1' : '2', c1, c2);
+ CHARS (IDENTIFIER_LENGTH (DECL_ASSEMBLER_NAME (decl)) + 6
+ - (debug_name - IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
+ if (DECL_VINDEX (decl))
+ {
+ fprintf (asmfile, "%d;",
+ TREE_INT_CST_LOW (DECL_VINDEX (decl)));
+ dbxout_type (DECL_CONTEXT (decl), 0, 0);
+ fprintf (asmfile, ";");
+ CHARS (8);
+ }
+}
+
+/* Subroutine of `dbxout_type'. Output debug info about the methods defined
+ in TYPE. */
+
+static void
+dbxout_type_methods (type)
+ register tree type;
+{
+ /* C++: put out the method names and their parameter lists */
+ tree methods = TYPE_METHODS (type);
+ tree type_encoding;
+ register tree fndecl;
+ register tree last;
+ char formatted_type_identifier_length[16];
+ register int type_identifier_length;
+
+ if (methods == NULL_TREE)
+ return;
+
+ type_encoding = DECL_NAME (TYPE_NAME (type));
+
+#if 0
+ /* C++: Template classes break some assumptions made by this code about
+ the class names, constructor names, and encodings for assembler
+ label names. For now, disable output of dbx info for them. */
+ {
+ char *ptr = IDENTIFIER_POINTER (type_encoding);
+ /* This should use index. (mrs) */
+ while (*ptr && *ptr != '<') ptr++;
+ if (*ptr != 0)
+ {
+ static int warned;
+ if (!warned)
+ {
+ warned = 1;
+#ifdef HAVE_TEMPLATES
+ if (warn_template_debugging)
+ warning ("dbx info for template class methods not yet supported");
+#endif
+ }
+ return;
+ }
+ }
+#endif
+
+ type_identifier_length = IDENTIFIER_LENGTH (type_encoding);
+
+  sprintf (formatted_type_identifier_length, "%d", type_identifier_length);
+
+ if (TREE_CODE (methods) == FUNCTION_DECL)
+ fndecl = methods;
+ else if (TREE_VEC_ELT (methods, 0) != NULL_TREE)
+ fndecl = TREE_VEC_ELT (methods, 0);
+ else
+ fndecl = TREE_VEC_ELT (methods, 1);
+
+ while (fndecl)
+ {
+ tree name = DECL_NAME (fndecl);
+ int need_prefix = 1;
+
+ /* Group together all the methods for the same operation.
+ These differ in the types of the arguments. */
+ for (last = NULL_TREE;
+ fndecl && (last == NULL_TREE || DECL_NAME (fndecl) == DECL_NAME (last));
+ fndecl = TREE_CHAIN (fndecl))
+ /* Output the name of the field (after overloading), as
+ well as the name of the field before overloading, along
+ with its parameter list */
+ {
+ /* This is the "mangled" name of the method.
+ It encodes the argument types. */
+ char *debug_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (fndecl));
+ int destructor = 0;
+
+ CONTIN;
+
+ last = fndecl;
+
+ if (DECL_IGNORED_P (fndecl))
+ continue;
+
+ if (flag_minimal_debug)
+ {
+ /* Detect ordinary methods because their mangled names
+ start with the operation name. */
+ if (!strncmp (IDENTIFIER_POINTER (name), debug_name,
+ IDENTIFIER_LENGTH (name)))
+ {
+ debug_name += IDENTIFIER_LENGTH (name);
+ if (debug_name[0] == '_' && debug_name[1] == '_')
+ {
+ char *method_name = debug_name + 2;
+ char *length_ptr = formatted_type_identifier_length;
+ /* Get past const and volatile qualifiers. */
+ while (*method_name == 'C' || *method_name == 'V')
+ method_name++;
+ /* Skip digits for length of type_encoding. */
+ while (*method_name == *length_ptr && *length_ptr)
+ length_ptr++, method_name++;
+ if (! strncmp (method_name,
+ IDENTIFIER_POINTER (type_encoding),
+ type_identifier_length))
+ method_name += type_identifier_length;
+ debug_name = method_name;
+ }
+ }
+ /* Detect constructors by their style of name mangling. */
+ else if (debug_name[0] == '_' && debug_name[1] == '_')
+ {
+ char *ctor_name = debug_name + 2;
+ char *length_ptr = formatted_type_identifier_length;
+ while (*ctor_name == 'C' || *ctor_name == 'V')
+ ctor_name++;
+ /* Skip digits for length of type_encoding. */
+ while (*ctor_name == *length_ptr && *length_ptr)
+ length_ptr++, ctor_name++;
+ if (!strncmp (IDENTIFIER_POINTER (type_encoding), ctor_name,
+ type_identifier_length))
+ debug_name = ctor_name + type_identifier_length;
+ }
+ /* The other alternative is a destructor. */
+ else
+ destructor = 1;
+
+ /* Output the operation name just once, for the first method
+ that we output. */
+ if (need_prefix)
+ {
+ fprintf (asmfile, "%s::", IDENTIFIER_POINTER (name));
+ CHARS (IDENTIFIER_LENGTH (name) + 2);
+ need_prefix = 0;
+ }
+ }
+
+ dbxout_type (TREE_TYPE (fndecl), 0, destructor);
+
+ dbxout_type_method_1 (fndecl, debug_name);
+ }
+ if (!need_prefix)
+ {
+ putc (';', asmfile);
+ CHARS (1);
+ }
+ }
+}
+
+/* Emit a "range" type specification, which has the form:
+ "r<index type>;<lower bound>;<upper bound>;".
+ TYPE is an INTEGER_TYPE. */
+
+static void
+dbxout_range_type (type)
+ tree type;
+{
+ fprintf (asmfile, "r");
+ if (TREE_TYPE (type))
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ else if (TREE_CODE (type) != INTEGER_TYPE)
+ dbxout_type (type, 0, 0); /* E.g. Pascal's ARRAY [BOOLEAN] of INTEGER */
+ else
+ {
+ /* This used to say `r1' and we used to take care
+ to make sure that `int' was type number 1. */
+ fprintf (asmfile, "%d", TYPE_SYMTAB_ADDRESS (integer_type_node));
+ }
+ if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST)
+ fprintf (asmfile, ";%d",
+ TREE_INT_CST_LOW (TYPE_MIN_VALUE (type)));
+ else
+ fprintf (asmfile, ";0");
+ if (TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST)
+ fprintf (asmfile, ";%d;",
+ TREE_INT_CST_LOW (TYPE_MAX_VALUE (type)));
+ else
+ fprintf (asmfile, ";-1;");
+}
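+
+/* For example, `unsigned char' comes out as `r1;0;255;' (a subrange of
+   type 1 running from 0 to 255), assuming `int' was assigned type
+   number 1.  */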
+
+/* Output a reference to a type. If the type has not yet been
+ described in the dbx output, output its definition now.
+ For a type already defined, just refer to its definition
+ using the type number.
+
+ If FULL is nonzero, and the type has been described only with
+ a forward-reference, output the definition now.
+ If FULL is zero in this case, just refer to the forward-reference
+ using the number previously allocated.
+
+ If SHOW_ARG_TYPES is nonzero, we output a description of the argument
+ types for a METHOD_TYPE. */
+
+static void
+dbxout_type (type, full, show_arg_types)
+ tree type;
+ int full;
+ int show_arg_types;
+{
+ register tree tem;
+ static int anonymous_type_number = 0;
+
+ /* If there was an input error and we don't really have a type,
+ avoid crashing and write something that is at least valid
+ by assuming `int'. */
+ if (type == error_mark_node)
+ type = integer_type_node;
+ else
+ {
+ type = TYPE_MAIN_VARIANT (type);
+ if (TYPE_NAME (type)
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (type)))
+ full = 0;
+ }
+
+ if (TYPE_SYMTAB_ADDRESS (type) == 0)
+ {
+ /* Type has no dbx number assigned. Assign next available number. */
+ TYPE_SYMTAB_ADDRESS (type) = next_type_number++;
+
+ /* Make sure type vector is long enough to record about this type. */
+
+ if (next_type_number == typevec_len)
+ {
+ typevec =
+ (enum typestatus *) xrealloc (typevec,
+ typevec_len * 2 * sizeof typevec[0]);
+ bzero ((char *) (typevec + typevec_len),
+ typevec_len * sizeof typevec[0]);
+ typevec_len *= 2;
+ }
+ }
+
+ /* Output the number of this type, to refer to it. */
+ fprintf (asmfile, "%d", TYPE_SYMTAB_ADDRESS (type));
+ CHARS (3);
+
+#ifdef DBX_TYPE_DEFINED
+ if (DBX_TYPE_DEFINED (type))
+ return;
+#endif
+
+ /* If this type's definition has been output or is now being output,
+ that is all. */
+
+ switch (typevec[TYPE_SYMTAB_ADDRESS (type)])
+ {
+ case TYPE_UNSEEN:
+ break;
+ case TYPE_XREF:
+ /* If we have already had a cross reference,
+ and either that's all we want or that's the best we could do,
+ don't repeat the cross reference.
+ Sun dbx crashes if we do. */
+ if (! full || TYPE_SIZE (type) == 0
+ /* No way in DBX fmt to describe a variable size. */
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ return;
+ break;
+ case TYPE_DEFINED:
+ return;
+ }
+
+#ifdef DBX_NO_XREFS
+ /* For systems where dbx output does not allow the `=xsNAME:' syntax,
+ leave the type-number completely undefined rather than output
+ a cross-reference. If we have already used GNU debug info extensions,
+ then it is OK to output a cross reference. This is necessary to get
+ proper C++ debug output. */
+ if ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE
+ || TREE_CODE (type) == ENUMERAL_TYPE)
+ && ! use_gnu_debug_info_extensions)
+ /* We must use the same test here as we use twice below when deciding
+ whether to emit a cross-reference. */
+ if ((TYPE_NAME (type) != 0
+ && ! (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_IGNORED_P (TYPE_NAME (type)))
+ && !full)
+ || TYPE_SIZE (type) == 0
+ /* No way in DBX fmt to describe a variable size. */
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ {
+ typevec[TYPE_SYMTAB_ADDRESS (type)] = TYPE_XREF;
+ return;
+ }
+#endif
+
+ /* Output a definition now. */
+
+ fprintf (asmfile, "=");
+ CHARS (1);
+
+ /* Mark it as defined, so that if it is self-referent
+ we will not get into an infinite recursion of definitions. */
+
+ typevec[TYPE_SYMTAB_ADDRESS (type)] = TYPE_DEFINED;
+
+ switch (TREE_CODE (type))
+ {
+ case VOID_TYPE:
+ case LANG_TYPE:
+      /* For a void type, just define it as itself; i.e., "5=5".
+ This makes us consider it defined
+ without saying what it is. The debugger will make it
+ a void type when the reference is seen, and nothing will
+ ever override that default. */
+ fprintf (asmfile, "%d", TYPE_SYMTAB_ADDRESS (type));
+ CHARS (3);
+ break;
+
+ case INTEGER_TYPE:
+ if (type == char_type_node && ! TREE_UNSIGNED (type))
+ /* Output the type `char' as a subrange of itself!
+ I don't understand this definition, just copied it
+ from the output of pcc.
+ This used to use `r2' explicitly and we used to
+ take care to make sure that `char' was type number 2. */
+ fprintf (asmfile, "r%d;0;127;", TYPE_SYMTAB_ADDRESS (type));
+ else if (use_gnu_debug_info_extensions
+ && (TYPE_PRECISION (type) > TYPE_PRECISION (integer_type_node)
+ || TYPE_PRECISION (type) > HOST_BITS_PER_WIDE_INT))
+ {
+ /* This used to say `r1' and we used to take care
+ to make sure that `int' was type number 1. */
+ fprintf (asmfile, "r%d;", TYPE_SYMTAB_ADDRESS (integer_type_node));
+ print_int_cst_octal (TYPE_MIN_VALUE (type));
+ fprintf (asmfile, ";");
+ print_int_cst_octal (TYPE_MAX_VALUE (type));
+ fprintf (asmfile, ";");
+ }
+ else /* Output other integer types as subranges of `int'. */
+ dbxout_range_type (type);
+ CHARS (25);
+ break;
+
+ case REAL_TYPE:
+ /* This used to say `r1' and we used to take care
+ to make sure that `int' was type number 1. */
+ fprintf (asmfile, "r%d;%d;0;", TYPE_SYMTAB_ADDRESS (integer_type_node),
+ int_size_in_bytes (type));
+ CHARS (16);
+ break;
+
+ case CHAR_TYPE:
+ if (use_gnu_debug_info_extensions)
+ fprintf (asmfile, "@s%d;-20;",
+ BITS_PER_UNIT * int_size_in_bytes (type));
+ else
+ /* Output the type `char' as a subrange of itself.
+ That is what pcc seems to do. */
+ fprintf (asmfile, "r%d;0;%d;", TYPE_SYMTAB_ADDRESS (char_type_node),
+ TREE_UNSIGNED (type) ? 255 : 127);
+ CHARS (9);
+ break;
+
+ case BOOLEAN_TYPE:
+ if (use_gnu_debug_info_extensions)
+ fprintf (asmfile, "@s%d;-16;",
+ BITS_PER_UNIT * int_size_in_bytes (type));
+ else /* Define as enumeral type (False, True) */
+ fprintf (asmfile, "eFalse:0,True:1,;");
+ CHARS (17);
+ break;
+
+ case FILE_TYPE:
+ putc ('d', asmfile);
+ CHARS (1);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ break;
+
+ case COMPLEX_TYPE:
+ /* Differs from the REAL_TYPE by its new data type number */
+
+ if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
+ {
+ fprintf (asmfile, "r%d;%d;0;",
+ TYPE_SYMTAB_ADDRESS (type),
+ int_size_in_bytes (TREE_TYPE (type)));
+ CHARS (15); /* The number is probably incorrect here. */
+ }
+ else
+ {
+ /* Output a complex integer type as a structure,
+ pending some other way to do it. */
+ fprintf (asmfile, "s%d", int_size_in_bytes (type));
+
+ fprintf (asmfile, "real:");
+ CHARS (10);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ fprintf (asmfile, ",%d,%d;",
+ 0, TYPE_PRECISION (TREE_TYPE (type)));
+ CHARS (8);
+ fprintf (asmfile, "imag:");
+ CHARS (5);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ fprintf (asmfile, ",%d,%d;;",
+ TYPE_PRECISION (TREE_TYPE (type)),
+ TYPE_PRECISION (TREE_TYPE (type)));
+ CHARS (9);
+ }
+ break;
+
+ case SET_TYPE:
+ if (use_gnu_debug_info_extensions)
+ {
+ have_used_extensions = 1;
+ fprintf (asmfile, "@s%d;",
+ BITS_PER_UNIT * int_size_in_bytes (type));
+ /* Check if a bitstring type, which in Chill is
+ different from a [power]set. */
+ if (TYPE_STRING_FLAG (type))
+ fprintf (asmfile, "@S;");
+ }
+ putc ('S', asmfile);
+ CHARS (1);
+ dbxout_type (TYPE_DOMAIN (type), 0, 0);
+ break;
+
+ case ARRAY_TYPE:
+ /* Output "a" followed by a range type definition
+ for the index type of the array
+ followed by a reference to the target-type.
+ ar1;0;N;M for a C array of type M and size N+1. */
+ /* Check if a character string type, which in Chill is
+ different from an array of characters. */
+ if (TYPE_STRING_FLAG (type) && use_gnu_debug_info_extensions)
+ {
+ have_used_extensions = 1;
+ fprintf (asmfile, "@S;");
+ }
+ tem = TYPE_DOMAIN (type);
+ if (tem == NULL)
+ fprintf (asmfile, "ar%d;0;-1;",
+ TYPE_SYMTAB_ADDRESS (integer_type_node));
+ else
+ {
+ fprintf (asmfile, "a");
+ dbxout_range_type (tem);
+ }
+ CHARS (17);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ break;
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ {
+ int i, n_baseclasses = 0;
+
+ if (TYPE_BINFO (type) != 0 && TYPE_BINFO_BASETYPES (type) != 0)
+ n_baseclasses = TREE_VEC_LENGTH (TYPE_BINFO_BASETYPES (type));
+
+ /* Output a structure type. We must use the same test here as we
+ use in the DBX_NO_XREFS case above. */
+ if ((TYPE_NAME (type) != 0
+ && ! (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_IGNORED_P (TYPE_NAME (type)))
+ && !full)
+ || TYPE_SIZE (type) == 0
+ /* No way in DBX fmt to describe a variable size. */
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ {
+ /* If the type is just a cross reference, output one
+ and mark the type as partially described.
+ If it later becomes defined, we will output
+ its real definition.
+ If the type has a name, don't nest its definition within
+ another type's definition; instead, output an xref
+ and let the definition come when the name is defined. */
+ fprintf (asmfile, (TREE_CODE (type) == RECORD_TYPE) ? "xs" : "xu");
+ CHARS (3);
+#if 0 /* This assertion is legitimately false in C++. */
+ /* We shouldn't be outputting a reference to a type before its
+ definition unless the type has a tag name.
+ A typedef name without a tag name should be impossible. */
+ if (TREE_CODE (TYPE_NAME (type)) != IDENTIFIER_NODE)
+ abort ();
+#endif
+ if (TYPE_NAME (type) != 0)
+ dbxout_type_name (type);
+ else
+ fprintf (asmfile, "$$%d", anonymous_type_number++);
+ fprintf (asmfile, ":");
+ typevec[TYPE_SYMTAB_ADDRESS (type)] = TYPE_XREF;
+ break;
+ }
+
+ /* Identify record or union, and print its size. */
+ fprintf (asmfile, (TREE_CODE (type) == RECORD_TYPE) ? "s%d" : "u%d",
+ int_size_in_bytes (type));
+
+ if (use_gnu_debug_info_extensions)
+ {
+ if (n_baseclasses)
+ {
+ have_used_extensions = 1;
+ fprintf (asmfile, "!%d,", n_baseclasses);
+ CHARS (8);
+ }
+ }
+ for (i = 0; i < n_baseclasses; i++)
+ {
+ tree child = TREE_VEC_ELT (BINFO_BASETYPES (TYPE_BINFO (type)), i);
+ if (use_gnu_debug_info_extensions)
+ {
+ have_used_extensions = 1;
+ putc (TREE_VIA_VIRTUAL (child) ? '1'
+ : '0',
+ asmfile);
+ putc (TREE_VIA_PUBLIC (child) ? '2'
+ : '0',
+ asmfile);
+ fprintf (asmfile, "%d,",
+ TREE_INT_CST_LOW (BINFO_OFFSET (child)) * BITS_PER_UNIT);
+ CHARS (15);
+ dbxout_type (BINFO_TYPE (child), 0, 0);
+ putc (';', asmfile);
+ }
+ else
+ {
+	      /* Print out the base class information with fields
+		 which have the same names as the types they hold.  */
+ dbxout_type_name (BINFO_TYPE (child));
+ putc (':', asmfile);
+ dbxout_type (BINFO_TYPE (child), full, 0);
+ fprintf (asmfile, ",%d,%d;",
+ TREE_INT_CST_LOW (BINFO_OFFSET (child)) * BITS_PER_UNIT,
+ TREE_INT_CST_LOW (DECL_SIZE (TYPE_NAME (BINFO_TYPE (child)))) * BITS_PER_UNIT);
+ CHARS (20);
+ }
+ }
+ }
+
+ CHARS (11);
+
+ /* Write out the field declarations. */
+ dbxout_type_fields (type);
+ if (use_gnu_debug_info_extensions && TYPE_METHODS (type) != NULL_TREE)
+ {
+ have_used_extensions = 1;
+ dbxout_type_methods (type);
+ }
+ putc (';', asmfile);
+
+ if (use_gnu_debug_info_extensions && TREE_CODE (type) == RECORD_TYPE
+ /* Avoid the ~ if we don't really need it--it confuses dbx. */
+ && TYPE_VFIELD (type))
+ {
+ have_used_extensions = 1;
+
+ /* Tell GDB+ that it may keep reading. */
+ putc ('~', asmfile);
+
+ /* We need to write out info about what field this class
+ uses as its "main" vtable pointer field, because if this
+ field is inherited from a base class, GDB cannot necessarily
+ figure out which field it's using in time. */
+ if (TYPE_VFIELD (type))
+ {
+ putc ('%', asmfile);
+ dbxout_type (DECL_FCONTEXT (TYPE_VFIELD (type)), 0, 0);
+ }
+ putc (';', asmfile);
+ CHARS (3);
+ }
+ break;
+
+ case ENUMERAL_TYPE:
+ /* We must use the same test here as we use in the DBX_NO_XREFS case
+ above. We simplify it a bit since an enum will never have a variable
+ size. */
+ if ((TYPE_NAME (type) != 0
+ && ! (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_IGNORED_P (TYPE_NAME (type)))
+ && !full)
+ || TYPE_SIZE (type) == 0)
+ {
+ fprintf (asmfile, "xe");
+ CHARS (3);
+ dbxout_type_name (type);
+ typevec[TYPE_SYMTAB_ADDRESS (type)] = TYPE_XREF;
+ fprintf (asmfile, ":");
+ return;
+ }
+#ifdef DBX_OUTPUT_ENUM
+ DBX_OUTPUT_ENUM (asmfile, type);
+#else
+ if (use_gnu_debug_info_extensions
+ && TYPE_PRECISION (type) != TYPE_PRECISION (integer_type_node))
+ fprintf (asmfile, "@s%d;", TYPE_PRECISION (type));
+ putc ('e', asmfile);
+ CHARS (1);
+ for (tem = TYPE_VALUES (type); tem; tem = TREE_CHAIN (tem))
+ {
+ fprintf (asmfile, "%s:", IDENTIFIER_POINTER (TREE_PURPOSE (tem)));
+ if (TREE_INT_CST_HIGH (TREE_VALUE (tem)) == 0)
+ fprintf (asmfile, "%lu",
+ (unsigned long) TREE_INT_CST_LOW (TREE_VALUE (tem)));
+ else if (TREE_INT_CST_HIGH (TREE_VALUE (tem)) == -1
+ && TREE_INT_CST_LOW (TREE_VALUE (tem)) < 0)
+ fprintf (asmfile, "%ld",
+ (long) TREE_INT_CST_LOW (TREE_VALUE (tem)));
+ else
+ print_int_cst_octal (TREE_VALUE (tem));
+ fprintf (asmfile, ",");
+ CHARS (20 + IDENTIFIER_LENGTH (TREE_PURPOSE (tem)));
+ if (TREE_CHAIN (tem) != 0)
+ CONTIN;
+ }
+ putc (';', asmfile);
+ CHARS (1);
+#endif
+ break;
+
+ case POINTER_TYPE:
+ putc ('*', asmfile);
+ CHARS (1);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ break;
+
+ case METHOD_TYPE:
+ if (use_gnu_debug_info_extensions)
+ {
+ have_used_extensions = 1;
+ putc ('#', asmfile);
+ CHARS (1);
+ if (flag_minimal_debug && !show_arg_types)
+ {
+ /* Normally, just output the return type.
+ The argument types are encoded in the method name. */
+ putc ('#', asmfile);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ putc (';', asmfile);
+ CHARS (1);
+ }
+ else
+ {
+ /* When outputting destructors, we need to write
+ the argument types out longhand. */
+ dbxout_type (TYPE_METHOD_BASETYPE (type), 0, 0);
+ putc (',', asmfile);
+ CHARS (1);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ dbxout_args (TYPE_ARG_TYPES (type));
+ putc (';', asmfile);
+ CHARS (1);
+ }
+ }
+ else
+ {
+ /* Treat it as a function type. */
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ }
+ break;
+
+ case OFFSET_TYPE:
+ if (use_gnu_debug_info_extensions)
+ {
+ have_used_extensions = 1;
+ putc ('@', asmfile);
+ CHARS (1);
+ dbxout_type (TYPE_OFFSET_BASETYPE (type), 0, 0);
+ putc (',', asmfile);
+ CHARS (1);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ }
+ else
+ {
+ /* Should print as an int, because it is really
+ just an offset. */
+ dbxout_type (integer_type_node, 0, 0);
+ }
+ break;
+
+ case REFERENCE_TYPE:
+ if (use_gnu_debug_info_extensions)
+ have_used_extensions = 1;
+ putc (use_gnu_debug_info_extensions ? '&' : '*', asmfile);
+ CHARS (1);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ break;
+
+ case FUNCTION_TYPE:
+ putc ('f', asmfile);
+ CHARS (1);
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ break;
+
+ default:
+ abort ();
+ }
+}
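+
+/* For reference, a few illustrative examples (not compiler output; the
+   stabs type numbers are hypothetical, with `1' standing for whatever
+   number `int' was assigned) of the descriptors the switch above emits:
+
+	int *p;			->  *1		(POINTER_TYPE)
+	int f ();		->  f1		(FUNCTION_TYPE)
+	int &r;			->  &1		(REFERENCE_TYPE with the GNU
+					 extensions; plain dbx gets *1)
+	enum e {A, B = 5};	->  eA:0,B:5,;	(ENUMERAL_TYPE)  */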
+
+/* Print the value of integer constant C, in octal,
+ handling double precision. */
+
+static void
+print_int_cst_octal (c)
+ tree c;
+{
+ unsigned HOST_WIDE_INT high = TREE_INT_CST_HIGH (c);
+ unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (c);
+ int excess = (3 - (HOST_BITS_PER_WIDE_INT % 3));
+ int width = TYPE_PRECISION (TREE_TYPE (c));
+
+ /* GDB wants constants with no extra leading "1" bits, so
+ we need to remove any sign-extension that might be
+ present. */
+ if (width == HOST_BITS_PER_WIDE_INT * 2)
+ ;
+ else if (width > HOST_BITS_PER_WIDE_INT)
+ high &= (((HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT)) - 1);
+ else if (width == HOST_BITS_PER_WIDE_INT)
+ high = 0;
+ else
+ high = 0, low &= (((HOST_WIDE_INT) 1 << width) - 1);
+
+ fprintf (asmfile, "0");
+
+ if (excess == 3)
+ {
+ print_octal (high, HOST_BITS_PER_WIDE_INT / 3);
+ print_octal (low, HOST_BITS_PER_WIDE_INT / 3);
+ }
+ else
+ {
+ unsigned HOST_WIDE_INT beg = high >> excess;
+ unsigned HOST_WIDE_INT middle
+ = ((high & (((HOST_WIDE_INT) 1 << excess) - 1)) << (3 - excess)
+ | (low >> (HOST_BITS_PER_WIDE_INT / 3 * 3)));
+ unsigned HOST_WIDE_INT end
+ = low & (((unsigned HOST_WIDE_INT) 1
+ << (HOST_BITS_PER_WIDE_INT / 3 * 3))
+ - 1);
+
+ fprintf (asmfile, "%o%01o", beg, middle);
+ print_octal (end, HOST_BITS_PER_WIDE_INT / 3);
+ }
+}
+
+static void
+print_octal (value, digits)
+ unsigned HOST_WIDE_INT value;
+ int digits;
+{
+ int i;
+
+ for (i = digits - 1; i >= 0; i--)
+ fprintf (asmfile, "%01o", ((value >> (3 * i)) & 7));
+}
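+
+/* A worked example of the split performed by print_int_cst_octal,
+   assuming a hypothetical host with HOST_BITS_PER_WIDE_INT == 32,
+   where excess = 3 - (32 % 3) = 1.  The sketch is compiled out and
+   exists only to illustrate the arithmetic.  */
+#if 0
+static void
+example_octal_split ()
+{
+  unsigned high = 0x00000001, low = 0x80000000;		/* value 0x180000000 */
+  unsigned beg = high >> 1;				/* top 31 bits: 0 */
+  unsigned middle = ((high & 1) << 2) | (low >> 30);	/* next 3 bits: 6 */
+  unsigned end = low & ((1U << 30) - 1);		/* low 30 bits: 0 */
+
+  /* Prints "0" "0" "6", and print_octal (end, 10) supplies the ten
+     trailing zeros: 0x180000000 == 060000000000 octal.  31 + 3 + 30
+     bits cover the whole 64-bit quantity.  */
+  printf ("0%o%01o", beg, middle);
+}
+#endif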
+
+/* Output the name of type TYPE, with no punctuation.
+ Such names can be set up either by typedef declarations
+ or by struct, enum and union tags. */
+
+static void
+dbxout_type_name (type)
+ register tree type;
+{
+ tree t;
+ if (TYPE_NAME (type) == 0)
+ abort ();
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ {
+ t = TYPE_NAME (type);
+ }
+ else if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL)
+ {
+ t = DECL_NAME (TYPE_NAME (type));
+ }
+ else
+ abort ();
+
+ fprintf (asmfile, "%s", IDENTIFIER_POINTER (t));
+ CHARS (IDENTIFIER_LENGTH (t));
+}
+
+/* Output a .stabs for the symbol defined by DECL,
+ which must be a ..._DECL node in the normal namespace.
+ It may be a CONST_DECL, a FUNCTION_DECL, a PARM_DECL or a VAR_DECL.
+ LOCAL is nonzero if the scope is less than the entire file. */
+
+void
+dbxout_symbol (decl, local)
+ tree decl;
+ int local;
+{
+ tree type = TREE_TYPE (decl);
+ tree context = NULL_TREE;
+
+ /* Cast avoids warning in old compilers. */
+ current_sym_code = (STAB_CODE_TYPE) 0;
+ current_sym_value = 0;
+ current_sym_addr = 0;
+
+ /* Ignore nameless syms, but don't ignore type tags. */
+
+ if ((DECL_NAME (decl) == 0 && TREE_CODE (decl) != TYPE_DECL)
+ || DECL_IGNORED_P (decl))
+ return;
+
+ dbxout_prepare_symbol (decl);
+
+ /* The output will always start with the symbol name,
+ so always count that in the length-output-so-far. */
+
+ if (DECL_NAME (decl) != 0)
+ current_sym_nchars = 2 + IDENTIFIER_LENGTH (DECL_NAME (decl));
+
+ switch (TREE_CODE (decl))
+ {
+ case CONST_DECL:
+ /* Enum values are defined by defining the enum type. */
+ break;
+
+ case FUNCTION_DECL:
+ if (DECL_RTL (decl) == 0)
+ return;
+ if (DECL_EXTERNAL (decl))
+ break;
+ /* Don't mention a nested function under its parent. */
+ context = decl_function_context (decl);
+ if (context == current_function_decl)
+ break;
+ if (GET_CODE (DECL_RTL (decl)) != MEM
+ || GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF)
+ break;
+ FORCE_TEXT;
+
+ fprintf (asmfile, "%s \"%s:%c", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)),
+ TREE_PUBLIC (decl) ? 'F' : 'f');
+
+ current_sym_code = N_FUN;
+ current_sym_addr = XEXP (DECL_RTL (decl), 0);
+
+ if (TREE_TYPE (type))
+ dbxout_type (TREE_TYPE (type), 0, 0);
+ else
+ dbxout_type (void_type_node, 0, 0);
+
+ /* For a nested function, when that function is compiled,
+ mention the containing function name
+ as well as (since dbx wants it) our own assembler-name. */
+ if (context != 0)
+ fprintf (asmfile, ",%s,%s",
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)),
+ IDENTIFIER_POINTER (DECL_NAME (context)));
+
+ dbxout_finish_symbol (decl);
+ break;
+
+ case TYPE_DECL:
+#if 0
+ /* This seems all wrong. Outputting most kinds of types gives no name
+ at all. A true definition gives no name; a cross-ref for a
+ structure can give the tag name, but not a type name.
+ It seems that no typedef name is defined by outputting a type. */
+
+ /* If this typedef name was defined by outputting the type,
+ don't duplicate it. */
+ if (typevec[TYPE_SYMTAB_ADDRESS (type)] == TYPE_DEFINED
+ && TYPE_NAME (TREE_TYPE (decl)) == decl)
+ return;
+#endif
+ /* Don't output the same typedef twice.
+ And don't output what language-specific stuff doesn't want output. */
+ if (TREE_ASM_WRITTEN (decl) || TYPE_DECL_SUPPRESS_DEBUG (decl))
+ return;
+
+ FORCE_TEXT;
+
+ {
+ int tag_needed = 1;
+ int did_output = 0;
+
+ if (DECL_NAME (decl))
+ {
+ /* Nonzero means we must output a tag as well as a typedef. */
+ tag_needed = 0;
+
+ /* Handle the case of a C++ structure or union
+ where the TYPE_NAME is a TYPE_DECL
+ which gives both a typedef name and a tag. */
+ /* dbx requires the tag first and the typedef second. */
+ if ((TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ && TYPE_NAME (type) == decl
+ && !(use_gnu_debug_info_extensions && have_used_extensions)
+ && !TREE_ASM_WRITTEN (TYPE_NAME (type))
+ /* Distinguish the implicit typedefs of C++
+ from explicit ones that might be found in C. */
+ && (!strcmp (lang_identify (), "cplusplus")
+			  /* The following line may be unnecessary;
+ in 2.6, try removing it. */
+ || DECL_SOURCE_LINE (decl) == 0))
+ {
+ tree name = TYPE_NAME (type);
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+
+ current_sym_code = DBX_TYPE_DECL_STABS_CODE;
+ current_sym_value = 0;
+ current_sym_addr = 0;
+ current_sym_nchars = 2 + IDENTIFIER_LENGTH (name);
+
+ fprintf (asmfile, "%s \"%s:T", ASM_STABS_OP,
+ IDENTIFIER_POINTER (name));
+ dbxout_type (type, 1, 0);
+ dbxout_finish_symbol (NULL_TREE);
+ }
+
+ /* Output typedef name. */
+ fprintf (asmfile, "%s \"%s:", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+
+ /* Short cut way to output a tag also. */
+ if ((TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ && TYPE_NAME (type) == decl)
+ {
+ if (use_gnu_debug_info_extensions && have_used_extensions)
+ {
+ putc ('T', asmfile);
+ TREE_ASM_WRITTEN (TYPE_NAME (type)) = 1;
+ }
+#if 0 /* Now we generate the tag for this case up above. */
+ else
+ tag_needed = 1;
+#endif
+ }
+
+ putc ('t', asmfile);
+ current_sym_code = DBX_TYPE_DECL_STABS_CODE;
+
+ dbxout_type (type, 1, 0);
+ dbxout_finish_symbol (decl);
+ did_output = 1;
+ }
+
+ /* Don't output a tag if this is an incomplete type (TYPE_SIZE is
+ zero). This prevents the sun4 Sun OS 4.x dbx from crashing. */
+
+ if (tag_needed && TYPE_NAME (type) != 0 && TYPE_SIZE (type) != 0
+ && !TREE_ASM_WRITTEN (TYPE_NAME (type)))
+ {
+ /* For a TYPE_DECL with no name, but the type has a name,
+ output a tag.
+ This is what represents `struct foo' with no typedef. */
+ /* In C++, the name of a type is the corresponding typedef.
+ In C, it is an IDENTIFIER_NODE. */
+ tree name = TYPE_NAME (type);
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+
+ current_sym_code = DBX_TYPE_DECL_STABS_CODE;
+ current_sym_value = 0;
+ current_sym_addr = 0;
+ current_sym_nchars = 2 + IDENTIFIER_LENGTH (name);
+
+ fprintf (asmfile, "%s \"%s:T", ASM_STABS_OP,
+ IDENTIFIER_POINTER (name));
+ dbxout_type (type, 1, 0);
+ dbxout_finish_symbol (NULL_TREE);
+ did_output = 1;
+ }
+
+ /* If an enum type has no name, it cannot be referred to,
+ but we must output it anyway, since the enumeration constants
+ can be referred to. */
+ if (!did_output && TREE_CODE (type) == ENUMERAL_TYPE)
+ {
+ current_sym_code = DBX_TYPE_DECL_STABS_CODE;
+ current_sym_value = 0;
+ current_sym_addr = 0;
+ current_sym_nchars = 2;
+
+ /* Some debuggers fail when given NULL names, so give this a
+ harmless name of ` '. */
+ fprintf (asmfile, "%s \" :T", ASM_STABS_OP);
+ dbxout_type (type, 1, 0);
+ dbxout_finish_symbol (NULL_TREE);
+ }
+
+ /* Prevent duplicate output of a typedef. */
+ TREE_ASM_WRITTEN (decl) = 1;
+ break;
+ }
+
+ case PARM_DECL:
+ /* Parm decls go in their own separate chains
+ and are output by dbxout_reg_parms and dbxout_parms. */
+ abort ();
+
+ case RESULT_DECL:
+ /* Named return value, treat like a VAR_DECL. */
+ case VAR_DECL:
+ if (DECL_RTL (decl) == 0)
+ return;
+ /* Don't mention a variable that is external.
+ Let the file that defines it describe it. */
+ if (DECL_EXTERNAL (decl))
+ break;
+
+ /* If the variable is really a constant
+ and not written in memory, inform the debugger. */
+ if (TREE_STATIC (decl) && TREE_READONLY (decl)
+ && DECL_INITIAL (decl) != 0
+ && ! TREE_ASM_WRITTEN (decl)
+ && (DECL_FIELD_CONTEXT (decl) == NULL_TREE
+ || TREE_CODE (DECL_FIELD_CONTEXT (decl)) == BLOCK))
+ {
+ if (TREE_PUBLIC (decl) == 0)
+ {
+ /* The sun4 assembler does not grok this. */
+ char *name = IDENTIFIER_POINTER (DECL_NAME (decl));
+ if (TREE_CODE (TREE_TYPE (decl)) == INTEGER_TYPE
+ || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
+ {
+ HOST_WIDE_INT ival = TREE_INT_CST_LOW (DECL_INITIAL (decl));
+#ifdef DBX_OUTPUT_CONSTANT_SYMBOL
+ DBX_OUTPUT_CONSTANT_SYMBOL (asmfile, name, ival);
+#else
+ fprintf (asmfile, "%s \"%s:c=i%d\",0x%x,0,0,0\n",
+ ASM_STABS_OP, name, ival, N_LSYM);
+#endif
+ return;
+ }
+ else if (TREE_CODE (TREE_TYPE (decl)) == REAL_TYPE)
+ {
+ /* don't know how to do this yet. */
+ }
+ break;
+ }
+ /* else it is something we handle like a normal variable. */
+ }
+
+ DECL_RTL (decl) = eliminate_regs (DECL_RTL (decl), 0, NULL_RTX);
+#ifdef LEAF_REG_REMAP
+ if (leaf_function)
+ leaf_renumber_regs_insn (DECL_RTL (decl));
+#endif
+
+ dbxout_symbol_location (decl, type, 0, DECL_RTL (decl));
+ }
+}
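+
+/* Illustrative only (the stabs type numbers are hypothetical): for
+
+	struct foo { int x; };
+	typedef int word;
+
+   the TYPE_DECL case above would emit directives along the lines of
+
+	.stabs "foo:T17=s4x:1,0,32;;",128,0,0,0
+	.stabs "word:t1",128,0,0,0
+
+   where 128 is N_LSYM, the default DBX_TYPE_DECL_STABS_CODE, `T'
+   introduces the tag and `t' the typedef.  */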
+
+/* Output the stab for DECL, a VAR_DECL, RESULT_DECL or PARM_DECL.
+ Add SUFFIX to its name, if SUFFIX is not 0.
+ Describe the variable as residing in HOME
+ (usually HOME is DECL_RTL (DECL), but not always). */
+
+static void
+dbxout_symbol_location (decl, type, suffix, home)
+ tree decl, type;
+ char *suffix;
+ rtx home;
+{
+ int letter = 0;
+ int regno = -1;
+
+ /* Don't mention a variable at all
+ if it was completely optimized into nothingness.
+
+     If the decl was from an inline function, then its rtl
+     is not identical to the rtl that was used in this
+ particular compilation. */
+ if (GET_CODE (home) == REG)
+ {
+ regno = REGNO (home);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ return;
+ }
+ else if (GET_CODE (home) == SUBREG)
+ {
+ rtx value = home;
+ int offset = 0;
+ while (GET_CODE (value) == SUBREG)
+ {
+ offset += SUBREG_WORD (value);
+ value = SUBREG_REG (value);
+ }
+ if (GET_CODE (value) == REG)
+ {
+ regno = REGNO (value);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ return;
+ regno += offset;
+ }
+ alter_subreg (home);
+ }
+
+ /* The kind-of-variable letter depends on where
+ the variable is and on the scope of its name:
+ G and N_GSYM for static storage and global scope,
+ S for static storage and file scope,
+ V for static storage and local scope,
+ for those two, use N_LCSYM if data is in bss segment,
+ N_STSYM if in data segment, N_FUN otherwise.
+ (We used N_FUN originally, then changed to N_STSYM
+ to please GDB. However, it seems that confused ld.
+ Now GDB has been fixed to like N_FUN, says Kingdon.)
+ no letter at all, and N_LSYM, for auto variable,
+ r and N_RSYM for register variable. */
+
+ if (GET_CODE (home) == MEM
+ && GET_CODE (XEXP (home, 0)) == SYMBOL_REF)
+ {
+ if (TREE_PUBLIC (decl))
+ {
+ letter = 'G';
+ current_sym_code = N_GSYM;
+ }
+ else
+ {
+ current_sym_addr = XEXP (home, 0);
+
+ letter = decl_function_context (decl) ? 'V' : 'S';
+
+ if (!DECL_INITIAL (decl))
+ current_sym_code = N_LCSYM;
+ else if (DECL_IN_TEXT_SECTION (decl))
+ /* This is not quite right, but it's the closest
+ of all the codes that Unix defines. */
+ current_sym_code = DBX_STATIC_CONST_VAR_CODE;
+ else
+ {
+ /* Ultrix `as' seems to need this. */
+#ifdef DBX_STATIC_STAB_DATA_SECTION
+ data_section ();
+#endif
+ current_sym_code = N_STSYM;
+ }
+ }
+ }
+ else if (regno >= 0)
+ {
+ letter = 'r';
+ current_sym_code = N_RSYM;
+ current_sym_value = DBX_REGISTER_NUMBER (regno);
+ }
+ else if (GET_CODE (home) == MEM
+ && (GET_CODE (XEXP (home, 0)) == MEM
+ || (GET_CODE (XEXP (home, 0)) == REG
+ && REGNO (XEXP (home, 0)) != HARD_FRAME_POINTER_REGNUM)))
+ /* If the value is indirect by memory or by a register
+ that isn't the frame pointer
+	 then it means the object is variable-sized and addressed through
+ that register or stack slot. DBX has no way to represent this
+ so all we can do is output the variable as a pointer.
+ If it's not a parameter, ignore it.
+ (VAR_DECLs like this can be made by integrate.c.) */
+ {
+ if (GET_CODE (XEXP (home, 0)) == REG)
+ {
+ letter = 'r';
+ current_sym_code = N_RSYM;
+ current_sym_value = DBX_REGISTER_NUMBER (REGNO (XEXP (home, 0)));
+ }
+ else
+ {
+ current_sym_code = N_LSYM;
+ /* RTL looks like (MEM (MEM (PLUS (REG...) (CONST_INT...)))).
+ We want the value of that CONST_INT. */
+ current_sym_value
+ = DEBUGGER_AUTO_OFFSET (XEXP (XEXP (home, 0), 0));
+ }
+
+ /* Effectively do build_pointer_type, but don't cache this type,
+ since it might be temporary whereas the type it points to
+ might have been saved for inlining. */
+ /* Don't use REFERENCE_TYPE because dbx can't handle that. */
+ type = make_node (POINTER_TYPE);
+ TREE_TYPE (type) = TREE_TYPE (decl);
+ }
+ else if (GET_CODE (home) == MEM
+ && GET_CODE (XEXP (home, 0)) == REG)
+ {
+ current_sym_code = N_LSYM;
+ current_sym_value = DEBUGGER_AUTO_OFFSET (XEXP (home, 0));
+ }
+ else if (GET_CODE (home) == MEM
+ && GET_CODE (XEXP (home, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (home, 0), 1)) == CONST_INT)
+ {
+ current_sym_code = N_LSYM;
+ /* RTL looks like (MEM (PLUS (REG...) (CONST_INT...)))
+ We want the value of that CONST_INT. */
+ current_sym_value = DEBUGGER_AUTO_OFFSET (XEXP (home, 0));
+ }
+ else if (GET_CODE (home) == MEM
+ && GET_CODE (XEXP (home, 0)) == CONST)
+ {
+ /* Handle an obscure case which can arise when optimizing and
+ when there are few available registers. (This is *always*
+ the case for i386/i486 targets). The RTL looks like
+ (MEM (CONST ...)) even though this variable is a local `auto'
+ or a local `register' variable. In effect, what has happened
+ is that the reload pass has seen that all assignments and
+ references for one such a local variable can be replaced by
+ equivalent assignments and references to some static storage
+ variable, thereby avoiding the need for a register. In such
+ cases we're forced to lie to debuggers and tell them that
+ this variable was itself `static'. */
+ current_sym_code = N_LCSYM;
+ letter = 'V';
+ current_sym_addr = XEXP (XEXP (home, 0), 0);
+ }
+ else if (GET_CODE (home) == CONCAT)
+ {
+ tree subtype = TREE_TYPE (type);
+
+ /* If the variable's storage is in two parts,
+ output each as a separate stab with a modified name. */
+ if (WORDS_BIG_ENDIAN)
+ dbxout_symbol_location (decl, subtype, "$imag", XEXP (home, 0));
+ else
+ dbxout_symbol_location (decl, subtype, "$real", XEXP (home, 0));
+
+ /* Cast avoids warning in old compilers. */
+ current_sym_code = (STAB_CODE_TYPE) 0;
+ current_sym_value = 0;
+ current_sym_addr = 0;
+ dbxout_prepare_symbol (decl);
+
+ if (WORDS_BIG_ENDIAN)
+ dbxout_symbol_location (decl, subtype, "$real", XEXP (home, 1));
+ else
+ dbxout_symbol_location (decl, subtype, "$imag", XEXP (home, 1));
+ return;
+ }
+ else
+ /* Address might be a MEM, when DECL is a variable-sized object.
+ Or it might be const0_rtx, meaning previous passes
+ want us to ignore this variable. */
+ return;
+
+ /* Ok, start a symtab entry and output the variable name. */
+ FORCE_TEXT;
+
+#ifdef DBX_STATIC_BLOCK_START
+ DBX_STATIC_BLOCK_START (asmfile, current_sym_code);
+#endif
+
+ dbxout_symbol_name (decl, suffix, letter);
+ dbxout_type (type, 0, 0);
+ dbxout_finish_symbol (decl);
+
+#ifdef DBX_STATIC_BLOCK_END
+ DBX_STATIC_BLOCK_END (asmfile, current_sym_code);
+#endif
+}
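+
+/* Illustrative stabs for each kind-of-variable letter (names, type
+   numbers, offsets and the a.out-style `_' prefix are hypothetical):
+
+	global:		.stabs "g:G1",32,0,0,0		N_GSYM
+	file static:	.stabs "s:S1",38,0,0,_s		N_STSYM
+	local static:	.stabs "v:V1",40,0,0,_v		N_LCSYM (bss)
+	register:	.stabs "r:r1",64,0,0,1		N_RSYM
+	auto:		.stabs "a:1",128,0,0,-8		N_LSYM  */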
+
+/* Output the symbol name of DECL for a stabs, with suffix SUFFIX.
+ Then output LETTER to indicate the kind of location the symbol has. */
+
+static void
+dbxout_symbol_name (decl, suffix, letter)
+ tree decl;
+ char *suffix;
+ int letter;
+{
+ /* One slight hitch: if this is a VAR_DECL which is a static
+ class member, we must put out the mangled name instead of the
+ DECL_NAME. */
+
+ char *name;
+ /* Note also that static member (variable) names DO NOT begin
+ with underscores in .stabs directives. */
+ if (DECL_LANG_SPECIFIC (decl))
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ else
+ name = IDENTIFIER_POINTER (DECL_NAME (decl));
+ if (name == 0)
+ name = "(anon)";
+ fprintf (asmfile, "%s \"%s%s:", ASM_STABS_OP, name,
+ (suffix ? suffix : ""));
+
+ if (letter) putc (letter, asmfile);
+}
+
+static void
+dbxout_prepare_symbol (decl)
+ tree decl;
+{
+#ifdef WINNING_GDB
+ char *filename = DECL_SOURCE_FILE (decl);
+
+ dbxout_source_file (asmfile, filename);
+#endif
+}
+
+static void
+dbxout_finish_symbol (sym)
+ tree sym;
+{
+#ifdef DBX_FINISH_SYMBOL
+ DBX_FINISH_SYMBOL (sym);
+#else
+ int line = 0;
+ if (use_gnu_debug_info_extensions && sym != 0)
+ line = DECL_SOURCE_LINE (sym);
+
+ fprintf (asmfile, "\",%d,0,%d,", current_sym_code, line);
+ if (current_sym_addr)
+ output_addr_const (asmfile, current_sym_addr);
+ else
+ fprintf (asmfile, "%d", current_sym_value);
+ putc ('\n', asmfile);
+#endif
+}
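+
+/* The tail written above completes a directive of the general shape
+
+	.stabs "NAME:DESC",CODE,0,LINE,VALUE
+
+   where CODE is current_sym_code, LINE is nonzero only under the GNU
+   extensions, and VALUE is either the assembler address expression in
+   current_sym_addr or the plain number in current_sym_value (register
+   number, frame offset, and so on).  */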
+
+/* Output definitions of all the decls in a chain. */
+
+void
+dbxout_syms (syms)
+ tree syms;
+{
+ while (syms)
+ {
+ dbxout_symbol (syms, 1);
+ syms = TREE_CHAIN (syms);
+ }
+}
+
+/* The following two functions output definitions of function parameters.
+ Each parameter gets a definition locating it in the parameter list.
+ Each parameter that is a register variable gets a second definition
+ locating it in the register.
+
+   Printing of argument lists in gdb uses the definitions that
+   locate parms in the parameter list.  But references to the variables in
+   expressions preferentially use the definitions as registers.  */
+
+/* Output definitions, referring to storage in the parmlist,
+ of all the parms in PARMS, which is a chain of PARM_DECL nodes. */
+
+void
+dbxout_parms (parms)
+ tree parms;
+{
+ for (; parms; parms = TREE_CHAIN (parms))
+ if (DECL_NAME (parms) && TREE_TYPE (parms) != error_mark_node)
+ {
+ dbxout_prepare_symbol (parms);
+
+ /* Perform any necessary register eliminations on the parameter's rtl,
+ so that the debugging output will be accurate. */
+ DECL_INCOMING_RTL (parms)
+ = eliminate_regs (DECL_INCOMING_RTL (parms), 0, NULL_RTX);
+ DECL_RTL (parms) = eliminate_regs (DECL_RTL (parms), 0, NULL_RTX);
+#ifdef LEAF_REG_REMAP
+ if (leaf_function)
+ {
+ leaf_renumber_regs_insn (DECL_INCOMING_RTL (parms));
+ leaf_renumber_regs_insn (DECL_RTL (parms));
+ }
+#endif
+
+ if (PARM_PASSED_IN_MEMORY (parms))
+ {
+ rtx addr = XEXP (DECL_INCOMING_RTL (parms), 0);
+
+ /* ??? Here we assume that the parm address is indexed
+ off the frame pointer or arg pointer.
+ If that is not true, we produce meaningless results,
+ but do not crash. */
+ if (GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ current_sym_value = INTVAL (XEXP (addr, 1));
+ else
+ current_sym_value = 0;
+
+ current_sym_code = N_PSYM;
+ current_sym_addr = 0;
+
+ FORCE_TEXT;
+ if (DECL_NAME (parms))
+ {
+ current_sym_nchars = 2 + IDENTIFIER_LENGTH (DECL_NAME (parms));
+
+ fprintf (asmfile, "%s \"%s:%c", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_NAME (parms)),
+ DBX_MEMPARM_STABS_LETTER);
+ }
+ else
+ {
+ current_sym_nchars = 8;
+ fprintf (asmfile, "%s \"(anon):%c", ASM_STABS_OP,
+ DBX_MEMPARM_STABS_LETTER);
+ }
+
+ if (GET_CODE (DECL_RTL (parms)) == REG
+ && REGNO (DECL_RTL (parms)) >= 0
+ && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER)
+ dbxout_type (DECL_ARG_TYPE (parms), 0, 0);
+ else
+ {
+ int original_value = current_sym_value;
+
+ /* This is the case where the parm is passed as an int or double
+ and it is converted to a char, short or float and stored back
+ in the parmlist. In this case, describe the parm
+ with the variable's declared type, and adjust the address
+ if the least significant bytes (which we are using) are not
+ the first ones. */
+#if BYTES_BIG_ENDIAN
+ if (TREE_TYPE (parms) != DECL_ARG_TYPE (parms))
+ current_sym_value += (GET_MODE_SIZE (TYPE_MODE (DECL_ARG_TYPE (parms)))
+ - GET_MODE_SIZE (GET_MODE (DECL_RTL (parms))));
+#endif
+
+ if (GET_CODE (DECL_RTL (parms)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (parms), 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (DECL_RTL (parms), 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1)) == current_sym_value)
+ dbxout_type (TREE_TYPE (parms), 0, 0);
+ else
+ {
+ current_sym_value = original_value;
+ dbxout_type (DECL_ARG_TYPE (parms), 0, 0);
+ }
+ }
+ current_sym_value = DEBUGGER_ARG_OFFSET (current_sym_value, addr);
+ dbxout_finish_symbol (parms);
+ }
+ else if (GET_CODE (DECL_RTL (parms)) == REG)
+ {
+ rtx best_rtl;
+ char regparm_letter;
+ tree parm_type;
+ /* Parm passed in registers and lives in registers or nowhere. */
+
+ current_sym_code = DBX_REGPARM_STABS_CODE;
+ regparm_letter = DBX_REGPARM_STABS_LETTER;
+ current_sym_addr = 0;
+
+ /* If parm lives in a register, use that register;
+ pretend the parm was passed there. It would be more consistent
+ to describe the register where the parm was passed,
+ but in practice that register usually holds something else.
+
+ If we use DECL_RTL, then we must use the declared type of
+ the variable, not the type that it arrived in. */
+ if (REGNO (DECL_RTL (parms)) >= 0
+ && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER)
+ {
+ best_rtl = DECL_RTL (parms);
+ parm_type = TREE_TYPE (parms);
+ }
+ /* If the parm lives nowhere, use the register where it was
+ passed. It is also better to use the declared type here. */
+ else
+ {
+ best_rtl = DECL_INCOMING_RTL (parms);
+ parm_type = TREE_TYPE (parms);
+ }
+ current_sym_value = DBX_REGISTER_NUMBER (REGNO (best_rtl));
+
+ FORCE_TEXT;
+ if (DECL_NAME (parms))
+ {
+ current_sym_nchars = 2 + IDENTIFIER_LENGTH (DECL_NAME (parms));
+ fprintf (asmfile, "%s \"%s:%c", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_NAME (parms)),
+ regparm_letter);
+ }
+ else
+ {
+ current_sym_nchars = 8;
+ fprintf (asmfile, "%s \"(anon):%c", ASM_STABS_OP,
+ regparm_letter);
+ }
+
+ dbxout_type (parm_type, 0, 0);
+ dbxout_finish_symbol (parms);
+ }
+ else if (GET_CODE (DECL_RTL (parms)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (parms), 0)) == REG
+ && REGNO (XEXP (DECL_RTL (parms), 0)) != HARD_FRAME_POINTER_REGNUM
+ && REGNO (XEXP (DECL_RTL (parms), 0)) != STACK_POINTER_REGNUM
+#if ARG_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ && REGNO (XEXP (DECL_RTL (parms), 0)) != ARG_POINTER_REGNUM
+#endif
+ )
+ {
+ /* Parm was passed via invisible reference.
+ That is, its address was passed in a register.
+ Output it as if it lived in that register.
+ The debugger will know from the type
+ that it was actually passed by invisible reference. */
+
+ char regparm_letter;
+ /* Parm passed in registers and lives in registers or nowhere. */
+
+ current_sym_code = DBX_REGPARM_STABS_CODE;
+ regparm_letter = DBX_REGPARM_STABS_LETTER;
+
+	  /* DECL_RTL looks like (MEM (REG...)).  Get the register number.
+ If it is an unallocated pseudo-reg, then use the register where
+ it was passed instead. */
+ if (REGNO (XEXP (DECL_RTL (parms), 0)) >= 0
+ && REGNO (XEXP (DECL_RTL (parms), 0)) < FIRST_PSEUDO_REGISTER)
+ current_sym_value = REGNO (XEXP (DECL_RTL (parms), 0));
+ else
+ current_sym_value = REGNO (DECL_INCOMING_RTL (parms));
+
+ current_sym_addr = 0;
+
+ FORCE_TEXT;
+ if (DECL_NAME (parms))
+ {
+ current_sym_nchars = 2 + strlen (IDENTIFIER_POINTER (DECL_NAME (parms)));
+
+ fprintf (asmfile, "%s \"%s:%c", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_NAME (parms)),
+ DBX_REGPARM_STABS_LETTER);
+ }
+ else
+ {
+ current_sym_nchars = 8;
+ fprintf (asmfile, "%s \"(anon):%c", ASM_STABS_OP,
+ DBX_REGPARM_STABS_LETTER);
+ }
+
+ dbxout_type (TREE_TYPE (parms), 0, 0);
+ dbxout_finish_symbol (parms);
+ }
+ else if (GET_CODE (DECL_RTL (parms)) == MEM
+ && XEXP (DECL_RTL (parms), 0) != const0_rtx
+ /* ??? A constant address for a parm can happen
+ when the reg it lives in is equiv to a constant in memory.
+ Should make this not happen, after 2.4. */
+ && ! CONSTANT_P (XEXP (DECL_RTL (parms), 0)))
+ {
+ /* Parm was passed in registers but lives on the stack. */
+
+ current_sym_code = N_PSYM;
+ /* DECL_RTL looks like (MEM (PLUS (REG...) (CONST_INT...))),
+ in which case we want the value of that CONST_INT,
+ or (MEM (REG ...)) or (MEM (MEM ...)),
+ in which case we use a value of zero. */
+ if (GET_CODE (XEXP (DECL_RTL (parms), 0)) == REG
+ || GET_CODE (XEXP (DECL_RTL (parms), 0)) == MEM)
+ current_sym_value = 0;
+ else
+ current_sym_value = INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1));
+ current_sym_addr = 0;
+
+ FORCE_TEXT;
+ if (DECL_NAME (parms))
+ {
+ current_sym_nchars = 2 + strlen (IDENTIFIER_POINTER (DECL_NAME (parms)));
+
+ fprintf (asmfile, "%s \"%s:%c", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_NAME (parms)),
+ DBX_MEMPARM_STABS_LETTER);
+ }
+ else
+ {
+ current_sym_nchars = 8;
+ fprintf (asmfile, "%s \"(anon):%c", ASM_STABS_OP,
+ DBX_MEMPARM_STABS_LETTER);
+ }
+
+ current_sym_value
+ = DEBUGGER_ARG_OFFSET (current_sym_value,
+ XEXP (DECL_RTL (parms), 0));
+ dbxout_type (TREE_TYPE (parms), 0, 0);
+ dbxout_finish_symbol (parms);
+ }
+ }
+}
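+
+/* Illustrative parameter stabs (type numbers and offsets hypothetical):
+
+	passed and living in memory:	.stabs "argc:p1",160,0,0,8	N_PSYM
+	living in a register:		.stabs "argv:P20",64,0,0,5
+
+   The second line assumes the common configuration in which
+   DBX_REGPARM_STABS_LETTER is `P' and DBX_REGPARM_STABS_CODE is
+   N_RSYM (64).  */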
+
+/* Output definitions for the places where parms live during the function,
+ when different from where they were passed, when the parms were passed
+ in memory.
+
+ It is not useful to do this for parms passed in registers
+ that live during the function in different registers, because it is
+ impossible to look in the passed register for the passed value,
+ so we use the within-the-function register to begin with.
+
+ PARMS is a chain of PARM_DECL nodes. */
+
+void
+dbxout_reg_parms (parms)
+ tree parms;
+{
+ for (; parms; parms = TREE_CHAIN (parms))
+ if (DECL_NAME (parms))
+ {
+ dbxout_prepare_symbol (parms);
+
+ /* Report parms that live in registers during the function
+ but were passed in memory. */
+ if (GET_CODE (DECL_RTL (parms)) == REG
+ && REGNO (DECL_RTL (parms)) >= 0
+ && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER
+ && PARM_PASSED_IN_MEMORY (parms))
+ dbxout_symbol_location (parms, TREE_TYPE (parms),
+ 0, DECL_RTL (parms));
+ else if (GET_CODE (DECL_RTL (parms)) == CONCAT
+ && PARM_PASSED_IN_MEMORY (parms))
+ dbxout_symbol_location (parms, TREE_TYPE (parms),
+ 0, DECL_RTL (parms));
+ /* Report parms that live in memory but not where they were passed. */
+ else if (GET_CODE (DECL_RTL (parms)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (parms), 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (DECL_RTL (parms), 0), 1)) == CONST_INT
+ && PARM_PASSED_IN_MEMORY (parms)
+ && ! rtx_equal_p (DECL_RTL (parms), DECL_INCOMING_RTL (parms)))
+ {
+#if 0 /* ??? It is not clear yet what should replace this. */
+ int offset = DECL_OFFSET (parms) / BITS_PER_UNIT;
+ /* A parm declared char is really passed as an int,
+ so it occupies the least significant bytes.
+ On a big-endian machine those are not the low-numbered ones. */
+#if BYTES_BIG_ENDIAN
+ if (offset != -1 && TREE_TYPE (parms) != DECL_ARG_TYPE (parms))
+ offset += (GET_MODE_SIZE (TYPE_MODE (DECL_ARG_TYPE (parms)))
+ - GET_MODE_SIZE (GET_MODE (DECL_RTL (parms))));
+#endif
+ if (INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1)) != offset) {...}
+#endif
+ dbxout_symbol_location (parms, TREE_TYPE (parms),
+ 0, DECL_RTL (parms));
+ }
+#if 0
+ else if (GET_CODE (DECL_RTL (parms)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (parms), 0)) == REG)
+ {
+ /* Parm was passed via invisible reference.
+ That is, its address was passed in a register.
+ Output it as if it lived in that register.
+ The debugger will know from the type
+ that it was actually passed by invisible reference. */
+
+ current_sym_code = N_RSYM;
+
+	  /* DECL_RTL looks like (MEM (REG...)).  Get the register number.  */
+ current_sym_value = REGNO (XEXP (DECL_RTL (parms), 0));
+ current_sym_addr = 0;
+
+ FORCE_TEXT;
+ if (DECL_NAME (parms))
+ {
+ current_sym_nchars = 2 + strlen (IDENTIFIER_POINTER (DECL_NAME (parms)));
+
+ fprintf (asmfile, "%s \"%s:r", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_NAME (parms)));
+ }
+ else
+ {
+ current_sym_nchars = 8;
+ fprintf (asmfile, "%s \"(anon):r", ASM_STABS_OP);
+ }
+
+ dbxout_type (TREE_TYPE (parms), 0, 0);
+ dbxout_finish_symbol (parms);
+ }
+#endif
+ }
+}
+
+/* Given a chain of ..._TYPE nodes (as they come in a parameter list),
+   output definitions of those names, in raw form.  */
+
+void
+dbxout_args (args)
+ tree args;
+{
+ while (args)
+ {
+ putc (',', asmfile);
+ dbxout_type (TREE_VALUE (args), 0, 0);
+ CHARS (1);
+ args = TREE_CHAIN (args);
+ }
+}
+
+/* Given a chain of ..._TYPE nodes,
+ find those which have typedef names and output those names.
+ This is to ensure those types get output. */
+
+void
+dbxout_types (types)
+ register tree types;
+{
+ while (types)
+ {
+ if (TYPE_NAME (types)
+ && TREE_CODE (TYPE_NAME (types)) == TYPE_DECL
+ && ! TREE_ASM_WRITTEN (TYPE_NAME (types)))
+ dbxout_symbol (TYPE_NAME (types), 1);
+ types = TREE_CHAIN (types);
+ }
+}
+
+/* Output everything about a symbol block (a BLOCK node
+ that represents a scope level),
+ including recursive output of contained blocks.
+
+ BLOCK is the BLOCK node.
+ DEPTH is its depth within containing symbol blocks.
+ ARGS is usually zero; but for the outermost block of the
+ body of a function, it is a chain of PARM_DECLs for the function parameters.
+ We output definitions of all the register parms
+ as if they were local variables of that block.
+
+ If -g1 was used, we count blocks just the same, but output nothing
+ except for the outermost block.
+
+ Actually, BLOCK may be several blocks chained together.
+ We handle them all in sequence. */
+
+static void
+dbxout_block (block, depth, args)
+ register tree block;
+ int depth;
+ tree args;
+{
+ int blocknum;
+
+ while (block)
+ {
+ /* Ignore blocks never expanded or otherwise marked as real. */
+ if (TREE_USED (block))
+ {
+#ifndef DBX_LBRAC_FIRST
+ /* In dbx format, the syms of a block come before the N_LBRAC. */
+ if (debug_info_level != DINFO_LEVEL_TERSE || depth == 0)
+ dbxout_syms (BLOCK_VARS (block));
+ if (args)
+ dbxout_reg_parms (args);
+#endif
+
+ /* Now output an N_LBRAC symbol to represent the beginning of
+ the block. Use the block's tree-walk order to generate
+ the assembler symbols LBBn and LBEn
+ that final will define around the code in this block. */
+ if (depth > 0 && debug_info_level != DINFO_LEVEL_TERSE)
+ {
+ char buf[20];
+ blocknum = next_block_number++;
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LBB", blocknum);
+
+ if (BLOCK_HANDLER_BLOCK (block))
+ {
+ /* A catch block. Must precede N_LBRAC. */
+ tree decl = BLOCK_VARS (block);
+ while (decl)
+ {
+#ifdef DBX_OUTPUT_CATCH
+ DBX_OUTPUT_CATCH (asmfile, decl, buf);
+#else
+ fprintf (asmfile, "%s \"%s:C1\",%d,0,0,", ASM_STABS_OP,
+ IDENTIFIER_POINTER (DECL_NAME (decl)), N_CATCH);
+ assemble_name (asmfile, buf);
+ fprintf (asmfile, "\n");
+#endif
+ decl = TREE_CHAIN (decl);
+ }
+ }
+
+#ifdef DBX_OUTPUT_LBRAC
+ DBX_OUTPUT_LBRAC (asmfile, buf);
+#else
+ fprintf (asmfile, "%s %d,0,0,", ASM_STABN_OP, N_LBRAC);
+ assemble_name (asmfile, buf);
+#if DBX_BLOCKS_FUNCTION_RELATIVE
+ fputc ('-', asmfile);
+ assemble_name (asmfile, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
+#endif
+ fprintf (asmfile, "\n");
+#endif
+ }
+ else if (depth > 0)
+ /* Count blocks the same way regardless of debug_info_level. */
+ next_block_number++;
+
+#ifdef DBX_LBRAC_FIRST
+ /* On some weird machines, the syms of a block
+ come after the N_LBRAC. */
+ if (debug_info_level != DINFO_LEVEL_TERSE || depth == 0)
+ dbxout_syms (BLOCK_VARS (block));
+ if (args)
+ dbxout_reg_parms (args);
+#endif
+
+ /* Output the subblocks. */
+ dbxout_block (BLOCK_SUBBLOCKS (block), depth + 1, NULL_TREE);
+
+ /* Refer to the marker for the end of the block. */
+ if (depth > 0 && debug_info_level != DINFO_LEVEL_TERSE)
+ {
+ char buf[20];
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LBE", blocknum);
+#ifdef DBX_OUTPUT_RBRAC
+ DBX_OUTPUT_RBRAC (asmfile, buf);
+#else
+ fprintf (asmfile, "%s %d,0,0,", ASM_STABN_OP, N_RBRAC);
+ assemble_name (asmfile, buf);
+#if DBX_BLOCKS_FUNCTION_RELATIVE
+ fputc ('-', asmfile);
+ assemble_name (asmfile, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
+#endif
+ fprintf (asmfile, "\n");
+#endif
+ }
+ }
+ block = BLOCK_CHAIN (block);
+ }
+}
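+
+/* Illustrative brackets for one inner scope (final.c generates the
+   LBBn/LBEn labels; the block number here is hypothetical):
+
+	.stabn 192,0,0,LBB2		N_LBRAC
+	.stabn 224,0,0,LBE2		N_RBRAC
+
+   and with DBX_BLOCKS_FUNCTION_RELATIVE each label is written as
+   LBB2-_func so that the value is an offset into the function.  */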
+
+/* Output the information about a function and its arguments and result.
+ Usually this follows the function's code,
+ but on some systems, it comes before. */
+
+static void
+dbxout_really_begin_function (decl)
+ tree decl;
+{
+ dbxout_symbol (decl, 0);
+ dbxout_parms (DECL_ARGUMENTS (decl));
+ if (DECL_NAME (DECL_RESULT (decl)) != 0)
+ dbxout_symbol (DECL_RESULT (decl), 1);
+}
+
+/* Called at beginning of output of function definition. */
+
+void
+dbxout_begin_function (decl)
+ tree decl;
+{
+#ifdef DBX_FUNCTION_FIRST
+ dbxout_really_begin_function (decl);
+#endif
+}
+
+/* Output dbx data for a function definition.
+ This includes a definition of the function name itself (a symbol),
+ definitions of the parameters (locating them in the parameter list)
+ and then output the block that makes up the function's body
+ (including all the auto variables of the function). */
+
+void
+dbxout_function (decl)
+ tree decl;
+{
+#ifndef DBX_FUNCTION_FIRST
+ dbxout_really_begin_function (decl);
+#endif
+ dbxout_block (DECL_INITIAL (decl), 0, DECL_ARGUMENTS (decl));
+#ifdef DBX_OUTPUT_FUNCTION_END
+ DBX_OUTPUT_FUNCTION_END (asmfile, decl);
+#endif
+}
+#endif /* DBX_DEBUGGING_INFO */
diff --git a/gnu/usr.bin/cc/cc_int/dwarfout.c b/gnu/usr.bin/cc/cc_int/dwarfout.c
new file mode 100644
index 0000000..a48c9b97
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/dwarfout.c
@@ -0,0 +1,5667 @@
+/* Output Dwarf format symbol table information from the GNU C compiler.
+ Copyright (C) 1992, 1993 Free Software Foundation, Inc.
+
+ Written by Ron Guilmette (rfg@netcom.com) for
+ Network Computing Devices, August, September, October, November 1990.
+ Generously contributed by NCD to the Free Software Foundation.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include "config.h"
+
+#ifdef DWARF_DEBUGGING_INFO
+#include <stdio.h>
+#include "dwarf.h"
+#include "tree.h"
+#include "flags.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "reload.h"
+#include "output.h"
+#include "defaults.h"
+
+#ifndef DWARF_VERSION
+#define DWARF_VERSION 1
+#endif
+
+/* #define NDEBUG 1 */
+#include "assert.h"
+
+#if defined(DWARF_TIMESTAMPS)
+#if defined(POSIX)
+#include <time.h>
+#else /* !defined(POSIX) */
+#include <sys/types.h>
+#if defined(__STDC__)
+extern time_t time (time_t *);
+#else /* !defined(__STDC__) */
+extern time_t time ();
+#endif /* !defined(__STDC__) */
+#endif /* !defined(POSIX) */
+#endif /* defined(DWARF_TIMESTAMPS) */
+
+extern char *getpwd ();
+
+extern char *index ();
+extern char *rindex ();
+
+/* IMPORTANT NOTE: Please see the file README.DWARF for important details
+ regarding the GNU implementation of Dwarf. */
+
+/* NOTE: In the comments in this file, many references are made to
+ so called "Debugging Information Entries". For the sake of brevity,
+ this term is abbreviated to `DIE' throughout the remainder of this
+ file. */
+
+/* Note that the implementation of C++ support herein is (as yet) unfinished.
+ If you want to try to complete it, more power to you. */
+
+#if defined(__GNUC__) && (NDEBUG == 1)
+#define inline static inline
+#else
+#define inline static
+#endif
+
+/* How to start an assembler comment. */
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START ";#"
+#endif
+
+/* How to print out a register name. */
+#ifndef PRINT_REG
+#define PRINT_REG(RTX, CODE, FILE) \
+ fprintf ((FILE), "%s", reg_names[REGNO (RTX)])
+#endif
+
+/* Define a macro which returns non-zero for any tagged type which is
+ used (directly or indirectly) in the specification of either some
+ function's return type or some formal parameter of some function.
+ We use this macro when we are operating in "terse" mode to help us
+ know what tagged types have to be represented in Dwarf (even in
+ terse mode) and which ones don't.
+
+ A flag bit with this meaning really should be a part of the normal
+ GCC ..._TYPE nodes, but at the moment, there is no such bit defined
+   for these nodes.  For now, we have to just fake it.  It is safe for
+ us to simply return zero for all complete tagged types (which will
+ get forced out anyway if they were used in the specification of some
+ formal or return type) and non-zero for all incomplete tagged types.
+*/
+
+#define TYPE_USED_FOR_FUNCTION(tagged_type) (TYPE_SIZE (tagged_type) == 0)
+
+extern int flag_traditional;
+extern char *version_string;
+extern char *language_string;
+
+/* Maximum size (in bytes) of an artificially generated label. */
+
+#define MAX_ARTIFICIAL_LABEL_BYTES 30
+
+/* Make sure we know the sizes of the various types dwarf can describe.
+ These are only defaults. If the sizes are different for your target,
+ you should override these values by defining the appropriate symbols
+ in your tm.h file. */
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef SHORT_TYPE_SIZE
+#define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_LONG_TYPE_SIZE
+#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE INT_TYPE_SIZE
+#endif
+
+#ifndef WCHAR_UNSIGNED
+#define WCHAR_UNSIGNED 0
+#endif
+
+#ifndef FLOAT_TYPE_SIZE
+#define FLOAT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef DOUBLE_TYPE_SIZE
+#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+/* Structure to keep track of source filenames. */
+
+struct filename_entry {
+ unsigned number;
+ char * name;
+};
+
+typedef struct filename_entry filename_entry;
+
+/* Pointer to an array of elements, each one having the structure above. */
+
+static filename_entry *filename_table;
+
+/* Total number of entries in the table (i.e. array) pointed to by
+ `filename_table'. This is the *total* and includes both used and
+ unused slots. */
+
+static unsigned ft_entries_allocated;
+
+/* Number of entries in the filename_table which are actually in use. */
+
+static unsigned ft_entries;
+
+/* Size (in elements) of increments by which we may expand the filename
+ table. Actually, a single hunk of space of this size should be enough
+ for most typical programs. */
+
+#define FT_ENTRIES_INCREMENT 64
+
+/* Local pointer to the name of the main input file. Initialized in
+ dwarfout_init. */
+
+static char *primary_filename;
+
+/* Pointer to the most recent filename for which we produced some line info. */
+
+static char *last_filename;
+
+/* For Dwarf output, we must assign lexical-blocks id numbers
+ in the order in which their beginnings are encountered.
+ We output Dwarf debugging info that refers to the beginnings
+ and ends of the ranges of code for each lexical block with
+ assembler labels ..Bn and ..Bn.e, where n is the block number.
+ The labels themselves are generated in final.c, which assigns
+ numbers to the blocks in the same way. */
+
+static unsigned next_block_number = 2;
+
+/* Counter to generate unique names for DIEs. */
+
+static unsigned next_unused_dienum = 1;
+
+/* Number of the DIE which is currently being generated. */
+
+static unsigned current_dienum;
+
+/* Number to use for the special "pubname" label on the next DIE which
+ represents a function or data object defined in this compilation
+ unit which has "extern" linkage. */
+
+static unsigned next_pubname_number = 0;
+
+#define NEXT_DIE_NUM pending_sibling_stack[pending_siblings-1]
+
+/* Pointer to a dynamically allocated list of pre-reserved and still
+ pending sibling DIE numbers. Note that this list will grow as needed. */
+
+static unsigned *pending_sibling_stack;
+
+/* Counter to keep track of the number of pre-reserved and still pending
+ sibling DIE numbers. */
+
+static unsigned pending_siblings;
+
+/* The currently allocated size of the above list (expressed in number of
+ list elements). */
+
+static unsigned pending_siblings_allocated;
+
+/* Size (in elements) of increments by which we may expand the pending
+ sibling stack. Actually, a single hunk of space of this size should
+ be enough for most typical programs. */
+
+#define PENDING_SIBLINGS_INCREMENT 64
+
+/* Non-zero if we are performing our file-scope finalization pass and if
+ we should force out Dwarf descriptions of any and all file-scope
+ tagged types which are still incomplete types. */
+
+static int finalizing = 0;
+
+/* A pointer to the base of a list of pending types which we haven't
+ generated DIEs for yet, but which we will have to come back to
+ later on. */
+
+static tree *pending_types_list;
+
+/* Number of elements currently allocated for the pending_types_list. */
+
+static unsigned pending_types_allocated;
+
+/* Number of elements of pending_types_list currently in use. */
+
+static unsigned pending_types;
+
+/* Size (in elements) of increments by which we may expand the pending
+ types list. Actually, a single hunk of space of this size should
+ be enough for most typical programs. */
+
+#define PENDING_TYPES_INCREMENT 64
+
+/* Pointer to an artificial RECORD_TYPE which we create in dwarfout_init.
+ This is used in a hack to help us get the DIEs describing types of
+ formal parameters to come *after* all of the DIEs describing the formal
+ parameters themselves. That's necessary in order to be compatible
+ with what the brain-damaged svr4 SDB debugger requires. */
+
+static tree fake_containing_scope;
+
+/* The number of the current function definition that we are generating
+ debugging information for. These numbers range from 1 up to the maximum
+ number of function definitions contained within the current compilation
+ unit. These numbers are used to create unique labels for various things
+ contained within various function definitions. */
+
+static unsigned current_funcdef_number = 1;
+
+/* A pointer to the ..._DECL node which we have most recently been working
+ on. We keep this around just in case something about it looks screwy
+ and we want to tell the user what the source coordinates for the actual
+ declaration are. */
+
+static tree dwarf_last_decl;
+
+/* Forward declarations for functions defined in this file. */
+
+static void output_type ();
+static void type_attribute ();
+static void output_decls_for_scope ();
+static void output_decl ();
+static unsigned lookup_filename ();
+
+/* Definitions of defaults for assembler-dependent names of various
+ pseudo-ops and section names.
+
+   These may be overridden in your tm.h file (if necessary) for your
+ particular assembler. The default values provided here correspond to
+ what is expected by "standard" AT&T System V.4 assemblers. */
+
+#ifndef FILE_ASM_OP
+#define FILE_ASM_OP ".file"
+#endif
+#ifndef VERSION_ASM_OP
+#define VERSION_ASM_OP ".version"
+#endif
+#ifndef UNALIGNED_SHORT_ASM_OP
+#define UNALIGNED_SHORT_ASM_OP ".2byte"
+#endif
+#ifndef UNALIGNED_INT_ASM_OP
+#define UNALIGNED_INT_ASM_OP ".4byte"
+#endif
+#ifndef ASM_BYTE_OP
+#define ASM_BYTE_OP ".byte"
+#endif
+#ifndef SET_ASM_OP
+#define SET_ASM_OP ".set"
+#endif
+
+/* Pseudo-ops for pushing the current section onto the section stack (and
+   simultaneously changing to a new section) and for popping back to the
+ section we were in immediately before this one. Note that most svr4
+ assemblers only maintain a one level stack... you can push all the
+ sections you want, but you can only pop out one level. (The sparc
+ svr4 assembler is an exception to this general rule.) That's
+ OK because we only use at most one level of the section stack herein. */
+
+#ifndef PUSHSECTION_ASM_OP
+#define PUSHSECTION_ASM_OP ".section"
+#endif
+#ifndef POPSECTION_ASM_OP
+#define POPSECTION_ASM_OP ".previous"
+#endif
+
+/* The default format used by the ASM_OUTPUT_PUSH_SECTION macro (see below)
+ to print the PUSHSECTION_ASM_OP and the section name. The default here
+ works for almost all svr4 assemblers, except for the sparc, where the
+ section name must be enclosed in double quotes. (See sparcv4.h.) */
+
+#ifndef PUSHSECTION_FORMAT
+#define PUSHSECTION_FORMAT "%s\t%s\n"
+#endif
+
+#ifndef DEBUG_SECTION
+#define DEBUG_SECTION ".debug"
+#endif
+#ifndef LINE_SECTION
+#define LINE_SECTION ".line"
+#endif
+#ifndef SFNAMES_SECTION
+#define SFNAMES_SECTION ".debug_sfnames"
+#endif
+#ifndef SRCINFO_SECTION
+#define SRCINFO_SECTION ".debug_srcinfo"
+#endif
+#ifndef MACINFO_SECTION
+#define MACINFO_SECTION ".debug_macinfo"
+#endif
+#ifndef PUBNAMES_SECTION
+#define PUBNAMES_SECTION ".debug_pubnames"
+#endif
+#ifndef ARANGES_SECTION
+#define ARANGES_SECTION ".debug_aranges"
+#endif
+#ifndef TEXT_SECTION
+#define TEXT_SECTION ".text"
+#endif
+#ifndef DATA_SECTION
+#define DATA_SECTION ".data"
+#endif
+#ifndef DATA1_SECTION
+#define DATA1_SECTION ".data1"
+#endif
+#ifndef RODATA_SECTION
+#define RODATA_SECTION ".rodata"
+#endif
+#ifndef RODATA1_SECTION
+#define RODATA1_SECTION ".rodata1"
+#endif
+#ifndef BSS_SECTION
+#define BSS_SECTION ".bss"
+#endif
+
+/* Definitions of defaults for formats and names of various special
+   (artificial) labels which may be generated within this file (when
+   the -g option is used and DWARF_DEBUGGING_INFO is in effect).
+
+   If necessary, these may be overridden from within your tm.h file,
+   but typically, you should never need to override these.
+
+   These labels have been hacked (temporarily) so that they all begin with
+   a `.L' sequence so as to appease the stock sparc/svr4 assembler and the
+   stock m88k/svr4 assembler, both of which need to see .L at the start of
+   a label in order to prevent that label from going into the linker symbol
+   table.  When I get time, I'll have to fix this the right way so that we
+   will use ASM_GENERATE_INTERNAL_LABEL and ASM_OUTPUT_INTERNAL_LABEL herein,
+   but that will require a rather massive set of changes.  For the moment,
+   the following definitions ought to produce the right results for all svr4
+   and svr3 assemblers. -- rfg
+*/
+
+#ifndef TEXT_BEGIN_LABEL
+#define TEXT_BEGIN_LABEL ".L_text_b"
+#endif
+#ifndef TEXT_END_LABEL
+#define TEXT_END_LABEL ".L_text_e"
+#endif
+
+#ifndef DATA_BEGIN_LABEL
+#define DATA_BEGIN_LABEL ".L_data_b"
+#endif
+#ifndef DATA_END_LABEL
+#define DATA_END_LABEL ".L_data_e"
+#endif
+
+#ifndef DATA1_BEGIN_LABEL
+#define DATA1_BEGIN_LABEL ".L_data1_b"
+#endif
+#ifndef DATA1_END_LABEL
+#define DATA1_END_LABEL ".L_data1_e"
+#endif
+
+#ifndef RODATA_BEGIN_LABEL
+#define RODATA_BEGIN_LABEL ".L_rodata_b"
+#endif
+#ifndef RODATA_END_LABEL
+#define RODATA_END_LABEL ".L_rodata_e"
+#endif
+
+#ifndef RODATA1_BEGIN_LABEL
+#define RODATA1_BEGIN_LABEL ".L_rodata1_b"
+#endif
+#ifndef RODATA1_END_LABEL
+#define RODATA1_END_LABEL ".L_rodata1_e"
+#endif
+
+#ifndef BSS_BEGIN_LABEL
+#define BSS_BEGIN_LABEL ".L_bss_b"
+#endif
+#ifndef BSS_END_LABEL
+#define BSS_END_LABEL ".L_bss_e"
+#endif
+
+#ifndef LINE_BEGIN_LABEL
+#define LINE_BEGIN_LABEL ".L_line_b"
+#endif
+#ifndef LINE_LAST_ENTRY_LABEL
+#define LINE_LAST_ENTRY_LABEL ".L_line_last"
+#endif
+#ifndef LINE_END_LABEL
+#define LINE_END_LABEL ".L_line_e"
+#endif
+
+#ifndef DEBUG_BEGIN_LABEL
+#define DEBUG_BEGIN_LABEL ".L_debug_b"
+#endif
+#ifndef SFNAMES_BEGIN_LABEL
+#define SFNAMES_BEGIN_LABEL ".L_sfnames_b"
+#endif
+#ifndef SRCINFO_BEGIN_LABEL
+#define SRCINFO_BEGIN_LABEL ".L_srcinfo_b"
+#endif
+#ifndef MACINFO_BEGIN_LABEL
+#define MACINFO_BEGIN_LABEL ".L_macinfo_b"
+#endif
+
+#ifndef DIE_BEGIN_LABEL_FMT
+#define DIE_BEGIN_LABEL_FMT ".L_D%u"
+#endif
+#ifndef DIE_END_LABEL_FMT
+#define DIE_END_LABEL_FMT ".L_D%u_e"
+#endif
+#ifndef PUB_DIE_LABEL_FMT
+#define PUB_DIE_LABEL_FMT ".L_P%u"
+#endif
+#ifndef INSN_LABEL_FMT
+#define INSN_LABEL_FMT ".L_I%u_%u"
+#endif
+#ifndef BLOCK_BEGIN_LABEL_FMT
+#define BLOCK_BEGIN_LABEL_FMT ".L_B%u"
+#endif
+#ifndef BLOCK_END_LABEL_FMT
+#define BLOCK_END_LABEL_FMT ".L_B%u_e"
+#endif
+#ifndef SS_BEGIN_LABEL_FMT
+#define SS_BEGIN_LABEL_FMT ".L_s%u"
+#endif
+#ifndef SS_END_LABEL_FMT
+#define SS_END_LABEL_FMT ".L_s%u_e"
+#endif
+#ifndef EE_BEGIN_LABEL_FMT
+#define EE_BEGIN_LABEL_FMT ".L_e%u"
+#endif
+#ifndef EE_END_LABEL_FMT
+#define EE_END_LABEL_FMT ".L_e%u_e"
+#endif
+#ifndef MT_BEGIN_LABEL_FMT
+#define MT_BEGIN_LABEL_FMT ".L_t%u"
+#endif
+#ifndef MT_END_LABEL_FMT
+#define MT_END_LABEL_FMT ".L_t%u_e"
+#endif
+#ifndef LOC_BEGIN_LABEL_FMT
+#define LOC_BEGIN_LABEL_FMT ".L_l%u"
+#endif
+#ifndef LOC_END_LABEL_FMT
+#define LOC_END_LABEL_FMT ".L_l%u_e"
+#endif
+#ifndef BOUND_BEGIN_LABEL_FMT
+#define BOUND_BEGIN_LABEL_FMT ".L_b%u_%u_%c"
+#endif
+#ifndef BOUND_END_LABEL_FMT
+#define BOUND_END_LABEL_FMT ".L_b%u_%u_%c_e"
+#endif
+#ifndef DERIV_BEGIN_LABEL_FMT
+#define DERIV_BEGIN_LABEL_FMT ".L_d%u"
+#endif
+#ifndef DERIV_END_LABEL_FMT
+#define DERIV_END_LABEL_FMT ".L_d%u_e"
+#endif
+#ifndef SL_BEGIN_LABEL_FMT
+#define SL_BEGIN_LABEL_FMT ".L_sl%u"
+#endif
+#ifndef SL_END_LABEL_FMT
+#define SL_END_LABEL_FMT ".L_sl%u_e"
+#endif
+#ifndef BODY_BEGIN_LABEL_FMT
+#define BODY_BEGIN_LABEL_FMT ".L_b%u"
+#endif
+#ifndef BODY_END_LABEL_FMT
+#define BODY_END_LABEL_FMT ".L_b%u_e"
+#endif
+#ifndef FUNC_END_LABEL_FMT
+#define FUNC_END_LABEL_FMT ".L_f%u_e"
+#endif
+#ifndef TYPE_NAME_FMT
+#define TYPE_NAME_FMT ".L_T%u"
+#endif
+#ifndef DECL_NAME_FMT
+#define DECL_NAME_FMT ".L_E%u"
+#endif
+#ifndef LINE_CODE_LABEL_FMT
+#define LINE_CODE_LABEL_FMT ".L_LC%u"
+#endif
+#ifndef SFNAMES_ENTRY_LABEL_FMT
+#define SFNAMES_ENTRY_LABEL_FMT ".L_F%u"
+#endif
+#ifndef LINE_ENTRY_LABEL_FMT
+#define LINE_ENTRY_LABEL_FMT ".L_LE%u"
+#endif
+
+/* Definitions of defaults for various types of primitive assembly language
+ output operations.
+
+ If necessary, these may be overridden from within your tm.h file,
+ but typically, you shouldn't need to override these. */
+
+#ifndef ASM_OUTPUT_PUSH_SECTION
+#define ASM_OUTPUT_PUSH_SECTION(FILE, SECTION) \
+ fprintf ((FILE), PUSHSECTION_FORMAT, PUSHSECTION_ASM_OP, SECTION)
+#endif
+
+#ifndef ASM_OUTPUT_POP_SECTION
+#define ASM_OUTPUT_POP_SECTION(FILE) \
+ fprintf ((FILE), "\t%s\n", POPSECTION_ASM_OP)
+#endif
+
+#ifndef ASM_OUTPUT_SOURCE_FILENAME
+#define ASM_OUTPUT_SOURCE_FILENAME(FILE,NAME) \
+ do { fprintf (FILE, "\t%s\t", FILE_ASM_OP); \
+ output_quoted_string (FILE, NAME); \
+ fputc ('\n', FILE); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DELTA2
+#define ASM_OUTPUT_DWARF_DELTA2(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_SHORT_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DELTA4
+#define ASM_OUTPUT_DWARF_DELTA4(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_TAG
+#define ASM_OUTPUT_DWARF_TAG(FILE,TAG) \
+ do { \
+ fprintf ((FILE), "\t%s\t0x%x", \
+ UNALIGNED_SHORT_ASM_OP, (unsigned) TAG); \
+ if (flag_verbose_asm) \
+ fprintf ((FILE), "\t%s %s", \
+ ASM_COMMENT_START, dwarf_tag_name (TAG)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ATTRIBUTE
+#define ASM_OUTPUT_DWARF_ATTRIBUTE(FILE,ATTR) \
+ do { \
+ fprintf ((FILE), "\t%s\t0x%x", \
+ UNALIGNED_SHORT_ASM_OP, (unsigned) ATTR); \
+ if (flag_verbose_asm) \
+ fprintf ((FILE), "\t%s %s", \
+ ASM_COMMENT_START, dwarf_attr_name (ATTR)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_STACK_OP
+#define ASM_OUTPUT_DWARF_STACK_OP(FILE,OP) \
+ do { \
+ fprintf ((FILE), "\t%s\t0x%x", ASM_BYTE_OP, (unsigned) OP); \
+ if (flag_verbose_asm) \
+ fprintf ((FILE), "\t%s %s", \
+ ASM_COMMENT_START, dwarf_stack_op_name (OP)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_FUND_TYPE
+#define ASM_OUTPUT_DWARF_FUND_TYPE(FILE,FT) \
+ do { \
+ fprintf ((FILE), "\t%s\t0x%x", \
+ UNALIGNED_SHORT_ASM_OP, (unsigned) FT); \
+ if (flag_verbose_asm) \
+ fprintf ((FILE), "\t%s %s", \
+ ASM_COMMENT_START, dwarf_fund_type_name (FT)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_FMT_BYTE
+#define ASM_OUTPUT_DWARF_FMT_BYTE(FILE,FMT) \
+ do { \
+ fprintf ((FILE), "\t%s\t0x%x", ASM_BYTE_OP, (unsigned) FMT); \
+ if (flag_verbose_asm) \
+ fprintf ((FILE), "\t%s %s", \
+ ASM_COMMENT_START, dwarf_fmt_byte_name (FMT)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_TYPE_MODIFIER
+#define ASM_OUTPUT_DWARF_TYPE_MODIFIER(FILE,MOD) \
+ do { \
+ fprintf ((FILE), "\t%s\t0x%x", ASM_BYTE_OP, (unsigned) MOD); \
+ if (flag_verbose_asm) \
+ fprintf ((FILE), "\t%s %s", \
+ ASM_COMMENT_START, dwarf_typemod_name (MOD)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ADDR
+#define ASM_OUTPUT_DWARF_ADDR(FILE,LABEL) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ assemble_name (FILE, LABEL); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_ADDR_CONST
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+ do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_REF
+#define ASM_OUTPUT_DWARF_REF(FILE,LABEL) \
+ do { fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ assemble_name (FILE, LABEL); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA1
+#define ASM_OUTPUT_DWARF_DATA1(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%x\n", ASM_BYTE_OP, VALUE)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA2
+#define ASM_OUTPUT_DWARF_DATA2(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%x\n", UNALIGNED_SHORT_ASM_OP, (unsigned) VALUE)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA4
+#define ASM_OUTPUT_DWARF_DATA4(FILE,VALUE) \
+ fprintf ((FILE), "\t%s\t0x%x\n", UNALIGNED_INT_ASM_OP, (unsigned) VALUE)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_DATA8
+#define ASM_OUTPUT_DWARF_DATA8(FILE,HIGH_VALUE,LOW_VALUE) \
+ do { \
+ if (WORDS_BIG_ENDIAN) \
+ { \
+ fprintf ((FILE), "\t%s\t0x%x\n", UNALIGNED_INT_ASM_OP, HIGH_VALUE); \
+ fprintf ((FILE), "\t%s\t0x%x\n", UNALIGNED_INT_ASM_OP, LOW_VALUE);\
+ } \
+ else \
+ { \
+ fprintf ((FILE), "\t%s\t0x%x\n", UNALIGNED_INT_ASM_OP, LOW_VALUE);\
+ fprintf ((FILE), "\t%s\t0x%x\n", UNALIGNED_INT_ASM_OP, HIGH_VALUE); \
+ } \
+ } while (0)
+#endif
+
+#ifndef ASM_OUTPUT_DWARF_STRING
+#define ASM_OUTPUT_DWARF_STRING(FILE,P) \
+ ASM_OUTPUT_ASCII ((FILE), P, strlen (P)+1)
+#endif
+
+/************************ general utility functions **************************/
+
+inline char *
+xstrdup (s)
+ register char *s;
+{
+ register char *p = (char *) xmalloc (strlen (s) + 1);
+
+ strcpy (p, s);
+ return p;
+}
+
+inline int
+is_pseudo_reg (rtl)
+ register rtx rtl;
+{
+ return (((GET_CODE (rtl) == REG) && (REGNO (rtl) >= FIRST_PSEUDO_REGISTER))
+ || ((GET_CODE (rtl) == SUBREG)
+ && (REGNO (XEXP (rtl, 0)) >= FIRST_PSEUDO_REGISTER)));
+}
+
+inline tree
+type_main_variant (type)
+ register tree type;
+{
+ type = TYPE_MAIN_VARIANT (type);
+
+ /* There really should be only one main variant among any group of variants
+ of a given type (and all of the MAIN_VARIANT values for all members of
+ the group should point to that one type) but sometimes the C front-end
+ messes this up for array types, so we work around that bug here. */
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ while (type != TYPE_MAIN_VARIANT (type))
+ type = TYPE_MAIN_VARIANT (type);
+ }
+
+ return type;
+}
+
+/* Return non-zero if the given type node represents a tagged type. */
+
+inline int
+is_tagged_type (type)
+ register tree type;
+{
+ register enum tree_code code = TREE_CODE (type);
+
+ return (code == RECORD_TYPE || code == UNION_TYPE
+ || code == QUAL_UNION_TYPE || code == ENUMERAL_TYPE);
+}
+
+static char *
+dwarf_tag_name (tag)
+ register unsigned tag;
+{
+ switch (tag)
+ {
+ case TAG_padding: return "TAG_padding";
+ case TAG_array_type: return "TAG_array_type";
+ case TAG_class_type: return "TAG_class_type";
+ case TAG_entry_point: return "TAG_entry_point";
+ case TAG_enumeration_type: return "TAG_enumeration_type";
+ case TAG_formal_parameter: return "TAG_formal_parameter";
+ case TAG_global_subroutine: return "TAG_global_subroutine";
+ case TAG_global_variable: return "TAG_global_variable";
+ case TAG_label: return "TAG_label";
+ case TAG_lexical_block: return "TAG_lexical_block";
+ case TAG_local_variable: return "TAG_local_variable";
+ case TAG_member: return "TAG_member";
+ case TAG_pointer_type: return "TAG_pointer_type";
+ case TAG_reference_type: return "TAG_reference_type";
+ case TAG_compile_unit: return "TAG_compile_unit";
+ case TAG_string_type: return "TAG_string_type";
+ case TAG_structure_type: return "TAG_structure_type";
+ case TAG_subroutine: return "TAG_subroutine";
+ case TAG_subroutine_type: return "TAG_subroutine_type";
+ case TAG_typedef: return "TAG_typedef";
+ case TAG_union_type: return "TAG_union_type";
+ case TAG_unspecified_parameters: return "TAG_unspecified_parameters";
+ case TAG_variant: return "TAG_variant";
+ case TAG_common_block: return "TAG_common_block";
+ case TAG_common_inclusion: return "TAG_common_inclusion";
+ case TAG_inheritance: return "TAG_inheritance";
+ case TAG_inlined_subroutine: return "TAG_inlined_subroutine";
+ case TAG_module: return "TAG_module";
+ case TAG_ptr_to_member_type: return "TAG_ptr_to_member_type";
+ case TAG_set_type: return "TAG_set_type";
+ case TAG_subrange_type: return "TAG_subrange_type";
+ case TAG_with_stmt: return "TAG_with_stmt";
+
+ /* GNU extensions. */
+
+ case TAG_format_label: return "TAG_format_label";
+ case TAG_namelist: return "TAG_namelist";
+ case TAG_function_template: return "TAG_function_template";
+ case TAG_class_template: return "TAG_class_template";
+
+ default: return "TAG_<unknown>";
+ }
+}
+
+static char *
+dwarf_attr_name (attr)
+ register unsigned attr;
+{
+ switch (attr)
+ {
+ case AT_sibling: return "AT_sibling";
+ case AT_location: return "AT_location";
+ case AT_name: return "AT_name";
+ case AT_fund_type: return "AT_fund_type";
+ case AT_mod_fund_type: return "AT_mod_fund_type";
+ case AT_user_def_type: return "AT_user_def_type";
+ case AT_mod_u_d_type: return "AT_mod_u_d_type";
+ case AT_ordering: return "AT_ordering";
+ case AT_subscr_data: return "AT_subscr_data";
+ case AT_byte_size: return "AT_byte_size";
+ case AT_bit_offset: return "AT_bit_offset";
+ case AT_bit_size: return "AT_bit_size";
+ case AT_element_list: return "AT_element_list";
+ case AT_stmt_list: return "AT_stmt_list";
+ case AT_low_pc: return "AT_low_pc";
+ case AT_high_pc: return "AT_high_pc";
+ case AT_language: return "AT_language";
+ case AT_member: return "AT_member";
+ case AT_discr: return "AT_discr";
+ case AT_discr_value: return "AT_discr_value";
+ case AT_string_length: return "AT_string_length";
+ case AT_common_reference: return "AT_common_reference";
+ case AT_comp_dir: return "AT_comp_dir";
+ case AT_const_value_string: return "AT_const_value_string";
+ case AT_const_value_data2: return "AT_const_value_data2";
+ case AT_const_value_data4: return "AT_const_value_data4";
+ case AT_const_value_data8: return "AT_const_value_data8";
+ case AT_const_value_block2: return "AT_const_value_block2";
+ case AT_const_value_block4: return "AT_const_value_block4";
+ case AT_containing_type: return "AT_containing_type";
+ case AT_default_value_addr: return "AT_default_value_addr";
+ case AT_default_value_data2: return "AT_default_value_data2";
+ case AT_default_value_data4: return "AT_default_value_data4";
+ case AT_default_value_data8: return "AT_default_value_data8";
+ case AT_default_value_string: return "AT_default_value_string";
+ case AT_friends: return "AT_friends";
+ case AT_inline: return "AT_inline";
+ case AT_is_optional: return "AT_is_optional";
+ case AT_lower_bound_ref: return "AT_lower_bound_ref";
+ case AT_lower_bound_data2: return "AT_lower_bound_data2";
+ case AT_lower_bound_data4: return "AT_lower_bound_data4";
+ case AT_lower_bound_data8: return "AT_lower_bound_data8";
+ case AT_private: return "AT_private";
+ case AT_producer: return "AT_producer";
+ case AT_program: return "AT_program";
+ case AT_protected: return "AT_protected";
+ case AT_prototyped: return "AT_prototyped";
+ case AT_public: return "AT_public";
+ case AT_pure_virtual: return "AT_pure_virtual";
+ case AT_return_addr: return "AT_return_addr";
+ case AT_abstract_origin: return "AT_abstract_origin";
+ case AT_start_scope: return "AT_start_scope";
+ case AT_stride_size: return "AT_stride_size";
+ case AT_upper_bound_ref: return "AT_upper_bound_ref";
+ case AT_upper_bound_data2: return "AT_upper_bound_data2";
+ case AT_upper_bound_data4: return "AT_upper_bound_data4";
+ case AT_upper_bound_data8: return "AT_upper_bound_data8";
+ case AT_virtual: return "AT_virtual";
+
+ /* GNU extensions */
+
+ case AT_sf_names: return "AT_sf_names";
+ case AT_src_info: return "AT_src_info";
+ case AT_mac_info: return "AT_mac_info";
+ case AT_src_coords: return "AT_src_coords";
+ case AT_body_begin: return "AT_body_begin";
+ case AT_body_end: return "AT_body_end";
+
+ default: return "AT_<unknown>";
+ }
+}
+
+static char *
+dwarf_stack_op_name (op)
+ register unsigned op;
+{
+ switch (op)
+ {
+ case OP_REG: return "OP_REG";
+ case OP_BASEREG: return "OP_BASEREG";
+ case OP_ADDR: return "OP_ADDR";
+ case OP_CONST: return "OP_CONST";
+ case OP_DEREF2: return "OP_DEREF2";
+ case OP_DEREF4: return "OP_DEREF4";
+ case OP_ADD: return "OP_ADD";
+ default: return "OP_<unknown>";
+ }
+}
+
+static char *
+dwarf_typemod_name (mod)
+ register unsigned mod;
+{
+ switch (mod)
+ {
+ case MOD_pointer_to: return "MOD_pointer_to";
+ case MOD_reference_to: return "MOD_reference_to";
+ case MOD_const: return "MOD_const";
+ case MOD_volatile: return "MOD_volatile";
+ default: return "MOD_<unknown>";
+ }
+}
+
+static char *
+dwarf_fmt_byte_name (fmt)
+ register unsigned fmt;
+{
+ switch (fmt)
+ {
+ case FMT_FT_C_C: return "FMT_FT_C_C";
+ case FMT_FT_C_X: return "FMT_FT_C_X";
+ case FMT_FT_X_C: return "FMT_FT_X_C";
+ case FMT_FT_X_X: return "FMT_FT_X_X";
+ case FMT_UT_C_C: return "FMT_UT_C_C";
+ case FMT_UT_C_X: return "FMT_UT_C_X";
+ case FMT_UT_X_C: return "FMT_UT_X_C";
+ case FMT_UT_X_X: return "FMT_UT_X_X";
+ case FMT_ET: return "FMT_ET";
+ default: return "FMT_<unknown>";
+ }
+}
+
+static char *
+dwarf_fund_type_name (ft)
+ register unsigned ft;
+{
+ switch (ft)
+ {
+ case FT_char: return "FT_char";
+ case FT_signed_char: return "FT_signed_char";
+ case FT_unsigned_char: return "FT_unsigned_char";
+ case FT_short: return "FT_short";
+ case FT_signed_short: return "FT_signed_short";
+ case FT_unsigned_short: return "FT_unsigned_short";
+ case FT_integer: return "FT_integer";
+ case FT_signed_integer: return "FT_signed_integer";
+ case FT_unsigned_integer: return "FT_unsigned_integer";
+ case FT_long: return "FT_long";
+ case FT_signed_long: return "FT_signed_long";
+ case FT_unsigned_long: return "FT_unsigned_long";
+ case FT_pointer: return "FT_pointer";
+ case FT_float: return "FT_float";
+ case FT_dbl_prec_float: return "FT_dbl_prec_float";
+ case FT_ext_prec_float: return "FT_ext_prec_float";
+ case FT_complex: return "FT_complex";
+ case FT_dbl_prec_complex: return "FT_dbl_prec_complex";
+ case FT_void: return "FT_void";
+ case FT_boolean: return "FT_boolean";
+ case FT_ext_prec_complex: return "FT_ext_prec_complex";
+ case FT_label: return "FT_label";
+
+ /* GNU extensions. */
+
+ case FT_long_long: return "FT_long_long";
+ case FT_signed_long_long: return "FT_signed_long_long";
+ case FT_unsigned_long_long: return "FT_unsigned_long_long";
+
+ case FT_int8: return "FT_int8";
+ case FT_signed_int8: return "FT_signed_int8";
+ case FT_unsigned_int8: return "FT_unsigned_int8";
+ case FT_int16: return "FT_int16";
+ case FT_signed_int16: return "FT_signed_int16";
+ case FT_unsigned_int16: return "FT_unsigned_int16";
+ case FT_int32: return "FT_int32";
+ case FT_signed_int32: return "FT_signed_int32";
+ case FT_unsigned_int32: return "FT_unsigned_int32";
+ case FT_int64: return "FT_int64";
+ case FT_signed_int64: return "FT_signed_int64";
+ case FT_unsigned_int64: return "FT_unsigned_int64";
+
+ case FT_real32: return "FT_real32";
+ case FT_real64: return "FT_real64";
+ case FT_real96: return "FT_real96";
+ case FT_real128: return "FT_real128";
+
+ default: return "FT_<unknown>";
+ }
+}
+
+/* Determine the "ultimate origin" of a decl. The decl may be an
+ inlined instance of an inlined instance of a decl which is local
+ to an inline function, so we have to trace all of the way back
+ through the origin chain to find out what sort of node actually
+ served as the original seed for the given block. */
+
+static tree
+decl_ultimate_origin (decl)
+ register tree decl;
+{
+ register tree immediate_origin = DECL_ABSTRACT_ORIGIN (decl);
+
+ if (immediate_origin == NULL)
+ return NULL;
+ else
+ {
+ register tree ret_val;
+ register tree lookahead = immediate_origin;
+
+ do
+ {
+ ret_val = lookahead;
+ lookahead = DECL_ABSTRACT_ORIGIN (ret_val);
+ }
+ while (lookahead != NULL && lookahead != ret_val);
+ return ret_val;
+ }
+}
+
+/* Determine the "ultimate origin" of a block. The block may be an
+ inlined instance of an inlined instance of a block which is local
+ to an inline function, so we have to trace all of the way back
+ through the origin chain to find out what sort of node actually
+ served as the original seed for the given block. */
+
+static tree
+block_ultimate_origin (block)
+ register tree block;
+{
+ register tree immediate_origin = BLOCK_ABSTRACT_ORIGIN (block);
+
+ if (immediate_origin == NULL)
+ return NULL;
+ else
+ {
+ register tree ret_val;
+ register tree lookahead = immediate_origin;
+
+ do
+ {
+ ret_val = lookahead;
+ lookahead = (TREE_CODE (ret_val) == BLOCK)
+ ? BLOCK_ABSTRACT_ORIGIN (ret_val)
+ : NULL;
+ }
+ while (lookahead != NULL && lookahead != ret_val);
+ return ret_val;
+ }
+}
+
+static void
+output_unsigned_leb128 (value)
+ register unsigned long value;
+{
+ register unsigned long orig_value = value;
+
+ do
+ {
+ register unsigned byte = (value & 0x7f);
+
+ value >>= 7;
+ if (value != 0) /* more bytes to follow */
+ byte |= 0x80;
+ fprintf (asm_out_file, "\t%s\t0x%x", ASM_BYTE_OP, (unsigned) byte);
+ if (flag_verbose_asm && value == 0)
+ fprintf (asm_out_file, "\t%s ULEB128 number - value = %u",
+ ASM_COMMENT_START, orig_value);
+ fputc ('\n', asm_out_file);
+ }
+ while (value != 0);
+}
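+
+/* As a worked illustration of the encoding above (purely explanatory):
+   the value 624485 (0x98765) is split into 7-bit groups starting from
+   the low-order end, and every byte except the last has its high bit
+   set to mark a continuation:
+
+	group 0:  0x65 | 0x80  ->  0xe5
+	group 1:  0x0e | 0x80  ->  0x8e
+	group 2:  0x26         ->  0x26
+
+   so output_unsigned_leb128 (624485) emits the bytes 0xe5 0x8e 0x26.  */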
+
+static void
+output_signed_leb128 (value)
+ register long value;
+{
+ register long orig_value = value;
+ register int negative = (value < 0);
+ register int more;
+
+ do
+ {
+ register unsigned byte = (value & 0x7f);
+
+ value >>= 7;
+ if (negative)
+ value |= 0xfe000000; /* manually sign extend (assumes a 32-bit long) */
+ if (((value == 0) && ((byte & 0x40) == 0))
+ || ((value == -1) && ((byte & 0x40) != 0)))
+ more = 0;
+ else
+ {
+ byte |= 0x80;
+ more = 1;
+ }
+ fprintf (asm_out_file, "\t%s\t0x%x", ASM_BYTE_OP, (unsigned) byte);
+ if (flag_verbose_asm && more == 0)
+ fprintf (asm_out_file, "\t%s SLEB128 number - value = %d",
+ ASM_COMMENT_START, orig_value);
+ fputc ('\n', asm_out_file);
+ }
+ while (more);
+}
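+
+/* Likewise for the signed encoding above (purely explanatory):
+   output_signed_leb128 (-2) emits the single byte 0x7e, since the sign
+   bit (0x40) of that byte already shows the value to be negative, while
+   output_signed_leb128 (-129) emits 0xff 0x7e, because the first 7-bit
+   group (0x7f) does not by itself determine the sign.  */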
+
+/**************** utility functions for attribute functions ******************/
+
+/* Given a pointer to a BLOCK node return non-zero if (and only if) the
+ node in question represents the outermost pair of curly braces (i.e.
+ the "body block") of a function or method.
+
+ For any BLOCK node representing a "body block" of a function or method,
+ the BLOCK_SUPERCONTEXT of the node will point to another BLOCK node
+ which represents the outermost (function) scope for the function or
+ method (i.e. the one which includes the formal parameters). The
+ BLOCK_SUPERCONTEXT of *that* node in turn will point to the relevant
+ FUNCTION_DECL node.
+*/
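+
+/* For example, given
+
+	void f (x) int x; { int y; ... }
+
+   the BLOCK for the outermost braces (the one holding `y') is the "body
+   block"; its BLOCK_SUPERCONTEXT is the function-scope BLOCK (the one
+   holding `x'), whose BLOCK_SUPERCONTEXT in turn is the FUNCTION_DECL
+   for `f'.  This is exactly the chain tested below.  */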
+
+inline int
+is_body_block (stmt)
+ register tree stmt;
+{
+ if (TREE_CODE (stmt) == BLOCK)
+ {
+ register tree parent = BLOCK_SUPERCONTEXT (stmt);
+
+ if (TREE_CODE (parent) == BLOCK)
+ {
+ register tree grandparent = BLOCK_SUPERCONTEXT (parent);
+
+ if (TREE_CODE (grandparent) == FUNCTION_DECL)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Given a pointer to a tree node for some type, return a Dwarf fundamental
+ type code for the given type.
+
+ This routine must only be called for GCC type nodes that correspond to
+ Dwarf fundamental types.
+
+ The current Dwarf draft specification calls for Dwarf fundamental types
+ to accurately reflect the fact that a given type was either a "plain"
+ integral type or an explicitly "signed" integral type. Unfortunately,
+ we can't always do this, because GCC may already have thrown away the
+ information about the precise way in which the type was originally
+ specified, as in:
+
+ typedef signed int my_type;
+
+ struct s { my_type f; };
+
+ Since we may be stuck here without enough information to do exactly
+ what is called for in the Dwarf draft specification, we do the best
+ that we can under the circumstances and always use the "plain" integral
+ fundamental type codes for int, short, and long types. That's probably
+ good enough. The additional accuracy called for in the current DWARF
+ draft specification is probably never even useful in practice. */
+
+static int
+fundamental_type_code (type)
+ register tree type;
+{
+ if (TREE_CODE (type) == ERROR_MARK)
+ return 0;
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ return FT_void;
+
+ case VOID_TYPE:
+ return FT_void;
+
+ case INTEGER_TYPE:
+ /* Carefully distinguish all the standard types of C,
+ without messing up if the language is not C.
+ Note that we check only for the names that contain spaces;
+ other names might occur by coincidence in other languages. */
+ if (TYPE_NAME (type) != 0
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (type)) != 0
+ && TREE_CODE (DECL_NAME (TYPE_NAME (type))) == IDENTIFIER_NODE)
+ {
+ char *name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));
+
+ if (!strcmp (name, "unsigned char"))
+ return FT_unsigned_char;
+ if (!strcmp (name, "signed char"))
+ return FT_signed_char;
+ if (!strcmp (name, "unsigned int"))
+ return FT_unsigned_integer;
+ if (!strcmp (name, "short int"))
+ return FT_short;
+ if (!strcmp (name, "short unsigned int"))
+ return FT_unsigned_short;
+ if (!strcmp (name, "long int"))
+ return FT_long;
+ if (!strcmp (name, "long unsigned int"))
+ return FT_unsigned_long;
+ if (!strcmp (name, "long long int"))
+ return FT_long_long; /* Not grok'ed by svr4 SDB */
+ if (!strcmp (name, "long long unsigned int"))
+ return FT_unsigned_long_long; /* Not grok'ed by svr4 SDB */
+ }
+
+ /* Most integer types will be sorted out above; however, for the
+ sake of special `array index' integer types, the following code
+ is also provided. */
+
+ if (TYPE_PRECISION (type) == INT_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? FT_unsigned_integer : FT_integer);
+
+ if (TYPE_PRECISION (type) == LONG_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? FT_unsigned_long : FT_long);
+
+ if (TYPE_PRECISION (type) == LONG_LONG_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? FT_unsigned_long_long : FT_long_long);
+
+ if (TYPE_PRECISION (type) == SHORT_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? FT_unsigned_short : FT_short);
+
+ if (TYPE_PRECISION (type) == CHAR_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? FT_unsigned_char : FT_char);
+
+ abort ();
+
+ case REAL_TYPE:
+ /* Carefully distinguish all the standard types of C,
+ without messing up if the language is not C. */
+ if (TYPE_NAME (type) != 0
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (type)) != 0
+ && TREE_CODE (DECL_NAME (TYPE_NAME (type))) == IDENTIFIER_NODE)
+ {
+ char *name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));
+
+ /* Note that here we can run afoul of a serious bug in "classic"
+ svr4 SDB debuggers. They don't seem to understand the
+ FT_ext_prec_float type (even though they should). */
+
+ if (!strcmp (name, "long double"))
+ return FT_ext_prec_float;
+ }
+
+ if (TYPE_PRECISION (type) == DOUBLE_TYPE_SIZE)
+ return FT_dbl_prec_float;
+ if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
+ return FT_float;
+
+ /* Note that here we can run afoul of a serious bug in "classic"
+ svr4 SDB debuggers. They don't seem to understand the
+ FT_ext_prec_float type (even though they should). */
+
+ if (TYPE_PRECISION (type) == LONG_DOUBLE_TYPE_SIZE)
+ return FT_ext_prec_float;
+ abort ();
+
+ case COMPLEX_TYPE:
+ return FT_complex; /* GNU FORTRAN COMPLEX type. */
+
+ case CHAR_TYPE:
+ return FT_char; /* GNU Pascal CHAR type. Not used in C. */
+
+ case BOOLEAN_TYPE:
+ return FT_boolean; /* GNU FORTRAN BOOLEAN type. */
+
+ default:
+ abort (); /* No other TREE_CODEs are Dwarf fundamental types. */
+ }
+ return 0;
+}
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, return a pointer to
+ the Dwarf "root" type for the given input type. The Dwarf "root" type
+ of a given type is generally the same as the given type, except that if
+ the given type is a pointer or reference type, then the root type of
+ the given type is the root type of the "basis" type for the pointer or
+ reference type. (This definition of the "root" type is recursive.)
+ Also, the root type of a `const' qualified type or a `volatile'
+ qualified type is the root type of the given type without the
+ qualifiers. */
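+
+/* Thus, for example, the root type of `volatile char **' is plain
+   `char', and the root type of `const float' is plain `float'.  */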
+
+static tree
+root_type (type)
+ register tree type;
+{
+ if (TREE_CODE (type) == ERROR_MARK)
+ return error_mark_node;
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ return error_mark_node;
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ return type_main_variant (root_type (TREE_TYPE (type)));
+
+ default:
+ return type_main_variant (type);
+ }
+}
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, write out a sequence
+ of zero or more Dwarf "type-modifier" bytes applicable to the type. */
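+
+/* For example, for `const char *' this writes MOD_pointer_to followed
+   by MOD_const (outermost modifier first), whereas for `char *const'
+   it writes MOD_const followed by MOD_pointer_to.  */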
+
+static void
+write_modifier_bytes (type, decl_const, decl_volatile)
+ register tree type;
+ register int decl_const;
+ register int decl_volatile;
+{
+ if (TREE_CODE (type) == ERROR_MARK)
+ return;
+
+ if (TYPE_READONLY (type) || decl_const)
+ ASM_OUTPUT_DWARF_TYPE_MODIFIER (asm_out_file, MOD_const);
+ if (TYPE_VOLATILE (type) || decl_volatile)
+ ASM_OUTPUT_DWARF_TYPE_MODIFIER (asm_out_file, MOD_volatile);
+ switch (TREE_CODE (type))
+ {
+ case POINTER_TYPE:
+ ASM_OUTPUT_DWARF_TYPE_MODIFIER (asm_out_file, MOD_pointer_to);
+ write_modifier_bytes (TREE_TYPE (type), 0, 0);
+ return;
+
+ case REFERENCE_TYPE:
+ ASM_OUTPUT_DWARF_TYPE_MODIFIER (asm_out_file, MOD_reference_to);
+ write_modifier_bytes (TREE_TYPE (type), 0, 0);
+ return;
+
+ case ERROR_MARK:
+ default:
+ return;
+ }
+}
+
+/* Given a pointer to an arbitrary ..._TYPE tree node, return non-zero if the
+ given input type is a Dwarf "fundamental" type. Otherwise return zero. */
+
+inline int
+type_is_fundamental (type)
+ register tree type;
+{
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ case VOID_TYPE:
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ case COMPLEX_TYPE:
+ case BOOLEAN_TYPE:
+ case CHAR_TYPE:
+ return 1;
+
+ case SET_TYPE:
+ case ARRAY_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ case ENUMERAL_TYPE:
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ case FILE_TYPE:
+ case OFFSET_TYPE:
+ case LANG_TYPE:
+ return 0;
+
+ default:
+ abort ();
+ }
+ return 0;
+}
+
+/* Given a pointer to some ..._DECL tree node, generate an assembly language
+ equate directive which will associate a symbolic name with the current DIE.
+
+ The name used is an artificial label generated from the DECL_UID number
+ associated with the given decl node. The name it gets equated to is the
+ symbolic label that we (previously) output at the start of the DIE that
+ we are currently generating.
+
+ Calling this function while generating some "decl related" form of DIE
+ makes it possible to later refer to the DIE which represents the given
+ decl simply by re-generating the symbolic name from the ..._DECL node's
+ UID number. */
+
+static void
+equate_decl_number_to_die_number (decl)
+ register tree decl;
+{
+ /* In the case where we are generating a DIE for some ..._DECL node
+ which represents either some inline function declaration or some
+ entity declared within an inline function declaration/definition,
+ setup a symbolic name for the current DIE so that we have a name
+ for this DIE that we can easily refer to later on within
+ AT_abstract_origin attributes. */
+
+ char decl_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char die_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (decl_label, DECL_NAME_FMT, DECL_UID (decl));
+ sprintf (die_label, DIE_BEGIN_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DEF (asm_out_file, decl_label, die_label);
+}
+
+/* Given a pointer to some ..._TYPE tree node, generate an assembly language
+ equate directive which will associate a symbolic name with the current DIE.
+
+ The name used is an artificial label generated from the TYPE_UID number
+ associated with the given type node. The name it gets equated to is the
+ symbolic label that we (previously) output at the start of the DIE that
+ we are currently generating.
+
+ Calling this function while generating some "type related" form of DIE
+ makes it easy to later refer to the DIE which represents the given type
+ simply by re-generating the alternative name from the ..._TYPE node's
+ UID number. */
+
+inline void
+equate_type_number_to_die_number (type)
+ register tree type;
+{
+ char type_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char die_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ /* We are generating a DIE to represent the main variant of this type
+ (i.e the type without any const or volatile qualifiers) so in order
+ to get the equate to come out right, we need to get the main variant
+ itself here. */
+
+ type = type_main_variant (type);
+
+ sprintf (type_label, TYPE_NAME_FMT, TYPE_UID (type));
+ sprintf (die_label, DIE_BEGIN_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DEF (asm_out_file, type_label, die_label);
+}
+
+static void
+output_reg_number (rtl)
+ register rtx rtl;
+{
+ register unsigned regno = REGNO (rtl);
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ warning_with_decl (dwarf_last_decl, "internal regno botch: regno = %u",
+ regno);
+ regno = 0;
+ }
+ fprintf (asm_out_file, "\t%s\t0x%x",
+ UNALIGNED_INT_ASM_OP, DBX_REGISTER_NUMBER (regno));
+ if (flag_verbose_asm)
+ {
+ fprintf (asm_out_file, "\t%s ", ASM_COMMENT_START);
+ PRINT_REG (rtl, 0, asm_out_file);
+ }
+ fputc ('\n', asm_out_file);
+}
+
+/* The following routine is a nice and simple transducer. It converts the
+ RTL for a variable or parameter (resident in memory) into an equivalent
+ Dwarf representation of a mechanism for getting the address of that same
+ variable onto the top of a hypothetical "address evaluation" stack.
+
+ When creating memory location descriptors, we are effectively trans-
+ forming the RTL for a memory-resident object into its Dwarf postfix
+ expression equivalent. This routine just recursively descends an
+ RTL tree, turning it into Dwarf postfix code as it goes. */
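+
+/* As a purely illustrative example: for an automatic variable whose
+   address is given by the RTL (plus (reg frame-pointer) (const_int -8)),
+   the recursion below produces the postfix sequence
+
+	OP_BASEREG  <frame-pointer register number>
+	OP_CONST    -8	(written as a 4-byte datum)
+	OP_ADD
+
+   which a Dwarf consumer evaluates to yield the variable's address.  */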
+
+static void
+output_mem_loc_descriptor (rtl)
+ register rtx rtl;
+{
+ /* Note that for a dynamically sized array, the location we will
+ generate a description of here will be the lowest numbered location
+ which is actually within the array. That's *not* necessarily the
+ same as the zeroth element of the array. */
+
+ switch (GET_CODE (rtl))
+ {
+ case SUBREG:
+
+ /* The case of a subreg may arise when we have a local (register)
+ variable or a formal (register) parameter which doesn't quite
+ fill up an entire register. For now, just assume that it is
+ legitimate to make the Dwarf info refer to the whole register
+ which contains the given subreg. */
+
+ rtl = XEXP (rtl, 0);
+ /* Drop thru. */
+
+ case REG:
+
+ /* Whenever a register number forms a part of the description of
+ the method for calculating the (dynamic) address of a memory
+ resident object, DWARF rules require the register number to
+ be referred to as a "base register". This distinction is not
+ based in any way upon what category of register the hardware
+ believes the given register belongs to. This is strictly
+ DWARF terminology we're dealing with here.
+
+ Note that in cases where the location of a memory-resident data
+ object could be expressed as:
+
+ OP_ADD (OP_BASEREG (basereg), OP_CONST (0))
+
+ the actual DWARF location descriptor that we generate may just
+ be OP_BASEREG (basereg). This may look deceptively like the
+ object in question was allocated to a register (rather than
+ in memory) so DWARF consumers need to be aware of the subtle
+ distinction between OP_REG and OP_BASEREG. */
+
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_BASEREG);
+ output_reg_number (rtl);
+ break;
+
+ case MEM:
+ output_mem_loc_descriptor (XEXP (rtl, 0));
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_DEREF4);
+ break;
+
+ case CONST:
+ case SYMBOL_REF:
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_ADDR);
+ ASM_OUTPUT_DWARF_ADDR_CONST (asm_out_file, rtl);
+ break;
+
+ case PLUS:
+ output_mem_loc_descriptor (XEXP (rtl, 0));
+ output_mem_loc_descriptor (XEXP (rtl, 1));
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_ADD);
+ break;
+
+ case CONST_INT:
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_CONST);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, INTVAL (rtl));
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Output a proper Dwarf location descriptor for a variable or parameter
+ which is either allocated in a register or in a memory location. For
+ a register, we just generate an OP_REG and the register number. For a
+ memory location we provide a Dwarf postfix expression describing how to
+ generate the (dynamic) address of the object onto the address stack. */
+
+static void
+output_loc_descriptor (rtl)
+ register rtx rtl;
+{
+ switch (GET_CODE (rtl))
+ {
+ case SUBREG:
+
+ /* The case of a subreg may arise when we have a local (register)
+ variable or a formal (register) parameter which doesn't quite
+ fill up an entire register. For now, just assume that it is
+ legitimate to make the Dwarf info refer to the whole register
+ which contains the given subreg. */
+
+ rtl = XEXP (rtl, 0);
+ /* Drop thru. */
+
+ case REG:
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_REG);
+ output_reg_number (rtl);
+ break;
+
+ case MEM:
+ output_mem_loc_descriptor (XEXP (rtl, 0));
+ break;
+
+ default:
+ abort (); /* Should never happen */
+ }
+}
+
+/* Given a tree node describing an array bound (either lower or upper)
+ output a representation for that bound. */
+
+static void
+output_bound_representation (bound, dim_num, u_or_l)
+ register tree bound;
+ register unsigned dim_num; /* For multi-dimensional arrays. */
+ register char u_or_l; /* Designates upper or lower bound. */
+{
+ switch (TREE_CODE (bound))
+ {
+
+ case ERROR_MARK:
+ return;
+
+ /* All fixed bounds are represented by INTEGER_CST nodes. */
+
+ case INTEGER_CST:
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+ (unsigned) TREE_INT_CST_LOW (bound));
+ break;
+
+ /* Dynamic bounds may be represented by NOP_EXPR nodes containing
+ SAVE_EXPR nodes. */
+
+ case NOP_EXPR:
+ bound = TREE_OPERAND (bound, 0);
+ /* ... fall thru... */
+
+ case SAVE_EXPR:
+ {
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (begin_label, BOUND_BEGIN_LABEL_FMT,
+ current_dienum, dim_num, u_or_l);
+
+ sprintf (end_label, BOUND_END_LABEL_FMT,
+ current_dienum, dim_num, u_or_l);
+
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ /* If we are working on a bound for a dynamic dimension in C,
+ the dynamic dimension in question had better have a static
+ (zero) lower bound and a dynamic *upper* bound. */
+
+ if (u_or_l != 'u')
+ abort ();
+
+ /* If optimization is turned on, the SAVE_EXPRs that describe
+ how to access the upper bound values are essentially bogus.
+ They only describe (at best) how to get at these values at
+ the points in the generated code right after they have just
+ been computed. Worse yet, in the typical case, the upper
+ bound values will not even *be* computed in the optimized
+ code, so these SAVE_EXPRs are entirely bogus.
+
+ In order to compensate for this fact, we check here to see
+ if optimization is enabled, and if so, we effectively create
+ an empty location description for the (unknown and unknowable)
+ upper bound.
+
+ This should not cause too much trouble for existing (stupid?)
+ debuggers because they have to deal with empty upper-bound
+ location descriptions anyway in order to be able to deal with
+ incomplete array types.
+
+ Of course an intelligent debugger (GDB?) should be able to
+ comprehend that a missing upper bound specification in a
+ array type used for a storage class `auto' local array variable
+ indicates that the upper bound is both unknown (at compile-
+ time) and unknowable (at run-time) due to optimization.
+ */
+
+ if (! optimize)
+ output_loc_descriptor
+ (eliminate_regs (SAVE_EXPR_RTL (bound), 0, NULL_RTX));
+
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+ }
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Recursive function to output a sequence of value/name pairs for
+ enumeration constants in reversed order. This is called from
+ enumeration_type_die. */
+
+static void
+output_enumeral_list (link)
+ register tree link;
+{
+ if (link)
+ {
+ output_enumeral_list (TREE_CHAIN (link));
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+ (unsigned) TREE_INT_CST_LOW (TREE_VALUE (link)));
+ ASM_OUTPUT_DWARF_STRING (asm_out_file,
+ IDENTIFIER_POINTER (TREE_PURPOSE (link)));
+ }
+}
+
+/* Given an unsigned value, round it up to the lowest multiple of `boundary'
+ which is not less than the value itself. */
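+/* For example, ceiling (43, 8) is 48, while ceiling (48, 8) is 48.  */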
+
+inline unsigned
+ceiling (value, boundary)
+ register unsigned value;
+ register unsigned boundary;
+{
+ return (((value + boundary - 1) / boundary) * boundary);
+}
+
+/* Given a pointer to what is assumed to be a FIELD_DECL node, return a
+ pointer to the declared type for the relevant field variable, or return
+ `integer_type_node' if the given node turns out to be an ERROR_MARK node. */
+
+inline tree
+field_type (decl)
+ register tree decl;
+{
+ register tree type;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return integer_type_node;
+
+ type = DECL_BIT_FIELD_TYPE (decl);
+ if (type == NULL)
+ type = TREE_TYPE (decl);
+ return type;
+}
+
+/* Given a pointer to a tree node, assumed to be some kind of a ..._TYPE
+ node, return the alignment in bits for the type, or else return
+ BITS_PER_WORD if the node actually turns out to be an ERROR_MARK node. */
+
+inline unsigned
+simple_type_align_in_bits (type)
+ register tree type;
+{
+ return (TREE_CODE (type) != ERROR_MARK) ? TYPE_ALIGN (type) : BITS_PER_WORD;
+}
+
+/* Given a pointer to a tree node, assumed to be some kind of a ..._TYPE
+ node, return the size in bits for the type if it is a constant, or
+ else return the alignment for the type if the type's size is not
+ constant, or else return BITS_PER_WORD if the type actually turns out
+ to be an ERROR_MARK node. */
+
+inline unsigned
+simple_type_size_in_bits (type)
+ register tree type;
+{
+ if (TREE_CODE (type) == ERROR_MARK)
+ return BITS_PER_WORD;
+ else
+ {
+ register tree type_size_tree = TYPE_SIZE (type);
+
+ if (TREE_CODE (type_size_tree) != INTEGER_CST)
+ return TYPE_ALIGN (type);
+
+ return (unsigned) TREE_INT_CST_LOW (type_size_tree);
+ }
+}
+
+/* Given a pointer to what is assumed to be a FIELD_DECL node, compute and
+ return the byte offset of the lowest addressed byte of the "containing
+ object" for the given FIELD_DECL, or return 0 if we are unable to deter-
+ mine what that offset is, either because the argument turns out to be a
+ pointer to an ERROR_MARK node, or because the offset is actually variable.
+ (We can't handle the latter case just yet.) */
+
+static unsigned
+field_byte_offset (decl)
+ register tree decl;
+{
+ register unsigned type_align_in_bytes;
+ register unsigned type_align_in_bits;
+ register unsigned type_size_in_bits;
+ register unsigned object_offset_in_align_units;
+ register unsigned object_offset_in_bits;
+ register unsigned object_offset_in_bytes;
+ register tree type;
+ register tree bitpos_tree;
+ register tree field_size_tree;
+ register unsigned bitpos_int;
+ register unsigned deepest_bitpos;
+ register unsigned field_size_in_bits;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return 0;
+
+ if (TREE_CODE (decl) != FIELD_DECL)
+ abort ();
+
+ type = field_type (decl);
+
+ bitpos_tree = DECL_FIELD_BITPOS (decl);
+ field_size_tree = DECL_SIZE (decl);
+
+ /* We cannot yet cope with fields whose positions or sizes are variable,
+ so for now, when we see such things, we simply return 0. Someday,
+ we may be able to handle such cases, but it will be damn difficult. */
+
+ if (TREE_CODE (bitpos_tree) != INTEGER_CST)
+ return 0;
+ bitpos_int = (unsigned) TREE_INT_CST_LOW (bitpos_tree);
+
+ if (TREE_CODE (field_size_tree) != INTEGER_CST)
+ return 0;
+ field_size_in_bits = (unsigned) TREE_INT_CST_LOW (field_size_tree);
+
+ type_size_in_bits = simple_type_size_in_bits (type);
+
+ type_align_in_bits = simple_type_align_in_bits (type);
+ type_align_in_bytes = type_align_in_bits / BITS_PER_UNIT;
+
+ /* Note that the GCC front-end doesn't make any attempt to keep track
+ of the starting bit offset (relative to the start of the containing
+ structure type) of the hypothetical "containing object" for a bit-
+ field. Thus, when computing the byte offset value for the start of
+ the "containing object" of a bit-field, we must deduce this infor-
+ mation on our own.
+
+ This can be rather tricky to do in some cases. For example, handling
+ the following structure type definition when compiling for an i386/i486
+ target (which only aligns long long's to 32-bit boundaries) can be very
+ tricky:
+
+ struct S {
+ int field1;
+ long long field2:31;
+ };
+
+ Fortunately, there is a simple rule-of-thumb which can be used in such
+ cases. When compiling for an i386/i486, GCC will allocate 8 bytes for
+ the structure shown above. It decides to do this based upon one simple
+ rule for bit-field allocation. Quite simply, GCC allocates each "con-
+ taining object" for each bit-field at the first (i.e. lowest addressed)
+ legitimate alignment boundary (based upon the required minimum alignment
+ for the declared type of the field) which it can possibly use, subject
+ to the condition that there is still enough available space remaining
+ in the containing object (when allocated at the selected point) to
+ fully accommodate all of the bits of the bit-field itself.
+
+ This simple rule makes it obvious why GCC allocates 8 bytes for each
+ object of the structure type shown above. When looking for a place to
+ allocate the "containing object" for `field2', the compiler simply tries
+ to allocate a 64-bit "containing object" at each successive 32-bit
+ boundary (starting at zero) until it finds a place to allocate that 64-
+ bit field such that at least 31 contiguous (and previously unallocated)
+ bits remain within that selected 64 bit field. (As it turns out, for
+ the example above, the compiler finds that it is OK to allocate the
+ "containing object" 64-bit field at bit-offset zero within the
+ structure type.)
+
+ Here we attempt to work backwards from the limited set of facts we're
+ given, and we try to deduce from those facts, where GCC must have
+ believed that the containing object started (within the structure type).
+
+ The value we deduce is then used (by the callers of this routine) to
+ generate AT_location and AT_bit_offset attributes for fields (both
+ bit-fields and, in the case of AT_location, regular fields as well).
+ */
+
+ /* Figure out the bit-distance from the start of the structure to the
+ "deepest" bit of the bit-field. */
+ deepest_bitpos = bitpos_int + field_size_in_bits;
+
+ /* This is the tricky part. Use some fancy footwork to deduce where the
+ lowest addressed bit of the containing object must be. */
+ object_offset_in_bits
+ = ceiling (deepest_bitpos, type_align_in_bits) - type_size_in_bits;
+
+ /* Compute the offset of the containing object in "alignment units". */
+ object_offset_in_align_units = object_offset_in_bits / type_align_in_bits;
+
+ /* Compute the offset of the containing object in bytes. */
+ object_offset_in_bytes = object_offset_in_align_units * type_align_in_bytes;
+
+ return object_offset_in_bytes;
+}
+
+/****************************** attributes *********************************/
+
+/* The following routines are responsible for writing out the various types
+ of Dwarf attributes (and any following data bytes associated with them).
+ These routines are listed in order based on the numerical codes of their
+ associated attributes. */
+
+/* Generate an AT_sibling attribute. */
+
+inline void
+sibling_attribute ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_sibling);
+ sprintf (label, DIE_BEGIN_LABEL_FMT, NEXT_DIE_NUM);
+ ASM_OUTPUT_DWARF_REF (asm_out_file, label);
+}
+
+/* Output the form of location attributes suitable for whole variables and
+ whole parameters. Note that the location attributes for struct fields
+ are generated by the routine `data_member_location_attribute' below. */
+
+static void
+location_attribute (rtl)
+ register rtx rtl;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_location);
+ sprintf (begin_label, LOC_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, LOC_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ /* Handle a special case. If we are about to output a location descriptor
+ for a variable or parameter which has been optimized out of existence,
+ don't do that. Instead we output a zero-length location descriptor
+ value as part of the location attribute.
+
+ A variable which has been optimized out of existence will have a
+ DECL_RTL value which denotes a pseudo-reg.
+
+ Currently, in some rare cases, variables can have DECL_RTL values
+ which look like (MEM (REG pseudo-reg#)). These cases are due to
+ bugs elsewhere in the compiler. We treat such cases
+ as if the variable(s) in question had been optimized out of existence.
+
+ Note that in all cases where we wish to express the fact that a
+ variable has been optimized out of existence, we do not simply
+ suppress the generation of the entire location attribute because
+ the absence of a location attribute in certain kinds of DIEs is
+ used to indicate something else entirely... i.e. that the DIE
+ represents an object declaration, but not a definition. So sayeth
+ the PLSIG.
+ */
+
+ if (! is_pseudo_reg (rtl)
+ && (GET_CODE (rtl) != MEM || ! is_pseudo_reg (XEXP (rtl, 0))))
+ output_loc_descriptor (eliminate_regs (rtl, 0, NULL_RTX));
+
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+/* Output the specialized form of location attribute used for data members
+ of struct and union types.
+
+ In the special case of a FIELD_DECL node which represents a bit-field,
+ the "offset" part of this special location descriptor must indicate the
+ distance in bytes from the lowest-addressed byte of the containing
+ struct or union type to the lowest-addressed byte of the "containing
+ object" for the bit-field. (See the `field_byte_offset' function above.)
+
+ For any given bit-field, the "containing object" is a hypothetical
+ object (of some integral or enum type) within which the given bit-field
+ lives. The type of this hypothetical "containing object" is always the
+ same as the declared type of the individual bit-field itself (for GCC
+ anyway... the DWARF spec doesn't actually mandate this).
+
+ Note that it is the size (in bytes) of the hypothetical "containing
+ object" which will be given in the AT_byte_size attribute for this
+ bit-field. (See the `byte_size_attribute' function below.) It is
+ also used when calculating the value of the AT_bit_offset attribute.
+ (See the `bit_offset_attribute' function below.)
+*/
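+
+/* Concretely, the location descriptor written below is just
+
+	OP_CONST  <byte offset of the containing object>
+	OP_ADD
+
+   i.e. the member is located by adding a constant byte offset to
+   whatever base address is already on the hypothetical address
+   evaluation stack.  */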
+
+static void
+data_member_location_attribute (decl)
+ register tree decl;
+{
+ register unsigned object_offset_in_bytes = field_byte_offset (decl);
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_location);
+ sprintf (begin_label, LOC_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, LOC_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_CONST);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, object_offset_in_bytes);
+ ASM_OUTPUT_DWARF_STACK_OP (asm_out_file, OP_ADD);
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+/* Output an AT_const_value attribute for a variable or a parameter which
+ does not have a "location" either in memory or in a register. These
+ things can arise in GNU C when a constant is passed as an actual
+ parameter to an inlined function. They can also arise in C++ where
+ declared constants do not necessarily get memory "homes". */
+
+static void
+const_value_attribute (rtl)
+ register rtx rtl;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_const_value_block4);
+ sprintf (begin_label, LOC_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, LOC_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ switch (GET_CODE (rtl))
+ {
+ case CONST_INT:
+ /* Note that a CONST_INT rtx could represent either an integer or
+ a floating-point constant. A CONST_INT is used whenever the
+ constant will fit into a single word. In all such cases, the
+ original mode of the constant value is wiped out, and the
+ CONST_INT rtx is assigned VOIDmode. Since we no longer have
+ precise mode information for these constants, we always just
+ output them using 4 bytes. */
+
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, (unsigned) INTVAL (rtl));
+ break;
+
+ case CONST_DOUBLE:
+ /* Note that a CONST_DOUBLE rtx could represent either an integer
+ or a floating-point constant. A CONST_DOUBLE is used whenever
+ the constant requires more than one word in order to be adequately
+ represented. In all such cases, the original mode of the constant
+ value is preserved as the mode of the CONST_DOUBLE rtx, but for
+ simplicity we always just output CONST_DOUBLEs using 8 bytes. */
+
+ ASM_OUTPUT_DWARF_DATA8 (asm_out_file,
+ (unsigned HOST_WIDE_INT) CONST_DOUBLE_HIGH (rtl),
+ (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (rtl));
+ break;
+
+ case CONST_STRING:
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, XSTR (rtl, 0));
+ break;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST:
+ ASM_OUTPUT_DWARF_ADDR_CONST (asm_out_file, rtl);
+ break;
+
+ case PLUS:
+ /* In cases where an inlined instance of an inline function is passed
+ the address of an `auto' variable (which is local to the caller)
+ we can get a situation where the DECL_RTL of the artificial
+ local variable (for the inlining) which acts as a stand-in for
+ the corresponding formal parameter (of the inline function)
+ will look like (plus:SI (reg:SI FRAME_PTR) (const_int ...)).
+ This is not exactly a compile-time constant expression, but it
+ isn't the address of the (artificial) local variable either.
+ Rather, it represents the *value* which the artificial local
+ variable always has during its lifetime. We currently have no
+ way to represent such quasi-constant values in Dwarf, so for now
+ we just punt and generate an AT_const_value attribute with form
+ FORM_BLOCK4 and a length of zero. */
+ break;
+
+ default:
+ abort (); /* No other kinds of rtx should be possible here. */
+ }
+
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+/* Generate *either* an AT_location attribute or else an AT_const_value
+ data attribute for a variable or a parameter. We generate the
+ AT_const_value attribute only in those cases where the given
+ variable or parameter does not have a true "location" either in
+ memory or in a register. This can happen (for example) when a
+ constant is passed as an actual argument in a call to an inline
+ function. (It's possible that these things can crop up in other
+ ways also.) Note that one type of constant value which can be
+ passed into an inlined function is a constant pointer. This can
+ happen for example if an actual argument in an inlined function
+ call evaluates to a compile-time constant address. */
+
+static void
+location_or_const_value_attribute (decl)
+ register tree decl;
+{
+ register rtx rtl;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return;
+
+ if ((TREE_CODE (decl) != VAR_DECL) && (TREE_CODE (decl) != PARM_DECL))
+ {
+ /* Should never happen. */
+ abort ();
+ return;
+ }
+
+ /* Here we have to decide where we are going to say the parameter "lives"
+ (as far as the debugger is concerned). We only have a couple of choices.
+ GCC provides us with DECL_RTL and with DECL_INCOMING_RTL. DECL_RTL
+ normally indicates where the parameter lives during most of the activa-
+ tion of the function. If optimization is enabled however, this could
+ be either NULL or else a pseudo-reg. Both of those cases indicate that
+ the parameter doesn't really live anywhere (as far as the code generation
+ parts of GCC are concerned) during most of the function's activation.
+ That will happen (for example) if the parameter is never referenced
+ within the function.
+
+ We could just generate a location descriptor here for all non-NULL
+ non-pseudo values of DECL_RTL and ignore all of the rest, but we can
+ be a little nicer than that if we also consider DECL_INCOMING_RTL in
+ cases where DECL_RTL is NULL or is a pseudo-reg.
+
+ Note however that we can only get away with using DECL_INCOMING_RTL as
+ a backup substitute for DECL_RTL in certain limited cases. In cases
+ where DECL_ARG_TYPE(decl) indicates the same type as TREE_TYPE(decl)
+ we can be sure that the parameter was passed using the same type as it
+ is declared to have within the function, and that its DECL_INCOMING_RTL
+ points us to a place where a value of that type is passed. In cases
+ where DECL_ARG_TYPE(decl) and TREE_TYPE(decl) are different types
+ however, we cannot (in general) use DECL_INCOMING_RTL as a backup
+ substitute for DECL_RTL because in these cases, DECL_INCOMING_RTL
+ points us to a value of some type which is *different* from the type
+ of the parameter itself. Thus, if we tried to use DECL_INCOMING_RTL
+ to generate a location attribute in such cases, the debugger would
+ end up (for example) trying to fetch a `float' from a place which
+ actually contains the first part of a `double'. That would lead to
+ really incorrect and confusing output at debug-time, and we don't
+ want that now do we?
+
+ So in general, we DO NOT use DECL_INCOMING_RTL as a backup for DECL_RTL
+ in cases where DECL_ARG_TYPE(decl) != TREE_TYPE(decl). There are a
+ couple of cute exceptions however. On little-endian machines we can
+ get away with using DECL_INCOMING_RTL even when DECL_ARG_TYPE(decl) is
+ not the same as TREE_TYPE(decl) but only when DECL_ARG_TYPE(decl) is
+ an integral type which is smaller than TREE_TYPE(decl). These cases
+ arise when (on a little-endian machine) a non-prototyped function has
+ a parameter declared to be of type `short' or `char'. In such cases,
+ TREE_TYPE(decl) will be `short' or `char', DECL_ARG_TYPE(decl) will be
+ `int', and DECL_INCOMING_RTL will point to the lowest-order byte of the
+ passed `int' value. If the debugger then uses that address to fetch a
+ `short' or a `char' (on a little-endian machine) the result will be the
+ correct data, so we allow for such exceptional cases below.
+
+ Note that our goal here is to describe the place where the given formal
+ parameter lives during most of the function's activation (i.e. between
+ the end of the prologue and the start of the epilogue). We'll do that
+ as best as we can. Note however that if the given formal parameter is
+ modified sometime during the execution of the function, then a stack
+ backtrace (at debug-time) will show the function as having been called
+ with the *new* value rather than the value which was originally passed
+ in. This happens rarely enough that it is not a major problem, but it
+ *is* a problem, and I'd like to fix it. A future version of dwarfout.c
+ may generate two additional attributes for any given TAG_formal_parameter
+ DIE which will describe the "passed type" and the "passed location" for
+ the given formal parameter in addition to the attributes we now generate
+ to indicate the "declared type" and the "active location" for each
+ parameter. This additional set of attributes could be used by debuggers
+ for stack backtraces.
+
+ Separately, note that sometimes DECL_RTL can be NULL and DECL_INCOMING_RTL
+ can be NULL also. This happens (for example) for inlined-instances of
+ inline function formal parameters which are never referenced. This really
+ shouldn't be happening. All PARM_DECL nodes should get valid non-NULL
+ DECL_INCOMING_RTL values, but integrate.c doesn't currently generate
+ these values for inlined instances of inline function parameters, so
+ when we see such cases, we are just SOL (shit-out-of-luck) for the time
+ being (until integrate.c gets fixed).
+ */
+
+ /* Use DECL_RTL as the "location" unless we find something better. */
+ rtl = DECL_RTL (decl);
+
+ if (TREE_CODE (decl) == PARM_DECL)
+ if (rtl == NULL_RTX || is_pseudo_reg (rtl))
+ {
+ /* This decl represents a formal parameter which was optimized out. */
+ register tree declared_type = type_main_variant (TREE_TYPE (decl));
+ register tree passed_type = type_main_variant (DECL_ARG_TYPE (decl));
+
+ /* Note that DECL_INCOMING_RTL may be NULL in here, but we handle
+ *all* cases where (rtl == NULL_RTX) just below. */
+
+ if (declared_type == passed_type)
+ rtl = DECL_INCOMING_RTL (decl);
+#if (BYTES_BIG_ENDIAN == 0)
+ else
+ if (TREE_CODE (declared_type) == INTEGER_TYPE)
+ if (TREE_INT_CST_LOW (TYPE_SIZE (declared_type))
+     <= TREE_INT_CST_LOW (TYPE_SIZE (passed_type)))
+ rtl = DECL_INCOMING_RTL (decl);
+#endif /* (BYTES_BIG_ENDIAN == 0) */
+ }
+
+ if (rtl == NULL_RTX)
+ return;
+
+ switch (GET_CODE (rtl))
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST_STRING:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST:
+ case PLUS: /* DECL_RTL could be (plus (reg ...) (const_int ...)) */
+ const_value_attribute (rtl);
+ break;
+
+ case MEM:
+ case REG:
+ case SUBREG:
+ location_attribute (rtl);
+ break;
+
+ default:
+ abort (); /* Should never happen. */
+ }
+}
+
+/* Generate an AT_name attribute given some string value to be included as
+ the value of the attribute. */
+
+inline void
+name_attribute (name_string)
+ register char *name_string;
+{
+ if (name_string && *name_string)
+ {
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_name);
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, name_string);
+ }
+}
+
+inline void
+fund_type_attribute (ft_code)
+ register unsigned ft_code;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_fund_type);
+ ASM_OUTPUT_DWARF_FUND_TYPE (asm_out_file, ft_code);
+}
+
+static void
+mod_fund_type_attribute (type, decl_const, decl_volatile)
+ register tree type;
+ register int decl_const;
+ register int decl_volatile;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_mod_fund_type);
+ sprintf (begin_label, MT_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, MT_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+ write_modifier_bytes (type, decl_const, decl_volatile);
+ ASM_OUTPUT_DWARF_FUND_TYPE (asm_out_file,
+ fundamental_type_code (root_type (type)));
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+inline void
+user_def_type_attribute (type)
+ register tree type;
+{
+ char ud_type_name[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_user_def_type);
+ sprintf (ud_type_name, TYPE_NAME_FMT, TYPE_UID (type));
+ ASM_OUTPUT_DWARF_REF (asm_out_file, ud_type_name);
+}
+
+static void
+mod_u_d_type_attribute (type, decl_const, decl_volatile)
+ register tree type;
+ register int decl_const;
+ register int decl_volatile;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char ud_type_name[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_mod_u_d_type);
+ sprintf (begin_label, MT_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, MT_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+ write_modifier_bytes (type, decl_const, decl_volatile);
+ sprintf (ud_type_name, TYPE_NAME_FMT, TYPE_UID (root_type (type)));
+ ASM_OUTPUT_DWARF_REF (asm_out_file, ud_type_name);
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+#ifdef USE_ORDERING_ATTRIBUTE
+inline void
+ordering_attribute (ordering)
+ register unsigned ordering;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_ordering);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, ordering);
+}
+#endif /* defined(USE_ORDERING_ATTRIBUTE) */
+
+/* Note that the block of subscript information for an array type also
+ includes information about the element type of the given array type. */
+
+static void
+subscript_data_attribute (type)
+ register tree type;
+{
+ register unsigned dimension_number;
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_subscr_data);
+ sprintf (begin_label, SS_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, SS_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ /* The GNU compilers represent multidimensional array types as sequences
+ of one dimensional array types whose element types are themselves array
+ types. Here we squish that down, so that each multidimensional array
+ type gets only one array_type DIE in the Dwarf debugging info. The
+ draft Dwarf specification says that we are allowed to do this kind
+ of compression in C (because there is no difference between an
+ array of arrays and a multidimensional array in C) but for other
+ source languages (e.g. Ada) we probably shouldn't do this. */
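+
+  /* For example, for the C declaration `int a[2][3]' the loop below
+     emits one subscript block containing two dimension entries (with
+     bounds 0..1 and then 0..2) followed by a single element-type entry
+     for `int', rather than describing two nested array types. */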
+
+ for (dimension_number = 0;
+ TREE_CODE (type) == ARRAY_TYPE;
+ type = TREE_TYPE (type), dimension_number++)
+ {
+ register tree domain = TYPE_DOMAIN (type);
+
+ /* Arrays come in three flavors. Unspecified bounds, fixed
+ bounds, and (in GNU C only) variable bounds. Handle all
+ three forms here. */
+
+ if (domain)
+ {
+ /* We have an array type with specified bounds. */
+
+ register tree lower = TYPE_MIN_VALUE (domain);
+ register tree upper = TYPE_MAX_VALUE (domain);
+
+ /* Handle only fundamental types as index types for now. */
+
+ if (! type_is_fundamental (domain))
+ abort ();
+
+ /* Output the representation format byte for this dimension. */
+
+ ASM_OUTPUT_DWARF_FMT_BYTE (asm_out_file,
+ FMT_CODE (1,
+ TREE_CODE (lower) == INTEGER_CST,
+ TREE_CODE (upper) == INTEGER_CST));
+
+ /* Output the index type for this dimension. */
+
+ ASM_OUTPUT_DWARF_FUND_TYPE (asm_out_file,
+ fundamental_type_code (domain));
+
+ /* Output the representation for the lower bound. */
+
+ output_bound_representation (lower, dimension_number, 'l');
+
+ /* Output the representation for the upper bound. */
+
+ output_bound_representation (upper, dimension_number, 'u');
+ }
+ else
+ {
+ /* We have an array type with an unspecified length. For C and
+ C++ we can assume that this really means that (a) the index
+ type is an integral type, and (b) the lower bound is zero.
+ Note that Dwarf defines the representation of an unspecified
+ (upper) bound as being a zero-length location description. */
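+
+	  /* For example, `extern int a[];' ends up here: it is described
+	     below as having index type FT_integer, a constant lower
+	     bound of zero, and a zero-length location description for
+	     its (unknown) upper bound. */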
+
+ /* Output the array-bounds format byte. */
+
+ ASM_OUTPUT_DWARF_FMT_BYTE (asm_out_file, FMT_FT_C_X);
+
+ /* Output the (assumed) index type. */
+
+ ASM_OUTPUT_DWARF_FUND_TYPE (asm_out_file, FT_integer);
+
+ /* Output the (assumed) lower bound (constant) value. */
+
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+
+ /* Output the (empty) location description for the upper bound. */
+
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, 0);
+ }
+ }
+
+  /* Output the prefix byte that says that the element type is coming up. */
+
+ ASM_OUTPUT_DWARF_FMT_BYTE (asm_out_file, FMT_ET);
+
+ /* Output a representation of the type of the elements of this array type. */
+
+ type_attribute (type, 0, 0);
+
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+static void
+byte_size_attribute (tree_node)
+ register tree tree_node;
+{
+ register unsigned size;
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_byte_size);
+ switch (TREE_CODE (tree_node))
+ {
+ case ERROR_MARK:
+ size = 0;
+ break;
+
+ case ENUMERAL_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ size = int_size_in_bytes (tree_node);
+ break;
+
+ case FIELD_DECL:
+ /* For a data member of a struct or union, the AT_byte_size is
+ generally given as the number of bytes normally allocated for
+ an object of the *declared* type of the member itself. This
+ is true even for bit-fields. */
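+      /* For example, for a member declared as `int b : 5' the value
+	 given here is the size of `int' (typically 4 bytes), not some
+	 smaller size rounded up from the 5 bits actually used. */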
+ size = simple_type_size_in_bits (field_type (tree_node))
+ / BITS_PER_UNIT;
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Note that `size' might be -1 when we get to this point. If it
+ is, that indicates that the byte size of the entity in question
+ is variable. We have no good way of expressing this fact in Dwarf
+ at the present time, so just let the -1 pass on through. */
+
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, size);
+}
+
+/* For a FIELD_DECL node which represents a bit-field, output an attribute
+ which specifies the distance in bits from the highest order bit of the
+ "containing object" for the bit-field to the highest order bit of the
+ bit-field itself.
+
+ For any given bit-field, the "containing object" is a hypothetical
+ object (of some integral or enum type) within which the given bit-field
+ lives. The type of this hypothetical "containing object" is always the
+ same as the declared type of the individual bit-field itself.
+
+ The determination of the exact location of the "containing object" for
+ a bit-field is rather complicated. It's handled by the `field_byte_offset'
+ function (above).
+
+ Note that it is the size (in bytes) of the hypothetical "containing
+ object" which will be given in the AT_byte_size attribute for this
+ bit-field. (See `byte_size_attribute' above.)
+*/
+
+inline void
+bit_offset_attribute (decl)
+ register tree decl;
+{
+ register unsigned object_offset_in_bytes = field_byte_offset (decl);
+ register tree type = DECL_BIT_FIELD_TYPE (decl);
+ register tree bitpos_tree = DECL_FIELD_BITPOS (decl);
+ register unsigned bitpos_int;
+ register unsigned highest_order_object_bit_offset;
+ register unsigned highest_order_field_bit_offset;
+ register unsigned bit_offset;
+
+ assert (TREE_CODE (decl) == FIELD_DECL); /* Must be a field. */
+ assert (type); /* Must be a bit field. */
+
+ /* We can't yet handle bit-fields whose offsets are variable, so if we
+ encounter such things, just return without generating any attribute
+ whatsoever. */
+
+ if (TREE_CODE (bitpos_tree) != INTEGER_CST)
+ return;
+ bitpos_int = (unsigned) TREE_INT_CST_LOW (bitpos_tree);
+
+ /* Note that the bit offset is always the distance (in bits) from the
+ highest-order bit of the "containing object" to the highest-order
+ bit of the bit-field itself. Since the "high-order end" of any
+ object or field is different on big-endian and little-endian machines,
+ the computation below must take account of these differences. */
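+
+  /* For example, assuming 32-bit `int', BITS_PER_UNIT == 8, and a
+     containing object allocated at byte offset zero, the member `b' of
+     `struct s { int a:3, b:5; };' has bitpos 3 and size 5.  On a
+     big-endian machine the two offsets are used directly, giving a bit
+     offset of 3 - 0 == 3.  On a little-endian machine both are first
+     converted into distances from the high-order end (3+5 == 8 and
+     0+32 == 32), giving a bit offset of 32 - 8 == 24. */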
+
+ highest_order_object_bit_offset = object_offset_in_bytes * BITS_PER_UNIT;
+ highest_order_field_bit_offset = bitpos_int;
+
+#if (BYTES_BIG_ENDIAN == 0)
+ highest_order_field_bit_offset
+ += (unsigned) TREE_INT_CST_LOW (DECL_SIZE (decl));
+
+ highest_order_object_bit_offset += simple_type_size_in_bits (type);
+#endif /* (BYTES_BIG_ENDIAN == 0) */
+
+ bit_offset =
+#if (BYTES_BIG_ENDIAN == 0)
+ highest_order_object_bit_offset - highest_order_field_bit_offset;
+#else /* (BYTES_BIG_ENDIAN != 0) */
+ highest_order_field_bit_offset - highest_order_object_bit_offset;
+#endif /* (BYTES_BIG_ENDIAN != 0) */
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_bit_offset);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, bit_offset);
+}
+
+/* For a FIELD_DECL node which represents a bit field, output an attribute
+ which specifies the length in bits of the given field. */
+
+inline void
+bit_size_attribute (decl)
+ register tree decl;
+{
+ assert (TREE_CODE (decl) == FIELD_DECL); /* Must be a field. */
+ assert (DECL_BIT_FIELD_TYPE (decl)); /* Must be a bit field. */
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_bit_size);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+ (unsigned) TREE_INT_CST_LOW (DECL_SIZE (decl)));
+}
+
+/* The following routine outputs the `element_list' attribute for enumeration
+   type DIEs.  The element_list attribute includes the names and values of
+ all of the enumeration constants associated with the given enumeration
+ type. */
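+
+/* For example, given `enum e { RED, BLUE };' the value/name pair for
+   BLUE (value 1) is output before the pair for RED (value 0), per the
+   REVERSE-order note in the routine below. */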
+
+inline void
+element_list_attribute (element)
+ register tree element;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_element_list);
+ sprintf (begin_label, EE_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, EE_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ /* Here we output a list of value/name pairs for each enumeration constant
+ defined for this enumeration type (as required), but we do it in REVERSE
+ order. The order is the one required by the draft #5 Dwarf specification
+ published by the UI/PLSIG. */
+
+ output_enumeral_list (element); /* Recursively output the whole list. */
+
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+/* Generate an AT_stmt_list attribute. These are normally present only in
+ DIEs with a TAG_compile_unit tag. */
+
+inline void
+stmt_list_attribute (label)
+ register char *label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_stmt_list);
+ /* Don't use ASM_OUTPUT_DWARF_DATA4 here. */
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, label);
+}
+
+/* Generate an AT_low_pc attribute for a label DIE, a lexical_block DIE or
+ for a subroutine DIE. */
+
+inline void
+low_pc_attribute (asm_low_label)
+ register char *asm_low_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_low_pc);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, asm_low_label);
+}
+
+/* Generate an AT_high_pc attribute for a lexical_block DIE or for a
+ subroutine DIE. */
+
+inline void
+high_pc_attribute (asm_high_label)
+ register char *asm_high_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_high_pc);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, asm_high_label);
+}
+
+/* Generate an AT_body_begin attribute for a subroutine DIE. */
+
+inline void
+body_begin_attribute (asm_begin_label)
+ register char *asm_begin_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_body_begin);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, asm_begin_label);
+}
+
+/* Generate an AT_body_end attribute for a subroutine DIE. */
+
+inline void
+body_end_attribute (asm_end_label)
+ register char *asm_end_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_body_end);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, asm_end_label);
+}
+
+/* Generate an AT_language attribute given a LANG value. These attributes
+ are used only within TAG_compile_unit DIEs. */
+
+inline void
+language_attribute (language_code)
+ register unsigned language_code;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_language);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, language_code);
+}
+
+inline void
+member_attribute (context)
+ register tree context;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ /* Generate this attribute only for members in C++. */
+
+ if (context != NULL && is_tagged_type (context))
+ {
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_member);
+ sprintf (label, TYPE_NAME_FMT, TYPE_UID (context));
+ ASM_OUTPUT_DWARF_REF (asm_out_file, label);
+ }
+}
+
+inline void
+string_length_attribute (upper_bound)
+ register tree upper_bound;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_string_length);
+ sprintf (begin_label, SL_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, SL_END_LABEL_FMT, current_dienum);
+ ASM_OUTPUT_DWARF_DELTA2 (asm_out_file, end_label, begin_label);
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+ output_bound_representation (upper_bound, 0, 'u');
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+inline void
+comp_dir_attribute (dirname)
+ register char *dirname;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_comp_dir);
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, dirname);
+}
+
+inline void
+sf_names_attribute (sf_names_start_label)
+ register char *sf_names_start_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_sf_names);
+ /* Don't use ASM_OUTPUT_DWARF_DATA4 here. */
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, sf_names_start_label);
+}
+
+inline void
+src_info_attribute (src_info_start_label)
+ register char *src_info_start_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_src_info);
+ /* Don't use ASM_OUTPUT_DWARF_DATA4 here. */
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, src_info_start_label);
+}
+
+inline void
+mac_info_attribute (mac_info_start_label)
+ register char *mac_info_start_label;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_mac_info);
+ /* Don't use ASM_OUTPUT_DWARF_DATA4 here. */
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, mac_info_start_label);
+}
+
+inline void
+prototyped_attribute (func_type)
+ register tree func_type;
+{
+ if ((strcmp (language_string, "GNU C") == 0)
+ && (TYPE_ARG_TYPES (func_type) != NULL))
+ {
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_prototyped);
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, "");
+ }
+}
+
+inline void
+producer_attribute (producer)
+ register char *producer;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_producer);
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, producer);
+}
+
+inline void
+inline_attribute (decl)
+ register tree decl;
+{
+ if (DECL_INLINE (decl))
+ {
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_inline);
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, "");
+ }
+}
+
+inline void
+containing_type_attribute (containing_type)
+ register tree containing_type;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_containing_type);
+ sprintf (label, TYPE_NAME_FMT, TYPE_UID (containing_type));
+ ASM_OUTPUT_DWARF_REF (asm_out_file, label);
+}
+
+inline void
+abstract_origin_attribute (origin)
+ register tree origin;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_abstract_origin);
+ switch (TREE_CODE_CLASS (TREE_CODE (origin)))
+ {
+ case 'd':
+ sprintf (label, DECL_NAME_FMT, DECL_UID (origin));
+ break;
+
+ case 't':
+ sprintf (label, TYPE_NAME_FMT, TYPE_UID (origin));
+ break;
+
+ default:
+ abort (); /* Should never happen. */
+
+ }
+ ASM_OUTPUT_DWARF_REF (asm_out_file, label);
+}
+
+#ifdef DWARF_DECL_COORDINATES
+inline void
+src_coords_attribute (src_fileno, src_lineno)
+ register unsigned src_fileno;
+ register unsigned src_lineno;
+{
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_src_coords);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, src_fileno);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, src_lineno);
+}
+#endif /* defined(DWARF_DECL_COORDINATES) */
+
+inline void
+pure_or_virtual_attribute (func_decl)
+ register tree func_decl;
+{
+ if (DECL_VIRTUAL_P (func_decl))
+ {
+#if 0 /* DECL_ABSTRACT_VIRTUAL_P is C++-specific. */
+ if (DECL_ABSTRACT_VIRTUAL_P (func_decl))
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_pure_virtual);
+ else
+#endif
+ ASM_OUTPUT_DWARF_ATTRIBUTE (asm_out_file, AT_virtual);
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, "");
+ }
+}
+
+/************************* end of attributes *****************************/
+
+/********************* utility routines for DIEs *************************/
+
+/* Output an AT_name attribute and an AT_src_coords attribute for the
+ given decl, but only if it actually has a name. */
+
+static void
+name_and_src_coords_attributes (decl)
+ register tree decl;
+{
+ register tree decl_name = DECL_NAME (decl);
+
+ if (decl_name && IDENTIFIER_POINTER (decl_name))
+ {
+ name_attribute (IDENTIFIER_POINTER (decl_name));
+#ifdef DWARF_DECL_COORDINATES
+ {
+ register unsigned file_index;
+
+ /* This is annoying, but we have to pop out of the .debug section
+ for a moment while we call `lookup_filename' because calling it
+ may cause a temporary switch into the .debug_sfnames section and
+	 most svr4 assemblers are not smart enough to be able to nest
+ section switches to any depth greater than one. Note that we
+ also can't skirt this issue by delaying all output to the
+	 .debug_sfnames section until the end of compilation because that
+ would cause us to have inter-section forward references and
+ Fred Fish sez that m68k/svr4 assemblers botch those. */
+
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ file_index = lookup_filename (DECL_SOURCE_FILE (decl));
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DEBUG_SECTION);
+
+ src_coords_attribute (file_index, DECL_SOURCE_LINE (decl));
+ }
+#endif /* defined(DWARF_DECL_COORDINATES) */
+ }
+}
+
+/* Many forms of DIEs contain a "type description" part. The following
+ routine writes out these "type descriptor" parts. */
+
+static void
+type_attribute (type, decl_const, decl_volatile)
+ register tree type;
+ register int decl_const;
+ register int decl_volatile;
+{
+ register enum tree_code code = TREE_CODE (type);
+ register int root_type_modified;
+
+ if (TREE_CODE (type) == ERROR_MARK)
+ return;
+
+ /* Handle a special case. For functions whose return type is void,
+ we generate *no* type attribute. (Note that no object may have
+     type `void', so this only applies to function return types.) */
+
+ if (TREE_CODE (type) == VOID_TYPE)
+ return;
+
+ root_type_modified = (code == POINTER_TYPE || code == REFERENCE_TYPE
+ || decl_const || decl_volatile
+ || TYPE_READONLY (type) || TYPE_VOLATILE (type));
+
+ if (type_is_fundamental (root_type (type)))
+ if (root_type_modified)
+ mod_fund_type_attribute (type, decl_const, decl_volatile);
+ else
+ fund_type_attribute (fundamental_type_code (type));
+ else
+ if (root_type_modified)
+ mod_u_d_type_attribute (type, decl_const, decl_volatile);
+ else
+ /* We have to get the type_main_variant here (and pass that to the
+ `user_def_type_attribute' routine) because the ..._TYPE node we
+ have might simply be a *copy* of some original type node (where
+ the copy was created to help us keep track of typedef names)
+ and that copy might have a different TYPE_UID from the original
+ ..._TYPE node. (Note that when `equate_type_number_to_die_number'
+ is labeling a given type DIE for future reference, it always and
+ only creates labels for DIEs representing *main variants*, and it
+ never even knows about non-main-variants.) */
+ user_def_type_attribute (type_main_variant (type));
+}
+
+/* Given a tree pointer to a struct, class, union, or enum type node, return
+ a pointer to the (string) tag name for the given type, or zero if the
+ type was declared without a tag. */
+
+static char *
+type_tag (type)
+ register tree type;
+{
+ register char *name = 0;
+
+ if (TYPE_NAME (type) != 0)
+ {
+ register tree t = 0;
+
+ /* Find the IDENTIFIER_NODE for the type name. */
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ t = TYPE_NAME (type);
+#if 0
+ /* The g++ front end makes the TYPE_NAME of *each* tagged type point
+ to a TYPE_DECL node, regardless of whether or not a `typedef' was
+ involved. This is distinctly different from what the gcc front-end
+ does. It always makes the TYPE_NAME for each tagged type be either
+ NULL (signifying an anonymous tagged type) or else a pointer to an
+ IDENTIFIER_NODE. Obviously, we would like to generate correct Dwarf
+ for both C and C++, but given this inconsistency in the TREE
+ representation of tagged types for C and C++ in the GNU front-ends,
+ we cannot support both languages correctly unless we introduce some
+ front-end specific code here, and rms objects to that, so we can
+ only generate correct Dwarf for one of these two languages. C is
+ more important, so for now we'll do the right thing for C and let
+ g++ go fish. */
+
+ else
+ if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL)
+ t = DECL_NAME (TYPE_NAME (type));
+#endif
+ /* Now get the name as a string, or invent one. */
+ if (t != 0)
+ name = IDENTIFIER_POINTER (t);
+ }
+
+ return (name == 0 || *name == '\0') ? 0 : name;
+}
+
+inline void
+dienum_push ()
+{
+ /* Start by checking if the pending_sibling_stack needs to be expanded.
+ If necessary, expand it. */
+
+ if (pending_siblings == pending_siblings_allocated)
+ {
+ pending_siblings_allocated += PENDING_SIBLINGS_INCREMENT;
+ pending_sibling_stack
+ = (unsigned *) xrealloc (pending_sibling_stack,
+ pending_siblings_allocated * sizeof(unsigned));
+ }
+
+ pending_siblings++;
+ NEXT_DIE_NUM = next_unused_dienum++;
+}
+
+/* Pop the sibling stack so that the most recently pushed DIEnum becomes the
+ NEXT_DIE_NUM. */
+
+inline void
+dienum_pop ()
+{
+ pending_siblings--;
+}
+
+inline tree
+member_declared_type (member)
+ register tree member;
+{
+ return (DECL_BIT_FIELD_TYPE (member))
+ ? DECL_BIT_FIELD_TYPE (member)
+ : TREE_TYPE (member);
+}
+
+/* Get the function's label, as described by its RTL.
+ This may be different from the DECL_NAME name used
+ in the source file. */
+
+static char *
+function_start_label (decl)
+ register tree decl;
+{
+ rtx x;
+ char *fnname;
+
+ x = DECL_RTL (decl);
+ if (GET_CODE (x) != MEM)
+ abort ();
+ x = XEXP (x, 0);
+ if (GET_CODE (x) != SYMBOL_REF)
+ abort ();
+ fnname = XSTR (x, 0);
+ return fnname;
+}
+
+
+/******************************* DIEs ************************************/
+
+/* Output routines for individual types of DIEs. */
+
+/* Note that every type of DIE (except a null DIE) gets a sibling. */
+
+static void
+output_array_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_array_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ member_attribute (TYPE_CONTEXT (type));
+
+ /* I believe that we can default the array ordering. SDB will probably
+ do the right things even if AT_ordering is not present. It's not
+ even an issue until we start to get into multidimensional arrays
+ anyway. If SDB is ever caught doing the Wrong Thing for multi-
+ dimensional arrays, then we'll have to put the AT_ordering attribute
+ back in. (But if and when we find out that we need to put these in,
+ we will only do so for multidimensional arrays. After all, we don't
+ want to waste space in the .debug section now do we?) */
+
+#ifdef USE_ORDERING_ATTRIBUTE
+ ordering_attribute (ORD_row_major);
+#endif /* defined(USE_ORDERING_ATTRIBUTE) */
+
+ subscript_data_attribute (type);
+}
+
+static void
+output_set_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_set_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ member_attribute (TYPE_CONTEXT (type));
+ type_attribute (TREE_TYPE (type), 0, 0);
+}
+
+#if 0
+/* Implement this when there is a GNU FORTRAN or GNU Ada front end. */
+static void
+output_entry_point_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_entry_point);
+ sibling_attribute ();
+ dienum_push ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ name_and_src_coords_attributes (decl);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (TREE_TYPE (TREE_TYPE (decl)), 0, 0);
+ }
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+ else
+ low_pc_attribute (function_start_label (decl));
+}
+#endif
+
+/* Output a DIE to represent an inlined instance of an enumeration type. */
+
+static void
+output_inlined_enumeration_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_enumeration_type);
+ sibling_attribute ();
+ assert (TREE_ASM_WRITTEN (type));
+ abstract_origin_attribute (type);
+}
+
+/* Output a DIE to represent an inlined instance of a structure type. */
+
+static void
+output_inlined_structure_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_structure_type);
+ sibling_attribute ();
+ assert (TREE_ASM_WRITTEN (type));
+ abstract_origin_attribute (type);
+}
+
+/* Output a DIE to represent an inlined instance of a union type. */
+
+static void
+output_inlined_union_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_union_type);
+ sibling_attribute ();
+ assert (TREE_ASM_WRITTEN (type));
+ abstract_origin_attribute (type);
+}
+
+/* Output a DIE to represent an enumeration type. Note that these DIEs
+ include all of the information about the enumeration values also.
+ This information is encoded into the element_list attribute. */
+
+static void
+output_enumeration_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_enumeration_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ name_attribute (type_tag (type));
+ member_attribute (TYPE_CONTEXT (type));
+
+ /* Handle a GNU C/C++ extension, i.e. incomplete enum types. If the
+ given enum type is incomplete, do not generate the AT_byte_size
+ attribute or the AT_element_list attribute. */
+
+ if (TYPE_SIZE (type))
+ {
+ byte_size_attribute (type);
+ element_list_attribute (TYPE_FIELDS (type));
+ }
+}
+
+/* Output a DIE to represent either a real live formal parameter decl or
+ to represent just the type of some formal parameter position in some
+ function type.
+
+ Note that this routine is a bit unusual because its argument may be
+ a ..._DECL node (i.e. either a PARM_DECL or perhaps a VAR_DECL which
+ represents an inlining of some PARM_DECL) or else some sort of a
+ ..._TYPE node. If it's the former then this function is being called
+ to output a DIE to represent a formal parameter object (or some inlining
+ thereof). If it's the latter, then this function is only being called
+ to output a TAG_formal_parameter DIE to stand as a placeholder for some
+ formal argument type of some subprogram type. */
+
+static void
+output_formal_parameter_die (arg)
+ register void *arg;
+{
+ register tree node = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_formal_parameter);
+ sibling_attribute ();
+
+ switch (TREE_CODE_CLASS (TREE_CODE (node)))
+ {
+ case 'd': /* We were called with some kind of a ..._DECL node. */
+ {
+ register tree origin = decl_ultimate_origin (node);
+
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ name_and_src_coords_attributes (node);
+ type_attribute (TREE_TYPE (node),
+ TREE_READONLY (node), TREE_THIS_VOLATILE (node));
+ }
+ if (DECL_ABSTRACT (node))
+ equate_decl_number_to_die_number (node);
+ else
+ location_or_const_value_attribute (node);
+ }
+ break;
+
+ case 't': /* We were called with some kind of a ..._TYPE node. */
+ type_attribute (node, 0, 0);
+ break;
+
+ default:
+ abort (); /* Should never happen. */
+ }
+}
+
+/* Output a DIE to represent a declared function (either file-scope
+ or block-local) which has "external linkage" (according to ANSI-C). */
+
+static void
+output_global_subroutine_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_global_subroutine);
+ sibling_attribute ();
+ dienum_push ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ register tree type = TREE_TYPE (decl);
+
+ name_and_src_coords_attributes (decl);
+ inline_attribute (decl);
+ prototyped_attribute (type);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (TREE_TYPE (type), 0, 0);
+ pure_or_virtual_attribute (decl);
+ }
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+ else
+ {
+ if (! DECL_EXTERNAL (decl))
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ low_pc_attribute (function_start_label (decl));
+ sprintf (label, FUNC_END_LABEL_FMT, current_funcdef_number);
+ high_pc_attribute (label);
+ sprintf (label, BODY_BEGIN_LABEL_FMT, current_funcdef_number);
+ body_begin_attribute (label);
+ sprintf (label, BODY_END_LABEL_FMT, current_funcdef_number);
+ body_end_attribute (label);
+ }
+ }
+}
+
+/* Output a DIE to represent a declared data object (either file-scope
+ or block-local) which has "external linkage" (according to ANSI-C). */
+
+static void
+output_global_variable_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_global_variable);
+ sibling_attribute ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ name_and_src_coords_attributes (decl);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (TREE_TYPE (decl),
+ TREE_READONLY (decl), TREE_THIS_VOLATILE (decl));
+ }
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+ else
+ {
+ if (!DECL_EXTERNAL (decl))
+ location_or_const_value_attribute (decl);
+ }
+}
+
+static void
+output_label_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_label);
+ sibling_attribute ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ name_and_src_coords_attributes (decl);
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+ else
+ {
+ register rtx insn = DECL_RTL (decl);
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ /* When optimization is enabled (via -O) some parts of the compiler
+ (e.g. jump.c and cse.c) may try to delete CODE_LABEL insns which
+ represent source-level labels which were explicitly declared by
+ the user. This really shouldn't be happening though, so catch
+ it if it ever does happen. */
+
+ if (INSN_DELETED_P (insn))
+ abort (); /* Should never happen. */
+
+ sprintf (label, INSN_LABEL_FMT, current_funcdef_number,
+ (unsigned) INSN_UID (insn));
+ low_pc_attribute (label);
+ }
+ }
+}
+
+static void
+output_lexical_block_die (arg)
+ register void *arg;
+{
+ register tree stmt = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_lexical_block);
+ sibling_attribute ();
+ dienum_push ();
+ if (! BLOCK_ABSTRACT (stmt))
+ {
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (begin_label, BLOCK_BEGIN_LABEL_FMT, next_block_number);
+ low_pc_attribute (begin_label);
+ sprintf (end_label, BLOCK_END_LABEL_FMT, next_block_number);
+ high_pc_attribute (end_label);
+ }
+}
+
+static void
+output_inlined_subroutine_die (arg)
+ register void *arg;
+{
+ register tree stmt = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_inlined_subroutine);
+ sibling_attribute ();
+ dienum_push ();
+ abstract_origin_attribute (block_ultimate_origin (stmt));
+ if (! BLOCK_ABSTRACT (stmt))
+ {
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (begin_label, BLOCK_BEGIN_LABEL_FMT, next_block_number);
+ low_pc_attribute (begin_label);
+ sprintf (end_label, BLOCK_END_LABEL_FMT, next_block_number);
+ high_pc_attribute (end_label);
+ }
+}
+
+/* Output a DIE to represent a declared data object (either file-scope
+ or block-local) which has "internal linkage" (according to ANSI-C). */
+
+static void
+output_local_variable_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_local_variable);
+ sibling_attribute ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ name_and_src_coords_attributes (decl);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (TREE_TYPE (decl),
+ TREE_READONLY (decl), TREE_THIS_VOLATILE (decl));
+ }
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+ else
+ location_or_const_value_attribute (decl);
+}
+
+static void
+output_member_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_member);
+ sibling_attribute ();
+ name_and_src_coords_attributes (decl);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (member_declared_type (decl),
+ TREE_READONLY (decl), TREE_THIS_VOLATILE (decl));
+ if (DECL_BIT_FIELD_TYPE (decl)) /* If this is a bit field... */
+ {
+ byte_size_attribute (decl);
+ bit_size_attribute (decl);
+ bit_offset_attribute (decl);
+ }
+ data_member_location_attribute (decl);
+}
+
+#if 0
+/* Don't generate either pointer_type DIEs or reference_type DIEs. Use
+ modified types instead.
+
+ We keep this code here just in case these types of DIEs may be needed
+ to represent certain things in other languages (e.g. Pascal) someday.
+*/
+
+static void
+output_pointer_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_pointer_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ member_attribute (TYPE_CONTEXT (type));
+ type_attribute (TREE_TYPE (type), 0, 0);
+}
+
+static void
+output_reference_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_reference_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ member_attribute (TYPE_CONTEXT (type));
+ type_attribute (TREE_TYPE (type), 0, 0);
+}
+#endif
+
+static void
+output_ptr_to_mbr_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_ptr_to_member_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ member_attribute (TYPE_CONTEXT (type));
+ containing_type_attribute (TYPE_OFFSET_BASETYPE (type));
+ type_attribute (TREE_TYPE (type), 0, 0);
+}
+
+static void
+output_compile_unit_die (arg)
+ register void *arg;
+{
+ register char *main_input_filename = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_compile_unit);
+ sibling_attribute ();
+ dienum_push ();
+ name_attribute (main_input_filename);
+
+ {
+ char producer[250];
+
+ sprintf (producer, "%s %s", language_string, version_string);
+ producer_attribute (producer);
+ }
+
+ if (strcmp (language_string, "GNU C++") == 0)
+ language_attribute (LANG_C_PLUS_PLUS);
+ else if (strcmp (language_string, "GNU Ada") == 0)
+ language_attribute (LANG_ADA83);
+ else if (flag_traditional)
+ language_attribute (LANG_C);
+ else
+ language_attribute (LANG_C89);
+ low_pc_attribute (TEXT_BEGIN_LABEL);
+ high_pc_attribute (TEXT_END_LABEL);
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ stmt_list_attribute (LINE_BEGIN_LABEL);
+ last_filename = xstrdup (main_input_filename);
+
+ {
+ char *wd = getpwd ();
+ if (wd)
+ comp_dir_attribute (wd);
+ }
+
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ sf_names_attribute (SFNAMES_BEGIN_LABEL);
+ src_info_attribute (SRCINFO_BEGIN_LABEL);
+ if (debug_info_level >= DINFO_LEVEL_VERBOSE)
+ mac_info_attribute (MACINFO_BEGIN_LABEL);
+ }
+}
+
+static void
+output_string_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_string_type);
+ sibling_attribute ();
+ member_attribute (TYPE_CONTEXT (type));
+
+ /* Fudge the string length attribute for now. */
+
+ string_length_attribute (TYPE_MAX_VALUE (TYPE_DOMAIN (type)));
+}
+
+static void
+output_structure_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_structure_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ name_attribute (type_tag (type));
+ member_attribute (TYPE_CONTEXT (type));
+
+ /* If this type has been completed, then give it a byte_size attribute
+ and prepare to give a list of members. Otherwise, don't do either of
+ these things. In the latter case, we will not be generating a list
+ of members (since we don't have any idea what they might be for an
+ incomplete type). */
+
+ if (TYPE_SIZE (type))
+ {
+ dienum_push ();
+ byte_size_attribute (type);
+ }
+}
+
+/* Output a DIE to represent a declared function (either file-scope
+ or block-local) which has "internal linkage" (according to ANSI-C). */
+
+static void
+output_local_subroutine_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_subroutine);
+ sibling_attribute ();
+ dienum_push ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ register tree type = TREE_TYPE (decl);
+
+ name_and_src_coords_attributes (decl);
+ inline_attribute (decl);
+ prototyped_attribute (type);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (TREE_TYPE (type), 0, 0);
+ pure_or_virtual_attribute (decl);
+ }
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+ else
+ {
+ /* Avoid getting screwed up in cases where a function was declared
+ static but where no definition was ever given for it. */
+
+ if (TREE_ASM_WRITTEN (decl))
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+ low_pc_attribute (function_start_label (decl));
+ sprintf (label, FUNC_END_LABEL_FMT, current_funcdef_number);
+ high_pc_attribute (label);
+ sprintf (label, BODY_BEGIN_LABEL_FMT, current_funcdef_number);
+ body_begin_attribute (label);
+ sprintf (label, BODY_END_LABEL_FMT, current_funcdef_number);
+ body_end_attribute (label);
+ }
+ }
+}
+
+static void
+output_subroutine_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+ register tree return_type = TREE_TYPE (type);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_subroutine_type);
+ sibling_attribute ();
+ dienum_push ();
+ equate_type_number_to_die_number (type);
+ prototyped_attribute (type);
+ member_attribute (TYPE_CONTEXT (type));
+ type_attribute (return_type, 0, 0);
+}
+
+static void
+output_typedef_die (arg)
+ register void *arg;
+{
+ register tree decl = arg;
+ register tree origin = decl_ultimate_origin (decl);
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_typedef);
+ sibling_attribute ();
+ if (origin != NULL)
+ abstract_origin_attribute (origin);
+ else
+ {
+ name_and_src_coords_attributes (decl);
+ member_attribute (DECL_CONTEXT (decl));
+ type_attribute (TREE_TYPE (decl),
+ TREE_READONLY (decl), TREE_THIS_VOLATILE (decl));
+ }
+ if (DECL_ABSTRACT (decl))
+ equate_decl_number_to_die_number (decl);
+}
+
+static void
+output_union_type_die (arg)
+ register void *arg;
+{
+ register tree type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_union_type);
+ sibling_attribute ();
+ equate_type_number_to_die_number (type);
+ name_attribute (type_tag (type));
+ member_attribute (TYPE_CONTEXT (type));
+
+ /* If this type has been completed, then give it a byte_size attribute
+ and prepare to give a list of members. Otherwise, don't do either of
+ these things. In the latter case, we will not be generating a list
+ of members (since we don't have any idea what they might be for an
+ incomplete type). */
+
+ if (TYPE_SIZE (type))
+ {
+ dienum_push ();
+ byte_size_attribute (type);
+ }
+}
+
+/* Generate a special type of DIE used as a stand-in for a trailing ellipsis
+ at the end of an (ANSI prototyped) formal parameters list. */
+
+static void
+output_unspecified_parameters_die (arg)
+ register void *arg;
+{
+ register tree decl_or_type = arg;
+
+ ASM_OUTPUT_DWARF_TAG (asm_out_file, TAG_unspecified_parameters);
+ sibling_attribute ();
+
+ /* This kludge is here only for the sake of being compatible with what
+ the USL CI5 C compiler does. The specification of Dwarf Version 1
+ doesn't say that TAG_unspecified_parameters DIEs should contain any
+ attributes other than the AT_sibling attribute, but they are certainly
+ allowed to contain additional attributes, and the CI5 compiler
+ generates AT_name, AT_fund_type, and AT_location attributes within
+ TAG_unspecified_parameters DIEs which appear in the child lists for
+ DIEs representing function definitions, so we do likewise here. */
+
+ if (TREE_CODE (decl_or_type) == FUNCTION_DECL && DECL_INITIAL (decl_or_type))
+ {
+ name_attribute ("...");
+ fund_type_attribute (FT_pointer);
+ /* location_attribute (?); */
+ }
+}
+
+static void
+output_padded_null_die (arg)
+ register void *arg;
+{
+ ASM_OUTPUT_ALIGN (asm_out_file, 2); /* 2**2 == 4 */
+}
+
+/*************************** end of DIEs *********************************/
+
+/* Generate some type of DIE. This routine generates the generic outer
+   wrapper stuff which goes around all types of DIEs (regardless of their
+   TAGs).  All forms of DIEs start with a DIE-specific label, followed by a
+ DIE-length word, followed by the guts of the DIE itself. After the guts
+ of the DIE, there must always be a terminator label for the DIE. */
+
+static void
+output_die (die_specific_output_function, param)
+ register void (*die_specific_output_function)();
+ register void *param;
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char end_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ current_dienum = NEXT_DIE_NUM;
+ NEXT_DIE_NUM = next_unused_dienum;
+
+ sprintf (begin_label, DIE_BEGIN_LABEL_FMT, current_dienum);
+ sprintf (end_label, DIE_END_LABEL_FMT, current_dienum);
+
+ /* Write a label which will act as the name for the start of this DIE. */
+
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ /* Write the DIE-length word. */
+
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, end_label, begin_label);
+
+ /* Fill in the guts of the DIE. */
+
+ next_unused_dienum++;
+ die_specific_output_function (param);
+
+ /* Write a label which will act as the name for the end of this DIE. */
+
+ ASM_OUTPUT_LABEL (asm_out_file, end_label);
+}
+
+static void
+end_sibling_chain ()
+{
+ char begin_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ current_dienum = NEXT_DIE_NUM;
+ NEXT_DIE_NUM = next_unused_dienum;
+
+ sprintf (begin_label, DIE_BEGIN_LABEL_FMT, current_dienum);
+
+ /* Write a label which will act as the name for the start of this DIE. */
+
+ ASM_OUTPUT_LABEL (asm_out_file, begin_label);
+
+ /* Write the DIE-length word. */
+
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 4);
+
+ dienum_pop ();
+}
+
+/* Generate a list of nameless TAG_formal_parameter DIEs (and perhaps a
+ TAG_unspecified_parameters DIE) to represent the types of the formal
+ parameters as specified in some function type specification (except
+ for those which appear as part of a function *definition*).
+
+ Note that we must be careful here to output all of the parameter DIEs
+ *before* we output any DIEs needed to represent the types of the formal
+ parameters. This keeps svr4 SDB happy because it (incorrectly) thinks
+ that the first non-parameter DIE it sees ends the formal parameter list.
+*/
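+
+/* For example, for a prototyped function type `int (char, float)' the
+   first pass below outputs two nameless TAG_formal_parameter DIEs (one
+   for `char' and one for `float') and only the second pass outputs the
+   DIEs (if any) needed to describe the types `char' and `float'
+   themselves. */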
+
+static void
+output_formal_types (function_or_method_type)
+ register tree function_or_method_type;
+{
+ register tree link;
+ register tree formal_type = NULL;
+ register tree first_parm_type = TYPE_ARG_TYPES (function_or_method_type);
+
+ /* In the case where we are generating a formal types list for a C++
+ non-static member function type, skip over the first thing on the
+ TYPE_ARG_TYPES list because it only represents the type of the
+ hidden `this pointer'. The debugger should be able to figure
+ out (without being explicitly told) that this non-static member
+ function type takes a `this pointer' and should be able to figure
+ what the type of that hidden parameter is from the AT_member
+ attribute of the parent TAG_subroutine_type DIE. */
+
+ if (TREE_CODE (function_or_method_type) == METHOD_TYPE)
+ first_parm_type = TREE_CHAIN (first_parm_type);
+
+ /* Make our first pass over the list of formal parameter types and output
+ a TAG_formal_parameter DIE for each one. */
+
+ for (link = first_parm_type; link; link = TREE_CHAIN (link))
+ {
+ formal_type = TREE_VALUE (link);
+ if (formal_type == void_type_node)
+ break;
+
+ /* Output a (nameless) DIE to represent the formal parameter itself. */
+
+ output_die (output_formal_parameter_die, formal_type);
+ }
+
+ /* If this function type has an ellipsis, add a TAG_unspecified_parameters
+ DIE to the end of the parameter list. */
+
+ if (formal_type != void_type_node)
+ output_die (output_unspecified_parameters_die, function_or_method_type);
+
+ /* Make our second (and final) pass over the list of formal parameter types
+ and output DIEs to represent those types (as necessary). */
+
+ for (link = TYPE_ARG_TYPES (function_or_method_type);
+ link;
+ link = TREE_CHAIN (link))
+ {
+ formal_type = TREE_VALUE (link);
+ if (formal_type == void_type_node)
+ break;
+
+ output_type (formal_type, function_or_method_type);
+ }
+}
+
+/* Remember a type in the pending_types_list. */
+
+static void
+pend_type (type)
+ register tree type;
+{
+ if (pending_types == pending_types_allocated)
+ {
+ pending_types_allocated += PENDING_TYPES_INCREMENT;
+ pending_types_list
+ = (tree *) xrealloc (pending_types_list,
+ sizeof (tree) * pending_types_allocated);
+ }
+ pending_types_list[pending_types++] = type;
+
+ /* Mark the pending type as having been output already (even though
+ it hasn't been). This prevents the type from being added to the
+ pending_types_list more than once. */
+
+ TREE_ASM_WRITTEN (type) = 1;
+}
+
+/* Return non-zero if it is legitimate to output DIEs to represent a
+ given type while we are generating the list of child DIEs for some
+ DIE (e.g. a function or lexical block DIE) associated with a given scope.
+
+ See the comments within the function for a description of when it is
+ considered legitimate to output DIEs for various kinds of types.
+
+ Note that TYPE_CONTEXT(type) may be NULL (to indicate global scope)
+ or it may point to a BLOCK node (for types local to a block), or to a
+ FUNCTION_DECL node (for types local to the heading of some function
+ definition), or to a FUNCTION_TYPE node (for types local to the
+ prototyped parameter list of a function type specification), or to a
+ RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE node
+ (in the case of C++ nested types).
+
+ The `scope' parameter should likewise be NULL or should point to a
+ BLOCK node, a FUNCTION_DECL node, a FUNCTION_TYPE node, a RECORD_TYPE
+ node, a UNION_TYPE node, or a QUAL_UNION_TYPE node.
+
+ This function is used only for deciding when to "pend" and when to
+ "un-pend" types to/from the pending_types_list.
+
+ Note that we sometimes make use of this "type pending" feature in a
+ rather twisted way to temporarily delay the production of DIEs for the
+ types of formal parameters. (We do this just to make svr4 SDB happy.)
+   In order to delay the production of DIEs representing types of formal
+ parameters, callers of this function supply `fake_containing_scope' as
+ the `scope' parameter to this function. Given that fake_containing_scope
+ is a tagged type which is *not* the containing scope for *any* other type,
+ the desired effect is achieved, i.e. output of DIEs representing types
+ is temporarily suspended, and any type DIEs which would have otherwise
+ been output are instead placed onto the pending_types_list. Later on,
+ we force these (temporarily pended) types to be output simply by calling
+ `output_pending_types_for_scope' with an actual argument equal to the
+ true scope of the types we temporarily pended.
+*/
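+
+/* Stated as a calling sequence, the trick described above is:
+
+	output_type (type, fake_containing_scope);	(type gets pended)
+	...
+	output_pending_types_for_scope (true_scope);	(type gets output)
+*/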
+
+inline int
+type_ok_for_scope (type, scope)
+ register tree type;
+ register tree scope;
+{
+ /* Tagged types (i.e. struct, union, and enum types) must always be
+ output only in the scopes where they actually belong (or else the
+ scoping of their own tag names and the scoping of their member
+ names will be incorrect). Non-tagged-types on the other hand can
+ generally be output anywhere, except that svr4 SDB really doesn't
+ want to see them nested within struct or union types, so here we
+ say it is always OK to immediately output any such a (non-tagged)
+ type, so long as we are not within such a context. Note that the
+ only kinds of non-tagged types which we will be dealing with here
+ (for C and C++ anyway) will be array types and function types. */
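+
+  /* Thus a tagged type such as `struct s' defined within some function
+     is OK only when `scope' is the block it actually belongs to, while
+     a non-tagged type such as `int[10]' is OK for any scope except the
+     inside of a struct or union type. */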
+
+ return is_tagged_type (type)
+ ? (TYPE_CONTEXT (type) == scope)
+ : (scope == NULL_TREE || ! is_tagged_type (scope));
+}
+
+/* Output any pending types (from the pending_types list) which we can output
+ now (taking into account the scope that we are working on now).
+
+ For each type output, remove the given type from the pending_types_list
+ *before* we try to output it.
+
+ Note that we have to process the list in beginning-to-end order,
+ because the call made here to output_type may cause yet more types
+ to be added to the end of the list, and we may have to output some
+ of them too.
+*/
+
+static void
+output_pending_types_for_scope (containing_scope)
+ register tree containing_scope;
+{
+ register unsigned i;
+
+ for (i = 0; i < pending_types; )
+ {
+ register tree type = pending_types_list[i];
+
+ if (type_ok_for_scope (type, containing_scope))
+ {
+ register tree *mover;
+ register tree *limit;
+
+ pending_types--;
+ limit = &pending_types_list[pending_types];
+ for (mover = &pending_types_list[i]; mover < limit; mover++)
+ *mover = *(mover+1);
+
+ /* Un-mark the type as having been output already (because it
+ hasn't been, really). Then call output_type to generate a
+ Dwarf representation of it. */
+
+ TREE_ASM_WRITTEN (type) = 0;
+ output_type (type, containing_scope);
+
+ /* Don't increment the loop counter in this case because we
+ have shifted all of the subsequent pending types down one
+ element in the pending_types_list array. */
+ }
+ else
+ i++;
+ }
+}
+
+static void
+output_type (type, containing_scope)
+ register tree type;
+ register tree containing_scope;
+{
+ if (type == 0 || type == error_mark_node)
+ return;
+
+  /* We are going to output a DIE to represent the unqualified version
+     of this type (i.e. without any const or volatile qualifiers) so get
+ the main variant (i.e. the unqualified version) of this type now. */
+
+ type = type_main_variant (type);
+
+ if (TREE_ASM_WRITTEN (type))
+ return;
+
+ /* Don't generate any DIEs for this type now unless it is OK to do so
+ (based upon what `type_ok_for_scope' tells us). */
+
+ if (! type_ok_for_scope (type, containing_scope))
+ {
+ pend_type (type);
+ return;
+ }
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ break;
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* For these types, all that is required is that we output a DIE
+ (or a set of DIEs) to represent the "basis" type. */
+ output_type (TREE_TYPE (type), containing_scope);
+ break;
+
+ case OFFSET_TYPE:
+ /* This code is used for C++ pointer-to-data-member types. */
+ /* Output a description of the relevant class type. */
+ output_type (TYPE_OFFSET_BASETYPE (type), containing_scope);
+ /* Output a description of the type of the object pointed to. */
+ output_type (TREE_TYPE (type), containing_scope);
+ /* Now output a DIE to represent this pointer-to-data-member type
+ itself. */
+ output_die (output_ptr_to_mbr_type_die, type);
+ break;
+
+ case SET_TYPE:
+ output_type (TYPE_DOMAIN (type), containing_scope);
+ output_die (output_set_type_die, type);
+ break;
+
+ case FILE_TYPE:
+ output_type (TREE_TYPE (type), containing_scope);
+ abort (); /* No way to represent these in Dwarf yet! */
+ break;
+
+ case FUNCTION_TYPE:
+ /* Force out return type (in case it wasn't forced out already). */
+ output_type (TREE_TYPE (type), containing_scope);
+ output_die (output_subroutine_type_die, type);
+ output_formal_types (type);
+ end_sibling_chain ();
+ break;
+
+ case METHOD_TYPE:
+ /* Force out return type (in case it wasn't forced out already). */
+ output_type (TREE_TYPE (type), containing_scope);
+ output_die (output_subroutine_type_die, type);
+ output_formal_types (type);
+ end_sibling_chain ();
+ break;
+
+ case ARRAY_TYPE:
+ if (TYPE_STRING_FLAG (type) && TREE_CODE(TREE_TYPE(type)) == CHAR_TYPE)
+ {
+ output_type (TREE_TYPE (type), containing_scope);
+ output_die (output_string_type_die, type);
+ }
+ else
+ {
+ register tree element_type;
+
+ element_type = TREE_TYPE (type);
+ while (TREE_CODE (element_type) == ARRAY_TYPE)
+ element_type = TREE_TYPE (element_type);
+
+ output_type (element_type, containing_scope);
+ output_die (output_array_type_die, type);
+ }
+ break;
+
+ case ENUMERAL_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+
+ /* For a non-file-scope tagged type, we can always go ahead and
+ output a Dwarf description of this type right now, even if
+ the type in question is still incomplete, because if this
+ local type *was* ever completed anywhere within its scope,
+ that complete definition would already have been attached to
+ this RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE or ENUMERAL_TYPE
+ node by the time we reach this point. That's true because of the
+ way the front-end does its processing of file-scope declarations (of
+ functions and class types) within which other types might be
+ nested. The C and C++ front-ends always gobble up such "local
+	 scope" things en masse before they try to output *any* debugging
+ information for any of the stuff contained inside them and thus,
+ we get the benefit here of what is (in effect) a pre-resolution
+ of forward references to tagged types in local scopes.
+
+ Note however that for file-scope tagged types we cannot assume
+ that such pre-resolution of forward references has taken place.
+ A given file-scope tagged type may appear to be incomplete when
+ we reach this point, but it may yet be given a full definition
+ (at file-scope) later on during compilation. In order to avoid
+ generating a premature (and possibly incorrect) set of Dwarf
+ DIEs for such (as yet incomplete) file-scope tagged types, we
+ generate nothing at all for as-yet incomplete file-scope tagged
+ types here unless we are making our special "finalization" pass
+ for file-scope things at the very end of compilation. At that
+ time, we will certainly know as much about each file-scope tagged
+ type as we are ever going to know, so at that point in time, we
+ can safely generate correct Dwarf descriptions for these file-
+ scope tagged types.
+ */
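+
+      /* For example, a file-scope `struct foo;' which is still
+	 incomplete at this point may yet be completed later on in the
+	 current compilation, so unless we are finalizing, we output
+	 nothing for it now rather than risk describing it wrongly. */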
+
+ if (TYPE_SIZE (type) == 0 && TYPE_CONTEXT (type) == NULL && !finalizing)
+ return; /* EARLY EXIT! Avoid setting TREE_ASM_WRITTEN. */
+
+ /* Prevent infinite recursion in cases where the type of some
+ member of this type is expressed in terms of this type itself. */
+
+ TREE_ASM_WRITTEN (type) = 1;
+
+ /* Output a DIE to represent the tagged type itself. */
+
+ switch (TREE_CODE (type))
+ {
+ case ENUMERAL_TYPE:
+ output_die (output_enumeration_type_die, type);
+ return; /* a special case -- nothing left to do so just return */
+
+ case RECORD_TYPE:
+ output_die (output_structure_type_die, type);
+ break;
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ output_die (output_union_type_die, type);
+ break;
+
+ default:
+ abort (); /* Should never happen. */
+ }
+
+ /* If this is not an incomplete type, output descriptions of
+ each of its members.
+
+ Note that as we output the DIEs necessary to represent the
+ members of this record or union type, we will also be trying
+ to output DIEs to represent the *types* of those members.
+ However the `output_type' function (above) will specifically
+ avoid generating type DIEs for member types *within* the list
+	 of member DIEs for this (containing) type except for those
+ types (of members) which are explicitly marked as also being
+ members of this (containing) type themselves. The g++ front-
+ end can force any given type to be treated as a member of some
+ other (containing) type by setting the TYPE_CONTEXT of the
+ given (member) type to point to the TREE node representing the
+ appropriate (containing) type.
+ */
+
+ if (TYPE_SIZE (type))
+ {
+ {
+ register tree normal_member;
+
+ /* First output info about the data members and type members. */
+
+ for (normal_member = TYPE_FIELDS (type);
+ normal_member;
+ normal_member = TREE_CHAIN (normal_member))
+ output_decl (normal_member, type);
+ }
+
+ {
+ register tree vec_base;
+
+ /* Now output info about the function members (if any). */
+
+ vec_base = TYPE_METHODS (type);
+ if (vec_base)
+ {
+ register tree first_func_member = TREE_VEC_ELT (vec_base, 0);
+ register tree func_member;
+
+ /* This isn't documented, but the first element of the
+ vector of member functions can be NULL in cases where
+ the class type in question didn't have either a
+ constructor or a destructor declared for it. We have
+ to make allowances for that here. */
+
+ if (first_func_member == NULL)
+ first_func_member = TREE_VEC_ELT (vec_base, 1);
+
+ for (func_member = first_func_member;
+ func_member;
+ func_member = TREE_CHAIN (func_member))
+ output_decl (func_member, type);
+ }
+ }
+
+ /* RECORD_TYPEs, UNION_TYPEs, and QUAL_UNION_TYPEs are themselves
+ scopes (at least in C++) so we must now output any nested
+ pending types which are local just to this type. */
+
+ output_pending_types_for_scope (type);
+
+ end_sibling_chain (); /* Terminate member chain. */
+ }
+
+ break;
+
+ case VOID_TYPE:
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ case COMPLEX_TYPE:
+ case BOOLEAN_TYPE:
+ case CHAR_TYPE:
+ break; /* No DIEs needed for fundamental types. */
+
+ case LANG_TYPE: /* No Dwarf representation currently defined. */
+ break;
+
+ default:
+ abort ();
+ }
+
+ TREE_ASM_WRITTEN (type) = 1;
+}
+
+static void
+output_tagged_type_instantiation (type)
+ register tree type;
+{
+ if (type == 0 || type == error_mark_node)
+ return;
+
+  /* We are going to output a DIE to represent the unqualified version
+     of this type (i.e. without any const or volatile qualifiers) so make
+ sure that we have the main variant (i.e. the unqualified version) of
+ this type now. */
+
+ assert (type == type_main_variant (type));
+
+ assert (TREE_ASM_WRITTEN (type));
+
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ break;
+
+ case ENUMERAL_TYPE:
+ output_die (output_inlined_enumeration_type_die, type);
+ break;
+
+ case RECORD_TYPE:
+ output_die (output_inlined_structure_type_die, type);
+ break;
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ output_die (output_inlined_union_type_die, type);
+ break;
+
+ default:
+ abort (); /* Should never happen. */
+ }
+}
+
+/* Output a TAG_lexical_block DIE followed by DIEs to represent all of
+ the things which are local to the given block. */
+
+static void
+output_block (stmt)
+ register tree stmt;
+{
+ register int must_output_die = 0;
+ register tree origin;
+ register enum tree_code origin_code;
+
+ /* Ignore blocks never really used to make RTL. */
+
+ if (! stmt || ! TREE_USED (stmt))
+ return;
+
+ /* Determine the "ultimate origin" of this block. This block may be an
+ inlined instance of an inlined instance of an inline function, so we
+ have to trace all of the way back through the origin chain to find
+ out what sort of node actually served as the original seed for the
+ creation of the current block. */
+
+ origin = block_ultimate_origin (stmt);
+ origin_code = (origin != NULL) ? TREE_CODE (origin) : ERROR_MARK;
+
+ /* Determine if we need to output any Dwarf DIEs at all to represent this
+ block. */
+
+ if (origin_code == FUNCTION_DECL)
+ /* The outer scopes for inlinings *must* always be represented. We
+ generate TAG_inlined_subroutine DIEs for them. (See below.) */
+ must_output_die = 1;
+ else
+ {
+ /* In the case where the current block represents an inlining of the
+ "body block" of an inline function, we must *NOT* output any DIE
+ for this block because we have already output a DIE to represent
+ the whole inlined function scope and the "body block" of any
+ function doesn't really represent a different scope according to
+ ANSI C rules. So we check here to make sure that this block does
+ not represent a "body block inlining" before trying to set the
+ `must_output_die' flag. */
+
+ if (origin == NULL || ! is_body_block (origin))
+ {
+ /* Determine if this block directly contains any "significant"
+ local declarations which we will need to output DIEs for. */
+
+ if (debug_info_level > DINFO_LEVEL_TERSE)
+ /* We are not in terse mode so *any* local declaration counts
+ as being a "significant" one. */
+ must_output_die = (BLOCK_VARS (stmt) != NULL);
+ else
+ {
+ register tree decl;
+
+ /* We are in terse mode, so only local (nested) function
+ definitions count as "significant" local declarations. */
+
+ for (decl = BLOCK_VARS (stmt); decl; decl = TREE_CHAIN (decl))
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl))
+ {
+ must_output_die = 1;
+ break;
+ }
+ }
+ }
+ }
+
+ /* It would be a waste of space to generate a Dwarf TAG_lexical_block
+ DIE for any block which contains no significant local declarations
+ at all. Rather, in such cases we just call `output_decls_for_scope'
+ so that any needed Dwarf info for any sub-blocks will get properly
+ generated. Note that in terse mode, our definition of what constitutes
+ a "significant" local declaration gets restricted to include only
+ inlined function instances and local (nested) function definitions. */
+
+ if (must_output_die)
+ {
+ output_die ((origin_code == FUNCTION_DECL)
+ ? output_inlined_subroutine_die
+ : output_lexical_block_die,
+ stmt);
+ output_decls_for_scope (stmt);
+ end_sibling_chain ();
+ }
+ else
+ output_decls_for_scope (stmt);
+}
+
+/* Output all of the decls declared within a given scope (also called
+ a `binding contour') and (recursively) all of its sub-blocks. */
+
+static void
+output_decls_for_scope (stmt)
+ register tree stmt;
+{
+ /* Ignore blocks never really used to make RTL. */
+
+ if (! stmt || ! TREE_USED (stmt))
+ return;
+
+ if (! BLOCK_ABSTRACT (stmt))
+ next_block_number++;
+
+ /* Output the DIEs to represent all of the data objects, functions,
+ typedefs, and tagged types declared directly within this block
+ but not within any nested sub-blocks. */
+
+ {
+ register tree decl;
+
+ for (decl = BLOCK_VARS (stmt); decl; decl = TREE_CHAIN (decl))
+ output_decl (decl, stmt);
+ }
+
+ output_pending_types_for_scope (stmt);
+
+ /* Output the DIEs to represent all sub-blocks (and the items declared
+ therein) of this block. */
+
+ {
+ register tree subblocks;
+
+ for (subblocks = BLOCK_SUBBLOCKS (stmt);
+ subblocks;
+ subblocks = BLOCK_CHAIN (subblocks))
+ output_block (subblocks);
+ }
+}
+
+/* Output Dwarf .debug information for a decl described by DECL. */
+
+static void
+output_decl (decl, containing_scope)
+ register tree decl;
+ register tree containing_scope;
+{
+ /* Make a note of the decl node we are going to be working on. We may
+ need to give the user the source coordinates of where it appeared in
+ case we notice (later on) that something about it looks screwy. */
+
+ dwarf_last_decl = decl;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return;
+
+ /* If this ..._DECL node is marked to be ignored, then ignore it.
+ But don't ignore a function definition, since that would screw
+ up our count of blocks, and that in turn will completely screw up
+ the labels we will reference in subsequent AT_low_pc and AT_high_pc
+ attributes (for subsequent blocks). */
+
+ if (DECL_IGNORED_P (decl) && TREE_CODE (decl) != FUNCTION_DECL)
+ return;
+
+ switch (TREE_CODE (decl))
+ {
+ case CONST_DECL:
+ /* The individual enumerators of an enum type get output when we
+ output the Dwarf representation of the relevant enum type itself. */
+ break;
+
+ case FUNCTION_DECL:
+ /* If we are in terse mode, don't output any DIEs to represent
+ mere function declarations. Also, if we are conforming
+ to the DWARF version 1 specification, don't output DIEs for
+ mere function declarations. */
+
+ if (DECL_INITIAL (decl) == NULL_TREE)
+#if (DWARF_VERSION > 1)
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+#endif
+ break;
+
+ /* Before we describe the FUNCTION_DECL itself, make sure that we
+ have described its return type. */
+
+ output_type (TREE_TYPE (TREE_TYPE (decl)), containing_scope);
+
+ /* If the following DIE will represent a function definition for a
+ function with "extern" linkage, output a special "pubnames" DIE
+ label just ahead of the actual DIE. A reference to this label
+ was already generated in the .debug_pubnames section sub-entry
+ for this function definition. */
+
+ if (TREE_PUBLIC (decl))
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (label, PUB_DIE_LABEL_FMT, next_pubname_number++);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+ }
+
+ /* Now output a DIE to represent the function itself. */
+
+ output_die (TREE_PUBLIC (decl) || DECL_EXTERNAL (decl)
+ ? output_global_subroutine_die
+ : output_local_subroutine_die,
+ decl);
+
+ /* Now output descriptions of the arguments for this function.
+ This gets (unnecessarily?) complex because of the fact that
+ the DECL_ARGUMENT list for a FUNCTION_DECL doesn't indicate
+ cases where there was a trailing `...' at the end of the formal
+ parameter list. In order to find out if there was a trailing
+ ellipsis or not, we must instead look at the type associated
+ with the FUNCTION_DECL. This will be a node of type FUNCTION_TYPE.
+ If the chain of type nodes hanging off of this FUNCTION_TYPE node
+ ends with a void_type_node then there should *not* be an ellipsis
+ at the end. */
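+
+ /* For example, for `int f (int, char);' the chain of TYPE_ARG_TYPES
+ ends with a void_type_node, so no ellipsis is implied, whereas for
+ `int f (int, ...);' the chain ends with the node for `int', so an
+ ellipsis must be represented. */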
+
+ /* In the case where we are describing a mere function declaration, all
+ we need to do here (and all we *can* do here) is to describe
+ the *types* of its formal parameters. */
+
+ if (DECL_INITIAL (decl) == NULL_TREE)
+ output_formal_types (TREE_TYPE (decl));
+ else
+ {
+ register tree arg_decls = DECL_ARGUMENTS (decl);
+
+ {
+ register tree last_arg;
+
+ last_arg = (arg_decls && TREE_CODE (arg_decls) != ERROR_MARK)
+ ? tree_last (arg_decls)
+ : NULL;
+
+ /* Generate DIEs to represent all known formal parameters, but
+ don't do it if this looks like a varargs function. A given
+ function is considered to be a varargs function if (and only
+ if) its last named argument is named `__builtin_va_alist'. */
+
+ if (! last_arg
+ || ! DECL_NAME (last_arg)
+ || strcmp (IDENTIFIER_POINTER (DECL_NAME (last_arg)),
+ "__builtin_va_alist"))
+ {
+ register tree parm;
+
+ /* WARNING! Kludge zone ahead! Here we have a special
+ hack for svr4 SDB compatibility. Instead of passing the
+ current FUNCTION_DECL node as the second parameter (i.e.
+ the `containing_scope' parameter) to `output_decl' (as
+ we ought to) we instead pass a pointer to our own private
+ fake_containing_scope node. That node is a RECORD_TYPE
+ node which NO OTHER TYPE may ever actually be a member of.
+
+ This pointer will ultimately get passed into `output_type'
+ as its `containing_scope' parameter. `Output_type' will
+ then perform its part in the hack... i.e. it will pend
+ the type of the formal parameter onto the pending_types
+ list. Later on, when we are done generating the whole
+ sequence of formal parameter DIEs for this function
+ definition, we will un-pend all previously pended types
+ of formal parameters for this function definition.
+
+ This whole kludge prevents any type DIEs from being
+ mixed in with the formal parameter DIEs. That's good
+ because svr4 SDB believes that the list of formal
+ parameter DIEs for a function ends wherever the first
+ non-formal-parameter DIE appears. Thus, we have to
+ keep the formal parameter DIEs segregated. They must
+ all appear (consecutively) at the start of the list of
+ children for the DIE representing the function definition.
+ Then (and only then) may we output any additional DIEs
+ needed to represent the types of these formal parameters.
+ */
+
+ for (parm = arg_decls; parm; parm = TREE_CHAIN (parm))
+ if (TREE_CODE (parm) == PARM_DECL)
+ output_decl (parm, fake_containing_scope);
+
+ /* Now that we have finished generating all of the DIEs to
+ represent the formal parameters themselves, force out
+ any DIEs needed to represent their types. We do this
+ simply by un-pending all previously pended types which
+ can legitimately go into the chain of children DIEs for
+ the current FUNCTION_DECL. */
+
+ output_pending_types_for_scope (decl);
+ }
+ }
+
+ /* Now try to decide if we should put an ellipsis at the end. */
+
+ {
+ register int has_ellipsis = TRUE; /* default assumption */
+ register tree fn_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl));
+
+ if (fn_arg_types)
+ {
+ /* This function declaration/definition was prototyped. */
+
+ /* If the list of formal argument types ends with a
+ void_type_node, then the formals list did *not* end
+ with an ellipsis. */
+
+ if (TREE_VALUE (tree_last (fn_arg_types)) == void_type_node)
+ has_ellipsis = FALSE;
+ }
+ else
+ {
+ /* This function declaration/definition was not prototyped. */
+
+ /* Note that all non-prototyped function *declarations* are
+ assumed to represent varargs functions (until proven
+ otherwise). */
+
+ if (DECL_INITIAL (decl)) /* if this is a func definition */
+ {
+ if (!arg_decls)
+ has_ellipsis = FALSE; /* no args == (void) */
+ else
+ {
+ /* For a non-prototyped function definition which
+ declares one or more formal parameters, if the name
+ of the first formal parameter is *not*
+ __builtin_va_alist then we must assume that this
+ is *not* a varargs function. */
+
+ if (DECL_NAME (arg_decls)
+ && strcmp (IDENTIFIER_POINTER (DECL_NAME (arg_decls)),
+ "__builtin_va_alist"))
+ has_ellipsis = FALSE;
+ }
+ }
+ }
+
+ if (has_ellipsis)
+ output_die (output_unspecified_parameters_die, decl);
+ }
+ }
+
+ /* Output Dwarf info for all of the stuff within the body of the
+ function (if it has one - it may be just a declaration). */
+
+ {
+ register tree outer_scope = DECL_INITIAL (decl);
+
+ if (outer_scope && TREE_CODE (outer_scope) != ERROR_MARK)
+ {
+ /* Note that here, `outer_scope' is a pointer to the outermost
+ BLOCK node created to represent a function.
+ This outermost BLOCK actually represents the outermost
+ binding contour for the function, i.e. the contour in which
+ the function's formal parameters and labels get declared.
+
+ Curiously, it appears that the front end doesn't actually
+ put the PARM_DECL nodes for the current function onto the
+ BLOCK_VARS list for this outer scope. (They are strung
+ off of the DECL_ARGUMENTS list for the function instead.)
+ The BLOCK_VARS list for the `outer_scope' does provide us
+ with a list of the LABEL_DECL nodes for the function however,
+ and we output DWARF info for those here.
+
+ Just within the `outer_scope' there will be another BLOCK
+ node representing the function's outermost pair of curly
+ braces. We mustn't generate a lexical_block DIE for this
+ outermost pair of curly braces because that is not really an
+ independent scope according to ANSI C rules. Rather, it is
+ the same scope in which the parameters were declared. */
+
+ {
+ register tree label;
+
+ for (label = BLOCK_VARS (outer_scope);
+ label;
+ label = TREE_CHAIN (label))
+ output_decl (label, outer_scope);
+ }
+
+ /* Note here that `BLOCK_SUBBLOCKS (outer_scope)' points to a
+ list of BLOCK nodes which is always only one element long.
+ That one element represents the outermost pair of curly
+ braces for the function body. */
+
+ output_decls_for_scope (BLOCK_SUBBLOCKS (outer_scope));
+
+ /* Finally, force out any pending types which are local to the
+ outermost block of this function definition. These will
+ all have a TYPE_CONTEXT which points to the FUNCTION_DECL
+ node itself. */
+
+ output_pending_types_for_scope (decl);
+ }
+ }
+
+ /* Generate a terminator for the list of stuff `owned' by this
+ function. */
+
+ end_sibling_chain ();
+
+ break;
+
+ case TYPE_DECL:
+ /* If we are in terse mode, don't generate any DIEs to represent
+ any actual typedefs. Note that even when we are in terse mode,
+ we must still output DIEs to represent those tagged types which
+ are used (directly or indirectly) in the specification of either
+ a return type or a formal parameter type of some function. */
+
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ if (DECL_NAME (decl) != NULL
+ || ! TYPE_USED_FOR_FUNCTION (TREE_TYPE (decl)))
+ return;
+
+ /* In the special case of a null-named TYPE_DECL node (representing
+ the declaration of some type tag), if the given TYPE_DECL is
+ marked as having been instantiated from some other (original)
+ TYPE_DECL node (e.g. one which was generated within the original
+ definition of an inline function) we have to generate a special
+ (abbreviated) TAG_structure_type, TAG_union_type, or
+ TAG_enumeration_type DIE here. */
+
+ if (! DECL_NAME (decl) && DECL_ABSTRACT_ORIGIN (decl))
+ {
+ output_tagged_type_instantiation (TREE_TYPE (decl));
+ return;
+ }
+
+ output_type (TREE_TYPE (decl), containing_scope);
+
+ /* Note that unlike the gcc front end (which generates a NULL named
+ TYPE_DECL node for each complete tagged type, each array type,
+ and each function type node created) the g++ front end generates
+ a *named* TYPE_DECL node for each tagged type node created.
+ Unfortunately, these g++ TYPE_DECL nodes cause us to output many
+ superfluous and unnecessary TAG_typedef DIEs here. When g++ is
+ fixed to stop generating these superfluous named TYPE_DECL nodes,
+ the superfluous TAG_typedef DIEs will likewise cease. */
+
+ if (DECL_NAME (decl))
+ /* Output a DIE to represent the typedef itself. */
+ output_die (output_typedef_die, decl);
+ break;
+
+ case LABEL_DECL:
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ output_die (output_label_die, decl);
+ break;
+
+ case VAR_DECL:
+ /* If we are conforming to the DWARF version 1 specification, don't
+ generate any DIEs to represent mere external object declarations. */
+
+#if (DWARF_VERSION <= 1)
+ if (DECL_EXTERNAL (decl) && ! TREE_PUBLIC (decl))
+ break;
+#endif
+
+ /* If we are in terse mode, don't generate any DIEs to represent
+ any variable declarations or definitions. */
+
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ break;
+
+ /* Output any DIEs that are needed to specify the type of this data
+ object. */
+
+ output_type (TREE_TYPE (decl), containing_scope);
+
+ /* If the following DIE will represent a data object definition for a
+ data object with "extern" linkage, output a special "pubnames" DIE
+ label just ahead of the actual DIE. A reference to this label
+ was already generated in the .debug_pubnames section sub-entry
+ for this data object definition. */
+
+ if (TREE_PUBLIC (decl) && ! DECL_ABSTRACT (decl))
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (label, PUB_DIE_LABEL_FMT, next_pubname_number++);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+ }
+
+ /* Now output the DIE to represent the data object itself. This gets
+ complicated because of the possibility that the VAR_DECL really
+ represents an inlined instance of a formal parameter for an inline
+ function. */
+
+ {
+ register void (*func) ();
+ register tree origin = decl_ultimate_origin (decl);
+
+ if (origin != NULL && TREE_CODE (origin) == PARM_DECL)
+ func = output_formal_parameter_die;
+ else
+ {
+ if (TREE_PUBLIC (decl) || DECL_EXTERNAL (decl))
+ func = output_global_variable_die;
+ else
+ func = output_local_variable_die;
+ }
+ output_die (func, decl);
+ }
+ break;
+
+ case FIELD_DECL:
+ /* Ignore the nameless fields that are used to skip bits. */
+ if (DECL_NAME (decl) != 0)
+ {
+ output_type (member_declared_type (decl), containing_scope);
+ output_die (output_member_die, decl);
+ }
+ break;
+
+ case PARM_DECL:
+ /* Force out the type of this formal, if it was not forced out yet.
+ Note that here we can run afoul of a bug in "classic" svr4 SDB.
+ It should be able to grok the presence of type DIEs within a list
+ of TAG_formal_parameter DIEs, but it doesn't. */
+
+ output_type (TREE_TYPE (decl), containing_scope);
+ output_die (output_formal_parameter_die, decl);
+ break;
+
+ default:
+ abort ();
+ }
+}
+
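+/* Output Dwarf .debug information for a file-scope decl described by
+ DECL. SET_FINALIZING gives the new value for the global `finalizing'
+ flag which is in effect while the decl is output. */
+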
+void
+dwarfout_file_scope_decl (decl, set_finalizing)
+ register tree decl;
+ register int set_finalizing;
+{
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return;
+
+ /* If this ..._DECL node is marked to be ignored, then ignore it. We
+ gotta hope that the node in question doesn't represent a function
+ definition. If it does, then totally ignoring it is bound to screw
+ up our count of blocks, and that it turn will completely screw up the
+ the labels we will reference in subsequent AT_low_pc and AT_high_pc
+ attributes (for subsequent blocks). (It's too bad that BLOCK nodes
+ don't carry their own sequence numbers with them!) */
+
+ if (DECL_IGNORED_P (decl))
+ {
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl) != NULL)
+ abort ();
+ return;
+ }
+
+ switch (TREE_CODE (decl))
+ {
+ case FUNCTION_DECL:
+
+ /* Ignore this FUNCTION_DECL if it refers to a builtin declaration of
+ a builtin function. Explicit programmer-supplied declarations of
+ these same functions should NOT be ignored however. */
+
+ if (DECL_EXTERNAL (decl) && DECL_FUNCTION_CODE (decl))
+ return;
+
+ /* What we would really like to do here is to filter out all mere
+ file-scope declarations of file-scope functions which are never
+ referenced later within this translation unit (and keep all of the
+ ones that *are* referenced later on) but we aren't clairvoyant,
+ so we have no idea which functions will be referenced in the
+ future (i.e. later on within the current translation unit).
+ So here we just ignore all file-scope function declarations
+ which are not also definitions. If and when the debugger needs
+ to know something about these functions, it will have to hunt
+ around and find the DWARF information associated with the
+ *definition* of the function.
+
+ Note that we can't just check `DECL_EXTERNAL' to find out which
+ FUNCTION_DECL nodes represent definitions and which ones represent
+ mere declarations. We have to check `DECL_INITIAL' instead. That's
+ because the C front-end supports some weird semantics for "extern
+ inline" function definitions. These can get inlined within the
+ current translation unit (and thus, we need to generate DWARF info
+ for their abstract instances so that the DWARF info for the
+ concrete inlined instances can have something to refer to) but
+ the compiler never generates any out-of-line instances of such
+ things (despite the fact that they *are* definitions). The
+ important point is that the C front-end marks these "extern inline"
+ functions as DECL_EXTERNAL, but we need to generate DWARF for them
+ anyway.
+
+ Note that the C++ front-end also plays some similar games for inline
+ function definitions appearing within include files which also
+ contain `#pragma interface' pragmas. */
+
+ if (DECL_INITIAL (decl) == NULL_TREE)
+ return;
+
+ if (TREE_PUBLIC (decl)
+ && ! DECL_EXTERNAL (decl)
+ && ! DECL_ABSTRACT (decl))
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ /* Output a .debug_pubnames entry for a public function
+ defined in this compilation unit. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, PUBNAMES_SECTION);
+ sprintf (label, PUB_DIE_LABEL_FMT, next_pubname_number);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, label);
+ ASM_OUTPUT_DWARF_STRING (asm_out_file,
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ break;
+
+ case VAR_DECL:
+
+ /* Ignore this VAR_DECL if it refers to a file-scope extern data
+ object declaration and if the declaration was never even
+ referenced from within this entire compilation unit. We
+ suppress these DIEs in order to save space in the .debug section
+ (by eliminating entries which are probably useless). Note that
+ we must not suppress block-local extern declarations (whether
+ used or not) because that would screw up the debugger's name
+ lookup mechanism and cause it to miss things which really ought
+ to be in scope at a given point. */
+
+ if (DECL_EXTERNAL (decl) && !TREE_USED (decl))
+ return;
+
+ if (TREE_PUBLIC (decl)
+ && ! DECL_EXTERNAL (decl)
+ && GET_CODE (DECL_RTL (decl)) == MEM
+ && ! DECL_ABSTRACT (decl))
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ /* Output a .debug_pubnames entry for a public variable
+ defined in this compilation unit. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, PUBNAMES_SECTION);
+ sprintf (label, PUB_DIE_LABEL_FMT, next_pubname_number);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, label);
+ ASM_OUTPUT_DWARF_STRING (asm_out_file,
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ if (DECL_INITIAL (decl) == NULL)
+ {
+ /* Output a .debug_aranges entry for a public variable
+ which is tentatively defined in this compilation unit. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, ARANGES_SECTION);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file,
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file,
+ (unsigned) int_size_in_bytes (TREE_TYPE (decl)));
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+ }
+
+ /* If we are in terse mode, don't generate any DIEs to represent
+ any variable declarations or definitions. */
+
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ return;
+
+ break;
+
+ case TYPE_DECL:
+ /* Don't bother trying to generate any DIEs to represent any of the
+ normal built-in types for the language we are compiling, except
+ in cases where the types in question are *not* DWARF fundamental
+ types. We make an exception in the case of non-fundamental types
+ for the sake of Objective C (and perhaps C++) because the GNU
+ front-ends for these languages may in fact create certain "built-in"
+ types which are (for example) RECORD_TYPEs. In such cases, we
+ really need to output these (non-fundamental) types because other
+ DIEs may contain references to them. */
+
+ if (DECL_SOURCE_LINE (decl) == 0
+ && type_is_fundamental (TREE_TYPE (decl)))
+ return;
+
+ /* If we are in terse mode, don't generate any DIEs to represent
+ any actual typedefs. Note that even when we are in terse mode,
+ we must still output DIEs to represent those tagged types which
+ are used (directly or indirectly) in the specification of either
+ a return type or a formal parameter type of some function. */
+
+ if (debug_info_level <= DINFO_LEVEL_TERSE)
+ if (DECL_NAME (decl) != NULL
+ || ! TYPE_USED_FOR_FUNCTION (TREE_TYPE (decl)))
+ return;
+
+ break;
+
+ default:
+ return;
+ }
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DEBUG_SECTION);
+ finalizing = set_finalizing;
+ output_decl (decl, NULL_TREE);
+
+ /* NOTE: The call above to `output_decl' may have caused one or more
+ file-scope named types (i.e. tagged types) to be placed onto the
+ pending_types_list. We have to get those types off of that list
+ at some point, and this is the perfect time to do it. If we didn't
+ take them off now, they might still be on the list when cc1 finally
+ exits. That might be OK if it weren't for the fact that when we put
+ types onto the pending_types_list, we set the TREE_ASM_WRITTEN flag
+ for these types, and that causes them never to be output unless
+ `output_pending_types_for_scope' takes them off of the list and un-sets
+ their TREE_ASM_WRITTEN flags. */
+
+ output_pending_types_for_scope (NULL_TREE);
+
+ /* The above call should have totally emptied the pending_types_list. */
+
+ assert (pending_types == 0);
+
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl) != NULL)
+ current_funcdef_number++;
+}
+
+/* Output a marker (i.e. a label) for the beginning of the generated code
+ for a lexical block. */
+
+void
+dwarfout_begin_block (blocknum)
+ register unsigned blocknum;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ text_section ();
+ sprintf (label, BLOCK_BEGIN_LABEL_FMT, blocknum);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+}
+
+/* Output a marker (i.e. a label) for the end of the generated code
+ for a lexical block. */
+
+void
+dwarfout_end_block (blocknum)
+ register unsigned blocknum;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ text_section ();
+ sprintf (label, BLOCK_END_LABEL_FMT, blocknum);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+}
+
+/* Output a marker (i.e. a label) at a point in the assembly code which
+ corresponds to a given source level label. */
+
+void
+dwarfout_label (insn)
+ register rtx insn;
+{
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ text_section ();
+ sprintf (label, INSN_LABEL_FMT, current_funcdef_number,
+ (unsigned) INSN_UID (insn));
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+ }
+}
+
+/* Output a marker (i.e. a label) for the point in the generated code where
+ the real body of the function begins (after parameters have been moved
+ to their home locations). */
+
+void
+dwarfout_begin_function ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ text_section ();
+ sprintf (label, BODY_BEGIN_LABEL_FMT, current_funcdef_number);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+}
+
+/* Output a marker (i.e. a label) for the point in the generated code where
+ the real body of the function ends (just before the epilogue code). */
+
+void
+dwarfout_end_function ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ text_section ();
+ sprintf (label, BODY_END_LABEL_FMT, current_funcdef_number);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+}
+
+/* Output a marker (i.e. a label) for the absolute end of the generated code
+ for a function definition. This gets called *after* the epilogue code
+ has been generated. */
+
+void
+dwarfout_end_epilogue ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ /* Output a label to mark the endpoint of the code generated for this
+ function. */
+
+ sprintf (label, FUNC_END_LABEL_FMT, current_funcdef_number);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+}
+
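+/* Move the filename_table entry pointed to by NEW_ZEROTH into slot [0]
+ of the table, shifting each intervening entry up by one slot. This
+ implements the move-to-front heuristic described in the comments
+ before `lookup_filename' below. */
+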
+static void
+shuffle_filename_entry (new_zeroth)
+ register filename_entry *new_zeroth;
+{
+ filename_entry temp_entry;
+ register filename_entry *limit_p;
+ register filename_entry *move_p;
+
+ if (new_zeroth == &filename_table[0])
+ return;
+
+ temp_entry = *new_zeroth;
+
+ /* Shift entries up in the table to make room at [0]. */
+
+ limit_p = &filename_table[0];
+ for (move_p = new_zeroth; move_p > limit_p; move_p--)
+ *move_p = *(move_p-1);
+
+ /* Install the found entry at [0]. */
+
+ filename_table[0] = temp_entry;
+}
+
+/* Create a new (string) entry for the .debug_sfnames section. */
+
+static void
+generate_new_sfname_entry ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, SFNAMES_SECTION);
+ sprintf (label, SFNAMES_ENTRY_LABEL_FMT, filename_table[0].number);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+ ASM_OUTPUT_DWARF_STRING (asm_out_file,
+ filename_table[0].name
+ ? filename_table[0].name
+ : "");
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+}
+
+/* Lookup a filename (in the list of filenames that we know about here in
+ dwarfout.c) and return its "index". The index of each (known) filename
+ is just a unique number which is associated with only that one filename.
+ We need such numbers for the sake of generating labels (in the
+ .debug_sfnames section) and references to those unique labels (in the
+ .debug_srcinfo and .debug_macinfo sections).
+
+ If the filename given as an argument is not found in our current list,
+ add it to the list and assign it the next available unique index number.
+
+ Whatever we do (i.e. whether we find a pre-existing filename or add a new
+ one), we shuffle the filename found (or added) up to the zeroth entry of
+ our list of filenames (which is always searched linearly). We do this so
+ as to optimize the most common case for these filename lookups within
+ dwarfout.c. The most common case by far is the case where we call
+ lookup_filename to look up the very same filename that we did a lookup
+ on the last time we called lookup_filename. We make sure that this
+ common case is fast because such cases will constitute 99.9% of the
+ lookups we ever do (in practice).
+
+ If we add a new filename entry to our table, we go ahead and generate
+ the corresponding entry in the .debug_sfnames section right away.
+ Doing so allows us to avoid tickling an assembler bug (present in some
+ m68k assemblers) which yields assembly-time errors in cases where the
+ difference of two label addresses is taken and where the two labels
+ are in a section *other* than the one where the difference is being
+ calculated, and where at least one of the two symbol references is a
+ forward reference. (This bug could be tickled by our .debug_srcinfo
+ entries if we don't output their corresponding .debug_sfnames entries
+ before them.)
+*/
+
+static unsigned
+lookup_filename (file_name)
+ char *file_name;
+{
+ register filename_entry *search_p;
+ register filename_entry *limit_p = &filename_table[ft_entries];
+
+ for (search_p = filename_table; search_p < limit_p; search_p++)
+ if (!strcmp (file_name, search_p->name))
+ {
+ /* When we get here, we have found the filename that we were
+ looking for in the filename_table. Now we want to make sure
+ that it gets moved to the zero'th entry in the table (if it
+ is not already there) so that subsequent attempts to find the
+ same filename will find it as quickly as possible. */
+
+ shuffle_filename_entry (search_p);
+ return filename_table[0].number;
+ }
+
+ /* We come here whenever we have a new filename which is not registered
+ in the current table. Here we add it to the table. */
+
+ /* Prepare to add a new table entry by making sure there is enough space
+ in the table to do so. If not, expand the current table. */
+
+ if (ft_entries == ft_entries_allocated)
+ {
+ ft_entries_allocated += FT_ENTRIES_INCREMENT;
+ filename_table
+ = (filename_entry *)
+ xrealloc (filename_table,
+ ft_entries_allocated * sizeof (filename_entry));
+ }
+
+ /* Initially, add the new entry at the end of the filename table. */
+
+ filename_table[ft_entries].number = ft_entries;
+ filename_table[ft_entries].name = xstrdup (file_name);
+
+ /* Shuffle the new entry into filename_table[0]. */
+
+ shuffle_filename_entry (&filename_table[ft_entries]);
+
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ generate_new_sfname_entry ();
+
+ ft_entries++;
+ return filename_table[0].number;
+}
+
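+/* Generate an entry in the .debug_srcinfo section which associates the
+ .line section entry numbered LINE_ENTRY_NUM with the .debug_sfnames
+ entry numbered FILES_ENTRY_NUM. Each such pair marks a point in the
+ line-number table where the source file changes. */
+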
+static void
+generate_srcinfo_entry (line_entry_num, files_entry_num)
+ unsigned line_entry_num;
+ unsigned files_entry_num;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, SRCINFO_SECTION);
+ sprintf (label, LINE_ENTRY_LABEL_FMT, line_entry_num);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, label, LINE_BEGIN_LABEL);
+ sprintf (label, SFNAMES_ENTRY_LABEL_FMT, files_entry_num);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, label, SFNAMES_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+}
+
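+/* Output a label (in the .text section) for the given source line,
+ together with a corresponding entry in the .line section, and (when
+ the source file has changed since the previous entry) an entry in the
+ .debug_srcinfo section as well. */
+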
+void
+dwarfout_line (filename, line)
+ register char *filename;
+ register unsigned line;
+{
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+ static unsigned last_line_entry_num = 0;
+ static unsigned prev_file_entry_num = (unsigned) -1;
+ register unsigned this_file_entry_num = lookup_filename (filename);
+
+ text_section ();
+ sprintf (label, LINE_CODE_LABEL_FMT, ++last_line_entry_num);
+ ASM_OUTPUT_LABEL (asm_out_file, label);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, LINE_SECTION);
+
+ if (this_file_entry_num != prev_file_entry_num)
+ {
+ char line_entry_label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ sprintf (line_entry_label, LINE_ENTRY_LABEL_FMT, last_line_entry_num);
+ ASM_OUTPUT_LABEL (asm_out_file, line_entry_label);
+ }
+
+ {
+ register char *tail = rindex (filename, '/');
+
+ if (tail != NULL)
+ filename = tail;
+ }
+
+ fprintf (asm_out_file, "\t%s\t%u\t%s %s:%u\n",
+ UNALIGNED_INT_ASM_OP, line, ASM_COMMENT_START,
+ filename, line);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, 0xffff);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, label, TEXT_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ if (this_file_entry_num != prev_file_entry_num)
+ generate_srcinfo_entry (last_line_entry_num, this_file_entry_num);
+ prev_file_entry_num = this_file_entry_num;
+ }
+}
+
+/* Generate an entry in the .debug_macinfo section. */
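+/* The `type_and_offset' argument gives the assembly text for a 32-bit
+ word whose high-order byte holds a MACINFO type code and whose
+ low-order bits hold the value associated with that type (a line
+ number, or the offset of an entry within the .debug_sfnames section),
+ as may be seen at the three call sites below. */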
+
+static void
+generate_macinfo_entry (type_and_offset, string)
+ register char *type_and_offset;
+ register char *string;
+{
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, MACINFO_SECTION);
+ fprintf (asm_out_file, "\t%s\t%s\n", UNALIGNED_INT_ASM_OP, type_and_offset);
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, string);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+}
+
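+/* Generate a .debug_macinfo entry of type MACINFO_start to mark the
+ point at which inclusion of a new source file begins. */
+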
+void
+dwarfout_start_new_source_file (filename)
+ register char *filename;
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+ char type_and_offset[MAX_ARTIFICIAL_LABEL_BYTES*3];
+
+ sprintf (label, SFNAMES_ENTRY_LABEL_FMT, lookup_filename (filename));
+ sprintf (type_and_offset, "0x%08x+%s-%s",
+ ((unsigned) MACINFO_start << 24), label, SFNAMES_BEGIN_LABEL);
+ generate_macinfo_entry (type_and_offset, "");
+}
+
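+/* Generate a .debug_macinfo entry of type MACINFO_resume to mark the
+ point (at line LINENO) where the previous source file resumes after
+ an inclusion ends. */
+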
+void
+dwarfout_resume_previous_source_file (lineno)
+ register unsigned lineno;
+{
+ char type_and_offset[MAX_ARTIFICIAL_LABEL_BYTES*2];
+
+ sprintf (type_and_offset, "0x%08x+%u",
+ ((unsigned) MACINFO_resume << 24), lineno);
+ generate_macinfo_entry (type_and_offset, "");
+}
+
+/* Called from check_newline in c-parse.y. The `buffer' parameter
+ contains the tail part of the directive line, i.e. the part which
+ is past the initial whitespace, #, whitespace, directive-name,
+ whitespace part. */
+
+void
+dwarfout_define (lineno, buffer)
+ register unsigned lineno;
+ register char *buffer;
+{
+ static int initialized = 0;
+ char type_and_offset[MAX_ARTIFICIAL_LABEL_BYTES*2];
+
+ if (!initialized)
+ {
+ dwarfout_start_new_source_file (primary_filename);
+ initialized = 1;
+ }
+ sprintf (type_and_offset, "0x%08x+%u",
+ ((unsigned) MACINFO_define << 24), lineno);
+ generate_macinfo_entry (type_and_offset, buffer);
+}
+
+/* Called from check_newline in c-parse.y. The `buffer' parameter
+ contains the tail part of the directive line, i.e. the part which
+ is past the initial whitespace, #, whitespace, directive-name,
+ whitespace part. */
+
+void
+dwarfout_undef (lineno, buffer)
+ register unsigned lineno;
+ register char *buffer;
+{
+ char type_and_offset[MAX_ARTIFICIAL_LABEL_BYTES*2];
+
+ sprintf (type_and_offset, "0x%08x+%u",
+ ((unsigned) MACINFO_undef << 24), lineno);
+ generate_macinfo_entry (type_and_offset, buffer);
+}
+
+/* Set up for Dwarf output at the start of compilation. */
+
+void
+dwarfout_init (asm_out_file, main_input_filename)
+ register FILE *asm_out_file;
+ register char *main_input_filename;
+{
+ /* Remember the name of the primary input file. */
+
+ primary_filename = main_input_filename;
+
+ /* Allocate the initial hunk of the pending_sibling_stack. */
+
+ pending_sibling_stack
+ = (unsigned *)
+ xmalloc (PENDING_SIBLINGS_INCREMENT * sizeof (unsigned));
+ pending_siblings_allocated = PENDING_SIBLINGS_INCREMENT;
+ pending_siblings = 1;
+
+ /* Allocate the initial hunk of the filename_table. */
+
+ filename_table
+ = (filename_entry *)
+ xmalloc (FT_ENTRIES_INCREMENT * sizeof (filename_entry));
+ ft_entries_allocated = FT_ENTRIES_INCREMENT;
+ ft_entries = 0;
+
+ /* Allocate the initial hunk of the pending_types_list. */
+
+ pending_types_list
+ = (tree *) xmalloc (PENDING_TYPES_INCREMENT * sizeof (tree));
+ pending_types_allocated = PENDING_TYPES_INCREMENT;
+ pending_types = 0;
+
+ /* Create an artificial RECORD_TYPE node which we can use in our hack
+ to get the DIEs representing types of formal parameters to come out
+ only *after* the DIEs for the formal parameters themselves. */
+
+ fake_containing_scope = make_node (RECORD_TYPE);
+
+ /* Output a starting label for the .text section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, TEXT_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, TEXT_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ /* Output a starting label for the .data section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DATA_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, DATA_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+#if 0 /* GNU C doesn't currently use .data1. */
+ /* Output a starting label for the .data1 section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DATA1_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, DATA1_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+#endif
+
+ /* Output a starting label for the .rodata section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, RODATA_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, RODATA_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+#if 0 /* GNU C doesn't currently use .rodata1. */
+ /* Output a starting label for the .rodata1 section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, RODATA1_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, RODATA1_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+#endif
+
+ /* Output a starting label for the .bss section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, BSS_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, BSS_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ /* Output a starting label and an initial (compilation directory)
+ entry for the .debug_sfnames section. The starting label will be
+ referenced by the initial entry in the .debug_srcinfo section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, SFNAMES_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, SFNAMES_BEGIN_LABEL);
+ {
+ register char *pwd;
+ register unsigned len;
+ register char *dirname;
+
+ pwd = getpwd ();
+ if (!pwd)
+ pfatal_with_name ("getpwd");
+ len = strlen (pwd);
+ dirname = (char *) xmalloc (len + 2);
+
+ strcpy (dirname, pwd);
+ strcpy (dirname + len, "/");
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, dirname);
+ free (dirname);
+ }
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ if (debug_info_level >= DINFO_LEVEL_VERBOSE)
+ {
+ /* Output a starting label for the .debug_macinfo section. This
+ label will be referenced by the AT_mac_info attribute in the
+ TAG_compile_unit DIE. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, MACINFO_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, MACINFO_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ /* Generate the initial entry for the .line section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, LINE_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, LINE_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, LINE_END_LABEL, LINE_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, TEXT_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ /* Generate the initial entry for the .debug_srcinfo section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, SRCINFO_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, SRCINFO_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, LINE_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, SFNAMES_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, TEXT_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, TEXT_END_LABEL);
+#ifdef DWARF_TIMESTAMPS
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, time (NULL));
+#else
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, -1);
+#endif
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ /* Generate the initial entry for the .debug_pubnames section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, PUBNAMES_SECTION);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, DEBUG_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ /* Generate the initial entry for the .debug_aranges section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, ARANGES_SECTION);
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, DEBUG_BEGIN_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ /* Set up first DIE number == 1. */
+ NEXT_DIE_NUM = next_unused_dienum++;
+
+ /* Generate the initial DIE for the .debug section. Note that the
+ (string) value given in the AT_name attribute of the TAG_compile_unit
+ DIE will (typically) be a relative pathname and that this pathname
+ should be taken as being relative to the directory from which the
+ compiler was invoked when the given (base) source file was compiled. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DEBUG_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, DEBUG_BEGIN_LABEL);
+ output_die (output_compile_unit_die, main_input_filename);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ fputc ('\n', asm_out_file);
+}
+
+/* Output stuff that dwarf requires at the end of every file. */
+
+void
+dwarfout_finish ()
+{
+ char label[MAX_ARTIFICIAL_LABEL_BYTES];
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DEBUG_SECTION);
+
+ /* Mark the end of the chain of siblings which represent all file-scope
+ declarations in this compilation unit. */
+
+ /* The (null) DIE which represents the terminator for the (sibling linked)
+ list of file-scope items is *special*. Normally, we would just call
+ end_sibling_chain at this point in order to output a word with the
+ value `4' and that word would act as the terminator for the list of
+ DIEs describing file-scope items. Unfortunately, if we were to simply
+ do that, the label that would follow this DIE in the .debug section
+ (i.e. `..D2') would *not* be properly aligned (as it must be on some
+ machines) to a 4 byte boundary.
+
+ In order to force the label `..D2' to get aligned to a 4 byte boundary,
+ the trick used is to insert extra (otherwise useless) padding bytes
+ into the (null) DIE that we know must precede the ..D2 label in the
+ .debug section. The amount of padding required can be anywhere between
+ 0 and 3 bytes. The length word at the start of this DIE (i.e. the one
+ with the padding) would normally contain the value 4, but now it will
+ also have to include the padding bytes, so it will instead have some
+ value in the range 4..7.
+
+ Fortunately, the rules of Dwarf say that any DIE whose length word
+ contains *any* value less than 8 should be treated as a null DIE, so
+ this trick works out nicely. Clever, eh? Don't give me any credit
+ (or blame). I didn't think of this scheme. I just conformed to it.
+ */
+
+ output_die (output_padded_null_die, (void *)0);
+ dienum_pop ();
+
+ sprintf (label, DIE_BEGIN_LABEL_FMT, NEXT_DIE_NUM);
+ ASM_OUTPUT_LABEL (asm_out_file, label); /* should be ..D2 */
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ /* Output a terminator label for the .text section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, TEXT_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, TEXT_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ /* Output a terminator label for the .data section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DATA_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, DATA_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+#if 0 /* GNU C doesn't currently use .data1. */
+ /* Output a terminator label for the .data1 section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, DATA1_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, DATA1_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+#endif
+
+ /* Output a terminator label for the .rodata section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, RODATA_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, RODATA_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+#if 0 /* GNU C doesn't currently use .rodata1. */
+ /* Output a terminator label for the .rodata1 section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, RODATA1_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, RODATA1_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+#endif
+
+ /* Output a terminator label for the .bss section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, BSS_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, BSS_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ if (debug_info_level >= DINFO_LEVEL_NORMAL)
+ {
+ /* Output a terminating entry for the .line section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, LINE_SECTION);
+ ASM_OUTPUT_LABEL (asm_out_file, LINE_LAST_ENTRY_LABEL);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+ ASM_OUTPUT_DWARF_DATA2 (asm_out_file, 0xffff);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, TEXT_END_LABEL, TEXT_BEGIN_LABEL);
+ ASM_OUTPUT_LABEL (asm_out_file, LINE_END_LABEL);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ /* Output a terminating entry for the .debug_srcinfo section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, SRCINFO_SECTION);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file,
+ LINE_LAST_ENTRY_LABEL, LINE_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, -1);
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ if (debug_info_level >= DINFO_LEVEL_VERBOSE)
+ {
+ /* Output terminating entries for the .debug_macinfo section. */
+
+ dwarfout_resume_previous_source_file (0);
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, MACINFO_SECTION);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, "");
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+
+ /* Generate the terminating entry for the .debug_pubnames section. */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, PUBNAMES_SECTION);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+ ASM_OUTPUT_DWARF_STRING (asm_out_file, "");
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+
+ /* Generate the terminating entries for the .debug_aranges section.
+
+ Note that we want to do this only *after* we have output the end
+ labels (for the various program sections) which we are going to
+ refer to here. This allows us to work around a bug in the m68k
+ svr4 assembler. That assembler gives bogus assembly-time errors
+ if (within any given section) you try to take the difference of
+ two relocatable symbols, both of which are located within some
+ other section, and if one (or both?) of the symbols involved is
+ being forward-referenced. By generating the .debug_aranges
+ entries at this late point in the assembly output, we skirt the
+ issue simply by avoiding forward-references.
+ */
+
+ fputc ('\n', asm_out_file);
+ ASM_OUTPUT_PUSH_SECTION (asm_out_file, ARANGES_SECTION);
+
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, TEXT_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, TEXT_END_LABEL, TEXT_BEGIN_LABEL);
+
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, DATA_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, DATA_END_LABEL, DATA_BEGIN_LABEL);
+
+#if 0 /* GNU C doesn't currently use .data1. */
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, DATA1_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, DATA1_END_LABEL,
+ DATA1_BEGIN_LABEL);
+#endif
+
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, RODATA_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, RODATA_END_LABEL,
+ RODATA_BEGIN_LABEL);
+
+#if 0 /* GNU C doesn't currently use .rodata1. */
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, RODATA1_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, RODATA1_END_LABEL,
+ RODATA1_BEGIN_LABEL);
+#endif
+
+ ASM_OUTPUT_DWARF_ADDR (asm_out_file, BSS_BEGIN_LABEL);
+ ASM_OUTPUT_DWARF_DELTA4 (asm_out_file, BSS_END_LABEL, BSS_BEGIN_LABEL);
+
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+ ASM_OUTPUT_DWARF_DATA4 (asm_out_file, 0);
+
+ ASM_OUTPUT_POP_SECTION (asm_out_file);
+ }
+}
+
+#endif /* DWARF_DEBUGGING_INFO */
diff --git a/gnu/usr.bin/cc/cc_int/emit-rtl.c b/gnu/usr.bin/cc/cc_int/emit-rtl.c
new file mode 100644
index 0000000..3afcccb
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/emit-rtl.c
@@ -0,0 +1,3359 @@
+/* Emit RTL for the GNU C-Compiler expander.
+ Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Middle-to-low level generation of rtx code and insns.
+
+ This file contains the functions `gen_rtx', `gen_reg_rtx'
+ and `gen_label_rtx' that are the usual ways of creating rtl
+ expressions for most purposes.
+
+ It also has the functions for creating insns and linking
+ them in the doubly-linked chain.
+
+ The patterns of the insns are created by machine-dependent
+ routines in insn-emit.c, which is generated automatically from
+ the machine description. These routines use `gen_rtx' to make
+ the individual rtx's of the pattern; what is machine dependent
+ is the kind of rtx's they make and what arguments they use. */
+
+#include "config.h"
+#ifdef __STDC__
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "function.h"
+#include "expr.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "real.h"
+#include "obstack.h"
+
+#include "bytecode.h"
+#include "machmode.h"
+#include "bc-opcode.h"
+#include "bc-typecd.h"
+#include "bc-optab.h"
+#include "bc-emit.h"
+
+#include <stdio.h>
+
+
+/* Opcode names */
+#ifdef BCDEBUG_PRINT_CODE
+char *opcode_name[] =
+{
+#include "bc-opname.h"
+
+"***END***"
+};
+#endif
+
+
+/* Commonly used modes. */
+
+enum machine_mode byte_mode; /* Mode whose width is BITS_PER_UNIT */
+enum machine_mode word_mode; /* Mode whose width is BITS_PER_WORD */
+
+/* This is reset to LAST_VIRTUAL_REGISTER + 1 at the start of each function.
+ After rtl generation, it is 1 plus the largest register number used. */
+
+int reg_rtx_no = LAST_VIRTUAL_REGISTER + 1;
+
+/* This is *not* reset after each function. It gives each CODE_LABEL
+ in the entire compilation a unique label number. */
+
+static int label_num = 1;
+
+/* Lowest label number in current function. */
+
+static int first_label_num;
+
+/* Highest label number in current function.
+ Zero means use the value of label_num instead.
+ This is nonzero only when belatedly compiling an inline function. */
+
+static int last_label_num;
+
+/* Value label_num had when set_new_first_and_last_label_number was called.
+ If label_num has not changed since then, last_label_num is valid. */
+
+static int base_label_num;
+
+/* Nonzero means do not generate NOTEs for source line numbers. */
+
+static int no_line_numbers;
+
+/* Commonly used rtx's, so that we only need space for one copy.
+ These are initialized once for the entire compilation.
+ All of these except perhaps the floating-point CONST_DOUBLEs
+ are unique; no other rtx-object will be equal to any of these. */
+
+rtx pc_rtx; /* (PC) */
+rtx cc0_rtx; /* (CC0) */
+rtx cc1_rtx; /* (CC1) (not actually used nowadays) */
+rtx const0_rtx; /* (CONST_INT 0) */
+rtx const1_rtx; /* (CONST_INT 1) */
+rtx const2_rtx; /* (CONST_INT 2) */
+rtx constm1_rtx; /* (CONST_INT -1) */
+rtx const_true_rtx; /* (CONST_INT STORE_FLAG_VALUE) */
+
+/* We record floating-point CONST_DOUBLEs in each floating-point mode for
+ the values of 0, 1, and 2. For the integer entries and VOIDmode, we
+ record a copy of const[012]_rtx. */
+
+rtx const_tiny_rtx[3][(int) MAX_MACHINE_MODE];
+
+REAL_VALUE_TYPE dconst0;
+REAL_VALUE_TYPE dconst1;
+REAL_VALUE_TYPE dconst2;
+REAL_VALUE_TYPE dconstm1;
+
+/* All references to the following fixed hard registers go through
+ these unique rtl objects. On machines where the frame-pointer and
+ arg-pointer are the same register, they use the same unique object.
+
+ After register allocation, other rtl objects which used to be pseudo-regs
+ may be clobbered to refer to the frame-pointer register.
+ But references that were originally to the frame-pointer can be
+ distinguished from the others because they contain frame_pointer_rtx.
+
+ When to use frame_pointer_rtx and hard_frame_pointer_rtx is a little
+ tricky: until register elimination has taken place hard_frame_pointer_rtx
+ should be used if it is being set, and frame_pointer_rtx otherwise. After
+ register elimination hard_frame_pointer_rtx should always be used.
+ On machines where the two registers are the same (most machines) these
+ rtxs are one and the same.
+
+ In an inline procedure, the stack and frame pointer rtxs may not be
+ used for anything else. */
+rtx stack_pointer_rtx; /* (REG:Pmode STACK_POINTER_REGNUM) */
+rtx frame_pointer_rtx; /* (REG:Pmode FRAME_POINTER_REGNUM) */
+rtx hard_frame_pointer_rtx; /* (REG:Pmode HARD_FRAME_POINTER_REGNUM) */
+rtx arg_pointer_rtx; /* (REG:Pmode ARG_POINTER_REGNUM) */
+rtx struct_value_rtx; /* (REG:Pmode STRUCT_VALUE_REGNUM) */
+rtx struct_value_incoming_rtx; /* (REG:Pmode STRUCT_VALUE_INCOMING_REGNUM) */
+rtx static_chain_rtx; /* (REG:Pmode STATIC_CHAIN_REGNUM) */
+rtx static_chain_incoming_rtx; /* (REG:Pmode STATIC_CHAIN_INCOMING_REGNUM) */
+rtx pic_offset_table_rtx; /* (REG:Pmode PIC_OFFSET_TABLE_REGNUM) */
+
+rtx virtual_incoming_args_rtx; /* (REG:Pmode VIRTUAL_INCOMING_ARGS_REGNUM) */
+rtx virtual_stack_vars_rtx; /* (REG:Pmode VIRTUAL_STACK_VARS_REGNUM) */
+rtx virtual_stack_dynamic_rtx; /* (REG:Pmode VIRTUAL_STACK_DYNAMIC_REGNUM) */
+rtx virtual_outgoing_args_rtx; /* (REG:Pmode VIRTUAL_OUTGOING_ARGS_REGNUM) */
+
+/* We make one copy of (const_int C) where C is in
+ [- MAX_SAVED_CONST_INT, MAX_SAVED_CONST_INT]
+ to save space during the compilation and simplify comparisons of
+ integers. */
+
+#define MAX_SAVED_CONST_INT 64
+
+static rtx const_int_rtx[MAX_SAVED_CONST_INT * 2 + 1];
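+
+/* Note that because of this sharing, two CONST_INT rtx's whose values
+ lie in the range above may be compared for equality with a simple
+ pointer comparison, rather than by examining their INTVALs. */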
+
+/* The ends of the doubly-linked chain of rtl for the current function.
+ Both are reset to null at the start of rtl generation for the function.
+
+ start_sequence saves both of these on `sequence_stack' along with
+ `sequence_rtl_expr' and then starts a new, nested sequence of insns. */
+
+static rtx first_insn = NULL;
+static rtx last_insn = NULL;
+
+/* RTL_EXPR within which the current sequence will be placed. Used to
+ prevent reuse of any temporaries within the sequence until after the
+ RTL_EXPR is emitted. */
+
+tree sequence_rtl_expr = NULL;
+
+/* INSN_UID for next insn emitted.
+ Reset to 1 for each function compiled. */
+
+static int cur_insn_uid = 1;
+
+/* Line number and source file of the last line-number NOTE emitted.
+ This is used to avoid generating duplicates. */
+
+static int last_linenum = 0;
+static char *last_filename = 0;
+
+/* A vector indexed by pseudo reg number. The allocated length
+ of this vector is regno_pointer_flag_length. Since this
+ vector is needed during the expansion phase when the total
+ number of registers in the function is not yet known,
+ it is copied and made bigger when necessary. */
+
+char *regno_pointer_flag;
+int regno_pointer_flag_length;
+
+/* Indexed by pseudo register number, gives the rtx for that pseudo.
+ Allocated in parallel with regno_pointer_flag. */
+
+rtx *regno_reg_rtx;
+
+/* Stack of pending (incomplete) sequences saved by `start_sequence'.
+ Each element describes one pending sequence.
+ The main insn-chain is saved in the last element of the chain,
+ unless the chain is empty. */
+
+struct sequence_stack *sequence_stack;
+
+/* start_sequence and gen_sequence can make a lot of rtx expressions which are
+ shortly thrown away. We use two mechanisms to prevent this waste:
+
+ First, we keep a list of the expressions used to represent the sequence
+ stack in sequence_element_free_list.
+
+   Second, for each size below SEQUENCE_RESULT_SIZE, we keep a SEQUENCE and
+   its associated rtvec for use by gen_sequence.  One entry per size is
+   sufficient because most cases are calls to gen_sequence followed by
+   immediately
+ emitting the SEQUENCE. Reuse is safe since emitting a sequence is
+   destructive on the insns in it anyway and hence can't be redone.
+
+   We do not bother to save this cached data over nested function calls.
+   Instead, we just reinitialize it.  */
+
+#define SEQUENCE_RESULT_SIZE 5
+
+static struct sequence_stack *sequence_element_free_list;
+static rtx sequence_result[SEQUENCE_RESULT_SIZE];
+
+extern int rtx_equal_function_value_matters;
+
+/* Filename and line number of last line-number note,
+ whether we actually emitted it or not. */
+extern char *emit_filename;
+extern int emit_lineno;
+
+rtx change_address ();
+void init_emit ();
+
+extern struct obstack *rtl_obstack;
+
+extern int stack_depth;
+extern int max_stack_depth;
+
+/* rtx gen_rtx (code, mode, [element1, ..., elementn])
+**
+** This routine generates an RTX of the size specified by
+** <code>, which is an RTX code. The RTX structure is initialized
+** from the arguments <element1> through <elementn>, which are
+** interpreted according to the specific RTX type's format. The
+** special machine mode associated with the rtx (if any) is specified
+** in <mode>.
+**
+** gen_rtx can be invoked in a way which resembles the lisp-like
+** rtx it will generate. For example, the following rtx structure:
+**
+** (plus:QI (mem:QI (reg:SI 1))
+**            (mem:QI (plus:SI (reg:SI 2) (reg:SI 3))))
+**
+** ...would be generated by the following C code:
+**
+** gen_rtx (PLUS, QImode,
+** gen_rtx (MEM, QImode,
+** gen_rtx (REG, SImode, 1)),
+** gen_rtx (MEM, QImode,
+** gen_rtx (PLUS, SImode,
+** gen_rtx (REG, SImode, 2),
+**                        gen_rtx (REG, SImode, 3))));
+*/
+
+/*VARARGS2*/
+rtx
+gen_rtx VPROTO((enum rtx_code code, enum machine_mode mode, ...))
+{
+#ifndef __STDC__
+ enum rtx_code code;
+ enum machine_mode mode;
+#endif
+ va_list p;
+ register int i; /* Array indices... */
+ register char *fmt; /* Current rtx's format... */
+ register rtx rt_val; /* RTX to return to caller... */
+
+ VA_START (p, mode);
+
+#ifndef __STDC__
+ code = va_arg (p, enum rtx_code);
+ mode = va_arg (p, enum machine_mode);
+#endif
+
+ if (code == CONST_INT)
+ {
+ HOST_WIDE_INT arg = va_arg (p, HOST_WIDE_INT);
+
+ if (arg >= - MAX_SAVED_CONST_INT && arg <= MAX_SAVED_CONST_INT)
+ return const_int_rtx[arg + MAX_SAVED_CONST_INT];
+
+ if (const_true_rtx && arg == STORE_FLAG_VALUE)
+ return const_true_rtx;
+
+ rt_val = rtx_alloc (code);
+ INTVAL (rt_val) = arg;
+ }
+ else if (code == REG)
+ {
+ int regno = va_arg (p, int);
+
+ /* In case the MD file explicitly references the frame pointer, have
+ all such references point to the same frame pointer. This is used
+ during frame pointer elimination to distinguish the explicit
+ references to these registers from pseudos that happened to be
+ assigned to them.
+
+ If we have eliminated the frame pointer or arg pointer, we will
+ be using it as a normal register, for example as a spill register.
+ In such cases, we might be accessing it in a mode that is not
+ Pmode and therefore cannot use the pre-allocated rtx.
+
+ Also don't do this when we are making new REGs in reload,
+ since we don't want to get confused with the real pointers. */
+
+ if (frame_pointer_rtx && regno == FRAME_POINTER_REGNUM && mode == Pmode
+ && ! reload_in_progress)
+ return frame_pointer_rtx;
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ if (hard_frame_pointer_rtx && regno == HARD_FRAME_POINTER_REGNUM
+ && mode == Pmode && ! reload_in_progress)
+ return hard_frame_pointer_rtx;
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM && HARD_FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ if (arg_pointer_rtx && regno == ARG_POINTER_REGNUM && mode == Pmode
+ && ! reload_in_progress)
+ return arg_pointer_rtx;
+#endif
+ if (stack_pointer_rtx && regno == STACK_POINTER_REGNUM && mode == Pmode
+ && ! reload_in_progress)
+ return stack_pointer_rtx;
+ else
+ {
+ rt_val = rtx_alloc (code);
+ rt_val->mode = mode;
+ REGNO (rt_val) = regno;
+ return rt_val;
+ }
+ }
+ else
+ {
+ rt_val = rtx_alloc (code); /* Allocate the storage space. */
+ rt_val->mode = mode; /* Store the machine mode... */
+
+ fmt = GET_RTX_FORMAT (code); /* Find the right format... */
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*fmt++)
+ {
+ case '0': /* Unused field. */
+ break;
+
+ case 'i': /* An integer? */
+ XINT (rt_val, i) = va_arg (p, int);
+ break;
+
+ case 'w': /* A wide integer? */
+ XWINT (rt_val, i) = va_arg (p, HOST_WIDE_INT);
+ break;
+
+ case 's': /* A string? */
+ XSTR (rt_val, i) = va_arg (p, char *);
+ break;
+
+ case 'e': /* An expression? */
+ case 'u': /* An insn? Same except when printing. */
+ XEXP (rt_val, i) = va_arg (p, rtx);
+ break;
+
+ case 'E': /* An RTX vector? */
+ XVEC (rt_val, i) = va_arg (p, rtvec);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ }
+ va_end (p);
+ return rt_val; /* Return the new RTX... */
+}
+
+/* gen_rtvec (n, [rt1, ..., rtn])
+**
+** This routine creates an rtvec and stores within it the
+** pointers to rtx's which are its arguments.
+*/
+
+/*VARARGS1*/
+rtvec
+gen_rtvec VPROTO((int n, ...))
+{
+#ifndef __STDC__
+ int n;
+#endif
+ int i;
+ va_list p;
+ rtx *vector;
+
+ VA_START (p, n);
+
+#ifndef __STDC__
+ n = va_arg (p, int);
+#endif
+
+ if (n == 0)
+ return NULL_RTVEC; /* Don't allocate an empty rtvec... */
+
+ vector = (rtx *) alloca (n * sizeof (rtx));
+
+ for (i = 0; i < n; i++)
+ vector[i] = va_arg (p, rtx);
+ va_end (p);
+
+ return gen_rtvec_v (n, vector);
+}
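+
+/* Usage sketch (illustrative; `set1' and `set2' stand for hypothetical
+   rtx expressions):
+
+	rtvec v = gen_rtvec (2, set1, set2);
+	rtx body = gen_rtx (PARALLEL, VOIDmode, v);  */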
+
+rtvec
+gen_rtvec_v (n, argp)
+ int n;
+ rtx *argp;
+{
+ register int i;
+ register rtvec rt_val;
+
+ if (n == 0)
+ return NULL_RTVEC; /* Don't allocate an empty rtvec... */
+
+ rt_val = rtvec_alloc (n); /* Allocate an rtvec... */
+
+ for (i = 0; i < n; i++)
+ rt_val->elem[i].rtx = *argp++;
+
+ return rt_val;
+}
+
+/* Generate a REG rtx for a new pseudo register of mode MODE.
+ This pseudo is assigned the next sequential register number. */
+
+rtx
+gen_reg_rtx (mode)
+ enum machine_mode mode;
+{
+ register rtx val;
+
+ /* Don't let anything called by or after reload create new registers
+ (actually, registers can't be created after flow, but this is a good
+ approximation). */
+
+ if (reload_in_progress || reload_completed)
+ abort ();
+
+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
+ {
+ /* For complex modes, don't make a single pseudo.
+ Instead, make a CONCAT of two pseudos.
+ This allows noncontiguous allocation of the real and imaginary parts,
+ which makes much better code. Besides, allocating DCmode
+ pseudos overstrains reload on some machines like the 386. */
+ rtx realpart, imagpart;
+ int size = GET_MODE_UNIT_SIZE (mode);
+ enum machine_mode partmode
+ = mode_for_size (size * BITS_PER_UNIT,
+ (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+ ? MODE_FLOAT : MODE_INT),
+ 0);
+
+ realpart = gen_reg_rtx (partmode);
+ imagpart = gen_reg_rtx (partmode);
+ return gen_rtx (CONCAT, mode, realpart, imagpart);
+ }
+
+ /* Make sure regno_pointer_flag and regno_reg_rtx are large
+ enough to have an element for this pseudo reg number. */
+
+ if (reg_rtx_no == regno_pointer_flag_length)
+ {
+ rtx *new1;
+ char *new =
+ (char *) oballoc (regno_pointer_flag_length * 2);
+ bcopy (regno_pointer_flag, new, regno_pointer_flag_length);
+ bzero (&new[regno_pointer_flag_length], regno_pointer_flag_length);
+ regno_pointer_flag = new;
+
+ new1 = (rtx *) oballoc (regno_pointer_flag_length * 2 * sizeof (rtx));
+ bcopy ((char *) regno_reg_rtx, (char *) new1,
+ regno_pointer_flag_length * sizeof (rtx));
+ bzero ((char *) &new1[regno_pointer_flag_length],
+ regno_pointer_flag_length * sizeof (rtx));
+ regno_reg_rtx = new1;
+
+ regno_pointer_flag_length *= 2;
+ }
+
+ val = gen_rtx (REG, mode, reg_rtx_no);
+ regno_reg_rtx[reg_rtx_no++] = val;
+ return val;
+}
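+
+/* For example (a sketch): gen_reg_rtx (SImode) hands back a fresh SImode
+   pseudo whose number is the old value of reg_rtx_no, while
+   gen_reg_rtx (DCmode) would instead yield a CONCAT of two DFmode pseudos,
+   per the complex-mode case above.  */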
+
+/* Identify REG as a probable pointer register. */
+
+void
+mark_reg_pointer (reg)
+ rtx reg;
+{
+ REGNO_POINTER_FLAG (REGNO (reg)) = 1;
+}
+
+/* Return 1 plus largest pseudo reg number used in the current function. */
+
+int
+max_reg_num ()
+{
+ return reg_rtx_no;
+}
+
+/* Return 1 + the largest label number used so far in the current function. */
+
+int
+max_label_num ()
+{
+ if (last_label_num && label_num == base_label_num)
+ return last_label_num;
+ return label_num;
+}
+
+/* Return first label number used in this function (if any were used). */
+
+int
+get_first_label_num ()
+{
+ return first_label_num;
+}
+
+/* Return a value representing some low-order bits of X, where the number
+ of low-order bits is given by MODE. Note that no conversion is done
+ between floating-point and fixed-point values, rather, the bit
+ representation is returned.
+
+ This function handles the cases in common between gen_lowpart, below,
+ and two variants in cse.c and combine.c. These are the cases that can
+ be safely handled at all points in the compilation.
+
+ If this is not a case we can handle, return 0. */
+
+rtx
+gen_lowpart_common (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ int word = 0;
+
+ if (GET_MODE (x) == mode)
+ return x;
+
+ /* MODE must occupy no more words than the mode of X. */
+ if (GET_MODE (x) != VOIDmode
+ && ((GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD
+ > ((GET_MODE_SIZE (GET_MODE (x)) + (UNITS_PER_WORD - 1))
+ / UNITS_PER_WORD)))
+ return 0;
+
+ if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
+ word = ((GET_MODE_SIZE (GET_MODE (x))
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD))
+ / UNITS_PER_WORD);
+
+ if ((GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
+ && (GET_MODE_CLASS (mode) == MODE_INT
+ || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT))
+ {
+ /* If we are getting the low-order part of something that has been
+ sign- or zero-extended, we can either just use the object being
+ extended or make a narrower extension. If we want an even smaller
+ piece than the size of the object being extended, call ourselves
+ recursively.
+
+ This case is used mostly by combine and cse. */
+
+ if (GET_MODE (XEXP (x, 0)) == mode)
+ return XEXP (x, 0);
+ else if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
+ return gen_lowpart_common (mode, XEXP (x, 0));
+ else if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x)))
+ return gen_rtx (GET_CODE (x), mode, XEXP (x, 0));
+ }
+ else if (GET_CODE (x) == SUBREG
+ && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
+ || GET_MODE_SIZE (mode) == GET_MODE_UNIT_SIZE (GET_MODE (x))))
+ return (GET_MODE (SUBREG_REG (x)) == mode && SUBREG_WORD (x) == 0
+ ? SUBREG_REG (x)
+ : gen_rtx (SUBREG, mode, SUBREG_REG (x), SUBREG_WORD (x)));
+ else if (GET_CODE (x) == REG)
+ {
+ /* If the register is not valid for MODE, return 0. If we don't
+ do this, there is no way to fix up the resulting REG later.
+	 We do, however, allow it if the current REG is not valid for its
+	 own mode.  The latter is a kludge, but it is required due to the
+	 way that parameters are passed on some machines, most
+	 notably Sparc.  */
+ if (REGNO (x) < FIRST_PSEUDO_REGISTER
+ && ! HARD_REGNO_MODE_OK (REGNO (x) + word, mode)
+ && HARD_REGNO_MODE_OK (REGNO (x), GET_MODE (x)))
+ return 0;
+ else if (REGNO (x) < FIRST_PSEUDO_REGISTER
+ /* integrate.c can't handle parts of a return value register. */
+ && (! REG_FUNCTION_VALUE_P (x)
+ || ! rtx_equal_function_value_matters)
+ /* We want to keep the stack, frame, and arg pointers
+ special. */
+ && x != frame_pointer_rtx
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && x != arg_pointer_rtx
+#endif
+ && x != stack_pointer_rtx)
+ return gen_rtx (REG, mode, REGNO (x) + word);
+ else
+ return gen_rtx (SUBREG, mode, x, word);
+ }
+ /* If X is a CONST_INT or a CONST_DOUBLE, extract the appropriate bits
+ from the low-order part of the constant. */
+ else if ((GET_MODE_CLASS (mode) == MODE_INT
+ || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ && GET_MODE (x) == VOIDmode
+ && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
+ {
+ /* If MODE is twice the host word size, X is already the desired
+ representation. Otherwise, if MODE is wider than a word, we can't
+ do this. If MODE is exactly a word, return just one CONST_INT.
+ If MODE is smaller than a word, clear the bits that don't belong
+ in our mode, unless they and our sign bit are all one. So we get
+ either a reasonable negative value or a reasonable unsigned value
+ for this mode. */
+
+ if (GET_MODE_BITSIZE (mode) == 2 * HOST_BITS_PER_WIDE_INT)
+ return x;
+ else if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
+ return 0;
+ else if (GET_MODE_BITSIZE (mode) == HOST_BITS_PER_WIDE_INT)
+ return (GET_CODE (x) == CONST_INT ? x
+ : GEN_INT (CONST_DOUBLE_LOW (x)));
+ else
+ {
+	  /* MODE must be narrower than HOST_BITS_PER_WIDE_INT.  */
+ int width = GET_MODE_BITSIZE (mode);
+ HOST_WIDE_INT val = (GET_CODE (x) == CONST_INT ? INTVAL (x)
+ : CONST_DOUBLE_LOW (x));
+
+ if (((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
+ != ((HOST_WIDE_INT) (-1) << (width - 1))))
+ val &= ((HOST_WIDE_INT) 1 << width) - 1;
+
+ return (GET_CODE (x) == CONST_INT && INTVAL (x) == val ? x
+ : GEN_INT (val));
+ }
+ }
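+
+  /* Worked example (illustrative): narrowing (const_int 0x123) to QImode
+     gives width == 8; 0x123 & ~0x7f == 0x100, which differs from ~0x7f,
+     so the value is masked down to 0x23.  Narrowing (const_int -5) keeps
+     -5, since its bits from the sign bit upward are already all ones.  */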
+
+ /* If X is an integral constant but we want it in floating-point, it
+ must be the case that we have a union of an integer and a floating-point
+ value. If the machine-parameters allow it, simulate that union here
+ and return the result. The two-word and single-word cases are
+ different. */
+
+ else if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+ && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ || flag_pretend_float)
+ && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD
+ && GET_CODE (x) == CONST_INT
+ && sizeof (float) * HOST_BITS_PER_CHAR == HOST_BITS_PER_WIDE_INT)
+#ifdef REAL_ARITHMETIC
+ {
+ REAL_VALUE_TYPE r;
+ HOST_WIDE_INT i;
+
+ i = INTVAL (x);
+ r = REAL_VALUE_FROM_TARGET_SINGLE (i);
+ return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
+ }
+#else
+ {
+ union {HOST_WIDE_INT i; float d; } u;
+
+ u.i = INTVAL (x);
+ return CONST_DOUBLE_FROM_REAL_VALUE (u.d, mode);
+ }
+#endif
+ else if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+ && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ || flag_pretend_float)
+ && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
+ && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
+ && GET_MODE (x) == VOIDmode
+ && (sizeof (double) * HOST_BITS_PER_CHAR
+ == 2 * HOST_BITS_PER_WIDE_INT))
+#ifdef REAL_ARITHMETIC
+ {
+ REAL_VALUE_TYPE r;
+ HOST_WIDE_INT i[2];
+ HOST_WIDE_INT low, high;
+
+ if (GET_CODE (x) == CONST_INT)
+ low = INTVAL (x), high = low >> (HOST_BITS_PER_WIDE_INT -1);
+ else
+ low = CONST_DOUBLE_LOW (x), high = CONST_DOUBLE_HIGH (x);
+
+ /* REAL_VALUE_TARGET_DOUBLE takes the addressing order of the
+ target machine. */
+ if (WORDS_BIG_ENDIAN)
+ i[0] = high, i[1] = low;
+ else
+ i[0] = low, i[1] = high;
+
+ r = REAL_VALUE_FROM_TARGET_DOUBLE (i);
+ return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
+ }
+#else
+ {
+ union {HOST_WIDE_INT i[2]; double d; } u;
+ HOST_WIDE_INT low, high;
+
+ if (GET_CODE (x) == CONST_INT)
+ low = INTVAL (x), high = low >> (HOST_BITS_PER_WIDE_INT -1);
+ else
+ low = CONST_DOUBLE_LOW (x), high = CONST_DOUBLE_HIGH (x);
+
+#ifdef HOST_WORDS_BIG_ENDIAN
+ u.i[0] = high, u.i[1] = low;
+#else
+ u.i[0] = low, u.i[1] = high;
+#endif
+
+ return CONST_DOUBLE_FROM_REAL_VALUE (u.d, mode);
+ }
+#endif
+ /* Similarly, if this is converting a floating-point value into a
+     single-word integer.  Only do this if the host and target parameters are
+ compatible. */
+
+ else if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+ && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ || flag_pretend_float)
+ && (GET_MODE_CLASS (mode) == MODE_INT
+ || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ && GET_CODE (x) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT
+ && GET_MODE_BITSIZE (mode) == BITS_PER_WORD)
+ return operand_subword (x, 0, 0, GET_MODE (x));
+
+ /* Similarly, if this is converting a floating-point value into a
+ two-word integer, we can do this one word at a time and make an
+     integer.  Only do this if the host and target parameters are
+ compatible. */
+
+ else if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+ && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ || flag_pretend_float)
+ && (GET_MODE_CLASS (mode) == MODE_INT
+ || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ && GET_CODE (x) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT
+ && GET_MODE_BITSIZE (mode) == 2 * BITS_PER_WORD)
+ {
+ rtx lowpart = operand_subword (x, WORDS_BIG_ENDIAN, 0, GET_MODE (x));
+ rtx highpart = operand_subword (x, ! WORDS_BIG_ENDIAN, 0, GET_MODE (x));
+
+ if (lowpart && GET_CODE (lowpart) == CONST_INT
+ && highpart && GET_CODE (highpart) == CONST_INT)
+ return immed_double_const (INTVAL (lowpart), INTVAL (highpart), mode);
+ }
+
+ /* Otherwise, we can't do this. */
+ return 0;
+}
+
+/* Return the real part (which has mode MODE) of a complex value X.
+ This always comes at the low address in memory. */
+
+rtx
+gen_realpart (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ if (GET_CODE (x) == CONCAT && GET_MODE (XEXP (x, 0)) == mode)
+ return XEXP (x, 0);
+ else if (WORDS_BIG_ENDIAN)
+ return gen_highpart (mode, x);
+ else
+ return gen_lowpart (mode, x);
+}
+
+/* Return the imaginary part (which has mode MODE) of a complex value X.
+ This always comes at the high address in memory. */
+
+rtx
+gen_imagpart (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ if (GET_CODE (x) == CONCAT && GET_MODE (XEXP (x, 0)) == mode)
+ return XEXP (x, 1);
+ else if (WORDS_BIG_ENDIAN)
+ return gen_lowpart (mode, x);
+ else
+ return gen_highpart (mode, x);
+}
+
+/* Return 1 iff X, assumed to be a SUBREG,
+ refers to the real part of the complex value in its containing reg.
+ Complex values are always stored with the real part in the first word,
+ regardless of WORDS_BIG_ENDIAN. */
+
+int
+subreg_realpart_p (x)
+ rtx x;
+{
+ if (GET_CODE (x) != SUBREG)
+ abort ();
+
+ return SUBREG_WORD (x) == 0;
+}
+
+/* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a value,
+ return an rtx (MEM, SUBREG, or CONST_INT) that refers to the
+ least-significant part of X.
+ MODE specifies how big a part of X to return;
+ it usually should not be larger than a word.
+ If X is a MEM whose address is a QUEUED, the value may be so also. */
+
+rtx
+gen_lowpart (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ rtx result = gen_lowpart_common (mode, x);
+
+ if (result)
+ return result;
+ else if (GET_CODE (x) == MEM)
+ {
+ /* The only additional case we can do is MEM. */
+ register int offset = 0;
+ if (WORDS_BIG_ENDIAN)
+ offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
+
+ if (BYTES_BIG_ENDIAN)
+ /* Adjust the address so that the address-after-the-data
+ is unchanged. */
+ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
+
+ return change_address (x, mode, plus_constant (XEXP (x, 0), offset));
+ }
+ else
+ abort ();
+}
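+
+/* For example (a sketch, assuming a typical 32-bit target): applied to a
+   pseudo, gen_lowpart (QImode, (reg:SI n)) yields (subreg:QI (reg:SI n) 0);
+   applied to a MEM it adjusts the address as above; anything else aborts.  */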
+
+/* Like `gen_lowpart', but refer to the most significant part.
+ This is used to access the imaginary part of a complex number. */
+
+rtx
+gen_highpart (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ /* This case loses if X is a subreg. To catch bugs early,
+ complain if an invalid MODE is used even in other cases. */
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && GET_MODE_SIZE (mode) != GET_MODE_UNIT_SIZE (GET_MODE (x)))
+ abort ();
+ if (GET_CODE (x) == CONST_DOUBLE
+#if !(TARGET_FLOAT_FORMAT != HOST_FLOAT_FORMAT || defined (REAL_IS_NOT_DOUBLE))
+ && GET_MODE_CLASS (GET_MODE (x)) != MODE_FLOAT
+#endif
+ )
+ return gen_rtx (CONST_INT, VOIDmode,
+ CONST_DOUBLE_HIGH (x) & GET_MODE_MASK (mode));
+ else if (GET_CODE (x) == CONST_INT)
+ return const0_rtx;
+ else if (GET_CODE (x) == MEM)
+ {
+ register int offset = 0;
+ if (! WORDS_BIG_ENDIAN)
+ offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
+
+ if (! BYTES_BIG_ENDIAN
+ && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ offset -= (GET_MODE_SIZE (mode)
+ - MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (x))));
+
+ return change_address (x, mode, plus_constant (XEXP (x, 0), offset));
+ }
+ else if (GET_CODE (x) == SUBREG)
+ {
+ /* The only time this should occur is when we are looking at a
+ multi-word item with a SUBREG whose mode is the same as that of the
+ item. It isn't clear what we would do if it wasn't. */
+ if (SUBREG_WORD (x) != 0)
+ abort ();
+ return gen_highpart (mode, SUBREG_REG (x));
+ }
+ else if (GET_CODE (x) == REG)
+ {
+ int word = 0;
+
+ if (! WORDS_BIG_ENDIAN
+ && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
+ word = ((GET_MODE_SIZE (GET_MODE (x))
+ - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD))
+ / UNITS_PER_WORD);
+
+ if (REGNO (x) < FIRST_PSEUDO_REGISTER
+ /* integrate.c can't handle parts of a return value register. */
+ && (! REG_FUNCTION_VALUE_P (x)
+ || ! rtx_equal_function_value_matters)
+ /* We want to keep the stack, frame, and arg pointers special. */
+ && x != frame_pointer_rtx
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && x != arg_pointer_rtx
+#endif
+ && x != stack_pointer_rtx)
+ return gen_rtx (REG, mode, REGNO (x) + word);
+ else
+ return gen_rtx (SUBREG, mode, x, word);
+ }
+ else
+ abort ();
+}
+
+/* Return 1 iff X, assumed to be a SUBREG,
+ refers to the least significant part of its containing reg.
+ If X is not a SUBREG, always return 1 (it is its own low part!). */
+
+int
+subreg_lowpart_p (x)
+ rtx x;
+{
+ if (GET_CODE (x) != SUBREG)
+ return 1;
+
+ if (WORDS_BIG_ENDIAN
+ && GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) > UNITS_PER_WORD)
+ return (SUBREG_WORD (x)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
+ - MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD))
+ / UNITS_PER_WORD));
+
+ return SUBREG_WORD (x) == 0;
+}
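+
+/* Example (a sketch): with 32-bit words on a WORDS_BIG_ENDIAN target, the
+   low part of a DImode value is word 1, so (subreg:SI (reg:DI n) 1)
+   satisfies this predicate and (subreg:SI (reg:DI n) 0) does not.  */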
+
+/* Return subword I of operand OP.
+ The word number, I, is interpreted as the word number starting at the
+ low-order address. Word 0 is the low-order word if not WORDS_BIG_ENDIAN,
+ otherwise it is the high-order word.
+
+ If we cannot extract the required word, we return zero. Otherwise, an
+ rtx corresponding to the requested word will be returned.
+
+ VALIDATE_ADDRESS is nonzero if the address should be validated. Before
+ reload has completed, a valid address will always be returned. After
+ reload, if a valid address cannot be returned, we return zero.
+
+ If VALIDATE_ADDRESS is zero, we simply form the required address; validating
+ it is the responsibility of the caller.
+
+ MODE is the mode of OP in case it is a CONST_INT. */
+
+rtx
+operand_subword (op, i, validate_address, mode)
+ rtx op;
+ int i;
+ int validate_address;
+ enum machine_mode mode;
+{
+ HOST_WIDE_INT val;
+ int size_ratio = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+
+ if (mode == VOIDmode)
+ abort ();
+
+ /* If OP is narrower than a word or if we want a word outside OP, fail. */
+ if (mode != BLKmode
+ && (GET_MODE_SIZE (mode) < UNITS_PER_WORD
+ || (i + 1) * UNITS_PER_WORD > GET_MODE_SIZE (mode)))
+ return 0;
+
+ /* If OP is already an integer word, return it. */
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD)
+ return op;
+
+ /* If OP is a REG or SUBREG, we can handle it very simply. */
+ if (GET_CODE (op) == REG)
+ {
+ /* If the register is not valid for MODE, return 0. If we don't
+ do this, there is no way to fix up the resulting REG later. */
+ if (REGNO (op) < FIRST_PSEUDO_REGISTER
+ && ! HARD_REGNO_MODE_OK (REGNO (op) + i, word_mode))
+ return 0;
+ else if (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || (REG_FUNCTION_VALUE_P (op)
+ && rtx_equal_function_value_matters)
+ /* We want to keep the stack, frame, and arg pointers
+ special. */
+ || op == frame_pointer_rtx
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || op == arg_pointer_rtx
+#endif
+ || op == stack_pointer_rtx)
+ return gen_rtx (SUBREG, word_mode, op, i);
+ else
+ return gen_rtx (REG, word_mode, REGNO (op) + i);
+ }
+ else if (GET_CODE (op) == SUBREG)
+ return gen_rtx (SUBREG, word_mode, SUBREG_REG (op), i + SUBREG_WORD (op));
+ else if (GET_CODE (op) == CONCAT)
+ {
+ int partwords = GET_MODE_UNIT_SIZE (GET_MODE (op)) / UNITS_PER_WORD;
+ if (i < partwords)
+ return operand_subword (XEXP (op, 0), i, validate_address, mode);
+ return operand_subword (XEXP (op, 1), i - partwords,
+ validate_address, mode);
+ }
+
+ /* Form a new MEM at the requested address. */
+ if (GET_CODE (op) == MEM)
+ {
+ rtx addr = plus_constant (XEXP (op, 0), i * UNITS_PER_WORD);
+ rtx new;
+
+ if (validate_address)
+ {
+ if (reload_completed)
+ {
+ if (! strict_memory_address_p (word_mode, addr))
+ return 0;
+ }
+ else
+ addr = memory_address (word_mode, addr);
+ }
+
+ new = gen_rtx (MEM, word_mode, addr);
+
+ MEM_VOLATILE_P (new) = MEM_VOLATILE_P (op);
+ MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (op);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (op);
+
+ return new;
+ }
+
+ /* The only remaining cases are when OP is a constant. If the host and
+ target floating formats are the same, handling two-word floating
+     constants is easy.  Note that REAL_VALUE_TO_TARGET_{SINGLE,DOUBLE}
+ are defined as returning one or two 32 bit values, respectively,
+ and not values of BITS_PER_WORD bits. */
+#ifdef REAL_ARITHMETIC
+  /* The output is some bits, the width of the target machine's word.
+     A wider-word host can surely hold them in a CONST_INT.  A narrower-word
+     host can't.  */
+ if (HOST_BITS_PER_WIDE_INT >= BITS_PER_WORD
+ && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_BITSIZE (mode) == 64
+ && GET_CODE (op) == CONST_DOUBLE)
+ {
+ long k[2];
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
+
+ /* We handle 32-bit and >= 64-bit words here. Note that the order in
+ which the words are written depends on the word endianness.
+
+ ??? This is a potential portability problem and should
+ be fixed at some point. */
+ if (BITS_PER_WORD == 32)
+ return GEN_INT ((HOST_WIDE_INT) k[i]);
+#if HOST_BITS_PER_WIDE_INT > 32
+ else if (BITS_PER_WORD >= 64 && i == 0)
+ return GEN_INT ((((HOST_WIDE_INT) k[! WORDS_BIG_ENDIAN]) << 32)
+ | (HOST_WIDE_INT) k[WORDS_BIG_ENDIAN]);
+#endif
+ else
+ abort ();
+ }
+#else /* no REAL_ARITHMETIC */
+ if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+ && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ || flag_pretend_float)
+ && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
+ && GET_CODE (op) == CONST_DOUBLE)
+ {
+ /* The constant is stored in the host's word-ordering,
+ but we want to access it in the target's word-ordering. Some
+ compilers don't like a conditional inside macro args, so we have two
+ copies of the return. */
+#ifdef HOST_WORDS_BIG_ENDIAN
+ return GEN_INT (i == WORDS_BIG_ENDIAN
+ ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op));
+#else
+ return GEN_INT (i != WORDS_BIG_ENDIAN
+ ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op));
+#endif
+ }
+#endif /* no REAL_ARITHMETIC */
+
+ /* Single word float is a little harder, since single- and double-word
+ values often do not have the same high-order bits. We have already
+ verified that we want the only defined word of the single-word value. */
+#ifdef REAL_ARITHMETIC
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_BITSIZE (mode) == 32
+ && GET_CODE (op) == CONST_DOUBLE)
+ {
+ long l;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+ return GEN_INT ((HOST_WIDE_INT) l);
+ }
+#else
+ if (((HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+ && HOST_BITS_PER_WIDE_INT == BITS_PER_WORD)
+ || flag_pretend_float)
+ && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && GET_MODE_SIZE (mode) == UNITS_PER_WORD
+ && GET_CODE (op) == CONST_DOUBLE)
+ {
+ double d;
+ union {float f; HOST_WIDE_INT i; } u;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, op);
+
+ u.f = d;
+ return GEN_INT (u.i);
+ }
+#endif /* no REAL_ARITHMETIC */
+
+ /* The only remaining cases that we can handle are integers.
+ Convert to proper endianness now since these cases need it.
+ At this point, i == 0 means the low-order word.
+
+     We do not want to handle the case when BITS_PER_WORD > HOST_BITS_PER_INT
+ in general. However, if OP is (const_int 0), we can just return
+ it for any word. */
+
+ if (op == const0_rtx)
+ return op;
+
+ if (GET_MODE_CLASS (mode) != MODE_INT
+ || (GET_CODE (op) != CONST_INT && GET_CODE (op) != CONST_DOUBLE)
+ || BITS_PER_WORD > HOST_BITS_PER_INT)
+ return 0;
+
+ if (WORDS_BIG_ENDIAN)
+ i = GET_MODE_SIZE (mode) / UNITS_PER_WORD - 1 - i;
+
+ /* Find out which word on the host machine this value is in and get
+ it from the constant. */
+ val = (i / size_ratio == 0
+ ? (GET_CODE (op) == CONST_INT ? INTVAL (op) : CONST_DOUBLE_LOW (op))
+ : (GET_CODE (op) == CONST_INT
+ ? (INTVAL (op) < 0 ? ~0 : 0) : CONST_DOUBLE_HIGH (op)));
+
+ /* If BITS_PER_WORD is smaller than an int, get the appropriate bits. */
+ if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT)
+ val = ((val >> ((i % size_ratio) * BITS_PER_WORD))
+ & (((HOST_WIDE_INT) 1
+ << (BITS_PER_WORD % HOST_BITS_PER_WIDE_INT)) - 1));
+
+ return GEN_INT (val);
+}
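+
+/* Usage sketch (illustrative, for a DImode operand OP): fetching both words
+   with address validation:
+
+	rtx w0 = operand_subword (op, 0, 1, DImode);	lower-addressed word
+	rtx w1 = operand_subword (op, 1, 1, DImode);	higher-addressed word
+
+   After reload either call may return 0 if no valid word address exists.  */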
+
+/* Similar to `operand_subword', but never return 0. If we can't extract
+ the required subword, put OP into a register and try again. If that fails,
+ abort. We always validate the address in this case. It is not valid
+ to call this function after reload; it is mostly meant for RTL
+ generation.
+
+ MODE is the mode of OP, in case it is CONST_INT. */
+
+rtx
+operand_subword_force (op, i, mode)
+ rtx op;
+ int i;
+ enum machine_mode mode;
+{
+ rtx result = operand_subword (op, i, 1, mode);
+
+ if (result)
+ return result;
+
+ if (mode != BLKmode && mode != VOIDmode)
+ op = force_reg (mode, op);
+
+ result = operand_subword (op, i, 1, mode);
+ if (result == 0)
+ abort ();
+
+ return result;
+}
+
+/* Given a compare instruction, swap the operands.
+ A test instruction is changed into a compare of 0 against the operand. */
+
+void
+reverse_comparison (insn)
+ rtx insn;
+{
+ rtx body = PATTERN (insn);
+ rtx comp;
+
+ if (GET_CODE (body) == SET)
+ comp = SET_SRC (body);
+ else
+ comp = SET_SRC (XVECEXP (body, 0, 0));
+
+ if (GET_CODE (comp) == COMPARE)
+ {
+ rtx op0 = XEXP (comp, 0);
+ rtx op1 = XEXP (comp, 1);
+ XEXP (comp, 0) = op1;
+ XEXP (comp, 1) = op0;
+ }
+ else
+ {
+ rtx new = gen_rtx (COMPARE, VOIDmode,
+ CONST0_RTX (GET_MODE (comp)), comp);
+ if (GET_CODE (body) == SET)
+ SET_SRC (body) = new;
+ else
+ SET_SRC (XVECEXP (body, 0, 0)) = new;
+ }
+}
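+
+/* For instance (a sketch): (set (cc0) (compare (reg:SI 1) (reg:SI 2)))
+   becomes (set (cc0) (compare (reg:SI 2) (reg:SI 1))), while the test
+   (set (cc0) (reg:SI 1)) becomes
+   (set (cc0) (compare (const_int 0) (reg:SI 1))).  */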
+
+/* Return a memory reference like MEMREF, but with its mode changed
+ to MODE and its address changed to ADDR.
+ (VOIDmode means don't change the mode.
+ NULL for ADDR means don't change the address.) */
+
+rtx
+change_address (memref, mode, addr)
+ rtx memref;
+ enum machine_mode mode;
+ rtx addr;
+{
+ rtx new;
+
+ if (GET_CODE (memref) != MEM)
+ abort ();
+ if (mode == VOIDmode)
+ mode = GET_MODE (memref);
+ if (addr == 0)
+ addr = XEXP (memref, 0);
+
+ /* If reload is in progress or has completed, ADDR must be valid.
+ Otherwise, we can call memory_address to make it valid. */
+ if (reload_completed || reload_in_progress)
+ {
+ if (! memory_address_p (mode, addr))
+ abort ();
+ }
+ else
+ addr = memory_address (mode, addr);
+
+ new = gen_rtx (MEM, mode, addr);
+ MEM_VOLATILE_P (new) = MEM_VOLATILE_P (memref);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (memref);
+ MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (memref);
+ return new;
+}
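+
+/* Usage sketch (illustrative): viewing an existing memory reference in a
+   different mode without moving it:
+
+	rtx wide = change_address (mem, SImode, NULL_RTX);
+
+   Passing VOIDmode keeps the mode; passing NULL_RTX keeps the address.  */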
+
+/* Return a newly created CODE_LABEL rtx with a unique label number. */
+
+rtx
+gen_label_rtx ()
+{
+ register rtx label;
+
+ label = (output_bytecode
+ ? gen_rtx (CODE_LABEL, VOIDmode, NULL, bc_get_bytecode_label ())
+ : gen_rtx (CODE_LABEL, VOIDmode, 0, 0, 0, label_num++, NULL_PTR));
+
+ LABEL_NUSES (label) = 0;
+ return label;
+}
+
+/* For procedure integration. */
+
+/* Return a newly created INLINE_HEADER rtx. Should allocate this
+ from a permanent obstack when the opportunity arises. */
+
+rtx
+gen_inline_header_rtx (first_insn, first_parm_insn, first_labelno,
+ last_labelno, max_parm_regnum, max_regnum, args_size,
+ pops_args, stack_slots, function_flags,
+ outgoing_args_size, original_arg_vector,
+ original_decl_initial)
+ rtx first_insn, first_parm_insn;
+ int first_labelno, last_labelno, max_parm_regnum, max_regnum, args_size;
+ int pops_args;
+ rtx stack_slots;
+ int function_flags;
+ int outgoing_args_size;
+ rtvec original_arg_vector;
+ rtx original_decl_initial;
+{
+ rtx header = gen_rtx (INLINE_HEADER, VOIDmode,
+ cur_insn_uid++, NULL_RTX,
+ first_insn, first_parm_insn,
+ first_labelno, last_labelno,
+ max_parm_regnum, max_regnum, args_size, pops_args,
+ stack_slots, function_flags, outgoing_args_size,
+ original_arg_vector, original_decl_initial);
+ return header;
+}
+
+/* Install new pointers to the first and last insns in the chain.
+ Used for an inline-procedure after copying the insn chain. */
+
+void
+set_new_first_and_last_insn (first, last)
+ rtx first, last;
+{
+ first_insn = first;
+ last_insn = last;
+}
+
+/* Set the range of label numbers found in the current function.
+ This is used when belatedly compiling an inline function. */
+
+void
+set_new_first_and_last_label_num (first, last)
+ int first, last;
+{
+ base_label_num = label_num;
+ first_label_num = first;
+ last_label_num = last;
+}
+
+/* Save all variables describing the current status into the structure *P.
+ This is used before starting a nested function. */
+
+void
+save_emit_status (p)
+ struct function *p;
+{
+ p->reg_rtx_no = reg_rtx_no;
+ p->first_label_num = first_label_num;
+ p->first_insn = first_insn;
+ p->last_insn = last_insn;
+ p->sequence_rtl_expr = sequence_rtl_expr;
+ p->sequence_stack = sequence_stack;
+ p->cur_insn_uid = cur_insn_uid;
+ p->last_linenum = last_linenum;
+ p->last_filename = last_filename;
+ p->regno_pointer_flag = regno_pointer_flag;
+ p->regno_pointer_flag_length = regno_pointer_flag_length;
+ p->regno_reg_rtx = regno_reg_rtx;
+}
+
+/* Restore all variables describing the current status from the structure *P.
+ This is used after a nested function. */
+
+void
+restore_emit_status (p)
+ struct function *p;
+{
+ int i;
+
+ reg_rtx_no = p->reg_rtx_no;
+ first_label_num = p->first_label_num;
+ last_label_num = 0;
+ first_insn = p->first_insn;
+ last_insn = p->last_insn;
+ sequence_rtl_expr = p->sequence_rtl_expr;
+ sequence_stack = p->sequence_stack;
+ cur_insn_uid = p->cur_insn_uid;
+ last_linenum = p->last_linenum;
+ last_filename = p->last_filename;
+ regno_pointer_flag = p->regno_pointer_flag;
+ regno_pointer_flag_length = p->regno_pointer_flag_length;
+ regno_reg_rtx = p->regno_reg_rtx;
+
+ /* Clear our cache of rtx expressions for start_sequence and gen_sequence. */
+ sequence_element_free_list = 0;
+ for (i = 0; i < SEQUENCE_RESULT_SIZE; i++)
+ sequence_result[i] = 0;
+}
+
+/* Go through all the RTL insn bodies and copy any invalid shared structure.
+ It does not work to do this twice, because the mark bits set here
+ are not cleared afterwards. */
+
+void
+unshare_all_rtl (insn)
+ register rtx insn;
+{
+ for (; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ {
+ PATTERN (insn) = copy_rtx_if_shared (PATTERN (insn));
+ REG_NOTES (insn) = copy_rtx_if_shared (REG_NOTES (insn));
+ LOG_LINKS (insn) = copy_rtx_if_shared (LOG_LINKS (insn));
+ }
+
+ /* Make sure the addresses of stack slots found outside the insn chain
+     (such as in DECL_RTL of a variable) are not shared
+ with the insn chain.
+
+ This special care is necessary when the stack slot MEM does not
+ actually appear in the insn chain. If it does appear, its address
+ is unshared from all else at that point. */
+
+ copy_rtx_if_shared (stack_slot_list);
+}
+
+/* Mark ORIG as in use, and return a copy of it if it was already in use.
+ Recursively does the same for subexpressions. */
+
+rtx
+copy_rtx_if_shared (orig)
+ rtx orig;
+{
+ register rtx x = orig;
+ register int i;
+ register enum rtx_code code;
+ register char *format_ptr;
+ int copied = 0;
+
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+
+ /* These types may be freely shared. */
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case SCRATCH:
+      /* SCRATCH must be shared because each one represents a distinct value.  */
+ return x;
+
+ case CONST:
+ /* CONST can be shared if it contains a SYMBOL_REF. If it contains
+ a LABEL_REF, it isn't sharable. */
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
+ return x;
+ break;
+
+ case INSN:
+ case JUMP_INSN:
+ case CALL_INSN:
+ case NOTE:
+ case BARRIER:
+ /* The chain of insns is not being copied. */
+ return x;
+
+ case MEM:
+ /* A MEM is allowed to be shared if its address is constant
+ or is a constant plus one of the special registers. */
+ if (CONSTANT_ADDRESS_P (XEXP (x, 0))
+ || XEXP (x, 0) == virtual_stack_vars_rtx
+ || XEXP (x, 0) == virtual_incoming_args_rtx)
+ return x;
+
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && (XEXP (XEXP (x, 0), 0) == virtual_stack_vars_rtx
+ || XEXP (XEXP (x, 0), 0) == virtual_incoming_args_rtx)
+ && CONSTANT_ADDRESS_P (XEXP (XEXP (x, 0), 1)))
+ {
+ /* This MEM can appear in more than one place,
+ but its address better not be shared with anything else. */
+ if (! x->used)
+ XEXP (x, 0) = copy_rtx_if_shared (XEXP (x, 0));
+ x->used = 1;
+ return x;
+ }
+ }
+
+ /* This rtx may not be shared. If it has already been seen,
+ replace it with a copy of itself. */
+
+ if (x->used)
+ {
+ register rtx copy;
+
+ copy = rtx_alloc (code);
+ bcopy ((char *) x, (char *) copy,
+ (sizeof (*copy) - sizeof (copy->fld)
+ + sizeof (copy->fld[0]) * GET_RTX_LENGTH (code)));
+ x = copy;
+ copied = 1;
+ }
+ x->used = 1;
+
+ /* Now scan the subexpressions recursively.
+ We can store any replaced subexpressions directly into X
+ since we know X is not shared! Any vectors in X
+ must be copied if X was copied. */
+
+ format_ptr = GET_RTX_FORMAT (code);
+
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ XEXP (x, i) = copy_rtx_if_shared (XEXP (x, i));
+ break;
+
+ case 'E':
+ if (XVEC (x, i) != NULL)
+ {
+ register int j;
+ int len = XVECLEN (x, i);
+
+ if (copied && len > 0)
+ XVEC (x, i) = gen_rtvec_v (len, &XVECEXP (x, i, 0));
+ for (j = 0; j < len; j++)
+ XVECEXP (x, i, j) = copy_rtx_if_shared (XVECEXP (x, i, j));
+ }
+ break;
+ }
+ }
+ return x;
+}
+
+/* Clear all the USED bits in X to allow copy_rtx_if_shared to be used
+ to look for shared sub-parts. */
+
+void
+reset_used_flags (x)
+ rtx x;
+{
+ register int i, j;
+ register enum rtx_code code;
+ register char *format_ptr;
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+
+  /* These types may be freely shared so we needn't do any resetting
+ for them. */
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ return;
+
+ case INSN:
+ case JUMP_INSN:
+ case CALL_INSN:
+ case NOTE:
+ case LABEL_REF:
+ case BARRIER:
+ /* The chain of insns is not being copied. */
+ return;
+ }
+
+ x->used = 0;
+
+ format_ptr = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ reset_used_flags (XEXP (x, i));
+ break;
+
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ reset_used_flags (XVECEXP (x, i, j));
+ break;
+ }
+ }
+}
+
+/* Copy X if necessary so that it won't be altered by changes in OTHER.
+ Return X or the rtx for the pseudo reg the value of X was copied into.
+ OTHER must be valid as a SET_DEST. */
+
+rtx
+make_safe_from (x, other)
+ rtx x, other;
+{
+ while (1)
+ switch (GET_CODE (other))
+ {
+ case SUBREG:
+ other = SUBREG_REG (other);
+ break;
+ case STRICT_LOW_PART:
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ other = XEXP (other, 0);
+ break;
+ default:
+ goto done;
+ }
+ done:
+ if ((GET_CODE (other) == MEM
+ && ! CONSTANT_P (x)
+ && GET_CODE (x) != REG
+ && GET_CODE (x) != SUBREG)
+ || (GET_CODE (other) == REG
+ && (REGNO (other) < FIRST_PSEUDO_REGISTER
+ || reg_mentioned_p (other, x))))
+ {
+ rtx temp = gen_reg_rtx (GET_MODE (x));
+ emit_move_insn (temp, x);
+ return temp;
+ }
+ return x;
+}
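+
+/* For example (a sketch): if OTHER is a MEM and X is some other MEM, X is
+   first copied into a fresh pseudo, since storing through OTHER might
+   overlap X:
+
+	x = make_safe_from (x, other);  */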
+
+/* Emission of insns (adding them to the doubly-linked list). */
+
+/* Return the first insn of the current sequence or current function. */
+
+rtx
+get_insns ()
+{
+ return first_insn;
+}
+
+/* Return the last insn emitted in current sequence or current function. */
+
+rtx
+get_last_insn ()
+{
+ return last_insn;
+}
+
+/* Specify a new insn as the last in the chain. */
+
+void
+set_last_insn (insn)
+ rtx insn;
+{
+ if (NEXT_INSN (insn) != 0)
+ abort ();
+ last_insn = insn;
+}
+
+/* Return the last insn emitted, even if it is in a sequence now pushed. */
+
+rtx
+get_last_insn_anywhere ()
+{
+ struct sequence_stack *stack;
+ if (last_insn)
+ return last_insn;
+ for (stack = sequence_stack; stack; stack = stack->next)
+ if (stack->last != 0)
+ return stack->last;
+ return 0;
+}
+
+/* Return a number larger than any instruction's uid in this function. */
+
+int
+get_max_uid ()
+{
+ return cur_insn_uid;
+}
+
+/* Return the next insn. If it is a SEQUENCE, return the first insn
+ of the sequence. */
+
+rtx
+next_insn (insn)
+ rtx insn;
+{
+ if (insn)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ insn = XVECEXP (PATTERN (insn), 0, 0);
+ }
+
+ return insn;
+}
+
+/* Return the previous insn. If it is a SEQUENCE, return the last insn
+ of the sequence. */
+
+rtx
+previous_insn (insn)
+ rtx insn;
+{
+ if (insn)
+ {
+ insn = PREV_INSN (insn);
+ if (insn && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ insn = XVECEXP (PATTERN (insn), 0, XVECLEN (PATTERN (insn), 0) - 1);
+ }
+
+ return insn;
+}
+
+/* Return the next insn after INSN that is not a NOTE. This routine does not
+ look inside SEQUENCEs. */
+
+rtx
+next_nonnote_insn (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == 0 || GET_CODE (insn) != NOTE)
+ break;
+ }
+
+ return insn;
+}
+
+/* Return the previous insn before INSN that is not a NOTE. This routine does
+ not look inside SEQUENCEs. */
+
+rtx
+prev_nonnote_insn (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = PREV_INSN (insn);
+ if (insn == 0 || GET_CODE (insn) != NOTE)
+ break;
+ }
+
+ return insn;
+}
+
+/* Return the next INSN, CALL_INSN or JUMP_INSN after INSN;
+ or 0, if there is none. This routine does not look inside
+ SEQUENCEs. */
+
+rtx
+next_real_insn (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == 0 || GET_CODE (insn) == INSN
+ || GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
+ break;
+ }
+
+ return insn;
+}
+
+/* Return the last INSN, CALL_INSN or JUMP_INSN before INSN;
+ or 0, if there is none. This routine does not look inside
+ SEQUENCEs. */
+
+rtx
+prev_real_insn (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = PREV_INSN (insn);
+ if (insn == 0 || GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN
+ || GET_CODE (insn) == JUMP_INSN)
+ break;
+ }
+
+ return insn;
+}
+
+/* Find the next insn after INSN that really does something. This routine
+ does not look inside SEQUENCEs. Until reload has completed, this is the
+ same as next_real_insn. */
+
+rtx
+next_active_insn (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == 0
+ || GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
+ || (GET_CODE (insn) == INSN
+ && (! reload_completed
+ || (GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER))))
+ break;
+ }
+
+ return insn;
+}
+
+/* Find the last insn before INSN that really does something. This routine
+ does not look inside SEQUENCEs. Until reload has completed, this is the
+ same as prev_real_insn. */
+
+rtx
+prev_active_insn (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = PREV_INSN (insn);
+ if (insn == 0
+ || GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
+ || (GET_CODE (insn) == INSN
+ && (! reload_completed
+ || (GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER))))
+ break;
+ }
+
+ return insn;
+}
+
+/* Return the next CODE_LABEL after the insn INSN, or 0 if there is none. */
+
+rtx
+next_label (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == 0 || GET_CODE (insn) == CODE_LABEL)
+ break;
+ }
+
+ return insn;
+}
+
+/* Return the last CODE_LABEL before the insn INSN, or 0 if there is none. */
+
+rtx
+prev_label (insn)
+ rtx insn;
+{
+ while (insn)
+ {
+ insn = PREV_INSN (insn);
+ if (insn == 0 || GET_CODE (insn) == CODE_LABEL)
+ break;
+ }
+
+ return insn;
+}
+
+#ifdef HAVE_cc0
+/* INSN uses CC0 and is being moved into a delay slot. Set up REG_CC_SETTER
+ and REG_CC_USER notes so we can find it. */
+
+void
+link_cc0_insns (insn)
+ rtx insn;
+{
+ rtx user = next_nonnote_insn (insn);
+
+ if (GET_CODE (user) == INSN && GET_CODE (PATTERN (user)) == SEQUENCE)
+ user = XVECEXP (PATTERN (user), 0, 0);
+
+ REG_NOTES (user) = gen_rtx (INSN_LIST, REG_CC_SETTER, insn,
+ REG_NOTES (user));
+ REG_NOTES (insn) = gen_rtx (INSN_LIST, REG_CC_USER, user, REG_NOTES (insn));
+}
+
+/* Return the next insn that uses CC0 after INSN, which is assumed to
+ set it. This is the inverse of prev_cc0_setter (i.e., prev_cc0_setter
+ applied to the result of this function should yield INSN).
+
+ Normally, this is simply the next insn. However, if a REG_CC_USER note
+ is present, it contains the insn that uses CC0.
+
+ Return 0 if we can't find the insn. */
+
+rtx
+next_cc0_user (insn)
+ rtx insn;
+{
+ rtx note = find_reg_note (insn, REG_CC_USER, NULL_RTX);
+
+ if (note)
+ return XEXP (note, 0);
+
+ insn = next_nonnote_insn (insn);
+ if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ insn = XVECEXP (PATTERN (insn), 0, 0);
+
+ if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
+ return insn;
+
+ return 0;
+}
+
+/* Find the insn that set CC0 for INSN. Unless INSN has a REG_CC_SETTER
+ note, it is the previous insn. */
+
+rtx
+prev_cc0_setter (insn)
+ rtx insn;
+{
+  rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
+
+ if (note)
+ return XEXP (note, 0);
+
+ insn = prev_nonnote_insn (insn);
+ if (! sets_cc0_p (PATTERN (insn)))
+ abort ();
+
+ return insn;
+}
+#endif
+
+/* Try splitting insns that can be split for better scheduling.
+ PAT is the pattern which might split.
+ TRIAL is the insn providing PAT.
+ LAST is non-zero if we should return the last insn of the sequence produced.
+
+ If this routine succeeds in splitting, it returns the first or last
+ replacement insn depending on the value of LAST. Otherwise, it
+ returns TRIAL. If the insn to be returned can be split, it will be. */
+
+rtx
+try_split (pat, trial, last)
+ rtx pat, trial;
+ int last;
+{
+ rtx before = PREV_INSN (trial);
+ rtx after = NEXT_INSN (trial);
+ rtx seq = split_insns (pat, trial);
+ int has_barrier = 0;
+ rtx tem;
+
+ /* If we are splitting a JUMP_INSN, it might be followed by a BARRIER.
+ We may need to handle this specially. */
+ if (after && GET_CODE (after) == BARRIER)
+ {
+ has_barrier = 1;
+ after = NEXT_INSN (after);
+ }
+
+ if (seq)
+ {
+      /* SEQ can either be a SEQUENCE or the pattern of a single insn.
+	 The latter case will normally arise only when the split is being
+	 done so that the result will, in turn, be split again (SFmode on
+	 the 29k is an example).  */
+ if (GET_CODE (seq) == SEQUENCE)
+ {
+ /* If we are splitting a JUMP_INSN, look for the JUMP_INSN in
+ SEQ and copy our JUMP_LABEL to it. If JUMP_LABEL is non-zero,
+ increment the usage count so we don't delete the label. */
+ int i;
+
+ if (GET_CODE (trial) == JUMP_INSN)
+ for (i = XVECLEN (seq, 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (seq, 0, i)) == JUMP_INSN)
+ {
+ JUMP_LABEL (XVECEXP (seq, 0, i)) = JUMP_LABEL (trial);
+
+ if (JUMP_LABEL (trial))
+ LABEL_NUSES (JUMP_LABEL (trial))++;
+ }
+
+ tem = emit_insn_after (seq, before);
+
+ delete_insn (trial);
+ if (has_barrier)
+ emit_barrier_after (tem);
+
+ /* Recursively call try_split for each new insn created; by the
+ time control returns here that insn will be fully split, so
+ set LAST and continue from the insn after the one returned.
+ We can't use next_active_insn here since AFTER may be a note.
+	 Ignore deleted insns, which can occur if not optimizing.  */
+ for (tem = NEXT_INSN (before); tem != after;
+ tem = NEXT_INSN (tem))
+ if (! INSN_DELETED_P (tem))
+ tem = try_split (PATTERN (tem), tem, 1);
+ }
+ /* Avoid infinite loop if the result matches the original pattern. */
+ else if (rtx_equal_p (seq, pat))
+ return trial;
+ else
+ {
+ PATTERN (trial) = seq;
+ INSN_CODE (trial) = -1;
+ try_split (seq, trial, last);
+ }
+
+ /* Return either the first or the last insn, depending on which was
+ requested. */
+ return last ? prev_active_insn (after) : next_active_insn (before);
+ }
+
+ return trial;
+}
+
+/* Make and return an INSN rtx, initializing all its slots.
+   Store PATTERN in the pattern slot.  */
+
+rtx
+make_insn_raw (pattern)
+ rtx pattern;
+{
+ register rtx insn;
+
+ insn = rtx_alloc (INSN);
+ INSN_UID (insn) = cur_insn_uid++;
+
+ PATTERN (insn) = pattern;
+ INSN_CODE (insn) = -1;
+ LOG_LINKS (insn) = NULL;
+ REG_NOTES (insn) = NULL;
+
+ return insn;
+}
+
+/* Like `make_insn_raw' but make a JUMP_INSN instead of an insn.  */
+
+static rtx
+make_jump_insn_raw (pattern)
+ rtx pattern;
+{
+ register rtx insn;
+
+ insn = rtx_alloc (JUMP_INSN);
+ INSN_UID (insn) = cur_insn_uid++;
+
+ PATTERN (insn) = pattern;
+ INSN_CODE (insn) = -1;
+ LOG_LINKS (insn) = NULL;
+ REG_NOTES (insn) = NULL;
+ JUMP_LABEL (insn) = NULL;
+
+ return insn;
+}
+
+/* Like `make_insn_raw' but make a CALL_INSN instead of an insn.  */
+
+static rtx
+make_call_insn_raw (pattern)
+ rtx pattern;
+{
+ register rtx insn;
+
+ insn = rtx_alloc (CALL_INSN);
+ INSN_UID (insn) = cur_insn_uid++;
+
+ PATTERN (insn) = pattern;
+ INSN_CODE (insn) = -1;
+ LOG_LINKS (insn) = NULL;
+ REG_NOTES (insn) = NULL;
+ CALL_INSN_FUNCTION_USAGE (insn) = NULL;
+
+ return insn;
+}
+
+/* Add INSN to the end of the doubly-linked list.
+ INSN may be an INSN, JUMP_INSN, CALL_INSN, CODE_LABEL, BARRIER or NOTE. */
+
+void
+add_insn (insn)
+ register rtx insn;
+{
+ PREV_INSN (insn) = last_insn;
+ NEXT_INSN (insn) = 0;
+
+ if (NULL != last_insn)
+ NEXT_INSN (last_insn) = insn;
+
+ if (NULL == first_insn)
+ first_insn = insn;
+
+ last_insn = insn;
+}
+
+/* Add INSN into the doubly-linked list after insn AFTER. This should be the
+ only function called to insert an insn once delay slots have been filled
+ since only it knows how to update a SEQUENCE. */
+
+void
+add_insn_after (insn, after)
+ rtx insn, after;
+{
+ rtx next = NEXT_INSN (after);
+
+ NEXT_INSN (insn) = next;
+ PREV_INSN (insn) = after;
+
+ if (next)
+ {
+ PREV_INSN (next) = insn;
+ if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == SEQUENCE)
+ PREV_INSN (XVECEXP (PATTERN (next), 0, 0)) = insn;
+ }
+ else if (last_insn == after)
+ last_insn = insn;
+ else
+ {
+ struct sequence_stack *stack = sequence_stack;
+ /* Scan all pending sequences too. */
+ for (; stack; stack = stack->next)
+ if (after == stack->last)
+ stack->last = insn;
+ }
+
+ NEXT_INSN (after) = insn;
+ if (GET_CODE (after) == INSN && GET_CODE (PATTERN (after)) == SEQUENCE)
+ {
+ rtx sequence = PATTERN (after);
+ NEXT_INSN (XVECEXP (sequence, 0, XVECLEN (sequence, 0) - 1)) = insn;
+ }
+}
+
+/* Delete all insns made since FROM.
+ FROM becomes the new last instruction. */
+
+void
+delete_insns_since (from)
+ rtx from;
+{
+ if (from == 0)
+ first_insn = 0;
+ else
+ NEXT_INSN (from) = 0;
+ last_insn = from;
+}
+
+/* This function is deprecated, please use sequences instead.
+
+ Move a consecutive bunch of insns to a different place in the chain.
+ The insns to be moved are those between FROM and TO.
+ They are moved to a new position after the insn AFTER.
+ AFTER must not be FROM or TO or any insn in between.
+
+ This function does not know about SEQUENCEs and hence should not be
+ called after delay-slot filling has been done. */
+
+void
+reorder_insns (from, to, after)
+ rtx from, to, after;
+{
+ /* Splice this bunch out of where it is now. */
+ if (PREV_INSN (from))
+ NEXT_INSN (PREV_INSN (from)) = NEXT_INSN (to);
+ if (NEXT_INSN (to))
+ PREV_INSN (NEXT_INSN (to)) = PREV_INSN (from);
+ if (last_insn == to)
+ last_insn = PREV_INSN (from);
+ if (first_insn == from)
+ first_insn = NEXT_INSN (to);
+
+ /* Make the new neighbors point to it and it to them. */
+ if (NEXT_INSN (after))
+ PREV_INSN (NEXT_INSN (after)) = to;
+
+ NEXT_INSN (to) = NEXT_INSN (after);
+ PREV_INSN (from) = after;
+ NEXT_INSN (after) = from;
+ if (after == last_insn)
+ last_insn = to;
+}
+
+/* Return the line note insn preceding INSN. */
+
+static rtx
+find_line_note (insn)
+ rtx insn;
+{
+ if (no_line_numbers)
+ return 0;
+
+ for (; insn; insn = PREV_INSN (insn))
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) >= 0)
+ break;
+
+ return insn;
+}
+
+/* Like reorder_insns, but inserts line notes to preserve the line numbers
+ of the moved insns when debugging. This may insert a note between AFTER
+ and FROM, and another one after TO. */
+
+void
+reorder_insns_with_line_notes (from, to, after)
+ rtx from, to, after;
+{
+ rtx from_line = find_line_note (from);
+ rtx after_line = find_line_note (after);
+
+ reorder_insns (from, to, after);
+
+ if (from_line == after_line)
+ return;
+
+ if (from_line)
+ emit_line_note_after (NOTE_SOURCE_FILE (from_line),
+ NOTE_LINE_NUMBER (from_line),
+ after);
+ if (after_line)
+ emit_line_note_after (NOTE_SOURCE_FILE (after_line),
+ NOTE_LINE_NUMBER (after_line),
+ to);
+}
+
+/* Emit an insn of given code and pattern
+ at a specified place within the doubly-linked list. */
+
+/* Make an instruction with body PATTERN
+ and output it before the instruction BEFORE. */
+
+rtx
+emit_insn_before (pattern, before)
+ register rtx pattern, before;
+{
+ register rtx insn = before;
+
+ if (GET_CODE (pattern) == SEQUENCE)
+ {
+ register int i;
+
+ for (i = 0; i < XVECLEN (pattern, 0); i++)
+ {
+ insn = XVECEXP (pattern, 0, i);
+ add_insn_after (insn, PREV_INSN (before));
+ }
+ if (XVECLEN (pattern, 0) < SEQUENCE_RESULT_SIZE)
+ sequence_result[XVECLEN (pattern, 0)] = pattern;
+ }
+ else
+ {
+ insn = make_insn_raw (pattern);
+ add_insn_after (insn, PREV_INSN (before));
+ }
+
+ return insn;
+}
+
+/* Make an instruction with body PATTERN and code JUMP_INSN
+ and output it before the instruction BEFORE. */
+
+rtx
+emit_jump_insn_before (pattern, before)
+ register rtx pattern, before;
+{
+ register rtx insn;
+
+ if (GET_CODE (pattern) == SEQUENCE)
+ insn = emit_insn_before (pattern, before);
+ else
+ {
+ insn = make_jump_insn_raw (pattern);
+ add_insn_after (insn, PREV_INSN (before));
+ }
+
+ return insn;
+}
+
+/* Make an instruction with body PATTERN and code CALL_INSN
+ and output it before the instruction BEFORE. */
+
+rtx
+emit_call_insn_before (pattern, before)
+ register rtx pattern, before;
+{
+ register rtx insn;
+
+ if (GET_CODE (pattern) == SEQUENCE)
+ insn = emit_insn_before (pattern, before);
+ else
+ {
+ insn = make_call_insn_raw (pattern);
+ add_insn_after (insn, PREV_INSN (before));
+ PUT_CODE (insn, CALL_INSN);
+ }
+
+ return insn;
+}
+
+/* Make an insn of code BARRIER
+   and output it before the insn BEFORE.  */
+
+rtx
+emit_barrier_before (before)
+ register rtx before;
+{
+ register rtx insn = rtx_alloc (BARRIER);
+
+ INSN_UID (insn) = cur_insn_uid++;
+
+ add_insn_after (insn, PREV_INSN (before));
+ return insn;
+}
+
+/* Emit a note of subtype SUBTYPE before the insn BEFORE. */
+
+rtx
+emit_note_before (subtype, before)
+ int subtype;
+ rtx before;
+{
+ register rtx note = rtx_alloc (NOTE);
+ INSN_UID (note) = cur_insn_uid++;
+ NOTE_SOURCE_FILE (note) = 0;
+ NOTE_LINE_NUMBER (note) = subtype;
+
+ add_insn_after (note, PREV_INSN (before));
+ return note;
+}
+
+/* Make an insn of code INSN with body PATTERN
+ and output it after the insn AFTER. */
+
+rtx
+emit_insn_after (pattern, after)
+ register rtx pattern, after;
+{
+ register rtx insn = after;
+
+ if (GET_CODE (pattern) == SEQUENCE)
+ {
+ register int i;
+
+ for (i = 0; i < XVECLEN (pattern, 0); i++)
+ {
+ insn = XVECEXP (pattern, 0, i);
+ add_insn_after (insn, after);
+ after = insn;
+ }
+ if (XVECLEN (pattern, 0) < SEQUENCE_RESULT_SIZE)
+ sequence_result[XVECLEN (pattern, 0)] = pattern;
+ }
+ else
+ {
+ insn = make_insn_raw (pattern);
+ add_insn_after (insn, after);
+ }
+
+ return insn;
+}
+
+/* Similar to emit_insn_after, except that line notes are to be inserted so
+ as to act as if this insn were at FROM. */
+
+void
+emit_insn_after_with_line_notes (pattern, after, from)
+ rtx pattern, after, from;
+{
+ rtx from_line = find_line_note (from);
+ rtx after_line = find_line_note (after);
+ rtx insn = emit_insn_after (pattern, after);
+
+ if (from_line)
+ emit_line_note_after (NOTE_SOURCE_FILE (from_line),
+ NOTE_LINE_NUMBER (from_line),
+ after);
+
+ if (after_line)
+ emit_line_note_after (NOTE_SOURCE_FILE (after_line),
+ NOTE_LINE_NUMBER (after_line),
+ insn);
+}
+
+/* Make an insn of code JUMP_INSN with body PATTERN
+ and output it after the insn AFTER. */
+
+rtx
+emit_jump_insn_after (pattern, after)
+ register rtx pattern, after;
+{
+ register rtx insn;
+
+ if (GET_CODE (pattern) == SEQUENCE)
+ insn = emit_insn_after (pattern, after);
+ else
+ {
+ insn = make_jump_insn_raw (pattern);
+ add_insn_after (insn, after);
+ }
+
+ return insn;
+}
+
+/* Make an insn of code BARRIER
+ and output it after the insn AFTER. */
+
+rtx
+emit_barrier_after (after)
+ register rtx after;
+{
+ register rtx insn = rtx_alloc (BARRIER);
+
+ INSN_UID (insn) = cur_insn_uid++;
+
+ add_insn_after (insn, after);
+ return insn;
+}
+
+/* Emit the label LABEL after the insn AFTER. */
+
+rtx
+emit_label_after (label, after)
+ rtx label, after;
+{
+ /* This can be called twice for the same label
+ as a result of the confusion that follows a syntax error!
+ So make it harmless. */
+ if (INSN_UID (label) == 0)
+ {
+ INSN_UID (label) = cur_insn_uid++;
+ add_insn_after (label, after);
+ }
+
+ return label;
+}
+
+/* Emit a note of subtype SUBTYPE after the insn AFTER. */
+
+rtx
+emit_note_after (subtype, after)
+ int subtype;
+ rtx after;
+{
+ register rtx note = rtx_alloc (NOTE);
+ INSN_UID (note) = cur_insn_uid++;
+ NOTE_SOURCE_FILE (note) = 0;
+ NOTE_LINE_NUMBER (note) = subtype;
+ add_insn_after (note, after);
+ return note;
+}
+
+/* Emit a line note for FILE and LINE after the insn AFTER. */
+
+rtx
+emit_line_note_after (file, line, after)
+ char *file;
+ int line;
+ rtx after;
+{
+ register rtx note;
+
+ if (no_line_numbers && line > 0)
+ {
+ cur_insn_uid++;
+ return 0;
+ }
+
+ note = rtx_alloc (NOTE);
+ INSN_UID (note) = cur_insn_uid++;
+ NOTE_SOURCE_FILE (note) = file;
+ NOTE_LINE_NUMBER (note) = line;
+ add_insn_after (note, after);
+ return note;
+}
+
+/* Make an insn of code INSN with pattern PATTERN
+ and add it to the end of the doubly-linked list.
+ If PATTERN is a SEQUENCE, take the elements of it
+ and emit an insn for each element.
+
+ Returns the last insn emitted. */
+
+rtx
+emit_insn (pattern)
+ rtx pattern;
+{
+ rtx insn = last_insn;
+
+ if (GET_CODE (pattern) == SEQUENCE)
+ {
+ register int i;
+
+ for (i = 0; i < XVECLEN (pattern, 0); i++)
+ {
+ insn = XVECEXP (pattern, 0, i);
+ add_insn (insn);
+ }
+ if (XVECLEN (pattern, 0) < SEQUENCE_RESULT_SIZE)
+ sequence_result[XVECLEN (pattern, 0)] = pattern;
+ }
+ else
+ {
+ insn = make_insn_raw (pattern);
+ add_insn (insn);
+ }
+
+ return insn;
+}
+
+/* Emit the insns in a chain starting with INSN.
+ Return the last insn emitted. */
+
+rtx
+emit_insns (insn)
+ rtx insn;
+{
+ rtx last = 0;
+
+ while (insn)
+ {
+ rtx next = NEXT_INSN (insn);
+ add_insn (insn);
+ last = insn;
+ insn = next;
+ }
+
+ return last;
+}
+
+/* Emit the insns in a chain starting with INSN and place them in front of
+ the insn BEFORE. Return the last insn emitted. */
+
+rtx
+emit_insns_before (insn, before)
+ rtx insn;
+ rtx before;
+{
+ rtx last = 0;
+
+ while (insn)
+ {
+ rtx next = NEXT_INSN (insn);
+ add_insn_after (insn, PREV_INSN (before));
+ last = insn;
+ insn = next;
+ }
+
+ return last;
+}
+
+/* Emit the insns in a chain starting with FIRST and place them in back of
+ the insn AFTER. Return the last insn emitted. */
+
+rtx
+emit_insns_after (first, after)
+ register rtx first;
+ register rtx after;
+{
+ register rtx last;
+ register rtx after_after;
+
+ if (!after)
+ abort ();
+
+ if (!first)
+ return first;
+
+ for (last = first; NEXT_INSN (last); last = NEXT_INSN (last))
+ continue;
+
+ after_after = NEXT_INSN (after);
+
+ NEXT_INSN (after) = first;
+ PREV_INSN (first) = after;
+ NEXT_INSN (last) = after_after;
+ if (after_after)
+ PREV_INSN (after_after) = last;
+
+ if (after == last_insn)
+ last_insn = last;
+ return last;
+}
+
+/* Make an insn of code JUMP_INSN with pattern PATTERN
+ and add it to the end of the doubly-linked list. */
+
+rtx
+emit_jump_insn (pattern)
+ rtx pattern;
+{
+ if (GET_CODE (pattern) == SEQUENCE)
+ return emit_insn (pattern);
+ else
+ {
+ register rtx insn = make_jump_insn_raw (pattern);
+ add_insn (insn);
+ return insn;
+ }
+}
+
+/* Make an insn of code CALL_INSN with pattern PATTERN
+ and add it to the end of the doubly-linked list. */
+
+rtx
+emit_call_insn (pattern)
+ rtx pattern;
+{
+ if (GET_CODE (pattern) == SEQUENCE)
+ return emit_insn (pattern);
+ else
+ {
+ register rtx insn = make_call_insn_raw (pattern);
+ add_insn (insn);
+ PUT_CODE (insn, CALL_INSN);
+ return insn;
+ }
+}
+
+/* Add the label LABEL to the end of the doubly-linked list. */
+
+rtx
+emit_label (label)
+ rtx label;
+{
+ /* This can be called twice for the same label
+ as a result of the confusion that follows a syntax error!
+ So make it harmless. */
+ if (INSN_UID (label) == 0)
+ {
+ INSN_UID (label) = cur_insn_uid++;
+ add_insn (label);
+ }
+ return label;
+}
+
+/* Make an insn of code BARRIER
+ and add it to the end of the doubly-linked list. */
+
+rtx
+emit_barrier ()
+{
+ register rtx barrier = rtx_alloc (BARRIER);
+ INSN_UID (barrier) = cur_insn_uid++;
+ add_insn (barrier);
+ return barrier;
+}
+
+/* Make an insn of code NOTE
+ with data-fields specified by FILE and LINE
+ and add it to the end of the doubly-linked list,
+ but only if line-numbers are desired for debugging info. */
+
+rtx
+emit_line_note (file, line)
+ char *file;
+ int line;
+{
+ if (output_bytecode)
+ {
+ /* FIXME: for now we do nothing, but eventually we will have to deal with
+ debugging information. */
+ return 0;
+ }
+
+ emit_filename = file;
+ emit_lineno = line;
+
+#if 0
+ if (no_line_numbers)
+ return 0;
+#endif
+
+ return emit_note (file, line);
+}
+
+/* Make an insn of code NOTE
+ with data-fields specified by FILE and LINE
+ and add it to the end of the doubly-linked list.
+ If it is a line-number NOTE, omit it if it matches the previous one. */
+
+rtx
+emit_note (file, line)
+ char *file;
+ int line;
+{
+ register rtx note;
+
+ if (line > 0)
+ {
+ if (file && last_filename && !strcmp (file, last_filename)
+ && line == last_linenum)
+ return 0;
+ last_filename = file;
+ last_linenum = line;
+ }
+
+ if (no_line_numbers && line > 0)
+ {
+ cur_insn_uid++;
+ return 0;
+ }
+
+ note = rtx_alloc (NOTE);
+ INSN_UID (note) = cur_insn_uid++;
+ NOTE_SOURCE_FILE (note) = file;
+ NOTE_LINE_NUMBER (note) = line;
+ add_insn (note);
+ return note;
+}
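+
+/* Illustration of the deduplication above (the file name is hypothetical,
+   and the previous note is assumed to be for some other position):  */
+#if 0
+  emit_note ("foo.c", 10);	/* emits a line-number NOTE */
+  emit_note ("foo.c", 10);	/* duplicate; returns 0, emits nothing */
+  emit_note ("foo.c", 11);	/* new line; emits another NOTE */
+#endif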
+
+/* Emit a NOTE, and don't omit it even if LINE is the same line as
+   the previous note.  */
+
+rtx
+emit_line_note_force (file, line)
+ char *file;
+ int line;
+{
+ last_linenum = -1;
+ return emit_line_note (file, line);
+}
+
+/* Cause next statement to emit a line note even if the line number
+ has not changed. This is used at the beginning of a function. */
+
+void
+force_next_line_note ()
+{
+ last_linenum = -1;
+}
+
+/* Return an indication of which type of insn should have X as a body.
+ The value is CODE_LABEL, INSN, CALL_INSN or JUMP_INSN. */
+
+enum rtx_code
+classify_insn (x)
+ rtx x;
+{
+ if (GET_CODE (x) == CODE_LABEL)
+ return CODE_LABEL;
+ if (GET_CODE (x) == CALL)
+ return CALL_INSN;
+ if (GET_CODE (x) == RETURN)
+ return JUMP_INSN;
+ if (GET_CODE (x) == SET)
+ {
+ if (SET_DEST (x) == pc_rtx)
+ return JUMP_INSN;
+ else if (GET_CODE (SET_SRC (x)) == CALL)
+ return CALL_INSN;
+ else
+ return INSN;
+ }
+ if (GET_CODE (x) == PARALLEL)
+ {
+ register int j;
+ for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
+ if (GET_CODE (XVECEXP (x, 0, j)) == CALL)
+ return CALL_INSN;
+ else if (GET_CODE (XVECEXP (x, 0, j)) == SET
+ && SET_DEST (XVECEXP (x, 0, j)) == pc_rtx)
+ return JUMP_INSN;
+ else if (GET_CODE (XVECEXP (x, 0, j)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (x, 0, j))) == CALL)
+ return CALL_INSN;
+ }
+ return INSN;
+}
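+
+/* Example of the classification rules above: a SET whose destination is
+   the pc is a jump.  LABEL is a hypothetical CODE_LABEL.  */
+#if 0
+  classify_insn (gen_rtx (SET, VOIDmode, pc_rtx,
+			  gen_rtx (LABEL_REF, VOIDmode, label)));
+  /* => JUMP_INSN */
+#endif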
+
+/* Emit the rtl pattern X as an appropriate kind of insn.
+ If X is a label, it is simply added into the insn chain. */
+
+rtx
+emit (x)
+ rtx x;
+{
+ enum rtx_code code = classify_insn (x);
+
+ if (code == CODE_LABEL)
+ return emit_label (x);
+ else if (code == INSN)
+ return emit_insn (x);
+ else if (code == JUMP_INSN)
+ {
+ register rtx insn = emit_jump_insn (x);
+ if (simplejump_p (insn) || GET_CODE (x) == RETURN)
+ return emit_barrier ();
+ return insn;
+ }
+ else if (code == CALL_INSN)
+ return emit_call_insn (x);
+ else
+ abort ();
+}
+
+/* Begin emitting insns to a sequence which can be packaged in an RTL_EXPR. */
+
+void
+start_sequence ()
+{
+ struct sequence_stack *tem;
+
+ if (sequence_element_free_list)
+ {
+ /* Reuse a previously-saved struct sequence_stack. */
+ tem = sequence_element_free_list;
+ sequence_element_free_list = tem->next;
+ }
+ else
+ tem = (struct sequence_stack *) permalloc (sizeof (struct sequence_stack));
+
+ tem->next = sequence_stack;
+ tem->first = first_insn;
+ tem->last = last_insn;
+ tem->sequence_rtl_expr = sequence_rtl_expr;
+
+ sequence_stack = tem;
+
+ first_insn = 0;
+ last_insn = 0;
+}
+
+/* Similarly, but indicate that this sequence will be placed in
+ T, an RTL_EXPR. */
+
+void
+start_sequence_for_rtl_expr (t)
+ tree t;
+{
+ start_sequence ();
+
+ sequence_rtl_expr = t;
+}
+
+/* Set up the insn chain starting with FIRST
+ as the current sequence, saving the previously current one. */
+
+void
+push_to_sequence (first)
+ rtx first;
+{
+ rtx last;
+
+ start_sequence ();
+
+ for (last = first; last && NEXT_INSN (last); last = NEXT_INSN (last));
+
+ first_insn = first;
+ last_insn = last;
+}
+
+/* Set up the outer-level insn chain
+ as the current sequence, saving the previously current one. */
+
+void
+push_topmost_sequence ()
+{
+ struct sequence_stack *stack, *top;
+
+ start_sequence ();
+
+ for (stack = sequence_stack; stack; stack = stack->next)
+ top = stack;
+
+ first_insn = top->first;
+ last_insn = top->last;
+ sequence_rtl_expr = top->sequence_rtl_expr;
+}
+
+/* After emitting to the outer-level insn chain, update the outer-level
+ insn chain, and restore the previous saved state. */
+
+void
+pop_topmost_sequence ()
+{
+ struct sequence_stack *stack, *top;
+
+ for (stack = sequence_stack; stack; stack = stack->next)
+ top = stack;
+
+ top->first = first_insn;
+ top->last = last_insn;
+ /* ??? Why don't we save sequence_rtl_expr here? */
+
+ end_sequence ();
+}
+
+/* After emitting to a sequence, restore previous saved state.
+
+ To get the contents of the sequence just made,
+ you must call `gen_sequence' *before* calling here. */
+
+void
+end_sequence ()
+{
+ struct sequence_stack *tem = sequence_stack;
+
+ first_insn = tem->first;
+ last_insn = tem->last;
+ sequence_rtl_expr = tem->sequence_rtl_expr;
+ sequence_stack = tem->next;
+
+ tem->next = sequence_element_free_list;
+ sequence_element_free_list = tem;
+}
+
+/* Return 1 if currently emitting into a sequence. */
+
+int
+in_sequence_p ()
+{
+ return sequence_stack != 0;
+}
+
+/* Generate a SEQUENCE rtx containing the insns already emitted
+ to the current sequence.
+
+ This is how the gen_... function from a DEFINE_EXPAND
+ constructs the SEQUENCE that it returns. */
+
+rtx
+gen_sequence ()
+{
+ rtx result;
+ rtx tem;
+ int i;
+ int len;
+
+ /* Count the insns in the chain. */
+ len = 0;
+ for (tem = first_insn; tem; tem = NEXT_INSN (tem))
+ len++;
+
+ /* If only one insn, return its pattern rather than a SEQUENCE.
+ (Now that we cache SEQUENCE expressions, it isn't worth special-casing
+ the case of an empty list.) */
+ if (len == 1
+ && (GET_CODE (first_insn) == INSN
+ || GET_CODE (first_insn) == JUMP_INSN
+ || GET_CODE (first_insn) == CALL_INSN))
+ return PATTERN (first_insn);
+
+ /* Put them in a vector. See if we already have a SEQUENCE of the
+ appropriate length around. */
+ if (len < SEQUENCE_RESULT_SIZE && (result = sequence_result[len]) != 0)
+ sequence_result[len] = 0;
+ else
+ {
+ /* Ensure that this rtl goes in saveable_obstack, since we may be
+ caching it. */
+ push_obstacks_nochange ();
+ rtl_in_saveable_obstack ();
+ result = gen_rtx (SEQUENCE, VOIDmode, rtvec_alloc (len));
+ pop_obstacks ();
+ }
+
+ for (i = 0, tem = first_insn; tem; tem = NEXT_INSN (tem), i++)
+ XVECEXP (result, 0, i) = tem;
+
+ return result;
+}
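+
+/* The canonical protocol around gen_sequence, as emit_stack_save in
+   explow.c (below) uses it: emit into a fresh sequence, package the
+   result, restore the previous chain, then splice the package wherever
+   it belongs.  TEMP, X and AFTER are hypothetical.  */
+#if 0
+  rtx seq;
+
+  start_sequence ();
+  emit_insn (gen_move_insn (temp, x));	/* any number of emission calls */
+  seq = gen_sequence ();		/* must precede end_sequence */
+  end_sequence ();
+  emit_insn_after (seq, after);
+#endif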
+
+/* Set up regno_reg_rtx, reg_rtx_no and regno_pointer_flag
+ according to the chain of insns starting with FIRST.
+
+ Also set cur_insn_uid to exceed the largest uid in that chain.
+
+ This is used when an inline function's rtl is saved
+ and passed to rest_of_compilation later. */
+
+static void restore_reg_data_1 ();
+
+void
+restore_reg_data (first)
+ rtx first;
+{
+ register rtx insn;
+ int i;
+ register int max_uid = 0;
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (INSN_UID (insn) >= max_uid)
+ max_uid = INSN_UID (insn);
+
+ switch (GET_CODE (insn))
+ {
+ case NOTE:
+ case CODE_LABEL:
+ case BARRIER:
+ break;
+
+ case JUMP_INSN:
+ case CALL_INSN:
+ case INSN:
+ restore_reg_data_1 (PATTERN (insn));
+ break;
+ }
+ }
+
+ /* Don't duplicate the uids already in use. */
+ cur_insn_uid = max_uid + 1;
+
+ /* If any regs are missing, make them up.
+
+ ??? word_mode is not necessarily the right mode. Most likely these REGs
+ are never used. At some point this should be checked. */
+
+ for (i = FIRST_PSEUDO_REGISTER; i < reg_rtx_no; i++)
+ if (regno_reg_rtx[i] == 0)
+ regno_reg_rtx[i] = gen_rtx (REG, word_mode, i);
+}
+
+static void
+restore_reg_data_1 (orig)
+ rtx orig;
+{
+ register rtx x = orig;
+ register int i;
+ register enum rtx_code code;
+ register char *format_ptr;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case LABEL_REF:
+ return;
+
+ case REG:
+ if (REGNO (x) >= FIRST_PSEUDO_REGISTER)
+ {
+ /* Make sure regno_pointer_flag and regno_reg_rtx are large
+ enough to have an element for this pseudo reg number. */
+ if (REGNO (x) >= reg_rtx_no)
+ {
+ reg_rtx_no = REGNO (x);
+
+ if (reg_rtx_no >= regno_pointer_flag_length)
+ {
+ int newlen = MAX (regno_pointer_flag_length * 2,
+ reg_rtx_no + 30);
+ rtx *new1;
+ char *new = (char *) oballoc (newlen);
+ bzero (new, newlen);
+ bcopy (regno_pointer_flag, new, regno_pointer_flag_length);
+
+ new1 = (rtx *) oballoc (newlen * sizeof (rtx));
+ bzero ((char *) new1, newlen * sizeof (rtx));
+ bcopy ((char *) regno_reg_rtx, (char *) new1,
+ regno_pointer_flag_length * sizeof (rtx));
+
+ regno_pointer_flag = new;
+ regno_reg_rtx = new1;
+ regno_pointer_flag_length = newlen;
+ }
+ reg_rtx_no ++;
+ }
+ regno_reg_rtx[REGNO (x)] = x;
+ }
+ return;
+
+ case MEM:
+ if (GET_CODE (XEXP (x, 0)) == REG)
+ mark_reg_pointer (XEXP (x, 0));
+ restore_reg_data_1 (XEXP (x, 0));
+ return;
+ }
+
+ /* Now scan the subexpressions recursively. */
+
+ format_ptr = GET_RTX_FORMAT (code);
+
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ restore_reg_data_1 (XEXP (x, i));
+ break;
+
+ case 'E':
+ if (XVEC (x, i) != NULL)
+ {
+ register int j;
+
+ for (j = 0; j < XVECLEN (x, i); j++)
+ restore_reg_data_1 (XVECEXP (x, i, j));
+ }
+ break;
+ }
+ }
+}
+
+/* Initialize data structures and variables in this file
+ before generating rtl for each function. */
+
+void
+init_emit ()
+{
+ int i;
+
+ first_insn = NULL;
+ last_insn = NULL;
+ sequence_rtl_expr = NULL;
+ cur_insn_uid = 1;
+ reg_rtx_no = LAST_VIRTUAL_REGISTER + 1;
+ last_linenum = 0;
+ last_filename = 0;
+ first_label_num = label_num;
+ last_label_num = 0;
+ sequence_stack = NULL;
+
+ /* Clear the start_sequence/gen_sequence cache. */
+ sequence_element_free_list = 0;
+ for (i = 0; i < SEQUENCE_RESULT_SIZE; i++)
+ sequence_result[i] = 0;
+
+ /* Init the tables that describe all the pseudo regs. */
+
+ regno_pointer_flag_length = LAST_VIRTUAL_REGISTER + 101;
+
+ regno_pointer_flag
+ = (char *) oballoc (regno_pointer_flag_length);
+ bzero (regno_pointer_flag, regno_pointer_flag_length);
+
+ regno_reg_rtx
+ = (rtx *) oballoc (regno_pointer_flag_length * sizeof (rtx));
+ bzero ((char *) regno_reg_rtx, regno_pointer_flag_length * sizeof (rtx));
+
+ /* Put copies of all the virtual register rtx into regno_reg_rtx. */
+ regno_reg_rtx[VIRTUAL_INCOMING_ARGS_REGNUM] = virtual_incoming_args_rtx;
+ regno_reg_rtx[VIRTUAL_STACK_VARS_REGNUM] = virtual_stack_vars_rtx;
+ regno_reg_rtx[VIRTUAL_STACK_DYNAMIC_REGNUM] = virtual_stack_dynamic_rtx;
+ regno_reg_rtx[VIRTUAL_OUTGOING_ARGS_REGNUM] = virtual_outgoing_args_rtx;
+
+ /* Indicate that the virtual registers and stack locations are
+ all pointers. */
+ REGNO_POINTER_FLAG (STACK_POINTER_REGNUM) = 1;
+ REGNO_POINTER_FLAG (FRAME_POINTER_REGNUM) = 1;
+ REGNO_POINTER_FLAG (ARG_POINTER_REGNUM) = 1;
+
+ REGNO_POINTER_FLAG (VIRTUAL_INCOMING_ARGS_REGNUM) = 1;
+ REGNO_POINTER_FLAG (VIRTUAL_STACK_VARS_REGNUM) = 1;
+ REGNO_POINTER_FLAG (VIRTUAL_STACK_DYNAMIC_REGNUM) = 1;
+ REGNO_POINTER_FLAG (VIRTUAL_OUTGOING_ARGS_REGNUM) = 1;
+
+#ifdef INIT_EXPANDERS
+ INIT_EXPANDERS;
+#endif
+}
+
+/* Create some permanent unique rtl objects shared between all functions.
+ LINE_NUMBERS is nonzero if line numbers are to be generated. */
+
+void
+init_emit_once (line_numbers)
+ int line_numbers;
+{
+ int i;
+ enum machine_mode mode;
+
+ no_line_numbers = ! line_numbers;
+
+ sequence_stack = NULL;
+
+ /* Compute the word and byte modes. */
+
+ byte_mode = VOIDmode;
+ word_mode = VOIDmode;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ if (GET_MODE_BITSIZE (mode) == BITS_PER_UNIT
+ && byte_mode == VOIDmode)
+ byte_mode = mode;
+
+ if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD
+ && word_mode == VOIDmode)
+ word_mode = mode;
+ }
+
+ /* Create the unique rtx's for certain rtx codes and operand values. */
+
+ pc_rtx = gen_rtx (PC, VOIDmode);
+ cc0_rtx = gen_rtx (CC0, VOIDmode);
+
+ /* Don't use gen_rtx here since gen_rtx in this case
+ tries to use these variables. */
+ for (i = - MAX_SAVED_CONST_INT; i <= MAX_SAVED_CONST_INT; i++)
+ {
+ const_int_rtx[i + MAX_SAVED_CONST_INT] = rtx_alloc (CONST_INT);
+ PUT_MODE (const_int_rtx[i + MAX_SAVED_CONST_INT], VOIDmode);
+ INTVAL (const_int_rtx[i + MAX_SAVED_CONST_INT]) = i;
+ }
+
+ /* These four calls obtain some of the rtx expressions made above. */
+ const0_rtx = GEN_INT (0);
+ const1_rtx = GEN_INT (1);
+ const2_rtx = GEN_INT (2);
+ constm1_rtx = GEN_INT (-1);
+
+ /* This will usually be one of the above constants, but may be a new rtx. */
+ const_true_rtx = GEN_INT (STORE_FLAG_VALUE);
+
+ dconst0 = REAL_VALUE_ATOF ("0", DFmode);
+ dconst1 = REAL_VALUE_ATOF ("1", DFmode);
+ dconst2 = REAL_VALUE_ATOF ("2", DFmode);
+ dconstm1 = REAL_VALUE_ATOF ("-1", DFmode);
+
+ for (i = 0; i <= 2; i++)
+ {
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ rtx tem = rtx_alloc (CONST_DOUBLE);
+ union real_extract u;
+
+ bzero ((char *) &u, sizeof u); /* Zero any holes in a structure. */
+ u.d = i == 0 ? dconst0 : i == 1 ? dconst1 : dconst2;
+
+ bcopy ((char *) &u, (char *) &CONST_DOUBLE_LOW (tem), sizeof u);
+ CONST_DOUBLE_MEM (tem) = cc0_rtx;
+ PUT_MODE (tem, mode);
+
+ const_tiny_rtx[i][(int) mode] = tem;
+ }
+
+ const_tiny_rtx[i][(int) VOIDmode] = GEN_INT (i);
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ const_tiny_rtx[i][(int) mode] = GEN_INT (i);
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_PARTIAL_INT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ const_tiny_rtx[i][(int) mode] = GEN_INT (i);
+ }
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_CC); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ const_tiny_rtx[0][(int) mode] = const0_rtx;
+
+ stack_pointer_rtx = gen_rtx (REG, Pmode, STACK_POINTER_REGNUM);
+ frame_pointer_rtx = gen_rtx (REG, Pmode, FRAME_POINTER_REGNUM);
+
+ if (HARD_FRAME_POINTER_REGNUM == FRAME_POINTER_REGNUM)
+ hard_frame_pointer_rtx = frame_pointer_rtx;
+ else
+ hard_frame_pointer_rtx = gen_rtx (REG, Pmode, HARD_FRAME_POINTER_REGNUM);
+
+ if (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM)
+ arg_pointer_rtx = frame_pointer_rtx;
+ else if (HARD_FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM)
+ arg_pointer_rtx = hard_frame_pointer_rtx;
+ else if (STACK_POINTER_REGNUM == ARG_POINTER_REGNUM)
+ arg_pointer_rtx = stack_pointer_rtx;
+ else
+ arg_pointer_rtx = gen_rtx (REG, Pmode, ARG_POINTER_REGNUM);
+
+ /* Create the virtual registers. Do so here since the following objects
+ might reference them. */
+
+ virtual_incoming_args_rtx = gen_rtx (REG, Pmode,
+ VIRTUAL_INCOMING_ARGS_REGNUM);
+ virtual_stack_vars_rtx = gen_rtx (REG, Pmode,
+ VIRTUAL_STACK_VARS_REGNUM);
+ virtual_stack_dynamic_rtx = gen_rtx (REG, Pmode,
+ VIRTUAL_STACK_DYNAMIC_REGNUM);
+ virtual_outgoing_args_rtx = gen_rtx (REG, Pmode,
+ VIRTUAL_OUTGOING_ARGS_REGNUM);
+
+#ifdef STRUCT_VALUE
+ struct_value_rtx = STRUCT_VALUE;
+#else
+ struct_value_rtx = gen_rtx (REG, Pmode, STRUCT_VALUE_REGNUM);
+#endif
+
+#ifdef STRUCT_VALUE_INCOMING
+ struct_value_incoming_rtx = STRUCT_VALUE_INCOMING;
+#else
+#ifdef STRUCT_VALUE_INCOMING_REGNUM
+ struct_value_incoming_rtx
+ = gen_rtx (REG, Pmode, STRUCT_VALUE_INCOMING_REGNUM);
+#else
+ struct_value_incoming_rtx = struct_value_rtx;
+#endif
+#endif
+
+#ifdef STATIC_CHAIN_REGNUM
+ static_chain_rtx = gen_rtx (REG, Pmode, STATIC_CHAIN_REGNUM);
+
+#ifdef STATIC_CHAIN_INCOMING_REGNUM
+ if (STATIC_CHAIN_INCOMING_REGNUM != STATIC_CHAIN_REGNUM)
+ static_chain_incoming_rtx = gen_rtx (REG, Pmode, STATIC_CHAIN_INCOMING_REGNUM);
+ else
+#endif
+ static_chain_incoming_rtx = static_chain_rtx;
+#endif
+
+#ifdef STATIC_CHAIN
+ static_chain_rtx = STATIC_CHAIN;
+
+#ifdef STATIC_CHAIN_INCOMING
+ static_chain_incoming_rtx = STATIC_CHAIN_INCOMING;
+#else
+ static_chain_incoming_rtx = static_chain_rtx;
+#endif
+#endif
+
+#ifdef PIC_OFFSET_TABLE_REGNUM
+ pic_offset_table_rtx = gen_rtx (REG, Pmode, PIC_OFFSET_TABLE_REGNUM);
+#endif
+}
diff --git a/gnu/usr.bin/cc/cc_int/explow.c b/gnu/usr.bin/cc/cc_int/explow.c
new file mode 100644
index 0000000..b72e468
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/explow.c
@@ -0,0 +1,1152 @@
+/* Subroutines for manipulating rtx's in semantically interesting ways.
+ Copyright (C) 1987, 1991, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "expr.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+
+/* Return an rtx for the sum of X and the integer C.
+
+ This function should be used via the `plus_constant' macro. */
+
+rtx
+plus_constant_wide (x, c)
+ register rtx x;
+ register HOST_WIDE_INT c;
+{
+ register RTX_CODE code;
+ register enum machine_mode mode;
+ register rtx tem;
+ int all_constant = 0;
+
+ if (c == 0)
+ return x;
+
+ restart:
+
+ code = GET_CODE (x);
+ mode = GET_MODE (x);
+ switch (code)
+ {
+ case CONST_INT:
+ return GEN_INT (INTVAL (x) + c);
+
+ case CONST_DOUBLE:
+ {
+ HOST_WIDE_INT l1 = CONST_DOUBLE_LOW (x);
+ HOST_WIDE_INT h1 = CONST_DOUBLE_HIGH (x);
+ HOST_WIDE_INT l2 = c;
+ HOST_WIDE_INT h2 = c < 0 ? ~0 : 0;
+ HOST_WIDE_INT lv, hv;
+
+ add_double (l1, h1, l2, h2, &lv, &hv);
+
+ return immed_double_const (lv, hv, VOIDmode);
+ }
+
+ case MEM:
+ /* If this is a reference to the constant pool, try replacing it with
+ a reference to a new constant. If the resulting address isn't
+ valid, don't return it because we have no way to validize it. */
+ if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ {
+ tem
+ = force_const_mem (GET_MODE (x),
+ plus_constant (get_pool_constant (XEXP (x, 0)),
+ c));
+ if (memory_address_p (GET_MODE (tem), XEXP (tem, 0)))
+ return tem;
+ }
+ break;
+
+ case CONST:
+ /* If adding to something entirely constant, set a flag
+ so that we can add a CONST around the result. */
+ x = XEXP (x, 0);
+ all_constant = 1;
+ goto restart;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ all_constant = 1;
+ break;
+
+ case PLUS:
+ /* The interesting case is adding the integer to a sum.
+ Look for constant term in the sum and combine
+ with C. For an integer constant term, we make a combined
+ integer. For a constant term that is not an explicit integer,
+ we cannot really combine, but group them together anyway.
+
+ Use a recursive call in case the remaining operand is something
+ that we handle specially, such as a SYMBOL_REF. */
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return plus_constant (XEXP (x, 0), c + INTVAL (XEXP (x, 1)));
+ else if (CONSTANT_P (XEXP (x, 0)))
+ return gen_rtx (PLUS, mode,
+ plus_constant (XEXP (x, 0), c),
+ XEXP (x, 1));
+ else if (CONSTANT_P (XEXP (x, 1)))
+ return gen_rtx (PLUS, mode,
+ XEXP (x, 0),
+ plus_constant (XEXP (x, 1), c));
+ }
+
+ if (c != 0)
+ x = gen_rtx (PLUS, mode, x, GEN_INT (c));
+
+ if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
+ return x;
+ else if (all_constant)
+ return gen_rtx (CONST, mode, x);
+ else
+ return x;
+}
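+
+/* Folding examples via the `plus_constant' macro (R is a hypothetical
+   Pmode REG):  */
+#if 0
+  plus_constant (GEN_INT (4), 3);	/* => (const_int 7) */
+  plus_constant (gen_rtx (PLUS, Pmode, r, GEN_INT (4)), 3);
+					/* => (plus r (const_int 7)) */
+#endif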
+
+/* This is the same as `plus_constant', except that it handles LO_SUM.
+
+ This function should be used via the `plus_constant_for_output' macro. */
+
+rtx
+plus_constant_for_output_wide (x, c)
+ register rtx x;
+ register HOST_WIDE_INT c;
+{
+ register RTX_CODE code = GET_CODE (x);
+ register enum machine_mode mode = GET_MODE (x);
+ int all_constant = 0;
+
+ if (GET_CODE (x) == LO_SUM)
+ return gen_rtx (LO_SUM, mode, XEXP (x, 0),
+ plus_constant_for_output (XEXP (x, 1), c));
+
+ else
+ return plus_constant (x, c);
+}
+
+/* If X is a sum, return a new sum like X but lacking any constant terms.
+ Add all the removed constant terms into *CONSTPTR.
+ X itself is not altered. The result != X if and only if
+ it is not isomorphic to X. */
+
+rtx
+eliminate_constant_term (x, constptr)
+ rtx x;
+ rtx *constptr;
+{
+ register rtx x0, x1;
+ rtx tem;
+
+ if (GET_CODE (x) != PLUS)
+ return x;
+
+ /* First handle constants appearing at this level explicitly. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && 0 != (tem = simplify_binary_operation (PLUS, GET_MODE (x), *constptr,
+ XEXP (x, 1)))
+ && GET_CODE (tem) == CONST_INT)
+ {
+ *constptr = tem;
+ return eliminate_constant_term (XEXP (x, 0), constptr);
+ }
+
+ tem = const0_rtx;
+ x0 = eliminate_constant_term (XEXP (x, 0), &tem);
+ x1 = eliminate_constant_term (XEXP (x, 1), &tem);
+ if ((x1 != XEXP (x, 1) || x0 != XEXP (x, 0))
+ && 0 != (tem = simplify_binary_operation (PLUS, GET_MODE (x),
+ *constptr, tem))
+ && GET_CODE (tem) == CONST_INT)
+ {
+ *constptr = tem;
+ return gen_rtx (PLUS, GET_MODE (x), x0, x1);
+ }
+
+ return x;
+}
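+
+/* Worked example (R is a hypothetical REG): both constant terms of the
+   nested sum are folded into *CONSTPTR and the stripped sum comes back.  */
+#if 0
+  rtx c = const0_rtx;
+  rtx y = eliminate_constant_term
+    (gen_rtx (PLUS, Pmode,
+	      gen_rtx (PLUS, Pmode, r, GEN_INT (8)), GEN_INT (-3)),
+     &c);
+  /* Now y == r and c == (const_int 5).  */
+#endif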
+
+/* Returns the insn that next references REG after INSN, or 0
+ if REG is clobbered before next referenced or we cannot find
+ an insn that references REG in a straight-line piece of code. */
+
+rtx
+find_next_ref (reg, insn)
+ rtx reg;
+ rtx insn;
+{
+ rtx next;
+
+ for (insn = NEXT_INSN (insn); insn; insn = next)
+ {
+ next = NEXT_INSN (insn);
+ if (GET_CODE (insn) == NOTE)
+ continue;
+ if (GET_CODE (insn) == CODE_LABEL
+ || GET_CODE (insn) == BARRIER)
+ return 0;
+ if (GET_CODE (insn) == INSN
+ || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ {
+ if (reg_set_p (reg, insn))
+ return 0;
+ if (reg_mentioned_p (reg, PATTERN (insn)))
+ return insn;
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ if (simplejump_p (insn))
+ next = JUMP_LABEL (insn);
+ else
+ return 0;
+ }
+ if (GET_CODE (insn) == CALL_INSN
+ && REGNO (reg) < FIRST_PSEUDO_REGISTER
+ && call_used_regs[REGNO (reg)])
+ return 0;
+ }
+ else
+ abort ();
+ }
+ return 0;
+}
+
+/* Return an rtx for the size in bytes of the value of EXP. */
+
+rtx
+expr_size (exp)
+ tree exp;
+{
+ tree size = size_in_bytes (TREE_TYPE (exp));
+
+ if (TREE_CODE (size) != INTEGER_CST
+ && contains_placeholder_p (size))
+ size = build (WITH_RECORD_EXPR, sizetype, size, exp);
+
+ return expand_expr (size, NULL_RTX, TYPE_MODE (sizetype), 0);
+}
+
+/* Return a copy of X in which all memory references
+ and all constants that involve symbol refs
+ have been replaced with new temporary registers.
+ Also emit code to load the memory locations and constants
+ into those registers.
+
+ If X contains no such constants or memory references,
+ X itself (not a copy) is returned.
+
+ If a constant is found in the address that is not a legitimate constant
+ in an insn, it is left alone in the hope that it might be valid in the
+ address.
+
+ X may contain no arithmetic except addition, subtraction and multiplication.
+ Values returned by expand_expr with 1 for sum_ok fit this constraint. */
+
+static rtx
+break_out_memory_refs (x)
+ register rtx x;
+{
+ if (GET_CODE (x) == MEM
+ || (CONSTANT_P (x) && CONSTANT_ADDRESS_P (x)
+ && GET_MODE (x) != VOIDmode))
+ x = force_reg (GET_MODE (x), x);
+ else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
+ || GET_CODE (x) == MULT)
+ {
+ register rtx op0 = break_out_memory_refs (XEXP (x, 0));
+ register rtx op1 = break_out_memory_refs (XEXP (x, 1));
+
+ if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
+ x = gen_rtx (GET_CODE (x), Pmode, op0, op1);
+ }
+
+ return x;
+}
+
+/* Given a memory address or facsimile X, construct a new address,
+ currently equivalent, that is stable: future stores won't change it.
+
+ X must be composed of constants, register and memory references
+ combined with addition, subtraction and multiplication:
+ in other words, just what you can get from expand_expr if sum_ok is 1.
+
+ Works by making copies of all regs and memory locations used
+ by X and combining them the same way X does.
+ You could also stabilize the reference to this address
+ by copying the address to a register with copy_to_reg;
+ but then you wouldn't get indexed addressing in the reference. */
+
+rtx
+copy_all_regs (x)
+ register rtx x;
+{
+ if (GET_CODE (x) == REG)
+ {
+ if (REGNO (x) != FRAME_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && REGNO (x) != HARD_FRAME_POINTER_REGNUM
+#endif
+ )
+ x = copy_to_reg (x);
+ }
+ else if (GET_CODE (x) == MEM)
+ x = copy_to_reg (x);
+ else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
+ || GET_CODE (x) == MULT)
+ {
+ register rtx op0 = copy_all_regs (XEXP (x, 0));
+ register rtx op1 = copy_all_regs (XEXP (x, 1));
+ if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
+ x = gen_rtx (GET_CODE (x), Pmode, op0, op1);
+ }
+ return x;
+}
+
+/* Return something equivalent to X but valid as a memory address
+ for something of mode MODE. When X is not itself valid, this
+ works by copying X or subexpressions of it into registers. */
+
+rtx
+memory_address (mode, x)
+ enum machine_mode mode;
+ register rtx x;
+{
+ register rtx oldx = x;
+
+  /* By passing constant addresses through registers
+     we get a chance to cse them.  */
+ if (! cse_not_expected && CONSTANT_P (x) && CONSTANT_ADDRESS_P (x))
+ x = force_reg (Pmode, x);
+
+ /* Accept a QUEUED that refers to a REG
+ even though that isn't a valid address.
+ On attempting to put this in an insn we will call protect_from_queue
+ which will turn it into a REG, which is valid. */
+ else if (GET_CODE (x) == QUEUED
+ && GET_CODE (QUEUED_VAR (x)) == REG)
+ ;
+
+ /* We get better cse by rejecting indirect addressing at this stage.
+ Let the combiner create indirect addresses where appropriate.
+ For now, generate the code so that the subexpressions useful to share
+ are visible. But not if cse won't be done! */
+ else
+ {
+ if (! cse_not_expected && GET_CODE (x) != REG)
+ x = break_out_memory_refs (x);
+
+ /* At this point, any valid address is accepted. */
+ GO_IF_LEGITIMATE_ADDRESS (mode, x, win);
+
+ /* If it was valid before but breaking out memory refs invalidated it,
+ use it the old way. */
+ if (memory_address_p (mode, oldx))
+ goto win2;
+
+ /* Perform machine-dependent transformations on X
+ in certain cases. This is not necessary since the code
+ below can handle all possible cases, but machine-dependent
+ transformations can make better code. */
+ LEGITIMIZE_ADDRESS (x, oldx, mode, win);
+
+ /* PLUS and MULT can appear in special ways
+ as the result of attempts to make an address usable for indexing.
+ Usually they are dealt with by calling force_operand, below.
+ But a sum containing constant terms is special
+ if removing them makes the sum a valid address:
+ then we generate that address in a register
+ and index off of it. We do this because it often makes
+ shorter code, and because the addresses thus generated
+ in registers often become common subexpressions. */
+ if (GET_CODE (x) == PLUS)
+ {
+ rtx constant_term = const0_rtx;
+ rtx y = eliminate_constant_term (x, &constant_term);
+ if (constant_term == const0_rtx
+ || ! memory_address_p (mode, y))
+ x = force_operand (x, NULL_RTX);
+ else
+ {
+ y = gen_rtx (PLUS, GET_MODE (x), copy_to_reg (y), constant_term);
+ if (! memory_address_p (mode, y))
+ x = force_operand (x, NULL_RTX);
+ else
+ x = y;
+ }
+ }
+
+ if (GET_CODE (x) == MULT || GET_CODE (x) == MINUS)
+ x = force_operand (x, NULL_RTX);
+
+ /* If we have a register that's an invalid address,
+ it must be a hard reg of the wrong class. Copy it to a pseudo. */
+ else if (GET_CODE (x) == REG)
+ x = copy_to_reg (x);
+
+ /* Last resort: copy the value to a register, since
+ the register is a valid address. */
+ else
+ x = force_reg (Pmode, x);
+
+ goto done;
+
+ win2:
+ x = oldx;
+ win:
+ if (flag_force_addr && ! cse_not_expected && GET_CODE (x) != REG
+ /* Don't copy an addr via a reg if it is one of our stack slots. */
+ && ! (GET_CODE (x) == PLUS
+ && (XEXP (x, 0) == virtual_stack_vars_rtx
+ || XEXP (x, 0) == virtual_incoming_args_rtx)))
+ {
+ if (general_operand (x, Pmode))
+ x = force_reg (Pmode, x);
+ else
+ x = force_operand (x, NULL_RTX);
+ }
+ }
+
+ done:
+
+ /* If we didn't change the address, we are done. Otherwise, mark
+ a reg as a pointer if we have REG or REG + CONST_INT. */
+ if (oldx == x)
+ return x;
+ else if (GET_CODE (x) == REG)
+ mark_reg_pointer (x);
+ else if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ mark_reg_pointer (XEXP (x, 0));
+
+  /* OLDX may have been the address of a temporary slot.  Update the
+     address to indicate that X is now used.  */
+ update_temp_slot_address (oldx, x);
+
+ return x;
+}
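+
+/* Usage sketch: legitimize an address before wrapping it in a MEM.  ADDR
+   is hypothetical and may be any sum or product that expand_expr builds;
+   what comes back satisfies GO_IF_LEGITIMATE_ADDRESS for the given mode,
+   with any offending subexpressions copied into pseudos.  */
+#if 0
+  addr = memory_address (SImode, addr);
+  mem = gen_rtx (MEM, SImode, addr);
+#endif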
+
+/* Like `memory_address' but pretend `flag_force_addr' is 0. */
+
+rtx
+memory_address_noforce (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ int ambient_force_addr = flag_force_addr;
+ rtx val;
+
+ flag_force_addr = 0;
+ val = memory_address (mode, x);
+ flag_force_addr = ambient_force_addr;
+ return val;
+}
+
+/* Convert a mem ref into one with a valid memory address.
+ Pass through anything else unchanged. */
+
+rtx
+validize_mem (ref)
+ rtx ref;
+{
+ if (GET_CODE (ref) != MEM)
+ return ref;
+ if (memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
+ return ref;
+ /* Don't alter REF itself, since that is probably a stack slot. */
+ return change_address (ref, GET_MODE (ref), XEXP (ref, 0));
+}
+
+/* Return a modified copy of X with its memory address copied
+ into a temporary register to protect it from side effects.
+ If X is not a MEM, it is returned unchanged (and not copied).
+ Perhaps even if it is a MEM, if there is no need to change it. */
+
+rtx
+stabilize (x)
+ rtx x;
+{
+ register rtx addr;
+ if (GET_CODE (x) != MEM)
+ return x;
+ addr = XEXP (x, 0);
+ if (rtx_unstable_p (addr))
+ {
+ rtx temp = copy_all_regs (addr);
+ rtx mem;
+ if (GET_CODE (temp) != REG)
+ temp = copy_to_reg (temp);
+ mem = gen_rtx (MEM, GET_MODE (x), temp);
+
+ /* Mark returned memref with in_struct if it's in an array or
+ structure. Copy const and volatile from original memref. */
+
+ MEM_IN_STRUCT_P (mem) = MEM_IN_STRUCT_P (x) || GET_CODE (addr) == PLUS;
+ RTX_UNCHANGING_P (mem) = RTX_UNCHANGING_P (x);
+ MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (x);
+ return mem;
+ }
+ return x;
+}
+
+/* Copy the value or contents of X to a new temp reg and return that reg. */
+
+rtx
+copy_to_reg (x)
+ rtx x;
+{
+ register rtx temp = gen_reg_rtx (GET_MODE (x));
+
+ /* If not an operand, must be an address with PLUS and MULT so
+ do the computation. */
+ if (! general_operand (x, VOIDmode))
+ x = force_operand (x, temp);
+
+ if (x != temp)
+ emit_move_insn (temp, x);
+
+ return temp;
+}
+
+/* Like copy_to_reg but always give the new register mode Pmode
+ in case X is a constant. */
+
+rtx
+copy_addr_to_reg (x)
+ rtx x;
+{
+ return copy_to_mode_reg (Pmode, x);
+}
+
+/* Like copy_to_reg but always give the new register mode MODE
+ in case X is a constant. */
+
+rtx
+copy_to_mode_reg (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ register rtx temp = gen_reg_rtx (mode);
+
+ /* If not an operand, must be an address with PLUS and MULT so
+ do the computation. */
+ if (! general_operand (x, VOIDmode))
+ x = force_operand (x, temp);
+
+ if (GET_MODE (x) != mode && GET_MODE (x) != VOIDmode)
+ abort ();
+ if (x != temp)
+ emit_move_insn (temp, x);
+ return temp;
+}
+
+/* Load X into a register if it is not already one.
+ Use mode MODE for the register.
+ X should be valid for mode MODE, but it may be a constant which
+ is valid for all integer modes; that's why caller must specify MODE.
+
+ The caller must not alter the value in the register we return,
+ since we mark it as a "constant" register. */
+
+rtx
+force_reg (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ register rtx temp, insn, set;
+
+ if (GET_CODE (x) == REG)
+ return x;
+ temp = gen_reg_rtx (mode);
+ insn = emit_move_insn (temp, x);
+
+ /* Let optimizers know that TEMP's value never changes
+ and that X can be substituted for it. Don't get confused
+ if INSN set something else (such as a SUBREG of TEMP). */
+ if (CONSTANT_P (x)
+ && (set = single_set (insn)) != 0
+ && SET_DEST (set) == temp)
+ {
+ rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+
+ if (note)
+ XEXP (note, 0) = x;
+ else
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL, x, REG_NOTES (insn));
+ }
+ return temp;
+}
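+
+/* Example: loading a constant through force_reg leaves a REG_EQUAL note
+   on the move insn, so cse and loop may later substitute the constant
+   for the pseudo.  */
+#if 0
+  rtx r = force_reg (Pmode, GEN_INT (1000));
+  /* The move now carries (expr_list:REG_EQUAL (const_int 1000) ...).  */
+#endif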
+
+/* If X is a memory ref, copy its contents to a new temp reg and return
+ that reg. Otherwise, return X. */
+
+rtx
+force_not_mem (x)
+ rtx x;
+{
+ register rtx temp;
+ if (GET_CODE (x) != MEM || GET_MODE (x) == BLKmode)
+ return x;
+ temp = gen_reg_rtx (GET_MODE (x));
+ emit_move_insn (temp, x);
+ return temp;
+}
+
+/* Copy X to TARGET (if it's nonzero and a reg)
+ or to a new temp reg and return that reg.
+ MODE is the mode to use for X in case it is a constant. */
+
+rtx
+copy_to_suggested_reg (x, target, mode)
+ rtx x, target;
+ enum machine_mode mode;
+{
+ register rtx temp;
+
+ if (target && GET_CODE (target) == REG)
+ temp = target;
+ else
+ temp = gen_reg_rtx (mode);
+
+ emit_move_insn (temp, x);
+ return temp;
+}
+
+/* Return the mode to use to store a scalar of TYPE and MODE.
+ PUNSIGNEDP points to the signedness of the type and may be adjusted
+ to show what signedness to use on extension operations.
+
+ FOR_CALL is non-zero if this call is promoting args for a call. */
+
+enum machine_mode
+promote_mode (type, mode, punsignedp, for_call)
+ tree type;
+ enum machine_mode mode;
+ int *punsignedp;
+ int for_call;
+{
+ enum tree_code code = TREE_CODE (type);
+ int unsignedp = *punsignedp;
+
+#ifdef PROMOTE_FOR_CALL_ONLY
+ if (! for_call)
+ return mode;
+#endif
+
+ switch (code)
+ {
+#ifdef PROMOTE_MODE
+ case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
+ case CHAR_TYPE: case REAL_TYPE: case OFFSET_TYPE:
+ PROMOTE_MODE (mode, unsignedp, type);
+ break;
+#endif
+
+ case POINTER_TYPE:
+ break;
+ }
+
+ *punsignedp = unsignedp;
+ return mode;
+}
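+
+/* Usage sketch: on a target whose PROMOTE_MODE widens sub-word integers
+   (typical of many RISC ports), a QImode unsigned type would come back
+   as a wider mode, with *PUNSIGNEDP saying whether to zero- or
+   sign-extend.  TYPE is a hypothetical tree node.  */
+#if 0
+  int unsignedp = TREE_UNSIGNED (type);
+  enum machine_mode m
+    = promote_mode (type, TYPE_MODE (type), &unsignedp, 0);
+#endif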
+
+/* Adjust the stack pointer by ADJUST (an rtx for a number of bytes).
+ This pops when ADJUST is positive. ADJUST need not be constant. */
+
+void
+adjust_stack (adjust)
+ rtx adjust;
+{
+ rtx temp;
+ adjust = protect_from_queue (adjust, 0);
+
+ if (adjust == const0_rtx)
+ return;
+
+ temp = expand_binop (Pmode,
+#ifdef STACK_GROWS_DOWNWARD
+ add_optab,
+#else
+ sub_optab,
+#endif
+ stack_pointer_rtx, adjust, stack_pointer_rtx, 0,
+ OPTAB_LIB_WIDEN);
+
+ if (temp != stack_pointer_rtx)
+ emit_move_insn (stack_pointer_rtx, temp);
+}
+
+/* Adjust the stack pointer by minus ADJUST (an rtx for a number of bytes).
+ This pushes when ADJUST is positive. ADJUST need not be constant. */
+
+void
+anti_adjust_stack (adjust)
+ rtx adjust;
+{
+ rtx temp;
+ adjust = protect_from_queue (adjust, 0);
+
+ if (adjust == const0_rtx)
+ return;
+
+ temp = expand_binop (Pmode,
+#ifdef STACK_GROWS_DOWNWARD
+ sub_optab,
+#else
+ add_optab,
+#endif
+ stack_pointer_rtx, adjust, stack_pointer_rtx, 0,
+ OPTAB_LIB_WIDEN);
+
+ if (temp != stack_pointer_rtx)
+ emit_move_insn (stack_pointer_rtx, temp);
+}
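+
+/* Example: reserve 16 bytes of stack space.  On a STACK_GROWS_DOWNWARD
+   target this expands to sp := sp - 16; a later adjust_stack with the
+   same amount releases it.  */
+#if 0
+  anti_adjust_stack (GEN_INT (16));
+  /* ... use the space ... */
+  adjust_stack (GEN_INT (16));
+#endif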
+
+/* Round the size of a block to be pushed up to the boundary required
+ by this machine. SIZE is the desired size, which need not be constant. */
+
+rtx
+round_push (size)
+ rtx size;
+{
+#ifdef STACK_BOUNDARY
+ int align = STACK_BOUNDARY / BITS_PER_UNIT;
+ if (align == 1)
+ return size;
+ if (GET_CODE (size) == CONST_INT)
+ {
+ int new = (INTVAL (size) + align - 1) / align * align;
+ if (INTVAL (size) != new)
+ size = GEN_INT (new);
+ }
+ else
+ {
+      /* CEIL_DIV_EXPR needs to worry about the addition overflowing,
+	 but we know it can't.  So do the addition ourselves, then use
+	 TRUNC_DIV_EXPR.  */
+ size = expand_binop (Pmode, add_optab, size, GEN_INT (align - 1),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ size = expand_divmod (0, TRUNC_DIV_EXPR, Pmode, size, GEN_INT (align),
+ NULL_RTX, 1);
+ size = expand_mult (Pmode, size, GEN_INT (align), NULL_RTX, 1);
+ }
+#endif /* STACK_BOUNDARY */
+ return size;
+}
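+
+/* Worked example, assuming STACK_BOUNDARY == 64 so that align == 8:
+   (13 + 8 - 1) / 8 * 8 == 16, so a constant 13-byte push rounds up to
+   16 bytes.  A variable SIZE gets the same computation emitted as
+   explicit add/div/mult insns instead.  */
+#if 0
+  size = round_push (GEN_INT (13));	/* => (const_int 16) */
+#endif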
+
+/* Save the stack pointer for the purpose in SAVE_LEVEL. PSAVE is a pointer
+ to a previously-created save area. If no save area has been allocated,
+ this function will allocate one. If a save area is specified, it
+ must be of the proper mode.
+
+ The insns are emitted after insn AFTER, if nonzero, otherwise the insns
+ are emitted at the current position. */
+
+void
+emit_stack_save (save_level, psave, after)
+ enum save_level save_level;
+ rtx *psave;
+ rtx after;
+{
+ rtx sa = *psave;
+ /* The default is that we use a move insn and save in a Pmode object. */
+ rtx (*fcn) () = gen_move_insn;
+ enum machine_mode mode = Pmode;
+
+ /* See if this machine has anything special to do for this kind of save. */
+ switch (save_level)
+ {
+#ifdef HAVE_save_stack_block
+ case SAVE_BLOCK:
+ if (HAVE_save_stack_block)
+ {
+ fcn = gen_save_stack_block;
+ mode = insn_operand_mode[CODE_FOR_save_stack_block][0];
+ }
+ break;
+#endif
+#ifdef HAVE_save_stack_function
+ case SAVE_FUNCTION:
+ if (HAVE_save_stack_function)
+ {
+ fcn = gen_save_stack_function;
+ mode = insn_operand_mode[CODE_FOR_save_stack_function][0];
+ }
+ break;
+#endif
+#ifdef HAVE_save_stack_nonlocal
+ case SAVE_NONLOCAL:
+ if (HAVE_save_stack_nonlocal)
+ {
+ fcn = gen_save_stack_nonlocal;
+ mode = insn_operand_mode[(int) CODE_FOR_save_stack_nonlocal][0];
+ }
+ break;
+#endif
+ }
+
+ /* If there is no save area and we have to allocate one, do so. Otherwise
+ verify the save area is the proper mode. */
+
+ if (sa == 0)
+ {
+ if (mode != VOIDmode)
+ {
+ if (save_level == SAVE_NONLOCAL)
+ *psave = sa = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
+ else
+ *psave = sa = gen_reg_rtx (mode);
+ }
+ }
+ else
+ {
+ if (mode == VOIDmode || GET_MODE (sa) != mode)
+ abort ();
+ }
+
+ if (after)
+ {
+ rtx seq;
+
+ start_sequence ();
+ /* We must validize inside the sequence, to ensure that any instructions
+ created by the validize call also get moved to the right place. */
+ if (sa != 0)
+ sa = validize_mem (sa);
+ emit_insn (fcn (sa, stack_pointer_rtx));
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, after);
+ }
+ else
+ {
+ if (sa != 0)
+ sa = validize_mem (sa);
+ emit_insn (fcn (sa, stack_pointer_rtx));
+ }
+}
+
+/* Restore the stack pointer for the purpose in SAVE_LEVEL. SA is the save
+ area made by emit_stack_save. If it is zero, we have nothing to do.
+
+ Put any emitted insns after insn AFTER, if nonzero, otherwise at
+ current position. */
+
+void
+emit_stack_restore (save_level, sa, after)
+ enum save_level save_level;
+     rtx sa;
+     rtx after;
+{
+ /* The default is that we use a move insn. */
+ rtx (*fcn) () = gen_move_insn;
+
+ /* See if this machine has anything special to do for this kind of save. */
+ switch (save_level)
+ {
+#ifdef HAVE_restore_stack_block
+ case SAVE_BLOCK:
+ if (HAVE_restore_stack_block)
+ fcn = gen_restore_stack_block;
+ break;
+#endif
+#ifdef HAVE_restore_stack_function
+ case SAVE_FUNCTION:
+ if (HAVE_restore_stack_function)
+ fcn = gen_restore_stack_function;
+ break;
+#endif
+#ifdef HAVE_restore_stack_nonlocal
+
+ case SAVE_NONLOCAL:
+ if (HAVE_restore_stack_nonlocal)
+ fcn = gen_restore_stack_nonlocal;
+ break;
+#endif
+ }
+
+ if (sa != 0)
+ sa = validize_mem (sa);
+
+ if (after)
+ {
+ rtx seq;
+
+ start_sequence ();
+ emit_insn (fcn (stack_pointer_rtx, sa));
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, after);
+ }
+ else
+ emit_insn (fcn (stack_pointer_rtx, sa));
+}
+
+/* Return an rtx representing the address of an area of memory dynamically
+ pushed on the stack. This region of memory is always aligned to
+ a multiple of BIGGEST_ALIGNMENT.
+
+ Any required stack pointer alignment is preserved.
+
+ SIZE is an rtx representing the size of the area.
+ TARGET is a place in which the address can be placed.
+
+ KNOWN_ALIGN is the alignment (in bits) that we know SIZE has. */
+
+rtx
+allocate_dynamic_stack_space (size, target, known_align)
+ rtx size;
+ rtx target;
+ int known_align;
+{
+  /* If we're asking for zero bytes, it doesn't matter what we point
+     to since we can't dereference it.  But return a reasonable
+     address anyway.  */
+ if (size == const0_rtx)
+ return virtual_stack_dynamic_rtx;
+
+ /* Otherwise, show we're calling alloca or equivalent. */
+ current_function_calls_alloca = 1;
+
+ /* Ensure the size is in the proper mode. */
+ if (GET_MODE (size) != VOIDmode && GET_MODE (size) != Pmode)
+ size = convert_to_mode (Pmode, size, 1);
+
+ /* We will need to ensure that the address we return is aligned to
+ BIGGEST_ALIGNMENT. If STACK_DYNAMIC_OFFSET is defined, we don't
+ always know its final value at this point in the compilation (it
+ might depend on the size of the outgoing parameter lists, for
+ example), so we must align the value to be returned in that case.
+   (Note that STACK_DYNAMIC_OFFSET will have a default non-zero value if
+   STACK_POINTER_OFFSET or ACCUMULATE_OUTGOING_ARGS is defined.)
+   We must also do an alignment operation on the returned value if
+   the stack pointer alignment is less strict than BIGGEST_ALIGNMENT.
+
+ If we have to align, we must leave space in SIZE for the hole
+ that might result from the alignment operation. */
+
+#if defined (STACK_DYNAMIC_OFFSET) || defined(STACK_POINTER_OFFSET) || defined (ALLOCATE_OUTGOING_ARGS)
+#define MUST_ALIGN
+#endif
+
+#if ! defined (MUST_ALIGN) && (!defined(STACK_BOUNDARY) || STACK_BOUNDARY < BIGGEST_ALIGNMENT)
+#define MUST_ALIGN
+#endif
+
+#ifdef MUST_ALIGN
+
+#if 0 /* It turns out that under MUST_ALIGN we must always make extra
+         space, because we must always round the address up at the end;
+         we don't know whether the dynamic offset will mess up the
+         desired alignment.  */
+ /* If we have to round the address up regardless of known_align,
+ make extra space regardless, also. */
+ if (known_align % BIGGEST_ALIGNMENT != 0)
+#endif
+ {
+ if (GET_CODE (size) == CONST_INT)
+ size = GEN_INT (INTVAL (size)
+ + (BIGGEST_ALIGNMENT / BITS_PER_UNIT - 1));
+ else
+ size = expand_binop (Pmode, add_optab, size,
+ GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT - 1),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ }
+
+#endif
+
+#ifdef SETJMP_VIA_SAVE_AREA
+ /* If setjmp restores regs from a save area in the stack frame,
+ avoid clobbering the reg save area. Note that the offset of
+ virtual_incoming_args_rtx includes the preallocated stack args space.
+ It would be no problem to clobber that, but it's on the wrong side
+ of the old save area. */
+ {
+ rtx dynamic_offset
+ = expand_binop (Pmode, sub_optab, virtual_stack_dynamic_rtx,
+ stack_pointer_rtx, NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ size = expand_binop (Pmode, add_optab, size, dynamic_offset,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ }
+#endif /* SETJMP_VIA_SAVE_AREA */
+
+ /* Round the size to a multiple of the required stack alignment.
+   Since the stack is presumed to be rounded before this allocation,
+ this will maintain the required alignment.
+
+ If the stack grows downward, we could save an insn by subtracting
+ SIZE from the stack pointer and then aligning the stack pointer.
+ The problem with this is that the stack pointer may be unaligned
+ between the execution of the subtraction and alignment insns and
+ some machines do not allow this. Even on those that do, some
+ signal handlers malfunction if a signal should occur between those
+ insns. Since this is an extremely rare event, we have no reliable
+ way of knowing which systems have this problem. So we avoid even
+ momentarily mis-aligning the stack. */
+
+#ifdef STACK_BOUNDARY
+ /* If we added a variable amount to SIZE,
+ we can no longer assume it is aligned. */
+#if !defined (SETJMP_VIA_SAVE_AREA) && !defined (MUST_ALIGN)
+ if (known_align % STACK_BOUNDARY != 0)
+#endif
+ size = round_push (size);
+#endif
+
+ do_pending_stack_adjust ();
+
+ /* Don't use a TARGET that isn't a pseudo. */
+ if (target == 0 || GET_CODE (target) != REG
+ || REGNO (target) < FIRST_PSEUDO_REGISTER)
+ target = gen_reg_rtx (Pmode);
+
+ mark_reg_pointer (target);
+
+#ifndef STACK_GROWS_DOWNWARD
+ emit_move_insn (target, virtual_stack_dynamic_rtx);
+#endif
+
+ /* Perform the required allocation from the stack. Some systems do
+ this differently than simply incrementing/decrementing from the
+ stack pointer. */
+#ifdef HAVE_allocate_stack
+ if (HAVE_allocate_stack)
+ {
+ enum machine_mode mode
+ = insn_operand_mode[(int) CODE_FOR_allocate_stack][0];
+
+ if (insn_operand_predicate[(int) CODE_FOR_allocate_stack][0]
+ && ! ((*insn_operand_predicate[(int) CODE_FOR_allocate_stack][0])
+ (size, mode)))
+ size = copy_to_mode_reg (mode, size);
+
+ emit_insn (gen_allocate_stack (size));
+ }
+ else
+#endif
+ anti_adjust_stack (size);
+
+#ifdef STACK_GROWS_DOWNWARD
+ emit_move_insn (target, virtual_stack_dynamic_rtx);
+#endif
+
+#ifdef MUST_ALIGN
+#if 0 /* Even if we know the stack pointer has enough alignment,
+ there's no way to tell whether virtual_stack_dynamic_rtx shares that
+ alignment, so we still need to round the address up. */
+ if (known_align % BIGGEST_ALIGNMENT != 0)
+#endif
+ {
+      /* CEIL_DIV_EXPR needs to worry about the addition overflowing,
+	 but we know it can't.  So do the addition ourselves, then use
+	 TRUNC_DIV_EXPR.  */
+ target = expand_binop (Pmode, add_optab, target,
+ GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT - 1),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ target = expand_divmod (0, TRUNC_DIV_EXPR, Pmode, target,
+ GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT),
+ NULL_RTX, 1);
+ target = expand_mult (Pmode, target,
+ GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT),
+ NULL_RTX, 1);
+ }
+#endif
+
+ /* Some systems require a particular insn to refer to the stack
+ to make the pages exist. */
+#ifdef HAVE_probe
+ if (HAVE_probe)
+ emit_insn (gen_probe ());
+#endif
+
+ /* Record the new stack level for nonlocal gotos. */
+ if (nonlocal_goto_handler_slot != 0)
+ emit_stack_save (SAVE_NONLOCAL, &nonlocal_goto_stack_level, NULL_RTX);
+
+ return target;
+}
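+
+/* Usage sketch, roughly what expanding an alloca-style call does:
+   allocate SIZE bytes of dynamic stack and receive an address aligned
+   to BIGGEST_ALIGNMENT in a fresh pseudo.  SIZE is hypothetical, and a
+   minimal known alignment of BITS_PER_UNIT is assumed.  */
+#if 0
+  rtx addr = allocate_dynamic_stack_space (size, NULL_RTX, BITS_PER_UNIT);
+#endif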
+
+/* Return an rtx representing the register or memory location
+ in which a scalar value of data type VALTYPE
+ was returned by a function call to function FUNC.
+ FUNC is a FUNCTION_DECL node if the precise function is known,
+ otherwise 0. */
+
+rtx
+hard_function_value (valtype, func)
+ tree valtype;
+ tree func;
+{
+ return FUNCTION_VALUE (valtype, func);
+}
+
+/* Return an rtx representing the register or memory location
+ in which a scalar value of mode MODE was returned by a library call. */
+
+rtx
+hard_libcall_value (mode)
+ enum machine_mode mode;
+{
+ return LIBCALL_VALUE (mode);
+}
+
+/* Look up the tree code for a given rtx code
+ to provide the arithmetic operation for REAL_ARITHMETIC.
+ The function returns an int because the caller may not know
+ what `enum tree_code' means. */
+
+int
+rtx_to_tree_code (code)
+ enum rtx_code code;
+{
+ enum tree_code tcode;
+
+ switch (code)
+ {
+ case PLUS:
+ tcode = PLUS_EXPR;
+ break;
+ case MINUS:
+ tcode = MINUS_EXPR;
+ break;
+ case MULT:
+ tcode = MULT_EXPR;
+ break;
+ case DIV:
+ tcode = RDIV_EXPR;
+ break;
+ case SMIN:
+ tcode = MIN_EXPR;
+ break;
+ case SMAX:
+ tcode = MAX_EXPR;
+ break;
+ default:
+ tcode = LAST_AND_UNUSED_TREE_CODE;
+ break;
+ }
+ return ((int) tcode);
+}
diff --git a/gnu/usr.bin/cc/cc_int/expmed.c b/gnu/usr.bin/cc/cc_int/expmed.c
new file mode 100644
index 0000000..f3beae0
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/expmed.c
@@ -0,0 +1,3957 @@
+/* Medium-level subroutines: convert bit-field store and extract
+ and shifts, multiplies and divides to rtl instructions.
+ Copyright (C) 1987, 88, 89, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+#include "insn-config.h"
+#include "expr.h"
+#include "real.h"
+#include "recog.h"
+
+static void store_fixed_bit_field PROTO((rtx, int, int, int, rtx, int));
+static void store_split_bit_field PROTO((rtx, int, int, rtx, int));
+static rtx extract_fixed_bit_field PROTO((enum machine_mode, rtx, int,
+ int, int, rtx, int, int));
+static rtx mask_rtx PROTO((enum machine_mode, int,
+ int, int));
+static rtx lshift_value PROTO((enum machine_mode, rtx,
+ int, int));
+static rtx extract_split_bit_field PROTO((rtx, int, int, int, int));
+
+#define CEIL(x,y) (((x) + (y) - 1) / (y))
+
+/* Non-zero means divides or modulus operations are relatively cheap for
+ powers of two, so don't use branches; emit the operation instead.
+ Usually, this will mean that the MD file will emit non-branch
+ sequences. */
+
+static int sdiv_pow2_cheap, smod_pow2_cheap;
+
+#ifndef SLOW_UNALIGNED_ACCESS
+#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
+#endif
+
+/* For compilers that support multiple targets with different word sizes,
+ MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example
+ is the H8/300(H) compiler. */
+
+#ifndef MAX_BITS_PER_WORD
+#define MAX_BITS_PER_WORD BITS_PER_WORD
+#endif
+
+/* Cost of various pieces of RTL. */
+static int add_cost, negate_cost, zero_cost;
+static int shift_cost[MAX_BITS_PER_WORD];
+static int shiftadd_cost[MAX_BITS_PER_WORD];
+static int shiftsub_cost[MAX_BITS_PER_WORD];
+
+void
+init_expmed ()
+{
+ char *free_point;
+ /* This is "some random pseudo register" for purposes of calling recog
+ to see what insns exist. */
+ rtx reg = gen_rtx (REG, word_mode, 10000);
+ rtx shift_insn, shiftadd_insn, shiftsub_insn;
+ int dummy;
+ int m;
+
+ start_sequence ();
+
+ /* Since we are on the permanent obstack, we must be sure we save this
+ spot AFTER we call start_sequence, since it will reuse the rtl it
+ makes. */
+
+ free_point = (char *) oballoc (0);
+
+ zero_cost = rtx_cost (const0_rtx, 0);
+ add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);
+
+ shift_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
+ gen_rtx (ASHIFT, word_mode, reg,
+ const0_rtx)));
+
+ shiftadd_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
+ gen_rtx (PLUS, word_mode,
+ gen_rtx (MULT, word_mode,
+ reg, const0_rtx),
+ reg)));
+
+ shiftsub_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
+ gen_rtx (MINUS, word_mode,
+ gen_rtx (MULT, word_mode,
+ reg, const0_rtx),
+ reg)));
+
+ init_recog ();
+
+ shift_cost[0] = 0;
+ shiftadd_cost[0] = shiftsub_cost[0] = add_cost;
+
+ for (m = 1; m < BITS_PER_WORD; m++)
+ {
+ shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;
+
+ XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
+ if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
+ shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);
+
+ XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
+ = GEN_INT ((HOST_WIDE_INT) 1 << m);
+ if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
+ shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);
+
+ XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
+ = GEN_INT ((HOST_WIDE_INT) 1 << m);
+ if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
+ shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
+ }
+
+ negate_cost = rtx_cost (gen_rtx (NEG, word_mode, reg), SET);
+
+ sdiv_pow2_cheap
+ = (rtx_cost (gen_rtx (DIV, word_mode, reg, GEN_INT (32)), SET)
+ <= 2 * add_cost);
+ smod_pow2_cheap
+ = (rtx_cost (gen_rtx (MOD, word_mode, reg, GEN_INT (32)), SET)
+ <= 2 * add_cost);
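+
+  /* Illustrative reading of the probes above: if the target costs
+ "reg / 32" (or "reg % 32") at no more than two additions, the
+ division or modulus insn itself is considered cheap, so later
+ expansions emit it directly instead of branch sequences. */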
+
+ /* Free the objects we just allocated. */
+ end_sequence ();
+ obfree (free_point);
+}
+
+/* Return an rtx representing minus the value of X.
+ MODE is the intended mode of the result,
+ useful if X is a CONST_INT. */
+
+rtx
+negate_rtx (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ if (GET_CODE (x) == CONST_INT)
+ {
+ HOST_WIDE_INT val = - INTVAL (x);
+ if (GET_MODE_BITSIZE (mode) < HOST_BITS_PER_WIDE_INT)
+ {
+ /* Sign extend the value from the bits that are significant. */
+ if (val & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
+ val |= (HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (mode);
+ else
+ val &= ((HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (mode)) - 1;
+ }
+ return GEN_INT (val);
+ }
+ else
+ return expand_unop (GET_MODE (x), neg_optab, x, NULL_RTX, 0);
+}
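+
+/* Worked example (illustrative, assuming 8-bit QImode and a wider
+ HOST_WIDE_INT): negating the CONST_INT -128 gives val == 128, whose
+ bit 7 is set, so the sign-extension step above turns it back into
+ -128 -- i.e. QImode negation wraps modulo 2**8, as it must. */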
+
+/* Generate code to store value from rtx VALUE
+ into a bit-field within structure STR_RTX
+ containing BITSIZE bits starting at bit BITNUM.
+ FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
+ ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
+ TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */
+
+/* ??? Note that there are two different ideas here for how
+ to determine the size to count bits within, for a register.
+ One is BITS_PER_WORD, and the other is the size of operand 3
+ of the insv pattern. (The latter assumes that an n-bit machine
+ will be able to insert bit fields up to n bits wide.)
+ It isn't certain that either of these is right.
+ extract_bit_field has the same quandary. */
+
+rtx
+store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
+ rtx str_rtx;
+ register int bitsize;
+ int bitnum;
+ enum machine_mode fieldmode;
+ rtx value;
+ int align;
+ int total_size;
+{
+ int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
+ register int offset = bitnum / unit;
+ register int bitpos = bitnum % unit;
+ register rtx op0 = str_rtx;
+
+ if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
+ abort ();
+
+ /* Discount the part of the structure before the desired byte.
+ We need to know how many bytes are safe to reference after it. */
+ if (total_size >= 0)
+ total_size -= (bitpos / BIGGEST_ALIGNMENT
+ * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
+
+ while (GET_CODE (op0) == SUBREG)
+ {
+ /* The following line once was done only if WORDS_BIG_ENDIAN,
+ but I think that is a mistake. WORDS_BIG_ENDIAN is
+ meaningful at a much higher level; when structures are copied
+ between memory and regs, the higher-numbered regs
+ always get higher addresses. */
+ offset += SUBREG_WORD (op0);
+ /* We used to adjust BITPOS here, but now we do the whole adjustment
+ right after the loop. */
+ op0 = SUBREG_REG (op0);
+ }
+
+#if BYTES_BIG_ENDIAN
+ /* If OP0 is a register, BITPOS must count within a word.
+ But as we have it, it counts within whatever size OP0 now has.
+ On a bigendian machine, these are not the same, so convert. */
+ if (GET_CODE (op0) != MEM && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
+ bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
+#endif
+
+ value = protect_from_queue (value, 0);
+
+ if (flag_force_mem)
+ value = force_not_mem (value);
+
+ /* Note that the adjustment of BITPOS above has no effect on whether
+ BITPOS is 0 in a REG bigger than a word. */
+ if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
+ && (GET_CODE (op0) != MEM
+ || ! SLOW_UNALIGNED_ACCESS
+ || (offset * BITS_PER_UNIT % bitsize == 0
+ && align % GET_MODE_SIZE (fieldmode) == 0))
+ && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
+ {
+ /* Storing in a full-word or multi-word field in a register
+ can be done with just SUBREG. */
+ if (GET_MODE (op0) != fieldmode)
+ {
+ if (GET_CODE (op0) == REG)
+ op0 = gen_rtx (SUBREG, fieldmode, op0, offset);
+ else
+ op0 = change_address (op0, fieldmode,
+ plus_constant (XEXP (op0, 0), offset));
+ }
+ emit_move_insn (op0, value);
+ return value;
+ }
+
+ /* Storing an lsb-aligned field in a register
+ can be done with a movestrict instruction. */
+
+ if (GET_CODE (op0) != MEM
+#if BYTES_BIG_ENDIAN
+ && bitpos + bitsize == unit
+#else
+ && bitpos == 0
+#endif
+ && bitsize == GET_MODE_BITSIZE (fieldmode)
+ && (GET_MODE (op0) == fieldmode
+ || (movstrict_optab->handlers[(int) fieldmode].insn_code
+ != CODE_FOR_nothing)))
+ {
+ /* Get appropriate low part of the value being stored. */
+ if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
+ value = gen_lowpart (fieldmode, value);
+ else if (!(GET_CODE (value) == SYMBOL_REF
+ || GET_CODE (value) == LABEL_REF
+ || GET_CODE (value) == CONST))
+ value = convert_to_mode (fieldmode, value, 0);
+
+ if (GET_MODE (op0) == fieldmode)
+ emit_move_insn (op0, value);
+ else
+ {
+ int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
+ if(! (*insn_operand_predicate[icode][1]) (value, fieldmode))
+ value = copy_to_mode_reg (fieldmode, value);
+ emit_insn (GEN_FCN (icode)
+ (gen_rtx (SUBREG, fieldmode, op0, offset), value));
+ }
+ return value;
+ }
+
+ /* Handle fields bigger than a word. */
+
+ if (bitsize > BITS_PER_WORD)
+ {
+ /* Here we transfer the words of the field
+ in the order least significant first.
+ This is because the most significant word is the one which may
+ be less than full. */
+
+ int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
+ int i;
+
+ /* This is the mode we must force value to, so that there will be enough
+ subwords to extract. Note that fieldmode will often (always?) be
+ VOIDmode, because that is what store_field uses to indicate that this
+ is a bit field, but passing VOIDmode to operand_subword_force will
+ result in an abort. */
+ fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);
+
+ for (i = 0; i < nwords; i++)
+ {
+ /* If I is 0, use the low-order word in both field and target;
+ if I is 1, use the next to lowest word; and so on. */
+ int wordnum = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
+ int bit_offset = (WORDS_BIG_ENDIAN
+ ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
+ : i * BITS_PER_WORD);
+ store_bit_field (op0, MIN (BITS_PER_WORD,
+ bitsize - i * BITS_PER_WORD),
+ bitnum + bit_offset, word_mode,
+ operand_subword_force (value, wordnum,
+ (GET_MODE (value) == VOIDmode
+ ? fieldmode
+ : GET_MODE (value))),
+ align, total_size);
+ }
+ return value;
+ }
+
+  /* From here on we can assume that the field to be stored in
+ fits within a single word, since it is shorter than a word. */
+
+ /* OFFSET is the number of words or bytes (UNIT says which)
+ from STR_RTX to the first word or byte containing part of the field. */
+
+ if (GET_CODE (op0) == REG)
+ {
+ if (offset != 0
+ || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
+ op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
+ op0, offset);
+ offset = 0;
+ }
+ else
+ {
+ op0 = protect_from_queue (op0, 1);
+ }
+
+ /* If VALUE is a floating-point mode, access it as an integer of the
+ corresponding size. This can occur on a machine with 64 bit registers
+ that uses SFmode for float. This can also occur for unaligned float
+ structure fields. */
+ if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
+ {
+ if (GET_CODE (value) != REG)
+ value = copy_to_reg (value);
+ value = gen_rtx (SUBREG, word_mode, value, 0);
+ }
+
+ /* Now OFFSET is nonzero only if OP0 is memory
+ and is therefore always measured in bytes. */
+
+#ifdef HAVE_insv
+ if (HAVE_insv
+ && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
+ /* Ensure insv's size is wide enough for this field. */
+ && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3])
+ >= bitsize))
+ {
+ int xbitpos = bitpos;
+ rtx value1;
+ rtx xop0 = op0;
+ rtx last = get_last_insn ();
+ rtx pat;
+ enum machine_mode maxmode
+ = insn_operand_mode[(int) CODE_FOR_insv][3];
+
+ int save_volatile_ok = volatile_ok;
+ volatile_ok = 1;
+
+ /* If this machine's insv can only insert into a register, or if we
+ are to force MEMs into a register, copy OP0 into a register and
+ save it back later. */
+ if (GET_CODE (op0) == MEM
+ && (flag_force_mem
+ || ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
+ (op0, VOIDmode))))
+ {
+ rtx tempreg;
+ enum machine_mode bestmode;
+
+ /* Get the mode to use for inserting into this field. If OP0 is
+ BLKmode, get the smallest mode consistent with the alignment. If
+ OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
+ mode. Otherwise, use the smallest mode containing the field. */
+
+ if (GET_MODE (op0) == BLKmode
+ || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
+ bestmode
+ = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
+ MEM_VOLATILE_P (op0));
+ else
+ bestmode = GET_MODE (op0);
+
+ if (bestmode == VOIDmode
+ || (STRICT_ALIGNMENT && GET_MODE_SIZE (bestmode) > align))
+ goto insv_loses;
+
+ /* Adjust address to point to the containing unit of that mode. */
+ unit = GET_MODE_BITSIZE (bestmode);
+ /* Compute offset as multiple of this unit, counting in bytes. */
+ offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
+ bitpos = bitnum % unit;
+ op0 = change_address (op0, bestmode,
+ plus_constant (XEXP (op0, 0), offset));
+
+ /* Fetch that unit, store the bitfield in it, then store the unit. */
+ tempreg = copy_to_reg (op0);
+ store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
+ align, total_size);
+ emit_move_insn (op0, tempreg);
+ return value;
+ }
+ volatile_ok = save_volatile_ok;
+
+ /* Add OFFSET into OP0's address. */
+ if (GET_CODE (xop0) == MEM)
+ xop0 = change_address (xop0, byte_mode,
+ plus_constant (XEXP (xop0, 0), offset));
+
+ /* If xop0 is a register, we need it in MAXMODE
+ to make it acceptable to the format of insv. */
+ if (GET_CODE (xop0) == SUBREG)
+ /* We can't just change the mode, because this might clobber op0,
+ and we will need the original value of op0 if insv fails. */
+ xop0 = gen_rtx (SUBREG, maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
+ if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
+ xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);
+
+ /* On big-endian machines, we count bits from the most significant.
+ If the bit field insn does not, we must invert. */
+
+#if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
+ xbitpos = unit - bitsize - xbitpos;
+#endif
+ /* We have been counting XBITPOS within UNIT.
+ Count instead within the size of the register. */
+#if BITS_BIG_ENDIAN
+ if (GET_CODE (xop0) != MEM)
+ xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
+#endif
+ unit = GET_MODE_BITSIZE (maxmode);
+
+ /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */
+ value1 = value;
+ if (GET_MODE (value) != maxmode)
+ {
+ if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
+ {
+ /* Optimization: Don't bother really extending VALUE
+ if it has all the bits we will actually use. However,
+ if we must narrow it, be sure we do it correctly. */
+
+ if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
+ {
+ /* Avoid making subreg of a subreg, or of a mem. */
+ if (GET_CODE (value1) != REG)
+ value1 = copy_to_reg (value1);
+ value1 = gen_rtx (SUBREG, maxmode, value1, 0);
+ }
+ else
+ value1 = gen_lowpart (maxmode, value1);
+ }
+ else if (!CONSTANT_P (value))
+ /* Parse phase is supposed to make VALUE's data type
+ match that of the component reference, which is a type
+ at least as wide as the field; so VALUE should have
+ a mode that corresponds to that type. */
+ abort ();
+ }
+
+ /* If this machine's insv insists on a register,
+ get VALUE1 into a register. */
+ if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
+ (value1, maxmode)))
+ value1 = force_reg (maxmode, value1);
+
+ pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
+ if (pat)
+ emit_insn (pat);
+ else
+ {
+ delete_insns_since (last);
+ store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
+ }
+ }
+ else
+ insv_loses:
+#endif
+ /* Insv is not available; store using shifts and boolean ops. */
+ store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
+ return value;
+}
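+
+/* Summary of the strategy above (descriptive only): a suitably aligned
+ full-word or multi-word field becomes a plain move, possibly through
+ a SUBREG; an lsb-aligned register field uses a movstrict insn; fields
+ wider than a word recurse one word at a time; otherwise the insv
+ pattern is tried, falling back to store_fixed_bit_field's
+ shift-and-mask code. */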
+
+/* Use shifts and boolean operations to store VALUE
+ into a bit field of width BITSIZE
+ in a memory location specified by OP0 except offset by OFFSET bytes.
+ (OFFSET must be 0 if OP0 is a register.)
+ The field starts at position BITPOS within the byte.
+ (If OP0 is a register, it may be a full word or a narrower mode,
+ but BITPOS still counts within a full word,
+ which is significant on bigendian machines.)
+ STRUCT_ALIGN is the alignment the structure is known to have (in bytes).
+
+ Note that protect_from_queue has already been done on OP0 and VALUE. */
+
+static void
+store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
+ register rtx op0;
+ register int offset, bitsize, bitpos;
+ register rtx value;
+ int struct_align;
+{
+ register enum machine_mode mode;
+ int total_bits = BITS_PER_WORD;
+ rtx subtarget, temp;
+ int all_zero = 0;
+ int all_one = 0;
+
+ /* There is a case not handled here:
+ a structure with a known alignment of just a halfword
+ and a field split across two aligned halfwords within the structure.
+ Or likewise a structure with a known alignment of just a byte
+ and a field split across two bytes.
+ Such cases are not supposed to be able to occur. */
+
+ if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
+ {
+ if (offset != 0)
+ abort ();
+ /* Special treatment for a bit field split across two registers. */
+ if (bitsize + bitpos > BITS_PER_WORD)
+ {
+ store_split_bit_field (op0, bitsize, bitpos,
+ value, BITS_PER_WORD);
+ return;
+ }
+ }
+ else
+ {
+ /* Get the proper mode to use for this field. We want a mode that
+ includes the entire field. If such a mode would be larger than
+ a word, we won't be doing the extraction the normal way. */
+
+ mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
+ struct_align * BITS_PER_UNIT, word_mode,
+ GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
+
+ if (mode == VOIDmode)
+ {
+ /* The only way this should occur is if the field spans word
+ boundaries. */
+ store_split_bit_field (op0,
+ bitsize, bitpos + offset * BITS_PER_UNIT,
+ value, struct_align);
+ return;
+ }
+
+ total_bits = GET_MODE_BITSIZE (mode);
+
+ /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
+ be in the range 0 to total_bits-1, and put any excess bytes in
+ OFFSET. */
+ if (bitpos >= total_bits)
+ {
+ offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
+ bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
+ * BITS_PER_UNIT);
+ }
+
+ /* Get ref to an aligned byte, halfword, or word containing the field.
+ Adjust BITPOS to be position within a word,
+ and OFFSET to be the offset of that word.
+ Then alter OP0 to refer to that word. */
+ bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
+ offset -= (offset % (total_bits / BITS_PER_UNIT));
+ op0 = change_address (op0, mode,
+ plus_constant (XEXP (op0, 0), offset));
+ }
+
+ mode = GET_MODE (op0);
+
+ /* Now MODE is either some integral mode for a MEM as OP0,
+ or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
+ The bit field is contained entirely within OP0.
+ BITPOS is the starting bit number within OP0.
+ (OP0's mode may actually be narrower than MODE.) */
+
+#if BYTES_BIG_ENDIAN
+ /* BITPOS is the distance between our msb
+ and that of the containing datum.
+ Convert it to the distance from the lsb. */
+
+ bitpos = total_bits - bitsize - bitpos;
+#endif
+ /* Now BITPOS is always the distance between our lsb
+ and that of OP0. */
+
+ /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
+ we must first convert its mode to MODE. */
+
+ if (GET_CODE (value) == CONST_INT)
+ {
+ register HOST_WIDE_INT v = INTVAL (value);
+
+ if (bitsize < HOST_BITS_PER_WIDE_INT)
+ v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
+
+ if (v == 0)
+ all_zero = 1;
+ else if ((bitsize < HOST_BITS_PER_WIDE_INT
+ && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
+ || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
+ all_one = 1;
+
+ value = lshift_value (mode, value, bitpos, bitsize);
+ }
+ else
+ {
+ int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
+ && bitpos + bitsize != GET_MODE_BITSIZE (mode));
+
+ if (GET_MODE (value) != mode)
+ {
+ if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
+ && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
+ value = gen_lowpart (mode, value);
+ else
+ value = convert_to_mode (mode, value, 1);
+ }
+
+ if (must_and)
+ value = expand_binop (mode, and_optab, value,
+ mask_rtx (mode, 0, bitsize, 0),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ if (bitpos > 0)
+ value = expand_shift (LSHIFT_EXPR, mode, value,
+ build_int_2 (bitpos, 0), NULL_RTX, 1);
+ }
+
+ /* Now clear the chosen bits in OP0,
+ except that if VALUE is -1 we need not bother. */
+
+ subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;
+
+ if (! all_one)
+ {
+ temp = expand_binop (mode, and_optab, op0,
+ mask_rtx (mode, bitpos, bitsize, 1),
+ subtarget, 1, OPTAB_LIB_WIDEN);
+ subtarget = temp;
+ }
+ else
+ temp = op0;
+
+ /* Now logical-or VALUE into OP0, unless it is zero. */
+
+ if (! all_zero)
+ temp = expand_binop (mode, ior_optab, temp, value,
+ subtarget, 1, OPTAB_LIB_WIDEN);
+ if (op0 != temp)
+ emit_move_insn (op0, temp);
+}
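+
+/* Illustrative sketch of the shift/mask strategy above, assuming a
+ 32-bit MODE with BITPOS == 12 and BITSIZE == 5 (the real work is done
+ through expand_binop/expand_shift, not C operators):
+ op0 = (op0 & ~(0x1f << 12)) | ((value & 0x1f) << 12);
+ where the AND is skipped if VALUE is all ones and the IOR is skipped
+ if VALUE is zero. */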
+
+/* Store a bit field that is split across multiple accessible memory objects.
+
+ OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
+ BITSIZE is the field width; BITPOS the position of its first bit
+ (within the word).
+ VALUE is the value to store.
+ ALIGN is the known alignment of OP0, measured in bytes.
+ This is also the size of the memory objects to be used.
+
+ This does not yet handle fields wider than BITS_PER_WORD. */
+
+static void
+store_split_bit_field (op0, bitsize, bitpos, value, align)
+ rtx op0;
+ int bitsize, bitpos;
+ rtx value;
+ int align;
+{
+ int unit;
+ int bitsdone = 0;
+
+ /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
+ much at a time. */
+ if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
+ unit = BITS_PER_WORD;
+ else
+ unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
+
+ /* If VALUE is a constant other than a CONST_INT, get it into a register in
+ WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
+ that VALUE might be a floating-point constant. */
+ if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
+ {
+ rtx word = gen_lowpart_common (word_mode, value);
+
+ if (word && (value != word))
+ value = word;
+ else
+ value = gen_lowpart_common (word_mode,
+ force_reg (GET_MODE (value), value));
+ }
+
+ while (bitsdone < bitsize)
+ {
+ int thissize;
+ rtx part, word;
+ int thispos;
+ int offset;
+
+ offset = (bitpos + bitsdone) / unit;
+ thispos = (bitpos + bitsdone) % unit;
+
+ /* THISSIZE must not overrun a word boundary. Otherwise,
+ store_fixed_bit_field will call us again, and we will mutually
+ recurse forever. */
+ thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
+ thissize = MIN (thissize, unit - thispos);
+
+#if BYTES_BIG_ENDIAN
+ /* Fetch successively less significant portions. */
+ if (GET_CODE (value) == CONST_INT)
+ part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
+ >> (bitsize - bitsdone - thissize))
+ & (((HOST_WIDE_INT) 1 << thissize) - 1));
+ else
+ {
+ /* The args are chosen so that the last part
+ includes the lsb. */
+ int bit_offset = 0;
+ /* If the value isn't in memory, then it must be right aligned
+ if a register, so skip past the padding on the left. If it
+ is in memory, then there is no padding on the left. */
+ if (GET_CODE (value) != MEM)
+ bit_offset = BITS_PER_WORD - bitsize;
+ part = extract_fixed_bit_field (word_mode, value, 0, thissize,
+ bit_offset + bitsdone,
+ NULL_RTX, 1, align);
+ }
+#else
+ /* Fetch successively more significant portions. */
+ if (GET_CODE (value) == CONST_INT)
+ part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value)) >> bitsdone)
+ & (((HOST_WIDE_INT) 1 << thissize) - 1));
+ else
+ part = extract_fixed_bit_field (word_mode, value, 0, thissize,
+ bitsdone, NULL_RTX, 1, align);
+#endif
+
+ /* If OP0 is a register, then handle OFFSET here.
+
+ When handling multiword bitfields, extract_bit_field may pass
+ down a word_mode SUBREG of a larger REG for a bitfield that actually
+ crosses a word boundary. Thus, for a SUBREG, we must find
+ the current word starting from the base register. */
+ if (GET_CODE (op0) == SUBREG)
+ {
+ word = operand_subword_force (SUBREG_REG (op0),
+ SUBREG_WORD (op0) + offset,
+ GET_MODE (SUBREG_REG (op0)));
+ offset = 0;
+ }
+ else if (GET_CODE (op0) == REG)
+ {
+ word = operand_subword_force (op0, offset, GET_MODE (op0));
+ offset = 0;
+ }
+ else
+ word = op0;
+
+ /* OFFSET is in UNITs, and UNIT is in bits.
+ store_fixed_bit_field wants offset in bytes. */
+ store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
+ thissize, thispos, part, align);
+ bitsdone += thissize;
+ }
+}
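+
+/* Worked example (illustrative): with a byte-aligned MEM (UNIT == 8),
+ a 10-bit field starting at bit 6 is stored in two pieces: 2 bits at
+ position 6 of the first byte, then the remaining 8 bits filling the
+ second byte. */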
+
+/* Generate code to extract a bit-field from STR_RTX
+ containing BITSIZE bits, starting at BITNUM,
+ and put it in TARGET if possible (if TARGET is nonzero).
+ Regardless of TARGET, we return the rtx for where the value is placed.
+ It may be a QUEUED.
+
+ STR_RTX is the structure containing the byte (a REG or MEM).
+ UNSIGNEDP is nonzero if this is an unsigned bit field.
+ MODE is the natural mode of the field value once extracted.
+ TMODE is the mode the caller would like the value to have;
+ but the value may be returned with type MODE instead.
+
+ ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
+ TOTAL_SIZE is the size in bytes of the containing structure,
+ or -1 if varying.
+
+ If a TARGET is specified and we can store in it at no extra cost,
+ we do so, and return TARGET.
+ Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
+ if they are equally easy. */
+
+rtx
+extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
+ target, mode, tmode, align, total_size)
+ rtx str_rtx;
+ register int bitsize;
+ int bitnum;
+ int unsignedp;
+ rtx target;
+ enum machine_mode mode, tmode;
+ int align;
+ int total_size;
+{
+ int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
+ register int offset = bitnum / unit;
+ register int bitpos = bitnum % unit;
+ register rtx op0 = str_rtx;
+ rtx spec_target = target;
+ rtx spec_target_subreg = 0;
+
+ if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
+ abort ();
+
+ /* Discount the part of the structure before the desired byte.
+ We need to know how many bytes are safe to reference after it. */
+ if (total_size >= 0)
+ total_size -= (bitpos / BIGGEST_ALIGNMENT
+ * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
+
+ if (tmode == VOIDmode)
+ tmode = mode;
+ while (GET_CODE (op0) == SUBREG)
+ {
+ offset += SUBREG_WORD (op0);
+ op0 = SUBREG_REG (op0);
+ }
+
+#if BYTES_BIG_ENDIAN
+ /* If OP0 is a register, BITPOS must count within a word.
+ But as we have it, it counts within whatever size OP0 now has.
+ On a bigendian machine, these are not the same, so convert. */
+ if (GET_CODE (op0) != MEM && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
+ bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
+#endif
+
+ /* Extracting a full-word or multi-word value
+ from a structure in a register or aligned memory.
+ This can be done with just SUBREG.
+ So too extracting a subword value in
+ the least significant part of the register. */
+
+ if ((GET_CODE (op0) == REG
+ || (GET_CODE (op0) == MEM
+ && (! SLOW_UNALIGNED_ACCESS
+ || (offset * BITS_PER_UNIT % bitsize == 0
+ && align * BITS_PER_UNIT % bitsize == 0))))
+ && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
+ && bitpos % BITS_PER_WORD == 0)
+ || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
+#if BYTES_BIG_ENDIAN
+ && bitpos + bitsize == BITS_PER_WORD
+#else
+ && bitpos == 0
+#endif
+ )))
+ {
+ enum machine_mode mode1
+ = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);
+
+ if (mode1 != GET_MODE (op0))
+ {
+ if (GET_CODE (op0) == REG)
+ op0 = gen_rtx (SUBREG, mode1, op0, offset);
+ else
+ op0 = change_address (op0, mode1,
+ plus_constant (XEXP (op0, 0), offset));
+ }
+ if (mode1 != mode)
+ return convert_to_mode (tmode, op0, unsignedp);
+ return op0;
+ }
+
+ /* Handle fields bigger than a word. */
+
+ if (bitsize > BITS_PER_WORD)
+ {
+ /* Here we transfer the words of the field
+ in the order least significant first.
+ This is because the most significant word is the one which may
+ be less than full. */
+
+ int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
+ int i;
+
+ if (target == 0 || GET_CODE (target) != REG)
+ target = gen_reg_rtx (mode);
+
+ for (i = 0; i < nwords; i++)
+ {
+ /* If I is 0, use the low-order word in both field and target;
+ if I is 1, use the next to lowest word; and so on. */
+ int wordnum = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
+ int bit_offset = (WORDS_BIG_ENDIAN
+ ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
+ : i * BITS_PER_WORD);
+ rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
+ rtx result_part
+ = extract_bit_field (op0, MIN (BITS_PER_WORD,
+ bitsize - i * BITS_PER_WORD),
+ bitnum + bit_offset,
+ 1, target_part, mode, word_mode,
+ align, total_size);
+
+ if (target_part == 0)
+ abort ();
+
+ if (result_part != target_part)
+ emit_move_insn (target_part, result_part);
+ }
+
+ if (unsignedp)
+ return target;
+ /* Signed bit field: sign-extend with two arithmetic shifts. */
+ target = expand_shift (LSHIFT_EXPR, mode, target,
+ build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
+ NULL_RTX, 0);
+ return expand_shift (RSHIFT_EXPR, mode, target,
+ build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
+ NULL_RTX, 0);
+ }
+
+  /* From here on we know the desired field is smaller than a word,
+ so we can treat it as an integer: extract it in one integer mode,
+ if necessary, and then truncate or extend it
+ to the size that is wanted. */
+
+ /* OFFSET is the number of words or bytes (UNIT says which)
+ from STR_RTX to the first word or byte containing part of the field. */
+
+ if (GET_CODE (op0) == REG)
+ {
+ if (offset != 0
+ || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
+ op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
+ op0, offset);
+ offset = 0;
+ }
+ else
+ {
+ op0 = protect_from_queue (str_rtx, 1);
+ }
+
+ /* Now OFFSET is nonzero only for memory operands. */
+
+ if (unsignedp)
+ {
+#ifdef HAVE_extzv
+ if (HAVE_extzv
+ && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0])
+ >= bitsize))
+ {
+ int xbitpos = bitpos, xoffset = offset;
+ rtx bitsize_rtx, bitpos_rtx;
+ rtx last = get_last_insn();
+ rtx xop0 = op0;
+ rtx xtarget = target;
+ rtx xspec_target = spec_target;
+ rtx xspec_target_subreg = spec_target_subreg;
+ rtx pat;
+ enum machine_mode maxmode
+ = insn_operand_mode[(int) CODE_FOR_extzv][0];
+
+ if (GET_CODE (xop0) == MEM)
+ {
+ int save_volatile_ok = volatile_ok;
+ volatile_ok = 1;
+
+ /* Is the memory operand acceptable? */
+ if (flag_force_mem
+ || ! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
+ (xop0, GET_MODE (xop0))))
+ {
+ /* No, load into a reg and extract from there. */
+ enum machine_mode bestmode;
+
+ /* Get the mode to use for inserting into this field. If
+ OP0 is BLKmode, get the smallest mode consistent with the
+ alignment. If OP0 is a non-BLKmode object that is no
+ wider than MAXMODE, use its mode. Otherwise, use the
+ smallest mode containing the field. */
+
+ if (GET_MODE (xop0) == BLKmode
+ || (GET_MODE_SIZE (GET_MODE (op0))
+ > GET_MODE_SIZE (maxmode)))
+ bestmode = get_best_mode (bitsize, bitnum,
+ align * BITS_PER_UNIT, maxmode,
+ MEM_VOLATILE_P (xop0));
+ else
+ bestmode = GET_MODE (xop0);
+
+ if (bestmode == VOIDmode
+ || (STRICT_ALIGNMENT && GET_MODE_SIZE (bestmode) > align))
+ goto extzv_loses;
+
+ /* Compute offset as multiple of this unit,
+ counting in bytes. */
+ unit = GET_MODE_BITSIZE (bestmode);
+ xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
+ xbitpos = bitnum % unit;
+ xop0 = change_address (xop0, bestmode,
+ plus_constant (XEXP (xop0, 0),
+ xoffset));
+ /* Fetch it to a register in that size. */
+ xop0 = force_reg (bestmode, xop0);
+
+ /* XBITPOS counts within UNIT, which is what is expected. */
+ }
+ else
+ /* Get ref to first byte containing part of the field. */
+ xop0 = change_address (xop0, byte_mode,
+ plus_constant (XEXP (xop0, 0), xoffset));
+
+ volatile_ok = save_volatile_ok;
+ }
+
+ /* If op0 is a register, we need it in MAXMODE (which is usually
+ SImode) to make it acceptable to the format of extzv. */
+ if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
+ abort ();
+ if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
+ xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);
+
+ /* On big-endian machines, we count bits from the most significant.
+ If the bit field insn does not, we must invert. */
+#if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
+ xbitpos = unit - bitsize - xbitpos;
+#endif
+ /* Now convert from counting within UNIT to counting in MAXMODE. */
+#if BITS_BIG_ENDIAN
+ if (GET_CODE (xop0) != MEM)
+ xbitpos += GET_MODE_BITSIZE (maxmode) - unit;
+#endif
+ unit = GET_MODE_BITSIZE (maxmode);
+
+ if (xtarget == 0
+ || (flag_force_mem && GET_CODE (xtarget) == MEM))
+ xtarget = xspec_target = gen_reg_rtx (tmode);
+
+ if (GET_MODE (xtarget) != maxmode)
+ {
+ if (GET_CODE (xtarget) == REG)
+ {
+ int wider = (GET_MODE_SIZE (maxmode)
+ > GET_MODE_SIZE (GET_MODE (xtarget)));
+ xtarget = gen_lowpart (maxmode, xtarget);
+ if (wider)
+ xspec_target_subreg = xtarget;
+ }
+ else
+ xtarget = gen_reg_rtx (maxmode);
+ }
+
+ /* If this machine's extzv insists on a register target,
+ make sure we have one. */
+ if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
+ (xtarget, maxmode)))
+ xtarget = gen_reg_rtx (maxmode);
+
+ bitsize_rtx = GEN_INT (bitsize);
+ bitpos_rtx = GEN_INT (xbitpos);
+
+ pat = gen_extzv (protect_from_queue (xtarget, 1),
+ xop0, bitsize_rtx, bitpos_rtx);
+ if (pat)
+ {
+ emit_insn (pat);
+ target = xtarget;
+ spec_target = xspec_target;
+ spec_target_subreg = xspec_target_subreg;
+ }
+ else
+ {
+ delete_insns_since (last);
+ target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
+ bitpos, target, 1, align);
+ }
+ }
+ else
+ extzv_loses:
+#endif
+ target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
+ target, 1, align);
+ }
+ else
+ {
+#ifdef HAVE_extv
+ if (HAVE_extv
+ && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0])
+ >= bitsize))
+ {
+ int xbitpos = bitpos, xoffset = offset;
+ rtx bitsize_rtx, bitpos_rtx;
+ rtx last = get_last_insn();
+ rtx xop0 = op0, xtarget = target;
+ rtx xspec_target = spec_target;
+ rtx xspec_target_subreg = spec_target_subreg;
+ rtx pat;
+ enum machine_mode maxmode
+ = insn_operand_mode[(int) CODE_FOR_extv][0];
+
+ if (GET_CODE (xop0) == MEM)
+ {
+ /* Is the memory operand acceptable? */
+ if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
+ (xop0, GET_MODE (xop0))))
+ {
+ /* No, load into a reg and extract from there. */
+ enum machine_mode bestmode;
+
+ /* Get the mode to use for inserting into this field. If
+ OP0 is BLKmode, get the smallest mode consistent with the
+ alignment. If OP0 is a non-BLKmode object that is no
+ wider than MAXMODE, use its mode. Otherwise, use the
+ smallest mode containing the field. */
+
+ if (GET_MODE (xop0) == BLKmode
+ || (GET_MODE_SIZE (GET_MODE (op0))
+ > GET_MODE_SIZE (maxmode)))
+ bestmode = get_best_mode (bitsize, bitnum,
+ align * BITS_PER_UNIT, maxmode,
+ MEM_VOLATILE_P (xop0));
+ else
+ bestmode = GET_MODE (xop0);
+
+ if (bestmode == VOIDmode
+ || (STRICT_ALIGNMENT && GET_MODE_SIZE (bestmode) > align))
+ goto extv_loses;
+
+ /* Compute offset as multiple of this unit,
+ counting in bytes. */
+ unit = GET_MODE_BITSIZE (bestmode);
+ xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
+ xbitpos = bitnum % unit;
+ xop0 = change_address (xop0, bestmode,
+ plus_constant (XEXP (xop0, 0),
+ xoffset));
+ /* Fetch it to a register in that size. */
+ xop0 = force_reg (bestmode, xop0);
+
+ /* XBITPOS counts within UNIT, which is what is expected. */
+ }
+ else
+ /* Get ref to first byte containing part of the field. */
+ xop0 = change_address (xop0, byte_mode,
+ plus_constant (XEXP (xop0, 0), xoffset));
+ }
+
+ /* If op0 is a register, we need it in MAXMODE (which is usually
+ SImode) to make it acceptable to the format of extv. */
+ if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
+ abort ();
+ if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
+ xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);
+
+ /* On big-endian machines, we count bits from the most significant.
+ If the bit field insn does not, we must invert. */
+#if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
+ xbitpos = unit - bitsize - xbitpos;
+#endif
+ /* XBITPOS counts within a size of UNIT.
+ Adjust to count within a size of MAXMODE. */
+#if BITS_BIG_ENDIAN
+ if (GET_CODE (xop0) != MEM)
+ xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);
+#endif
+ unit = GET_MODE_BITSIZE (maxmode);
+
+ if (xtarget == 0
+ || (flag_force_mem && GET_CODE (xtarget) == MEM))
+ xtarget = xspec_target = gen_reg_rtx (tmode);
+
+ if (GET_MODE (xtarget) != maxmode)
+ {
+ if (GET_CODE (xtarget) == REG)
+ {
+ int wider = (GET_MODE_SIZE (maxmode)
+ > GET_MODE_SIZE (GET_MODE (xtarget)));
+ xtarget = gen_lowpart (maxmode, xtarget);
+ if (wider)
+ xspec_target_subreg = xtarget;
+ }
+ else
+ xtarget = gen_reg_rtx (maxmode);
+ }
+
+ /* If this machine's extv insists on a register target,
+ make sure we have one. */
+ if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
+ (xtarget, maxmode)))
+ xtarget = gen_reg_rtx (maxmode);
+
+ bitsize_rtx = GEN_INT (bitsize);
+ bitpos_rtx = GEN_INT (xbitpos);
+
+ pat = gen_extv (protect_from_queue (xtarget, 1),
+ xop0, bitsize_rtx, bitpos_rtx);
+ if (pat)
+ {
+ emit_insn (pat);
+ target = xtarget;
+ spec_target = xspec_target;
+ spec_target_subreg = xspec_target_subreg;
+ }
+ else
+ {
+ delete_insns_since (last);
+ target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
+ bitpos, target, 0, align);
+ }
+ }
+ else
+ extv_loses:
+#endif
+ target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
+ target, 0, align);
+ }
+ if (target == spec_target)
+ return target;
+ if (target == spec_target_subreg)
+ return spec_target;
+ if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
+ {
+ /* If the target mode is floating-point, first convert to the
+ integer mode of that size and then access it as a floating-point
+ value via a SUBREG. */
+ if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
+ {
+ target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
+ MODE_INT, 0),
+ target, unsignedp);
+ if (GET_CODE (target) != REG)
+ target = copy_to_reg (target);
+ return gen_rtx (SUBREG, tmode, target, 0);
+ }
+ else
+ return convert_to_mode (tmode, target, unsignedp);
+ }
+ return target;
+}
+
+/* Extract a bit field using shifts and boolean operations.
+ Returns an rtx to represent the value.
+ OP0 addresses a register (word) or memory (byte).
+ BITPOS says which bit within the word or byte the bit field starts in.
+ OFFSET says how many bytes farther the bit field starts;
+ it is 0 if OP0 is a register.
+ BITSIZE says how many bits long the bit field is.
+ (If OP0 is a register, it may be narrower than a full word,
+ but BITPOS still counts within a full word,
+ which is significant on bigendian machines.)
+
+ UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
+ If TARGET is nonzero, attempts to store the value there
+ and return TARGET, but this is not guaranteed.
+ If TARGET is not used, create a pseudo-reg of mode TMODE for the value.
+
+ ALIGN is the alignment that OP0 is known to have, measured in bytes. */
+
+static rtx
+extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
+ target, unsignedp, align)
+ enum machine_mode tmode;
+ register rtx op0, target;
+ register int offset, bitsize, bitpos;
+ int unsignedp;
+ int align;
+{
+ int total_bits = BITS_PER_WORD;
+ enum machine_mode mode;
+
+ if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
+ {
+ /* Special treatment for a bit field split across two registers. */
+ if (bitsize + bitpos > BITS_PER_WORD)
+ return extract_split_bit_field (op0, bitsize, bitpos,
+ unsignedp, align);
+ }
+ else
+ {
+ /* Get the proper mode to use for this field. We want a mode that
+ includes the entire field. If such a mode would be larger than
+ a word, we won't be doing the extraction the normal way. */
+
+ mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
+ align * BITS_PER_UNIT, word_mode,
+ GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));
+
+ if (mode == VOIDmode)
+ /* The only way this should occur is if the field spans word
+ boundaries. */
+ return extract_split_bit_field (op0, bitsize,
+ bitpos + offset * BITS_PER_UNIT,
+ unsignedp, align);
+
+ total_bits = GET_MODE_BITSIZE (mode);
+
+ /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
+ be in the range 0 to total_bits-1, and put any excess bytes in
+ OFFSET. */
+ if (bitpos >= total_bits)
+ {
+ offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
+ bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
+ * BITS_PER_UNIT);
+ }
+
+ /* Get ref to an aligned byte, halfword, or word containing the field.
+ Adjust BITPOS to be position within a word,
+ and OFFSET to be the offset of that word.
+ Then alter OP0 to refer to that word. */
+ bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
+ offset -= (offset % (total_bits / BITS_PER_UNIT));
+ op0 = change_address (op0, mode,
+ plus_constant (XEXP (op0, 0), offset));
+ }
+
+ mode = GET_MODE (op0);
+
+#if BYTES_BIG_ENDIAN
+ /* BITPOS is the distance between our msb and that of OP0.
+ Convert it to the distance from the lsb. */
+
+ bitpos = total_bits - bitsize - bitpos;
+#endif
+ /* Now BITPOS is always the distance between the field's lsb and that of OP0.
+ We have reduced the big-endian case to the little-endian case. */
+
+ if (unsignedp)
+ {
+ if (bitpos)
+ {
+ /* If the field does not already start at the lsb,
+ shift it so it does. */
+ tree amount = build_int_2 (bitpos, 0);
+ /* Maybe propagate the target for the shift. */
+ /* But not if we will return it--could confuse integrate.c. */
+ rtx subtarget = (target != 0 && GET_CODE (target) == REG
+ && !REG_FUNCTION_VALUE_P (target)
+ ? target : 0);
+ if (tmode != mode) subtarget = 0;
+ op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
+ }
+ /* Convert the value to the desired mode. */
+ if (mode != tmode)
+ op0 = convert_to_mode (tmode, op0, 1);
+
+ /* Unless the msb of the field used to be the msb when we shifted,
+ mask out the upper bits. */
+
+ if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
+#if 0
+#ifdef SLOW_ZERO_EXTEND
+ /* Always generate an `and' if
+ we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
+ will combine fruitfully with the zero-extend. */
+ || tmode != mode
+#endif
+#endif
+ )
+ return expand_binop (GET_MODE (op0), and_optab, op0,
+ mask_rtx (GET_MODE (op0), 0, bitsize, 0),
+ target, 1, OPTAB_LIB_WIDEN);
+ return op0;
+ }
+
+ /* To extract a signed bit-field, first shift its msb to the msb of the word,
+ then arithmetic-shift its lsb to the lsb of the word. */
+ op0 = force_reg (mode, op0);
+ if (mode != tmode)
+ target = 0;
+
+ /* Find the narrowest integer mode that contains the field. */
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
+ {
+ op0 = convert_to_mode (mode, op0, 0);
+ break;
+ }
+
+ if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
+ {
+ tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
+ /* Maybe propagate the target for the shift. */
+ /* But not if we will return the result--could confuse integrate.c. */
+ rtx subtarget = (target != 0 && GET_CODE (target) == REG
+ && ! REG_FUNCTION_VALUE_P (target)
+ ? target : 0);
+ op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
+ }
+
+ return expand_shift (RSHIFT_EXPR, mode, op0,
+ build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
+ target, 0);
+}
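+
+/* Worked example (illustrative, 32-bit word, little-endian): an
+ unsigned 5-bit field at BITPOS == 12 is computed as
+ (op0 >> 12) & 0x1f; the signed case instead computes
+ (op0 << 15) >> 27, where the right shift is arithmetic. */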
+
+/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
+ of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
+ complement of that if COMPLEMENT. The mask is truncated if
+ necessary to the width of mode MODE. */
+
+static rtx
+mask_rtx (mode, bitpos, bitsize, complement)
+ enum machine_mode mode;
+ int bitpos, bitsize, complement;
+{
+ HOST_WIDE_INT masklow, maskhigh;
+
+ if (bitpos < HOST_BITS_PER_WIDE_INT)
+ masklow = (HOST_WIDE_INT) -1 << bitpos;
+ else
+ masklow = 0;
+
+ if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
+ masklow &= ((unsigned HOST_WIDE_INT) -1
+ >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
+
+ if (bitpos <= HOST_BITS_PER_WIDE_INT)
+ maskhigh = -1;
+ else
+ maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
+
+ if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
+ maskhigh &= ((unsigned HOST_WIDE_INT) -1
+ >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
+ else
+ maskhigh = 0;
+
+ if (complement)
+ {
+ maskhigh = ~maskhigh;
+ masklow = ~masklow;
+ }
+
+ return immed_double_const (masklow, maskhigh, mode);
+}
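+
+/* Example (illustrative): mask_rtx (SImode, 3, 4, 0) builds the
+ constant 0x78 -- four ones followed by three zeros -- while passing
+ COMPLEMENT == 1 instead yields 0xffffff87. */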
+
+/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
+ VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
+
+static rtx
+lshift_value (mode, value, bitpos, bitsize)
+ enum machine_mode mode;
+ rtx value;
+ int bitpos, bitsize;
+{
+ unsigned HOST_WIDE_INT v = INTVAL (value);
+ HOST_WIDE_INT low, high;
+
+ if (bitsize < HOST_BITS_PER_WIDE_INT)
+ v &= ~((HOST_WIDE_INT) -1 << bitsize);
+
+ if (bitpos < HOST_BITS_PER_WIDE_INT)
+ {
+ low = v << bitpos;
+ high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
+ }
+ else
+ {
+ low = 0;
+ high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
+ }
+
+ return immed_double_const (low, high, mode);
+}
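+
+/* Example (illustrative): lshift_value (SImode, GEN_INT (5), 12, 3)
+ truncates 5 to its low 3 bits (still 5) and returns the constant
+ 5 << 12 == 0x5000. */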
+
+/* Extract a bit field that is split across two words
+ and return an RTX for the result.
+
+ OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
+ BITSIZE is the field width; BITPOS, position of its first bit, in the word.
+ UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
+
+ ALIGN is the known alignment of OP0, measured in bytes.
+ This is also the size of the memory objects to be used. */
+
+static rtx
+extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
+ rtx op0;
+ int bitsize, bitpos, unsignedp, align;
+{
+ int unit;
+ int bitsdone = 0;
+ rtx result;
+ int first = 1;
+
+ /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
+ much at a time. */
+ if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
+ unit = BITS_PER_WORD;
+ else
+ unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);
+
+ while (bitsdone < bitsize)
+ {
+ int thissize;
+ rtx part, word;
+ int thispos;
+ int offset;
+
+ offset = (bitpos + bitsdone) / unit;
+ thispos = (bitpos + bitsdone) % unit;
+
+ /* THISSIZE must not overrun a word boundary. Otherwise,
+ extract_fixed_bit_field will call us again, and we will mutually
+ recurse forever. */
+ thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
+ thissize = MIN (thissize, unit - thispos);
+
+ /* If OP0 is a register, then handle OFFSET here.
+
+ When handling multiword bitfields, extract_bit_field may pass
+ down a word_mode SUBREG of a larger REG for a bitfield that actually
+ crosses a word boundary. Thus, for a SUBREG, we must find
+ the current word starting from the base register. */
+ if (GET_CODE (op0) == SUBREG)
+ {
+ word = operand_subword_force (SUBREG_REG (op0),
+ SUBREG_WORD (op0) + offset,
+ GET_MODE (SUBREG_REG (op0)));
+ offset = 0;
+ }
+ else if (GET_CODE (op0) == REG)
+ {
+ word = operand_subword_force (op0, offset, GET_MODE (op0));
+ offset = 0;
+ }
+ else
+ word = op0;
+
+ /* Extract the parts in bit-counting order,
+ whose meaning is determined by the byte endianness (BYTES_BIG_ENDIAN).
+ OFFSET is in UNITs, and UNIT is in bits.
+ extract_fixed_bit_field wants offset in bytes. */
+ part = extract_fixed_bit_field (word_mode, word,
+ offset * unit / BITS_PER_UNIT,
+ thissize, thispos, 0, 1, align);
+ bitsdone += thissize;
+
+ /* Shift this part into place for the result. */
+#if BYTES_BIG_ENDIAN
+ if (bitsize != bitsdone)
+ part = expand_shift (LSHIFT_EXPR, word_mode, part,
+ build_int_2 (bitsize - bitsdone, 0), 0, 1);
+#else
+ if (bitsdone != thissize)
+ part = expand_shift (LSHIFT_EXPR, word_mode, part,
+ build_int_2 (bitsdone - thissize, 0), 0, 1);
+#endif
+
+ if (first)
+ result = part;
+ else
+ /* Combine the parts with bitwise or. This works
+ because we extracted each part as an unsigned bit field. */
+ result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
+ OPTAB_LIB_WIDEN);
+
+ first = 0;
+ }
+
+ /* Unsigned bit field: we are done. */
+ if (unsignedp)
+ return result;
+ /* Signed bit field: sign-extend with two arithmetic shifts. */
+ result = expand_shift (LSHIFT_EXPR, word_mode, result,
+ build_int_2 (BITS_PER_WORD - bitsize, 0),
+ NULL_RTX, 0);
+ return expand_shift (RSHIFT_EXPR, word_mode, result,
+ build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
+}
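+
+/* Illustrative note: the two shifts above sign-extend an N-bit field
+ in a 32-bit word as (result << (32 - N)) >> (32 - N), with an
+ arithmetic right shift; e.g. for N == 5 a field value of 0x1f
+ comes out as -1. */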
+
+/* Add INC into TARGET. */
+
+void
+expand_inc (target, inc)
+ rtx target, inc;
+{
+ rtx value = expand_binop (GET_MODE (target), add_optab,
+ target, inc,
+ target, 0, OPTAB_LIB_WIDEN);
+ if (value != target)
+ emit_move_insn (target, value);
+}
+
+/* Subtract DEC from TARGET. */
+
+void
+expand_dec (target, dec)
+ rtx target, dec;
+{
+ rtx value = expand_binop (GET_MODE (target), sub_optab,
+ target, dec,
+ target, 0, OPTAB_LIB_WIDEN);
+ if (value != target)
+ emit_move_insn (target, value);
+}
+
+/* Output a shift instruction for expression code CODE,
+ with SHIFTED being the rtx for the value to shift,
+ and AMOUNT the tree for the amount to shift by.
+ Store the result in the rtx TARGET, if that is convenient.
+ If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
+ Return the rtx for where the value is. */
+
+rtx
+expand_shift (code, mode, shifted, amount, target, unsignedp)
+ enum tree_code code;
+ register enum machine_mode mode;
+ rtx shifted;
+ tree amount;
+ register rtx target;
+ int unsignedp;
+{
+ register rtx op1, temp = 0;
+ register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
+ register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
+ int try;
+
+  /* We previously detected shift counts computed by NEGATE_EXPR
+ and shifted in the other direction, but that does not work
+ on all machines. */
+
+ op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
+
+#if 0 && SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED
+ && GET_CODE (op1) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (op1) >= GET_MODE_BITSIZE (mode))
+ op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
+ % GET_MODE_BITSIZE (mode));
+#endif
+
+ if (op1 == const0_rtx)
+ return shifted;
+
+ for (try = 0; temp == 0 && try < 3; try++)
+ {
+ enum optab_methods methods;
+
+ if (try == 0)
+ methods = OPTAB_DIRECT;
+ else if (try == 1)
+ methods = OPTAB_WIDEN;
+ else
+ methods = OPTAB_LIB_WIDEN;
+
+ if (rotate)
+ {
+ /* Widening does not work for rotation. */
+ if (methods == OPTAB_WIDEN)
+ continue;
+ else if (methods == OPTAB_LIB_WIDEN)
+ {
+ /* If we are rotating by a constant that is valid and
+ we have been unable to open-code this by a rotation,
+ do it as the IOR of two shifts. I.e., to rotate A
+ by N bits, compute (A << N) | ((unsigned) A >> (C - N))
+ where C is the bitsize of A.
+
+ It is theoretically possible that the target machine might
+ not be able to perform either shift and hence we would
+ be making two libcalls rather than just the one for the
+ shift (similarly if IOR could not be done). We will allow
+ this extremely unlikely lossage to avoid complicating the
+ code below. */
+
+ if (GET_CODE (op1) == CONST_INT && INTVAL (op1) > 0
+ && INTVAL (op1) < GET_MODE_BITSIZE (mode))
+ {
+ rtx subtarget = target == shifted ? 0 : target;
+ rtx temp1;
+ tree other_amount
+ = build_int_2 (GET_MODE_BITSIZE (mode) - INTVAL (op1), 0);
+
+ shifted = force_reg (mode, shifted);
+
+ temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
+ mode, shifted, amount, subtarget, 1);
+ temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
+ mode, shifted, other_amount, 0, 1);
+ return expand_binop (mode, ior_optab, temp, temp1, target,
+ unsignedp, methods);
+ }
+ else
+ methods = OPTAB_LIB;
+ }
+
+ temp = expand_binop (mode,
+ left ? rotl_optab : rotr_optab,
+ shifted, op1, target, unsignedp, methods);
+
+ /* If we don't have the rotate, but we are rotating by a constant
+ that is in range, try a rotate in the opposite direction. */
+
+ if (temp == 0 && GET_CODE (op1) == CONST_INT
+ && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
+ temp = expand_binop (mode,
+ left ? rotr_optab : rotl_optab,
+ shifted,
+ GEN_INT (GET_MODE_BITSIZE (mode)
+ - INTVAL (op1)),
+ target, unsignedp, methods);
+ }
+ else if (unsignedp)
+ temp = expand_binop (mode,
+ left ? ashl_optab : lshr_optab,
+ shifted, op1, target, unsignedp, methods);
+
+ /* Do arithmetic shifts.
+ Also, if we are going to widen the operand, we can just as well
+ use an arithmetic right-shift instead of a logical one. */
+ if (temp == 0 && ! rotate
+ && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
+ {
+ enum optab_methods methods1 = methods;
+
+ /* If trying to widen a log shift to an arithmetic shift,
+ don't accept an arithmetic shift of the same size. */
+ if (unsignedp)
+ methods1 = OPTAB_MUST_WIDEN;
+
+ /* Arithmetic shift */
+
+ temp = expand_binop (mode,
+ left ? ashl_optab : ashr_optab,
+ shifted, op1, target, unsignedp, methods1);
+ }
+
+ /* We used to try extzv here for logical right shifts, but that was
+ only useful for one machine, the VAX, and caused poor code
+ generation there for lshrdi3, so the code was deleted and a
+ define_expand for lshrsi3 was added to vax.md. */
+ }
+
+ if (temp == 0)
+ abort ();
+ return temp;
+}
+
+enum alg_code { alg_zero, alg_m, alg_shift,
+ alg_add_t_m2, alg_sub_t_m2,
+ alg_add_factor, alg_sub_factor,
+ alg_add_t2_m, alg_sub_t2_m,
+ alg_add, alg_subtract, alg_factor, alg_shiftop };
+
+/* This structure records a sequence of operations.
+ `ops' is the number of operations recorded.
+ `cost' is their total cost.
+ The operations are stored in `op' and the corresponding
+ logarithms of the integer coefficients in `log'.
+
+ These are the operations:
+ alg_zero total := 0;
+ alg_m total := multiplicand;
+ alg_shift total := total * coeff
+ alg_add_t_m2 total := total + multiplicand * coeff;
+ alg_sub_t_m2 total := total - multiplicand * coeff;
+ alg_add_factor total := total * coeff + total;
+ alg_sub_factor total := total * coeff - total;
+ alg_add_t2_m total := total * coeff + multiplicand;
+ alg_sub_t2_m total := total * coeff - multiplicand;
+
+ The first operand must be either alg_zero or alg_m. */
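+
+/* Worked example (illustrative): multiplying by 10 can be recorded as
+ op[0] = alg_m total := multiplicand
+ op[1] = alg_add_t2_m (log 2) total := total*4 + multiplicand
+ op[2] = alg_shift (log 1) total := total*2
+ i.e. x*10 == ((x << 2) + x) << 1, two shifts and one add. */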
+
+struct algorithm
+{
+ short cost;
+ short ops;
+  /* The sizes of the OP and LOG fields are not directly related to the
+ word size, but the worst case arises when we have few
+ consecutive ones or zeros, i.e., a multiplicand like 10101010101...
+ In that case we will generate shift-by-2, add, shift-by-2, add, ...,
+ wordsize operations in total. */
+ enum alg_code op[MAX_BITS_PER_WORD];
+ char log[MAX_BITS_PER_WORD];
+};
+
+/* Compute the best algorithm for multiplying by T and store it in
+ *ALG_OUT. The algorithm must cost less than COST_LIMIT.
+ If alg_out->cost >= COST_LIMIT, no algorithm was found and all
+ other fields of the returned struct are undefined. */
+
+static void
+synth_mult (alg_out, t, cost_limit)
+ struct algorithm *alg_out;
+ unsigned HOST_WIDE_INT t;
+ int cost_limit;
+{
+ int m;
+ struct algorithm *alg_in, *best_alg;
+ unsigned int cost;
+ unsigned HOST_WIDE_INT q;
+
+ /* Indicate that no algorithm is yet found. If no algorithm
+ is found, this value will be returned and indicate failure. */
+ alg_out->cost = cost_limit;
+
+ if (cost_limit <= 0)
+ return;
+
+ /* t == 1 can be done in zero cost. */
+ if (t == 1)
+ {
+ alg_out->ops = 1;
+ alg_out->cost = 0;
+ alg_out->op[0] = alg_m;
+ return;
+ }
+
+ /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
+ fail now. */
+ if (t == 0)
+ {
+ if (zero_cost >= cost_limit)
+ return;
+ else
+ {
+ alg_out->ops = 1;
+ alg_out->cost = zero_cost;
+ alg_out->op[0] = alg_zero;
+ return;
+ }
+ }
+
+ /* We'll be needing a couple extra algorithm structures now. */
+
+ alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
+ best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
+
+ /* If we have a group of zero bits at the low-order part of T, try
+ multiplying by the remaining bits and then doing a shift. */
+
+ if ((t & 1) == 0)
+ {
+ m = floor_log2 (t & -t); /* m = number of low zero bits */
+ q = t >> m;
+ cost = shift_cost[m];
+ synth_mult (alg_in, q, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = m;
+ best_alg->op[best_alg->ops] = alg_shift;
+ cost_limit = cost;
+ }
+ }
+
+ /* If we have an odd number, add or subtract one. */
+ if ((t & 1) != 0)
+ {
+ unsigned HOST_WIDE_INT w;
+
+ for (w = 1; (w & t) != 0; w <<= 1)
+ ;
+ if (w > 2
+ /* Reject the case where t is 3.
+ Thus we prefer addition in that case. */
+ && t != 3)
+ {
+ /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
+
+ cost = add_cost;
+ synth_mult (alg_in, t + 1, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = 0;
+ best_alg->op[best_alg->ops] = alg_sub_t_m2;
+ cost_limit = cost;
+ }
+ }
+ else
+ {
+ /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
+
+ cost = add_cost;
+ synth_mult (alg_in, t - 1, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = 0;
+ best_alg->op[best_alg->ops] = alg_add_t_m2;
+ cost_limit = cost;
+ }
+ }
+ }
+
+ /* Look for factors of t of the form
+ t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
+ If we find such a factor, we can multiply by t using an algorithm that
+ multiplies by q, shifts the result left by m, and adds or
+ subtracts the unshifted result.
+
+ We search for large factors first and loop down, even if large factors
+ are less probable than small; if we find a large factor we will find a
+ good sequence quickly, and therefore be able to prune (by decreasing
+ COST_LIMIT) the search. */
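+
+  /* Worked example (illustrative): t == 15 factors as 3 * 5 with
+ 5 == 2**2 + 1, so alg_add_factor (log == 2) turns a sequence for 3
+ into one for 15: x*3 == (x << 1) + x, then
+ x*15 == ((x*3) << 2) + x*3. */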
+
+ for (m = floor_log2 (t - 1); m >= 2; m--)
+ {
+ unsigned HOST_WIDE_INT d;
+
+ d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
+ if (t % d == 0 && t > d)
+ {
+ cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
+ synth_mult (alg_in, t / d, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = m;
+ best_alg->op[best_alg->ops] = alg_add_factor;
+ cost_limit = cost;
+ }
+ /* Other factors will have been taken care of in the recursion. */
+ break;
+ }
+
+ d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
+ if (t % d == 0 && t > d)
+ {
+ cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
+ synth_mult (alg_in, t / d, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = m;
+ best_alg->op[best_alg->ops] = alg_sub_factor;
+ cost_limit = cost;
+ }
+ break;
+ }
+ }
+
+ /* Try shift-and-add (load effective address) instructions,
+ i.e. do a*3, a*5, a*9. */
+ if ((t & 1) != 0)
+ {
+ q = t - 1;
+ q = q & -q;
+ m = exact_log2 (q);
+ if (m >= 0)
+ {
+ cost = shiftadd_cost[m];
+ synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = m;
+ best_alg->op[best_alg->ops] = alg_add_t2_m;
+ cost_limit = cost;
+ }
+ }
+
+ q = t + 1;
+ q = q & -q;
+ m = exact_log2 (q);
+ if (m >= 0)
+ {
+ cost = shiftsub_cost[m];
+ synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
+
+ cost += alg_in->cost;
+ if (cost < cost_limit)
+ {
+ struct algorithm *x;
+ x = alg_in, alg_in = best_alg, best_alg = x;
+ best_alg->log[best_alg->ops] = m;
+ best_alg->op[best_alg->ops] = alg_sub_t2_m;
+ cost_limit = cost;
+ }
+ }
+ }
+
+ /* If cost_limit has not decreased since we stored it in alg_out->cost,
+ we have not found any algorithm. */
+ if (cost_limit == alg_out->cost)
+ return;
+
+ /* If we are building too long a sequence for `struct algorithm'
+ to record, make this search fail. */
+ if (best_alg->ops == MAX_BITS_PER_WORD)
+ return;
+
+ /* Copy the algorithm from temporary space to the space at alg_out.
+ We avoid using structure assignment because the majority of
+ best_alg is normally undefined, and this is a critical function. */
+ alg_out->ops = best_alg->ops + 1;
+ alg_out->cost = cost_limit;
+ bcopy ((char *) best_alg->op, (char *) alg_out->op,
+ alg_out->ops * sizeof *alg_out->op);
+ bcopy ((char *) best_alg->log, (char *) alg_out->log,
+ alg_out->ops * sizeof *alg_out->log);
+}
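The factor search above is easiest to see on a concrete constant. Below is a minimal standalone sketch (editorial, not part of the gcc-2.6.0 import; `mult45` is an illustrative name) of one shift-and-add sequence of the kind synth_mult discovers for t = 45; the exact sequence the search settles on depends on the target's cost tables.

```c
#include <assert.h>

/* One way to form 45*x with shifts and adds only:
     3x  = (x << 1) + x   (alg_add_t2_m, log = 1)
     12x = 3x << 2        (alg_shift,    log = 2)
     11x = 12x - x        (alg_sub_t_m2)
     44x = 11x << 2       (alg_shift,    log = 2)
     45x = 44x + x        (alg_add_t_m2)  */
static unsigned long mult45 (unsigned long x)
{
  unsigned long acc = x;   /* alg_m: accumulator starts as the operand */
  acc = (acc << 1) + x;    /* 3x  */
  acc <<= 2;               /* 12x */
  acc -= x;                /* 11x */
  acc <<= 2;               /* 44x */
  acc += x;                /* 45x */
  return acc;
}

int main (void)
{
  unsigned long x;
  for (x = 0; x <= 1000; x++)
    assert (mult45 (x) == 45UL * x);
  return 0;
}
```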
+
+/* Perform a multiplication and return an rtx for the result.
+ MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
+ TARGET is a suggestion for where to store the result (an rtx).
+
+ We check specially for a constant integer as OP1.
+ If you want this check for OP0 as well, then before calling
+ you should swap the two operands if OP0 would be constant. */
+
+rtx
+expand_mult (mode, op0, op1, target, unsignedp)
+ enum machine_mode mode;
+ register rtx op0, op1, target;
+ int unsignedp;
+{
+ rtx const_op1 = op1;
+
+ /* If we are multiplying in DImode, it may still be a win
+ to try to work with shifts and adds. */
+ if (GET_CODE (op1) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
+ && HOST_BITS_PER_INT <= BITS_PER_WORD)
+ {
+ if ((CONST_DOUBLE_HIGH (op1) == 0 && CONST_DOUBLE_LOW (op1) >= 0)
+ || (CONST_DOUBLE_HIGH (op1) == -1 && CONST_DOUBLE_LOW (op1) < 0))
+ const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
+ }
+
+ /* We used to test optimize here, on the grounds that it's better to
+ produce a smaller program when -O is not used.
+ But this causes such a terrible slowdown sometimes
+ that it seems better to use synth_mult always. */
+
+ if (GET_CODE (const_op1) == CONST_INT)
+ {
+ struct algorithm alg;
+ struct algorithm alg2;
+ HOST_WIDE_INT val = INTVAL (op1);
+ HOST_WIDE_INT val_so_far;
+ rtx insn;
+ int mult_cost;
+ enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
+
+ /* Try to do the computation three ways: multiply by the negative of OP1
+ and then negate, do the multiplication directly, or do multiplication
+ by OP1 - 1. */
+
+ mult_cost = rtx_cost (gen_rtx (MULT, mode, op0, op1), SET);
+ mult_cost = MIN (12 * add_cost, mult_cost);
+
+ synth_mult (&alg, val, mult_cost);
+ synth_mult (&alg2, - val,
+ (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
+ if (alg2.cost + negate_cost < alg.cost)
+ alg = alg2, variant = negate_variant;
+
+ /* This proves very useful for division-by-constant. */
+ synth_mult (&alg2, val - 1, (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
+ if (alg2.cost + add_cost < alg.cost)
+ alg = alg2, variant = add_variant;
+
+ if (alg.cost < mult_cost)
+ {
+ /* We found something cheaper than a multiply insn. */
+ int opno;
+ rtx accum, tem;
+
+ op0 = protect_from_queue (op0, 0);
+
+ /* Avoid referencing memory over and over.
+ For speed, but also for correctness when mem is volatile. */
+ if (GET_CODE (op0) == MEM)
+ op0 = force_reg (mode, op0);
+
+ /* ACCUM starts out either as OP0 or as a zero, depending on
+ the first operation. */
+
+ if (alg.op[0] == alg_zero)
+ {
+ accum = copy_to_mode_reg (mode, const0_rtx);
+ val_so_far = 0;
+ }
+ else if (alg.op[0] == alg_m)
+ {
+ accum = copy_to_mode_reg (mode, op0);
+ val_so_far = 1;
+ }
+ else
+ abort ();
+
+ for (opno = 1; opno < alg.ops; opno++)
+ {
+ int log = alg.log[opno];
+ int preserve = preserve_subexpressions_p ();
+ rtx shift_subtarget = preserve ? 0 : accum;
+ rtx add_target = opno == alg.ops - 1 && target != 0 ? target : 0;
+ rtx accum_target = preserve ? 0 : accum;
+
+ switch (alg.op[opno])
+ {
+ case alg_shift:
+ accum = expand_shift (LSHIFT_EXPR, mode, accum,
+ build_int_2 (log, 0), NULL_RTX, 0);
+ val_so_far <<= log;
+ break;
+
+ case alg_add_t_m2:
+ tem = expand_shift (LSHIFT_EXPR, mode, op0,
+ build_int_2 (log, 0), NULL_RTX, 0);
+ accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
+ add_target ? add_target : accum_target);
+ val_so_far += (HOST_WIDE_INT) 1 << log;
+ break;
+
+ case alg_sub_t_m2:
+ tem = expand_shift (LSHIFT_EXPR, mode, op0,
+ build_int_2 (log, 0), NULL_RTX, 0);
+ accum = force_operand (gen_rtx (MINUS, mode, accum, tem),
+ add_target ? add_target : accum_target);
+ val_so_far -= (HOST_WIDE_INT) 1 << log;
+ break;
+
+ case alg_add_t2_m:
+ accum = expand_shift (LSHIFT_EXPR, mode, accum,
+ build_int_2 (log, 0), shift_subtarget,
+ 0);
+ accum = force_operand (gen_rtx (PLUS, mode, accum, op0),
+ add_target ? add_target : accum_target);
+ val_so_far = (val_so_far << log) + 1;
+ break;
+
+ case alg_sub_t2_m:
+ accum = expand_shift (LSHIFT_EXPR, mode, accum,
+ build_int_2 (log, 0), shift_subtarget,
+ 0);
+ accum = force_operand (gen_rtx (MINUS, mode, accum, op0),
+ add_target ? add_target : accum_target);
+ val_so_far = (val_so_far << log) - 1;
+ break;
+
+ case alg_add_factor:
+ tem = expand_shift (LSHIFT_EXPR, mode, accum,
+ build_int_2 (log, 0), NULL_RTX, 0);
+ accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
+ add_target ? add_target : accum_target);
+ val_so_far += val_so_far << log;
+ break;
+
+ case alg_sub_factor:
+ tem = expand_shift (LSHIFT_EXPR, mode, accum,
+ build_int_2 (log, 0), NULL_RTX, 0);
+ accum = force_operand (gen_rtx (MINUS, mode, tem, accum),
+ (add_target ? add_target
+ : preserve ? 0 : tem));
+ val_so_far = (val_so_far << log) - val_so_far;
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Write a REG_EQUAL note on the last insn so that we can cse
+ multiplication sequences. */
+
+ insn = get_last_insn ();
+ REG_NOTES (insn)
+ = gen_rtx (EXPR_LIST, REG_EQUAL,
+ gen_rtx (MULT, mode, op0, GEN_INT (val_so_far)),
+ REG_NOTES (insn));
+ }
+
+ if (variant == negate_variant)
+ {
+ val_so_far = - val_so_far;
+ accum = expand_unop (mode, neg_optab, accum, target, 0);
+ }
+ else if (variant == add_variant)
+ {
+ val_so_far = val_so_far + 1;
+ accum = force_operand (gen_rtx (PLUS, mode, accum, op0), target);
+ }
+
+ if (val != val_so_far)
+ abort ();
+
+ return accum;
+ }
+ }
+
+ /* This used to use umul_optab if unsigned, but for non-widening multiply
+ there is no difference between signed and unsigned. */
+ op0 = expand_binop (mode, smul_optab,
+ op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
+ if (op0 == 0)
+ abort ();
+ return op0;
+}
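To make the variant selection concrete, here is an editorial sketch (not import code; `mult_m7` is an illustrative name) of the negate_variant for multiplication by -7: synthesizing 7 = 8 - 1 and negating typically beats both a multiply insn and a direct synthesis of -7. The shift is done in unsigned arithmetic to stay well defined, and the final conversion assumes the usual two's-complement behavior.

```c
#include <assert.h>

static long mult_m7 (long x)
{
  /* 7x via one shift and one subtract (t = 8, alg_sub_t_m2).  */
  unsigned long acc = ((unsigned long) x << 3) - (unsigned long) x;
  return -(long) acc;   /* negate_variant: -(7x) = -7x */
}

int main (void)
{
  long x;
  for (x = -1000; x <= 1000; x++)
    assert (mult_m7 (x) == -7 * x);
  return 0;
}
```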
+
+/* Return the smallest n such that 2**n >= X. */
+
+int
+ceil_log2 (x)
+ unsigned HOST_WIDE_INT x;
+{
+ return floor_log2 (x - 1) + 1;
+}
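A quick standalone check of the identity (editorial sketch; `my_floor_log2` is a hypothetical stand-in for the compiler's own floor_log2, assumed here to return -1 for 0, which the identity requires for x = 1):

```c
#include <assert.h>

static int my_floor_log2 (unsigned long x)
{
  int n = -1;              /* assumed convention: floor_log2 (0) == -1 */
  while (x != 0)
    {
      n++;
      x >>= 1;
    }
  return n;
}

static int my_ceil_log2 (unsigned long x)
{
  return my_floor_log2 (x - 1) + 1;
}

int main (void)
{
  assert (my_ceil_log2 (1) == 0);   /* 2**0 >= 1 */
  assert (my_ceil_log2 (4) == 2);   /* exact powers come out exact */
  assert (my_ceil_log2 (5) == 3);   /* 2**3 = 8 is the first power >= 5 */
  return 0;
}
```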
+
+/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
+ replace division by D, and put the least significant N bits of the result
+ in *MULTIPLIER_PTR and return the most significant bit.
+
+ The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
+ needed precision is in PRECISION (should be <= N).
+
+ PRECISION should be as small as possible so this function can choose
+ the multiplier more freely.
+
+ The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
+ is to be used for a final right shift is placed in *POST_SHIFT_PTR.
+
+ Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
+ where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
+
+static
+unsigned HOST_WIDE_INT
+choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
+ unsigned HOST_WIDE_INT d;
+ int n;
+ int precision;
+ unsigned HOST_WIDE_INT *multiplier_ptr;
+ int *post_shift_ptr;
+ int *lgup_ptr;
+{
+ unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
+ unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
+ int lgup, post_shift;
+ int pow, pow2;
+ unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
+
+ /* lgup = ceil(log2(divisor)); */
+ lgup = ceil_log2 (d);
+
+ if (lgup > n)
+ abort ();
+
+ pow = n + lgup;
+ pow2 = n + lgup - precision;
+
+ if (pow == 2 * HOST_BITS_PER_WIDE_INT)
+ {
+ /* We could handle this with some effort, but this case is much better
+ handled directly with a scc insn, so rely on caller using that. */
+ abort ();
+ }
+
+ /* mlow = 2^(N + lgup)/d */
+ if (pow >= HOST_BITS_PER_WIDE_INT)
+ {
+ nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
+ nl = 0;
+ }
+ else
+ {
+ nh = 0;
+ nl = (unsigned HOST_WIDE_INT) 1 << pow;
+ }
+ div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
+ &mlow_lo, &mlow_hi, &dummy1, &dummy2);
+
+ /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
+ if (pow2 >= HOST_BITS_PER_WIDE_INT)
+ nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
+ else
+ nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
+ div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
+ &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
+
+ if (mhigh_hi && nh - d >= d)
+ abort ();
+ if (mhigh_hi > 1 || mlow_hi > 1)
+ abort ();
+ /* assert that mlow < mhigh. */
+ if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
+ abort ();
+
+ /* If precision == N, then mlow, mhigh exceed 2^N
+ (but they do not exceed 2^(N+1)). */
+
+ /* Reduce to lowest terms */
+ for (post_shift = lgup; post_shift > 0; post_shift--)
+ {
+ unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
+ unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
+ if (ml_lo >= mh_lo)
+ break;
+
+ mlow_hi = 0;
+ mlow_lo = ml_lo;
+ mhigh_hi = 0;
+ mhigh_lo = mh_lo;
+ }
+
+ *post_shift_ptr = post_shift;
+ *lgup_ptr = lgup;
+ if (n < HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
+ *multiplier_ptr = mhigh_lo & mask;
+ return mhigh_lo >= mask;
+ }
+ else
+ {
+ *multiplier_ptr = mhigh_lo;
+ return mhigh_hi;
+ }
+}
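A worked instance (editorial sketch, not import code): for N = 32 and d = 7, this function yields the low multiplier bits 0x24924925 with post-shift 3 and a returned most-significant bit of 1, so the caller must use the add-and-halve fixup that expand_divmod emits below for mh != 0. These are the standard "magic number" values for unsigned division by 7.

```c
#include <assert.h>
#include <stdint.h>

static uint32_t udiv7 (uint32_t x)
{
  const uint32_t ml = 0x24924925u;  /* low 32 bits of the 33-bit multiplier */
  const int post_shift = 3;
  uint32_t t1 = (uint32_t) (((uint64_t) x * ml) >> 32);  /* high part */
  uint32_t t2 = x - t1;             /* cannot overflow: t1 <= x */
  uint32_t t4 = t1 + (t2 >> 1);     /* recovers the implicit 33rd bit */
  return t4 >> (post_shift - 1);
}

int main (void)
{
  assert (udiv7 (0) == 0);
  assert (udiv7 (6) == 0);
  assert (udiv7 (7) == 1);
  assert (udiv7 (0xFFFFFFFFu) == 0xFFFFFFFFu / 7);
  return 0;
}
```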
+
+/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
+ congruent to 1 (mod 2**N). */
+
+static unsigned HOST_WIDE_INT
+invert_mod2n (x, n)
+ unsigned HOST_WIDE_INT x;
+ int n;
+{
+ /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
+
+ /* The algorithm notes that the choice y = x satisfies
+ x*y == 1 mod 2^3, since x is assumed odd.
+ Each iteration doubles the number of bits of significance in y. */
+
+ unsigned HOST_WIDE_INT mask;
+ unsigned HOST_WIDE_INT y = x;
+ int nbit = 3;
+
+ mask = (n == HOST_BITS_PER_WIDE_INT
+ ? ~(unsigned HOST_WIDE_INT) 0
+ : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
+
+ while (nbit < n)
+ {
+ y = y * (2 - x*y) & mask; /* Modulo 2^N */
+ nbit *= 2;
+ }
+ return y;
+}
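The iteration is Newton/Hensel lifting: if x*y == 1 + e*2^k, then x*y*(2 - x*y) == 1 - e^2 * 2^(2k), so the number of correct low-order bits doubles each round, and y = x is already correct mod 2**3 for odd x. An editorial standalone sketch for n = 32, relying on unsigned wraparound in place of the explicit mask:

```c
#include <assert.h>
#include <stdint.h>

static uint32_t inv_mod2_32 (uint32_t x)   /* X must be odd */
{
  uint32_t y = x;                 /* odd x: x*x == 1 (mod 2**3) */
  int nbit;
  for (nbit = 3; nbit < 32; nbit *= 2)
    y = y * (2 - x * y);          /* correct mod 2**(2*nbit) afterwards */
  return y;                       /* uint32_t wraps, standing in for MASK */
}

int main (void)
{
  uint32_t x;
  for (x = 1; x < 2000; x += 2)
    assert ((uint32_t) (x * inv_mod2_32 (x)) == 1);
  return 0;
}
```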
+
+/* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1
+ done with the wrong signedness. ADJ_OPERAND is already the high half of
+ the product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
+ to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
+ become signed.
+
+ The result is put in TARGET if that is convenient.
+
+ MODE is the mode of operation. */
+
+rtx
+expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
+ enum machine_mode mode;
+ register rtx adj_operand, op0, op1, target;
+ int unsignedp;
+{
+ rtx tem;
+ enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
+
+ tem = expand_shift (RSHIFT_EXPR, mode, op0,
+ build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
+ NULL_RTX, 0);
+ tem = expand_and (tem, op1, NULL_RTX);
+ adj_operand = force_operand (gen_rtx (adj_code, mode, adj_operand, tem),
+ adj_operand);
+
+ tem = expand_shift (RSHIFT_EXPR, mode, op1,
+ build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
+ NULL_RTX, 0);
+ tem = expand_and (tem, op0, NULL_RTX);
+ target = force_operand (gen_rtx (adj_code, mode, adj_operand, tem), target);
+
+ return target;
+}
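The identity behind the two shift/and/add steps: for N-bit operands, uhigh == shigh + (op0 < 0 ? op1 : 0) + (op1 < 0 ? op0 : 0) (mod 2^N), and the arithmetic shift by N - 1 builds exactly that conditional mask. An editorial sketch for N = 32 (assumes arithmetic right shift of negative ints, as GCC-generated code does; `uhigh_from_shigh` is an illustrative name):

```c
#include <assert.h>
#include <stdint.h>

static uint32_t uhigh_from_shigh (int32_t op0, int32_t op1)
{
  /* The signed high part, obtained the easy way for the demo.  */
  uint32_t adj = (uint32_t) (((int64_t) op0 * op1) >> 32);
  adj += (uint32_t) ((op0 >> 31) & op1);   /* add op1 iff op0 < 0 */
  adj += (uint32_t) ((op1 >> 31) & op0);   /* add op0 iff op1 < 0 */
  return adj;
}

int main (void)
{
  int32_t a = -12345, b = 987654321;
  uint32_t want = (uint32_t)
    (((uint64_t) (uint32_t) a * (uint32_t) b) >> 32);
  assert (uhigh_from_shigh (a, b) == want);
  assert (uhigh_from_shigh (b, a) == want);
  return 0;
}
```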
+
+/* Emit code to multiply OP0 and CNST1, putting the high half of the result
+ in TARGET if that is convenient, and return where the result is. If the
+ operation cannot be performed, 0 is returned.
+
+ MODE is the mode of operation and result.
+
+ UNSIGNEDP nonzero means unsigned multiply. */
+
+rtx
+expand_mult_highpart (mode, op0, cnst1, target, unsignedp)
+ enum machine_mode mode;
+ register rtx op0, target;
+ unsigned HOST_WIDE_INT cnst1;
+ int unsignedp;
+{
+ enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
+ optab mul_highpart_optab;
+ optab moptab;
+ rtx tem;
+ int size = GET_MODE_BITSIZE (mode);
+ rtx op1, wide_op1;
+
+ /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
+ if (size > HOST_BITS_PER_WIDE_INT)
+ abort ();
+
+ op1 = GEN_INT (cnst1);
+
+ if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
+ wide_op1 = op1;
+ else
+ wide_op1
+ = immed_double_const (cnst1,
+ (unsignedp
+ ? (HOST_WIDE_INT) 0
+ : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
+ wider_mode);
+
+ /* expand_mult handles constant multiplication of word_mode
+ or narrower. It does a poor job for large modes. */
+ if (size < BITS_PER_WORD)
+ {
+ /* We have to do this, since expand_binop doesn't do conversion for
+ multiply. Maybe change expand_binop to handle widening multiply? */
+ op0 = convert_to_mode (wider_mode, op0, unsignedp);
+
+ tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
+ tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
+ build_int_2 (size, 0), NULL_RTX, 1);
+ return gen_lowpart (mode, tem);
+ }
+
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+
+ /* Firstly, try using a multiplication insn that only generates the needed
+ high part of the product, with the signedness given by UNSIGNEDP. */
+ mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
+ target = expand_binop (mode, mul_highpart_optab,
+ op0, op1, target, unsignedp, OPTAB_DIRECT);
+ if (target)
+ return target;
+
+ /* Secondly, same as above, but with the signedness opposite to UNSIGNEDP.
+ Need to adjust the result after the multiplication. */
+ mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
+ target = expand_binop (mode, mul_highpart_optab,
+ op0, op1, target, unsignedp, OPTAB_DIRECT);
+ if (target)
+ /* We used the wrong signedness. Adjust the result. */
+ return expand_mult_highpart_adjust (mode, target, op0,
+ op1, target, unsignedp);
+
+ /* Thirdly, we try to use a widening multiplication, or a wider mode
+ multiplication. */
+
+ moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
+ if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
+ ;
+ else if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
+ moptab = smul_optab;
+ else
+ {
+ /* Try widening multiplication of opposite signedness, and adjust. */
+ moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
+ if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
+ {
+ tem = expand_binop (wider_mode, moptab, op0, wide_op1,
+ NULL_RTX, ! unsignedp, OPTAB_WIDEN);
+ if (tem != 0)
+ {
+ /* Extract the high half of the just generated product. */
+ tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
+ build_int_2 (size, 0), NULL_RTX, 1);
+ tem = gen_lowpart (mode, tem);
+ /* We used the wrong signedness. Adjust the result. */
+ return expand_mult_highpart_adjust (mode, tem, op0, op1,
+ target, unsignedp);
+ }
+ }
+
+ /* As a last resort, try widening the mode and perform a
+ non-widening multiplication. */
+ moptab = smul_optab;
+ }
+
+ /* Pass NULL_RTX as target since TARGET has wrong mode. */
+ tem = expand_binop (wider_mode, moptab, op0, wide_op1,
+ NULL_RTX, unsignedp, OPTAB_WIDEN);
+ if (tem == 0)
+ return 0;
+
+ /* Extract the high half of the just generated product. */
+ tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
+ build_int_2 (size, 0), NULL_RTX, 1);
+ return gen_lowpart (mode, tem);
+}
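The last-resort strategy reduces to something very small in plain C; an editorial sketch for a 32-bit mode on a host with 64-bit arithmetic (not import code):

```c
#include <assert.h>
#include <stdint.h>

static uint32_t umul_high_32 (uint32_t a, uint32_t b)
{
  uint64_t wide = (uint64_t) a * b;  /* the widening multiply */
  return (uint32_t) (wide >> 32);    /* shift by size, then gen_lowpart */
}

int main (void)
{
  assert (umul_high_32 (0x80000000u, 2) == 1);
  assert (umul_high_32 (0xFFFFFFFFu, 0xFFFFFFFFu) == 0xFFFFFFFEu);
  return 0;
}
```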
+
+/* Emit the code to divide OP0 by OP1, putting the result in TARGET
+ if that is convenient, and returning where the result is.
+ You may request either the quotient or the remainder as the result;
+ specify REM_FLAG nonzero to get the remainder.
+
+ CODE is the expression code for which kind of division this is;
+ it controls how rounding is done. MODE is the machine mode to use.
+ UNSIGNEDP nonzero means do unsigned division. */
+
+/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
+ and then correct it by or'ing in missing high bits
+ if result of ANDI is nonzero.
+ For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
+ This could optimize to a bfexts instruction.
+ But C doesn't use these operations, so their optimizations are
+ left for later. */
+
+#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
+
+rtx
+expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
+ int rem_flag;
+ enum tree_code code;
+ enum machine_mode mode;
+ register rtx op0, op1, target;
+ int unsignedp;
+{
+ enum machine_mode compute_mode;
+ register rtx tquotient;
+ rtx quotient = 0, remainder = 0;
+ rtx last;
+ int size;
+ rtx insn;
+ optab optab1, optab2;
+ int op1_is_constant, op1_is_pow2;
+
+ op1_is_constant = GET_CODE (op1) == CONST_INT;
+ op1_is_pow2 = (op1_is_constant
+ && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
+ || EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))));
+
+ /*
+ This is the structure of expand_divmod:
+
+ First comes code to fix up the operands so we can perform the operations
+ correctly and efficiently.
+
+ Second comes a switch statement with code specific for each rounding mode.
+ For some special operands this code emits all RTL for the desired
+ operation, for other cases, it generates a quotient and stores it in
+ QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
+ to indicate that it has not done anything.
+
+ Last comes code that finishes the operation. If QUOTIENT is set and
+ REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If QUOTIENT
+ is not set, it is computed using trunc rounding.
+
+ We try to generate special code for division and remainder when OP1 is a
+ constant. If |OP1| = 2**n we can use shifts and some other fast
+ operations. For other values of OP1, we compute a carefully selected
+ fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
+ by m.
+
+ In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
+ half of the product. Different strategies for generating the product are
+ implemented in expand_mult_highpart.
+
+ If what we actually want is the remainder, we generate that by another
+ by-constant multiplication and a subtraction. */
+
+ /* We shouldn't be called with OP1 == const1_rtx, but some of the
+ code below will malfunction if we are, so check here and handle
+ the special case if so. */
+ if (op1 == const1_rtx)
+ return rem_flag ? const0_rtx : op0;
+
+ if (target
+ /* Don't use the function value register as a target
+ since we have to read it as well as write it,
+ and function-inlining gets confused by this. */
+ && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
+ /* Don't clobber an operand while doing a multi-step calculation. */
+ || ((rem_flag || op1_is_constant)
+ && (reg_mentioned_p (target, op0)
+ || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
+ || reg_mentioned_p (target, op1)
+ || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
+ target = 0;
+
+ /* Get the mode in which to perform this computation. Normally it will
+ be MODE, but sometimes we can't do the desired operation in MODE.
+ If so, pick a wider mode in which we can do the operation. Convert
+ to that mode at the start to avoid repeated conversions.
+
+ First see what operations we need. These depend on the expression
+ we are evaluating. (We assume that divxx3 insns exist under the
+ same conditions that modxx3 insns do, and that these insns don't normally
+ fail. If these assumptions are not correct, we may generate less
+ efficient code in some cases.)
+
+ Then see if we find a mode in which we can open-code that operation
+ (either a division, modulus, or shift). Finally, check for the smallest
+ mode for which we can do the operation with a library call. */
+
+ /* We might want to refine this now that we have division-by-constant
+ optimization. Since expand_mult_highpart tries so many variants, it is
+ not straightforward to generalize this. Maybe we should make an array
+ of possible modes in init_expmed? Save this for GCC 2.7. */
+
+ optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
+ : (unsignedp ? udiv_optab : sdiv_optab));
+ optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
+
+ for (compute_mode = mode; compute_mode != VOIDmode;
+ compute_mode = GET_MODE_WIDER_MODE (compute_mode))
+ if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
+ || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
+ break;
+
+ if (compute_mode == VOIDmode)
+ for (compute_mode = mode; compute_mode != VOIDmode;
+ compute_mode = GET_MODE_WIDER_MODE (compute_mode))
+ if (optab1->handlers[(int) compute_mode].libfunc
+ || optab2->handlers[(int) compute_mode].libfunc)
+ break;
+
+ /* If we still couldn't find a mode, use MODE, but we'll probably abort
+ in expand_binop. */
+ if (compute_mode == VOIDmode)
+ compute_mode = mode;
+
+ if (target && GET_MODE (target) == compute_mode)
+ tquotient = target;
+ else
+ tquotient = gen_reg_rtx (compute_mode);
+
+ size = GET_MODE_BITSIZE (compute_mode);
+#if 0
+ /* It should be possible to restrict the precision to GET_MODE_BITSIZE
+ (mode), and thereby get better code when OP1 is a constant. Do that for
+ GCC 2.7. It will require going over all usages of SIZE below. */
+ size = GET_MODE_BITSIZE (mode);
+#endif
+
+ /* Now convert to the best mode to use. */
+ if (compute_mode != mode)
+ {
+ op0 = convert_modes (compute_mode, mode, op0, unsignedp);
+ op1 = convert_modes (compute_mode, mode, op1, unsignedp);
+ }
+
+ /* If one of the operands is a volatile MEM, copy it into a register. */
+
+ if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
+ op0 = force_reg (compute_mode, op0);
+ if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
+ op1 = force_reg (compute_mode, op1);
+
+ /* If we need the remainder or if OP1 is constant, we need to
+ put OP0 in a register in case it has any queued subexpressions. */
+ if (rem_flag || op1_is_constant)
+ op0 = force_reg (compute_mode, op0);
+
+ last = get_last_insn ();
+
+ /* Promote floor rounding to trunc rounding for unsigned operations. */
+ if (unsignedp)
+ {
+ if (code == FLOOR_DIV_EXPR)
+ code = TRUNC_DIV_EXPR;
+ if (code == FLOOR_MOD_EXPR)
+ code = TRUNC_MOD_EXPR;
+ }
+
+ if (op1 != const0_rtx)
+ switch (code)
+ {
+ case TRUNC_MOD_EXPR:
+ case TRUNC_DIV_EXPR:
+ if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
+ {
+ if (unsignedp)
+ {
+ unsigned HOST_WIDE_INT mh, ml;
+ int pre_shift, post_shift;
+ int dummy;
+ unsigned HOST_WIDE_INT d = INTVAL (op1);
+
+ if (EXACT_POWER_OF_2_OR_ZERO_P (d))
+ {
+ pre_shift = floor_log2 (d);
+ if (rem_flag)
+ {
+ remainder = expand_binop (compute_mode, and_optab, op0,
+ GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
+ remainder, 1,
+ OPTAB_LIB_WIDEN);
+ if (remainder)
+ return gen_lowpart (mode, remainder);
+ }
+ quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (pre_shift, 0),
+ tquotient, 1);
+ }
+ else if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
+ {
+ /* Most significant bit of divisor is set, emit a scc insn.
+ emit_store_flag needs to be passed a place for the
+ result. */
+ quotient = emit_store_flag (tquotient, GEU, op0, op1,
+ compute_mode, 1, 1);
+ /* Can emit_store_flag have failed? */
+ if (quotient == 0)
+ goto fail1;
+ }
+ else
+ {
+ /* Find a suitable multiplier and right shift count
+ to replace division by D. */
+
+ mh = choose_multiplier (d, size, size,
+ &ml, &post_shift, &dummy);
+
+ /* If the suggested multiplier is more than SIZE bits, we
+ can do better for even divisors, using an initial right
+ shift. */
+ if (mh != 0 && (d & 1) == 0)
+ {
+ pre_shift = floor_log2 (d & -d);
+ mh = choose_multiplier (d >> pre_shift, size,
+ size - pre_shift,
+ &ml, &post_shift, &dummy);
+ if (mh)
+ abort ();
+ }
+ else
+ pre_shift = 0;
+
+ if (mh != 0)
+ {
+ rtx t1, t2, t3, t4;
+
+ t1 = expand_mult_highpart (compute_mode, op0, ml,
+ NULL_RTX, 1);
+ if (t1 == 0)
+ goto fail1;
+ t2 = force_operand (gen_rtx (MINUS, compute_mode,
+ op0, t1),
+ NULL_RTX);
+ t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
+ build_int_2 (1, 0), NULL_RTX, 1);
+ t4 = force_operand (gen_rtx (PLUS, compute_mode,
+ t1, t3),
+ NULL_RTX);
+ quotient = expand_shift (RSHIFT_EXPR, compute_mode, t4,
+ build_int_2 (post_shift - 1,
+ 0),
+ tquotient, 1);
+ }
+ else
+ {
+ rtx t1, t2;
+
+ t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (pre_shift, 0),
+ NULL_RTX, 1);
+ t2 = expand_mult_highpart (compute_mode, t1, ml,
+ NULL_RTX, 1);
+ if (t2 == 0)
+ goto fail1;
+ quotient = expand_shift (RSHIFT_EXPR, compute_mode, t2,
+ build_int_2 (post_shift, 0),
+ tquotient, 1);
+ }
+ }
+
+ insn = get_last_insn ();
+ REG_NOTES (insn)
+ = gen_rtx (EXPR_LIST, REG_EQUAL,
+ gen_rtx (UDIV, compute_mode, op0, op1),
+ REG_NOTES (insn));
+ }
+ else /* TRUNC_DIV, signed */
+ {
+ unsigned HOST_WIDE_INT ml;
+ int lgup, post_shift;
+ HOST_WIDE_INT d = INTVAL (op1);
+ unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
+
+ /* n rem d = n rem -d */
+ if (rem_flag && d < 0)
+ {
+ d = abs_d;
+ op1 = GEN_INT (abs_d);
+ }
+
+ if (d == 1)
+ quotient = op0;
+ else if (d == -1)
+ quotient = expand_unop (compute_mode, neg_optab, op0,
+ tquotient, 0);
+ else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
+ && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
+ ;
+ else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
+ {
+ lgup = floor_log2 (abs_d);
+ if (abs_d != 2 && BRANCH_COST < 3)
+ {
+ rtx label = gen_label_rtx ();
+ rtx t1;
+
+ t1 = copy_to_mode_reg (compute_mode, op0);
+ emit_cmp_insn (t1, const0_rtx, GE,
+ NULL_RTX, compute_mode, 0, 0);
+ emit_jump_insn (gen_bge (label));
+ expand_inc (t1, GEN_INT (abs_d - 1));
+ emit_label (label);
+ quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
+ build_int_2 (lgup, 0),
+ tquotient, 0);
+ }
+ else
+ {
+ rtx t1, t2, t3;
+ t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (size - 1, 0),
+ NULL_RTX, 0);
+ t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
+ build_int_2 (size - lgup, 0),
+ NULL_RTX, 1);
+ t3 = force_operand (gen_rtx (PLUS, compute_mode,
+ op0, t2),
+ NULL_RTX);
+ quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
+ build_int_2 (lgup, 0),
+ tquotient, 0);
+ }
+
+ if (d < 0)
+ {
+ insn = get_last_insn ();
+ REG_NOTES (insn)
+ = gen_rtx (EXPR_LIST, REG_EQUAL,
+ gen_rtx (DIV, compute_mode, op0,
+ GEN_INT (abs_d)),
+ REG_NOTES (insn));
+
+ quotient = expand_unop (compute_mode, neg_optab,
+ quotient, quotient, 0);
+ }
+ }
+ else
+ {
+ choose_multiplier (abs_d, size, size - 1,
+ &ml, &post_shift, &lgup);
+ if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
+ {
+ rtx t1, t2, t3;
+
+ t1 = expand_mult_highpart (compute_mode, op0, ml,
+ NULL_RTX, 0);
+ if (t1 == 0)
+ goto fail1;
+ t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
+ build_int_2 (post_shift, 0), NULL_RTX, 0);
+ t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (size - 1, 0), NULL_RTX, 0);
+ if (d < 0)
+ quotient = force_operand (gen_rtx (MINUS, compute_mode, t3, t2),
+ tquotient);
+ else
+ quotient = force_operand (gen_rtx (MINUS, compute_mode, t2, t3),
+ tquotient);
+ }
+ else
+ {
+ rtx t1, t2, t3, t4;
+
+ ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
+ t1 = expand_mult_highpart (compute_mode, op0, ml,
+ NULL_RTX, 0);
+ if (t1 == 0)
+ goto fail1;
+ t2 = force_operand (gen_rtx (PLUS, compute_mode, t1, op0),
+ NULL_RTX);
+ t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
+ build_int_2 (post_shift, 0), NULL_RTX, 0);
+ t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (size - 1, 0), NULL_RTX, 0);
+ if (d < 0)
+ quotient = force_operand (gen_rtx (MINUS, compute_mode, t4, t3),
+ tquotient);
+ else
+ quotient = force_operand (gen_rtx (MINUS, compute_mode, t3, t4),
+ tquotient);
+ }
+ }
+
+ if (quotient != 0)
+ {
+ insn = get_last_insn ();
+ REG_NOTES (insn)
+ = gen_rtx (EXPR_LIST, REG_EQUAL,
+ gen_rtx (DIV, compute_mode, op0, op1),
+ REG_NOTES (insn));
+ }
+ }
+ break;
+ }
+ fail1:
+ delete_insns_since (last);
+ break;
+
+ case FLOOR_DIV_EXPR:
+ case FLOOR_MOD_EXPR:
+ /* We will come here only for signed operations. */
+ if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
+ {
+ unsigned HOST_WIDE_INT mh, ml;
+ int pre_shift, lgup, post_shift;
+ HOST_WIDE_INT d = INTVAL (op1);
+
+ if (d > 0)
+ {
+ /* We could just as easily deal with negative constants here,
+ but it does not seem worth the trouble for GCC 2.6. */
+ if (EXACT_POWER_OF_2_OR_ZERO_P (d))
+ {
+ pre_shift = floor_log2 (d);
+ if (rem_flag)
+ {
+ remainder = expand_binop (compute_mode, and_optab, op0,
+ GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
+ remainder, 0, OPTAB_LIB_WIDEN);
+ if (remainder)
+ return gen_lowpart (mode, remainder);
+ }
+ quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (pre_shift, 0),
+ tquotient, 0);
+ }
+ else
+ {
+ rtx t1, t2, t3, t4;
+
+ mh = choose_multiplier (d, size, size - 1,
+ &ml, &post_shift, &lgup);
+ if (mh)
+ abort ();
+
+ t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (size - 1, 0), NULL_RTX, 0);
+ t2 = expand_binop (compute_mode, xor_optab, op0, t1,
+ NULL_RTX, 0, OPTAB_WIDEN);
+ t3 = expand_mult_highpart (compute_mode, t2, ml,
+ NULL_RTX, 1);
+ if (t3 != 0)
+ {
+ t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
+ build_int_2 (post_shift, 0),
+ NULL_RTX, 1);
+ quotient = expand_binop (compute_mode, xor_optab,
+ t4, t1, tquotient, 0,
+ OPTAB_WIDEN);
+ }
+ }
+ }
+ else
+ {
+ rtx nsign, t1, t2, t3, t4;
+ t1 = force_operand (gen_rtx (PLUS, compute_mode,
+ op0, constm1_rtx), NULL_RTX);
+ t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
+ 0, OPTAB_WIDEN);
+ nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
+ build_int_2 (size - 1, 0), NULL_RTX, 0);
+ t3 = force_operand (gen_rtx (MINUS, compute_mode, t1, nsign),
+ NULL_RTX);
+ t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
+ NULL_RTX, 0);
+ if (t4)
+ {
+ rtx t5;
+ t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
+ NULL_RTX, 0);
+ quotient = force_operand (gen_rtx (PLUS, compute_mode,
+ t4, t5),
+ tquotient);
+ }
+ }
+ }
+
+ if (quotient != 0)
+ break;
+ delete_insns_since (last);
+
+ /* Try using an instruction that produces both the quotient and
+ remainder, using truncation. We can easily compensate the quotient
+ or remainder to get floor rounding, once we have the remainder.
+ Notice that we also compute the final remainder value here,
+ and return the result right away. */
+ if (target == 0)
+ target = gen_reg_rtx (compute_mode);
+ if (rem_flag)
+ {
+ remainder = target;
+ quotient = gen_reg_rtx (compute_mode);
+ }
+ else
+ {
+ quotient = target;
+ remainder = gen_reg_rtx (compute_mode);
+ }
+
+ if (expand_twoval_binop (sdivmod_optab, op0, op1,
+ quotient, remainder, 0))
+ {
+ /* This could be computed with a branch-less sequence.
+ Save that for later. */
+ rtx tem;
+ rtx label = gen_label_rtx ();
+ emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
+ compute_mode, 0, 0);
+ emit_jump_insn (gen_beq (label));
+ tem = expand_binop (compute_mode, xor_optab, op0, op1,
+ NULL_RTX, 0, OPTAB_WIDEN);
+ emit_cmp_insn (tem, const0_rtx, GE, NULL_RTX, compute_mode, 0, 0);
+ emit_jump_insn (gen_bge (label));
+ expand_dec (quotient, const1_rtx);
+ expand_inc (remainder, op1);
+ emit_label (label);
+ return gen_lowpart (mode, rem_flag ? remainder : quotient);
+ }
+
+ /* No luck with division elimination or divmod. Have to do it
+ by conditionally adjusting op0 *and* the result. */
+ {
+ rtx label1, label2, label3, label4, label5;
+ rtx adjusted_op0;
+ rtx tem;
+
+ quotient = gen_reg_rtx (compute_mode);
+ adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
+ label1 = gen_label_rtx ();
+ label2 = gen_label_rtx ();
+ label3 = gen_label_rtx ();
+ label4 = gen_label_rtx ();
+ label5 = gen_label_rtx ();
+ emit_cmp_insn (op1, const0_rtx, LT, NULL_RTX, compute_mode, 0, 0);
+ emit_jump_insn (gen_blt (label2));
+ emit_cmp_insn (adjusted_op0, const0_rtx, LT, NULL_RTX,
+ compute_mode, 0, 0);
+ emit_jump_insn (gen_blt (label1));
+ tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
+ quotient, 0, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ emit_jump_insn (gen_jump (label5));
+ emit_barrier ();
+ emit_label (label1);
+ expand_inc (adjusted_op0, const1_rtx);
+ emit_jump_insn (gen_jump (label4));
+ emit_barrier ();
+ emit_label (label2);
+ emit_cmp_insn (adjusted_op0, const0_rtx, GT, NULL_RTX,
+ compute_mode, 0, 0);
+ emit_jump_insn (gen_bgt (label3));
+ tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
+ quotient, 0, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ emit_jump_insn (gen_jump (label5));
+ emit_barrier ();
+ emit_label (label3);
+ expand_dec (adjusted_op0, const1_rtx);
+ emit_label (label4);
+ tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
+ quotient, 0, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ expand_dec (quotient, const1_rtx);
+ emit_label (label5);
+ }
+ break;
+
+ case CEIL_DIV_EXPR:
+ case CEIL_MOD_EXPR:
+ if (unsignedp)
+ {
+ if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
+ {
+ rtx t1, t2, t3;
+ unsigned HOST_WIDE_INT d = INTVAL (op1);
+ t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
+ build_int_2 (floor_log2 (d), 0),
+ tquotient, 1);
+ t2 = expand_binop (compute_mode, and_optab, op0,
+ GEN_INT (d - 1),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ t3 = gen_reg_rtx (compute_mode);
+ t3 = emit_store_flag (t3, NE, t2, const0_rtx,
+ compute_mode, 1, 1);
+ if (t3 == 0)
+ {
+ rtx lab;
+ lab = gen_label_rtx ();
+ emit_cmp_insn (t2, const0_rtx, EQ, NULL_RTX,
+ compute_mode, 0, 0);
+ emit_jump_insn (gen_beq (lab));
+ expand_inc (t1, const1_rtx);
+ emit_label (lab);
+ quotient = t1;
+ }
+ else
+ quotient = force_operand (gen_rtx (PLUS, compute_mode,
+ t1, t3),
+ tquotient);
+ break;
+ }
+
+ /* Try using an instruction that produces both the quotient and
+ remainder, using truncation. We can easily compensate the
+ quotient or remainder to get ceiling rounding, once we have the
+ remainder. Notice that we also compute the final remainder
+ value here, and return the result right away. */
+ if (target == 0)
+ target = gen_reg_rtx (compute_mode);
+ if (rem_flag)
+ {
+ remainder = target;
+ quotient = gen_reg_rtx (compute_mode);
+ }
+ else
+ {
+ quotient = target;
+ remainder = gen_reg_rtx (compute_mode);
+ }
+
+ if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
+ remainder, 1))
+ {
+ /* This could be computed with a branch-less sequence.
+ Save that for later. */
+ rtx label = gen_label_rtx ();
+ emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
+ compute_mode, 0, 0);
+ emit_jump_insn (gen_beq (label));
+ expand_inc (quotient, const1_rtx);
+ expand_dec (remainder, op1);
+ emit_label (label);
+ return gen_lowpart (mode, rem_flag ? remainder : quotient);
+ }
+
+ /* No luck with division elimination or divmod. Have to do it
+ by conditionally adjusting op0 *and* the result. */
+ {
+ rtx label1, label2;
+ rtx adjusted_op0, tem;
+
+ quotient = gen_reg_rtx (compute_mode);
+ adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
+ label1 = gen_label_rtx ();
+ label2 = gen_label_rtx ();
+ emit_cmp_insn (adjusted_op0, const0_rtx, NE, NULL_RTX,
+ compute_mode, 0, 0);
+ emit_jump_insn (gen_bne (label1));
+ emit_move_insn (quotient, const0_rtx);
+ emit_jump_insn (gen_jump (label2));
+ emit_barrier ();
+ emit_label (label1);
+ expand_dec (adjusted_op0, const1_rtx);
+ tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
+ quotient, 1, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ expand_inc (quotient, const1_rtx);
+ emit_label (label2);
+ }
+ }
+ else /* signed */
+ {
+ /* Try using an instruction that produces both the quotient and
+ remainder, using truncation. We can easily compensate the
+ quotient or remainder to get ceiling rounding, once we have the
+ remainder. Notice that we also compute the final remainder
+ value here, and return the result right away. */
+ if (target == 0)
+ target = gen_reg_rtx (compute_mode);
+ if (rem_flag)
+ {
+ remainder = target;
+ quotient = gen_reg_rtx (compute_mode);
+ }
+ else
+ {
+ quotient = target;
+ remainder = gen_reg_rtx (compute_mode);
+ }
+
+ if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
+ remainder, 0))
+ {
+ /* This could be computed with a branch-less sequence.
+ Save that for later. */
+ rtx tem;
+ rtx label = gen_label_rtx ();
+ emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
+ compute_mode, 0, 0);
+ emit_jump_insn (gen_beq (label));
+ tem = expand_binop (compute_mode, xor_optab, op0, op1,
+ NULL_RTX, 0, OPTAB_WIDEN);
+ emit_cmp_insn (tem, const0_rtx, LT, NULL_RTX,
+ compute_mode, 0, 0);
+ emit_jump_insn (gen_blt (label));
+ expand_inc (quotient, const1_rtx);
+ expand_dec (remainder, op1);
+ emit_label (label);
+ return gen_lowpart (mode, rem_flag ? remainder : quotient);
+ }
+
+ /* No luck with division elimination or divmod. Have to do it
+ by conditionally adjusting op0 *and* the result. */
+ {
+ rtx label1, label2, label3, label4, label5;
+ rtx adjusted_op0;
+ rtx tem;
+
+ quotient = gen_reg_rtx (compute_mode);
+ adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
+ label1 = gen_label_rtx ();
+ label2 = gen_label_rtx ();
+ label3 = gen_label_rtx ();
+ label4 = gen_label_rtx ();
+ label5 = gen_label_rtx ();
+ emit_cmp_insn (op1, const0_rtx, LT, NULL_RTX,
+ compute_mode, 0, 0);
+ emit_jump_insn (gen_blt (label2));
+ emit_cmp_insn (adjusted_op0, const0_rtx, GT, NULL_RTX,
+ compute_mode, 0, 0);
+ emit_jump_insn (gen_bgt (label1));
+ tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
+ quotient, 0, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ emit_jump_insn (gen_jump (label5));
+ emit_barrier ();
+ emit_label (label1);
+ expand_dec (adjusted_op0, const1_rtx);
+ emit_jump_insn (gen_jump (label4));
+ emit_barrier ();
+ emit_label (label2);
+ emit_cmp_insn (adjusted_op0, const0_rtx, LT, NULL_RTX,
+ compute_mode, 0, 0);
+ emit_jump_insn (gen_blt (label3));
+ tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
+ quotient, 0, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ emit_jump_insn (gen_jump (label5));
+ emit_barrier ();
+ emit_label (label3);
+ expand_inc (adjusted_op0, const1_rtx);
+ emit_label (label4);
+ tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
+ quotient, 0, OPTAB_LIB_WIDEN);
+ if (tem != quotient)
+ emit_move_insn (quotient, tem);
+ expand_inc (quotient, const1_rtx);
+ emit_label (label5);
+ }
+ }
+ break;
+
+ case EXACT_DIV_EXPR:
+ if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
+ {
+ HOST_WIDE_INT d = INTVAL (op1);
+ unsigned HOST_WIDE_INT ml;
+ int post_shift;
+ rtx t1;
+
+ post_shift = floor_log2 (d & -d);
+ ml = invert_mod2n (d >> post_shift, size);
+ t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
+ unsignedp);
+ quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
+ build_int_2 (post_shift, 0),
+ NULL_RTX, unsignedp);
+
+ insn = get_last_insn ();
+ REG_NOTES (insn)
+ = gen_rtx (EXPR_LIST, REG_EQUAL,
+ gen_rtx (unsignedp ? UDIV : DIV, compute_mode,
+ op0, op1),
+ REG_NOTES (insn));
+ }
+ break;
+
+ case ROUND_DIV_EXPR:
+ case ROUND_MOD_EXPR:
+ /* The code that used to be here was wrong, and nothing really
+ depends on it. */
+ abort ();
+ break;
+ }
+
+ if (quotient == 0)
+ {
+ if (rem_flag)
+ {
+ /* Try to produce the remainder directly without a library call. */
+ remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
+ op0, op1, target,
+ unsignedp, OPTAB_WIDEN);
+ if (remainder == 0)
+ {
+ /* No luck there. Can we do remainder and divide at once
+ without a library call? */
+ remainder = gen_reg_rtx (compute_mode);
+ if (! expand_twoval_binop ((unsignedp
+ ? udivmod_optab
+ : sdivmod_optab),
+ op0, op1,
+ NULL_RTX, remainder, unsignedp))
+ remainder = 0;
+ }
+
+ if (remainder)
+ return gen_lowpart (mode, remainder);
+ }
+
+ /* Produce the quotient. */
+ /* Try a quotient insn, but not a library call. */
+ quotient = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
+ op0, op1, rem_flag ? NULL_RTX : target,
+ unsignedp, OPTAB_WIDEN);
+ if (quotient == 0)
+ {
+ /* No luck there. Try a quotient-and-remainder insn,
+ keeping the quotient alone. */
+ quotient = gen_reg_rtx (compute_mode);
+ if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
+ op0, op1,
+ quotient, NULL_RTX, unsignedp))
+ {
+ quotient = 0;
+ if (! rem_flag)
+ /* Still no luck. If we are not computing the remainder,
+ use a library call for the quotient. */
+ quotient = sign_expand_binop (compute_mode,
+ udiv_optab, sdiv_optab,
+ op0, op1, target,
+ unsignedp, OPTAB_LIB_WIDEN);
+ }
+ }
+ }
+
+ if (rem_flag)
+ {
+ if (quotient == 0)
+ /* No divide instruction either. Use library for remainder. */
+ remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
+ op0, op1, target,
+ unsignedp, OPTAB_LIB_WIDEN);
+ else
+ {
+ /* We divided. Now finish doing X - Y * (X / Y). */
+ remainder = expand_mult (compute_mode, quotient, op1,
+ NULL_RTX, unsignedp);
+ remainder = expand_binop (compute_mode, sub_optab, op0,
+ remainder, target, unsignedp,
+ OPTAB_LIB_WIDEN);
+ }
+ }
+
+ return gen_lowpart (mode, rem_flag ? remainder : quotient);
+}
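Of the special cases above, the branch-free signed power-of-two path (the t1/t2/t3 sequence in the TRUNC_DIV_EXPR case) is worth seeing in isolation. An editorial sketch for division by 8 (size = 32, lgup = 3), assuming arithmetic right shift of signed ints:

```c
#include <assert.h>
#include <stdint.h>

static int32_t sdiv8 (int32_t x)
{
  int32_t t1 = x >> 31;                                 /* 0 or -1: sign mask */
  int32_t t2 = (int32_t) ((uint32_t) t1 >> (32 - 3));   /* 0 or 7 */
  int32_t t3 = x + t2;                                  /* bias negatives by d - 1 */
  return t3 >> 3;                                       /* truncating quotient */
}

int main (void)
{
  assert (sdiv8 (17) == 2);
  assert (sdiv8 (-17) == -2);   /* rounds toward zero, not -infinity */
  assert (sdiv8 (-1) == 0);
  return 0;
}
```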
+
+/* Return a tree node with data type TYPE, describing the value of X.
+ Usually this is an RTL_EXPR, if there is no obvious better choice.
+ X may be an expression; however, we only support those expressions
+ generated by loop.c. */
+
+tree
+make_tree (type, x)
+ tree type;
+ rtx x;
+{
+ tree t;
+
+ switch (GET_CODE (x))
+ {
+ case CONST_INT:
+ t = build_int_2 (INTVAL (x),
+ TREE_UNSIGNED (type) || INTVAL (x) >= 0 ? 0 : -1);
+ TREE_TYPE (t) = type;
+ return t;
+
+ case CONST_DOUBLE:
+ if (GET_MODE (x) == VOIDmode)
+ {
+ t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
+ TREE_TYPE (t) = type;
+ }
+ else
+ {
+ REAL_VALUE_TYPE d;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+ t = build_real (type, d);
+ }
+
+ return t;
+
+ case PLUS:
+ return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
+ make_tree (type, XEXP (x, 1))));
+
+ case MINUS:
+ return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
+ make_tree (type, XEXP (x, 1))));
+
+ case NEG:
+ return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
+
+ case MULT:
+ return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
+ make_tree (type, XEXP (x, 1))));
+
+ case ASHIFT:
+ return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
+ make_tree (type, XEXP (x, 1))));
+
+ case LSHIFTRT:
+ return fold (convert (type,
+ build (RSHIFT_EXPR, unsigned_type (type),
+ make_tree (unsigned_type (type),
+ XEXP (x, 0)),
+ make_tree (type, XEXP (x, 1)))));
+
+ case ASHIFTRT:
+ return fold (convert (type,
+ build (RSHIFT_EXPR, signed_type (type),
+ make_tree (signed_type (type), XEXP (x, 0)),
+ make_tree (type, XEXP (x, 1)))));
+
+ case DIV:
+ if (TREE_CODE (type) != REAL_TYPE)
+ t = signed_type (type);
+ else
+ t = type;
+
+ return fold (convert (type,
+ build (TRUNC_DIV_EXPR, t,
+ make_tree (t, XEXP (x, 0)),
+ make_tree (t, XEXP (x, 1)))));
+ case UDIV:
+ t = unsigned_type (type);
+ return fold (convert (type,
+ build (TRUNC_DIV_EXPR, t,
+ make_tree (t, XEXP (x, 0)),
+ make_tree (t, XEXP (x, 1)))));
+ default:
+ t = make_node (RTL_EXPR);
+ TREE_TYPE (t) = type;
+ RTL_EXPR_RTL (t) = x;
+ /* There are no insns to be output
+ when this rtl_expr is used. */
+ RTL_EXPR_SEQUENCE (t) = 0;
+ return t;
+ }
+}
+
+/* Return an rtx representing the value of X * MULT + ADD.
+ TARGET is a suggestion for where to store the result (an rtx).
+ MODE is the machine mode for the computation.
+ X and MULT must have mode MODE. ADD may have a different mode;
+ if ADD has mode VOIDmode, it is taken to have mode MODE.
+ UNSIGNEDP is non-zero to do unsigned multiplication.
+ This may emit insns. */
+
+rtx
+expand_mult_add (x, target, mult, add, mode, unsignedp)
+ rtx x, target, mult, add;
+ enum machine_mode mode;
+ int unsignedp;
+{
+ tree type = type_for_mode (mode, unsignedp);
+ tree add_type = (GET_MODE (add) == VOIDmode
+ ? type : type_for_mode (GET_MODE (add), unsignedp));
+ tree result = fold (build (PLUS_EXPR, type,
+ fold (build (MULT_EXPR, type,
+ make_tree (type, x),
+ make_tree (type, mult))),
+ make_tree (add_type, add)));
+
+ return expand_expr (result, target, VOIDmode, 0);
+}
+
+/* Compute the logical-and of OP0 and OP1, storing it in TARGET
+ and returning TARGET.
+
+ If TARGET is 0, a pseudo-register or constant is returned. */
+
+rtx
+expand_and (op0, op1, target)
+ rtx op0, op1, target;
+{
+ enum machine_mode mode = VOIDmode;
+ rtx tem;
+
+ if (GET_MODE (op0) != VOIDmode)
+ mode = GET_MODE (op0);
+ else if (GET_MODE (op1) != VOIDmode)
+ mode = GET_MODE (op1);
+
+ if (mode != VOIDmode)
+ tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
+ else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
+ tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
+ else
+ abort ();
+
+ if (target == 0)
+ target = tem;
+ else if (tem != target)
+ emit_move_insn (target, tem);
+ return target;
+}
+
+/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
+ and storing in TARGET. Normally return TARGET.
+ Return 0 if that cannot be done.
+
+ MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
+ it is VOIDmode, they cannot both be CONST_INT.
+
+ UNSIGNEDP is for the case where we have to widen the operands
+ to perform the operation. It says to use zero-extension.
+
+ NORMALIZEP is 1 if we should convert the result to be either zero
+ or one. NORMALIZEP is -1 if we should convert the result to be
+ either zero or -1. If NORMALIZEP is zero, the result will be left
+ "raw" out of the scc insn. */
+
+rtx
+emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
+ rtx target;
+ enum rtx_code code;
+ rtx op0, op1;
+ enum machine_mode mode;
+ int unsignedp;
+ int normalizep;
+{
+ rtx subtarget;
+ enum insn_code icode;
+ enum machine_mode compare_mode;
+ enum machine_mode target_mode = GET_MODE (target);
+ rtx tem;
+ rtx last = 0;
+ rtx pattern, comparison;
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op0);
+
+ /* If one operand is constant, make it the second one. Only do this
+ if the other operand is not constant as well. */
+
+ if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
+ || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
+ {
+ tem = op0;
+ op0 = op1;
+ op1 = tem;
+ code = swap_condition (code);
+ }
+
+ /* For some comparisons with 1 and -1, we can convert this to
+ comparisons with zero. This will often produce more opportunities for
+ store-flag insns. */
+
+ switch (code)
+ {
+ case LT:
+ if (op1 == const1_rtx)
+ op1 = const0_rtx, code = LE;
+ break;
+ case LE:
+ if (op1 == constm1_rtx)
+ op1 = const0_rtx, code = LT;
+ break;
+ case GE:
+ if (op1 == const1_rtx)
+ op1 = const0_rtx, code = GT;
+ break;
+ case GT:
+ if (op1 == constm1_rtx)
+ op1 = const0_rtx, code = GE;
+ break;
+ case GEU:
+ if (op1 == const1_rtx)
+ op1 = const0_rtx, code = NE;
+ break;
+ case LTU:
+ if (op1 == const1_rtx)
+ op1 = const0_rtx, code = EQ;
+ break;
+ }
+
+ /* From now on, we won't change CODE, so set ICODE now. */
+ icode = setcc_gen_code[(int) code];
+
+ /* If this is A < 0 or A >= 0, we can do this by taking the ones
+ complement of A (for GE) and shifting the sign bit to the low bit. */
+ if (op1 == const0_rtx && (code == LT || code == GE)
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && (normalizep || STORE_FLAG_VALUE == 1
+ || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (STORE_FLAG_VALUE
+ == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
+ {
+ subtarget = target;
+
+ /* If the result is to be wider than OP0, it is best to convert it
+ first. If it is to be narrower, it is *incorrect* to convert it
+ first. */
+ if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
+ {
+ op0 = protect_from_queue (op0, 0);
+ op0 = convert_modes (target_mode, mode, op0, 0);
+ mode = target_mode;
+ }
+
+ if (target_mode != mode)
+ subtarget = 0;
+
+ if (code == GE)
+ op0 = expand_unop (mode, one_cmpl_optab, op0, subtarget, 0);
+
+ if (normalizep || STORE_FLAG_VALUE == 1)
+ /* If we are supposed to produce a 0/1 value, we want to do
+ a logical shift from the sign bit to the low-order bit; for
+ a -1/0 value, we do an arithmetic shift. */
+ op0 = expand_shift (RSHIFT_EXPR, mode, op0,
+ size_int (GET_MODE_BITSIZE (mode) - 1),
+ subtarget, normalizep != -1);
+
+ if (mode != target_mode)
+ op0 = convert_modes (target_mode, mode, op0, 0);
+
+ return op0;
+ }
+
+ if (icode != CODE_FOR_nothing)
+ {
+ /* We think we may be able to do this with a scc insn. Emit the
+ comparison and then the scc insn.
+
+ compare_from_rtx may call emit_queue, which would be deleted below
+ if the scc insn fails. So call it ourselves before setting LAST. */
+
+ emit_queue ();
+ last = get_last_insn ();
+
+ comparison
+ = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
+ if (GET_CODE (comparison) == CONST_INT)
+ return (comparison == const0_rtx ? const0_rtx
+ : normalizep == 1 ? const1_rtx
+ : normalizep == -1 ? constm1_rtx
+ : const_true_rtx);
+
+ /* If the code of COMPARISON doesn't match CODE, something is
+ wrong; we can no longer be sure that we have the operation.
+ We could handle this case, but it should not happen. */
+
+ if (GET_CODE (comparison) != code)
+ abort ();
+
+ /* Get a reference to the target in the proper mode for this insn. */
+ compare_mode = insn_operand_mode[(int) icode][0];
+ subtarget = target;
+ if (preserve_subexpressions_p ()
+ || ! (*insn_operand_predicate[(int) icode][0]) (subtarget, compare_mode))
+ subtarget = gen_reg_rtx (compare_mode);
+
+ pattern = GEN_FCN (icode) (subtarget);
+ if (pattern)
+ {
+ emit_insn (pattern);
+
+ /* If we are converting to a wider mode, first convert to
+ TARGET_MODE, then normalize. This produces better combining
+ opportunities on machines that have a SIGN_EXTRACT when we are
+ testing a single bit. This mostly benefits the 68k.
+
+ If STORE_FLAG_VALUE does not have the sign bit set when
+ interpreted in COMPARE_MODE, we can do this conversion as
+ unsigned, which is usually more efficient. */
+ if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
+ {
+ convert_move (target, subtarget,
+ (GET_MODE_BITSIZE (compare_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && 0 == (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (compare_mode) -1))));
+ op0 = target;
+ compare_mode = target_mode;
+ }
+ else
+ op0 = subtarget;
+
+ /* If we want to keep subexpressions around, don't reuse our
+ last target. */
+
+ if (preserve_subexpressions_p ())
+ subtarget = 0;
+
+ /* Now normalize to the proper value in COMPARE_MODE. Sometimes
+ we don't have to do anything. */
+ if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
+ ;
+ else if (normalizep == - STORE_FLAG_VALUE)
+ op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
+
+ /* We don't want to use STORE_FLAG_VALUE < 0 below since this
+ makes it hard to use a value of just the sign bit due to
+ ANSI integer constant typing rules. */
+ else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (compare_mode) - 1))))
+ op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
+ size_int (GET_MODE_BITSIZE (compare_mode) - 1),
+ subtarget, normalizep == 1);
+ else if (STORE_FLAG_VALUE & 1)
+ {
+ op0 = expand_and (op0, const1_rtx, subtarget);
+ if (normalizep == -1)
+ op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
+ }
+ else
+ abort ();
+
+ /* If we were converting to a smaller mode, do the
+ conversion now. */
+ if (target_mode != compare_mode)
+ {
+ convert_move (target, op0, 0);
+ return target;
+ }
+ else
+ return op0;
+ }
+ }
+
+ if (last)
+ delete_insns_since (last);
+
+ subtarget = target_mode == mode ? target : 0;
+
+ /* If we reached here, we can't do this with a scc insn. However, there
+ are some comparisons that can be done directly. For example, if
+ this is an equality comparison of integers, we can try to exclusive-or
+ (or subtract) the two operands and use a recursive call to try the
+ comparison with zero. Don't do any of these cases if branches are
+ very cheap. */
+
+ if (BRANCH_COST > 0
+ && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
+ && op1 != const0_rtx)
+ {
+ tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
+ OPTAB_WIDEN);
+
+ if (tem == 0)
+ tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
+ OPTAB_WIDEN);
+ if (tem != 0)
+ tem = emit_store_flag (target, code, tem, const0_rtx,
+ mode, unsignedp, normalizep);
+ if (tem == 0)
+ delete_insns_since (last);
+ return tem;
+ }
+
+ /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
+ the constant zero. Reject all other comparisons at this point. Only
+ do LE and GT if branches are expensive since they are expensive on
+ 2-operand machines. */
+
+ if (BRANCH_COST == 0
+ || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
+ || (code != EQ && code != NE
+ && (BRANCH_COST <= 1 || (code != LE && code != GT))))
+ return 0;
+
+ /* See what we need to return. We can only return a 1, -1, or the
+ sign bit. */
+
+ if (normalizep == 0)
+ {
+ if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
+ normalizep = STORE_FLAG_VALUE;
+
+ else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (STORE_FLAG_VALUE
+ == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
+ ;
+ else
+ return 0;
+ }
+
+ /* Try to put the result of the comparison in the sign bit. Assume we can't
+ do the necessary operation below. */
+
+ tem = 0;
+
+ /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
+ the sign bit set. */
+
+ if (code == LE)
+ {
+ /* This is destructive, so SUBTARGET can't be OP0. */
+ if (rtx_equal_p (subtarget, op0))
+ subtarget = 0;
+
+ tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
+ OPTAB_WIDEN);
+ if (tem)
+ tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
+ OPTAB_WIDEN);
+ }
+
+ /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
+ number of bits in the mode of OP0, minus one. */
+
+ if (code == GT)
+ {
+ if (rtx_equal_p (subtarget, op0))
+ subtarget = 0;
+
+ tem = expand_shift (RSHIFT_EXPR, mode, op0,
+ size_int (GET_MODE_BITSIZE (mode) - 1),
+ subtarget, 0);
+ tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
+ OPTAB_WIDEN);
+ }
+
+ if (code == EQ || code == NE)
+ {
+ /* For EQ or NE, one way to do the comparison is to apply an operation
+ that converts the operand into a positive number if it is non-zero
+ or zero if it was originally zero. Then, for EQ, we subtract 1 and
+ for NE we negate. This puts the result in the sign bit. Then we
+ normalize with a shift, if needed.
+
+ Two operations that can do the above actions are ABS and FFS, so try
+ them. If that doesn't work, and MODE is smaller than a full word,
+ we can use zero-extension to the wider mode (an unsigned conversion)
+ as the operation. */
+
+ if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
+ else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
+ else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ {
+ op0 = protect_from_queue (op0, 0);
+ tem = convert_modes (word_mode, mode, op0, 1);
+ mode = word_mode;
+ }
+
+ if (tem != 0)
+ {
+ if (code == EQ)
+ tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
+ 0, OPTAB_WIDEN);
+ else
+ tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
+ }
+
+ /* If we couldn't do it that way, for NE we can "or" the two's complement
+ of the value with the value itself. For EQ, we take the one's
+ complement of that "or", which costs an extra insn, so we only handle
+ EQ if branches are expensive. */
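+ /* E.g., A = 0 gives (-0 | 0) = 0 (sign clear), while any nonzero A
+ gives (-A | A) with the sign bit set, since at least one of A and -A
+ is negative (A = INT_MIN is its own negation and already negative). */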
+
+ if (tem == 0 && (code == NE || BRANCH_COST > 1))
+ {
+ if (rtx_equal_p (subtarget, op0))
+ subtarget = 0;
+
+ tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
+ tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
+ OPTAB_WIDEN);
+
+ if (tem && code == EQ)
+ tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
+ }
+ }
+
+ if (tem && normalizep)
+ tem = expand_shift (RSHIFT_EXPR, mode, tem,
+ size_int (GET_MODE_BITSIZE (mode) - 1),
+ tem, normalizep == 1);
+
+ if (tem && GET_MODE (tem) != target_mode)
+ {
+ convert_move (target, tem, 0);
+ tem = target;
+ }
+
+ if (tem == 0)
+ delete_insns_since (last);
+
+ return tem;
+}
diff --git a/gnu/usr.bin/cc/cc_int/expr.c b/gnu/usr.bin/cc/cc_int/expr.c
new file mode 100644
index 0000000..e764986
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/expr.c
@@ -0,0 +1,10192 @@
+/* Convert tree expression to rtl instructions, for GNU compiler.
+ Copyright (C) 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include "machmode.h"
+#include "rtl.h"
+#include "tree.h"
+#include "obstack.h"
+#include "flags.h"
+#include "regs.h"
+#include "function.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+#include "expr.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "output.h"
+#include "typeclass.h"
+
+#include "bytecode.h"
+#include "bc-opcode.h"
+#include "bc-typecd.h"
+#include "bc-optab.h"
+#include "bc-emit.h"
+
+
+#define CEIL(x,y) (((x) + (y) - 1) / (y))
+
+/* Decide whether a function's arguments should be processed
+ from first to last or from last to first.
+
+ They should if the stack and args grow in opposite directions, but
+ only if we have push insns. */
+
+#ifdef PUSH_ROUNDING
+
+#if defined (STACK_GROWS_DOWNWARD) != defined (ARGS_GROW_DOWNWARD)
+#define PUSH_ARGS_REVERSED /* If it's last to first */
+#endif
+
+#endif
+
+#ifndef STACK_PUSH_CODE
+#ifdef STACK_GROWS_DOWNWARD
+#define STACK_PUSH_CODE PRE_DEC
+#else
+#define STACK_PUSH_CODE PRE_INC
+#endif
+#endif
+
+/* Like STACK_BOUNDARY but in units of bytes, not bits. */
+#define STACK_BYTES (STACK_BOUNDARY / BITS_PER_UNIT)
+
+/* If this is nonzero, we do not bother generating VOLATILE
+ around volatile memory references, and we are willing to
+ output indirect addresses. If cse is to follow, we reject
+ indirect addresses so a useful potential cse is generated;
+ if it is used only once, instruction combination will produce
+ the same indirect address eventually. */
+int cse_not_expected;
+
+/* Nonzero to generate code for all the subroutines within an
+ expression before generating the upper levels of the expression.
+ Nowadays this is never zero. */
+int do_preexpand_calls = 1;
+
+/* Number of units that we should eventually pop off the stack.
+ These are the arguments to function calls that have already returned. */
+int pending_stack_adjust;
+
+/* Nonzero means stack pops must not be deferred, and deferred stack
+ pops must not be output. It is nonzero inside a function call,
+ inside a conditional expression, inside a statement expression,
+ and in other cases as well. */
+int inhibit_defer_pop;
+
+/* A list of all cleanups which belong to the arguments of
+ function calls being expanded by expand_call. */
+tree cleanups_this_call;
+
+/* When temporaries are created by TARGET_EXPRs, they are created at
+ this level of temp_slot_level, so that they can remain allocated
+ until no longer needed. CLEANUP_POINT_EXPRs define the lifetime
+ of TARGET_EXPRs. */
+int target_temp_slot_level;
+
+/* Nonzero means __builtin_saveregs has already been done in this function.
+ The value is the pseudoreg containing the value __builtin_saveregs
+ returned. */
+static rtx saveregs_value;
+
+/* Similarly for __builtin_apply_args. */
+static rtx apply_args_value;
+
+/* This structure is used by move_by_pieces to describe the move to
+ be performed. */
+
+struct move_by_pieces
+{
+ rtx to;
+ rtx to_addr;
+ int autinc_to;
+ int explicit_inc_to;
+ rtx from;
+ rtx from_addr;
+ int autinc_from;
+ int explicit_inc_from;
+ int len;
+ int offset;
+ int reverse;
+};
+
+/* Used to generate bytecodes: keep track of size of local variables,
+ as well as depth of arithmetic stack. (Notice that variables are
+ stored on the machine's stack, not the arithmetic stack.) */
+
+extern int local_vars_size;
+extern int stack_depth;
+extern int max_stack_depth;
+extern struct obstack permanent_obstack;
+
+
+static rtx enqueue_insn PROTO((rtx, rtx));
+static int queued_subexp_p PROTO((rtx));
+static void init_queue PROTO((void));
+static void move_by_pieces PROTO((rtx, rtx, int, int));
+static int move_by_pieces_ninsns PROTO((unsigned int, int));
+static void move_by_pieces_1 PROTO((rtx (*) (), enum machine_mode,
+ struct move_by_pieces *));
+static void store_constructor PROTO((tree, rtx));
+static rtx store_field PROTO((rtx, int, int, enum machine_mode, tree,
+ enum machine_mode, int, int, int));
+static int get_inner_unaligned_p PROTO((tree));
+static tree save_noncopied_parts PROTO((tree, tree));
+static tree init_noncopied_parts PROTO((tree, tree));
+static int safe_from_p PROTO((rtx, tree));
+static int fixed_type_p PROTO((tree));
+static int get_pointer_alignment PROTO((tree, unsigned));
+static tree string_constant PROTO((tree, tree *));
+static tree c_strlen PROTO((tree));
+static rtx expand_builtin PROTO((tree, rtx, rtx,
+ enum machine_mode, int));
+static int apply_args_size PROTO((void));
+static int apply_result_size PROTO((void));
+static rtx result_vector PROTO((int, rtx));
+static rtx expand_builtin_apply_args PROTO((void));
+static rtx expand_builtin_apply PROTO((rtx, rtx, rtx));
+static void expand_builtin_return PROTO((rtx));
+static rtx expand_increment PROTO((tree, int));
+rtx bc_expand_increment PROTO((struct increment_operator *, tree));
+tree bc_runtime_type_code PROTO((tree));
+rtx bc_allocate_local PROTO((int, int));
+void bc_store_memory PROTO((tree, tree));
+tree bc_expand_component_address PROTO((tree));
+tree bc_expand_address PROTO((tree));
+void bc_expand_constructor PROTO((tree));
+void bc_adjust_stack PROTO((int));
+tree bc_canonicalize_array_ref PROTO((tree));
+void bc_load_memory PROTO((tree, tree));
+void bc_load_externaddr PROTO((rtx));
+void bc_load_externaddr_id PROTO((tree, int));
+void bc_load_localaddr PROTO((rtx));
+void bc_load_parmaddr PROTO((rtx));
+static void preexpand_calls PROTO((tree));
+static void do_jump_by_parts_greater PROTO((tree, int, rtx, rtx));
+static void do_jump_by_parts_greater_rtx PROTO((enum machine_mode, int, rtx, rtx, rtx, rtx));
+static void do_jump_by_parts_equality PROTO((tree, rtx, rtx));
+static void do_jump_by_parts_equality_rtx PROTO((rtx, rtx, rtx));
+static void do_jump_for_compare PROTO((rtx, rtx, rtx));
+static rtx compare PROTO((tree, enum rtx_code, enum rtx_code));
+static rtx do_store_flag PROTO((tree, rtx, enum machine_mode, int));
+static tree defer_cleanups_to PROTO((tree));
+extern void (*interim_eh_hook) PROTO((tree));
+
+/* Record for each mode whether we can move a register directly to or
+ from an object of that mode in memory. If we can't, we won't try
+ to use that mode directly when accessing a field of that mode. */
+
+static char direct_load[NUM_MACHINE_MODES];
+static char direct_store[NUM_MACHINE_MODES];
+
+/* MOVE_RATIO is the number of move instructions that is better than
+ a block move. */
+
+#ifndef MOVE_RATIO
+#if defined (HAVE_movstrqi) || defined (HAVE_movstrhi) || defined (HAVE_movstrsi) || defined (HAVE_movstrdi) || defined (HAVE_movstrti)
+#define MOVE_RATIO 2
+#else
+/* A value of around 6 would minimize code size; infinity would minimize
+ execution time. */
+#define MOVE_RATIO 15
+#endif
+#endif
+
+/* This array records the insn_code of insns to perform block moves. */
+enum insn_code movstr_optab[NUM_MACHINE_MODES];
+
+/* SLOW_UNALIGNED_ACCESS is non-zero if unaligned accesses are very slow. */
+
+#ifndef SLOW_UNALIGNED_ACCESS
+#define SLOW_UNALIGNED_ACCESS 0
+#endif
+
+/* Register mappings for target machines without register windows. */
+#ifndef INCOMING_REGNO
+#define INCOMING_REGNO(OUT) (OUT)
+#endif
+#ifndef OUTGOING_REGNO
+#define OUTGOING_REGNO(IN) (IN)
+#endif
+
+/* Maps used to convert modes to const, load, and store bytecodes. */
+enum bytecode_opcode mode_to_const_map[MAX_MACHINE_MODE];
+enum bytecode_opcode mode_to_load_map[MAX_MACHINE_MODE];
+enum bytecode_opcode mode_to_store_map[MAX_MACHINE_MODE];
+
+/* Initialize maps used to convert modes to const, load, and store
+ bytecodes. */
+void
+bc_init_mode_to_opcode_maps ()
+{
+ int mode;
+
+ for (mode = 0; mode < (int) MAX_MACHINE_MODE; mode++)
+ mode_to_const_map[mode] =
+ mode_to_load_map[mode] =
+ mode_to_store_map[mode] = neverneverland;
+
+#define DEF_MODEMAP(SYM, CODE, UCODE, CONST, LOAD, STORE) \
+ mode_to_const_map[(int) SYM] = CONST; \
+ mode_to_load_map[(int) SYM] = LOAD; \
+ mode_to_store_map[(int) SYM] = STORE;
+
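+ /* Each DEF_MODEMAP line in modemap.def expands to the three assignments
+ above, recording for one machine mode the bytecode opcodes that push a
+ constant of that mode, load it, and store it. */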
+#include "modemap.def"
+#undef DEF_MODEMAP
+}
+
+/* This is run once per compilation to set up which modes can be used
+ directly in memory and to initialize the block move optab. */
+
+void
+init_expr_once ()
+{
+ rtx insn, pat;
+ enum machine_mode mode;
+ /* Try indexing by frame ptr and try by stack ptr.
+ It is known that on the Convex the stack ptr isn't a valid index.
+ With luck, one or the other is valid on any machine. */
+ rtx mem = gen_rtx (MEM, VOIDmode, stack_pointer_rtx);
+ rtx mem1 = gen_rtx (MEM, VOIDmode, frame_pointer_rtx);
+
+ start_sequence ();
+ insn = emit_insn (gen_rtx (SET, 0, 0));
+ pat = PATTERN (insn);
+
+ for (mode = VOIDmode; (int) mode < NUM_MACHINE_MODES;
+ mode = (enum machine_mode) ((int) mode + 1))
+ {
+ int regno;
+ rtx reg;
+ int num_clobbers;
+
+ direct_load[(int) mode] = direct_store[(int) mode] = 0;
+ PUT_MODE (mem, mode);
+ PUT_MODE (mem1, mode);
+
+ /* See if there is some register that can be used in this mode and
+ directly loaded or stored from memory. */
+
+ if (mode != VOIDmode && mode != BLKmode)
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER
+ && (direct_load[(int) mode] == 0 || direct_store[(int) mode] == 0);
+ regno++)
+ {
+ if (! HARD_REGNO_MODE_OK (regno, mode))
+ continue;
+
+ reg = gen_rtx (REG, mode, regno);
+
+ SET_SRC (pat) = mem;
+ SET_DEST (pat) = reg;
+ if (recog (pat, insn, &num_clobbers) >= 0)
+ direct_load[(int) mode] = 1;
+
+ SET_SRC (pat) = mem1;
+ SET_DEST (pat) = reg;
+ if (recog (pat, insn, &num_clobbers) >= 0)
+ direct_load[(int) mode] = 1;
+
+ SET_SRC (pat) = reg;
+ SET_DEST (pat) = mem;
+ if (recog (pat, insn, &num_clobbers) >= 0)
+ direct_store[(int) mode] = 1;
+
+ SET_SRC (pat) = reg;
+ SET_DEST (pat) = mem1;
+ if (recog (pat, insn, &num_clobbers) >= 0)
+ direct_store[(int) mode] = 1;
+ }
+ }
+
+ end_sequence ();
+}
+
+/* This is run at the start of compiling a function. */
+
+void
+init_expr ()
+{
+ init_queue ();
+
+ pending_stack_adjust = 0;
+ inhibit_defer_pop = 0;
+ cleanups_this_call = 0;
+ saveregs_value = 0;
+ apply_args_value = 0;
+ forced_labels = 0;
+}
+
+/* Save all variables describing the current status into the structure *P.
+ This is used before starting a nested function. */
+
+void
+save_expr_status (p)
+ struct function *p;
+{
+ /* Instead of saving the postincrement queue, empty it. */
+ emit_queue ();
+
+ p->pending_stack_adjust = pending_stack_adjust;
+ p->inhibit_defer_pop = inhibit_defer_pop;
+ p->cleanups_this_call = cleanups_this_call;
+ p->saveregs_value = saveregs_value;
+ p->apply_args_value = apply_args_value;
+ p->forced_labels = forced_labels;
+
+ pending_stack_adjust = 0;
+ inhibit_defer_pop = 0;
+ cleanups_this_call = 0;
+ saveregs_value = 0;
+ apply_args_value = 0;
+ forced_labels = 0;
+}
+
+/* Restore all variables describing the current status from the structure *P.
+ This is used after a nested function. */
+
+void
+restore_expr_status (p)
+ struct function *p;
+{
+ pending_stack_adjust = p->pending_stack_adjust;
+ inhibit_defer_pop = p->inhibit_defer_pop;
+ cleanups_this_call = p->cleanups_this_call;
+ saveregs_value = p->saveregs_value;
+ apply_args_value = p->apply_args_value;
+ forced_labels = p->forced_labels;
+}
+
+/* Manage the queue of increment instructions to be output
+ for POSTINCREMENT_EXPR expressions, etc. */
+
+static rtx pending_chain;
+
+/* Queue up to increment (or change) VAR later. BODY says how:
+ BODY should be the same thing you would pass to emit_insn
+ to increment right away. It will go to emit_insn later on.
+
+ The value is a QUEUED expression to be used in place of VAR
+ where you want to guarantee the pre-incrementation value of VAR. */
+
+static rtx
+enqueue_insn (var, body)
+ rtx var, body;
+{
+ pending_chain = gen_rtx (QUEUED, GET_MODE (var),
+ var, NULL_RTX, NULL_RTX, body, pending_chain);
+ return pending_chain;
+}
+
+/* Use protect_from_queue to convert a QUEUED expression
+ into something that you can put immediately into an instruction.
+ If the queued incrementation has not happened yet,
+ protect_from_queue returns the variable itself.
+ If the incrementation has happened, protect_from_queue returns a temp
+ that contains a copy of the old value of the variable.
+
+ Any time an rtx which might possibly be a QUEUED is to be put
+ into an instruction, it must be passed through protect_from_queue first.
+ QUEUED expressions are not meaningful in instructions.
+
+ Do not pass a value through protect_from_queue and then hold
+ on to it for a while before putting it in an instruction!
+ If the queue is flushed in between, incorrect code will result. */
+
+rtx
+protect_from_queue (x, modify)
+ register rtx x;
+ int modify;
+{
+ register RTX_CODE code = GET_CODE (x);
+
+#if 0 /* A QUEUED can hang around after the queue is forced out. */
+ /* Shortcut for most common case. */
+ if (pending_chain == 0)
+ return x;
+#endif
+
+ if (code != QUEUED)
+ {
+ /* A special hack for read access to (MEM (QUEUED ...)) to facilitate
+ use of autoincrement. Make a copy of the contents of the memory
+ location rather than a copy of the address, but not if the value is
+ of mode BLKmode. Don't modify X in place since it might be
+ shared. */
+ if (code == MEM && GET_MODE (x) != BLKmode
+ && GET_CODE (XEXP (x, 0)) == QUEUED && !modify)
+ {
+ register rtx y = XEXP (x, 0);
+ register rtx new = gen_rtx (MEM, GET_MODE (x), QUEUED_VAR (y));
+
+ MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
+ MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x);
+
+ if (QUEUED_INSN (y))
+ {
+ register rtx temp = gen_reg_rtx (GET_MODE (new));
+ emit_insn_before (gen_move_insn (temp, new),
+ QUEUED_INSN (y));
+ return temp;
+ }
+ return new;
+ }
+ /* Otherwise, recursively protect the subexpressions of all
+ the kinds of rtx's that can contain a QUEUED. */
+ if (code == MEM)
+ {
+ rtx tem = protect_from_queue (XEXP (x, 0), 0);
+ if (tem != XEXP (x, 0))
+ {
+ x = copy_rtx (x);
+ XEXP (x, 0) = tem;
+ }
+ }
+ else if (code == PLUS || code == MULT)
+ {
+ rtx new0 = protect_from_queue (XEXP (x, 0), 0);
+ rtx new1 = protect_from_queue (XEXP (x, 1), 0);
+ if (new0 != XEXP (x, 0) || new1 != XEXP (x, 1))
+ {
+ x = copy_rtx (x);
+ XEXP (x, 0) = new0;
+ XEXP (x, 1) = new1;
+ }
+ }
+ return x;
+ }
+ /* If the increment has not happened, use the variable itself. */
+ if (QUEUED_INSN (x) == 0)
+ return QUEUED_VAR (x);
+ /* If the increment has happened and a pre-increment copy exists,
+ use that copy. */
+ if (QUEUED_COPY (x) != 0)
+ return QUEUED_COPY (x);
+ /* The increment has happened but we haven't set up a pre-increment copy.
+ Set one up now, and use it. */
+ QUEUED_COPY (x) = gen_reg_rtx (GET_MODE (QUEUED_VAR (x)));
+ emit_insn_before (gen_move_insn (QUEUED_COPY (x), QUEUED_VAR (x)),
+ QUEUED_INSN (x));
+ return QUEUED_COPY (x);
+}
+
+/* Return nonzero if X contains a QUEUED expression:
+ if it contains anything that will be altered by a queued increment.
+ We handle only combinations of MEM, PLUS, MINUS and MULT operators
+ since memory addresses generally contain only those. */
+
+static int
+queued_subexp_p (x)
+ rtx x;
+{
+ register enum rtx_code code = GET_CODE (x);
+ switch (code)
+ {
+ case QUEUED:
+ return 1;
+ case MEM:
+ return queued_subexp_p (XEXP (x, 0));
+ case MULT:
+ case PLUS:
+ case MINUS:
+ return queued_subexp_p (XEXP (x, 0))
+ || queued_subexp_p (XEXP (x, 1));
+ }
+ return 0;
+}
+
+/* Perform all the pending incrementations. */
+
+void
+emit_queue ()
+{
+ register rtx p;
+ while ((p = pending_chain) != 0)
+ {
+ QUEUED_INSN (p) = emit_insn (QUEUED_BODY (p));
+ pending_chain = QUEUED_NEXT (p);
+ }
+}
+
+static void
+init_queue ()
+{
+ if (pending_chain)
+ abort ();
+}
+
+/* Copy data from FROM to TO, where the machine modes are not the same.
+ Both modes may be integer, or both may be floating.
+ UNSIGNEDP should be nonzero if FROM is an unsigned type.
+ This causes zero-extension instead of sign-extension. */
+
+void
+convert_move (to, from, unsignedp)
+ register rtx to, from;
+ int unsignedp;
+{
+ enum machine_mode to_mode = GET_MODE (to);
+ enum machine_mode from_mode = GET_MODE (from);
+ int to_real = GET_MODE_CLASS (to_mode) == MODE_FLOAT;
+ int from_real = GET_MODE_CLASS (from_mode) == MODE_FLOAT;
+ enum insn_code code;
+ rtx libcall;
+
+ /* rtx code for making an equivalent value. */
+ enum rtx_code equiv_code = (unsignedp ? ZERO_EXTEND : SIGN_EXTEND);
+
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (to_real != from_real)
+ abort ();
+
+ /* If FROM is a SUBREG that indicates that we have already done at least
+ the required extension, strip it. We don't handle such SUBREGs as
+ TO here. */
+
+ if (GET_CODE (from) == SUBREG && SUBREG_PROMOTED_VAR_P (from)
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (from)))
+ >= GET_MODE_SIZE (to_mode))
+ && SUBREG_PROMOTED_UNSIGNED_P (from) == unsignedp)
+ from = gen_lowpart (to_mode, from), from_mode = to_mode;
+
+ if (GET_CODE (to) == SUBREG && SUBREG_PROMOTED_VAR_P (to))
+ abort ();
+
+ if (to_mode == from_mode
+ || (from_mode == VOIDmode && CONSTANT_P (from)))
+ {
+ emit_move_insn (to, from);
+ return;
+ }
+
+ if (to_real)
+ {
+ rtx value;
+
+#ifdef HAVE_extendqfhf2
+ if (HAVE_extendqfhf2 && from_mode == QFmode && to_mode == HFmode)
+ {
+ emit_unop_insn (CODE_FOR_extendqfhf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_extendqfsf2
+ if (HAVE_extendqfsf2 && from_mode == QFmode && to_mode == SFmode)
+ {
+ emit_unop_insn (CODE_FOR_extendqfsf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_extendqfdf2
+ if (HAVE_extendqfdf2 && from_mode == QFmode && to_mode == DFmode)
+ {
+ emit_unop_insn (CODE_FOR_extendqfdf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_extendqfxf2
+ if (HAVE_extendqfxf2 && from_mode == QFmode && to_mode == XFmode)
+ {
+ emit_unop_insn (CODE_FOR_extendqfxf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_extendqftf2
+ if (HAVE_extendqftf2 && from_mode == QFmode && to_mode == TFmode)
+ {
+ emit_unop_insn (CODE_FOR_extendqftf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+
+#ifdef HAVE_extendhftqf2
+ if (HAVE_extendhftqf2 && from_mode == HFmode && to_mode == TQFmode)
+ {
+ emit_unop_insn (CODE_FOR_extendhftqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+
+#ifdef HAVE_extendhfsf2
+ if (HAVE_extendhfsf2 && from_mode == HFmode && to_mode == SFmode)
+ {
+ emit_unop_insn (CODE_FOR_extendhfsf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_extendhfdf2
+ if (HAVE_extendhfdf2 && from_mode == HFmode && to_mode == DFmode)
+ {
+ emit_unop_insn (CODE_FOR_extendhfdf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_extendhfxf2
+ if (HAVE_extendhfxf2 && from_mode == HFmode && to_mode == XFmode)
+ {
+ emit_unop_insn (CODE_FOR_extendhfxf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_extendhftf2
+ if (HAVE_extendhftf2 && from_mode == HFmode && to_mode == TFmode)
+ {
+ emit_unop_insn (CODE_FOR_extendhftf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+
+#ifdef HAVE_extendsfdf2
+ if (HAVE_extendsfdf2 && from_mode == SFmode && to_mode == DFmode)
+ {
+ emit_unop_insn (CODE_FOR_extendsfdf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_extendsfxf2
+ if (HAVE_extendsfxf2 && from_mode == SFmode && to_mode == XFmode)
+ {
+ emit_unop_insn (CODE_FOR_extendsfxf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_extendsftf2
+ if (HAVE_extendsftf2 && from_mode == SFmode && to_mode == TFmode)
+ {
+ emit_unop_insn (CODE_FOR_extendsftf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_extenddfxf2
+ if (HAVE_extenddfxf2 && from_mode == DFmode && to_mode == XFmode)
+ {
+ emit_unop_insn (CODE_FOR_extenddfxf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_extenddftf2
+ if (HAVE_extenddftf2 && from_mode == DFmode && to_mode == TFmode)
+ {
+ emit_unop_insn (CODE_FOR_extenddftf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+
+#ifdef HAVE_trunchfqf2
+ if (HAVE_trunchfqf2 && from_mode == HFmode && to_mode == QFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunchfqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncsfqf2
+ if (HAVE_truncsfqf2 && from_mode == SFmode && to_mode == QFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncsfqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncdfqf2
+ if (HAVE_truncdfqf2 && from_mode == DFmode && to_mode == QFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncdfqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncxfqf2
+ if (HAVE_truncxfqf2 && from_mode == XFmode && to_mode == QFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncxfqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_trunctfqf2
+ if (HAVE_trunctfqf2 && from_mode == TFmode && to_mode == QFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunctfqf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+
+#ifdef HAVE_trunctqfhf2
+ if (HAVE_trunctqfhf2 && from_mode == TQFmode && to_mode == HFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunctqfhf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncsfhf2
+ if (HAVE_truncsfhf2 && from_mode == SFmode && to_mode == HFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncsfhf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncdfhf2
+ if (HAVE_truncdfhf2 && from_mode == DFmode && to_mode == HFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncdfhf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncxfhf2
+ if (HAVE_truncxfhf2 && from_mode == XFmode && to_mode == HFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncxfhf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_trunctfhf2
+ if (HAVE_trunctfhf2 && from_mode == TFmode && to_mode == HFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunctfhf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncdfsf2
+ if (HAVE_truncdfsf2 && from_mode == DFmode && to_mode == SFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncdfsf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncxfsf2
+ if (HAVE_truncxfsf2 && from_mode == XFmode && to_mode == SFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncxfsf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_trunctfsf2
+ if (HAVE_trunctfsf2 && from_mode == TFmode && to_mode == SFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunctfsf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_truncxfdf2
+ if (HAVE_truncxfdf2 && from_mode == XFmode && to_mode == DFmode)
+ {
+ emit_unop_insn (CODE_FOR_truncxfdf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+#ifdef HAVE_trunctfdf2
+ if (HAVE_trunctfdf2 && from_mode == TFmode && to_mode == DFmode)
+ {
+ emit_unop_insn (CODE_FOR_trunctfdf2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+
+ libcall = (rtx) 0;
+ switch (from_mode)
+ {
+ case SFmode:
+ switch (to_mode)
+ {
+ case DFmode:
+ libcall = extendsfdf2_libfunc;
+ break;
+
+ case XFmode:
+ libcall = extendsfxf2_libfunc;
+ break;
+
+ case TFmode:
+ libcall = extendsftf2_libfunc;
+ break;
+ }
+ break;
+
+ case DFmode:
+ switch (to_mode)
+ {
+ case SFmode:
+ libcall = truncdfsf2_libfunc;
+ break;
+
+ case XFmode:
+ libcall = extenddfxf2_libfunc;
+ break;
+
+ case TFmode:
+ libcall = extenddftf2_libfunc;
+ break;
+ }
+ break;
+
+ case XFmode:
+ switch (to_mode)
+ {
+ case SFmode:
+ libcall = truncxfsf2_libfunc;
+ break;
+
+ case DFmode:
+ libcall = truncxfdf2_libfunc;
+ break;
+ }
+ break;
+
+ case TFmode:
+ switch (to_mode)
+ {
+ case SFmode:
+ libcall = trunctfsf2_libfunc;
+ break;
+
+ case DFmode:
+ libcall = trunctfdf2_libfunc;
+ break;
+ }
+ break;
+ }
+
+ if (libcall == (rtx) 0)
+ /* This conversion is not implemented yet. */
+ abort ();
+
+ value = emit_library_call_value (libcall, NULL_RTX, 1, to_mode,
+ 1, from, from_mode);
+ emit_move_insn (to, value);
+ return;
+ }
+
+ /* Now both modes are integers. */
+
+ /* Handle expanding beyond a word. */
+ if (GET_MODE_BITSIZE (from_mode) < GET_MODE_BITSIZE (to_mode)
+ && GET_MODE_BITSIZE (to_mode) > BITS_PER_WORD)
+ {
+ rtx insns;
+ rtx lowpart;
+ rtx fill_value;
+ rtx lowfrom;
+ int i;
+ enum machine_mode lowpart_mode;
+ int nwords = CEIL (GET_MODE_SIZE (to_mode), UNITS_PER_WORD);
+
+ /* Try converting directly if the insn is supported. */
+ if ((code = can_extend_p (to_mode, from_mode, unsignedp))
+ != CODE_FOR_nothing)
+ {
+ /* If FROM is a SUBREG, put it into a register. Do this
+ so that we always generate the same set of insns for
+ better cse'ing; if an intermediate assignment occurred,
+ we won't be doing the operation directly on the SUBREG. */
+ if (optimize > 0 && GET_CODE (from) == SUBREG)
+ from = force_reg (from_mode, from);
+ emit_unop_insn (code, to, from, equiv_code);
+ return;
+ }
+ /* Next, try converting via full word. */
+ else if (GET_MODE_BITSIZE (from_mode) < BITS_PER_WORD
+ && ((code = can_extend_p (to_mode, word_mode, unsignedp))
+ != CODE_FOR_nothing))
+ {
+ if (GET_CODE (to) == REG)
+ emit_insn (gen_rtx (CLOBBER, VOIDmode, to));
+ convert_move (gen_lowpart (word_mode, to), from, unsignedp);
+ emit_unop_insn (code, to,
+ gen_lowpart (word_mode, to), equiv_code);
+ return;
+ }
+
+ /* No special multiword conversion insn; do it by hand. */
+ start_sequence ();
+
+ /* Get a copy of FROM widened to a word, if necessary. */
+ if (GET_MODE_BITSIZE (from_mode) < BITS_PER_WORD)
+ lowpart_mode = word_mode;
+ else
+ lowpart_mode = from_mode;
+
+ lowfrom = convert_to_mode (lowpart_mode, from, unsignedp);
+
+ lowpart = gen_lowpart (lowpart_mode, to);
+ emit_move_insn (lowpart, lowfrom);
+
+ /* Compute the value to put in each remaining word. */
+ if (unsignedp)
+ fill_value = const0_rtx;
+ else
+ {
+#ifdef HAVE_slt
+ if (HAVE_slt
+ && insn_operand_mode[(int) CODE_FOR_slt][0] == word_mode
+ && STORE_FLAG_VALUE == -1)
+ {
+ emit_cmp_insn (lowfrom, const0_rtx, NE, NULL_RTX,
+ lowpart_mode, 0, 0);
+ fill_value = gen_reg_rtx (word_mode);
+ emit_insn (gen_slt (fill_value));
+ }
+ else
+#endif
+ {
+ fill_value
+ = expand_shift (RSHIFT_EXPR, lowpart_mode, lowfrom,
+ size_int (GET_MODE_BITSIZE (lowpart_mode) - 1),
+ NULL_RTX, 0);
+ fill_value = convert_to_mode (word_mode, fill_value, 1);
+ }
+ }
+
+ /* Fill the remaining words. */
+ for (i = GET_MODE_SIZE (lowpart_mode) / UNITS_PER_WORD; i < nwords; i++)
+ {
+ int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
+ rtx subword = operand_subword (to, index, 1, to_mode);
+
+ if (subword == 0)
+ abort ();
+
+ if (fill_value != subword)
+ emit_move_insn (subword, fill_value);
+ }
+
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_no_conflict_block (insns, to, from, NULL_RTX,
+ gen_rtx (equiv_code, to_mode, copy_rtx (from)));
+ return;
+ }
+
+ /* Truncating multi-word to a word or less. */
+ if (GET_MODE_BITSIZE (from_mode) > BITS_PER_WORD
+ && GET_MODE_BITSIZE (to_mode) <= BITS_PER_WORD)
+ {
+ if (!((GET_CODE (from) == MEM
+ && ! MEM_VOLATILE_P (from)
+ && direct_load[(int) to_mode]
+ && ! mode_dependent_address_p (XEXP (from, 0)))
+ || GET_CODE (from) == REG
+ || GET_CODE (from) == SUBREG))
+ from = force_reg (from_mode, from);
+ convert_move (to, gen_lowpart (word_mode, from), 0);
+ return;
+ }
+
+ /* Handle pointer conversion. */ /* SPEE 900220 */
+ if (to_mode == PSImode)
+ {
+ if (from_mode != SImode)
+ from = convert_to_mode (SImode, from, unsignedp);
+
+#ifdef HAVE_truncsipsi2
+ if (HAVE_truncsipsi2)
+ {
+ emit_unop_insn (CODE_FOR_truncsipsi2, to, from, UNKNOWN);
+ return;
+ }
+#endif /* HAVE_truncsipsi2 */
+ abort ();
+ }
+
+ if (from_mode == PSImode)
+ {
+ if (to_mode != SImode)
+ {
+ from = convert_to_mode (SImode, from, unsignedp);
+ from_mode = SImode;
+ }
+ else
+ {
+#ifdef HAVE_extendpsisi2
+ if (HAVE_extendpsisi2)
+ {
+ emit_unop_insn (CODE_FOR_extendpsisi2, to, from, UNKNOWN);
+ return;
+ }
+#endif /* HAVE_extendpsisi2 */
+ abort ();
+ }
+ }
+
+ /* Now follow all the conversions between integers
+ no more than a word long. */
+
+ /* For truncation, usually we can just refer to FROM in a narrower mode. */
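+ /* E.g., truncating an SImode register to QImode normally just reads its
+ low-order byte, provided TRULY_NOOP_TRUNCATION says the subreg access
+ is safe on this machine. */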
+ if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode)
+ && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode),
+ GET_MODE_BITSIZE (from_mode)))
+ {
+ if (!((GET_CODE (from) == MEM
+ && ! MEM_VOLATILE_P (from)
+ && direct_load[(int) to_mode]
+ && ! mode_dependent_address_p (XEXP (from, 0)))
+ || GET_CODE (from) == REG
+ || GET_CODE (from) == SUBREG))
+ from = force_reg (from_mode, from);
+ emit_move_insn (to, gen_lowpart (to_mode, from));
+ return;
+ }
+
+ /* Handle extension. */
+ if (GET_MODE_BITSIZE (to_mode) > GET_MODE_BITSIZE (from_mode))
+ {
+ /* Convert directly if that works. */
+ if ((code = can_extend_p (to_mode, from_mode, unsignedp))
+ != CODE_FOR_nothing)
+ {
+ /* If FROM is a SUBREG, put it into a register. Do this
+ so that we always generate the same set of insns for
+ better cse'ing; if an intermediate assignment occurred,
+ we won't be doing the operation directly on the SUBREG. */
+ if (optimize > 0 && GET_CODE (from) == SUBREG)
+ from = force_reg (from_mode, from);
+ emit_unop_insn (code, to, from, equiv_code);
+ return;
+ }
+ else
+ {
+ enum machine_mode intermediate;
+
+ /* Search for a mode to convert via. */
+ for (intermediate = from_mode; intermediate != VOIDmode;
+ intermediate = GET_MODE_WIDER_MODE (intermediate))
+ if (((can_extend_p (to_mode, intermediate, unsignedp)
+ != CODE_FOR_nothing)
+ || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
+ && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode),
+ GET_MODE_BITSIZE (intermediate))))
+ && (can_extend_p (intermediate, from_mode, unsignedp)
+ != CODE_FOR_nothing))
+ {
+ convert_move (to, convert_to_mode (intermediate, from,
+ unsignedp), unsignedp);
+ return;
+ }
+
+ /* No suitable intermediate mode. */
+ abort ();
+ }
+ }
+
+ /* Support special truncate insns for certain modes. */
+
+ if (from_mode == DImode && to_mode == SImode)
+ {
+#ifdef HAVE_truncdisi2
+ if (HAVE_truncdisi2)
+ {
+ emit_unop_insn (CODE_FOR_truncdisi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == DImode && to_mode == HImode)
+ {
+#ifdef HAVE_truncdihi2
+ if (HAVE_truncdihi2)
+ {
+ emit_unop_insn (CODE_FOR_truncdihi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == DImode && to_mode == QImode)
+ {
+#ifdef HAVE_truncdiqi2
+ if (HAVE_truncdiqi2)
+ {
+ emit_unop_insn (CODE_FOR_truncdiqi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == SImode && to_mode == HImode)
+ {
+#ifdef HAVE_truncsihi2
+ if (HAVE_truncsihi2)
+ {
+ emit_unop_insn (CODE_FOR_truncsihi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == SImode && to_mode == QImode)
+ {
+#ifdef HAVE_truncsiqi2
+ if (HAVE_truncsiqi2)
+ {
+ emit_unop_insn (CODE_FOR_truncsiqi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ if (from_mode == HImode && to_mode == QImode)
+ {
+#ifdef HAVE_trunchiqi2
+ if (HAVE_trunchiqi2)
+ {
+ emit_unop_insn (CODE_FOR_trunchiqi2, to, from, UNKNOWN);
+ return;
+ }
+#endif
+ convert_move (to, force_reg (from_mode, from), unsignedp);
+ return;
+ }
+
+ /* Handle truncation of volatile memrefs, and so on;
+ the things that couldn't be truncated directly,
+ and for which there was no special instruction. */
+ if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode))
+ {
+ rtx temp = force_reg (to_mode, gen_lowpart (to_mode, from));
+ emit_move_insn (to, temp);
+ return;
+ }
+
+ /* Mode combination is not recognized. */
+ abort ();
+}
+
+/* Return an rtx for a value that would result
+ from converting X to mode MODE.
+ Both X and MODE may be floating, or both integer.
+ UNSIGNEDP is nonzero if X is an unsigned value.
+ This can be done by referring to a part of X in place
+ or by copying to a new temporary with conversion.
+
+ This function *must not* call protect_from_queue
+ except when putting X into an insn (in which case convert_move does it). */
+
+rtx
+convert_to_mode (mode, x, unsignedp)
+ enum machine_mode mode;
+ rtx x;
+ int unsignedp;
+{
+ return convert_modes (mode, VOIDmode, x, unsignedp);
+}
+
+/* Return an rtx for a value that would result
+ from converting X from mode OLDMODE to mode MODE.
+ Both modes may be floating, or both integer.
+ UNSIGNEDP is nonzero if X is an unsigned value.
+
+ This can be done by referring to a part of X in place
+ or by copying to a new temporary with conversion.
+
+ You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode.
+
+ This function *must not* call protect_from_queue
+ except when putting X into an insn (in which case convert_move does it). */
+
+rtx
+convert_modes (mode, oldmode, x, unsignedp)
+ enum machine_mode mode, oldmode;
+ rtx x;
+ int unsignedp;
+{
+ register rtx temp;
+
+ /* If FROM is a SUBREG that indicates that we have already done at least
+ the required extension, strip it. */
+
+ if (GET_CODE (x) == SUBREG && SUBREG_PROMOTED_VAR_P (x)
+ && GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) >= GET_MODE_SIZE (mode)
+ && SUBREG_PROMOTED_UNSIGNED_P (x) == unsignedp)
+ x = gen_lowpart (mode, x);
+
+ if (GET_MODE (x) != VOIDmode)
+ oldmode = GET_MODE (x);
+
+ if (mode == oldmode)
+ return x;
+
+ /* There is one case that we must handle specially: If we are converting
+ a CONST_INT into a mode whose size is twice HOST_BITS_PER_WIDE_INT and
+ we are to interpret the constant as unsigned, gen_lowpart will do
+ the wrong thing if the constant appears negative. What we want to do is
+ make the high-order word of the constant zero, not all ones. */
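+ /* For instance, with a 32-bit HOST_WIDE_INT, converting the CONST_INT -1
+ to an unsigned mode twice that width must yield 0x00000000ffffffff;
+ gen_lowpart would sign-extend and produce all ones instead. */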
+
+ if (unsignedp && GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_BITSIZE (mode) == 2 * HOST_BITS_PER_WIDE_INT
+ && GET_CODE (x) == CONST_INT && INTVAL (x) < 0)
+ return immed_double_const (INTVAL (x), (HOST_WIDE_INT) 0, mode);
+
+ /* We can do this with a gen_lowpart if both desired and current modes
+ are integer, and this is either a constant integer, a register, or a
+ non-volatile MEM. Except for the constant case where MODE is no
+ wider than HOST_BITS_PER_WIDE_INT, we must be narrowing the operand. */
+
+ if ((GET_CODE (x) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ || (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_CLASS (oldmode) == MODE_INT
+ && (GET_CODE (x) == CONST_DOUBLE
+ || (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (oldmode)
+ && ((GET_CODE (x) == MEM && ! MEM_VOLATILE_P (x)
+ && direct_load[(int) mode])
+ || (GET_CODE (x) == REG
+ && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
+ GET_MODE_BITSIZE (GET_MODE (x)))))))))
+ {
+ /* ?? If we don't know OLDMODE, we have to assume here that
+ X does not need sign- or zero-extension. This may not be
+ the case, but it's the best we can do. */
+ if (GET_CODE (x) == CONST_INT && oldmode != VOIDmode
+ && GET_MODE_SIZE (mode) > GET_MODE_SIZE (oldmode))
+ {
+ HOST_WIDE_INT val = INTVAL (x);
+ int width = GET_MODE_BITSIZE (oldmode);
+
+ /* We must sign or zero-extend in this case. Start by
+ zero-extending, then sign extend if we need to. */
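+ /* E.g., WIDTH == 8 and VAL == 0x90: masking leaves 0x90; if signed,
+ bit 7 is set, so the bits above WIDTH are filled in, giving -112. */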
+ val &= ((HOST_WIDE_INT) 1 << width) - 1;
+ if (! unsignedp
+ && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
+ val |= (HOST_WIDE_INT) (-1) << width;
+
+ return GEN_INT (val);
+ }
+
+ return gen_lowpart (mode, x);
+ }
+
+ temp = gen_reg_rtx (mode);
+ convert_move (temp, x, unsignedp);
+ return temp;
+}
+
+/* Generate several move instructions to copy LEN bytes
+ from block FROM to block TO. (These are MEM rtx's with BLKmode).
+ The caller must pass FROM and TO
+ through protect_from_queue before calling.
+ ALIGN (in bytes) is maximum alignment we can assume. */
+
+static void
+move_by_pieces (to, from, len, align)
+ rtx to, from;
+ int len, align;
+{
+ struct move_by_pieces data;
+ rtx to_addr = XEXP (to, 0), from_addr = XEXP (from, 0);
+ int max_size = MOVE_MAX + 1;
+
+ data.offset = 0;
+ data.to_addr = to_addr;
+ data.from_addr = from_addr;
+ data.to = to;
+ data.from = from;
+ data.autinc_to
+ = (GET_CODE (to_addr) == PRE_INC || GET_CODE (to_addr) == PRE_DEC
+ || GET_CODE (to_addr) == POST_INC || GET_CODE (to_addr) == POST_DEC);
+ data.autinc_from
+ = (GET_CODE (from_addr) == PRE_INC || GET_CODE (from_addr) == PRE_DEC
+ || GET_CODE (from_addr) == POST_INC
+ || GET_CODE (from_addr) == POST_DEC);
+
+ data.explicit_inc_from = 0;
+ data.explicit_inc_to = 0;
+ data.reverse
+ = (GET_CODE (to_addr) == PRE_DEC || GET_CODE (to_addr) == POST_DEC);
+ if (data.reverse) data.offset = len;
+ data.len = len;
+
+ /* If copying requires more than two move insns,
+ copy addresses to registers (to make displacements shorter)
+ and use post-increment if available. */
+ if (!(data.autinc_from && data.autinc_to)
+ && move_by_pieces_ninsns (len, align) > 2)
+ {
+#ifdef HAVE_PRE_DECREMENT
+ if (data.reverse && ! data.autinc_from)
+ {
+ data.from_addr = copy_addr_to_reg (plus_constant (from_addr, len));
+ data.autinc_from = 1;
+ data.explicit_inc_from = -1;
+ }
+#endif
+#ifdef HAVE_POST_INCREMENT
+ if (! data.autinc_from)
+ {
+ data.from_addr = copy_addr_to_reg (from_addr);
+ data.autinc_from = 1;
+ data.explicit_inc_from = 1;
+ }
+#endif
+ if (!data.autinc_from && CONSTANT_P (from_addr))
+ data.from_addr = copy_addr_to_reg (from_addr);
+#ifdef HAVE_PRE_DECREMENT
+ if (data.reverse && ! data.autinc_to)
+ {
+ data.to_addr = copy_addr_to_reg (plus_constant (to_addr, len));
+ data.autinc_to = 1;
+ data.explicit_inc_to = -1;
+ }
+#endif
+#ifdef HAVE_POST_INCREMENT
+ if (! data.reverse && ! data.autinc_to)
+ {
+ data.to_addr = copy_addr_to_reg (to_addr);
+ data.autinc_to = 1;
+ data.explicit_inc_to = 1;
+ }
+#endif
+ if (!data.autinc_to && CONSTANT_P (to_addr))
+ data.to_addr = copy_addr_to_reg (to_addr);
+ }
+
+ if (! (STRICT_ALIGNMENT || SLOW_UNALIGNED_ACCESS)
+ || align > MOVE_MAX || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT)
+ align = MOVE_MAX;
+
+ /* First move what we can in the largest integer mode, then go to
+ successively smaller modes. */
+
+ while (max_size > 1)
+ {
+ enum machine_mode mode = VOIDmode, tmode;
+ enum insn_code icode;
+
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) < max_size)
+ mode = tmode;
+
+ if (mode == VOIDmode)
+ break;
+
+ icode = mov_optab->handlers[(int) mode].insn_code;
+ if (icode != CODE_FOR_nothing
+ && align >= MIN (BIGGEST_ALIGNMENT / BITS_PER_UNIT,
+ GET_MODE_SIZE (mode)))
+ move_by_pieces_1 (GEN_FCN (icode), mode, &data);
+
+ max_size = GET_MODE_SIZE (mode);
+ }
+
+ /* The code above should have handled everything. */
+ if (data.len != 0)
+ abort ();
+}
+
+/* Return number of insns required to move L bytes by pieces.
+ ALIGN (in bytes) is maximum alignment we can assume. */
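+ /* E.g., with MOVE_MAX == 4, ALIGN == 4, and SImode/HImode move insns
+ available, moving 10 bytes costs two SImode moves plus one HImode
+ move: three insns in all. */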
+
+static int
+move_by_pieces_ninsns (l, align)
+ unsigned int l;
+ int align;
+{
+ register int n_insns = 0;
+ int max_size = MOVE_MAX + 1;
+
+ if (! (STRICT_ALIGNMENT || SLOW_UNALIGNED_ACCESS)
+ || align > MOVE_MAX || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT)
+ align = MOVE_MAX;
+
+ while (max_size > 1)
+ {
+ enum machine_mode mode = VOIDmode, tmode;
+ enum insn_code icode;
+
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) < max_size)
+ mode = tmode;
+
+ if (mode == VOIDmode)
+ break;
+
+ icode = mov_optab->handlers[(int) mode].insn_code;
+ if (icode != CODE_FOR_nothing
+ && align >= MIN (BIGGEST_ALIGNMENT / BITS_PER_UNIT,
+ GET_MODE_SIZE (mode)))
+ n_insns += l / GET_MODE_SIZE (mode), l %= GET_MODE_SIZE (mode);
+
+ max_size = GET_MODE_SIZE (mode);
+ }
+
+ return n_insns;
+}
+
+/* Subroutine of move_by_pieces. Move as many bytes as appropriate
+ with move instructions for mode MODE. GENFUN is the gen_... function
+ to make a move insn for that mode. DATA has all the other info. */
+
+static void
+move_by_pieces_1 (genfun, mode, data)
+ rtx (*genfun) ();
+ enum machine_mode mode;
+ struct move_by_pieces *data;
+{
+ register int size = GET_MODE_SIZE (mode);
+ register rtx to1, from1;
+
+ while (data->len >= size)
+ {
+ if (data->reverse) data->offset -= size;
+
+ to1 = (data->autinc_to
+ ? gen_rtx (MEM, mode, data->to_addr)
+ : change_address (data->to, mode,
+ plus_constant (data->to_addr, data->offset)));
+ from1 =
+ (data->autinc_from
+ ? gen_rtx (MEM, mode, data->from_addr)
+ : change_address (data->from, mode,
+ plus_constant (data->from_addr, data->offset)));
+
+#ifdef HAVE_PRE_DECREMENT
+ if (data->explicit_inc_to < 0)
+ emit_insn (gen_add2_insn (data->to_addr, GEN_INT (-size)));
+ if (data->explicit_inc_from < 0)
+ emit_insn (gen_add2_insn (data->from_addr, GEN_INT (-size)));
+#endif
+
+ emit_insn ((*genfun) (to1, from1));
+#ifdef HAVE_POST_INCREMENT
+ if (data->explicit_inc_to > 0)
+ emit_insn (gen_add2_insn (data->to_addr, GEN_INT (size)));
+ if (data->explicit_inc_from > 0)
+ emit_insn (gen_add2_insn (data->from_addr, GEN_INT (size)));
+#endif
+
+ if (! data->reverse) data->offset += size;
+
+ data->len -= size;
+ }
+}
+
+/* Emit code to move a block Y to a block X.
+ This may be done with string-move instructions,
+ with multiple scalar move instructions, or with a library call.
+
+ Both X and Y must be MEM rtx's (perhaps inside VOLATILE)
+ with mode BLKmode.
+ SIZE is an rtx that says how long they are.
+ ALIGN is the maximum alignment we can assume they have,
+ measured in bytes. */
+
+void
+emit_block_move (x, y, size, align)
+ rtx x, y;
+ rtx size;
+ int align;
+{
+ if (GET_MODE (x) != BLKmode)
+ abort ();
+
+ if (GET_MODE (y) != BLKmode)
+ abort ();
+
+ x = protect_from_queue (x, 1);
+ y = protect_from_queue (y, 0);
+ size = protect_from_queue (size, 0);
+
+ if (GET_CODE (x) != MEM)
+ abort ();
+ if (GET_CODE (y) != MEM)
+ abort ();
+ if (size == 0)
+ abort ();
+
+ if (GET_CODE (size) == CONST_INT
+ && (move_by_pieces_ninsns (INTVAL (size), align) < MOVE_RATIO))
+ move_by_pieces (x, y, INTVAL (size), align);
+ else
+ {
+ /* Try the most limited insn first, because there's no point
+ including more than one in the machine description unless
+ the more limited one has some advantage. */
+
+ rtx opalign = GEN_INT (align);
+ enum machine_mode mode;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ enum insn_code code = movstr_optab[(int) mode];
+
+ if (code != CODE_FOR_nothing
+ /* We don't need MODE to be narrower than HOST_BITS_PER_WIDE_INT
+ here because if SIZE is less than the mode mask, as it is
+ returned by the macro, it will definitely be less than the
+ actual mode mask. */
+ && (unsigned HOST_WIDE_INT) INTVAL (size) <= GET_MODE_MASK (mode)
+ && (insn_operand_predicate[(int) code][0] == 0
+ || (*insn_operand_predicate[(int) code][0]) (x, BLKmode))
+ && (insn_operand_predicate[(int) code][1] == 0
+ || (*insn_operand_predicate[(int) code][1]) (y, BLKmode))
+ && (insn_operand_predicate[(int) code][3] == 0
+ || (*insn_operand_predicate[(int) code][3]) (opalign,
+ VOIDmode)))
+ {
+ rtx op2;
+ rtx last = get_last_insn ();
+ rtx pat;
+
+ op2 = convert_to_mode (mode, size, 1);
+ if (insn_operand_predicate[(int) code][2] != 0
+ && ! (*insn_operand_predicate[(int) code][2]) (op2, mode))
+ op2 = copy_to_mode_reg (mode, op2);
+
+ pat = GEN_FCN ((int) code) (x, y, op2, opalign);
+ if (pat)
+ {
+ emit_insn (pat);
+ return;
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+
+#ifdef TARGET_MEM_FUNCTIONS
+ emit_library_call (memcpy_libfunc, 0,
+ VOIDmode, 3, XEXP (x, 0), Pmode,
+ XEXP (y, 0), Pmode,
+ convert_to_mode (TYPE_MODE (sizetype), size,
+ TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+#else
+ emit_library_call (bcopy_libfunc, 0,
+ VOIDmode, 3, XEXP (y, 0), Pmode,
+ XEXP (x, 0), Pmode,
+ convert_to_mode (TYPE_MODE (sizetype), size,
+ TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+#endif
+ }
+}
+
+/* Copy all or part of a value X into registers starting at REGNO.
+ The number of registers to be filled is NREGS. */
+
+void
+move_block_to_reg (regno, x, nregs, mode)
+ int regno;
+ rtx x;
+ int nregs;
+ enum machine_mode mode;
+{
+ int i;
+ rtx pat, last;
+
+ if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x))
+ x = validize_mem (force_const_mem (mode, x));
+
+ /* See if the machine can do this with a load multiple insn. */
+#ifdef HAVE_load_multiple
+ if (HAVE_load_multiple)
+ {
+ last = get_last_insn ();
+ pat = gen_load_multiple (gen_rtx (REG, word_mode, regno), x,
+ GEN_INT (nregs));
+ if (pat)
+ {
+ emit_insn (pat);
+ return;
+ }
+ else
+ delete_insns_since (last);
+ }
+#endif
+
+ for (i = 0; i < nregs; i++)
+ emit_move_insn (gen_rtx (REG, word_mode, regno + i),
+ operand_subword_force (x, i, mode));
+}
+
+/* Copy all or part of a BLKmode value X out of registers starting at REGNO.
+ The number of registers to be filled is NREGS. SIZE indicates the number
+ of bytes in the object X. */
+
+
+void
+move_block_from_reg (regno, x, nregs, size)
+ int regno;
+ rtx x;
+ int nregs;
+ int size;
+{
+ int i;
+ rtx pat, last;
+
+ /* Blocks smaller than a word on a BYTES_BIG_ENDIAN machine must be aligned
+ to the left before storing to memory. */
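+ /* E.g., with UNITS_PER_WORD == 4 and SIZE == 3, the three significant
+ bytes are shifted left by (4 - 3) * 8 = 8 bits so that they occupy
+ the most significant positions of the word. */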
+ if (size < UNITS_PER_WORD && BYTES_BIG_ENDIAN)
+ {
+ rtx tem = operand_subword (x, 0, 1, BLKmode);
+ rtx shift;
+
+ if (tem == 0)
+ abort ();
+
+ shift = expand_shift (LSHIFT_EXPR, word_mode,
+ gen_rtx (REG, word_mode, regno),
+ build_int_2 ((UNITS_PER_WORD - size)
+ * BITS_PER_UNIT, 0), NULL_RTX, 0);
+ emit_move_insn (tem, shift);
+ return;
+ }
+
+ /* See if the machine can do this with a store multiple insn. */
+#ifdef HAVE_store_multiple
+ if (HAVE_store_multiple)
+ {
+ last = get_last_insn ();
+ pat = gen_store_multiple (x, gen_rtx (REG, word_mode, regno),
+ GEN_INT (nregs));
+ if (pat)
+ {
+ emit_insn (pat);
+ return;
+ }
+ else
+ delete_insns_since (last);
+ }
+#endif
+
+ for (i = 0; i < nregs; i++)
+ {
+ rtx tem = operand_subword (x, i, 1, BLKmode);
+
+ if (tem == 0)
+ abort ();
+
+ emit_move_insn (tem, gen_rtx (REG, word_mode, regno + i));
+ }
+}
+
+/* Add a USE expression for REG to the (possibly empty) list pointed
+ to by CALL_FUSAGE. REG must denote a hard register. */
+
+void
+use_reg (call_fusage, reg)
+ rtx *call_fusage, reg;
+{
+ if (GET_CODE (reg) != REG
+ || REGNO (reg) >= FIRST_PSEUDO_REGISTER)
+ abort ();
+
+ *call_fusage
+ = gen_rtx (EXPR_LIST, VOIDmode,
+ gen_rtx (USE, VOIDmode, reg), *call_fusage);
+}
+
+/* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs,
+ starting at REGNO. All of these registers must be hard registers. */
+
+void
+use_regs (call_fusage, regno, nregs)
+ rtx *call_fusage;
+ int regno;
+ int nregs;
+{
+ int i;
+
+ if (regno + nregs > FIRST_PSEUDO_REGISTER)
+ abort ();
+
+ for (i = 0; i < nregs; i++)
+ use_reg (call_fusage, gen_rtx (REG, reg_raw_mode[regno + i], regno + i));
+}
+
+/* Write zeros through the storage of OBJECT.
+ If OBJECT has BLKmode, SIZE is its length in bytes. */
+
+void
+clear_storage (object, size)
+ rtx object;
+ int size;
+{
+ if (GET_MODE (object) == BLKmode)
+ {
+#ifdef TARGET_MEM_FUNCTIONS
+ emit_library_call (memset_libfunc, 0,
+ VOIDmode, 3,
+ XEXP (object, 0), Pmode, const0_rtx, Pmode,
+ GEN_INT (size), Pmode);
+#else
+ emit_library_call (bzero_libfunc, 0,
+ VOIDmode, 2,
+ XEXP (object, 0), Pmode,
+ GEN_INT (size), Pmode);
+#endif
+ }
+ else
+ emit_move_insn (object, const0_rtx);
+}
+
+/* Generate code to copy Y into X.
+ Both Y and X must have the same mode, except that
+ Y can be a constant with VOIDmode.
+ This mode cannot be BLKmode; use emit_block_move for that.
+
+ Return the last instruction emitted. */
+
+rtx
+emit_move_insn (x, y)
+ rtx x, y;
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ x = protect_from_queue (x, 1);
+ y = protect_from_queue (y, 0);
+
+ if (mode == BLKmode || (GET_MODE (y) != mode && GET_MODE (y) != VOIDmode))
+ abort ();
+
+ if (CONSTANT_P (y) && ! LEGITIMATE_CONSTANT_P (y))
+ y = force_const_mem (mode, y);
+
+ /* If X or Y are memory references, verify that their addresses are valid
+ for the machine. */
+ if (GET_CODE (x) == MEM
+ && ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
+ && ! push_operand (x, GET_MODE (x)))
+ || (flag_force_addr
+ && CONSTANT_ADDRESS_P (XEXP (x, 0)))))
+ x = change_address (x, VOIDmode, XEXP (x, 0));
+
+ if (GET_CODE (y) == MEM
+ && (! memory_address_p (GET_MODE (y), XEXP (y, 0))
+ || (flag_force_addr
+ && CONSTANT_ADDRESS_P (XEXP (y, 0)))))
+ y = change_address (y, VOIDmode, XEXP (y, 0));
+
+ if (mode == BLKmode)
+ abort ();
+
+ return emit_move_insn_1 (x, y);
+}
+
+/* Low level part of emit_move_insn.
+ Called just like emit_move_insn, but assumes X and Y
+ are basically valid. */
+
+rtx
+emit_move_insn_1 (x, y)
+ rtx x, y;
+{
+ enum machine_mode mode = GET_MODE (x);
+ enum machine_mode submode;
+ enum mode_class class = GET_MODE_CLASS (mode);
+ int i;
+
+ if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ return
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) mode].insn_code) (x, y));
+
+ /* Expand complex moves by moving real part and imag part, if possible. */
+ else if ((class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)
+ && BLKmode != (submode = mode_for_size ((GET_MODE_UNIT_SIZE (mode)
+ * BITS_PER_UNIT),
+ (class == MODE_COMPLEX_INT
+ ? MODE_INT : MODE_FLOAT),
+ 0))
+ && (mov_optab->handlers[(int) submode].insn_code
+ != CODE_FOR_nothing))
+ {
+ /* Don't split destination if it is a stack push. */
+ int stack = push_operand (x, GET_MODE (x));
+ rtx insns;
+
+ start_sequence ();
+
+ /* If this is a stack push, push the highpart first, so it
+ will be in the argument order.
+
+ In that case, change_address is used only to convert
+ the mode, not to change the address. */
+ if (stack)
+ {
+ /* Note that the real part always precedes the imag part in memory
+ regardless of machine's endianness. */
+#ifdef STACK_GROWS_DOWNWARD
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
+ (gen_rtx (MEM, submode, (XEXP (x, 0))),
+ gen_imagpart (submode, y)));
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
+ (gen_rtx (MEM, submode, (XEXP (x, 0))),
+ gen_realpart (submode, y)));
+#else
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
+ (gen_rtx (MEM, submode, (XEXP (x, 0))),
+ gen_realpart (submode, y)));
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
+ (gen_rtx (MEM, submode, (XEXP (x, 0))),
+ gen_imagpart (submode, y)));
+#endif
+ }
+ else
+ {
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
+ (gen_realpart (submode, x), gen_realpart (submode, y)));
+ emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
+ (gen_imagpart (submode, x), gen_imagpart (submode, y)));
+ }
+
+ insns = get_insns ();
+ end_sequence ();
+
+ /* If X is a CONCAT, we got insns like RD = RS, ID = IS,
+ each with a separate pseudo as destination.
+ It's not correct for flow to treat them as a unit. */
+ if (GET_CODE (x) != CONCAT)
+ emit_no_conflict_block (insns, x, y, NULL_RTX, NULL_RTX);
+ else
+ emit_insns (insns);
+
+ return get_last_insn ();
+ }
+
+ /* This will handle any multi-word mode that lacks a move_insn pattern.
+ However, you will get better code if you define such patterns,
+ even if they must turn into multiple assembler instructions. */
+ else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ {
+ rtx last_insn = 0;
+ rtx insns;
+
+ start_sequence ();
+
+ for (i = 0;
+ i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
+ i++)
+ {
+ rtx xpart = operand_subword (x, i, 1, mode);
+ rtx ypart = operand_subword (y, i, 1, mode);
+
+ /* If we can't get a part of Y, put Y into memory if it is a
+ constant. Otherwise, force it into a register. If we still
+ can't get a part of Y, abort. */
+ if (ypart == 0 && CONSTANT_P (y))
+ {
+ y = force_const_mem (mode, y);
+ ypart = operand_subword (y, i, 1, mode);
+ }
+ else if (ypart == 0)
+ ypart = operand_subword_force (y, i, mode);
+
+ if (xpart == 0 || ypart == 0)
+ abort ();
+
+ last_insn = emit_move_insn (xpart, ypart);
+ }
+
+ insns = get_insns ();
+ end_sequence ();
+ emit_no_conflict_block (insns, x, y, NULL_RTX, NULL_RTX);
+
+ return last_insn;
+ }
+ else
+ abort ();
+}
+
+/* Pushing data onto the stack. */
+
+/* Push a block of length SIZE (perhaps variable)
+ and return an rtx to address the beginning of the block.
+ Note that it is not possible for the value returned to be a QUEUED.
+ The value may be virtual_outgoing_args_rtx.
+
+ EXTRA is the number of bytes of padding to push in addition to SIZE.
+ BELOW nonzero means this padding comes at low addresses;
+ otherwise, the padding comes at high addresses. */
+
+rtx
+push_block (size, extra, below)
+ rtx size;
+ int extra, below;
+{
+ register rtx temp;
+ if (CONSTANT_P (size))
+ anti_adjust_stack (plus_constant (size, extra));
+ else if (GET_CODE (size) == REG && extra == 0)
+ anti_adjust_stack (size);
+ else
+ {
+ rtx temp = copy_to_mode_reg (Pmode, size);
+ if (extra != 0)
+ temp = expand_binop (Pmode, add_optab, temp, GEN_INT (extra),
+ temp, 0, OPTAB_LIB_WIDEN);
+ anti_adjust_stack (temp);
+ }
+
+#ifdef STACK_GROWS_DOWNWARD
+ temp = virtual_outgoing_args_rtx;
+ if (extra != 0 && below)
+ temp = plus_constant (temp, extra);
+#else
+ if (GET_CODE (size) == CONST_INT)
+ temp = plus_constant (virtual_outgoing_args_rtx,
+ - INTVAL (size) - (below ? 0 : extra));
+ else if (extra != 0 && !below)
+ temp = gen_rtx (PLUS, Pmode, virtual_outgoing_args_rtx,
+ negate_rtx (Pmode, plus_constant (size, extra)));
+ else
+ temp = gen_rtx (PLUS, Pmode, virtual_outgoing_args_rtx,
+ negate_rtx (Pmode, size));
+#endif
+
+ return memory_address (GET_CLASS_NARROWEST_MODE (MODE_INT), temp);
+}
+
+rtx
+gen_push_operand ()
+{
+ return gen_rtx (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
+}
+
+/* Generate code to push X onto the stack, assuming it has mode MODE and
+ type TYPE.
+ MODE is redundant except when X is a CONST_INT (since they don't
+ carry mode info).
+ SIZE is an rtx for the size of data to be copied (in bytes),
+ needed only if X is BLKmode.
+
+ ALIGN (in bytes) is maximum alignment we can assume.
+
+ If PARTIAL and REG are both nonzero, then copy that many of the first
+ words of X into registers starting with REG, and push the rest of X.
+ The amount of space pushed is decreased by PARTIAL words,
+ rounded *down* to a multiple of PARM_BOUNDARY.
+ REG must be a hard register in this case.
+ If REG is zero but PARTIAL is not, take all other actions for an
+ argument partially in registers, but do not actually load any
+ registers.
+
+ EXTRA is the amount in bytes of extra space to leave next to this arg.
+ This is ignored if an argument block has already been allocated.
+
+ On a machine that lacks real push insns, ARGS_ADDR is the address of
+ the bottom of the argument block for this call. We use indexing off there
+ to store the arg. On machines with push insns, ARGS_ADDR is 0 when an
+ argument block has not been preallocated.
+
+ ARGS_SO_FAR is the size of args previously pushed for this call. */
+
+void
+emit_push_insn (x, mode, type, size, align, partial, reg, extra,
+ args_addr, args_so_far)
+ register rtx x;
+ enum machine_mode mode;
+ tree type;
+ rtx size;
+ int align;
+ int partial;
+ rtx reg;
+ int extra;
+ rtx args_addr;
+ rtx args_so_far;
+{
+ rtx xinner;
+ enum direction stack_direction
+#ifdef STACK_GROWS_DOWNWARD
+ = downward;
+#else
+ = upward;
+#endif
+
+ /* Decide where to pad the argument: `downward' for below,
+ `upward' for above, or `none' for don't pad it.
+ Default is below for small data on big-endian machines; else above. */
+ enum direction where_pad = FUNCTION_ARG_PADDING (mode, type);
+
+ /* Invert direction if stack is post-update. */
+ if (STACK_PUSH_CODE == POST_INC || STACK_PUSH_CODE == POST_DEC)
+ if (where_pad != none)
+ where_pad = (where_pad == downward ? upward : downward);
+
+ xinner = x = protect_from_queue (x, 0);
+
+ if (mode == BLKmode)
+ {
+ /* Copy a block into the stack, entirely or partially. */
+
+ register rtx temp;
+ int used = partial * UNITS_PER_WORD;
+ int offset = used % (PARM_BOUNDARY / BITS_PER_UNIT);
+ int skip;
+
+ if (size == 0)
+ abort ();
+
+ used -= offset;
+
+ /* USED is now the # of bytes we need not copy to the stack
+ because registers will take care of them. */
+
+ if (partial != 0)
+ xinner = change_address (xinner, BLKmode,
+ plus_constant (XEXP (xinner, 0), used));
+
+ /* If the partial register-part of the arg counts in its stack size,
+ skip the part of stack space corresponding to the registers.
+ Otherwise, start copying to the beginning of the stack space,
+ by setting SKIP to 0. */
+#ifndef REG_PARM_STACK_SPACE
+ skip = 0;
+#else
+ skip = used;
+#endif
+
+#ifdef PUSH_ROUNDING
+ /* Do it with several push insns if that doesn't take lots of insns
+ and if there is no difficulty with push insns that skip bytes
+ on the stack for alignment purposes. */
+ if (args_addr == 0
+ && GET_CODE (size) == CONST_INT
+ && skip == 0
+ && (move_by_pieces_ninsns ((unsigned) INTVAL (size) - used, align)
+ < MOVE_RATIO)
+ /* Here we avoid the case of a structure whose weak alignment
+ forces many pushes of a small amount of data,
+ and such small pushes do rounding that causes trouble. */
+ && ((! STRICT_ALIGNMENT && ! SLOW_UNALIGNED_ACCESS)
+ || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT
+ || PUSH_ROUNDING (align) == align)
+ && PUSH_ROUNDING (INTVAL (size)) == INTVAL (size))
+ {
+ /* Push padding now if padding above and stack grows down,
+ or if padding below and stack grows up.
+ But if space already allocated, this has already been done. */
+ if (extra && args_addr == 0
+ && where_pad != none && where_pad != stack_direction)
+ anti_adjust_stack (GEN_INT (extra));
+
+ move_by_pieces (gen_rtx (MEM, BLKmode, gen_push_operand ()), xinner,
+ INTVAL (size) - used, align);
+ }
+ else
+#endif /* PUSH_ROUNDING */
+ {
+ /* Otherwise make space on the stack and copy the data
+ to the address of that space. */
+
+ /* Deduct words put into registers from the size we must copy. */
+ if (partial != 0)
+ {
+ if (GET_CODE (size) == CONST_INT)
+ size = GEN_INT (INTVAL (size) - used);
+ else
+ size = expand_binop (GET_MODE (size), sub_optab, size,
+ GEN_INT (used), NULL_RTX, 0,
+ OPTAB_LIB_WIDEN);
+ }
+
+ /* Get the address of the stack space.
+ In this case, we do not deal with EXTRA separately.
+ A single stack adjust will do. */
+ if (! args_addr)
+ {
+ temp = push_block (size, extra, where_pad == downward);
+ extra = 0;
+ }
+ else if (GET_CODE (args_so_far) == CONST_INT)
+ temp = memory_address (BLKmode,
+ plus_constant (args_addr,
+ skip + INTVAL (args_so_far)));
+ else
+ temp = memory_address (BLKmode,
+ plus_constant (gen_rtx (PLUS, Pmode,
+ args_addr, args_so_far),
+ skip));
+
+ /* TEMP is the address of the block. Copy the data there. */
+ if (GET_CODE (size) == CONST_INT
+ && (move_by_pieces_ninsns ((unsigned) INTVAL (size), align)
+ < MOVE_RATIO))
+ {
+ move_by_pieces (gen_rtx (MEM, BLKmode, temp), xinner,
+ INTVAL (size), align);
+ goto ret;
+ }
+ /* Try the most limited insn first, because there's no point
+ including more than one in the machine description unless
+ the more limited one has some advantage. */
+#ifdef HAVE_movstrqi
+ if (HAVE_movstrqi
+ && GET_CODE (size) == CONST_INT
+ && ((unsigned) INTVAL (size)
+ < (1 << (GET_MODE_BITSIZE (QImode) - 1))))
+ {
+ rtx pat = gen_movstrqi (gen_rtx (MEM, BLKmode, temp),
+ xinner, size, GEN_INT (align));
+ if (pat != 0)
+ {
+ emit_insn (pat);
+ goto ret;
+ }
+ }
+#endif
+#ifdef HAVE_movstrhi
+ if (HAVE_movstrhi
+ && GET_CODE (size) == CONST_INT
+ && ((unsigned) INTVAL (size)
+ < (1 << (GET_MODE_BITSIZE (HImode) - 1))))
+ {
+ rtx pat = gen_movstrhi (gen_rtx (MEM, BLKmode, temp),
+ xinner, size, GEN_INT (align));
+ if (pat != 0)
+ {
+ emit_insn (pat);
+ goto ret;
+ }
+ }
+#endif
+#ifdef HAVE_movstrsi
+ if (HAVE_movstrsi)
+ {
+ rtx pat = gen_movstrsi (gen_rtx (MEM, BLKmode, temp),
+ xinner, size, GEN_INT (align));
+ if (pat != 0)
+ {
+ emit_insn (pat);
+ goto ret;
+ }
+ }
+#endif
+#ifdef HAVE_movstrdi
+ if (HAVE_movstrdi)
+ {
+ rtx pat = gen_movstrdi (gen_rtx (MEM, BLKmode, temp),
+ xinner, size, GEN_INT (align));
+ if (pat != 0)
+ {
+ emit_insn (pat);
+ goto ret;
+ }
+ }
+#endif
+
+#ifndef ACCUMULATE_OUTGOING_ARGS
+ /* If the source is referenced relative to the stack pointer,
+ copy it to another register to stabilize it. We do not need
+ to do this if we know that we won't be changing sp. */
+
+ if (reg_mentioned_p (virtual_stack_dynamic_rtx, temp)
+ || reg_mentioned_p (virtual_outgoing_args_rtx, temp))
+ temp = copy_to_reg (temp);
+#endif
+
+ /* Make inhibit_defer_pop nonzero around the library call
+ to force it to pop the bcopy-arguments right away. */
+ NO_DEFER_POP;
+#ifdef TARGET_MEM_FUNCTIONS
+ emit_library_call (memcpy_libfunc, 0,
+ VOIDmode, 3, temp, Pmode, XEXP (xinner, 0), Pmode,
+ convert_to_mode (TYPE_MODE (sizetype),
+ size, TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+#else
+ emit_library_call (bcopy_libfunc, 0,
+ VOIDmode, 3, XEXP (xinner, 0), Pmode, temp, Pmode,
+ convert_to_mode (TYPE_MODE (sizetype),
+ size, TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+#endif
+ OK_DEFER_POP;
+ }
+ }
+ else if (partial > 0)
+ {
+ /* Scalar partly in registers. */
+
+ int size = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
+ int i;
+ int not_stack;
+ /* # words of start of argument
+ that we must make space for but need not store. */
+ int offset = partial % (PARM_BOUNDARY / BITS_PER_WORD);
+ int args_offset = INTVAL (args_so_far);
+ int skip;
+
+ /* Push padding now if padding above and stack grows down,
+ or if padding below and stack grows up.
+ But if space already allocated, this has already been done. */
+ if (extra && args_addr == 0
+ && where_pad != none && where_pad != stack_direction)
+ anti_adjust_stack (GEN_INT (extra));
+
+ /* If we make space by pushing it, we might as well push
+ the real data. Otherwise, we can leave OFFSET nonzero
+ and leave the space uninitialized. */
+ if (args_addr == 0)
+ offset = 0;
+
+ /* Now NOT_STACK gets the number of words that we don't need to
+ allocate on the stack. */
+ not_stack = partial - offset;
+
+ /* If the partial register-part of the arg counts in its stack size,
+ skip the part of stack space corresponding to the registers.
+ Otherwise, start copying to the beginning of the stack space,
+ by setting SKIP to 0. */
+#ifndef REG_PARM_STACK_SPACE
+ skip = 0;
+#else
+ skip = not_stack;
+#endif
+
+ if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x))
+ x = validize_mem (force_const_mem (mode, x));
+
+ /* If X is a hard register in a non-integer mode, copy it into a pseudo;
+ SUBREGs of such registers are not allowed. */
+ if ((GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER
+ && GET_MODE_CLASS (GET_MODE (x)) != MODE_INT))
+ x = copy_to_reg (x);
+
+ /* Loop over all the words allocated on the stack for this arg. */
+ /* We can do it by words, because any scalar bigger than a word
+ has a size a multiple of a word. */
+#ifndef PUSH_ARGS_REVERSED
+ for (i = not_stack; i < size; i++)
+#else
+ for (i = size - 1; i >= not_stack; i--)
+#endif
+ if (i >= not_stack + offset)
+ emit_push_insn (operand_subword_force (x, i, mode),
+ word_mode, NULL_TREE, NULL_RTX, align, 0, NULL_RTX,
+ 0, args_addr,
+ GEN_INT (args_offset + ((i - not_stack + skip)
+ * UNITS_PER_WORD)));
+ }
+ else
+ {
+ rtx addr;
+
+ /* Push padding now if padding above and stack grows down,
+ or if padding below and stack grows up.
+ But if space already allocated, this has already been done. */
+ if (extra && args_addr == 0
+ && where_pad != none && where_pad != stack_direction)
+ anti_adjust_stack (GEN_INT (extra));
+
+#ifdef PUSH_ROUNDING
+ if (args_addr == 0)
+ addr = gen_push_operand ();
+ else
+#endif
+ if (GET_CODE (args_so_far) == CONST_INT)
+ addr
+ = memory_address (mode,
+ plus_constant (args_addr, INTVAL (args_so_far)));
+ else
+ addr = memory_address (mode, gen_rtx (PLUS, Pmode, args_addr,
+ args_so_far));
+
+ emit_move_insn (gen_rtx (MEM, mode, addr), x);
+ }
+
+ ret:
+ /* If part should go in registers, copy that part
+ into the appropriate registers. Do this now, at the end,
+ since mem-to-mem copies above may do function calls. */
+ if (partial > 0 && reg != 0)
+ move_block_to_reg (REGNO (reg), x, partial, mode);
+
+ if (extra && args_addr == 0 && where_pad == stack_direction)
+ anti_adjust_stack (GEN_INT (extra));
+}
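+
+/* Illustrative sketch, not part of the imported source: pushing one
+   word-sized constant argument, with no partial-register handling and
+   no preallocated argument block (ARGS_ADDR == 0).  */
+#if 0
+ emit_push_insn (GEN_INT (42), SImode, NULL_TREE, NULL_RTX,
+ GET_MODE_SIZE (SImode), 0, NULL_RTX, 0,
+ NULL_RTX, const0_rtx);
+#endif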
+
+/* Expand an assignment that stores the value of FROM into TO.
+ If WANT_VALUE is nonzero, return an rtx for the value of TO.
+ (This may contain a QUEUED rtx;
+ if the value is constant, this rtx is a constant.)
+ Otherwise, the returned value is NULL_RTX.
+
+ SUGGEST_REG is no longer actually used.
+ It used to mean, copy the value through a register
+ and return that register, if that is possible.
+ We now use WANT_VALUE to decide whether to do this. */
+
+rtx
+expand_assignment (to, from, want_value, suggest_reg)
+ tree to, from;
+ int want_value;
+ int suggest_reg;
+{
+ register rtx to_rtx = 0;
+ rtx result;
+
+ /* Don't crash if the lhs of the assignment was erroneous. */
+
+ if (TREE_CODE (to) == ERROR_MARK)
+ {
+ result = expand_expr (from, NULL_RTX, VOIDmode, 0);
+ return want_value ? result : NULL_RTX;
+ }
+
+ if (output_bytecode)
+ {
+ tree dest_innermost;
+
+ bc_expand_expr (from);
+ bc_emit_instruction (duplicate);
+
+ dest_innermost = bc_expand_address (to);
+
+ /* Can't deduce from TYPE that we're dealing with a bitfield, so
+ take care of it here. */
+
+ bc_store_memory (TREE_TYPE (to), dest_innermost);
+ return NULL;
+ }
+
+ /* Assignment of a structure component needs special treatment
+ if the structure component's rtx is not simply a MEM.
+ Assignment of an array element at a constant index, and assignment of
+ an array element in an unaligned packed structure field, has the same
+ problem. */
+
+ if (TREE_CODE (to) == COMPONENT_REF
+ || TREE_CODE (to) == BIT_FIELD_REF
+ || (TREE_CODE (to) == ARRAY_REF
+ && ((TREE_CODE (TREE_OPERAND (to, 1)) == INTEGER_CST
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (to))) == INTEGER_CST)
+ || (STRICT_ALIGNMENT && get_inner_unaligned_p (to)))))
+ {
+ enum machine_mode mode1;
+ int bitsize;
+ int bitpos;
+ tree offset;
+ int unsignedp;
+ int volatilep = 0;
+ tree tem;
+ int alignment;
+
+ push_temp_slots ();
+ tem = get_inner_reference (to, &bitsize, &bitpos, &offset,
+ &mode1, &unsignedp, &volatilep);
+
+ /* If we are going to use store_bit_field and extract_bit_field,
+ make sure to_rtx will be safe for multiple use. */
+
+ if (mode1 == VOIDmode && want_value)
+ tem = stabilize_reference (tem);
+
+ alignment = TYPE_ALIGN (TREE_TYPE (tem)) / BITS_PER_UNIT;
+ to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, 0);
+ if (offset != 0)
+ {
+ rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0);
+
+ if (GET_CODE (to_rtx) != MEM)
+ abort ();
+ to_rtx = change_address (to_rtx, VOIDmode,
+ gen_rtx (PLUS, Pmode, XEXP (to_rtx, 0),
+ force_reg (Pmode, offset_rtx)));
+ /* If we have a variable offset, the known alignment
+ is only that of the innermost structure containing the field.
+ (Actually, we could sometimes do better by using the
+ align of an element of the innermost array, but no need.) */
+ if (TREE_CODE (to) == COMPONENT_REF
+ || TREE_CODE (to) == BIT_FIELD_REF)
+ alignment
+ = TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (to, 0))) / BITS_PER_UNIT;
+ }
+ if (volatilep)
+ {
+ if (GET_CODE (to_rtx) == MEM)
+ MEM_VOLATILE_P (to_rtx) = 1;
+#if 0 /* This was turned off because, when a field is volatile
+ in an object which is not volatile, the object may be in a register,
+ and then we would abort over here. */
+ else
+ abort ();
+#endif
+ }
+
+ result = store_field (to_rtx, bitsize, bitpos, mode1, from,
+ (want_value
+ /* Spurious cast makes HPUX compiler happy. */
+ ? (enum machine_mode) TYPE_MODE (TREE_TYPE (to))
+ : VOIDmode),
+ unsignedp,
+ /* Required alignment of containing datum. */
+ alignment,
+ int_size_in_bytes (TREE_TYPE (tem)));
+ preserve_temp_slots (result);
+ free_temp_slots ();
+ pop_temp_slots ();
+
+ /* If the value is meaningful, convert RESULT to the proper mode.
+ Otherwise, return nothing. */
+ return (want_value ? convert_modes (TYPE_MODE (TREE_TYPE (to)),
+ TYPE_MODE (TREE_TYPE (from)),
+ result,
+ TREE_UNSIGNED (TREE_TYPE (to)))
+ : NULL_RTX);
+ }
+
+ /* If the rhs is a function call and its value is not an aggregate,
+ call the function before we start to compute the lhs.
+ This is needed for correct code for cases such as
+ val = setjmp (buf) on machines where reference to val
+ requires loading up part of an address in a separate insn.
+
+ Don't do this if TO is a VAR_DECL whose DECL_RTL is REG since it might be
+ a promoted variable where the zero- or sign-extension needs to be done.
+ Handling this in the normal way is safe because no computation is done
+ before the call. */
+ if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from)
+ && ! (TREE_CODE (to) == VAR_DECL && GET_CODE (DECL_RTL (to)) == REG))
+ {
+ rtx value;
+
+ push_temp_slots ();
+ value = expand_expr (from, NULL_RTX, VOIDmode, 0);
+ if (to_rtx == 0)
+ to_rtx = expand_expr (to, NULL_RTX, VOIDmode, 0);
+ emit_move_insn (to_rtx, value);
+ preserve_temp_slots (to_rtx);
+ free_temp_slots ();
+ pop_temp_slots ();
+ return want_value ? to_rtx : NULL_RTX;
+ }
+
+ /* Ordinary treatment. Expand TO to get a REG or MEM rtx.
+ Don't re-expand if it was expanded already (in COMPONENT_REF case). */
+
+ if (to_rtx == 0)
+ to_rtx = expand_expr (to, NULL_RTX, VOIDmode, 0);
+
+ /* Don't move directly into a return register. */
+ if (TREE_CODE (to) == RESULT_DECL && GET_CODE (to_rtx) == REG)
+ {
+ rtx temp;
+
+ push_temp_slots ();
+ temp = expand_expr (from, 0, GET_MODE (to_rtx), 0);
+ emit_move_insn (to_rtx, temp);
+ preserve_temp_slots (to_rtx);
+ free_temp_slots ();
+ pop_temp_slots ();
+ return want_value ? to_rtx : NULL_RTX;
+ }
+
+ /* In case we are returning the contents of an object which overlaps
+ the place the value is being stored, use a safe function when copying
+ a value through a pointer into a structure value return block. */
+ if (TREE_CODE (to) == RESULT_DECL && TREE_CODE (from) == INDIRECT_REF
+ && current_function_returns_struct
+ && !current_function_returns_pcc_struct)
+ {
+ rtx from_rtx, size;
+
+ push_temp_slots ();
+ size = expr_size (from);
+ from_rtx = expand_expr (from, NULL_RTX, VOIDmode, 0);
+
+#ifdef TARGET_MEM_FUNCTIONS
+ emit_library_call (memcpy_libfunc, 0,
+ VOIDmode, 3, XEXP (to_rtx, 0), Pmode,
+ XEXP (from_rtx, 0), Pmode,
+ convert_to_mode (TYPE_MODE (sizetype),
+ size, TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+#else
+ emit_library_call (bcopy_libfunc, 0,
+ VOIDmode, 3, XEXP (from_rtx, 0), Pmode,
+ XEXP (to_rtx, 0), Pmode,
+ convert_to_mode (TYPE_MODE (sizetype),
+ size, TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+#endif
+
+ preserve_temp_slots (to_rtx);
+ free_temp_slots ();
+ pop_temp_slots ();
+ return want_value ? to_rtx : NULL_RTX;
+ }
+
+ /* Compute FROM and store the value in the rtx we got. */
+
+ push_temp_slots ();
+ result = store_expr (from, to_rtx, want_value);
+ preserve_temp_slots (result);
+ free_temp_slots ();
+ pop_temp_slots ();
+ return want_value ? result : NULL_RTX;
+}
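+
+/* Illustrative sketch, not part of the imported source: expanding an
+   assignment whose value is not needed, where TO_TREE and FROM_TREE
+   are hypothetical tree nodes built by a front end.  */
+#if 0
+ {
+ extern tree to_tree, from_tree;
+ expand_assignment (to_tree, from_tree, 0, 0);
+ }
+#endif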
+
+/* Generate code for computing expression EXP,
+ and storing the value into TARGET.
+ TARGET may contain a QUEUED rtx.
+
+ If WANT_VALUE is nonzero, return a copy of the value
+ not in TARGET, so that we can be sure to use the proper
+ value in a containing expression even if TARGET has something
+ else stored in it. If possible, we copy the value through a pseudo
+ and return that pseudo. Or, if the value is constant, we try to
+ return the constant. In some cases, we return a pseudo
+ copied *from* TARGET.
+
+ If the mode is BLKmode then we may return TARGET itself.
+ It turns out that in BLKmode it doesn't cause a problem,
+ because C has no operators that could combine two different
+ assignments into the same BLKmode object with different values
+ with no sequence point. Will other languages need this to
+ be more thorough?
+
+ If WANT_VALUE is 0, we return NULL, to make sure
+ to catch quickly any cases where the caller uses the value
+ and fails to set WANT_VALUE. */
+
+rtx
+store_expr (exp, target, want_value)
+ register tree exp;
+ register rtx target;
+ int want_value;
+{
+ register rtx temp;
+ int dont_return_target = 0;
+
+ if (TREE_CODE (exp) == COMPOUND_EXPR)
+ {
+ /* Perform first part of compound expression, then assign from second
+ part. */
+ expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0);
+ emit_queue ();
+ return store_expr (TREE_OPERAND (exp, 1), target, want_value);
+ }
+ else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode)
+ {
+ /* For conditional expression, get safe form of the target. Then
+ test the condition, doing the appropriate assignment on either
+ side. This avoids the creation of unnecessary temporaries.
+ For non-BLKmode, it is more efficient not to do this. */
+
+ rtx lab1 = gen_label_rtx (), lab2 = gen_label_rtx ();
+
+ emit_queue ();
+ target = protect_from_queue (target, 1);
+
+ NO_DEFER_POP;
+ jumpifnot (TREE_OPERAND (exp, 0), lab1);
+ store_expr (TREE_OPERAND (exp, 1), target, 0);
+ emit_queue ();
+ emit_jump_insn (gen_jump (lab2));
+ emit_barrier ();
+ emit_label (lab1);
+ store_expr (TREE_OPERAND (exp, 2), target, 0);
+ emit_queue ();
+ emit_label (lab2);
+ OK_DEFER_POP;
+ return want_value ? target : NULL_RTX;
+ }
+ else if (want_value && GET_CODE (target) == MEM && ! MEM_VOLATILE_P (target)
+ && GET_MODE (target) != BLKmode)
+ /* If target is in memory and caller wants value in a register instead,
+ arrange that. Pass TARGET as target for expand_expr so that,
+ if EXP is another assignment, WANT_VALUE will be nonzero for it.
+ We know expand_expr will not use the target in that case.
+ Don't do this if TARGET is volatile because we are supposed
+ to write it and then read it. */
+ {
+ temp = expand_expr (exp, cse_not_expected ? NULL_RTX : target,
+ GET_MODE (target), 0);
+ if (GET_MODE (temp) != BLKmode && GET_MODE (temp) != VOIDmode)
+ temp = copy_to_reg (temp);
+ dont_return_target = 1;
+ }
+ else if (queued_subexp_p (target))
+ /* If target contains a postincrement, let's not risk
+ using it as the place to generate the rhs. */
+ {
+ if (GET_MODE (target) != BLKmode && GET_MODE (target) != VOIDmode)
+ {
+ /* Expand EXP into a new pseudo. */
+ temp = gen_reg_rtx (GET_MODE (target));
+ temp = expand_expr (exp, temp, GET_MODE (target), 0);
+ }
+ else
+ temp = expand_expr (exp, NULL_RTX, GET_MODE (target), 0);
+
+ /* If target is volatile, ANSI requires accessing the value
+ *from* the target, if it is accessed. So make that happen.
+ In no case return the target itself. */
+ if (! MEM_VOLATILE_P (target) && want_value)
+ dont_return_target = 1;
+ }
+ else if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
+ /* If this is a scalar in a register that is stored in a wider mode
+ than the declared mode, compute the result into its declared mode
+ and then convert to the wider mode. Our value is the computed
+ expression. */
+ {
+ temp = expand_expr (exp, NULL_RTX, VOIDmode, 0);
+
+ /* If TEMP is a volatile MEM and we want a result value, make
+ the access now so it gets done only once. */
+ if (GET_CODE (temp) == MEM && MEM_VOLATILE_P (temp))
+ temp = copy_to_reg (temp);
+
+ /* If TEMP is a VOIDmode constant, use convert_modes to make
+ sure that we properly convert it. */
+ if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode)
+ temp = convert_modes (GET_MODE (SUBREG_REG (target)),
+ TYPE_MODE (TREE_TYPE (exp)), temp,
+ SUBREG_PROMOTED_UNSIGNED_P (target));
+
+ convert_move (SUBREG_REG (target), temp,
+ SUBREG_PROMOTED_UNSIGNED_P (target));
+ return want_value ? temp : NULL_RTX;
+ }
+ else
+ {
+ temp = expand_expr (exp, target, GET_MODE (target), 0);
+ /* Return TARGET if it's a specified hardware register.
+ If TARGET is a volatile mem ref, either return TARGET
+ or return a reg copied *from* TARGET; ANSI requires this.
+
+ Otherwise, if TEMP is not TARGET, return TEMP
+ if it is constant (for efficiency),
+ or if we really want the correct value. */
+ if (!(target && GET_CODE (target) == REG
+ && REGNO (target) < FIRST_PSEUDO_REGISTER)
+ && !(GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ && temp != target
+ && (CONSTANT_P (temp) || want_value))
+ dont_return_target = 1;
+ }
+
+ /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not
+ the same as that of TARGET, adjust the constant. This is needed, for
+ example, in case it is a CONST_DOUBLE and we want only a word-sized
+ value. */
+ if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode
+ && TREE_CODE (exp) != ERROR_MARK
+ && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp)))
+ temp = convert_modes (GET_MODE (target), TYPE_MODE (TREE_TYPE (exp)),
+ temp, TREE_UNSIGNED (TREE_TYPE (exp)));
+
+ /* If value was not generated in the target, store it there.
+ Convert the value to TARGET's type first if necessary. */
+
+ if (temp != target && TREE_CODE (exp) != ERROR_MARK)
+ {
+ target = protect_from_queue (target, 1);
+ if (GET_MODE (temp) != GET_MODE (target)
+ && GET_MODE (temp) != VOIDmode)
+ {
+ int unsignedp = TREE_UNSIGNED (TREE_TYPE (exp));
+ if (dont_return_target)
+ {
+ /* In this case, we will return TEMP,
+ so make sure it has the proper mode.
+ But don't forget to store the value into TARGET. */
+ temp = convert_to_mode (GET_MODE (target), temp, unsignedp);
+ emit_move_insn (target, temp);
+ }
+ else
+ convert_move (target, temp, unsignedp);
+ }
+
+ else if (GET_MODE (temp) == BLKmode && TREE_CODE (exp) == STRING_CST)
+ {
+ /* Handle copying a string constant into an array.
+ The string constant may be shorter than the array.
+ So copy just the string's actual length, and clear the rest. */
+ rtx size;
+
+ /* Get the size of the data type of the string,
+ which is actually the size of the target. */
+ size = expr_size (exp);
+ if (GET_CODE (size) == CONST_INT
+ && INTVAL (size) < TREE_STRING_LENGTH (exp))
+ emit_block_move (target, temp, size,
+ TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+ else
+ {
+ /* Compute the size of the data to copy from the string. */
+ tree copy_size
+ = size_binop (MIN_EXPR,
+ make_tree (sizetype, size),
+ convert (sizetype,
+ build_int_2 (TREE_STRING_LENGTH (exp), 0)));
+ rtx copy_size_rtx = expand_expr (copy_size, NULL_RTX,
+ VOIDmode, 0);
+ rtx label = 0;
+
+ /* Copy that much. */
+ emit_block_move (target, temp, copy_size_rtx,
+ TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+
+ /* Figure out how much is left in TARGET
+ that we have to clear. */
+ if (GET_CODE (copy_size_rtx) == CONST_INT)
+ {
+ temp = plus_constant (XEXP (target, 0),
+ TREE_STRING_LENGTH (exp));
+ size = plus_constant (size,
+ - TREE_STRING_LENGTH (exp));
+ }
+ else
+ {
+ enum machine_mode size_mode = Pmode;
+
+ temp = force_reg (Pmode, XEXP (target, 0));
+ temp = expand_binop (size_mode, add_optab, temp,
+ copy_size_rtx, NULL_RTX, 0,
+ OPTAB_LIB_WIDEN);
+
+ size = expand_binop (size_mode, sub_optab, size,
+ copy_size_rtx, NULL_RTX, 0,
+ OPTAB_LIB_WIDEN);
+
+ emit_cmp_insn (size, const0_rtx, LT, NULL_RTX,
+ GET_MODE (size), 0, 0);
+ label = gen_label_rtx ();
+ emit_jump_insn (gen_blt (label));
+ }
+
+ if (size != const0_rtx)
+ {
+#ifdef TARGET_MEM_FUNCTIONS
+ emit_library_call (memset_libfunc, 0, VOIDmode, 3,
+ temp, Pmode, const0_rtx, Pmode, size, Pmode);
+#else
+ emit_library_call (bzero_libfunc, 0, VOIDmode, 2,
+ temp, Pmode, size, Pmode);
+#endif
+ }
+ if (label)
+ emit_label (label);
+ }
+ }
+ else if (GET_MODE (temp) == BLKmode)
+ emit_block_move (target, temp, expr_size (exp),
+ TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+ else
+ emit_move_insn (target, temp);
+ }
+
+ /* If we don't want a value, return NULL_RTX. */
+ if (! want_value)
+ return NULL_RTX;
+
+ /* If we are supposed to return TEMP, do so as long as it isn't a MEM.
+ ??? The latter test doesn't seem to make sense. */
+ else if (dont_return_target && GET_CODE (temp) != MEM)
+ return temp;
+
+ /* Return TARGET itself if it is a hard register. */
+ else if (want_value && GET_MODE (target) != BLKmode
+ && ! (GET_CODE (target) == REG
+ && REGNO (target) < FIRST_PSEUDO_REGISTER))
+ return copy_to_reg (target);
+
+ else
+ return target;
+}
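+
+/* Illustrative sketch, not part of the imported source: computing a
+   hypothetical expression EXP_TREE into a fresh SImode pseudo and
+   requesting a safe copy of the value back.  */
+#if 0
+ {
+ extern tree exp_tree;
+ rtx target = gen_reg_rtx (SImode);
+ rtx value = store_expr (exp_tree, target, 1);
+ }
+#endif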
+
+/* Store the value of constructor EXP into the rtx TARGET.
+ TARGET is either a REG or a MEM. */
+
+static void
+store_constructor (exp, target)
+ tree exp;
+ rtx target;
+{
+ tree type = TREE_TYPE (exp);
+
+ /* We know our target cannot conflict, since safe_from_p has been called. */
+#if 0
+ /* Don't try copying piece by piece into a hard register
+ since that is vulnerable to being clobbered by EXP.
+ Instead, construct in a pseudo register and then copy it all. */
+ if (GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
+ {
+ rtx temp = gen_reg_rtx (GET_MODE (target));
+ store_constructor (exp, temp);
+ emit_move_insn (target, temp);
+ return;
+ }
+#endif
+
+ if (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ {
+ register tree elt;
+
+ /* Inform later passes that the whole union value is dead. */
+ if (TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ emit_insn (gen_rtx (CLOBBER, VOIDmode, target));
+
+ /* If we are building a static constructor into a register,
+ set the initial value as zero so we can fold the value into
+ a constant. */
+ else if (GET_CODE (target) == REG && TREE_STATIC (exp))
+ emit_move_insn (target, const0_rtx);
+
+ /* If the constructor has fewer fields than the structure,
+ clear the whole structure first. */
+ else if (list_length (CONSTRUCTOR_ELTS (exp))
+ != list_length (TYPE_FIELDS (type)))
+ clear_storage (target, int_size_in_bytes (type));
+ else
+ /* Inform later passes that the old value is dead. */
+ emit_insn (gen_rtx (CLOBBER, VOIDmode, target));
+
+ /* Store each element of the constructor into
+ the corresponding field of TARGET. */
+
+ for (elt = CONSTRUCTOR_ELTS (exp); elt; elt = TREE_CHAIN (elt))
+ {
+ register tree field = TREE_PURPOSE (elt);
+ register enum machine_mode mode;
+ int bitsize;
+ int bitpos = 0;
+ int unsignedp;
+ tree pos, constant = 0, offset = 0;
+ rtx to_rtx = target;
+
+ /* Just ignore missing fields.
+ We cleared the whole structure, above,
+ if any fields are missing. */
+ if (field == 0)
+ continue;
+
+ bitsize = TREE_INT_CST_LOW (DECL_SIZE (field));
+ unsignedp = TREE_UNSIGNED (field);
+ mode = DECL_MODE (field);
+ if (DECL_BIT_FIELD (field))
+ mode = VOIDmode;
+
+ pos = DECL_FIELD_BITPOS (field);
+ if (TREE_CODE (pos) == INTEGER_CST)
+ constant = pos;
+ else if (TREE_CODE (pos) == PLUS_EXPR
+ && TREE_CODE (TREE_OPERAND (pos, 1)) == INTEGER_CST)
+ constant = TREE_OPERAND (pos, 1), offset = TREE_OPERAND (pos, 0);
+ else
+ offset = pos;
+
+ if (constant)
+ bitpos = TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field));
+
+ if (offset)
+ {
+ rtx offset_rtx;
+
+ if (contains_placeholder_p (offset))
+ offset = build (WITH_RECORD_EXPR, sizetype,
+ offset, exp);
+
+ offset = size_binop (FLOOR_DIV_EXPR, offset,
+ size_int (BITS_PER_UNIT));
+
+ offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0);
+ if (GET_CODE (to_rtx) != MEM)
+ abort ();
+
+ to_rtx
+ = change_address (to_rtx, VOIDmode,
+ gen_rtx (PLUS, Pmode, XEXP (to_rtx, 0),
+ force_reg (Pmode, offset_rtx)));
+ }
+
+ store_field (to_rtx, bitsize, bitpos, mode, TREE_VALUE (elt),
+ /* The alignment of TARGET is
+ at least what its type requires. */
+ VOIDmode, 0,
+ TYPE_ALIGN (type) / BITS_PER_UNIT,
+ int_size_in_bytes (type));
+ }
+ }
+ else if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ register tree elt;
+ register int i;
+ tree domain = TYPE_DOMAIN (type);
+ HOST_WIDE_INT minelt = TREE_INT_CST_LOW (TYPE_MIN_VALUE (domain));
+ HOST_WIDE_INT maxelt = TREE_INT_CST_LOW (TYPE_MAX_VALUE (domain));
+ tree elttype = TREE_TYPE (type);
+
+ /* If the constructor has fewer fields than the structure,
+ clear the whole structure first. Similarly if this is a
+ static constructor of a non-BLKmode object. */
+
+ if (list_length (CONSTRUCTOR_ELTS (exp)) < maxelt - minelt + 1
+ || (GET_CODE (target) == REG && TREE_STATIC (exp)))
+ clear_storage (target, int_size_in_bytes (type));
+ else
+ /* Inform later passes that the old value is dead. */
+ emit_insn (gen_rtx (CLOBBER, VOIDmode, target));
+
+ /* Store each element of the constructor into
+ the corresponding element of TARGET, determined
+ by counting the elements. */
+ for (elt = CONSTRUCTOR_ELTS (exp), i = 0;
+ elt;
+ elt = TREE_CHAIN (elt), i++)
+ {
+ register enum machine_mode mode;
+ int bitsize;
+ int bitpos;
+ int unsignedp;
+ tree index = TREE_PURPOSE (elt);
+ rtx xtarget = target;
+
+ mode = TYPE_MODE (elttype);
+ bitsize = GET_MODE_BITSIZE (mode);
+ unsignedp = TREE_UNSIGNED (elttype);
+
+ if (index != 0 && TREE_CODE (index) != INTEGER_CST)
+ {
+ /* We don't currently allow variable indices in a
+ C initializer, but let's try here to support them. */
+ rtx pos_rtx, addr, xtarget;
+ tree position;
+
+ position = size_binop (MULT_EXPR, index, TYPE_SIZE (elttype));
+ pos_rtx = expand_expr (position, 0, VOIDmode, 0);
+ addr = gen_rtx (PLUS, Pmode, XEXP (target, 0), pos_rtx);
+ xtarget = change_address (target, mode, addr);
+ store_expr (TREE_VALUE (elt), xtarget, 0);
+ }
+ else
+ {
+ if (index != 0)
+ bitpos = ((TREE_INT_CST_LOW (index) - minelt)
+ * TREE_INT_CST_LOW (TYPE_SIZE (elttype)));
+ else
+ bitpos = (i * TREE_INT_CST_LOW (TYPE_SIZE (elttype)));
+
+ store_field (xtarget, bitsize, bitpos, mode, TREE_VALUE (elt),
+ /* The alignment of TARGET is
+ at least what its type requires. */
+ VOIDmode, 0,
+ TYPE_ALIGN (type) / BITS_PER_UNIT,
+ int_size_in_bytes (type));
+ }
+ }
+ }
+
+ else
+ abort ();
+}
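+
+/* Illustrative sketch, not part of the imported source: storing a
+   hypothetical CONSTRUCTOR node CTOR_TREE into a stack temporary
+   sized for its type.  */
+#if 0
+ {
+ extern tree ctor_tree;
+ tree type = TREE_TYPE (ctor_tree);
+ rtx target = assign_stack_temp (TYPE_MODE (type),
+ int_size_in_bytes (type), 0);
+ store_constructor (ctor_tree, target);
+ }
+#endif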
+
+/* Store the value of EXP (an expression tree)
+ into a subfield of TARGET which has mode MODE and occupies
+ BITSIZE bits, starting BITPOS bits from the start of TARGET.
+ If MODE is VOIDmode, it means that we are storing into a bit-field.
+
+ If VALUE_MODE is VOIDmode, return nothing in particular.
+ UNSIGNEDP is not used in this case.
+
+ Otherwise, return an rtx for the value stored. This rtx
+ has mode VALUE_MODE if that is convenient to do.
+ In this case, UNSIGNEDP must be nonzero if the value is an unsigned type.
+
+ ALIGN is the alignment that TARGET is known to have, measured in bytes.
+ TOTAL_SIZE is the size in bytes of the structure, or -1 if varying. */
+
+static rtx
+store_field (target, bitsize, bitpos, mode, exp, value_mode,
+ unsignedp, align, total_size)
+ rtx target;
+ int bitsize, bitpos;
+ enum machine_mode mode;
+ tree exp;
+ enum machine_mode value_mode;
+ int unsignedp;
+ int align;
+ int total_size;
+{
+ HOST_WIDE_INT width_mask = 0;
+
+ if (bitsize < HOST_BITS_PER_WIDE_INT)
+ width_mask = ((HOST_WIDE_INT) 1 << bitsize) - 1;
+
+ /* If we are storing into an unaligned field of an aligned union that is
+ in a register, we may have the mode of TARGET being an integer mode but
+ MODE == BLKmode. In that case, get an aligned object whose size and
+ alignment are the same as TARGET and store TARGET into it (we can avoid
+ the store if the field being stored is the entire width of TARGET). Then
+ call ourselves recursively to store the field into a BLKmode version of
+ that object. Finally, load from the object into TARGET. This is not
+ very efficient in general, but should only be slightly more expensive
+ than the otherwise-required unaligned accesses. Perhaps this can be
+ cleaned up later. */
+
+ if (mode == BLKmode
+ && (GET_CODE (target) == REG || GET_CODE (target) == SUBREG))
+ {
+ rtx object = assign_stack_temp (GET_MODE (target),
+ GET_MODE_SIZE (GET_MODE (target)), 0);
+ rtx blk_object = copy_rtx (object);
+
+ MEM_IN_STRUCT_P (object) = 1;
+ MEM_IN_STRUCT_P (blk_object) = 1;
+ PUT_MODE (blk_object, BLKmode);
+
+ if (bitsize != GET_MODE_BITSIZE (GET_MODE (target)))
+ emit_move_insn (object, target);
+
+ store_field (blk_object, bitsize, bitpos, mode, exp, VOIDmode, 0,
+ align, total_size);
+
+ /* Even though we aren't returning target, we need to
+ give it the updated value. */
+ emit_move_insn (target, object);
+
+ return blk_object;
+ }
+
+ /* If the structure is in a register or if the component
+ is a bit field, we cannot use addressing to access it.
+ Use bit-field techniques or SUBREG to store in it. */
+
+ if (mode == VOIDmode
+ || (mode != BLKmode && ! direct_store[(int) mode])
+ || GET_CODE (target) == REG
+ || GET_CODE (target) == SUBREG
+ /* If the field isn't aligned enough to store as an ordinary memref,
+ store it as a bit field. */
+ || (STRICT_ALIGNMENT
+ && align * BITS_PER_UNIT < GET_MODE_ALIGNMENT (mode))
+ || (STRICT_ALIGNMENT && bitpos % GET_MODE_ALIGNMENT (mode) != 0))
+ {
+ rtx temp = expand_expr (exp, NULL_RTX, VOIDmode, 0);
+
+ /* Unless MODE is VOIDmode or BLKmode, convert TEMP to
+ MODE. */
+ if (mode != VOIDmode && mode != BLKmode
+ && mode != TYPE_MODE (TREE_TYPE (exp)))
+ temp = convert_modes (mode, TYPE_MODE (TREE_TYPE (exp)), temp, 1);
+
+ /* Store the value in the bitfield. */
+ store_bit_field (target, bitsize, bitpos, mode, temp, align, total_size);
+ if (value_mode != VOIDmode)
+ {
+ /* The caller wants an rtx for the value. */
+ /* If possible, avoid refetching from the bitfield itself. */
+ if (width_mask != 0
+ && ! (GET_CODE (target) == MEM && MEM_VOLATILE_P (target)))
+ {
+ tree count;
+ enum machine_mode tmode;
+
+ if (unsignedp)
+ return expand_and (temp, GEN_INT (width_mask), NULL_RTX);
+ tmode = GET_MODE (temp);
+ if (tmode == VOIDmode)
+ tmode = value_mode;
+ count = build_int_2 (GET_MODE_BITSIZE (tmode) - bitsize, 0);
+ temp = expand_shift (LSHIFT_EXPR, tmode, temp, count, 0, 0);
+ return expand_shift (RSHIFT_EXPR, tmode, temp, count, 0, 0);
+ }
+ return extract_bit_field (target, bitsize, bitpos, unsignedp,
+ NULL_RTX, value_mode, 0, align,
+ total_size);
+ }
+ return const0_rtx;
+ }
+ else
+ {
+ rtx addr = XEXP (target, 0);
+ rtx to_rtx;
+
+ /* If a value is wanted, it must be the lhs;
+ so make the address stable for multiple use. */
+
+ if (value_mode != VOIDmode && GET_CODE (addr) != REG
+ && ! CONSTANT_ADDRESS_P (addr)
+ /* A frame-pointer reference is already stable. */
+ && ! (GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT
+ && (XEXP (addr, 0) == virtual_incoming_args_rtx
+ || XEXP (addr, 0) == virtual_stack_vars_rtx)))
+ addr = copy_to_reg (addr);
+
+ /* Now build a reference to just the desired component. */
+
+ to_rtx = change_address (target, mode,
+ plus_constant (addr, (bitpos / BITS_PER_UNIT)));
+ MEM_IN_STRUCT_P (to_rtx) = 1;
+
+ return store_expr (exp, to_rtx, value_mode != VOIDmode);
+ }
+}
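+
+/* Illustrative sketch, not part of the imported source: storing a
+   hypothetical value VAL_TREE into an 8-bit field at bit offset 16 of
+   STRUCT_RTX, treated as a bit-field (MODE == VOIDmode), discarding
+   the stored value (VALUE_MODE == VOIDmode).  */
+#if 0
+ {
+ extern tree val_tree;
+ extern rtx struct_rtx;
+ store_field (struct_rtx, 8, 16, VOIDmode, val_tree, VOIDmode, 0,
+ TYPE_ALIGN (TREE_TYPE (val_tree)) / BITS_PER_UNIT, -1);
+ }
+#endif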
+
+/* Return true if any object containing the innermost array is an unaligned
+ packed structure field. */
+
+static int
+get_inner_unaligned_p (exp)
+ tree exp;
+{
+ int needed_alignment = TYPE_ALIGN (TREE_TYPE (exp));
+
+ while (1)
+ {
+ if (TREE_CODE (exp) == COMPONENT_REF || TREE_CODE (exp) == BIT_FIELD_REF)
+ {
+ if (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ < needed_alignment)
+ return 1;
+ }
+ else if (TREE_CODE (exp) != ARRAY_REF
+ && TREE_CODE (exp) != NON_LVALUE_EXPR
+ && ! ((TREE_CODE (exp) == NOP_EXPR
+ || TREE_CODE (exp) == CONVERT_EXPR)
+ && (TYPE_MODE (TREE_TYPE (exp))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))))
+ break;
+
+ exp = TREE_OPERAND (exp, 0);
+ }
+
+ return 0;
+}
+
+/* Given an expression EXP that may be a COMPONENT_REF, a BIT_FIELD_REF,
+ or an ARRAY_REF, look for nested COMPONENT_REFs, BIT_FIELD_REFs, or
+ ARRAY_REFs and find the ultimate containing object, which we return.
+
+ We set *PBITSIZE to the size in bits that we want, *PBITPOS to the
+ bit position, and *PUNSIGNEDP to the signedness of the field.
+ If the position of the field is variable, we store a tree
+ giving the variable offset (in units) in *POFFSET.
+ This offset is in addition to the bit position.
+ If the position is not variable, we store 0 in *POFFSET.
+
+ If any of the extraction expressions is volatile,
+ we store 1 in *PVOLATILEP. Otherwise we don't change that.
+
+ If the field is a bit-field, *PMODE is set to VOIDmode. Otherwise, it
+ is a mode that can be used to access the field. In that case, *PBITSIZE
+ is redundant.
+
+ If the field describes a variable-sized object, *PMODE is set to
+ VOIDmode and *PBITSIZE is set to -1. An access cannot be made in
+ this case, but the address of the object can be found. */
+
+tree
+get_inner_reference (exp, pbitsize, pbitpos, poffset, pmode,
+ punsignedp, pvolatilep)
+ tree exp;
+ int *pbitsize;
+ int *pbitpos;
+ tree *poffset;
+ enum machine_mode *pmode;
+ int *punsignedp;
+ int *pvolatilep;
+{
+ tree orig_exp = exp;
+ tree size_tree = 0;
+ enum machine_mode mode = VOIDmode;
+ tree offset = integer_zero_node;
+
+ if (TREE_CODE (exp) == COMPONENT_REF)
+ {
+ size_tree = DECL_SIZE (TREE_OPERAND (exp, 1));
+ if (! DECL_BIT_FIELD (TREE_OPERAND (exp, 1)))
+ mode = DECL_MODE (TREE_OPERAND (exp, 1));
+ *punsignedp = TREE_UNSIGNED (TREE_OPERAND (exp, 1));
+ }
+ else if (TREE_CODE (exp) == BIT_FIELD_REF)
+ {
+ size_tree = TREE_OPERAND (exp, 1);
+ *punsignedp = TREE_UNSIGNED (exp);
+ }
+ else
+ {
+ mode = TYPE_MODE (TREE_TYPE (exp));
+ *pbitsize = GET_MODE_BITSIZE (mode);
+ *punsignedp = TREE_UNSIGNED (TREE_TYPE (exp));
+ }
+
+ if (size_tree)
+ {
+ if (TREE_CODE (size_tree) != INTEGER_CST)
+ mode = BLKmode, *pbitsize = -1;
+ else
+ *pbitsize = TREE_INT_CST_LOW (size_tree);
+ }
+
+ /* Compute cumulative bit-offset for nested component-refs and array-refs,
+ and find the ultimate containing object. */
+
+ *pbitpos = 0;
+
+ while (1)
+ {
+ if (TREE_CODE (exp) == COMPONENT_REF || TREE_CODE (exp) == BIT_FIELD_REF)
+ {
+ tree pos = (TREE_CODE (exp) == COMPONENT_REF
+ ? DECL_FIELD_BITPOS (TREE_OPERAND (exp, 1))
+ : TREE_OPERAND (exp, 2));
+
+ /* If this field hasn't been filled in yet, don't go
+ past it. This should only happen when folding expressions
+ made during type construction. */
+ if (pos == 0)
+ break;
+
+ if (TREE_CODE (pos) == PLUS_EXPR)
+ {
+ tree constant, var;
+ if (TREE_CODE (TREE_OPERAND (pos, 0)) == INTEGER_CST)
+ {
+ constant = TREE_OPERAND (pos, 0);
+ var = TREE_OPERAND (pos, 1);
+ }
+ else if (TREE_CODE (TREE_OPERAND (pos, 1)) == INTEGER_CST)
+ {
+ constant = TREE_OPERAND (pos, 1);
+ var = TREE_OPERAND (pos, 0);
+ }
+ else
+ abort ();
+
+ *pbitpos += TREE_INT_CST_LOW (constant);
+ offset = size_binop (PLUS_EXPR, offset,
+ size_binop (FLOOR_DIV_EXPR, var,
+ size_int (BITS_PER_UNIT)));
+ }
+ else if (TREE_CODE (pos) == INTEGER_CST)
+ *pbitpos += TREE_INT_CST_LOW (pos);
+ else
+ {
+ /* Assume here that the offset is a multiple of a unit.
+ If not, there should be an explicitly added constant. */
+ offset = size_binop (PLUS_EXPR, offset,
+ size_binop (FLOOR_DIV_EXPR, pos,
+ size_int (BITS_PER_UNIT)));
+ }
+ }
+
+ else if (TREE_CODE (exp) == ARRAY_REF)
+ {
+ /* This code is based on the code in case ARRAY_REF in expand_expr
+ below. We assume here that the size of an array element is
+ always an integral multiple of BITS_PER_UNIT. */
+
+ tree index = TREE_OPERAND (exp, 1);
+ tree domain = TYPE_DOMAIN (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ tree low_bound
+ = domain ? TYPE_MIN_VALUE (domain) : integer_zero_node;
+ tree index_type = TREE_TYPE (index);
+
+ if (! integer_zerop (low_bound))
+ index = fold (build (MINUS_EXPR, index_type, index, low_bound));
+
+ if (TYPE_PRECISION (index_type) != POINTER_SIZE)
+ {
+ index = convert (type_for_size (POINTER_SIZE, 0), index);
+ index_type = TREE_TYPE (index);
+ }
+
+ index = fold (build (MULT_EXPR, index_type, index,
+ TYPE_SIZE (TREE_TYPE (exp))));
+
+ if (TREE_CODE (index) == INTEGER_CST
+ && TREE_INT_CST_HIGH (index) == 0)
+ *pbitpos += TREE_INT_CST_LOW (index);
+ else
+ offset = size_binop (PLUS_EXPR, offset,
+ size_binop (FLOOR_DIV_EXPR, index,
+ size_int (BITS_PER_UNIT)));
+ }
+ else if (TREE_CODE (exp) != NON_LVALUE_EXPR
+ && ! ((TREE_CODE (exp) == NOP_EXPR
+ || TREE_CODE (exp) == CONVERT_EXPR)
+ && (TYPE_MODE (TREE_TYPE (exp))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))))
+ break;
+
+ /* If any reference in the chain is volatile, the effect is volatile. */
+ if (TREE_THIS_VOLATILE (exp))
+ *pvolatilep = 1;
+ exp = TREE_OPERAND (exp, 0);
+ }
+
+ /* If this was a bit-field, see if there is a mode that allows direct
+ access in case EXP is in memory. */
+ if (mode == VOIDmode && *pbitsize != 0 && *pbitpos % *pbitsize == 0)
+ {
+ mode = mode_for_size (*pbitsize, MODE_INT, 0);
+ if (mode == BLKmode)
+ mode = VOIDmode;
+ }
+
+ if (integer_zerop (offset))
+ offset = 0;
+
+ if (offset != 0 && contains_placeholder_p (offset))
+ offset = build (WITH_RECORD_EXPR, sizetype, offset, orig_exp);
+
+ *pmode = mode;
+ *poffset = offset;
+ return exp;
+}
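+
+/* Illustrative sketch, not part of the imported source: decomposing a
+   hypothetical COMPONENT_REF node REF_TREE into the containing object
+   and the position of the referenced field.  */
+#if 0
+ {
+ extern tree ref_tree;
+ int bitsize, bitpos, unsignedp, volatilep = 0;
+ tree offset;
+ enum machine_mode mode1;
+ tree inner = get_inner_reference (ref_tree, &bitsize, &bitpos,
+ &offset, &mode1, &unsignedp, &volatilep);
+ }
+#endif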
+
+/* Given an rtx VALUE that may contain additions and multiplications,
+ return an equivalent value that just refers to a register or memory.
+ This is done by generating instructions to perform the arithmetic
+ and returning a pseudo-register containing the value.
+
+ The returned value may be a REG, SUBREG, MEM or constant. */
+
+rtx
+force_operand (value, target)
+ rtx value, target;
+{
+ register optab binoptab = 0;
+ /* Use a temporary to force order of execution of calls to
+ `force_operand'. */
+ rtx tmp;
+ register rtx op2;
+ /* Use subtarget as the target for operand 0 of a binary operation. */
+ register rtx subtarget = (target != 0 && GET_CODE (target) == REG ? target : 0);
+
+ if (GET_CODE (value) == PLUS)
+ binoptab = add_optab;
+ else if (GET_CODE (value) == MINUS)
+ binoptab = sub_optab;
+ else if (GET_CODE (value) == MULT)
+ {
+ op2 = XEXP (value, 1);
+ if (!CONSTANT_P (op2)
+ && !(GET_CODE (op2) == REG && op2 != subtarget))
+ subtarget = 0;
+ tmp = force_operand (XEXP (value, 0), subtarget);
+ return expand_mult (GET_MODE (value), tmp,
+ force_operand (op2, NULL_RTX),
+ target, 0);
+ }
+
+ if (binoptab)
+ {
+ op2 = XEXP (value, 1);
+ if (!CONSTANT_P (op2)
+ && !(GET_CODE (op2) == REG && op2 != subtarget))
+ subtarget = 0;
+ if (binoptab == sub_optab && GET_CODE (op2) == CONST_INT)
+ {
+ binoptab = add_optab;
+ op2 = negate_rtx (GET_MODE (value), op2);
+ }
+
+ /* Check for an addition with OP2 a constant integer and our first
+ operand a PLUS of a virtual register and something else. In that
+ case, we want to emit the sum of the virtual register and the
+ constant first and then add the other value. This allows virtual
+ register instantiation to simply modify the constant rather than
+ creating another one around this addition. */
+ if (binoptab == add_optab && GET_CODE (op2) == CONST_INT
+ && GET_CODE (XEXP (value, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (value, 0), 0)) == REG
+ && REGNO (XEXP (XEXP (value, 0), 0)) >= FIRST_VIRTUAL_REGISTER
+ && REGNO (XEXP (XEXP (value, 0), 0)) <= LAST_VIRTUAL_REGISTER)
+ {
+ rtx temp = expand_binop (GET_MODE (value), binoptab,
+ XEXP (XEXP (value, 0), 0), op2,
+ subtarget, 0, OPTAB_LIB_WIDEN);
+ return expand_binop (GET_MODE (value), binoptab, temp,
+ force_operand (XEXP (XEXP (value, 0), 1), 0),
+ target, 0, OPTAB_LIB_WIDEN);
+ }
+
+ tmp = force_operand (XEXP (value, 0), subtarget);
+ return expand_binop (GET_MODE (value), binoptab, tmp,
+ force_operand (op2, NULL_RTX),
+ target, 0, OPTAB_LIB_WIDEN);
+ /* We give UNSIGNEDP = 0 to expand_binop
+ because the only operations we are expanding here are signed ones. */
+ }
+ return value;
+}
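+
+/* Illustrative sketch, not part of the imported source: reducing the
+   address computation (plus R (mult I 4)) to a single pseudo, where
+   R_RTX and I_RTX are hypothetical registers.  */
+#if 0
+ {
+ extern rtx r_rtx, i_rtx;
+ rtx addr = gen_rtx (PLUS, Pmode, r_rtx,
+ gen_rtx (MULT, Pmode, i_rtx, GEN_INT (4)));
+ rtx reg = force_operand (addr, NULL_RTX);
+ }
+#endif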
+
+/* Subroutine of expand_expr:
+ save the non-copied parts (LIST) of an expr (LHS), and return a list
+ which can restore these values to their previous values,
+ should something modify their storage. */
+
+static tree
+save_noncopied_parts (lhs, list)
+ tree lhs;
+ tree list;
+{
+ tree tail;
+ tree parts = 0;
+
+ for (tail = list; tail; tail = TREE_CHAIN (tail))
+ if (TREE_CODE (TREE_VALUE (tail)) == TREE_LIST)
+ parts = chainon (parts, save_noncopied_parts (lhs, TREE_VALUE (tail)));
+ else
+ {
+ tree part = TREE_VALUE (tail);
+ tree part_type = TREE_TYPE (part);
+ tree to_be_saved = build (COMPONENT_REF, part_type, lhs, part);
+ rtx target = assign_stack_temp (TYPE_MODE (part_type),
+ int_size_in_bytes (part_type), 0);
+ if (! memory_address_p (TYPE_MODE (part_type), XEXP (target, 0)))
+ target = change_address (target, TYPE_MODE (part_type), NULL_RTX);
+ parts = tree_cons (to_be_saved,
+ build (RTL_EXPR, part_type, NULL_TREE,
+ (tree) target),
+ parts);
+ store_expr (TREE_PURPOSE (parts), RTL_EXPR_RTL (TREE_VALUE (parts)), 0);
+ }
+ return parts;
+}
+
+/* Subroutine of expand_expr:
+ record the non-copied parts (LIST) of an expr (LHS), and return a list
+ which specifies the initial values of these parts. */
+
+static tree
+init_noncopied_parts (lhs, list)
+ tree lhs;
+ tree list;
+{
+ tree tail;
+ tree parts = 0;
+
+ for (tail = list; tail; tail = TREE_CHAIN (tail))
+ if (TREE_CODE (TREE_VALUE (tail)) == TREE_LIST)
+ parts = chainon (parts, init_noncopied_parts (lhs, TREE_VALUE (tail)));
+ else
+ {
+ tree part = TREE_VALUE (tail);
+ tree part_type = TREE_TYPE (part);
+ tree to_be_initialized = build (COMPONENT_REF, part_type, lhs, part);
+ parts = tree_cons (TREE_PURPOSE (tail), to_be_initialized, parts);
+ }
+ return parts;
+}
+
+/* Subroutine of expand_expr: return nonzero iff there is no way that
+ EXP can reference X, which is being modified. */
+
+static int
+safe_from_p (x, exp)
+ rtx x;
+ tree exp;
+{
+ rtx exp_rtl = 0;
+ int i, nops;
+
+ if (x == 0)
+ return 1;
+
+ /* If this is a subreg of a hard register, declare it unsafe, otherwise,
+ find the underlying pseudo. */
+ if (GET_CODE (x) == SUBREG)
+ {
+ x = SUBREG_REG (x);
+ if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ return 0;
+ }
+
+ /* If X is a location in the outgoing argument area, it is always safe. */
+ if (GET_CODE (x) == MEM
+ && (XEXP (x, 0) == virtual_outgoing_args_rtx
+ || (GET_CODE (XEXP (x, 0)) == PLUS
+ && XEXP (XEXP (x, 0), 0) == virtual_outgoing_args_rtx)))
+ return 1;
+
+ switch (TREE_CODE_CLASS (TREE_CODE (exp)))
+ {
+ case 'd':
+ exp_rtl = DECL_RTL (exp);
+ break;
+
+ case 'c':
+ return 1;
+
+ case 'x':
+ if (TREE_CODE (exp) == TREE_LIST)
+ return ((TREE_VALUE (exp) == 0
+ || safe_from_p (x, TREE_VALUE (exp)))
+ && (TREE_CHAIN (exp) == 0
+ || safe_from_p (x, TREE_CHAIN (exp))));
+ else
+ return 0;
+
+ case '1':
+ return safe_from_p (x, TREE_OPERAND (exp, 0));
+
+ case '2':
+ case '<':
+ return (safe_from_p (x, TREE_OPERAND (exp, 0))
+ && safe_from_p (x, TREE_OPERAND (exp, 1)));
+
+ case 'e':
+ case 'r':
+ /* Now do code-specific tests. EXP_RTL is set to any rtx we find in
+ the expression. If it is set, we conflict iff we are that rtx or
+ both are in memory. Otherwise, we check all operands of the
+ expression recursively. */
+
+ switch (TREE_CODE (exp))
+ {
+ case ADDR_EXPR:
+ return (staticp (TREE_OPERAND (exp, 0))
+ || safe_from_p (x, TREE_OPERAND (exp, 0)));
+
+ case INDIRECT_REF:
+ if (GET_CODE (x) == MEM)
+ return 0;
+ break;
+
+ case CALL_EXPR:
+ exp_rtl = CALL_EXPR_RTL (exp);
+ if (exp_rtl == 0)
+ {
+ /* Assume that the call will clobber all hard registers and
+ all of memory. */
+ if ((GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
+ || GET_CODE (x) == MEM)
+ return 0;
+ }
+
+ break;
+
+ case RTL_EXPR:
+ exp_rtl = RTL_EXPR_RTL (exp);
+ if (exp_rtl == 0)
+ /* We don't know what this can modify. */
+ return 0;
+
+ break;
+
+ case WITH_CLEANUP_EXPR:
+ exp_rtl = RTL_EXPR_RTL (exp);
+ break;
+
+ case CLEANUP_POINT_EXPR:
+ return safe_from_p (x, TREE_OPERAND (exp, 0));
+
+ case SAVE_EXPR:
+ exp_rtl = SAVE_EXPR_RTL (exp);
+ break;
+
+ case BIND_EXPR:
+ /* The only operand we look at is operand 1. The rest aren't
+ part of the expression. */
+ return safe_from_p (x, TREE_OPERAND (exp, 1));
+
+ case METHOD_CALL_EXPR:
+ /* This takes a rtx argument, but shouldn't appear here. */
+ abort ();
+ }
+
+ /* If we have an rtx, we do not need to scan our operands. */
+ if (exp_rtl)
+ break;
+
+ nops = tree_code_length[(int) TREE_CODE (exp)];
+ for (i = 0; i < nops; i++)
+ if (TREE_OPERAND (exp, i) != 0
+ && ! safe_from_p (x, TREE_OPERAND (exp, i)))
+ return 0;
+ }
+
+ /* If we have an rtl, find any enclosed object. Then see if we conflict
+ with it. */
+ if (exp_rtl)
+ {
+ if (GET_CODE (exp_rtl) == SUBREG)
+ {
+ exp_rtl = SUBREG_REG (exp_rtl);
+ if (GET_CODE (exp_rtl) == REG
+ && REGNO (exp_rtl) < FIRST_PSEUDO_REGISTER)
+ return 0;
+ }
+
+ /* If the rtl is X, then it is not safe. Otherwise, it is unless both
+ are memory and EXP is not readonly. */
+ return ! (rtx_equal_p (x, exp_rtl)
+ || (GET_CODE (x) == MEM && GET_CODE (exp_rtl) == MEM
+ && ! TREE_READONLY (exp)));
+ }
+
+ /* If we reach here, it is safe. */
+ return 1;
+}
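+
+/* Illustrative sketch, not part of the imported source: before reusing
+   TARGET_RTX as the destination for one arm of an expression, verify
+   that evaluating the hypothetical other arm OTHER_TREE cannot read or
+   clobber it; otherwise fall back to a fresh pseudo.  */
+#if 0
+ {
+ extern rtx target_rtx;
+ extern tree other_tree;
+ if (! safe_from_p (target_rtx, other_tree))
+ target_rtx = gen_reg_rtx (GET_MODE (target_rtx));
+ }
+#endif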
+
+/* Subroutine of expand_expr: return nonzero iff EXP is an
+ expression whose type is statically determinable. */
+
+static int
+fixed_type_p (exp)
+ tree exp;
+{
+ if (TREE_CODE (exp) == PARM_DECL
+ || TREE_CODE (exp) == VAR_DECL
+ || TREE_CODE (exp) == CALL_EXPR || TREE_CODE (exp) == TARGET_EXPR
+ || TREE_CODE (exp) == COMPONENT_REF
+ || TREE_CODE (exp) == ARRAY_REF)
+ return 1;
+ return 0;
+}
+
+/* expand_expr: generate code for computing expression EXP.
+ An rtx for the computed value is returned. The value is never null.
+ In the case of a void EXP, const0_rtx is returned.
+
+ The value may be stored in TARGET if TARGET is nonzero.
+ TARGET is just a suggestion; callers must assume that
+ the rtx returned may not be the same as TARGET.
+
+ If TARGET is CONST0_RTX, it means that the value will be ignored.
+
+ If TMODE is not VOIDmode, it suggests generating the
+ result in mode TMODE. But this is done only when convenient.
+ Otherwise, TMODE is ignored and the value is generated in its natural mode.
+ TMODE is just a suggestion; callers must assume that
+ the rtx returned may not have mode TMODE.
+
+ Note that TARGET may have neither TMODE nor MODE. In that case, it
+ probably will not be used.
+
+ If MODIFIER is EXPAND_SUM then when EXP is an addition
+ we can return an rtx of the form (MULT (REG ...) (CONST_INT ...))
+ or a nest of (PLUS ...) and (MINUS ...) where the terms are
+ products as above, or REG or MEM, or constant.
+ Ordinarily in such cases we would output mul or add instructions
+ and then return a pseudo reg containing the sum.
+
+ EXPAND_INITIALIZER is much like EXPAND_SUM except that
+ it also marks a label as absolutely required (it can't be dead).
+ It also makes a ZERO_EXTEND or SIGN_EXTEND instead of emitting extend insns.
+ This is used for outputting expressions used in initializers.
+
+ EXPAND_CONST_ADDRESS says that it is okay to return a MEM
+ with a constant address even if that address is not normally legitimate.
+ EXPAND_INITIALIZER and EXPAND_SUM also have this effect. */
+
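+/* Illustrative sketch, not part of the imported source: the common way
+   to evaluate a hypothetical expression EXP_TREE for its value, letting
+   expand_expr choose both the target and the mode.  */
+#if 0
+ {
+ extern tree exp_tree;
+ rtx val = expand_expr (exp_tree, NULL_RTX, VOIDmode, 0);
+ }
+#endif
+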
+rtx
+expand_expr (exp, target, tmode, modifier)
+ register tree exp;
+ rtx target;
+ enum machine_mode tmode;
+ enum expand_modifier modifier;
+{
+ /* Chain of pending expressions for PLACEHOLDER_EXPR to replace.
+ This is static so it will be accessible to our recursive callees. */
+ static tree placeholder_list = 0;
+ register rtx op0, op1, temp;
+ tree type = TREE_TYPE (exp);
+ int unsignedp = TREE_UNSIGNED (type);
+ register enum machine_mode mode = TYPE_MODE (type);
+ register enum tree_code code = TREE_CODE (exp);
+ optab this_optab;
+ /* Use subtarget as the target for operand 0 of a binary operation. */
+ rtx subtarget = (target != 0 && GET_CODE (target) == REG ? target : 0);
+ rtx original_target = target;
+ /* Maybe defer this until sure not doing bytecode? */
+ int ignore = (target == const0_rtx
+ || ((code == NON_LVALUE_EXPR || code == NOP_EXPR
+ || code == CONVERT_EXPR || code == REFERENCE_EXPR
+ || code == COND_EXPR)
+ && TREE_CODE (type) == VOID_TYPE));
+ tree context;
+
+
+ if (output_bytecode && modifier != EXPAND_INITIALIZER)
+ {
+ bc_expand_expr (exp);
+ return NULL;
+ }
+
+ /* Don't use hard regs as subtargets, because the combiner
+ can only handle pseudo regs. */
+ if (subtarget && REGNO (subtarget) < FIRST_PSEUDO_REGISTER)
+ subtarget = 0;
+ /* Avoid subtargets inside loops,
+ since they hide some invariant expressions. */
+ if (preserve_subexpressions_p ())
+ subtarget = 0;
+
+ /* If we are going to ignore this result, we need only do something
+ if there is a side-effect somewhere in the expression. If there
+ is, short-circuit the most common cases here. Note that we must
+ not call expand_expr with anything but const0_rtx in case this
+ is an initial expansion of a size that contains a PLACEHOLDER_EXPR. */
+
+ if (ignore)
+ {
+ if (! TREE_SIDE_EFFECTS (exp))
+ return const0_rtx;
+
+ /* Ensure we reference a volatile object even if value is ignored. */
+ if (TREE_THIS_VOLATILE (exp)
+ && TREE_CODE (exp) != FUNCTION_DECL
+ && mode != VOIDmode && mode != BLKmode)
+ {
+ temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier);
+ if (GET_CODE (temp) == MEM)
+ temp = copy_to_reg (temp);
+ return const0_rtx;
+ }
+
+ if (TREE_CODE_CLASS (code) == '1')
+ return expand_expr (TREE_OPERAND (exp, 0), const0_rtx,
+ VOIDmode, modifier);
+ else if (TREE_CODE_CLASS (code) == '2'
+ || TREE_CODE_CLASS (code) == '<')
+ {
+ expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, modifier);
+ expand_expr (TREE_OPERAND (exp, 1), const0_rtx, VOIDmode, modifier);
+ return const0_rtx;
+ }
+ else if ((code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR)
+ && ! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 1)))
+ /* If the second operand has no side effects, just evaluate
+ the first. */
+ return expand_expr (TREE_OPERAND (exp, 0), const0_rtx,
+ VOIDmode, modifier);
+
+ target = 0;
+ }
+
+ /* If will do cse, generate all results into pseudo registers
+ since 1) that allows cse to find more things
+ and 2) otherwise cse could produce an insn the machine
+ cannot support. */
+
+ if (! cse_not_expected && mode != BLKmode && target
+ && (GET_CODE (target) != REG || REGNO (target) < FIRST_PSEUDO_REGISTER))
+ target = subtarget;
+
+ switch (code)
+ {
+ case LABEL_DECL:
+ {
+ tree function = decl_function_context (exp);
+ /* Handle using a label in a containing function. */
+ if (function != current_function_decl && function != 0)
+ {
+ struct function *p = find_function_data (function);
+ /* Allocate in the memory associated with the function
+ that the label is in. */
+ push_obstacks (p->function_obstack,
+ p->function_maybepermanent_obstack);
+
+ p->forced_labels = gen_rtx (EXPR_LIST, VOIDmode,
+ label_rtx (exp), p->forced_labels);
+ pop_obstacks ();
+ }
+ else if (modifier == EXPAND_INITIALIZER)
+ forced_labels = gen_rtx (EXPR_LIST, VOIDmode,
+ label_rtx (exp), forced_labels);
+ temp = gen_rtx (MEM, FUNCTION_MODE,
+ gen_rtx (LABEL_REF, Pmode, label_rtx (exp)));
+ if (function != current_function_decl && function != 0)
+ LABEL_REF_NONLOCAL_P (XEXP (temp, 0)) = 1;
+ return temp;
+ }
+
+ case PARM_DECL:
+ if (DECL_RTL (exp) == 0)
+ {
+ error_with_decl (exp, "prior parameter's size depends on `%s'");
+ return CONST0_RTX (mode);
+ }
+
+ /* ... fall through ... */
+
+ case VAR_DECL:
+ /* If a static var's type was incomplete when the decl was written,
+ but the type is complete now, lay out the decl now. */
+ if (DECL_SIZE (exp) == 0 && TYPE_SIZE (TREE_TYPE (exp)) != 0
+ && (TREE_STATIC (exp) || DECL_EXTERNAL (exp)))
+ {
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ layout_decl (exp, 0);
+ PUT_MODE (DECL_RTL (exp), DECL_MODE (exp));
+ pop_obstacks ();
+ }
+
+ /* ... fall through ... */
+
+ case FUNCTION_DECL:
+ case RESULT_DECL:
+ if (DECL_RTL (exp) == 0)
+ abort ();
+
+ /* Ensure the variable is marked as used even if it doesn't go through
+ a parser. If it hasn't been used yet, write out an external
+ definition. */
+ if (! TREE_USED (exp))
+ {
+ assemble_external (exp);
+ TREE_USED (exp) = 1;
+ }
+
+ /* Handle variables inherited from containing functions. */
+ context = decl_function_context (exp);
+
+ /* We treat inline_function_decl as an alias for the current function
+ because that is the inline function whose vars, types, etc.
+ are being merged into the current function.
+ See expand_inline_function. */
+
+ if (context != 0 && context != current_function_decl
+ && context != inline_function_decl
+ /* If var is static, we don't need a static chain to access it. */
+ && ! (GET_CODE (DECL_RTL (exp)) == MEM
+ && CONSTANT_P (XEXP (DECL_RTL (exp), 0))))
+ {
+ rtx addr;
+
+ /* Mark as non-local and addressable. */
+ DECL_NONLOCAL (exp) = 1;
+ mark_addressable (exp);
+ if (GET_CODE (DECL_RTL (exp)) != MEM)
+ abort ();
+ addr = XEXP (DECL_RTL (exp), 0);
+ if (GET_CODE (addr) == MEM)
+ addr = gen_rtx (MEM, Pmode,
+ fix_lexical_addr (XEXP (addr, 0), exp));
+ else
+ addr = fix_lexical_addr (addr, exp);
+ return change_address (DECL_RTL (exp), mode, addr);
+ }
+
+ /* This is the case of an array whose size is to be determined
+ from its initializer, while the initializer is still being parsed.
+ See expand_decl. */
+
+ if (GET_CODE (DECL_RTL (exp)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (exp), 0)) == REG)
+ return change_address (DECL_RTL (exp), GET_MODE (DECL_RTL (exp)),
+ XEXP (DECL_RTL (exp), 0));
+
+ /* If DECL_RTL is memory, we are in the normal case: if the address
+ is not valid, or if it is not a register and -fforce-addr is
+ specified, get the address into a register. */
+
+ if (GET_CODE (DECL_RTL (exp)) == MEM
+ && modifier != EXPAND_CONST_ADDRESS
+ && modifier != EXPAND_SUM
+ && modifier != EXPAND_INITIALIZER
+ && (! memory_address_p (DECL_MODE (exp), XEXP (DECL_RTL (exp), 0))
+ || (flag_force_addr
+ && GET_CODE (XEXP (DECL_RTL (exp), 0)) != REG)))
+ return change_address (DECL_RTL (exp), VOIDmode,
+ copy_rtx (XEXP (DECL_RTL (exp), 0)));
+
+ /* If the mode of DECL_RTL does not match that of the decl, it
+ must be a promoted value. We return a SUBREG of the wanted mode,
+ but mark it so that we know that it was already extended. */
+
+ if (GET_CODE (DECL_RTL (exp)) == REG
+ && GET_MODE (DECL_RTL (exp)) != mode)
+ {
+ /* Get the signedness used for this variable. Ensure we get the
+ same mode we got when the variable was declared. */
+ if (GET_MODE (DECL_RTL (exp))
+ != promote_mode (type, DECL_MODE (exp), &unsignedp, 0))
+ abort ();
+
+ temp = gen_rtx (SUBREG, mode, DECL_RTL (exp), 0);
+ SUBREG_PROMOTED_VAR_P (temp) = 1;
+ SUBREG_PROMOTED_UNSIGNED_P (temp) = unsignedp;
+ return temp;
+ }
+
+ return DECL_RTL (exp);
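+
+ /* Illustration (a sketch, not from this file): on a target whose
+ PROMOTE_MODE widens HImode locals to SImode, a variable declared
+ `short s' lives in an SImode pseudo, and this code hands back
+ (subreg:HI (reg:SI n) 0) with SUBREG_PROMOTED_VAR_P set, so later
+ code knows the value is already extended and can skip a redundant
+ extend insn. */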
+
+ case INTEGER_CST:
+ return immed_double_const (TREE_INT_CST_LOW (exp),
+ TREE_INT_CST_HIGH (exp),
+ mode);
+
+ case CONST_DECL:
+ return expand_expr (DECL_INITIAL (exp), target, VOIDmode, 0);
+
+ case REAL_CST:
+ /* If optimized, generate immediate CONST_DOUBLE
+ which will be turned into memory by reload if necessary.
+
+ We used to force a register so that loop.c could see it. But
+ this does not allow gen_* patterns to perform optimizations with
+ the constants. It also produces two insns in cases like "x = 1.0;".
+ On most machines, floating-point constants are not permitted in
+ many insns, so we'd end up copying it to a register in any case.
+
+ Now, we do the copying in expand_binop, if appropriate. */
+ return immed_real_const (exp);
+
+ case COMPLEX_CST:
+ case STRING_CST:
+ if (! TREE_CST_RTL (exp))
+ output_constant_def (exp);
+
+ /* TREE_CST_RTL probably contains a constant address.
+ On RISC machines where a constant address isn't valid,
+ make some insns to get that address into a register. */
+ if (GET_CODE (TREE_CST_RTL (exp)) == MEM
+ && modifier != EXPAND_CONST_ADDRESS
+ && modifier != EXPAND_INITIALIZER
+ && modifier != EXPAND_SUM
+ && (! memory_address_p (mode, XEXP (TREE_CST_RTL (exp), 0))
+ || (flag_force_addr
+ && GET_CODE (XEXP (TREE_CST_RTL (exp), 0)) != REG)))
+ return change_address (TREE_CST_RTL (exp), VOIDmode,
+ copy_rtx (XEXP (TREE_CST_RTL (exp), 0)));
+ return TREE_CST_RTL (exp);
+
+ case SAVE_EXPR:
+ context = decl_function_context (exp);
+
+ /* We treat inline_function_decl as an alias for the current function
+ because that is the inline function whose vars, types, etc.
+ are being merged into the current function.
+ See expand_inline_function. */
+ if (context == current_function_decl || context == inline_function_decl)
+ context = 0;
+
+ /* If this is non-local, handle it. */
+ if (context)
+ {
+ temp = SAVE_EXPR_RTL (exp);
+ if (temp && GET_CODE (temp) == REG)
+ {
+ put_var_into_stack (exp);
+ temp = SAVE_EXPR_RTL (exp);
+ }
+ if (temp == 0 || GET_CODE (temp) != MEM)
+ abort ();
+ return change_address (temp, mode,
+ fix_lexical_addr (XEXP (temp, 0), exp));
+ }
+ if (SAVE_EXPR_RTL (exp) == 0)
+ {
+ if (mode == BLKmode)
+ {
+ temp
+ = assign_stack_temp (mode, int_size_in_bytes (type), 0);
+ MEM_IN_STRUCT_P (temp) = AGGREGATE_TYPE_P (type);
+ }
+ else
+ temp = gen_reg_rtx (promote_mode (type, mode, &unsignedp, 0));
+
+ SAVE_EXPR_RTL (exp) = temp;
+ if (!optimize && GET_CODE (temp) == REG)
+ save_expr_regs = gen_rtx (EXPR_LIST, VOIDmode, temp,
+ save_expr_regs);
+
+ /* If the mode of TEMP does not match that of the expression, it
+ must be a promoted value. We pass store_expr a SUBREG of the
+ wanted mode but mark it so that we know that it was already
+ extended. Note that `unsignedp' was modified above in
+ this case. */
+
+ if (GET_CODE (temp) == REG && GET_MODE (temp) != mode)
+ {
+ temp = gen_rtx (SUBREG, mode, SAVE_EXPR_RTL (exp), 0);
+ SUBREG_PROMOTED_VAR_P (temp) = 1;
+ SUBREG_PROMOTED_UNSIGNED_P (temp) = unsignedp;
+ }
+
+ store_expr (TREE_OPERAND (exp, 0), temp, 0);
+ }
+
+ /* If the mode of SAVE_EXPR_RTL does not match that of the expression, it
+ must be a promoted value. We return a SUBREG of the wanted mode,
+ but mark it so that we know that it was already extended. */
+
+ if (GET_CODE (SAVE_EXPR_RTL (exp)) == REG
+ && GET_MODE (SAVE_EXPR_RTL (exp)) != mode)
+ {
+ /* Compute the signedness and make the proper SUBREG. */
+ promote_mode (type, mode, &unsignedp, 0);
+ temp = gen_rtx (SUBREG, mode, SAVE_EXPR_RTL (exp), 0);
+ SUBREG_PROMOTED_VAR_P (temp) = 1;
+ SUBREG_PROMOTED_UNSIGNED_P (temp) = unsignedp;
+ return temp;
+ }
+
+ return SAVE_EXPR_RTL (exp);
+
+ case PLACEHOLDER_EXPR:
+ /* If there is an object on the head of the placeholder list,
+ see if some object in its references is of type TYPE. For
+ further information, see tree.def. */
+ if (placeholder_list)
+ {
+ tree object;
+ tree old_list = placeholder_list;
+
+ for (object = TREE_PURPOSE (placeholder_list);
+ TREE_TYPE (object) != type
+ && (TREE_CODE_CLASS (TREE_CODE (object)) == 'r'
+ || TREE_CODE_CLASS (TREE_CODE (object)) == '1'
+ || TREE_CODE_CLASS (TREE_CODE (object)) == '2'
+ || TREE_CODE_CLASS (TREE_CODE (object)) == 'e');
+ object = TREE_OPERAND (object, 0))
+ ;
+
+ if (object && TREE_TYPE (object) == type)
+ {
+ /* Expand this object skipping the list entries before
+ it was found in case it is also a PLACEHOLDER_EXPR.
+ In that case, we want to translate it using subsequent
+ entries. */
+ placeholder_list = TREE_CHAIN (placeholder_list);
+ temp = expand_expr (object, original_target, tmode, modifier);
+ placeholder_list = old_list;
+ return temp;
+ }
+ }
+
+ /* We can't find the object or there was a missing WITH_RECORD_EXPR. */
+ abort ();
+
+ case WITH_RECORD_EXPR:
+ /* Put the object on the placeholder list, expand our first operand,
+ and pop the list. */
+ placeholder_list = tree_cons (TREE_OPERAND (exp, 1), NULL_TREE,
+ placeholder_list);
+ target = expand_expr (TREE_OPERAND (exp, 0), original_target,
+ tmode, modifier);
+ placeholder_list = TREE_CHAIN (placeholder_list);
+ return target;
+
+ case EXIT_EXPR:
+ expand_exit_loop_if_false (NULL_PTR,
+ invert_truthvalue (TREE_OPERAND (exp, 0)));
+ return const0_rtx;
+
+ case LOOP_EXPR:
+ push_temp_slots ();
+ expand_start_loop (1);
+ expand_expr_stmt (TREE_OPERAND (exp, 0));
+ expand_end_loop ();
+ pop_temp_slots ();
+
+ return const0_rtx;
+
+ case BIND_EXPR:
+ {
+ tree vars = TREE_OPERAND (exp, 0);
+ int vars_need_expansion = 0;
+
+ /* Need to open a binding contour here because
+ if there are any cleanups they must be contained here. */
+ expand_start_bindings (0);
+
+ /* Mark the corresponding BLOCK for output in its proper place. */
+ if (TREE_OPERAND (exp, 2) != 0
+ && ! TREE_USED (TREE_OPERAND (exp, 2)))
+ insert_block (TREE_OPERAND (exp, 2));
+
+ /* If VARS have not yet been expanded, expand them now. */
+ while (vars)
+ {
+ if (DECL_RTL (vars) == 0)
+ {
+ vars_need_expansion = 1;
+ expand_decl (vars);
+ }
+ expand_decl_init (vars);
+ vars = TREE_CHAIN (vars);
+ }
+
+ temp = expand_expr (TREE_OPERAND (exp, 1), target, tmode, modifier);
+
+ expand_end_bindings (TREE_OPERAND (exp, 0), 0, 0);
+
+ return temp;
+ }
+
+ case RTL_EXPR:
+ if (RTL_EXPR_SEQUENCE (exp) == const0_rtx)
+ abort ();
+ emit_insns (RTL_EXPR_SEQUENCE (exp));
+ RTL_EXPR_SEQUENCE (exp) = const0_rtx;
+ preserve_rtl_expr_result (RTL_EXPR_RTL (exp));
+ free_temps_for_rtl_expr (exp);
+ return RTL_EXPR_RTL (exp);
+
+ case CONSTRUCTOR:
+ /* If we don't need the result, just ensure we evaluate any
+ subexpressions. */
+ if (ignore)
+ {
+ tree elt;
+ for (elt = CONSTRUCTOR_ELTS (exp); elt; elt = TREE_CHAIN (elt))
+ expand_expr (TREE_VALUE (elt), const0_rtx, VOIDmode, 0);
+ return const0_rtx;
+ }
+
+ /* All elts simple constants => refer to a constant in memory. But
+ if this is a non-BLKmode mode, let it store a field at a time
+ since that should make a CONST_INT or CONST_DOUBLE when we
+ fold. Likewise, if we have a target we can use, it is best to
+ store directly into the target unless the type is large enough
+ that memcpy will be used. If we are making an initializer and
+ all operands are constant, put it in memory as well. */
+ else if ((TREE_STATIC (exp)
+ && ((mode == BLKmode
+ && ! (target != 0 && safe_from_p (target, exp)))
+ || TREE_ADDRESSABLE (exp)
+ || (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && (move_by_pieces_ninsns
+ (TREE_INT_CST_LOW (TYPE_SIZE (type)),
+ TYPE_ALIGN (type))
+ > MOVE_RATIO))))
+ || (modifier == EXPAND_INITIALIZER && TREE_CONSTANT (exp)))
+ {
+ rtx constructor = output_constant_def (exp);
+ if (modifier != EXPAND_CONST_ADDRESS
+ && modifier != EXPAND_INITIALIZER
+ && modifier != EXPAND_SUM
+ && (! memory_address_p (GET_MODE (constructor),
+ XEXP (constructor, 0))
+ || (flag_force_addr
+ && GET_CODE (XEXP (constructor, 0)) != REG)))
+ constructor = change_address (constructor, VOIDmode,
+ XEXP (constructor, 0));
+ return constructor;
+ }
+
+ else
+ {
+ if (target == 0 || ! safe_from_p (target, exp))
+ {
+ if (mode != BLKmode && ! TREE_ADDRESSABLE (exp))
+ target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
+ else
+ {
+ target
+ = assign_stack_temp (mode, int_size_in_bytes (type), 0);
+ if (AGGREGATE_TYPE_P (type))
+ MEM_IN_STRUCT_P (target) = 1;
+ }
+ }
+ store_constructor (exp, target);
+ return target;
+ }
+
+ case INDIRECT_REF:
+ {
+ tree exp1 = TREE_OPERAND (exp, 0);
+ tree exp2;
+
+ /* A SAVE_EXPR as the address in an INDIRECT_REF is generated
+ for *PTR += ANYTHING where PTR is put inside the SAVE_EXPR.
+ This code has the same general effect as simply doing
+ expand_expr on the save expr, except that the expression PTR
+ is computed for use as a memory address. This means different
+ code, suitable for indexing, may be generated. */
+ if (TREE_CODE (exp1) == SAVE_EXPR
+ && SAVE_EXPR_RTL (exp1) == 0
+ && TREE_CODE (exp2 = TREE_OPERAND (exp1, 0)) != ERROR_MARK
+ && TYPE_MODE (TREE_TYPE (exp1)) == Pmode
+ && TYPE_MODE (TREE_TYPE (exp2)) == Pmode)
+ {
+ temp = expand_expr (TREE_OPERAND (exp1, 0), NULL_RTX,
+ VOIDmode, EXPAND_SUM);
+ op0 = memory_address (mode, temp);
+ op0 = copy_all_regs (op0);
+ SAVE_EXPR_RTL (exp1) = op0;
+ }
+ else
+ {
+ op0 = expand_expr (exp1, NULL_RTX, VOIDmode, EXPAND_SUM);
+ op0 = memory_address (mode, op0);
+ }
+
+ temp = gen_rtx (MEM, mode, op0);
+ /* If address was computed by addition,
+ mark this as an element of an aggregate. */
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == PLUS_EXPR
+ || (TREE_CODE (TREE_OPERAND (exp, 0)) == SAVE_EXPR
+ && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) == PLUS_EXPR)
+ || AGGREGATE_TYPE_P (TREE_TYPE (exp))
+ || (TREE_CODE (exp1) == ADDR_EXPR
+ && (exp2 = TREE_OPERAND (exp1, 0))
+ && AGGREGATE_TYPE_P (TREE_TYPE (exp2))))
+ MEM_IN_STRUCT_P (temp) = 1;
+ MEM_VOLATILE_P (temp) = TREE_THIS_VOLATILE (exp) | flag_volatile;
+#if 0 /* It is incorrect to set RTX_UNCHANGING_P here, because the fact that
+ a location is accessed through a pointer to const does not mean
+ that the value there can never change. */
+ RTX_UNCHANGING_P (temp) = TREE_READONLY (exp);
+#endif
+ return temp;
+ }
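+
+ /* Illustrative source pattern (a sketch, not from this file) that
+ reaches the SAVE_EXPR branch above:
+
+ void bump (int *p, int n) { *p += n; }
+
+ The front end wraps the pointer in a SAVE_EXPR so it is evaluated
+ only once; expanding it here through memory_address yields a form
+ suitable for use in addresses. */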
+
+ case ARRAY_REF:
+ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) != ARRAY_TYPE)
+ abort ();
+
+ {
+ tree array = TREE_OPERAND (exp, 0);
+ tree domain = TYPE_DOMAIN (TREE_TYPE (array));
+ tree low_bound = domain ? TYPE_MIN_VALUE (domain) : integer_zero_node;
+ tree index = TREE_OPERAND (exp, 1);
+ tree index_type = TREE_TYPE (index);
+ int i;
+
+ if (TREE_CODE (low_bound) != INTEGER_CST
+ && contains_placeholder_p (low_bound))
+ low_bound = build (WITH_RECORD_EXPR, sizetype, low_bound, exp);
+
+ /* Optimize the special-case of a zero lower bound.
+
+ We convert the low_bound to sizetype to avoid some problems
+ with constant folding. (E.g. suppose the lower bound is 1,
+ and its mode is QI. Without the conversion, (ARRAY
+ +(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1))
+ +INDEX), which becomes (ARRAY+255+INDEX). Oops!)
+
+ But sizetype isn't quite right either (especially if
+ the low bound is negative). FIXME */
+
+ if (! integer_zerop (low_bound))
+ index = fold (build (MINUS_EXPR, index_type, index,
+ convert (sizetype, low_bound)));
+
+ if ((TREE_CODE (index) != INTEGER_CST
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ && (! STRICT_ALIGNMENT || ! get_inner_unaligned_p (exp)))
+ {
+ /* Nonconstant array index or nonconstant element size, and
+ not an array in an unaligned (packed) structure field.
+ Generate the tree for *(&array+index) and expand that,
+ except do it in a language-independent way
+ and don't complain about non-lvalue arrays.
+ `mark_addressable' should already have been called
+ for any array for which this case will be reached. */
+
+ /* Don't forget the const or volatile flag from the array
+ element. */
+ tree variant_type = build_type_variant (type,
+ TREE_READONLY (exp),
+ TREE_THIS_VOLATILE (exp));
+ tree array_adr = build1 (ADDR_EXPR,
+ build_pointer_type (variant_type), array);
+ tree elt;
+ tree size = size_in_bytes (type);
+
+ /* Convert the integer argument to a type the same size as a
+ pointer so the multiply won't overflow spuriously. */
+ if (TYPE_PRECISION (index_type) != POINTER_SIZE)
+ index = convert (type_for_size (POINTER_SIZE, 0), index);
+
+ if (TREE_CODE (size) != INTEGER_CST
+ && contains_placeholder_p (size))
+ size = build (WITH_RECORD_EXPR, sizetype, size, exp);
+
+ /* Don't think the address has side effects
+ just because the array does.
+ (In some cases the address might have side effects,
+ and we fail to record that fact here. However, it should not
+ matter, since expand_expr should not care.) */
+ TREE_SIDE_EFFECTS (array_adr) = 0;
+
+ elt = build1 (INDIRECT_REF, type,
+ fold (build (PLUS_EXPR,
+ TYPE_POINTER_TO (variant_type),
+ array_adr,
+ fold (build (MULT_EXPR,
+ TYPE_POINTER_TO (variant_type),
+ index, size)))));
+
+ /* Volatility, etc., of new expression is same as old
+ expression. */
+ TREE_SIDE_EFFECTS (elt) = TREE_SIDE_EFFECTS (exp);
+ TREE_THIS_VOLATILE (elt) = TREE_THIS_VOLATILE (exp);
+ TREE_READONLY (elt) = TREE_READONLY (exp);
+
+ return expand_expr (elt, target, tmode, modifier);
+ }
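+
+ /* A hedged source-level sketch of the rewrite above, assuming
+ 4-byte int elements; the function is illustrative, not GCC's:
+
+ int get (int a[], long i)
+ {
+ return *(int *) ((char *) a + i * sizeof (int));
+ }
+
+ computes the same value as a[i], mirroring the
+ *(&array + index*size) tree built here. */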
+
+ /* Fold an expression like: "foo"[2].
+ This is not done in fold so it won't happen inside &. */
+
+ if (TREE_CODE (array) == STRING_CST
+ && TREE_CODE (index) == INTEGER_CST
+ && !TREE_INT_CST_HIGH (index)
+ && (i = TREE_INT_CST_LOW (index)) < TREE_STRING_LENGTH (array)
+ && GET_MODE_CLASS (mode) == MODE_INT)
+ return GEN_INT (TREE_STRING_POINTER (array)[i]);
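+
+ /* In source terms, the fold above turns
+
+ char c = "foo"[2];
+
+ into the character constant 'o', with no memory reference. */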
+
+ /* If this is a constant index into a constant array,
+ just get the value from the array. Handle both the cases when
+ we have an explicit constructor and when our operand is a variable
+ that was declared const. */
+
+ if (TREE_CODE (array) == CONSTRUCTOR && ! TREE_SIDE_EFFECTS (array))
+ {
+ if (TREE_CODE (index) == INTEGER_CST
+ && TREE_INT_CST_HIGH (index) == 0)
+ {
+ tree elem = CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0));
+
+ i = TREE_INT_CST_LOW (index);
+ while (elem && i--)
+ elem = TREE_CHAIN (elem);
+ if (elem)
+ return expand_expr (fold (TREE_VALUE (elem)), target,
+ tmode, modifier);
+ }
+ }
+
+ else if (optimize >= 1
+ && TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
+ && TREE_CODE (array) == VAR_DECL && DECL_INITIAL (array)
+ && TREE_CODE (DECL_INITIAL (array)) != ERROR_MARK)
+ {
+ if (TREE_CODE (index) == INTEGER_CST
+ && TREE_INT_CST_HIGH (index) == 0)
+ {
+ tree init = DECL_INITIAL (array);
+
+ i = TREE_INT_CST_LOW (index);
+ if (TREE_CODE (init) == CONSTRUCTOR)
+ {
+ tree elem = CONSTRUCTOR_ELTS (init);
+
+ while (elem
+ && !tree_int_cst_equal (TREE_PURPOSE (elem), index))
+ elem = TREE_CHAIN (elem);
+ if (elem)
+ return expand_expr (fold (TREE_VALUE (elem)), target,
+ tmode, modifier);
+ }
+ else if (TREE_CODE (init) == STRING_CST
+ && i < TREE_STRING_LENGTH (init))
+ return GEN_INT (TREE_STRING_POINTER (init)[i]);
+ }
+ }
+ }
+
+ /* Treat array-ref with constant index as a component-ref. */
+
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+ /* If the operand is a CONSTRUCTOR, we can just extract the
+ appropriate field if it is present. */
+ if (code != ARRAY_REF
+ && TREE_CODE (TREE_OPERAND (exp, 0)) == CONSTRUCTOR)
+ {
+ tree elt;
+
+ for (elt = CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0)); elt;
+ elt = TREE_CHAIN (elt))
+ if (TREE_PURPOSE (elt) == TREE_OPERAND (exp, 1))
+ return expand_expr (TREE_VALUE (elt), target, tmode, modifier);
+ }
+
+ {
+ enum machine_mode mode1;
+ int bitsize;
+ int bitpos;
+ tree offset;
+ int volatilep = 0;
+ tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
+ &mode1, &unsignedp, &volatilep);
+ int alignment;
+
+ /* If we got back the original object, something is wrong. Perhaps
+ we are evaluating an expression too early. In any event, don't
+ infinitely recurse. */
+ if (tem == exp)
+ abort ();
+
+ /* In some cases, we will be offsetting OP0's address by a constant.
+ So get it as a sum, if possible. If we will be using it
+ directly in an insn, we validate it. */
+ op0 = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_SUM);
+
+ /* If this is a constant, put it into a register if it is a
+ legitimate constant and into memory if it isn't. */
+ if (CONSTANT_P (op0))
+ {
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (tem));
+ if (mode != BLKmode && LEGITIMATE_CONSTANT_P (op0))
+ op0 = force_reg (mode, op0);
+ else
+ op0 = validize_mem (force_const_mem (mode, op0));
+ }
+
+ alignment = TYPE_ALIGN (TREE_TYPE (tem)) / BITS_PER_UNIT;
+ if (offset != 0)
+ {
+ rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0);
+
+ if (GET_CODE (op0) != MEM)
+ abort ();
+ op0 = change_address (op0, VOIDmode,
+ gen_rtx (PLUS, Pmode, XEXP (op0, 0),
+ force_reg (Pmode, offset_rtx)));
+ /* If we have a variable offset, the known alignment
+ is only that of the innermost structure containing the field.
+ (Actually, we could sometimes do better by using the
+ size of an element of the innermost array, but no need.) */
+ if (TREE_CODE (exp) == COMPONENT_REF
+ || TREE_CODE (exp) == BIT_FIELD_REF)
+ alignment = (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ / BITS_PER_UNIT);
+ }
+
+ /* Don't forget about volatility even if this is a bitfield. */
+ if (GET_CODE (op0) == MEM && volatilep && ! MEM_VOLATILE_P (op0))
+ {
+ op0 = copy_rtx (op0);
+ MEM_VOLATILE_P (op0) = 1;
+ }
+
+ /* In cases where an aligned union has an unaligned object
+ as a field, we might be extracting a BLKmode value from
+ an integer-mode (e.g., SImode) object. Handle this case
+ by doing the extract into an object as wide as the field
+ (which we know to be the width of a basic mode), then
+ storing into memory, and changing the mode to BLKmode. */
+ if (mode1 == VOIDmode
+ || (mode1 != BLKmode && ! direct_load[(int) mode1]
+ && modifier != EXPAND_CONST_ADDRESS
+ && modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
+ || GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG
+ /* If the field isn't aligned enough to fetch as a memref,
+ fetch it as a bit field. */
+ || (STRICT_ALIGNMENT
+ && TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode))
+ || (STRICT_ALIGNMENT && bitpos % GET_MODE_ALIGNMENT (mode) != 0))
+ {
+ enum machine_mode ext_mode = mode;
+
+ if (ext_mode == BLKmode)
+ ext_mode = mode_for_size (bitsize, MODE_INT, 1);
+
+ if (ext_mode == BLKmode)
+ abort ();
+
+ op0 = extract_bit_field (validize_mem (op0), bitsize, bitpos,
+ unsignedp, target, ext_mode, ext_mode,
+ alignment,
+ int_size_in_bytes (TREE_TYPE (tem)));
+ if (mode == BLKmode)
+ {
+ rtx new = assign_stack_temp (ext_mode,
+ bitsize / BITS_PER_UNIT, 0);
+
+ emit_move_insn (new, op0);
+ op0 = copy_rtx (new);
+ PUT_MODE (op0, BLKmode);
+ MEM_IN_STRUCT_P (op0) = 1;
+ }
+
+ return op0;
+ }
+
+ /* Get a reference to just this component. */
+ if (modifier == EXPAND_CONST_ADDRESS
+ || modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
+ op0 = gen_rtx (MEM, mode1, plus_constant (XEXP (op0, 0),
+ (bitpos / BITS_PER_UNIT)));
+ else
+ op0 = change_address (op0, mode1,
+ plus_constant (XEXP (op0, 0),
+ (bitpos / BITS_PER_UNIT)));
+ MEM_IN_STRUCT_P (op0) = 1;
+ MEM_VOLATILE_P (op0) |= volatilep;
+ if (mode == mode1 || mode1 == BLKmode || mode1 == tmode)
+ return op0;
+ if (target == 0)
+ target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
+ convert_move (target, op0, unsignedp);
+ return target;
+ }
+
+ case OFFSET_REF:
+ {
+ tree base = build1 (ADDR_EXPR, type, TREE_OPERAND (exp, 0));
+ tree addr = build (PLUS_EXPR, type, base, TREE_OPERAND (exp, 1));
+ op0 = expand_expr (addr, NULL_RTX, VOIDmode, EXPAND_SUM);
+ temp = gen_rtx (MEM, mode, memory_address (mode, op0));
+ MEM_IN_STRUCT_P (temp) = 1;
+ MEM_VOLATILE_P (temp) = TREE_THIS_VOLATILE (exp);
+#if 0 /* It is incorrect to set RTX_UNCHANGING_P here, because the fact that
+ a location is accessed through a pointer to const does not mean
+ that the value there can never change. */
+ RTX_UNCHANGING_P (temp) = TREE_READONLY (exp);
+#endif
+ return temp;
+ }
+
+ /* Intended for a reference to a buffer of a file-object in Pascal.
+ But it's not certain that a special tree code will really be
+ necessary for these. INDIRECT_REF might work for them. */
+ case BUFFER_REF:
+ abort ();
+
+ case IN_EXPR:
+ {
+ /* Pascal set IN expression.
+
+ Algorithm:
+ rlo = set_low - (set_low%bits_per_word);
+ the_word = set [ (index - rlo)/bits_per_word ];
+ bit_index = index % bits_per_word;
+ bitmask = 1 << bit_index;
+ return !!(the_word & bitmask); */
+
+ tree set = TREE_OPERAND (exp, 0);
+ tree index = TREE_OPERAND (exp, 1);
+ int iunsignedp = TREE_UNSIGNED (TREE_TYPE (index));
+ tree set_type = TREE_TYPE (set);
+ tree set_low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (set_type));
+ tree set_high_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (set_type));
+ rtx index_val = expand_expr (index, 0, VOIDmode, 0);
+ rtx lo_r = expand_expr (set_low_bound, 0, VOIDmode, 0);
+ rtx hi_r = expand_expr (set_high_bound, 0, VOIDmode, 0);
+ rtx setval = expand_expr (set, 0, VOIDmode, 0);
+ rtx setaddr = XEXP (setval, 0);
+ enum machine_mode index_mode = TYPE_MODE (TREE_TYPE (index));
+ rtx rlow;
+ rtx diff, quo, rem, addr, bit, result;
+
+ preexpand_calls (exp);
+
+ /* If domain is empty, answer is no. Likewise if index is constant
+ and out of bounds. */
+ if (((TREE_CODE (set_high_bound) == INTEGER_CST
+ && TREE_CODE (set_low_bound) == INTEGER_CST
+ && tree_int_cst_lt (set_high_bound, set_low_bound))
+ || (TREE_CODE (index) == INTEGER_CST
+ && TREE_CODE (set_low_bound) == INTEGER_CST
+ && tree_int_cst_lt (index, set_low_bound))
+ || (TREE_CODE (set_high_bound) == INTEGER_CST
+ && TREE_CODE (index) == INTEGER_CST
+ && tree_int_cst_lt (set_high_bound, index))))
+ return const0_rtx;
+
+ if (target == 0)
+ target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
+
+ /* If we get here, we have to generate the code for both cases
+ (in range and out of range). */
+
+ op0 = gen_label_rtx ();
+ op1 = gen_label_rtx ();
+
+ if (! (GET_CODE (index_val) == CONST_INT
+ && GET_CODE (lo_r) == CONST_INT))
+ {
+ emit_cmp_insn (index_val, lo_r, LT, NULL_RTX,
+ GET_MODE (index_val), iunsignedp, 0);
+ emit_jump_insn (gen_blt (op1));
+ }
+
+ if (! (GET_CODE (index_val) == CONST_INT
+ && GET_CODE (hi_r) == CONST_INT))
+ {
+ emit_cmp_insn (index_val, hi_r, GT, NULL_RTX,
+ GET_MODE (index_val), iunsignedp, 0);
+ emit_jump_insn (gen_bgt (op1));
+ }
+
+ /* Calculate the element number of bit zero in the first word
+ of the set. */
+ if (GET_CODE (lo_r) == CONST_INT)
+ rlow = GEN_INT (INTVAL (lo_r)
+ & ~ ((HOST_WIDE_INT) 1 << BITS_PER_UNIT));
+ else
+ rlow = expand_binop (index_mode, and_optab, lo_r,
+ GEN_INT (~((HOST_WIDE_INT) 1 << BITS_PER_UNIT)),
+ NULL_RTX, iunsignedp, OPTAB_LIB_WIDEN);
+
+ diff = expand_binop (index_mode, sub_optab, index_val, rlow,
+ NULL_RTX, iunsignedp, OPTAB_LIB_WIDEN);
+
+ quo = expand_divmod (0, TRUNC_DIV_EXPR, index_mode, diff,
+ GEN_INT (BITS_PER_UNIT), NULL_RTX, iunsignedp);
+ rem = expand_divmod (1, TRUNC_MOD_EXPR, index_mode, index_val,
+ GEN_INT (BITS_PER_UNIT), NULL_RTX, iunsignedp);
+
+ addr = memory_address (byte_mode,
+ expand_binop (index_mode, add_optab, diff,
+ setaddr, NULL_RTX, iunsignedp,
+ OPTAB_LIB_WIDEN));
+
+ /* Extract the bit we want to examine. */
+ bit = expand_shift (RSHIFT_EXPR, byte_mode,
+ gen_rtx (MEM, byte_mode, addr),
+ make_tree (TREE_TYPE (index), rem),
+ NULL_RTX, 1);
+ result = expand_binop (byte_mode, and_optab, bit, const1_rtx,
+ GET_MODE (target) == byte_mode ? target : 0,
+ 1, OPTAB_LIB_WIDEN);
+
+ if (result != target)
+ convert_move (target, result, 1);
+
+ /* Output the code to handle the out-of-range case. */
+ emit_jump (op0);
+ emit_label (op1);
+ emit_move_insn (target, const0_rtx);
+ emit_label (op0);
+ return target;
+ }
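+
+ /* A minimal C model of the membership algorithm from the comment
+ above, taking bits_per_word as 8 to match the BITS_PER_UNIT
+ arithmetic; the function and names are illustrative:
+
+ int set_member (const unsigned char *set, int set_low, int index)
+ {
+ int rlo = set_low - (set_low % 8);
+ unsigned char the_word = set[(index - rlo) / 8];
+ int bit_index = index % 8;
+ return (the_word >> bit_index) & 1;
+ }
+
+ The bounds checks emitted above guard this lookup. */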
+
+ case WITH_CLEANUP_EXPR:
+ if (RTL_EXPR_RTL (exp) == 0)
+ {
+ RTL_EXPR_RTL (exp)
+ = expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier);
+ cleanups_this_call
+ = tree_cons (NULL_TREE, TREE_OPERAND (exp, 2), cleanups_this_call);
+ /* That's it for this cleanup. */
+ TREE_OPERAND (exp, 2) = 0;
+ (*interim_eh_hook) (NULL_TREE);
+ }
+ return RTL_EXPR_RTL (exp);
+
+ case CLEANUP_POINT_EXPR:
+ {
+ extern int temp_slot_level;
+ tree old_cleanups = cleanups_this_call;
+ int old_temp_level = target_temp_slot_level;
+ push_temp_slots ();
+ target_temp_slot_level = temp_slot_level;
+ op0 = expand_expr (TREE_OPERAND (exp, 0), target, VOIDmode, modifier);
+ expand_cleanups_to (old_cleanups);
+ preserve_temp_slots (op0);
+ free_temp_slots ();
+ pop_temp_slots ();
+ target_temp_slot_level = old_temp_level;
+ }
+ return op0;
+
+ case CALL_EXPR:
+ /* Check for a built-in function. */
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
+ && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
+ == FUNCTION_DECL)
+ && DECL_BUILT_IN (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))
+ return expand_builtin (exp, target, subtarget, tmode, ignore);
+
+ /* If this call was expanded already by preexpand_calls,
+ just return the result we got. */
+ if (CALL_EXPR_RTL (exp) != 0)
+ return CALL_EXPR_RTL (exp);
+
+ return expand_call (exp, target, ignore);
+
+ case NON_LVALUE_EXPR:
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case REFERENCE_EXPR:
+ if (mode == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ {
+ op0 = expand_expr (TREE_OPERAND (exp, 0), target, VOIDmode,
+ modifier);
+
+ /* If the signedness of the conversion differs and OP0 is
+ a promoted SUBREG, clear that indication since we now
+ have to do the proper extension. */
+ if (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))) != unsignedp
+ && GET_CODE (op0) == SUBREG)
+ SUBREG_PROMOTED_VAR_P (op0) = 0;
+
+ return op0;
+ }
+
+ if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree valtype = TREE_TYPE (TREE_OPERAND (exp, 0));
+ if (target == 0)
+ {
+ if (mode == BLKmode)
+ {
+ if (TYPE_SIZE (type) == 0
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ abort ();
+ target = assign_stack_temp (BLKmode,
+ (TREE_INT_CST_LOW (TYPE_SIZE (type))
+ + BITS_PER_UNIT - 1)
+ / BITS_PER_UNIT, 0);
+ }
+ else
+ target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
+ }
+
+ if (GET_CODE (target) == MEM)
+ /* Store data into beginning of memory target. */
+ store_expr (TREE_OPERAND (exp, 0),
+ change_address (target, TYPE_MODE (valtype), 0), 0);
+
+ else if (GET_CODE (target) == REG)
+ /* Store this field into a union of the proper type. */
+ store_field (target, GET_MODE_BITSIZE (TYPE_MODE (valtype)), 0,
+ TYPE_MODE (valtype), TREE_OPERAND (exp, 0),
+ VOIDmode, 0, 1,
+ int_size_in_bytes (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ else
+ abort ();
+
+ /* Return the entire union. */
+ return target;
+ }
+
+ op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, mode, 0);
+ if (GET_MODE (op0) == mode)
+ return op0;
+
+ /* If OP0 is a constant, just convert it into the proper mode. */
+ if (CONSTANT_P (op0))
+ return
+ convert_modes (mode, TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))),
+ op0, TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+
+ if (modifier == EXPAND_INITIALIZER)
+ return gen_rtx (unsignedp ? ZERO_EXTEND : SIGN_EXTEND, mode, op0);
+
+ if (flag_force_mem && GET_CODE (op0) == MEM)
+ op0 = copy_to_reg (op0);
+
+ if (target == 0)
+ return
+ convert_to_mode (mode, op0,
+ TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ else
+ convert_move (target, op0,
+ TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ return target;
+
+ case PLUS_EXPR:
+ /* We come here from MINUS_EXPR when the second operand is a constant. */
+ plus_expr:
+ this_optab = add_optab;
+
+ /* If we are adding a constant, an RTL_EXPR that is sp, fp, or ap, and
+ something else, make sure we add the register to the constant and
+ then to the other thing. This case can occur during strength
+ reduction and doing it this way will produce better code if the
+ frame pointer or argument pointer is eliminated.
+
+ fold-const.c will ensure that the constant is always in the inner
+ PLUS_EXPR, so the only case we need to do anything about is if
+ sp, ap, or fp is our second argument, in which case we must swap
+ the innermost first argument and our second argument. */
+
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == PLUS_EXPR
+ && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 1)) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (exp, 1)) == RTL_EXPR
+ && (RTL_EXPR_RTL (TREE_OPERAND (exp, 1)) == frame_pointer_rtx
+ || RTL_EXPR_RTL (TREE_OPERAND (exp, 1)) == stack_pointer_rtx
+ || RTL_EXPR_RTL (TREE_OPERAND (exp, 1)) == arg_pointer_rtx))
+ {
+ tree t = TREE_OPERAND (exp, 1);
+
+ TREE_OPERAND (exp, 1) = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ TREE_OPERAND (TREE_OPERAND (exp, 0), 0) = t;
+ }
+
+ /* If the result is to be Pmode and we are adding an integer to
+ something, we might be forming a constant. So try to use
+ plus_constant. If it produces a sum and we can't accept it,
+ use force_operand. This allows P = &ARR[const] to generate
+ efficient code on machines where a SYMBOL_REF is not a valid
+ address.
+
+ If this is an EXPAND_SUM call, always return the sum. */
+ if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER
+ || mode == Pmode)
+ {
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == INTEGER_CST
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && TREE_CONSTANT (TREE_OPERAND (exp, 1)))
+ {
+ op1 = expand_expr (TREE_OPERAND (exp, 1), subtarget, VOIDmode,
+ EXPAND_SUM);
+ op1 = plus_constant (op1, TREE_INT_CST_LOW (TREE_OPERAND (exp, 0)));
+ if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
+ op1 = force_operand (op1, target);
+ return op1;
+ }
+
+ else if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_INT
+ && TREE_CONSTANT (TREE_OPERAND (exp, 0)))
+ {
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode,
+ EXPAND_SUM);
+ if (! CONSTANT_P (op0))
+ {
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX,
+ VOIDmode, modifier);
+ /* Don't go to both_summands if modifier
+ says it's not right to return a PLUS. */
+ if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
+ goto binop2;
+ goto both_summands;
+ }
+ op0 = plus_constant (op0, TREE_INT_CST_LOW (TREE_OPERAND (exp, 1)));
+ if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
+ op0 = force_operand (op0, target);
+ return op0;
+ }
+ }
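+
+ /* A source-level illustration (assuming 4-byte int): for
+
+ static int arr[10];
+ int *p = &arr[5];
+
+ the plus_constant path folds the initializer to
+ (plus (symbol_ref arr) (const_int 20)) instead of emitting a
+ run-time addition. */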
+
+ /* No sense saving up arithmetic to be done
+ if it's all in the wrong mode to form part of an address.
+ And force_operand won't know whether to sign-extend or
+ zero-extend. */
+ if ((modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
+ || mode != Pmode)
+ goto binop;
+
+ preexpand_calls (exp);
+ if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1)))
+ subtarget = 0;
+
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, modifier);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, modifier);
+
+ both_summands:
+ /* Make sure any term that's a sum with a constant comes last. */
+ if (GET_CODE (op0) == PLUS
+ && CONSTANT_P (XEXP (op0, 1)))
+ {
+ temp = op0;
+ op0 = op1;
+ op1 = temp;
+ }
+ /* If adding to a sum including a constant,
+ associate it to put the constant outside. */
+ if (GET_CODE (op1) == PLUS
+ && CONSTANT_P (XEXP (op1, 1)))
+ {
+ rtx constant_term = const0_rtx;
+
+ temp = simplify_binary_operation (PLUS, mode, XEXP (op1, 0), op0);
+ if (temp != 0)
+ op0 = temp;
+ /* Ensure that MULT comes first if there is one. */
+ else if (GET_CODE (op0) == MULT)
+ op0 = gen_rtx (PLUS, mode, op0, XEXP (op1, 0));
+ else
+ op0 = gen_rtx (PLUS, mode, XEXP (op1, 0), op0);
+
+ /* Let's also eliminate constants from op0 if possible. */
+ op0 = eliminate_constant_term (op0, &constant_term);
+
+ /* CONSTANT_TERM and XEXP (op1, 1) are known to be constant, so
+ their sum should be a constant. Form it into OP1, since the
+ result we want will then be OP0 + OP1. */
+
+ temp = simplify_binary_operation (PLUS, mode, constant_term,
+ XEXP (op1, 1));
+ if (temp != 0)
+ op1 = temp;
+ else
+ op1 = gen_rtx (PLUS, mode, constant_term, XEXP (op1, 1));
+ }
+
+ /* Put a constant term last and put a multiplication first. */
+ if (CONSTANT_P (op0) || GET_CODE (op1) == MULT)
+ temp = op1, op1 = op0, op0 = temp;
+
+ temp = simplify_binary_operation (PLUS, mode, op0, op1);
+ return temp ? temp : gen_rtx (PLUS, mode, op0, op1);
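+
+ /* A worked instance of the association above: expanding
+ (a + 4) + (b + 8) moves each constant outward, leaving OP0 as
+ (plus a b) and OP1 as (const_int 12), so the result is
+ (plus (plus a b) (const_int 12)) with the constant term last. */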
+
+ case MINUS_EXPR:
+ /* For initializers, we are allowed to return a MINUS of two
+ symbolic constants; handle the case where both operands are
+ constant here. */
+ if ((modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
+ && really_constant_p (TREE_OPERAND (exp, 0))
+ && really_constant_p (TREE_OPERAND (exp, 1)))
+ {
+ rtx op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX,
+ VOIDmode, modifier);
+ rtx op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX,
+ VOIDmode, modifier);
+
+ /* If one operand is a CONST_INT, put it last. */
+ if (GET_CODE (op0) == CONST_INT)
+ temp = op0, op0 = op1, op1 = temp;
+
+ /* If the last operand is a CONST_INT, use plus_constant of
+ the negated constant. Else make the MINUS. */
+ if (GET_CODE (op1) == CONST_INT)
+ return plus_constant (op0, - INTVAL (op1));
+ else
+ return gen_rtx (MINUS, mode, op0, op1);
+ }
+ /* Convert A - const to A + (-const). */
+ if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
+ {
+ exp = build (PLUS_EXPR, type, TREE_OPERAND (exp, 0),
+ fold (build1 (NEGATE_EXPR, type,
+ TREE_OPERAND (exp, 1))));
+ goto plus_expr;
+ }
+ this_optab = sub_optab;
+ goto binop;
+
+ case MULT_EXPR:
+ preexpand_calls (exp);
+ /* If first operand is constant, swap them.
+ Thus the following special case checks need only
+ check the second operand. */
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == INTEGER_CST)
+ {
+ register tree t1 = TREE_OPERAND (exp, 0);
+ TREE_OPERAND (exp, 0) = TREE_OPERAND (exp, 1);
+ TREE_OPERAND (exp, 1) = t1;
+ }
+
+ /* Attempt to return something suitable for generating an
+ indexed address, for machines that support that. */
+
+ if (modifier == EXPAND_SUM && mode == Pmode
+ && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ {
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, EXPAND_SUM);
+
+ /* Apply distributive law if OP0 is x+c. */
+ if (GET_CODE (op0) == PLUS
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT)
+ return gen_rtx (PLUS, mode,
+ gen_rtx (MULT, mode, XEXP (op0, 0),
+ GEN_INT (TREE_INT_CST_LOW (TREE_OPERAND (exp, 1)))),
+ GEN_INT (TREE_INT_CST_LOW (TREE_OPERAND (exp, 1))
+ * INTVAL (XEXP (op0, 1))));
+
+ if (GET_CODE (op0) != REG)
+ op0 = force_operand (op0, NULL_RTX);
+ if (GET_CODE (op0) != REG)
+ op0 = copy_to_mode_reg (mode, op0);
+
+ return gen_rtx (MULT, mode, op0,
+ GEN_INT (TREE_INT_CST_LOW (TREE_OPERAND (exp, 1))));
+ }
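+
+ /* A worked instance of the distributive rewrite above, assuming
+ 4-byte int: the index expression (i + 3) * 4 becomes
+ (plus (mult i 4) (const_int 12)), a shape that fits indexed
+ addressing modes directly. */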
+
+ if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1)))
+ subtarget = 0;
+
+ /* Check for multiplying things that have been extended
+ from a narrower type. If this machine supports multiplying
+ in that narrower type with a result in the desired type,
+ do it that way, and avoid the explicit type-conversion. */
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == NOP_EXPR
+ && TREE_CODE (type) == INTEGER_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))
+ < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ && ((TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
+ && int_fits_type_p (TREE_OPERAND (exp, 1),
+ TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))
+ /* Don't use a widening multiply if a shift will do. */
+ && ((GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 1))))
+ > HOST_BITS_PER_WIDE_INT)
+ || exact_log2 (TREE_INT_CST_LOW (TREE_OPERAND (exp, 1))) < 0))
+ ||
+ (TREE_CODE (TREE_OPERAND (exp, 1)) == NOP_EXPR
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 1), 0)))
+ ==
+ TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))))
+ /* If both operands are extended, they must either both
+ be zero-extended or both be sign-extended. */
+ && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 1), 0)))
+ ==
+ TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))))))
+ {
+ enum machine_mode innermode
+ = TYPE_MODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)));
+ this_optab = (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))
+ ? umul_widen_optab : smul_widen_optab);
+ if (mode == GET_MODE_WIDER_MODE (innermode)
+ && this_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ op0 = expand_expr (TREE_OPERAND (TREE_OPERAND (exp, 0), 0),
+ NULL_RTX, VOIDmode, 0);
+ if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX,
+ VOIDmode, 0);
+ else
+ op1 = expand_expr (TREE_OPERAND (TREE_OPERAND (exp, 1), 0),
+ NULL_RTX, VOIDmode, 0);
+ goto binop2;
+ }
+ }
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ return expand_mult (mode, op0, op1, target, unsignedp);
+
+ case TRUNC_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ preexpand_calls (exp);
+ if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1)))
+ subtarget = 0;
+ /* Possible optimization: compute the dividend with EXPAND_SUM;
+ then, if the divisor is constant, we can optimize the case
+ where some terms of the dividend have coefficients divisible by it. */
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ return expand_divmod (0, code, mode, op0, op1, target, unsignedp);
+
+ case RDIV_EXPR:
+ this_optab = flodiv_optab;
+ goto binop;
+
+ case TRUNC_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ case CEIL_MOD_EXPR:
+ case ROUND_MOD_EXPR:
+ preexpand_calls (exp);
+ if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1)))
+ subtarget = 0;
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ return expand_divmod (1, code, mode, op0, op1, target, unsignedp);
+
+ case FIX_ROUND_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_CEIL_EXPR:
+ abort (); /* Not used for C. */
+
+ case FIX_TRUNC_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0);
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ expand_fix (target, op0, unsignedp);
+ return target;
+
+ case FLOAT_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0);
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ /* expand_float can't figure out what to do if FROM has VOIDmode.
+ So give it the correct mode. With -O, cse will optimize this. */
+ if (GET_MODE (op0) == VOIDmode)
+ op0 = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))),
+ op0);
+ expand_float (target, op0,
+ TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ return target;
+
+ case NEGATE_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ temp = expand_unop (mode, neg_optab, op0, target, 0);
+ if (temp == 0)
+ abort ();
+ return temp;
+
+ case ABS_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+
+ /* Handle complex values specially. */
+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ return expand_complex_abs (mode, op0, target, unsignedp);
+
+ /* Unsigned abs is simply the operand. Testing here means we don't
+ risk generating incorrect code below. */
+ if (TREE_UNSIGNED (type))
+ return op0;
+
+ /* First try to do it with a special abs instruction. */
+ temp = expand_unop (mode, abs_optab, op0, target, 0);
+ if (temp != 0)
+ return temp;
+
+ /* If this machine has expensive jumps, we can do integer absolute
+ value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
+ where W is the width of MODE. */
+
+ if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
+ {
+ rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
+ size_int (GET_MODE_BITSIZE (mode) - 1),
+ NULL_RTX, 0);
+
+ temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
+ OPTAB_LIB_WIDEN);
+ if (temp != 0)
+ temp = expand_binop (mode, sub_optab, temp, extended, target, 0,
+ OPTAB_LIB_WIDEN);
+
+ if (temp != 0)
+ return temp;
+ }
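+
+ /* The shift identity above as plain C, assuming 32-bit int and an
+ arithmetic right shift of negative values, as the RTL expansion
+ arranges; the function is illustrative:
+
+ int iabs (int x)
+ {
+ int s = x >> 31;
+ return (x ^ s) - s;
+ }
+
+ Here s is 0 if x >= 0 and -1 if x < 0, so for x < 0 the result
+ is (~x) + 1 = -x, and for x >= 0 it is x. */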
+
+ /* If that does not win, use conditional jump and negate. */
+ target = original_target;
+ op1 = gen_label_rtx ();
+ if (target == 0 || ! safe_from_p (target, TREE_OPERAND (exp, 0))
+ || GET_MODE (target) != mode
+ || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ || (GET_CODE (target) == REG
+ && REGNO (target) < FIRST_PSEUDO_REGISTER))
+ target = gen_reg_rtx (mode);
+
+ emit_move_insn (target, op0);
+ NO_DEFER_POP;
+
+ /* If this mode is an integer too wide to compare properly,
+ compare word by word. Rely on CSE to optimize constant cases. */
+ if (GET_MODE_CLASS (mode) == MODE_INT && ! can_compare_p (mode))
+ do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
+ NULL_RTX, op1);
+ else
+ {
+ temp = compare_from_rtx (target, CONST0_RTX (mode), GE, 0, mode,
+ NULL_RTX, 0);
+ if (temp == const1_rtx)
+ return target;
+ else if (temp != const0_rtx)
+ {
+ if (bcc_gen_fctn[(int) GET_CODE (temp)] != 0)
+ emit_jump_insn ((*bcc_gen_fctn[(int) GET_CODE (temp)]) (op1));
+ else
+ abort ();
+ }
+ }
+
+ op0 = expand_unop (mode, neg_optab, target, target, 0);
+ if (op0 != target)
+ emit_move_insn (target, op0);
+ emit_label (op1);
+ OK_DEFER_POP;
+ return target;
+
+ case MAX_EXPR:
+ case MIN_EXPR:
+ target = original_target;
+ if (target == 0 || ! safe_from_p (target, TREE_OPERAND (exp, 1))
+ || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
+ || GET_MODE (target) != mode
+ || (GET_CODE (target) == REG
+ && REGNO (target) < FIRST_PSEUDO_REGISTER))
+ target = gen_reg_rtx (mode);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ op0 = expand_expr (TREE_OPERAND (exp, 0), target, VOIDmode, 0);
+
+ /* First try to do it with a special MIN or MAX instruction.
+ If that does not win, use a conditional jump to select the proper
+ value. */
+ this_optab = (TREE_UNSIGNED (type)
+ ? (code == MIN_EXPR ? umin_optab : umax_optab)
+ : (code == MIN_EXPR ? smin_optab : smax_optab));
+
+ temp = expand_binop (mode, this_optab, op0, op1, target, unsignedp,
+ OPTAB_WIDEN);
+ if (temp != 0)
+ return temp;
+
+ if (target != op0)
+ emit_move_insn (target, op0);
+
+ op0 = gen_label_rtx ();
+
+ /* If this mode is an integer too wide to compare properly,
+ compare word by word. Rely on cse to optimize constant cases. */
+ if (GET_MODE_CLASS (mode) == MODE_INT && !can_compare_p (mode))
+ {
+ if (code == MAX_EXPR)
+ do_jump_by_parts_greater_rtx (mode, TREE_UNSIGNED (type),
+ target, op1, NULL_RTX, op0);
+ else
+ do_jump_by_parts_greater_rtx (mode, TREE_UNSIGNED (type),
+ op1, target, NULL_RTX, op0);
+ emit_move_insn (target, op1);
+ }
+ else
+ {
+ if (code == MAX_EXPR)
+ temp = (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 1)))
+ ? compare_from_rtx (target, op1, GEU, 1, mode, NULL_RTX, 0)
+ : compare_from_rtx (target, op1, GE, 0, mode, NULL_RTX, 0));
+ else
+ temp = (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 1)))
+ ? compare_from_rtx (target, op1, LEU, 1, mode, NULL_RTX, 0)
+ : compare_from_rtx (target, op1, LE, 0, mode, NULL_RTX, 0));
+ if (temp == const0_rtx)
+ emit_move_insn (target, op1);
+ else if (temp != const_true_rtx)
+ {
+ if (bcc_gen_fctn[(int) GET_CODE (temp)] != 0)
+ emit_jump_insn ((*bcc_gen_fctn[(int) GET_CODE (temp)]) (op0));
+ else
+ abort ();
+ emit_move_insn (target, op1);
+ }
+ }
+ emit_label (op0);
+ return target;
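+
+ /* A sketch of the word-by-word comparison that
+ do_jump_by_parts_greater_rtx performs, shown for the unsigned
+ case (for signed modes the most significant word compares
+ signed, the lower words unsigned); illustrative only:
+
+ int wide_gt (unsigned hi0, unsigned lo0,
+ unsigned hi1, unsigned lo1)
+ {
+ if (hi0 != hi1)
+ return hi0 > hi1;
+ return lo0 > lo1;
+ } */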
+
+ case BIT_NOT_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
+ if (temp == 0)
+ abort ();
+ return temp;
+
+ case FFS_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ temp = expand_unop (mode, ffs_optab, op0, target, 1);
+ if (temp == 0)
+ abort ();
+ return temp;
+
+ /* ??? Can optimize bitwise operations with one arg constant.
+ Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b)
+ and (a bitwise1 b) bitwise2 b (etc)
+ but that is probably not worthwhile. */
+
+ /* BIT_AND_EXPR is for bitwise anding. TRUTH_AND_EXPR is for anding two
+ boolean values when we want in all cases to compute both of them. In
+ general it is fastest to do TRUTH_AND_EXPR by computing both operands
+ as actual zero-or-1 values and then bitwise anding. In cases where
+ there cannot be any side effects, better code would be made by
+ treating TRUTH_AND_EXPR like TRUTH_ANDIF_EXPR; but the question is
+ how to recognize those cases. */
+
+ case TRUTH_AND_EXPR:
+ case BIT_AND_EXPR:
+ this_optab = and_optab;
+ goto binop;
+
+ case TRUTH_OR_EXPR:
+ case BIT_IOR_EXPR:
+ this_optab = ior_optab;
+ goto binop;
+
+ case TRUTH_XOR_EXPR:
+ case BIT_XOR_EXPR:
+ this_optab = xor_optab;
+ goto binop;
+
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ preexpand_calls (exp);
+ if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1)))
+ subtarget = 0;
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ return expand_shift (code, mode, op0, TREE_OPERAND (exp, 1), target,
+ unsignedp);
+
+ /* Could determine the answer when only additive constants differ. Also,
+ the addition of one can be handled by changing the condition. */
+ case LT_EXPR:
+ case LE_EXPR:
+ case GT_EXPR:
+ case GE_EXPR:
+ case EQ_EXPR:
+ case NE_EXPR:
+ preexpand_calls (exp);
+ temp = do_store_flag (exp, target, tmode != VOIDmode ? tmode : mode, 0);
+ if (temp != 0)
+ return temp;
+
+ /* For foo != 0, load foo, and if it is nonzero load 1 instead. */
+ if (code == NE_EXPR && integer_zerop (TREE_OPERAND (exp, 1))
+ && original_target
+ && GET_CODE (original_target) == REG
+ && (GET_MODE (original_target)
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ {
+ temp = expand_expr (TREE_OPERAND (exp, 0), original_target,
+ VOIDmode, 0);
+
+ if (temp != original_target)
+ temp = copy_to_reg (temp);
+
+ op1 = gen_label_rtx ();
+ emit_cmp_insn (temp, const0_rtx, EQ, NULL_RTX,
+ GET_MODE (temp), unsignedp, 0);
+ emit_jump_insn (gen_beq (op1));
+ emit_move_insn (temp, const1_rtx);
+ emit_label (op1);
+ return temp;
+ }
+
+ /* If no set-flag instruction, must generate a conditional
+ store into a temporary variable. Drop through
+ and handle this like && and ||. */
+
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ if (! ignore
+ && (target == 0 || ! safe_from_p (target, exp)
+ /* Make sure we don't have a hard reg (such as function's return
+ value) live across basic blocks, if not optimizing. */
+ || (!optimize && GET_CODE (target) == REG
+ && REGNO (target) < FIRST_PSEUDO_REGISTER)))
+ target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
+
+ if (target)
+ emit_clr_insn (target);
+
+ op1 = gen_label_rtx ();
+ jumpifnot (exp, op1);
+
+ if (target)
+ emit_0_to_1_insn (target);
+
+ emit_label (op1);
+ return ignore ? const0_rtx : target;
+
+ case TRUTH_NOT_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), target, VOIDmode, 0);
+ /* The parser is careful to generate TRUTH_NOT_EXPR
+ only with operands that are always zero or one. */
+ temp = expand_binop (mode, xor_optab, op0, const1_rtx,
+ target, 1, OPTAB_LIB_WIDEN);
+ if (temp == 0)
+ abort ();
+ return temp;
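+
+ /* The xor identity above in source terms: given the parser's
+ guarantee that x is 0 or 1,
+
+ int truth_not (int x) { return x ^ 1; }
+
+ flips the truth value without a branch. */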
+
+ case COMPOUND_EXPR:
+ expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0);
+ emit_queue ();
+ return expand_expr (TREE_OPERAND (exp, 1),
+ (ignore ? const0_rtx : target),
+ VOIDmode, 0);
+
+ case COND_EXPR:
+ {
+ rtx flag = NULL_RTX;
+ tree left_cleanups = NULL_TREE;
+ tree right_cleanups = NULL_TREE;
+
+ /* Used to save a pointer to the place to put the setting of
+ the flag that indicates if this side of the conditional was
+ taken. We backpatch the code if we find out later that we
+ have any conditional cleanups that need to be performed. */
+ rtx dest_right_flag = NULL_RTX;
+ rtx dest_left_flag = NULL_RTX;
+
+ /* Note that COND_EXPRs whose type is a structure or union
+ are required to be constructed to contain assignments of
+ a temporary variable, so that we can evaluate them here
+ for side effect only. If type is void, we must do likewise. */
+
+ /* If an arm of the branch requires a cleanup,
+ only that cleanup is performed. */
+
+ tree singleton = 0;
+ tree binary_op = 0, unary_op = 0;
+ tree old_cleanups = cleanups_this_call;
+
+ /* If this is (A ? 1 : 0) and A is a condition, just evaluate it and
+ convert it to our mode, if necessary. */
+ if (integer_onep (TREE_OPERAND (exp, 1))
+ && integer_zerop (TREE_OPERAND (exp, 2))
+ && TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == '<')
+ {
+ if (ignore)
+ {
+ expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
+ modifier);
+ return const0_rtx;
+ }
+
+ op0 = expand_expr (TREE_OPERAND (exp, 0), target, mode, modifier);
+ if (GET_MODE (op0) == mode)
+ return op0;
+
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ convert_move (target, op0, unsignedp);
+ return target;
+ }
+
+ /* If we are not to produce a result, we have no target. Otherwise,
+ if a target was specified use it; it will not be used as an
+ intermediate target unless it is safe. If no target, use a
+ temporary. */
+
+ if (ignore)
+ temp = 0;
+ else if (original_target
+ && safe_from_p (original_target, TREE_OPERAND (exp, 0))
+ && GET_MODE (original_target) == mode)
+ temp = original_target;
+ else if (mode == BLKmode)
+ {
+ if (TYPE_SIZE (type) == 0
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ abort ();
+
+ temp = assign_stack_temp (BLKmode,
+ (TREE_INT_CST_LOW (TYPE_SIZE (type))
+ + BITS_PER_UNIT - 1)
+ / BITS_PER_UNIT, 0);
+ MEM_IN_STRUCT_P (temp) = AGGREGATE_TYPE_P (type);
+ }
+ else
+ temp = gen_reg_rtx (mode);
+
+ /* Check for X ? A + B : A. If we have this, we can copy
+ A to the output and conditionally add B. Similarly for unary
+ operations. Don't do this if X has side-effects because
+ those side effects might affect A or B and the "?" operation is
+ a sequence point in ANSI. (We test for side effects later.) */
+
+ if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 1))) == '2'
+ && operand_equal_p (TREE_OPERAND (exp, 2),
+ TREE_OPERAND (TREE_OPERAND (exp, 1), 0), 0))
+ singleton = TREE_OPERAND (exp, 2), binary_op = TREE_OPERAND (exp, 1);
+ else if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 2))) == '2'
+ && operand_equal_p (TREE_OPERAND (exp, 1),
+ TREE_OPERAND (TREE_OPERAND (exp, 2), 0), 0))
+ singleton = TREE_OPERAND (exp, 1), binary_op = TREE_OPERAND (exp, 2);
+ else if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 1))) == '1'
+ && operand_equal_p (TREE_OPERAND (exp, 2),
+ TREE_OPERAND (TREE_OPERAND (exp, 1), 0), 0))
+ singleton = TREE_OPERAND (exp, 2), unary_op = TREE_OPERAND (exp, 1);
+ else if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 2))) == '1'
+ && operand_equal_p (TREE_OPERAND (exp, 1),
+ TREE_OPERAND (TREE_OPERAND (exp, 2), 0), 0))
+ singleton = TREE_OPERAND (exp, 1), unary_op = TREE_OPERAND (exp, 2);
+
+ /* If we had X ? A + 1 : A and we can do the test of X as a store-flag
+ operation, do this as A + (X != 0). Similarly for other simple
+ binary operators. */
+ if (temp && singleton && binary_op
+ && ! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 0))
+ && (TREE_CODE (binary_op) == PLUS_EXPR
+ || TREE_CODE (binary_op) == MINUS_EXPR
+ || TREE_CODE (binary_op) == BIT_IOR_EXPR
+ || TREE_CODE (binary_op) == BIT_XOR_EXPR
+ || TREE_CODE (binary_op) == BIT_AND_EXPR)
+ && integer_onep (TREE_OPERAND (binary_op, 1))
+ && TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == '<')
+ {
+ rtx result;
+ optab boptab = (TREE_CODE (binary_op) == PLUS_EXPR ? add_optab
+ : TREE_CODE (binary_op) == MINUS_EXPR ? sub_optab
+ : TREE_CODE (binary_op) == BIT_IOR_EXPR ? ior_optab
+ : TREE_CODE (binary_op) == BIT_XOR_EXPR ? xor_optab
+ : and_optab);
+
+ /* If we had X ? A : A + 1, do this as A + (X == 0).
+
+ We have to invert the truth value here and then put it
+ back later if do_store_flag fails. We cannot simply copy
+ TREE_OPERAND (exp, 0) to another variable and modify that
+ because invert_truthvalue can modify the tree pointed to
+ by its argument. */
+ if (singleton == TREE_OPERAND (exp, 1))
+ TREE_OPERAND (exp, 0)
+ = invert_truthvalue (TREE_OPERAND (exp, 0));
+
+ result = do_store_flag (TREE_OPERAND (exp, 0),
+ (safe_from_p (temp, singleton)
+ ? temp : NULL_RTX),
+ mode, BRANCH_COST <= 1);
+
+ if (result)
+ {
+ op1 = expand_expr (singleton, NULL_RTX, VOIDmode, 0);
+ return expand_binop (mode, boptab, op1, result, temp,
+ unsignedp, OPTAB_LIB_WIDEN);
+ }
+ else if (singleton == TREE_OPERAND (exp, 1))
+ TREE_OPERAND (exp, 0)
+ = invert_truthvalue (TREE_OPERAND (exp, 0));
+ }
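+
+ /* The rewrite above in source terms (illustrative):
+
+ int pick (int a, int x) { return x ? a + 1 : a; }
+
+ is computed branch-free as a + (x != 0), with do_store_flag
+ supplying the (x != 0) value. */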
+
+ NO_DEFER_POP;
+ op0 = gen_label_rtx ();
+
+ flag = gen_reg_rtx (word_mode);
+ if (singleton && ! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 0)))
+ {
+ if (temp != 0)
+ {
+ /* If the target conflicts with the other operand of the
+ binary op, we can't use it. Also, we can't use the target
+ if it is a hard register, because evaluating the condition
+ might clobber it. */
+ if ((binary_op
+ && ! safe_from_p (temp, TREE_OPERAND (binary_op, 1)))
+ || (GET_CODE (temp) == REG
+ && REGNO (temp) < FIRST_PSEUDO_REGISTER))
+ temp = gen_reg_rtx (mode);
+ store_expr (singleton, temp, 0);
+ }
+ else
+ expand_expr (singleton,
+ ignore ? const0_rtx : NULL_RTX, VOIDmode, 0);
+ dest_left_flag = get_last_insn ();
+ if (singleton == TREE_OPERAND (exp, 1))
+ jumpif (TREE_OPERAND (exp, 0), op0);
+ else
+ jumpifnot (TREE_OPERAND (exp, 0), op0);
+
+ /* Allows cleanups up to here. */
+ old_cleanups = cleanups_this_call;
+ if (binary_op && temp == 0)
+ /* Just touch the other operand. */
+ expand_expr (TREE_OPERAND (binary_op, 1),
+ ignore ? const0_rtx : NULL_RTX, VOIDmode, 0);
+ else if (binary_op)
+ store_expr (build (TREE_CODE (binary_op), type,
+ make_tree (type, temp),
+ TREE_OPERAND (binary_op, 1)),
+ temp, 0);
+ else
+ store_expr (build1 (TREE_CODE (unary_op), type,
+ make_tree (type, temp)),
+ temp, 0);
+ op1 = op0;
+ dest_right_flag = get_last_insn ();
+ }
+#if 0
+ /* This is now done in jump.c and is better done there because it
+ produces shorter register lifetimes. */
+
+ /* Check for both possibilities, either constants or variables
+ in registers (but not the same as the target!). If so, we can
+ save branches by assigning one, branching, and assigning the
+ other. */
+ else if (temp && GET_MODE (temp) != BLKmode
+ && (TREE_CONSTANT (TREE_OPERAND (exp, 1))
+ || ((TREE_CODE (TREE_OPERAND (exp, 1)) == PARM_DECL
+ || TREE_CODE (TREE_OPERAND (exp, 1)) == VAR_DECL)
+ && DECL_RTL (TREE_OPERAND (exp, 1))
+ && GET_CODE (DECL_RTL (TREE_OPERAND (exp, 1))) == REG
+ && DECL_RTL (TREE_OPERAND (exp, 1)) != temp))
+ && (TREE_CONSTANT (TREE_OPERAND (exp, 2))
+ || ((TREE_CODE (TREE_OPERAND (exp, 2)) == PARM_DECL
+ || TREE_CODE (TREE_OPERAND (exp, 2)) == VAR_DECL)
+ && DECL_RTL (TREE_OPERAND (exp, 2))
+ && GET_CODE (DECL_RTL (TREE_OPERAND (exp, 2))) == REG
+ && DECL_RTL (TREE_OPERAND (exp, 2)) != temp)))
+ {
+ if (GET_CODE (temp) == REG && REGNO (temp) < FIRST_PSEUDO_REGISTER)
+ temp = gen_reg_rtx (mode);
+ store_expr (TREE_OPERAND (exp, 2), temp, 0);
+ dest_left_flag = get_last_insn ();
+ jumpifnot (TREE_OPERAND (exp, 0), op0);
+
+ /* Allows cleanups up to here. */
+ old_cleanups = cleanups_this_call;
+ store_expr (TREE_OPERAND (exp, 1), temp, 0);
+ op1 = op0;
+ dest_right_flag = get_last_insn ();
+ }
+#endif
+ /* Check for A op 0 ? A : FOO and A op 0 ? FOO : A where OP is any
+ comparison operator. If we have one of these cases, set the
+ output to A, branch on A (cse will merge these two references),
+ then set the output to FOO. */
+ else if (temp
+ && TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == '<'
+ && integer_zerop (TREE_OPERAND (TREE_OPERAND (exp, 0), 1))
+ && operand_equal_p (TREE_OPERAND (TREE_OPERAND (exp, 0), 0),
+ TREE_OPERAND (exp, 1), 0)
+ && ! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 0))
+ && safe_from_p (temp, TREE_OPERAND (exp, 2)))
+ {
+ if (GET_CODE (temp) == REG && REGNO (temp) < FIRST_PSEUDO_REGISTER)
+ temp = gen_reg_rtx (mode);
+ store_expr (TREE_OPERAND (exp, 1), temp, 0);
+ dest_left_flag = get_last_insn ();
+ jumpif (TREE_OPERAND (exp, 0), op0);
+
+ /* Allows cleanups up to here. */
+ old_cleanups = cleanups_this_call;
+ store_expr (TREE_OPERAND (exp, 2), temp, 0);
+ op1 = op0;
+ dest_right_flag = get_last_insn ();
+ }
+ else if (temp
+ && TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == '<'
+ && integer_zerop (TREE_OPERAND (TREE_OPERAND (exp, 0), 1))
+ && operand_equal_p (TREE_OPERAND (TREE_OPERAND (exp, 0), 0),
+ TREE_OPERAND (exp, 2), 0)
+ && ! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 0))
+ && safe_from_p (temp, TREE_OPERAND (exp, 1)))
+ {
+ if (GET_CODE (temp) == REG && REGNO (temp) < FIRST_PSEUDO_REGISTER)
+ temp = gen_reg_rtx (mode);
+ store_expr (TREE_OPERAND (exp, 2), temp, 0);
+ dest_left_flag = get_last_insn ();
+ jumpifnot (TREE_OPERAND (exp, 0), op0);
+
+ /* Allows cleanups up to here. */
+ old_cleanups = cleanups_this_call;
+ store_expr (TREE_OPERAND (exp, 1), temp, 0);
+ op1 = op0;
+ dest_right_flag = get_last_insn ();
+ }
+ else
+ {
+ op1 = gen_label_rtx ();
+ jumpifnot (TREE_OPERAND (exp, 0), op0);
+
+ /* Allows cleanups up to here. */
+ old_cleanups = cleanups_this_call;
+ if (temp != 0)
+ store_expr (TREE_OPERAND (exp, 1), temp, 0);
+ else
+ expand_expr (TREE_OPERAND (exp, 1),
+ ignore ? const0_rtx : NULL_RTX, VOIDmode, 0);
+ dest_left_flag = get_last_insn ();
+
+ /* Handle conditional cleanups, if any. */
+ left_cleanups = defer_cleanups_to (old_cleanups);
+
+ emit_queue ();
+ emit_jump_insn (gen_jump (op1));
+ emit_barrier ();
+ emit_label (op0);
+ if (temp != 0)
+ store_expr (TREE_OPERAND (exp, 2), temp, 0);
+ else
+ expand_expr (TREE_OPERAND (exp, 2),
+ ignore ? const0_rtx : NULL_RTX, VOIDmode, 0);
+ dest_right_flag = get_last_insn ();
+ }
+
+ /* Handle conditional cleanups, if any. */
+ right_cleanups = defer_cleanups_to (old_cleanups);
+
+ emit_queue ();
+ emit_label (op1);
+ OK_DEFER_POP;
+
+ /* Add back in any conditional cleanups. */
+ if (left_cleanups || right_cleanups)
+ {
+ tree new_cleanups;
+ tree cond;
+ rtx last;
+
+ /* Now that we know that a flag is needed, go back and add in the
+ setting of the flag. */
+
+ /* Do the left side flag. */
+ last = get_last_insn ();
+ /* Flag left cleanups as needed. */
+ emit_move_insn (flag, const1_rtx);
+ /* ??? deprecated, use sequences instead. */
+ reorder_insns (NEXT_INSN (last), get_last_insn (), dest_left_flag);
+
+ /* Do the right side flag. */
+ last = get_last_insn ();
+ /* Flag right cleanups as needed. */
+ emit_move_insn (flag, const0_rtx);
+ /* ??? deprecated, use sequences instead. */
+ reorder_insns (NEXT_INSN (last), get_last_insn (), dest_right_flag);
+
+ /* Convert FLAG, which is an rtx, into a tree. */
+ cond = make_node (RTL_EXPR);
+ TREE_TYPE (cond) = integer_type_node;
+ RTL_EXPR_RTL (cond) = flag;
+ RTL_EXPR_SEQUENCE (cond) = NULL_RTX;
+
+ if (! left_cleanups)
+ left_cleanups = integer_zero_node;
+ if (! right_cleanups)
+ right_cleanups = integer_zero_node;
+ new_cleanups = build (COND_EXPR, void_type_node, cond,
+ left_cleanups, right_cleanups);
+ new_cleanups = fold (new_cleanups);
+
+ /* Now add in the conditionalized cleanups. */
+ cleanups_this_call
+ = tree_cons (NULL_TREE, new_cleanups, cleanups_this_call);
+ (*interim_eh_hook) (NULL_TREE);
+ }
+ return temp;
+ }
+
+ case TARGET_EXPR:
+ {
+ int need_exception_region = 0;
+ /* Something needs to be initialized, but we didn't know
+ where that thing was when building the tree. For example,
+ it could be the return value of a function, a parameter
+ to a function which is passed on the stack, or a temporary
+ variable which must be passed by reference.
+
+ We guarantee that the expression will either be constructed
+ or copied into our original target. */
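+
+ /* A typical instance is initializing an aggregate from a call,
+ as in `struct S s = f ();': f's result can then be constructed
+ directly in the slot for `s'. */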
+
+ tree slot = TREE_OPERAND (exp, 0);
+ tree exp1;
+ rtx temp;
+
+ if (TREE_CODE (slot) != VAR_DECL)
+ abort ();
+
+ if (target == 0)
+ {
+ if (DECL_RTL (slot) != 0)
+ {
+ target = DECL_RTL (slot);
+ /* If we have already expanded the slot, don't do
+ it again. (mrs) */
+ if (TREE_OPERAND (exp, 1) == NULL_TREE)
+ return target;
+ }
+ else
+ {
+ target = assign_stack_temp (mode, int_size_in_bytes (type), 2);
+ /* All temp slots at this level must not conflict. */
+ preserve_temp_slots (target);
+ DECL_RTL (slot) = target;
+
+ /* Since SLOT is not known to the called function
+ to belong to its stack frame, we must build an explicit
+ cleanup. This case occurs when we must build up a reference
+ to pass as an argument. In this case,
+ it is very likely that such a reference need not be
+ built here. */
+
+ if (TREE_OPERAND (exp, 2) == 0)
+ TREE_OPERAND (exp, 2) = maybe_build_cleanup (slot);
+ if (TREE_OPERAND (exp, 2))
+ {
+ cleanups_this_call = tree_cons (NULL_TREE,
+ TREE_OPERAND (exp, 2),
+ cleanups_this_call);
+ need_exception_region = 1;
+ }
+ }
+ }
+ else
+ {
+ /* This case does occur when expanding a parameter which
+ needs to be constructed on the stack. The target
+ is the actual stack address that we want to initialize.
+ The function we call will perform the cleanup in this case. */
+
+ /* If we have already assigned it space, use that space,
+ not the target that we were passed in, as our target
+ parameter is only a hint. */
+ if (DECL_RTL (slot) != 0)
+ {
+ target = DECL_RTL (slot);
+ /* If we have already expanded the slot, don't do
+ it again. (mrs) */
+ if (TREE_OPERAND (exp, 1) == NULL_TREE)
+ return target;
+ }
+
+ DECL_RTL (slot) = target;
+ }
+
+ exp1 = TREE_OPERAND (exp, 1);
+ /* Mark it as expanded. */
+ TREE_OPERAND (exp, 1) = NULL_TREE;
+
+ temp = expand_expr (exp1, target, tmode, modifier);
+
+ if (need_exception_region)
+ (*interim_eh_hook) (NULL_TREE);
+
+ return temp;
+ }
+
+ case INIT_EXPR:
+ {
+ tree lhs = TREE_OPERAND (exp, 0);
+ tree rhs = TREE_OPERAND (exp, 1);
+ tree noncopied_parts = 0;
+ tree lhs_type = TREE_TYPE (lhs);
+
+ temp = expand_assignment (lhs, rhs, ! ignore, original_target != 0);
+ if (TYPE_NONCOPIED_PARTS (lhs_type) != 0 && !fixed_type_p (rhs))
+ noncopied_parts = init_noncopied_parts (stabilize_reference (lhs),
+ TYPE_NONCOPIED_PARTS (lhs_type));
+ while (noncopied_parts != 0)
+ {
+ expand_assignment (TREE_VALUE (noncopied_parts),
+ TREE_PURPOSE (noncopied_parts), 0, 0);
+ noncopied_parts = TREE_CHAIN (noncopied_parts);
+ }
+ return temp;
+ }
+
+ case MODIFY_EXPR:
+ {
+ /* If lhs is complex, expand calls in rhs before computing it.
+ That's so we don't compute a pointer and save it over a call.
+ If lhs is simple, compute it first so we can give it as a
+ target if the rhs is just a call. This avoids an extra temp and
+ copy, and prevents a partial subsumption that would make bad code.
+ Actually we could treat component_refs of vars like vars. */
+
+ tree lhs = TREE_OPERAND (exp, 0);
+ tree rhs = TREE_OPERAND (exp, 1);
+ tree noncopied_parts = 0;
+ tree lhs_type = TREE_TYPE (lhs);
+
+ temp = 0;
+
+ if (TREE_CODE (lhs) != VAR_DECL
+ && TREE_CODE (lhs) != RESULT_DECL
+ && TREE_CODE (lhs) != PARM_DECL)
+ preexpand_calls (exp);
+
+ /* Check for |= or &= of a bitfield of size one into another bitfield
+ of size one. In this case, unless we need the result of the
+ assignment, we can do this more efficiently with a
+ test followed by an assignment, if necessary.
+
+ ??? At this point, we can't get a BIT_FIELD_REF here. But if
+ things change so we do, this code should be enhanced to
+ support it. */
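+ /* Schematically, for one-bit fields, `s.a |= s.b;' becomes
+ `if (s.b) s.a = 1;' and `s.a &= s.b;' becomes
+ `if (! s.b) s.a = 0;'. */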
+ if (ignore
+ && TREE_CODE (lhs) == COMPONENT_REF
+ && (TREE_CODE (rhs) == BIT_IOR_EXPR
+ || TREE_CODE (rhs) == BIT_AND_EXPR)
+ && TREE_OPERAND (rhs, 0) == lhs
+ && TREE_CODE (TREE_OPERAND (rhs, 1)) == COMPONENT_REF
+ && TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (lhs, 1))) == 1
+ && TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs, 1), 1))) == 1)
+ {
+ rtx label = gen_label_rtx ();
+
+ do_jump (TREE_OPERAND (rhs, 1),
+ TREE_CODE (rhs) == BIT_IOR_EXPR ? label : 0,
+ TREE_CODE (rhs) == BIT_AND_EXPR ? label : 0);
+ expand_assignment (lhs, convert (TREE_TYPE (rhs),
+ (TREE_CODE (rhs) == BIT_IOR_EXPR
+ ? integer_one_node
+ : integer_zero_node)),
+ 0, 0);
+ do_pending_stack_adjust ();
+ emit_label (label);
+ return const0_rtx;
+ }
+
+ if (TYPE_NONCOPIED_PARTS (lhs_type) != 0
+ && ! (fixed_type_p (lhs) && fixed_type_p (rhs)))
+ noncopied_parts = save_noncopied_parts (stabilize_reference (lhs),
+ TYPE_NONCOPIED_PARTS (lhs_type));
+
+ temp = expand_assignment (lhs, rhs, ! ignore, original_target != 0);
+ while (noncopied_parts != 0)
+ {
+ expand_assignment (TREE_PURPOSE (noncopied_parts),
+ TREE_VALUE (noncopied_parts), 0, 0);
+ noncopied_parts = TREE_CHAIN (noncopied_parts);
+ }
+ return temp;
+ }
+
+ case PREINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ return expand_increment (exp, 0);
+
+ case POSTINCREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ /* Faster to treat as pre-increment if result is not used. */
+ return expand_increment (exp, ! ignore);
+
+ case ADDR_EXPR:
+ /* If nonzero, TEMP will be set to the address of something that might
+ be a MEM corresponding to a stack slot. */
+ temp = 0;
+
+ /* Are we taking the address of a nested function? */
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == FUNCTION_DECL
+ && decl_function_context (TREE_OPERAND (exp, 0)) != 0)
+ {
+ op0 = trampoline_address (TREE_OPERAND (exp, 0));
+ op0 = force_operand (op0, target);
+ }
+ /* If we are taking the address of something erroneous, just
+ return a zero. */
+ else if (TREE_CODE (TREE_OPERAND (exp, 0)) == ERROR_MARK)
+ return const0_rtx;
+ else
+ {
+ /* We make sure to pass const0_rtx down if we came in with
+ ignore set, to avoid doing the cleanups twice. */
+ op0 = expand_expr (TREE_OPERAND (exp, 0),
+ ignore ? const0_rtx : NULL_RTX, VOIDmode,
+ (modifier == EXPAND_INITIALIZER
+ ? modifier : EXPAND_CONST_ADDRESS));
+
+ /* If we are going to ignore the result, OP0 will have been set
+ to const0_rtx, so just return it. Don't get confused and
+ think we are taking the address of the constant. */
+ if (ignore)
+ return op0;
+
+ /* We would like the object in memory. If it is a constant,
+ we can have it be statically allocated into memory. For
+ a non-constant (REG, SUBREG or CONCAT), we need to allocate some
+ memory and store the value into it. */
+
+ if (CONSTANT_P (op0))
+ op0 = force_const_mem (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))),
+ op0);
+ else if (GET_CODE (op0) == MEM)
+ temp = XEXP (op0, 0);
+
+ else if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG
+ || GET_CODE (op0) == CONCAT)
+ {
+ /* If this object is in a register, it must not
+ be BLKmode. */
+ tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));
+ enum machine_mode inner_mode = TYPE_MODE (inner_type);
+ rtx memloc
+ = assign_stack_temp (inner_mode,
+ int_size_in_bytes (inner_type), 1);
+
+ emit_move_insn (memloc, op0);
+ op0 = memloc;
+ }
+
+ if (GET_CODE (op0) != MEM)
+ abort ();
+
+ if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
+ return XEXP (op0, 0);
+
+ op0 = force_operand (XEXP (op0, 0), target);
+ }
+
+ if (flag_force_addr && GET_CODE (op0) != REG)
+ op0 = force_reg (Pmode, op0);
+
+ if (GET_CODE (op0) == REG)
+ mark_reg_pointer (op0);
+
+ /* If we might have had a temp slot, add an equivalent address
+ for it. */
+ if (temp != 0)
+ update_temp_slot_address (temp, op0);
+
+ return op0;
+
+ case ENTRY_VALUE_EXPR:
+ abort ();
+
+ /* COMPLEX type for Extended Pascal & Fortran */
+ case COMPLEX_EXPR:
+ {
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
+ rtx insns;
+
+ /* Get the rtx for the operands. */
+ op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), 0, VOIDmode, 0);
+
+ if (! target)
+ target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
+
+ start_sequence ();
+
+ /* Move the real (op0) and imaginary (op1) parts to their location. */
+ emit_move_insn (gen_realpart (mode, target), op0);
+ emit_move_insn (gen_imagpart (mode, target), op1);
+
+ insns = get_insns ();
+ end_sequence ();
+
+ /* Complex construction should appear as a single unit. */
+ /* If TARGET is a CONCAT, we got insns like RD = RS, ID = IS,
+ each with a separate pseudo as destination.
+ It's not correct for flow to treat them as a unit. */
+ if (GET_CODE (target) != CONCAT)
+ emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
+ else
+ emit_insns (insns);
+
+ return target;
+ }
+
+ case REALPART_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
+ return gen_realpart (mode, op0);
+
+ case IMAGPART_EXPR:
+ op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
+ return gen_imagpart (mode, op0);
+
+ case CONJ_EXPR:
+ {
+ rtx imag_t;
+ rtx insns;
+
+ op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
+
+ if (! target)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ /* Store the realpart and the negated imagpart to target. */
+ emit_move_insn (gen_realpart (mode, target), gen_realpart (mode, op0));
+
+ imag_t = gen_imagpart (mode, target);
+ temp = expand_unop (mode, neg_optab,
+ gen_imagpart (mode, op0), imag_t, 0);
+ if (temp != imag_t)
+ emit_move_insn (imag_t, temp);
+
+ insns = get_insns ();
+ end_sequence ();
+
+ /* Conjugate should appear as a single unit.
+ If TARGET is a CONCAT, we got insns like RD = RS, ID = - IS,
+ each with a separate pseudo as destination.
+ It's not correct for flow to treat them as a unit. */
+ if (GET_CODE (target) != CONCAT)
+ emit_no_conflict_block (insns, target, op0, NULL_RTX, NULL_RTX);
+ else
+ emit_insns (insns);
+
+ return target;
+ }
+
+ case ERROR_MARK:
+ op0 = CONST0_RTX (tmode);
+ if (op0 != 0)
+ return op0;
+ return const0_rtx;
+
+ default:
+ return (*lang_expand_expr) (exp, original_target, tmode, modifier);
+ }
+
+ /* Here to do an ordinary binary operator, generating an instruction
+ from the optab already placed in `this_optab'. */
+ binop:
+ preexpand_calls (exp);
+ if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1)))
+ subtarget = 0;
+ op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0);
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ binop2:
+ temp = expand_binop (mode, this_optab, op0, op1, target,
+ unsignedp, OPTAB_LIB_WIDEN);
+ if (temp == 0)
+ abort ();
+ return temp;
+}
+
+
+/* Emit bytecode to evaluate the given expression EXP to the stack. */
+void
+bc_expand_expr (exp)
+ tree exp;
+{
+ enum tree_code code;
+ tree type, arg0;
+ rtx r;
+ struct binary_operator *binoptab;
+ struct unary_operator *unoptab;
+ struct increment_operator *incroptab;
+ struct bc_label *lab, *lab1;
+ enum bytecode_opcode opcode;
+
+
+ code = TREE_CODE (exp);
+
+ switch (code)
+ {
+ case PARM_DECL:
+
+ if (DECL_RTL (exp) == 0)
+ {
+ error_with_decl (exp, "prior parameter's size depends on `%s'");
+ return;
+ }
+
+ bc_load_parmaddr (DECL_RTL (exp));
+ bc_load_memory (TREE_TYPE (exp), exp);
+
+ return;
+
+ case VAR_DECL:
+
+ if (DECL_RTL (exp) == 0)
+ abort ();
+
+#if 0
+ if (BYTECODE_LABEL (DECL_RTL (exp)))
+ bc_load_externaddr (DECL_RTL (exp));
+ else
+ bc_load_localaddr (DECL_RTL (exp));
+#endif
+ if (TREE_PUBLIC (exp))
+ bc_load_externaddr_id (DECL_ASSEMBLER_NAME (exp),
+ BYTECODE_BC_LABEL (DECL_RTL (exp))->offset);
+ else
+ bc_load_localaddr (DECL_RTL (exp));
+
+ bc_load_memory (TREE_TYPE (exp), exp);
+ return;
+
+ case INTEGER_CST:
+
+#ifdef DEBUG_PRINT_CODE
+ fprintf (stderr, " [%x]\n", TREE_INT_CST_LOW (exp));
+#endif
+ bc_emit_instruction (mode_to_const_map[(int) (DECL_BIT_FIELD (exp)
+ ? SImode
+ : TYPE_MODE (TREE_TYPE (exp)))],
+ (HOST_WIDE_INT) TREE_INT_CST_LOW (exp));
+ return;
+
+ case REAL_CST:
+
+#if 0
+#ifdef DEBUG_PRINT_CODE
+ fprintf (stderr, " [%g]\n", (double) TREE_INT_CST_LOW (exp));
+#endif
+ /* FIX THIS: find a better way to pass real_cst's. -bson */
+ bc_emit_instruction (mode_to_const_map[TYPE_MODE (TREE_TYPE (exp))],
+ (double) TREE_REAL_CST (exp));
+#else
+ abort ();
+#endif
+
+ return;
+
+ case CALL_EXPR:
+
+ /* We build a call description vector describing the type of
+ the return value and of the arguments; this call vector,
+ together with a pointer to a location for the return value
+ and the base of the argument list, is passed to the low
+ level machine dependent call subroutine, which is responsible
+ for putting the arguments wherever real functions expect
+ them, as well as getting the return value back. */
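+ /* As assembled below, the vector reads, roughly:
+ { nargs, ret_type_code, ret_size,
+ arg1_type_code, arg1_size, ..., argN_type_code, argN_size }. */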
+ {
+ tree calldesc = 0, arg;
+ int nargs = 0, i;
+ rtx retval;
+
+ /* Push the evaluated args on the evaluation stack in reverse
+ order. Also make an entry for each arg in the calldesc
+ vector while we're at it. */
+
+ TREE_OPERAND (exp, 1) = nreverse (TREE_OPERAND (exp, 1));
+
+ for (arg = TREE_OPERAND (exp, 1); arg; arg = TREE_CHAIN (arg))
+ {
+ ++nargs;
+ bc_expand_expr (TREE_VALUE (arg));
+
+ calldesc = tree_cons ((tree) 0,
+ size_in_bytes (TREE_TYPE (TREE_VALUE (arg))),
+ calldesc);
+ calldesc = tree_cons ((tree) 0,
+ bc_runtime_type_code (TREE_TYPE (TREE_VALUE (arg))),
+ calldesc);
+ }
+
+ TREE_OPERAND (exp, 1) = nreverse (TREE_OPERAND (exp, 1));
+
+ /* Allocate a location for the return value and push its
+ address on the evaluation stack. Also make an entry
+ at the front of the calldesc for the return value type. */
+
+ type = TREE_TYPE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
+ retval = bc_allocate_local (int_size_in_bytes (type), TYPE_ALIGN (type));
+ bc_load_localaddr (retval);
+
+ calldesc = tree_cons ((tree) 0, size_in_bytes (type), calldesc);
+ calldesc = tree_cons ((tree) 0, bc_runtime_type_code (type), calldesc);
+
+ /* Prepend the argument count. */
+ calldesc = tree_cons ((tree) 0,
+ build_int_2 (nargs, 0),
+ calldesc);
+
+ /* Push the address of the call description vector on the stack. */
+ calldesc = build_nt (CONSTRUCTOR, (tree) 0, calldesc);
+ TREE_TYPE (calldesc) = build_array_type (integer_type_node,
+ build_index_type (build_int_2 (nargs * 2, 0)));
+ r = output_constant_def (calldesc);
+ bc_load_externaddr (r);
+
+ /* Push the address of the function to be called. */
+ bc_expand_expr (TREE_OPERAND (exp, 0));
+
+ /* Call the function, popping its address and the calldesc vector
+ address off the evaluation stack in the process. */
+ bc_emit_instruction (call);
+
+ /* Pop the arguments off the stack. */
+ bc_adjust_stack (nargs);
+
+ /* Load the return value onto the stack. */
+ bc_load_localaddr (retval);
+ bc_load_memory (type, TREE_OPERAND (exp, 0));
+ }
+ return;
+
+ case SAVE_EXPR:
+
+ if (!SAVE_EXPR_RTL (exp))
+ {
+ /* First time around: copy to local variable */
+ SAVE_EXPR_RTL (exp) = bc_allocate_local (int_size_in_bytes (TREE_TYPE (exp)),
+ TYPE_ALIGN (TREE_TYPE(exp)));
+ bc_expand_expr (TREE_OPERAND (exp, 0));
+ bc_emit_instruction (duplicate);
+
+ bc_load_localaddr (SAVE_EXPR_RTL (exp));
+ bc_store_memory (TREE_TYPE (exp), TREE_OPERAND (exp, 0));
+ }
+ else
+ {
+ /* Consecutive reference: use saved copy */
+ bc_load_localaddr (SAVE_EXPR_RTL (exp));
+ bc_load_memory (TREE_TYPE (exp), TREE_OPERAND (exp, 0));
+ }
+ return;
+
+#if 0
+ /* FIXME: the XXXX_STMT codes have been removed in GCC2, but
+ how are they handled instead? */
+ case LET_STMT:
+
+ TREE_USED (exp) = 1;
+ bc_expand_expr (STMT_BODY (exp));
+ return;
+#endif
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+
+ bc_expand_expr (TREE_OPERAND (exp, 0));
+ bc_expand_conversion (TREE_TYPE (TREE_OPERAND (exp, 0)), TREE_TYPE (exp));
+ return;
+
+ case MODIFY_EXPR:
+
+ expand_assignment (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), 0, 0);
+ return;
+
+ case ADDR_EXPR:
+
+ bc_expand_address (TREE_OPERAND (exp, 0));
+ return;
+
+ case INDIRECT_REF:
+
+ bc_expand_expr (TREE_OPERAND (exp, 0));
+ bc_load_memory (TREE_TYPE (exp), TREE_OPERAND (exp, 0));
+ return;
+
+ case ARRAY_REF:
+
+ bc_expand_expr (bc_canonicalize_array_ref (exp));
+ return;
+
+ case COMPONENT_REF:
+
+ bc_expand_component_address (exp);
+
+ /* If we have a bitfield, generate a proper load */
+ bc_load_memory (TREE_TYPE (TREE_OPERAND (exp, 1)), TREE_OPERAND (exp, 1));
+ return;
+
+ case COMPOUND_EXPR:
+
+ bc_expand_expr (TREE_OPERAND (exp, 0));
+ bc_emit_instruction (drop);
+ bc_expand_expr (TREE_OPERAND (exp, 1));
+ return;
+
+ case COND_EXPR:
+
+ bc_expand_expr (TREE_OPERAND (exp, 0));
+ bc_expand_truth_conversion (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ lab = bc_get_bytecode_label ();
+ bc_emit_bytecode (xjumpifnot);
+ bc_emit_bytecode_labelref (lab);
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+ bc_expand_expr (TREE_OPERAND (exp, 1));
+ lab1 = bc_get_bytecode_label ();
+ bc_emit_bytecode (jump);
+ bc_emit_bytecode_labelref (lab1);
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+
+ bc_emit_bytecode_labeldef (lab);
+ bc_expand_expr (TREE_OPERAND (exp, 2));
+ bc_emit_bytecode_labeldef (lab1);
+ return;
+
+ case TRUTH_ANDIF_EXPR:
+
+ opcode = xjumpifnot;
+ goto andorif;
+
+ case TRUTH_ORIF_EXPR:
+
+ opcode = xjumpif;
+ goto andorif;
+
+ case PLUS_EXPR:
+
+ binoptab = optab_plus_expr;
+ goto binop;
+
+ case MINUS_EXPR:
+
+ binoptab = optab_minus_expr;
+ goto binop;
+
+ case MULT_EXPR:
+
+ binoptab = optab_mult_expr;
+ goto binop;
+
+ case TRUNC_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+
+ binoptab = optab_trunc_div_expr;
+ goto binop;
+
+ case TRUNC_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ case CEIL_MOD_EXPR:
+ case ROUND_MOD_EXPR:
+
+ binoptab = optab_trunc_mod_expr;
+ goto binop;
+
+ case FIX_ROUND_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_CEIL_EXPR:
+ abort (); /* Not used for C. */
+
+ case FIX_TRUNC_EXPR:
+ case FLOAT_EXPR:
+ case MAX_EXPR:
+ case MIN_EXPR:
+ case FFS_EXPR:
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ abort (); /* FIXME */
+
+ case RDIV_EXPR:
+
+ binoptab = optab_rdiv_expr;
+ goto binop;
+
+ case BIT_AND_EXPR:
+
+ binoptab = optab_bit_and_expr;
+ goto binop;
+
+ case BIT_IOR_EXPR:
+
+ binoptab = optab_bit_ior_expr;
+ goto binop;
+
+ case BIT_XOR_EXPR:
+
+ binoptab = optab_bit_xor_expr;
+ goto binop;
+
+ case LSHIFT_EXPR:
+
+ binoptab = optab_lshift_expr;
+ goto binop;
+
+ case RSHIFT_EXPR:
+
+ binoptab = optab_rshift_expr;
+ goto binop;
+
+ case TRUTH_AND_EXPR:
+
+ binoptab = optab_truth_and_expr;
+ goto binop;
+
+ case TRUTH_OR_EXPR:
+
+ binoptab = optab_truth_or_expr;
+ goto binop;
+
+ case LT_EXPR:
+
+ binoptab = optab_lt_expr;
+ goto binop;
+
+ case LE_EXPR:
+
+ binoptab = optab_le_expr;
+ goto binop;
+
+ case GE_EXPR:
+
+ binoptab = optab_ge_expr;
+ goto binop;
+
+ case GT_EXPR:
+
+ binoptab = optab_gt_expr;
+ goto binop;
+
+ case EQ_EXPR:
+
+ binoptab = optab_eq_expr;
+ goto binop;
+
+ case NE_EXPR:
+
+ binoptab = optab_ne_expr;
+ goto binop;
+
+ case NEGATE_EXPR:
+
+ unoptab = optab_negate_expr;
+ goto unop;
+
+ case BIT_NOT_EXPR:
+
+ unoptab = optab_bit_not_expr;
+ goto unop;
+
+ case TRUTH_NOT_EXPR:
+
+ unoptab = optab_truth_not_expr;
+ goto unop;
+
+ case PREDECREMENT_EXPR:
+
+ incroptab = optab_predecrement_expr;
+ goto increment;
+
+ case PREINCREMENT_EXPR:
+
+ incroptab = optab_preincrement_expr;
+ goto increment;
+
+ case POSTDECREMENT_EXPR:
+
+ incroptab = optab_postdecrement_expr;
+ goto increment;
+
+ case POSTINCREMENT_EXPR:
+
+ incroptab = optab_postincrement_expr;
+ goto increment;
+
+ case CONSTRUCTOR:
+
+ bc_expand_constructor (exp);
+ return;
+
+ case ERROR_MARK:
+ case RTL_EXPR:
+
+ return;
+
+ case BIND_EXPR:
+ {
+ tree vars = TREE_OPERAND (exp, 0);
+ int vars_need_expansion = 0;
+
+ /* Need to open a binding contour here because,
+ if there are any cleanups, they must be contained here. */
+ expand_start_bindings (0);
+
+ /* Mark the corresponding BLOCK for output. */
+ if (TREE_OPERAND (exp, 2) != 0)
+ TREE_USED (TREE_OPERAND (exp, 2)) = 1;
+
+ /* If VARS have not yet been expanded, expand them now. */
+ while (vars)
+ {
+ if (DECL_RTL (vars) == 0)
+ {
+ vars_need_expansion = 1;
+ expand_decl (vars);
+ }
+ expand_decl_init (vars);
+ vars = TREE_CHAIN (vars);
+ }
+
+ bc_expand_expr (TREE_OPERAND (exp, 1));
+
+ expand_end_bindings (TREE_OPERAND (exp, 0), 0, 0);
+
+ return;
+ }
+ }
+
+ abort ();
+
+ binop:
+
+ bc_expand_binary_operation (binoptab, TREE_TYPE (exp),
+ TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1));
+ return;
+
+
+ unop:
+
+ bc_expand_unary_operation (unoptab, TREE_TYPE (exp), TREE_OPERAND (exp, 0));
+ return;
+
+
+ andorif:
+
+ bc_expand_expr (TREE_OPERAND (exp, 0));
+ bc_expand_truth_conversion (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ lab = bc_get_bytecode_label ();
+
+ bc_emit_instruction (duplicate);
+ bc_emit_bytecode (opcode);
+ bc_emit_bytecode_labelref (lab);
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+
+ bc_emit_instruction (drop);
+
+ bc_expand_expr (TREE_OPERAND (exp, 1));
+ bc_expand_truth_conversion (TREE_TYPE (TREE_OPERAND (exp, 1)));
+ bc_emit_bytecode_labeldef (lab);
+ return;
+
+
+ increment:
+
+ type = TREE_TYPE (TREE_OPERAND (exp, 0));
+
+ /* Push the quantum. */
+ bc_expand_expr (TREE_OPERAND (exp, 1));
+
+ /* Convert it to the lvalue's type. */
+ bc_expand_conversion (TREE_TYPE (TREE_OPERAND (exp, 1)), type);
+
+ /* Push the address of the lvalue. */
+ bc_expand_expr (build1 (ADDR_EXPR, TYPE_POINTER_TO (type), TREE_OPERAND (exp, 0)));
+
+ /* Perform the actual increment. */
+ bc_expand_increment (incroptab, type);
+ return;
+}
+
+/* Return the alignment in bits of EXP, a pointer-valued expression.
+ But don't return more than MAX_ALIGN no matter what.
+ The alignment returned is, by default, the alignment of the thing that
+ EXP points to (if it is not a POINTER_TYPE, 0 is returned).
+
+ Otherwise, look at the expression to see if we can do better, i.e., if the
+ expression is actually pointing at an object whose alignment is tighter. */
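+
+/* For example, given `(char *) &d' with `double d', the pointee type
+ alone yields only BITS_PER_UNIT, but stripping the NOP_EXPR and
+ looking through the ADDR_EXPR recovers DECL_ALIGN (d). */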
+
+static int
+get_pointer_alignment (exp, max_align)
+ tree exp;
+ unsigned max_align;
+{
+ unsigned align, inner;
+
+ if (TREE_CODE (TREE_TYPE (exp)) != POINTER_TYPE)
+ return 0;
+
+ align = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (exp)));
+ align = MIN (align, max_align);
+
+ while (1)
+ {
+ switch (TREE_CODE (exp))
+ {
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ exp = TREE_OPERAND (exp, 0);
+ if (TREE_CODE (TREE_TYPE (exp)) != POINTER_TYPE)
+ return align;
+ inner = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (exp)));
+ align = MIN (inner, max_align);
+ break;
+
+ case PLUS_EXPR:
+ /* If sum of pointer + int, restrict our maximum alignment to that
+ imposed by the integer. If not, we can't do any better than
+ ALIGN. */
+ if (TREE_CODE (TREE_OPERAND (exp, 1)) != INTEGER_CST)
+ return align;
+
+ while (((TREE_INT_CST_LOW (TREE_OPERAND (exp, 1)) * BITS_PER_UNIT)
+ & (max_align - 1))
+ != 0)
+ max_align >>= 1;
+
+ exp = TREE_OPERAND (exp, 0);
+ break;
+
+ case ADDR_EXPR:
+ /* See what we are pointing at and look at its alignment. */
+ exp = TREE_OPERAND (exp, 0);
+ if (TREE_CODE (exp) == FUNCTION_DECL)
+ align = FUNCTION_BOUNDARY;
+ else if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd')
+ align = DECL_ALIGN (exp);
+#ifdef CONSTANT_ALIGNMENT
+ else if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c')
+ align = CONSTANT_ALIGNMENT (exp, align);
+#endif
+ return MIN (align, max_align);
+
+ default:
+ return align;
+ }
+ }
+}
+
+/* Return the tree node and offset if a given argument corresponds to
+ a string constant. */
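+
+/* For example, `"hello"' (an ADDR_EXPR of a STRING_CST) yields the
+ STRING_CST with *PTR_OFFSET = 0, and `"hello" + 2' yields it with
+ *PTR_OFFSET = 2; anything else yields 0. */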
+
+static tree
+string_constant (arg, ptr_offset)
+ tree arg;
+ tree *ptr_offset;
+{
+ STRIP_NOPS (arg);
+
+ if (TREE_CODE (arg) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST)
+ {
+ *ptr_offset = integer_zero_node;
+ return TREE_OPERAND (arg, 0);
+ }
+ else if (TREE_CODE (arg) == PLUS_EXPR)
+ {
+ tree arg0 = TREE_OPERAND (arg, 0);
+ tree arg1 = TREE_OPERAND (arg, 1);
+
+ STRIP_NOPS (arg0);
+ STRIP_NOPS (arg1);
+
+ if (TREE_CODE (arg0) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (arg0, 0)) == STRING_CST)
+ {
+ *ptr_offset = arg1;
+ return TREE_OPERAND (arg0, 0);
+ }
+ else if (TREE_CODE (arg1) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (arg1, 0)) == STRING_CST)
+ {
+ *ptr_offset = arg0;
+ return TREE_OPERAND (arg1, 0);
+ }
+ }
+
+ return 0;
+}
+
+/* Compute the length of a C string. TREE_STRING_LENGTH is not the right
+ way, because it could contain a zero byte in the middle.
+ TREE_STRING_LENGTH is the size of the character array, not the string.
+
+ Unfortunately, string_constant can't access the values of const char
+ arrays with initializers, so neither can we do so here. */
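+
+/* For example, c_strlen of `"hello" + 1' is 4, while c_strlen of a
+ pointer that is not a constant string yields 0, meaning the length
+ must be computed at run time. */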
+
+static tree
+c_strlen (src)
+ tree src;
+{
+ tree offset_node;
+ int offset, max;
+ char *ptr;
+
+ src = string_constant (src, &offset_node);
+ if (src == 0)
+ return 0;
+ max = TREE_STRING_LENGTH (src);
+ ptr = TREE_STRING_POINTER (src);
+ if (offset_node && TREE_CODE (offset_node) != INTEGER_CST)
+ {
+ /* If the string has an internal zero byte (e.g., "foo\0bar"), we can't
+ compute the offset to the following null if we don't know where to
+ start searching for it. */
+ int i;
+ for (i = 0; i < max; i++)
+ if (ptr[i] == 0)
+ return 0;
+ /* We don't know the starting offset, but we do know that the string
+ has no internal zero bytes. We can assume that the offset falls
+ within the bounds of the string; otherwise, the programmer deserves
+ what he gets. Subtract the offset from the length of the string,
+ and return that. */
+ /* This would perhaps not be valid if we were dealing with named
+ arrays in addition to literal string constants. */
+ return size_binop (MINUS_EXPR, size_int (max), offset_node);
+ }
+
+ /* We have a known offset into the string. Start searching there for
+ a null character. */
+ if (offset_node == 0)
+ offset = 0;
+ else
+ {
+ /* Did we get a long long offset? If so, punt. */
+ if (TREE_INT_CST_HIGH (offset_node) != 0)
+ return 0;
+ offset = TREE_INT_CST_LOW (offset_node);
+ }
+ /* If the offset is known to be out of bounds, warn, and call strlen at
+ runtime. */
+ if (offset < 0 || offset > max)
+ {
+ warning ("offset outside bounds of constant string");
+ return 0;
+ }
+ /* Use strlen to search for the first zero byte. Since any strings
+ constructed with build_string will have nulls appended, we win even
+ if we get handed something like (char[4])"abcd".
+
+ Since OFFSET is our starting index into the string, no further
+ calculation is needed. */
+ return size_int (strlen (ptr + offset));
+}
+
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient
+ (and in mode MODE if that's convenient).
+ SUBTARGET may be used as the target for computing one of EXP's operands.
+ IGNORE is nonzero if the value is to be ignored. */
+
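+/* Nonzero if NODE (a FUNCTION_DECL) was invoked under its
+ `__builtin_' name; such calls are expanded inline below even
+ when not optimizing. */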
+#define CALLED_AS_BUILT_IN(NODE) \
+ (!strncmp (IDENTIFIER_POINTER (DECL_NAME (NODE)), "__builtin_", 10))
+
+static rtx
+expand_builtin (exp, target, subtarget, mode, ignore)
+ tree exp;
+ rtx target;
+ rtx subtarget;
+ enum machine_mode mode;
+ int ignore;
+{
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ tree arglist = TREE_OPERAND (exp, 1);
+ rtx op0;
+ rtx lab1, insns;
+ enum machine_mode value_mode = TYPE_MODE (TREE_TYPE (exp));
+ optab builtin_optab;
+
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_ABS:
+ case BUILT_IN_LABS:
+ case BUILT_IN_FABS:
+ /* build_function_call changes these into ABS_EXPR. */
+ abort ();
+
+ case BUILT_IN_SIN:
+ case BUILT_IN_COS:
+ case BUILT_IN_FSQRT:
+ /* If not optimizing, call the library function. */
+ if (! optimize)
+ break;
+
+ if (arglist == 0
+ /* Arg could be wrong type if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != REAL_TYPE)
+ break;
+
+ /* Stabilize and compute the argument. */
+ if (TREE_CODE (TREE_VALUE (arglist)) != VAR_DECL
+ && TREE_CODE (TREE_VALUE (arglist)) != PARM_DECL)
+ {
+ exp = copy_node (exp);
+ arglist = copy_node (arglist);
+ TREE_OPERAND (exp, 1) = arglist;
+ TREE_VALUE (arglist) = save_expr (TREE_VALUE (arglist));
+ }
+ op0 = expand_expr (TREE_VALUE (arglist), subtarget, VOIDmode, 0);
+
+ /* Make a suitable register to place result in. */
+ target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
+
+ emit_queue ();
+ start_sequence ();
+
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_SIN:
+ builtin_optab = sin_optab; break;
+ case BUILT_IN_COS:
+ builtin_optab = cos_optab; break;
+ case BUILT_IN_FSQRT:
+ builtin_optab = sqrt_optab; break;
+ default:
+ abort ();
+ }
+
+ /* Compute into TARGET.
+ Set TARGET to wherever the result comes back. */
+ target = expand_unop (TYPE_MODE (TREE_TYPE (TREE_VALUE (arglist))),
+ builtin_optab, op0, target, 0);
+
+ /* If we were unable to expand via the builtin, stop the
+ sequence (without outputting the insns) and break, causing
+ a call to the library function. */
+ if (target == 0)
+ {
+ end_sequence ();
+ break;
+ }
+
+ /* Check the results by default. But if flag_fast_math is turned on,
+ then assume sqrt will always be called with valid arguments. */
+
+ if (! flag_fast_math)
+ {
+ /* Don't define the builtin FP instructions
+ if your machine is not IEEE. */
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT)
+ abort ();
+
+ lab1 = gen_label_rtx ();
+
+ /* Test the result; if it is NaN (the only value that compares
+ unequal to itself), set errno=EDOM because
+ the argument was not in the domain. */
+ emit_cmp_insn (target, target, EQ, 0, GET_MODE (target), 0, 0);
+ emit_jump_insn (gen_beq (lab1));
+
+#ifdef TARGET_EDOM
+ {
+#ifdef GEN_ERRNO_RTX
+ rtx errno_rtx = GEN_ERRNO_RTX;
+#else
+ rtx errno_rtx
+ = gen_rtx (MEM, word_mode, gen_rtx (SYMBOL_REF, Pmode, "errno"));
+#endif
+
+ emit_move_insn (errno_rtx, GEN_INT (TARGET_EDOM));
+ }
+#else
+ /* We can't set errno=EDOM directly; let the library call do it.
+ Pop the arguments right away in case the call gets deleted. */
+ NO_DEFER_POP;
+ expand_call (exp, target, 0);
+ OK_DEFER_POP;
+#endif
+
+ emit_label (lab1);
+ }
+
+ /* Output the entire sequence. */
+ insns = get_insns ();
+ end_sequence ();
+ emit_insns (insns);
+
+ return target;
+
+ /* __builtin_apply_args returns a block of memory allocated on
+ the stack into which is stored the arg pointer, structure
+ value address, static chain, and all the registers that might
+ possibly be used in performing a function call. The code is
+ moved to the start of the function so the incoming values are
+ saved. */
+ case BUILT_IN_APPLY_ARGS:
+ /* Don't do __builtin_apply_args more than once in a function.
+ Save the result of the first call and reuse it. */
+ if (apply_args_value != 0)
+ return apply_args_value;
+ {
+ /* When this function is called, it means that registers must be
+ saved on entry to this function. So we migrate the
+ call to the first insn of this function. */
+ rtx temp;
+ rtx seq;
+
+ start_sequence ();
+ temp = expand_builtin_apply_args ();
+ seq = get_insns ();
+ end_sequence ();
+
+ apply_args_value = temp;
+
+ /* Put the sequence after the NOTE that starts the function.
+ If this is inside a SEQUENCE, make the outer-level insn
+ chain current, so the code is placed at the start of the
+ function. */
+ push_topmost_sequence ();
+ emit_insns_before (seq, NEXT_INSN (get_insns ()));
+ pop_topmost_sequence ();
+ return temp;
+ }
+
+ /* __builtin_apply (FUNCTION, ARGUMENTS, ARGSIZE) invokes
+ FUNCTION with a copy of the parameters described by
+ ARGUMENTS, and ARGSIZE. It returns a block of memory
+ allocated on the stack into which is stored all the registers
+ that might possibly be used for returning the result of a
+ function. ARGUMENTS is the value returned by
+ __builtin_apply_args. ARGSIZE is the number of bytes of
+ arguments that must be copied. ??? How should this value be
+ computed? We'll also need a safe worst case value for varargs
+ functions. */
+ case BUILT_IN_APPLY:
+ if (arglist == 0
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE
+ || TREE_CHAIN (arglist) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist)))) != POINTER_TYPE
+ || TREE_CHAIN (TREE_CHAIN (arglist)) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))))) != INTEGER_TYPE)
+ return const0_rtx;
+ else
+ {
+ int i;
+ tree t;
+ rtx ops[3];
+
+ for (t = arglist, i = 0; t; t = TREE_CHAIN (t), i++)
+ ops[i] = expand_expr (TREE_VALUE (t), NULL_RTX, VOIDmode, 0);
+
+ return expand_builtin_apply (ops[0], ops[1], ops[2]);
+ }
+
+ /* __builtin_return (RESULT) causes the function to return the
+ value described by RESULT. RESULT is the address of the block of
+ memory returned by __builtin_apply. */
+ case BUILT_IN_RETURN:
+ if (arglist
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ && TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) == POINTER_TYPE)
+ expand_builtin_return (expand_expr (TREE_VALUE (arglist),
+ NULL_RTX, VOIDmode, 0));
+ return const0_rtx;
+
+ case BUILT_IN_SAVEREGS:
+ /* Don't do __builtin_saveregs more than once in a function.
+ Save the result of the first call and reuse it. */
+ if (saveregs_value != 0)
+ return saveregs_value;
+ {
+ /* When this function is called, it means that registers must be
+ saved on entry to this function. So we migrate the
+ call to the first insn of this function. */
+ rtx temp;
+ rtx seq;
+
+ /* Now really call the function. `expand_call' does not call
+ expand_builtin, so there is no danger of infinite recursion here. */
+ start_sequence ();
+
+#ifdef EXPAND_BUILTIN_SAVEREGS
+ /* Do whatever the machine needs done in this case. */
+ temp = EXPAND_BUILTIN_SAVEREGS (arglist);
+#else
+ /* The register where the function returns its value
+ is likely to have something else in it, such as an argument.
+ So preserve that register around the call. */
+
+ if (value_mode != VOIDmode)
+ {
+ rtx valreg = hard_libcall_value (value_mode);
+ rtx saved_valreg = gen_reg_rtx (value_mode);
+
+ emit_move_insn (saved_valreg, valreg);
+ temp = expand_call (exp, target, ignore);
+ emit_move_insn (valreg, saved_valreg);
+ }
+ else
+ /* Generate the call, putting the value in a pseudo. */
+ temp = expand_call (exp, target, ignore);
+#endif
+
+ seq = get_insns ();
+ end_sequence ();
+
+ saveregs_value = temp;
+
+ /* Put the sequence after the NOTE that starts the function.
+ If this is inside a SEQUENCE, make the outer-level insn
+ chain current, so the code is placed at the start of the
+ function. */
+ push_topmost_sequence ();
+ emit_insns_before (seq, NEXT_INSN (get_insns ()));
+ pop_topmost_sequence ();
+ return temp;
+ }
+
+ /* __builtin_args_info (N) returns word N of the arg space info
+ for the current function. The number and meanings of words
+ are controlled by the definition of CUMULATIVE_ARGS. */
+ case BUILT_IN_ARGS_INFO:
+ {
+ int nwords = sizeof (CUMULATIVE_ARGS) / sizeof (int);
+ int i;
+ int *word_ptr = (int *) &current_function_args_info;
+ tree type, elts, result;
+
+ if (sizeof (CUMULATIVE_ARGS) % sizeof (int) != 0)
+ fatal ("CUMULATIVE_ARGS type defined badly; see %s, line %d",
+ __FILE__, __LINE__);
+
+ if (arglist != 0)
+ {
+ tree arg = TREE_VALUE (arglist);
+ if (TREE_CODE (arg) != INTEGER_CST)
+ error ("argument of `__builtin_args_info' must be constant");
+ else
+ {
+ int wordnum = TREE_INT_CST_LOW (arg);
+
+ if (wordnum < 0 || wordnum >= nwords || TREE_INT_CST_HIGH (arg))
+ error ("argument of `__builtin_args_info' out of range");
+ else
+ return GEN_INT (word_ptr[wordnum]);
+ }
+ }
+ else
+ error ("missing argument in `__builtin_args_info'");
+
+ return const0_rtx;
+
+#if 0
+ for (i = 0; i < nwords; i++)
+ elts = tree_cons (NULL_TREE, build_int_2 (word_ptr[i], 0));
+
+ type = build_array_type (integer_type_node,
+ build_index_type (build_int_2 (nwords, 0)));
+ result = build (CONSTRUCTOR, type, NULL_TREE, nreverse (elts));
+ TREE_CONSTANT (result) = 1;
+ TREE_STATIC (result) = 1;
+ result = build (INDIRECT_REF, build_pointer_type (type), result);
+ TREE_CONSTANT (result) = 1;
+ return expand_expr (result, NULL_RTX, VOIDmode, 0);
+#endif
+ }
+
+ /* Return the address of the first anonymous stack arg. */
+ case BUILT_IN_NEXT_ARG:
+ {
+ tree fntype = TREE_TYPE (current_function_decl);
+
+ if ((TYPE_ARG_TYPES (fntype) == 0
+ || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
+ == void_type_node))
+ && ! current_function_varargs)
+ {
+ error ("`va_start' used in function with fixed args");
+ return const0_rtx;
+ }
+
+ if (arglist)
+ {
+ tree last_parm = tree_last (DECL_ARGUMENTS (current_function_decl));
+ tree arg = TREE_VALUE (arglist);
+
+ /* Strip off all nops for the sake of the comparison. This
+ is not quite the same as STRIP_NOPS. It does more. */
+ while (TREE_CODE (arg) == NOP_EXPR
+ || TREE_CODE (arg) == CONVERT_EXPR
+ || TREE_CODE (arg) == NON_LVALUE_EXPR)
+ arg = TREE_OPERAND (arg, 0);
+ if (arg != last_parm)
+ warning ("second parameter of `va_start' not last named argument");
+ }
+ else
+ /* Evidently an out of date version of <stdarg.h>; can't validate
+ va_start's second argument, but can still work as intended. */
+ warning ("`__builtin_next_arg' called without an argument");
+ }
+
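+ /* The first anonymous arg lives just past the named args: at the
+ incoming argument pointer plus the offset recorded when the
+ named parms were assigned. */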
+ return expand_binop (Pmode, add_optab,
+ current_function_internal_arg_pointer,
+ current_function_arg_offset_rtx,
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+
+ case BUILT_IN_CLASSIFY_TYPE:
+ if (arglist != 0)
+ {
+ tree type = TREE_TYPE (TREE_VALUE (arglist));
+ enum tree_code code = TREE_CODE (type);
+ if (code == VOID_TYPE)
+ return GEN_INT (void_type_class);
+ if (code == INTEGER_TYPE)
+ return GEN_INT (integer_type_class);
+ if (code == CHAR_TYPE)
+ return GEN_INT (char_type_class);
+ if (code == ENUMERAL_TYPE)
+ return GEN_INT (enumeral_type_class);
+ if (code == BOOLEAN_TYPE)
+ return GEN_INT (boolean_type_class);
+ if (code == POINTER_TYPE)
+ return GEN_INT (pointer_type_class);
+ if (code == REFERENCE_TYPE)
+ return GEN_INT (reference_type_class);
+ if (code == OFFSET_TYPE)
+ return GEN_INT (offset_type_class);
+ if (code == REAL_TYPE)
+ return GEN_INT (real_type_class);
+ if (code == COMPLEX_TYPE)
+ return GEN_INT (complex_type_class);
+ if (code == FUNCTION_TYPE)
+ return GEN_INT (function_type_class);
+ if (code == METHOD_TYPE)
+ return GEN_INT (method_type_class);
+ if (code == RECORD_TYPE)
+ return GEN_INT (record_type_class);
+ if (code == UNION_TYPE || code == QUAL_UNION_TYPE)
+ return GEN_INT (union_type_class);
+ if (code == ARRAY_TYPE)
+ {
+ if (TYPE_STRING_FLAG (type))
+ return GEN_INT (string_type_class);
+ else
+ return GEN_INT (array_type_class);
+ }
+ if (code == SET_TYPE)
+ return GEN_INT (set_type_class);
+ if (code == FILE_TYPE)
+ return GEN_INT (file_type_class);
+ if (code == LANG_TYPE)
+ return GEN_INT (lang_type_class);
+ }
+ return GEN_INT (no_type_class);
+
+ case BUILT_IN_CONSTANT_P:
+ if (arglist == 0)
+ return const0_rtx;
+ else
+ return (TREE_CODE_CLASS (TREE_CODE (TREE_VALUE (arglist))) == 'c'
+ ? const1_rtx : const0_rtx);
+
+ case BUILT_IN_FRAME_ADDRESS:
+ /* The argument must be a nonnegative integer constant.
+ It counts the number of frames to scan up the stack.
+ The value is the address of that frame. */
+ case BUILT_IN_RETURN_ADDRESS:
+ /* The argument must be a nonnegative integer constant.
+ It counts the number of frames to scan up the stack.
+ The value is the return address saved in that frame. */
+ if (arglist == 0)
+ /* Warning about missing arg was already issued. */
+ return const0_rtx;
+ else if (TREE_CODE (TREE_VALUE (arglist)) != INTEGER_CST)
+ {
+ error ("invalid arg to `__builtin_return_address'");
+ return const0_rtx;
+ }
+ else if (tree_int_cst_sgn (TREE_VALUE (arglist)) < 0)
+ {
+ error ("invalid arg to `__builtin_return_address'");
+ return const0_rtx;
+ }
+ else
+ {
+ int count = TREE_INT_CST_LOW (TREE_VALUE (arglist));
+ rtx tem = frame_pointer_rtx;
+ int i;
+
+ /* Some machines need special handling before we can access arbitrary
+ frames. For example, on the sparc, we must first flush all
+ register windows to the stack. */
+#ifdef SETUP_FRAME_ADDRESSES
+ SETUP_FRAME_ADDRESSES ();
+#endif
+
+ /* On the sparc, the return address is not in the frame, it is
+ in a register. There is no way to access it off of the current
+ frame pointer, but it can be accessed off the previous frame
+ pointer by reading the value from the register window save
+ area. */
+#ifdef RETURN_ADDR_IN_PREVIOUS_FRAME
+ if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_RETURN_ADDRESS)
+ count--;
+#endif
+
+ /* Scan back COUNT frames to the specified frame. */
+ for (i = 0; i < count; i++)
+ {
+ /* Assume the dynamic chain pointer is in the word that
+ the frame address points to, unless otherwise specified. */
+#ifdef DYNAMIC_CHAIN_ADDRESS
+ tem = DYNAMIC_CHAIN_ADDRESS (tem);
+#endif
+ tem = memory_address (Pmode, tem);
+ tem = copy_to_reg (gen_rtx (MEM, Pmode, tem));
+ }
+
+ /* For __builtin_frame_address, return what we've got. */
+ if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_FRAME_ADDRESS)
+ return tem;
+
+ /* For __builtin_return_address,
+ get the return address from that frame. */
+#ifdef RETURN_ADDR_RTX
+ return RETURN_ADDR_RTX (count, tem);
+#else
+ tem = memory_address (Pmode,
+ plus_constant (tem, GET_MODE_SIZE (Pmode)));
+ return copy_to_reg (gen_rtx (MEM, Pmode, tem));
+#endif
+ }
+
+ case BUILT_IN_ALLOCA:
+ if (arglist == 0
+ /* Arg could be non-integer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != INTEGER_TYPE)
+ break;
+
+ /* Compute the argument. */
+ op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
+
+ /* Allocate the desired space. */
+ return allocate_dynamic_stack_space (op0, target, BITS_PER_UNIT);
+
+ case BUILT_IN_FFS:
+ /* If not optimizing, call the library function. */
+ if (!optimize && ! CALLED_AS_BUILT_IN (fndecl))
+ break;
+
+ if (arglist == 0
+ /* Arg could be non-integer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != INTEGER_TYPE)
+ break;
+
+ /* Compute the argument. */
+ op0 = expand_expr (TREE_VALUE (arglist), subtarget, VOIDmode, 0);
+ /* Compute ffs, into TARGET if possible.
+ Set TARGET to wherever the result comes back. */
+ target = expand_unop (TYPE_MODE (TREE_TYPE (TREE_VALUE (arglist))),
+ ffs_optab, op0, target, 1);
+ if (target == 0)
+ abort ();
+ return target;
+
+ case BUILT_IN_STRLEN:
+ /* If not optimizing, call the library function. */
+ if (!optimize && ! CALLED_AS_BUILT_IN (fndecl))
+ break;
+
+ if (arglist == 0
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE)
+ break;
+ else
+ {
+ tree src = TREE_VALUE (arglist);
+ tree len = c_strlen (src);
+
+ int align
+ = get_pointer_alignment (src, BIGGEST_ALIGNMENT) / BITS_PER_UNIT;
+
+ rtx result, src_rtx, char_rtx;
+ enum machine_mode insn_mode = value_mode, char_mode;
+ enum insn_code icode;
+
+ /* If the length is known, just return it. */
+ if (len != 0)
+ return expand_expr (len, target, mode, 0);
+
+ /* If SRC is not a pointer type, don't do this operation inline. */
+ if (align == 0)
+ break;
+
+ /* Call a function if we can't compute strlen in the right mode. */
+
+ while (insn_mode != VOIDmode)
+ {
+ icode = strlen_optab->handlers[(int) insn_mode].insn_code;
+ if (icode != CODE_FOR_nothing)
+ break;
+
+ insn_mode = GET_MODE_WIDER_MODE (insn_mode);
+ }
+ if (insn_mode == VOIDmode)
+ break;
+
+ /* Make a place to write the result of the instruction. */
+ result = target;
+ if (! (result != 0
+ && GET_CODE (result) == REG
+ && GET_MODE (result) == insn_mode
+ && REGNO (result) >= FIRST_PSEUDO_REGISTER))
+ result = gen_reg_rtx (insn_mode);
+
+ /* Make sure the operands are acceptable to the predicates. */
+
+ if (! (*insn_operand_predicate[(int)icode][0]) (result, insn_mode))
+ result = gen_reg_rtx (insn_mode);
+
+ src_rtx = memory_address (BLKmode,
+ expand_expr (src, NULL_RTX, Pmode,
+ EXPAND_NORMAL));
+ if (! (*insn_operand_predicate[(int)icode][1]) (src_rtx, Pmode))
+ src_rtx = copy_to_mode_reg (Pmode, src_rtx);
+
+ char_rtx = const0_rtx;
+ char_mode = insn_operand_mode[(int)icode][2];
+ if (! (*insn_operand_predicate[(int)icode][2]) (char_rtx, char_mode))
+ char_rtx = copy_to_mode_reg (char_mode, char_rtx);
+
+ emit_insn (GEN_FCN (icode) (result,
+ gen_rtx (MEM, BLKmode, src_rtx),
+ char_rtx, GEN_INT (align)));
+
+ /* Return the value in the proper mode for this function. */
+ if (GET_MODE (result) == value_mode)
+ return result;
+ else if (target != 0)
+ {
+ convert_move (target, result, 0);
+ return target;
+ }
+ else
+ return convert_to_mode (value_mode, result, 0);
+ }
+
+ case BUILT_IN_STRCPY:
+ /* If not optimizing, call the library function. */
+ if (!optimize && ! CALLED_AS_BUILT_IN (fndecl))
+ break;
+
+ if (arglist == 0
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE
+ || TREE_CHAIN (arglist) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist)))) != POINTER_TYPE)
+ break;
+ else
+ {
+ tree len = c_strlen (TREE_VALUE (TREE_CHAIN (arglist)));
+
+ if (len == 0)
+ break;
+
+ len = size_binop (PLUS_EXPR, len, integer_one_node);
+
+ chainon (arglist, build_tree_list (NULL_TREE, len));
+ }
+
+ /* Drops in: falls through into the memcpy case. */
+ case BUILT_IN_MEMCPY:
+ /* If not optimizing, call the library function. */
+ if (!optimize && ! CALLED_AS_BUILT_IN (fndecl))
+ break;
+
+ if (arglist == 0
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE
+ || TREE_CHAIN (arglist) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist)))) != POINTER_TYPE
+ || TREE_CHAIN (TREE_CHAIN (arglist)) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))))) != INTEGER_TYPE)
+ break;
+ else
+ {
+ tree dest = TREE_VALUE (arglist);
+ tree src = TREE_VALUE (TREE_CHAIN (arglist));
+ tree len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+
+ int src_align
+ = get_pointer_alignment (src, BIGGEST_ALIGNMENT) / BITS_PER_UNIT;
+ int dest_align
+ = get_pointer_alignment (dest, BIGGEST_ALIGNMENT) / BITS_PER_UNIT;
+ rtx dest_rtx, dest_mem, src_mem;
+
+ /* If either SRC or DEST is not a pointer type, don't do
+ this operation in-line. */
+ if (src_align == 0 || dest_align == 0)
+ {
+ if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_STRCPY)
+ TREE_CHAIN (TREE_CHAIN (arglist)) = 0;
+ break;
+ }
+
+ dest_rtx = expand_expr (dest, NULL_RTX, Pmode, EXPAND_NORMAL);
+ dest_mem = gen_rtx (MEM, BLKmode,
+ memory_address (BLKmode, dest_rtx));
+ src_mem = gen_rtx (MEM, BLKmode,
+ memory_address (BLKmode,
+ expand_expr (src, NULL_RTX,
+ Pmode,
+ EXPAND_NORMAL)));
+
+ /* Copy word part most expediently. */
+ emit_block_move (dest_mem, src_mem,
+ expand_expr (len, NULL_RTX, VOIDmode, 0),
+ MIN (src_align, dest_align));
+ return dest_rtx;
+ }
+
+/* These comparison functions need an instruction that returns an actual
+ index. An ordinary compare that just sets the condition codes
+ is not enough. */
+#ifdef HAVE_cmpstrsi
+ case BUILT_IN_STRCMP:
+ /* If not optimizing, call the library function. */
+ if (!optimize && ! CALLED_AS_BUILT_IN (fndecl))
+ break;
+
+ if (arglist == 0
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE
+ || TREE_CHAIN (arglist) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist)))) != POINTER_TYPE)
+ break;
+ else if (!HAVE_cmpstrsi)
+ break;
+ {
+ tree arg1 = TREE_VALUE (arglist);
+ tree arg2 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree offset;
+ tree len, len2;
+
+ len = c_strlen (arg1);
+ if (len)
+ len = size_binop (PLUS_EXPR, integer_one_node, len);
+ len2 = c_strlen (arg2);
+ if (len2)
+ len2 = size_binop (PLUS_EXPR, integer_one_node, len2);
+
+ /* If we don't have a constant length for the first, use the length
+ of the second, if we know it. We don't require a constant for
+ this case; some cost analysis could be done if both are available
+ but neither is constant. For now, assume they're equally cheap.
+
+ If both strings have constant lengths, use the smaller. This
+ could arise if optimization results in strcpy being called with
+ two fixed strings, or if the code was machine-generated. We should
+ add some code to the `memcmp' handler below to deal with such
+ situations, someday. */
+ if (!len || TREE_CODE (len) != INTEGER_CST)
+ {
+ if (len2)
+ len = len2;
+ else if (len == 0)
+ break;
+ }
+ else if (len2 && TREE_CODE (len2) == INTEGER_CST)
+ {
+ if (tree_int_cst_lt (len2, len))
+ len = len2;
+ }
+
+ chainon (arglist, build_tree_list (NULL_TREE, len));
+ }
+
+ /* Drops in: falls through into the memcmp case. */
+ case BUILT_IN_MEMCMP:
+ /* If not optimizing, call the library function. */
+ if (!optimize && ! CALLED_AS_BUILT_IN (fndecl))
+ break;
+
+ if (arglist == 0
+ /* Arg could be non-pointer if user redeclared this fcn wrong. */
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE
+ || TREE_CHAIN (arglist) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist)))) != POINTER_TYPE
+ || TREE_CHAIN (TREE_CHAIN (arglist)) == 0
+ || TREE_CODE (TREE_TYPE (TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))))) != INTEGER_TYPE)
+ break;
+ else if (!HAVE_cmpstrsi)
+ break;
+ {
+ tree arg1 = TREE_VALUE (arglist);
+ tree arg2 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ rtx result;
+
+ int arg1_align
+ = get_pointer_alignment (arg1, BIGGEST_ALIGNMENT) / BITS_PER_UNIT;
+ int arg2_align
+ = get_pointer_alignment (arg2, BIGGEST_ALIGNMENT) / BITS_PER_UNIT;
+ enum machine_mode insn_mode
+ = insn_operand_mode[(int) CODE_FOR_cmpstrsi][0];
+
+ /* If we don't have POINTER_TYPE, call the function. */
+ if (arg1_align == 0 || arg2_align == 0)
+ {
+ if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_STRCMP)
+ TREE_CHAIN (TREE_CHAIN (arglist)) = 0;
+ break;
+ }
+
+ /* Make a place to write the result of the instruction. */
+ result = target;
+ if (! (result != 0
+ && GET_CODE (result) == REG && GET_MODE (result) == insn_mode
+ && REGNO (result) >= FIRST_PSEUDO_REGISTER))
+ result = gen_reg_rtx (insn_mode);
+
+ emit_insn (gen_cmpstrsi (result,
+ gen_rtx (MEM, BLKmode,
+ expand_expr (arg1, NULL_RTX, Pmode,
+ EXPAND_NORMAL)),
+ gen_rtx (MEM, BLKmode,
+ expand_expr (arg2, NULL_RTX, Pmode,
+ EXPAND_NORMAL)),
+ expand_expr (len, NULL_RTX, VOIDmode, 0),
+ GEN_INT (MIN (arg1_align, arg2_align))));
+
+ /* Return the value in the proper mode for this function. */
+ mode = TYPE_MODE (TREE_TYPE (exp));
+ if (GET_MODE (result) == mode)
+ return result;
+ else if (target != 0)
+ {
+ convert_move (target, result, 0);
+ return target;
+ }
+ else
+ return convert_to_mode (mode, result, 0);
+ }
+#else
+ case BUILT_IN_STRCMP:
+ case BUILT_IN_MEMCMP:
+ break;
+#endif
+
+ default: /* just do library call, if unknown builtin */
+ error ("built-in function `%s' not currently supported",
+ IDENTIFIER_POINTER (DECL_NAME (fndecl)));
+ }
+
+ /* The switch statement above can drop through to cause the function
+ to be called normally. */
+
+ return expand_call (exp, target, ignore);
+}
+
+/* Built-in functions to perform an untyped call and return. */
+
+/* For each register that may be used for calling a function, this
+ gives a mode used to copy the register's value. VOIDmode indicates
+ the register is not used for calling a function. If the machine
+ has register windows, this gives only the outbound registers.
+ INCOMING_REGNO gives the corresponding inbound register. */
+static enum machine_mode apply_args_mode[FIRST_PSEUDO_REGISTER];
+
+/* For each register that may be used for returning values, this gives
+ a mode used to copy the register's value. VOIDmode indicates the
+ register is not used for returning values. If the machine has
+ register windows, this gives only the outbound registers.
+ INCOMING_REGNO gives the corresponding inbound register. */
+static enum machine_mode apply_result_mode[FIRST_PSEUDO_REGISTER];
+
+/* For each register that may be used for calling a function, this
+ gives the offset of that register into the block returned by
+ __builtin_apply_args. 0 indicates that the register is not
+ used for calling a function. */
+static int apply_args_reg_offset[FIRST_PSEUDO_REGISTER];
+
+/* Return the offset of register REGNO into the block returned by
+ __builtin_apply_args. This is not declared static, since it is
+ needed in objc-act.c. */
+
+int
+apply_args_register_offset (regno)
+ int regno;
+{
+ apply_args_size ();
+
+ /* Arguments are always put in outgoing registers (in the argument
+ block) when that makes sense. */
+#ifdef OUTGOING_REGNO
+ regno = OUTGOING_REGNO(regno);
+#endif
+ return apply_args_reg_offset[regno];
+}
+
+/* Return the size required for the block returned by __builtin_apply_args,
+ and initialize apply_args_mode. */
+
+static int
+apply_args_size ()
+{
+ static int size = -1;
+ int align, regno;
+ enum machine_mode mode;
+
+ /* The values computed by this function never change. */
+ if (size < 0)
+ {
+ /* The first value is the incoming arg-pointer. */
+ size = GET_MODE_SIZE (Pmode);
+
+ /* The second value is the structure value address unless this is
+ passed as an "invisible" first argument. */
+ if (struct_value_rtx)
+ size += GET_MODE_SIZE (Pmode);
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (FUNCTION_ARG_REGNO_P (regno))
+ {
+ /* Search for the proper mode for copying this register's
+ value. I'm not sure this is right, but it works so far. */
+ enum machine_mode best_mode = VOIDmode;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (HARD_REGNO_MODE_OK (regno, mode)
+ && HARD_REGNO_NREGS (regno, mode) == 1)
+ best_mode = mode;
+
+ if (best_mode == VOIDmode)
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (HARD_REGNO_MODE_OK (regno, mode)
+ && (mov_optab->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing))
+ best_mode = mode;
+
+ mode = best_mode;
+ if (mode == VOIDmode)
+ abort ();
+
+ align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (size % align != 0)
+ size = CEIL (size, align) * align;
+ apply_args_reg_offset[regno] = size;
+ size += GET_MODE_SIZE (mode);
+ apply_args_mode[regno] = mode;
+ }
+ else
+ {
+ apply_args_mode[regno] = VOIDmode;
+ apply_args_reg_offset[regno] = 0;
+ }
+ }
+ return size;
+}
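+
+/* The rounding idiom used above, isolated as a sketch: advance SIZE to
+   the next multiple of ALIGN, leaving exact multiples alone; e.g. 9
+   rounds up to 12 when ALIGN is 4, while 8 is unchanged.  */
+#if 0
+static int
+round_up (size, align)
+     int size, align;
+{
+  if (size % align != 0)
+    size = ((size + align - 1) / align) * align;
+  return size;
+}
+#endif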
+
+/* Return the size required for the block returned by __builtin_apply,
+ and initialize apply_result_mode. */
+
+static int
+apply_result_size ()
+{
+ static int size = -1;
+ int align, regno;
+ enum machine_mode mode;
+
+ /* The values computed by this function never change. */
+ if (size < 0)
+ {
+ size = 0;
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (FUNCTION_VALUE_REGNO_P (regno))
+ {
+ /* Search for the proper mode for copying this register's
+ value. I'm not sure this is right, but it works so far. */
+ enum machine_mode best_mode = VOIDmode;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ mode != TImode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (HARD_REGNO_MODE_OK (regno, mode))
+ best_mode = mode;
+
+ if (best_mode == VOIDmode)
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (HARD_REGNO_MODE_OK (regno, mode)
+ && (mov_optab->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing))
+ best_mode = mode;
+
+ mode = best_mode;
+ if (mode == VOIDmode)
+ abort ();
+
+ align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (size % align != 0)
+ size = CEIL (size, align) * align;
+ size += GET_MODE_SIZE (mode);
+ apply_result_mode[regno] = mode;
+ }
+ else
+ apply_result_mode[regno] = VOIDmode;
+
+ /* Allow targets that use untyped_call and untyped_return to override
+ the size so that machine-specific information can be stored here. */
+#ifdef APPLY_RESULT_SIZE
+ size = APPLY_RESULT_SIZE;
+#endif
+ }
+ return size;
+}
+
+#if defined (HAVE_untyped_call) || defined (HAVE_untyped_return)
+/* Create a vector describing the result block RESULT. If SAVEP is true,
+ the result block is used to save the values; otherwise it is used to
+ restore the values. */
+
+static rtx
+result_vector (savep, result)
+ int savep;
+ rtx result;
+{
+ int regno, size, align, nelts;
+ enum machine_mode mode;
+ rtx reg, mem;
+ rtx *savevec = (rtx *) alloca (FIRST_PSEUDO_REGISTER * sizeof (rtx));
+
+ size = nelts = 0;
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((mode = apply_result_mode[regno]) != VOIDmode)
+ {
+ align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (size % align != 0)
+ size = CEIL (size, align) * align;
+ reg = gen_rtx (REG, mode, savep ? INCOMING_REGNO (regno) : regno);
+ mem = change_address (result, mode,
+ plus_constant (XEXP (result, 0), size));
+ savevec[nelts++] = (savep
+ ? gen_rtx (SET, VOIDmode, mem, reg)
+ : gen_rtx (SET, VOIDmode, reg, mem));
+ size += GET_MODE_SIZE (mode);
+ }
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec_v (nelts, savevec));
+}
+#endif /* HAVE_untyped_call or HAVE_untyped_return */
+
+/* Save the state required to perform an untyped call with the same
+ arguments as were passed to the current function. */
+
+static rtx
+expand_builtin_apply_args ()
+{
+ rtx registers;
+ int size, align, regno;
+ enum machine_mode mode;
+
+ /* Create a block where the arg-pointer, structure value address,
+ and argument registers can be saved. */
+ registers = assign_stack_local (BLKmode, apply_args_size (), -1);
+
+ /* Walk past the arg-pointer and structure value address. */
+ size = GET_MODE_SIZE (Pmode);
+ if (struct_value_rtx)
+ size += GET_MODE_SIZE (Pmode);
+
+ /* Save each register used in calling a function to the block. */
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((mode = apply_args_mode[regno]) != VOIDmode)
+ {
+ align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (size % align != 0)
+ size = CEIL (size, align) * align;
+ emit_move_insn (change_address (registers, mode,
+ plus_constant (XEXP (registers, 0),
+ size)),
+ gen_rtx (REG, mode, INCOMING_REGNO (regno)));
+ size += GET_MODE_SIZE (mode);
+ }
+
+ /* Save the arg pointer to the block. */
+ emit_move_insn (change_address (registers, Pmode, XEXP (registers, 0)),
+ copy_to_reg (virtual_incoming_args_rtx));
+ size = GET_MODE_SIZE (Pmode);
+
+ /* Save the structure value address unless this is passed as an
+ "invisible" first argument. */
+ if (struct_value_incoming_rtx)
+ {
+ emit_move_insn (change_address (registers, Pmode,
+ plus_constant (XEXP (registers, 0),
+ size)),
+ copy_to_reg (struct_value_incoming_rtx));
+ size += GET_MODE_SIZE (Pmode);
+ }
+
+ /* Return the address of the block. */
+ return copy_addr_to_reg (XEXP (registers, 0));
+}
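+
+/* The shape of the block just built, as a hypothetical struct for a
+   machine with an arg pointer, a structure-value register and two
+   word-sized argument registers; real offsets and modes come from
+   apply_args_size and apply_args_mode, with per-register padding.  */
+#if 0
+struct apply_args_block
+{
+  char *arg_pointer;		/* always saved first */
+  char *struct_value;		/* present only if struct_value_rtx */
+  int arg_reg[2];		/* one slot per argument register */
+};
+#endif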
+
+/* Perform an untyped call and save the state required to perform an
+ untyped return of whatever value was returned by the given function. */
+
+static rtx
+expand_builtin_apply (function, arguments, argsize)
+ rtx function, arguments, argsize;
+{
+ int size, align, regno;
+ enum machine_mode mode;
+ rtx incoming_args, result, reg, dest, call_insn;
+ rtx old_stack_level = 0;
+ rtx call_fusage = 0;
+
+ /* Create a block where the return registers can be saved. */
+ result = assign_stack_local (BLKmode, apply_result_size (), -1);
+
+ /* ??? The argsize value should be adjusted here. */
+
+ /* Fetch the arg pointer from the ARGUMENTS block. */
+ incoming_args = gen_reg_rtx (Pmode);
+ emit_move_insn (incoming_args,
+ gen_rtx (MEM, Pmode, arguments));
+#ifndef STACK_GROWS_DOWNWARD
+ incoming_args = expand_binop (Pmode, sub_optab, incoming_args, argsize,
+ incoming_args, 0, OPTAB_LIB_WIDEN);
+#endif
+
+ /* Perform postincrements before actually calling the function. */
+ emit_queue ();
+
+ /* Push a new argument block and copy the arguments. */
+ do_pending_stack_adjust ();
+ emit_stack_save (SAVE_BLOCK, &old_stack_level, NULL_RTX);
+
+ /* Push a block of memory onto the stack to store the memory arguments.
+ Save the address in a register, and copy the memory arguments. ??? I
+     haven't figured out how the calling convention macros affect this,
+     but it's likely that the source and/or destination addresses in
+     the block copy will need updating in machine-specific ways.  */
+ dest = copy_addr_to_reg (push_block (argsize, 0, 0));
+ emit_block_move (gen_rtx (MEM, BLKmode, dest),
+ gen_rtx (MEM, BLKmode, incoming_args),
+ argsize,
+ PARM_BOUNDARY / BITS_PER_UNIT);
+
+ /* Refer to the argument block. */
+ apply_args_size ();
+ arguments = gen_rtx (MEM, BLKmode, arguments);
+
+ /* Walk past the arg-pointer and structure value address. */
+ size = GET_MODE_SIZE (Pmode);
+ if (struct_value_rtx)
+ size += GET_MODE_SIZE (Pmode);
+
+ /* Restore each of the registers previously saved. Make USE insns
+ for each of these registers for use in making the call. */
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((mode = apply_args_mode[regno]) != VOIDmode)
+ {
+ align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (size % align != 0)
+ size = CEIL (size, align) * align;
+ reg = gen_rtx (REG, mode, regno);
+ emit_move_insn (reg,
+ change_address (arguments, mode,
+ plus_constant (XEXP (arguments, 0),
+ size)));
+
+ use_reg (&call_fusage, reg);
+ size += GET_MODE_SIZE (mode);
+ }
+
+ /* Restore the structure value address unless this is passed as an
+ "invisible" first argument. */
+ size = GET_MODE_SIZE (Pmode);
+ if (struct_value_rtx)
+ {
+ rtx value = gen_reg_rtx (Pmode);
+ emit_move_insn (value,
+ change_address (arguments, Pmode,
+ plus_constant (XEXP (arguments, 0),
+ size)));
+ emit_move_insn (struct_value_rtx, value);
+ if (GET_CODE (struct_value_rtx) == REG)
+ use_reg (&call_fusage, struct_value_rtx);
+ size += GET_MODE_SIZE (Pmode);
+ }
+
+ /* All arguments and registers used for the call are set up by now! */
+ function = prepare_call_address (function, NULL_TREE, &call_fusage, 0);
+
+ /* Ensure address is valid. SYMBOL_REF is already valid, so no need,
+ and we don't want to load it into a register as an optimization,
+ because prepare_call_address already did it if it should be done. */
+ if (GET_CODE (function) != SYMBOL_REF)
+ function = memory_address (FUNCTION_MODE, function);
+
+ /* Generate the actual call instruction and save the return value. */
+#ifdef HAVE_untyped_call
+ if (HAVE_untyped_call)
+ emit_call_insn (gen_untyped_call (gen_rtx (MEM, FUNCTION_MODE, function),
+ result, result_vector (1, result)));
+ else
+#endif
+#ifdef HAVE_call_value
+ if (HAVE_call_value)
+ {
+ rtx valreg = 0;
+
+ /* Locate the unique return register. It is not possible to
+ express a call that sets more than one return register using
+ call_value; use untyped_call for that. In fact, untyped_call
+ only needs to save the return registers in the given block. */
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((mode = apply_result_mode[regno]) != VOIDmode)
+ {
+ if (valreg)
+ abort (); /* HAVE_untyped_call required. */
+ valreg = gen_rtx (REG, mode, regno);
+ }
+
+ emit_call_insn (gen_call_value (valreg,
+ gen_rtx (MEM, FUNCTION_MODE, function),
+ const0_rtx, NULL_RTX, const0_rtx));
+
+ emit_move_insn (change_address (result, GET_MODE (valreg),
+ XEXP (result, 0)),
+ valreg);
+ }
+ else
+#endif
+ abort ();
+
+ /* Find the CALL insn we just emitted. */
+ for (call_insn = get_last_insn ();
+ call_insn && GET_CODE (call_insn) != CALL_INSN;
+ call_insn = PREV_INSN (call_insn))
+ ;
+
+ if (! call_insn)
+ abort ();
+
+ /* Put the register usage information on the CALL. If there is already
+ some usage information, put ours at the end. */
+ if (CALL_INSN_FUNCTION_USAGE (call_insn))
+ {
+ rtx link;
+
+ for (link = CALL_INSN_FUNCTION_USAGE (call_insn); XEXP (link, 1) != 0;
+ link = XEXP (link, 1))
+ ;
+
+ XEXP (link, 1) = call_fusage;
+ }
+ else
+ CALL_INSN_FUNCTION_USAGE (call_insn) = call_fusage;
+
+ /* Restore the stack. */
+ emit_stack_restore (SAVE_BLOCK, old_stack_level, NULL_RTX);
+
+ /* Return the address of the result block. */
+ return copy_addr_to_reg (XEXP (result, 0));
+}
+
+/* Perform an untyped return. */
+
+static void
+expand_builtin_return (result)
+ rtx result;
+{
+ int size, align, regno;
+ enum machine_mode mode;
+ rtx reg;
+ rtx call_fusage = 0;
+
+ apply_result_size ();
+ result = gen_rtx (MEM, BLKmode, result);
+
+#ifdef HAVE_untyped_return
+ if (HAVE_untyped_return)
+ {
+ emit_jump_insn (gen_untyped_return (result, result_vector (0, result)));
+ emit_barrier ();
+ return;
+ }
+#endif
+
+ /* Restore the return value and note that each value is used. */
+ size = 0;
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((mode = apply_result_mode[regno]) != VOIDmode)
+ {
+ align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (size % align != 0)
+ size = CEIL (size, align) * align;
+ reg = gen_rtx (REG, mode, INCOMING_REGNO (regno));
+ emit_move_insn (reg,
+ change_address (result, mode,
+ plus_constant (XEXP (result, 0),
+ size)));
+
+ push_to_sequence (call_fusage);
+ emit_insn (gen_rtx (USE, VOIDmode, reg));
+ call_fusage = get_insns ();
+ end_sequence ();
+ size += GET_MODE_SIZE (mode);
+ }
+
+ /* Put the USE insns before the return. */
+ emit_insns (call_fusage);
+
+  /* Return whatever value was restored by jumping directly to the end
+ of the function. */
+ expand_null_return ();
+}
+
+/* Expand code for a post- or pre- increment or decrement
+ and return the RTX for the result.
+ POST is 1 for postinc/decrements and 0 for preinc/decrements. */
+
+static rtx
+expand_increment (exp, post)
+ register tree exp;
+ int post;
+{
+ register rtx op0, op1;
+ register rtx temp, value;
+ register tree incremented = TREE_OPERAND (exp, 0);
+ optab this_optab = add_optab;
+ int icode;
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ int op0_is_copy = 0;
+ int single_insn = 0;
+ /* 1 means we can't store into OP0 directly,
+ because it is a subreg narrower than a word,
+ and we don't dare clobber the rest of the word. */
+ int bad_subreg = 0;
+
+ if (output_bytecode)
+ {
+ bc_expand_expr (exp);
+ return NULL_RTX;
+ }
+
+ /* Stabilize any component ref that might need to be
+ evaluated more than once below. */
+ if (!post
+ || TREE_CODE (incremented) == BIT_FIELD_REF
+ || (TREE_CODE (incremented) == COMPONENT_REF
+ && (TREE_CODE (TREE_OPERAND (incremented, 0)) != INDIRECT_REF
+ || DECL_BIT_FIELD (TREE_OPERAND (incremented, 1)))))
+ incremented = stabilize_reference (incremented);
+ /* Nested *INCREMENT_EXPRs can happen in C++. We must force innermost
+ ones into save exprs so that they don't accidentally get evaluated
+ more than once by the code below. */
+ if (TREE_CODE (incremented) == PREINCREMENT_EXPR
+ || TREE_CODE (incremented) == PREDECREMENT_EXPR)
+ incremented = save_expr (incremented);
+
+ /* Compute the operands as RTX.
+ Note whether OP0 is the actual lvalue or a copy of it:
+ I believe it is a copy iff it is a register or subreg
+ and insns were generated in computing it. */
+
+ temp = get_last_insn ();
+ op0 = expand_expr (incremented, NULL_RTX, VOIDmode, 0);
+
+ /* If OP0 is a SUBREG made for a promoted variable, we cannot increment
+     in place but instead must do sign- or zero-extension during assignment,
+ so we copy it into a new register and let the code below use it as
+ a copy.
+
+     Note that we can safely modify this SUBREG since it is known not to be
+ shared (it was made by the expand_expr call above). */
+
+ if (GET_CODE (op0) == SUBREG && SUBREG_PROMOTED_VAR_P (op0))
+ {
+ if (post)
+ SUBREG_REG (op0) = copy_to_reg (SUBREG_REG (op0));
+ else
+ bad_subreg = 1;
+ }
+ else if (GET_CODE (op0) == SUBREG
+ && GET_MODE_BITSIZE (GET_MODE (op0)) < BITS_PER_WORD)
+ {
+ /* We cannot increment this SUBREG in place. If we are
+ post-incrementing, get a copy of the old value. Otherwise,
+ just mark that we cannot increment in place. */
+ if (post)
+ op0 = copy_to_reg (op0);
+ else
+ bad_subreg = 1;
+ }
+
+ op0_is_copy = ((GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
+ && temp != get_last_insn ());
+ op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+
+ /* Decide whether incrementing or decrementing. */
+ if (TREE_CODE (exp) == POSTDECREMENT_EXPR
+ || TREE_CODE (exp) == PREDECREMENT_EXPR)
+ this_optab = sub_optab;
+
+ /* Convert decrement by a constant into a negative increment. */
+ if (this_optab == sub_optab
+ && GET_CODE (op1) == CONST_INT)
+ {
+ op1 = GEN_INT (- INTVAL (op1));
+ this_optab = add_optab;
+ }
+
+ /* For a preincrement, see if we can do this with a single instruction. */
+ if (!post)
+ {
+ icode = (int) this_optab->handlers[(int) mode].insn_code;
+ if (icode != (int) CODE_FOR_nothing
+ /* Make sure that OP0 is valid for operands 0 and 1
+ of the insn we want to queue. */
+ && (*insn_operand_predicate[icode][0]) (op0, mode)
+ && (*insn_operand_predicate[icode][1]) (op0, mode)
+ && (*insn_operand_predicate[icode][2]) (op1, mode))
+ single_insn = 1;
+ }
+
+ /* If OP0 is not the actual lvalue, but rather a copy in a register,
+ then we cannot just increment OP0. We must therefore contrive to
+ increment the original value. Then, for postincrement, we can return
+ OP0 since it is a copy of the old value. For preincrement, expand here
+ unless we can do it with a single insn.
+
+ Likewise if storing directly into OP0 would clobber high bits
+ we need to preserve (bad_subreg). */
+ if (op0_is_copy || (!post && !single_insn) || bad_subreg)
+ {
+ /* This is the easiest way to increment the value wherever it is.
+ Problems with multiple evaluation of INCREMENTED are prevented
+ because either (1) it is a component_ref or preincrement,
+ in which case it was stabilized above, or (2) it is an array_ref
+ with constant index in an array in a register, which is
+ safe to reevaluate. */
+ tree newexp = build (((TREE_CODE (exp) == POSTDECREMENT_EXPR
+ || TREE_CODE (exp) == PREDECREMENT_EXPR)
+ ? MINUS_EXPR : PLUS_EXPR),
+ TREE_TYPE (exp),
+ incremented,
+ TREE_OPERAND (exp, 1));
+ temp = expand_assignment (incremented, newexp, ! post, 0);
+ return post ? op0 : temp;
+ }
+
+ if (post)
+ {
+ /* We have a true reference to the value in OP0.
+ If there is an insn to add or subtract in this mode, queue it.
+ Queueing the increment insn avoids the register shuffling
+ that often results if we must increment now and first save
+ the old value for subsequent use. */
+
+#if 0 /* Turned off to avoid making extra insn for indexed memref. */
+ op0 = stabilize (op0);
+#endif
+
+ icode = (int) this_optab->handlers[(int) mode].insn_code;
+ if (icode != (int) CODE_FOR_nothing
+ /* Make sure that OP0 is valid for operands 0 and 1
+ of the insn we want to queue. */
+ && (*insn_operand_predicate[icode][0]) (op0, mode)
+ && (*insn_operand_predicate[icode][1]) (op0, mode))
+ {
+ if (! (*insn_operand_predicate[icode][2]) (op1, mode))
+ op1 = force_reg (mode, op1);
+
+ return enqueue_insn (op0, GEN_FCN (icode) (op0, op0, op1));
+ }
+ }
+
+ /* Preincrement, or we can't increment with one simple insn. */
+ if (post)
+ /* Save a copy of the value before inc or dec, to return it later. */
+ temp = value = copy_to_reg (op0);
+ else
+ /* Arrange to return the incremented value. */
+ /* Copy the rtx because expand_binop will protect from the queue,
+ and the results of that would be invalid for us to return
+ if our caller does emit_queue before using our result. */
+ temp = copy_rtx (value = op0);
+
+ /* Increment however we can. */
+ op1 = expand_binop (mode, this_optab, value, op1, op0,
+ TREE_UNSIGNED (TREE_TYPE (exp)), OPTAB_LIB_WIDEN);
+ /* Make sure the value is stored into OP0. */
+ if (op1 != op0)
+ emit_move_insn (op0, op1);
+
+ return temp;
+}
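+
+/* What the POST case above must preserve, as a sketch: OLD receives the
+   value from before the increment, which is why OP0 (or a copy of it)
+   is returned while the store itself may sit on the queue.  */
+#if 0
+int
+post_example (p)
+     int *p;
+{
+  int old = (*p)++;
+  return old + *p;	/* twice the original value, plus one */
+}
+#endif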
+
+/* Expand all function calls contained within EXP, innermost ones first.
+ But don't look within expressions that have sequence points.
+ For each CALL_EXPR, record the rtx for its value
+ in the CALL_EXPR_RTL field. */
+
+static void
+preexpand_calls (exp)
+ tree exp;
+{
+ register int nops, i;
+ int type = TREE_CODE_CLASS (TREE_CODE (exp));
+
+ if (! do_preexpand_calls)
+ return;
+
+ /* Only expressions and references can contain calls. */
+
+ if (type != 'e' && type != '<' && type != '1' && type != '2' && type != 'r')
+ return;
+
+ switch (TREE_CODE (exp))
+ {
+ case CALL_EXPR:
+ /* Do nothing if already expanded. */
+ if (CALL_EXPR_RTL (exp) != 0)
+ return;
+
+ /* Do nothing to built-in functions. */
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) != ADDR_EXPR
+ || TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) != FUNCTION_DECL
+ || ! DECL_BUILT_IN (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))
+ CALL_EXPR_RTL (exp) = expand_call (exp, NULL_RTX, 0);
+ return;
+
+ case COMPOUND_EXPR:
+ case COND_EXPR:
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ /* If we find one of these, then we can be sure
+ the adjust will be done for it (since it makes jumps).
+ Do it now, so that if this is inside an argument
+ of a function, we don't get the stack adjustment
+ after some other args have already been pushed. */
+ do_pending_stack_adjust ();
+ return;
+
+ case BLOCK:
+ case RTL_EXPR:
+ case WITH_CLEANUP_EXPR:
+ return;
+
+ case SAVE_EXPR:
+ if (SAVE_EXPR_RTL (exp) != 0)
+ return;
+ }
+
+ nops = tree_code_length[(int) TREE_CODE (exp)];
+ for (i = 0; i < nops; i++)
+ if (TREE_OPERAND (exp, i) != 0)
+ {
+ type = TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, i)));
+ if (type == 'e' || type == '<' || type == '1' || type == '2'
+ || type == 'r')
+ preexpand_calls (TREE_OPERAND (exp, i));
+ }
+}
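+
+/* Why the TRUTH_ANDIF_EXPR case above does not recurse into its operands,
+   as a sketch: pre-expanding g () would call it unconditionally, yet the
+   language calls it only when f () returns nonzero.  */
+#if 0
+int f (), g ();
+
+int
+andif_example ()
+{
+  return f () && g ();
+}
+#endif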
+
+/* At the start of a function, record that we have no previously-pushed
+ arguments waiting to be popped. */
+
+void
+init_pending_stack_adjust ()
+{
+ pending_stack_adjust = 0;
+}
+
+/* When exiting from function, if safe, clear out any pending stack adjust
+ so the adjustment won't get done. */
+
+void
+clear_pending_stack_adjust ()
+{
+#ifdef EXIT_IGNORE_STACK
+ if (! flag_omit_frame_pointer && EXIT_IGNORE_STACK
+ && ! (DECL_INLINE (current_function_decl) && ! flag_no_inline)
+ && ! flag_inline_functions)
+ pending_stack_adjust = 0;
+#endif
+}
+
+/* Pop any previously-pushed arguments that have not been popped yet. */
+
+void
+do_pending_stack_adjust ()
+{
+ if (inhibit_defer_pop == 0)
+ {
+ if (pending_stack_adjust != 0)
+ adjust_stack (GEN_INT (pending_stack_adjust));
+ pending_stack_adjust = 0;
+ }
+}
+
+/* Defer the expansion of all cleanups up to OLD_CLEANUPS.
+ Returns the cleanups to be performed. */
+
+static tree
+defer_cleanups_to (old_cleanups)
+ tree old_cleanups;
+{
+ tree new_cleanups = NULL_TREE;
+ tree cleanups = cleanups_this_call;
+ tree last = NULL_TREE;
+
+ while (cleanups_this_call != old_cleanups)
+ {
+      (*interim_eh_hook) (TREE_VALUE (cleanups_this_call));
+      last = cleanups_this_call;
+      cleanups_this_call = TREE_CHAIN (cleanups_this_call);
+ }
+
+ if (last)
+ {
+ /* Remove the list from the chain of cleanups. */
+ TREE_CHAIN (last) = NULL_TREE;
+
+      /* Reverse them so that we can build them in the right order.  */
+ cleanups = nreverse (cleanups);
+
+ while (cleanups)
+ {
+ if (new_cleanups)
+ new_cleanups = build (COMPOUND_EXPR, TREE_TYPE (new_cleanups),
+ TREE_VALUE (cleanups), new_cleanups);
+ else
+ new_cleanups = TREE_VALUE (cleanups);
+
+ cleanups = TREE_CHAIN (cleanups);
+ }
+ }
+
+ return new_cleanups;
+}
+
+/* Expand all cleanups up to OLD_CLEANUPS.
+ Needed here, and also for language-dependent calls. */
+
+void
+expand_cleanups_to (old_cleanups)
+ tree old_cleanups;
+{
+ while (cleanups_this_call != old_cleanups)
+ {
+ (*interim_eh_hook) (TREE_VALUE (cleanups_this_call));
+ expand_expr (TREE_VALUE (cleanups_this_call), const0_rtx, VOIDmode, 0);
+ cleanups_this_call = TREE_CHAIN (cleanups_this_call);
+ }
+}
+
+/* Expand conditional expressions. */
+
+/* Generate code to evaluate EXP and jump to LABEL if the value is zero.
+ LABEL is an rtx of code CODE_LABEL, in this function and all the
+ functions here. */
+
+void
+jumpifnot (exp, label)
+ tree exp;
+ rtx label;
+{
+ do_jump (exp, label, NULL_RTX);
+}
+
+/* Generate code to evaluate EXP and jump to LABEL if the value is nonzero. */
+
+void
+jumpif (exp, label)
+ tree exp;
+ rtx label;
+{
+ do_jump (exp, NULL_RTX, label);
+}
+
+/* Generate code to evaluate EXP and jump to IF_FALSE_LABEL if
+ the result is zero, or IF_TRUE_LABEL if the result is one.
+ Either of IF_FALSE_LABEL and IF_TRUE_LABEL may be zero,
+ meaning fall through in that case.
+
+ do_jump always does any pending stack adjust except when it does not
+ actually perform a jump. An example where there is no jump
+ is when EXP is `(foo (), 0)' and IF_FALSE_LABEL is null.
+
+ This function is responsible for optimizing cases such as
+ &&, || and comparison operators in EXP. */
+
+void
+do_jump (exp, if_false_label, if_true_label)
+ tree exp;
+ rtx if_false_label, if_true_label;
+{
+ register enum tree_code code = TREE_CODE (exp);
+ /* Some cases need to create a label to jump to
+ in order to properly fall through.
+ These cases set DROP_THROUGH_LABEL nonzero. */
+ rtx drop_through_label = 0;
+ rtx temp;
+ rtx comparison = 0;
+ int i;
+ tree type;
+ enum machine_mode mode;
+
+ emit_queue ();
+
+ switch (code)
+ {
+ case ERROR_MARK:
+ break;
+
+ case INTEGER_CST:
+ temp = integer_zerop (exp) ? if_false_label : if_true_label;
+ if (temp)
+ emit_jump (temp);
+ break;
+
+#if 0
+ /* This is not true with #pragma weak */
+ case ADDR_EXPR:
+ /* The address of something can never be zero. */
+ if (if_true_label)
+ emit_jump (if_true_label);
+ break;
+#endif
+
+ case NOP_EXPR:
+ if (TREE_CODE (TREE_OPERAND (exp, 0)) == COMPONENT_REF
+ || TREE_CODE (TREE_OPERAND (exp, 0)) == BIT_FIELD_REF
+ || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_REF)
+ goto normal;
+ case CONVERT_EXPR:
+ /* If we are narrowing the operand, we have to do the compare in the
+ narrower mode. */
+ if ((TYPE_PRECISION (TREE_TYPE (exp))
+ < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ goto normal;
+ case NON_LVALUE_EXPR:
+ case REFERENCE_EXPR:
+ case ABS_EXPR:
+ case NEGATE_EXPR:
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ /* These cannot change zero->non-zero or vice versa. */
+ do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
+ break;
+
+#if 0
+  /* This is never fewer insns than evaluating the PLUS_EXPR followed by
+     a test, and can be longer if the test is eliminated.  */
+ case PLUS_EXPR:
+ /* Reduce to minus. */
+ exp = build (MINUS_EXPR, TREE_TYPE (exp),
+ TREE_OPERAND (exp, 0),
+ fold (build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (exp, 1)),
+ TREE_OPERAND (exp, 1))));
+ /* Process as MINUS. */
+#endif
+
+ case MINUS_EXPR:
+ /* Non-zero iff operands of minus differ. */
+ comparison = compare (build (NE_EXPR, TREE_TYPE (exp),
+ TREE_OPERAND (exp, 0),
+ TREE_OPERAND (exp, 1)),
+ NE, NE);
+ break;
+
+ case BIT_AND_EXPR:
+ /* If we are AND'ing with a small constant, do this comparison in the
+ smallest type that fits. If the machine doesn't have comparisons
+ that small, it will be converted back to the wider comparison.
+ This helps if we are testing the sign bit of a narrower object.
+ combine can't do this for us because it can't know whether a
+ ZERO_EXTRACT or a compare in a smaller mode exists, but we do. */
+
+ if (! SLOW_BYTE_ACCESS
+ && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
+ && TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT
+ && (i = floor_log2 (TREE_INT_CST_LOW (TREE_OPERAND (exp, 1)))) >= 0
+ && (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode
+ && (type = type_for_mode (mode, 1)) != 0
+ && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
+ && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code
+ != CODE_FOR_nothing))
+ {
+ do_jump (convert (type, exp), if_false_label, if_true_label);
+ break;
+ }
+ goto normal;
+
+ case TRUTH_NOT_EXPR:
+ do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);
+ break;
+
+ case TRUTH_ANDIF_EXPR:
+ if (if_false_label == 0)
+ if_false_label = drop_through_label = gen_label_rtx ();
+ do_jump (TREE_OPERAND (exp, 0), if_false_label, NULL_RTX);
+ do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
+ break;
+
+ case TRUTH_ORIF_EXPR:
+ if (if_true_label == 0)
+ if_true_label = drop_through_label = gen_label_rtx ();
+ do_jump (TREE_OPERAND (exp, 0), NULL_RTX, if_true_label);
+ do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
+ break;
+
+ case COMPOUND_EXPR:
+ push_temp_slots ();
+ expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0);
+ free_temp_slots ();
+ pop_temp_slots ();
+ emit_queue ();
+ do_pending_stack_adjust ();
+ do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
+ break;
+
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+ case ARRAY_REF:
+ {
+ int bitsize, bitpos, unsignedp;
+ enum machine_mode mode;
+ tree type;
+ tree offset;
+ int volatilep = 0;
+
+ /* Get description of this reference. We don't actually care
+ about the underlying object here. */
+ get_inner_reference (exp, &bitsize, &bitpos, &offset,
+ &mode, &unsignedp, &volatilep);
+
+ type = type_for_size (bitsize, unsignedp);
+ if (! SLOW_BYTE_ACCESS
+ && type != 0 && bitsize >= 0
+ && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
+ && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code
+ != CODE_FOR_nothing))
+ {
+ do_jump (convert (type, exp), if_false_label, if_true_label);
+ break;
+ }
+ goto normal;
+ }
+
+ case COND_EXPR:
+ /* Do (a ? 1 : 0) and (a ? 0 : 1) as special cases. */
+ if (integer_onep (TREE_OPERAND (exp, 1))
+ && integer_zerop (TREE_OPERAND (exp, 2)))
+ do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
+
+ else if (integer_zerop (TREE_OPERAND (exp, 1))
+ && integer_onep (TREE_OPERAND (exp, 2)))
+ do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);
+
+ else
+ {
+ register rtx label1 = gen_label_rtx ();
+ drop_through_label = gen_label_rtx ();
+ do_jump (TREE_OPERAND (exp, 0), label1, NULL_RTX);
+ /* Now the THEN-expression. */
+ do_jump (TREE_OPERAND (exp, 1),
+ if_false_label ? if_false_label : drop_through_label,
+ if_true_label ? if_true_label : drop_through_label);
+ /* In case the do_jump just above never jumps. */
+ do_pending_stack_adjust ();
+ emit_label (label1);
+ /* Now the ELSE-expression. */
+ do_jump (TREE_OPERAND (exp, 2),
+ if_false_label ? if_false_label : drop_through_label,
+ if_true_label ? if_true_label : drop_through_label);
+ }
+ break;
+
+ case EQ_EXPR:
+ if (integer_zerop (TREE_OPERAND (exp, 1)))
+ do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);
+ else if (((GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ == MODE_INT)
+ &&
+ !can_compare_p (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ || GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))) == MODE_COMPLEX_FLOAT
+ || GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))) == MODE_COMPLEX_INT)
+ do_jump_by_parts_equality (exp, if_false_label, if_true_label);
+ else
+ comparison = compare (exp, EQ, EQ);
+ break;
+
+ case NE_EXPR:
+ if (integer_zerop (TREE_OPERAND (exp, 1)))
+ do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
+ else if (((GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ == MODE_INT)
+ &&
+ !can_compare_p (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ || GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))) == MODE_COMPLEX_FLOAT
+ || GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))) == MODE_COMPLEX_INT)
+ do_jump_by_parts_equality (exp, if_true_label, if_false_label);
+ else
+ comparison = compare (exp, NE, NE);
+ break;
+
+ case LT_EXPR:
+ if ((GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ == MODE_INT)
+ && !can_compare_p (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ do_jump_by_parts_greater (exp, 1, if_false_label, if_true_label);
+ else
+ comparison = compare (exp, LT, LTU);
+ break;
+
+ case LE_EXPR:
+ if ((GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ == MODE_INT)
+ && !can_compare_p (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ do_jump_by_parts_greater (exp, 0, if_true_label, if_false_label);
+ else
+ comparison = compare (exp, LE, LEU);
+ break;
+
+ case GT_EXPR:
+ if ((GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ == MODE_INT)
+ && !can_compare_p (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ do_jump_by_parts_greater (exp, 0, if_false_label, if_true_label);
+ else
+ comparison = compare (exp, GT, GTU);
+ break;
+
+ case GE_EXPR:
+ if ((GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))
+ == MODE_INT)
+ && !can_compare_p (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ do_jump_by_parts_greater (exp, 1, if_true_label, if_false_label);
+ else
+ comparison = compare (exp, GE, GEU);
+ break;
+
+ default:
+ normal:
+ temp = expand_expr (exp, NULL_RTX, VOIDmode, 0);
+#if 0
+ /* This is not needed any more and causes poor code since it causes
+ comparisons and tests from non-SI objects to have different code
+ sequences. */
+ /* Copy to register to avoid generating bad insns by cse
+ from (set (mem ...) (arithop)) (set (cc0) (mem ...)). */
+ if (!cse_not_expected && GET_CODE (temp) == MEM)
+ temp = copy_to_reg (temp);
+#endif
+ do_pending_stack_adjust ();
+ if (GET_CODE (temp) == CONST_INT)
+ comparison = (temp == const0_rtx ? const0_rtx : const_true_rtx);
+ else if (GET_CODE (temp) == LABEL_REF)
+ comparison = const_true_rtx;
+ else if (GET_MODE_CLASS (GET_MODE (temp)) == MODE_INT
+ && !can_compare_p (GET_MODE (temp)))
+ /* Note swapping the labels gives us not-equal. */
+ do_jump_by_parts_equality_rtx (temp, if_true_label, if_false_label);
+ else if (GET_MODE (temp) != VOIDmode)
+ comparison = compare_from_rtx (temp, CONST0_RTX (GET_MODE (temp)),
+ NE, TREE_UNSIGNED (TREE_TYPE (exp)),
+ GET_MODE (temp), NULL_RTX, 0);
+ else
+ abort ();
+ }
+
+ /* Do any postincrements in the expression that was tested. */
+ emit_queue ();
+
+ /* If COMPARISON is nonzero here, it is an rtx that can be substituted
+ straight into a conditional jump instruction as the jump condition.
+ Otherwise, all the work has been done already. */
+
+ if (comparison == const_true_rtx)
+ {
+ if (if_true_label)
+ emit_jump (if_true_label);
+ }
+ else if (comparison == const0_rtx)
+ {
+ if (if_false_label)
+ emit_jump (if_false_label);
+ }
+ else if (comparison)
+ do_jump_for_compare (comparison, if_false_label, if_true_label);
+
+ if (drop_through_label)
+ {
+ /* If do_jump produces code that might be jumped around,
+ do any stack adjusts from that code, before the place
+ where control merges in. */
+ do_pending_stack_adjust ();
+ emit_label (drop_through_label);
+ }
+}
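+
+/* The control flow do_jump produces for TRUTH_ANDIF_EXPR, sketched with
+   gotos: both operands branch to the shared false label, and the boolean
+   value itself is never materialized in a register.  */
+#if 0
+void stmt ();
+
+void
+andif_jump (a, b)
+     int a, b;
+{
+  if (a == 0) goto if_false;
+  if (b == 0) goto if_false;
+  stmt ();
+ if_false:;
+}
+#endif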
+
+/* Given a comparison expression EXP for values too wide to be compared
+ with one insn, test the comparison and jump to the appropriate label.
+ The code of EXP is ignored; we always test GT if SWAP is 0,
+ and LT if SWAP is 1. */
+
+static void
+do_jump_by_parts_greater (exp, swap, if_false_label, if_true_label)
+ tree exp;
+ int swap;
+ rtx if_false_label, if_true_label;
+{
+ rtx op0 = expand_expr (TREE_OPERAND (exp, swap), NULL_RTX, VOIDmode, 0);
+ rtx op1 = expand_expr (TREE_OPERAND (exp, !swap), NULL_RTX, VOIDmode, 0);
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ int nwords = (GET_MODE_SIZE (mode) / UNITS_PER_WORD);
+ rtx drop_through_label = 0;
+ int unsignedp = TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ int i;
+
+ if (! if_true_label || ! if_false_label)
+ drop_through_label = gen_label_rtx ();
+ if (! if_true_label)
+ if_true_label = drop_through_label;
+ if (! if_false_label)
+ if_false_label = drop_through_label;
+
+ /* Compare a word at a time, high order first. */
+ for (i = 0; i < nwords; i++)
+ {
+ rtx comp;
+ rtx op0_word, op1_word;
+
+ if (WORDS_BIG_ENDIAN)
+ {
+ op0_word = operand_subword_force (op0, i, mode);
+ op1_word = operand_subword_force (op1, i, mode);
+ }
+ else
+ {
+ op0_word = operand_subword_force (op0, nwords - 1 - i, mode);
+ op1_word = operand_subword_force (op1, nwords - 1 - i, mode);
+ }
+
+ /* All but high-order word must be compared as unsigned. */
+ comp = compare_from_rtx (op0_word, op1_word,
+ (unsignedp || i > 0) ? GTU : GT,
+ unsignedp, word_mode, NULL_RTX, 0);
+ if (comp == const_true_rtx)
+ emit_jump (if_true_label);
+ else if (comp != const0_rtx)
+ do_jump_for_compare (comp, NULL_RTX, if_true_label);
+
+ /* Consider lower words only if these are equal. */
+ comp = compare_from_rtx (op0_word, op1_word, NE, unsignedp, word_mode,
+ NULL_RTX, 0);
+ if (comp == const_true_rtx)
+ emit_jump (if_false_label);
+ else if (comp != const0_rtx)
+ do_jump_for_compare (comp, NULL_RTX, if_false_label);
+ }
+
+ if (if_false_label)
+ emit_jump (if_false_label);
+ if (drop_through_label)
+ emit_label (drop_through_label);
+}
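+
+/* The word-at-a-time algorithm above, sketched for a two-word unsigned
+   comparison: decide on the high words when they differ, and look at
+   the low words only when the high words are equal.  */
+#if 0
+int
+gtu_two_words (hi0, lo0, hi1, lo1)
+     unsigned hi0, lo0, hi1, lo1;
+{
+  if (hi0 > hi1)
+    return 1;
+  if (hi0 != hi1)
+    return 0;
+  return lo0 > lo1;
+}
+#endif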
+
+/* Compare OP0 with OP1, word at a time, in mode MODE.
+ UNSIGNEDP says to do unsigned comparison.
+ Jump to IF_TRUE_LABEL if OP0 is greater, IF_FALSE_LABEL otherwise. */
+
+static void
+do_jump_by_parts_greater_rtx (mode, unsignedp, op0, op1, if_false_label, if_true_label)
+ enum machine_mode mode;
+ int unsignedp;
+ rtx op0, op1;
+ rtx if_false_label, if_true_label;
+{
+ int nwords = (GET_MODE_SIZE (mode) / UNITS_PER_WORD);
+ rtx drop_through_label = 0;
+ int i;
+
+ if (! if_true_label || ! if_false_label)
+ drop_through_label = gen_label_rtx ();
+ if (! if_true_label)
+ if_true_label = drop_through_label;
+ if (! if_false_label)
+ if_false_label = drop_through_label;
+
+ /* Compare a word at a time, high order first. */
+ for (i = 0; i < nwords; i++)
+ {
+ rtx comp;
+ rtx op0_word, op1_word;
+
+ if (WORDS_BIG_ENDIAN)
+ {
+ op0_word = operand_subword_force (op0, i, mode);
+ op1_word = operand_subword_force (op1, i, mode);
+ }
+ else
+ {
+ op0_word = operand_subword_force (op0, nwords - 1 - i, mode);
+ op1_word = operand_subword_force (op1, nwords - 1 - i, mode);
+ }
+
+ /* All but high-order word must be compared as unsigned. */
+ comp = compare_from_rtx (op0_word, op1_word,
+ (unsignedp || i > 0) ? GTU : GT,
+ unsignedp, word_mode, NULL_RTX, 0);
+ if (comp == const_true_rtx)
+ emit_jump (if_true_label);
+ else if (comp != const0_rtx)
+ do_jump_for_compare (comp, NULL_RTX, if_true_label);
+
+ /* Consider lower words only if these are equal. */
+ comp = compare_from_rtx (op0_word, op1_word, NE, unsignedp, word_mode,
+ NULL_RTX, 0);
+ if (comp == const_true_rtx)
+ emit_jump (if_false_label);
+ else if (comp != const0_rtx)
+ do_jump_for_compare (comp, NULL_RTX, if_false_label);
+ }
+
+ if (if_false_label)
+ emit_jump (if_false_label);
+ if (drop_through_label)
+ emit_label (drop_through_label);
+}
+
+/* Given an EQ_EXPR expression EXP for values too wide to be compared
+ with one insn, test the comparison and jump to the appropriate label. */
+
+static void
+do_jump_by_parts_equality (exp, if_false_label, if_true_label)
+ tree exp;
+ rtx if_false_label, if_true_label;
+{
+ rtx op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0);
+ rtx op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ int nwords = (GET_MODE_SIZE (mode) / UNITS_PER_WORD);
+ int i;
+ rtx drop_through_label = 0;
+
+ if (! if_false_label)
+ drop_through_label = if_false_label = gen_label_rtx ();
+
+ for (i = 0; i < nwords; i++)
+ {
+ rtx comp = compare_from_rtx (operand_subword_force (op0, i, mode),
+ operand_subword_force (op1, i, mode),
+ EQ, TREE_UNSIGNED (TREE_TYPE (exp)),
+ word_mode, NULL_RTX, 0);
+ if (comp == const_true_rtx)
+ emit_jump (if_false_label);
+ else if (comp != const0_rtx)
+ do_jump_for_compare (comp, if_false_label, NULL_RTX);
+ }
+
+ if (if_true_label)
+ emit_jump (if_true_label);
+ if (drop_through_label)
+ emit_label (drop_through_label);
+}
+
+/* Jump according to whether OP0 is 0.
+ We assume that OP0 has an integer mode that is too wide
+ for the available compare insns. */
+
+static void
+do_jump_by_parts_equality_rtx (op0, if_false_label, if_true_label)
+ rtx op0;
+ rtx if_false_label, if_true_label;
+{
+ int nwords = GET_MODE_SIZE (GET_MODE (op0)) / UNITS_PER_WORD;
+ int i;
+ rtx drop_through_label = 0;
+
+ if (! if_false_label)
+ drop_through_label = if_false_label = gen_label_rtx ();
+
+ for (i = 0; i < nwords; i++)
+ {
+ rtx comp = compare_from_rtx (operand_subword_force (op0, i,
+ GET_MODE (op0)),
+ const0_rtx, EQ, 1, word_mode, NULL_RTX, 0);
+ if (comp == const_true_rtx)
+ emit_jump (if_false_label);
+ else if (comp != const0_rtx)
+ do_jump_for_compare (comp, if_false_label, NULL_RTX);
+ }
+
+ if (if_true_label)
+ emit_jump (if_true_label);
+ if (drop_through_label)
+ emit_label (drop_through_label);
+}
+
+/* Given a comparison expression in rtl form, output conditional branches to
+ IF_TRUE_LABEL, IF_FALSE_LABEL, or both. */
+
+static void
+do_jump_for_compare (comparison, if_false_label, if_true_label)
+ rtx comparison, if_false_label, if_true_label;
+{
+ if (if_true_label)
+ {
+ if (bcc_gen_fctn[(int) GET_CODE (comparison)] != 0)
+ emit_jump_insn ((*bcc_gen_fctn[(int) GET_CODE (comparison)]) (if_true_label));
+ else
+ abort ();
+
+ if (if_false_label)
+ emit_jump (if_false_label);
+ }
+ else if (if_false_label)
+ {
+ rtx insn;
+ rtx prev = get_last_insn ();
+ rtx branch = 0;
+
+ if (prev != 0)
+ prev = PREV_INSN (prev);
+
+ /* Output the branch with the opposite condition. Then try to invert
+ what is generated. If more than one insn is a branch, or if the
+ branch is not the last insn written, abort. If we can't invert
+	 the branch, make a true label, redirect this jump to it,
+ emit a jump to the false label and define the true label. */
+
+ if (bcc_gen_fctn[(int) GET_CODE (comparison)] != 0)
+ emit_jump_insn ((*bcc_gen_fctn[(int) GET_CODE (comparison)]) (if_false_label));
+ else
+ abort ();
+
+ /* Here we get the insn before what was just emitted.
+ On some machines, emitting the branch can discard
+ the previous compare insn and emit a replacement. */
+ if (prev == 0)
+ /* If there's only one preceding insn... */
+ insn = get_insns ();
+ else
+ insn = NEXT_INSN (prev);
+
+ for (insn = NEXT_INSN (insn); insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ if (branch)
+ abort ();
+ branch = insn;
+ }
+
+ if (branch != get_last_insn ())
+ abort ();
+
+ JUMP_LABEL (branch) = if_false_label;
+ if (! invert_jump (branch, if_false_label))
+ {
+ if_true_label = gen_label_rtx ();
+ redirect_jump (branch, if_true_label);
+ emit_jump (if_false_label);
+ emit_label (if_true_label);
+ }
+ }
+}
+
+/* Generate code for a comparison expression EXP
+ (including code to compute the values to be compared)
+ and set (CC0) according to the result.
+ SIGNED_CODE should be the rtx operation for this comparison for
+ signed data; UNSIGNED_CODE, likewise for use if data is unsigned.
+
+ We force a stack adjustment unless there are currently
+ things pushed on the stack that aren't yet used. */
+
+static rtx
+compare (exp, signed_code, unsigned_code)
+ register tree exp;
+ enum rtx_code signed_code, unsigned_code;
+{
+ register rtx op0
+ = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0);
+ register rtx op1
+ = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0);
+ register tree type = TREE_TYPE (TREE_OPERAND (exp, 0));
+ register enum machine_mode mode = TYPE_MODE (type);
+ int unsignedp = TREE_UNSIGNED (type);
+ enum rtx_code code = unsignedp ? unsigned_code : signed_code;
+
+ return compare_from_rtx (op0, op1, code, unsignedp, mode,
+ ((mode == BLKmode)
+ ? expr_size (TREE_OPERAND (exp, 0)) : NULL_RTX),
+ TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+}
+
+/* Like compare but expects the values to compare as two rtx's.
+ The decision as to signed or unsigned comparison must be made by the caller.
+
+ If MODE is BLKmode, SIZE is an RTX giving the size of the objects being
+ compared.
+
+ If ALIGN is non-zero, it is the alignment of this type; if zero, the
+ size of MODE should be used. */
+
+rtx
+compare_from_rtx (op0, op1, code, unsignedp, mode, size, align)
+ register rtx op0, op1;
+ enum rtx_code code;
+ int unsignedp;
+ enum machine_mode mode;
+ rtx size;
+ int align;
+{
+ rtx tem;
+
+ /* If one operand is constant, make it the second one. Only do this
+ if the other operand is not constant as well. */
+
+ if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
+ || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
+ {
+ tem = op0;
+ op0 = op1;
+ op1 = tem;
+ code = swap_condition (code);
+ }
+
+ if (flag_force_mem)
+ {
+ op0 = force_not_mem (op0);
+ op1 = force_not_mem (op1);
+ }
+
+ do_pending_stack_adjust ();
+
+ if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
+ && (tem = simplify_relational_operation (code, mode, op0, op1)) != 0)
+ return tem;
+
+#if 0
+ /* There's no need to do this now that combine.c can eliminate lots of
+ sign extensions. This can be less efficient in certain cases on other
+ machines. */
+
+ /* If this is a signed equality comparison, we can do it as an
+ unsigned comparison since zero-extension is cheaper than sign
+ extension and comparisons with zero are done as unsigned. This is
+ the case even on machines that can do fast sign extension, since
+ zero-extension is easier to combine with other operations than
+ sign-extension is. If we are comparing against a constant, we must
+ convert it to what it would look like unsigned. */
+ if ((code == EQ || code == NE) && ! unsignedp
+ && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
+ {
+ if (GET_CODE (op1) == CONST_INT
+ && (INTVAL (op1) & GET_MODE_MASK (GET_MODE (op0))) != INTVAL (op1))
+ op1 = GEN_INT (INTVAL (op1) & GET_MODE_MASK (GET_MODE (op0)));
+ unsignedp = 1;
+ }
+#endif
+
+ emit_cmp_insn (op0, op1, code, size, mode, unsignedp, align);
+
+ return gen_rtx (code, VOIDmode, cc0_rtx, const0_rtx);
+}
+
+/* Generate code to calculate EXP using a store-flag instruction
+ and return an rtx for the result. EXP is either a comparison
+ or a TRUTH_NOT_EXPR whose operand is a comparison.
+
+ If TARGET is nonzero, store the result there if convenient.
+
+ If ONLY_CHEAP is non-zero, only do this if it is likely to be very
+ cheap.
+
+ Return zero if there is no suitable set-flag instruction
+ available on this machine.
+
+ Once expand_expr has been called on the arguments of the comparison,
+ we are committed to doing the store flag, since it is not safe to
+ re-evaluate the expression. We emit the store-flag insn by calling
+ emit_store_flag, but only expand the arguments if we have a reason
+ to believe that emit_store_flag will be successful. If we think that
+ it will, but it isn't, we have to simulate the store-flag with a
+ set/jump/set sequence. */
+
+static rtx
+do_store_flag (exp, target, mode, only_cheap)
+ tree exp;
+ rtx target;
+ enum machine_mode mode;
+ int only_cheap;
+{
+ enum rtx_code code;
+ tree arg0, arg1, type;
+ tree tem;
+ enum machine_mode operand_mode;
+ int invert = 0;
+ int unsignedp;
+ rtx op0, op1;
+ enum insn_code icode;
+ rtx subtarget = target;
+ rtx result, label, pattern, jump_pat;
+
+ /* If this is a TRUTH_NOT_EXPR, set a flag indicating we must invert the
+ result at the end. We can't simply invert the test since it would
+ have already been inverted if it were valid. This case occurs for
+ some floating-point comparisons. */
+
+ if (TREE_CODE (exp) == TRUTH_NOT_EXPR)
+ invert = 1, exp = TREE_OPERAND (exp, 0);
+
+ arg0 = TREE_OPERAND (exp, 0);
+ arg1 = TREE_OPERAND (exp, 1);
+ type = TREE_TYPE (arg0);
+ operand_mode = TYPE_MODE (type);
+ unsignedp = TREE_UNSIGNED (type);
+
+ /* We won't bother with BLKmode store-flag operations because it would mean
+ passing a lot of information to emit_store_flag. */
+ if (operand_mode == BLKmode)
+ return 0;
+
+ STRIP_NOPS (arg0);
+ STRIP_NOPS (arg1);
+
+ /* Get the rtx comparison code to use. We know that EXP is a comparison
+ operation of some type. Some comparisons against 1 and -1 can be
+ converted to comparisons with zero. Do so here so that the tests
+ below will be aware that we have a comparison with zero. These
+ tests will not catch constants in the first operand, but constants
+ are rarely passed as the first operand. */
+
+ switch (TREE_CODE (exp))
+ {
+ case EQ_EXPR:
+ code = EQ;
+ break;
+ case NE_EXPR:
+ code = NE;
+ break;
+ case LT_EXPR:
+ if (integer_onep (arg1))
+ arg1 = integer_zero_node, code = unsignedp ? LEU : LE;
+ else
+ code = unsignedp ? LTU : LT;
+ break;
+ case LE_EXPR:
+ if (! unsignedp && integer_all_onesp (arg1))
+ arg1 = integer_zero_node, code = LT;
+ else
+ code = unsignedp ? LEU : LE;
+ break;
+ case GT_EXPR:
+ if (! unsignedp && integer_all_onesp (arg1))
+ arg1 = integer_zero_node, code = GE;
+ else
+ code = unsignedp ? GTU : GT;
+ break;
+ case GE_EXPR:
+ if (integer_onep (arg1))
+ arg1 = integer_zero_node, code = unsignedp ? GTU : GT;
+ else
+ code = unsignedp ? GEU : GE;
+ break;
+ default:
+ abort ();
+ }
+
+ /* Put a constant second. */
+ if (TREE_CODE (arg0) == REAL_CST || TREE_CODE (arg0) == INTEGER_CST)
+ {
+ tem = arg0; arg0 = arg1; arg1 = tem;
+ code = swap_condition (code);
+ }
+
+ /* If this is an equality or inequality test of a single bit, we can
+ do this by shifting the bit being tested to the low-order bit and
+ masking the result with the constant 1. If the condition was EQ,
+ we xor it with 1. This does not require an scc insn and is faster
+ than an scc insn even if we have it. */
+
+ if ((code == NE || code == EQ)
+ && TREE_CODE (arg0) == BIT_AND_EXPR && integer_zerop (arg1)
+ && integer_pow2p (TREE_OPERAND (arg0, 1))
+ && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
+ {
+ tree inner = TREE_OPERAND (arg0, 0);
+ int bitnum = exact_log2 (INTVAL (expand_expr (TREE_OPERAND (arg0, 1),
+ NULL_RTX, VOIDmode, 0)));
+ int ops_unsignedp;
+
+ /* If INNER is a right shift of a constant and it plus BITNUM does
+ not overflow, adjust BITNUM and INNER. */
+
+ if (TREE_CODE (inner) == RSHIFT_EXPR
+ && TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST
+ && TREE_INT_CST_HIGH (TREE_OPERAND (inner, 1)) == 0
+ && (bitnum + TREE_INT_CST_LOW (TREE_OPERAND (inner, 1))
+ < TYPE_PRECISION (type)))
+ {
+	  bitnum += TREE_INT_CST_LOW (TREE_OPERAND (inner, 1));
+ inner = TREE_OPERAND (inner, 0);
+ }
+
+ /* If we are going to be able to omit the AND below, we must do our
+ operations as unsigned. If we must use the AND, we have a choice.
+ Normally unsigned is faster, but for some machines signed is. */
+ ops_unsignedp = (bitnum == TYPE_PRECISION (type) - 1 ? 1
+#ifdef LOAD_EXTEND_OP
+ : (LOAD_EXTEND_OP (operand_mode) == SIGN_EXTEND ? 0 : 1)
+#else
+ : 1
+#endif
+ );
+
+ if (subtarget == 0 || GET_CODE (subtarget) != REG
+ || GET_MODE (subtarget) != operand_mode
+ || ! safe_from_p (subtarget, inner))
+ subtarget = 0;
+
+ op0 = expand_expr (inner, subtarget, VOIDmode, 0);
+
+ if (bitnum != 0)
+ op0 = expand_shift (RSHIFT_EXPR, GET_MODE (op0), op0,
+ size_int (bitnum), subtarget, ops_unsignedp);
+
+ if (GET_MODE (op0) != mode)
+ op0 = convert_to_mode (mode, op0, ops_unsignedp);
+
+ if ((code == EQ && ! invert) || (code == NE && invert))
+ op0 = expand_binop (mode, xor_optab, op0, const1_rtx, subtarget,
+ ops_unsignedp, OPTAB_LIB_WIDEN);
+
+ /* Put the AND last so it can combine with more things. */
+ if (bitnum != TYPE_PRECISION (type) - 1)
+ op0 = expand_and (op0, const1_rtx, subtarget);
+
+ return op0;
+ }
+
+ /* Now see if we are likely to be able to do this. Return if not. */
+ if (! can_compare_p (operand_mode))
+ return 0;
+ icode = setcc_gen_code[(int) code];
+ if (icode == CODE_FOR_nothing
+ || (only_cheap && insn_operand_mode[(int) icode][0] != mode))
+ {
+ /* We can only do this if it is one of the special cases that
+ can be handled without an scc insn. */
+ if ((code == LT && integer_zerop (arg1))
+ || (! only_cheap && code == GE && integer_zerop (arg1)))
+ ;
+ else if (BRANCH_COST >= 0
+ && ! only_cheap && (code == NE || code == EQ)
+ && TREE_CODE (type) != REAL_TYPE
+ && ((abs_optab->handlers[(int) operand_mode].insn_code
+ != CODE_FOR_nothing)
+ || (ffs_optab->handlers[(int) operand_mode].insn_code
+ != CODE_FOR_nothing)))
+ ;
+ else
+ return 0;
+ }
+
+ preexpand_calls (exp);
+ if (subtarget == 0 || GET_CODE (subtarget) != REG
+ || GET_MODE (subtarget) != operand_mode
+ || ! safe_from_p (subtarget, arg1))
+ subtarget = 0;
+
+ op0 = expand_expr (arg0, subtarget, VOIDmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+
+ /* Pass copies of OP0 and OP1 in case they contain a QUEUED. This is safe
+ because, if the emit_store_flag does anything it will succeed and
+ OP0 and OP1 will not be used subsequently. */
+
+ result = emit_store_flag (target, code,
+ queued_subexp_p (op0) ? copy_rtx (op0) : op0,
+ queued_subexp_p (op1) ? copy_rtx (op1) : op1,
+ operand_mode, unsignedp, 1);
+
+ if (result)
+ {
+ if (invert)
+ result = expand_binop (mode, xor_optab, result, const1_rtx,
+ result, 0, OPTAB_LIB_WIDEN);
+ return result;
+ }
+
+ /* If this failed, we have to do this with set/compare/jump/set code. */
+ if (target == 0 || GET_CODE (target) != REG
+ || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
+ target = gen_reg_rtx (GET_MODE (target));
+
+ emit_move_insn (target, invert ? const0_rtx : const1_rtx);
+ result = compare_from_rtx (op0, op1, code, unsignedp,
+ operand_mode, NULL_RTX, 0);
+ if (GET_CODE (result) == CONST_INT)
+ return (((result == const0_rtx && ! invert)
+ || (result != const0_rtx && invert))
+ ? const0_rtx : const1_rtx);
+
+ label = gen_label_rtx ();
+ if (bcc_gen_fctn[(int) code] == 0)
+ abort ();
+
+ emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
+ emit_move_insn (target, invert ? const1_rtx : const0_rtx);
+ emit_label (label);
+
+ return target;
+}
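+
+/* The single-bit shortcut above, written out for bit 3: no scc insn is
+   needed, because (x & 8) != 0 has the same value as (x >> 3) & 1; the
+   EQ form merely adds an xor with 1.  */
+#if 0
+int
+bit3_is_set (x)
+     unsigned x;
+{
+  return (x >> 3) & 1;	/* equivalent to (x & 8) != 0 */
+}
+#endif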
+
+/* Generate a tablejump instruction (used for switch statements). */
+
+#ifdef HAVE_tablejump
+
+/* INDEX is the value being switched on, with the lowest value
+ in the table already subtracted.
+ MODE is its expected mode (needed if INDEX is constant).
+ RANGE is the length of the jump table.
+ TABLE_LABEL is a CODE_LABEL rtx for the table itself.
+
+ DEFAULT_LABEL is a CODE_LABEL rtx to jump to if the
+ index value is out of range. */
+
+void
+do_tablejump (index, mode, range, table_label, default_label)
+ rtx index, range, table_label, default_label;
+ enum machine_mode mode;
+{
+ register rtx temp, vector;
+
+ /* Do an unsigned comparison (in the proper mode) between the index
+ expression and the value which represents the length of the range.
+ Since we just finished subtracting the lower bound of the range
+ from the index expression, this comparison allows us to simultaneously
+ check that the original index expression value is both greater than
+ or equal to the minimum value of the range and less than or equal to
+ the maximum value of the range. */
+
+ emit_cmp_insn (index, range, GTU, NULL_RTX, mode, 1, 0);
+ emit_jump_insn (gen_bgtu (default_label));
+
+ /* If index is in range, it must fit in Pmode.
+ Convert to Pmode so we can index with it. */
+ if (mode != Pmode)
+ index = convert_to_mode (Pmode, index, 1);
+
+  /* Don't let a MEM slip through, because then the INDEX that comes
+ out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
+ and break_out_memory_refs will go to work on it and mess it up. */
+#ifdef PIC_CASE_VECTOR_ADDRESS
+ if (flag_pic && GET_CODE (index) != REG)
+ index = copy_to_mode_reg (Pmode, index);
+#endif
+
+ /* If flag_force_addr were to affect this address
+ it could interfere with the tricky assumptions made
+ about addresses that contain label-refs,
+ which may be valid only very near the tablejump itself. */
+ /* ??? The only correct use of CASE_VECTOR_MODE is the one inside the
+ GET_MODE_SIZE, because this indicates how large insns are. The other
+ uses should all be Pmode, because they are addresses. This code
+ could fail if addresses and insns are not the same size. */
+ index = gen_rtx (PLUS, Pmode,
+ gen_rtx (MULT, Pmode, index,
+ GEN_INT (GET_MODE_SIZE (CASE_VECTOR_MODE))),
+ gen_rtx (LABEL_REF, Pmode, table_label));
+#ifdef PIC_CASE_VECTOR_ADDRESS
+ if (flag_pic)
+ index = PIC_CASE_VECTOR_ADDRESS (index);
+ else
+#endif
+ index = memory_address_noforce (CASE_VECTOR_MODE, index);
+ temp = gen_reg_rtx (CASE_VECTOR_MODE);
+ vector = gen_rtx (MEM, CASE_VECTOR_MODE, index);
+ RTX_UNCHANGING_P (vector) = 1;
+ convert_move (temp, vector, 0);
+
+ emit_jump_insn (gen_tablejump (temp, table_label));
+
+#ifndef CASE_VECTOR_PC_RELATIVE
+ /* If we are generating PIC code or if the table is PC-relative, the
+ table and JUMP_INSN must be adjacent, so don't output a BARRIER. */
+ if (! flag_pic)
+ emit_barrier ();
+#endif
+}
+
+#endif /* HAVE_tablejump */
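+
+/* The single GTU comparison in do_tablejump checks both ends of the range
+   at once: an INDEX below the low bound wraps to a huge unsigned value
+   after the subtraction.  A sketch of the equivalence:  */
+#if 0
+int
+in_range (index, low, high)
+     int index, low, high;
+{
+  return (unsigned) (index - low) <= (unsigned) (high - low);
+}
+#endif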
+
+
+/* Emit a suitable bytecode to load a value from memory, assuming a pointer
+ to that value is on the top of the stack. The resulting type is TYPE, and
+ the source declaration is DECL. */
+
+void
+bc_load_memory (type, decl)
+ tree type, decl;
+{
+ enum bytecode_opcode opcode;
+
+
+ /* Bit fields are special. We only know about signed and
+ unsigned ints, and enums. The latter are treated as
+ signed integers. */
+
+ if (DECL_BIT_FIELD (decl))
+ if (TREE_CODE (type) == ENUMERAL_TYPE
+ || TREE_CODE (type) == INTEGER_TYPE)
+ opcode = TREE_UNSIGNED (type) ? zxloadBI : sxloadBI;
+ else
+ abort ();
+ else
+ /* See corresponding comment in bc_store_memory(). */
+ if (TYPE_MODE (type) == BLKmode
+ || TYPE_MODE (type) == VOIDmode)
+ return;
+ else
+ opcode = mode_to_load_map [(int) TYPE_MODE (type)];
+
+ if (opcode == neverneverland)
+ abort ();
+
+ bc_emit_bytecode (opcode);
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+}
+
+
+/* Store the contents of the second stack slot to the address in the
+ top stack slot. DECL is the declaration of the destination and is used
+ to determine whether we're dealing with a bitfield. */
+
+void
+bc_store_memory (type, decl)
+ tree type, decl;
+{
+ enum bytecode_opcode opcode;
+
+
+ if (DECL_BIT_FIELD (decl))
+ {
+ if (TREE_CODE (type) == ENUMERAL_TYPE
+ || TREE_CODE (type) == INTEGER_TYPE)
+ opcode = sstoreBI;
+ else
+ abort ();
+ }
+ else
+ if (TYPE_MODE (type) == BLKmode)
+ {
+	/* Copy structure.  This expands to a block copy instruction, storeBLK.
+	   In addition to the arguments expected by the other store instructions,
+	   it also expects a type size (SImode) on top of the stack, which is the
+	   structure size in size units (usually bytes).  The first two arguments
+	   are already on the stack, so we just put the size on level 1.  In some
+	   other languages the size may be variable; this is why we don't encode
+	   it as a storeBLK literal, but rather treat it as a full-fledged expression.  */
+
+ bc_expand_expr (TYPE_SIZE (type));
+ opcode = storeBLK;
+ }
+ else
+ opcode = mode_to_store_map [(int) TYPE_MODE (type)];
+
+ if (opcode == neverneverland)
+ abort ();
+
+ bc_emit_bytecode (opcode);
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+}
+
+
+/* Allocate local stack space sufficient to hold a value of the given
+   SIZE at alignment boundary ALIGNMENT bits.  ALIGNMENT must be an
+   integral power of 2.  A special case is locals of type VOID, which
+   have size 0 and alignment 1; any such "voidish" SIZE or ALIGNMENT
+   is remapped to the corresponding attribute of SI.  */
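+/* E.g., with local_vars_size == 4, a request for SIZE 2 at ALIGNMENT 16
+   bits needs 2-byte alignment; 4 is already 2-byte aligned, so the
+   local is handed offset 4 and local_vars_size becomes 6.  */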
+
+rtx
+bc_allocate_local (size, alignment)
+ int size, alignment;
+{
+ rtx retval;
+ int byte_alignment;
+
+ if (size < 0)
+ abort ();
+
+ /* Normalize size and alignment */
+ if (!size)
+ size = UNITS_PER_WORD;
+
+ if (alignment < BITS_PER_UNIT)
+ byte_alignment = 1 << (INT_ALIGN - 1);
+ else
+ /* Align */
+ byte_alignment = alignment / BITS_PER_UNIT;
+
+ if (local_vars_size & (byte_alignment - 1))
+ local_vars_size += byte_alignment - (local_vars_size & (byte_alignment - 1));
+
+ retval = bc_gen_rtx ((char *) 0, local_vars_size, (struct bc_label *) 0);
+ local_vars_size += size;
+
+ return retval;
+}
+
+
+/* Allocate a variable-sized local array.  Variable-sized arrays are
+   represented as pointers to the memory where they are stored.  */
+
+rtx
+bc_allocate_variable_array (size)
+ tree size;
+{
+ rtx retval;
+ const int ptralign = (1 << (PTR_ALIGN - 1));
+
+ /* Align pointer */
+ if (local_vars_size & ptralign)
+ local_vars_size += ptralign - (local_vars_size & ptralign);
+
+ /* Note down local space needed: pointer to block; also return
+ dummy rtx */
+
+ retval = bc_gen_rtx ((char *) 0, local_vars_size, (struct bc_label *) 0);
+ local_vars_size += POINTER_SIZE / BITS_PER_UNIT;
+ return retval;
+}
+
+
+/* Push the machine address for the given external variable offset. */
+void
+bc_load_externaddr (externaddr)
+ rtx externaddr;
+{
+ bc_emit_bytecode (constP);
+ bc_emit_code_labelref (BYTECODE_LABEL (externaddr),
+ BYTECODE_BC_LABEL (externaddr)->offset);
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+}
+
+
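+/* Return a copy of the string S, allocated with xmalloc.  */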
+static char *
+bc_strdup (s)
+ char *s;
+{
+ char *new = (char *) xmalloc ((strlen (s) + 1) * sizeof *s);
+ strcpy (new, s);
+ return new;
+}
+
+
+/* Like above, but expects an IDENTIFIER. */
+void
+bc_load_externaddr_id (id, offset)
+ tree id;
+ int offset;
+{
+ if (!IDENTIFIER_POINTER (id))
+ abort ();
+
+ bc_emit_bytecode (constP);
+  bc_emit_code_labelref (bc_strdup (IDENTIFIER_POINTER (id)), offset);
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+}
+
+
+/* Push the machine address for the given local variable offset. */
+void
+bc_load_localaddr (localaddr)
+ rtx localaddr;
+{
+ bc_emit_instruction (localP, (HOST_WIDE_INT) BYTECODE_BC_LABEL (localaddr)->offset);
+}
+
+
+/* Push the machine address for the given parameter offset.
+ NOTE: offset is in bits. */
+void
+bc_load_parmaddr (parmaddr)
+ rtx parmaddr;
+{
+ bc_emit_instruction (argP, ((HOST_WIDE_INT) BYTECODE_BC_LABEL (parmaddr)->offset
+ / BITS_PER_UNIT));
+}
+
+
+/* Convert a[i] into *(a + i). */
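+/* E.g., for `int v[10]', `v[i]' is rewritten as the tree
+   (INDIRECT_REF (PLUS_EXPR (ADDR_EXPR v) (MULT_EXPR i 4)))
+   on a machine with 4-byte ints; the index is first converted to a
+   type as wide as a pointer so the multiply cannot overflow.  */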
+tree
+bc_canonicalize_array_ref (exp)
+ tree exp;
+{
+ tree type = TREE_TYPE (exp);
+ tree array_adr = build1 (ADDR_EXPR, TYPE_POINTER_TO (type),
+ TREE_OPERAND (exp, 0));
+ tree index = TREE_OPERAND (exp, 1);
+
+
+ /* Convert the integer argument to a type the same size as a pointer
+ so the multiply won't overflow spuriously. */
+
+ if (TYPE_PRECISION (TREE_TYPE (index)) != POINTER_SIZE)
+ index = convert (type_for_size (POINTER_SIZE, 0), index);
+
+ /* The array address isn't volatile even if the array is.
+ (Of course this isn't terribly relevant since the bytecode
+ translator treats nearly everything as volatile anyway.) */
+ TREE_THIS_VOLATILE (array_adr) = 0;
+
+ return build1 (INDIRECT_REF, type,
+ fold (build (PLUS_EXPR,
+ TYPE_POINTER_TO (type),
+ array_adr,
+ fold (build (MULT_EXPR,
+ TYPE_POINTER_TO (type),
+ index,
+ size_in_bytes (type))))));
+}
+
+
+/* Load the address of the component referenced by the given
+ COMPONENT_REF expression.
+
+ Returns innermost lvalue. */
+
+tree
+bc_expand_component_address (exp)
+ tree exp;
+{
+ tree tem, chain;
+ enum machine_mode mode;
+ int bitpos = 0;
+ HOST_WIDE_INT SIval;
+
+
+ tem = TREE_OPERAND (exp, 1);
+ mode = DECL_MODE (tem);
+
+
+ /* Compute cumulative bit offset for nested component refs
+ and array refs, and find the ultimate containing object. */
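+  /* E.g., for `s.a[2].b' the loop visits the COMPONENT_REF for `b',
+     the ARRAY_REF `a[2]', and the COMPONENT_REF for `a', summing their
+     constant bit offsets, and then expands the base object `s'.  */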
+
+ for (tem = exp;; tem = TREE_OPERAND (tem, 0))
+ {
+ if (TREE_CODE (tem) == COMPONENT_REF)
+ bitpos += TREE_INT_CST_LOW (DECL_FIELD_BITPOS (TREE_OPERAND (tem, 1)));
+ else
+ if (TREE_CODE (tem) == ARRAY_REF
+ && TREE_CODE (TREE_OPERAND (tem, 1)) == INTEGER_CST
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (tem))) == INTEGER_CST)
+
+ bitpos += (TREE_INT_CST_LOW (TREE_OPERAND (tem, 1))
+ * TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (tem)))
+ /* * TYPE_SIZE_UNIT (TREE_TYPE (tem)) */);
+ else
+ break;
+ }
+
+ bc_expand_expr (tem);
+
+
+ /* For bitfields also push their offset and size */
+ if (DECL_BIT_FIELD (TREE_OPERAND (exp, 1)))
+ bc_push_offset_and_size (bitpos, /* DECL_SIZE_UNIT */ (TREE_OPERAND (exp, 1)));
+ else
+    if ((SIval = bitpos / BITS_PER_UNIT))
+ bc_emit_instruction (addconstPSI, SIval);
+
+ return (TREE_OPERAND (exp, 1));
+}
+
+
+/* Emit code to push two SI constants */
+void
+bc_push_offset_and_size (offset, size)
+ HOST_WIDE_INT offset, size;
+{
+ bc_emit_instruction (constSI, offset);
+ bc_emit_instruction (constSI, size);
+}
+
+
+/* Emit byte code to push the address of the given lvalue expression to
+ the stack. If it's a bit field, we also push offset and size info.
+
+ Returns innermost component, which allows us to determine not only
+ its type, but also whether it's a bitfield. */
+
+tree
+bc_expand_address (exp)
+ tree exp;
+{
+ /* Safeguard */
+ if (!exp || TREE_CODE (exp) == ERROR_MARK)
+ return (exp);
+
+
+ switch (TREE_CODE (exp))
+ {
+ case ARRAY_REF:
+
+ return (bc_expand_address (bc_canonicalize_array_ref (exp)));
+
+ case COMPONENT_REF:
+
+ return (bc_expand_component_address (exp));
+
+ case INDIRECT_REF:
+
+ bc_expand_expr (TREE_OPERAND (exp, 0));
+
+ /* For variable-sized types: retrieve pointer. Sometimes the
+ TYPE_SIZE tree is NULL. Is this a bug or a feature? Let's
+ also make sure we have an operand, just in case... */
+
+ if (TREE_OPERAND (exp, 0)
+ && TYPE_SIZE (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (TREE_OPERAND (exp, 0)))) != INTEGER_CST)
+ bc_emit_instruction (loadP);
+
+ /* If packed, also return offset and size */
+ if (DECL_BIT_FIELD (TREE_OPERAND (exp, 0)))
+
+ bc_push_offset_and_size (TREE_INT_CST_LOW (DECL_FIELD_BITPOS (TREE_OPERAND (exp, 0))),
+ TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (exp, 0))));
+
+ return (TREE_OPERAND (exp, 0));
+
+ case FUNCTION_DECL:
+
+ bc_load_externaddr_id (DECL_ASSEMBLER_NAME (exp),
+ BYTECODE_BC_LABEL (DECL_RTL (exp))->offset);
+ break;
+
+ case PARM_DECL:
+
+ bc_load_parmaddr (DECL_RTL (exp));
+
+ /* For variable-sized types: retrieve pointer */
+ if (TYPE_SIZE (TREE_TYPE (exp))
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST)
+ bc_emit_instruction (loadP);
+
+ /* If packed, also return offset and size */
+ if (DECL_BIT_FIELD (exp))
+ bc_push_offset_and_size (TREE_INT_CST_LOW (DECL_FIELD_BITPOS (exp)),
+ TREE_INT_CST_LOW (DECL_SIZE (exp)));
+
+ break;
+
+ case RESULT_DECL:
+
+ bc_emit_instruction (returnP);
+ break;
+
+ case VAR_DECL:
+
+#if 0
+ if (BYTECODE_LABEL (DECL_RTL (exp)))
+ bc_load_externaddr (DECL_RTL (exp));
+#endif
+
+ if (DECL_EXTERNAL (exp))
+ bc_load_externaddr_id (DECL_ASSEMBLER_NAME (exp),
+ (BYTECODE_BC_LABEL (DECL_RTL (exp)))->offset);
+ else
+ bc_load_localaddr (DECL_RTL (exp));
+
+ /* For variable-sized types: retrieve pointer */
+ if (TYPE_SIZE (TREE_TYPE (exp))
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST)
+ bc_emit_instruction (loadP);
+
+ /* If packed, also return offset and size */
+ if (DECL_BIT_FIELD (exp))
+ bc_push_offset_and_size (TREE_INT_CST_LOW (DECL_FIELD_BITPOS (exp)),
+ TREE_INT_CST_LOW (DECL_SIZE (exp)));
+
+ break;
+
+ case STRING_CST:
+ {
+ rtx r;
+
+ bc_emit_bytecode (constP);
+ r = output_constant_def (exp);
+ bc_emit_code_labelref (BYTECODE_LABEL (r), BYTECODE_BC_LABEL (r)->offset);
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+ }
+ break;
+
+ default:
+
+ abort();
+ break;
+ }
+
+ /* Most lvalues don't have components. */
+ return (exp);
+}
+
+
+/* Return a type code to be used by the runtime support in handling
+   parameter passing.  The type code consists of the machine mode
+   plus the minimal alignment shifted left 8 bits.  */
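+/* E.g., a 32-bit int in SImode aligned to 32 bits yields
+   (int) SImode | (32 << 8): the mode in the low byte and the
+   alignment, in bits, above it.  */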
+
+tree
+bc_runtime_type_code (type)
+ tree type;
+{
+ int val;
+
+ switch (TREE_CODE (type))
+ {
+ case VOID_TYPE:
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ case COMPLEX_TYPE:
+ case ENUMERAL_TYPE:
+ case POINTER_TYPE:
+ case RECORD_TYPE:
+
+ val = (int) TYPE_MODE (type) | TYPE_ALIGN (type) << 8;
+ break;
+
+ case ERROR_MARK:
+
+ val = 0;
+ break;
+
+ default:
+
+ abort ();
+ }
+ return build_int_2 (val, 0);
+}
+
+
+/* Generate constructor label */
+char *
+bc_gen_constr_label ()
+{
+ static int label_counter;
+ static char label[20];
+
+ sprintf (label, "*LR%d", label_counter++);
+
+ return (obstack_copy0 (&permanent_obstack, label, strlen (label)));
+}
+
+
+/* Evaluate constructor CONSTR and return pointer to it on level one. We
+ expand the constructor data as static data, and push a pointer to it.
+ The pointer is put in the pointer table and is retrieved by a constP
+ bytecode instruction. We then loop and store each constructor member in
+ the corresponding component. Finally, we return the original pointer on
+ the stack. */
+
+void
+bc_expand_constructor (constr)
+ tree constr;
+{
+ char *l;
+ HOST_WIDE_INT ptroffs;
+ rtx constr_rtx;
+
+
+ /* Literal constructors are handled as constants, whereas
+ non-literals are evaluated and stored element by element
+ into the data segment. */
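+  /* E.g., `struct { int x, y; } p = { 1, 2 };' has a constant
+     constructor and is laid out once in the text section, whereas
+     `{ 1, f () }' goes to the data section and each member is
+     stored at run time.  */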
+
+  /* Allocate space in the proper segment and push a pointer to it
+     on the stack.  */
+
+ l = bc_gen_constr_label ();
+
+ if (TREE_CONSTANT (constr))
+ {
+ text_section ();
+
+ bc_emit_const_labeldef (l);
+ bc_output_constructor (constr, int_size_in_bytes (TREE_TYPE (constr)));
+ }
+ else
+ {
+ data_section ();
+
+ bc_emit_data_labeldef (l);
+ bc_output_data_constructor (constr);
+ }
+
+
+ /* Add reference to pointer table and recall pointer to stack;
+ this code is common for both types of constructors: literals
+ and non-literals. */
+
+ ptroffs = bc_define_pointer (l);
+ bc_emit_instruction (constP, ptroffs);
+
+ /* This is all that has to be done if it's a literal. */
+ if (TREE_CONSTANT (constr))
+ return;
+
+
+ /* At this point, we have the pointer to the structure on top of the stack.
+ Generate sequences of store_memory calls for the constructor. */
+
+ /* constructor type is structure */
+ if (TREE_CODE (TREE_TYPE (constr)) == RECORD_TYPE)
+ {
+ register tree elt;
+
+ /* If the constructor has fewer fields than the structure,
+ clear the whole structure first. */
+
+ if (list_length (CONSTRUCTOR_ELTS (constr))
+ != list_length (TYPE_FIELDS (TREE_TYPE (constr))))
+ {
+ bc_emit_instruction (duplicate);
+ bc_emit_instruction (constSI, (HOST_WIDE_INT) int_size_in_bytes (TREE_TYPE (constr)));
+ bc_emit_instruction (clearBLK);
+ }
+
+ /* Store each element of the constructor into the corresponding
+ field of TARGET. */
+
+ for (elt = CONSTRUCTOR_ELTS (constr); elt; elt = TREE_CHAIN (elt))
+ {
+ register tree field = TREE_PURPOSE (elt);
+ register enum machine_mode mode;
+ int bitsize;
+ int bitpos;
+ int unsignedp;
+
+ bitsize = TREE_INT_CST_LOW (DECL_SIZE (field)) /* * DECL_SIZE_UNIT (field) */;
+ mode = DECL_MODE (field);
+ unsignedp = TREE_UNSIGNED (field);
+
+ bitpos = TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field));
+
+	  bc_store_field (field, bitsize, bitpos, mode, TREE_VALUE (elt), TREE_TYPE (TREE_VALUE (elt)),
+ /* The alignment of TARGET is
+ at least what its type requires. */
+ VOIDmode, 0,
+ TYPE_ALIGN (TREE_TYPE (constr)) / BITS_PER_UNIT,
+ int_size_in_bytes (TREE_TYPE (constr)));
+ }
+ }
+ else
+
+ /* Constructor type is array */
+ if (TREE_CODE (TREE_TYPE (constr)) == ARRAY_TYPE)
+ {
+ register tree elt;
+ register int i;
+ tree domain = TYPE_DOMAIN (TREE_TYPE (constr));
+ int minelt = TREE_INT_CST_LOW (TYPE_MIN_VALUE (domain));
+ int maxelt = TREE_INT_CST_LOW (TYPE_MAX_VALUE (domain));
+ tree elttype = TREE_TYPE (TREE_TYPE (constr));
+
+      /* If the constructor has fewer elements than the array,
+	 clear the whole array first.  */
+
+ if (list_length (CONSTRUCTOR_ELTS (constr)) < maxelt - minelt + 1)
+ {
+ bc_emit_instruction (duplicate);
+ bc_emit_instruction (constSI, (HOST_WIDE_INT) int_size_in_bytes (TREE_TYPE (constr)));
+ bc_emit_instruction (clearBLK);
+ }
+
+
+ /* Store each element of the constructor into the corresponding
+ element of TARGET, determined by counting the elements. */
+
+ for (elt = CONSTRUCTOR_ELTS (constr), i = 0;
+ elt;
+ elt = TREE_CHAIN (elt), i++)
+ {
+ register enum machine_mode mode;
+ int bitsize;
+ int bitpos;
+ int unsignedp;
+
+ mode = TYPE_MODE (elttype);
+ bitsize = GET_MODE_BITSIZE (mode);
+ unsignedp = TREE_UNSIGNED (elttype);
+
+ bitpos = (i * TREE_INT_CST_LOW (TYPE_SIZE (elttype))
+ /* * TYPE_SIZE_UNIT (elttype) */ );
+
+ bc_store_field (elt, bitsize, bitpos, mode,
+ TREE_VALUE (elt), TREE_TYPE (TREE_VALUE (elt)),
+ /* The alignment of TARGET is
+ at least what its type requires. */
+ VOIDmode, 0,
+ TYPE_ALIGN (TREE_TYPE (constr)) / BITS_PER_UNIT,
+ int_size_in_bytes (TREE_TYPE (constr)));
+ }
+
+ }
+}
+
+
+/* Store the value of EXP (an expression tree) into member FIELD of
+ structure at address on stack, which has type TYPE, mode MODE and
+ occupies BITSIZE bits, starting BITPOS bits from the beginning of the
+ structure.
+
+ ALIGN is the alignment that TARGET is known to have, measured in bytes.
+ TOTAL_SIZE is its size in bytes, or -1 if variable. */
+
+void
+bc_store_field (field, bitsize, bitpos, mode, exp, type,
+ value_mode, unsignedp, align, total_size)
+ int bitsize, bitpos;
+ enum machine_mode mode;
+ tree field, exp, type;
+ enum machine_mode value_mode;
+ int unsignedp;
+ int align;
+ int total_size;
+{
+
+ /* Expand expression and copy pointer */
+ bc_expand_expr (exp);
+ bc_emit_instruction (over);
+
+
+ /* If the component is a bit field, we cannot use addressing to access
+ it. Use bit-field techniques to store in it. */
+
+ if (DECL_BIT_FIELD (field))
+ {
+ bc_store_bit_field (bitpos, bitsize, unsignedp);
+ return;
+ }
+ else
+ /* Not bit field */
+ {
+ HOST_WIDE_INT offset = bitpos / BITS_PER_UNIT;
+
+ /* Advance pointer to the desired member */
+ if (offset)
+ bc_emit_instruction (addconstPSI, offset);
+
+ /* Store */
+ bc_store_memory (type, field);
+ }
+}
+
+
+/* Store SI/SU in bitfield */
+void
+bc_store_bit_field (offset, size, unsignedp)
+ int offset, size, unsignedp;
+{
+ /* Push bitfield offset and size */
+ bc_push_offset_and_size (offset, size);
+
+ /* Store */
+ bc_emit_instruction (sstoreBI);
+}
+
+
+/* Load SI/SU from bitfield */
+void
+bc_load_bit_field (offset, size, unsignedp)
+ int offset, size, unsignedp;
+{
+ /* Push bitfield offset and size */
+ bc_push_offset_and_size (offset, size);
+
+ /* Load: sign-extend if signed, else zero-extend */
+ bc_emit_instruction (unsignedp ? zxloadBI : sxloadBI);
+}
+
+
+/* Adjust interpreter stack by NLEVELS. Positive means drop NLEVELS
+ (adjust stack pointer upwards), negative means add that number of
+ levels (adjust the stack pointer downwards). Only positive values
+ normally make sense. */
+
+void
+bc_adjust_stack (nlevels)
+ int nlevels;
+{
+ switch (nlevels)
+ {
+ case 0:
+ break;
+
+ case 2:
+ bc_emit_instruction (drop);
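+      /* Falls through to case 1 to drop a second level.  */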
+
+ case 1:
+ bc_emit_instruction (drop);
+ break;
+
+ default:
+
+ bc_emit_instruction (adjstackSI, (HOST_WIDE_INT) nlevels);
+ stack_depth -= nlevels;
+ }
+
+#if defined (VALIDATE_STACK_FOR_BC)
+ VALIDATE_STACK_FOR_BC ();
+#endif
+}
diff --git a/gnu/usr.bin/cc/cc_int/final.c b/gnu/usr.bin/cc/cc_int/final.c
new file mode 100644
index 0000000..74d01e9
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/final.c
@@ -0,0 +1,3069 @@
+/* Convert RTL to assembler code and output it, for GNU compiler.
+ Copyright (C) 1987, 88, 89, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This is the final pass of the compiler.
+ It looks at the rtl code for a function and outputs assembler code.
+
+ Call `final_start_function' to output the assembler code for function entry,
+ `final' to output assembler code for some RTL code,
+ `final_end_function' to output assembler code for function exit.
+ If a function is compiled in several pieces, each piece is
+ output separately with `final'.
+
+ Some optimizations are also done at this level.
+ Move instructions that were made unnecessary by good register allocation
+ are detected and omitted from the output. (Though most of these
+ are removed by the last jump pass.)
+
+ Instructions to set the condition codes are omitted when it can be
+ seen that the condition codes already had the desired values.
+
+ In some cases it is sufficient if the inherited condition codes
+ have related values, but this may require the following insn
+ (the one that tests the condition codes) to be modified.
+
+   The code for the function prologue and epilogue is generated
+   directly as assembler code by the macros FUNCTION_PROLOGUE and
+   FUNCTION_EPILOGUE.  Those instructions never exist as rtl.  */
+
+#include "config.h"
+#ifdef __STDC__
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include <stdio.h>
+#include <ctype.h>
+
+#include "tree.h"
+#include "rtl.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "insn-codes.h"
+#include "recog.h"
+#include "conditions.h"
+#include "flags.h"
+#include "real.h"
+#include "hard-reg-set.h"
+#include "defaults.h"
+#include "output.h"
+
+/* Get N_SLINE and N_SOL from stab.h if we can expect the file to exist. */
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+#if defined (USG) || defined (NO_STAB_H)
+#include "gstab.h" /* If doing DBX on sysV, use our own stab.h. */
+#else
+#include <stab.h> /* On BSD, use the system's stab.h. */
+#endif /* not USG */
+#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
+
+#ifdef XCOFF_DEBUGGING_INFO
+#include "xcoffout.h"
+#endif
+
+/* .stabd code for line number. */
+#ifndef N_SLINE
+#define N_SLINE 0x44
+#endif
+
+/* .stabs code for included file name. */
+#ifndef N_SOL
+#define N_SOL 0x84
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+/* If we aren't using cc0, CC_STATUS_INIT shouldn't exist. So define a
+ null default for it to save conditionalization later. */
+#ifndef CC_STATUS_INIT
+#define CC_STATUS_INIT
+#endif
+
+/* How to start an assembler comment. */
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START ";#"
+#endif
+
+/* Is the given character a logical line separator for the assembler? */
+#ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
+#define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == ';')
+#endif
+
+/* Nonzero means this function is a leaf function, with no function calls.
+ This variable exists to be examined in FUNCTION_PROLOGUE
+ and FUNCTION_EPILOGUE. Always zero, unless set by some action. */
+int leaf_function;
+
+/* Last insn processed by final_scan_insn. */
+static rtx debug_insn = 0;
+
+/* Line number of last NOTE. */
+static int last_linenum;
+
+/* Filename of last NOTE. */
+static char *last_filename;
+
+/* Number of basic blocks seen so far;
+ used if profile_block_flag is set. */
+static int count_basic_blocks;
+
+/* Nonzero while outputting an `asm' with operands.
+ This means that inconsistencies are the user's fault, so don't abort.
+ The precise value is the insn being output, to pass to error_for_asm. */
+static rtx this_is_asm_operands;
+
+/* Number of operands of this insn, for an `asm' with operands. */
+static int insn_noperands;
+
+/* Compare optimization flag. */
+
+static rtx last_ignored_compare = 0;
+
+/* Flag indicating this insn is the start of a new basic block. */
+
+static int new_block = 1;
+
+/* All the symbol-blocks (levels of scoping) in the compilation
+ are assigned sequence numbers in order of appearance of the
+ beginnings of the symbol-blocks. Both final and dbxout do this,
+ and assume that they will both give the same number to each block.
+ Final uses these sequence numbers to generate assembler label names
+ LBBnnn and LBEnnn for the beginning and end of the symbol-block.
+ Dbxout uses the sequence numbers to generate references to the same labels
+ from the dbx debugging information.
+
+ Sdb records this level at the beginning of each function,
+ in order to find the current level when recursing down declarations.
+ It outputs the block beginning and endings
+ at the point in the asm file where the blocks would begin and end. */
+
+int next_block_index;
+
+/* Assign a unique number to each insn that is output.
+ This can be used to generate unique local labels. */
+
+static int insn_counter = 0;
+
+#ifdef HAVE_cc0
+/* This variable contains machine-dependent flags (defined in tm.h)
+ set and examined by output routines
+ that describe how to interpret the condition codes properly. */
+
+CC_STATUS cc_status;
+
+/* During output of an insn, this contains a copy of cc_status
+ from before the insn. */
+
+CC_STATUS cc_prev_status;
+#endif
+
+/* Indexed by hardware reg number, is 1 if that register is ever
+ used in the current function.
+
+ In life_analysis, or in stupid_life_analysis, this is set
+ up to record the hard regs used explicitly. Reload adds
+ in the hard regs used for holding pseudo regs. Final uses
+ it to generate the code in the function prologue and epilogue
+ to save and restore registers as needed. */
+
+char regs_ever_live[FIRST_PSEUDO_REGISTER];
+
+/* Nonzero means current function must be given a frame pointer.
+ Set in stmt.c if anything is allocated on the stack there.
+ Set in reload1.c if anything is allocated on the stack there. */
+
+int frame_pointer_needed;
+
+/* Assign unique numbers to labels generated for profiling. */
+
+int profile_label_no;
+
+/* Length so far allocated in PENDING_BLOCKS. */
+
+static int max_block_depth;
+
+/* Stack of sequence numbers of symbol-blocks of which we have seen the
+ beginning but not yet the end. Sequence numbers are assigned at
+ the beginning; this stack allows us to find the sequence number
+ of a block that is ending. */
+
+static int *pending_blocks;
+
+/* Number of elements currently in use in PENDING_BLOCKS. */
+
+static int block_depth;
+
+/* Nonzero if we have enabled APP processing of our assembler output.  */
+
+static int app_on;
+
+/* If we are outputting an insn sequence, this contains the sequence rtx.
+ Zero otherwise. */
+
+rtx final_sequence;
+
+#ifdef ASSEMBLER_DIALECT
+
+/* Number of the assembler dialect to use, starting at 0. */
+static int dialect_number;
+#endif
+
+/* Indexed by line number, nonzero if there is a note for that line. */
+
+static char *line_note_exists;
+
+/* Linked list to hold line numbers for each basic block. */
+
+struct bb_list {
+ struct bb_list *next; /* pointer to next basic block */
+ int line_num; /* line number */
+ int file_label_num; /* LPBC<n> label # for stored filename */
+ int func_label_num; /* LPBC<n> label # for stored function name */
+};
+
+static struct bb_list *bb_head = 0; /* Head of basic block list */
+static struct bb_list **bb_tail = &bb_head; /* Ptr to store next bb ptr */
+static int bb_file_label_num = -1; /* Current label # for file */
+static int bb_func_label_num = -1; /* Current label # for func */
+
+/* Linked list to hold the strings for each file and function name output. */
+
+struct bb_str {
+ struct bb_str *next; /* pointer to next string */
+ char *string; /* string */
+ int label_num; /* label number */
+ int length; /* string length */
+};
+
+extern rtx peephole PROTO((rtx));
+
+static struct bb_str *sbb_head = 0; /* Head of string list. */
+static struct bb_str **sbb_tail = &sbb_head; /* Ptr to store next bb str */
+static int sbb_label_num = 0; /* Last label used */
+
+static int asm_insn_count PROTO((rtx));
+static void profile_function PROTO((FILE *));
+static void profile_after_prologue PROTO((FILE *));
+static void add_bb PROTO((FILE *));
+static int add_bb_string PROTO((char *, int));
+static void output_source_line PROTO((FILE *, rtx));
+static rtx walk_alter_subreg PROTO((rtx));
+static int alter_cond PROTO((rtx));
+static void output_operand PROTO((rtx, int));
+static void leaf_renumber_regs PROTO((rtx));
+
+extern char *getpwd ();
+
+/* Initialize data in final at the beginning of a compilation. */
+
+void
+init_final (filename)
+ char *filename;
+{
+ next_block_index = 2;
+ app_on = 0;
+ max_block_depth = 20;
+ pending_blocks = (int *) xmalloc (20 * sizeof *pending_blocks);
+ final_sequence = 0;
+
+#ifdef ASSEMBLER_DIALECT
+ dialect_number = ASSEMBLER_DIALECT;
+#endif
+}
+
+/* Called at end of source file,
+ to output the block-profiling table for this entire compilation. */
+
+void
+end_final (filename)
+ char *filename;
+{
+ int i;
+
+ if (profile_block_flag)
+ {
+ char name[20];
+ int align = exact_log2 (BIGGEST_ALIGNMENT / BITS_PER_UNIT);
+ int size = (POINTER_SIZE / BITS_PER_UNIT) * count_basic_blocks;
+ int rounded = size;
+ struct bb_list *ptr;
+ struct bb_str *sptr;
+
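+      /* E.g., with 4-byte pointers, 10 basic blocks and 32-bit
+	 BIGGEST_ALIGNMENT, size is 40, which is already a multiple of
+	 4 bytes, so rounded stays 40.  */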
+ rounded += (BIGGEST_ALIGNMENT / BITS_PER_UNIT) - 1;
+ rounded = (rounded / (BIGGEST_ALIGNMENT / BITS_PER_UNIT)
+ * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
+
+ data_section ();
+
+ /* Output the main header, of 10 words:
+ 0: 1 if this file's initialized, else 0.
+ 1: address of file name (LPBX1).
+ 2: address of table of counts (LPBX2).
+ 3: number of counts in the table.
+ 4: always 0, for compatibility with Sun.
+
+ The following are GNU extensions:
+
+ 5: address of table of start addrs of basic blocks (LPBX3).
+ 6: Number of bytes in this header.
+ 7: address of table of function names (LPBX4).
+ 8: address of table of line numbers (LPBX5) or 0.
+ 9: address of table of file names (LPBX6) or 0. */
+
+ ASM_OUTPUT_ALIGN (asm_out_file, align);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LPBX", 0);
+ /* zero word */
+ assemble_integer (const0_rtx, UNITS_PER_WORD, 1);
+
+ /* address of filename */
+ ASM_GENERATE_INTERNAL_LABEL (name, "LPBX", 1);
+ assemble_integer (gen_rtx (SYMBOL_REF, Pmode, name), UNITS_PER_WORD, 1);
+
+ /* address of count table */
+ ASM_GENERATE_INTERNAL_LABEL (name, "LPBX", 2);
+ assemble_integer (gen_rtx (SYMBOL_REF, Pmode, name), UNITS_PER_WORD, 1);
+
+ /* count of the # of basic blocks */
+ assemble_integer (GEN_INT (count_basic_blocks), UNITS_PER_WORD, 1);
+
+ /* zero word (link field) */
+ assemble_integer (const0_rtx, UNITS_PER_WORD, 1);
+
+ /* address of basic block start address table */
+ ASM_GENERATE_INTERNAL_LABEL (name, "LPBX", 3);
+ assemble_integer (gen_rtx (SYMBOL_REF, Pmode, name), UNITS_PER_WORD, 1);
+
+ /* byte count for extended structure. */
+ assemble_integer (GEN_INT (10 * UNITS_PER_WORD), UNITS_PER_WORD, 1);
+
+ /* address of function name table */
+ ASM_GENERATE_INTERNAL_LABEL (name, "LPBX", 4);
+ assemble_integer (gen_rtx (SYMBOL_REF, Pmode, name), UNITS_PER_WORD, 1);
+
+ /* address of line number and filename tables if debugging. */
+ if (write_symbols != NO_DEBUG)
+ {
+ ASM_GENERATE_INTERNAL_LABEL (name, "LPBX", 5);
+ assemble_integer (gen_rtx (SYMBOL_REF, Pmode, name), UNITS_PER_WORD, 1);
+ ASM_GENERATE_INTERNAL_LABEL (name, "LPBX", 6);
+ assemble_integer (gen_rtx (SYMBOL_REF, Pmode, name), UNITS_PER_WORD, 1);
+ }
+ else
+ {
+ assemble_integer (const0_rtx, UNITS_PER_WORD, 1);
+ assemble_integer (const0_rtx, UNITS_PER_WORD, 1);
+ }
+
+      /* Output the file name, changing the suffix to .d for Sun tcov
+	 compatibility.  */
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LPBX", 1);
+ {
+ char *cwd = getpwd ();
+ int len = strlen (filename) + strlen (cwd) + 1;
+ char *data_file = (char *) alloca (len + 4);
+
+ strcpy (data_file, cwd);
+ strcat (data_file, "/");
+ strcat (data_file, filename);
+ strip_off_ending (data_file, len);
+ strcat (data_file, ".d");
+ assemble_string (data_file, strlen (data_file) + 1);
+ }
+
+ /* Make space for the table of counts. */
+ if (flag_no_common || size == 0)
+ {
+ /* Realign data section. */
+ ASM_OUTPUT_ALIGN (asm_out_file, align);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LPBX", 2);
+ if (size != 0)
+ assemble_zeros (size);
+ }
+ else
+ {
+ ASM_GENERATE_INTERNAL_LABEL (name, "LPBX", 2);
+#ifdef ASM_OUTPUT_SHARED_LOCAL
+ if (flag_shared_data)
+ ASM_OUTPUT_SHARED_LOCAL (asm_out_file, name, size, rounded);
+ else
+#endif
+#ifdef ASM_OUTPUT_ALIGNED_LOCAL
+ ASM_OUTPUT_ALIGNED_LOCAL (asm_out_file, name, size,
+ BIGGEST_ALIGNMENT);
+#else
+ ASM_OUTPUT_LOCAL (asm_out_file, name, size, rounded);
+#endif
+ }
+
+ /* Output any basic block strings */
+ readonly_data_section ();
+ if (sbb_head)
+ {
+ ASM_OUTPUT_ALIGN (asm_out_file, align);
+ for (sptr = sbb_head; sptr != 0; sptr = sptr->next)
+ {
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LPBC", sptr->label_num);
+ assemble_string (sptr->string, sptr->length);
+ }
+ }
+
+ /* Output the table of addresses. */
+ /* Realign in new section */
+ ASM_OUTPUT_ALIGN (asm_out_file, align);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LPBX", 3);
+ for (i = 0; i < count_basic_blocks; i++)
+ {
+ ASM_GENERATE_INTERNAL_LABEL (name, "LPB", i);
+ assemble_integer (gen_rtx (SYMBOL_REF, Pmode, name),
+ UNITS_PER_WORD, 1);
+ }
+
+ /* Output the table of function names. */
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LPBX", 4);
+ for ((ptr = bb_head), (i = 0); ptr != 0; (ptr = ptr->next), i++)
+ {
+ if (ptr->func_label_num >= 0)
+ {
+ ASM_GENERATE_INTERNAL_LABEL (name, "LPBC", ptr->func_label_num);
+ assemble_integer (gen_rtx (SYMBOL_REF, Pmode, name),
+ UNITS_PER_WORD, 1);
+ }
+ else
+ assemble_integer (const0_rtx, UNITS_PER_WORD, 1);
+ }
+
+ for ( ; i < count_basic_blocks; i++)
+ assemble_integer (const0_rtx, UNITS_PER_WORD, 1);
+
+ if (write_symbols != NO_DEBUG)
+ {
+ /* Output the table of line numbers. */
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LPBX", 5);
+ for ((ptr = bb_head), (i = 0); ptr != 0; (ptr = ptr->next), i++)
+ assemble_integer (GEN_INT (ptr->line_num), UNITS_PER_WORD, 1);
+
+ for ( ; i < count_basic_blocks; i++)
+ assemble_integer (const0_rtx, UNITS_PER_WORD, 1);
+
+ /* Output the table of file names. */
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LPBX", 6);
+ for ((ptr = bb_head), (i = 0); ptr != 0; (ptr = ptr->next), i++)
+ {
+ if (ptr->file_label_num >= 0)
+ {
+ ASM_GENERATE_INTERNAL_LABEL (name, "LPBC", ptr->file_label_num);
+ assemble_integer (gen_rtx (SYMBOL_REF, Pmode, name),
+ UNITS_PER_WORD, 1);
+ }
+ else
+ assemble_integer (const0_rtx, UNITS_PER_WORD, 1);
+ }
+
+ for ( ; i < count_basic_blocks; i++)
+ assemble_integer (const0_rtx, UNITS_PER_WORD, 1);
+ }
+
+ /* End with the address of the table of addresses,
+ so we can find it easily, as the last word in the file's text. */
+ ASM_GENERATE_INTERNAL_LABEL (name, "LPBX", 3);
+ assemble_integer (gen_rtx (SYMBOL_REF, Pmode, name), UNITS_PER_WORD, 1);
+ }
+}
+
+/* Enable APP processing of subsequent output.
+ Used before the output from an `asm' statement. */
+
+void
+app_enable ()
+{
+ if (! app_on)
+ {
+ fprintf (asm_out_file, ASM_APP_ON);
+ app_on = 1;
+ }
+}
+
+/* Disable APP processing of subsequent output.
+ Called from varasm.c before most kinds of output. */
+
+void
+app_disable ()
+{
+ if (app_on)
+ {
+ fprintf (asm_out_file, ASM_APP_OFF);
+ app_on = 0;
+ }
+}
+
+/* Return the number of slots filled in the current
+ delayed branch sequence (we don't count the insn needing the
+ delay slot). Zero if not in a delayed branch sequence. */
+
+#ifdef DELAY_SLOTS
+int
+dbr_sequence_length ()
+{
+ if (final_sequence != 0)
+ return XVECLEN (final_sequence, 0) - 1;
+ else
+ return 0;
+}
+#endif
+
+/* The next two pages contain routines used to compute the length of an insn
+ and to shorten branches. */
+
+/* Arrays for insn lengths and addresses.  The latter is referenced by
+   `insn_current_length'.  */
+
+static short *insn_lengths;
+int *insn_addresses;
+
+/* Address of insn being processed. Used by `insn_current_length'. */
+int insn_current_address;
+
+/* Indicate that branch shortening hasn't yet been done. */
+
+void
+init_insn_lengths ()
+{
+ insn_lengths = 0;
+}
+
+/* Obtain the current length of an insn. If branch shortening has been done,
+ get its actual length. Otherwise, get its maximum length. */
+
+int
+get_attr_length (insn)
+ rtx insn;
+{
+#ifdef HAVE_ATTR_length
+ rtx body;
+ int i;
+ int length = 0;
+
+ if (insn_lengths)
+ return insn_lengths[INSN_UID (insn)];
+ else
+ switch (GET_CODE (insn))
+ {
+ case NOTE:
+ case BARRIER:
+ case CODE_LABEL:
+ return 0;
+
+ case CALL_INSN:
+ length = insn_default_length (insn);
+ break;
+
+ case JUMP_INSN:
+ body = PATTERN (insn);
+ if (GET_CODE (body) == ADDR_VEC || GET_CODE (body) == ADDR_DIFF_VEC)
+ {
+ /* This only takes room if jump tables go into the text section. */
+#if !defined(READONLY_DATA_SECTION) || defined(JUMP_TABLES_IN_TEXT_SECTION)
+ length = (XVECLEN (body, GET_CODE (body) == ADDR_DIFF_VEC)
+ * GET_MODE_SIZE (GET_MODE (body)));
+
+ /* Be pessimistic and assume worst-case alignment. */
+ length += (GET_MODE_SIZE (GET_MODE (body)) - 1);
+#else
+ return 0;
+#endif
+ }
+ else
+ length = insn_default_length (insn);
+ break;
+
+ case INSN:
+ body = PATTERN (insn);
+ if (GET_CODE (body) == USE || GET_CODE (body) == CLOBBER)
+ return 0;
+
+ else if (GET_CODE (body) == ASM_INPUT || asm_noperands (body) >= 0)
+ length = asm_insn_count (body) * insn_default_length (insn);
+ else if (GET_CODE (body) == SEQUENCE)
+ for (i = 0; i < XVECLEN (body, 0); i++)
+ length += get_attr_length (XVECEXP (body, 0, i));
+ else
+ length = insn_default_length (insn);
+ }
+
+#ifdef ADJUST_INSN_LENGTH
+ ADJUST_INSN_LENGTH (insn, length);
+#endif
+ return length;
+#else /* not HAVE_ATTR_length */
+ return 0;
+#endif /* not HAVE_ATTR_length */
+}
+
+/* Make a pass over all insns and compute their actual lengths by shortening
+ any branches of variable length if possible. */
+
+/* Give a default value for the lowest address in a function. */
+
+#ifndef FIRST_INSN_ADDRESS
+#define FIRST_INSN_ADDRESS 0
+#endif
+
+void
+shorten_branches (first)
+ rtx first;
+{
+#ifdef HAVE_ATTR_length
+ rtx insn;
+ int something_changed = 1;
+ int max_uid = 0;
+ char *varying_length;
+ rtx body;
+ int uid;
+
+ /* Compute maximum UID and allocate arrays. */
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (INSN_UID (insn) > max_uid)
+ max_uid = INSN_UID (insn);
+
+ max_uid++;
+ insn_lengths = (short *) oballoc (max_uid * sizeof (short));
+ insn_addresses = (int *) oballoc (max_uid * sizeof (int));
+ varying_length = (char *) oballoc (max_uid * sizeof (char));
+
+ /* Compute initial lengths, addresses, and varying flags for each insn. */
+ for (insn_current_address = FIRST_INSN_ADDRESS, insn = first;
+ insn != 0;
+ insn_current_address += insn_lengths[uid], insn = NEXT_INSN (insn))
+ {
+ uid = INSN_UID (insn);
+ insn_addresses[uid] = insn_current_address;
+ insn_lengths[uid] = 0;
+ varying_length[uid] = 0;
+
+ if (GET_CODE (insn) == NOTE || GET_CODE (insn) == BARRIER
+ || GET_CODE (insn) == CODE_LABEL)
+ continue;
+
+ body = PATTERN (insn);
+ if (GET_CODE (body) == ADDR_VEC || GET_CODE (body) == ADDR_DIFF_VEC)
+ {
+	  /* This only takes room if jump tables go into the text
+	     section.  */
+#if !defined(READONLY_DATA_SECTION) || defined(JUMP_TABLES_IN_TEXT_SECTION)
+ int unitsize = GET_MODE_SIZE (GET_MODE (body));
+
+ insn_lengths[uid] = (XVECLEN (body, GET_CODE (body) == ADDR_DIFF_VEC)
+ * GET_MODE_SIZE (GET_MODE (body)));
+
+ /* Account for possible alignment. */
+ insn_lengths[uid]
+ += unitsize - (insn_current_address & (unitsize - 1));
+#else
+ ;
+#endif
+ }
+ else if (asm_noperands (body) >= 0)
+ insn_lengths[uid] = asm_insn_count (body) * insn_default_length (insn);
+ else if (GET_CODE (body) == SEQUENCE)
+ {
+ int i;
+ int const_delay_slots;
+#ifdef DELAY_SLOTS
+ const_delay_slots = const_num_delay_slots (XVECEXP (body, 0, 0));
+#else
+ const_delay_slots = 0;
+#endif
+ /* Inside a delay slot sequence, we do not do any branch shortening
+ if the shortening could change the number of delay slots
+ of the branch. */
+ for (i = 0; i < XVECLEN (body, 0); i++)
+ {
+ rtx inner_insn = XVECEXP (body, 0, i);
+ int inner_uid = INSN_UID (inner_insn);
+ int inner_length;
+
+ if (asm_noperands (PATTERN (XVECEXP (body, 0, i))) >= 0)
+ inner_length = (asm_insn_count (PATTERN (inner_insn))
+ * insn_default_length (inner_insn));
+ else
+ inner_length = insn_default_length (inner_insn);
+
+ insn_lengths[inner_uid] = inner_length;
+ if (const_delay_slots)
+ {
+ if ((varying_length[inner_uid]
+ = insn_variable_length_p (inner_insn)) != 0)
+ varying_length[uid] = 1;
+ insn_addresses[inner_uid] = (insn_current_address +
+ insn_lengths[uid]);
+ }
+ else
+ varying_length[inner_uid] = 0;
+ insn_lengths[uid] += inner_length;
+ }
+ }
+ else if (GET_CODE (body) != USE && GET_CODE (body) != CLOBBER)
+ {
+ insn_lengths[uid] = insn_default_length (insn);
+ varying_length[uid] = insn_variable_length_p (insn);
+ }
+
+ /* If needed, do any adjustment. */
+#ifdef ADJUST_INSN_LENGTH
+ ADJUST_INSN_LENGTH (insn, insn_lengths[uid]);
+#endif
+ }
+
+ /* Now loop over all the insns finding varying length insns. For each,
+ get the current insn length. If it has changed, reflect the change.
+ When nothing changes for a full pass, we are done. */
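+  /* E.g., a variable-length forward branch first measured long may
+     shrink once the insns it jumps over shrink; each pass recomputes
+     addresses from the latest lengths until a pass changes nothing.  */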
+
+ while (something_changed)
+ {
+ something_changed = 0;
+ for (insn_current_address = FIRST_INSN_ADDRESS, insn = first;
+ insn != 0;
+ insn = NEXT_INSN (insn))
+ {
+ int new_length;
+ int tmp_length;
+
+ uid = INSN_UID (insn);
+ insn_addresses[uid] = insn_current_address;
+ if (! varying_length[uid])
+ {
+ insn_current_address += insn_lengths[uid];
+ continue;
+ }
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ int i;
+
+ body = PATTERN (insn);
+ new_length = 0;
+ for (i = 0; i < XVECLEN (body, 0); i++)
+ {
+ rtx inner_insn = XVECEXP (body, 0, i);
+ int inner_uid = INSN_UID (inner_insn);
+ int inner_length;
+
+ insn_addresses[inner_uid] = insn_current_address;
+
+ /* insn_current_length returns 0 for insns with a
+ non-varying length. */
+ if (! varying_length[inner_uid])
+ inner_length = insn_lengths[inner_uid];
+ else
+ inner_length = insn_current_length (inner_insn);
+
+ if (inner_length != insn_lengths[inner_uid])
+ {
+ insn_lengths[inner_uid] = inner_length;
+ something_changed = 1;
+ }
+ insn_current_address += insn_lengths[inner_uid];
+ new_length += inner_length;
+ }
+ }
+ else
+ {
+ new_length = insn_current_length (insn);
+ insn_current_address += new_length;
+ }
+
+#ifdef SHORTEN_WITH_ADJUST_INSN_LENGTH
+#ifdef ADJUST_INSN_LENGTH
+ /* If needed, do any adjustment. */
+ tmp_length = new_length;
+ ADJUST_INSN_LENGTH (insn, new_length);
+ insn_current_address += (new_length - tmp_length);
+#endif
+#endif
+
+ if (new_length != insn_lengths[uid])
+ {
+ insn_lengths[uid] = new_length;
+ something_changed = 1;
+ }
+ }
+ }
+#endif /* HAVE_ATTR_length */
+}
+
+#ifdef HAVE_ATTR_length
+/* Given the body of an INSN known to be generated by an ASM statement, return
+ the number of machine instructions likely to be generated for this insn.
+ This is used to compute its length. */
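+/* E.g., "mov %1,%0; add %2,%0" and "mov %1,%0\n\tadd %2,%0" each
+   count as two instructions.  */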
+
+static int
+asm_insn_count (body)
+ rtx body;
+{
+ char *template;
+ int count = 1;
+
+ for (template = decode_asm_operands (body, NULL_PTR, NULL_PTR,
+ NULL_PTR, NULL_PTR);
+ *template; template++)
+ if (IS_ASM_LOGICAL_LINE_SEPARATOR(*template) || *template == '\n')
+ count++;
+
+ return count;
+}
+#endif
+
+/* Output assembler code for the start of a function,
+ and initialize some of the variables in this file
+ for the new function. The label for the function and associated
+ assembler pseudo-ops have already been output in `assemble_start_function'.
+
+ FIRST is the first insn of the rtl for the function being compiled.
+ FILE is the file to write assembler code to.
+ OPTIMIZE is nonzero if we should eliminate redundant
+ test and compare insns. */
+
+void
+final_start_function (first, file, optimize)
+ rtx first;
+ FILE *file;
+ int optimize;
+{
+ block_depth = 0;
+
+ this_is_asm_operands = 0;
+
+#ifdef NON_SAVING_SETJMP
+ /* A function that calls setjmp should save and restore all the
+ call-saved registers on a system where longjmp clobbers them. */
+ if (NON_SAVING_SETJMP && current_function_calls_setjmp)
+ {
+ int i;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (!call_used_regs[i] && !call_fixed_regs[i])
+ regs_ever_live[i] = 1;
+ }
+#endif
+
+ /* Initial line number is supposed to be output
+ before the function's prologue and label
+ so that the function's address will not appear to be
+ in the last statement of the preceding function. */
+ if (NOTE_LINE_NUMBER (first) != NOTE_INSN_DELETED)
+ {
+ if (write_symbols == SDB_DEBUG)
+ /* For sdb, let's not, but say we did.
+ We need to set last_linenum for sdbout_function_begin,
+ but we can't have an actual line number before the .bf symbol.
+ (sdb_begin_function_line is not set,
+ and other compilers don't do it.) */
+ last_linenum = NOTE_LINE_NUMBER (first);
+#ifdef XCOFF_DEBUGGING_INFO
+ else if (write_symbols == XCOFF_DEBUG)
+ {
+ last_linenum = NOTE_LINE_NUMBER (first);
+ xcoffout_output_first_source_line (file, last_linenum);
+ }
+#endif
+ else
+ output_source_line (file, first);
+ }
+
+#ifdef LEAF_REG_REMAP
+ if (leaf_function)
+ leaf_renumber_regs (first);
+#endif
+
+ /* The Sun386i and perhaps other machines don't work right
+ if the profiling code comes after the prologue. */
+#ifdef PROFILE_BEFORE_PROLOGUE
+ if (profile_flag)
+ profile_function (file);
+#endif /* PROFILE_BEFORE_PROLOGUE */
+
+#ifdef FUNCTION_PROLOGUE
+ /* First output the function prologue: code to set up the stack frame. */
+ FUNCTION_PROLOGUE (file, get_frame_size ());
+#endif
+
+#if defined (SDB_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ if (write_symbols == SDB_DEBUG || write_symbols == XCOFF_DEBUG)
+ next_block_index = 1;
+#endif
+
+ /* If the machine represents the prologue as RTL, the profiling code must
+ be emitted when NOTE_INSN_PROLOGUE_END is scanned. */
+#ifdef HAVE_prologue
+ if (! HAVE_prologue)
+#endif
+ profile_after_prologue (file);
+
+ profile_label_no++;
+
+ /* If we are doing basic block profiling, remember a printable version
+ of the function name. */
+ if (profile_block_flag)
+ {
+ char *junk = "function";
+ bb_func_label_num =
+ add_bb_string ((*decl_printable_name) (current_function_decl, &junk), FALSE);
+ }
+}
+
+static void
+profile_after_prologue (file)
+ FILE *file;
+{
+#ifdef FUNCTION_BLOCK_PROFILER
+ if (profile_block_flag)
+ {
+ FUNCTION_BLOCK_PROFILER (file, profile_label_no);
+ }
+#endif /* FUNCTION_BLOCK_PROFILER */
+
+#ifndef PROFILE_BEFORE_PROLOGUE
+ if (profile_flag)
+ profile_function (file);
+#endif /* not PROFILE_BEFORE_PROLOGUE */
+}
+
+static void
+profile_function (file)
+ FILE *file;
+{
+#ifndef NO_PROFILE_DATA
+ int align = MIN (BIGGEST_ALIGNMENT, POINTER_SIZE);
+#endif /* not NO_PROFILE_DATA */
+ int sval = current_function_returns_struct;
+ int cxt = current_function_needs_context;
+
+#ifndef NO_PROFILE_DATA
+ data_section ();
+ ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
+ ASM_OUTPUT_INTERNAL_LABEL (file, "LP", profile_label_no);
+ assemble_integer (const0_rtx, POINTER_SIZE / BITS_PER_UNIT, 1);
+#endif /* not NO_PROFILE_DATA */
+
+ text_section ();
+
+#ifdef STRUCT_VALUE_INCOMING_REGNUM
+ if (sval)
+ ASM_OUTPUT_REG_PUSH (file, STRUCT_VALUE_INCOMING_REGNUM);
+#else
+#ifdef STRUCT_VALUE_REGNUM
+ if (sval)
+ ASM_OUTPUT_REG_PUSH (file, STRUCT_VALUE_REGNUM);
+#endif
+#endif
+
+#if 0
+#ifdef STATIC_CHAIN_INCOMING_REGNUM
+ if (cxt)
+ ASM_OUTPUT_REG_PUSH (file, STATIC_CHAIN_INCOMING_REGNUM);
+#else
+#ifdef STATIC_CHAIN_REGNUM
+ if (cxt)
+ ASM_OUTPUT_REG_PUSH (file, STATIC_CHAIN_REGNUM);
+#endif
+#endif
+#endif /* 0 */
+
+ FUNCTION_PROFILER (file, profile_label_no);
+
+#if 0
+#ifdef STATIC_CHAIN_INCOMING_REGNUM
+ if (cxt)
+ ASM_OUTPUT_REG_POP (file, STATIC_CHAIN_INCOMING_REGNUM);
+#else
+#ifdef STATIC_CHAIN_REGNUM
+ if (cxt)
+ ASM_OUTPUT_REG_POP (file, STATIC_CHAIN_REGNUM);
+#endif
+#endif
+#endif /* 0 */
+
+#ifdef STRUCT_VALUE_INCOMING_REGNUM
+ if (sval)
+ ASM_OUTPUT_REG_POP (file, STRUCT_VALUE_INCOMING_REGNUM);
+#else
+#ifdef STRUCT_VALUE_REGNUM
+ if (sval)
+ ASM_OUTPUT_REG_POP (file, STRUCT_VALUE_REGNUM);
+#endif
+#endif
+}
+
+/* Output assembler code for the end of a function.
+ For clarity, args are same as those of `final_start_function'
+ even though not all of them are needed. */
+
+void
+final_end_function (first, file, optimize)
+ rtx first;
+ FILE *file;
+ int optimize;
+{
+ if (app_on)
+ {
+ fprintf (file, ASM_APP_OFF);
+ app_on = 0;
+ }
+
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ sdbout_end_function (last_linenum);
+#endif
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ dwarfout_end_function ();
+#endif
+
+#ifdef XCOFF_DEBUGGING_INFO
+ if (write_symbols == XCOFF_DEBUG)
+ xcoffout_end_function (file, last_linenum);
+#endif
+
+#ifdef FUNCTION_EPILOGUE
+ /* Finally, output the function epilogue:
+ code to restore the stack frame and return to the caller. */
+ FUNCTION_EPILOGUE (file, get_frame_size ());
+#endif
+
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ sdbout_end_epilogue ();
+#endif
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ dwarfout_end_epilogue ();
+#endif
+
+#ifdef XCOFF_DEBUGGING_INFO
+ if (write_symbols == XCOFF_DEBUG)
+ xcoffout_end_epilogue (file);
+#endif
+
+ bb_func_label_num = -1; /* not in function, nuke label # */
+
+ /* If FUNCTION_EPILOGUE is not defined, then the function body
+ itself contains return instructions wherever needed. */
+}
+
+/* Add a block to the linked list that remembers the current line/file/function
+ for basic block profiling. Emit the label in front of the basic block and
+ the instructions that increment the count field. */
+
+static void
+add_bb (file)
+ FILE *file;
+{
+ struct bb_list *ptr = (struct bb_list *) permalloc (sizeof (struct bb_list));
+
+ /* Add basic block to linked list. */
+ ptr->next = 0;
+ ptr->line_num = last_linenum;
+ ptr->file_label_num = bb_file_label_num;
+ ptr->func_label_num = bb_func_label_num;
+ *bb_tail = ptr;
+ bb_tail = &ptr->next;
+
+  /* Emit a label so the table of basic-block use counts
+     can point at the code it applies to.  */
+ ASM_OUTPUT_INTERNAL_LABEL (file, "LPB", count_basic_blocks);
+
+ /* Before first insn of this basic block, increment the
+ count of times it was entered. */
+#ifdef BLOCK_PROFILER
+ BLOCK_PROFILER (file, count_basic_blocks);
+ CC_STATUS_INIT;
+#endif
+
+ new_block = 0;
+ count_basic_blocks++;
+}
+
+/* Add a string to be used for basic block profiling. */
+
+static int
+add_bb_string (string, perm_p)
+ char *string;
+ int perm_p;
+{
+ int len;
+ struct bb_str *ptr = 0;
+
+ if (!string)
+ {
+ string = "<unknown>";
+ perm_p = TRUE;
+ }
+
+  /* Allocate a new string if the current string isn't permanent.  If
+     the string is permanent, search for the same string in other
+     allocations.  */
+
+ len = strlen (string) + 1;
+ if (!perm_p)
+ {
+ char *p = (char *) permalloc (len);
+ bcopy (string, p, len);
+ string = p;
+ }
+ else
+ for (ptr = sbb_head; ptr != (struct bb_str *)0; ptr = ptr->next)
+ if (ptr->string == string)
+ break;
+
+ /* Allocate a new string block if we need to. */
+ if (!ptr)
+ {
+ ptr = (struct bb_str *) permalloc (sizeof (*ptr));
+ ptr->next = 0;
+ ptr->length = len;
+ ptr->label_num = sbb_label_num++;
+ ptr->string = string;
+ *sbb_tail = ptr;
+ sbb_tail = &ptr->next;
+ }
+
+ return ptr->label_num;
+}
+
+
+/* Output assembler code for some insns: all or part of a function.
+ For description of args, see `final_start_function', above.
+
+ PRESCAN is 1 if we are not really outputting,
+ just scanning as if we were outputting.
+ Prescanning deletes and rearranges insns just like ordinary output.
+ PRESCAN is -2 if we are outputting after having prescanned.
+ In this case, don't try to delete or rearrange insns
+ because that has already been done.
+ Prescanning is done only on certain machines. */
+
+void
+final (first, file, optimize, prescan)
+ rtx first;
+ FILE *file;
+ int optimize;
+ int prescan;
+{
+ register rtx insn;
+ int max_line = 0;
+
+ last_ignored_compare = 0;
+ new_block = 1;
+
+ /* Make a map indicating which line numbers appear in this function.
+ When producing SDB debugging info, delete troublesome line number
+ notes from inlined functions in other files as well as duplicate
+ line number notes. */
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ {
+ rtx last = 0;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
+ {
+ if ((RTX_INTEGRATED_P (insn)
+ && strcmp (NOTE_SOURCE_FILE (insn), main_input_filename) != 0)
+ || (last != 0
+ && NOTE_LINE_NUMBER (insn) == NOTE_LINE_NUMBER (last)
+ && NOTE_SOURCE_FILE (insn) == NOTE_SOURCE_FILE (last)))
+ {
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ continue;
+ }
+ last = insn;
+ if (NOTE_LINE_NUMBER (insn) > max_line)
+ max_line = NOTE_LINE_NUMBER (insn);
+ }
+ }
+ else
+#endif
+ {
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > max_line)
+ max_line = NOTE_LINE_NUMBER (insn);
+ }
+
+ line_note_exists = (char *) oballoc (max_line + 1);
+ bzero (line_note_exists, max_line + 1);
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
+ line_note_exists[NOTE_LINE_NUMBER (insn)] = 1;
+
+ init_recog ();
+
+ CC_STATUS_INIT;
+
+ /* Output the insns. */
+ for (insn = NEXT_INSN (first); insn;)
+ insn = final_scan_insn (insn, file, optimize, prescan, 0);
+
+ /* Do basic-block profiling here
+ if the last insn was a conditional branch. */
+ if (profile_block_flag && new_block)
+ add_bb (file);
+}
+
+/* The final scan for one insn, INSN.
+ Args are same as in `final', except that INSN
+ is the insn being scanned.
+ Value returned is the next insn to be scanned.
+
+ NOPEEPHOLES is the flag to disallow peephole processing (currently
+ used for within delayed branch sequence output). */
+
+rtx
+final_scan_insn (insn, file, optimize, prescan, nopeepholes)
+ rtx insn;
+ FILE *file;
+ int optimize;
+ int prescan;
+ int nopeepholes;
+{
+ register int i;
+ insn_counter++;
+
+ /* Ignore deleted insns. These can occur when we split insns (due to a
+ template of "#") while not optimizing. */
+ if (INSN_DELETED_P (insn))
+ return NEXT_INSN (insn);
+
+ switch (GET_CODE (insn))
+ {
+ case NOTE:
+ if (prescan > 0)
+ break;
+
+ /* Align the beginning of a loop, for higher speed
+ on certain machines. */
+
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG && optimize > 0)
+ {
+#ifdef ASM_OUTPUT_LOOP_ALIGN
+ rtx next = next_nonnote_insn (insn);
+ if (next && GET_CODE (next) == CODE_LABEL)
+ {
+ ASM_OUTPUT_LOOP_ALIGN (asm_out_file);
+ }
+#endif
+ break;
+ }
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ break;
+
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PROLOGUE_END)
+ {
+#ifdef FUNCTION_END_PROLOGUE
+ FUNCTION_END_PROLOGUE (file);
+#endif
+ profile_after_prologue (file);
+ break;
+ }
+
+#ifdef FUNCTION_BEGIN_EPILOGUE
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG)
+ {
+ FUNCTION_BEGIN_EPILOGUE (file);
+ break;
+ }
+#endif
+
+ if (write_symbols == NO_DEBUG)
+ break;
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG)
+ {
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ sdbout_begin_function (last_linenum);
+#endif
+#ifdef XCOFF_DEBUGGING_INFO
+ if (write_symbols == XCOFF_DEBUG)
+ xcoffout_begin_function (file, last_linenum);
+#endif
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ dwarfout_begin_function ();
+#endif
+ break;
+ }
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
+ break; /* An insn that was "deleted" */
+ if (app_on)
+ {
+ fprintf (file, ASM_APP_OFF);
+ app_on = 0;
+ }
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG
+ && (debug_info_level == DINFO_LEVEL_NORMAL
+ || debug_info_level == DINFO_LEVEL_VERBOSE
+#ifdef DWARF_DEBUGGING_INFO
+ || write_symbols == DWARF_DEBUG
+#endif
+ )
+ )
+ {
+ /* Beginning of a symbol-block. Assign it a sequence number
+ and push the number onto the stack PENDING_BLOCKS. */
+
+ if (block_depth == max_block_depth)
+ {
+ /* PENDING_BLOCKS is full; make it longer. */
+ max_block_depth *= 2;
+ pending_blocks
+ = (int *) xrealloc (pending_blocks,
+ max_block_depth * sizeof (int));
+ }
+ pending_blocks[block_depth++] = next_block_index;
+
+ /* Output debugging info about the symbol-block beginning. */
+
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ sdbout_begin_block (file, last_linenum, next_block_index);
+#endif
+#ifdef XCOFF_DEBUGGING_INFO
+ if (write_symbols == XCOFF_DEBUG)
+ xcoffout_begin_block (file, last_linenum, next_block_index);
+#endif
+#ifdef DBX_DEBUGGING_INFO
+ if (write_symbols == DBX_DEBUG)
+ ASM_OUTPUT_INTERNAL_LABEL (file, "LBB", next_block_index);
+#endif
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG && block_depth > 1)
+ dwarfout_begin_block (next_block_index);
+#endif
+
+ next_block_index++;
+ }
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END
+ && (debug_info_level == DINFO_LEVEL_NORMAL
+ || debug_info_level == DINFO_LEVEL_VERBOSE
+#ifdef DWARF_DEBUGGING_INFO
+ || write_symbols == DWARF_DEBUG
+#endif
+ )
+ )
+ {
+ /* End of a symbol-block. Pop its sequence number off
+ PENDING_BLOCKS and output debugging info based on that. */
+
+ --block_depth;
+
+#ifdef XCOFF_DEBUGGING_INFO
+ if (write_symbols == XCOFF_DEBUG && block_depth >= 0)
+ xcoffout_end_block (file, last_linenum, pending_blocks[block_depth]);
+#endif
+#ifdef DBX_DEBUGGING_INFO
+ if (write_symbols == DBX_DEBUG && block_depth >= 0)
+ ASM_OUTPUT_INTERNAL_LABEL (file, "LBE",
+ pending_blocks[block_depth]);
+#endif
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG && block_depth >= 0)
+ sdbout_end_block (file, last_linenum, pending_blocks[block_depth]);
+#endif
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG && block_depth >= 1)
+ dwarfout_end_block (pending_blocks[block_depth]);
+#endif
+ }
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL
+ && (debug_info_level == DINFO_LEVEL_NORMAL
+ || debug_info_level == DINFO_LEVEL_VERBOSE))
+ {
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ dwarfout_label (insn);
+#endif
+ }
+ else if (NOTE_LINE_NUMBER (insn) > 0)
+ /* This note is a line-number. */
+ {
+ register rtx note;
+
+#if 0 /* This is what we used to do. */
+ output_source_line (file, insn);
+#endif
+ int note_after = 0;
+
+ /* If there is anything real after this note,
+ output it. If another line note follows, omit this one. */
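+	  /* E.g., two line notes with nothing but notes between them
+	     collapse to the later one, provided no line number in the
+	     gap has a note elsewhere in the function.  */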
+ for (note = NEXT_INSN (insn); note; note = NEXT_INSN (note))
+ {
+ if (GET_CODE (note) != NOTE && GET_CODE (note) != CODE_LABEL)
+ break;
+	      /* These types of notes can be significant,
+		 so make sure the preceding line number stays.  */
+ else if (GET_CODE (note) == NOTE
+ && (NOTE_LINE_NUMBER (note) == NOTE_INSN_BLOCK_BEG
+ || NOTE_LINE_NUMBER (note) == NOTE_INSN_BLOCK_END
+ || NOTE_LINE_NUMBER (note) == NOTE_INSN_FUNCTION_BEG))
+ break;
+ else if (GET_CODE (note) == NOTE && NOTE_LINE_NUMBER (note) > 0)
+ {
+ /* Another line note follows; we can delete this note
+ if no intervening line numbers have notes elsewhere. */
+ int num;
+ for (num = NOTE_LINE_NUMBER (insn) + 1;
+ num < NOTE_LINE_NUMBER (note);
+ num++)
+ if (line_note_exists[num])
+ break;
+
+ if (num >= NOTE_LINE_NUMBER (note))
+ note_after = 1;
+ break;
+ }
+ }
+
+ /* Output this line note
+ if it is the first or the last line note in a row. */
+ if (!note_after)
+ output_source_line (file, insn);
+ }
+ break;
+
+ case BARRIER:
+#ifdef ASM_OUTPUT_ALIGN_CODE
+ /* Don't litter the assembler output with needless alignments. A
+ BARRIER will be placed at the end of every function if HAVE_epilogue
+ is true. */
+ if (NEXT_INSN (insn))
+ ASM_OUTPUT_ALIGN_CODE (file);
+#endif
+ break;
+
+ case CODE_LABEL:
+ CC_STATUS_INIT;
+ if (prescan > 0)
+ break;
+ new_block = 1;
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG && LABEL_NAME (insn))
+ sdbout_label (insn);
+#endif
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG && LABEL_NAME (insn))
+ dwarfout_label (insn);
+#endif
+ if (app_on)
+ {
+ fprintf (file, ASM_APP_OFF);
+ app_on = 0;
+ }
+ if (NEXT_INSN (insn) != 0
+ && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN)
+ {
+ rtx nextbody = PATTERN (NEXT_INSN (insn));
+
+ /* If this label is followed by a jump-table,
+ make sure we put the label in the read-only section. Also
+ possibly write the label and jump table together. */
+
+ if (GET_CODE (nextbody) == ADDR_VEC
+ || GET_CODE (nextbody) == ADDR_DIFF_VEC)
+ {
+#ifndef JUMP_TABLES_IN_TEXT_SECTION
+ readonly_data_section ();
+#ifdef READONLY_DATA_SECTION
+ ASM_OUTPUT_ALIGN (file,
+ exact_log2 (BIGGEST_ALIGNMENT
+ / BITS_PER_UNIT));
+#endif /* READONLY_DATA_SECTION */
+#else /* JUMP_TABLES_IN_TEXT_SECTION */
+ text_section ();
+#endif /* JUMP_TABLES_IN_TEXT_SECTION */
+#ifdef ASM_OUTPUT_CASE_LABEL
+ ASM_OUTPUT_CASE_LABEL (file, "L", CODE_LABEL_NUMBER (insn),
+ NEXT_INSN (insn));
+#else
+ ASM_OUTPUT_INTERNAL_LABEL (file, "L", CODE_LABEL_NUMBER (insn));
+#endif
+ break;
+ }
+ }
+
+ ASM_OUTPUT_INTERNAL_LABEL (file, "L", CODE_LABEL_NUMBER (insn));
+ break;
+
+ default:
+ {
+ register rtx body = PATTERN (insn);
+ int insn_code_number;
+ char *template;
+ rtx note;
+
+ /* An INSN, JUMP_INSN or CALL_INSN.
+ First check for special kinds that recog doesn't recognize. */
+
+ if (GET_CODE (body) == USE /* These are just declarations */
+ || GET_CODE (body) == CLOBBER)
+ break;
+
+#ifdef HAVE_cc0
+ /* If there is a REG_CC_SETTER note on this insn, it means that
+ the setting of the condition code was done in the delay slot
+ of the insn that branched here. So recover the cc status
+ from the insn that set it. */
+
+ note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
+ if (note)
+ {
+ NOTICE_UPDATE_CC (PATTERN (XEXP (note, 0)), XEXP (note, 0));
+ cc_prev_status = cc_status;
+ }
+#endif
+
+ /* Detect insns that are really jump-tables
+ and output them as such. */
+
+ if (GET_CODE (body) == ADDR_VEC || GET_CODE (body) == ADDR_DIFF_VEC)
+ {
+ register int vlen, idx;
+
+ if (prescan > 0)
+ break;
+
+ if (app_on)
+ {
+ fprintf (file, ASM_APP_OFF);
+ app_on = 0;
+ }
+
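+	  /* The vector of labels is operand 0 of an ADDR_VEC but operand
+	     1 of an ADDR_DIFF_VEC, whose operand 0 is the base label;
+	     hence the index computed from the code here.  */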
+ vlen = XVECLEN (body, GET_CODE (body) == ADDR_DIFF_VEC);
+ for (idx = 0; idx < vlen; idx++)
+ {
+ if (GET_CODE (body) == ADDR_VEC)
+ {
+#ifdef ASM_OUTPUT_ADDR_VEC_ELT
+ ASM_OUTPUT_ADDR_VEC_ELT
+ (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
+#else
+ abort ();
+#endif
+ }
+ else
+ {
+#ifdef ASM_OUTPUT_ADDR_DIFF_ELT
+ ASM_OUTPUT_ADDR_DIFF_ELT
+ (file,
+ CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
+ CODE_LABEL_NUMBER (XEXP (XEXP (body, 0), 0)));
+#else
+ abort ();
+#endif
+ }
+ }
+#ifdef ASM_OUTPUT_CASE_END
+ ASM_OUTPUT_CASE_END (file,
+ CODE_LABEL_NUMBER (PREV_INSN (insn)),
+ insn);
+#endif
+
+ text_section ();
+
+ break;
+ }
+
+ /* Do basic-block profiling when we reach a new block.
+ Done here to avoid jump tables. */
+ if (profile_block_flag && new_block)
+ add_bb (file);
+
+ if (GET_CODE (body) == ASM_INPUT)
+ {
+ /* There's no telling what that did to the condition codes. */
+ CC_STATUS_INIT;
+ if (prescan > 0)
+ break;
+ if (! app_on)
+ {
+ fprintf (file, ASM_APP_ON);
+ app_on = 1;
+ }
+ fprintf (asm_out_file, "\t%s\n", XSTR (body, 0));
+ break;
+ }
+
+ /* Detect `asm' construct with operands. */
+ if (asm_noperands (body) >= 0)
+ {
+ int noperands = asm_noperands (body);
+ rtx *ops = (rtx *) alloca (noperands * sizeof (rtx));
+ char *string;
+
+ /* There's no telling what that did to the condition codes. */
+ CC_STATUS_INIT;
+ if (prescan > 0)
+ break;
+
+ if (! app_on)
+ {
+ fprintf (file, ASM_APP_ON);
+ app_on = 1;
+ }
+
+ /* Get out the operand values. */
+ string = decode_asm_operands (body, ops, NULL_PTR,
+ NULL_PTR, NULL_PTR);
+ /* Inhibit aborts on what would otherwise be compiler bugs. */
+ insn_noperands = noperands;
+ this_is_asm_operands = insn;
+
+ /* Output the insn using them. */
+ output_asm_insn (string, ops);
+ this_is_asm_operands = 0;
+ break;
+ }
+
+ if (prescan <= 0 && app_on)
+ {
+ fprintf (file, ASM_APP_OFF);
+ app_on = 0;
+ }
+
+ if (GET_CODE (body) == SEQUENCE)
+ {
+ /* A delayed-branch sequence */
+ register int i;
+ rtx next;
+
+ if (prescan > 0)
+ break;
+ final_sequence = body;
+
+ /* The first insn in this SEQUENCE might be a JUMP_INSN that will
+ force the restoration of a comparison that was previously
+ thought unnecessary. If that happens, cancel this sequence
+ and cause that insn to be restored. */
+
+ next = final_scan_insn (XVECEXP (body, 0, 0), file, 0, prescan, 1);
+ if (next != XVECEXP (body, 0, 1))
+ {
+ final_sequence = 0;
+ return next;
+ }
+
+ for (i = 1; i < XVECLEN (body, 0); i++)
+ final_scan_insn (XVECEXP (body, 0, i), file, 0, prescan, 1);
+#ifdef DBR_OUTPUT_SEQEND
+ DBR_OUTPUT_SEQEND (file);
+#endif
+ final_sequence = 0;
+
+ /* If the insn requiring the delay slot was a CALL_INSN, the
+ insns in the delay slot are actually executed before the
+ called function. Hence we don't preserve any CC-setting
+ actions in these insns and the CC must be marked as being
+ clobbered by the function. */
+ if (GET_CODE (XVECEXP (body, 0, 0)) == CALL_INSN)
+ CC_STATUS_INIT;
+
+ /* Following a conditional branch sequence, we have a new basic
+ block. */
+ if (profile_block_flag)
+ {
+ rtx insn = XVECEXP (body, 0, 0);
+ rtx body = PATTERN (insn);
+
+ if ((GET_CODE (insn) == JUMP_INSN && GET_CODE (body) == SET
+ && GET_CODE (SET_SRC (body)) != LABEL_REF)
+ || (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (body, 0, 0))) != LABEL_REF))
+ new_block = 1;
+ }
+ break;
+ }
+
+ /* We have a real machine instruction as rtl. */
+
+ body = PATTERN (insn);
+
+#ifdef HAVE_cc0
+ /* Check for redundant test and compare instructions
+ (when the condition codes are already set up as desired).
+ This is done only when optimizing; if not optimizing,
+ it should be possible for the user to alter a variable
+ with the debugger in between statements
+ and the next statement should reexamine the variable
+ to compute the condition codes. */
+
+ if (optimize
+ && GET_CODE (body) == SET
+ && GET_CODE (SET_DEST (body)) == CC0
+ && insn != last_ignored_compare)
+ {
+ if (GET_CODE (SET_SRC (body)) == SUBREG)
+ SET_SRC (body) = alter_subreg (SET_SRC (body));
+ else if (GET_CODE (SET_SRC (body)) == COMPARE)
+ {
+ if (GET_CODE (XEXP (SET_SRC (body), 0)) == SUBREG)
+ XEXP (SET_SRC (body), 0)
+ = alter_subreg (XEXP (SET_SRC (body), 0));
+ if (GET_CODE (XEXP (SET_SRC (body), 1)) == SUBREG)
+ XEXP (SET_SRC (body), 1)
+ = alter_subreg (XEXP (SET_SRC (body), 1));
+ }
+ if ((cc_status.value1 != 0
+ && rtx_equal_p (SET_SRC (body), cc_status.value1))
+ || (cc_status.value2 != 0
+ && rtx_equal_p (SET_SRC (body), cc_status.value2)))
+ {
+ /* Don't delete insn if it has an addressing side-effect. */
+ if (! FIND_REG_INC_NOTE (insn, 0)
+ /* or if anything in it is volatile. */
+ && ! volatile_refs_p (PATTERN (insn)))
+ {
+ /* We don't really delete the insn; just ignore it. */
+ last_ignored_compare = insn;
+ break;
+ }
+ }
+ }
+#endif
+
+ /* Following a conditional branch, we have a new basic block.
+ But if we are inside a sequence, the new block starts after the
+ last insn of the sequence. */
+ if (profile_block_flag && final_sequence == 0
+ && ((GET_CODE (insn) == JUMP_INSN && GET_CODE (body) == SET
+ && GET_CODE (SET_SRC (body)) != LABEL_REF)
+ || (GET_CODE (insn) == JUMP_INSN && GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (body, 0, 0))) != LABEL_REF)))
+ new_block = 1;
+
+#ifndef STACK_REGS
+ /* Don't bother outputting obvious no-ops, even without -O.
+ This optimization is fast and doesn't interfere with debugging.
+ Don't do this if the insn is in a delay slot, since this
+ will cause an improper number of delay insns to be written. */
+ if (final_sequence == 0
+ && prescan >= 0
+ && GET_CODE (insn) == INSN && GET_CODE (body) == SET
+ && GET_CODE (SET_SRC (body)) == REG
+ && GET_CODE (SET_DEST (body)) == REG
+ && REGNO (SET_SRC (body)) == REGNO (SET_DEST (body)))
+ break;
+#endif
+
+#ifdef HAVE_cc0
+ /* If this is a conditional branch, maybe modify it
+ if the cc's are in a nonstandard state
+ so that it accomplishes the same thing that it would
+ do straightforwardly if the cc's were set up normally. */
+
+ if (cc_status.flags != 0
+ && GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (body) == SET
+ && SET_DEST (body) == pc_rtx
+ && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE
+ /* This is done during prescan; it is not done again
+ in final scan when prescan has been done. */
+ && prescan >= 0)
+ {
+ /* This function may alter the contents of its argument
+ and clear some of the cc_status.flags bits.
+ It may also return 1 meaning condition now always true
+ or -1 meaning condition now always false
+ or 2 meaning condition nontrivial but altered. */
+ register int result = alter_cond (XEXP (SET_SRC (body), 0));
+ /* If condition now has fixed value, replace the IF_THEN_ELSE
+ with its then-operand or its else-operand. */
+ if (result == 1)
+ SET_SRC (body) = XEXP (SET_SRC (body), 1);
+ if (result == -1)
+ SET_SRC (body) = XEXP (SET_SRC (body), 2);
+
+ /* The jump is now either unconditional or a no-op.
+ If it has become a no-op, don't try to output it.
+ (It would not be recognized.) */
+ if (SET_SRC (body) == pc_rtx)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ break;
+ }
+ else if (GET_CODE (SET_SRC (body)) == RETURN)
+ /* Replace (set (pc) (return)) with (return). */
+ PATTERN (insn) = body = SET_SRC (body);
+
+ /* Rerecognize the instruction if it has changed. */
+ if (result != 0)
+ INSN_CODE (insn) = -1;
+ }
+
+ /* Make same adjustments to instructions that examine the
+ condition codes without jumping (if this machine has them). */
+
+ if (cc_status.flags != 0
+ && GET_CODE (body) == SET)
+ {
+ switch (GET_CODE (SET_SRC (body)))
+ {
+ case GTU:
+ case GT:
+ case LTU:
+ case LT:
+ case GEU:
+ case GE:
+ case LEU:
+ case LE:
+ case EQ:
+ case NE:
+ {
+ register int result;
+ if (XEXP (SET_SRC (body), 0) != cc0_rtx)
+ break;
+ result = alter_cond (SET_SRC (body));
+ if (result == 1)
+ validate_change (insn, &SET_SRC (body), const_true_rtx, 0);
+ else if (result == -1)
+ validate_change (insn, &SET_SRC (body), const0_rtx, 0);
+ else if (result == 2)
+ INSN_CODE (insn) = -1;
+ }
+ }
+ }
+#endif
+
+ /* Do machine-specific peephole optimizations if desired. */
+
+ if (optimize && !flag_no_peephole && !nopeepholes)
+ {
+ rtx next = peephole (insn);
+ /* When peepholing, if there were notes within the peephole,
+ emit them before the peephole. */
+ if (next != 0 && next != NEXT_INSN (insn))
+ {
+ rtx prev = PREV_INSN (insn);
+ rtx note;
+
+ for (note = NEXT_INSN (insn); note != next;
+ note = NEXT_INSN (note))
+ final_scan_insn (note, file, optimize, prescan, nopeepholes);
+
+ /* In case this is prescan, put the notes
+ in proper position for later rescan. */
+ note = NEXT_INSN (insn);
+ PREV_INSN (note) = prev;
+ NEXT_INSN (prev) = note;
+ NEXT_INSN (PREV_INSN (next)) = insn;
+ PREV_INSN (insn) = PREV_INSN (next);
+ NEXT_INSN (insn) = next;
+ PREV_INSN (next) = insn;
+ }
+
+ /* PEEPHOLE might have changed this. */
+ body = PATTERN (insn);
+ }
+
+ /* Try to recognize the instruction.
+ If successful, verify that the operands satisfy the
+ constraints for the instruction. Crash if they don't,
+ since `reload' should have changed them so that they do. */
+
+ insn_code_number = recog_memoized (insn);
+ insn_extract (insn);
+ for (i = 0; i < insn_n_operands[insn_code_number]; i++)
+ {
+ if (GET_CODE (recog_operand[i]) == SUBREG)
+ recog_operand[i] = alter_subreg (recog_operand[i]);
+ else if (GET_CODE (recog_operand[i]) == PLUS
+ || GET_CODE (recog_operand[i]) == MULT)
+ recog_operand[i] = walk_alter_subreg (recog_operand[i]);
+ }
+
+ for (i = 0; i < insn_n_dups[insn_code_number]; i++)
+ {
+ if (GET_CODE (*recog_dup_loc[i]) == SUBREG)
+ *recog_dup_loc[i] = alter_subreg (*recog_dup_loc[i]);
+ else if (GET_CODE (*recog_dup_loc[i]) == PLUS
+ || GET_CODE (*recog_dup_loc[i]) == MULT)
+ *recog_dup_loc[i] = walk_alter_subreg (*recog_dup_loc[i]);
+ }
+
+#ifdef REGISTER_CONSTRAINTS
+ if (! constrain_operands (insn_code_number, 1))
+ fatal_insn_not_found (insn);
+#endif
+
+ /* Some target machines need to prescan each insn before
+ it is output. */
+
+#ifdef FINAL_PRESCAN_INSN
+ FINAL_PRESCAN_INSN (insn, recog_operand,
+ insn_n_operands[insn_code_number]);
+#endif
+
+#ifdef HAVE_cc0
+ cc_prev_status = cc_status;
+
+ /* Update `cc_status' for this instruction.
+ The instruction's output routine may change it further.
+ If the output routine for a jump insn needs to depend
+ on the cc status, it should look at cc_prev_status. */
+
+ NOTICE_UPDATE_CC (body, insn);
+#endif
+
+ debug_insn = insn;
+
+ /* If the proper template needs to be chosen by some C code,
+ run that code and get the real template. */
+
+ template = insn_template[insn_code_number];
+ if (template == 0)
+ {
+ template = (*insn_outfun[insn_code_number]) (recog_operand, insn);
+
+ /* If the C code returns 0, it means that it is a jump insn
+ which follows a deleted test insn, and that test insn
+ needs to be reinserted. */
+ if (template == 0)
+ {
+ if (prev_nonnote_insn (insn) != last_ignored_compare)
+ abort ();
+ new_block = 0;
+ return prev_nonnote_insn (insn);
+ }
+ }
+
+ /* If the template is the string "#", it means that this insn must
+ be split. */
+ if (template[0] == '#' && template[1] == '\0')
+ {
+ rtx new = try_split (body, insn, 0);
+
+ /* If we didn't split the insn, go away. */
+ if (new == insn && PATTERN (new) == body)
+ abort ();
+
+ new_block = 0;
+ return new;
+ }
+
+ if (prescan > 0)
+ break;
+
+ /* Output assembler code from the template. */
+
+ output_asm_insn (template, recog_operand);
+
+#if 0
+      /* It's not at all clear why we did this, and doing so interferes
+         with tests we'd like to make using REG_WAS_0 notes, so let's try
+         leaving this out. */
+
+ /* Mark this insn as having been output. */
+ INSN_DELETED_P (insn) = 1;
+#endif
+
+ debug_insn = 0;
+ }
+ }
+ return NEXT_INSN (insn);
+}
+
+/* Output debugging info to the assembler file FILE
+ based on the NOTE-insn INSN, assumed to be a line number. */
+
+static void
+output_source_line (file, insn)
+ FILE *file;
+ rtx insn;
+{
+ register char *filename = NOTE_SOURCE_FILE (insn);
+
+ /* Remember filename for basic block profiling.
+ Filenames are allocated on the permanent obstack
+ or are passed in ARGV, so we don't have to save
+ the string. */
+
+ if (profile_block_flag && last_filename != filename)
+ bb_file_label_num = add_bb_string (filename, TRUE);
+
+ last_filename = filename;
+ last_linenum = NOTE_LINE_NUMBER (insn);
+
+ if (write_symbols != NO_DEBUG)
+ {
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG
+#if 0 /* People like having line numbers even in wrong file! */
+ /* COFF can't handle multiple source files--lose, lose. */
+ && !strcmp (filename, main_input_filename)
+#endif
+ /* COFF relative line numbers must be positive. */
+ && last_linenum > sdb_begin_function_line)
+ {
+#ifdef ASM_OUTPUT_SOURCE_LINE
+ ASM_OUTPUT_SOURCE_LINE (file, last_linenum);
+#else
+ fprintf (file, "\t.ln\t%d\n",
+ ((sdb_begin_function_line > -1)
+ ? last_linenum - sdb_begin_function_line : 1));
+#endif
+ }
+#endif
+
+#if defined (DBX_DEBUGGING_INFO)
+ if (write_symbols == DBX_DEBUG)
+ dbxout_source_line (file, filename, NOTE_LINE_NUMBER (insn));
+#endif
+
+#if defined (XCOFF_DEBUGGING_INFO)
+ if (write_symbols == XCOFF_DEBUG)
+ xcoffout_source_line (file, filename, insn);
+#endif
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ dwarfout_line (filename, NOTE_LINE_NUMBER (insn));
+#endif
+ }
+}
+
+/* If X is a SUBREG, replace it with a REG or a MEM,
+ based on the thing it is a subreg of. */
+
+rtx
+alter_subreg (x)
+ register rtx x;
+{
+ register rtx y = SUBREG_REG (x);
+ if (GET_CODE (y) == SUBREG)
+ y = alter_subreg (y);
+
+ if (GET_CODE (y) == REG)
+ {
+ /* If the containing reg really gets a hard reg, so do we. */
+ PUT_CODE (x, REG);
+ REGNO (x) = REGNO (y) + SUBREG_WORD (x);
+ }
+ else if (GET_CODE (y) == MEM)
+ {
+ register int offset = SUBREG_WORD (x) * UNITS_PER_WORD;
+#if BYTES_BIG_ENDIAN
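+      /* On a big-endian machine the low-order part of Y lies at the
+         high-address end of a word, so adjust the offset so that the
+         narrower reference still addresses the intended bytes.  */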
+ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x)))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (y))));
+#endif
+ PUT_CODE (x, MEM);
+ MEM_VOLATILE_P (x) = MEM_VOLATILE_P (y);
+ XEXP (x, 0) = plus_constant (XEXP (y, 0), offset);
+ }
+
+ return x;
+}
+
+/* Do alter_subreg on all the SUBREGs contained in X. */
+
+static rtx
+walk_alter_subreg (x)
+ rtx x;
+{
+ switch (GET_CODE (x))
+ {
+ case PLUS:
+ case MULT:
+ XEXP (x, 0) = walk_alter_subreg (XEXP (x, 0));
+ XEXP (x, 1) = walk_alter_subreg (XEXP (x, 1));
+ break;
+
+ case MEM:
+ XEXP (x, 0) = walk_alter_subreg (XEXP (x, 0));
+ break;
+
+ case SUBREG:
+ return alter_subreg (x);
+ }
+
+ return x;
+}
+
+#ifdef HAVE_cc0
+
+/* Given BODY, the body of a jump instruction, alter the jump condition
+ as required by the bits that are set in cc_status.flags.
+ Not all of the bits there can be handled at this level in all cases.
+
+ The value is normally 0.
+ 1 means that the condition has become always true.
+ -1 means that the condition has become always false.
+ 2 means that COND has been altered. */
+
+static int
+alter_cond (cond)
+ register rtx cond;
+{
+ int value = 0;
+
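+  /* CC_REVERSED means the flags were set by comparing the operands in
+     the opposite order, so swap_condition picks the code that tests
+     the same relation with the operands exchanged; CC_INVERTED means
+     the flags record the logical negation of the test, so the
+     condition code itself is reversed.  */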
+ if (cc_status.flags & CC_REVERSED)
+ {
+ value = 2;
+ PUT_CODE (cond, swap_condition (GET_CODE (cond)));
+ }
+
+ if (cc_status.flags & CC_INVERTED)
+ {
+ value = 2;
+ PUT_CODE (cond, reverse_condition (GET_CODE (cond)));
+ }
+
+ if (cc_status.flags & CC_NOT_POSITIVE)
+ switch (GET_CODE (cond))
+ {
+ case LE:
+ case LEU:
+ case GEU:
+ /* Jump becomes unconditional. */
+ return 1;
+
+ case GT:
+ case GTU:
+ case LTU:
+ /* Jump becomes no-op. */
+ return -1;
+
+ case GE:
+ PUT_CODE (cond, EQ);
+ value = 2;
+ break;
+
+ case LT:
+ PUT_CODE (cond, NE);
+ value = 2;
+ break;
+ }
+
+ if (cc_status.flags & CC_NOT_NEGATIVE)
+ switch (GET_CODE (cond))
+ {
+ case GE:
+ case GEU:
+ /* Jump becomes unconditional. */
+ return 1;
+
+ case LT:
+ case LTU:
+ /* Jump becomes no-op. */
+ return -1;
+
+ case LE:
+ case LEU:
+ PUT_CODE (cond, EQ);
+ value = 2;
+ break;
+
+ case GT:
+ case GTU:
+ PUT_CODE (cond, NE);
+ value = 2;
+ break;
+ }
+
+ if (cc_status.flags & CC_NO_OVERFLOW)
+ switch (GET_CODE (cond))
+ {
+ case GEU:
+ /* Jump becomes unconditional. */
+ return 1;
+
+ case LEU:
+ PUT_CODE (cond, EQ);
+ value = 2;
+ break;
+
+ case GTU:
+ PUT_CODE (cond, NE);
+ value = 2;
+ break;
+
+ case LTU:
+ /* Jump becomes no-op. */
+ return -1;
+ }
+
+ if (cc_status.flags & (CC_Z_IN_NOT_N | CC_Z_IN_N))
+ switch (GET_CODE (cond))
+ {
+ case LE:
+ case LEU:
+ case GE:
+ case GEU:
+ case LT:
+ case LTU:
+ case GT:
+ case GTU:
+ abort ();
+
+ case NE:
+ PUT_CODE (cond, cc_status.flags & CC_Z_IN_N ? GE : LT);
+ value = 2;
+ break;
+
+ case EQ:
+ PUT_CODE (cond, cc_status.flags & CC_Z_IN_N ? LT : GE);
+ value = 2;
+ break;
+ }
+
+ if (cc_status.flags & CC_NOT_SIGNED)
+ /* The flags are valid if signed condition operators are converted
+ to unsigned. */
+ switch (GET_CODE (cond))
+ {
+ case LE:
+ PUT_CODE (cond, LEU);
+ value = 2;
+ break;
+
+ case LT:
+ PUT_CODE (cond, LTU);
+ value = 2;
+ break;
+
+ case GT:
+ PUT_CODE (cond, GTU);
+ value = 2;
+ break;
+
+ case GE:
+ PUT_CODE (cond, GEU);
+ value = 2;
+ break;
+ }
+
+ return value;
+}
+#endif
+
+/* Report inconsistency between the assembler template and the operands.
+ In an `asm', it's the user's fault; otherwise, the compiler's fault. */
+
+void
+output_operand_lossage (str)
+ char *str;
+{
+ if (this_is_asm_operands)
+ error_for_asm (this_is_asm_operands, "invalid `asm': %s", str);
+ else
+ abort ();
+}
+
+/* Output of assembler code from a template, and its subroutines. */
+
+/* Output text from TEMPLATE to the assembler output file,
+ obeying %-directions to substitute operands taken from
+ the vector OPERANDS.
+
+ %N (for N a digit) means print operand N in usual manner.
+ %lN means require operand N to be a CODE_LABEL or LABEL_REF
+ and print the label name with no punctuation.
+ %cN means require operand N to be a constant
+ and print the constant expression with no punctuation.
+ %aN means expect operand N to be a memory address
+ (not a memory reference!) and print a reference
+ to that address.
+ %nN means expect operand N to be a constant
+ and print a constant expression for minus the value
+ of the operand, with no other punctuation. */
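+
+/* For example, with operand 0 a register and operand 1 an immediate
+   constant, a template such as "addl %1,%0" comes out as something like
+   "addl $4,%eax"; the precise register and constant syntax is supplied
+   by the target's PRINT_OPERAND macro.  */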
+
+void
+output_asm_insn (template, operands)
+ char *template;
+ rtx *operands;
+{
+ register char *p;
+ register int c, i;
+
+ /* An insn may return a null string template
+ in a case where no assembler code is needed. */
+ if (*template == 0)
+ return;
+
+ p = template;
+ putc ('\t', asm_out_file);
+
+#ifdef ASM_OUTPUT_OPCODE
+ ASM_OUTPUT_OPCODE (asm_out_file, p);
+#endif
+
+ while (c = *p++)
+ switch (c)
+ {
+#ifdef ASM_OUTPUT_OPCODE
+ case '\n':
+ putc (c, asm_out_file);
+ while ((c = *p) == '\t')
+ {
+ putc (c, asm_out_file);
+ p++;
+ }
+ ASM_OUTPUT_OPCODE (asm_out_file, p);
+ break;
+#endif
+
+#ifdef ASSEMBLER_DIALECT
+ case '{':
+ /* If we want the first dialect, do nothing. Otherwise, skip
+ DIALECT_NUMBER of strings ending with '|'. */
+ for (i = 0; i < dialect_number; i++)
+ {
+ while (*p && *p++ != '|')
+ ;
+
+ if (*p == '|')
+ p++;
+ }
+ break;
+
+ case '|':
+ /* Skip to close brace. */
+ while (*p && *p++ != '}')
+ ;
+ break;
+
+ case '}':
+ break;
+#endif
+
+ case '%':
+ /* %% outputs a single %. */
+ if (*p == '%')
+ {
+ p++;
+ putc (c, asm_out_file);
+ }
+ /* %= outputs a number which is unique to each insn in the entire
+ compilation. This is useful for making local labels that are
+ referred to more than once in a given insn. */
+ else if (*p == '=')
+ {
+ p++;
+ fprintf (asm_out_file, "%d", insn_counter);
+ }
+ /* % followed by a letter and some digits
+ outputs an operand in a special way depending on the letter.
+ Letters `acln' are implemented directly.
+ Other letters are passed to `output_operand' so that
+ the PRINT_OPERAND macro can define them. */
+ else if ((*p >= 'a' && *p <= 'z')
+ || (*p >= 'A' && *p <= 'Z'))
+ {
+ int letter = *p++;
+ c = atoi (p);
+
+ if (! (*p >= '0' && *p <= '9'))
+ output_operand_lossage ("operand number missing after %-letter");
+ else if (this_is_asm_operands && c >= (unsigned) insn_noperands)
+ output_operand_lossage ("operand number out of range");
+ else if (letter == 'l')
+ output_asm_label (operands[c]);
+ else if (letter == 'a')
+ output_address (operands[c]);
+ else if (letter == 'c')
+ {
+ if (CONSTANT_ADDRESS_P (operands[c]))
+ output_addr_const (asm_out_file, operands[c]);
+ else
+ output_operand (operands[c], 'c');
+ }
+ else if (letter == 'n')
+ {
+ if (GET_CODE (operands[c]) == CONST_INT)
+ fprintf (asm_out_file,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ "%d",
+#else
+ "%ld",
+#endif
+ - INTVAL (operands[c]));
+ else
+ {
+ putc ('-', asm_out_file);
+ output_addr_const (asm_out_file, operands[c]);
+ }
+ }
+ else
+ output_operand (operands[c], letter);
+
+ while ((c = *p) >= '0' && c <= '9') p++;
+ }
+ /* % followed by a digit outputs an operand the default way. */
+ else if (*p >= '0' && *p <= '9')
+ {
+ c = atoi (p);
+ if (this_is_asm_operands && c >= (unsigned) insn_noperands)
+ output_operand_lossage ("operand number out of range");
+ else
+ output_operand (operands[c], 0);
+ while ((c = *p) >= '0' && c <= '9') p++;
+ }
+ /* % followed by punctuation: output something for that
+ punctuation character alone, with no operand.
+ The PRINT_OPERAND macro decides what is actually done. */
+#ifdef PRINT_OPERAND_PUNCT_VALID_P
+ else if (PRINT_OPERAND_PUNCT_VALID_P (*p))
+ output_operand (NULL_RTX, *p++);
+#endif
+ else
+ output_operand_lossage ("invalid %%-code");
+ break;
+
+ default:
+ putc (c, asm_out_file);
+ }
+
+ if (flag_print_asm_name)
+ {
+ /* Annotate the assembly with a comment describing the pattern and
+ alternative used. */
+ if (debug_insn)
+ {
+ register int num = INSN_CODE (debug_insn);
+ fprintf (asm_out_file, " %s %d %s",
+ ASM_COMMENT_START, INSN_UID (debug_insn), insn_name[num]);
+ if (insn_n_alternatives[num] > 1)
+ fprintf (asm_out_file, "/%d", which_alternative + 1);
+
+ /* Clear this so only the first assembler insn
+ of any rtl insn will get the special comment for -dp. */
+ debug_insn = 0;
+ }
+ }
+
+ putc ('\n', asm_out_file);
+}
+
+/* Output a LABEL_REF, or a bare CODE_LABEL, as an assembler symbol. */
+
+void
+output_asm_label (x)
+ rtx x;
+{
+ char buf[256];
+
+ if (GET_CODE (x) == LABEL_REF)
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (XEXP (x, 0)));
+ else if (GET_CODE (x) == CODE_LABEL)
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
+ else
+ output_operand_lossage ("`%l' operand isn't a label");
+
+ assemble_name (asm_out_file, buf);
+}
+
+/* Print operand X using machine-dependent assembler syntax.
+ The macro PRINT_OPERAND is defined just to control this function.
+ CODE is a non-digit that preceded the operand-number in the % spec,
+ such as 'z' if the spec was `%z3'. CODE is 0 if there was no char
+ between the % and the digits.
+ When CODE is a non-letter, X is 0.
+
+ The meanings of the letters are machine-dependent and controlled
+ by PRINT_OPERAND. */
+
+static void
+output_operand (x, code)
+ rtx x;
+ int code;
+{
+ if (x && GET_CODE (x) == SUBREG)
+ x = alter_subreg (x);
+
+ /* If X is a pseudo-register, abort now rather than writing trash to the
+ assembler file. */
+
+ if (x && GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
+ abort ();
+
+ PRINT_OPERAND (asm_out_file, x, code);
+}
+
+/* Print a memory reference operand for address X
+ using machine-dependent assembler syntax.
+ The macro PRINT_OPERAND_ADDRESS exists just to control this function. */
+
+void
+output_address (x)
+ rtx x;
+{
+ walk_alter_subreg (x);
+ PRINT_OPERAND_ADDRESS (asm_out_file, x);
+}
+
+/* Print an integer constant expression in assembler syntax.
+ Addition and subtraction are the only arithmetic
+ that may appear in these expressions. */
+
+void
+output_addr_const (file, x)
+ FILE *file;
+ rtx x;
+{
+ char buf[256];
+
+ restart:
+ switch (GET_CODE (x))
+ {
+ case PC:
+ if (flag_pic)
+ putc ('.', file);
+ else
+ abort ();
+ break;
+
+ case SYMBOL_REF:
+ assemble_name (file, XSTR (x, 0));
+ break;
+
+ case LABEL_REF:
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (XEXP (x, 0)));
+ assemble_name (file, buf);
+ break;
+
+ case CODE_LABEL:
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
+ assemble_name (file, buf);
+ break;
+
+ case CONST_INT:
+ fprintf (file,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ "%d",
+#else
+ "%ld",
+#endif
+ INTVAL (x));
+ break;
+
+ case CONST:
+ /* This used to output parentheses around the expression,
+ but that does not work on the 386 (either ATT or BSD assembler). */
+ output_addr_const (file, XEXP (x, 0));
+ break;
+
+ case CONST_DOUBLE:
+ if (GET_MODE (x) == VOIDmode)
+ {
+ /* We can use %d if the number is one word and positive. */
+ if (CONST_DOUBLE_HIGH (x))
+ fprintf (file,
+#if HOST_BITS_PER_WIDE_INT == 64
+#if HOST_BITS_PER_WIDE_INT != HOST_BITS_PER_INT
+ "0x%lx%016lx",
+#else
+ "0x%x%016x",
+#endif
+#else
+#if HOST_BITS_PER_WIDE_INT != HOST_BITS_PER_INT
+ "0x%lx%08lx",
+#else
+ "0x%x%08x",
+#endif
+#endif
+ CONST_DOUBLE_HIGH (x), CONST_DOUBLE_LOW (x));
+ else if (CONST_DOUBLE_LOW (x) < 0)
+ fprintf (file,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ "0x%x",
+#else
+ "0x%lx",
+#endif
+ CONST_DOUBLE_LOW (x));
+ else
+ fprintf (file,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ "%d",
+#else
+ "%ld",
+#endif
+ CONST_DOUBLE_LOW (x));
+ }
+ else
+ /* We can't handle floating point constants;
+ PRINT_OPERAND must handle them. */
+ output_operand_lossage ("floating constant misused");
+ break;
+
+ case PLUS:
+ /* Some assemblers need integer constants to appear last (eg masm). */
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT)
+ {
+ output_addr_const (file, XEXP (x, 1));
+ if (INTVAL (XEXP (x, 0)) >= 0)
+ fprintf (file, "+");
+ output_addr_const (file, XEXP (x, 0));
+ }
+ else
+ {
+ output_addr_const (file, XEXP (x, 0));
+ if (INTVAL (XEXP (x, 1)) >= 0)
+ fprintf (file, "+");
+ output_addr_const (file, XEXP (x, 1));
+ }
+ break;
+
+ case MINUS:
+ /* Avoid outputting things like x-x or x+5-x,
+ since some assemblers can't handle that. */
+ x = simplify_subtraction (x);
+ if (GET_CODE (x) != MINUS)
+ goto restart;
+
+ output_addr_const (file, XEXP (x, 0));
+ fprintf (file, "-");
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) < 0)
+ {
+ fprintf (file, ASM_OPEN_PAREN);
+ output_addr_const (file, XEXP (x, 1));
+ fprintf (file, ASM_CLOSE_PAREN);
+ }
+ else
+ output_addr_const (file, XEXP (x, 1));
+ break;
+
+ case ZERO_EXTEND:
+ case SIGN_EXTEND:
+ output_addr_const (file, XEXP (x, 0));
+ break;
+
+ default:
+ output_operand_lossage ("invalid expression as operand");
+ }
+}
+
+/* A poor man's fprintf, with the added features of %I, %R, %L, and %U.
+ %R prints the value of REGISTER_PREFIX.
+ %L prints the value of LOCAL_LABEL_PREFIX.
+ %U prints the value of USER_LABEL_PREFIX.
+ %I prints the value of IMMEDIATE_PREFIX.
+ %O runs ASM_OUTPUT_OPCODE to transform what follows in the string.
+   Also supported are the conversions %d, %i, %u, %o, %x, %X, %p and %s,
+   the floating conversions %e, %f and %g, the length prefixes %l and
+   %w (HOST_WIDE_INT), and %%.
+
+ We handle alternate assembler dialects here, just like output_asm_insn. */
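+
+/* For instance, asm_fprintf (file, "\tcall %U%s\n", "foo") emits
+   "\tcall _foo" on a target whose USER_LABEL_PREFIX is "_".  */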
+
+void
+asm_fprintf VPROTO((FILE *file, char *p, ...))
+{
+#ifndef __STDC__
+ FILE *file;
+ char *p;
+#endif
+ va_list argptr;
+ char buf[10];
+ char *q, c;
+ int i;
+
+ VA_START (argptr, p);
+
+#ifndef __STDC__
+ file = va_arg (argptr, FILE*);
+ p = va_arg (argptr, char*);
+#endif
+
+ buf[0] = '%';
+
+ while (c = *p++)
+ switch (c)
+ {
+#ifdef ASSEMBLER_DIALECT
+ case '{':
+ /* If we want the first dialect, do nothing. Otherwise, skip
+ DIALECT_NUMBER of strings ending with '|'. */
+ for (i = 0; i < dialect_number; i++)
+ {
+ while (*p && *p++ != '|')
+ ;
+
+ if (*p == '|')
+ p++;
+ }
+ break;
+
+ case '|':
+ /* Skip to close brace. */
+ while (*p && *p++ != '}')
+ ;
+ break;
+
+ case '}':
+ break;
+#endif
+
+ case '%':
+ c = *p++;
+ q = &buf[1];
+ while ((c >= '0' && c <= '9') || c == '.')
+ {
+ *q++ = c;
+ c = *p++;
+ }
+ switch (c)
+ {
+ case '%':
+ fprintf (file, "%%");
+ break;
+
+ case 'd': case 'i': case 'u':
+ case 'x': case 'p': case 'X':
+ case 'o':
+ *q++ = c;
+ *q = 0;
+ fprintf (file, buf, va_arg (argptr, int));
+ break;
+
+ case 'w':
+ /* This is a prefix to the 'd', 'i', 'u', 'x', 'p', and 'X' cases,
+ but we do not check for those cases. It means that the value
+ is a HOST_WIDE_INT, which may be either `int' or `long'. */
+
+#if HOST_BITS_PER_WIDE_INT != HOST_BITS_PER_INT
+ *q++ = 'l';
+#endif
+
+ *q++ = *p++;
+ *q = 0;
+ fprintf (file, buf, va_arg (argptr, HOST_WIDE_INT));
+ break;
+
+ case 'l':
+ *q++ = c;
+ *q++ = *p++;
+ *q = 0;
+ fprintf (file, buf, va_arg (argptr, long));
+ break;
+
+ case 'e':
+ case 'f':
+ case 'g':
+ *q++ = c;
+ *q = 0;
+ fprintf (file, buf, va_arg (argptr, double));
+ break;
+
+ case 's':
+ *q++ = c;
+ *q = 0;
+ fprintf (file, buf, va_arg (argptr, char *));
+ break;
+
+ case 'O':
+#ifdef ASM_OUTPUT_OPCODE
+ ASM_OUTPUT_OPCODE (asm_out_file, p);
+#endif
+ break;
+
+ case 'R':
+#ifdef REGISTER_PREFIX
+ fprintf (file, "%s", REGISTER_PREFIX);
+#endif
+ break;
+
+ case 'I':
+#ifdef IMMEDIATE_PREFIX
+ fprintf (file, "%s", IMMEDIATE_PREFIX);
+#endif
+ break;
+
+ case 'L':
+#ifdef LOCAL_LABEL_PREFIX
+ fprintf (file, "%s", LOCAL_LABEL_PREFIX);
+#endif
+ break;
+
+ case 'U':
+#ifdef USER_LABEL_PREFIX
+ fprintf (file, "%s", USER_LABEL_PREFIX);
+#endif
+ break;
+
+ default:
+ abort ();
+ }
+ break;
+
+ default:
+ fputc (c, file);
+ }
+}
+
+/* Split up a CONST_DOUBLE or integer constant rtx
+ into two rtx's for single words,
+ storing in *FIRST the word that comes first in memory in the target
+ and in *SECOND the other. */
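+
+/* For example, when WORDS_BIG_ENDIAN is 0 and words are 32 bits, a
+   VOIDmode CONST_DOUBLE with CONST_DOUBLE_HIGH == 1 and CONST_DOUBLE_LOW
+   == 2 yields *FIRST == GEN_INT (2) and *SECOND == GEN_INT (1).  */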
+
+void
+split_double (value, first, second)
+ rtx value;
+ rtx *first, *second;
+{
+ if (GET_CODE (value) == CONST_INT)
+ {
+ /* The rule for using CONST_INT for a wider mode
+ is that we regard the value as signed.
+ So sign-extend it. */
+ rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
+#if WORDS_BIG_ENDIAN
+ *first = high;
+ *second = value;
+#else
+ *first = value;
+ *second = high;
+#endif
+ }
+ else if (GET_CODE (value) != CONST_DOUBLE)
+ {
+#if WORDS_BIG_ENDIAN
+ *first = const0_rtx;
+ *second = value;
+#else
+ *first = value;
+ *second = const0_rtx;
+#endif
+ }
+ else if (GET_MODE (value) == VOIDmode
+ /* This is the old way we did CONST_DOUBLE integers. */
+ || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
+ {
+ /* In an integer, the words are defined as most and least significant.
+ So order them by the target's convention. */
+#if WORDS_BIG_ENDIAN
+ *first = GEN_INT (CONST_DOUBLE_HIGH (value));
+ *second = GEN_INT (CONST_DOUBLE_LOW (value));
+#else
+ *first = GEN_INT (CONST_DOUBLE_LOW (value));
+ *second = GEN_INT (CONST_DOUBLE_HIGH (value));
+#endif
+ }
+ else
+ {
+#ifdef REAL_ARITHMETIC
+ REAL_VALUE_TYPE r; long l[2];
+ REAL_VALUE_FROM_CONST_DOUBLE (r, value);
+
+ /* Note, this converts the REAL_VALUE_TYPE to the target's
+ format, splits up the floating point double and outputs
+ exactly 32 bits of it into each of l[0] and l[1] --
+ not necessarily BITS_PER_WORD bits. */
+ REAL_VALUE_TO_TARGET_DOUBLE (r, l);
+
+ *first = GEN_INT ((HOST_WIDE_INT) l[0]);
+ *second = GEN_INT ((HOST_WIDE_INT) l[1]);
+#else
+ if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
+ || HOST_BITS_PER_WIDE_INT != BITS_PER_WORD)
+ && ! flag_pretend_float)
+ abort ();
+
+#if defined (HOST_WORDS_BIG_ENDIAN) == WORDS_BIG_ENDIAN
+ /* Host and target agree => no need to swap. */
+ *first = GEN_INT (CONST_DOUBLE_LOW (value));
+ *second = GEN_INT (CONST_DOUBLE_HIGH (value));
+#else
+ *second = GEN_INT (CONST_DOUBLE_LOW (value));
+ *first = GEN_INT (CONST_DOUBLE_HIGH (value));
+#endif
+#endif /* no REAL_ARITHMETIC */
+ }
+}
+
+/* Return nonzero if this function has no function calls. */
+
+int
+leaf_function_p ()
+{
+ rtx insn;
+
+ if (profile_flag || profile_block_flag)
+ return 0;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CALL_INSN)
+ return 0;
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == CALL_INSN)
+ return 0;
+ }
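+
+  /* A call may also be hidden in the delay slots of the epilogue.  */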
+ for (insn = current_function_epilogue_delay_list; insn; insn = XEXP (insn, 1))
+ {
+ if (GET_CODE (XEXP (insn, 0)) == CALL_INSN)
+ return 0;
+ if (GET_CODE (XEXP (insn, 0)) == INSN
+ && GET_CODE (PATTERN (XEXP (insn, 0))) == SEQUENCE
+ && GET_CODE (XVECEXP (PATTERN (XEXP (insn, 0)), 0, 0)) == CALL_INSN)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* On some machines, a function with no call insns
+ can run faster if it doesn't create its own register window.
+ When output, the leaf function should use only the "output"
+ registers. Ordinarily, the function would be compiled to use
+ the "input" registers to find its arguments; it is a candidate
+ for leaf treatment if it uses only the "input" registers.
+ Leaf function treatment means renumbering so the function
+ uses the "output" registers instead. */
+
+#ifdef LEAF_REGISTERS
+
+static char permitted_reg_in_leaf_functions[] = LEAF_REGISTERS;
+
+/* Return 1 if this function uses only the registers that can be
+ safely renumbered. */
+
+int
+only_leaf_regs_used ()
+{
+ int i;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if ((regs_ever_live[i] || global_regs[i])
+ && ! permitted_reg_in_leaf_functions[i])
+ return 0;
+ }
+ return 1;
+}
+
+/* Scan all instructions and renumber all registers into those
+ available in leaf functions. */
+
+static void
+leaf_renumber_regs (first)
+ rtx first;
+{
+ rtx insn;
+
+ /* Renumber only the actual patterns.
+ The reg-notes can contain frame pointer refs,
+ and renumbering them could crash, and should not be needed. */
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ leaf_renumber_regs_insn (PATTERN (insn));
+ for (insn = current_function_epilogue_delay_list; insn; insn = XEXP (insn, 1))
+ if (GET_RTX_CLASS (GET_CODE (XEXP (insn, 0))) == 'i')
+ leaf_renumber_regs_insn (PATTERN (XEXP (insn, 0)));
+}
+
+/* Scan IN_RTX and its subexpressions, and renumber all regs into those
+ available in leaf functions. */
+
+void
+leaf_renumber_regs_insn (in_rtx)
+ register rtx in_rtx;
+{
+ register int i, j;
+ register char *format_ptr;
+
+ if (in_rtx == 0)
+ return;
+
+  /* Renumber all input-registers into output-registers.  */
+
+ if (GET_CODE (in_rtx) == REG)
+ {
+ int newreg;
+
+ /* Don't renumber the same reg twice. */
+ if (in_rtx->used)
+ return;
+
+ newreg = REGNO (in_rtx);
+ /* Don't try to renumber pseudo regs. It is possible for a pseudo reg
+ to reach here as part of a REG_NOTE. */
+ if (newreg >= FIRST_PSEUDO_REGISTER)
+ {
+ in_rtx->used = 1;
+ return;
+ }
+ newreg = LEAF_REG_REMAP (newreg);
+ if (newreg < 0)
+ abort ();
+ regs_ever_live[REGNO (in_rtx)] = 0;
+ regs_ever_live[newreg] = 1;
+ REGNO (in_rtx) = newreg;
+ in_rtx->used = 1;
+ }
+
+ if (GET_RTX_CLASS (GET_CODE (in_rtx)) == 'i')
+ {
+ /* Inside a SEQUENCE, we find insns.
+ Renumber just the patterns of these insns,
+ just as we do for the top-level insns. */
+ leaf_renumber_regs_insn (PATTERN (in_rtx));
+ return;
+ }
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (in_rtx));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (in_rtx)); i++)
+ switch (*format_ptr++)
+ {
+ case 'e':
+ leaf_renumber_regs_insn (XEXP (in_rtx, i));
+ break;
+
+ case 'E':
+ if (NULL != XVEC (in_rtx, i))
+ {
+ for (j = 0; j < XVECLEN (in_rtx, i); j++)
+ leaf_renumber_regs_insn (XVECEXP (in_rtx, i, j));
+ }
+ break;
+
+ case 'S':
+ case 's':
+ case '0':
+ case 'i':
+ case 'w':
+ case 'n':
+ case 'u':
+ break;
+
+ default:
+ abort ();
+ }
+}
+#endif
diff --git a/gnu/usr.bin/cc/cc_int/flow.c b/gnu/usr.bin/cc/cc_int/flow.c
new file mode 100644
index 0000000..cc9fed9
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/flow.c
@@ -0,0 +1,2793 @@
+/* Data flow analysis for GNU compiler.
+ Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file contains the data flow analysis pass of the compiler.
+ It computes data flow information
+ which tells combine_instructions which insns to consider combining
+ and controls register allocation.
+
+ Additional data flow information that is too bulky to record
+ is generated during the analysis, and is used at that time to
+ create autoincrement and autodecrement addressing.
+
+ The first step is dividing the function into basic blocks.
+ find_basic_blocks does this. Then life_analysis determines
+ where each register is live and where it is dead.
+
+ ** find_basic_blocks **
+
+ find_basic_blocks divides the current function's rtl
+ into basic blocks. It records the beginnings and ends of the
+ basic blocks in the vectors basic_block_head and basic_block_end,
+ and the number of blocks in n_basic_blocks.
+
+ find_basic_blocks also finds any unreachable loops
+ and deletes them.
+
+ ** life_analysis **
+
+ life_analysis is called immediately after find_basic_blocks.
+ It uses the basic block information to determine where each
+ hard or pseudo register is live.
+
+ ** live-register info **
+
+ The information about where each register is live is in two parts:
+ the REG_NOTES of insns, and the vector basic_block_live_at_start.
+
+ basic_block_live_at_start has an element for each basic block,
+ and the element is a bit-vector with a bit for each hard or pseudo
+ register. The bit is 1 if the register is live at the beginning
+ of the basic block.
+
+ Two types of elements can be added to an insn's REG_NOTES.
+ A REG_DEAD note is added to an insn's REG_NOTES for any register
+ that meets both of two conditions: The value in the register is not
+ needed in subsequent insns and the insn does not replace the value in
+ the register (in the case of multi-word hard registers, the value in
+ each register must be replaced by the insn to avoid a REG_DEAD note).
+
+ In the vast majority of cases, an object in a REG_DEAD note will be
+ used somewhere in the insn. The (rare) exception to this is if an
+ insn uses a multi-word hard register and only some of the registers are
+ needed in subsequent insns. In that case, REG_DEAD notes will be
+ provided for those hard registers that are not subsequently needed.
+ Partial REG_DEAD notes of this type do not occur when an insn sets
+ only some of the hard registers used in such a multi-word operand;
+ omitting REG_DEAD notes for objects stored in an insn is optional and
+ the desire to do so does not justify the complexity of the partial
+ REG_DEAD notes.
+
+ REG_UNUSED notes are added for each register that is set by the insn
+ but is unused subsequently (if every register set by the insn is unused
+ and the insn does not reference memory or have some other side-effect,
+ the insn is deleted instead). If only part of a multi-word hard
+ register is used in a subsequent insn, REG_UNUSED notes are made for
+ the parts that will not be used.
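+
+   As a small illustration: if (set (reg 100) (plus (reg 101) (reg 102)))
+   contains the last uses of (reg 101) and (reg 102), it gets a REG_DEAD
+   note for each of them; and if (reg 100) itself is never used afterward
+   and the insn has no other side-effect, the whole insn is deleted
+   rather than kept with a REG_UNUSED note.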
+
+ To determine which registers are live after any insn, one can
+ start from the beginning of the basic block and scan insns, noting
+ which registers are set by each insn and which die there.
+
+ ** Other actions of life_analysis **
+
+ life_analysis sets up the LOG_LINKS fields of insns because the
+ information needed to do so is readily available.
+
+ life_analysis deletes insns whose only effect is to store a value
+ that is never used.
+
+ life_analysis notices cases where a reference to a register as
+ a memory address can be combined with a preceding or following
+ incrementation or decrementation of the register. The separate
+ instruction to increment or decrement is deleted and the address
+ is changed to a POST_INC or similar rtx.
+
+ Each time an incrementing or decrementing address is created,
+ a REG_INC element is added to the insn's REG_NOTES list.
+
+ life_analysis fills in certain vectors containing information about
+ register usage: reg_n_refs, reg_n_deaths, reg_n_sets, reg_live_length,
+   reg_n_calls_crossed and reg_basic_block. */
+
+#include <stdio.h>
+#include "config.h"
+#include "rtl.h"
+#include "basic-block.h"
+#include "insn-config.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "output.h"
+
+#include "obstack.h"
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* List of labels that must never be deleted. */
+extern rtx forced_labels;
+
+/* Get the basic block number of an insn.
+ This info should not be expected to remain available
+ after the end of life_analysis. */
+
+/* This is the limit of the allocated space in the following two arrays. */
+
+static int max_uid_for_flow;
+
+#define BLOCK_NUM(INSN) uid_block_number[INSN_UID (INSN)]
+
+/* This is where the BLOCK_NUM values are really stored.
+ This is set up by find_basic_blocks and used there and in life_analysis,
+ and then freed. */
+
+static int *uid_block_number;
+
+/* INSN_VOLATILE (insn) is 1 if the insn refers to anything volatile. */
+
+#define INSN_VOLATILE(INSN) uid_volatile[INSN_UID (INSN)]
+static char *uid_volatile;
+
+/* Number of basic blocks in the current function. */
+
+int n_basic_blocks;
+
+/* Maximum register number used in this function, plus one. */
+
+int max_regno;
+
+/* Maximum number of SCRATCH rtx's used in any basic block of this function. */
+
+int max_scratch;
+
+/* Number of SCRATCH rtx's in the current block. */
+
+static int num_scratch;
+
+/* Indexed by n, gives number of basic block that (REG n) is used in.
+ If the value is REG_BLOCK_GLOBAL (-2),
+ it means (REG n) is used in more than one basic block.
+ REG_BLOCK_UNKNOWN (-1) means it hasn't been seen yet so we don't know.
+ This information remains valid for the rest of the compilation
+ of the current function; it is used to control register allocation. */
+
+int *reg_basic_block;
+
+/* Indexed by n, gives number of times (REG n) is used or set, each
+ weighted by its loop-depth.
+ This information remains valid for the rest of the compilation
+ of the current function; it is used to control register allocation. */
+
+int *reg_n_refs;
+
+/* Indexed by N, gives number of places register N dies.
+ This information remains valid for the rest of the compilation
+ of the current function; it is used to control register allocation. */
+
+short *reg_n_deaths;
+
+/* Indexed by N, gives 1 if that reg is live across any CALL_INSNs.
+ This information remains valid for the rest of the compilation
+ of the current function; it is used to control register allocation. */
+
+int *reg_n_calls_crossed;
+
+/* Total number of instructions at which (REG n) is live.
+ The larger this is, the less priority (REG n) gets for
+ allocation in a real register.
+ This information remains valid for the rest of the compilation
+ of the current function; it is used to control register allocation.
+
+ local-alloc.c may alter this number to change the priority.
+
+ Negative values are special.
+ -1 is used to mark a pseudo reg which has a constant or memory equivalent
+ and is used infrequently enough that it should not get a hard register.
+ -2 is used to mark a pseudo reg for a parameter, when a frame pointer
+ is not required. global.c makes an allocno for this but does
+ not try to assign a hard register to it. */
+
+int *reg_live_length;
+
+/* Element N is the next insn that uses (hard or pseudo) register number N
+ within the current basic block; or zero, if there is no such insn.
+ This is valid only during the final backward scan in propagate_block. */
+
+static rtx *reg_next_use;
+
+/* Size of a regset for the current function,
+ in (1) bytes and (2) elements. */
+
+int regset_bytes;
+int regset_size;
+
+/* Element N is first insn in basic block N.
+ This info lasts until we finish compiling the function. */
+
+rtx *basic_block_head;
+
+/* Element N is last insn in basic block N.
+ This info lasts until we finish compiling the function. */
+
+rtx *basic_block_end;
+
+/* Element N is a regset describing the registers live
+ at the start of basic block N.
+ This info lasts until we finish compiling the function. */
+
+regset *basic_block_live_at_start;
+
+/* Regset of regs live when calls to `setjmp'-like functions happen. */
+
+regset regs_live_at_setjmp;
+
+/* List made of EXPR_LIST rtx's which gives pairs of pseudo registers
+ that have to go in the same hard reg.
+ The first two regs in the list are a pair, and the next two
+ are another pair, etc. */
+rtx regs_may_share;
+
+/* Element N is nonzero if control can drop into basic block N
+ from the preceding basic block. Freed after life_analysis. */
+
+static char *basic_block_drops_in;
+
+/* Element N is depth within loops of the last insn in basic block number N.
+ Freed after life_analysis. */
+
+static short *basic_block_loop_depth;
+
+/* Element N nonzero if basic block N can actually be reached.
+ Vector exists only during find_basic_blocks. */
+
+static char *block_live_static;
+
+/* Depth within loops of basic block being scanned for lifetime analysis,
+ plus one. This is the weight attached to references to registers. */
+
+static int loop_depth;
+
+/* During propagate_block, this is non-zero if the value of CC0 is live. */
+
+static int cc0_live;
+
+/* During propagate_block, this contains the last MEM stored into. It
+ is used to eliminate consecutive stores to the same location. */
+
+static rtx last_mem_set;
+
+/* Set of registers that may be eliminable. These are handled specially
+ in updating regs_ever_live. */
+
+static HARD_REG_SET elim_reg_set;
+
+/* Forward declarations */
+static void find_basic_blocks PROTO((rtx, rtx));
+static int uses_reg_or_mem PROTO((rtx));
+static void mark_label_ref PROTO((rtx, rtx, int));
+static void life_analysis PROTO((rtx, int));
+void allocate_for_life_analysis PROTO((void));
+static void init_regset_vector PROTO((regset *, regset, int, int));
+static void propagate_block PROTO((regset, rtx, rtx, int,
+ regset, int));
+static int insn_dead_p PROTO((rtx, regset, int));
+static int libcall_dead_p PROTO((rtx, regset, rtx, rtx));
+static void mark_set_regs PROTO((regset, regset, rtx,
+ rtx, regset));
+static void mark_set_1 PROTO((regset, regset, rtx,
+ rtx, regset));
+static void find_auto_inc PROTO((regset, rtx, rtx));
+static void mark_used_regs PROTO((regset, regset, rtx, int, rtx));
+static int try_pre_increment_1 PROTO((rtx));
+static int try_pre_increment PROTO((rtx, rtx, HOST_WIDE_INT));
+static rtx find_use_as_address PROTO((rtx, rtx, HOST_WIDE_INT));
+void dump_flow_info PROTO((FILE *));
+
+/* Find basic blocks of the current function and perform data flow analysis.
+ F is the first insn of the function and NREGS the number of register numbers
+ in use. */
+
+void
+flow_analysis (f, nregs, file)
+ rtx f;
+ int nregs;
+ FILE *file;
+{
+ register rtx insn;
+ register int i;
+ rtx nonlocal_label_list = nonlocal_label_rtx_list ();
+
+#ifdef ELIMINABLE_REGS
+ static struct {int from, to; } eliminables[] = ELIMINABLE_REGS;
+#endif
+
+ /* Record which registers will be eliminated. We use this in
+ mark_used_regs. */
+
+ CLEAR_HARD_REG_SET (elim_reg_set);
+
+#ifdef ELIMINABLE_REGS
+ for (i = 0; i < sizeof eliminables / sizeof eliminables[0]; i++)
+ SET_HARD_REG_BIT (elim_reg_set, eliminables[i].from);
+#else
+ SET_HARD_REG_BIT (elim_reg_set, FRAME_POINTER_REGNUM);
+#endif
+
+ /* Count the basic blocks. Also find maximum insn uid value used. */
+
+ {
+ register RTX_CODE prev_code = JUMP_INSN;
+ register RTX_CODE code;
+
+ max_uid_for_flow = 0;
+
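+    /* A new basic block begins at each CODE_LABEL and at the first
+       real insn after a JUMP_INSN, a BARRIER, or (when there are
+       nonlocal labels) a CALL_INSN; count one block for each such
+       place.  */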
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ {
+ code = GET_CODE (insn);
+ if (INSN_UID (insn) > max_uid_for_flow)
+ max_uid_for_flow = INSN_UID (insn);
+ if (code == CODE_LABEL
+ || (GET_RTX_CLASS (code) == 'i'
+ && (prev_code == JUMP_INSN
+ || (prev_code == CALL_INSN
+ && nonlocal_label_list != 0)
+ || prev_code == BARRIER)))
+ i++;
+ if (code != NOTE)
+ prev_code = code;
+ }
+ }
+
+#ifdef AUTO_INC_DEC
+ /* Leave space for insns we make in some cases for auto-inc. These cases
+ are rare, so we don't need too much space. */
+ max_uid_for_flow += max_uid_for_flow / 10;
+#endif
+
+ /* Allocate some tables that last till end of compiling this function
+ and some needed only in find_basic_blocks and life_analysis. */
+
+ n_basic_blocks = i;
+ basic_block_head = (rtx *) oballoc (n_basic_blocks * sizeof (rtx));
+ basic_block_end = (rtx *) oballoc (n_basic_blocks * sizeof (rtx));
+ basic_block_drops_in = (char *) alloca (n_basic_blocks);
+ basic_block_loop_depth = (short *) alloca (n_basic_blocks * sizeof (short));
+ uid_block_number
+ = (int *) alloca ((max_uid_for_flow + 1) * sizeof (int));
+ uid_volatile = (char *) alloca (max_uid_for_flow + 1);
+ bzero (uid_volatile, max_uid_for_flow + 1);
+
+ find_basic_blocks (f, nonlocal_label_list);
+ life_analysis (f, nregs);
+ if (file)
+ dump_flow_info (file);
+
+ basic_block_drops_in = 0;
+ uid_block_number = 0;
+ basic_block_loop_depth = 0;
+}
+
+/* Find all basic blocks of the function whose first insn is F.
+ Store the correct data in the tables that describe the basic blocks,
+ set up the chains of references for each CODE_LABEL, and
+ delete any entire basic blocks that cannot be reached.
+
+ NONLOCAL_LABEL_LIST is the same local variable from flow_analysis. */
+
+static void
+find_basic_blocks (f, nonlocal_label_list)
+ rtx f, nonlocal_label_list;
+{
+ register rtx insn;
+ register int i;
+ register char *block_live = (char *) alloca (n_basic_blocks);
+ register char *block_marked = (char *) alloca (n_basic_blocks);
+ /* List of label_refs to all labels whose addresses are taken
+ and used as data. */
+ rtx label_value_list = 0;
+ rtx x, note;
+ enum rtx_code prev_code, code;
+ int depth;
+
+ block_live_static = block_live;
+ bzero (block_live, n_basic_blocks);
+ bzero (block_marked, n_basic_blocks);
+
+ /* Initialize with just block 0 reachable and no blocks marked. */
+ if (n_basic_blocks > 0)
+ block_live[0] = 1;
+
+ /* Initialize the ref chain of each label to 0. Record where all the
+ blocks start and end and their depth in loops. For each insn, record
+ the block it is in. Also mark as reachable any blocks headed by labels
+ that must not be deleted. */
+
+ for (insn = f, i = -1, prev_code = JUMP_INSN, depth = 1;
+ insn; insn = NEXT_INSN (insn))
+ {
+ code = GET_CODE (insn);
+ if (code == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ depth++;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ depth--;
+ }
+
+ /* A basic block starts at label, or after something that can jump. */
+ else if (code == CODE_LABEL
+ || (GET_RTX_CLASS (code) == 'i'
+ && (prev_code == JUMP_INSN
+ || (prev_code == CALL_INSN
+ && nonlocal_label_list != 0)
+ || prev_code == BARRIER)))
+ {
+ basic_block_head[++i] = insn;
+ basic_block_end[i] = insn;
+ basic_block_loop_depth[i] = depth;
+
+ if (code == CODE_LABEL)
+ {
+ LABEL_REFS (insn) = insn;
+ /* Any label that cannot be deleted
+ is considered to start a reachable block. */
+ if (LABEL_PRESERVE_P (insn))
+ block_live[i] = 1;
+ }
+ }
+
+ else if (GET_RTX_CLASS (code) == 'i')
+ {
+ basic_block_end[i] = insn;
+ basic_block_loop_depth[i] = depth;
+ }
+
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ /* Make a list of all labels referred to other than by jumps. */
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_LABEL)
+ label_value_list = gen_rtx (EXPR_LIST, VOIDmode, XEXP (note, 0),
+ label_value_list);
+ }
+
+ BLOCK_NUM (insn) = i;
+
+ if (code != NOTE)
+ prev_code = code;
+ }
+
+ if (i + 1 != n_basic_blocks)
+ abort ();
+
+ /* Don't delete the labels (in this function)
+ that are referenced by non-jump instructions. */
+
+ for (x = label_value_list; x; x = XEXP (x, 1))
+ if (! LABEL_REF_NONLOCAL_P (x))
+ block_live[BLOCK_NUM (XEXP (x, 0))] = 1;
+
+ for (x = forced_labels; x; x = XEXP (x, 1))
+ if (! LABEL_REF_NONLOCAL_P (x))
+ block_live[BLOCK_NUM (XEXP (x, 0))] = 1;
+
+ /* Record which basic blocks control can drop in to. */
+
+ for (i = 0; i < n_basic_blocks; i++)
+ {
+ for (insn = PREV_INSN (basic_block_head[i]);
+ insn && GET_CODE (insn) == NOTE; insn = PREV_INSN (insn))
+ ;
+
+ basic_block_drops_in[i] = insn && GET_CODE (insn) != BARRIER;
+ }
+
+ /* Now find which basic blocks can actually be reached
+ and put all jump insns' LABEL_REFS onto the ref-chains
+ of their target labels. */
+
+ if (n_basic_blocks > 0)
+ {
+ int something_marked = 1;
+
+ /* Find all indirect jump insns and mark them as possibly jumping to all
+ the labels whose addresses are explicitly used. This is because,
+ when there are computed gotos, we can't tell which labels they jump
+ to, of all the possibilities.
+
+ Tablejumps and casesi insns are OK and we can recognize them by
+ a (use (label_ref)). */
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ rtx pat = PATTERN (insn);
+ int computed_jump = 0;
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ int len = XVECLEN (pat, 0);
+ int has_use_labelref = 0;
+
+ for (i = len - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (pat, 0, i)) == USE
+ && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
+ == LABEL_REF))
+ has_use_labelref = 1;
+
+ if (! has_use_labelref)
+ for (i = len - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (pat, 0, i)) == SET
+ && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
+ && uses_reg_or_mem (SET_SRC (XVECEXP (pat, 0, i))))
+ computed_jump = 1;
+ }
+ else if (GET_CODE (pat) == SET
+ && SET_DEST (pat) == pc_rtx
+ && uses_reg_or_mem (SET_SRC (pat)))
+ computed_jump = 1;
+
+ if (computed_jump)
+ {
+ for (x = label_value_list; x; x = XEXP (x, 1))
+ mark_label_ref (gen_rtx (LABEL_REF, VOIDmode, XEXP (x, 0)),
+ insn, 0);
+
+ for (x = forced_labels; x; x = XEXP (x, 1))
+ mark_label_ref (gen_rtx (LABEL_REF, VOIDmode, XEXP (x, 0)),
+ insn, 0);
+ }
+ }
+
+ /* Find all call insns and mark them as possibly jumping
+ to all the nonlocal goto handler labels. */
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ for (x = nonlocal_label_list; x; x = XEXP (x, 1))
+	    /* Don't try marking labels that have
+	       already been deleted as unreferenced. */
+ if (GET_CODE (XEXP (x, 0)) == CODE_LABEL)
+ mark_label_ref (gen_rtx (LABEL_REF, VOIDmode, XEXP (x, 0)),
+ insn, 0);
+
+ /* ??? This could be made smarter:
+ in some cases it's possible to tell that certain
+ calls will not do a nonlocal goto.
+
+ For example, if the nested functions that do the
+ nonlocal gotos do not have their addresses taken, then
+ only calls to those functions or to other nested
+ functions that use them could possibly do nonlocal
+ gotos. */
+ }
+
+ /* Pass over all blocks, marking each block that is reachable
+ and has not yet been marked.
+ Keep doing this until, in one pass, no blocks have been marked.
+ Then blocks_live and blocks_marked are identical and correct.
+ In addition, all jumps actually reachable have been marked. */
+
+ while (something_marked)
+ {
+ something_marked = 0;
+ for (i = 0; i < n_basic_blocks; i++)
+ if (block_live[i] && !block_marked[i])
+ {
+ block_marked[i] = 1;
+ something_marked = 1;
+ if (i + 1 < n_basic_blocks && basic_block_drops_in[i + 1])
+ block_live[i + 1] = 1;
+ insn = basic_block_end[i];
+ if (GET_CODE (insn) == JUMP_INSN)
+ mark_label_ref (PATTERN (insn), insn, 0);
+ }
+ }
+
+ /* Now delete the code for any basic blocks that can't be reached.
+ They can occur because jump_optimize does not recognize
+ unreachable loops as unreachable. */
+
+ for (i = 0; i < n_basic_blocks; i++)
+ if (!block_live[i])
+ {
+ insn = basic_block_head[i];
+ while (1)
+ {
+ if (GET_CODE (insn) == BARRIER)
+ abort ();
+ if (GET_CODE (insn) != NOTE)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ if (insn == basic_block_end[i])
+ {
+ /* BARRIERs are between basic blocks, not part of one.
+ Delete a BARRIER if the preceding jump is deleted.
+ We cannot alter a BARRIER into a NOTE
+ because it is too short; but we can really delete
+ it because it is not part of a basic block. */
+ if (NEXT_INSN (insn) != 0
+ && GET_CODE (NEXT_INSN (insn)) == BARRIER)
+ delete_insn (NEXT_INSN (insn));
+ break;
+ }
+ insn = NEXT_INSN (insn);
+ }
+ /* Each time we delete some basic blocks,
+ see if there is a jump around them that is
+ being turned into a no-op. If so, delete it. */
+
+ if (block_live[i - 1])
+ {
+ register int j;
+ for (j = i; j < n_basic_blocks; j++)
+ if (block_live[j])
+ {
+ rtx label;
+ insn = basic_block_end[i - 1];
+ if (GET_CODE (insn) == JUMP_INSN
+ /* An unconditional jump is the only possibility
+ we must check for, since a conditional one
+ would make these blocks live. */
+ && simplejump_p (insn)
+ && (label = XEXP (SET_SRC (PATTERN (insn)), 0), 1)
+ && INSN_UID (label) != 0
+ && BLOCK_NUM (label) == j)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ if (GET_CODE (NEXT_INSN (insn)) != BARRIER)
+ abort ();
+ delete_insn (NEXT_INSN (insn));
+ }
+ break;
+ }
+ }
+ }
+ }
+}
+
+/* Return 1 if X contains a REG or MEM that is not in the constant pool. */
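+/* For example, (reg:SI n) and (mem:SI (reg:SI n)) yield 1, while a
+   reference whose address is in the constant pool, (mem (symbol_ref))
+   with CONSTANT_POOL_ADDRESS_P set, and anything built only from
+   constants yield 0.  */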
+
+static int
+uses_reg_or_mem (x)
+ rtx x;
+{
+ enum rtx_code code = GET_CODE (x);
+ int i, j;
+ char *fmt;
+
+ if (code == REG
+ || (code == MEM
+ && ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))))
+ return 1;
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e'
+ && uses_reg_or_mem (XEXP (x, i)))
+ return 1;
+
+ if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (uses_reg_or_mem (XVECEXP (x, i, j)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Check expression X for label references;
+ if one is found, add INSN to the label's chain of references.
+
+ CHECKDUP means check for and avoid creating duplicate references
+ from the same insn. Such duplicates do no serious harm but
+ can slow life analysis. CHECKDUP is set only when duplicates
+ are likely. */
+
+static void
+mark_label_ref (x, insn, checkdup)
+ rtx x, insn;
+ int checkdup;
+{
+ register RTX_CODE code;
+ register int i;
+ register char *fmt;
+
+ /* We can be called with NULL when scanning label_value_list. */
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+ if (code == LABEL_REF)
+ {
+ register rtx label = XEXP (x, 0);
+ register rtx y;
+ if (GET_CODE (label) != CODE_LABEL)
+ abort ();
+ /* If the label was never emitted, this insn is junk,
+ but avoid a crash trying to refer to BLOCK_NUM (label).
+	 This can happen as a result of a syntax error,
+	 in which case a diagnostic has already been printed. */
+ if (INSN_UID (label) == 0)
+ return;
+ CONTAINING_INSN (x) = insn;
+      /* If CHECKDUP is set, check for a duplicate ref from the same
+	 insn and don't insert a second one. */
+ if (checkdup)
+ for (y = LABEL_REFS (label); y != label; y = LABEL_NEXTREF (y))
+ if (CONTAINING_INSN (y) == insn)
+ return;
+ LABEL_NEXTREF (x) = LABEL_REFS (label);
+ LABEL_REFS (label) = x;
+ block_live_static[BLOCK_NUM (label)] = 1;
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ mark_label_ref (XEXP (x, i), insn, 0);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_label_ref (XVECEXP (x, i, j), insn, 1);
+ }
+ }
+}
+
+/* Determine which registers are live at the start of each
+ basic block of the function whose first insn is F.
+ NREGS is the number of registers used in F.
+ We allocate the vector basic_block_live_at_start
+ and the regsets that it points to, and fill them with the data.
+ regset_size and regset_bytes are also set here. */
+
+static void
+life_analysis (f, nregs)
+ rtx f;
+ int nregs;
+{
+ register regset tem;
+ int first_pass;
+ int changed;
+ /* For each basic block, a bitmask of regs
+ live on exit from the block. */
+ regset *basic_block_live_at_end;
+ /* For each basic block, a bitmask of regs
+ live on entry to a successor-block of this block.
+ If this does not match basic_block_live_at_end,
+ that must be updated, and the block must be rescanned. */
+ regset *basic_block_new_live_at_end;
+ /* For each basic block, a bitmask of regs
+ whose liveness at the end of the basic block
+ can make a difference in which regs are live on entry to the block.
+ These are the regs that are set within the basic block,
+ possibly excluding those that are used after they are set. */
+ regset *basic_block_significant;
+ register int i;
+ rtx insn;
+
+ struct obstack flow_obstack;
+
+ gcc_obstack_init (&flow_obstack);
+
+ max_regno = nregs;
+
+ bzero (regs_ever_live, sizeof regs_ever_live);
+
+ /* Allocate and zero out many data structures
+ that will record the data from lifetime analysis. */
+
+ allocate_for_life_analysis ();
+
+ reg_next_use = (rtx *) alloca (nregs * sizeof (rtx));
+ bzero ((char *) reg_next_use, nregs * sizeof (rtx));
+
+ /* Set up several regset-vectors used internally within this function.
+ Their meanings are documented above, with their declarations. */
+
+ basic_block_live_at_end
+ = (regset *) alloca (n_basic_blocks * sizeof (regset));
+
+ /* Don't use alloca since that leads to a crash rather than an error message
+ if there isn't enough space.
+ Don't use oballoc since we may need to allocate other things during
+ this function on the temporary obstack. */
+ tem = (regset) obstack_alloc (&flow_obstack, n_basic_blocks * regset_bytes);
+ bzero ((char *) tem, n_basic_blocks * regset_bytes);
+ init_regset_vector (basic_block_live_at_end, tem,
+ n_basic_blocks, regset_bytes);
+
+ basic_block_new_live_at_end
+ = (regset *) alloca (n_basic_blocks * sizeof (regset));
+ tem = (regset) obstack_alloc (&flow_obstack, n_basic_blocks * regset_bytes);
+ bzero ((char *) tem, n_basic_blocks * regset_bytes);
+ init_regset_vector (basic_block_new_live_at_end, tem,
+ n_basic_blocks, regset_bytes);
+
+ basic_block_significant
+ = (regset *) alloca (n_basic_blocks * sizeof (regset));
+ tem = (regset) obstack_alloc (&flow_obstack, n_basic_blocks * regset_bytes);
+ bzero ((char *) tem, n_basic_blocks * regset_bytes);
+ init_regset_vector (basic_block_significant, tem,
+ n_basic_blocks, regset_bytes);
+
+ /* Record which insns refer to any volatile memory
+ or for any reason can't be deleted just because they are dead stores.
+ Also, delete any insns that copy a register to itself. */
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ {
+ enum rtx_code code1 = GET_CODE (insn);
+ if (code1 == CALL_INSN)
+ INSN_VOLATILE (insn) = 1;
+ else if (code1 == INSN || code1 == JUMP_INSN)
+ {
+ /* Delete (in effect) any obvious no-op moves. */
+ if (GET_CODE (PATTERN (insn)) == SET
+ && GET_CODE (SET_DEST (PATTERN (insn))) == REG
+ && GET_CODE (SET_SRC (PATTERN (insn))) == REG
+ && REGNO (SET_DEST (PATTERN (insn))) ==
+ REGNO (SET_SRC (PATTERN (insn)))
+ /* Insns carrying these notes are useful later on. */
+ && ! find_reg_note (insn, REG_EQUAL, NULL_RTX))
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ /* If nothing but SETs of registers to themselves,
+ this insn can also be deleted. */
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ {
+ rtx tem = XVECEXP (PATTERN (insn), 0, i);
+
+ if (GET_CODE (tem) == USE
+ || GET_CODE (tem) == CLOBBER)
+ continue;
+
+ if (GET_CODE (tem) != SET
+ || GET_CODE (SET_DEST (tem)) != REG
+ || GET_CODE (SET_SRC (tem)) != REG
+ || REGNO (SET_DEST (tem)) != REGNO (SET_SRC (tem)))
+ break;
+ }
+
+ if (i == XVECLEN (PATTERN (insn), 0)
+ /* Insns carrying these notes are useful later on. */
+ && ! find_reg_note (insn, REG_EQUAL, NULL_RTX))
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ else
+ INSN_VOLATILE (insn) = volatile_refs_p (PATTERN (insn));
+ }
+ else if (GET_CODE (PATTERN (insn)) != USE)
+ INSN_VOLATILE (insn) = volatile_refs_p (PATTERN (insn));
+ /* A SET that makes space on the stack cannot be dead.
+ (Such SETs occur only for allocating variable-size data,
+ so they will always have a PLUS or MINUS according to the
+ direction of stack growth.)
+ Even if this function never uses this stack pointer value,
+ signal handlers do! */
+ else if (code1 == INSN && GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == stack_pointer_rtx
+#ifdef STACK_GROWS_DOWNWARD
+ && GET_CODE (SET_SRC (PATTERN (insn))) == MINUS
+#else
+ && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS
+#endif
+ && XEXP (SET_SRC (PATTERN (insn)), 0) == stack_pointer_rtx)
+ INSN_VOLATILE (insn) = 1;
+ }
+ }
+
+ if (n_basic_blocks > 0)
+#ifdef EXIT_IGNORE_STACK
+ if (! EXIT_IGNORE_STACK
+ || (! FRAME_POINTER_REQUIRED && flag_omit_frame_pointer))
+#endif
+ {
+ /* If exiting needs the right stack value,
+ consider the stack pointer live at the end of the function. */
+ basic_block_live_at_end[n_basic_blocks - 1]
+ [STACK_POINTER_REGNUM / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << (STACK_POINTER_REGNUM % REGSET_ELT_BITS);
+ basic_block_new_live_at_end[n_basic_blocks - 1]
+ [STACK_POINTER_REGNUM / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << (STACK_POINTER_REGNUM % REGSET_ELT_BITS);
+ }
+
+  /* Mark the frame pointer as needed at the end of the function.  If
+ we end up eliminating it, it will be removed from the live list
+ of each basic block by reload. */
+
+ if (n_basic_blocks > 0)
+ {
+ basic_block_live_at_end[n_basic_blocks - 1]
+ [FRAME_POINTER_REGNUM / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << (FRAME_POINTER_REGNUM % REGSET_ELT_BITS);
+ basic_block_new_live_at_end[n_basic_blocks - 1]
+ [FRAME_POINTER_REGNUM / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << (FRAME_POINTER_REGNUM % REGSET_ELT_BITS);
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+      /* If they are different, also mark the hard frame pointer as live. */
+ basic_block_live_at_end[n_basic_blocks - 1]
+ [HARD_FRAME_POINTER_REGNUM / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << (HARD_FRAME_POINTER_REGNUM
+ % REGSET_ELT_BITS);
+ basic_block_new_live_at_end[n_basic_blocks - 1]
+ [HARD_FRAME_POINTER_REGNUM / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << (HARD_FRAME_POINTER_REGNUM
+ % REGSET_ELT_BITS);
+#endif
+ }
+
+ /* Mark all global registers as being live at the end of the function
+ since they may be referenced by our caller. */
+
+ if (n_basic_blocks > 0)
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i])
+ {
+ basic_block_live_at_end[n_basic_blocks - 1]
+ [i / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
+ basic_block_new_live_at_end[n_basic_blocks - 1]
+ [i / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
+ }
+
+ /* Propagate life info through the basic blocks
+ around the graph of basic blocks.
+
+ This is a relaxation process: each time a new register
+ is live at the end of the basic block, we must scan the block
+ to determine which registers are, as a consequence, live at the beginning
+ of that block. These registers must then be marked live at the ends
+ of all the blocks that can transfer control to that block.
+ The process continues until it reaches a fixed point. */
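+
+  /* Note that the fixed point must be reached: a pass can only add
+     registers to the live sets, never remove them, and the sets are
+     bounded in size, so eventually a pass changes nothing and CHANGED
+     stays zero.  */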
+
+ first_pass = 1;
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+ for (i = n_basic_blocks - 1; i >= 0; i--)
+ {
+ int consider = first_pass;
+ int must_rescan = first_pass;
+ register int j;
+
+ if (!first_pass)
+ {
+ /* Set CONSIDER if this block needs thinking about at all
+ (that is, if the regs live now at the end of it
+ are not the same as were live at the end of it when
+ we last thought about it).
+ Set must_rescan if it needs to be thought about
+ instruction by instruction (that is, if any additional
+ reg that is live at the end now but was not live there before
+ is one of the significant regs of this basic block). */
+
+ for (j = 0; j < regset_size; j++)
+ {
+ register REGSET_ELT_TYPE x
+ = (basic_block_new_live_at_end[i][j]
+ & ~basic_block_live_at_end[i][j]);
+ if (x)
+ consider = 1;
+ if (x & basic_block_significant[i][j])
+ {
+ must_rescan = 1;
+ consider = 1;
+ break;
+ }
+ }
+
+ if (! consider)
+ continue;
+ }
+
+ /* The live_at_start of this block may be changing,
+ so another pass will be required after this one. */
+ changed = 1;
+
+ if (! must_rescan)
+ {
+ /* No complete rescan needed;
+ just record those variables newly known live at end
+ as live at start as well. */
+ for (j = 0; j < regset_size; j++)
+ {
+ register REGSET_ELT_TYPE x
+ = (basic_block_new_live_at_end[i][j]
+ & ~basic_block_live_at_end[i][j]);
+ basic_block_live_at_start[i][j] |= x;
+ basic_block_live_at_end[i][j] |= x;
+ }
+ }
+ else
+ {
+ /* Update the basic_block_live_at_start
+ by propagation backwards through the block. */
+ bcopy ((char *) basic_block_new_live_at_end[i],
+ (char *) basic_block_live_at_end[i], regset_bytes);
+ bcopy ((char *) basic_block_live_at_end[i],
+ (char *) basic_block_live_at_start[i], regset_bytes);
+ propagate_block (basic_block_live_at_start[i],
+ basic_block_head[i], basic_block_end[i], 0,
+ first_pass ? basic_block_significant[i]
+ : (regset) 0,
+ i);
+ }
+
+ {
+ register rtx jump, head;
+ /* Update the basic_block_new_live_at_end's of the block
+ that falls through into this one (if any). */
+ head = basic_block_head[i];
+ jump = PREV_INSN (head);
+ if (basic_block_drops_in[i])
+ {
+ register int from_block = BLOCK_NUM (jump);
+ register int j;
+ for (j = 0; j < regset_size; j++)
+ basic_block_new_live_at_end[from_block][j]
+ |= basic_block_live_at_start[i][j];
+ }
+ /* Update the basic_block_new_live_at_end's of
+ all the blocks that jump to this one. */
+ if (GET_CODE (head) == CODE_LABEL)
+ for (jump = LABEL_REFS (head);
+ jump != head;
+ jump = LABEL_NEXTREF (jump))
+ {
+ register int from_block = BLOCK_NUM (CONTAINING_INSN (jump));
+ register int j;
+ for (j = 0; j < regset_size; j++)
+ basic_block_new_live_at_end[from_block][j]
+ |= basic_block_live_at_start[i][j];
+ }
+ }
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ }
+ first_pass = 0;
+ }
+
+ /* The only pseudos that are live at the beginning of the function are
+ those that were not set anywhere in the function. local-alloc doesn't
+ know how to handle these correctly, so mark them as not local to any
+ one basic block. */
+
+ if (n_basic_blocks > 0)
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (basic_block_live_at_start[0][i / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS)))
+ reg_basic_block[i] = REG_BLOCK_GLOBAL;
+
+ /* Now the life information is accurate.
+ Make one more pass over each basic block
+ to delete dead stores, create autoincrement addressing
+ and record how many times each register is used, is set, or dies.
+
+ To save time, we operate directly in basic_block_live_at_end[i],
+ thus destroying it (in fact, converting it into a copy of
+ basic_block_live_at_start[i]). This is ok now because
+ basic_block_live_at_end[i] is no longer used past this point. */
+
+ max_scratch = 0;
+
+ for (i = 0; i < n_basic_blocks; i++)
+ {
+ propagate_block (basic_block_live_at_end[i],
+ basic_block_head[i], basic_block_end[i], 1,
+ (regset) 0, i);
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ }
+
+#if 0
+ /* Something live during a setjmp should not be put in a register
+ on certain machines which restore regs from stack frames
+ rather than from the jmpbuf.
+ But we don't need to do this for the user's variables, since
+ ANSI says only volatile variables need this. */
+#ifdef LONGJMP_RESTORE_FROM_STACK
+ for (i = FIRST_PSEUDO_REGISTER; i < nregs; i++)
+ if (regs_live_at_setjmp[i / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS))
+ && regno_reg_rtx[i] != 0 && ! REG_USERVAR_P (regno_reg_rtx[i]))
+ {
+ reg_live_length[i] = -1;
+ reg_basic_block[i] = -1;
+ }
+#endif
+#endif
+
+ /* We have a problem with any pseudoreg that
+ lives across the setjmp. ANSI says that if a
+ user variable does not change in value
+ between the setjmp and the longjmp, then the longjmp preserves it.
+ This includes longjmp from a place where the pseudo appears dead.
+ (In principle, the value still exists if it is in scope.)
+ If the pseudo goes in a hard reg, some other value may occupy
+ that hard reg where this pseudo is dead, thus clobbering the pseudo.
+ Conclusion: such a pseudo must not go in a hard reg. */
+ for (i = FIRST_PSEUDO_REGISTER; i < nregs; i++)
+ if ((regs_live_at_setjmp[i / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS)))
+ && regno_reg_rtx[i] != 0)
+ {
+ reg_live_length[i] = -1;
+ reg_basic_block[i] = -1;
+ }
+
+ obstack_free (&flow_obstack, NULL_PTR);
+}
+
+/* Subroutines of life analysis. */
+
+/* Allocate the permanent data structures that represent the results
+ of life analysis. Not static since used also for stupid life analysis. */
+
+void
+allocate_for_life_analysis ()
+{
+ register int i;
+ register regset tem;
+
+ regset_size = ((max_regno + REGSET_ELT_BITS - 1) / REGSET_ELT_BITS);
+ regset_bytes = regset_size * sizeof (*(regset)0);
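+
+  /* For example, assuming REGSET_ELT_BITS is 32 and max_regno is 100,
+     regset_size is (100 + 31) / 32 == 4 elements and regset_bytes is
+     4 * sizeof (REGSET_ELT_TYPE).  */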
+
+ reg_n_refs = (int *) oballoc (max_regno * sizeof (int));
+ bzero ((char *) reg_n_refs, max_regno * sizeof (int));
+
+ reg_n_sets = (short *) oballoc (max_regno * sizeof (short));
+ bzero ((char *) reg_n_sets, max_regno * sizeof (short));
+
+ reg_n_deaths = (short *) oballoc (max_regno * sizeof (short));
+ bzero ((char *) reg_n_deaths, max_regno * sizeof (short));
+
+ reg_live_length = (int *) oballoc (max_regno * sizeof (int));
+ bzero ((char *) reg_live_length, max_regno * sizeof (int));
+
+ reg_n_calls_crossed = (int *) oballoc (max_regno * sizeof (int));
+ bzero ((char *) reg_n_calls_crossed, max_regno * sizeof (int));
+
+ reg_basic_block = (int *) oballoc (max_regno * sizeof (int));
+ for (i = 0; i < max_regno; i++)
+ reg_basic_block[i] = REG_BLOCK_UNKNOWN;
+
+ basic_block_live_at_start
+ = (regset *) oballoc (n_basic_blocks * sizeof (regset));
+ tem = (regset) oballoc (n_basic_blocks * regset_bytes);
+ bzero ((char *) tem, n_basic_blocks * regset_bytes);
+ init_regset_vector (basic_block_live_at_start, tem,
+ n_basic_blocks, regset_bytes);
+
+ regs_live_at_setjmp = (regset) oballoc (regset_bytes);
+ bzero ((char *) regs_live_at_setjmp, regset_bytes);
+}
+
+/* Make each element of VECTOR point at a regset,
+ taking the space for all those regsets from SPACE.
+ SPACE is of type regset, but it is really as long as NELTS regsets.
+ BYTES_PER_ELT is the number of bytes in one regset. */
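+/* As an illustrative sketch: with NELTS == 3, BYTES_PER_ELT == 16, and
+   sizeof (*space) == 4, the loop below sets vector[0] = space,
+   vector[1] = space + 4, and vector[2] = space + 8, carving three
+   consecutive 16-byte regsets out of SPACE.  */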
+
+static void
+init_regset_vector (vector, space, nelts, bytes_per_elt)
+ regset *vector;
+ regset space;
+ int nelts;
+ int bytes_per_elt;
+{
+ register int i;
+ register regset p = space;
+
+ for (i = 0; i < nelts; i++)
+ {
+ vector[i] = p;
+ p += bytes_per_elt / sizeof (*p);
+ }
+}
+
+/* Compute the registers live at the beginning of a basic block
+ from those live at the end.
+
+ When called, OLD contains those live at the end.
+ On return, it contains those live at the beginning.
+ FIRST and LAST are the first and last insns of the basic block.
+
+ FINAL is nonzero if we are doing the final pass which is not
+ for computing the life info (since that has already been done)
+ but for acting on it. On this pass, we delete dead stores,
+ set up the logical links and dead-variables lists of instructions,
+ and merge instructions for autoincrement and autodecrement addresses.
+
+ SIGNIFICANT is nonzero only the first time for each basic block.
+ If it is nonzero, it points to a regset in which we store
+ a 1 for each register that is set within the block.
+
+ BNUM is the number of the basic block. */
+
+static void
+propagate_block (old, first, last, final, significant, bnum)
+ register regset old;
+ rtx first;
+ rtx last;
+ int final;
+ regset significant;
+ int bnum;
+{
+ register rtx insn;
+ rtx prev;
+ regset live;
+ regset dead;
+
+ /* The following variables are used only if FINAL is nonzero. */
+ /* This vector gets one element for each reg that has been live
+ at any point in the basic block that has been scanned so far.
+ SOMETIMES_MAX says how many elements are in use so far.
+     In each element, OFFSET is the index of the regset element
+     covering the register described by the entry, and BIT is that
+     register's bit number within the element. */
+ register struct sometimes { short offset; short bit; } *regs_sometimes_live;
+ int sometimes_max = 0;
+ /* This regset has 1 for each reg that we have seen live so far.
+ It and REGS_SOMETIMES_LIVE are updated together. */
+ regset maxlive;
+
+ /* The loop depth may change in the middle of a basic block. Since we
+ scan from end to beginning, we start with the depth at the end of the
+ current basic block, and adjust as we pass ends and starts of loops. */
+ loop_depth = basic_block_loop_depth[bnum];
+
+ dead = (regset) alloca (regset_bytes);
+ live = (regset) alloca (regset_bytes);
+
+ cc0_live = 0;
+ last_mem_set = 0;
+
+ /* Include any notes at the end of the block in the scan.
+ This is in case the block ends with a call to setjmp. */
+
+ while (NEXT_INSN (last) != 0 && GET_CODE (NEXT_INSN (last)) == NOTE)
+ {
+      /* Look for loop boundaries; we are going forward here. */
+ last = NEXT_INSN (last);
+ if (NOTE_LINE_NUMBER (last) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (last) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+
+ if (final)
+ {
+ register int i, offset;
+ REGSET_ELT_TYPE bit;
+
+ num_scratch = 0;
+ maxlive = (regset) alloca (regset_bytes);
+ bcopy ((char *) old, (char *) maxlive, regset_bytes);
+ regs_sometimes_live
+ = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
+
+ /* Process the regs live at the end of the block.
+ Enter them in MAXLIVE and REGS_SOMETIMES_LIVE.
+ Also mark them as not local to any one basic block. */
+
+ for (offset = 0, i = 0; offset < regset_size; offset++)
+ for (bit = 1; bit; bit <<= 1, i++)
+ {
+ if (i == max_regno)
+ break;
+ if (old[offset] & bit)
+ {
+ reg_basic_block[i] = REG_BLOCK_GLOBAL;
+ regs_sometimes_live[sometimes_max].offset = offset;
+ regs_sometimes_live[sometimes_max].bit = i % REGSET_ELT_BITS;
+ sometimes_max++;
+ }
+ }
+ }
+
+ /* Scan the block an insn at a time from end to beginning. */
+
+ for (insn = last; ; insn = prev)
+ {
+ prev = PREV_INSN (insn);
+
+ /* Look for loop boundaries, remembering that we are going backwards. */
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ loop_depth++;
+ else if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ loop_depth--;
+
+ /* If we have LOOP_DEPTH == 0, there has been a bookkeeping error.
+ Abort now rather than setting register status incorrectly. */
+ if (loop_depth == 0)
+ abort ();
+
+ /* If this is a call to `setjmp' et al,
+ warn if any non-volatile datum is live. */
+
+ if (final && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
+ {
+ int i;
+ for (i = 0; i < regset_size; i++)
+ regs_live_at_setjmp[i] |= old[i];
+ }
+
+ /* Update the life-status of regs for this insn.
+ First DEAD gets which regs are set in this insn
+ then LIVE gets which regs are used in this insn.
+ Then the regs live before the insn
+ are those live after, with DEAD regs turned off,
+ and then LIVE regs turned on. */
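+
+      /* In regset terms, the code below computes, element by element,
+	    old = (old & ~dead) | live;
+	 once DEAD and LIVE have been collected for this insn.  */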
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ register int i;
+ rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
+ int insn_is_dead
+ = (insn_dead_p (PATTERN (insn), old, 0)
+ /* Don't delete something that refers to volatile storage! */
+ && ! INSN_VOLATILE (insn));
+ int libcall_is_dead
+ = (insn_is_dead && note != 0
+ && libcall_dead_p (PATTERN (insn), old, note, insn));
+
+ /* If an instruction consists of just dead store(s) on final pass,
+ "delete" it by turning it into a NOTE of type NOTE_INSN_DELETED.
+ We could really delete it with delete_insn, but that
+	     can cause trouble for the first or last insn in a basic block. */
+ if (final && insn_is_dead)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+
+ /* CC0 is now known to be dead. Either this insn used it,
+ in which case it doesn't anymore, or clobbered it,
+ so the next insn can't use it. */
+ cc0_live = 0;
+
+ /* If this insn is copying the return value from a library call,
+ delete the entire library call. */
+ if (libcall_is_dead)
+ {
+ rtx first = XEXP (note, 0);
+ rtx p = insn;
+ while (INSN_DELETED_P (first))
+ first = NEXT_INSN (first);
+ while (p != first)
+ {
+ p = PREV_INSN (p);
+ PUT_CODE (p, NOTE);
+ NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (p) = 0;
+ }
+ }
+ goto flushed;
+ }
+
+ for (i = 0; i < regset_size; i++)
+ {
+ dead[i] = 0; /* Faster than bzero here */
+ live[i] = 0; /* since regset_size is usually small */
+ }
+
+ /* See if this is an increment or decrement that can be
+ merged into a following memory address. */
+#ifdef AUTO_INC_DEC
+ {
+ register rtx x = PATTERN (insn);
+ /* Does this instruction increment or decrement a register? */
+ if (final && GET_CODE (x) == SET
+ && GET_CODE (SET_DEST (x)) == REG
+ && (GET_CODE (SET_SRC (x)) == PLUS
+ || GET_CODE (SET_SRC (x)) == MINUS)
+ && XEXP (SET_SRC (x), 0) == SET_DEST (x)
+ && GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT
+ /* Ok, look for a following memory ref we can combine with.
+ If one is found, change the memory ref to a PRE_INC
+ or PRE_DEC, cancel this insn, and return 1.
+ Return 0 if nothing has been done. */
+ && try_pre_increment_1 (insn))
+ goto flushed;
+ }
+#endif /* AUTO_INC_DEC */
+
+ /* If this is not the final pass, and this insn is copying the
+ value of a library call and it's dead, don't scan the
+ insns that perform the library call, so that the call's
+ arguments are not marked live. */
+ if (libcall_is_dead)
+ {
+ /* Mark the dest reg as `significant'. */
+ mark_set_regs (old, dead, PATTERN (insn), NULL_RTX, significant);
+
+ insn = XEXP (note, 0);
+ prev = PREV_INSN (insn);
+ }
+ else if (GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == stack_pointer_rtx
+ && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS
+ && XEXP (SET_SRC (PATTERN (insn)), 0) == stack_pointer_rtx
+ && GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 1)) == CONST_INT)
+ /* We have an insn to pop a constant amount off the stack.
+ (Such insns use PLUS regardless of the direction of the stack,
+ and any insn to adjust the stack by a constant is always a pop.)
+ These insns, if not dead stores, have no effect on life. */
+ ;
+ else
+ {
+ /* LIVE gets the regs used in INSN;
+ DEAD gets those set by it. Dead insns don't make anything
+ live. */
+
+ mark_set_regs (old, dead, PATTERN (insn),
+ final ? insn : NULL_RTX, significant);
+
+ /* If an insn doesn't use CC0, it becomes dead since we
+ assume that every insn clobbers it. So show it dead here;
+ mark_used_regs will set it live if it is referenced. */
+ cc0_live = 0;
+
+ if (! insn_is_dead)
+ mark_used_regs (old, live, PATTERN (insn), final, insn);
+
+ /* Sometimes we may have inserted something before INSN (such as
+ a move) when we make an auto-inc. So ensure we will scan
+ those insns. */
+#ifdef AUTO_INC_DEC
+ prev = PREV_INSN (insn);
+#endif
+
+ if (! insn_is_dead && GET_CODE (insn) == CALL_INSN)
+ {
+ register int i;
+
+ rtx note;
+
+ for (note = CALL_INSN_FUNCTION_USAGE (insn);
+ note;
+ note = XEXP (note, 1))
+ if (GET_CODE (XEXP (note, 0)) == USE)
+ mark_used_regs (old, live, SET_DEST (XEXP (note, 0)),
+ final, insn);
+
+ /* Each call clobbers all call-clobbered regs that are not
+ global. Note that the function-value reg is a
+ call-clobbered reg, and mark_set_regs has already had
+ a chance to handle it. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i] && ! global_regs[i])
+ dead[i / REGSET_ELT_BITS]
+ |= ((REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS));
+
+ /* The stack ptr is used (honorarily) by a CALL insn. */
+ live[STACK_POINTER_REGNUM / REGSET_ELT_BITS]
+ |= ((REGSET_ELT_TYPE) 1
+ << (STACK_POINTER_REGNUM % REGSET_ELT_BITS));
+
+ /* Calls may also reference any of the global registers,
+ so they are made live. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i])
+ live[i / REGSET_ELT_BITS]
+ |= ((REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS));
+
+ /* Calls also clobber memory. */
+ last_mem_set = 0;
+ }
+
+ /* Update OLD for the registers used or set. */
+ for (i = 0; i < regset_size; i++)
+ {
+ old[i] &= ~dead[i];
+ old[i] |= live[i];
+ }
+
+ if (GET_CODE (insn) == CALL_INSN && final)
+ {
+ /* Any regs live at the time of a call instruction
+ must not go in a register clobbered by calls.
+ Find all regs now live and record this for them. */
+
+ register struct sometimes *p = regs_sometimes_live;
+
+ for (i = 0; i < sometimes_max; i++, p++)
+ if (old[p->offset] & ((REGSET_ELT_TYPE) 1 << p->bit))
+		reg_n_calls_crossed[p->offset * REGSET_ELT_BITS + p->bit] += 1;
+ }
+ }
+
+ /* On final pass, add any additional sometimes-live regs
+ into MAXLIVE and REGS_SOMETIMES_LIVE.
+ Also update counts of how many insns each reg is live at. */
+
+ if (final)
+ {
+ for (i = 0; i < regset_size; i++)
+ {
+ register REGSET_ELT_TYPE diff = live[i] & ~maxlive[i];
+
+ if (diff)
+ {
+ register int regno;
+ maxlive[i] |= diff;
+ for (regno = 0; diff && regno < REGSET_ELT_BITS; regno++)
+ if (diff & ((REGSET_ELT_TYPE) 1 << regno))
+ {
+ regs_sometimes_live[sometimes_max].offset = i;
+ regs_sometimes_live[sometimes_max].bit = regno;
+ diff &= ~ ((REGSET_ELT_TYPE) 1 << regno);
+ sometimes_max++;
+ }
+ }
+ }
+
+ {
+ register struct sometimes *p = regs_sometimes_live;
+ for (i = 0; i < sometimes_max; i++, p++)
+ {
+ if (old[p->offset] & ((REGSET_ELT_TYPE) 1 << p->bit))
+ reg_live_length[p->offset * REGSET_ELT_BITS + p->bit]++;
+ }
+ }
+ }
+ }
+ flushed: ;
+ if (insn == first)
+ break;
+ }
+
+ if (num_scratch > max_scratch)
+ max_scratch = num_scratch;
+}
+
+/* Return 1 if X (the body of an insn, or part of it) is just dead stores
+ (SET expressions whose destinations are registers dead after the insn).
+ NEEDED is the regset that says which regs are alive after the insn.
+
+ Unless CALL_OK is non-zero, an insn is needed if it contains a CALL. */
+
+static int
+insn_dead_p (x, needed, call_ok)
+ rtx x;
+ regset needed;
+ int call_ok;
+{
+ register RTX_CODE code = GET_CODE (x);
+ /* If setting something that's a reg or part of one,
+ see if that register's altered value will be live. */
+
+ if (code == SET)
+ {
+ register rtx r = SET_DEST (x);
+ /* A SET that is a subroutine call cannot be dead. */
+ if (! call_ok && GET_CODE (SET_SRC (x)) == CALL)
+ return 0;
+
+#ifdef HAVE_cc0
+ if (GET_CODE (r) == CC0)
+ return ! cc0_live;
+#endif
+
+ if (GET_CODE (r) == MEM && last_mem_set && ! MEM_VOLATILE_P (r)
+ && rtx_equal_p (r, last_mem_set))
+ return 1;
+
+ while (GET_CODE (r) == SUBREG
+ || GET_CODE (r) == STRICT_LOW_PART
+ || GET_CODE (r) == ZERO_EXTRACT
+ || GET_CODE (r) == SIGN_EXTRACT)
+ r = SUBREG_REG (r);
+
+ if (GET_CODE (r) == REG)
+ {
+ register int regno = REGNO (r);
+ register int offset = regno / REGSET_ELT_BITS;
+ register REGSET_ELT_TYPE bit
+ = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
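+
+	  /* E.g., assuming REGSET_ELT_BITS == 32, regno 37 is tracked
+	     in needed[1] under the mask (REGSET_ELT_TYPE) 1 << 5.  */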
+
+ /* Don't delete insns to set global regs. */
+ if ((regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
+ /* Make sure insns to set frame pointer aren't deleted. */
+ || regno == FRAME_POINTER_REGNUM
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ || regno == HARD_FRAME_POINTER_REGNUM
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ /* Make sure insns to set arg pointer are never deleted
+ (if the arg pointer isn't fixed, there will be a USE for
+ it, so we can treat it normally). */
+ || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+ || (needed[offset] & bit) != 0)
+ return 0;
+
+ /* If this is a hard register, verify that subsequent words are
+ not needed. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n = HARD_REGNO_NREGS (regno, GET_MODE (r));
+
+ while (--n > 0)
+ if ((needed[(regno + n) / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1
+ << ((regno + n) % REGSET_ELT_BITS))) != 0)
+ return 0;
+ }
+
+ return 1;
+ }
+ }
+ /* If performing several activities,
+ insn is dead if each activity is individually dead.
+ Also, CLOBBERs and USEs can be ignored; a CLOBBER or USE
+ that's inside a PARALLEL doesn't make the insn worth keeping. */
+ else if (code == PARALLEL)
+ {
+ register int i = XVECLEN (x, 0);
+ for (i--; i >= 0; i--)
+ {
+ rtx elt = XVECEXP (x, 0, i);
+ if (!insn_dead_p (elt, needed, call_ok)
+ && GET_CODE (elt) != CLOBBER
+ && GET_CODE (elt) != USE)
+ return 0;
+ }
+ return 1;
+ }
+ /* We do not check CLOBBER or USE here.
+ An insn consisting of just a CLOBBER or just a USE
+ should not be deleted. */
+ return 0;
+}
+
+/* If X is the pattern of the last insn in a libcall, and assuming X is dead,
+ return 1 if the entire library call is dead.
+ This is true if X copies a register (hard or pseudo)
+ and if the hard return reg of the call insn is dead.
+ (The caller should have tested the destination of X already for death.)
+
+ If this insn doesn't just copy a register, then we don't
+ have an ordinary libcall. In that case, cse could not have
+ managed to substitute the source for the dest later on,
+ so we can assume the libcall is dead.
+
+ NEEDED is the bit vector of pseudoregs live before this insn.
+ NOTE is the REG_RETVAL note of the insn. INSN is the insn itself. */
+
+static int
+libcall_dead_p (x, needed, note, insn)
+ rtx x;
+ regset needed;
+ rtx note;
+ rtx insn;
+{
+ register RTX_CODE code = GET_CODE (x);
+
+ if (code == SET)
+ {
+ register rtx r = SET_SRC (x);
+ if (GET_CODE (r) == REG)
+ {
+ rtx call = XEXP (note, 0);
+ register int i;
+
+ /* Find the call insn. */
+ while (call != insn && GET_CODE (call) != CALL_INSN)
+ call = NEXT_INSN (call);
+
+ /* If there is none, do nothing special,
+ since ordinary death handling can understand these insns. */
+ if (call == insn)
+ return 0;
+
+ /* See if the hard reg holding the value is dead.
+ If this is a PARALLEL, find the call within it. */
+ call = PATTERN (call);
+ if (GET_CODE (call) == PARALLEL)
+ {
+ for (i = XVECLEN (call, 0) - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (call, 0, i)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (call, 0, i))) == CALL)
+ break;
+
+ /* This may be a library call that is returning a value
+ via invisible pointer. Do nothing special, since
+ ordinary death handling can understand these insns. */
+ if (i < 0)
+ return 0;
+
+ call = XVECEXP (call, 0, i);
+ }
+
+ return insn_dead_p (call, needed, 1);
+ }
+ }
+ return 1;
+}
+
+/* Return 1 if register REGNO was used before it was set.
+ In other words, if it is live at function entry.
+   Don't count global register variables, though. */
+
+int
+regno_uninitialized (regno)
+ int regno;
+{
+ if (n_basic_blocks == 0
+ || (regno < FIRST_PSEUDO_REGISTER && global_regs[regno]))
+ return 0;
+
+ return (basic_block_live_at_start[0][regno / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS)));
+}
+
+/* 1 if register REGNO was alive at a place where `setjmp' was called
+ and was set more than once or is an argument.
+ Such regs may be clobbered by `longjmp'. */
+
+int
+regno_clobbered_at_setjmp (regno)
+ int regno;
+{
+ if (n_basic_blocks == 0)
+ return 0;
+
+ return ((reg_n_sets[regno] > 1
+ || (basic_block_live_at_start[0][regno / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS))))
+ && (regs_live_at_setjmp[regno / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS))));
+}
+
+/* Process the registers that are set within X.
+ Their bits are set to 1 in the regset DEAD,
+ because they are dead prior to this insn.
+
+ If INSN is nonzero, it is the insn being processed
+ and the fact that it is nonzero implies this is the FINAL pass
+ in propagate_block. In this case, various info about register
+   usage is stored, and LOG_LINKS fields of insns are set up. */
+
+static void
+mark_set_regs (needed, dead, x, insn, significant)
+ regset needed;
+ regset dead;
+ rtx x;
+ rtx insn;
+ regset significant;
+{
+ register RTX_CODE code = GET_CODE (x);
+
+ if (code == SET || code == CLOBBER)
+ mark_set_1 (needed, dead, x, insn, significant);
+ else if (code == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ code = GET_CODE (XVECEXP (x, 0, i));
+ if (code == SET || code == CLOBBER)
+ mark_set_1 (needed, dead, XVECEXP (x, 0, i), insn, significant);
+ }
+ }
+}
+
+/* Process a single SET rtx, X. */
+
+static void
+mark_set_1 (needed, dead, x, insn, significant)
+ regset needed;
+ regset dead;
+ rtx x;
+ rtx insn;
+ regset significant;
+{
+ register int regno;
+ register rtx reg = SET_DEST (x);
+
+ /* Modifying just one hardware register of a multi-reg value
+ or just a byte field of a register
+ does not mean the value from before this insn is now dead.
+ But it does mean liveness of that register at the end of the block
+ is significant.
+
+ Within mark_set_1, however, we treat it as if the register is
+ indeed modified. mark_used_regs will, however, also treat this
+ register as being used. Thus, we treat these insns as setting a
+ new value for the register as a function of its old value. This
+     causes LOG_LINKS to be made appropriately, and this will help combine. */
+
+ while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
+ || GET_CODE (reg) == SIGN_EXTRACT
+ || GET_CODE (reg) == STRICT_LOW_PART)
+ reg = XEXP (reg, 0);
+
+ /* If we are writing into memory or into a register mentioned in the
+ address of the last thing stored into memory, show we don't know
+ what the last store was. If we are writing memory, save the address
+ unless it is volatile. */
+ if (GET_CODE (reg) == MEM
+ || (GET_CODE (reg) == REG
+ && last_mem_set != 0 && reg_overlap_mentioned_p (reg, last_mem_set)))
+ last_mem_set = 0;
+
+ if (GET_CODE (reg) == MEM && ! side_effects_p (reg)
+ /* There are no REG_INC notes for SP, so we can't assume we'll see
+ everything that invalidates it. To be safe, don't eliminate any
+     stores through SP; none of them should be redundant anyway. */
+ && ! reg_mentioned_p (stack_pointer_rtx, reg))
+ last_mem_set = reg;
+
+ if (GET_CODE (reg) == REG
+ && (regno = REGNO (reg), regno != FRAME_POINTER_REGNUM)
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ && regno != HARD_FRAME_POINTER_REGNUM
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+ && ! (regno < FIRST_PSEUDO_REGISTER && global_regs[regno]))
+ /* && regno != STACK_POINTER_REGNUM) -- let's try without this. */
+ {
+ register int offset = regno / REGSET_ELT_BITS;
+ register REGSET_ELT_TYPE bit
+ = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
+ REGSET_ELT_TYPE all_needed = (needed[offset] & bit);
+ REGSET_ELT_TYPE some_needed = (needed[offset] & bit);
+
+ /* Mark it as a significant register for this basic block. */
+ if (significant)
+ significant[offset] |= bit;
+
+      /* Mark it as dead before this insn. */
+ dead[offset] |= bit;
+
+ /* A hard reg in a wide mode may really be multiple registers.
+ If so, mark all of them just like the first. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n;
+
+ /* Nothing below is needed for the stack pointer; get out asap.
+ Eg, log links aren't needed, since combine won't use them. */
+ if (regno == STACK_POINTER_REGNUM)
+ return;
+
+ n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (--n > 0)
+ {
+ if (significant)
+ significant[(regno + n) / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << ((regno + n) % REGSET_ELT_BITS);
+ dead[(regno + n) / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << ((regno + n) % REGSET_ELT_BITS);
+ some_needed
+ |= (needed[(regno + n) / REGSET_ELT_BITS]
+ & (REGSET_ELT_TYPE) 1 << ((regno + n) % REGSET_ELT_BITS));
+ all_needed
+ &= (needed[(regno + n) / REGSET_ELT_BITS]
+ & (REGSET_ELT_TYPE) 1 << ((regno + n) % REGSET_ELT_BITS));
+ }
+ }
+ /* Additional data to record if this is the final pass. */
+ if (insn)
+ {
+ register rtx y = reg_next_use[regno];
+ register int blocknum = BLOCK_NUM (insn);
+
+ /* The next use is no longer "next", since a store intervenes. */
+ reg_next_use[regno] = 0;
+
+ /* If this is a hard reg, record this function uses the reg. */
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ register int i;
+ int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));
+
+ for (i = regno; i < endregno; i++)
+ {
+ regs_ever_live[i] = 1;
+ reg_n_sets[i]++;
+ }
+ }
+ else
+ {
+ /* Keep track of which basic blocks each reg appears in. */
+
+ if (reg_basic_block[regno] == REG_BLOCK_UNKNOWN)
+ reg_basic_block[regno] = blocknum;
+ else if (reg_basic_block[regno] != blocknum)
+ reg_basic_block[regno] = REG_BLOCK_GLOBAL;
+
+ /* Count (weighted) references, stores, etc. This counts a
+ register twice if it is modified, but that is correct. */
+ reg_n_sets[regno]++;
+
+ reg_n_refs[regno] += loop_depth;
+
+ /* The insns where a reg is live are normally counted
+ elsewhere, but we want the count to include the insn
+ where the reg is set, and the normal counting mechanism
+ would not count it. */
+ reg_live_length[regno]++;
+ }
+
+ if (all_needed)
+ {
+ /* Make a logical link from the next following insn
+ that uses this register, back to this insn.
+ The following insns have already been processed.
+
+	     We don't build a LOG_LINK for hard registers contained
+ in ASM_OPERANDs. If these registers get replaced,
+ we might wind up changing the semantics of the insn,
+ even if reload can make what appear to be valid assignments
+ later. */
+ if (y && (BLOCK_NUM (y) == blocknum)
+ && (regno >= FIRST_PSEUDO_REGISTER
+ || asm_noperands (PATTERN (y)) < 0))
+ LOG_LINKS (y)
+ = gen_rtx (INSN_LIST, VOIDmode, insn, LOG_LINKS (y));
+ }
+ else if (! some_needed)
+ {
+	      /* Note that dead stores have already been deleted when possible.
+ If we get here, we have found a dead store that cannot
+ be eliminated (because the same insn does something useful).
+ Indicate this by marking the reg being set as dying here. */
+ REG_NOTES (insn)
+ = gen_rtx (EXPR_LIST, REG_UNUSED, reg, REG_NOTES (insn));
+ reg_n_deaths[REGNO (reg)]++;
+ }
+ else
+ {
+ /* This is a case where we have a multi-word hard register
+ and some, but not all, of the words of the register are
+ needed in subsequent insns. Write REG_UNUSED notes
+ for those parts that were not needed. This case should
+ be rare. */
+
+ int i;
+
+ for (i = HARD_REGNO_NREGS (regno, GET_MODE (reg)) - 1;
+ i >= 0; i--)
+ if ((needed[(regno + i) / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1
+ << ((regno + i) % REGSET_ELT_BITS))) == 0)
+ REG_NOTES (insn)
+ = gen_rtx (EXPR_LIST, REG_UNUSED,
+ gen_rtx (REG, reg_raw_mode[regno + i],
+ regno + i),
+ REG_NOTES (insn));
+ }
+ }
+ }
+ else if (GET_CODE (reg) == REG)
+ reg_next_use[regno] = 0;
+
+ /* If this is the last pass and this is a SCRATCH, show it will be dying
+ here and count it. */
+ else if (GET_CODE (reg) == SCRATCH && insn != 0)
+ {
+ REG_NOTES (insn)
+ = gen_rtx (EXPR_LIST, REG_UNUSED, reg, REG_NOTES (insn));
+ num_scratch++;
+ }
+}
+
+#ifdef AUTO_INC_DEC
+
+/* X is a MEM found in INSN. See if we can convert it into an auto-increment
+ reference. */
+
+static void
+find_auto_inc (needed, x, insn)
+ regset needed;
+ rtx x;
+ rtx insn;
+{
+ rtx addr = XEXP (x, 0);
+ HOST_WIDE_INT offset = 0;
+ rtx set;
+
+ /* Here we detect use of an index register which might be good for
+ postincrement, postdecrement, preincrement, or predecrement. */
+
+ if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ offset = INTVAL (XEXP (addr, 1)), addr = XEXP (addr, 0);
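+
+  /* E.g., given (mem:SI (plus:SI (reg:SI n) (const_int 4))), the
+     decomposition above leaves OFFSET == 4 and ADDR the bare register.  */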
+
+ if (GET_CODE (addr) == REG)
+ {
+ register rtx y;
+ register int size = GET_MODE_SIZE (GET_MODE (x));
+ rtx use;
+ rtx incr;
+ int regno = REGNO (addr);
+
+ /* Is the next use an increment that might make auto-increment? */
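+      /* For instance, with SIZE == 4, a later reg = reg + 4 combines
+	 as a POST_INC when OFFSET == 0 and as a PRE_INC when
+	 OFFSET == 4; decrements mirror these, per the tests below.  */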
+ if ((incr = reg_next_use[regno]) != 0
+ && (set = single_set (incr)) != 0
+ && GET_CODE (set) == SET
+ && BLOCK_NUM (incr) == BLOCK_NUM (insn)
+ /* Can't add side effects to jumps; if reg is spilled and
+ reloaded, there's no way to store back the altered value. */
+ && GET_CODE (insn) != JUMP_INSN
+ && (y = SET_SRC (set), GET_CODE (y) == PLUS)
+ && XEXP (y, 0) == addr
+ && GET_CODE (XEXP (y, 1)) == CONST_INT
+ && (0
+#ifdef HAVE_POST_INCREMENT
+ || (INTVAL (XEXP (y, 1)) == size && offset == 0)
+#endif
+#ifdef HAVE_POST_DECREMENT
+ || (INTVAL (XEXP (y, 1)) == - size && offset == 0)
+#endif
+#ifdef HAVE_PRE_INCREMENT
+ || (INTVAL (XEXP (y, 1)) == size && offset == size)
+#endif
+#ifdef HAVE_PRE_DECREMENT
+ || (INTVAL (XEXP (y, 1)) == - size && offset == - size)
+#endif
+ )
+ /* Make sure this reg appears only once in this insn. */
+ && (use = find_use_as_address (PATTERN (insn), addr, offset),
+ use != 0 && use != (rtx) 1))
+ {
+ int win = 0;
+ rtx q = SET_DEST (set);
+
+ if (dead_or_set_p (incr, addr))
+ win = 1;
+ else if (GET_CODE (q) == REG
+ /* PREV_INSN used here to check the semi-open interval
+ [insn,incr). */
+ && ! reg_used_between_p (q, PREV_INSN (insn), incr))
+ {
+ /* We have *p followed sometime later by q = p+size.
+ Both p and q must be live afterward,
+		 and q is not used between INSN and its assignment.
+ Change it to q = p, ...*q..., q = q+size.
+ Then fall into the usual case. */
+ rtx insns, temp;
+
+ start_sequence ();
+ emit_move_insn (q, addr);
+ insns = get_insns ();
+ end_sequence ();
+
+	      /* If anything in INSNS has a UID that doesn't fit within the
+		 extra space we allocated earlier, we can't make this auto-inc.
+ This should never happen. */
+ for (temp = insns; temp; temp = NEXT_INSN (temp))
+ {
+ if (INSN_UID (temp) > max_uid_for_flow)
+ return;
+ BLOCK_NUM (temp) = BLOCK_NUM (insn);
+ }
+
+ emit_insns_before (insns, insn);
+
+ if (basic_block_head[BLOCK_NUM (insn)] == insn)
+ basic_block_head[BLOCK_NUM (insn)] = insns;
+
+ XEXP (x, 0) = q;
+ XEXP (y, 0) = q;
+
+ /* INCR will become a NOTE and INSN won't contain a
+ use of ADDR. If a use of ADDR was just placed in
+ the insn before INSN, make that the next use.
+ Otherwise, invalidate it. */
+ if (GET_CODE (PREV_INSN (insn)) == INSN
+ && GET_CODE (PATTERN (PREV_INSN (insn))) == SET
+ && SET_SRC (PATTERN (PREV_INSN (insn))) == addr)
+ reg_next_use[regno] = PREV_INSN (insn);
+ else
+ reg_next_use[regno] = 0;
+
+ addr = q;
+ regno = REGNO (q);
+ win = 1;
+
+ /* REGNO is now used in INCR which is below INSN, but
+ it previously wasn't live here. If we don't mark
+ it as needed, we'll put a REG_DEAD note for it
+ on this insn, which is incorrect. */
+ needed[regno / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
+
+ /* If there are any calls between INSN and INCR, show
+ that REGNO now crosses them. */
+ for (temp = insn; temp != incr; temp = NEXT_INSN (temp))
+ if (GET_CODE (temp) == CALL_INSN)
+ reg_n_calls_crossed[regno]++;
+ }
+
+ if (win
+ /* If we have found a suitable auto-increment, do
+ POST_INC around the register here, and patch out the
+ increment instruction that follows. */
+ && validate_change (insn, &XEXP (x, 0),
+ gen_rtx ((INTVAL (XEXP (y, 1)) == size
+ ? (offset ? PRE_INC : POST_INC)
+ : (offset ? PRE_DEC : POST_DEC)),
+ Pmode, addr), 0))
+ {
+ /* Record that this insn has an implicit side effect. */
+ REG_NOTES (insn)
+ = gen_rtx (EXPR_LIST, REG_INC, addr, REG_NOTES (insn));
+
+ /* Modify the old increment-insn to simply copy
+ the already-incremented value of our register. */
+ SET_SRC (set) = addr;
+ /* Indicate insn must be re-recognized. */
+ INSN_CODE (incr) = -1;
+
+ /* If that makes it a no-op (copying the register into itself)
+ then delete it so it won't appear to be a "use" and a "set"
+ of this register. */
+ if (SET_DEST (set) == addr)
+ {
+ PUT_CODE (incr, NOTE);
+ NOTE_LINE_NUMBER (incr) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (incr) = 0;
+ }
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ /* Count an extra reference to the reg. When a reg is
+ incremented, spilling it is worse, so we want to make
+ that less likely. */
+ reg_n_refs[regno] += loop_depth;
+ /* Count the increment as a setting of the register,
+ even though it isn't a SET in rtl. */
+ reg_n_sets[regno]++;
+ }
+ }
+ }
+ }
+}
+#endif /* AUTO_INC_DEC */
+
+/* Scan expression X and store a 1-bit in LIVE for each reg it uses.
+ This is done assuming the registers needed from X
+ are those that have 1-bits in NEEDED.
+
+ On the final pass, FINAL is 1. This means try for autoincrement
+ and count the uses and deaths of each pseudo-reg.
+
+ INSN is the containing instruction. If INSN is dead, this function is not
+ called. */
+
+static void
+mark_used_regs (needed, live, x, final, insn)
+ regset needed;
+ regset live;
+ rtx x;
+ int final;
+ rtx insn;
+{
+ register RTX_CODE code;
+ register int regno;
+ int i;
+
+ retry:
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case PC:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case ASM_INPUT:
+ return;
+
+#ifdef HAVE_cc0
+ case CC0:
+ cc0_live = 1;
+ return;
+#endif
+
+ case CLOBBER:
+ /* If we are clobbering a MEM, mark any registers inside the address
+ as being used. */
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ mark_used_regs (needed, live, XEXP (XEXP (x, 0), 0), final, insn);
+ return;
+
+ case MEM:
+ /* Invalidate the data for the last MEM stored. We could do this only
+ if the addresses conflict, but this doesn't seem worthwhile. */
+ last_mem_set = 0;
+
+#ifdef AUTO_INC_DEC
+ if (final)
+ find_auto_inc (needed, x, insn);
+#endif
+ break;
+
+ case REG:
+      /* A register is being used here (not set);
+	 mark it as needed. */
+
+ regno = REGNO (x);
+ {
+ register int offset = regno / REGSET_ELT_BITS;
+ register REGSET_ELT_TYPE bit
+ = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
+ REGSET_ELT_TYPE all_needed = needed[offset] & bit;
+ REGSET_ELT_TYPE some_needed = needed[offset] & bit;
+
+ live[offset] |= bit;
+ /* A hard reg in a wide mode may really be multiple registers.
+ If so, mark all of them just like the first. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n;
+
+ /* For stack ptr or fixed arg pointer,
+ nothing below can be necessary, so waste no more time. */
+ if (regno == STACK_POINTER_REGNUM
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ || regno == HARD_FRAME_POINTER_REGNUM
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+ || regno == FRAME_POINTER_REGNUM)
+ {
+ /* If this is a register we are going to try to eliminate,
+ don't mark it live here. If we are successful in
+ eliminating it, it need not be live unless it is used for
+ pseudos, in which case it will have been set live when
+ it was allocated to the pseudos. If the register will not
+ be eliminated, reload will set it live at that point. */
+
+ if (! TEST_HARD_REG_BIT (elim_reg_set, regno))
+ regs_ever_live[regno] = 1;
+ return;
+ }
+ /* No death notes for global register variables;
+ their values are live after this function exits. */
+ if (global_regs[regno])
+ {
+ if (final)
+ reg_next_use[regno] = insn;
+ return;
+ }
+
+ n = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (--n > 0)
+ {
+ live[(regno + n) / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << ((regno + n) % REGSET_ELT_BITS);
+ some_needed
+ |= (needed[(regno + n) / REGSET_ELT_BITS]
+ & (REGSET_ELT_TYPE) 1 << ((regno + n) % REGSET_ELT_BITS));
+ all_needed
+ &= (needed[(regno + n) / REGSET_ELT_BITS]
+ & (REGSET_ELT_TYPE) 1 << ((regno + n) % REGSET_ELT_BITS));
+ }
+ }
+ if (final)
+ {
+ /* Record where each reg is used, so when the reg
+ is set we know the next insn that uses it. */
+
+ reg_next_use[regno] = insn;
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ /* If a hard reg is being used,
+ record that this function does use it. */
+
+ i = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ if (i == 0)
+ i = 1;
+ do
+ regs_ever_live[regno + --i] = 1;
+ while (i > 0);
+ }
+ else
+ {
+ /* Keep track of which basic block each reg appears in. */
+
+ register int blocknum = BLOCK_NUM (insn);
+
+ if (reg_basic_block[regno] == REG_BLOCK_UNKNOWN)
+ reg_basic_block[regno] = blocknum;
+ else if (reg_basic_block[regno] != blocknum)
+ reg_basic_block[regno] = REG_BLOCK_GLOBAL;
+
+ /* Count (weighted) number of uses of each reg. */
+
+ reg_n_refs[regno] += loop_depth;
+ }
+
+ /* Record and count the insns in which a reg dies.
+ If it is used in this insn and was dead below the insn
+ then it dies in this insn. If it was set in this insn,
+ we do not make a REG_DEAD note; likewise if we already
+ made such a note. */
+
+ if (! all_needed
+ && ! dead_or_set_p (insn, x)
+#if 0
+ && (regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
+#endif
+ )
+ {
+ /* If none of the words in X is needed, make a REG_DEAD
+ note. Otherwise, we must make partial REG_DEAD notes. */
+ if (! some_needed)
+ {
+ REG_NOTES (insn)
+ = gen_rtx (EXPR_LIST, REG_DEAD, x, REG_NOTES (insn));
+ reg_n_deaths[regno]++;
+ }
+ else
+ {
+ int i;
+
+ /* Don't make a REG_DEAD note for a part of a register
+ that is set in the insn. */
+
+ for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
+ i >= 0; i--)
+ if ((needed[(regno + i) / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1
+ << ((regno + i) % REGSET_ELT_BITS))) == 0
+ && ! dead_or_set_regno_p (insn, regno + i))
+ REG_NOTES (insn)
+ = gen_rtx (EXPR_LIST, REG_DEAD,
+ gen_rtx (REG, reg_raw_mode[regno + i],
+ regno + i),
+ REG_NOTES (insn));
+ }
+ }
+ }
+ }
+ return;
+
+ case SET:
+ {
+ register rtx testreg = SET_DEST (x);
+ int mark_dest = 0;
+
+ /* If storing into MEM, don't show it as being used. But do
+ show the address as being used. */
+ if (GET_CODE (testreg) == MEM)
+ {
+#ifdef AUTO_INC_DEC
+ if (final)
+ find_auto_inc (needed, testreg, insn);
+#endif
+ mark_used_regs (needed, live, XEXP (testreg, 0), final, insn);
+ mark_used_regs (needed, live, SET_SRC (x), final, insn);
+ return;
+ }
+
+ /* Storing in STRICT_LOW_PART is like storing in a reg
+	 in that this SET might be dead, so ignore it in TESTREG,
+	 but in some other ways it is like using the reg.
+
+ Storing in a SUBREG or a bit field is like storing the entire
+ register in that if the register's value is not used
+ then this SET is not needed. */
+ while (GET_CODE (testreg) == STRICT_LOW_PART
+ || GET_CODE (testreg) == ZERO_EXTRACT
+ || GET_CODE (testreg) == SIGN_EXTRACT
+ || GET_CODE (testreg) == SUBREG)
+ {
+ /* Modifying a single register in an alternate mode
+ does not use any of the old value. But these other
+ ways of storing in a register do use the old value. */
+ if (GET_CODE (testreg) == SUBREG
+ && !(REG_SIZE (SUBREG_REG (testreg)) > REG_SIZE (testreg)))
+ ;
+ else
+ mark_dest = 1;
+
+ testreg = XEXP (testreg, 0);
+ }
+
+ /* If this is a store into a register,
+ recursively scan the value being stored. */
+
+ if (GET_CODE (testreg) == REG
+ && (regno = REGNO (testreg), regno != FRAME_POINTER_REGNUM)
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ && regno != HARD_FRAME_POINTER_REGNUM
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+ )
+ /* We used to exclude global_regs here, but that seems wrong.
+ Storing in them is like storing in mem. */
+ {
+ mark_used_regs (needed, live, SET_SRC (x), final, insn);
+ if (mark_dest)
+ mark_used_regs (needed, live, SET_DEST (x), final, insn);
+ return;
+ }
+ }
+ break;
+
+ case RETURN:
+ /* If exiting needs the right stack value, consider this insn as
+ using the stack pointer. In any event, consider it as using
+ all global registers. */
+
+#ifdef EXIT_IGNORE_STACK
+ if (! EXIT_IGNORE_STACK
+ || (! FRAME_POINTER_REQUIRED && flag_omit_frame_pointer))
+#endif
+ live[STACK_POINTER_REGNUM / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << (STACK_POINTER_REGNUM % REGSET_ELT_BITS);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i])
+ live[i / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
+ break;
+ }
+
+ /* Recursively scan the operands of this expression. */
+
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ /* Tail recursive case: save a function call level. */
+ if (i == 0)
+ {
+ x = XEXP (x, 0);
+ goto retry;
+ }
+ mark_used_regs (needed, live, XEXP (x, i), final, insn);
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_used_regs (needed, live, XVECEXP (x, i, j), final, insn);
+ }
+ }
+ }
+}
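+
+/* Editorial sketch (not part of the imported source): the live and
+   needed regsets above store one bit per register number, with
+   REGSET_ELT_BITS bits per array element.  A hypothetical helper
+   macro makes the indexing explicit; the open-coded expressions in
+   mark_used_regs expand to exactly this pattern. */
+#if 0 /* illustration only */
+#define SET_REGNO_LIVE(set, regno) \
+  ((set)[(regno) / REGSET_ELT_BITS] \
+   |= (REGSET_ELT_TYPE) 1 << ((regno) % REGSET_ELT_BITS))
+#endif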
+
+#ifdef AUTO_INC_DEC
+
+static int
+try_pre_increment_1 (insn)
+ rtx insn;
+{
+ /* Find the next use of this reg. If in same basic block,
+ make it do pre-increment or pre-decrement if appropriate. */
+ rtx x = PATTERN (insn);
+ HOST_WIDE_INT amount = ((GET_CODE (SET_SRC (x)) == PLUS ? 1 : -1)
+ * INTVAL (XEXP (SET_SRC (x), 1)));
+ int regno = REGNO (SET_DEST (x));
+ rtx y = reg_next_use[regno];
+ if (y != 0
+ && BLOCK_NUM (y) == BLOCK_NUM (insn)
+ /* Don't do this if the reg dies, or gets set in y; a standard addressing
+ mode would be better. */
+ && ! dead_or_set_p (y, SET_DEST (x))
+ && try_pre_increment (y, SET_DEST (PATTERN (insn)),
+ amount))
+ {
+ /* We have found a suitable auto-increment
+ and already changed insn Y to do it.
+ So flush this increment-instruction. */
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ /* Count a reference to this reg for the increment
+	 insn we are deleting.  When a reg is incremented,
+ spilling it is worse, so we want to make that
+ less likely. */
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ reg_n_refs[regno] += loop_depth;
+ reg_n_sets[regno]++;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/* Try to change INSN so that it does pre-increment or pre-decrement
+ addressing on register REG in order to add AMOUNT to REG.
+ AMOUNT is negative for pre-decrement.
+ Returns 1 if the change could be made.
+   This checks everything about the validity of the result of
+   modifying INSN. */
+
+static int
+try_pre_increment (insn, reg, amount)
+ rtx insn, reg;
+ HOST_WIDE_INT amount;
+{
+ register rtx use;
+
+ /* Nonzero if we can try to make a pre-increment or pre-decrement.
+ For example, addl $4,r1; movl (r1),... can become movl +(r1),... */
+ int pre_ok = 0;
+ /* Nonzero if we can try to make a post-increment or post-decrement.
+ For example, addl $4,r1; movl -4(r1),... can become movl (r1)+,...
+ It is possible for both PRE_OK and POST_OK to be nonzero if the machine
+ supports both pre-inc and post-inc, or both pre-dec and post-dec. */
+ int post_ok = 0;
+
+ /* Nonzero if the opportunity actually requires post-inc or post-dec. */
+ int do_post = 0;
+
+ /* From the sign of increment, see which possibilities are conceivable
+ on this target machine. */
+#ifdef HAVE_PRE_INCREMENT
+ if (amount > 0)
+ pre_ok = 1;
+#endif
+#ifdef HAVE_POST_INCREMENT
+ if (amount > 0)
+ post_ok = 1;
+#endif
+
+#ifdef HAVE_PRE_DECREMENT
+ if (amount < 0)
+ pre_ok = 1;
+#endif
+#ifdef HAVE_POST_DECREMENT
+ if (amount < 0)
+ post_ok = 1;
+#endif
+
+ if (! (pre_ok || post_ok))
+ return 0;
+
+ /* It is not safe to add a side effect to a jump insn
+ because if the incremented register is spilled and must be reloaded
+ there would be no way to store the incremented value back in memory. */
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ return 0;
+
+ use = 0;
+ if (pre_ok)
+ use = find_use_as_address (PATTERN (insn), reg, 0);
+ if (post_ok && (use == 0 || use == (rtx) 1))
+ {
+ use = find_use_as_address (PATTERN (insn), reg, -amount);
+ do_post = 1;
+ }
+
+ if (use == 0 || use == (rtx) 1)
+ return 0;
+
+ if (GET_MODE_SIZE (GET_MODE (use)) != (amount > 0 ? amount : - amount))
+ return 0;
+
+ /* See if this combination of instruction and addressing mode exists. */
+ if (! validate_change (insn, &XEXP (use, 0),
+ gen_rtx (amount > 0
+ ? (do_post ? POST_INC : PRE_INC)
+ : (do_post ? POST_DEC : PRE_DEC),
+ Pmode, reg), 0))
+ return 0;
+
+ /* Record that this insn now has an implicit side effect on X. */
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_INC, reg, REG_NOTES (insn));
+ return 1;
+}
+
+#endif /* AUTO_INC_DEC */
+
+/* Find the place in the rtx X where REG is used as a memory address.
+ Return the MEM rtx that so uses it.
+ If PLUSCONST is nonzero, search instead for a memory address equivalent to
+ (plus REG (const_int PLUSCONST)).
+
+ If such an address does not appear, return 0.
+ If REG appears more than once, or is used other than in such an address,
+ return (rtx)1. */
+
+static rtx
+find_use_as_address (x, reg, plusconst)
+ register rtx x;
+ rtx reg;
+ HOST_WIDE_INT plusconst;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+ register rtx value = 0;
+ register rtx tem;
+
+ if (code == MEM && XEXP (x, 0) == reg && plusconst == 0)
+ return x;
+
+ if (code == MEM && GET_CODE (XEXP (x, 0)) == PLUS
+ && XEXP (XEXP (x, 0), 0) == reg
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) == plusconst)
+ return x;
+
+ if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
+ {
+ /* If REG occurs inside a MEM used in a bit-field reference,
+ that is unacceptable. */
+ if (find_use_as_address (XEXP (x, 0), reg, 0) != 0)
+ return (rtx) (HOST_WIDE_INT) 1;
+ }
+
+ if (x == reg)
+ return (rtx) (HOST_WIDE_INT) 1;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ tem = find_use_as_address (XEXP (x, i), reg, plusconst);
+ if (value == 0)
+ value = tem;
+ else if (tem != 0)
+ return (rtx) (HOST_WIDE_INT) 1;
+ }
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ {
+ tem = find_use_as_address (XVECEXP (x, i, j), reg, plusconst);
+ if (value == 0)
+ value = tem;
+ else if (tem != 0)
+ return (rtx) (HOST_WIDE_INT) 1;
+ }
+ }
+ }
+
+ return value;
+}
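+
+/* Worked example (editorial): given the pattern
+   (set (reg:SI 2) (mem:SI (plus:SI (reg:SI 1) (const_int -4)))),
+   calling find_use_as_address with REG = (reg:SI 1) and
+   PLUSCONST = -4 returns the MEM; with PLUSCONST = 0 it returns
+   (rtx) 1, since reg 1 is used, but not as a plain address. */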
+
+/* Write information about registers and basic blocks into FILE.
+ This is part of making a debugging dump. */
+
+void
+dump_flow_info (file)
+ FILE *file;
+{
+ register int i;
+ static char *reg_class_names[] = REG_CLASS_NAMES;
+
+ fprintf (file, "%d registers.\n", max_regno);
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_n_refs[i])
+ {
+ enum reg_class class, altclass;
+ fprintf (file, "\nRegister %d used %d times across %d insns",
+ i, reg_n_refs[i], reg_live_length[i]);
+ if (reg_basic_block[i] >= 0)
+ fprintf (file, " in block %d", reg_basic_block[i]);
+ if (reg_n_deaths[i] != 1)
+ fprintf (file, "; dies in %d places", reg_n_deaths[i]);
+ if (reg_n_calls_crossed[i] == 1)
+ fprintf (file, "; crosses 1 call");
+ else if (reg_n_calls_crossed[i])
+ fprintf (file, "; crosses %d calls", reg_n_calls_crossed[i]);
+ if (PSEUDO_REGNO_BYTES (i) != UNITS_PER_WORD)
+ fprintf (file, "; %d bytes", PSEUDO_REGNO_BYTES (i));
+ class = reg_preferred_class (i);
+ altclass = reg_alternate_class (i);
+ if (class != GENERAL_REGS || altclass != ALL_REGS)
+ {
+ if (altclass == ALL_REGS || class == ALL_REGS)
+ fprintf (file, "; pref %s", reg_class_names[(int) class]);
+ else if (altclass == NO_REGS)
+ fprintf (file, "; %s or none", reg_class_names[(int) class]);
+ else
+ fprintf (file, "; pref %s, else %s",
+ reg_class_names[(int) class],
+ reg_class_names[(int) altclass]);
+ }
+ if (REGNO_POINTER_FLAG (i))
+ fprintf (file, "; pointer");
+ fprintf (file, ".\n");
+ }
+ fprintf (file, "\n%d basic blocks.\n", n_basic_blocks);
+ for (i = 0; i < n_basic_blocks; i++)
+ {
+ register rtx head, jump;
+ register int regno;
+ fprintf (file, "\nBasic block %d: first insn %d, last %d.\n",
+ i,
+ INSN_UID (basic_block_head[i]),
+ INSN_UID (basic_block_end[i]));
+      /* The control flow graph's storage is freed as soon as
+	 flow_analysis returns; don't try to print it if it is gone. */
+ if (basic_block_drops_in)
+ {
+ fprintf (file, "Reached from blocks: ");
+ head = basic_block_head[i];
+ if (GET_CODE (head) == CODE_LABEL)
+ for (jump = LABEL_REFS (head);
+ jump != head;
+ jump = LABEL_NEXTREF (jump))
+ {
+ register int from_block = BLOCK_NUM (CONTAINING_INSN (jump));
+ fprintf (file, " %d", from_block);
+ }
+ if (basic_block_drops_in[i])
+ fprintf (file, " previous");
+ }
+ fprintf (file, "\nRegisters live at start:");
+ for (regno = 0; regno < max_regno; regno++)
+ {
+ register int offset = regno / REGSET_ELT_BITS;
+ register REGSET_ELT_TYPE bit
+ = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
+ if (basic_block_live_at_start[i][offset] & bit)
+ fprintf (file, " %d", regno);
+ }
+ fprintf (file, "\n");
+ }
+ fprintf (file, "\n");
+}
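+
+/* Editorial sketch of the dump format, with hypothetical values,
+   assembled from the fprintf calls above:
+
+     Register 57 used 4 times across 12 insns in block 2; dies in 2 places.
+     Basic block 2: first insn 103, last 117.  */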
diff --git a/gnu/usr.bin/cc/cc_int/fold-const.c b/gnu/usr.bin/cc/cc_int/fold-const.c
new file mode 100644
index 0000000..0417aab
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/fold-const.c
@@ -0,0 +1,4889 @@
+/* Fold a constant sub-tree into a single node for C-compiler
+ Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/*@@ This file should be rewritten to use an arbitrary precision
+ @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
+ @@ Perhaps the routines could also be used for bc/dc, and made a lib.
+ @@ The routines that translate from the ap rep should
+  @@ warn if precision et al. is lost.
+ @@ This would also make life easier when this technology is used
+ @@ for cross-compilers. */
+
+
+/* The entry points in this file are fold, size_int and size_binop.
+
+ fold takes a tree as argument and returns a simplified tree.
+
+ size_binop takes a tree code for an arithmetic operation
+ and two operands that are trees, and produces a tree for the
+ result, assuming the type comes from `sizetype'.
+
+ size_int takes an integer value, and creates a tree constant
+ with type from `sizetype'. */
+
+#include <stdio.h>
+#include <setjmp.h>
+#include "config.h"
+#include "flags.h"
+#include "tree.h"
+
+/* Handle floating overflow for `const_binop'. */
+static jmp_buf float_error;
+
+static void encode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT, HOST_WIDE_INT));
+static void decode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *, HOST_WIDE_INT *));
+int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, HOST_WIDE_INT *,
+ HOST_WIDE_INT *, HOST_WIDE_INT *,
+ HOST_WIDE_INT *));
+static int split_tree PROTO((tree, enum tree_code, tree *, tree *, int *));
+static tree const_binop PROTO((enum tree_code, tree, tree, int));
+static tree fold_convert PROTO((tree, tree));
+static enum tree_code invert_tree_comparison PROTO((enum tree_code));
+static enum tree_code swap_tree_comparison PROTO((enum tree_code));
+static int truth_value_p PROTO((enum tree_code));
+static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
+static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
+static tree eval_subst PROTO((tree, tree, tree, tree, tree));
+static tree omit_one_operand PROTO((tree, tree, tree));
+static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
+static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
+static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
+ tree, tree));
+static tree decode_field_reference PROTO((tree, int *, int *,
+ enum machine_mode *, int *,
+ int *, tree *));
+static int all_ones_mask_p PROTO((tree, int));
+static int simple_operand_p PROTO((tree));
+static tree range_test PROTO((enum tree_code, tree, enum tree_code,
+ enum tree_code, tree, tree, tree));
+static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
+static tree strip_compound_expr PROTO((tree, tree));
+
+#ifndef BRANCH_COST
+#define BRANCH_COST 1
+#endif
+
+/* Yield nonzero if a signed left shift of A by B bits overflows. */
+#define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))
+
+/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
+ Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
+ Then this yields nonzero if overflow occurred during the addition.
+ Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
+ Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
+#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
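+
+/* Editorial demonstration (not part of the imported source), assuming
+   a 32-bit int for the sample values: two large positive operands
+   wrap to a negative sum, which the macro flags; a mixed-sign pair
+   cannot overflow, so it yields zero. */
+#if 0 /* illustration only */
+static void
+overflow_sum_sign_demo ()
+{
+  int a = 0x7fffffff, b = 1;
+  int sum = (int) ((unsigned) a + (unsigned) b);	/* wraps negative */
+  int wrapped = overflow_sum_sign (a, b, sum);		/* nonzero */
+  int clean = overflow_sum_sign (-1, 1, -1 + 1);	/* zero */
+}
+#endif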
+
+/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
+ We do that by representing the two-word integer in 4 words, with only
+ HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number. */
+
+#define LOWPART(x) \
+ ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
+#define HIGHPART(x) \
+ ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
+#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
+
+/* Unpack a two-word integer into 4 words.
+ LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
+ WORDS points to the array of HOST_WIDE_INTs. */
+
+static void
+encode (words, low, hi)
+ HOST_WIDE_INT *words;
+ HOST_WIDE_INT low, hi;
+{
+ words[0] = LOWPART (low);
+ words[1] = HIGHPART (low);
+ words[2] = LOWPART (hi);
+ words[3] = HIGHPART (hi);
+}
+
+/* Pack an array of 4 words into a two-word integer.
+ WORDS points to the array of words.
+ The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
+
+static void
+decode (words, low, hi)
+ HOST_WIDE_INT *words;
+ HOST_WIDE_INT *low, *hi;
+{
+ *low = words[0] | words[1] * BASE;
+ *hi = words[2] | words[3] * BASE;
+}
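+
+/* Editorial round-trip sketch (not part of the imported source):
+   encode followed by decode reproduces the original doubleword,
+   since the four words hold HOST_BITS_PER_WIDE_INT/2 bits each,
+   low part first. */
+#if 0 /* illustration only */
+static void
+encode_decode_demo ()
+{
+  HOST_WIDE_INT words[4], low, hi;
+  encode (words, (HOST_WIDE_INT) 0x1234, (HOST_WIDE_INT) 0x5678);
+  decode (words, &low, &hi);
+  /* Here low == 0x1234 and hi == 0x5678 again. */
+}
+#endif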
+
+/* Make the integer constant T valid for its type
+ by setting to 0 or 1 all the bits in the constant
+ that don't belong in the type.
+ Yield 1 if a signed overflow occurs, 0 otherwise.
+ If OVERFLOW is nonzero, a signed overflow has already occurred
+ in calculating T, so propagate it.
+
+ Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
+ if it exists. */
+
+int
+force_fit_type (t, overflow)
+ tree t;
+ int overflow;
+{
+ HOST_WIDE_INT low, high;
+ register int prec;
+
+ if (TREE_CODE (t) == REAL_CST)
+ {
+#ifdef CHECK_FLOAT_VALUE
+ CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
+ overflow);
+#endif
+ return overflow;
+ }
+
+ else if (TREE_CODE (t) != INTEGER_CST)
+ return overflow;
+
+ low = TREE_INT_CST_LOW (t);
+ high = TREE_INT_CST_HIGH (t);
+
+ if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
+ prec = POINTER_SIZE;
+ else
+ prec = TYPE_PRECISION (TREE_TYPE (t));
+
+ /* First clear all bits that are beyond the type's precision. */
+
+ if (prec == 2 * HOST_BITS_PER_WIDE_INT)
+ ;
+ else if (prec > HOST_BITS_PER_WIDE_INT)
+ {
+ TREE_INT_CST_HIGH (t)
+ &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
+ }
+ else
+ {
+ TREE_INT_CST_HIGH (t) = 0;
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
+ }
+
+ /* Unsigned types do not suffer sign extension or overflow. */
+ if (TREE_UNSIGNED (TREE_TYPE (t)))
+ return 0;
+
+ /* If the value's sign bit is set, extend the sign. */
+ if (prec != 2 * HOST_BITS_PER_WIDE_INT
+ && (prec > HOST_BITS_PER_WIDE_INT
+ ? (TREE_INT_CST_HIGH (t)
+ & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
+ : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
+ {
+ /* Value is negative:
+ set to 1 all the bits that are outside this type's precision. */
+ if (prec > HOST_BITS_PER_WIDE_INT)
+ {
+ TREE_INT_CST_HIGH (t)
+ |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
+ }
+ else
+ {
+ TREE_INT_CST_HIGH (t) = -1;
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
+ }
+ }
+
+ /* Yield nonzero if signed overflow occurred. */
+ return
+ ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
+ != 0);
+}
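+
+/* Worked example (editorial): for an 8-bit signed type, an
+   INTEGER_CST holding 0x1ff is first masked down to 0xff; bit 7 is
+   then set, so the bits above the precision are filled with ones and
+   the value becomes -1.  Because the stored value changed, the
+   function reports a signed overflow. */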
+
+/* Add two doubleword integers with doubleword result.
+ Each argument is given as two `HOST_WIDE_INT' pieces.
+ One argument is L1 and H1; the other, L2 and H2.
+ The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
+
+int
+add_double (l1, h1, l2, h2, lv, hv)
+ HOST_WIDE_INT l1, h1, l2, h2;
+ HOST_WIDE_INT *lv, *hv;
+{
+ HOST_WIDE_INT l, h;
+
+ l = l1 + l2;
+ h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);
+
+ *lv = l;
+ *hv = h;
+ return overflow_sum_sign (h1, h2, h);
+}
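+
+/* Worked example (editorial): with 32-bit words, adding
+   l1 = 0xffffffff and l2 = 1 gives l = 0; the unsigned comparison
+   (unsigned) 0 < 0xffffffff detects the carry, which is then added
+   into the high word. */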
+
+/* Negate a doubleword integer with doubleword result.
+ Return nonzero if the operation overflows, assuming it's signed.
+ The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
+ The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
+
+int
+neg_double (l1, h1, lv, hv)
+ HOST_WIDE_INT l1, h1;
+ HOST_WIDE_INT *lv, *hv;
+{
+ if (l1 == 0)
+ {
+ *lv = 0;
+ *hv = - h1;
+ return (*hv & h1) < 0;
+ }
+ else
+ {
+ *lv = - l1;
+ *hv = ~ h1;
+ return 0;
+ }
+}
+
+/* Multiply two doubleword integers with doubleword result.
+ Return nonzero if the operation overflows, assuming it's signed.
+ Each argument is given as two `HOST_WIDE_INT' pieces.
+ One argument is L1 and H1; the other, L2 and H2.
+ The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
+
+int
+mul_double (l1, h1, l2, h2, lv, hv)
+ HOST_WIDE_INT l1, h1, l2, h2;
+ HOST_WIDE_INT *lv, *hv;
+{
+ HOST_WIDE_INT arg1[4];
+ HOST_WIDE_INT arg2[4];
+ HOST_WIDE_INT prod[4 * 2];
+ register unsigned HOST_WIDE_INT carry;
+ register int i, j, k;
+ HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
+
+ encode (arg1, l1, h1);
+ encode (arg2, l2, h2);
+
+ bzero ((char *) prod, sizeof prod);
+
+ for (i = 0; i < 4; i++)
+ {
+ carry = 0;
+ for (j = 0; j < 4; j++)
+ {
+ k = i + j;
+ /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
+ carry += arg1[i] * arg2[j];
+ /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
+ carry += prod[k];
+ prod[k] = LOWPART (carry);
+ carry = HIGHPART (carry);
+ }
+ prod[i + 4] = carry;
+ }
+
+ decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */
+
+ /* Check for overflow by calculating the top half of the answer in full;
+ it should agree with the low half's sign bit. */
+ decode (prod+4, &toplow, &tophigh);
+ if (h1 < 0)
+ {
+ neg_double (l2, h2, &neglow, &neghigh);
+ add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
+ }
+ if (h2 < 0)
+ {
+ neg_double (l1, h1, &neglow, &neghigh);
+ add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
+ }
+ return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
+}
+
+/* Shift the doubleword integer in L1, H1 left by COUNT places
+ keeping only PREC bits of result.
+ Shift right if COUNT is negative.
+ ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
+ Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
+
+void
+lshift_double (l1, h1, count, prec, lv, hv, arith)
+ HOST_WIDE_INT l1, h1, count;
+ int prec;
+ HOST_WIDE_INT *lv, *hv;
+ int arith;
+{
+ if (count < 0)
+ {
+ rshift_double (l1, h1, - count, prec, lv, hv, arith);
+ return;
+ }
+
+ if (count >= prec)
+    count = (unsigned HOST_WIDE_INT) count % prec;
+
+ if (count >= HOST_BITS_PER_WIDE_INT)
+ {
+      *hv = (unsigned HOST_WIDE_INT) l1 << (count - HOST_BITS_PER_WIDE_INT);
+ *lv = 0;
+ }
+ else
+ {
+      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
+	     | ((unsigned HOST_WIDE_INT) l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
+ *lv = (unsigned HOST_WIDE_INT) l1 << count;
+ }
+}
+
+/* Shift the doubleword integer in L1, H1 right by COUNT places
+ keeping only PREC bits of result. COUNT must be positive.
+ ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
+ Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
+
+void
+rshift_double (l1, h1, count, prec, lv, hv, arith)
+ HOST_WIDE_INT l1, h1, count;
+ int prec;
+ HOST_WIDE_INT *lv, *hv;
+ int arith;
+{
+ unsigned HOST_WIDE_INT signmask;
+ signmask = (arith
+ ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
+ : 0);
+
+ if (count >= prec)
+ count = (unsigned HOST_WIDE_INT) count % prec;
+
+ if (count >= HOST_BITS_PER_WIDE_INT)
+ {
+ *hv = signmask;
+      *lv = ((signmask << (2 * HOST_BITS_PER_WIDE_INT - count - 1) << 1)
+	     | ((unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT)));
+ }
+ else
+ {
+      *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
+	     | ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
+      *hv = ((signmask << (HOST_BITS_PER_WIDE_INT - count))
+	     | ((unsigned HOST_WIDE_INT) h1 >> count));
+ }
+}
+
+/* Rotate the doubleword integer in L1, H1 left by COUNT places
+ keeping only PREC bits of result.
+ Rotate right if COUNT is negative.
+ Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
+
+void
+lrotate_double (l1, h1, count, prec, lv, hv)
+ HOST_WIDE_INT l1, h1, count;
+ int prec;
+ HOST_WIDE_INT *lv, *hv;
+{
+ HOST_WIDE_INT arg1[4];
+ register int i;
+ register int carry;
+
+ if (count < 0)
+ {
+ rrotate_double (l1, h1, - count, prec, lv, hv);
+ return;
+ }
+
+ encode (arg1, l1, h1);
+
+ if (count > prec)
+ count = prec;
+
+  carry = arg1[4 - 1] >> (16 - 1);
+ while (count > 0)
+ {
+ for (i = 0; i < 4; i++)
+ {
+ carry += arg1[i] << 1;
+ arg1[i] = LOWPART (carry);
+ carry = HIGHPART (carry);
+ }
+ count--;
+ }
+
+ decode (arg1, lv, hv);
+}
+
+/* Rotate the doubleword integer in L1, H1 right by COUNT places
+   keeping only PREC bits of result.  COUNT must be positive.
+ Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
+
+void
+rrotate_double (l1, h1, count, prec, lv, hv)
+ HOST_WIDE_INT l1, h1, count;
+ int prec;
+ HOST_WIDE_INT *lv, *hv;
+{
+ HOST_WIDE_INT arg1[4];
+ register int i;
+ register int carry;
+
+ encode (arg1, l1, h1);
+
+ if (count > prec)
+ count = prec;
+
+ carry = arg1[0] & 1;
+ while (count > 0)
+ {
+ for (i = 4 - 1; i >= 0; i--)
+ {
+ carry *= BASE;
+ carry += arg1[i];
+ arg1[i] = LOWPART (carry >> 1);
+ }
+ count--;
+ }
+
+ decode (arg1, lv, hv);
+}
+
+/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
+ for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
+ CODE is a tree code for a kind of division, one of
+ TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
+   or EXACT_DIV_EXPR.
+   It controls how the quotient is rounded to an integer.
+ Return nonzero if the operation overflows.
+ UNS nonzero says do unsigned division. */
+
+int
+div_and_round_double (code, uns,
+ lnum_orig, hnum_orig, lden_orig, hden_orig,
+ lquo, hquo, lrem, hrem)
+ enum tree_code code;
+ int uns;
+ HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
+ HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
+ HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
+{
+ int quo_neg = 0;
+ HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
+ HOST_WIDE_INT den[4], quo[4];
+ register int i, j;
+ unsigned HOST_WIDE_INT work;
+ register int carry = 0;
+ HOST_WIDE_INT lnum = lnum_orig;
+ HOST_WIDE_INT hnum = hnum_orig;
+ HOST_WIDE_INT lden = lden_orig;
+ HOST_WIDE_INT hden = hden_orig;
+ int overflow = 0;
+
+ if ((hden == 0) && (lden == 0))
+ abort ();
+
+ /* calculate quotient sign and convert operands to unsigned. */
+ if (!uns)
+ {
+ if (hnum < 0)
+ {
+ quo_neg = ~ quo_neg;
+ /* (minimum integer) / (-1) is the only overflow case. */
+ if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
+ overflow = 1;
+ }
+ if (hden < 0)
+ {
+ quo_neg = ~ quo_neg;
+ neg_double (lden, hden, &lden, &hden);
+ }
+ }
+
+ if (hnum == 0 && hden == 0)
+ { /* single precision */
+ *hquo = *hrem = 0;
+ /* This unsigned division rounds toward zero. */
+ *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
+ goto finish_up;
+ }
+
+ if (hnum == 0)
+ { /* trivial case: dividend < divisor */
+ /* hden != 0 already checked. */
+ *hquo = *lquo = 0;
+ *hrem = hnum;
+ *lrem = lnum;
+ goto finish_up;
+ }
+
+ bzero ((char *) quo, sizeof quo);
+
+  bzero ((char *) num, sizeof num);	/* to zero the extra scaling element */
+ bzero ((char *) den, sizeof den);
+
+ encode (num, lnum, hnum);
+ encode (den, lden, hden);
+
+ /* Special code for when the divisor < BASE. */
+ if (hden == 0 && lden < BASE)
+ {
+ /* hnum != 0 already checked. */
+ for (i = 4 - 1; i >= 0; i--)
+ {
+ work = num[i] + carry * BASE;
+ quo[i] = work / (unsigned HOST_WIDE_INT) lden;
+ carry = work % (unsigned HOST_WIDE_INT) lden;
+ }
+ }
+ else
+ {
+ /* Full double precision division,
+ with thanks to Don Knuth's "Seminumerical Algorithms". */
+ int quo_est, scale, num_hi_sig, den_hi_sig;
+
+ /* Find the highest non-zero divisor digit. */
+ for (i = 4 - 1; ; i--)
+ if (den[i] != 0) {
+ den_hi_sig = i;
+ break;
+ }
+
+      /* Ensure that the first digit of the divisor is at least BASE/2.
+ This is required by the quotient digit estimation algorithm. */
+
+ scale = BASE / (den[den_hi_sig] + 1);
+ if (scale > 1) { /* scale divisor and dividend */
+ carry = 0;
+ for (i = 0; i <= 4 - 1; i++) {
+ work = (num[i] * scale) + carry;
+ num[i] = LOWPART (work);
+ carry = HIGHPART (work);
+      }
+      num[4] = carry;
+ carry = 0;
+ for (i = 0; i <= 4 - 1; i++) {
+ work = (den[i] * scale) + carry;
+ den[i] = LOWPART (work);
+ carry = HIGHPART (work);
+ if (den[i] != 0) den_hi_sig = i;
+ }
+ }
+
+ num_hi_sig = 4;
+
+ /* Main loop */
+ for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
+ /* guess the next quotient digit, quo_est, by dividing the first
+ two remaining dividend digits by the high order quotient digit.
+ quo_est is never low and is at most 2 high. */
+ unsigned HOST_WIDE_INT tmp;
+
+ num_hi_sig = i + den_hi_sig + 1;
+ work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
+ if (num[num_hi_sig] != den[den_hi_sig])
+ quo_est = work / den[den_hi_sig];
+ else
+ quo_est = BASE - 1;
+
+ /* refine quo_est so it's usually correct, and at most one high. */
+ tmp = work - quo_est * den[den_hi_sig];
+ if (tmp < BASE
+ && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
+ quo_est--;
+
+ /* Try QUO_EST as the quotient digit, by multiplying the
+ divisor by QUO_EST and subtracting from the remaining dividend.
+ Keep in mind that QUO_EST is the I - 1st digit. */
+
+ carry = 0;
+ for (j = 0; j <= den_hi_sig; j++)
+ {
+ work = quo_est * den[j] + carry;
+ carry = HIGHPART (work);
+ work = num[i + j] - LOWPART (work);
+ num[i + j] = LOWPART (work);
+ carry += HIGHPART (work) != 0;
+ }
+
+ /* if quo_est was high by one, then num[i] went negative and
+ we need to correct things. */
+
+ if (num[num_hi_sig] < carry)
+ {
+ quo_est--;
+ carry = 0; /* add divisor back in */
+ for (j = 0; j <= den_hi_sig; j++)
+ {
+ work = num[i + j] + den[j] + carry;
+ carry = HIGHPART (work);
+ num[i + j] = LOWPART (work);
+ }
+ num [num_hi_sig] += carry;
+ }
+
+ /* store the quotient digit. */
+ quo[i] = quo_est;
+ }
+ }
+
+ decode (quo, lquo, hquo);
+
+ finish_up:
+ /* if result is negative, make it so. */
+ if (quo_neg)
+ neg_double (*lquo, *hquo, lquo, hquo);
+
+ /* compute trial remainder: rem = num - (quo * den) */
+ mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
+ neg_double (*lrem, *hrem, lrem, hrem);
+ add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
+
+ switch (code)
+ {
+ case TRUNC_DIV_EXPR:
+ case TRUNC_MOD_EXPR: /* round toward zero */
+ case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
+ return overflow;
+
+ case FLOOR_DIV_EXPR:
+ case FLOOR_MOD_EXPR: /* round toward negative infinity */
+ if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
+ {
+ /* quo = quo - 1; */
+ add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
+ lquo, hquo);
+ }
+ else return overflow;
+ break;
+
+ case CEIL_DIV_EXPR:
+ case CEIL_MOD_EXPR: /* round toward positive infinity */
+ if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
+ {
+ add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
+ lquo, hquo);
+ }
+ else return overflow;
+ break;
+
+ case ROUND_DIV_EXPR:
+ case ROUND_MOD_EXPR: /* round to closest integer */
+ {
+ HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
+ HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
+
+ /* get absolute values */
+ if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
+ if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
+
+	/* if (2 * abs (lrem) > abs (lden)) */
+ mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
+ labs_rem, habs_rem, &ltwice, &htwice);
+ if (((unsigned HOST_WIDE_INT) habs_den
+ < (unsigned HOST_WIDE_INT) htwice)
+ || (((unsigned HOST_WIDE_INT) habs_den
+ == (unsigned HOST_WIDE_INT) htwice)
+		&& ((unsigned HOST_WIDE_INT) labs_den
+ < (unsigned HOST_WIDE_INT) ltwice)))
+ {
+ if (*hquo < 0)
+ /* quo = quo - 1; */
+ add_double (*lquo, *hquo,
+ (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
+ else
+ /* quo = quo + 1; */
+ add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
+ lquo, hquo);
+ }
+ else return overflow;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* compute true remainder: rem = num - (quo * den) */
+ mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
+ neg_double (*lrem, *hrem, lrem, hrem);
+ add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
+ return overflow;
+}
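+
+/* Worked examples (editorial): dividing -7 by 2 gives quotient -3,
+   remainder -1 under TRUNC_DIV_EXPR, but quotient -4, remainder 1
+   under FLOOR_DIV_EXPR.  Under ROUND_DIV_EXPR, 8 divided by 3 gives
+   quotient 3 and remainder -1, because twice the trial remainder (4)
+   exceeds the divisor. */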
+
+#ifndef REAL_ARITHMETIC
+/* Effectively truncate a real value to represent the nearest possible value
+ in a narrower mode. The result is actually represented in the same data
+ type as the argument, but its value is usually different.
+
+ A trap may occur during the FP operations and it is the responsibility
+ of the calling function to have a handler established. */
+
+REAL_VALUE_TYPE
+real_value_truncate (mode, arg)
+ enum machine_mode mode;
+ REAL_VALUE_TYPE arg;
+{
+ return REAL_VALUE_TRUNCATE (mode, arg);
+}
+
+#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+
+/* Check for infinity in an IEEE double precision number. */
+
+int
+target_isinf (x)
+ REAL_VALUE_TYPE x;
+{
+ /* The IEEE 64-bit double format. */
+ union {
+ REAL_VALUE_TYPE d;
+ struct {
+ unsigned sign : 1;
+ unsigned exponent : 11;
+ unsigned mantissa1 : 20;
+ unsigned mantissa2;
+ } little_endian;
+ struct {
+ unsigned mantissa2;
+ unsigned mantissa1 : 20;
+ unsigned exponent : 11;
+ unsigned sign : 1;
+ } big_endian;
+ } u;
+
+ u.d = dconstm1;
+ if (u.big_endian.sign == 1)
+ {
+ u.d = x;
+ return (u.big_endian.exponent == 2047
+ && u.big_endian.mantissa1 == 0
+ && u.big_endian.mantissa2 == 0);
+ }
+ else
+ {
+ u.d = x;
+ return (u.little_endian.exponent == 2047
+ && u.little_endian.mantissa1 == 0
+ && u.little_endian.mantissa2 == 0);
+ }
+}
+
+/* Check whether an IEEE double precision number is a NaN. */
+
+int
+target_isnan (x)
+ REAL_VALUE_TYPE x;
+{
+ /* The IEEE 64-bit double format. */
+ union {
+ REAL_VALUE_TYPE d;
+ struct {
+ unsigned sign : 1;
+ unsigned exponent : 11;
+ unsigned mantissa1 : 20;
+ unsigned mantissa2;
+ } little_endian;
+ struct {
+ unsigned mantissa2;
+ unsigned mantissa1 : 20;
+ unsigned exponent : 11;
+ unsigned sign : 1;
+ } big_endian;
+ } u;
+
+ u.d = dconstm1;
+ if (u.big_endian.sign == 1)
+ {
+ u.d = x;
+ return (u.big_endian.exponent == 2047
+ && (u.big_endian.mantissa1 != 0
+ || u.big_endian.mantissa2 != 0));
+ }
+ else
+ {
+ u.d = x;
+ return (u.little_endian.exponent == 2047
+ && (u.little_endian.mantissa1 != 0
+ || u.little_endian.mantissa2 != 0));
+ }
+}
+
+/* Check for a negative IEEE double precision number. */
+
+int
+target_negative (x)
+ REAL_VALUE_TYPE x;
+{
+ /* The IEEE 64-bit double format. */
+ union {
+ REAL_VALUE_TYPE d;
+ struct {
+ unsigned sign : 1;
+ unsigned exponent : 11;
+ unsigned mantissa1 : 20;
+ unsigned mantissa2;
+ } little_endian;
+ struct {
+ unsigned mantissa2;
+ unsigned mantissa1 : 20;
+ unsigned exponent : 11;
+ unsigned sign : 1;
+ } big_endian;
+ } u;
+
+ u.d = dconstm1;
+ if (u.big_endian.sign == 1)
+ {
+ u.d = x;
+ return u.big_endian.sign;
+ }
+ else
+ {
+ u.d = x;
+ return u.little_endian.sign;
+ }
+}
+#else /* Target not IEEE */
+
+/* Let's assume other float formats don't have infinity.
+ (This can be overridden by redefining REAL_VALUE_ISINF.) */
+
+int
+target_isinf (x)
+ REAL_VALUE_TYPE x;
+{
+ return 0;
+}
+
+/* Let's assume other float formats don't have NaNs.
+ (This can be overridden by redefining REAL_VALUE_ISNAN.) */
+
+int
+target_isnan (x)
+ REAL_VALUE_TYPE x;
+{
+ return 0;
+}
+
+/* Let's assume other float formats don't have minus zero.
+ (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
+
+int
+target_negative (x)
+ REAL_VALUE_TYPE x;
+{
+ return x < 0;
+}
+#endif /* Target not IEEE */
+#endif /* no REAL_ARITHMETIC */
+
+/* Split a tree IN into a constant and a variable part
+ that could be combined with CODE to make IN.
+ CODE must be a commutative arithmetic operation.
+   Store the constant part into *CONP and the variable in *VARP.
+ Return 1 if this was done; zero means the tree IN did not decompose
+ this way.
+
+ If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
+ Therefore, we must tell the caller whether the variable part
+ was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
+ The value stored is the coefficient for the variable term.
+ The constant term we return should always be added;
+ we negate it if necessary. */
+
+static int
+split_tree (in, code, varp, conp, varsignp)
+ tree in;
+ enum tree_code code;
+ tree *varp, *conp;
+ int *varsignp;
+{
+ register tree outtype = TREE_TYPE (in);
+ *varp = 0;
+ *conp = 0;
+
+ /* Strip any conversions that don't change the machine mode. */
+ while ((TREE_CODE (in) == NOP_EXPR
+ || TREE_CODE (in) == CONVERT_EXPR)
+ && (TYPE_MODE (TREE_TYPE (in))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
+ in = TREE_OPERAND (in, 0);
+
+ if (TREE_CODE (in) == code
+ || (! FLOAT_TYPE_P (TREE_TYPE (in))
+ /* We can associate addition and subtraction together
+ (even though the C standard doesn't say so)
+ for integers because the value is not affected.
+ For reals, the value might be affected, so we can't. */
+ && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
+ || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
+ {
+ enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
+ if (code == INTEGER_CST)
+ {
+ *conp = TREE_OPERAND (in, 0);
+ *varp = TREE_OPERAND (in, 1);
+ if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
+ && TREE_TYPE (*varp) != outtype)
+ *varp = convert (outtype, *varp);
+ *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
+ return 1;
+ }
+ if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
+ {
+ *conp = TREE_OPERAND (in, 1);
+ *varp = TREE_OPERAND (in, 0);
+ *varsignp = 1;
+ if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
+ && TREE_TYPE (*varp) != outtype)
+ *varp = convert (outtype, *varp);
+ if (TREE_CODE (in) == MINUS_EXPR)
+ {
+ /* If operation is subtraction and constant is second,
+ must negate it to get an additive constant.
+ And this cannot be done unless it is a manifest constant.
+ It could also be the address of a static variable.
+ We cannot negate that, so give up. */
+ if (TREE_CODE (*conp) == INTEGER_CST)
+ /* Subtracting from integer_zero_node loses for long long. */
+ *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
+ else
+ return 0;
+ }
+ return 1;
+ }
+ if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
+ {
+ *conp = TREE_OPERAND (in, 0);
+ *varp = TREE_OPERAND (in, 1);
+ if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
+ && TREE_TYPE (*varp) != outtype)
+ *varp = convert (outtype, *varp);
+ *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
+ return 1;
+ }
+ }
+ return 0;
+}
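+
+/* Worked example (editorial): splitting IN = (x - 5) with CODE ==
+   PLUS_EXPR stores x in *VARP, sets *VARSIGNP to 1, and stores the
+   negated constant -5 in *CONP, so the caller can always add the
+   constant part back in. */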
+
+/* Combine two constants ARG1 and ARG2 under operation CODE
+ to produce a new constant.
+ We assume ARG1 and ARG2 have the same data type,
+ or at least are the same kind of constant and the same machine mode.
+
+ If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
+
+static tree
+const_binop (code, arg1, arg2, notrunc)
+ enum tree_code code;
+ register tree arg1, arg2;
+ int notrunc;
+{
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ {
+ register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
+ register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
+ HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
+ HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
+ HOST_WIDE_INT low, hi;
+ HOST_WIDE_INT garbagel, garbageh;
+ register tree t;
+ int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
+ int overflow = 0;
+
+ switch (code)
+ {
+ case BIT_IOR_EXPR:
+ t = build_int_2 (int1l | int2l, int1h | int2h);
+ break;
+
+ case BIT_XOR_EXPR:
+ t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
+ break;
+
+ case BIT_AND_EXPR:
+ t = build_int_2 (int1l & int2l, int1h & int2h);
+ break;
+
+ case BIT_ANDTC_EXPR:
+ t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
+ break;
+
+ case RSHIFT_EXPR:
+ int2l = - int2l;
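+	  /* ... fall through ... */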
+ case LSHIFT_EXPR:
+ /* It's unclear from the C standard whether shifts can overflow.
+ The following code ignores overflow; perhaps a C standard
+ interpretation ruling is needed. */
+ lshift_double (int1l, int1h, int2l,
+ TYPE_PRECISION (TREE_TYPE (arg1)),
+ &low, &hi,
+ !uns);
+ t = build_int_2 (low, hi);
+ TREE_TYPE (t) = TREE_TYPE (arg1);
+ if (!notrunc)
+ force_fit_type (t, 0);
+ TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
+ return t;
+
+ case RROTATE_EXPR:
+ int2l = - int2l;
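+	  /* ... fall through ... */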
+ case LROTATE_EXPR:
+ lrotate_double (int1l, int1h, int2l,
+ TYPE_PRECISION (TREE_TYPE (arg1)),
+ &low, &hi);
+ t = build_int_2 (low, hi);
+ break;
+
+ case PLUS_EXPR:
+ if (int1h == 0)
+ {
+ int2l += int1l;
+ if ((unsigned HOST_WIDE_INT) int2l < int1l)
+ {
+ hi = int2h++;
+ overflow = int2h < hi;
+ }
+ t = build_int_2 (int2l, int2h);
+ break;
+ }
+ if (int2h == 0)
+ {
+ int1l += int2l;
+ if ((unsigned HOST_WIDE_INT) int1l < int2l)
+ {
+ hi = int1h++;
+ overflow = int1h < hi;
+ }
+ t = build_int_2 (int1l, int1h);
+ break;
+ }
+ overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
+ t = build_int_2 (low, hi);
+ break;
+
+ case MINUS_EXPR:
+ if (int2h == 0 && int2l == 0)
+ {
+ t = build_int_2 (int1l, int1h);
+ break;
+ }
+ neg_double (int2l, int2h, &low, &hi);
+ add_double (int1l, int1h, low, hi, &low, &hi);
+ overflow = overflow_sum_sign (hi, int2h, int1h);
+ t = build_int_2 (low, hi);
+ break;
+
+ case MULT_EXPR:
+ overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
+ t = build_int_2 (low, hi);
+ break;
+
+ case TRUNC_DIV_EXPR:
+ case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ /* This is a shortcut for a common special case.
+ It reduces the number of tree nodes generated
+ and saves time. */
+ if (int2h == 0 && int2l > 0
+ && TREE_TYPE (arg1) == sizetype
+ && int1h == 0 && int1l >= 0)
+ {
+ if (code == CEIL_DIV_EXPR)
+ int1l += int2l-1;
+ return size_int (int1l / int2l);
+ }
+ case ROUND_DIV_EXPR:
+ if (int2h == 0 && int2l == 1)
+ {
+ t = build_int_2 (int1l, int1h);
+ break;
+ }
+ if (int1l == int2l && int1h == int2h)
+ {
+ if ((int1l | int1h) == 0)
+ abort ();
+ t = build_int_2 (1, 0);
+ break;
+ }
+ overflow = div_and_round_double (code, uns,
+ int1l, int1h, int2l, int2h,
+ &low, &hi, &garbagel, &garbageh);
+ t = build_int_2 (low, hi);
+ break;
+
+ case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
+ case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
+ overflow = div_and_round_double (code, uns,
+ int1l, int1h, int2l, int2h,
+ &garbagel, &garbageh, &low, &hi);
+ t = build_int_2 (low, hi);
+ break;
+
+ case MIN_EXPR:
+ case MAX_EXPR:
+ if (uns)
+ {
+ low = (((unsigned HOST_WIDE_INT) int1h
+ < (unsigned HOST_WIDE_INT) int2h)
+ || (((unsigned HOST_WIDE_INT) int1h
+ == (unsigned HOST_WIDE_INT) int2h)
+ && ((unsigned HOST_WIDE_INT) int1l
+ < (unsigned HOST_WIDE_INT) int2l)));
+ }
+ else
+ {
+ low = ((int1h < int2h)
+ || ((int1h == int2h)
+ && ((unsigned HOST_WIDE_INT) int1l
+ < (unsigned HOST_WIDE_INT) int2l)));
+ }
+ if (low == (code == MIN_EXPR))
+ t = build_int_2 (int1l, int1h);
+ else
+ t = build_int_2 (int2l, int2h);
+ break;
+
+ default:
+ abort ();
+ }
+ got_it:
+ TREE_TYPE (t) = TREE_TYPE (arg1);
+ TREE_OVERFLOW (t)
+ = ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
+ | TREE_OVERFLOW (arg1)
+ | TREE_OVERFLOW (arg2));
+ TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
+ | TREE_CONSTANT_OVERFLOW (arg1)
+ | TREE_CONSTANT_OVERFLOW (arg2));
+ return t;
+ }
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ if (TREE_CODE (arg1) == REAL_CST)
+ {
+ REAL_VALUE_TYPE d1;
+ REAL_VALUE_TYPE d2;
+ int overflow = 0;
+ REAL_VALUE_TYPE value;
+ tree t;
+
+ d1 = TREE_REAL_CST (arg1);
+ d2 = TREE_REAL_CST (arg2);
+
+      /* If either operand is a NaN, just return it.  Otherwise, set
+	 up a floating-point trap handler; if a trap occurs, the result
+	 is returned with its overflow flag set. */
+ if (REAL_VALUE_ISNAN (d1))
+ return arg1;
+ else if (REAL_VALUE_ISNAN (d2))
+ return arg2;
+ else if (setjmp (float_error))
+ {
+ t = copy_node (arg1);
+ overflow = 1;
+ goto got_float;
+ }
+
+ set_float_handler (float_error);
+
+#ifdef REAL_ARITHMETIC
+ REAL_ARITHMETIC (value, code, d1, d2);
+#else
+ switch (code)
+ {
+ case PLUS_EXPR:
+ value = d1 + d2;
+ break;
+
+ case MINUS_EXPR:
+ value = d1 - d2;
+ break;
+
+ case MULT_EXPR:
+ value = d1 * d2;
+ break;
+
+ case RDIV_EXPR:
+#ifndef REAL_INFINITY
+ if (d2 == 0)
+ abort ();
+#endif
+
+ value = d1 / d2;
+ break;
+
+ case MIN_EXPR:
+ value = MIN (d1, d2);
+ break;
+
+ case MAX_EXPR:
+ value = MAX (d1, d2);
+ break;
+
+ default:
+ abort ();
+ }
+#endif /* no REAL_ARITHMETIC */
+ t = build_real (TREE_TYPE (arg1),
+ real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
+ got_float:
+ set_float_handler (NULL_PTR);
+
+ TREE_OVERFLOW (t)
+ = (force_fit_type (t, overflow)
+ | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_OVERFLOW (t)
+ | TREE_CONSTANT_OVERFLOW (arg1)
+ | TREE_CONSTANT_OVERFLOW (arg2);
+ return t;
+ }
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+ if (TREE_CODE (arg1) == COMPLEX_CST)
+ {
+ register tree r1 = TREE_REALPART (arg1);
+ register tree i1 = TREE_IMAGPART (arg1);
+ register tree r2 = TREE_REALPART (arg2);
+ register tree i2 = TREE_IMAGPART (arg2);
+ register tree t;
+
+ switch (code)
+ {
+ case PLUS_EXPR:
+ t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
+ const_binop (PLUS_EXPR, i1, i2, notrunc));
+ break;
+
+ case MINUS_EXPR:
+ t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
+ const_binop (MINUS_EXPR, i1, i2, notrunc));
+ break;
+
+ case MULT_EXPR:
+ t = build_complex (const_binop (MINUS_EXPR,
+ const_binop (MULT_EXPR,
+ r1, r2, notrunc),
+ const_binop (MULT_EXPR,
+ i1, i2, notrunc),
+ notrunc),
+ const_binop (PLUS_EXPR,
+ const_binop (MULT_EXPR,
+ r1, i2, notrunc),
+ const_binop (MULT_EXPR,
+ i1, r2, notrunc),
+ notrunc));
+ break;
+
+ case RDIV_EXPR:
+ {
+ register tree magsquared
+ = const_binop (PLUS_EXPR,
+ const_binop (MULT_EXPR, r2, r2, notrunc),
+ const_binop (MULT_EXPR, i2, i2, notrunc),
+ notrunc);
+
+ t = build_complex
+ (const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
+ ? TRUNC_DIV_EXPR : RDIV_EXPR,
+ const_binop (PLUS_EXPR,
+ const_binop (MULT_EXPR, r1, r2,
+ notrunc),
+ const_binop (MULT_EXPR, i1, i2,
+ notrunc),
+ notrunc),
+ magsquared, notrunc),
+ const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
+ ? TRUNC_DIV_EXPR : RDIV_EXPR,
+ const_binop (MINUS_EXPR,
+ const_binop (MULT_EXPR, i1, r2,
+ notrunc),
+ const_binop (MULT_EXPR, r1, i2,
+ notrunc),
+ notrunc),
+ magsquared, notrunc));
+ }
+ break;
+
+ default:
+ abort ();
+ }
+ TREE_TYPE (t) = TREE_TYPE (arg1);
+ return t;
+ }
+ return 0;
+}
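+
+/* Usage sketch (editorial): called with PLUS_EXPR and the
+   INTEGER_CSTs 3 and 4, const_binop returns an INTEGER_CST 7 of the
+   same type, with any TREE_OVERFLOW flags propagated from the
+   operands; a null return means the combination was not handled. */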
+
+/* Return an INTEGER_CST with value NUMBER and type from `sizetype'. */
+
+tree
+size_int (number)
+ unsigned int number;
+{
+ register tree t;
+ /* Type-size nodes already made for small sizes. */
+ static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];
+
+ if (number < 2*HOST_BITS_PER_WIDE_INT + 1
+ && size_table[number] != 0)
+ return size_table[number];
+ if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
+ {
+ push_obstacks_nochange ();
+ /* Make this a permanent node. */
+ end_temporary_allocation ();
+ t = build_int_2 (number, 0);
+ TREE_TYPE (t) = sizetype;
+ size_table[number] = t;
+ pop_obstacks ();
+ }
+ else
+ {
+ t = build_int_2 (number, 0);
+ TREE_TYPE (t) = sizetype;
+ }
+ return t;
+}
+
+/* Combine operands ARG0 and ARG1 with arithmetic operation CODE.
+   CODE is a tree code.  Data type is taken from `sizetype'.
+   If the operands are constant, so is the result. */
+
+tree
+size_binop (code, arg0, arg1)
+ enum tree_code code;
+ tree arg0, arg1;
+{
+ /* Handle the special case of two integer constants faster. */
+ if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
+ {
+ /* And some specific cases even faster than that. */
+ if (code == PLUS_EXPR
+ && TREE_INT_CST_LOW (arg0) == 0
+ && TREE_INT_CST_HIGH (arg0) == 0)
+ return arg1;
+ if (code == MINUS_EXPR
+ && TREE_INT_CST_LOW (arg1) == 0
+ && TREE_INT_CST_HIGH (arg1) == 0)
+ return arg0;
+ if (code == MULT_EXPR
+ && TREE_INT_CST_LOW (arg0) == 1
+ && TREE_INT_CST_HIGH (arg0) == 0)
+ return arg1;
+ /* Handle general case of two integer constants. */
+ return const_binop (code, arg0, arg1, 1);
+ }
+
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return error_mark_node;
+
+ return fold (build (code, sizetype, arg0, arg1));
+}
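+
+/* Usage sketch (editorial): size_binop (PLUS_EXPR, size_int (4),
+   size_int (8)) takes the fast constant path and yields an
+   INTEGER_CST 12 of type `sizetype'; given a non-constant operand it
+   builds the expression and lets fold simplify what it can. */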
+
+/* Given T, a tree representing type conversion of ARG1, a constant,
+ return a constant tree representing the result of conversion. */
+
+static tree
+fold_convert (t, arg1)
+ register tree t;
+ register tree arg1;
+{
+ register tree type = TREE_TYPE (t);
+ int overflow = 0;
+
+ if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
+ {
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ {
+ /* Given an integer constant, make new constant with new type,
+ appropriately sign-extended or truncated. */
+ t = build_int_2 (TREE_INT_CST_LOW (arg1),
+ TREE_INT_CST_HIGH (arg1));
+ TREE_TYPE (t) = type;
+ /* Indicate an overflow if (1) ARG1 already overflowed,
+ or (2) force_fit_type indicates an overflow.
+ Tell force_fit_type that an overflow has already occurred
+ if ARG1 is a too-large unsigned value and T is signed. */
+ TREE_OVERFLOW (t)
+ = (TREE_OVERFLOW (arg1)
+ | force_fit_type (t,
+ (TREE_INT_CST_HIGH (arg1) < 0
+ & (TREE_UNSIGNED (type)
+ < TREE_UNSIGNED (TREE_TYPE (arg1))))));
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
+ }
+#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ else if (TREE_CODE (arg1) == REAL_CST)
+ {
+ /* Don't initialize these, use assignments.
+ Initialized local aggregates don't work on old compilers. */
+ REAL_VALUE_TYPE x;
+ REAL_VALUE_TYPE l;
+ REAL_VALUE_TYPE u;
+
+ x = TREE_REAL_CST (arg1);
+ l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
+ u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
+ /* See if X will be in range after truncation towards 0.
+ To compensate for truncation, move the bounds away from 0,
+ but reject if X exactly equals the adjusted bounds. */
+#ifdef REAL_ARITHMETIC
+ REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
+ REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
+#else
+ l--;
+ u++;
+#endif
+ /* If X is a NaN, use zero instead and show we have an overflow.
+ Otherwise, range check. */
+ if (REAL_VALUE_ISNAN (x))
+ overflow = 1, x = dconst0;
+ else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
+ overflow = 1;
+
+#ifndef REAL_ARITHMETIC
+ {
+ HOST_WIDE_INT low, high;
+ HOST_WIDE_INT half_word
+ = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
+
+ if (x < 0)
+ x = -x;
+
+ high = (HOST_WIDE_INT) (x / half_word / half_word);
+ x -= (REAL_VALUE_TYPE) high * half_word * half_word;
+ if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
+ {
+ low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
+ low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
+ }
+ else
+ low = (HOST_WIDE_INT) x;
+ if (TREE_REAL_CST (arg1) < 0)
+ neg_double (low, high, &low, &high);
+ t = build_int_2 (low, high);
+ }
+#else
+ {
+ HOST_WIDE_INT low, high;
+ REAL_VALUE_TO_INT (&low, &high, x);
+ t = build_int_2 (low, high);
+ }
+#endif
+ TREE_TYPE (t) = type;
+ TREE_OVERFLOW (t)
+ = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
+ }
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+ TREE_TYPE (t) = type;
+ }
+ else if (TREE_CODE (type) == REAL_TYPE)
+ {
+#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ return build_real_from_int_cst (type, arg1);
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+ if (TREE_CODE (arg1) == REAL_CST)
+ {
+ if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
+ return arg1;
+ else if (setjmp (float_error))
+ {
+ overflow = 1;
+ t = copy_node (arg1);
+ goto got_it;
+ }
+ set_float_handler (float_error);
+
+ t = build_real (type, real_value_truncate (TYPE_MODE (type),
+ TREE_REAL_CST (arg1)));
+ set_float_handler (NULL_PTR);
+
+ got_it:
+ TREE_OVERFLOW (t)
+ = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
+ return t;
+ }
+ }
+ TREE_CONSTANT (t) = 1;
+ return t;
+}
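+
+/* Worked example (editorial): converting the REAL_CST 3.7 to an
+   integer type truncates toward zero, yielding the INTEGER_CST 3;
+   converting a NaN substitutes zero and sets TREE_OVERFLOW, exactly
+   as coded above. */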
+
+/* Return an expr equal to X but certainly not valid as an lvalue.
+   Also make sure it is not valid as a null pointer constant. */
+
+tree
+non_lvalue (x)
+ tree x;
+{
+ tree result;
+
+ /* These things are certainly not lvalues. */
+ if (TREE_CODE (x) == NON_LVALUE_EXPR
+ || TREE_CODE (x) == INTEGER_CST
+ || TREE_CODE (x) == REAL_CST
+ || TREE_CODE (x) == STRING_CST
+ || TREE_CODE (x) == ADDR_EXPR)
+ {
+ if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
+ {
+ /* Use NOP_EXPR instead of NON_LVALUE_EXPR
+ so convert_for_assignment won't strip it.
+ This is so this 0 won't be treated as a null pointer constant. */
+ result = build1 (NOP_EXPR, TREE_TYPE (x), x);
+ TREE_CONSTANT (result) = TREE_CONSTANT (x);
+ return result;
+ }
+ return x;
+ }
+
+ result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
+ TREE_CONSTANT (result) = TREE_CONSTANT (x);
+ return result;
+}
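+
+/* Usage sketch (editorial): non_lvalue applied to the integer
+   constant 0 wraps it in a NOP_EXPR, so a folded expression that
+   happens to yield 0 is no longer a null pointer constant; applied
+   to x + y it produces a NON_LVALUE_EXPR, which cannot appear on
+   the left of an assignment. */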
+
+/* When pedantic, return an expr equal to X but certainly not valid as a
+ pedantic lvalue. Otherwise, return X. */
+
+tree
+pedantic_non_lvalue (x)
+ tree x;
+{
+ if (pedantic)
+ return non_lvalue (x);
+ else
+ return x;
+}
+
+/* Given a tree comparison code, return the code that is the logical inverse
+ of the given code. It is not safe to do this for floating-point
+ comparisons, except for NE_EXPR and EQ_EXPR. */
+
+static enum tree_code
+invert_tree_comparison (code)
+ enum tree_code code;
+{
+ switch (code)
+ {
+ case EQ_EXPR:
+ return NE_EXPR;
+ case NE_EXPR:
+ return EQ_EXPR;
+ case GT_EXPR:
+ return LE_EXPR;
+ case GE_EXPR:
+ return LT_EXPR;
+ case LT_EXPR:
+ return GE_EXPR;
+ case LE_EXPR:
+ return GT_EXPR;
+ default:
+ abort ();
+ }
+}
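+
+/* Worked example (editorial) of the floating-point restriction: if
+   a is a NaN, both a < b and a >= b are false, so LT_EXPR cannot be
+   inverted to GE_EXPR; only EQ_EXPR and NE_EXPR invert safely. */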
+
+/* Similar, but return the comparison that results if the operands are
+ swapped. This is safe for floating-point. */
+
+static enum tree_code
+swap_tree_comparison (code)
+ enum tree_code code;
+{
+ switch (code)
+ {
+ case EQ_EXPR:
+ case NE_EXPR:
+ return code;
+ case GT_EXPR:
+ return LT_EXPR;
+ case GE_EXPR:
+ return LE_EXPR;
+ case LT_EXPR:
+ return GT_EXPR;
+ case LE_EXPR:
+ return GE_EXPR;
+ default:
+ abort ();
+ }
+}
+
+/* Return nonzero if CODE is a tree code that represents a truth value. */
+
+static int
+truth_value_p (code)
+ enum tree_code code;
+{
+ return (TREE_CODE_CLASS (code) == '<'
+ || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
+ || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
+ || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
+}
+
+/* Return nonzero if two operands are necessarily equal.
+ If ONLY_CONST is non-zero, only return non-zero for constants.
+ This function tests whether the operands are indistinguishable;
+ it does not test whether they are equal using C's == operation.
+ The distinction is important for IEEE floating point, because
+ (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
+ (2) two NaNs may be indistinguishable, but NaN!=NaN. */
+
+int
+operand_equal_p (arg0, arg1, only_const)
+ tree arg0, arg1;
+ int only_const;
+{
+ /* If both types don't have the same signedness, then we can't consider
+ them equal. We must check this before the STRIP_NOPS calls
+ because they may change the signedness of the arguments. */
+ if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
+ return 0;
+
+ STRIP_NOPS (arg0);
+ STRIP_NOPS (arg1);
+
+ /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
+ We don't care about side effects in that case because the SAVE_EXPR
+ takes care of that for us. */
+ if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
+ return ! only_const;
+
+ if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
+ return 0;
+
+ if (TREE_CODE (arg0) == TREE_CODE (arg1)
+ && TREE_CODE (arg0) == ADDR_EXPR
+ && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
+ return 1;
+
+ if (TREE_CODE (arg0) == TREE_CODE (arg1)
+ && TREE_CODE (arg0) == INTEGER_CST
+ && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
+ && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
+ return 1;
+
+ /* Detect when real constants are equal. */
+ if (TREE_CODE (arg0) == TREE_CODE (arg1)
+ && TREE_CODE (arg0) == REAL_CST)
+ return !bcmp ((char *) &TREE_REAL_CST (arg0),
+ (char *) &TREE_REAL_CST (arg1),
+ sizeof (REAL_VALUE_TYPE));
+
+ if (only_const)
+ return 0;
+
+ if (arg0 == arg1)
+ return 1;
+
+ if (TREE_CODE (arg0) != TREE_CODE (arg1))
+ return 0;
+ /* This is needed for conversions and for COMPONENT_REF.
+ Might as well play it safe and always test this. */
+ if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
+ return 0;
+
+ switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
+ {
+ case '1':
+ /* Two conversions are equal only if signedness and modes match. */
+ if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
+ && (TREE_UNSIGNED (TREE_TYPE (arg0))
+ != TREE_UNSIGNED (TREE_TYPE (arg1))))
+ return 0;
+
+ return operand_equal_p (TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0), 0);
+
+ case '<':
+ case '2':
+ return (operand_equal_p (TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0), 0)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg1, 1), 0));
+
+ case 'r':
+ switch (TREE_CODE (arg0))
+ {
+ case INDIRECT_REF:
+ return operand_equal_p (TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0), 0);
+
+ case COMPONENT_REF:
+ case ARRAY_REF:
+ return (operand_equal_p (TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0), 0)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg1, 1), 0));
+
+ case BIT_FIELD_REF:
+ return (operand_equal_p (TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0), 0)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg1, 1), 0)
+ && operand_equal_p (TREE_OPERAND (arg0, 2),
+ TREE_OPERAND (arg1, 2), 0));
+ }
+ break;
+ }
+
+ return 0;
+}
+
+/* Similar to operand_equal_p, but see if ARG0 might have been made by
+ shorten_compare from ARG1 when ARG1 was being compared with OTHER.
+
+ When in doubt, return 0. */
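+/* A sketch, assuming hypothetical shorts s and t widened for comparison:
+   if ARG1 is (int) s and OTHER is (int) t, shorten_compare may have
+   narrowed the comparison back to short, so ARG0 may be s itself; we
+   redo that narrowing on ARG1 here and test for equality again. */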
+
+static int
+operand_equal_for_comparison_p (arg0, arg1, other)
+ tree arg0, arg1;
+ tree other;
+{
+ int unsignedp1, unsignedpo;
+ tree primarg1, primother;
+ unsigned correct_width;
+
+ if (operand_equal_p (arg0, arg1, 0))
+ return 1;
+
+ if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
+ return 0;
+
+ /* Duplicate what shorten_compare does to ARG1 and see if that gives the
+ actual comparison operand, ARG0.
+
+ First throw away any conversions to wider types
+ already present in the operands. */
+
+ primarg1 = get_narrower (arg1, &unsignedp1);
+ primother = get_narrower (other, &unsignedpo);
+
+ correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
+ if (unsignedp1 == unsignedpo
+ && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
+ && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
+ {
+ tree type = TREE_TYPE (arg0);
+
+ /* Make sure shorter operand is extended the right way
+ to match the longer operand. */
+ primarg1 = convert (signed_or_unsigned_type (unsignedp1,
+ TREE_TYPE (primarg1)),
+ primarg1);
+
+ if (operand_equal_p (arg0, convert (type, primarg1), 0))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* See if ARG is an expression that is either a comparison or is performing
+ arithmetic on comparisons. The comparisons must only be comparing
+ two different values, which will be stored in *CVAL1 and *CVAL2; if
+ they are non-zero it means that some operands have already been found.
+ No variables may be used anywhere else in the expression except in the
+ comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
+ the expression and save_expr needs to be called with CVAL1 and CVAL2.
+
+ If this is true, return 1. Otherwise, return zero. */
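+/* E.g., with hypothetical operands a and b: for ARG == ((a < b) && (a == b))
+   we return 1 and set *CVAL1 = a, *CVAL2 = b, since every comparison in
+   the tree mentions only those two values. */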
+
+static int
+twoval_comparison_p (arg, cval1, cval2, save_p)
+ tree arg;
+ tree *cval1, *cval2;
+ int *save_p;
+{
+ enum tree_code code = TREE_CODE (arg);
+ char class = TREE_CODE_CLASS (code);
+
+ /* We can handle some of the 'e' cases here. */
+ if (class == 'e' && code == TRUTH_NOT_EXPR)
+ class = '1';
+ else if (class == 'e'
+ && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
+ || code == COMPOUND_EXPR))
+ class = '2';
+
+ /* ??? Disable this since the SAVE_EXPR might already be in use outside
+ the expression. There may be no way to make this work, but it needs
+ to be looked at again for 2.6. */
+#if 0
+ else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
+ {
+ /* If we've already found a CVAL1 or CVAL2, this expression is
+ too complex to handle. */
+ if (*cval1 || *cval2)
+ return 0;
+
+ class = '1';
+ *save_p = 1;
+ }
+#endif
+
+ switch (class)
+ {
+ case '1':
+ return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
+
+ case '2':
+ return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
+ && twoval_comparison_p (TREE_OPERAND (arg, 1),
+ cval1, cval2, save_p));
+
+ case 'c':
+ return 1;
+
+ case 'e':
+ if (code == COND_EXPR)
+ return (twoval_comparison_p (TREE_OPERAND (arg, 0),
+ cval1, cval2, save_p)
+ && twoval_comparison_p (TREE_OPERAND (arg, 1),
+ cval1, cval2, save_p)
+ && twoval_comparison_p (TREE_OPERAND (arg, 2),
+ cval1, cval2, save_p));
+ return 0;
+
+ case '<':
+ /* First see if we can handle the first operand, then the second. For
+ the second operand, we know *CVAL1 can't be zero. It must be that
+ one side of the comparison is each of the values; test for the
+ case where this isn't true by failing if the two operands
+ are the same. */
+
+ if (operand_equal_p (TREE_OPERAND (arg, 0),
+ TREE_OPERAND (arg, 1), 0))
+ return 0;
+
+ if (*cval1 == 0)
+ *cval1 = TREE_OPERAND (arg, 0);
+ else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
+ ;
+ else if (*cval2 == 0)
+ *cval2 = TREE_OPERAND (arg, 0);
+ else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
+ ;
+ else
+ return 0;
+
+ if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
+ ;
+ else if (*cval2 == 0)
+ *cval2 = TREE_OPERAND (arg, 1);
+ else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
+ ;
+ else
+ return 0;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/* ARG is a tree that is known to contain just arithmetic operations and
+ comparisons. Evaluate the operations in the tree substituting NEW0 for
+ any occurrence of OLD0 as an operand of a comparison and likewise for
+ NEW1 and OLD1. */
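+/* E.g., with hypothetical trees: eval_subst on (a < b) && (b == a) with
+   OLD0 = a, NEW0 = x, OLD1 = b, NEW1 = y rebuilds and folds
+   (x < y) && (y == x). */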
+
+static tree
+eval_subst (arg, old0, new0, old1, new1)
+ tree arg;
+ tree old0, new0, old1, new1;
+{
+ tree type = TREE_TYPE (arg);
+ enum tree_code code = TREE_CODE (arg);
+ char class = TREE_CODE_CLASS (code);
+
+ /* We can handle some of the 'e' cases here. */
+ if (class == 'e' && code == TRUTH_NOT_EXPR)
+ class = '1';
+ else if (class == 'e'
+ && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
+ class = '2';
+
+ switch (class)
+ {
+ case '1':
+ return fold (build1 (code, type,
+ eval_subst (TREE_OPERAND (arg, 0),
+ old0, new0, old1, new1)));
+
+ case '2':
+ return fold (build (code, type,
+ eval_subst (TREE_OPERAND (arg, 0),
+ old0, new0, old1, new1),
+ eval_subst (TREE_OPERAND (arg, 1),
+ old0, new0, old1, new1)));
+
+ case 'e':
+ switch (code)
+ {
+ case SAVE_EXPR:
+ return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
+
+ case COMPOUND_EXPR:
+ return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
+
+ case COND_EXPR:
+ return fold (build (code, type,
+ eval_subst (TREE_OPERAND (arg, 0),
+ old0, new0, old1, new1),
+ eval_subst (TREE_OPERAND (arg, 1),
+ old0, new0, old1, new1),
+ eval_subst (TREE_OPERAND (arg, 2),
+ old0, new0, old1, new1)));
+ }
+
+ case '<':
+ {
+ tree arg0 = TREE_OPERAND (arg, 0);
+ tree arg1 = TREE_OPERAND (arg, 1);
+
+ /* We need to check both for exact equality and tree equality. The
+ former will be true if the operand has a side-effect. In that
+ case, we know the operand occurred exactly once. */
+
+ if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
+ arg0 = new0;
+ else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
+ arg0 = new1;
+
+ if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
+ arg1 = new0;
+ else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
+ arg1 = new1;
+
+ return fold (build (code, type, arg0, arg1));
+ }
+ }
+
+ return arg;
+}
+
+/* Return a tree for the case when the result of an expression is RESULT
+ converted to TYPE and OMITTED was previously an operand of the expression
+ but is now not needed (e.g., we folded OMITTED * 0).
+
+ If OMITTED has side effects, we must evaluate it. Otherwise, just do
+ the conversion of RESULT to TYPE. */
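+/* E.g., with a hypothetical call f (): folding f () * 0 reaches here with
+   RESULT == 0 and OMITTED == f (); since the call has side effects we
+   return the COMPOUND_EXPR (f (), 0) rather than dropping the call. */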
+
+static tree
+omit_one_operand (type, result, omitted)
+ tree type, result, omitted;
+{
+ tree t = convert (type, result);
+
+ if (TREE_SIDE_EFFECTS (omitted))
+ return build (COMPOUND_EXPR, type, omitted, t);
+
+ return non_lvalue (t);
+}
+
+/* Return a simplified tree node for the truth-negation of ARG. This
+ never alters ARG itself. We assume that ARG is an operation that
+ returns a truth value (0 or 1). */
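+/* Sketches of the cases handled below, with hypothetical ints a, b and
+   IEEE doubles x, y: !(a < b) becomes a >= b; !(a && b) becomes
+   !a || !b by De Morgan; but !(x < y) stays a TRUTH_NOT_EXPR, since
+   with NaN operands !(x < y) is not equivalent to x >= y. */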
+
+tree
+invert_truthvalue (arg)
+ tree arg;
+{
+ tree type = TREE_TYPE (arg);
+ enum tree_code code = TREE_CODE (arg);
+
+ if (code == ERROR_MARK)
+ return arg;
+
+ /* If this is a comparison, we can simply invert it, except for
+ floating-point non-equality comparisons, in which case we just
+ enclose a TRUTH_NOT_EXPR around what we have. */
+
+ if (TREE_CODE_CLASS (code) == '<')
+ {
+ if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
+ && code != NE_EXPR && code != EQ_EXPR)
+ return build1 (TRUTH_NOT_EXPR, type, arg);
+ else
+ return build (invert_tree_comparison (code), type,
+ TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
+ }
+
+ switch (code)
+ {
+ case INTEGER_CST:
+ return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
+ && TREE_INT_CST_HIGH (arg) == 0, 0));
+
+ case TRUTH_AND_EXPR:
+ return build (TRUTH_OR_EXPR, type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)),
+ invert_truthvalue (TREE_OPERAND (arg, 1)));
+
+ case TRUTH_OR_EXPR:
+ return build (TRUTH_AND_EXPR, type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)),
+ invert_truthvalue (TREE_OPERAND (arg, 1)));
+
+ case TRUTH_XOR_EXPR:
+ /* Here we can invert either operand. We invert the first operand
+ unless the second operand is a TRUTH_NOT_EXPR in which case our
+ result is the XOR of the first operand with the inside of the
+ negation of the second operand. */
+
+ if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
+ return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
+ TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
+ else
+ return build (TRUTH_XOR_EXPR, type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)),
+ TREE_OPERAND (arg, 1));
+
+ case TRUTH_ANDIF_EXPR:
+ return build (TRUTH_ORIF_EXPR, type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)),
+ invert_truthvalue (TREE_OPERAND (arg, 1)));
+
+ case TRUTH_ORIF_EXPR:
+ return build (TRUTH_ANDIF_EXPR, type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)),
+ invert_truthvalue (TREE_OPERAND (arg, 1)));
+
+ case TRUTH_NOT_EXPR:
+ return TREE_OPERAND (arg, 0);
+
+ case COND_EXPR:
+ return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
+ invert_truthvalue (TREE_OPERAND (arg, 1)),
+ invert_truthvalue (TREE_OPERAND (arg, 2)));
+
+ case COMPOUND_EXPR:
+ return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
+ invert_truthvalue (TREE_OPERAND (arg, 1)));
+
+ case NON_LVALUE_EXPR:
+ return invert_truthvalue (TREE_OPERAND (arg, 0));
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ return build1 (TREE_CODE (arg), type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)));
+
+ case BIT_AND_EXPR:
+ if (!integer_onep (TREE_OPERAND (arg, 1)))
+ break;
+ return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
+
+ case SAVE_EXPR:
+ return build1 (TRUTH_NOT_EXPR, type, arg);
+ }
+ if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
+ abort ();
+ return build1 (TRUTH_NOT_EXPR, type, arg);
+}
+
+/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
+ operands are another bit-wise operation with a common input. If so,
+ distribute the bit operations to save an operation and possibly two if
+ constants are involved. For example, convert
+ (A | B) & (A | C) into A | (B & C)
+ Further simplification will occur if B and C are constants.
+
+ If this optimization cannot be done, 0 will be returned. */
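+/* A sketch of the constant case, A being a hypothetical integer:
+   (A & 3) | (A & 12) distributes to A & (3 | 12), and the inner fold
+   merges the constants to give A & 15. */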
+
+static tree
+distribute_bit_expr (code, type, arg0, arg1)
+ enum tree_code code;
+ tree type;
+ tree arg0, arg1;
+{
+ tree common;
+ tree left, right;
+
+ if (TREE_CODE (arg0) != TREE_CODE (arg1)
+ || TREE_CODE (arg0) == code
+ || (TREE_CODE (arg0) != BIT_AND_EXPR
+ && TREE_CODE (arg0) != BIT_IOR_EXPR))
+ return 0;
+
+ if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
+ {
+ common = TREE_OPERAND (arg0, 0);
+ left = TREE_OPERAND (arg0, 1);
+ right = TREE_OPERAND (arg1, 1);
+ }
+ else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
+ {
+ common = TREE_OPERAND (arg0, 0);
+ left = TREE_OPERAND (arg0, 1);
+ right = TREE_OPERAND (arg1, 0);
+ }
+ else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
+ {
+ common = TREE_OPERAND (arg0, 1);
+ left = TREE_OPERAND (arg0, 0);
+ right = TREE_OPERAND (arg1, 1);
+ }
+ else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
+ {
+ common = TREE_OPERAND (arg0, 1);
+ left = TREE_OPERAND (arg0, 0);
+ right = TREE_OPERAND (arg1, 0);
+ }
+ else
+ return 0;
+
+ return fold (build (TREE_CODE (arg0), type, common,
+ fold (build (code, type, left, right))));
+}
+
+/* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
+ starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
+
+static tree
+make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
+ tree inner;
+ tree type;
+ int bitsize, bitpos;
+ int unsignedp;
+{
+ tree result = build (BIT_FIELD_REF, type, inner,
+ size_int (bitsize), size_int (bitpos));
+
+ TREE_UNSIGNED (result) = unsignedp;
+
+ return result;
+}
+
+/* Optimize a bit-field compare.
+
+ There are two cases: the first is a compare against a constant and the
+ second is a comparison of two items where the fields are at the same
+ bit position relative to the start of a chunk (byte, halfword, word)
+ large enough to contain it. In these cases we can avoid the shift
+ implicit in bitfield extractions.
+
+ For constants, we emit a compare of the shifted constant with the
+ BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
+ compared. For two fields at the same position, we do the ANDs with the
+ similar mask and compare the result of the ANDs.
+
+ CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
+ COMPARE_TYPE is the type of the comparison, and LHS and RHS
+ are the left and right operands of the comparison, respectively.
+
+ If the optimization described above can be done, we return the resulting
+ tree. Otherwise we return zero. */
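+/* A sketch of the constant case, assuming a hypothetical
+   struct { unsigned f : 3; } s and the test s.f == 5: rather than
+   extracting and shifting the field, we load the containing word W once
+   and test (W & MASK) == (5 << BITPOS), MASK covering the field. */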
+
+static tree
+optimize_bit_field_compare (code, compare_type, lhs, rhs)
+ enum tree_code code;
+ tree compare_type;
+ tree lhs, rhs;
+{
+ int lbitpos, lbitsize, rbitpos, rbitsize;
+ int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
+ tree type = TREE_TYPE (lhs);
+ tree signed_type, unsigned_type;
+ int const_p = TREE_CODE (rhs) == INTEGER_CST;
+ enum machine_mode lmode, rmode, lnmode, rnmode;
+ int lunsignedp, runsignedp;
+ int lvolatilep = 0, rvolatilep = 0;
+ tree linner, rinner;
+ tree mask;
+ tree offset;
+
+ /* Get all the information about the extractions being done. If the bit size
+ is the same as the size of the underlying object, we aren't doing an
+ extraction at all and so can do nothing. */
+ linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
+ &lunsignedp, &lvolatilep);
+ if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
+ || offset != 0)
+ return 0;
+
+ if (!const_p)
+ {
+ /* If this is not a constant, we can only do something if bit positions,
+ sizes, and signedness are the same. */
+ rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
+ &rmode, &runsignedp, &rvolatilep);
+
+ if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
+ || lunsignedp != runsignedp || offset != 0)
+ return 0;
+ }
+
+ /* See if we can find a mode to refer to this field. We should be able to,
+ but fail if we can't. */
+ lnmode = get_best_mode (lbitsize, lbitpos,
+ TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
+ lvolatilep);
+ if (lnmode == VOIDmode)
+ return 0;
+
+ /* Set signed and unsigned types of the precision of this mode for the
+ shifts below. */
+ signed_type = type_for_mode (lnmode, 0);
+ unsigned_type = type_for_mode (lnmode, 1);
+
+ if (! const_p)
+ {
+ rnmode = get_best_mode (rbitsize, rbitpos,
+ TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
+ rvolatilep);
+ if (rnmode == VOIDmode)
+ return 0;
+ }
+
+ /* Compute the bit position and size for the new reference and our offset
+ within it. If the new reference is the same size as the original, we
+ won't optimize anything, so return zero. */
+ lnbitsize = GET_MODE_BITSIZE (lnmode);
+ lnbitpos = lbitpos & ~ (lnbitsize - 1);
+ lbitpos -= lnbitpos;
+ if (lnbitsize == lbitsize)
+ return 0;
+
+ if (! const_p)
+ {
+ rnbitsize = GET_MODE_BITSIZE (rnmode);
+ rnbitpos = rbitpos & ~ (rnbitsize - 1);
+ rbitpos -= rnbitpos;
+ if (rnbitsize == rbitsize)
+ return 0;
+ }
+
+#if BYTES_BIG_ENDIAN
+ lbitpos = lnbitsize - lbitsize - lbitpos;
+#endif
+
+ /* Make the mask to be used against the extracted field. */
+ mask = build_int_2 (~0, ~0);
+ TREE_TYPE (mask) = unsigned_type;
+ force_fit_type (mask, 0);
+ mask = convert (unsigned_type, mask);
+ mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
+ mask = const_binop (RSHIFT_EXPR, mask,
+ size_int (lnbitsize - lbitsize - lbitpos), 0);
+
+ if (! const_p)
+ /* If not comparing with constant, just rework the comparison
+ and return. */
+ return build (code, compare_type,
+ build (BIT_AND_EXPR, unsigned_type,
+ make_bit_field_ref (linner, unsigned_type,
+ lnbitsize, lnbitpos, 1),
+ mask),
+ build (BIT_AND_EXPR, unsigned_type,
+ make_bit_field_ref (rinner, unsigned_type,
+ rnbitsize, rnbitpos, 1),
+ mask));
+
+ /* Otherwise, we are handling the constant case. See if the constant is too
+ big for the field. Warn and return a tree for 0 (false) if so. We do
+ this not only for its own sake, but to avoid having to test for this
+ error case below. If we didn't, we might generate wrong code.
+
+ For unsigned fields, the constant shifted right by the field length should
+ be all zero. For signed fields, the high-order bits should agree with
+ the sign bit. */
+
+ if (lunsignedp)
+ {
+ if (! integer_zerop (const_binop (RSHIFT_EXPR,
+ convert (unsigned_type, rhs),
+ size_int (lbitsize), 0)))
+ {
+ warning ("comparison is always %s due to width of bitfield",
+ code == NE_EXPR ? "one" : "zero");
+ return convert (compare_type,
+ (code == NE_EXPR
+ ? integer_one_node : integer_zero_node));
+ }
+ }
+ else
+ {
+ tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
+ size_int (lbitsize - 1), 0);
+ if (! integer_zerop (tem) && ! integer_all_onesp (tem))
+ {
+ warning ("comparison is always %s due to width of bitfield",
+ code == NE_EXPR ? "one" : "zero");
+ return convert (compare_type,
+ (code == NE_EXPR
+ ? integer_one_node : integer_zero_node));
+ }
+ }
+
+ /* Single-bit compares should always be against zero. */
+ if (lbitsize == 1 && ! integer_zerop (rhs))
+ {
+ code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
+ rhs = convert (type, integer_zero_node);
+ }
+
+ /* Make a new bitfield reference, shift the constant over the
+ appropriate number of bits and mask it with the computed mask
+ (in case this was a signed field). If we changed it, make a new one. */
+ lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
+ if (lvolatilep)
+ {
+ TREE_SIDE_EFFECTS (lhs) = 1;
+ TREE_THIS_VOLATILE (lhs) = 1;
+ }
+
+ rhs = fold (const_binop (BIT_AND_EXPR,
+ const_binop (LSHIFT_EXPR,
+ convert (unsigned_type, rhs),
+ size_int (lbitpos), 0),
+ mask, 0));
+
+ return build (code, compare_type,
+ build (BIT_AND_EXPR, unsigned_type, lhs, mask),
+ rhs);
+}
+
+/* Subroutine for fold_truthop: decode a field reference.
+
+ If EXP is a comparison reference, we return the innermost reference.
+
+ *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
+ set to the starting bit number.
+
+ If the innermost field can be completely contained in a mode-sized
+ unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
+
+ *PVOLATILEP is set to 1 if any expression encountered is volatile;
+ otherwise it is not changed.
+
+ *PUNSIGNEDP is set to the signedness of the field.
+
+ *PMASK is set to the mask used. This is either contained in a
+ BIT_AND_EXPR or derived from the width of the field.
+
+ Return 0 if this is not a component reference or is one that we can't
+ do anything with. */
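+/* E.g., for EXP == (s.f & 6), s.f being a hypothetical bitfield: we
+   return the innermost reference, set *PBITSIZE and *PBITPOS from the
+   field, and leave in *PMASK the field-width mask ANDed with 6. */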
+
+static tree
+decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
+ pvolatilep, pmask)
+ tree exp;
+ int *pbitsize, *pbitpos;
+ enum machine_mode *pmode;
+ int *punsignedp, *pvolatilep;
+ tree *pmask;
+{
+ tree and_mask = 0;
+ tree mask, inner, offset;
+ tree unsigned_type;
+ int precision;
+
+ /* All the optimizations using this function assume integer fields.
+ There are problems with FP fields since the type_for_size call
+ below can fail for, e.g., XFmode. */
+ if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
+ return 0;
+
+ STRIP_NOPS (exp);
+
+ if (TREE_CODE (exp) == BIT_AND_EXPR)
+ {
+ and_mask = TREE_OPERAND (exp, 1);
+ exp = TREE_OPERAND (exp, 0);
+ STRIP_NOPS (exp); STRIP_NOPS (and_mask);
+ if (TREE_CODE (and_mask) != INTEGER_CST)
+ return 0;
+ }
+
+ if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
+ && TREE_CODE (exp) != BIT_FIELD_REF)
+ return 0;
+
+ inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
+ punsignedp, pvolatilep);
+ if (inner == exp || *pbitsize < 0 || offset != 0)
+ return 0;
+
+ /* Compute the mask to access the bitfield. */
+ unsigned_type = type_for_size (*pbitsize, 1);
+ precision = TYPE_PRECISION (unsigned_type);
+
+ mask = build_int_2 (~0, ~0);
+ TREE_TYPE (mask) = unsigned_type;
+ force_fit_type (mask, 0);
+ mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
+ mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
+
+ /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
+ if (and_mask != 0)
+ mask = fold (build (BIT_AND_EXPR, unsigned_type,
+ convert (unsigned_type, and_mask), mask));
+
+ *pmask = mask;
+ return inner;
+}
+
+/* Return non-zero if MASK represents a mask of SIZE ones in the low-order
+ bit positions. */
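+/* E.g., with SIZE == 4 this accepts a MASK of 15 (binary 1111) and
+   rejects 7 or 23, since the mask must consist of exactly SIZE
+   low-order one bits. */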
+
+static int
+all_ones_mask_p (mask, size)
+ tree mask;
+ int size;
+{
+ tree type = TREE_TYPE (mask);
+ int precision = TYPE_PRECISION (type);
+ tree tmask;
+
+ tmask = build_int_2 (~0, ~0);
+ TREE_TYPE (tmask) = signed_type (type);
+ force_fit_type (tmask, 0);
+ return
+ operand_equal_p (mask,
+ const_binop (RSHIFT_EXPR,
+ const_binop (LSHIFT_EXPR, tmask,
+ size_int (precision - size), 0),
+ size_int (precision - size), 0),
+ 0);
+}
+
+/* Subroutine for fold_truthop: determine if an operand is simple enough
+ to be evaluated unconditionally. */
+
+static int
+simple_operand_p (exp)
+ tree exp;
+{
+ /* Strip any conversions that don't change the machine mode. */
+ while ((TREE_CODE (exp) == NOP_EXPR
+ || TREE_CODE (exp) == CONVERT_EXPR)
+ && (TYPE_MODE (TREE_TYPE (exp))
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
+ exp = TREE_OPERAND (exp, 0);
+
+ return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
+ || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
+ && ! TREE_ADDRESSABLE (exp)
+ && ! TREE_THIS_VOLATILE (exp)
+ && ! DECL_NONLOCAL (exp)
+ /* Don't regard global variables as simple. They may be
+ allocated in ways unknown to the compiler (shared memory,
+ #pragma weak, etc). */
+ && ! TREE_PUBLIC (exp)
+ && ! DECL_EXTERNAL (exp)
+ /* Loading a static variable is unduly expensive, but global
+ registers aren't expensive. */
+ && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
+}
+
+/* Subroutine for fold_truthop: try to optimize a range test.
+
+ For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
+
+ JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
+ (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
+ (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of
+ the result.
+
+ VAR is the value being tested. LO_CODE and HI_CODE are the comparison
+ operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
+ larger than HI_CST (they may be equal).
+
+ We return the simplified tree or 0 if no optimization is possible. */
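+/* A sketch of the "or" form, i again hypothetical: "i == 2 || i == 3"
+   normalizes to an EQ pair one apart and comes out as
+   "(unsigned) (i - 2) <= 1". */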
+
+static tree
+range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
+ enum tree_code jcode, lo_code, hi_code;
+ tree type, var, lo_cst, hi_cst;
+{
+ tree utype;
+ enum tree_code rcode;
+
+ /* See if this is a range test and normalize the constant terms. */
+
+ if (jcode == TRUTH_AND_EXPR)
+ {
+ switch (lo_code)
+ {
+ case NE_EXPR:
+ /* See if we have VAR != CST && VAR != CST+1. */
+ if (! (hi_code == NE_EXPR
+ && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
+ && tree_int_cst_equal (integer_one_node,
+ const_binop (MINUS_EXPR,
+ hi_cst, lo_cst, 0))))
+ return 0;
+
+ rcode = GT_EXPR;
+ break;
+
+ case GT_EXPR:
+ case GE_EXPR:
+ if (hi_code == LT_EXPR)
+ hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
+ else if (hi_code != LE_EXPR)
+ return 0;
+
+ if (lo_code == GT_EXPR)
+ lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
+
+ /* We now have VAR >= LO_CST && VAR <= HI_CST. */
+ rcode = LE_EXPR;
+ break;
+
+ default:
+ return 0;
+ }
+ }
+ else
+ {
+ switch (lo_code)
+ {
+ case EQ_EXPR:
+ /* See if we have VAR == CST || VAR == CST+1. */
+ if (! (hi_code == EQ_EXPR
+ && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
+ && tree_int_cst_equal (integer_one_node,
+ const_binop (MINUS_EXPR,
+ hi_cst, lo_cst, 0))))
+ return 0;
+
+ rcode = LE_EXPR;
+ break;
+
+ case LE_EXPR:
+ case LT_EXPR:
+ if (hi_code == GE_EXPR)
+ hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
+ else if (hi_code != GT_EXPR)
+ return 0;
+
+ if (lo_code == LE_EXPR)
+ lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
+
+ /* We now have VAR < LO_CST || VAR > HI_CST. */
+ rcode = GT_EXPR;
+ break;
+
+ default:
+ return 0;
+ }
+ }
+
+ /* When normalizing, it is possible to both increment the smaller constant
+ and decrement the larger constant. See if they are still ordered. */
+ if (tree_int_cst_lt (hi_cst, lo_cst))
+ return 0;
+
+ /* Fail if VAR isn't an integer. */
+ utype = TREE_TYPE (var);
+ if (! INTEGRAL_TYPE_P (utype))
+ return 0;
+
+ /* The range test is invalid if subtracting the two constants results
+ in overflow. This can happen in traditional mode. */
+ if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
+ || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
+ return 0;
+
+ if (! TREE_UNSIGNED (utype))
+ {
+ utype = unsigned_type (utype);
+ var = convert (utype, var);
+ lo_cst = convert (utype, lo_cst);
+ hi_cst = convert (utype, hi_cst);
+ }
+
+ return fold (convert (type,
+ build (rcode, utype,
+ build (MINUS_EXPR, utype, var, lo_cst),
+ const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
+}
+
+/* Find ways of folding logical expressions of LHS and RHS:
+ Try to merge two comparisons to the same innermost item.
+ Look for range tests like "ch >= '0' && ch <= '9'".
+ Look for combinations of simple terms on machines with expensive branches
+ and evaluate the RHS unconditionally.
+
+ For example, if we have p->a == 2 && p->b == 4 and we can make an
+ object large enough to span both A and B, we can do this with a comparison
+ against the object ANDed with a mask.
+
+ If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
+ operations to do this with one comparison.
+
+ We check for both normal comparisons and the BIT_AND_EXPRs made by this
+ function and the one above.
+
+ CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
+ TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
+
+ TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
+ two operands.
+
+ We return the simplified tree or 0 if no optimization is possible. */
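+/* A sketch of the constant case, with hypothetical bitfields a and b
+   sharing one word: p->a == 2 && p->b == 4 fetches the word W once and
+   tests (W & (MASKa | MASKb)) == ((2 << POSa) | (4 << POSb)). */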
+
+static tree
+fold_truthop (code, truth_type, lhs, rhs)
+ enum tree_code code;
+ tree truth_type, lhs, rhs;
+{
+ /* If this is the "or" of two comparisons, we can do something if
+ the comparisons are NE_EXPR. If this is the "and", we can do something
+ if the comparisons are EQ_EXPR. I.e.,
+ (a->b == 2 && a->c == 4) can become (a->new == NEW).
+
+ WANTED_CODE is this operation code. For single bit fields, we can
+ convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
+ comparison for one-bit fields. */
+
+ enum tree_code wanted_code;
+ enum tree_code lcode, rcode;
+ tree ll_arg, lr_arg, rl_arg, rr_arg;
+ tree ll_inner, lr_inner, rl_inner, rr_inner;
+ int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
+ int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
+ int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
+ int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
+ int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
+ enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
+ enum machine_mode lnmode, rnmode;
+ tree ll_mask, lr_mask, rl_mask, rr_mask;
+ tree l_const, r_const;
+ tree type, result;
+ int first_bit, end_bit;
+ int volatilep;
+
+ /* Start by getting the comparison codes and seeing if this looks like
+ a range test. Fail if anything is volatile. If one operand is a
+ BIT_AND_EXPR with the constant one, treat it as if it were surrounded
+ with a NE_EXPR. */
+
+ if (TREE_SIDE_EFFECTS (lhs)
+ || TREE_SIDE_EFFECTS (rhs))
+ return 0;
+
+ lcode = TREE_CODE (lhs);
+ rcode = TREE_CODE (rhs);
+
+ if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
+ lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
+
+ if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
+ rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
+
+ if (TREE_CODE_CLASS (lcode) != '<'
+ || TREE_CODE_CLASS (rcode) != '<')
+ return 0;
+
+ code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
+ ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
+
+ ll_arg = TREE_OPERAND (lhs, 0);
+ lr_arg = TREE_OPERAND (lhs, 1);
+ rl_arg = TREE_OPERAND (rhs, 0);
+ rr_arg = TREE_OPERAND (rhs, 1);
+
+ if (TREE_CODE (lr_arg) == INTEGER_CST
+ && TREE_CODE (rr_arg) == INTEGER_CST
+ && operand_equal_p (ll_arg, rl_arg, 0))
+ {
+ if (tree_int_cst_lt (lr_arg, rr_arg))
+ result = range_test (code, truth_type, lcode, rcode,
+ ll_arg, lr_arg, rr_arg);
+ else
+ result = range_test (code, truth_type, rcode, lcode,
+ ll_arg, rr_arg, lr_arg);
+
+ /* If this isn't a range test, it also isn't a comparison that
+ can be merged. However, it wins to evaluate the RHS unconditionally
+ on machines with expensive branches. */
+
+ if (result == 0 && BRANCH_COST >= 2)
+ {
+ if (TREE_CODE (ll_arg) != VAR_DECL
+ && TREE_CODE (ll_arg) != PARM_DECL)
+ {
+ /* Avoid evaluating the variable part twice. */
+ ll_arg = save_expr (ll_arg);
+ lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
+ rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
+ }
+ return build (code, truth_type, lhs, rhs);
+ }
+ return result;
+ }
+
+ /* If the RHS can be evaluated unconditionally and its operands are
+ simple, it wins to evaluate the RHS unconditionally on machines
+ with expensive branches. In this case, this isn't a comparison
+ that can be merged. */
+
+ /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
+ are with zero (tmw). */
+
+ if (BRANCH_COST >= 2
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
+ && simple_operand_p (rl_arg)
+ && simple_operand_p (rr_arg))
+ return build (code, truth_type, lhs, rhs);
+
+ /* See if the comparisons can be merged. Then get all the parameters for
+ each side. */
+
+ if ((lcode != EQ_EXPR && lcode != NE_EXPR)
+ || (rcode != EQ_EXPR && rcode != NE_EXPR))
+ return 0;
+
+ volatilep = 0;
+ ll_inner = decode_field_reference (ll_arg,
+ &ll_bitsize, &ll_bitpos, &ll_mode,
+ &ll_unsignedp, &volatilep, &ll_mask);
+ lr_inner = decode_field_reference (lr_arg,
+ &lr_bitsize, &lr_bitpos, &lr_mode,
+ &lr_unsignedp, &volatilep, &lr_mask);
+ rl_inner = decode_field_reference (rl_arg,
+ &rl_bitsize, &rl_bitpos, &rl_mode,
+ &rl_unsignedp, &volatilep, &rl_mask);
+ rr_inner = decode_field_reference (rr_arg,
+ &rr_bitsize, &rr_bitpos, &rr_mode,
+ &rr_unsignedp, &volatilep, &rr_mask);
+
+ /* The inner operation on the lhs of each
+ comparison must be the same if we are to be able to do anything.
+ Then see if we have constants. If not, the same must be true for
+ the rhs's. */
+ if (volatilep || ll_inner == 0 || rl_inner == 0
+ || ! operand_equal_p (ll_inner, rl_inner, 0))
+ return 0;
+
+ if (TREE_CODE (lr_arg) == INTEGER_CST
+ && TREE_CODE (rr_arg) == INTEGER_CST)
+ l_const = lr_arg, r_const = rr_arg;
+ else if (lr_inner == 0 || rr_inner == 0
+ || ! operand_equal_p (lr_inner, rr_inner, 0))
+ return 0;
+ else
+ l_const = r_const = 0;
+
+ /* If either comparison code is not correct for our logical operation,
+ fail. However, we can convert a one-bit comparison against zero into
+ the opposite comparison against that bit being set in the field. */
+
+ wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
+ if (lcode != wanted_code)
+ {
+ if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
+ l_const = ll_mask;
+ else
+ return 0;
+ }
+
+ if (rcode != wanted_code)
+ {
+ if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
+ r_const = rl_mask;
+ else
+ return 0;
+ }
+
+ /* See if we can find a mode that contains both fields being compared on
+ the left. If we can't, fail. Otherwise, update all constants and masks
+ to be relative to a field of that size. */
+ first_bit = MIN (ll_bitpos, rl_bitpos);
+ end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
+ lnmode = get_best_mode (end_bit - first_bit, first_bit,
+ TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
+ volatilep);
+ if (lnmode == VOIDmode)
+ return 0;
+
+ lnbitsize = GET_MODE_BITSIZE (lnmode);
+ lnbitpos = first_bit & ~ (lnbitsize - 1);
+ type = type_for_size (lnbitsize, 1);
+ xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
+
+#if BYTES_BIG_ENDIAN
+ xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
+ xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
+#endif
+
+ ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
+ size_int (xll_bitpos), 0);
+ rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
+ size_int (xrl_bitpos), 0);
+
+ /* Make sure the constants are interpreted as unsigned, so we
+ don't have sign bits outside the range of their type. */
+
+ if (l_const)
+ {
+ l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
+ l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
+ size_int (xll_bitpos), 0);
+ }
+ if (r_const)
+ {
+ r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
+ r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
+ size_int (xrl_bitpos), 0);
+ }
+
+ /* If the right sides are not constant, do the same for them. Also,
+ disallow this optimization if a size or signedness mismatch occurs
+ between the left and right sides. */
+ if (l_const == 0)
+ {
+ if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
+ || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
+ /* Make sure the two fields on the right
+ correspond to the left without being swapped. */
+ || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
+ return 0;
+
+ first_bit = MIN (lr_bitpos, rr_bitpos);
+ end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
+ rnmode = get_best_mode (end_bit - first_bit, first_bit,
+ TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
+ volatilep);
+ if (rnmode == VOIDmode)
+ return 0;
+
+ rnbitsize = GET_MODE_BITSIZE (rnmode);
+ rnbitpos = first_bit & ~ (rnbitsize - 1);
+ xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
+
+#if BYTES_BIG_ENDIAN
+ xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
+ xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
+#endif
+
+ lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
+ size_int (xlr_bitpos), 0);
+ rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
+ size_int (xrr_bitpos), 0);
+
+ /* Make a mask that corresponds to both fields being compared.
+ Do this for both items being compared. If the masks agree,
+ we can do this by masking both and comparing the masked
+ results. */
+ ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
+ lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
+ if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
+ {
+ lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
+ ll_unsignedp || rl_unsignedp);
+ rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
+ lr_unsignedp || rr_unsignedp);
+ if (! all_ones_mask_p (ll_mask, lnbitsize))
+ {
+ lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
+ rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
+ }
+ return build (wanted_code, truth_type, lhs, rhs);
+ }
+
+ /* There is still another way we can do something: If both pairs of
+ fields being compared are adjacent, we may be able to make a wider
+ field containing them both. */
+ if ((ll_bitsize + ll_bitpos == rl_bitpos
+ && lr_bitsize + lr_bitpos == rr_bitpos)
+ || (ll_bitpos == rl_bitpos + rl_bitsize
+ && lr_bitpos == rr_bitpos + rr_bitsize))
+ return build (wanted_code, truth_type,
+ make_bit_field_ref (ll_inner, type,
+ ll_bitsize + rl_bitsize,
+ MIN (ll_bitpos, rl_bitpos),
+ ll_unsignedp),
+ make_bit_field_ref (lr_inner, type,
+ lr_bitsize + rr_bitsize,
+ MIN (lr_bitpos, rr_bitpos),
+ lr_unsignedp));
+
+ return 0;
+ }
+
+ /* Handle the case of comparisons with constants. If there is something in
+ common between the masks, those bits of the constants must be the same.
+ If not, the condition is always false. Test for this to avoid generating
+ incorrect code below. */
+ result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
+ if (! integer_zerop (result)
+ && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
+ const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
+ {
+ if (wanted_code == NE_EXPR)
+ {
+ warning ("`or' of unmatched not-equal tests is always 1");
+ return convert (truth_type, integer_one_node);
+ }
+ else
+ {
+ warning ("`and' of mutually exclusive equal-tests is always zero");
+ return convert (truth_type, integer_zero_node);
+ }
+ }
+
+ /* Construct the expression we will return. First get the component
+ reference we will make. Unless the mask is all ones for the width of
+ that field, perform the mask operation. Then compare with the
+ merged constant. */
+ result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
+ ll_unsignedp || rl_unsignedp);
+
+ ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
+ if (! all_ones_mask_p (ll_mask, lnbitsize))
+ result = build (BIT_AND_EXPR, type, result, ll_mask);
+
+ return build (wanted_code, truth_type, result,
+ const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
+}
+
+/* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
+ S, a SAVE_EXPR, return the expression actually being evaluated. Note
+ that we may sometimes modify the tree. */
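+/* E.g., if T is ((void) S, a + b), a COMPOUND_EXPR inserted only to force
+   evaluation of the SAVE_EXPR S, we return a + b (a, b hypothetical);
+   the caller has already arranged to evaluate S itself. */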
+
+static tree
+strip_compound_expr (t, s)
+ tree t;
+ tree s;
+{
+ tree type = TREE_TYPE (t);
+ enum tree_code code = TREE_CODE (t);
+
+ /* See if this is the COMPOUND_EXPR we want to eliminate. */
+ if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
+ && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
+ return TREE_OPERAND (t, 1);
+
+ /* See if this is a COND_EXPR or a simple arithmetic operator. We
+ don't bother handling any other types. */
+ else if (code == COND_EXPR)
+ {
+ TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
+ TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
+ TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
+ }
+ else if (TREE_CODE_CLASS (code) == '1')
+ TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
+ else if (TREE_CODE_CLASS (code) == '<'
+ || TREE_CODE_CLASS (code) == '2')
+ {
+ TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
+ TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
+ }
+
+ return t;
+}
+
+/* Perform constant folding and related simplification of EXPR.
+ The related simplifications include x*1 => x, x*0 => 0, etc.,
+ and application of the associative law.
+ NOP_EXPR conversions may be removed freely (as long as we
+ are careful not to change the C type of the overall expression)
+ We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
+ but we can constant-fold them if they have constant operands. */
+
+tree
+fold (expr)
+ tree expr;
+{
+ register tree t = expr;
+ tree t1 = NULL_TREE;
+ tree tem;
+ tree type = TREE_TYPE (expr);
+ register tree arg0, arg1;
+ register enum tree_code code = TREE_CODE (t);
+ register int kind;
+ int invert;
+
+ /* WINS will be nonzero when the switch is done
+ if all operands are constant. */
+
+ int wins = 1;
+
+ /* Don't try to process an RTL_EXPR since its operands aren't trees. */
+ if (code == RTL_EXPR)
+ return t;
+
+ /* Return right away if already constant. */
+ if (TREE_CONSTANT (t))
+ {
+ if (code == CONST_DECL)
+ return DECL_INITIAL (t);
+ return t;
+ }
+
+ kind = TREE_CODE_CLASS (code);
+ if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
+ {
+ tree subop;
+
+ /* Special case for conversion ops that can have fixed point args. */
+ arg0 = TREE_OPERAND (t, 0);
+
+ /* Don't use STRIP_NOPS, because signedness of argument type matters. */
+ if (arg0 != 0)
+ STRIP_TYPE_NOPS (arg0);
+
+ if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
+ subop = TREE_REALPART (arg0);
+ else
+ subop = arg0;
+
+ if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ && TREE_CODE (subop) != REAL_CST
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+ )
+ /* Note that TREE_CONSTANT isn't enough:
+ static var addresses are constant but we can't
+ do arithmetic on them. */
+ wins = 0;
+ }
+ else if (kind == 'e' || kind == '<'
+ || kind == '1' || kind == '2' || kind == 'r')
+ {
+ register int len = tree_code_length[(int) code];
+ register int i;
+ for (i = 0; i < len; i++)
+ {
+ tree op = TREE_OPERAND (t, i);
+ tree subop;
+
+ if (op == 0)
+ continue; /* Valid for CALL_EXPR, at least. */
+
+ if (kind == '<' || code == RSHIFT_EXPR)
+ {
+ /* Signedness matters here. Perhaps we can refine this
+ later. */
+ STRIP_TYPE_NOPS (op);
+ }
+ else
+ {
+ /* Strip any conversions that don't change the mode. */
+ STRIP_NOPS (op);
+ }
+
+ if (TREE_CODE (op) == COMPLEX_CST)
+ subop = TREE_REALPART (op);
+ else
+ subop = op;
+
+ if (TREE_CODE (subop) != INTEGER_CST
+#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ && TREE_CODE (subop) != REAL_CST
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+ )
+ /* Note that TREE_CONSTANT isn't enough:
+ static var addresses are constant but we can't
+ do arithmetic on them. */
+ wins = 0;
+
+ if (i == 0)
+ arg0 = op;
+ else if (i == 1)
+ arg1 = op;
+ }
+ }
+
+ /* If this is a commutative operation, and ARG0 is a constant, move it
+ to ARG1 to reduce the number of tests below. */
+ if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
+ || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
+ || code == BIT_AND_EXPR)
+ && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
+ {
+ tem = arg0; arg0 = arg1; arg1 = tem;
+
+ tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
+ TREE_OPERAND (t, 1) = tem;
+ }
+
+ /* Now WINS is set as described above,
+ ARG0 is the first operand of EXPR,
+ and ARG1 is the second operand (if it has more than one operand).
+
+ First check for cases where an arithmetic operation is applied to a
+ compound, conditional, or comparison operation. Push the arithmetic
+ operation inside the compound or conditional to see if any folding
+ can then be done. Convert comparison to conditional for this purpose.
+ This also optimizes non-constant cases that used to be done in
+ expand_expr.
+
+ Before we do that, see if this is a BIT_AND_EXPR or a BIT_IOR_EXPR where
+ one of the operands is a comparison and the other is a comparison, a
+ BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
+ code below would make the expression more complex. Change it to a
+ TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
+ TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
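+ /* A sketch with hypothetical comparisons: (a < b) & (c < d) is rebuilt
+ as TRUTH_AND_EXPR, i.e. (a < b) && (c < d), and (a < b) == (c < d)
+ becomes the inversion of (a < b) ^ (c < d). */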
+
+ if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
+ || code == EQ_EXPR || code == NE_EXPR)
+ && ((truth_value_p (TREE_CODE (arg0))
+ && (truth_value_p (TREE_CODE (arg1))
+ || (TREE_CODE (arg1) == BIT_AND_EXPR
+ && integer_onep (TREE_OPERAND (arg1, 1)))))
+ || (truth_value_p (TREE_CODE (arg1))
+ && (truth_value_p (TREE_CODE (arg0))
+ || (TREE_CODE (arg0) == BIT_AND_EXPR
+ && integer_onep (TREE_OPERAND (arg0, 1)))))))
+ {
+ t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
+ : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
+ : TRUTH_XOR_EXPR,
+ type, arg0, arg1));
+
+ if (code == EQ_EXPR)
+ t = invert_truthvalue (t);
+
+ return t;
+ }
+
+ if (TREE_CODE_CLASS (code) == '1')
+ {
+ if (TREE_CODE (arg0) == COMPOUND_EXPR)
+ return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
+ fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
+ else if (TREE_CODE (arg0) == COND_EXPR)
+ {
+ t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
+ fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
+ fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
+
+ /* If this was a conversion, and all we did was to move it
+ inside the COND_EXPR, bring it back out. Then return so we
+ don't get into an infinite recursion loop taking the conversion
+ out and then back in. */
+
+ if ((code == NOP_EXPR || code == CONVERT_EXPR
+ || code == NON_LVALUE_EXPR)
+ && TREE_CODE (t) == COND_EXPR
+ && TREE_CODE (TREE_OPERAND (t, 1)) == code
+ && TREE_CODE (TREE_OPERAND (t, 2)) == code
+ && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
+ == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0))))
+ t = build1 (code, type,
+ build (COND_EXPR,
+ TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
+ TREE_OPERAND (t, 0),
+ TREE_OPERAND (TREE_OPERAND (t, 1), 0),
+ TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
+ return t;
+ }
+ else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
+ return fold (build (COND_EXPR, type, arg0,
+ fold (build1 (code, type, integer_one_node)),
+ fold (build1 (code, type, integer_zero_node))));
+ }
+ else if (TREE_CODE_CLASS (code) == '2'
+ || TREE_CODE_CLASS (code) == '<')
+ {
+ if (TREE_CODE (arg1) == COMPOUND_EXPR)
+ return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
+ fold (build (code, type,
+ arg0, TREE_OPERAND (arg1, 1))));
+ else if (TREE_CODE (arg1) == COND_EXPR
+ || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
+ {
+ tree test, true_value, false_value;
+
+ if (TREE_CODE (arg1) == COND_EXPR)
+ {
+ test = TREE_OPERAND (arg1, 0);
+ true_value = TREE_OPERAND (arg1, 1);
+ false_value = TREE_OPERAND (arg1, 2);
+ }
+ else
+ {
+ test = arg1;
+ true_value = integer_one_node;
+ false_value = integer_zero_node;
+ }
+
+ /* If ARG0 is complex we want to make sure we only evaluate
+ it once. Though this is only required if it is volatile, it
+ might be more efficient even if it is not. However, if we
+ succeed in folding one part to a constant, we do not need
+ to make this SAVE_EXPR. Since we do this optimization
+ primarily to see if we do end up with a constant and this
+ SAVE_EXPR interferes with later optimizations, suppressing
+ it when we can is important. */
+
+ if (TREE_CODE (arg0) != SAVE_EXPR
+ && ((TREE_CODE (arg0) != VAR_DECL
+ && TREE_CODE (arg0) != PARM_DECL)
+ || TREE_SIDE_EFFECTS (arg0)))
+ {
+ tree lhs = fold (build (code, type, arg0, true_value));
+ tree rhs = fold (build (code, type, arg0, false_value));
+
+ if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
+ return fold (build (COND_EXPR, type, test, lhs, rhs));
+
+ arg0 = save_expr (arg0);
+ }
+
+ test = fold (build (COND_EXPR, type, test,
+ fold (build (code, type, arg0, true_value)),
+ fold (build (code, type, arg0, false_value))));
+ if (TREE_CODE (arg0) == SAVE_EXPR)
+ return build (COMPOUND_EXPR, type,
+ convert (void_type_node, arg0),
+ strip_compound_expr (test, arg0));
+ else
+ return convert (type, test);
+ }
+
+ else if (TREE_CODE (arg0) == COMPOUND_EXPR)
+ return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
+ fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
+ else if (TREE_CODE (arg0) == COND_EXPR
+ || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
+ {
+ tree test, true_value, false_value;
+
+ if (TREE_CODE (arg0) == COND_EXPR)
+ {
+ test = TREE_OPERAND (arg0, 0);
+ true_value = TREE_OPERAND (arg0, 1);
+ false_value = TREE_OPERAND (arg0, 2);
+ }
+ else
+ {
+ test = arg0;
+ true_value = integer_one_node;
+ false_value = integer_zero_node;
+ }
+
+ if (TREE_CODE (arg1) != SAVE_EXPR
+ && ((TREE_CODE (arg1) != VAR_DECL
+ && TREE_CODE (arg1) != PARM_DECL)
+ || TREE_SIDE_EFFECTS (arg1)))
+ {
+ tree lhs = fold (build (code, type, true_value, arg1));
+ tree rhs = fold (build (code, type, false_value, arg1));
+
+ if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs)
+ || TREE_CONSTANT (arg1))
+ return fold (build (COND_EXPR, type, test, lhs, rhs));
+
+ arg1 = save_expr (arg1);
+ }
+
+ test = fold (build (COND_EXPR, type, test,
+ fold (build (code, type, true_value, arg1)),
+ fold (build (code, type, false_value, arg1))));
+ if (TREE_CODE (arg1) == SAVE_EXPR)
+ return build (COMPOUND_EXPR, type,
+ convert (void_type_node, arg1),
+ strip_compound_expr (test, arg1));
+ else
+ return convert (type, test);
+ }
+ }
+ else if (TREE_CODE_CLASS (code) == '<'
+ && TREE_CODE (arg0) == COMPOUND_EXPR)
+ return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
+ fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
+ else if (TREE_CODE_CLASS (code) == '<'
+ && TREE_CODE (arg1) == COMPOUND_EXPR)
+ return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
+ fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
+
+ switch (code)
+ {
+ case INTEGER_CST:
+ case REAL_CST:
+ case STRING_CST:
+ case COMPLEX_CST:
+ case CONSTRUCTOR:
+ return t;
+
+ case CONST_DECL:
+ return fold (DECL_INITIAL (t));
+
+ case NOP_EXPR:
+ case FLOAT_EXPR:
+ case CONVERT_EXPR:
+ case FIX_TRUNC_EXPR:
+ /* Other kinds of FIX are not handled properly by fold_convert. */
+
+ /* In addition to the cases of two conversions in a row
+ handled below, if we are converting something to its own
+ type via an object of identical or wider precision, neither
+ conversion is needed. */
+ if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
+ || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
+ && TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == TREE_TYPE (t)
+ && ((INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
+ && INTEGRAL_TYPE_P (TREE_TYPE (t)))
+ || (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
+ && FLOAT_TYPE_P (TREE_TYPE (t))))
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
+ >= TYPE_PRECISION (TREE_TYPE (t))))
+ return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
+
+ /* Two conversions in a row are not needed unless:
+ - the intermediate type is narrower than both initial and final, or
+ - the intermediate type and innermost type differ in signedness,
+ and the outermost type is wider than the intermediate, or
+ - the initial type is a pointer type and the precisions of the
+ intermediate and final types differ, or
+ - the final type is a pointer type and the precisions of the
+ initial and intermediate types differ. */
+ if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
+ || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
+ > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
+ ||
+ TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
+ > TYPE_PRECISION (TREE_TYPE (t)))
+ && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
+ == INTEGER_TYPE)
+ && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
+ == INTEGER_TYPE)
+ && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
+ != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
+ < TYPE_PRECISION (TREE_TYPE (t))))
+ && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
+ > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
+ ==
+ (TREE_UNSIGNED (TREE_TYPE (t))
+ && (TYPE_PRECISION (TREE_TYPE (t))
+ > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
+ && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
+ == POINTER_TYPE)
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
+ != TYPE_PRECISION (TREE_TYPE (t))))
+ && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
+ != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
+ return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));
+
+ if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
+ && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
+ /* Detect assigning a bitfield. */
+ && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
+ && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
+ {
+ /* Don't leave an assignment inside a conversion
+ unless assigning a bitfield. */
+ tree prev = TREE_OPERAND (t, 0);
+ TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
+ /* First do the assignment, then return converted constant. */
+ t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
+ TREE_USED (t) = 1;
+ return t;
+ }
+ if (!wins)
+ {
+ TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
+ return t;
+ }
+ return fold_convert (t, arg0);
+
+#if 0 /* This loses on &"foo"[0]. */
+ case ARRAY_REF:
+ {
+ int i;
+
+ /* Fold an expression like: "foo"[2] */
+ if (TREE_CODE (arg0) == STRING_CST
+ && TREE_CODE (arg1) == INTEGER_CST
+ && !TREE_INT_CST_HIGH (arg1)
+ && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
+ {
+ t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
+ TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
+ force_fit_type (t, 0);
+ }
+ }
+ return t;
+#endif /* 0 */
+
+ case RANGE_EXPR:
+ TREE_CONSTANT (t) = wins;
+ return t;
+
+ case NEGATE_EXPR:
+ if (wins)
+ {
+ if (TREE_CODE (arg0) == INTEGER_CST)
+ {
+ HOST_WIDE_INT low, high;
+ int overflow = neg_double (TREE_INT_CST_LOW (arg0),
+ TREE_INT_CST_HIGH (arg0),
+ &low, &high);
+ t = build_int_2 (low, high);
+ TREE_TYPE (t) = type;
+ TREE_OVERFLOW (t)
+ = (TREE_OVERFLOW (arg0)
+ | force_fit_type (t, overflow));
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
+ }
+ else if (TREE_CODE (arg0) == REAL_CST)
+ t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
+ TREE_TYPE (t) = type;
+ }
+ else if (TREE_CODE (arg0) == NEGATE_EXPR)
+ return TREE_OPERAND (arg0, 0);
+
+ /* Convert - (a - b) to (b - a) for non-floating-point. */
+ else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
+ return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg0, 0));
+
+ return t;
+
+ case ABS_EXPR:
+ if (wins)
+ {
+ if (TREE_CODE (arg0) == INTEGER_CST)
+ {
+ if (! TREE_UNSIGNED (type)
+ && TREE_INT_CST_HIGH (arg0) < 0)
+ {
+ HOST_WIDE_INT low, high;
+ int overflow = neg_double (TREE_INT_CST_LOW (arg0),
+ TREE_INT_CST_HIGH (arg0),
+ &low, &high);
+ t = build_int_2 (low, high);
+ TREE_TYPE (t) = type;
+ TREE_OVERFLOW (t)
+ = (TREE_OVERFLOW (arg0)
+ | force_fit_type (t, overflow));
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
+ }
+ }
+ else if (TREE_CODE (arg0) == REAL_CST)
+ {
+ if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
+ t = build_real (type,
+ REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
+ }
+ TREE_TYPE (t) = type;
+ }
+ else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
+ return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
+ return t;
+
+ case CONJ_EXPR:
+ if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
+ return arg0;
+ else if (TREE_CODE (arg0) == COMPLEX_EXPR)
+ return build (COMPLEX_EXPR, TREE_TYPE (arg0),
+ TREE_OPERAND (arg0, 0),
+ fold (build1 (NEGATE_EXPR,
+ TREE_TYPE (TREE_TYPE (arg0)),
+ TREE_OPERAND (arg0, 1))));
+ else if (TREE_CODE (arg0) == COMPLEX_CST)
+ return build_complex (TREE_OPERAND (arg0, 0),
+ fold (build1 (NEGATE_EXPR,
+ TREE_TYPE (TREE_TYPE (arg0)),
+ TREE_OPERAND (arg0, 1))));
+ else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
+ return fold (build (TREE_CODE (arg0), type,
+ fold (build1 (CONJ_EXPR, type,
+ TREE_OPERAND (arg0, 0))),
+ fold (build1 (CONJ_EXPR,
+ type, TREE_OPERAND (arg0, 1)))));
+ else if (TREE_CODE (arg0) == CONJ_EXPR)
+ return TREE_OPERAND (arg0, 0);
+ return t;
+
+ case BIT_NOT_EXPR:
+ if (wins)
+ {
+ if (TREE_CODE (arg0) == INTEGER_CST)
+ t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
+ ~ TREE_INT_CST_HIGH (arg0));
+ TREE_TYPE (t) = type;
+ force_fit_type (t, 0);
+ TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
+ TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
+ }
+ else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
+ return TREE_OPERAND (arg0, 0);
+ return t;
+
+ case PLUS_EXPR:
+ /* A + (-B) -> A - B */
+ if (TREE_CODE (arg1) == NEGATE_EXPR)
+ return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
+ else if (! FLOAT_TYPE_P (type))
+ {
+ if (integer_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+
+ /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
+ with a constant, and the two constants have no bits in common,
+ we should treat this as a BIT_IOR_EXPR since this may produce more
+ simplifications. */
+ if (TREE_CODE (arg0) == BIT_AND_EXPR
+ && TREE_CODE (arg1) == BIT_AND_EXPR
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
+ && integer_zerop (const_binop (BIT_AND_EXPR,
+ TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg1, 1), 0)))
+ {
+ code = BIT_IOR_EXPR;
+ goto bit_ior;
+ }
+
+ /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
+ about the case where C is a constant, just try one of the
+ four possibilities. */
+
+ if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg1, 1), 0))
+ return fold (build (MULT_EXPR, type,
+ fold (build (PLUS_EXPR, type,
+ TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0))),
+ TREE_OPERAND (arg0, 1)));
+ }
+ /* In IEEE floating point, x+0 may not equal x. */
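+ /* (E.g., for x = -0.0, x + 0.0 is +0.0: a distinct value even though
+ it compares equal, so we drop the addition only when the format is
+ not IEEE or -ffast-math is given.) */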
+ else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || flag_fast_math)
+ && real_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+ associate:
+      /* In most languages, we can't associate operations on floats
+ through parentheses. Rather than remember where the parentheses
+ were, we don't associate floats at all. It shouldn't matter much.
+ However, associating multiplications is only very slightly
+ inaccurate, so do that if -ffast-math is specified. */
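+      /* For example, in IEEE double arithmetic (1e20 + -1e20) + 1.0
+	 yields 1.0 but 1e20 + (-1e20 + 1.0) yields 0.0, so reassociation
+	 of float addition can change the result.  */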
+ if (FLOAT_TYPE_P (type)
+ && ! (flag_fast_math && code == MULT_EXPR))
+ goto binary;
+
+ /* The varsign == -1 cases happen only for addition and subtraction.
+ It says that the arg that was split was really CON minus VAR.
+ The rest of the code applies to all associative operations. */
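+      /* For example, splitting (5 - x) + y gives VAR == x, CON == 5
+	 and varsign == -1.  */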
+ if (!wins)
+ {
+ tree var, con;
+ int varsign;
+
+ if (split_tree (arg0, code, &var, &con, &varsign))
+ {
+ if (varsign == -1)
+ {
+ /* EXPR is (CON-VAR) +- ARG1. */
+ /* If it is + and VAR==ARG1, return just CONST. */
+ if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
+ return convert (TREE_TYPE (t), con);
+
+ /* If ARG0 is a constant, don't change things around;
+ instead keep all the constant computations together. */
+
+ if (TREE_CONSTANT (arg0))
+ return t;
+
+ /* Otherwise return (CON +- ARG1) - VAR. */
+ TREE_SET_CODE (t, MINUS_EXPR);
+ TREE_OPERAND (t, 1) = var;
+ TREE_OPERAND (t, 0)
+ = fold (build (code, TREE_TYPE (t), con, arg1));
+ }
+ else
+ {
+ /* EXPR is (VAR+CON) +- ARG1. */
+ /* If it is - and VAR==ARG1, return just CONST. */
+ if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
+ return convert (TREE_TYPE (t), con);
+
+ /* If ARG0 is a constant, don't change things around;
+ instead keep all the constant computations together. */
+
+ if (TREE_CONSTANT (arg0))
+ return t;
+
+ /* Otherwise return VAR +- (ARG1 +- CON). */
+ TREE_OPERAND (t, 1) = tem
+ = fold (build (code, TREE_TYPE (t), arg1, con));
+ TREE_OPERAND (t, 0) = var;
+ if (integer_zerop (tem)
+ && (code == PLUS_EXPR || code == MINUS_EXPR))
+ return convert (type, var);
+ /* If we have x +/- (c - d) [c an explicit integer]
+ change it to x -/+ (d - c) since if d is relocatable
+ then the latter can be a single immediate insn
+ and the former cannot. */
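+	      /* For example, x + (4 - d) becomes x - (d - 4); if d is
+		 the address of a relocatable symbol, d - 4 can then be
+		 a single immediate operand.  */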
+ if (TREE_CODE (tem) == MINUS_EXPR
+ && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
+ {
+ tree tem1 = TREE_OPERAND (tem, 1);
+ TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
+ TREE_OPERAND (tem, 0) = tem1;
+ TREE_SET_CODE (t,
+ (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
+ }
+ }
+ return t;
+ }
+
+ if (split_tree (arg1, code, &var, &con, &varsign))
+ {
+ if (TREE_CONSTANT (arg1))
+ return t;
+
+ if (varsign == -1)
+ TREE_SET_CODE (t,
+ (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
+
+ /* EXPR is ARG0 +- (CON +- VAR). */
+ if (TREE_CODE (t) == MINUS_EXPR
+ && operand_equal_p (var, arg0, 0))
+ {
+ /* If VAR and ARG0 cancel, return just CON or -CON. */
+ if (code == PLUS_EXPR)
+ return convert (TREE_TYPE (t), con);
+ return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
+ convert (TREE_TYPE (t), con)));
+ }
+
+ TREE_OPERAND (t, 0)
+ = fold (build (code, TREE_TYPE (t), arg0, con));
+ TREE_OPERAND (t, 1) = var;
+ if (integer_zerop (TREE_OPERAND (t, 0))
+ && TREE_CODE (t) == PLUS_EXPR)
+ return convert (TREE_TYPE (t), var);
+ return t;
+ }
+ }
+ binary:
+#if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
+ if (TREE_CODE (arg1) == REAL_CST)
+ return t;
+#endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
+ if (wins)
+ t1 = const_binop (code, arg0, arg1, 0);
+ if (t1 != NULL_TREE)
+ {
+ /* The return value should always have
+ the same type as the original expression. */
+ TREE_TYPE (t1) = TREE_TYPE (t);
+ return t1;
+ }
+ return t;
+
+ case MINUS_EXPR:
+ if (! FLOAT_TYPE_P (type))
+ {
+ if (! wins && integer_zerop (arg0))
+ return build1 (NEGATE_EXPR, type, arg1);
+ if (integer_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+
+ /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
+ about the case where C is a constant, just try one of the
+ four possibilities. */
+
+ if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg1, 1), 0))
+ return fold (build (MULT_EXPR, type,
+ fold (build (MINUS_EXPR, type,
+ TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0))),
+ TREE_OPERAND (arg0, 1)));
+ }
+ /* Convert A - (-B) to A + B. */
+ else if (TREE_CODE (arg1) == NEGATE_EXPR)
+ return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
+
+ else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || flag_fast_math)
+ {
+ /* Except with IEEE floating point, 0-x equals -x. */
+ if (! wins && real_zerop (arg0))
+ return build1 (NEGATE_EXPR, type, arg1);
+ /* Except with IEEE floating point, x-0 equals x. */
+ if (real_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+ }
+
+ /* Fold &x - &x. This can happen from &x.foo - &x.
+ This is unsafe for certain floats even in non-IEEE formats.
+ In IEEE, it is unsafe because it does wrong for NaNs.
+ Also note that operand_equal_p is always false if an operand
+ is volatile. */
+
+ if (operand_equal_p (arg0, arg1,
+ FLOAT_TYPE_P (type) && ! flag_fast_math))
+ return convert (type, integer_zero_node);
+
+ goto associate;
+
+ case MULT_EXPR:
+ if (! FLOAT_TYPE_P (type))
+ {
+ if (integer_zerop (arg1))
+ return omit_one_operand (type, arg1, arg0);
+ if (integer_onep (arg1))
+ return non_lvalue (convert (type, arg0));
+
+ /* ((A / C) * C) is A if the division is an
+ EXACT_DIV_EXPR. Since C is normally a constant,
+ just check for one of the four possibilities. */
+
+ if (TREE_CODE (arg0) == EXACT_DIV_EXPR
+ && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
+ return TREE_OPERAND (arg0, 0);
+
+ /* (a * (1 << b)) is (a << b) */
+ if (TREE_CODE (arg1) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (arg1, 0)))
+ return fold (build (LSHIFT_EXPR, type, arg0,
+ TREE_OPERAND (arg1, 1)));
+ if (TREE_CODE (arg0) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (arg0, 0)))
+ return fold (build (LSHIFT_EXPR, type, arg1,
+ TREE_OPERAND (arg0, 1)));
+ }
+ else
+ {
+ /* x*0 is 0, except for IEEE floating point. */
+ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || flag_fast_math)
+ && real_zerop (arg1))
+ return omit_one_operand (type, arg1, arg0);
+ /* In IEEE floating point, x*1 is not equivalent to x for snans.
+ However, ANSI says we can drop signals,
+ so we can do this anyway. */
+ if (real_onep (arg1))
+ return non_lvalue (convert (type, arg0));
+ /* x*2 is x+x */
+ if (! wins && real_twop (arg1))
+ {
+ tree arg = save_expr (arg0);
+ return build (PLUS_EXPR, type, arg, arg);
+ }
+ }
+ goto associate;
+
+ case BIT_IOR_EXPR:
+ bit_ior:
+ if (integer_all_onesp (arg1))
+ return omit_one_operand (type, arg1, arg0);
+ if (integer_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+ t1 = distribute_bit_expr (code, type, arg0, arg1);
+ if (t1 != NULL_TREE)
+ return t1;
+
+      /* (A << C1) | (A >> C2), where A is unsigned and C1 + C2 is the
+	 bitsize of A, is a rotate of A by C1 bits.  */
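+      /* For example, with a 32-bit unsigned A, (A << 3) | (A >> 29)
+	 is a left rotate of A by 3 bits.  */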
+
+ if ((TREE_CODE (arg0) == RSHIFT_EXPR
+ || TREE_CODE (arg0) == LSHIFT_EXPR)
+ && (TREE_CODE (arg1) == RSHIFT_EXPR
+ || TREE_CODE (arg1) == LSHIFT_EXPR)
+ && TREE_CODE (arg0) != TREE_CODE (arg1)
+ && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
+ && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
+ && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
+ && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
+ + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
+ return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
+ TREE_CODE (arg0) == LSHIFT_EXPR
+ ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
+
+ goto associate;
+
+ case BIT_XOR_EXPR:
+ if (integer_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+ if (integer_all_onesp (arg1))
+ return fold (build1 (BIT_NOT_EXPR, type, arg0));
+ goto associate;
+
+ case BIT_AND_EXPR:
+ bit_and:
+ if (integer_all_onesp (arg1))
+ return non_lvalue (convert (type, arg0));
+ if (integer_zerop (arg1))
+ return omit_one_operand (type, arg1, arg0);
+ t1 = distribute_bit_expr (code, type, arg0, arg1);
+ if (t1 != NULL_TREE)
+ return t1;
+      /* Simplify ((int)c & 0xff) into (int)c, if c is unsigned char.  */
+ if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
+ {
+ int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
+ if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
+ && (~TREE_INT_CST_LOW (arg0)
+ & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
+ return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
+ }
+ if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
+ {
+ int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
+ if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
+ && (~TREE_INT_CST_LOW (arg1)
+ & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
+ return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
+ }
+ goto associate;
+
+ case BIT_ANDTC_EXPR:
+ if (integer_all_onesp (arg0))
+ return non_lvalue (convert (type, arg1));
+ if (integer_zerop (arg0))
+ return omit_one_operand (type, arg0, arg1);
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ {
+ arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
+ code = BIT_AND_EXPR;
+ goto bit_and;
+ }
+ goto binary;
+
+ case RDIV_EXPR:
+ /* In most cases, do nothing with a divide by zero. */
+#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+#ifndef REAL_INFINITY
+ if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
+ return t;
+#endif
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+
+ /* In IEEE floating point, x/1 is not equivalent to x for snans.
+ However, ANSI says we can drop signals, so we can do this anyway. */
+ if (real_onep (arg1))
+ return non_lvalue (convert (type, arg0));
+
+ /* If ARG1 is a constant, we can convert this to a multiply by the
+ reciprocal. This does not have the same rounding properties,
+ so only do this if -ffast-math. We can actually always safely
+ do it if ARG1 is a power of two, but it's hard to tell if it is
+ or not in a portable manner. */
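+      /* For example, with -ffast-math, x / 5.0 becomes x * 0.2.  */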
+ if (TREE_CODE (arg1) == REAL_CST && flag_fast_math
+ && 0 != (tem = const_binop (code, build_real (type, dconst1),
+ arg1, 0)))
+ return fold (build (MULT_EXPR, type, arg0, tem));
+
+ goto binary;
+
+ case TRUNC_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ if (integer_onep (arg1))
+ return non_lvalue (convert (type, arg0));
+ if (integer_zerop (arg1))
+ return t;
+
+      /* If we have ((a / C1) / C2) where both divisions are the same type, try
+ to simplify. First see if C1 * C2 overflows or not. */
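+      /* For example, (a / 4) / 2 folds to a / 8, since 4 * 2 == 8 does
+	 not overflow.  */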
+ if (TREE_CODE (arg0) == code && TREE_CODE (arg1) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
+ {
+ tree new_divisor;
+
+ new_divisor = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 1), arg1, 0);
+ tem = const_binop (FLOOR_DIV_EXPR, new_divisor, arg1, 0);
+
+ if (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_LOW (tem)
+ && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_HIGH (tem))
+ {
+ /* If no overflow, divide by C1*C2. */
+ return fold (build (code, type, TREE_OPERAND (arg0, 0), new_divisor));
+ }
+ }
+
+ /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
+ where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
+ expressions, which often appear in the offsets or sizes of
+ objects with a varying size. Only deal with positive divisors
+ and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
+
+ Look for NOPs and SAVE_EXPRs inside. */
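+      /* For example, ((a * 8) + 4) / 4 folds to (a * 2) + 1, and
+	 (a * 2) / 6 folds to a / 3.  */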
+
+ if (TREE_CODE (arg1) == INTEGER_CST
+ && tree_int_cst_sgn (arg1) >= 0)
+ {
+ int have_save_expr = 0;
+ tree c2 = integer_zero_node;
+ tree xarg0 = arg0;
+
+ if (TREE_CODE (xarg0) == SAVE_EXPR)
+ have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
+
+ STRIP_NOPS (xarg0);
+
+ if (TREE_CODE (xarg0) == PLUS_EXPR
+ && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
+ c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
+ else if (TREE_CODE (xarg0) == MINUS_EXPR
+ && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
+ /* If we are doing this computation unsigned, the negate
+ is incorrect. */
+ && ! TREE_UNSIGNED (type))
+ {
+ c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
+ xarg0 = TREE_OPERAND (xarg0, 0);
+ }
+
+ if (TREE_CODE (xarg0) == SAVE_EXPR)
+ have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
+
+ STRIP_NOPS (xarg0);
+
+ if (TREE_CODE (xarg0) == MULT_EXPR
+ && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
+ && tree_int_cst_sgn (TREE_OPERAND (xarg0, 1)) >= 0
+ && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
+ TREE_OPERAND (xarg0, 1), arg1, 1))
+ || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
+ TREE_OPERAND (xarg0, 1), 1)))
+ && (tree_int_cst_sgn (c2) >= 0
+ || integer_zerop (const_binop (TRUNC_MOD_EXPR, c2,
+ arg1, 1))))
+ {
+ tree outer_div = integer_one_node;
+ tree c1 = TREE_OPERAND (xarg0, 1);
+ tree c3 = arg1;
+
+ /* If C3 > C1, set them equal and do a divide by
+ C3/C1 at the end of the operation. */
+ if (tree_int_cst_lt (c1, c3))
+ outer_div = const_binop (code, c3, c1, 0), c3 = c1;
+
+ /* The result is A * (C1/C3) + (C2/C3). */
+ t = fold (build (PLUS_EXPR, type,
+ fold (build (MULT_EXPR, type,
+ TREE_OPERAND (xarg0, 0),
+ const_binop (code, c1, c3, 1))),
+ const_binop (code, c2, c3, 1)));
+
+ if (! integer_onep (outer_div))
+ t = fold (build (code, type, t, convert (type, outer_div)));
+
+ if (have_save_expr)
+ t = save_expr (t);
+
+ return t;
+ }
+ }
+
+ goto binary;
+
+ case CEIL_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ case ROUND_MOD_EXPR:
+ case TRUNC_MOD_EXPR:
+ if (integer_onep (arg1))
+ return omit_one_operand (type, integer_zero_node, arg0);
+ if (integer_zerop (arg1))
+ return t;
+
+ /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
+ where C1 % C3 == 0. Handle similarly to the division case,
+ but don't bother with SAVE_EXPRs. */
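+      /* For example, ((a * 8) + 3) % 4 folds to 3, since (a * 8) % 4
+	 is always zero.  */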
+
+ if (TREE_CODE (arg1) == INTEGER_CST
+ && ! integer_zerop (arg1))
+ {
+ tree c2 = integer_zero_node;
+ tree xarg0 = arg0;
+
+ if (TREE_CODE (xarg0) == PLUS_EXPR
+ && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
+ c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
+ else if (TREE_CODE (xarg0) == MINUS_EXPR
+ && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
+ && ! TREE_UNSIGNED (type))
+ {
+ c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
+ xarg0 = TREE_OPERAND (xarg0, 0);
+ }
+
+ STRIP_NOPS (xarg0);
+
+ if (TREE_CODE (xarg0) == MULT_EXPR
+ && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
+ && integer_zerop (const_binop (TRUNC_MOD_EXPR,
+ TREE_OPERAND (xarg0, 1),
+ arg1, 1))
+ && tree_int_cst_sgn (c2) >= 0)
+ /* The result is (C2%C3). */
+ return omit_one_operand (type, const_binop (code, c2, arg1, 1),
+ TREE_OPERAND (xarg0, 0));
+ }
+
+ goto binary;
+
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ if (integer_zerop (arg1))
+ return non_lvalue (convert (type, arg0));
+ /* Since negative shift count is not well-defined,
+ don't try to compute it in the compiler. */
+ if (tree_int_cst_sgn (arg1) < 0)
+ return t;
+ goto binary;
+
+ case MIN_EXPR:
+ if (operand_equal_p (arg0, arg1, 0))
+ return arg0;
+ if (INTEGRAL_TYPE_P (type)
+ && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
+ return omit_one_operand (type, arg1, arg0);
+ goto associate;
+
+ case MAX_EXPR:
+ if (operand_equal_p (arg0, arg1, 0))
+ return arg0;
+ if (INTEGRAL_TYPE_P (type)
+ && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
+ return omit_one_operand (type, arg1, arg0);
+ goto associate;
+
+ case TRUTH_NOT_EXPR:
+ /* Note that the operand of this must be an int
+ and its values must be 0 or 1.
+ ("true" is a fixed value perhaps depending on the language,
+ but we don't handle values other than 1 correctly yet.) */
+ return invert_truthvalue (arg0);
+
+ case TRUTH_ANDIF_EXPR:
+ /* Note that the operands of this must be ints
+ and their values must be 0 or 1.
+ ("true" is a fixed value perhaps depending on the language.) */
+ /* If first arg is constant zero, return it. */
+ if (integer_zerop (arg0))
+ return arg0;
+ case TRUTH_AND_EXPR:
+ /* If either arg is constant true, drop it. */
+ if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
+ return non_lvalue (arg1);
+ if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
+ return non_lvalue (arg0);
+ /* If second arg is constant zero, result is zero, but first arg
+ must be evaluated. */
+ if (integer_zerop (arg1))
+ return omit_one_operand (type, arg1, arg0);
+
+ truth_andor:
+ /* We only do these simplifications if we are optimizing. */
+ if (!optimize)
+ return t;
+
+ /* Check for things like (A || B) && (A || C). We can convert this
+ to A || (B && C). Note that either operator can be any of the four
+ truth and/or operations and the transformation will still be
+ valid. Also note that we only care about order for the
+ ANDIF and ORIF operators. */
+ if (TREE_CODE (arg0) == TREE_CODE (arg1)
+ && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
+ || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
+ || TREE_CODE (arg0) == TRUTH_AND_EXPR
+ || TREE_CODE (arg0) == TRUTH_OR_EXPR))
+ {
+ tree a00 = TREE_OPERAND (arg0, 0);
+ tree a01 = TREE_OPERAND (arg0, 1);
+ tree a10 = TREE_OPERAND (arg1, 0);
+ tree a11 = TREE_OPERAND (arg1, 1);
+ int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
+ || TREE_CODE (arg0) == TRUTH_AND_EXPR)
+ && (code == TRUTH_AND_EXPR
+ || code == TRUTH_OR_EXPR));
+
+ if (operand_equal_p (a00, a10, 0))
+ return fold (build (TREE_CODE (arg0), type, a00,
+ fold (build (code, type, a01, a11))));
+ else if (commutative && operand_equal_p (a00, a11, 0))
+ return fold (build (TREE_CODE (arg0), type, a00,
+ fold (build (code, type, a01, a10))));
+ else if (commutative && operand_equal_p (a01, a10, 0))
+ return fold (build (TREE_CODE (arg0), type, a01,
+ fold (build (code, type, a00, a11))));
+
+	  /* This case is tricky because we must either have commutative
+ operators or else A10 must not have side-effects. */
+
+ else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
+ && operand_equal_p (a01, a11, 0))
+ return fold (build (TREE_CODE (arg0), type,
+ fold (build (code, type, a00, a10)),
+ a01));
+ }
+
+ /* Check for the possibility of merging component references. If our
+ lhs is another similar operation, try to merge its rhs with our
+ rhs. Then try to merge our lhs and rhs. */
+ if (TREE_CODE (arg0) == code
+ && 0 != (tem = fold_truthop (code, type,
+ TREE_OPERAND (arg0, 1), arg1)))
+ return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
+
+ if ((tem = fold_truthop (code, type, arg0, arg1)) != 0)
+ return tem;
+
+ return t;
+
+ case TRUTH_ORIF_EXPR:
+ /* Note that the operands of this must be ints
+ and their values must be 0 or true.
+ ("true" is a fixed value perhaps depending on the language.) */
+ /* If first arg is constant true, return it. */
+ if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
+ return arg0;
+ case TRUTH_OR_EXPR:
+ /* If either arg is constant zero, drop it. */
+ if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
+ return non_lvalue (arg1);
+ if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
+ return non_lvalue (arg0);
+ /* If second arg is constant true, result is true, but we must
+ evaluate first arg. */
+ if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
+ return omit_one_operand (type, arg1, arg0);
+ goto truth_andor;
+
+ case TRUTH_XOR_EXPR:
+ /* If either arg is constant zero, drop it. */
+ if (integer_zerop (arg0))
+ return non_lvalue (arg1);
+ if (integer_zerop (arg1))
+ return non_lvalue (arg0);
+ /* If either arg is constant true, this is a logical inversion. */
+ if (integer_onep (arg0))
+ return non_lvalue (invert_truthvalue (arg1));
+ if (integer_onep (arg1))
+ return non_lvalue (invert_truthvalue (arg0));
+ return t;
+
+ case EQ_EXPR:
+ case NE_EXPR:
+ case LT_EXPR:
+ case GT_EXPR:
+ case LE_EXPR:
+ case GE_EXPR:
+ /* If one arg is a constant integer, put it last. */
+ if (TREE_CODE (arg0) == INTEGER_CST
+ && TREE_CODE (arg1) != INTEGER_CST)
+ {
+ TREE_OPERAND (t, 0) = arg1;
+ TREE_OPERAND (t, 1) = arg0;
+ arg0 = TREE_OPERAND (t, 0);
+ arg1 = TREE_OPERAND (t, 1);
+ code = swap_tree_comparison (code);
+ TREE_SET_CODE (t, code);
+ }
+
+ /* Convert foo++ == CONST into ++foo == CONST + INCR.
+ First, see if one arg is constant; find the constant arg
+ and the other one. */
+ {
+ tree constop = 0, varop;
+ tree *constoploc;
+
+ if (TREE_CONSTANT (arg1))
+ constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
+ if (TREE_CONSTANT (arg0))
+ constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
+
+ if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
+ {
+ /* This optimization is invalid for ordered comparisons
+ if CONST+INCR overflows or if foo+incr might overflow.
+ This optimization is invalid for floating point due to rounding.
+ For pointer types we assume overflow doesn't happen. */
+ if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
+ || (! FLOAT_TYPE_P (TREE_TYPE (varop))
+ && (code == EQ_EXPR || code == NE_EXPR)))
+ {
+ tree newconst
+ = fold (build (PLUS_EXPR, TREE_TYPE (varop),
+ constop, TREE_OPERAND (varop, 1)));
+ TREE_SET_CODE (varop, PREINCREMENT_EXPR);
+ *constoploc = newconst;
+ return t;
+ }
+ }
+ else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
+ {
+ if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
+ || (! FLOAT_TYPE_P (TREE_TYPE (varop))
+ && (code == EQ_EXPR || code == NE_EXPR)))
+ {
+ tree newconst
+ = fold (build (MINUS_EXPR, TREE_TYPE (varop),
+ constop, TREE_OPERAND (varop, 1)));
+ TREE_SET_CODE (varop, PREDECREMENT_EXPR);
+ *constoploc = newconst;
+ return t;
+ }
+ }
+ }
+
+ /* Change X >= CST to X > (CST - 1) if CST is positive. */
+ if (TREE_CODE (arg1) == INTEGER_CST
+ && TREE_CODE (arg0) != INTEGER_CST
+ && tree_int_cst_sgn (arg1) > 0)
+ {
+ switch (TREE_CODE (t))
+ {
+ case GE_EXPR:
+ code = GT_EXPR;
+ TREE_SET_CODE (t, code);
+ arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
+ TREE_OPERAND (t, 1) = arg1;
+ break;
+
+ case LT_EXPR:
+ code = LE_EXPR;
+ TREE_SET_CODE (t, code);
+ arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
+ TREE_OPERAND (t, 1) = arg1;
+ }
+ }
+
+ /* If this is an EQ or NE comparison with zero and ARG0 is
+ (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
+ two operations, but the latter can be done in one less insn
+	 on machines that have only two-operand insns or on which a
+ constant cannot be the first operand. */
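+      /* For example, ((1 << n) & x) == 0 becomes ((x >> n) & 1) == 0.  */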
+ if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
+ && TREE_CODE (arg0) == BIT_AND_EXPR)
+ {
+ if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
+ return
+ fold (build (code, type,
+ build (BIT_AND_EXPR, TREE_TYPE (arg0),
+ build (RSHIFT_EXPR,
+ TREE_TYPE (TREE_OPERAND (arg0, 0)),
+ TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
+ convert (TREE_TYPE (arg0),
+ integer_one_node)),
+ arg1));
+ else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
+ return
+ fold (build (code, type,
+ build (BIT_AND_EXPR, TREE_TYPE (arg0),
+ build (RSHIFT_EXPR,
+ TREE_TYPE (TREE_OPERAND (arg0, 1)),
+ TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
+ convert (TREE_TYPE (arg0),
+ integer_one_node)),
+ arg1));
+ }
+
+ /* If this is an NE or EQ comparison of zero against the result of a
+ signed MOD operation whose second operand is a power of 2, make
+ the MOD operation unsigned since it is simpler and equivalent. */
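+      /* For example, x % 4 == 0 with x signed becomes
+	 (unsigned) x % 4 == 0, which reduces to a simple mask of the
+	 low two bits.  */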
+ if ((code == NE_EXPR || code == EQ_EXPR)
+ && integer_zerop (arg1)
+ && ! TREE_UNSIGNED (TREE_TYPE (arg0))
+ && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
+ || TREE_CODE (arg0) == CEIL_MOD_EXPR
+ || TREE_CODE (arg0) == FLOOR_MOD_EXPR
+ || TREE_CODE (arg0) == ROUND_MOD_EXPR)
+ && integer_pow2p (TREE_OPERAND (arg0, 1)))
+ {
+ tree newtype = unsigned_type (TREE_TYPE (arg0));
+ tree newmod = build (TREE_CODE (arg0), newtype,
+ convert (newtype, TREE_OPERAND (arg0, 0)),
+ convert (newtype, TREE_OPERAND (arg0, 1)));
+
+ return build (code, type, newmod, convert (newtype, arg1));
+ }
+
+ /* If this is an NE comparison of zero with an AND of one, remove the
+ comparison since the AND will give the correct value. */
+ if (code == NE_EXPR && integer_zerop (arg1)
+ && TREE_CODE (arg0) == BIT_AND_EXPR
+ && integer_onep (TREE_OPERAND (arg0, 1)))
+ return convert (type, arg0);
+
+ /* If we have (A & C) == C where C is a power of 2, convert this into
+ (A & C) != 0. Similarly for NE_EXPR. */
+ if ((code == EQ_EXPR || code == NE_EXPR)
+ && TREE_CODE (arg0) == BIT_AND_EXPR
+ && integer_pow2p (TREE_OPERAND (arg0, 1))
+ && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
+ return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
+ arg0, integer_zero_node);
+
+ /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
+ and similarly for >= into !=. */
+ if ((code == LT_EXPR || code == GE_EXPR)
+ && TREE_UNSIGNED (TREE_TYPE (arg0))
+ && TREE_CODE (arg1) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (arg1, 0)))
+ return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
+ build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
+ TREE_OPERAND (arg1, 1)),
+ convert (TREE_TYPE (arg0), integer_zero_node));
+
+ else if ((code == LT_EXPR || code == GE_EXPR)
+ && TREE_UNSIGNED (TREE_TYPE (arg0))
+ && (TREE_CODE (arg1) == NOP_EXPR
+ || TREE_CODE (arg1) == CONVERT_EXPR)
+ && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
+ return
+ build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
+ convert (TREE_TYPE (arg0),
+ build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
+ TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
+ convert (TREE_TYPE (arg0), integer_zero_node));
+
+ /* Simplify comparison of something with itself. (For IEEE
+ floating-point, we can only do some of these simplifications.) */
+ if (operand_equal_p (arg0, arg1, 0))
+ {
+ switch (code)
+ {
+ case EQ_EXPR:
+ case GE_EXPR:
+ case LE_EXPR:
+ if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
+ {
+ t = build_int_2 (1, 0);
+ TREE_TYPE (t) = type;
+ return t;
+ }
+ code = EQ_EXPR;
+ TREE_SET_CODE (t, code);
+ break;
+
+ case NE_EXPR:
+ /* For NE, we can only do this simplification if integer. */
+ if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
+ break;
+ /* ... fall through ... */
+ case GT_EXPR:
+ case LT_EXPR:
+ t = build_int_2 (0, 0);
+ TREE_TYPE (t) = type;
+ return t;
+ }
+ }
+
+ /* An unsigned comparison against 0 can be simplified. */
+ if (integer_zerop (arg1)
+ && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
+ || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
+ && TREE_UNSIGNED (TREE_TYPE (arg1)))
+ {
+ switch (TREE_CODE (t))
+ {
+ case GT_EXPR:
+ code = NE_EXPR;
+ TREE_SET_CODE (t, NE_EXPR);
+ break;
+ case LE_EXPR:
+ code = EQ_EXPR;
+ TREE_SET_CODE (t, EQ_EXPR);
+ break;
+ case GE_EXPR:
+ return omit_one_operand (type,
+ convert (type, integer_one_node),
+ arg0);
+ case LT_EXPR:
+ return omit_one_operand (type,
+ convert (type, integer_zero_node),
+ arg0);
+ }
+ }
+
+ /* If we are comparing an expression that just has comparisons
+ of two integer values, arithmetic expressions of those comparisons,
+ and constants, we can simplify it. There are only three cases
+ to check: the two values can either be equal, the first can be
+ greater, or the second can be greater. Fold the expression for
+ those three values. Since each value must be 0 or 1, we have
+ eight possibilities, each of which corresponds to the constant 0
+ or 1 or one of the six possible comparisons.
+
+ This handles common cases like (a > b) == 0 but also handles
+ expressions like ((x > y) - (y > x)) > 0, which supposedly
+ occur in macroized code. */
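+      /* For example, ((x > y) - (y > x)) > 0 yields 1, 0 and 0 for the
+	 greater, equal and less cases respectively; that is the pattern
+	 for GT, so the whole test folds to x > y.  */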
+
+ if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
+ {
+ tree cval1 = 0, cval2 = 0;
+ int save_p = 0;
+
+ if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
+ /* Don't handle degenerate cases here; they should already
+ have been handled anyway. */
+ && cval1 != 0 && cval2 != 0
+ && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
+ && TREE_TYPE (cval1) == TREE_TYPE (cval2)
+ && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
+ && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
+ TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
+ {
+ tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
+ tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
+
+ /* We can't just pass T to eval_subst in case cval1 or cval2
+ was the same as ARG1. */
+
+ tree high_result
+ = fold (build (code, type,
+ eval_subst (arg0, cval1, maxval, cval2, minval),
+ arg1));
+ tree equal_result
+ = fold (build (code, type,
+ eval_subst (arg0, cval1, maxval, cval2, maxval),
+ arg1));
+ tree low_result
+ = fold (build (code, type,
+ eval_subst (arg0, cval1, minval, cval2, maxval),
+ arg1));
+
+ /* All three of these results should be 0 or 1. Confirm they
+ are. Then use those values to select the proper code
+ to use. */
+
+ if ((integer_zerop (high_result)
+ || integer_onep (high_result))
+ && (integer_zerop (equal_result)
+ || integer_onep (equal_result))
+ && (integer_zerop (low_result)
+ || integer_onep (low_result)))
+ {
+ /* Make a 3-bit mask with the high-order bit being the
+		 value for `>', the next for `=', and the low for `<'.  */
+ switch ((integer_onep (high_result) * 4)
+ + (integer_onep (equal_result) * 2)
+ + integer_onep (low_result))
+ {
+ case 0:
+ /* Always false. */
+ return omit_one_operand (type, integer_zero_node, arg0);
+ case 1:
+ code = LT_EXPR;
+ break;
+ case 2:
+ code = EQ_EXPR;
+ break;
+ case 3:
+ code = LE_EXPR;
+ break;
+ case 4:
+ code = GT_EXPR;
+ break;
+ case 5:
+ code = NE_EXPR;
+ break;
+ case 6:
+ code = GE_EXPR;
+ break;
+ case 7:
+ /* Always true. */
+ return omit_one_operand (type, integer_one_node, arg0);
+ }
+
+ t = build (code, type, cval1, cval2);
+ if (save_p)
+ return save_expr (t);
+ else
+ return fold (t);
+ }
+ }
+ }
+
+ /* If this is a comparison of a field, we may be able to simplify it. */
+ if ((TREE_CODE (arg0) == COMPONENT_REF
+ || TREE_CODE (arg0) == BIT_FIELD_REF)
+ && (code == EQ_EXPR || code == NE_EXPR)
+ /* Handle the constant case even without -O
+ to make sure the warnings are given. */
+ && (optimize || TREE_CODE (arg1) == INTEGER_CST))
+ {
+ t1 = optimize_bit_field_compare (code, type, arg0, arg1);
+ return t1 ? t1 : t;
+ }
+
+ /* If this is a comparison of complex values and either or both
+	 sides are a COMPLEX_EXPR, it is best to split up the comparisons
+ and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR. This
+ may prevent needless evaluations. */
+ if ((code == EQ_EXPR || code == NE_EXPR)
+ && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
+ && (TREE_CODE (arg0) == COMPLEX_EXPR
+ || TREE_CODE (arg1) == COMPLEX_EXPR))
+ {
+ tree subtype = TREE_TYPE (TREE_TYPE (arg0));
+ tree real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
+ tree imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
+ tree real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
+ tree imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));
+
+ return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
+ : TRUTH_ORIF_EXPR),
+ type,
+ fold (build (code, type, real0, real1)),
+ fold (build (code, type, imag0, imag1))));
+ }
+
+ /* From here on, the only cases we handle are when the result is
+ known to be a constant.
+
+ To compute GT, swap the arguments and do LT.
+ To compute GE, do LT and invert the result.
+ To compute LE, swap the arguments, do LT and invert the result.
+ To compute NE, do EQ and invert the result.
+
+ Therefore, the code below must handle only EQ and LT. */
+
+ if (code == LE_EXPR || code == GT_EXPR)
+ {
+ tem = arg0, arg0 = arg1, arg1 = tem;
+ code = swap_tree_comparison (code);
+ }
+
+ /* Note that it is safe to invert for real values here because we
+ will check below in the one case that it matters. */
+
+ invert = 0;
+ if (code == NE_EXPR || code == GE_EXPR)
+ {
+ invert = 1;
+ code = invert_tree_comparison (code);
+ }
+
+ /* Compute a result for LT or EQ if args permit;
+ otherwise return T. */
+ if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
+ {
+ if (code == EQ_EXPR)
+ t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
+ == TREE_INT_CST_LOW (arg1))
+ && (TREE_INT_CST_HIGH (arg0)
+ == TREE_INT_CST_HIGH (arg1)),
+ 0);
+ else
+ t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
+ ? INT_CST_LT_UNSIGNED (arg0, arg1)
+ : INT_CST_LT (arg0, arg1)),
+ 0);
+ }
+
+ /* Assume a nonexplicit constant cannot equal an explicit one,
+ since such code would be undefined anyway.
+ Exception: on sysvr4, using #pragma weak,
+ a label can come out as 0. */
+ else if (TREE_CODE (arg1) == INTEGER_CST
+ && !integer_zerop (arg1)
+ && TREE_CONSTANT (arg0)
+ && TREE_CODE (arg0) == ADDR_EXPR
+ && code == EQ_EXPR)
+ t1 = build_int_2 (0, 0);
+
+ /* Two real constants can be compared explicitly. */
+ else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
+ {
+ /* If either operand is a NaN, the result is false with two
+ exceptions: First, an NE_EXPR is true on NaNs, but that case
+ is already handled correctly since we will be inverting the
+ result for NE_EXPR. Second, if we had inverted a LE_EXPR
+ or a GE_EXPR into a LT_EXPR, we must return true so that it
+ will be inverted into false. */
+
+ if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
+ || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
+ t1 = build_int_2 (invert && code == LT_EXPR, 0);
+
+ else if (code == EQ_EXPR)
+ t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
+ TREE_REAL_CST (arg1)),
+ 0);
+ else
+ t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
+ TREE_REAL_CST (arg1)),
+ 0);
+ }
+
+ if (t1 == NULL_TREE)
+ return t;
+
+ if (invert)
+ TREE_INT_CST_LOW (t1) ^= 1;
+
+ TREE_TYPE (t1) = type;
+ return t1;
+
+ case COND_EXPR:
+ /* Pedantic ANSI C says that a conditional expression is never an lvalue,
+ so all simple results must be passed through pedantic_non_lvalue. */
+ if (TREE_CODE (arg0) == INTEGER_CST)
+ return pedantic_non_lvalue
+ (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
+ else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
+ return pedantic_non_lvalue (omit_one_operand (type, arg1, arg0));
+
+ /* If the second operand is zero, invert the comparison and swap
+ the second and third operands. Likewise if the second operand
+ is constant and the third is not or if the third operand is
+ equivalent to the first operand of the comparison. */
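+      /* For example, (x == y ? 0 : z) becomes (x != y ? z : 0).  */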
+
+ if (integer_zerop (arg1)
+ || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
+ || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
+ && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (t, 2),
+ TREE_OPERAND (arg0, 1))))
+ {
+ /* See if this can be inverted. If it can't, possibly because
+ it was a floating-point inequality comparison, don't do
+ anything. */
+ tem = invert_truthvalue (arg0);
+
+ if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
+ {
+ arg0 = TREE_OPERAND (t, 0) = tem;
+ TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
+ TREE_OPERAND (t, 2) = arg1;
+ arg1 = TREE_OPERAND (t, 1);
+ }
+ }
+
+ /* If we have A op B ? A : C, we may be able to convert this to a
+ simpler expression, depending on the operation and the values
+ of B and C. IEEE floating point prevents this though,
+ because A or B might be -0.0 or a NaN. */
+
+ if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
+ && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
+ || flag_fast_math)
+ && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
+ arg1, TREE_OPERAND (arg0, 1)))
+ {
+ tree arg2 = TREE_OPERAND (t, 2);
+ enum tree_code comp_code = TREE_CODE (arg0);
+
+ /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
+ depending on the comparison operation. */
+ if (integer_zerop (TREE_OPERAND (arg0, 1))
+ && TREE_CODE (arg2) == NEGATE_EXPR
+ && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
+ switch (comp_code)
+ {
+ case EQ_EXPR:
+ return pedantic_non_lvalue
+ (fold (build1 (NEGATE_EXPR, type, arg1)));
+ case NE_EXPR:
+ return pedantic_non_lvalue (convert (type, arg1));
+ case GE_EXPR:
+ case GT_EXPR:
+ return pedantic_non_lvalue
+ (fold (build1 (ABS_EXPR, type, arg1)));
+ case LE_EXPR:
+ case LT_EXPR:
+ return pedantic_non_lvalue
+ (fold (build1 (NEGATE_EXPR, type,
+ fold (build1 (ABS_EXPR, type, arg1)))));
+ }
+
+ /* If this is A != 0 ? A : 0, this is simply A. For ==, it is
+ always zero. */
+
+ if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
+ {
+ if (comp_code == NE_EXPR)
+ return pedantic_non_lvalue (convert (type, arg1));
+ else if (comp_code == EQ_EXPR)
+ return pedantic_non_lvalue (convert (type, integer_zero_node));
+ }
+
+ /* If this is A op B ? A : B, this is either A, B, min (A, B),
+ or max (A, B), depending on the operation. */
+
+ if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
+ arg2, TREE_OPERAND (arg0, 0)))
+ switch (comp_code)
+ {
+ case EQ_EXPR:
+ return pedantic_non_lvalue (convert (type, arg2));
+ case NE_EXPR:
+ return pedantic_non_lvalue (convert (type, arg1));
+ case LE_EXPR:
+ case LT_EXPR:
+ return pedantic_non_lvalue
+ (fold (build (MIN_EXPR, type, arg1, arg2)));
+ case GE_EXPR:
+ case GT_EXPR:
+ return pedantic_non_lvalue
+ (fold (build (MAX_EXPR, type, arg1, arg2)));
+ }
+
+ /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
+ we might still be able to simplify this. For example,
+ if C1 is one less or one more than C2, this might have started
+ out as a MIN or MAX and been transformed by this function.
+ Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE. */
+
+ if (INTEGRAL_TYPE_P (type)
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
+ && TREE_CODE (arg2) == INTEGER_CST)
+ switch (comp_code)
+ {
+ case EQ_EXPR:
+ /* We can replace A with C1 in this case. */
+ arg1 = TREE_OPERAND (t, 1)
+ = convert (type, TREE_OPERAND (arg0, 1));
+ break;
+
+ case LT_EXPR:
+ /* If C1 is C2 + 1, this is min(A, C2). */
+ if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ const_binop (PLUS_EXPR, arg2,
+ integer_one_node, 0), 1))
+ return pedantic_non_lvalue
+ (fold (build (MIN_EXPR, type, arg1, arg2)));
+ break;
+
+ case LE_EXPR:
+ /* If C1 is C2 - 1, this is min(A, C2). */
+ if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ const_binop (MINUS_EXPR, arg2,
+ integer_one_node, 0), 1))
+ return pedantic_non_lvalue
+ (fold (build (MIN_EXPR, type, arg1, arg2)));
+ break;
+
+ case GT_EXPR:
+ /* If C1 is C2 - 1, this is max(A, C2). */
+ if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ const_binop (MINUS_EXPR, arg2,
+ integer_one_node, 0), 1))
+ return pedantic_non_lvalue
+ (fold (build (MAX_EXPR, type, arg1, arg2)));
+ break;
+
+ case GE_EXPR:
+ /* If C1 is C2 + 1, this is max(A, C2). */
+ if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
+ && operand_equal_p (TREE_OPERAND (arg0, 1),
+ const_binop (PLUS_EXPR, arg2,
+ integer_one_node, 0), 1))
+ return pedantic_non_lvalue
+ (fold (build (MAX_EXPR, type, arg1, arg2)));
+ break;
+ }
+ }
+
+ /* Convert A ? 1 : 0 to simply A. */
+ if (integer_onep (TREE_OPERAND (t, 1))
+ && integer_zerop (TREE_OPERAND (t, 2))
+ /* If we try to convert TREE_OPERAND (t, 0) to our type, the
+ call to fold will try to move the conversion inside
+ a COND, which will recurse. In that case, the COND_EXPR
+ is probably the best choice, so leave it alone. */
+ && type == TREE_TYPE (arg0))
+ return pedantic_non_lvalue (arg0);
+
+
+ /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
+ operation is simply A & 2. */
+
+ if (integer_zerop (TREE_OPERAND (t, 2))
+ && TREE_CODE (arg0) == NE_EXPR
+ && integer_zerop (TREE_OPERAND (arg0, 1))
+ && integer_pow2p (arg1)
+ && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
+ && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
+ arg1, 1))
+ return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
+
+ return t;
+
+ case COMPOUND_EXPR:
+ /* When pedantic, a compound expression can be neither an lvalue
+ nor an integer constant expression. */
+ if (TREE_SIDE_EFFECTS (arg0) || pedantic)
+ return t;
+ /* Don't let (0, 0) be null pointer constant. */
+ if (integer_zerop (arg1))
+ return non_lvalue (arg1);
+ return arg1;
+
+ case COMPLEX_EXPR:
+ if (wins)
+ return build_complex (arg0, arg1);
+ return t;
+
+ case REALPART_EXPR:
+ if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
+ return t;
+ else if (TREE_CODE (arg0) == COMPLEX_EXPR)
+ return omit_one_operand (type, TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg0, 1));
+ else if (TREE_CODE (arg0) == COMPLEX_CST)
+ return TREE_REALPART (arg0);
+ else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
+ return fold (build (TREE_CODE (arg0), type,
+ fold (build1 (REALPART_EXPR, type,
+ TREE_OPERAND (arg0, 0))),
+ fold (build1 (REALPART_EXPR,
+ type, TREE_OPERAND (arg0, 1)))));
+ return t;
+
+ case IMAGPART_EXPR:
+ if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
+ return convert (type, integer_zero_node);
+ else if (TREE_CODE (arg0) == COMPLEX_EXPR)
+ return omit_one_operand (type, TREE_OPERAND (arg0, 1),
+ TREE_OPERAND (arg0, 0));
+ else if (TREE_CODE (arg0) == COMPLEX_CST)
+ return TREE_IMAGPART (arg0);
+ else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
+ return fold (build (TREE_CODE (arg0), type,
+ fold (build1 (IMAGPART_EXPR, type,
+ TREE_OPERAND (arg0, 0))),
+ fold (build1 (IMAGPART_EXPR, type,
+ TREE_OPERAND (arg0, 1)))));
+ return t;
+
+ default:
+ return t;
+ } /* switch (code) */
+}
diff --git a/gnu/usr.bin/cc/cc_int/function.c b/gnu/usr.bin/cc/cc_int/function.c
new file mode 100644
index 0000000..9f33396
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/function.c
@@ -0,0 +1,5496 @@
+/* Expands front end tree to back end RTL for GNU C-Compiler
+ Copyright (C) 1987, 88, 89, 91, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file handles the generation of rtl code from tree structure
+ at the level of the function as a whole.
+ It creates the rtl expressions for parameters and auto variables
+ and has full responsibility for allocating stack slots.
+
+ `expand_function_start' is called at the beginning of a function,
+ before the function body is parsed, and `expand_function_end' is
+ called after parsing the body.
+
+ Call `assign_stack_local' to allocate a stack slot for a local variable.
+ This is usually done during the RTL generation for the function body,
+ but it can also be done in the reload pass when a pseudo-register does
+ not get a hard register.
+
+ Call `put_var_into_stack' when you learn, belatedly, that a variable
+ previously given a pseudo-register must in fact go in the stack.
+ This function changes the DECL_RTL to be a stack slot instead of a reg,
+ then scans all the RTL instructions so far generated to correct them. */
+
+#include "config.h"
+
+#include <stdio.h>
+
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "function.h"
+#include "insn-flags.h"
+#include "expr.h"
+#include "insn-codes.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "output.h"
+#include "basic-block.h"
+#include "obstack.h"
+#include "bytecode.h"
+
+/* Some systems use __main in a way incompatible with its use in gcc;
+ in these cases use the macros NAME__MAIN to give a quoted symbol and
+ SYMBOL__MAIN to give the same symbol without quotes for an alternative
+ entry point. You must define both, or neither. */
+#ifndef NAME__MAIN
+#define NAME__MAIN "__main"
+#define SYMBOL__MAIN __main
+#endif
+
+/* Round a value down to the largest multiple of the required alignment
+ that does not exceed it. Avoid using division in case the value is
+ negative. Assume the alignment is a power of two. */
+#define FLOOR_ROUND(VALUE,ALIGN) ((VALUE) & ~((ALIGN) - 1))
+
+/* Similar, but round up to the nearest multiple of the alignment. */
+#define CEIL_ROUND(VALUE,ALIGN) (((VALUE) + (ALIGN) - 1) & ~((ALIGN)- 1))
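+
+/* For example, FLOOR_ROUND (-13, 8) yields -16, while
+ CEIL_ROUND (13, 8) yields 16. */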
+
+/* NEED_SEPARATE_AP means that we cannot derive ap from the value of fp
+ during rtl generation. If they are different register numbers, this is
+ always true. It may also be true if
+ FIRST_PARM_OFFSET - STARTING_FRAME_OFFSET is not a constant during rtl
+ generation. See fix_lexical_addr for details. */
+
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+#define NEED_SEPARATE_AP
+#endif
+
+/* Number of bytes of args popped by function being compiled on its return.
+ Zero if no bytes are to be popped.
+ May affect compilation of return insn or of function epilogue. */
+
+int current_function_pops_args;
+
+/* Nonzero if function being compiled needs to be given an address
+ where the value should be stored. */
+
+int current_function_returns_struct;
+
+/* Nonzero if function being compiled needs to
+ return the address of where it has put a structure value. */
+
+int current_function_returns_pcc_struct;
+
+/* Nonzero if function being compiled needs to be passed a static chain. */
+
+int current_function_needs_context;
+
+/* Nonzero if function being compiled can call setjmp. */
+
+int current_function_calls_setjmp;
+
+/* Nonzero if function being compiled can call longjmp. */
+
+int current_function_calls_longjmp;
+
+/* Nonzero if function being compiled receives nonlocal gotos
+ from nested functions. */
+
+int current_function_has_nonlocal_label;
+
+/* Nonzero if function being compiled has nonlocal gotos to parent
+ function. */
+
+int current_function_has_nonlocal_goto;
+
+/* Nonzero if function being compiled contains nested functions. */
+
+int current_function_contains_functions;
+
+/* Nonzero if function being compiled can call alloca,
+ either as a subroutine or builtin. */
+
+int current_function_calls_alloca;
+
+/* Nonzero if the current function returns a pointer type */
+
+int current_function_returns_pointer;
+
+/* If some insns can be deferred to the delay slots of the epilogue, the
+ delay list for them is recorded here. */
+
+rtx current_function_epilogue_delay_list;
+
+/* If function's args have a fixed size, this is that size, in bytes.
+ Otherwise, it is -1.
+ May affect compilation of return insn or of function epilogue. */
+
+int current_function_args_size;
+
+/* # bytes the prologue should push and pretend that the caller pushed them.
+ The prologue must do this, but only if parms can be passed in registers. */
+
+int current_function_pretend_args_size;
+
+/* # of bytes of outgoing arguments required to be pushed by the prologue.
+ If this is non-zero, it means that ACCUMULATE_OUTGOING_ARGS was defined
+ and no stack adjusts will be done on function calls. */
+
+int current_function_outgoing_args_size;
+
+/* This is the offset from the arg pointer to the place where the first
+ anonymous arg can be found, if there is one. */
+
+rtx current_function_arg_offset_rtx;
+
+/* Nonzero if current function uses varargs.h or equivalent.
+ Zero for functions that use stdarg.h. */
+
+int current_function_varargs;
+
+/* Quantities of various kinds of registers
+ used for the current function's args. */
+
+CUMULATIVE_ARGS current_function_args_info;
+
+/* Name of function now being compiled. */
+
+char *current_function_name;
+
+/* If non-zero, an RTL expression for that location at which the current
+ function returns its result. Always equal to
+ DECL_RTL (DECL_RESULT (current_function_decl)), but provided
+ independently of the tree structures. */
+
+rtx current_function_return_rtx;
+
+/* Nonzero if the current function uses the constant pool. */
+
+int current_function_uses_const_pool;
+
+/* Nonzero if the current function uses pic_offset_table_rtx. */
+int current_function_uses_pic_offset_table;
+
+/* The arg pointer hard register, or the pseudo into which it was copied. */
+rtx current_function_internal_arg_pointer;
+
+/* The FUNCTION_DECL for an inline function currently being expanded. */
+tree inline_function_decl;
+
+/* Number of function calls seen so far in current function. */
+
+int function_call_count;
+
+/* List (chain of TREE_LIST) of LABEL_DECLs for all nonlocal labels
+ (labels to which there can be nonlocal gotos from nested functions)
+ in this function. */
+
+tree nonlocal_labels;
+
+/* RTX for stack slot that holds the current handler for nonlocal gotos.
+ Zero when function does not have nonlocal labels. */
+
+rtx nonlocal_goto_handler_slot;
+
+/* RTX for stack slot that holds the stack pointer value to restore
+ for a nonlocal goto.
+ Zero when function does not have nonlocal labels. */
+
+rtx nonlocal_goto_stack_level;
+
+/* Label that will go on parm cleanup code, if any.
+ Jumping to this label runs cleanup code for parameters, if
+ such code must be run. Following this code is the logical return label. */
+
+rtx cleanup_label;
+
+/* Label that will go on function epilogue.
+ Jumping to this label serves as a "return" instruction
+ on machines which require execution of the epilogue on all returns. */
+
+rtx return_label;
+
+/* List (chain of EXPR_LISTs) of pseudo-regs of SAVE_EXPRs.
+ So we can mark them all live at the end of the function, if nonopt. */
+rtx save_expr_regs;
+
+/* List (chain of EXPR_LISTs) of all stack slots in this function.
+ Made for the sake of unshare_all_rtl. */
+rtx stack_slot_list;
+
+/* Chain of all RTL_EXPRs that have insns in them. */
+tree rtl_expr_chain;
+
+/* Label to jump back to for tail recursion, or 0 if we have
+ not yet needed one for this function. */
+rtx tail_recursion_label;
+
+/* Place after which to insert the tail_recursion_label if we need one. */
+rtx tail_recursion_reentry;
+
+/* Location at which to save the argument pointer if it will need to be
+ referenced. There are two cases where this is done: if nonlocal gotos
+ exist, or if vars stored at an offset from the argument pointer will be
+ needed by inner routines. */
+
+rtx arg_pointer_save_area;
+
+/* Offset to end of allocated area of stack frame.
+ If stack grows down, this is the address of the last stack slot allocated.
+ If stack grows up, this is the address for the next slot. */
+int frame_offset;
+
+/* List (chain of TREE_LISTs) of static chains for containing functions.
+ Each link has a FUNCTION_DECL in the TREE_PURPOSE and a reg rtx
+ in an RTL_EXPR in the TREE_VALUE. */
+static tree context_display;
+
+/* List (chain of TREE_LISTs) of trampolines for nested functions.
+ The trampoline sets up the static chain and jumps to the function.
+ We supply the trampoline's address when the function's address is requested.
+
+ Each link has a FUNCTION_DECL in the TREE_PURPOSE and a reg rtx
+ in an RTL_EXPR in the TREE_VALUE. */
+static tree trampoline_list;
+
+/* Insn after which register parms and SAVE_EXPRs are born, if nonopt. */
+static rtx parm_birth_insn;
+
+#if 0
+/* Nonzero if a stack slot has been generated whose address is not
+ actually valid. It means that the generated rtl must all be scanned
+ to detect and correct the invalid addresses where they occur. */
+static int invalid_stack_slot;
+#endif
+
+/* Last insn of those whose job was to put parms into their nominal homes. */
+static rtx last_parm_insn;
+
+/* 1 + last pseudo register number used for loading a copy
+ of a parameter of this function. */
+static int max_parm_reg;
+
+/* Vector indexed by REGNO, containing location on stack in which
+ to put the parm which is nominally in pseudo register REGNO,
+ if we discover that that parm must go in the stack. */
+static rtx *parm_reg_stack_loc;
+
+#if 0 /* Turned off because 0 seems to work just as well. */
+/* Cleanup lists are required for binding levels regardless of whether
+ that binding level has cleanups or not. This node serves as the
+ cleanup list whenever an empty list is required. */
+static tree empty_cleanup_list;
+#endif
+
+/* Nonzero once virtual register instantiation has been done.
+ assign_stack_local uses frame_pointer_rtx when this is nonzero. */
+static int virtuals_instantiated;
+
+/* These variables hold pointers to functions to
+ save and restore machine-specific data,
+ in push_function_context and pop_function_context. */
+void (*save_machine_status) ();
+void (*restore_machine_status) ();
+
+/* Nonzero if we need to distinguish between the return value of this function
+ and the return value of a function called by this function. This helps
+ integrate.c. */
+
+extern int rtx_equal_function_value_matters;
+extern tree sequence_rtl_expr;
+extern tree bc_runtime_type_code ();
+extern rtx bc_build_calldesc ();
+extern char *bc_emit_trampoline ();
+extern char *bc_end_function ();
+
+/* In order to evaluate some expressions, such as function calls returning
+ structures in memory, we need to temporarily allocate stack locations.
+ We record each allocated temporary in the following structure.
+
+ Associated with each temporary slot is a nesting level. When we pop up
+ one level, all temporaries associated with the previous level are freed.
+ Normally, all temporaries are freed after the execution of the statement
+ in which they were created. However, if we are inside a ({...}) grouping,
+ the result may be in a temporary and hence must be preserved. If the
+ result could be in a temporary, we preserve it if we can determine which
+ one it is in. If we cannot determine which temporary may contain the
+ result, all temporaries are preserved. A temporary is preserved by
+ pretending it was allocated at the previous nesting level.
+
+ Automatic variables are also assigned temporary slots, at the nesting
+ level where they are defined. They are marked as "kept" so that
+ free_temp_slots will not free them. */
+
+struct temp_slot
+{
+ /* Points to next temporary slot. */
+ struct temp_slot *next;
+ /* The rtx used to reference the slot. */
+ rtx slot;
+ /* The rtx used to represent the address if not the address of the
+ slot above. May be an EXPR_LIST if multiple addresses exist. */
+ rtx address;
+ /* The size, in units, of the slot. */
+ int size;
+ /* The value of `sequence_rtl_expr' when this temporary is allocated. */
+ tree rtl_expr;
+ /* Non-zero if this temporary is currently in use. */
+ char in_use;
+ /* Nesting level at which this slot is being used. */
+ int level;
+ /* Non-zero if this should survive a call to free_temp_slots. */
+ int keep;
+};
+
+/* List of all temporaries allocated, both available and in use. */
+
+struct temp_slot *temp_slots;
+
+/* Current nesting level for temporaries. */
+
+int temp_slot_level;
+
+/* The FUNCTION_DECL node for the current function. */
+static tree this_function_decl;
+
+/* Callinfo pointer for the current function. */
+static rtx this_function_callinfo;
+
+/* The label in the bytecode file of this function's actual bytecode.
+ Not an rtx. */
+static char *this_function_bytecode;
+
+/* The call description vector for the current function. */
+static rtx this_function_calldesc;
+
+/* Size of the local variables allocated for the current function. */
+int local_vars_size;
+
+/* Current depth of the bytecode evaluation stack. */
+int stack_depth;
+
+/* Maximum depth of the evaluation stack in this function. */
+int max_stack_depth;
+
+/* Current depth in statement expressions. */
+static int stmt_expr_depth;
+
+/* This structure is used to record MEMs or pseudos used to replace VAR, any
+ SUBREGs of VAR, and any MEMs containing VAR as an address. We need to
+ maintain this list in case two operands of an insn were required to match;
+ in that case we must ensure we use the same replacement. */
+
+struct fixup_replacement
+{
+ rtx old;
+ rtx new;
+ struct fixup_replacement *next;
+};
+
+/* Forward declarations. */
+
+static struct temp_slot *find_temp_slot_from_address PROTO((rtx));
+static void put_reg_into_stack PROTO((struct function *, rtx, tree,
+ enum machine_mode, enum machine_mode));
+static void fixup_var_refs PROTO((rtx, enum machine_mode, int));
+static struct fixup_replacement
+ *find_fixup_replacement PROTO((struct fixup_replacement **, rtx));
+static void fixup_var_refs_insns PROTO((rtx, enum machine_mode, int,
+ rtx, int));
+static void fixup_var_refs_1 PROTO((rtx, enum machine_mode, rtx *, rtx,
+ struct fixup_replacement **));
+static rtx fixup_memory_subreg PROTO((rtx, rtx, int));
+static rtx walk_fixup_memory_subreg PROTO((rtx, rtx, int));
+static rtx fixup_stack_1 PROTO((rtx, rtx));
+static void optimize_bit_field PROTO((rtx, rtx, rtx *));
+static void instantiate_decls PROTO((tree, int));
+static void instantiate_decls_1 PROTO((tree, int));
+static void instantiate_decl PROTO((rtx, int, int));
+static int instantiate_virtual_regs_1 PROTO((rtx *, rtx, int));
+static void delete_handlers PROTO((void));
+static void pad_to_arg_alignment PROTO((struct args_size *, int));
+static void pad_below PROTO((struct args_size *, enum machine_mode,
+ tree));
+static tree round_down PROTO((tree, int));
+static rtx round_trampoline_addr PROTO((rtx));
+static tree blocks_nreverse PROTO((tree));
+static int all_blocks PROTO((tree, tree *));
+static int *record_insns PROTO((rtx));
+static int contains PROTO((rtx, int *));
+
+/* Pointer to chain of `struct function' for containing functions. */
+struct function *outer_function_chain;
+
+/* Given a function decl for a containing function,
+ return the `struct function' for it. */
+
+struct function *
+find_function_data (decl)
+ tree decl;
+{
+ struct function *p;
+ for (p = outer_function_chain; p; p = p->next)
+ if (p->decl == decl)
+ return p;
+ abort ();
+}
+
+/* Save the current context for compilation of a nested function.
+ This is called from language-specific code.
+ The caller is responsible for saving any language-specific status,
+ since this function knows only about language-independent variables. */
+
+void
+push_function_context ()
+{
+ struct function *p = (struct function *) xmalloc (sizeof (struct function));
+
+ p->next = outer_function_chain;
+ outer_function_chain = p;
+
+ p->name = current_function_name;
+ p->decl = current_function_decl;
+ p->pops_args = current_function_pops_args;
+ p->returns_struct = current_function_returns_struct;
+ p->returns_pcc_struct = current_function_returns_pcc_struct;
+ p->needs_context = current_function_needs_context;
+ p->calls_setjmp = current_function_calls_setjmp;
+ p->calls_longjmp = current_function_calls_longjmp;
+ p->calls_alloca = current_function_calls_alloca;
+ p->has_nonlocal_label = current_function_has_nonlocal_label;
+ p->has_nonlocal_goto = current_function_has_nonlocal_goto;
+ p->args_size = current_function_args_size;
+ p->pretend_args_size = current_function_pretend_args_size;
+ p->arg_offset_rtx = current_function_arg_offset_rtx;
+ p->varargs = current_function_varargs;
+ p->uses_const_pool = current_function_uses_const_pool;
+ p->uses_pic_offset_table = current_function_uses_pic_offset_table;
+ p->internal_arg_pointer = current_function_internal_arg_pointer;
+ p->max_parm_reg = max_parm_reg;
+ p->parm_reg_stack_loc = parm_reg_stack_loc;
+ p->outgoing_args_size = current_function_outgoing_args_size;
+ p->return_rtx = current_function_return_rtx;
+ p->nonlocal_goto_handler_slot = nonlocal_goto_handler_slot;
+ p->nonlocal_goto_stack_level = nonlocal_goto_stack_level;
+ p->nonlocal_labels = nonlocal_labels;
+ p->cleanup_label = cleanup_label;
+ p->return_label = return_label;
+ p->save_expr_regs = save_expr_regs;
+ p->stack_slot_list = stack_slot_list;
+ p->parm_birth_insn = parm_birth_insn;
+ p->frame_offset = frame_offset;
+ p->tail_recursion_label = tail_recursion_label;
+ p->tail_recursion_reentry = tail_recursion_reentry;
+ p->arg_pointer_save_area = arg_pointer_save_area;
+ p->rtl_expr_chain = rtl_expr_chain;
+ p->last_parm_insn = last_parm_insn;
+ p->context_display = context_display;
+ p->trampoline_list = trampoline_list;
+ p->function_call_count = function_call_count;
+ p->temp_slots = temp_slots;
+ p->temp_slot_level = temp_slot_level;
+ p->fixup_var_refs_queue = 0;
+ p->epilogue_delay_list = current_function_epilogue_delay_list;
+
+ save_tree_status (p);
+ save_storage_status (p);
+ save_emit_status (p);
+ init_emit ();
+ save_expr_status (p);
+ save_stmt_status (p);
+ save_varasm_status (p);
+
+ if (save_machine_status)
+ (*save_machine_status) (p);
+}
+
+/* Restore the last saved context, at the end of a nested function.
+ This function is called from language-specific code. */
+
+void
+pop_function_context ()
+{
+ struct function *p = outer_function_chain;
+
+ outer_function_chain = p->next;
+
+ current_function_name = p->name;
+ current_function_decl = p->decl;
+ current_function_pops_args = p->pops_args;
+ current_function_returns_struct = p->returns_struct;
+ current_function_returns_pcc_struct = p->returns_pcc_struct;
+ current_function_needs_context = p->needs_context;
+ current_function_calls_setjmp = p->calls_setjmp;
+ current_function_calls_longjmp = p->calls_longjmp;
+ current_function_calls_alloca = p->calls_alloca;
+ current_function_has_nonlocal_label = p->has_nonlocal_label;
+ current_function_has_nonlocal_goto = p->has_nonlocal_goto;
+ current_function_contains_functions = 1;
+ current_function_args_size = p->args_size;
+ current_function_pretend_args_size = p->pretend_args_size;
+ current_function_arg_offset_rtx = p->arg_offset_rtx;
+ current_function_varargs = p->varargs;
+ current_function_uses_const_pool = p->uses_const_pool;
+ current_function_uses_pic_offset_table = p->uses_pic_offset_table;
+ current_function_internal_arg_pointer = p->internal_arg_pointer;
+ max_parm_reg = p->max_parm_reg;
+ parm_reg_stack_loc = p->parm_reg_stack_loc;
+ current_function_outgoing_args_size = p->outgoing_args_size;
+ current_function_return_rtx = p->return_rtx;
+ nonlocal_goto_handler_slot = p->nonlocal_goto_handler_slot;
+ nonlocal_goto_stack_level = p->nonlocal_goto_stack_level;
+ nonlocal_labels = p->nonlocal_labels;
+ cleanup_label = p->cleanup_label;
+ return_label = p->return_label;
+ save_expr_regs = p->save_expr_regs;
+ stack_slot_list = p->stack_slot_list;
+ parm_birth_insn = p->parm_birth_insn;
+ frame_offset = p->frame_offset;
+ tail_recursion_label = p->tail_recursion_label;
+ tail_recursion_reentry = p->tail_recursion_reentry;
+ arg_pointer_save_area = p->arg_pointer_save_area;
+ rtl_expr_chain = p->rtl_expr_chain;
+ last_parm_insn = p->last_parm_insn;
+ context_display = p->context_display;
+ trampoline_list = p->trampoline_list;
+ function_call_count = p->function_call_count;
+ temp_slots = p->temp_slots;
+ temp_slot_level = p->temp_slot_level;
+ current_function_epilogue_delay_list = p->epilogue_delay_list;
+
+ restore_tree_status (p);
+ restore_storage_status (p);
+ restore_expr_status (p);
+ restore_emit_status (p);
+ restore_stmt_status (p);
+ restore_varasm_status (p);
+
+ if (restore_machine_status)
+ (*restore_machine_status) (p);
+
+ /* Finish doing put_var_into_stack for any of our variables
+ which became addressable during the nested function. */
+ {
+ struct var_refs_queue *queue = p->fixup_var_refs_queue;
+ for (; queue; queue = queue->next)
+ fixup_var_refs (queue->modified, queue->promoted_mode, queue->unsignedp);
+ }
+
+ free (p);
+
+ /* Reset variables that have known state during rtx generation. */
+ rtx_equal_function_value_matters = 1;
+ virtuals_instantiated = 0;
+}
+
+/* Allocate fixed slots in the stack frame of the current function. */
+
+/* Return size needed for stack frame based on slots so far allocated.
+ This size counts from zero. It is not rounded to STACK_BOUNDARY;
+ the caller may have to do that. */
+
+int
+get_frame_size ()
+{
+#ifdef FRAME_GROWS_DOWNWARD
+ return -frame_offset;
+#else
+ return frame_offset;
+#endif
+}
+
+/* Allocate a stack slot of SIZE bytes and return a MEM rtx for it
+ with machine mode MODE.
+
+ ALIGN controls the amount of alignment for the address of the slot:
+ 0 means according to MODE,
+ -1 means use BIGGEST_ALIGNMENT and round size to multiple of that,
+ positive specifies alignment boundary in bits.
+
+ We do not round to stack_boundary here. */
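+
+/* A typical call (illustrative only), allocating one SImode slot with the
+   mode's own alignment:
+
+       rtx slot = assign_stack_local (SImode, GET_MODE_SIZE (SImode), 0);
+
+   Before virtual registers are instantiated, SLOT is a MEM whose address
+   is an offset from virtual_stack_vars_rtx. */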
+
+rtx
+assign_stack_local (mode, size, align)
+ enum machine_mode mode;
+ int size;
+ int align;
+{
+ register rtx x, addr;
+ int bigend_correction = 0;
+ int alignment;
+
+ if (align == 0)
+ {
+ alignment = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (mode == BLKmode)
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ }
+ else if (align == -1)
+ {
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ size = CEIL_ROUND (size, alignment);
+ }
+ else
+ alignment = align / BITS_PER_UNIT;
+
+ /* Round frame offset to that alignment.
+ We must be careful here, since FRAME_OFFSET might be negative and
+ division with a negative dividend isn't as well defined as we might
+ like. So we instead assume that ALIGNMENT is a power of two and
+ use logical operations which are unambiguous. */
+#ifdef FRAME_GROWS_DOWNWARD
+ frame_offset = FLOOR_ROUND (frame_offset, alignment);
+#else
+ frame_offset = CEIL_ROUND (frame_offset, alignment);
+#endif
+
+ /* On a big-endian machine, if we are allocating more space than we will use,
+ use the least significant bytes of those that are allocated. */
+#if BYTES_BIG_ENDIAN
+ if (mode != BLKmode)
+ bigend_correction = size - GET_MODE_SIZE (mode);
+#endif
+
+#ifdef FRAME_GROWS_DOWNWARD
+ frame_offset -= size;
+#endif
+
+ /* If we have already instantiated virtual registers, return the actual
+ address relative to the frame pointer. */
+ if (virtuals_instantiated)
+ addr = plus_constant (frame_pointer_rtx,
+ (frame_offset + bigend_correction
+ + STARTING_FRAME_OFFSET));
+ else
+ addr = plus_constant (virtual_stack_vars_rtx,
+ frame_offset + bigend_correction);
+
+#ifndef FRAME_GROWS_DOWNWARD
+ frame_offset += size;
+#endif
+
+ x = gen_rtx (MEM, mode, addr);
+
+ stack_slot_list = gen_rtx (EXPR_LIST, VOIDmode, x, stack_slot_list);
+
+ return x;
+}
+
+/* Assign a stack slot in a containing function.
+ The first three arguments are the same as in the preceding function.
+ The last argument specifies the function to allocate in. */
+
+rtx
+assign_outer_stack_local (mode, size, align, function)
+ enum machine_mode mode;
+ int size;
+ int align;
+ struct function *function;
+{
+ register rtx x, addr;
+ int bigend_correction = 0;
+ int alignment;
+
+ /* Allocate in the memory associated with the function in whose frame
+ we are assigning. */
+ push_obstacks (function->function_obstack,
+ function->function_maybepermanent_obstack);
+
+ if (align == 0)
+ {
+ alignment = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
+ if (mode == BLKmode)
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ }
+ else if (align == -1)
+ {
+ alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ size = CEIL_ROUND (size, alignment);
+ }
+ else
+ alignment = align / BITS_PER_UNIT;
+
+ /* Round frame offset to that alignment. */
+#ifdef FRAME_GROWS_DOWNWARD
+ function->frame_offset = FLOOR_ROUND (function->frame_offset, alignment);
+#else
+ function->frame_offset = CEIL_ROUND (function->frame_offset, alignment);
+#endif
+
+ /* On a big-endian machine, if we are allocating more space than we will use,
+ use the least significant bytes of those that are allocated. */
+#if BYTES_BIG_ENDIAN
+ if (mode != BLKmode)
+ bigend_correction = size - GET_MODE_SIZE (mode);
+#endif
+
+#ifdef FRAME_GROWS_DOWNWARD
+ function->frame_offset -= size;
+#endif
+ addr = plus_constant (virtual_stack_vars_rtx,
+ function->frame_offset + bigend_correction);
+#ifndef FRAME_GROWS_DOWNWARD
+ function->frame_offset += size;
+#endif
+
+ x = gen_rtx (MEM, mode, addr);
+
+ function->stack_slot_list
+ = gen_rtx (EXPR_LIST, VOIDmode, x, function->stack_slot_list);
+
+ pop_obstacks ();
+
+ return x;
+}
+
+/* Allocate a temporary stack slot and record it for possible later
+ reuse.
+
+ MODE is the machine mode to be given to the returned rtx.
+
+ SIZE is the size in units of the space required. We do no rounding here
+ since assign_stack_local will do any required rounding.
+
+ KEEP is 1 if this slot is to be retained after a call to
+ free_temp_slots. Automatic variables for a block are allocated
+ with this flag. KEEP is 2 if we allocate a longer-term temporary,
+ whose lifetime is controlled by CLEANUP_POINT_EXPRs. */
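+
+/* A typical call (illustrative only), requesting a DImode temporary that
+   is freed by the next call to free_temp_slots:
+
+       rtx temp = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);  */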
+
+rtx
+assign_stack_temp (mode, size, keep)
+ enum machine_mode mode;
+ int size;
+ int keep;
+{
+ struct temp_slot *p, *best_p = 0;
+
+ /* First try to find an available, already-allocated temporary that is the
+ exact size we require. */
+ for (p = temp_slots; p; p = p->next)
+ if (p->size == size && GET_MODE (p->slot) == mode && ! p->in_use)
+ break;
+
+ /* If we didn't find one, try one that is larger than what we want. We
+ find the smallest such. */
+ if (p == 0)
+ for (p = temp_slots; p; p = p->next)
+ if (p->size > size && GET_MODE (p->slot) == mode && ! p->in_use
+ && (best_p == 0 || best_p->size > p->size))
+ best_p = p;
+
+ /* Make our best, if any, the one to use. */
+ if (best_p)
+ {
+ /* If there are enough aligned bytes left over, make them into a new
+ temp_slot so that the extra bytes don't get wasted. Do this only
+ for BLKmode slots, so that we can be sure of the alignment. */
+ if (GET_MODE (best_p->slot) == BLKmode)
+ {
+ int alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+ int rounded_size = CEIL_ROUND (size, alignment);
+
+ if (best_p->size - rounded_size >= alignment)
+ {
+ p = (struct temp_slot *) oballoc (sizeof (struct temp_slot));
+ p->in_use = 0;
+ p->size = best_p->size - rounded_size;
+ p->slot = gen_rtx (MEM, BLKmode,
+ plus_constant (XEXP (best_p->slot, 0),
+ rounded_size));
+ p->address = 0;
+ p->next = temp_slots;
+ temp_slots = p;
+
+ stack_slot_list = gen_rtx (EXPR_LIST, VOIDmode, p->slot,
+ stack_slot_list);
+
+ best_p->size = rounded_size;
+ }
+ }
+
+ p = best_p;
+ }
+
+ /* If we still didn't find one, make a new temporary. */
+ if (p == 0)
+ {
+ p = (struct temp_slot *) oballoc (sizeof (struct temp_slot));
+ p->size = size;
+ /* If the temp slot mode doesn't indicate the alignment,
+ use the largest possible, so no one will be disappointed. */
+ p->slot = assign_stack_local (mode, size, mode == BLKmode ? -1 : 0);
+ p->address = 0;
+ p->next = temp_slots;
+ temp_slots = p;
+ }
+
+ p->in_use = 1;
+ p->rtl_expr = sequence_rtl_expr;
+ if (keep == 2)
+ {
+ p->level = target_temp_slot_level;
+ p->keep = 0;
+ }
+ else
+ {
+ p->level = temp_slot_level;
+ p->keep = keep;
+ }
+ return p->slot;
+}
+
+/* Combine temporary stack slots which are adjacent on the stack.
+
+ This allows for better use of already allocated stack space. This is only
+ done for BLKmode slots because we can be sure that we won't have alignment
+ problems in this case. */
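+
+/* For example (illustrative offsets): a free 8-byte BLKmode slot at frame
+   offset 16 and a free 4-byte BLKmode slot at offset 24 are adjacent, so
+   they are merged into a single free 12-byte slot at offset 16. */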
+
+void
+combine_temp_slots ()
+{
+ struct temp_slot *p, *q;
+ struct temp_slot *prev_p, *prev_q;
+ /* Determine where to free back to after this function. */
+ rtx free_pointer = rtx_alloc (CONST_INT);
+
+ for (p = temp_slots, prev_p = 0; p; p = prev_p ? prev_p->next : temp_slots)
+ {
+ int delete_p = 0;
+ if (! p->in_use && GET_MODE (p->slot) == BLKmode)
+ for (q = p->next, prev_q = p; q; q = prev_q->next)
+ {
+ int delete_q = 0;
+ if (! q->in_use && GET_MODE (q->slot) == BLKmode)
+ {
+ if (rtx_equal_p (plus_constant (XEXP (p->slot, 0), p->size),
+ XEXP (q->slot, 0)))
+ {
+ /* Q comes after P; combine Q into P. */
+ p->size += q->size;
+ delete_q = 1;
+ }
+ else if (rtx_equal_p (plus_constant (XEXP (q->slot, 0), q->size),
+ XEXP (p->slot, 0)))
+ {
+ /* P comes after Q; combine P into Q. */
+ q->size += p->size;
+ delete_p = 1;
+ break;
+ }
+ }
+ /* Either delete Q or advance past it. */
+ if (delete_q)
+ prev_q->next = q->next;
+ else
+ prev_q = q;
+ }
+ /* Either delete P or advance past it. */
+ if (delete_p)
+ {
+ if (prev_p)
+ prev_p->next = p->next;
+ else
+ temp_slots = p->next;
+ }
+ else
+ prev_p = p;
+ }
+
+ /* Free all the RTL made by plus_constant. */
+ rtx_free (free_pointer);
+}
+
+/* Find the temp slot corresponding to the object at address X. */
+
+static struct temp_slot *
+find_temp_slot_from_address (x)
+ rtx x;
+{
+ struct temp_slot *p;
+ rtx next;
+
+ for (p = temp_slots; p; p = p->next)
+ {
+ if (! p->in_use)
+ continue;
+ else if (XEXP (p->slot, 0) == x
+ || p->address == x)
+ return p;
+
+ else if (p->address != 0 && GET_CODE (p->address) == EXPR_LIST)
+ for (next = p->address; next; next = XEXP (next, 1))
+ if (XEXP (next, 0) == x)
+ return p;
+ }
+
+ return 0;
+}
+
+/* Indicate that NEW is an alternate way of referring to the temp slot
+ that was previously known by OLD. */
+
+void
+update_temp_slot_address (old, new)
+ rtx old, new;
+{
+ struct temp_slot *p = find_temp_slot_from_address (old);
+
+ /* If none, return. Else add NEW as an alias. */
+ if (p == 0)
+ return;
+ else if (p->address == 0)
+ p->address = new;
+ else
+ {
+ if (GET_CODE (p->address) != EXPR_LIST)
+ p->address = gen_rtx (EXPR_LIST, VOIDmode, p->address, NULL_RTX);
+
+ p->address = gen_rtx (EXPR_LIST, VOIDmode, new, p->address);
+ }
+}
+
+/* If X could be a reference to a temporary slot, mark that slot as belonging
+ to the to one level higher. If X matched one of our slots, just mark that
+ one. Otherwise, we can't easily predict which it is, so upgrade all of
+ them. Kept slots need not be touched.
+
+ This is called when an ({...}) construct occurs and a statement
+ returns a value in memory. */
+
+void
+preserve_temp_slots (x)
+ rtx x;
+{
+ struct temp_slot *p;
+
+ if (x == 0)
+ return;
+
+ /* If X is a register that is being used as a pointer, see if we have
+ a temporary slot we know it points to. To be consistent with
+ the code below, we really should preserve all non-kept slots
+ if we can't find a match, but that seems to be much too costly. */
+ if (GET_CODE (x) == REG && REGNO_POINTER_FLAG (REGNO (x))
+ && (p = find_temp_slot_from_address (x)) != 0)
+ {
+ p->level--;
+ return;
+ }
+
+ /* If X is not in memory or is at a constant address, it cannot be in
+ a temporary slot. */
+ if (GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0)))
+ return;
+
+ /* First see if we can find a match. */
+ p = find_temp_slot_from_address (XEXP (x, 0));
+ if (p != 0)
+ {
+ p->level--;
+ return;
+ }
+
+ /* Otherwise, preserve all non-kept slots at this level. */
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && ! p->keep)
+ p->level--;
+}
+
+/* X is the result of an RTL_EXPR. If it is a temporary slot associated
+ with that RTL_EXPR, promote it into a temporary slot at the present
+ level so it will not be freed when we free slots made in the
+ RTL_EXPR. */
+
+void
+preserve_rtl_expr_result (x)
+ rtx x;
+{
+ struct temp_slot *p;
+
+ /* If X is not in memory or is at a constant address, it cannot be in
+ a temporary slot. */
+ if (x == 0 || GET_CODE (x) != MEM || CONSTANT_P (XEXP (x, 0)))
+ return;
+
+ /* If we can find a match, move it to our level. */
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && rtx_equal_p (x, p->slot))
+ {
+ p->level = temp_slot_level;
+ p->rtl_expr = 0;
+ return;
+ }
+
+ return;
+}
+
+/* Free all temporaries used so far. This is normally called at the end
+ of generating code for a statement. Don't free any temporaries
+ currently in use for an RTL_EXPR that hasn't yet been emitted.
+ We could eventually do better than this, since such a temporary can be
+ reused while generating the same RTL_EXPR, but this is complex and
+ probably not worthwhile. */
+
+void
+free_temp_slots ()
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && ! p->keep
+ && p->rtl_expr == 0)
+ p->in_use = 0;
+
+ combine_temp_slots ();
+}
+
+/* Free all temporary slots used in T, an RTL_EXPR node. */
+
+void
+free_temps_for_rtl_expr (t)
+ tree t;
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ if (p->rtl_expr == t)
+ p->in_use = 0;
+
+ combine_temp_slots ();
+}
+
+/* Push deeper into the nesting level for stack temporaries. */
+
+void
+push_temp_slots ()
+{
+ temp_slot_level++;
+}
+
+/* Pop a temporary nesting level. All slots in use in the current level
+ are freed. */
+
+void
+pop_temp_slots ()
+{
+ struct temp_slot *p;
+
+ for (p = temp_slots; p; p = p->next)
+ if (p->in_use && p->level == temp_slot_level && p->rtl_expr == 0)
+ p->in_use = 0;
+
+ combine_temp_slots ();
+
+ temp_slot_level--;
+}
+
+/* Retroactively move an auto variable from a register to a stack slot.
+ This is done when an address-reference to the variable is seen. */
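+
+/* For example (hypothetical user code):
+
+       int i;
+       int *p = &i;
+
+   Taking the address of I means I can no longer live only in a register,
+   so its pseudo is given a stack home and all existing references to it
+   are rewritten by fixup_var_refs. */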
+
+void
+put_var_into_stack (decl)
+ tree decl;
+{
+ register rtx reg;
+ enum machine_mode promoted_mode, decl_mode;
+ struct function *function = 0;
+ tree context;
+
+ if (output_bytecode)
+ return;
+
+ context = decl_function_context (decl);
+
+ /* Get the current rtl used for this object and its original mode. */
+ reg = TREE_CODE (decl) == SAVE_EXPR ? SAVE_EXPR_RTL (decl) : DECL_RTL (decl);
+
+ /* No need to do anything if decl has no rtx yet
+ since in that case caller is setting TREE_ADDRESSABLE
+ and a stack slot will be assigned when the rtl is made. */
+ if (reg == 0)
+ return;
+
+ /* Get the declared mode for this object. */
+ decl_mode = (TREE_CODE (decl) == SAVE_EXPR ? TYPE_MODE (TREE_TYPE (decl))
+ : DECL_MODE (decl));
+ /* Get the mode it's actually stored in. */
+ promoted_mode = GET_MODE (reg);
+
+ /* If this variable comes from an outer function,
+ find that function's saved context. */
+ if (context != current_function_decl)
+ for (function = outer_function_chain; function; function = function->next)
+ if (function->decl == context)
+ break;
+
+ /* If this is a variable-size object with a pseudo to address it,
+ put that pseudo into the stack, if the var is nonlocal. */
+ if (DECL_NONLOCAL (decl)
+ && GET_CODE (reg) == MEM
+ && GET_CODE (XEXP (reg, 0)) == REG
+ && REGNO (XEXP (reg, 0)) > LAST_VIRTUAL_REGISTER)
+ {
+ reg = XEXP (reg, 0);
+ decl_mode = promoted_mode = GET_MODE (reg);
+ }
+
+ /* Now we should have a value that resides in one or more pseudo regs. */
+
+ if (GET_CODE (reg) == REG)
+ put_reg_into_stack (function, reg, TREE_TYPE (decl),
+ promoted_mode, decl_mode);
+ else if (GET_CODE (reg) == CONCAT)
+ {
+ /* A CONCAT contains two pseudos; put them both in the stack.
+ We do it so they end up consecutive. */
+ enum machine_mode part_mode = GET_MODE (XEXP (reg, 0));
+ tree part_type = TREE_TYPE (TREE_TYPE (decl));
+#ifdef STACK_GROWS_DOWNWARD
+ /* Since part 0 should have a lower address, do it second. */
+ put_reg_into_stack (function, XEXP (reg, 1),
+ part_type, part_mode, part_mode);
+ put_reg_into_stack (function, XEXP (reg, 0),
+ part_type, part_mode, part_mode);
+#else
+ put_reg_into_stack (function, XEXP (reg, 0),
+ part_type, part_mode, part_mode);
+ put_reg_into_stack (function, XEXP (reg, 1),
+ part_type, part_mode, part_mode);
+#endif
+
+ /* Change the CONCAT into a combined MEM for both parts. */
+ PUT_CODE (reg, MEM);
+ /* The two parts are in memory order already.
+ Use the lower part's address as ours. */
+ XEXP (reg, 0) = XEXP (XEXP (reg, 0), 0);
+ /* Prevent sharing of rtl that might lose. */
+ if (GET_CODE (XEXP (reg, 0)) == PLUS)
+ XEXP (reg, 0) = copy_rtx (XEXP (reg, 0));
+ }
+}
+
+/* Subroutine of put_var_into_stack. This puts a single pseudo reg REG
+ into the stack frame of FUNCTION (0 means the current function).
+ DECL_MODE is the machine mode of the user-level data type.
+ PROMOTED_MODE is the machine mode of the register. */
+
+static void
+put_reg_into_stack (function, reg, type, promoted_mode, decl_mode)
+ struct function *function;
+ rtx reg;
+ tree type;
+ enum machine_mode promoted_mode, decl_mode;
+{
+ rtx new = 0;
+
+ if (function)
+ {
+ if (REGNO (reg) < function->max_parm_reg)
+ new = function->parm_reg_stack_loc[REGNO (reg)];
+ if (new == 0)
+ new = assign_outer_stack_local (decl_mode, GET_MODE_SIZE (decl_mode),
+ 0, function);
+ }
+ else
+ {
+ if (REGNO (reg) < max_parm_reg)
+ new = parm_reg_stack_loc[REGNO (reg)];
+ if (new == 0)
+ new = assign_stack_local (decl_mode, GET_MODE_SIZE (decl_mode), 0);
+ }
+
+ XEXP (reg, 0) = XEXP (new, 0);
+ /* `volatil' bit means one thing for MEMs, another entirely for REGs. */
+ REG_USERVAR_P (reg) = 0;
+ PUT_CODE (reg, MEM);
+ PUT_MODE (reg, decl_mode);
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_IN_STRUCT_P (reg) = AGGREGATE_TYPE_P (type);
+
+ /* Now make sure that all refs to the variable, previously made
+ when it was a register, are fixed up to be valid again. */
+ if (function)
+ {
+ struct var_refs_queue *temp;
+
+ /* Variable is inherited; fix it up when we get back to its function. */
+ push_obstacks (function->function_obstack,
+ function->function_maybepermanent_obstack);
+
+ /* See comment in restore_tree_status in tree.c for why this needs to be
+ on the saveable obstack. */
+ temp
+ = (struct var_refs_queue *) savealloc (sizeof (struct var_refs_queue));
+ temp->modified = reg;
+ temp->promoted_mode = promoted_mode;
+ temp->unsignedp = TREE_UNSIGNED (type);
+ temp->next = function->fixup_var_refs_queue;
+ function->fixup_var_refs_queue = temp;
+ pop_obstacks ();
+ }
+ else
+ /* Variable is local; fix it up now. */
+ fixup_var_refs (reg, promoted_mode, TREE_UNSIGNED (type));
+}
+
+static void
+fixup_var_refs (var, promoted_mode, unsignedp)
+ rtx var;
+ enum machine_mode promoted_mode;
+ int unsignedp;
+{
+ tree pending;
+ rtx first_insn = get_insns ();
+ struct sequence_stack *stack = sequence_stack;
+ tree rtl_exps = rtl_expr_chain;
+
+ /* Must scan all insns for stack-refs that exceed the limit. */
+ fixup_var_refs_insns (var, promoted_mode, unsignedp, first_insn, stack == 0);
+
+ /* Scan all pending sequences too. */
+ for (; stack; stack = stack->next)
+ {
+ push_to_sequence (stack->first);
+ fixup_var_refs_insns (var, promoted_mode, unsignedp,
+ stack->first, stack->next != 0);
+ /* Update remembered end of sequence
+ in case we added an insn at the end. */
+ stack->last = get_last_insn ();
+ end_sequence ();
+ }
+
+ /* Scan all waiting RTL_EXPRs too. */
+ for (pending = rtl_exps; pending; pending = TREE_CHAIN (pending))
+ {
+ rtx seq = RTL_EXPR_SEQUENCE (TREE_VALUE (pending));
+ if (seq != const0_rtx && seq != 0)
+ {
+ push_to_sequence (seq);
+ fixup_var_refs_insns (var, promoted_mode, unsignedp, seq, 0);
+ end_sequence ();
+ }
+ }
+}
+
+/* REPLACEMENTS is a pointer to a list of struct fixup_replacement and X is
+ some part of an insn. Return a struct fixup_replacement whose OLD
+ value is equal to X. Allocate a new structure if no such entry exists. */
+
+static struct fixup_replacement *
+find_fixup_replacement (replacements, x)
+ struct fixup_replacement **replacements;
+ rtx x;
+{
+ struct fixup_replacement *p;
+
+ /* See if we have already replaced this. */
+ for (p = *replacements; p && p->old != x; p = p->next)
+ ;
+
+ if (p == 0)
+ {
+ p = (struct fixup_replacement *) oballoc (sizeof (struct fixup_replacement));
+ p->old = x;
+ p->new = 0;
+ p->next = *replacements;
+ *replacements = p;
+ }
+
+ return p;
+}
+
+/* Scan the insn-chain starting with INSN for refs to VAR
+ and fix them up. TOPLEVEL is nonzero if this chain is the
+ main chain of insns for the current function. */
+
+static void
+fixup_var_refs_insns (var, promoted_mode, unsignedp, insn, toplevel)
+ rtx var;
+ enum machine_mode promoted_mode;
+ int unsignedp;
+ rtx insn;
+ int toplevel;
+{
+ rtx call_dest = 0;
+
+ while (insn)
+ {
+ rtx next = NEXT_INSN (insn);
+ rtx note;
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ /* If this is a CLOBBER of VAR, delete it.
+
+ If it has a REG_LIBCALL note, delete the REG_LIBCALL
+ and REG_RETVAL notes too. */
+ if (GET_CODE (PATTERN (insn)) == CLOBBER
+ && XEXP (PATTERN (insn), 0) == var)
+ {
+ if ((note = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0)
+ /* The REG_LIBCALL note will go away since we are going to
+ turn INSN into a NOTE, so just delete the
+ corresponding REG_RETVAL note. */
+ remove_note (XEXP (note, 0),
+ find_reg_note (XEXP (note, 0), REG_RETVAL,
+ NULL_RTX));
+
+ /* In unoptimized compilation, we shouldn't call delete_insn
+ except in jump.c doing warnings. */
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+
+ /* The insn to load VAR from a home in the arglist
+ is now a no-op. When we see it, just delete it. */
+ else if (toplevel
+ && GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == var
+ /* If this represents the result of an insn group,
+ don't delete the insn. */
+ && find_reg_note (insn, REG_RETVAL, NULL_RTX) == 0
+ && rtx_equal_p (SET_SRC (PATTERN (insn)), var))
+ {
+ /* In unoptimized compilation, we shouldn't call delete_insn
+ except in jump.c doing warnings. */
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ if (insn == last_parm_insn)
+ last_parm_insn = PREV_INSN (next);
+ }
+ else
+ {
+ struct fixup_replacement *replacements = 0;
+ rtx next_insn = NEXT_INSN (insn);
+
+#ifdef SMALL_REGISTER_CLASSES
+ /* If the insn that copies the results of a CALL_INSN
+ into a pseudo now references VAR, we have to use an
+ intermediate pseudo since we want the life of the
+ return value register to be only a single insn.
+
+ If we don't use an intermediate pseudo, such things as address
+ computations needed to make the address of VAR valid could be
+ placed between the CALL_INSN and INSN.
+
+ To make sure this doesn't happen, we record the destination
+ of the CALL_INSN and see if the next insn uses both that
+ and VAR. */
+
+ if (call_dest != 0 && GET_CODE (insn) == INSN
+ && reg_mentioned_p (var, PATTERN (insn))
+ && reg_mentioned_p (call_dest, PATTERN (insn)))
+ {
+ rtx temp = gen_reg_rtx (GET_MODE (call_dest));
+
+ emit_insn_before (gen_move_insn (temp, call_dest), insn);
+
+ PATTERN (insn) = replace_rtx (PATTERN (insn),
+ call_dest, temp);
+ }
+
+ if (GET_CODE (insn) == CALL_INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ call_dest = SET_DEST (PATTERN (insn));
+ else if (GET_CODE (insn) == CALL_INSN
+ && GET_CODE (PATTERN (insn)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
+ call_dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
+ else
+ call_dest = 0;
+#endif
+
+ /* See if we have to do anything to INSN now that VAR is in
+ memory. If it needs to be loaded into a pseudo, use a single
+ pseudo for the entire insn in case there is a MATCH_DUP
+ between two operands. We pass a pointer to the head of
+ a list of struct fixup_replacements. If fixup_var_refs_1
+ needs to allocate pseudos or replacement MEMs (for SUBREGs),
+ it will record them in this list.
+
+ If it allocated a pseudo for any replacement, we copy into
+ it here. */
+
+ fixup_var_refs_1 (var, promoted_mode, &PATTERN (insn), insn,
+ &replacements);
+
+ /* If this is last_parm_insn, and any instructions were output
+ after it to fix it up, then we must set last_parm_insn to
+ the last such instruction emitted. */
+ if (insn == last_parm_insn)
+ last_parm_insn = PREV_INSN (next_insn);
+
+ while (replacements)
+ {
+ if (GET_CODE (replacements->new) == REG)
+ {
+ rtx insert_before;
+ rtx seq;
+
+ /* OLD might be a (subreg (mem)). */
+ if (GET_CODE (replacements->old) == SUBREG)
+ replacements->old
+ = fixup_memory_subreg (replacements->old, insn, 0);
+ else
+ replacements->old
+ = fixup_stack_1 (replacements->old, insn);
+
+ insert_before = insn;
+
+ /* If we are changing the mode, do a conversion.
+ This might be wasteful, but combine.c will
+ eliminate much of the waste. */
+
+ if (GET_MODE (replacements->new)
+ != GET_MODE (replacements->old))
+ {
+ start_sequence ();
+ convert_move (replacements->new,
+ replacements->old, unsignedp);
+ seq = gen_sequence ();
+ end_sequence ();
+ }
+ else
+ seq = gen_move_insn (replacements->new,
+ replacements->old);
+
+ emit_insn_before (seq, insert_before);
+ }
+
+ replacements = replacements->next;
+ }
+ }
+
+ /* Also fix up any invalid exprs in the REG_NOTES of this insn.
+ But don't touch other insns referred to by reg-notes;
+ we will get them elsewhere. */
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (GET_CODE (note) != INSN_LIST)
+ XEXP (note, 0)
+ = walk_fixup_memory_subreg (XEXP (note, 0), insn, 1);
+ }
+ insn = next;
+ }
+}
+
+/* VAR is a MEM that used to be a pseudo register with mode PROMOTED_MODE.
+ See if the rtx expression at *LOC in INSN needs to be changed.
+
+ REPLACEMENTS is a pointer to a list head that starts out zero, but may
+ contain a list of original rtx's and replacements. If we find that we need
+ to modify this insn by replacing a memory reference with a pseudo or by
+ making a new MEM to implement a SUBREG, we consult that list to see if
+ we have already chosen a replacement. If none has already been allocated,
+ we allocate it and update the list. fixup_var_refs_insns will copy VAR
+ or the SUBREG, as appropriate, to the pseudo. */
+
+static void
+fixup_var_refs_1 (var, promoted_mode, loc, insn, replacements)
+ register rtx var;
+ enum machine_mode promoted_mode;
+ register rtx *loc;
+ rtx insn;
+ struct fixup_replacement **replacements;
+{
+ register int i;
+ register rtx x = *loc;
+ RTX_CODE code = GET_CODE (x);
+ register char *fmt;
+ register rtx tem, tem1;
+ struct fixup_replacement *replacement;
+
+ switch (code)
+ {
+ case MEM:
+ if (var == x)
+ {
+ /* If we already have a replacement, use it. Otherwise,
+ try to fix up this address in case it is invalid. */
+
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new)
+ {
+ *loc = replacement->new;
+ return;
+ }
+
+ *loc = replacement->new = x = fixup_stack_1 (x, insn);
+
+ /* Unless we are forcing memory to register or we changed the mode,
+ we can leave things the way they are if the insn is valid. */
+
+ INSN_CODE (insn) = -1;
+ if (! flag_force_mem && GET_MODE (x) == promoted_mode
+ && recog_memoized (insn) >= 0)
+ return;
+
+ *loc = replacement->new = gen_reg_rtx (promoted_mode);
+ return;
+ }
+
+ /* If X contains VAR, we need to unshare it here so that we update
+ each occurrence separately. But all identical MEMs in one insn
+ must be replaced with the same rtx because of the possibility of
+ MATCH_DUPs. */
+
+ if (reg_mentioned_p (var, x))
+ {
+ replacement = find_fixup_replacement (replacements, x);
+ if (replacement->new == 0)
+ replacement->new = copy_most_rtx (x, var);
+
+ *loc = x = replacement->new;
+ }
+ break;
+
+ case REG:
+ case CC0:
+ case PC:
+ case CONST_INT:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ return;
+
+ case SIGN_EXTRACT:
+ case ZERO_EXTRACT:
+ /* Note that in some cases those types of expressions are altered
+ by optimize_bit_field, and do not survive to get here. */
+ if (XEXP (x, 0) == var
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && SUBREG_REG (XEXP (x, 0)) == var))
+ {
+ /* Get TEM as a valid MEM in the mode presently in the insn.
+
+ We don't worry about the possibility of MATCH_DUP here; it
+ is highly unlikely and would be tricky to handle. */
+
+ tem = XEXP (x, 0);
+ if (GET_CODE (tem) == SUBREG)
+ tem = fixup_memory_subreg (tem, insn, 1);
+ tem = fixup_stack_1 (tem, insn);
+
+ /* Unless we want to load from memory, get TEM into the proper mode
+ for an extract from memory. This can only be done if the
+ extract is at a constant position and length. */
+
+ if (! flag_force_mem && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 2)) == CONST_INT
+ && ! mode_dependent_address_p (XEXP (tem, 0))
+ && ! MEM_VOLATILE_P (tem))
+ {
+ enum machine_mode wanted_mode = VOIDmode;
+ enum machine_mode is_mode = GET_MODE (tem);
+ int width = INTVAL (XEXP (x, 1));
+ int pos = INTVAL (XEXP (x, 2));
+
+#ifdef HAVE_extzv
+ if (GET_CODE (x) == ZERO_EXTRACT)
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_extzv][1];
+#endif
+#ifdef HAVE_extv
+ if (GET_CODE (x) == SIGN_EXTRACT)
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_extv][1];
+#endif
+ /* If we have a narrower mode, we can do something. */
+ if (wanted_mode != VOIDmode
+ && GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
+ {
+ int offset = pos / BITS_PER_UNIT;
+ rtx old_pos = XEXP (x, 2);
+ rtx newmem;
+
+ /* If the bytes and bits are counted differently, we
+ must adjust the offset. */
+#if BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
+ offset = (GET_MODE_SIZE (is_mode)
+ - GET_MODE_SIZE (wanted_mode) - offset);
+#endif
+
+ pos %= GET_MODE_BITSIZE (wanted_mode);
+
+ newmem = gen_rtx (MEM, wanted_mode,
+ plus_constant (XEXP (tem, 0), offset));
+ RTX_UNCHANGING_P (newmem) = RTX_UNCHANGING_P (tem);
+ MEM_VOLATILE_P (newmem) = MEM_VOLATILE_P (tem);
+ MEM_IN_STRUCT_P (newmem) = MEM_IN_STRUCT_P (tem);
+
+ /* Make the change and see if the insn remains valid. */
+ INSN_CODE (insn) = -1;
+ XEXP (x, 0) = newmem;
+ XEXP (x, 2) = GEN_INT (pos);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ /* Otherwise, restore old position. XEXP (x, 0) will be
+ restored later. */
+ XEXP (x, 2) = old_pos;
+ }
+ }
+
+ /* If we get here, the bitfield extract insn can't accept a memory
+ reference. Copy the input into a register. */
+
+ tem1 = gen_reg_rtx (GET_MODE (tem));
+ emit_insn_before (gen_move_insn (tem1, tem), insn);
+ XEXP (x, 0) = tem1;
+ return;
+ }
+ break;
+
+ case SUBREG:
+ if (SUBREG_REG (x) == var)
+ {
+ /* If this is a special SUBREG made because VAR was promoted
+ from a wider mode, replace it with VAR and call ourself
+ recursively, this time saying that the object previously
+ had its current mode (by virtue of the SUBREG). */
+
+ if (SUBREG_PROMOTED_VAR_P (x))
+ {
+ *loc = var;
+ fixup_var_refs_1 (var, GET_MODE (var), loc, insn, replacements);
+ return;
+ }
+
+ /* If this SUBREG makes VAR wider, it has become a paradoxical
+ SUBREG with VAR in memory, but these aren't allowed at this
+ stage of the compilation. So load VAR into a pseudo and take
+ a SUBREG of that pseudo. */
+ if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (var)))
+ {
+ replacement = find_fixup_replacement (replacements, var);
+ if (replacement->new == 0)
+ replacement->new = gen_reg_rtx (GET_MODE (var));
+ SUBREG_REG (x) = replacement->new;
+ return;
+ }
+
+ /* See if we have already found a replacement for this SUBREG.
+ If so, use it. Otherwise, make a MEM and see if the insn
+ is recognized. If not, or if we should force MEM into a register,
+ make a pseudo for this SUBREG. */
+ replacement = find_fixup_replacement (replacements, x);
+ if (replacement->new)
+ {
+ *loc = replacement->new;
+ return;
+ }
+
+ replacement->new = *loc = fixup_memory_subreg (x, insn, 0);
+
+ INSN_CODE (insn) = -1;
+ if (! flag_force_mem && recog_memoized (insn) >= 0)
+ return;
+
+ *loc = replacement->new = gen_reg_rtx (GET_MODE (x));
+ return;
+ }
+ break;
+
+ case SET:
+ /* First do special simplification of bit-field references. */
+ if (GET_CODE (SET_DEST (x)) == SIGN_EXTRACT
+ || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
+ optimize_bit_field (x, insn, 0);
+ if (GET_CODE (SET_SRC (x)) == SIGN_EXTRACT
+ || GET_CODE (SET_SRC (x)) == ZERO_EXTRACT)
+ optimize_bit_field (x, insn, NULL_PTR);
+
+ /* If SET_DEST is now a paradoxical SUBREG, put the result of this
+ insn into a pseudo and store the low part of the pseudo into VAR. */
+ if (GET_CODE (SET_DEST (x)) == SUBREG
+ && SUBREG_REG (SET_DEST (x)) == var
+ && (GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
+ > GET_MODE_SIZE (GET_MODE (var))))
+ {
+ SET_DEST (x) = tem = gen_reg_rtx (GET_MODE (SET_DEST (x)));
+ emit_insn_after (gen_move_insn (var, gen_lowpart (GET_MODE (var),
+ tem)),
+ insn);
+ break;
+ }
+
+ {
+ rtx dest = SET_DEST (x);
+ rtx src = SET_SRC (x);
+ rtx outerdest = dest;
+
+ while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == ZERO_EXTRACT)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (src) == SUBREG)
+ src = XEXP (src, 0);
+
+ /* If VAR does not appear at the top level of the SET
+ just scan the lower levels of the tree. */
+
+ if (src != var && dest != var)
+ break;
+
+ /* We will need to rerecognize this insn. */
+ INSN_CODE (insn) = -1;
+
+#ifdef HAVE_insv
+ if (GET_CODE (outerdest) == ZERO_EXTRACT && dest == var)
+ {
+ /* Since this case will return, ensure we fixup all the
+ operands here. */
+ fixup_var_refs_1 (var, promoted_mode, &XEXP (outerdest, 1),
+ insn, replacements);
+ fixup_var_refs_1 (var, promoted_mode, &XEXP (outerdest, 2),
+ insn, replacements);
+ fixup_var_refs_1 (var, promoted_mode, &SET_SRC (x),
+ insn, replacements);
+
+ tem = XEXP (outerdest, 0);
+
+ /* Clean up (SUBREG:SI (MEM:mode ...) 0)
+ that may appear inside a ZERO_EXTRACT.
+ This was legitimate when the MEM was a REG. */
+ if (GET_CODE (tem) == SUBREG
+ && SUBREG_REG (tem) == var)
+ tem = fixup_memory_subreg (tem, insn, 1);
+ else
+ tem = fixup_stack_1 (tem, insn);
+
+ if (GET_CODE (XEXP (outerdest, 1)) == CONST_INT
+ && GET_CODE (XEXP (outerdest, 2)) == CONST_INT
+ && ! mode_dependent_address_p (XEXP (tem, 0))
+ && ! MEM_VOLATILE_P (tem))
+ {
+ enum machine_mode wanted_mode
+ = insn_operand_mode[(int) CODE_FOR_insv][0];
+ enum machine_mode is_mode = GET_MODE (tem);
+ int width = INTVAL (XEXP (outerdest, 1));
+ int pos = INTVAL (XEXP (outerdest, 2));
+
+ /* If we have a narrower mode, we can do something. */
+ if (GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
+ {
+ int offset = pos / BITS_PER_UNIT;
+ rtx old_pos = XEXP (outerdest, 2);
+ rtx newmem;
+
+#if BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
+ offset = (GET_MODE_SIZE (is_mode)
+ - GET_MODE_SIZE (wanted_mode) - offset);
+#endif
+
+ pos %= GET_MODE_BITSIZE (wanted_mode);
+
+ newmem = gen_rtx (MEM, wanted_mode,
+ plus_constant (XEXP (tem, 0), offset));
+ RTX_UNCHANGING_P (newmem) = RTX_UNCHANGING_P (tem);
+ MEM_VOLATILE_P (newmem) = MEM_VOLATILE_P (tem);
+ MEM_IN_STRUCT_P (newmem) = MEM_IN_STRUCT_P (tem);
+
+ /* Make the change and see if the insn remains valid. */
+ INSN_CODE (insn) = -1;
+ XEXP (outerdest, 0) = newmem;
+ XEXP (outerdest, 2) = GEN_INT (pos);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ /* Otherwise, restore old position. XEXP (x, 0) will be
+ restored later. */
+ XEXP (outerdest, 2) = old_pos;
+ }
+ }
+
+ /* If we get here, the bit-field store doesn't allow memory
+ or isn't located at a constant position. Load the value into
+ a register, do the store, and put it back into memory. */
+
+ tem1 = gen_reg_rtx (GET_MODE (tem));
+ emit_insn_before (gen_move_insn (tem1, tem), insn);
+ emit_insn_after (gen_move_insn (tem, tem1), insn);
+ XEXP (outerdest, 0) = tem1;
+ return;
+ }
+#endif
+
+ /* STRICT_LOW_PART is a no-op on memory references
+ and it can cause combinations to be unrecognizable,
+ so eliminate it. */
+
+ if (dest == var && GET_CODE (SET_DEST (x)) == STRICT_LOW_PART)
+ SET_DEST (x) = XEXP (SET_DEST (x), 0);
+
+ /* A valid insn to copy VAR into or out of a register
+ must be left alone, to avoid an infinite loop here.
+ If the reference to VAR is by a subreg, fix that up,
+ since SUBREG is not valid for a memref.
+ Also fix up the address of the stack slot.
+
+ Note that we must not try to recognize the insn until
+ after we know that we have valid addresses and no
+ (subreg (mem ...) ...) constructs, since these interfere
+ with determining the validity of the insn. */
+
+ if ((SET_SRC (x) == var
+ || (GET_CODE (SET_SRC (x)) == SUBREG
+ && SUBREG_REG (SET_SRC (x)) == var))
+ && (GET_CODE (SET_DEST (x)) == REG
+ || (GET_CODE (SET_DEST (x)) == SUBREG
+ && GET_CODE (SUBREG_REG (SET_DEST (x))) == REG))
+ && x == single_set (PATTERN (insn)))
+ {
+ rtx pat;
+
+ replacement = find_fixup_replacement (replacements, SET_SRC (x));
+ if (replacement->new)
+ SET_SRC (x) = replacement->new;
+ else if (GET_CODE (SET_SRC (x)) == SUBREG)
+ SET_SRC (x) = replacement->new
+ = fixup_memory_subreg (SET_SRC (x), insn, 0);
+ else
+ SET_SRC (x) = replacement->new
+ = fixup_stack_1 (SET_SRC (x), insn);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ /* INSN is not valid, but we know that we want to
+ copy SET_SRC (x) to SET_DEST (x) in some way. So
+ we generate the move and see whether it requires more
+ than one insn. If it does, we emit those insns and
+ delete INSN. Otherwise, we can just replace the pattern
+ of INSN; we have already verified above that INSN has
+ no function other than to do X. */
+
+ pat = gen_move_insn (SET_DEST (x), SET_SRC (x));
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ emit_insn_after (pat, insn);
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ else
+ PATTERN (insn) = pat;
+
+ return;
+ }
+
+ if ((SET_DEST (x) == var
+ || (GET_CODE (SET_DEST (x)) == SUBREG
+ && SUBREG_REG (SET_DEST (x)) == var))
+ && (GET_CODE (SET_SRC (x)) == REG
+ || (GET_CODE (SET_SRC (x)) == SUBREG
+ && GET_CODE (SUBREG_REG (SET_SRC (x))) == REG))
+ && x == single_set (PATTERN (insn)))
+ {
+ rtx pat;
+
+ if (GET_CODE (SET_DEST (x)) == SUBREG)
+ SET_DEST (x) = fixup_memory_subreg (SET_DEST (x), insn, 0);
+ else
+ SET_DEST (x) = fixup_stack_1 (SET_DEST (x), insn);
+
+ if (recog_memoized (insn) >= 0)
+ return;
+
+ pat = gen_move_insn (SET_DEST (x), SET_SRC (x));
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ emit_insn_after (pat, insn);
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ else
+ PATTERN (insn) = pat;
+
+ return;
+ }
+
+ /* Otherwise, storing into VAR must be handled specially
+ by storing into a temporary and copying that into VAR
+ with a new insn after this one. Note that this case
+ will be used when storing into a promoted scalar since
+ the insn will now have different modes on the input
+ and output and hence will be invalid (except for the case
+ of setting it to a constant, which does not need any
+ change if it is valid). We generate extra code in that case,
+ but combine.c will eliminate it. */
+
+ if (dest == var)
+ {
+ rtx temp;
+ rtx fixeddest = SET_DEST (x);
+
+ /* STRICT_LOW_PART can be discarded around a MEM. */
+ if (GET_CODE (fixeddest) == STRICT_LOW_PART)
+ fixeddest = XEXP (fixeddest, 0);
+ /* Convert (SUBREG (MEM)) to a MEM in a changed mode. */
+ if (GET_CODE (fixeddest) == SUBREG)
+ fixeddest = fixup_memory_subreg (fixeddest, insn, 0);
+ else
+ fixeddest = fixup_stack_1 (fixeddest, insn);
+
+ temp = gen_reg_rtx (GET_MODE (SET_SRC (x)) == VOIDmode
+ ? GET_MODE (fixeddest)
+ : GET_MODE (SET_SRC (x)));
+
+ emit_insn_after (gen_move_insn (fixeddest,
+ gen_lowpart (GET_MODE (fixeddest),
+ temp)),
+ insn);
+
+ SET_DEST (x) = temp;
+ }
+ }
+ }
+
+ /* Nothing special about this RTX; fix its operands. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ fixup_var_refs_1 (var, promoted_mode, &XEXP (x, i), insn, replacements);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ fixup_var_refs_1 (var, promoted_mode, &XVECEXP (x, i, j),
+ insn, replacements);
+ }
+ }
+}
+
+/* Given X, an rtx of the form (SUBREG:m1 (MEM:m2 addr)),
+ return an rtx (MEM:m1 newaddr) which is equivalent.
+ If any insns must be emitted to compute NEWADDR, put them before INSN.
+
+ UNCRITICAL nonzero means accept paradoxical subregs.
+ This is used for subregs found inside of ZERO_EXTRACTs and in REG_NOTES. */
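+
+/* Illustrative transformation, assuming 4-byte words and a little-endian
+   target:
+
+       (subreg:SI (mem:DI addr) 1)  ==>  (mem:SI (plus addr (const_int 4)))
+
+   with any insns needed to compute the new address emitted before INSN. */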
+
+static rtx
+fixup_memory_subreg (x, insn, uncritical)
+ rtx x;
+ rtx insn;
+ int uncritical;
+{
+ int offset = SUBREG_WORD (x) * UNITS_PER_WORD;
+ rtx addr = XEXP (SUBREG_REG (x), 0);
+ enum machine_mode mode = GET_MODE (x);
+ rtx saved, result;
+
+ /* Paradoxical SUBREGs are usually invalid during RTL generation. */
+ if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
+ && ! uncritical)
+ abort ();
+
+#if BYTES_BIG_ENDIAN
+ offset += (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode)));
+#endif
+ addr = plus_constant (addr, offset);
+ if (!flag_force_addr && memory_address_p (mode, addr))
+ /* Shortcut if no insns need be emitted. */
+ return change_address (SUBREG_REG (x), mode, addr);
+ start_sequence ();
+ result = change_address (SUBREG_REG (x), mode, addr);
+ emit_insn_before (gen_sequence (), insn);
+ end_sequence ();
+ return result;
+}
+
+/* Do fixup_memory_subreg on all (SUBREG (MEM ...) ...) contained in X.
+ Replace subexpressions of X in place.
+ If X itself is a (SUBREG (MEM ...) ...), return the replacement expression.
+ Otherwise return X, with its contents possibly altered.
+
+ If any insns must be emitted to compute NEWADDR, put them before INSN.
+
+ UNCRITICAL is as in fixup_memory_subreg. */
+
+static rtx
+walk_fixup_memory_subreg (x, insn, uncritical)
+ register rtx x;
+ rtx insn;
+ int uncritical;
+{
+ register enum rtx_code code;
+ register char *fmt;
+ register int i;
+
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+
+ if (code == SUBREG && GET_CODE (SUBREG_REG (x)) == MEM)
+ return fixup_memory_subreg (x, insn, uncritical);
+
+ /* Nothing special about this RTX; fix its operands. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = walk_fixup_memory_subreg (XEXP (x, i), insn, uncritical);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j)
+ = walk_fixup_memory_subreg (XVECEXP (x, i, j), insn, uncritical);
+ }
+ }
+ return x;
+}
+
+/* For each memory ref within X, if it refers to a stack slot
+ with an out of range displacement, put the address in a temp register
+ (emitting new insns before INSN to load these registers)
+ and alter the memory ref to use that register.
+ Replace each such MEM rtx with a copy, to avoid clobberage. */
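+
+/* For example (illustrative rtl): if the displacement in
+
+       (mem:SI (plus (reg virtual-stack-vars) (const_int 40000)))
+
+   is out of range for the target's addressing modes, the sum is loaded
+   into a new pseudo R and the reference becomes (mem:SI (reg R)). */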
+
+static rtx
+fixup_stack_1 (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register int i;
+ register RTX_CODE code = GET_CODE (x);
+ register char *fmt;
+
+ if (code == MEM)
+ {
+ register rtx ad = XEXP (x, 0);
+ /* If we have the address of a stack slot but it's not valid
+ (displacement is too large), compute the sum in a register. */
+ if (GET_CODE (ad) == PLUS
+ && GET_CODE (XEXP (ad, 0)) == REG
+ && ((REGNO (XEXP (ad, 0)) >= FIRST_VIRTUAL_REGISTER
+ && REGNO (XEXP (ad, 0)) <= LAST_VIRTUAL_REGISTER)
+ || XEXP (ad, 0) == current_function_internal_arg_pointer)
+ && GET_CODE (XEXP (ad, 1)) == CONST_INT)
+ {
+ rtx temp, seq;
+ if (memory_address_p (GET_MODE (x), ad))
+ return x;
+
+ start_sequence ();
+ temp = copy_to_reg (ad);
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, insn);
+ return change_address (x, VOIDmode, temp);
+ }
+ return x;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = fixup_stack_1 (XEXP (x, i), insn);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j) = fixup_stack_1 (XVECEXP (x, i, j), insn);
+ }
+ }
+ return x;
+}
+
+/* Optimization: a bit-field instruction whose field
+ happens to be a byte or halfword in memory
+ can be changed to a move instruction.
+
+ We call here when INSN is an insn to examine or store into a bit-field.
+ BODY is the SET-rtx to be altered.
+
+ EQUIV_MEM is the table `reg_equiv_mem' if that is available; else 0.
+ (Currently this is called only from function.c, and EQUIV_MEM
+ is always 0.) */
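+
+/* For example (illustrative rtl): an extraction such as
+
+       (set (reg:SI r)
+            (zero_extract:SI (mem:SI a) (const_int 8) (const_int 8)))
+
+   references a whole, suitably aligned byte, so it can be rewritten as a
+   QImode load of the proper byte of A followed by a zero-extension. */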
+
+static void
+optimize_bit_field (body, insn, equiv_mem)
+ rtx body;
+ rtx insn;
+ rtx *equiv_mem;
+{
+ register rtx bitfield;
+ int destflag;
+ rtx seq = 0;
+ enum machine_mode mode;
+
+ if (GET_CODE (SET_DEST (body)) == SIGN_EXTRACT
+ || GET_CODE (SET_DEST (body)) == ZERO_EXTRACT)
+ bitfield = SET_DEST (body), destflag = 1;
+ else
+ bitfield = SET_SRC (body), destflag = 0;
+
+ /* First check that the field being stored has constant size and position
+ and is in fact a byte or halfword suitably aligned. */
+
+ if (GET_CODE (XEXP (bitfield, 1)) == CONST_INT
+ && GET_CODE (XEXP (bitfield, 2)) == CONST_INT
+ && ((mode = mode_for_size (INTVAL (XEXP (bitfield, 1)), MODE_INT, 1))
+ != BLKmode)
+ && INTVAL (XEXP (bitfield, 2)) % INTVAL (XEXP (bitfield, 1)) == 0)
+ {
+ register rtx memref = 0;
+
+ /* Now check that the containing word is memory, not a register,
+ and that it is safe to change the machine mode. */
+
+ if (GET_CODE (XEXP (bitfield, 0)) == MEM)
+ memref = XEXP (bitfield, 0);
+ else if (GET_CODE (XEXP (bitfield, 0)) == REG
+ && equiv_mem != 0)
+ memref = equiv_mem[REGNO (XEXP (bitfield, 0))];
+ else if (GET_CODE (XEXP (bitfield, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (bitfield, 0))) == MEM)
+ memref = SUBREG_REG (XEXP (bitfield, 0));
+ else if (GET_CODE (XEXP (bitfield, 0)) == SUBREG
+ && equiv_mem != 0
+ && GET_CODE (SUBREG_REG (XEXP (bitfield, 0))) == REG)
+ memref = equiv_mem[REGNO (SUBREG_REG (XEXP (bitfield, 0)))];
+
+ if (memref
+ && ! mode_dependent_address_p (XEXP (memref, 0))
+ && ! MEM_VOLATILE_P (memref))
+ {
+ /* Now adjust the address, first for any subreg'ing
+ that we are now getting rid of,
+ and then for which byte of the word is wanted. */
+
+ register int offset = INTVAL (XEXP (bitfield, 2));
+ /* Adjust OFFSET to count bits from low-address byte. */
+#if BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
+ offset = (GET_MODE_BITSIZE (GET_MODE (XEXP (bitfield, 0)))
+ - offset - INTVAL (XEXP (bitfield, 1)));
+#endif
+ /* Adjust OFFSET to count bytes from low-address byte. */
+ offset /= BITS_PER_UNIT;
+ if (GET_CODE (XEXP (bitfield, 0)) == SUBREG)
+ {
+ offset += SUBREG_WORD (XEXP (bitfield, 0)) * UNITS_PER_WORD;
+#if BYTES_BIG_ENDIAN
+ offset -= (MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (XEXP (bitfield, 0))))
+ - MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (memref))));
+#endif
+ }
+
+ memref = change_address (memref, mode,
+ plus_constant (XEXP (memref, 0), offset));
+
+ /* Store this memory reference where
+ we found the bit field reference. */
+
+ if (destflag)
+ {
+ validate_change (insn, &SET_DEST (body), memref, 1);
+ if (! CONSTANT_ADDRESS_P (SET_SRC (body)))
+ {
+ rtx src = SET_SRC (body);
+ while (GET_CODE (src) == SUBREG
+ && SUBREG_WORD (src) == 0)
+ src = SUBREG_REG (src);
+ if (GET_MODE (src) != GET_MODE (memref))
+ src = gen_lowpart (GET_MODE (memref), SET_SRC (body));
+ validate_change (insn, &SET_SRC (body), src, 1);
+ }
+ else if (GET_MODE (SET_SRC (body)) != VOIDmode
+ && GET_MODE (SET_SRC (body)) != GET_MODE (memref))
+ /* This shouldn't happen because anything that didn't have
+ one of these modes should have been converted explicitly
+ and then referenced through a subreg.
+ This is so because the original bit-field was
+ handled by agg_mode and so its tree structure had
+ the same mode that memref now has. */
+ abort ();
+ }
+ else
+ {
+ rtx dest = SET_DEST (body);
+
+ while (GET_CODE (dest) == SUBREG
+ && SUBREG_WORD (dest) == 0)
+ dest = SUBREG_REG (dest);
+
+ validate_change (insn, &SET_DEST (body), dest, 1);
+
+ if (GET_MODE (dest) == GET_MODE (memref))
+ validate_change (insn, &SET_SRC (body), memref, 1);
+ else
+ {
+ /* Convert the mem ref to the destination mode. */
+ rtx newreg = gen_reg_rtx (GET_MODE (dest));
+
+ start_sequence ();
+ convert_move (newreg, memref,
+ GET_CODE (SET_SRC (body)) == ZERO_EXTRACT);
+ seq = get_insns ();
+ end_sequence ();
+
+ validate_change (insn, &SET_SRC (body), newreg, 1);
+ }
+ }
+
+ /* See if we can convert this extraction or insertion into
+ a simple move insn. We might not be able to do so if this
+ was, for example, part of a PARALLEL.
+
+ If we succeed, write out any needed conversions. If we fail,
+ it is hard to guess why we failed, so don't do anything
+ special; just let the optimization be suppressed. */
+
+ if (apply_change_group () && seq)
+ emit_insns_before (seq, insn);
+ }
+ }
+}
+
+/* These routines are responsible for converting virtual register references
+ to the actual hard register references once RTL generation is complete.
+
+ The following four variables are used for communication between the
+ routines. They contain the offsets of the virtual registers from their
+ respective hard registers. */
+
+static int in_arg_offset;
+static int var_offset;
+static int dynamic_offset;
+static int out_arg_offset;
+
+/* In most machines, the stack pointer register is equivalent to the bottom
+ of the stack. */
+
+#ifndef STACK_POINTER_OFFSET
+#define STACK_POINTER_OFFSET 0
+#endif
+
+/* If not defined, pick an appropriate default for the offset of dynamically
+ allocated memory depending on the value of ACCUMULATE_OUTGOING_ARGS,
+ REG_PARM_STACK_SPACE, and OUTGOING_REG_PARM_STACK_SPACE. */
+
+#ifndef STACK_DYNAMIC_OFFSET
+
+#ifdef ACCUMULATE_OUTGOING_ARGS
+/* The bottom of the stack points to the actual arguments. If
+ REG_PARM_STACK_SPACE is defined, this includes the space for the register
+ parameters. However, if OUTGOING_REG_PARM_STACK_SPACE is not defined,
+ stack space for register parameters is not pushed by the caller, but
+ rather is part of the fixed stack areas and hence not included in
+ `current_function_outgoing_args_size'. Nevertheless, we must allow
+ for it when allocating dynamic stack objects. */
+
+#if defined(REG_PARM_STACK_SPACE) && ! defined(OUTGOING_REG_PARM_STACK_SPACE)
+#define STACK_DYNAMIC_OFFSET(FNDECL) \
+(current_function_outgoing_args_size \
+ + REG_PARM_STACK_SPACE (FNDECL) + (STACK_POINTER_OFFSET))
+
+#else
+#define STACK_DYNAMIC_OFFSET(FNDECL) \
+(current_function_outgoing_args_size + (STACK_POINTER_OFFSET))
+#endif
+
+#else
+#define STACK_DYNAMIC_OFFSET(FNDECL) STACK_POINTER_OFFSET
+#endif
+#endif
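+
+/* As a hypothetical illustration: on a target that defines
+ ACCUMULATE_OUTGOING_ARGS but not REG_PARM_STACK_SPACE, with a zero
+ STACK_POINTER_OFFSET and 24 bytes of outgoing argument space,
+ STACK_DYNAMIC_OFFSET (FNDECL) evaluates to 24. */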
+
+/* Pass through the INSNS of function FNDECL and convert virtual register
+ references to hard register references. */
+
+void
+instantiate_virtual_regs (fndecl, insns)
+ tree fndecl;
+ rtx insns;
+{
+ rtx insn;
+
+ /* Compute the offsets to use for this function. */
+ in_arg_offset = FIRST_PARM_OFFSET (fndecl);
+ var_offset = STARTING_FRAME_OFFSET;
+ dynamic_offset = STACK_DYNAMIC_OFFSET (fndecl);
+ out_arg_offset = STACK_POINTER_OFFSET;
+
+ /* Scan all variables and parameters of this function. For each that is
+ in memory, instantiate all virtual registers if the result is a valid
+ address. If not, we do it later. That will handle most uses of virtual
+ regs on many machines. */
+ instantiate_decls (fndecl, 1);
+
+ /* Initialize recognition, indicating that volatile is OK. */
+ init_recog ();
+
+ /* Scan through all the insns, instantiating every virtual register still
+ present. */
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ {
+ instantiate_virtual_regs_1 (&PATTERN (insn), insn, 1);
+ instantiate_virtual_regs_1 (&REG_NOTES (insn), NULL_RTX, 0);
+ }
+
+ /* Now instantiate the remaining register equivalences for debugging info.
+ These will not be valid addresses. */
+ instantiate_decls (fndecl, 0);
+
+ /* Indicate that, from now on, assign_stack_local should use
+ frame_pointer_rtx. */
+ virtuals_instantiated = 1;
+}
+
+/* Scan all decls in FNDECL (both variables and parameters) and instantiate
+ all virtual registers in their DECL_RTL's.
+
+ If VALID_ONLY, do this only if the resulting address is still valid.
+ Otherwise, always do it. */
+
+static void
+instantiate_decls (fndecl, valid_only)
+ tree fndecl;
+ int valid_only;
+{
+ tree decl;
+
+ if (DECL_INLINE (fndecl))
+ /* When compiling an inline function, the obstack used for
+ rtl allocation is the maybepermanent_obstack. Calling
+ `resume_temporary_allocation' switches us back to that
+ obstack while we process this function's parameters. */
+ resume_temporary_allocation ();
+
+ /* Process all parameters of the function. */
+ for (decl = DECL_ARGUMENTS (fndecl); decl; decl = TREE_CHAIN (decl))
+ {
+ instantiate_decl (DECL_RTL (decl), int_size_in_bytes (TREE_TYPE (decl)),
+ valid_only);
+ instantiate_decl (DECL_INCOMING_RTL (decl),
+ int_size_in_bytes (TREE_TYPE (decl)), valid_only);
+ }
+
+ /* Now process all variables defined in the function or its subblocks. */
+ instantiate_decls_1 (DECL_INITIAL (fndecl), valid_only);
+
+ if (DECL_INLINE (fndecl))
+ {
+ /* Save all rtl allocated for this function by raising the
+ high-water mark on the maybepermanent_obstack. */
+ preserve_data ();
+ /* All further rtl allocation is now done in the current_obstack. */
+ rtl_in_current_obstack ();
+ }
+}
+
+/* Subroutine of instantiate_decls: Process all decls in the given
+ BLOCK node and all its subblocks. */
+
+static void
+instantiate_decls_1 (let, valid_only)
+ tree let;
+ int valid_only;
+{
+ tree t;
+
+ for (t = BLOCK_VARS (let); t; t = TREE_CHAIN (t))
+ instantiate_decl (DECL_RTL (t), int_size_in_bytes (TREE_TYPE (t)),
+ valid_only);
+
+ /* Process all subblocks. */
+ for (t = BLOCK_SUBBLOCKS (let); t; t = TREE_CHAIN (t))
+ instantiate_decls_1 (t, valid_only);
+}
+
+/* Subroutine of the preceding procedures: Given RTL representing a
+ decl and the size of the object, do any instantiation required.
+
+ If VALID_ONLY is non-zero, it means that the RTL should only be
+ changed if the new address is valid. */
+
+static void
+instantiate_decl (x, size, valid_only)
+ rtx x;
+ int size;
+ int valid_only;
+{
+ enum machine_mode mode;
+ rtx addr;
+
+ /* If this is not a MEM, no need to do anything. Similarly if the
+ address is a constant or a register that is not a virtual register. */
+
+ if (x == 0 || GET_CODE (x) != MEM)
+ return;
+
+ addr = XEXP (x, 0);
+ if (CONSTANT_P (addr)
+ || (GET_CODE (addr) == REG
+ && (REGNO (addr) < FIRST_VIRTUAL_REGISTER
+ || REGNO (addr) > LAST_VIRTUAL_REGISTER)))
+ return;
+
+ /* If we should only do this if the address is valid, copy the address.
+ We need to do this so we can undo any changes that might make the
+ address invalid. This copy is unfortunate, but probably can't be
+ avoided. */
+
+ if (valid_only)
+ addr = copy_rtx (addr);
+
+ instantiate_virtual_regs_1 (&addr, NULL_RTX, 0);
+
+ if (! valid_only)
+ return;
+
+ /* Now verify that the resulting address is valid for every integer or
+ floating-point mode up to and including SIZE bytes long. We do this
+ since the object might be accessed in any mode and frame addresses
+ are shared. */
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ mode != VOIDmode && GET_MODE_SIZE (mode) <= size;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (! memory_address_p (mode, addr))
+ return;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
+ mode != VOIDmode && GET_MODE_SIZE (mode) <= size;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (! memory_address_p (mode, addr))
+ return;
+
+ /* Otherwise, put back the address, now that we have updated it and we
+ know it is valid. */
+
+ XEXP (x, 0) = addr;
+}
+
+/* Given a pointer to a piece of rtx and an optional pointer to the
+ containing object, instantiate any virtual registers present in it.
+
+ If EXTRA_INSNS, we always do the replacement and generate
+ any extra insns before OBJECT. If it is zero, we do nothing if the
+ replacement
+ is not valid.
+
+ Return 1 if we either had nothing to do or if we were able to do the
+ needed replacement. Return 0 otherwise; we only return zero if
+ EXTRA_INSNS is zero.
+
+ We first try some simple transformations to avoid the creation of extra
+ pseudos. */
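+
+/* As a concrete example: given (plus virtual_outgoing_args_rtx
+ (const_int 4)), the simple transformation just replaces the
+ constant, producing (plus stack_pointer_rtx
+ (const_int (4 + out_arg_offset))); new pseudos and insns are
+ created only when validate_change rejects the cheaper
+ replacements. */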
+
+static int
+instantiate_virtual_regs_1 (loc, object, extra_insns)
+ rtx *loc;
+ rtx object;
+ int extra_insns;
+{
+ rtx x;
+ RTX_CODE code;
+ rtx new = 0;
+ int offset;
+ rtx temp;
+ rtx seq;
+ int i, j;
+ char *fmt;
+
+ /* Re-start here to avoid recursion in common cases. */
+ restart:
+
+ x = *loc;
+ if (x == 0)
+ return 1;
+
+ code = GET_CODE (x);
+
+ /* Check for some special cases. */
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case RETURN:
+ return 1;
+
+ case SET:
+ /* We are allowed to set the virtual registers. This means
+ that the actual register should receive the source minus the
+ appropriate offset. This is used, for example, in the handling
+ of non-local gotos. */
+ if (SET_DEST (x) == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = - in_arg_offset;
+ else if (SET_DEST (x) == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = - var_offset;
+ else if (SET_DEST (x) == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = - dynamic_offset;
+ else if (SET_DEST (x) == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = - out_arg_offset;
+
+ if (new)
+ {
+ /* The only valid sources here are PLUS or REG. Just do
+ the simplest possible thing to handle them. */
+ if (GET_CODE (SET_SRC (x)) != REG
+ && GET_CODE (SET_SRC (x)) != PLUS)
+ abort ();
+
+ start_sequence ();
+ if (GET_CODE (SET_SRC (x)) != REG)
+ temp = force_operand (SET_SRC (x), NULL_RTX);
+ else
+ temp = SET_SRC (x);
+ temp = force_operand (plus_constant (temp, offset), NULL_RTX);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, object);
+ SET_DEST (x) = new;
+
+ if (!validate_change (object, &SET_SRC (x), temp, 0)
+ || ! extra_insns)
+ abort ();
+
+ return 1;
+ }
+
+ instantiate_virtual_regs_1 (&SET_DEST (x), object, extra_insns);
+ loc = &SET_SRC (x);
+ goto restart;
+
+ case PLUS:
+ /* Handle special case of virtual register plus constant. */
+ if (CONSTANT_P (XEXP (x, 1)))
+ {
+ rtx old;
+
+ /* Check for (plus (plus VIRT foo) (const_int)) first. */
+ if (GET_CODE (XEXP (x, 0)) == PLUS)
+ {
+ rtx inner = XEXP (XEXP (x, 0), 0);
+
+ if (inner == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = in_arg_offset;
+ else if (inner == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = var_offset;
+ else if (inner == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = dynamic_offset;
+ else if (inner == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = out_arg_offset;
+ else
+ {
+ loc = &XEXP (x, 0);
+ goto restart;
+ }
+
+ instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 1), object,
+ extra_insns);
+ new = gen_rtx (PLUS, Pmode, new, XEXP (XEXP (x, 0), 1));
+ }
+
+ else if (XEXP (x, 0) == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = in_arg_offset;
+ else if (XEXP (x, 0) == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = var_offset;
+ else if (XEXP (x, 0) == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = dynamic_offset;
+ else if (XEXP (x, 0) == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = out_arg_offset;
+ else
+ {
+ /* We know the second operand is a constant. Unless the
+ first operand is a REG (which has already been checked),
+ it needs to be checked. */
+ if (GET_CODE (XEXP (x, 0)) != REG)
+ {
+ loc = &XEXP (x, 0);
+ goto restart;
+ }
+ return 1;
+ }
+
+ old = XEXP (x, 0);
+ XEXP (x, 0) = new;
+ new = plus_constant (XEXP (x, 1), offset);
+
+ /* If the new constant is zero, try to replace the sum with its
+ first operand. */
+ if (new == const0_rtx
+ && validate_change (object, loc, XEXP (x, 0), 0))
+ return 1;
+
+ /* Next, try to replace the constant with the new one. */
+ if (!validate_change (object, &XEXP (x, 1), new, 0))
+ {
+ if (! extra_insns)
+ {
+ XEXP (x, 0) = old;
+ return 0;
+ }
+
+ /* Otherwise copy the new constant into a register and replace
+ the constant with that register. */
+ temp = gen_reg_rtx (Pmode);
+ if (validate_change (object, &XEXP (x, 1), temp, 0))
+ emit_insn_before (gen_move_insn (temp, new), object);
+ else
+ {
+ /* If that didn't work, replace this expression with a
+ register containing the sum. */
+
+ new = gen_rtx (PLUS, Pmode, XEXP (x, 0), new);
+ XEXP (x, 0) = old;
+
+ start_sequence ();
+ temp = force_operand (new, NULL_RTX);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, object);
+ if (! validate_change (object, loc, temp, 0)
+ && ! validate_replace_rtx (x, temp, object))
+ abort ();
+ }
+ }
+
+ return 1;
+ }
+
+ /* Fall through to generic two-operand expression case. */
+ case EXPR_LIST:
+ case CALL:
+ case COMPARE:
+ case MINUS:
+ case MULT:
+ case DIV: case UDIV:
+ case MOD: case UMOD:
+ case AND: case IOR: case XOR:
+ case ROTATERT: case ROTATE:
+ case ASHIFTRT: case LSHIFTRT: case ASHIFT:
+ case NE: case EQ:
+ case GE: case GT: case GEU: case GTU:
+ case LE: case LT: case LEU: case LTU:
+ if (XEXP (x, 1) && ! CONSTANT_P (XEXP (x, 1)))
+ instantiate_virtual_regs_1 (&XEXP (x, 1), object, extra_insns);
+ loc = &XEXP (x, 0);
+ goto restart;
+
+ case MEM:
+ /* Most cases of MEM that convert to valid addresses have already been
+ handled by our scan of regno_reg_rtx. The only special handling we
+ need here is to make a copy of the rtx to ensure it isn't being
+ shared if we have to change it to a pseudo.
+
+ If the rtx is a simple reference to an address via a virtual register,
+ it can potentially be shared. In such cases, first try to make it
+ a valid address, which can also be shared. Otherwise, copy it and
+ proceed normally.
+
+ First check for common cases that need no processing. These are
+ usually due to instantiation already being done on a previous instance
+ of a shared rtx. */
+
+ temp = XEXP (x, 0);
+ if (CONSTANT_ADDRESS_P (temp)
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || temp == arg_pointer_rtx
+#endif
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || temp == hard_frame_pointer_rtx
+#endif
+ || temp == frame_pointer_rtx)
+ return 1;
+
+ if (GET_CODE (temp) == PLUS
+ && CONSTANT_ADDRESS_P (XEXP (temp, 1))
+ && (XEXP (temp, 0) == frame_pointer_rtx
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || XEXP (temp, 0) == hard_frame_pointer_rtx
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || XEXP (temp, 0) == arg_pointer_rtx
+#endif
+ ))
+ return 1;
+
+ if (temp == virtual_stack_vars_rtx
+ || temp == virtual_incoming_args_rtx
+ || (GET_CODE (temp) == PLUS
+ && CONSTANT_ADDRESS_P (XEXP (temp, 1))
+ && (XEXP (temp, 0) == virtual_stack_vars_rtx
+ || XEXP (temp, 0) == virtual_incoming_args_rtx)))
+ {
+ /* This MEM may be shared. If the substitution can be done without
+ the need to generate new pseudos, we want to do it in place
+ so all copies of the shared rtx benefit. The call below will
+ only make substitutions if the resulting address is still
+ valid.
+
+ Note that we cannot pass X as the object in the recursive call
+ since the insn being processed may not allow all valid
+ addresses. However, if we were not passed an object, we can
+ only modify X without copying it if X will have a valid
+ address.
+
+ ??? Also note that this can still lose if OBJECT is an insn that
+ has fewer restrictions on an address than some other insn.
+ In that case, we will modify the shared address. This case
+ doesn't seem very likely, though. */
+
+ if (instantiate_virtual_regs_1 (&XEXP (x, 0),
+ object ? object : x, 0))
+ return 1;
+
+ /* Otherwise make a copy and process that copy. We copy the entire
+ RTL expression since it might be a PLUS which could also be
+ shared. */
+ *loc = x = copy_rtx (x);
+ }
+
+ /* Fall through to generic unary operation case. */
+ case USE:
+ case CLOBBER:
+ case SUBREG:
+ case STRICT_LOW_PART:
+ case NEG: case NOT:
+ case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC:
+ case SIGN_EXTEND: case ZERO_EXTEND:
+ case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE:
+ case FLOAT: case FIX:
+ case UNSIGNED_FIX: case UNSIGNED_FLOAT:
+ case ABS:
+ case SQRT:
+ case FFS:
+ /* These cases either have just one operand or we know that we need not
+ check the rest of the operands. */
+ loc = &XEXP (x, 0);
+ goto restart;
+
+ case REG:
+ /* Try to replace with a PLUS. If that doesn't work, compute the sum
+ in front of this insn and substitute the temporary. */
+ if (x == virtual_incoming_args_rtx)
+ new = arg_pointer_rtx, offset = in_arg_offset;
+ else if (x == virtual_stack_vars_rtx)
+ new = frame_pointer_rtx, offset = var_offset;
+ else if (x == virtual_stack_dynamic_rtx)
+ new = stack_pointer_rtx, offset = dynamic_offset;
+ else if (x == virtual_outgoing_args_rtx)
+ new = stack_pointer_rtx, offset = out_arg_offset;
+
+ if (new)
+ {
+ temp = plus_constant (new, offset);
+ if (!validate_change (object, loc, temp, 0))
+ {
+ if (! extra_insns)
+ return 0;
+
+ start_sequence ();
+ temp = force_operand (temp, NULL_RTX);
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, object);
+ if (! validate_change (object, loc, temp, 0)
+ && ! validate_replace_rtx (x, temp, object))
+ abort ();
+ }
+ }
+
+ return 1;
+ }
+
+ /* Scan all subexpressions. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++)
+ if (*fmt == 'e')
+ {
+ if (!instantiate_virtual_regs_1 (&XEXP (x, i), object, extra_insns))
+ return 0;
+ }
+ else if (*fmt == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (! instantiate_virtual_regs_1 (&XVECEXP (x, i, j), object,
+ extra_insns))
+ return 0;
+
+ return 1;
+}
+
+/* Optimization: assuming this function does not receive nonlocal gotos,
+ delete the handlers for such, as well as the insns to establish
+ and disestablish them. */
+
+static void
+delete_handlers ()
+{
+ rtx insn;
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ /* Delete the handler by turning off the flag that would
+ prevent jump_optimize from deleting it.
+ Also permit deletion of the nonlocal labels themselves
+ if nothing local refers to them. */
+ if (GET_CODE (insn) == CODE_LABEL)
+ LABEL_PRESERVE_P (insn) = 0;
+ if (GET_CODE (insn) == INSN
+ && ((nonlocal_goto_handler_slot != 0
+ && reg_mentioned_p (nonlocal_goto_handler_slot, PATTERN (insn)))
+ || (nonlocal_goto_stack_level != 0
+ && reg_mentioned_p (nonlocal_goto_stack_level,
+ PATTERN (insn)))))
+ delete_insn (insn);
+ }
+}
+
+/* Return a list (chain of EXPR_LIST nodes) for the nonlocal labels
+ of the current function. */
+
+rtx
+nonlocal_label_rtx_list ()
+{
+ tree t;
+ rtx x = 0;
+
+ for (t = nonlocal_labels; t; t = TREE_CHAIN (t))
+ x = gen_rtx (EXPR_LIST, VOIDmode, label_rtx (TREE_VALUE (t)), x);
+
+ return x;
+}
+
+/* Output a USE for any register use in RTL.
+ This is used with -noreg to mark the extent of the lifespan
+ of any registers used in a user-visible variable's DECL_RTL. */
+
+void
+use_variable (rtl)
+ rtx rtl;
+{
+ if (GET_CODE (rtl) == REG)
+ /* This is a register variable. */
+ emit_insn (gen_rtx (USE, VOIDmode, rtl));
+ else if (GET_CODE (rtl) == MEM
+ && GET_CODE (XEXP (rtl, 0)) == REG
+ && (REGNO (XEXP (rtl, 0)) < FIRST_VIRTUAL_REGISTER
+ || REGNO (XEXP (rtl, 0)) > LAST_VIRTUAL_REGISTER)
+ && XEXP (rtl, 0) != current_function_internal_arg_pointer)
+ /* This is a variable-sized structure. */
+ emit_insn (gen_rtx (USE, VOIDmode, XEXP (rtl, 0)));
+}
+
+/* Like use_variable except that it outputs the USEs after INSN
+ instead of at the end of the insn-chain. */
+
+void
+use_variable_after (rtl, insn)
+ rtx rtl, insn;
+{
+ if (GET_CODE (rtl) == REG)
+ /* This is a register variable. */
+ emit_insn_after (gen_rtx (USE, VOIDmode, rtl), insn);
+ else if (GET_CODE (rtl) == MEM
+ && GET_CODE (XEXP (rtl, 0)) == REG
+ && (REGNO (XEXP (rtl, 0)) < FIRST_VIRTUAL_REGISTER
+ || REGNO (XEXP (rtl, 0)) > LAST_VIRTUAL_REGISTER)
+ && XEXP (rtl, 0) != current_function_internal_arg_pointer)
+ /* This is a variable-sized structure. */
+ emit_insn_after (gen_rtx (USE, VOIDmode, XEXP (rtl, 0)), insn);
+}
+
+int
+max_parm_reg_num ()
+{
+ return max_parm_reg;
+}
+
+/* Return the first insn following those generated by `assign_parms'. */
+
+rtx
+get_first_nonparm_insn ()
+{
+ if (last_parm_insn)
+ return NEXT_INSN (last_parm_insn);
+ return get_insns ();
+}
+
+/* Return the first NOTE_INSN_BLOCK_BEG note in the function.
+ Crash if there is none. */
+
+rtx
+get_first_block_beg ()
+{
+ register rtx searcher;
+ register rtx insn = get_first_nonparm_insn ();
+
+ for (searcher = insn; searcher; searcher = NEXT_INSN (searcher))
+ if (GET_CODE (searcher) == NOTE
+ && NOTE_LINE_NUMBER (searcher) == NOTE_INSN_BLOCK_BEG)
+ return searcher;
+
+ abort (); /* Invalid call to this function. (See comments above.) */
+ return NULL_RTX;
+}
+
+/* Return 1 if EXP is an aggregate type (or a value with aggregate type).
+ This means a type for which function calls must pass an address to the
+ function or get an address back from the function.
+ EXP may be a type node or an expression (whose type is tested). */
+
+int
+aggregate_value_p (exp)
+ tree exp;
+{
+ int i, regno, nregs;
+ rtx reg;
+ tree type;
+ if (TREE_CODE_CLASS (TREE_CODE (exp)) == 't')
+ type = exp;
+ else
+ type = TREE_TYPE (exp);
+
+ if (RETURN_IN_MEMORY (type))
+ return 1;
+ if (flag_pcc_struct_return && AGGREGATE_TYPE_P (type))
+ return 1;
+ /* Make sure we have suitable call-clobbered regs to return
+ the value in; if not, we must return it in memory. */
+ reg = hard_function_value (type, 0);
+ regno = REGNO (reg);
+ nregs = HARD_REGNO_NREGS (regno, TYPE_MODE (type));
+ for (i = 0; i < nregs; i++)
+ if (! call_used_regs[regno + i])
+ return 1;
+ return 0;
+}
+
+/* Assign RTL expressions to the function's parameters.
+ This may involve copying them into registers and using
+ those registers as the RTL for them.
+
+ If SECOND_TIME is non-zero it means that this function is being
+ called a second time. This is done by integrate.c when a function's
+ compilation is deferred. We need to come back here in case the
+ FUNCTION_ARG macro computes items needed for the rest of the compilation
+ (such as changing which registers are fixed or caller-saved). But suppress
+ writing any insns or setting DECL_RTL of anything in this case. */
+
+void
+assign_parms (fndecl, second_time)
+ tree fndecl;
+ int second_time;
+{
+ register tree parm;
+ register rtx entry_parm = 0;
+ register rtx stack_parm = 0;
+ CUMULATIVE_ARGS args_so_far;
+ enum machine_mode promoted_mode, passed_mode, nominal_mode;
+ int unsignedp;
+ /* Total space needed so far for args on the stack,
+ given as a constant and a tree-expression. */
+ struct args_size stack_args_size;
+ tree fntype = TREE_TYPE (fndecl);
+ tree fnargs = DECL_ARGUMENTS (fndecl);
+ /* This is used for the arg pointer when referring to stack args. */
+ rtx internal_arg_pointer;
+ /* This is a dummy PARM_DECL that we use for the function result if
+ the function returns a structure. */
+ tree function_result_decl = 0;
+ int nparmregs = list_length (fnargs) + LAST_VIRTUAL_REGISTER + 1;
+ int varargs_setup = 0;
+ rtx conversion_insns = 0;
+ /* FUNCTION_ARG may look at this variable. Since this is not
+ expanding a call, it will always be zero in this function. */
+ int current_call_is_indirect = 0;
+
+ /* Nonzero if the last arg is named `__builtin_va_alist',
+ which is used on some machines for old-fashioned non-ANSI varargs.h;
+ this should be stuck onto the stack as if it had arrived there. */
+ int hide_last_arg
+ = (current_function_varargs
+ && fnargs
+ && (parm = tree_last (fnargs)) != 0
+ && DECL_NAME (parm)
+ && (! strcmp (IDENTIFIER_POINTER (DECL_NAME (parm)),
+ "__builtin_va_alist")));
+
+ /* Nonzero if function takes extra anonymous args.
+ This means the last named arg must be on the stack
+ right before the anonymous ones. */
+ int stdarg
+ = (TYPE_ARG_TYPES (fntype) != 0
+ && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
+ != void_type_node));
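+
+ /* For example, for `int f (int n, ...)' the TYPE_ARG_TYPES list ends
+ with the entry for `int' rather than void_type_node, so STDARG is
+ nonzero; for `int f (int n)' the list ends with void_type_node and
+ STDARG is zero. */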
+
+ /* If the reg that the virtual arg pointer will be translated into is
+ not a fixed reg or is the stack pointer, make a copy of the virtual
+ arg pointer, and address parms via the copy. The frame pointer is
+ considered fixed even though it is not marked as such.
+
+ The second time through, simply use ap to avoid generating rtx. */
+
+ if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM
+ || ! (fixed_regs[ARG_POINTER_REGNUM]
+ || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM))
+ && ! second_time)
+ internal_arg_pointer = copy_to_reg (virtual_incoming_args_rtx);
+ else
+ internal_arg_pointer = virtual_incoming_args_rtx;
+ current_function_internal_arg_pointer = internal_arg_pointer;
+
+ stack_args_size.constant = 0;
+ stack_args_size.var = 0;
+
+ /* If struct value address is treated as the first argument, make it so. */
+ if (aggregate_value_p (DECL_RESULT (fndecl))
+ && ! current_function_returns_pcc_struct
+ && struct_value_incoming_rtx == 0)
+ {
+ tree type = build_pointer_type (fntype);
+
+ function_result_decl = build_decl (PARM_DECL, NULL_TREE, type);
+
+ DECL_ARG_TYPE (function_result_decl) = type;
+ TREE_CHAIN (function_result_decl) = fnargs;
+ fnargs = function_result_decl;
+ }
+
+ parm_reg_stack_loc = (rtx *) oballoc (nparmregs * sizeof (rtx));
+ bzero ((char *) parm_reg_stack_loc, nparmregs * sizeof (rtx));
+
+#ifdef INIT_CUMULATIVE_INCOMING_ARGS
+ INIT_CUMULATIVE_INCOMING_ARGS (args_so_far, fntype, NULL_RTX);
+#else
+ INIT_CUMULATIVE_ARGS (args_so_far, fntype, NULL_RTX);
+#endif
+
+ /* We haven't yet found an argument that we must push and pretend the
+ caller did. */
+ current_function_pretend_args_size = 0;
+
+ for (parm = fnargs; parm; parm = TREE_CHAIN (parm))
+ {
+ int aggregate = AGGREGATE_TYPE_P (TREE_TYPE (parm));
+ struct args_size stack_offset;
+ struct args_size arg_size;
+ int passed_pointer = 0;
+ tree passed_type = DECL_ARG_TYPE (parm);
+
+ /* Set LAST_NAMED if this is last named arg before some
+ anonymous args. We treat it as if it were anonymous too. */
+ int last_named = ((TREE_CHAIN (parm) == 0
+ || DECL_NAME (TREE_CHAIN (parm)) == 0)
+ && (stdarg || current_function_varargs));
+
+ if (TREE_TYPE (parm) == error_mark_node
+ /* This can happen after weird syntax errors
+ or if an enum type is defined among the parms. */
+ || TREE_CODE (parm) != PARM_DECL
+ || passed_type == NULL)
+ {
+ DECL_INCOMING_RTL (parm) = DECL_RTL (parm) = gen_rtx (MEM, BLKmode,
+ const0_rtx);
+ TREE_USED (parm) = 1;
+ continue;
+ }
+
+ /* For a varargs.h function, save info about regs and stack space
+ used by the individual args, not including the va_alist arg. */
+ if (hide_last_arg && last_named)
+ current_function_args_info = args_so_far;
+
+ /* Find mode of arg as it is passed, and mode of arg
+ as it should be during execution of this function. */
+ passed_mode = TYPE_MODE (passed_type);
+ nominal_mode = TYPE_MODE (TREE_TYPE (parm));
+
+ /* If the parm's mode is VOID, its value doesn't matter,
+ so avoid the usual things like emit_move_insn, which could crash. */
+ if (nominal_mode == VOIDmode)
+ {
+ DECL_INCOMING_RTL (parm) = DECL_RTL (parm) = const0_rtx;
+ continue;
+ }
+
+ /* See if this arg was passed by invisible reference. It is if
+ it is an object whose size depends on the contents of the
+ object itself or if the machine requires these objects be passed
+ that way. */
+
+ if ((TREE_CODE (TYPE_SIZE (passed_type)) != INTEGER_CST
+ && contains_placeholder_p (TYPE_SIZE (passed_type)))
+ || TYPE_NEEDS_CONSTRUCTING (passed_type)
+#ifdef FUNCTION_ARG_PASS_BY_REFERENCE
+ || FUNCTION_ARG_PASS_BY_REFERENCE (args_so_far, passed_mode,
+ passed_type, ! last_named)
+#endif
+ )
+ {
+ passed_type = build_pointer_type (passed_type);
+ passed_pointer = 1;
+ passed_mode = nominal_mode = Pmode;
+ }
+
+ promoted_mode = passed_mode;
+
+#ifdef PROMOTE_FUNCTION_ARGS
+ /* Compute the mode to which the arg is actually extended. */
+ promoted_mode = promote_mode (passed_type, promoted_mode, &unsignedp, 1);
+#endif
+
+ /* Let machine desc say which reg (if any) the parm arrives in.
+ 0 means it arrives on the stack. */
+#ifdef FUNCTION_INCOMING_ARG
+ entry_parm = FUNCTION_INCOMING_ARG (args_so_far, promoted_mode,
+ passed_type, ! last_named);
+#else
+ entry_parm = FUNCTION_ARG (args_so_far, promoted_mode,
+ passed_type, ! last_named);
+#endif
+
+ if (entry_parm)
+ passed_mode = promoted_mode;
+
+#ifdef SETUP_INCOMING_VARARGS
+ /* If this is the last named parameter, do any required setup for
+ varargs or stdargs. We need to know about the case of this being an
+ addressable type, in which case we skip the registers it
+ would have arrived in.
+
+ For stdargs, LAST_NAMED will be set for two parameters, the one that
+ is actually the last named, and the dummy parameter. We only
+ want to do this action once.
+
+ Also, indicate when RTL generation is to be suppressed. */
+ if (last_named && !varargs_setup)
+ {
+ SETUP_INCOMING_VARARGS (args_so_far, passed_mode, passed_type,
+ current_function_pretend_args_size,
+ second_time);
+ varargs_setup = 1;
+ }
+#endif
+
+ /* Determine parm's home in the stack,
+ in case it arrives in the stack or we should pretend it did.
+
+ Compute the stack position and rtx where the argument arrives
+ and its size.
+
+ There is one complexity here: If this was a parameter that would
+ have been passed in registers, but wasn't only because it is
+ __builtin_va_alist, we want locate_and_pad_parm to treat it as if
+ it came in a register so that REG_PARM_STACK_SPACE isn't skipped.
+ In this case, we call FUNCTION_ARG with NAMED set to 1 instead of
+ 0 as it was the previous time. */
+
+ locate_and_pad_parm (passed_mode, passed_type,
+#ifdef STACK_PARMS_IN_REG_PARM_AREA
+ 1,
+#else
+#ifdef FUNCTION_INCOMING_ARG
+ FUNCTION_INCOMING_ARG (args_so_far, passed_mode,
+ passed_type,
+ (! last_named
+ || varargs_setup)) != 0,
+#else
+ FUNCTION_ARG (args_so_far, passed_mode,
+ passed_type,
+ ! last_named || varargs_setup) != 0,
+#endif
+#endif
+ fndecl, &stack_args_size, &stack_offset, &arg_size);
+
+ if (! second_time)
+ {
+ rtx offset_rtx = ARGS_SIZE_RTX (stack_offset);
+
+ if (offset_rtx == const0_rtx)
+ stack_parm = gen_rtx (MEM, passed_mode, internal_arg_pointer);
+ else
+ stack_parm = gen_rtx (MEM, passed_mode,
+ gen_rtx (PLUS, Pmode,
+ internal_arg_pointer, offset_rtx));
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_IN_STRUCT_P (stack_parm) = aggregate;
+ }
+
+ /* If this parameter was passed both in registers and in the stack,
+ use the copy on the stack. */
+ if (MUST_PASS_IN_STACK (passed_mode, passed_type))
+ entry_parm = 0;
+
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+ /* If this parm was passed part in regs and part in memory,
+ pretend it arrived entirely in memory
+ by pushing the register-part onto the stack.
+
+ In the special case of a DImode or DFmode that is split,
+ we could put it together in a pseudoreg directly,
+ but for now that's not worth bothering with. */
+
+ if (entry_parm)
+ {
+ int nregs = FUNCTION_ARG_PARTIAL_NREGS (args_so_far, passed_mode,
+ passed_type, ! last_named);
+
+ if (nregs > 0)
+ {
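+ /* For instance, assuming UNITS_PER_WORD is 4 and PARM_BOUNDARY is
+ 32 bits, two leftover registers yield 8 bytes of pretend args,
+ already a multiple of the 4-byte parm boundary. */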
+ current_function_pretend_args_size
+ = (((nregs * UNITS_PER_WORD) + (PARM_BOUNDARY / BITS_PER_UNIT) - 1)
+ / (PARM_BOUNDARY / BITS_PER_UNIT)
+ * (PARM_BOUNDARY / BITS_PER_UNIT));
+
+ if (! second_time)
+ move_block_from_reg (REGNO (entry_parm),
+ validize_mem (stack_parm), nregs,
+ int_size_in_bytes (TREE_TYPE (parm)));
+ entry_parm = stack_parm;
+ }
+ }
+#endif
+
+ /* If we didn't decide this parm came in a register,
+ by default it came on the stack. */
+ if (entry_parm == 0)
+ entry_parm = stack_parm;
+
+ /* Record permanently how this parm was passed. */
+ if (! second_time)
+ DECL_INCOMING_RTL (parm) = entry_parm;
+
+ /* If there is actually space on the stack for this parm,
+ count it in stack_args_size; otherwise set stack_parm to 0
+ to indicate there is no preallocated stack slot for the parm. */
+
+ if (entry_parm == stack_parm
+#if defined (REG_PARM_STACK_SPACE) && ! defined (MAYBE_REG_PARM_STACK_SPACE)
+ /* On some machines, even if a parm value arrives in a register
+ there is still an (uninitialized) stack slot allocated for it.
+
+ ??? When MAYBE_REG_PARM_STACK_SPACE is defined, we can't tell
+ whether this parameter already has a stack slot allocated,
+ because an arg block exists only if current_function_args_size
+ is larger than some threshold, and we haven't calculated that
+ yet. So, for now, we just assume that stack slots never exist
+ in this case. */
+ || REG_PARM_STACK_SPACE (fndecl) > 0
+#endif
+ )
+ {
+ stack_args_size.constant += arg_size.constant;
+ if (arg_size.var)
+ ADD_PARM_SIZE (stack_args_size, arg_size.var);
+ }
+ else
+ /* No stack slot was pushed for this parm. */
+ stack_parm = 0;
+
+ /* Update info on where next arg arrives in registers. */
+
+ FUNCTION_ARG_ADVANCE (args_so_far, passed_mode,
+ passed_type, ! last_named);
+
+ /* If this is our second time through, we are done with this parm. */
+ if (second_time)
+ continue;
+
+ /* If we can't trust the parm stack slot to be aligned enough
+ for its ultimate type, don't use that slot after entry.
+ We'll make another stack slot, if we need one. */
+ {
+ int thisparm_boundary
+ = FUNCTION_ARG_BOUNDARY (passed_mode, passed_type);
+
+ if (GET_MODE_ALIGNMENT (nominal_mode) > thisparm_boundary)
+ stack_parm = 0;
+ }
+
+ /* If parm was passed in memory, and we need to convert it on entry,
+ don't store it back in that same slot. */
+ if (entry_parm != 0
+ && nominal_mode != BLKmode && nominal_mode != passed_mode)
+ stack_parm = 0;
+
+#if 0
+ /* Now adjust STACK_PARM to the mode and precise location
+ where this parameter should live during execution,
+ if we discover that it must live in the stack during execution.
+ To make debuggers happier on big-endian machines, we store
+ the value in the last bytes of the space available. */
+
+ if (nominal_mode != BLKmode && nominal_mode != passed_mode
+ && stack_parm != 0)
+ {
+ rtx offset_rtx;
+
+#if BYTES_BIG_ENDIAN
+ if (GET_MODE_SIZE (nominal_mode) < UNITS_PER_WORD)
+ stack_offset.constant += (GET_MODE_SIZE (passed_mode)
+ - GET_MODE_SIZE (nominal_mode));
+#endif
+
+ offset_rtx = ARGS_SIZE_RTX (stack_offset);
+ if (offset_rtx == const0_rtx)
+ stack_parm = gen_rtx (MEM, nominal_mode, internal_arg_pointer);
+ else
+ stack_parm = gen_rtx (MEM, nominal_mode,
+ gen_rtx (PLUS, Pmode,
+ internal_arg_pointer, offset_rtx));
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_IN_STRUCT_P (stack_parm) = aggregate;
+ }
+#endif /* 0 */
+
+ /* ENTRY_PARM is an RTX for the parameter as it arrives,
+ in the mode in which it arrives.
+ STACK_PARM is an RTX for a stack slot where the parameter can live
+ during the function (in case we want to put it there).
+ STACK_PARM is 0 if no stack slot was pushed for it.
+
+ Now output code if necessary to convert ENTRY_PARM to
+ the type in which this function declares it,
+ and store that result in an appropriate place,
+ which may be a pseudo reg, may be STACK_PARM,
+ or may be a local stack slot if STACK_PARM is 0.
+
+ Set DECL_RTL to that place. */
+
+ if (nominal_mode == BLKmode)
+ {
+ /* If a BLKmode arrives in registers, copy it to a stack slot. */
+ if (GET_CODE (entry_parm) == REG)
+ {
+ int size_stored = CEIL_ROUND (int_size_in_bytes (TREE_TYPE (parm)),
+ UNITS_PER_WORD);
+
+ /* Note that we will be storing an integral number of words.
+ So we have to be careful to ensure that we allocate an
+ integral number of words. We do this below in the
+ assign_stack_local if space was not allocated in the argument
+ list. If it was, this will not work if PARM_BOUNDARY is not
+ a multiple of BITS_PER_WORD. It isn't clear how to fix this
+ if it becomes a problem. */
+
+ if (stack_parm == 0)
+ {
+ stack_parm
+ = assign_stack_local (GET_MODE (entry_parm), size_stored, 0);
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_IN_STRUCT_P (stack_parm) = aggregate;
+ }
+
+ else if (PARM_BOUNDARY % BITS_PER_WORD != 0)
+ abort ();
+
+ move_block_from_reg (REGNO (entry_parm),
+ validize_mem (stack_parm),
+ size_stored / UNITS_PER_WORD,
+ int_size_in_bytes (TREE_TYPE (parm)));
+ }
+ DECL_RTL (parm) = stack_parm;
+ }
+ else if (! ((obey_regdecls && ! DECL_REGISTER (parm)
+ && ! DECL_INLINE (fndecl))
+ /* layout_decl may set this. */
+ || TREE_ADDRESSABLE (parm)
+ || TREE_SIDE_EFFECTS (parm)
+ /* If -ffloat-store specified, don't put explicit
+ float variables into registers. */
+ || (flag_float_store
+ && TREE_CODE (TREE_TYPE (parm)) == REAL_TYPE))
+ /* Always assign pseudo to structure return or item passed
+ by invisible reference. */
+ || passed_pointer || parm == function_result_decl)
+ {
+ /* Store the parm in a pseudoregister during the function, but we
+ may need to do it in a wider mode. */
+
+ register rtx parmreg;
+ int regno, regnoi, regnor;
+
+ unsignedp = TREE_UNSIGNED (TREE_TYPE (parm));
+ nominal_mode = promote_mode (TREE_TYPE (parm), nominal_mode,
+ &unsignedp, 1);
+
+ parmreg = gen_reg_rtx (nominal_mode);
+ REG_USERVAR_P (parmreg) = 1;
+
+ /* If this was an item that we received a pointer to, set DECL_RTL
+ appropriately. */
+ if (passed_pointer)
+ {
+ DECL_RTL (parm) = gen_rtx (MEM, TYPE_MODE (TREE_TYPE (passed_type)), parmreg);
+ MEM_IN_STRUCT_P (DECL_RTL (parm)) = aggregate;
+ }
+ else
+ DECL_RTL (parm) = parmreg;
+
+ /* Copy the value into the register. */
+ if (GET_MODE (parmreg) != GET_MODE (entry_parm))
+ {
+ /* If ENTRY_PARM is a hard register, it might be in a register
+ not valid for operating in its mode (e.g., an odd-numbered
+ register for a DFmode). In that case, moves are the only
+ thing valid, so we can't do a convert from there. This
+ occurs when the calling sequence allows such misaligned
+ usages.
+
+ In addition, the conversion may involve a call, which could
+ clobber parameters which haven't been copied to pseudo
+ registers yet. Therefore, we must first copy the parm to
+ a pseudo reg here, and save the conversion until after all
+ parameters have been moved. */
+
+ rtx tempreg = gen_reg_rtx (GET_MODE (entry_parm));
+
+ emit_move_insn (tempreg, validize_mem (entry_parm));
+
+ push_to_sequence (conversion_insns);
+ convert_move (parmreg, tempreg, unsignedp);
+ conversion_insns = get_insns ();
+ end_sequence ();
+ }
+ else
+ emit_move_insn (parmreg, validize_mem (entry_parm));
+
+ /* If we were passed a pointer but the actual value
+ can safely live in a register, put it in one. */
+ if (passed_pointer && TYPE_MODE (TREE_TYPE (parm)) != BLKmode
+ && ! ((obey_regdecls && ! DECL_REGISTER (parm)
+ && ! DECL_INLINE (fndecl))
+ /* layout_decl may set this. */
+ || TREE_ADDRESSABLE (parm)
+ || TREE_SIDE_EFFECTS (parm)
+ /* If -ffloat-store specified, don't put explicit
+ float variables into registers. */
+ || (flag_float_store
+ && TREE_CODE (TREE_TYPE (parm)) == REAL_TYPE)))
+ {
+ /* We can't use nominal_mode, because it will have been set to
+ Pmode above. We must use the actual mode of the parm. */
+ parmreg = gen_reg_rtx (TYPE_MODE (TREE_TYPE (parm)));
+ REG_USERVAR_P (parmreg) = 1;
+ emit_move_insn (parmreg, DECL_RTL (parm));
+ DECL_RTL (parm) = parmreg;
+ /* STACK_PARM is the pointer, not the parm, and PARMREG is
+ now the parm. */
+ stack_parm = 0;
+ }
+#ifdef FUNCTION_ARG_CALLEE_COPIES
+ /* If we are passed an arg by reference and it is our responsibility
+ to make a copy, do it now.
+ PASSED_TYPE and PASSED_MODE now refer to the pointer, not the
+ original argument, so we must recreate them in the call to
+ FUNCTION_ARG_CALLEE_COPIES. */
+ /* ??? Later add code to skip the copy when the argument isn't
+ modified. */
+
+ else if (passed_pointer
+ && FUNCTION_ARG_CALLEE_COPIES (args_so_far,
+ TYPE_MODE (DECL_ARG_TYPE (parm)),
+ DECL_ARG_TYPE (parm),
+ ! last_named))
+ {
+ rtx copy;
+ tree type = DECL_ARG_TYPE (parm);
+
+ /* This sequence may involve a library call perhaps clobbering
+ registers that haven't been copied to pseudos yet. */
+
+ push_to_sequence (conversion_insns);
+
+ if (TYPE_SIZE (type) == 0
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ /* This is a variable sized object. */
+ copy = gen_rtx (MEM, BLKmode,
+ allocate_dynamic_stack_space
+ (expr_size (parm), NULL_RTX,
+ TYPE_ALIGN (type)));
+ else
+ copy = assign_stack_temp (TYPE_MODE (type),
+ int_size_in_bytes (type), 1);
+
+ store_expr (parm, copy, 0);
+ emit_move_insn (parmreg, XEXP (copy, 0));
+ conversion_insns = get_insns ();
+ end_sequence ();
+ }
+#endif /* FUNCTION_ARG_CALLEE_COPIES */
+
+ /* In any case, record the parm's desired stack location
+ in case we later discover it must live in the stack.
+
+ If it is a COMPLEX value, store the stack location for both
+ halves. */
+
+ if (GET_CODE (parmreg) == CONCAT)
+ regno = MAX (REGNO (XEXP (parmreg, 0)), REGNO (XEXP (parmreg, 1)));
+ else
+ regno = REGNO (parmreg);
+
+ if (regno >= nparmregs)
+ {
+ rtx *new;
+ int old_nparmregs = nparmregs;
+
+ nparmregs = regno + 5;
+ new = (rtx *) oballoc (nparmregs * sizeof (rtx));
+ bcopy ((char *) parm_reg_stack_loc, (char *) new,
+ old_nparmregs * sizeof (rtx));
+ bzero ((char *) (new + old_nparmregs),
+ (nparmregs - old_nparmregs) * sizeof (rtx));
+ parm_reg_stack_loc = new;
+ }
+
+ if (GET_CODE (parmreg) == CONCAT)
+ {
+ enum machine_mode submode = GET_MODE (XEXP (parmreg, 0));
+
+ regnor = REGNO (gen_realpart (submode, parmreg));
+ regnoi = REGNO (gen_imagpart (submode, parmreg));
+
+ if (stack_parm != 0)
+ {
+ parm_reg_stack_loc[regnor]
+ = gen_realpart (submode, stack_parm);
+ parm_reg_stack_loc[regnoi]
+ = gen_imagpart (submode, stack_parm);
+ }
+ else
+ {
+ parm_reg_stack_loc[regnor] = 0;
+ parm_reg_stack_loc[regnoi] = 0;
+ }
+ }
+ else
+ parm_reg_stack_loc[REGNO (parmreg)] = stack_parm;
+
+ /* Mark the register as eliminable if we did no conversion
+ and it was copied from memory at a fixed offset,
+ and the arg pointer was not copied to a pseudo-reg.
+ If the arg pointer is a pseudo reg or the offset formed
+ an invalid address, such memory-equivalences
+ as we make here would screw up life analysis for it. */
+ if (nominal_mode == passed_mode
+ && ! conversion_insns
+ && GET_CODE (entry_parm) == MEM
+ && entry_parm == stack_parm
+ && stack_offset.var == 0
+ && reg_mentioned_p (virtual_incoming_args_rtx,
+ XEXP (entry_parm, 0)))
+ {
+ rtx linsn = get_last_insn ();
+
+ /* Mark complex types separately. */
+ if (GET_CODE (parmreg) == CONCAT)
+ {
+ REG_NOTES (linsn)
+ = gen_rtx (EXPR_LIST, REG_EQUIV,
+ parm_reg_stack_loc[regnoi], REG_NOTES (linsn));
+
+ /* Now search backward for where we set the real part. */
+ for (; linsn != 0
+ && ! reg_referenced_p (parm_reg_stack_loc[regnor],
+ PATTERN (linsn));
+ linsn = prev_nonnote_insn (linsn))
+ ;
+
+ REG_NOTES (linsn)
+ = gen_rtx (EXPR_LIST, REG_EQUIV,
+ parm_reg_stack_loc[regnor], REG_NOTES (linsn));
+ }
+ else
+ REG_NOTES (linsn)
+ = gen_rtx (EXPR_LIST, REG_EQUIV,
+ entry_parm, REG_NOTES (linsn));
+ }
+
+ /* For pointer data type, suggest pointer register. */
+ if (TREE_CODE (TREE_TYPE (parm)) == POINTER_TYPE)
+ mark_reg_pointer (parmreg);
+ }
+ else
+ {
+ /* Value must be stored in the stack slot STACK_PARM
+ during function execution. */
+
+ if (passed_mode != nominal_mode)
+ {
+ /* Conversion is required. */
+ rtx tempreg = gen_reg_rtx (GET_MODE (entry_parm));
+
+ emit_move_insn (tempreg, validize_mem (entry_parm));
+
+ push_to_sequence (conversion_insns);
+ entry_parm = convert_to_mode (nominal_mode, tempreg,
+ TREE_UNSIGNED (TREE_TYPE (parm)));
+ conversion_insns = get_insns ();
+ end_sequence ();
+ }
+
+ if (entry_parm != stack_parm)
+ {
+ if (stack_parm == 0)
+ {
+ stack_parm
+ = assign_stack_local (GET_MODE (entry_parm),
+ GET_MODE_SIZE (GET_MODE (entry_parm)), 0);
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_IN_STRUCT_P (stack_parm) = aggregate;
+ }
+
+ if (passed_mode != nominal_mode)
+ {
+ push_to_sequence (conversion_insns);
+ emit_move_insn (validize_mem (stack_parm),
+ validize_mem (entry_parm));
+ conversion_insns = get_insns ();
+ end_sequence ();
+ }
+ else
+ emit_move_insn (validize_mem (stack_parm),
+ validize_mem (entry_parm));
+ }
+
+ DECL_RTL (parm) = stack_parm;
+ }
+
+ /* If this "parameter" was the place where we are receiving the
+ function's incoming structure pointer, set up the result. */
+ if (parm == function_result_decl)
+ {
+ tree result = DECL_RESULT (fndecl);
+ tree restype = TREE_TYPE (result);
+
+ DECL_RTL (result)
+ = gen_rtx (MEM, DECL_MODE (result), DECL_RTL (parm));
+
+ MEM_IN_STRUCT_P (DECL_RTL (result)) = AGGREGATE_TYPE_P (restype);
+ }
+
+ if (TREE_THIS_VOLATILE (parm))
+ MEM_VOLATILE_P (DECL_RTL (parm)) = 1;
+ if (TREE_READONLY (parm))
+ RTX_UNCHANGING_P (DECL_RTL (parm)) = 1;
+ }
+
+ /* Output all parameter conversion instructions (possibly including calls)
+ now that all parameters have been copied out of hard registers. */
+ emit_insns (conversion_insns);
+
+ max_parm_reg = max_reg_num ();
+ last_parm_insn = get_last_insn ();
+
+ current_function_args_size = stack_args_size.constant;
+
+ /* Adjust function incoming argument size for alignment and
+ minimum length. */
+
+#ifdef REG_PARM_STACK_SPACE
+#ifndef MAYBE_REG_PARM_STACK_SPACE
+ current_function_args_size = MAX (current_function_args_size,
+ REG_PARM_STACK_SPACE (fndecl));
+#endif
+#endif
+
+#ifdef STACK_BOUNDARY
+#define STACK_BYTES (STACK_BOUNDARY / BITS_PER_UNIT)
+
+ current_function_args_size
+ = ((current_function_args_size + STACK_BYTES - 1)
+ / STACK_BYTES) * STACK_BYTES;
+#endif
+
+#ifdef ARGS_GROW_DOWNWARD
+ current_function_arg_offset_rtx
+ = (stack_args_size.var == 0 ? GEN_INT (-stack_args_size.constant)
+ : expand_expr (size_binop (MINUS_EXPR, stack_args_size.var,
+ size_int (-stack_args_size.constant)),
+ NULL_RTX, VOIDmode, 0));
+#else
+ current_function_arg_offset_rtx = ARGS_SIZE_RTX (stack_args_size);
+#endif
+
+ /* See how many bytes, if any, of its args a function should try to pop
+ on return. */
+
+ current_function_pops_args = RETURN_POPS_ARGS (TREE_TYPE (fndecl),
+ current_function_args_size);
+
+ /* For a stdarg.h function, save info about
+ regs and stack space used by the named args. */
+
+ if (!hide_last_arg)
+ current_function_args_info = args_so_far;
+
+ /* Set the rtx used for the function return value. Put this in its
+ own variable so any optimizers that need this information don't have
+ to include tree.h. Do this here so it gets done when an inlined
+ function gets output. */
+
+ current_function_return_rtx = DECL_RTL (DECL_RESULT (fndecl));
+}
+
+/* Indicate whether REGNO is an incoming argument to the current function
+ that was promoted to a wider mode. If so, return the RTX for the
+ register (to get its mode). PMODE and PUNSIGNEDP are set to the mode
+ that REGNO is promoted from and whether the promotion was signed or
+ unsigned. */
+
+#ifdef PROMOTE_FUNCTION_ARGS
+
+rtx
+promoted_input_arg (regno, pmode, punsignedp)
+ int regno;
+ enum machine_mode *pmode;
+ int *punsignedp;
+{
+ tree arg;
+
+ for (arg = DECL_ARGUMENTS (current_function_decl); arg;
+ arg = TREE_CHAIN (arg))
+ if (GET_CODE (DECL_INCOMING_RTL (arg)) == REG
+ && REGNO (DECL_INCOMING_RTL (arg)) == regno)
+ {
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (arg));
+ int unsignedp = TREE_UNSIGNED (TREE_TYPE (arg));
+
+ mode = promote_mode (TREE_TYPE (arg), mode, &unsignedp, 1);
+ if (mode == GET_MODE (DECL_INCOMING_RTL (arg))
+ && mode != DECL_MODE (arg))
+ {
+ *pmode = DECL_MODE (arg);
+ *punsignedp = unsignedp;
+ return DECL_INCOMING_RTL (arg);
+ }
+ }
+
+ return 0;
+}
+
+#endif
+
+/* Compute the size and offset from the start of the stacked arguments for a
+ parm passed in mode PASSED_MODE and with type TYPE.
+
+ INITIAL_OFFSET_PTR points to the current offset into the stacked
+ arguments.
+
+ The starting offset and size for this parm are returned in *OFFSET_PTR
+ and *ARG_SIZE_PTR, respectively.
+
+ IN_REGS is non-zero if the argument will be passed in registers. It will
+ never be set if REG_PARM_STACK_SPACE is not defined.
+
+ FNDECL is the function in which the argument was defined.
+
+ There are two types of rounding that are done. The first, controlled by
+ FUNCTION_ARG_BOUNDARY, forces the offset from the start of the argument
+ list to be aligned to the specified boundary (in bits). This rounding
+ affects the initial and starting offsets, but not the argument size.
+
+ The second, controlled by FUNCTION_ARG_PADDING and PARM_BOUNDARY,
+ optionally rounds the size of the parm to PARM_BOUNDARY. The
+ initial offset is not affected by this rounding, while the size always
+ is and the starting offset may be. */
+
+/* offset_ptr will be negative in the ARGS_GROW_DOWNWARD case;
+ initial_offset_ptr is positive because locate_and_pad_parm's
+ callers pass in the total size of args so far as
+ initial_offset_ptr. arg_size_ptr is always positive. */
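+
+/* For example, assuming PARM_BOUNDARY is 32 and FUNCTION_ARG_PADDING
+ returns upward, a 6-byte BLKmode parm has its size rounded up to 8
+ bytes here, while its starting offset is aligned only to the boundary
+ returned by FUNCTION_ARG_BOUNDARY. */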
+
+void
+locate_and_pad_parm (passed_mode, type, in_regs, fndecl,
+ initial_offset_ptr, offset_ptr, arg_size_ptr)
+ enum machine_mode passed_mode;
+ tree type;
+ int in_regs;
+ tree fndecl;
+ struct args_size *initial_offset_ptr;
+ struct args_size *offset_ptr;
+ struct args_size *arg_size_ptr;
+{
+ tree sizetree
+ = type ? size_in_bytes (type) : size_int (GET_MODE_SIZE (passed_mode));
+ enum direction where_pad = FUNCTION_ARG_PADDING (passed_mode, type);
+ int boundary = FUNCTION_ARG_BOUNDARY (passed_mode, type);
+ int boundary_in_bytes = boundary / BITS_PER_UNIT;
+ int reg_parm_stack_space = 0;
+
+#ifdef REG_PARM_STACK_SPACE
+ /* If we have found a stack parm before we reach the end of the
+ area reserved for registers, skip that area. */
+ if (! in_regs)
+ {
+#ifdef MAYBE_REG_PARM_STACK_SPACE
+ reg_parm_stack_space = MAYBE_REG_PARM_STACK_SPACE;
+#else
+ reg_parm_stack_space = REG_PARM_STACK_SPACE (fndecl);
+#endif
+ if (reg_parm_stack_space > 0)
+ {
+ if (initial_offset_ptr->var)
+ {
+ initial_offset_ptr->var
+ = size_binop (MAX_EXPR, ARGS_SIZE_TREE (*initial_offset_ptr),
+ size_int (reg_parm_stack_space));
+ initial_offset_ptr->constant = 0;
+ }
+ else if (initial_offset_ptr->constant < reg_parm_stack_space)
+ initial_offset_ptr->constant = reg_parm_stack_space;
+ }
+ }
+#endif /* REG_PARM_STACK_SPACE */
+
+ arg_size_ptr->var = 0;
+ arg_size_ptr->constant = 0;
+
+#ifdef ARGS_GROW_DOWNWARD
+ if (initial_offset_ptr->var)
+ {
+ offset_ptr->constant = 0;
+ offset_ptr->var = size_binop (MINUS_EXPR, integer_zero_node,
+ initial_offset_ptr->var);
+ }
+ else
+ {
+ offset_ptr->constant = - initial_offset_ptr->constant;
+ offset_ptr->var = 0;
+ }
+ if (where_pad != none
+ && (TREE_CODE (sizetree) != INTEGER_CST
+ || ((TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY)))
+ sizetree = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT);
+ SUB_PARM_SIZE (*offset_ptr, sizetree);
+ if (where_pad != downward)
+ pad_to_arg_alignment (offset_ptr, boundary);
+ if (initial_offset_ptr->var)
+ {
+ arg_size_ptr->var = size_binop (MINUS_EXPR,
+ size_binop (MINUS_EXPR,
+ integer_zero_node,
+ initial_offset_ptr->var),
+ offset_ptr->var);
+ }
+ else
+ {
+ arg_size_ptr->constant = (- initial_offset_ptr->constant -
+ offset_ptr->constant);
+ }
+#else /* !ARGS_GROW_DOWNWARD */
+ pad_to_arg_alignment (initial_offset_ptr, boundary);
+ *offset_ptr = *initial_offset_ptr;
+
+#ifdef PUSH_ROUNDING
+ if (passed_mode != BLKmode)
+ sizetree = size_int (PUSH_ROUNDING (TREE_INT_CST_LOW (sizetree)));
+#endif
+
+ if (where_pad != none
+ && (TREE_CODE (sizetree) != INTEGER_CST
+ || ((TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY)))
+ sizetree = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT);
+
+ /* This must be done after rounding sizetree, so that it will subtract
+ the same value that we explicitly add below. */
+ if (where_pad == downward)
+ pad_below (offset_ptr, passed_mode, sizetree);
+ ADD_PARM_SIZE (*arg_size_ptr, sizetree);
+#endif /* ARGS_GROW_DOWNWARD */
+}
+
+/* Round the stack offset in *OFFSET_PTR up to a multiple of BOUNDARY.
+ BOUNDARY is measured in bits, but must be a multiple of a storage unit. */
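+
+/* For example, with BOUNDARY equal to 64 bits (8 bytes), a constant
+ offset of 12 is rounded up to 16, or down to 8 in the
+ ARGS_GROW_DOWNWARD case. */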
+
+static void
+pad_to_arg_alignment (offset_ptr, boundary)
+ struct args_size *offset_ptr;
+ int boundary;
+{
+ int boundary_in_bytes = boundary / BITS_PER_UNIT;
+
+ if (boundary > BITS_PER_UNIT)
+ {
+ if (offset_ptr->var)
+ {
+ offset_ptr->var =
+#ifdef ARGS_GROW_DOWNWARD
+ round_down
+#else
+ round_up
+#endif
+ (ARGS_SIZE_TREE (*offset_ptr),
+ boundary / BITS_PER_UNIT);
+ offset_ptr->constant = 0; /*?*/
+ }
+ else
+ offset_ptr->constant =
+#ifdef ARGS_GROW_DOWNWARD
+ FLOOR_ROUND (offset_ptr->constant, boundary_in_bytes);
+#else
+ CEIL_ROUND (offset_ptr->constant, boundary_in_bytes);
+#endif
+ }
+}
+
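+/* Subroutine of locate_and_pad_parm: when a parm is padded downward,
+ advance *OFFSET_PTR past the padding so the value is addressed in the
+ last bytes of its slot. For example, a 5-byte BLKmode parm with a
+ PARM_BOUNDARY of 32 bits occupies 8 bytes, so the offset is advanced
+ by 3. */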
+static void
+pad_below (offset_ptr, passed_mode, sizetree)
+ struct args_size *offset_ptr;
+ enum machine_mode passed_mode;
+ tree sizetree;
+{
+ if (passed_mode != BLKmode)
+ {
+ if (GET_MODE_BITSIZE (passed_mode) % PARM_BOUNDARY)
+ offset_ptr->constant
+ += (((GET_MODE_BITSIZE (passed_mode) + PARM_BOUNDARY - 1)
+ / PARM_BOUNDARY * PARM_BOUNDARY / BITS_PER_UNIT)
+ - GET_MODE_SIZE (passed_mode));
+ }
+ else
+ {
+ if (TREE_CODE (sizetree) != INTEGER_CST
+ || (TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY)
+ {
+ /* Round the size up to multiple of PARM_BOUNDARY bits. */
+ tree s2 = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT);
+ /* Add it in. */
+ ADD_PARM_SIZE (*offset_ptr, s2);
+ SUB_PARM_SIZE (*offset_ptr, sizetree);
+ }
+ }
+}
+
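+/* Round VALUE, a tree giving a size in bytes, down to the nearest
+ multiple of DIVISOR. For example, a VALUE of 10 with a DIVISOR of 4
+ yields 8. */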
+static tree
+round_down (value, divisor)
+ tree value;
+ int divisor;
+{
+ return size_binop (MULT_EXPR,
+ size_binop (FLOOR_DIV_EXPR, value, size_int (divisor)),
+ size_int (divisor));
+}
+
+/* Walk the tree of blocks describing the binding levels within a function
+ and warn about uninitialized variables.
+ This is done after calling flow_analysis and before global_alloc
+ clobbers the pseudo-regs to hard regs. */
+
+void
+uninitialized_vars_warning (block)
+ tree block;
+{
+ register tree decl, sub;
+ for (decl = BLOCK_VARS (block); decl; decl = TREE_CHAIN (decl))
+ {
+ if (TREE_CODE (decl) == VAR_DECL
+ /* These warnings are unreliable for aggregates
+ because assigning the fields one by one can fail to convince
+ flow.c that the entire aggregate was initialized.
+ Unions are troublesome because members may be shorter. */
+ && ! AGGREGATE_TYPE_P (TREE_TYPE (decl))
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ && regno_uninitialized (REGNO (DECL_RTL (decl))))
+ warning_with_decl (decl,
+ "`%s' might be used uninitialized in this function");
+ if (TREE_CODE (decl) == VAR_DECL
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ && regno_clobbered_at_setjmp (REGNO (DECL_RTL (decl))))
+ warning_with_decl (decl,
+ "variable `%s' might be clobbered by `longjmp' or `vfork'");
+ }
+ for (sub = BLOCK_SUBBLOCKS (block); sub; sub = TREE_CHAIN (sub))
+ uninitialized_vars_warning (sub);
+}
+
+/* Do the appropriate part of uninitialized_vars_warning
+ but for arguments instead of local variables. */
+
+void
+setjmp_args_warning (block)
+ tree block;
+{
+ register tree decl;
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ if (DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ && regno_clobbered_at_setjmp (REGNO (DECL_RTL (decl))))
+ warning_with_decl (decl, "argument `%s' might be clobbered by `longjmp' or `vfork'");
+}
+
+/* If this function calls setjmp, put all vars into the stack
+ unless they were declared `register'. */
+
+void
+setjmp_protect (block)
+ tree block;
+{
+ register tree decl, sub;
+ for (decl = BLOCK_VARS (block); decl; decl = TREE_CHAIN (decl))
+ if ((TREE_CODE (decl) == VAR_DECL
+ || TREE_CODE (decl) == PARM_DECL)
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ /* If this variable came from an inline function, it must be
+ that its life doesn't overlap the setjmp. If there was a
+ setjmp in the function, it would already be in memory. We
+ must exclude such variables because their DECL_RTL might be
+ set to strange things such as virtual_stack_vars_rtx. */
+ && ! DECL_FROM_INLINE (decl)
+ && (
+#ifdef NON_SAVING_SETJMP
+ /* If longjmp doesn't restore the registers,
+ don't put anything in them. */
+ NON_SAVING_SETJMP
+ ||
+#endif
+ ! DECL_REGISTER (decl)))
+ put_var_into_stack (decl);
+ for (sub = BLOCK_SUBBLOCKS (block); sub; sub = TREE_CHAIN (sub))
+ setjmp_protect (sub);
+}
+
+/* Like the previous function, but for args instead of local variables. */
+
+void
+setjmp_protect_args ()
+{
+ register tree decl, sub;
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ if ((TREE_CODE (decl) == VAR_DECL
+ || TREE_CODE (decl) == PARM_DECL)
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == REG
+ && (
+ /* If longjmp doesn't restore the registers,
+ don't put anything in them. */
+#ifdef NON_SAVING_SETJMP
+ NON_SAVING_SETJMP
+ ||
+#endif
+ ! DECL_REGISTER (decl)))
+ put_var_into_stack (decl);
+}
+
+/* Return the context-pointer register corresponding to DECL,
+ or 0 if it does not need one. */
+
+rtx
+lookup_static_chain (decl)
+ tree decl;
+{
+ tree context = decl_function_context (decl);
+ tree link;
+
+ if (context == 0)
+ return 0;
+
+ /* We treat inline_function_decl as an alias for the current function
+ because that is the inline function whose vars, types, etc.
+ are being merged into the current function.
+ See expand_inline_function. */
+ if (context == current_function_decl || context == inline_function_decl)
+ return virtual_stack_vars_rtx;
+
+ for (link = context_display; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == context)
+ return RTL_EXPR_RTL (TREE_VALUE (link));
+
+ abort ();
+}
+
+/* Convert a stack slot address ADDR for variable VAR
+ (from a containing function)
+ into an address valid in this function (using a static chain). */
+
+rtx
+fix_lexical_addr (addr, var)
+ rtx addr;
+ tree var;
+{
+ rtx basereg;
+ int displacement;
+ tree context = decl_function_context (var);
+ struct function *fp;
+ rtx base = 0;
+
+ /* If this is the present function, we need not do anything. */
+ if (context == current_function_decl || context == inline_function_decl)
+ return addr;
+
+ for (fp = outer_function_chain; fp; fp = fp->next)
+ if (fp->decl == context)
+ break;
+
+ if (fp == 0)
+ abort ();
+
+ /* Decode given address as base reg plus displacement. */
+ if (GET_CODE (addr) == REG)
+ basereg = addr, displacement = 0;
+ else if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ basereg = XEXP (addr, 0), displacement = INTVAL (XEXP (addr, 1));
+ else
+ abort ();
+
+ /* We accept vars reached via the containing function's
+ incoming arg pointer and via its stack variables pointer. */
+ if (basereg == fp->internal_arg_pointer)
+ {
+ /* If reached via arg pointer, get the arg pointer value
+ out of that function's stack frame.
+
+ There are two cases: If a separate ap is needed, allocate a
+ slot in the outer function for it and dereference it that way.
+ This is correct even if the real ap is actually a pseudo.
+ Otherwise, just adjust the offset from the frame pointer to
+ compensate. */
+
+#ifdef NEED_SEPARATE_AP
+ rtx addr;
+
+ if (fp->arg_pointer_save_area == 0)
+ fp->arg_pointer_save_area
+ = assign_outer_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0, fp);
+
+ addr = fix_lexical_addr (XEXP (fp->arg_pointer_save_area, 0), var);
+ addr = memory_address (Pmode, addr);
+
+ base = copy_to_reg (gen_rtx (MEM, Pmode, addr));
+#else
+ displacement += (FIRST_PARM_OFFSET (context) - STARTING_FRAME_OFFSET);
+ base = lookup_static_chain (var);
+#endif
+ }
+
+ else if (basereg == virtual_stack_vars_rtx)
+ {
+ /* This is the same code as lookup_static_chain, duplicated here to
+ avoid an extra call to decl_function_context. */
+ tree link;
+
+ for (link = context_display; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == context)
+ {
+ base = RTL_EXPR_RTL (TREE_VALUE (link));
+ break;
+ }
+ }
+
+ if (base == 0)
+ abort ();
+
+ /* Use same offset, relative to appropriate static chain or argument
+ pointer. */
+ return plus_constant (base, displacement);
+}
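+
+/* For example (illustrative): if VAR lives at virtual_stack_vars_rtx
+   plus 12 in its containing function, the code above rebases that same
+   displacement onto the containing function's static chain value C
+   found in context_display, yielding the address C + 12.  */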
+
+/* Return the address of the trampoline for entering nested fn FUNCTION.
+ If necessary, allocate a trampoline (in the stack frame)
+ and emit rtl to initialize its contents (at entry to this function). */
+
+rtx
+trampoline_address (function)
+ tree function;
+{
+ tree link;
+ tree rtlexp;
+ rtx tramp;
+ struct function *fp;
+ tree fn_context;
+
+ /* Find an existing trampoline and return it. */
+ for (link = trampoline_list; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == function)
+ return XEXP (RTL_EXPR_RTL (TREE_VALUE (link)), 0);
+ for (fp = outer_function_chain; fp; fp = fp->next)
+ for (link = fp->trampoline_list; link; link = TREE_CHAIN (link))
+ if (TREE_PURPOSE (link) == function)
+ {
+ tramp = fix_lexical_addr (XEXP (RTL_EXPR_RTL (TREE_VALUE (link)), 0),
+ function);
+ return round_trampoline_addr (tramp);
+ }
+
+ /* None exists; we must make one. */
+
+ /* Find the `struct function' for the function containing FUNCTION. */
+ fp = 0;
+ fn_context = decl_function_context (function);
+ if (fn_context != current_function_decl)
+ for (fp = outer_function_chain; fp; fp = fp->next)
+ if (fp->decl == fn_context)
+ break;
+
+ /* Allocate run-time space for this trampoline
+ (usually in the defining function's stack frame). */
+#ifdef ALLOCATE_TRAMPOLINE
+ tramp = ALLOCATE_TRAMPOLINE (fp);
+#else
+ /* If rounding needed, allocate extra space
+ to ensure we have TRAMPOLINE_SIZE bytes left after rounding up. */
+#ifdef TRAMPOLINE_ALIGNMENT
+#define TRAMPOLINE_REAL_SIZE (TRAMPOLINE_SIZE + TRAMPOLINE_ALIGNMENT - 1)
+#else
+#define TRAMPOLINE_REAL_SIZE (TRAMPOLINE_SIZE)
+#endif
+ if (fp != 0)
+ tramp = assign_outer_stack_local (BLKmode, TRAMPOLINE_REAL_SIZE, 0, fp);
+ else
+ tramp = assign_stack_local (BLKmode, TRAMPOLINE_REAL_SIZE, 0);
+#endif
+
+ /* Record the trampoline for reuse and note it for later initialization
+ by expand_function_end. */
+ if (fp != 0)
+ {
+ push_obstacks (fp->function_maybepermanent_obstack,
+ fp->function_maybepermanent_obstack);
+ rtlexp = make_node (RTL_EXPR);
+ RTL_EXPR_RTL (rtlexp) = tramp;
+ fp->trampoline_list = tree_cons (function, rtlexp, fp->trampoline_list);
+ pop_obstacks ();
+ }
+ else
+ {
+ /* Make the RTL_EXPR node temporary, not momentary, so that the
+ trampoline_list doesn't become garbage. */
+ int momentary = suspend_momentary ();
+ rtlexp = make_node (RTL_EXPR);
+ resume_momentary (momentary);
+
+ RTL_EXPR_RTL (rtlexp) = tramp;
+ trampoline_list = tree_cons (function, rtlexp, trampoline_list);
+ }
+
+ tramp = fix_lexical_addr (XEXP (tramp, 0), function);
+ return round_trampoline_addr (tramp);
+}
+
+/* Given a trampoline address,
+   round it to a multiple of TRAMPOLINE_ALIGNMENT.  */
+
+static rtx
+round_trampoline_addr (tramp)
+ rtx tramp;
+{
+#ifdef TRAMPOLINE_ALIGNMENT
+ /* Round address up to desired boundary. */
+ rtx temp = gen_reg_rtx (Pmode);
+ temp = expand_binop (Pmode, add_optab, tramp,
+ GEN_INT (TRAMPOLINE_ALIGNMENT - 1),
+ temp, 0, OPTAB_LIB_WIDEN);
+ tramp = expand_binop (Pmode, and_optab, temp,
+ GEN_INT (- TRAMPOLINE_ALIGNMENT),
+ temp, 0, OPTAB_LIB_WIDEN);
+#endif
+ return tramp;
+}
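+
+/* The two expand_binop calls compute the usual rounding idiom
+   (TRAMP + ALIGN - 1) & -ALIGN; e.g. with TRAMPOLINE_ALIGNMENT == 16,
+   address 0x1003 becomes (0x1003 + 15) & ~15 == 0x1010, the next
+   16-byte boundary.  */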
+
+/* The functions identify_blocks and reorder_blocks provide a way to
+ reorder the tree of BLOCK nodes, for optimizers that reshuffle or
+ duplicate portions of the RTL code. Call identify_blocks before
+ changing the RTL, and call reorder_blocks after. */
+
+/* Put all this function's BLOCK nodes into a vector, and return it.
+ Also store in each NOTE for the beginning or end of a block
+ the index of that block in the vector.
+ The arguments are TOP_BLOCK, the top-level block of the function,
+ and INSNS, the insn chain of the function. */
+
+tree *
+identify_blocks (top_block, insns)
+ tree top_block;
+ rtx insns;
+{
+ int n_blocks;
+ tree *block_vector;
+ int *block_stack;
+ int depth = 0;
+ int next_block_number = 0;
+ int current_block_number = 0;
+ rtx insn;
+
+ if (top_block == 0)
+ return 0;
+
+ n_blocks = all_blocks (top_block, 0);
+ block_vector = (tree *) xmalloc (n_blocks * sizeof (tree));
+ block_stack = (int *) alloca (n_blocks * sizeof (int));
+
+ all_blocks (top_block, block_vector);
+
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG)
+ {
+ block_stack[depth++] = current_block_number;
+ current_block_number = next_block_number;
+ NOTE_BLOCK_NUMBER (insn) = next_block_number++;
+ }
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END)
+ {
+ current_block_number = block_stack[--depth];
+ NOTE_BLOCK_NUMBER (insn) = current_block_number;
+ }
+ }
+
+ return block_vector;
+}
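+
+/* For instance (illustrative), a body shaped { A { B } { C } } gets
+   its BLOCK_BEG notes numbered 0, 1, 2 in order of first appearance,
+   which is exactly each block's index in the returned vector; every
+   BLOCK_END note is tagged with the number of the block that becomes
+   current again.  reorder_blocks uses the BEG numbers to look blocks
+   up in the vector after the RTL has been reshuffled.  */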
+
+/* Given BLOCK_VECTOR which was returned by identify_blocks,
+ and a revised instruction chain, rebuild the tree structure
+ of BLOCK nodes to correspond to the new order of RTL.
+ The new block tree is inserted below TOP_BLOCK.
+ Returns the current top-level block. */
+
+tree
+reorder_blocks (block_vector, top_block, insns)
+ tree *block_vector;
+ tree top_block;
+ rtx insns;
+{
+ tree current_block = top_block;
+ rtx insn;
+
+ if (block_vector == 0)
+ return top_block;
+
+ /* Prune the old tree away, so that it doesn't get in the way. */
+ BLOCK_SUBBLOCKS (current_block) = 0;
+
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG)
+ {
+ tree block = block_vector[NOTE_BLOCK_NUMBER (insn)];
+ /* If we have seen this block before, copy it. */
+ if (TREE_ASM_WRITTEN (block))
+ block = copy_node (block);
+ BLOCK_SUBBLOCKS (block) = 0;
+ TREE_ASM_WRITTEN (block) = 1;
+ BLOCK_SUPERCONTEXT (block) = current_block;
+ BLOCK_CHAIN (block) = BLOCK_SUBBLOCKS (current_block);
+ BLOCK_SUBBLOCKS (current_block) = block;
+ current_block = block;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END)
+ {
+ BLOCK_SUBBLOCKS (current_block)
+ = blocks_nreverse (BLOCK_SUBBLOCKS (current_block));
+ current_block = BLOCK_SUPERCONTEXT (current_block);
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+ }
+
+ return current_block;
+}
+
+/* Reverse the order of elements in the chain T of blocks,
+ and return the new head of the chain (old last element). */
+
+static tree
+blocks_nreverse (t)
+ tree t;
+{
+ register tree prev = 0, decl, next;
+ for (decl = t; decl; decl = next)
+ {
+ next = BLOCK_CHAIN (decl);
+ BLOCK_CHAIN (decl) = prev;
+ prev = decl;
+ }
+ return prev;
+}
+
+/* Count the subblocks of BLOCK, and list them all into the vector VECTOR.
+ Also clear TREE_ASM_WRITTEN in all blocks. */
+
+static int
+all_blocks (block, vector)
+ tree block;
+ tree *vector;
+{
+ int n_blocks = 1;
+ tree subblocks;
+
+ TREE_ASM_WRITTEN (block) = 0;
+ /* Record this block. */
+ if (vector)
+ vector[0] = block;
+
+ /* Record the subblocks, and their subblocks. */
+ for (subblocks = BLOCK_SUBBLOCKS (block);
+ subblocks; subblocks = BLOCK_CHAIN (subblocks))
+ n_blocks += all_blocks (subblocks, vector ? vector + n_blocks : 0);
+
+ return n_blocks;
+}
+
+/* Build bytecode call descriptor for function SUBR. */
+
+rtx
+bc_build_calldesc (subr)
+ tree subr;
+{
+ tree calldesc = 0, arg;
+ int nargs = 0;
+
+ /* Build the argument description vector in reverse order. */
+ DECL_ARGUMENTS (subr) = nreverse (DECL_ARGUMENTS (subr));
+ nargs = 0;
+
+ for (arg = DECL_ARGUMENTS (subr); arg; arg = TREE_CHAIN (arg))
+ {
+ ++nargs;
+
+ calldesc = tree_cons ((tree) 0, size_in_bytes (TREE_TYPE (arg)), calldesc);
+ calldesc = tree_cons ((tree) 0, bc_runtime_type_code (TREE_TYPE (arg)), calldesc);
+ }
+
+ DECL_ARGUMENTS (subr) = nreverse (DECL_ARGUMENTS (subr));
+
+ /* Prepend the function's return type. */
+ calldesc = tree_cons ((tree) 0,
+ size_in_bytes (TREE_TYPE (TREE_TYPE (subr))),
+ calldesc);
+
+ calldesc = tree_cons ((tree) 0,
+ bc_runtime_type_code (TREE_TYPE (TREE_TYPE (subr))),
+ calldesc);
+
+ /* Prepend the arg count. */
+ calldesc = tree_cons ((tree) 0, build_int_2 (nargs, 0), calldesc);
+
+ /* Output the call description vector and get its address. */
+ calldesc = build_nt (CONSTRUCTOR, (tree) 0, calldesc);
+ TREE_TYPE (calldesc) = build_array_type (integer_type_node,
+ build_index_type (build_int_2 (nargs * 2, 0)));
+
+ return output_constant_def (calldesc);
+}
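+
+/* The vector built above, read front to back, is laid out as
+   (illustrative, derived from the conses):
+
+	nargs,
+	return type code,  return size,
+	arg 1 type code,   arg 1 size,  ...  arg N type code,  arg N size
+
+   with the arguments in their original declaration order.  */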
+
+
+/* Generate RTL for the start of the function SUBR (a FUNCTION_DECL tree node)
+ and initialize static variables for generating RTL for the statements
+ of the function. */
+
+void
+init_function_start (subr, filename, line)
+ tree subr;
+ char *filename;
+ int line;
+{
+ char *junk;
+
+ if (output_bytecode)
+ {
+ this_function_decl = subr;
+ this_function_calldesc = bc_build_calldesc (subr);
+ local_vars_size = 0;
+ stack_depth = 0;
+ max_stack_depth = 0;
+ stmt_expr_depth = 0;
+ return;
+ }
+
+ init_stmt_for_function ();
+
+ cse_not_expected = ! optimize;
+
+ /* Caller save not needed yet. */
+ caller_save_needed = 0;
+
+ /* No stack slots have been made yet. */
+ stack_slot_list = 0;
+
+ /* There is no stack slot for handling nonlocal gotos. */
+ nonlocal_goto_handler_slot = 0;
+ nonlocal_goto_stack_level = 0;
+
+ /* No labels have been declared for nonlocal use. */
+ nonlocal_labels = 0;
+
+ /* No function calls so far in this function. */
+ function_call_count = 0;
+
+ /* No parm regs have been allocated.
+ (This is important for output_inline_function.) */
+ max_parm_reg = LAST_VIRTUAL_REGISTER + 1;
+
+ /* Initialize the RTL mechanism. */
+ init_emit ();
+
+  /* Initialize the queue of pending postincrements and postdecrements,
+ and some other info in expr.c. */
+ init_expr ();
+
+ /* We haven't done register allocation yet. */
+ reg_renumber = 0;
+
+ init_const_rtx_hash_table ();
+
+ current_function_name = (*decl_printable_name) (subr, &junk);
+
+ /* Nonzero if this is a nested function that uses a static chain. */
+
+ current_function_needs_context
+ = (decl_function_context (current_function_decl) != 0);
+
+ /* Set if a call to setjmp is seen. */
+ current_function_calls_setjmp = 0;
+
+ /* Set if a call to longjmp is seen. */
+ current_function_calls_longjmp = 0;
+
+ current_function_calls_alloca = 0;
+ current_function_has_nonlocal_label = 0;
+ current_function_has_nonlocal_goto = 0;
+ current_function_contains_functions = 0;
+
+ current_function_returns_pcc_struct = 0;
+ current_function_returns_struct = 0;
+ current_function_epilogue_delay_list = 0;
+ current_function_uses_const_pool = 0;
+ current_function_uses_pic_offset_table = 0;
+
+ /* We have not yet needed to make a label to jump to for tail-recursion. */
+ tail_recursion_label = 0;
+
+ /* We haven't had a need to make a save area for ap yet. */
+
+ arg_pointer_save_area = 0;
+
+ /* No stack slots allocated yet. */
+ frame_offset = 0;
+
+ /* No SAVE_EXPRs in this function yet. */
+ save_expr_regs = 0;
+
+ /* No RTL_EXPRs in this function yet. */
+ rtl_expr_chain = 0;
+
+ /* We have not allocated any temporaries yet. */
+ temp_slots = 0;
+ temp_slot_level = 0;
+ target_temp_slot_level = 0;
+
+  /* Within function body, compute a type's size as soon as it is laid out.  */
+ immediate_size_expand++;
+
+ /* We haven't made any trampolines for this function yet. */
+ trampoline_list = 0;
+
+ init_pending_stack_adjust ();
+ inhibit_defer_pop = 0;
+
+ current_function_outgoing_args_size = 0;
+
+ /* Initialize the insn lengths. */
+ init_insn_lengths ();
+
+ /* Prevent ever trying to delete the first instruction of a function.
+ Also tell final how to output a linenum before the function prologue. */
+ emit_line_note (filename, line);
+
+ /* Make sure first insn is a note even if we don't want linenums.
+ This makes sure the first insn will never be deleted.
+ Also, final expects a note to appear there. */
+ emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+ /* Set flags used by final.c. */
+ if (aggregate_value_p (DECL_RESULT (subr)))
+ {
+#ifdef PCC_STATIC_STRUCT_RETURN
+ current_function_returns_pcc_struct = 1;
+#endif
+ current_function_returns_struct = 1;
+ }
+
+ /* Warn if this value is an aggregate type,
+ regardless of which calling convention we are using for it. */
+ if (warn_aggregate_return
+ && AGGREGATE_TYPE_P (TREE_TYPE (DECL_RESULT (subr))))
+ warning ("function returns an aggregate");
+
+ current_function_returns_pointer
+ = (TREE_CODE (TREE_TYPE (DECL_RESULT (subr))) == POINTER_TYPE);
+
+ /* Indicate that we need to distinguish between the return value of the
+ present function and the return value of a function being called. */
+ rtx_equal_function_value_matters = 1;
+
+ /* Indicate that we have not instantiated virtual registers yet. */
+ virtuals_instantiated = 0;
+
+ /* Indicate we have no need of a frame pointer yet. */
+ frame_pointer_needed = 0;
+
+ /* By default assume not varargs. */
+ current_function_varargs = 0;
+}
+
+/* Indicate that the current function uses extra args
+ not explicitly mentioned in the argument list in any fashion. */
+
+void
+mark_varargs ()
+{
+ current_function_varargs = 1;
+}
+
+/* Expand a call to __main at the beginning of a possible main function. */
+
+void
+expand_main_function ()
+{
+ if (!output_bytecode)
+ {
+ /* The zero below avoids a possible parse error */
+ 0;
+#if !defined (INIT_SECTION_ASM_OP) || defined (INVOKE__main)
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, NAME__MAIN), 0,
+ VOIDmode, 0);
+#endif /* not INIT_SECTION_ASM_OP or INVOKE__main */
+ }
+}
+
+extern struct obstack permanent_obstack;
+
+/* Expand start of bytecode function. See comment at
+ expand_function_start below for details. */
+
+void
+bc_expand_function_start (subr, parms_have_cleanups)
+ tree subr;
+ int parms_have_cleanups;
+{
+ char label[20], *name;
+ static int nlab;
+ tree thisarg;
+ int argsz;
+
+ if (TREE_PUBLIC (subr))
+ bc_globalize_label (IDENTIFIER_POINTER (DECL_NAME (subr)));
+
+#ifdef DEBUG_PRINT_CODE
+ fprintf (stderr, "\n<func %s>\n", IDENTIFIER_POINTER (DECL_NAME (subr)));
+#endif
+
+ for (argsz = 0, thisarg = DECL_ARGUMENTS (subr); thisarg; thisarg = TREE_CHAIN (thisarg))
+ {
+ if (DECL_RTL (thisarg))
+ abort (); /* Should be NULL here I think. */
+ else if (TREE_CONSTANT (DECL_SIZE (thisarg)))
+ {
+ DECL_RTL (thisarg) = bc_gen_rtx ((char *) 0, argsz, (struct bc_label *) 0);
+ argsz += TREE_INT_CST_LOW (DECL_SIZE (thisarg));
+ }
+ else
+ {
+ /* Variable-sized objects are pointers to their storage. */
+ DECL_RTL (thisarg) = bc_gen_rtx ((char *) 0, argsz, (struct bc_label *) 0);
+ argsz += POINTER_SIZE;
+ }
+ }
+
+ bc_begin_function (bc_xstrdup (IDENTIFIER_POINTER (DECL_NAME (subr))));
+
+ ASM_GENERATE_INTERNAL_LABEL (label, "LX", nlab);
+
+ ++nlab;
+ name = (char *) obstack_copy0 (&permanent_obstack, label, strlen (label));
+ this_function_callinfo = bc_gen_rtx (name, 0, (struct bc_label *) 0);
+ this_function_bytecode =
+ bc_emit_trampoline (BYTECODE_LABEL (this_function_callinfo));
+}
+
+
+/* Expand end of bytecode function. See the comment at
+   expand_function_end(), below, for details. */
+
+void
+bc_expand_function_end ()
+{
+ char *ptrconsts;
+
+ expand_null_return ();
+
+ /* Emit any fixup code. This must be done before the call to
+     BC_END_FUNCTION (), since that will cause the bytecode
+ segment to be finished off and closed. */
+
+ expand_fixups (NULL_RTX);
+
+ ptrconsts = bc_end_function ();
+
+ bc_align_const (2 /* INT_ALIGN */);
+
+  /* If this changes, also make sure to change bc-interp.h!  */
+
+ bc_emit_const_labeldef (BYTECODE_LABEL (this_function_callinfo));
+ bc_emit_const ((char *) &max_stack_depth, sizeof max_stack_depth);
+ bc_emit_const ((char *) &local_vars_size, sizeof local_vars_size);
+ bc_emit_const_labelref (this_function_bytecode, 0);
+ bc_emit_const_labelref (ptrconsts, 0);
+ bc_emit_const_labelref (BYTECODE_LABEL (this_function_calldesc), 0);
+}
+
+
+/* Start the RTL for a new function, and set variables used for
+ emitting RTL.
+ SUBR is the FUNCTION_DECL node.
+ PARMS_HAVE_CLEANUPS is nonzero if there are cleanups associated with
+ the function's parameters, which must be run at any return statement. */
+
+void
+expand_function_start (subr, parms_have_cleanups)
+ tree subr;
+ int parms_have_cleanups;
+{
+ register int i;
+ tree tem;
+ rtx last_ptr;
+
+ if (output_bytecode)
+ {
+ bc_expand_function_start (subr, parms_have_cleanups);
+ return;
+ }
+
+ /* Make sure volatile mem refs aren't considered
+ valid operands of arithmetic insns. */
+ init_recog_no_volatile ();
+
+ /* If function gets a static chain arg, store it in the stack frame.
+ Do this first, so it gets the first stack slot offset. */
+ if (current_function_needs_context)
+ {
+ last_ptr = assign_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0);
+
+#ifdef SMALL_REGISTER_CLASSES
+ /* Delay copying static chain if it is not a register to avoid
+ conflicts with regs used for parameters. */
+ if (GET_CODE (static_chain_incoming_rtx) == REG)
+#endif
+ emit_move_insn (last_ptr, static_chain_incoming_rtx);
+ }
+
+ /* If the parameters of this function need cleaning up, get a label
+ for the beginning of the code which executes those cleanups. This must
+ be done before doing anything with return_label. */
+ if (parms_have_cleanups)
+ cleanup_label = gen_label_rtx ();
+ else
+ cleanup_label = 0;
+
+ /* Make the label for return statements to jump to, if this machine
+ does not have a one-instruction return and uses an epilogue,
+ or if it returns a structure, or if it has parm cleanups. */
+#ifdef HAVE_return
+ if (cleanup_label == 0 && HAVE_return
+ && ! current_function_returns_pcc_struct
+ && ! (current_function_returns_struct && ! optimize))
+ return_label = 0;
+ else
+ return_label = gen_label_rtx ();
+#else
+ return_label = gen_label_rtx ();
+#endif
+
+ /* Initialize rtx used to return the value. */
+ /* Do this before assign_parms so that we copy the struct value address
+ before any library calls that assign parms might generate. */
+
+ /* Decide whether to return the value in memory or in a register. */
+ if (aggregate_value_p (DECL_RESULT (subr)))
+ {
+ /* Returning something that won't go in a register. */
+      register rtx value_address = 0;
+
+#ifdef PCC_STATIC_STRUCT_RETURN
+ if (current_function_returns_pcc_struct)
+ {
+ int size = int_size_in_bytes (TREE_TYPE (DECL_RESULT (subr)));
+ value_address = assemble_static_space (size);
+ }
+ else
+#endif
+ {
+ /* Expect to be passed the address of a place to store the value.
+ If it is passed as an argument, assign_parms will take care of
+ it. */
+ if (struct_value_incoming_rtx)
+ {
+ value_address = gen_reg_rtx (Pmode);
+ emit_move_insn (value_address, struct_value_incoming_rtx);
+ }
+ }
+ if (value_address)
+ {
+ DECL_RTL (DECL_RESULT (subr))
+ = gen_rtx (MEM, DECL_MODE (DECL_RESULT (subr)), value_address);
+ MEM_IN_STRUCT_P (DECL_RTL (DECL_RESULT (subr)))
+ = AGGREGATE_TYPE_P (TREE_TYPE (DECL_RESULT (subr)));
+ }
+ }
+ else if (DECL_MODE (DECL_RESULT (subr)) == VOIDmode)
+ /* If return mode is void, this decl rtl should not be used. */
+ DECL_RTL (DECL_RESULT (subr)) = 0;
+ else if (parms_have_cleanups)
+ {
+ /* If function will end with cleanup code for parms,
+ compute the return values into a pseudo reg,
+ which we will copy into the true return register
+ after the cleanups are done. */
+
+ enum machine_mode mode = DECL_MODE (DECL_RESULT (subr));
+
+#ifdef PROMOTE_FUNCTION_RETURN
+ tree type = TREE_TYPE (DECL_RESULT (subr));
+ int unsignedp = TREE_UNSIGNED (type);
+
+ mode = promote_mode (type, mode, &unsignedp, 1);
+#endif
+
+ DECL_RTL (DECL_RESULT (subr)) = gen_reg_rtx (mode);
+ }
+ else
+ /* Scalar, returned in a register. */
+ {
+#ifdef FUNCTION_OUTGOING_VALUE
+ DECL_RTL (DECL_RESULT (subr))
+ = FUNCTION_OUTGOING_VALUE (TREE_TYPE (DECL_RESULT (subr)), subr);
+#else
+ DECL_RTL (DECL_RESULT (subr))
+ = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (subr)), subr);
+#endif
+
+ /* Mark this reg as the function's return value. */
+ if (GET_CODE (DECL_RTL (DECL_RESULT (subr))) == REG)
+ {
+ REG_FUNCTION_VALUE_P (DECL_RTL (DECL_RESULT (subr))) = 1;
+ /* Needed because we may need to move this to memory
+ in case it's a named return value whose address is taken. */
+ DECL_REGISTER (DECL_RESULT (subr)) = 1;
+ }
+ }
+
+ /* Initialize rtx for parameters and local variables.
+ In some cases this requires emitting insns. */
+
+ assign_parms (subr, 0);
+
+#ifdef SMALL_REGISTER_CLASSES
+ /* Copy the static chain now if it wasn't a register. The delay is to
+ avoid conflicts with the parameter passing registers. */
+
+ if (current_function_needs_context)
+ if (GET_CODE (static_chain_incoming_rtx) != REG)
+ emit_move_insn (last_ptr, static_chain_incoming_rtx);
+#endif
+
+ /* The following was moved from init_function_start.
+ The move is supposed to make sdb output more accurate. */
+ /* Indicate the beginning of the function body,
+ as opposed to parm setup. */
+ emit_note (NULL_PTR, NOTE_INSN_FUNCTION_BEG);
+
+ /* If doing stupid allocation, mark parms as born here. */
+
+ if (GET_CODE (get_last_insn ()) != NOTE)
+ emit_note (NULL_PTR, NOTE_INSN_DELETED);
+ parm_birth_insn = get_last_insn ();
+
+ if (obey_regdecls)
+ {
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_parm_reg; i++)
+ use_variable (regno_reg_rtx[i]);
+
+ if (current_function_internal_arg_pointer != virtual_incoming_args_rtx)
+ use_variable (current_function_internal_arg_pointer);
+ }
+
+ /* Fetch static chain values for containing functions. */
+ tem = decl_function_context (current_function_decl);
+ /* If not doing stupid register allocation, then start off with the static
+ chain pointer in a pseudo register. Otherwise, we use the stack
+ address that was generated above. */
+ if (tem && ! obey_regdecls)
+ last_ptr = copy_to_reg (static_chain_incoming_rtx);
+ context_display = 0;
+ while (tem)
+ {
+ tree rtlexp = make_node (RTL_EXPR);
+
+ RTL_EXPR_RTL (rtlexp) = last_ptr;
+ context_display = tree_cons (tem, rtlexp, context_display);
+ tem = decl_function_context (tem);
+ if (tem == 0)
+ break;
+ /* Chain thru stack frames, assuming pointer to next lexical frame
+ is found at the place we always store it. */
+#ifdef FRAME_GROWS_DOWNWARD
+ last_ptr = plus_constant (last_ptr, - GET_MODE_SIZE (Pmode));
+#endif
+ last_ptr = copy_to_reg (gen_rtx (MEM, Pmode,
+ memory_address (Pmode, last_ptr)));
+
+ /* If we are not optimizing, ensure that we know that this
+ piece of context is live over the entire function. */
+ if (! optimize)
+ save_expr_regs = gen_rtx (EXPR_LIST, VOIDmode, last_ptr,
+ save_expr_regs);
+ }
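+
+  /* Illustration (hypothetical nesting): for f nested in g nested in
+     h, the two iterations above leave context_display with a
+     (context, RTL) pair for g and another for h; each successive
+     chain pointer is fetched from the slot in the enclosing frame
+     where it is always stored.  */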
+
+ /* After the display initializations is where the tail-recursion label
+ should go, if we end up needing one. Ensure we have a NOTE here
+ since some things (like trampolines) get placed before this. */
+ tail_recursion_reentry = emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+ /* Evaluate now the sizes of any types declared among the arguments. */
+ for (tem = nreverse (get_pending_sizes ()); tem; tem = TREE_CHAIN (tem))
+ expand_expr (TREE_VALUE (tem), const0_rtx, VOIDmode, 0);
+
+ /* Make sure there is a line number after the function entry setup code. */
+ force_next_line_note ();
+}
+
+/* Generate RTL for the end of the current function.
+ FILENAME and LINE are the current position in the source file.
+
+ It is up to language-specific callers to do cleanups for parameters--
+ or else, supply 1 for END_BINDINGS and we will call expand_end_bindings. */
+
+void
+expand_function_end (filename, line, end_bindings)
+ char *filename;
+ int line;
+ int end_bindings;
+{
+ register int i;
+ tree link;
+
+ static rtx initial_trampoline;
+
+ if (output_bytecode)
+ {
+ bc_expand_function_end ();
+ return;
+ }
+
+#ifdef NON_SAVING_SETJMP
+ /* Don't put any variables in registers if we call setjmp
+ on a machine that fails to restore the registers. */
+ if (NON_SAVING_SETJMP && current_function_calls_setjmp)
+ {
+ setjmp_protect (DECL_INITIAL (current_function_decl));
+ setjmp_protect_args ();
+ }
+#endif
+
+ /* Save the argument pointer if a save area was made for it. */
+ if (arg_pointer_save_area)
+ {
+ rtx x = gen_move_insn (arg_pointer_save_area, virtual_incoming_args_rtx);
+ emit_insn_before (x, tail_recursion_reentry);
+ }
+
+ /* Initialize any trampolines required by this function. */
+ for (link = trampoline_list; link; link = TREE_CHAIN (link))
+ {
+ tree function = TREE_PURPOSE (link);
+ rtx context = lookup_static_chain (function);
+ rtx tramp = RTL_EXPR_RTL (TREE_VALUE (link));
+ rtx seq;
+
+ /* First make sure this compilation has a template for
+ initializing trampolines. */
+ if (initial_trampoline == 0)
+ {
+ end_temporary_allocation ();
+ initial_trampoline
+ = gen_rtx (MEM, BLKmode, assemble_trampoline_template ());
+ resume_temporary_allocation ();
+ }
+
+ /* Generate insns to initialize the trampoline. */
+ start_sequence ();
+ tramp = change_address (initial_trampoline, BLKmode,
+ round_trampoline_addr (XEXP (tramp, 0)));
+ emit_block_move (tramp, initial_trampoline, GEN_INT (TRAMPOLINE_SIZE),
+ FUNCTION_BOUNDARY / BITS_PER_UNIT);
+ INITIALIZE_TRAMPOLINE (XEXP (tramp, 0),
+ XEXP (DECL_RTL (function), 0), context);
+ seq = get_insns ();
+ end_sequence ();
+
+ /* Put those insns at entry to the containing function (this one). */
+ emit_insns_before (seq, tail_recursion_reentry);
+ }
+
+#if 0 /* I think unused parms are legitimate enough. */
+ /* Warn about unused parms. */
+ if (warn_unused)
+ {
+      tree decl;
+
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ if (! TREE_USED (decl) && TREE_CODE (decl) == VAR_DECL)
+ warning_with_decl (decl, "unused parameter `%s'");
+ }
+#endif
+
+ /* Delete handlers for nonlocal gotos if nothing uses them. */
+ if (nonlocal_goto_handler_slot != 0 && !current_function_has_nonlocal_label)
+ delete_handlers ();
+
+ /* End any sequences that failed to be closed due to syntax errors. */
+ while (in_sequence_p ())
+ end_sequence ();
+
+ /* Outside function body, can't compute type's actual size
+ until next function's body starts. */
+ immediate_size_expand--;
+
+ /* If doing stupid register allocation,
+ mark register parms as dying here. */
+
+ if (obey_regdecls)
+ {
+ rtx tem;
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_parm_reg; i++)
+ use_variable (regno_reg_rtx[i]);
+
+ /* Likewise for the regs of all the SAVE_EXPRs in the function. */
+
+ for (tem = save_expr_regs; tem; tem = XEXP (tem, 1))
+ {
+ use_variable (XEXP (tem, 0));
+ use_variable_after (XEXP (tem, 0), parm_birth_insn);
+ }
+
+ if (current_function_internal_arg_pointer != virtual_incoming_args_rtx)
+ use_variable (current_function_internal_arg_pointer);
+ }
+
+ clear_pending_stack_adjust ();
+ do_pending_stack_adjust ();
+
+ /* Mark the end of the function body.
+ If control reaches this insn, the function can drop through
+ without returning a value. */
+ emit_note (NULL_PTR, NOTE_INSN_FUNCTION_END);
+
+ /* Output a linenumber for the end of the function.
+ SDB depends on this. */
+ emit_line_note_force (filename, line);
+
+ /* Output the label for the actual return from the function,
+ if one is expected. This happens either because a function epilogue
+ is used instead of a return instruction, or because a return was done
+ with a goto in order to run local cleanups, or because of pcc-style
+ structure returning. */
+
+ if (return_label)
+ emit_label (return_label);
+
+ /* C++ uses this. */
+ if (end_bindings)
+ expand_end_bindings (0, 0, 0);
+
+ /* If we had calls to alloca, and this machine needs
+ an accurate stack pointer to exit the function,
+ insert some code to save and restore the stack pointer. */
+#ifdef EXIT_IGNORE_STACK
+ if (! EXIT_IGNORE_STACK)
+#endif
+ if (current_function_calls_alloca)
+ {
+ rtx tem = 0;
+
+ emit_stack_save (SAVE_FUNCTION, &tem, parm_birth_insn);
+ emit_stack_restore (SAVE_FUNCTION, tem, NULL_RTX);
+ }
+
+ /* If scalar return value was computed in a pseudo-reg,
+ copy that to the hard return register. */
+ if (DECL_RTL (DECL_RESULT (current_function_decl)) != 0
+ && GET_CODE (DECL_RTL (DECL_RESULT (current_function_decl))) == REG
+ && (REGNO (DECL_RTL (DECL_RESULT (current_function_decl)))
+ >= FIRST_PSEUDO_REGISTER))
+ {
+ rtx real_decl_result;
+
+#ifdef FUNCTION_OUTGOING_VALUE
+ real_decl_result
+ = FUNCTION_OUTGOING_VALUE (TREE_TYPE (DECL_RESULT (current_function_decl)),
+ current_function_decl);
+#else
+ real_decl_result
+ = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (current_function_decl)),
+ current_function_decl);
+#endif
+ REG_FUNCTION_VALUE_P (real_decl_result) = 1;
+ emit_move_insn (real_decl_result,
+ DECL_RTL (DECL_RESULT (current_function_decl)));
+ emit_insn (gen_rtx (USE, VOIDmode, real_decl_result));
+ }
+
+ /* If returning a structure, arrange to return the address of the value
+ in a place where debuggers expect to find it.
+
+ If returning a structure PCC style,
+ the caller also depends on this value.
+ And current_function_returns_pcc_struct is not necessarily set. */
+ if (current_function_returns_struct
+ || current_function_returns_pcc_struct)
+ {
+ rtx value_address = XEXP (DECL_RTL (DECL_RESULT (current_function_decl)), 0);
+ tree type = TREE_TYPE (DECL_RESULT (current_function_decl));
+#ifdef FUNCTION_OUTGOING_VALUE
+ rtx outgoing
+ = FUNCTION_OUTGOING_VALUE (build_pointer_type (type),
+ current_function_decl);
+#else
+ rtx outgoing
+ = FUNCTION_VALUE (build_pointer_type (type),
+ current_function_decl);
+#endif
+
+ /* Mark this as a function return value so integrate will delete the
+ assignment and USE below when inlining this function. */
+ REG_FUNCTION_VALUE_P (outgoing) = 1;
+
+ emit_move_insn (outgoing, value_address);
+ use_variable (outgoing);
+ }
+
+ /* Output a return insn if we are using one.
+ Otherwise, let the rtl chain end here, to drop through
+ into the epilogue. */
+
+#ifdef HAVE_return
+ if (HAVE_return)
+ {
+ emit_jump_insn (gen_return ());
+ emit_barrier ();
+ }
+#endif
+
+ /* Fix up any gotos that jumped out to the outermost
+ binding level of the function.
+ Must follow emitting RETURN_LABEL. */
+
+ /* If you have any cleanups to do at this point,
+ and they need to create temporary variables,
+ then you will lose. */
+ expand_fixups (get_insns ());
+}
+
+/* These arrays record the INSN_UIDs of the prologue and epilogue insns. */
+
+static int *prologue;
+static int *epilogue;
+
+/* Create an array that records the INSN_UIDs of INSNS (either a sequence
+ or a single insn). */
+
+static int *
+record_insns (insns)
+ rtx insns;
+{
+ int *vec;
+
+ if (GET_CODE (insns) == SEQUENCE)
+ {
+ int len = XVECLEN (insns, 0);
+ vec = (int *) oballoc ((len + 1) * sizeof (int));
+ vec[len] = 0;
+ while (--len >= 0)
+ vec[len] = INSN_UID (XVECEXP (insns, 0, len));
+ }
+ else
+ {
+ vec = (int *) oballoc (2 * sizeof (int));
+ vec[0] = INSN_UID (insns);
+ vec[1] = 0;
+ }
+ return vec;
+}
+
+/* Determine how many INSN_UIDs in VEC are part of INSN. */
+
+static int
+contains (insn, vec)
+ rtx insn;
+ int *vec;
+{
+ register int i, j;
+
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ int count = 0;
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ for (j = 0; vec[j]; j++)
+ if (INSN_UID (XVECEXP (PATTERN (insn), 0, i)) == vec[j])
+ count++;
+ return count;
+ }
+ else
+ {
+ for (j = 0; vec[j]; j++)
+ if (INSN_UID (insn) == vec[j])
+ return 1;
+ }
+ return 0;
+}
+
+/* Generate the prologue and epilogue RTL if the machine supports it.  Thread
+ this into place with notes indicating where the prologue ends and where
+ the epilogue begins. Update the basic block information when possible. */
+
+void
+thread_prologue_and_epilogue_insns (f)
+ rtx f;
+{
+#ifdef HAVE_prologue
+ if (HAVE_prologue)
+ {
+ rtx head, seq, insn;
+
+ /* The first insn (a NOTE_INSN_DELETED) is followed by zero or more
+ prologue insns and a NOTE_INSN_PROLOGUE_END. */
+ emit_note_after (NOTE_INSN_PROLOGUE_END, f);
+ seq = gen_prologue ();
+ head = emit_insn_after (seq, f);
+
+ /* Include the new prologue insns in the first block. Ignore them
+ if they form a basic block unto themselves. */
+ if (basic_block_head && n_basic_blocks
+ && GET_CODE (basic_block_head[0]) != CODE_LABEL)
+ basic_block_head[0] = NEXT_INSN (f);
+
+ /* Retain a map of the prologue insns. */
+ prologue = record_insns (GET_CODE (seq) == SEQUENCE ? seq : head);
+ }
+ else
+#endif
+ prologue = 0;
+
+#ifdef HAVE_epilogue
+ if (HAVE_epilogue)
+ {
+ rtx insn = get_last_insn ();
+ rtx prev = prev_nonnote_insn (insn);
+
+ /* If we end with a BARRIER, we don't need an epilogue. */
+ if (! (prev && GET_CODE (prev) == BARRIER))
+ {
+ rtx tail, seq, tem;
+ rtx first_use = 0;
+ rtx last_use = 0;
+
+ /* The last basic block ends with a NOTE_INSN_EPILOGUE_BEG, the
+ epilogue insns, the USE insns at the end of a function,
+ the jump insn that returns, and then a BARRIER. */
+
+ /* Move the USE insns at the end of a function onto a list. */
+ while (prev
+ && GET_CODE (prev) == INSN
+ && GET_CODE (PATTERN (prev)) == USE)
+ {
+ tem = prev;
+ prev = prev_nonnote_insn (prev);
+
+ NEXT_INSN (PREV_INSN (tem)) = NEXT_INSN (tem);
+ PREV_INSN (NEXT_INSN (tem)) = PREV_INSN (tem);
+ if (first_use)
+ {
+ NEXT_INSN (tem) = first_use;
+ PREV_INSN (first_use) = tem;
+ }
+ first_use = tem;
+ if (!last_use)
+ last_use = tem;
+ }
+
+ emit_barrier_after (insn);
+
+ seq = gen_epilogue ();
+ tail = emit_jump_insn_after (seq, insn);
+
+ /* Insert the USE insns immediately before the return insn, which
+ must be the first instruction before the final barrier. */
+ if (first_use)
+ {
+ tem = prev_nonnote_insn (get_last_insn ());
+ NEXT_INSN (PREV_INSN (tem)) = first_use;
+ PREV_INSN (first_use) = PREV_INSN (tem);
+ PREV_INSN (tem) = last_use;
+ NEXT_INSN (last_use) = tem;
+ }
+
+ emit_note_after (NOTE_INSN_EPILOGUE_BEG, insn);
+
+ /* Include the new epilogue insns in the last block. Ignore
+ them if they form a basic block unto themselves. */
+ if (basic_block_end && n_basic_blocks
+ && GET_CODE (basic_block_end[n_basic_blocks - 1]) != JUMP_INSN)
+ basic_block_end[n_basic_blocks - 1] = tail;
+
+ /* Retain a map of the epilogue insns. */
+ epilogue = record_insns (GET_CODE (seq) == SEQUENCE ? seq : tail);
+ return;
+ }
+ }
+#endif
+ epilogue = 0;
+}
+
+/* Reposition the prologue-end and epilogue-begin notes after instruction
+ scheduling and delayed branch scheduling. */
+
+void
+reposition_prologue_and_epilogue_notes (f)
+ rtx f;
+{
+#if defined (HAVE_prologue) || defined (HAVE_epilogue)
+ /* Reposition the prologue and epilogue notes. */
+ if (n_basic_blocks)
+ {
+ rtx next, prev;
+ int len;
+
+ if (prologue)
+ {
+ register rtx insn, note = 0;
+
+ /* Scan from the beginning until we reach the last prologue insn.
+ We apparently can't depend on basic_block_{head,end} after
+ reorg has run. */
+ for (len = 0; prologue[len]; len++)
+ ;
+ for (insn = f; len && insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PROLOGUE_END)
+ note = insn;
+ }
+ else if ((len -= contains (insn, prologue)) == 0)
+ {
+ /* Find the prologue-end note if we haven't already, and
+ move it to just after the last prologue insn. */
+ if (note == 0)
+ {
+ for (note = insn; note = NEXT_INSN (note);)
+ if (GET_CODE (note) == NOTE
+ && NOTE_LINE_NUMBER (note) == NOTE_INSN_PROLOGUE_END)
+ break;
+ }
+ next = NEXT_INSN (note);
+ prev = PREV_INSN (note);
+ if (prev)
+ NEXT_INSN (prev) = next;
+ if (next)
+ PREV_INSN (next) = prev;
+ add_insn_after (note, insn);
+ }
+ }
+ }
+
+ if (epilogue)
+ {
+ register rtx insn, note = 0;
+
+ /* Scan from the end until we reach the first epilogue insn.
+ We apparently can't depend on basic_block_{head,end} after
+ reorg has run. */
+ for (len = 0; epilogue[len]; len++)
+ ;
+ for (insn = get_last_insn (); len && insn; insn = PREV_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG)
+ note = insn;
+ }
+ else if ((len -= contains (insn, epilogue)) == 0)
+ {
+ /* Find the epilogue-begin note if we haven't already, and
+ move it to just before the first epilogue insn. */
+ if (note == 0)
+ {
+ for (note = insn; note = PREV_INSN (note);)
+ if (GET_CODE (note) == NOTE
+ && NOTE_LINE_NUMBER (note) == NOTE_INSN_EPILOGUE_BEG)
+ break;
+ }
+ next = NEXT_INSN (note);
+ prev = PREV_INSN (note);
+ if (prev)
+ NEXT_INSN (prev) = next;
+ if (next)
+ PREV_INSN (next) = prev;
+ add_insn_after (note, PREV_INSN (insn));
+ }
+ }
+ }
+ }
+#endif /* HAVE_prologue or HAVE_epilogue */
+}
diff --git a/gnu/usr.bin/cc/cc_int/getpwd.c b/gnu/usr.bin/cc/cc_int/getpwd.c
new file mode 100644
index 0000000..922a9ed
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/getpwd.c
@@ -0,0 +1,94 @@
+/* getpwd.c - get the working directory */
+
+#include "config.h"
+
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#ifndef errno
+extern int errno;
+#endif
+
+/* Virtually every UN*X system now in common use (except for pre-4.3-tahoe
+   BSD systems) provides getcwd as called for by POSIX.  Allow for
+ the few exceptions to the general rule here. */
+
+#if !(defined (POSIX) || defined (USG) || defined (VMS))
+#include <sys/param.h>
+extern char *getwd ();
+#define getcwd(buf,len) getwd(buf)
+#define GUESSPATHLEN (MAXPATHLEN + 1)
+#else /* defined (POSIX) || defined (USG) || defined (VMS) */
+extern char *getcwd ();
+/* We actually use this as a starting point, not a limit. */
+#define GUESSPATHLEN 100
+#endif /* defined (POSIX) || defined (USG) || defined (VMS) */
+
+char *getenv ();
+char *xmalloc ();
+
+#ifndef VMS
+
+/* Get the working directory. Use the PWD environment variable if it's
+ set correctly, since this is faster and gives more uniform answers
+ to the user. Yield the working directory if successful; otherwise,
+ yield 0 and set errno. */
+
+char *
+getpwd ()
+{
+ static char *pwd;
+ static int failure_errno;
+
+ char *p = pwd;
+ size_t s;
+ struct stat dotstat, pwdstat;
+
+ if (!p && !(errno = failure_errno))
+ {
+ if (! ((p = getenv ("PWD")) != 0
+ && *p == '/'
+ && stat (p, &pwdstat) == 0
+ && stat (".", &dotstat) == 0
+ && dotstat.st_ino == pwdstat.st_ino
+ && dotstat.st_dev == pwdstat.st_dev))
+
+ /* The shortcut didn't work. Try the slow, ``sure'' way. */
+ for (s = GUESSPATHLEN; ! getcwd (p = xmalloc (s), s); s *= 2)
+ {
+ int e = errno;
+ free (p);
+#ifdef ERANGE
+ if (e != ERANGE)
+#endif
+ {
+ errno = failure_errno = e;
+ p = 0;
+ break;
+ }
+ }
+
+ /* Cache the result. This assumes that the program does
+ not invoke chdir between calls to getpwd. */
+ pwd = p;
+ }
+ return p;
+}
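+
+/* A typical use (illustrative; `fatal' stands in for whatever error
+   routine the caller has):
+
+	char *cwd = getpwd ();
+	if (cwd == 0)
+	  fatal ("cannot determine working directory");
+
+   The returned string is cached, so callers must not free or modify
+   it.  */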
+
+#else /* VMS */
+
+#ifndef MAXPATHLEN
+#define MAXPATHLEN 255
+#endif
+
+char *
+getpwd ()
+{
+ static char *pwd = 0;
+
+ if (!pwd) pwd = getcwd (xmalloc (MAXPATHLEN+1), MAXPATHLEN+1);
+ return pwd;
+}
+
+#endif /* VMS */
diff --git a/gnu/usr.bin/cc/cc_int/global.c b/gnu/usr.bin/cc/cc_int/global.c
new file mode 100644
index 0000000..297e930
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/global.c
@@ -0,0 +1,1680 @@
+/* Allocate registers for pseudo-registers that span basic blocks.
+ Copyright (C) 1987, 1988, 1991, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include <stdio.h>
+#include "config.h"
+#include "rtl.h"
+#include "flags.h"
+#include "basic-block.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "output.h"
+
+/* This pass of the compiler performs global register allocation.
+ It assigns hard register numbers to all the pseudo registers
+ that were not handled in local_alloc. Assignments are recorded
+ in the vector reg_renumber, not by changing the rtl code.
+ (Such changes are made by final). The entry point is
+ the function global_alloc.
+
+ After allocation is complete, the reload pass is run as a subroutine
+ of this pass, so that when a pseudo reg loses its hard reg due to
+ spilling it is possible to make a second attempt to find a hard
+ reg for it. The reload pass is independent in other respects
+ and it is run even when stupid register allocation is in use.
+
+   1. Count the pseudo-registers still needing allocation
+ and assign allocation-numbers (allocnos) to them.
+ Set up tables reg_allocno and allocno_reg to map
+ reg numbers to allocnos and vice versa.
+ max_allocno gets the number of allocnos in use.
+
+ 2. Allocate a max_allocno by max_allocno conflict bit matrix and clear it.
+ Allocate a max_allocno by FIRST_PSEUDO_REGISTER conflict matrix
+ for conflicts between allocnos and explicit hard register use
+ (which includes use of pseudo-registers allocated by local_alloc).
+
+   3. For each basic block,
+ walk forward through the block, recording which
+ unallocated registers and which hardware registers are live.
+	Build the conflict matrix among the unallocated registers,
+	and another between unallocated registers and hardware registers.
+ Also record the preferred hardware registers
+ for each unallocated one.
+
+ 4. Sort a table of the allocnos into order of
+ desirability of the variables.
+
+ 5. Allocate the variables in that order; each if possible into
+ a preferred register, else into another register. */
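+
+/* For example (illustrative): with pseudos 100..103 where 101 was
+   already assigned by local_alloc, step 1 gives reg_allocno[100] = 0,
+   reg_allocno[101] = -1, reg_allocno[102] = 1, reg_allocno[103] = 2;
+   allocno_reg maps 0, 1, 2 back to 100, 102, 103, and max_allocno
+   ends up 3.  */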
+
+/* Number of pseudo-registers still requiring allocation
+   (not allocated by local_alloc).  */
+
+static int max_allocno;
+
+/* Indexed by (pseudo) reg number, gives the allocno, or -1
+   for pseudo registers already allocated by local_alloc.  */
+
+static int *reg_allocno;
+
+/* Indexed by allocno, gives the reg number. */
+
+static int *allocno_reg;
+
+/* A vector of the integers from 0 to max_allocno-1,
+ sorted in the order of first-to-be-allocated first. */
+
+static int *allocno_order;
+
+/* Indexed by an allocno, gives the number of consecutive
+ hard registers needed by that pseudo reg. */
+
+static int *allocno_size;
+
+/* Indexed by (pseudo) reg number, gives the number of another
+ lower-numbered pseudo reg which can share a hard reg with this pseudo
+ *even if the two pseudos would otherwise appear to conflict*. */
+
+static int *reg_may_share;
+
+/* Define the number of bits in each element of `conflicts' and what
+ type that element has. We use the largest integer format on the
+ host machine. */
+
+#define INT_BITS HOST_BITS_PER_WIDE_INT
+#define INT_TYPE HOST_WIDE_INT
+
+/* max_allocno by max_allocno array of bits,
+   recording whether two allocnos conflict (can't go in the same
+ hardware register).
+
+   `conflicts' is not symmetric; a conflict between allocnos i and j
+ is recorded either in element i,j or in element j,i. */
+
+static INT_TYPE *conflicts;
+
+/* Number of ints required to hold max_allocno bits.
+ This is the length of a row in `conflicts'. */
+
+static int allocno_row_words;
+
+/* Two macros to test or store 1 in an element of `conflicts'. */
+
+#define CONFLICTP(I, J) \
+ (conflicts[(I) * allocno_row_words + (J) / INT_BITS] \
+ & ((INT_TYPE) 1 << ((J) % INT_BITS)))
+
+#define SET_CONFLICT(I, J) \
+ (conflicts[(I) * allocno_row_words + (J) / INT_BITS] \
+ |= ((INT_TYPE) 1 << ((J) % INT_BITS)))
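+
+/* For example (illustrative): with INT_BITS == 32 and
+   max_allocno == 100, allocno_row_words is 4, so CONFLICTP (5, 70)
+   tests bit 70 % 32 == 6 of conflicts[5 * 4 + 70 / 32], i.e. word 22
+   of the matrix.  */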
+
+/* Set of hard regs currently live (during scan of all insns). */
+
+static HARD_REG_SET hard_regs_live;
+
+/* Indexed by N, set of hard regs conflicting with allocno N. */
+
+static HARD_REG_SET *hard_reg_conflicts;
+
+/* Indexed by N, set of hard regs preferred by allocno N.
+ This is used to make allocnos go into regs that are copied to or from them,
+ when possible, to reduce register shuffling. */
+
+static HARD_REG_SET *hard_reg_preferences;
+
+/* Similar, but just counts register preferences made in simple copy
+ operations, rather than arithmetic. These are given priority because
+ we can always eliminate an insn by using these, but using a register
+ in the above list won't always eliminate an insn. */
+
+static HARD_REG_SET *hard_reg_copy_preferences;
+
+/* Similar to hard_reg_preferences, but includes bits for subsequent
+ registers when an allocno is multi-word. The above variable is used for
+   allocation while this is used to build regs_someone_prefers, below.  */
+
+static HARD_REG_SET *hard_reg_full_preferences;
+
+/* Indexed by N, set of hard registers that some later allocno has a
+ preference for. */
+
+static HARD_REG_SET *regs_someone_prefers;
+
+/* Set of registers that global-alloc isn't supposed to use. */
+
+static HARD_REG_SET no_global_alloc_regs;
+
+/* Set of registers used so far. */
+
+static HARD_REG_SET regs_used_so_far;
+
+/* Number of calls crossed by each allocno. */
+
+static int *allocno_calls_crossed;
+
+/* Number of refs (weighted) to each allocno. */
+
+static int *allocno_n_refs;
+
+/* Guess at live length of each allocno.
+ This is actually the max of the live lengths of the regs. */
+
+static int *allocno_live_length;
+
+/* Number of refs (weighted) to each hard reg, as used by local alloc.
+ It is zero for a reg that contains global pseudos or is explicitly used. */
+
+static int local_reg_n_refs[FIRST_PSEUDO_REGISTER];
+
+/* Guess at live length of each hard reg, as used by local alloc.
+ This is actually the sum of the live lengths of the specific regs. */
+
+static int local_reg_live_length[FIRST_PSEUDO_REGISTER];
+
+/* Test a bit in TABLE, a vector of HARD_REG_SETs,
+ for vector element I, and hard register number J. */
+
+#define REGBITP(TABLE, I, J) TEST_HARD_REG_BIT (TABLE[I], J)
+
+/* Set to 1 a bit in a vector of HARD_REG_SETs. Works like REGBITP. */
+
+#define SET_REGBIT(TABLE, I, J) SET_HARD_REG_BIT (TABLE[I], J)
+
+/* Bit mask for allocnos live at current point in the scan. */
+
+static INT_TYPE *allocnos_live;
+
+/* Test, set or clear bit number I in allocnos_live,
+ a bit vector indexed by allocno. */
+
+#define ALLOCNO_LIVE_P(I) \
+ (allocnos_live[(I) / INT_BITS] & ((INT_TYPE) 1 << ((I) % INT_BITS)))
+
+#define SET_ALLOCNO_LIVE(I) \
+ (allocnos_live[(I) / INT_BITS] |= ((INT_TYPE) 1 << ((I) % INT_BITS)))
+
+#define CLEAR_ALLOCNO_LIVE(I) \
+ (allocnos_live[(I) / INT_BITS] &= ~((INT_TYPE) 1 << ((I) % INT_BITS)))
+
+/* This is turned off because it doesn't work right for DImode.
+ (And it is only used for DImode, so the other cases are worthless.)
+ The problem is that it isn't true that there is NO possibility of conflict;
+ only that there is no conflict if the two pseudos get the exact same regs.
+ If they were allocated with a partial overlap, there would be a conflict.
+ We can't safely turn off the conflict unless we have another way to
+ prevent the partial overlap.
+
+ Idea: change hard_reg_conflicts so that instead of recording which
+ hard regs the allocno may not overlap, it records where the allocno
+ may not start. Change both where it is used and where it is updated.
+ Then there is a way to record that (reg:DI 108) may start at 10
+ but not at 9 or 11. There is still the question of how to record
+ this semi-conflict between two pseudos. */
+#if 0
+/* Reg pairs for which conflict after the current insn
+ is inhibited by a REG_NO_CONFLICT note.
+ If the table gets full, we ignore any other notes--that is conservative. */
+#define NUM_NO_CONFLICT_PAIRS 4
+/* Number of pairs in use in this insn. */
+int n_no_conflict_pairs;
+static struct { int allocno1, allocno2;}
+ no_conflict_pairs[NUM_NO_CONFLICT_PAIRS];
+#endif /* 0 */
+
+/* Record all regs that are set in any one insn.
+ Communication from mark_reg_{store,clobber} and global_conflicts. */
+
+static rtx *regs_set;
+static int n_regs_set;
+
+/* All registers that can be eliminated. */
+
+static HARD_REG_SET eliminable_regset;
+
+static int allocno_compare PROTO((int *, int *));
+static void global_conflicts PROTO((void));
+static void expand_preferences PROTO((void));
+static void prune_preferences PROTO((void));
+static void find_reg PROTO((int, HARD_REG_SET, int, int, int));
+static void record_one_conflict PROTO((int));
+static void record_conflicts PROTO((short *, int));
+static void mark_reg_store PROTO((rtx, rtx));
+static void mark_reg_clobber PROTO((rtx, rtx));
+static void mark_reg_conflicts PROTO((rtx));
+static void mark_reg_death PROTO((rtx));
+static void mark_reg_live_nc PROTO((int, enum machine_mode));
+static void set_preference PROTO((rtx, rtx));
+static void dump_conflicts PROTO((FILE *));
+
+/* Perform allocation of pseudo-registers not allocated by local_alloc.
+ FILE is a file to output debugging information on,
+ or zero if such output is not desired.
+
+ Return value is nonzero if reload failed
+ and we must not do any more for this function. */
+
+int
+global_alloc (file)
+ FILE *file;
+{
+#ifdef ELIMINABLE_REGS
+ static struct {int from, to; } eliminables[] = ELIMINABLE_REGS;
+#endif
+ int need_fp
+ = (! flag_omit_frame_pointer
+#ifdef EXIT_IGNORE_STACK
+ || (current_function_calls_alloca && EXIT_IGNORE_STACK)
+#endif
+ || FRAME_POINTER_REQUIRED);
+
+ register int i;
+ rtx x;
+
+ max_allocno = 0;
+
+ /* A machine may have certain hard registers that
+ are safe to use only within a basic block. */
+
+ CLEAR_HARD_REG_SET (no_global_alloc_regs);
+#ifdef OVERLAPPING_REGNO_P
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (OVERLAPPING_REGNO_P (i))
+ SET_HARD_REG_BIT (no_global_alloc_regs, i);
+#endif
+
+ /* Build the regset of all eliminable registers and show we can't use those
+ that we already know won't be eliminated. */
+#ifdef ELIMINABLE_REGS
+ for (i = 0; i < sizeof eliminables / sizeof eliminables[0]; i++)
+ {
+ SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from);
+
+ if (! CAN_ELIMINATE (eliminables[i].from, eliminables[i].to)
+ || (eliminables[i].to == STACK_POINTER_REGNUM && need_fp))
+ SET_HARD_REG_BIT (no_global_alloc_regs, eliminables[i].from);
+ }
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ SET_HARD_REG_BIT (eliminable_regset, HARD_FRAME_POINTER_REGNUM);
+ if (need_fp)
+ SET_HARD_REG_BIT (no_global_alloc_regs, HARD_FRAME_POINTER_REGNUM);
+#endif
+
+#else
+ SET_HARD_REG_BIT (eliminable_regset, FRAME_POINTER_REGNUM);
+ if (need_fp)
+ SET_HARD_REG_BIT (no_global_alloc_regs, FRAME_POINTER_REGNUM);
+#endif
+
+ /* Track which registers have already been used. Start with registers
+ explicitly in the rtl, then registers allocated by local register
+ allocation. */
+
+ CLEAR_HARD_REG_SET (regs_used_so_far);
+#ifdef LEAF_REGISTERS
+ /* If we are doing the leaf function optimization, and this is a leaf
+ function, it means that the registers that take work to save are those
+ that need a register window. So prefer the ones that can be used in
+ a leaf function. */
+ {
+ char *cheap_regs;
+ static char leaf_regs[] = LEAF_REGISTERS;
+
+ if (only_leaf_regs_used () && leaf_function_p ())
+ cheap_regs = leaf_regs;
+ else
+ cheap_regs = call_used_regs;
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_ever_live[i] || cheap_regs[i])
+ SET_HARD_REG_BIT (regs_used_so_far, i);
+ }
+#else
+ /* We consider registers that do not have to be saved over calls as if
+ they were already used since there is no cost in using them. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_ever_live[i] || call_used_regs[i])
+ SET_HARD_REG_BIT (regs_used_so_far, i);
+#endif
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_renumber[i] >= 0)
+ SET_HARD_REG_BIT (regs_used_so_far, reg_renumber[i]);
+
+ /* Establish mappings from register number to allocation number
+ and vice versa. In the process, count the allocnos. */
+
+ reg_allocno = (int *) alloca (max_regno * sizeof (int));
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ reg_allocno[i] = -1;
+
+ /* Initialize the shared-hard-reg mapping
+ from the list of pairs that may share. */
+ reg_may_share = (int *) alloca (max_regno * sizeof (int));
+ bzero ((char *) reg_may_share, max_regno * sizeof (int));
+ for (x = regs_may_share; x; x = XEXP (XEXP (x, 1), 1))
+ {
+ int r1 = REGNO (XEXP (x, 0));
+ int r2 = REGNO (XEXP (XEXP (x, 1), 0));
+ if (r1 > r2)
+ reg_may_share[r1] = r2;
+ else
+ reg_may_share[r2] = r1;
+ }
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ /* Note that reg_live_length[i] < 0 indicates a "constant" reg
+ that we are supposed to refrain from putting in a hard reg.
+ -2 means do make an allocno but don't allocate it. */
+ if (reg_n_refs[i] != 0 && reg_renumber[i] < 0 && reg_live_length[i] != -1
+ /* Don't allocate pseudos that cross calls,
+ if this function receives a nonlocal goto. */
+ && (! current_function_has_nonlocal_label
+ || reg_n_calls_crossed[i] == 0))
+ {
+ if (reg_may_share[i] && reg_allocno[reg_may_share[i]] >= 0)
+ reg_allocno[i] = reg_allocno[reg_may_share[i]];
+ else
+ reg_allocno[i] = max_allocno++;
+ if (reg_live_length[i] == 0)
+ abort ();
+ }
+ else
+ reg_allocno[i] = -1;
+
+ allocno_reg = (int *) alloca (max_allocno * sizeof (int));
+ allocno_size = (int *) alloca (max_allocno * sizeof (int));
+ allocno_calls_crossed = (int *) alloca (max_allocno * sizeof (int));
+ allocno_n_refs = (int *) alloca (max_allocno * sizeof (int));
+ allocno_live_length = (int *) alloca (max_allocno * sizeof (int));
+ bzero ((char *) allocno_size, max_allocno * sizeof (int));
+ bzero ((char *) allocno_calls_crossed, max_allocno * sizeof (int));
+ bzero ((char *) allocno_n_refs, max_allocno * sizeof (int));
+ bzero ((char *) allocno_live_length, max_allocno * sizeof (int));
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_allocno[i] >= 0)
+ {
+ int allocno = reg_allocno[i];
+ allocno_reg[allocno] = i;
+ allocno_size[allocno] = PSEUDO_REGNO_SIZE (i);
+ allocno_calls_crossed[allocno] += reg_n_calls_crossed[i];
+ allocno_n_refs[allocno] += reg_n_refs[i];
+ if (allocno_live_length[allocno] < reg_live_length[i])
+ allocno_live_length[allocno] = reg_live_length[i];
+ }
+
+ /* Calculate amount of usage of each hard reg by pseudos
+ allocated by local-alloc. This is to see if we want to
+ override it. */
+ bzero ((char *) local_reg_live_length, sizeof local_reg_live_length);
+ bzero ((char *) local_reg_n_refs, sizeof local_reg_n_refs);
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_allocno[i] < 0 && reg_renumber[i] >= 0)
+ {
+ int regno = reg_renumber[i];
+ int endregno = regno + HARD_REGNO_NREGS (regno, PSEUDO_REGNO_MODE (i));
+ int j;
+
+ for (j = regno; j < endregno; j++)
+ {
+ local_reg_n_refs[j] += reg_n_refs[i];
+ local_reg_live_length[j] += reg_live_length[i];
+ }
+ }
+
+ /* We can't override local-alloc for a reg used not just by local-alloc. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_ever_live[i])
+ local_reg_n_refs[i] = 0;
+
+ /* Allocate the space for the conflict and preference tables and
+ initialize them. */
+
+ hard_reg_conflicts
+ = (HARD_REG_SET *) alloca (max_allocno * sizeof (HARD_REG_SET));
+ bzero ((char *) hard_reg_conflicts, max_allocno * sizeof (HARD_REG_SET));
+
+ hard_reg_preferences
+ = (HARD_REG_SET *) alloca (max_allocno * sizeof (HARD_REG_SET));
+ bzero ((char *) hard_reg_preferences, max_allocno * sizeof (HARD_REG_SET));
+
+ hard_reg_copy_preferences
+ = (HARD_REG_SET *) alloca (max_allocno * sizeof (HARD_REG_SET));
+ bzero ((char *) hard_reg_copy_preferences,
+ max_allocno * sizeof (HARD_REG_SET));
+
+ hard_reg_full_preferences
+ = (HARD_REG_SET *) alloca (max_allocno * sizeof (HARD_REG_SET));
+ bzero ((char *) hard_reg_full_preferences,
+ max_allocno * sizeof (HARD_REG_SET));
+
+ regs_someone_prefers
+ = (HARD_REG_SET *) alloca (max_allocno * sizeof (HARD_REG_SET));
+ bzero ((char *) regs_someone_prefers, max_allocno * sizeof (HARD_REG_SET));
+
+ allocno_row_words = (max_allocno + INT_BITS - 1) / INT_BITS;
+
+ conflicts = (INT_TYPE *) alloca (max_allocno * allocno_row_words
+ * sizeof (INT_TYPE));
+ bzero ((char *) conflicts,
+ max_allocno * allocno_row_words * sizeof (INT_TYPE));
+
+ allocnos_live = (INT_TYPE *) alloca (allocno_row_words * sizeof (INT_TYPE));
+
+ /* If there is work to be done (at least one reg to allocate),
+ perform global conflict analysis and allocate the regs. */
+
+ if (max_allocno > 0)
+ {
+ /* Scan all the insns and compute the conflicts among allocnos
+ and between allocnos and hard regs. */
+
+ global_conflicts ();
+
+ /* Eliminate conflicts between pseudos and eliminable registers. If
+ the register is not eliminated, the pseudo won't really be able to
+ live in the eliminable register, so the conflict doesn't matter.
+ If we do eliminate the register, the conflict will no longer exist.
+ So in either case, we can ignore the conflict. Likewise for
+ preferences. */
+
+ for (i = 0; i < max_allocno; i++)
+ {
+ AND_COMPL_HARD_REG_SET (hard_reg_conflicts[i], eliminable_regset);
+ AND_COMPL_HARD_REG_SET (hard_reg_copy_preferences[i],
+ eliminable_regset);
+ AND_COMPL_HARD_REG_SET (hard_reg_preferences[i], eliminable_regset);
+ }
+
+ /* Try to expand the preferences by merging them between allocnos. */
+
+ expand_preferences ();
+
+ /* Determine the order to allocate the remaining pseudo registers. */
+
+ allocno_order = (int *) alloca (max_allocno * sizeof (int));
+ for (i = 0; i < max_allocno; i++)
+ allocno_order[i] = i;
+
+      /* Default the size to 1, since allocno_compare multiplies by it
+	 (a size of zero would zero out the priority).  Also convert
+	 allocno_live_length of zero to -1.  A length of zero can occur
+	 when all the registers for that allocno have reg_live_length
+	 equal to -2.  In this case, we want to make an allocno, but not
+	 allocate it.  So avoid the divide-by-zero and give it a low
+	 priority.  */
+
+ for (i = 0; i < max_allocno; i++)
+ {
+ if (allocno_size[i] == 0)
+ allocno_size[i] = 1;
+ if (allocno_live_length[i] == 0)
+ allocno_live_length[i] = -1;
+ }
+
+ qsort (allocno_order, max_allocno, sizeof (int), allocno_compare);
+
+ prune_preferences ();
+
+ if (file)
+ dump_conflicts (file);
+
+ /* Try allocating them, one by one, in that order,
+ except for parameters marked with reg_live_length[regno] == -2. */
+
+ for (i = 0; i < max_allocno; i++)
+ if (reg_live_length[allocno_reg[allocno_order[i]]] >= 0)
+ {
+ /* If we have more than one register class,
+ first try allocating in the class that is cheapest
+ for this pseudo-reg. If that fails, try any reg. */
+ if (N_REG_CLASSES > 1)
+ {
+ find_reg (allocno_order[i], HARD_CONST (0), 0, 0, 0);
+ if (reg_renumber[allocno_reg[allocno_order[i]]] >= 0)
+ continue;
+ }
+ if (reg_alternate_class (allocno_reg[allocno_order[i]]) != NO_REGS)
+ find_reg (allocno_order[i], HARD_CONST (0), 1, 0, 0);
+ }
+ }
+
+ /* Do the reloads now while the allocno data still exist, so that we can
+ try to assign new hard regs to any pseudo regs that are spilled. */
+
+#if 0 /* We need to eliminate regs even if there is no rtl code,
+ for the sake of debugging information. */
+ if (n_basic_blocks > 0)
+#endif
+ return reload (get_insns (), 1, file);
+}
+
+/* Sort predicate for ordering the allocnos.
+ Returns -1 (1) if *v1 should be allocated before (after) *v2. */
+
+static int
+allocno_compare (v1, v2)
+ int *v1, *v2;
+{
+ /* Note that the quotient will never be bigger than
+ the value of floor_log2 times the maximum number of
+ times a register can occur in one insn (surely less than 100).
+ Multiplying this by 10000 can't overflow. */
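+  /* For example (illustrative numbers): 8 references over a live length
+     of 4 with size 1 gives a priority of (3 * 8 / 4) * 10000 * 1 = 60000,
+     since floor_log2 (8) == 3; denser usage yields a higher priority.  */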
+ register int pri1
+ = (((double) (floor_log2 (allocno_n_refs[*v1]) * allocno_n_refs[*v1])
+ / allocno_live_length[*v1])
+ * 10000 * allocno_size[*v1]);
+ register int pri2
+ = (((double) (floor_log2 (allocno_n_refs[*v2]) * allocno_n_refs[*v2])
+ / allocno_live_length[*v2])
+ * 10000 * allocno_size[*v2]);
+ if (pri2 - pri1)
+ return pri2 - pri1;
+
+ /* If regs are equally good, sort by allocno,
+ so that the results of qsort leave nothing to chance. */
+ return *v1 - *v2;
+}
+
+/* Scan the rtl code and record all conflicts and register preferences in the
+ conflict matrices and preference tables. */
+
+static void
+global_conflicts ()
+{
+ register int b, i;
+ register rtx insn;
+ short *block_start_allocnos;
+
+ /* Make a vector that mark_reg_{store,clobber} will store in. */
+ regs_set = (rtx *) alloca (max_parallel * sizeof (rtx) * 2);
+
+ block_start_allocnos = (short *) alloca (max_allocno * sizeof (short));
+
+ for (b = 0; b < n_basic_blocks; b++)
+ {
+ bzero ((char *) allocnos_live, allocno_row_words * sizeof (INT_TYPE));
+
+ /* Initialize table of registers currently live
+ to the state at the beginning of this basic block.
+ This also marks the conflicts among them.
+
+ For pseudo-regs, there is only one bit for each one
+ no matter how many hard regs it occupies.
+ This is ok; we know the size from PSEUDO_REGNO_SIZE.
+ For explicit hard regs, we cannot know the size that way
+ since one hard reg can be used with various sizes.
+ Therefore, we must require that all the hard regs
+ implicitly live as part of a multi-word hard reg
+ are explicitly marked in basic_block_live_at_start. */
+
+ {
+ register int offset;
+ REGSET_ELT_TYPE bit;
+ register regset old = basic_block_live_at_start[b];
+ int ax = 0;
+
+#ifdef HARD_REG_SET
+ hard_regs_live = old[0];
+#else
+ COPY_HARD_REG_SET (hard_regs_live, old);
+#endif
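+      /* Walk the regset bit by bit; I is the regno for the current bit.
+	 Live pseudos that have allocnos are collected in
+	 block_start_allocnos; regs already assigned hard registers are
+	 marked live in hard_regs_live instead.  */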
+ for (offset = 0, i = 0; offset < regset_size; offset++)
+ if (old[offset] == 0)
+ i += REGSET_ELT_BITS;
+ else
+ for (bit = 1; bit; bit <<= 1, i++)
+ {
+ if (i >= max_regno)
+ break;
+ if (old[offset] & bit)
+ {
+ register int a = reg_allocno[i];
+ if (a >= 0)
+ {
+ SET_ALLOCNO_LIVE (a);
+ block_start_allocnos[ax++] = a;
+ }
+ else if ((a = reg_renumber[i]) >= 0)
+ mark_reg_live_nc (a, PSEUDO_REGNO_MODE (i));
+ }
+ }
+
+ /* Record that each allocno now live conflicts with each other
+ allocno now live, and with each hard reg now live. */
+
+ record_conflicts (block_start_allocnos, ax);
+ }
+
+ insn = basic_block_head[b];
+
+ /* Scan the code of this basic block, noting which allocnos
+ and hard regs are born or die. When one is born,
+ record a conflict with all others currently live. */
+
+ while (1)
+ {
+ register RTX_CODE code = GET_CODE (insn);
+ register rtx link;
+
+ /* Make regs_set an empty set. */
+
+ n_regs_set = 0;
+
+ if (code == INSN || code == CALL_INSN || code == JUMP_INSN)
+ {
+
+#if 0
+ int i = 0;
+ for (link = REG_NOTES (insn);
+ link && i < NUM_NO_CONFLICT_PAIRS;
+ link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_NO_CONFLICT)
+ {
+ no_conflict_pairs[i].allocno1
+ = reg_allocno[REGNO (SET_DEST (PATTERN (insn)))];
+ no_conflict_pairs[i].allocno2
+ = reg_allocno[REGNO (XEXP (link, 0))];
+ i++;
+ }
+#endif /* 0 */
+
+ /* Mark any registers clobbered by INSN as live,
+ so they conflict with the inputs. */
+
+ note_stores (PATTERN (insn), mark_reg_clobber);
+
+ /* Mark any registers dead after INSN as dead now. */
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_DEAD)
+ mark_reg_death (XEXP (link, 0));
+
+ /* Mark any registers set in INSN as live,
+ and mark them as conflicting with all other live regs.
+ Clobbers are processed again, so they conflict with
+ the registers that are set. */
+
+ note_stores (PATTERN (insn), mark_reg_store);
+
+#ifdef AUTO_INC_DEC
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_INC)
+ mark_reg_store (XEXP (link, 0), NULL_RTX);
+#endif
+
+ /* If INSN has multiple outputs, then any reg that dies here
+ and is used inside of an output
+ must conflict with the other outputs. */
+
+ if (GET_CODE (PATTERN (insn)) == PARALLEL && !single_set (insn))
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_DEAD)
+ {
+ int used_in_output = 0;
+ int i;
+ rtx reg = XEXP (link, 0);
+
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ {
+ rtx set = XVECEXP (PATTERN (insn), 0, i);
+ if (GET_CODE (set) == SET
+ && GET_CODE (SET_DEST (set)) != REG
+ && !rtx_equal_p (reg, SET_DEST (set))
+ && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ used_in_output = 1;
+ }
+ if (used_in_output)
+ mark_reg_conflicts (reg);
+ }
+
+	  /* Registers set in INSN but never used afterward (marked by
+	     REG_UNUSED notes) die immediately; mark them dead now.  */
+
+ while (n_regs_set > 0)
+ if (find_regno_note (insn, REG_UNUSED,
+ REGNO (regs_set[--n_regs_set])))
+ mark_reg_death (regs_set[n_regs_set]);
+ }
+
+ if (insn == basic_block_end[b])
+ break;
+ insn = NEXT_INSN (insn);
+ }
+ }
+}
+
+/* Expand the preference information by looking for cases where one allocno
+ dies in an insn that sets an allocno. If those two allocnos don't conflict,
+ merge any preferences between those allocnos. */
+
+static void
+expand_preferences ()
+{
+ rtx insn;
+ rtx link;
+ rtx set;
+
+ /* We only try to handle the most common cases here. Most of the cases
+ where this wins are reg-reg copies. */
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (set = single_set (insn)) != 0
+ && GET_CODE (SET_DEST (set)) == REG
+ && reg_allocno[REGNO (SET_DEST (set))] >= 0)
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_DEAD
+ && GET_CODE (XEXP (link, 0)) == REG
+ && reg_allocno[REGNO (XEXP (link, 0))] >= 0
+ && ! CONFLICTP (reg_allocno[REGNO (SET_DEST (set))],
+ reg_allocno[REGNO (XEXP (link, 0))])
+ && ! CONFLICTP (reg_allocno[REGNO (XEXP (link, 0))],
+ reg_allocno[REGNO (SET_DEST (set))]))
+ {
+ int a1 = reg_allocno[REGNO (SET_DEST (set))];
+ int a2 = reg_allocno[REGNO (XEXP (link, 0))];
+
+ if (XEXP (link, 0) == SET_SRC (set))
+ {
+ IOR_HARD_REG_SET (hard_reg_copy_preferences[a1],
+ hard_reg_copy_preferences[a2]);
+ IOR_HARD_REG_SET (hard_reg_copy_preferences[a2],
+ hard_reg_copy_preferences[a1]);
+ }
+
+ IOR_HARD_REG_SET (hard_reg_preferences[a1],
+ hard_reg_preferences[a2]);
+ IOR_HARD_REG_SET (hard_reg_preferences[a2],
+ hard_reg_preferences[a1]);
+ IOR_HARD_REG_SET (hard_reg_full_preferences[a1],
+ hard_reg_full_preferences[a2]);
+ IOR_HARD_REG_SET (hard_reg_full_preferences[a2],
+ hard_reg_full_preferences[a1]);
+ }
+}
+
+/* Prune the preferences for global registers to exclude registers that cannot
+ be used.
+
+ Compute `regs_someone_prefers', which is a bitmask of the hard registers
+ that are preferred by conflicting registers of lower priority. If possible,
+ we will avoid using these registers. */
+
+static void
+prune_preferences ()
+{
+ int i, j;
+ int allocno;
+
+  /* Scan from least important to most important.
+     For each allocno, remove from the preferences registers that cannot
+     be used, either because of conflicts or register type.  Then compute
+     all registers preferred by each lower-priority register that
+     conflicts.  */
+
+ for (i = max_allocno - 1; i >= 0; i--)
+ {
+ HARD_REG_SET temp;
+
+ allocno = allocno_order[i];
+ COPY_HARD_REG_SET (temp, hard_reg_conflicts[allocno]);
+
+ if (allocno_calls_crossed[allocno] == 0)
+ IOR_HARD_REG_SET (temp, fixed_reg_set);
+ else
+ IOR_HARD_REG_SET (temp, call_used_reg_set);
+
+ IOR_COMPL_HARD_REG_SET
+ (temp,
+ reg_class_contents[(int) reg_preferred_class (allocno_reg[allocno])]);
+
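+      /* TEMP now holds every hard reg this allocno cannot use: its
+	 conflicts, the regs unavailable to it across calls, and the regs
+	 outside its preferred class.  Strip them from the preferences.  */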
+ AND_COMPL_HARD_REG_SET (hard_reg_preferences[allocno], temp);
+ AND_COMPL_HARD_REG_SET (hard_reg_copy_preferences[allocno], temp);
+ AND_COMPL_HARD_REG_SET (hard_reg_full_preferences[allocno], temp);
+
+ CLEAR_HARD_REG_SET (regs_someone_prefers[allocno]);
+
+ /* Merge in the preferences of lower-priority registers (they have
+ already been pruned). If we also prefer some of those registers,
+ don't exclude them unless we are of a smaller size (in which case
+ we want to give the lower-priority allocno the first chance for
+ these registers). */
+ for (j = i + 1; j < max_allocno; j++)
+ if (CONFLICTP (allocno, allocno_order[j]))
+ {
+ COPY_HARD_REG_SET (temp,
+ hard_reg_full_preferences[allocno_order[j]]);
+ if (allocno_size[allocno_order[j]] <= allocno_size[allocno])
+ AND_COMPL_HARD_REG_SET (temp,
+ hard_reg_full_preferences[allocno]);
+
+ IOR_HARD_REG_SET (regs_someone_prefers[allocno], temp);
+ }
+ }
+}
+
+/* Assign a hard register to ALLOCNO; look for one that is the beginning
+   of a long enough stretch of hard regs none of which conflicts with ALLOCNO.
+   Registers in ALLOCNO's preference sets are favored when possible.
+
+ LOSERS, if non-zero, is a HARD_REG_SET indicating registers that cannot
+ be used for this allocation.
+
+ If ALT_REGS_P is zero, consider only the preferred class of ALLOCNO's reg.
+ Otherwise ignore that preferred class and use the alternate class.
+
+ If ACCEPT_CALL_CLOBBERED is nonzero, accept a call-clobbered hard reg that
+ will have to be saved and restored at calls.
+
+ RETRYING is nonzero if this is called from retry_global_alloc.
+
+ If we find one, record it in reg_renumber.
+ If not, do nothing. */
+
+static void
+find_reg (allocno, losers, alt_regs_p, accept_call_clobbered, retrying)
+ int allocno;
+ HARD_REG_SET losers;
+ int alt_regs_p;
+ int accept_call_clobbered;
+ int retrying;
+{
+ register int i, best_reg, pass;
+#ifdef HARD_REG_SET
+ register /* Declare it register if it's a scalar. */
+#endif
+ HARD_REG_SET used, used1, used2;
+
+ enum reg_class class = (alt_regs_p
+ ? reg_alternate_class (allocno_reg[allocno])
+ : reg_preferred_class (allocno_reg[allocno]));
+ enum machine_mode mode = PSEUDO_REGNO_MODE (allocno_reg[allocno]);
+
+ if (accept_call_clobbered)
+ COPY_HARD_REG_SET (used1, call_fixed_reg_set);
+ else if (allocno_calls_crossed[allocno] == 0)
+ COPY_HARD_REG_SET (used1, fixed_reg_set);
+ else
+ COPY_HARD_REG_SET (used1, call_used_reg_set);
+
+ /* Some registers should not be allocated in global-alloc. */
+ IOR_HARD_REG_SET (used1, no_global_alloc_regs);
+ if (losers)
+ IOR_HARD_REG_SET (used1, losers);
+
+ IOR_COMPL_HARD_REG_SET (used1, reg_class_contents[(int) class]);
+ COPY_HARD_REG_SET (used2, used1);
+
+ IOR_HARD_REG_SET (used1, hard_reg_conflicts[allocno]);
+
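+  /* Note that USED2, unlike USED1, omits this allocno's own conflicts.
+     The local-alloc override scan near the end of this function tests
+     USED2, since a conflicting reg may become usable once the local
+     pseudos occupying it are kicked out.  */
+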
+ /* Try each hard reg to see if it fits. Do this in two passes.
+ In the first pass, skip registers that are preferred by some other pseudo
+ to give it a better chance of getting one of those registers. Only if
+ we can't get a register when excluding those do we take one of them.
+ However, we never allocate a register for the first time in pass 0. */
+
+ COPY_HARD_REG_SET (used, used1);
+ IOR_COMPL_HARD_REG_SET (used, regs_used_so_far);
+ IOR_HARD_REG_SET (used, regs_someone_prefers[allocno]);
+
+ best_reg = -1;
+ for (i = FIRST_PSEUDO_REGISTER, pass = 0;
+ pass <= 1 && i >= FIRST_PSEUDO_REGISTER;
+ pass++)
+ {
+ if (pass == 1)
+ COPY_HARD_REG_SET (used, used1);
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+#ifdef REG_ALLOC_ORDER
+ int regno = reg_alloc_order[i];
+#else
+ int regno = i;
+#endif
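+	  /* A value in mode MODE occupies HARD_REGNO_NREGS (REGNO, MODE)
+	     consecutive hard regs; the scan below accepts REGNO only if
+	     the entire group is free.  */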
+ if (! TEST_HARD_REG_BIT (used, regno)
+ && HARD_REGNO_MODE_OK (regno, mode))
+ {
+ register int j;
+ register int lim = regno + HARD_REGNO_NREGS (regno, mode);
+ for (j = regno + 1;
+ (j < lim
+ && ! TEST_HARD_REG_BIT (used, j));
+ j++);
+ if (j == lim)
+ {
+ best_reg = regno;
+ break;
+ }
+#ifndef REG_ALLOC_ORDER
+ i = j; /* Skip starting points we know will lose */
+#endif
+ }
+ }
+ }
+
+ /* See if there is a preferred register with the same class as the register
+ we allocated above. Making this restriction prevents register
+ preferencing from creating worse register allocation.
+
+     Remove the used and conflicting registers from the preference sets.
+     Note that additional conflicts may have been added after
+     `prune_preferences' was called.
+
+     First do this for the registers with copy preferences, then for all
+     preferred registers.  */
+
+ AND_COMPL_HARD_REG_SET (hard_reg_copy_preferences[allocno], used);
+ GO_IF_HARD_REG_SUBSET (hard_reg_copy_preferences[allocno],
+ reg_class_contents[(int) NO_REGS], no_copy_prefs);
+
+ if (best_reg >= 0)
+ {
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (hard_reg_copy_preferences[allocno], i)
+ && HARD_REGNO_MODE_OK (i, mode)
+ && (REGNO_REG_CLASS (i) == REGNO_REG_CLASS (best_reg)
+ || reg_class_subset_p (REGNO_REG_CLASS (i),
+ REGNO_REG_CLASS (best_reg))
+ || reg_class_subset_p (REGNO_REG_CLASS (best_reg),
+ REGNO_REG_CLASS (i))))
+ {
+ register int j;
+ register int lim = i + HARD_REGNO_NREGS (i, mode);
+ for (j = i + 1;
+ (j < lim
+ && ! TEST_HARD_REG_BIT (used, j)
+ && (REGNO_REG_CLASS (j)
+ == REGNO_REG_CLASS (best_reg + (j - i))
+ || reg_class_subset_p (REGNO_REG_CLASS (j),
+ REGNO_REG_CLASS (best_reg + (j - i)))
+ || reg_class_subset_p (REGNO_REG_CLASS (best_reg + (j - i)),
+ REGNO_REG_CLASS (j))));
+ j++);
+ if (j == lim)
+ {
+ best_reg = i;
+ goto no_prefs;
+ }
+ }
+ }
+ no_copy_prefs:
+
+ AND_COMPL_HARD_REG_SET (hard_reg_preferences[allocno], used);
+ GO_IF_HARD_REG_SUBSET (hard_reg_preferences[allocno],
+ reg_class_contents[(int) NO_REGS], no_prefs);
+
+ if (best_reg >= 0)
+ {
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (hard_reg_preferences[allocno], i)
+ && HARD_REGNO_MODE_OK (i, mode)
+ && (REGNO_REG_CLASS (i) == REGNO_REG_CLASS (best_reg)
+ || reg_class_subset_p (REGNO_REG_CLASS (i),
+ REGNO_REG_CLASS (best_reg))
+ || reg_class_subset_p (REGNO_REG_CLASS (best_reg),
+ REGNO_REG_CLASS (i))))
+ {
+ register int j;
+ register int lim = i + HARD_REGNO_NREGS (i, mode);
+ for (j = i + 1;
+ (j < lim
+ && ! TEST_HARD_REG_BIT (used, j)
+ && (REGNO_REG_CLASS (j)
+ == REGNO_REG_CLASS (best_reg + (j - i))
+ || reg_class_subset_p (REGNO_REG_CLASS (j),
+ REGNO_REG_CLASS (best_reg + (j - i)))
+ || reg_class_subset_p (REGNO_REG_CLASS (best_reg + (j - i)),
+ REGNO_REG_CLASS (j))));
+ j++);
+ if (j == lim)
+ {
+ best_reg = i;
+ break;
+ }
+ }
+ }
+ no_prefs:
+
+ /* If we haven't succeeded yet, try with caller-saves.
+ We need not check to see if the current function has nonlocal
+ labels because we don't put any pseudos that are live over calls in
+ registers in that case. */
+
+ if (flag_caller_saves && best_reg < 0)
+ {
+ /* Did not find a register. If it would be profitable to
+ allocate a call-clobbered register and save and restore it
+ around calls, do that. */
+ if (! accept_call_clobbered
+ && allocno_calls_crossed[allocno] != 0
+ && CALLER_SAVE_PROFITABLE (allocno_n_refs[allocno],
+ allocno_calls_crossed[allocno]))
+ {
+ find_reg (allocno, losers, alt_regs_p, 1, retrying);
+ if (reg_renumber[allocno_reg[allocno]] >= 0)
+ {
+ caller_save_needed = 1;
+ return;
+ }
+ }
+ }
+
+ /* If we haven't succeeded yet,
+ see if some hard reg that conflicts with us
+ was utilized poorly by local-alloc.
+ If so, kick out the regs that were put there by local-alloc
+ so we can use it instead. */
+ if (best_reg < 0 && !retrying
+ /* Let's not bother with multi-reg allocnos. */
+ && allocno_size[allocno] == 1)
+ {
+ /* Count from the end, to find the least-used ones first. */
+ for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
+ {
+#ifdef REG_ALLOC_ORDER
+ int regno = reg_alloc_order[i];
+#else
+ int regno = i;
+#endif
+
+ if (local_reg_n_refs[regno] != 0
+ /* Don't use a reg no good for this pseudo. */
+ && ! TEST_HARD_REG_BIT (used2, regno)
+ && HARD_REGNO_MODE_OK (regno, mode)
+ && (((double) local_reg_n_refs[regno]
+ / local_reg_live_length[regno])
+ < ((double) allocno_n_refs[allocno]
+ / allocno_live_length[allocno])))
+ {
+ /* Hard reg REGNO was used less in total by local regs
+ than it would be used by this one allocno! */
+ int k;
+ for (k = 0; k < max_regno; k++)
+ if (reg_renumber[k] >= 0)
+ {
+ int r = reg_renumber[k];
+ int endregno
+ = r + HARD_REGNO_NREGS (r, PSEUDO_REGNO_MODE (k));
+
+ if (regno >= r && regno < endregno)
+ reg_renumber[k] = -1;
+ }
+
+ best_reg = regno;
+ break;
+ }
+ }
+ }
+
+ /* Did we find a register? */
+
+ if (best_reg >= 0)
+ {
+ register int lim, j;
+ HARD_REG_SET this_reg;
+
+ /* Yes. Record it as the hard register of this pseudo-reg. */
+ reg_renumber[allocno_reg[allocno]] = best_reg;
+ /* Also of any pseudo-regs that share with it. */
+ if (reg_may_share[allocno_reg[allocno]])
+ for (j = FIRST_PSEUDO_REGISTER; j < max_regno; j++)
+ if (reg_allocno[j] == allocno)
+ reg_renumber[j] = best_reg;
+
+ /* Make a set of the hard regs being allocated. */
+ CLEAR_HARD_REG_SET (this_reg);
+ lim = best_reg + HARD_REGNO_NREGS (best_reg, mode);
+ for (j = best_reg; j < lim; j++)
+ {
+ SET_HARD_REG_BIT (this_reg, j);
+ SET_HARD_REG_BIT (regs_used_so_far, j);
+ /* This is no longer a reg used just by local regs. */
+ local_reg_n_refs[j] = 0;
+ }
+ /* For each other pseudo-reg conflicting with this one,
+ mark it as conflicting with the hard regs this one occupies. */
+ lim = allocno;
+ for (j = 0; j < max_allocno; j++)
+ if (CONFLICTP (lim, j) || CONFLICTP (j, lim))
+ {
+ IOR_HARD_REG_SET (hard_reg_conflicts[j], this_reg);
+ }
+ }
+}
+
+/* Called from `reload' to look for a hard reg to put pseudo reg REGNO in.
+ Perhaps it had previously seemed not worth a hard reg,
+ or perhaps its old hard reg has been commandeered for reloads.
+ FORBIDDEN_REGS indicates certain hard regs that may not be used, even if
+ they do not appear to be allocated.
+ If FORBIDDEN_REGS is zero, no regs are forbidden. */
+
+void
+retry_global_alloc (regno, forbidden_regs)
+ int regno;
+ HARD_REG_SET forbidden_regs;
+{
+ int allocno = reg_allocno[regno];
+ if (allocno >= 0)
+ {
+ /* If we have more than one register class,
+ first try allocating in the class that is cheapest
+ for this pseudo-reg. If that fails, try any reg. */
+ if (N_REG_CLASSES > 1)
+ find_reg (allocno, forbidden_regs, 0, 0, 1);
+ if (reg_renumber[regno] < 0
+ && reg_alternate_class (regno) != NO_REGS)
+ find_reg (allocno, forbidden_regs, 1, 0, 1);
+
+ /* If we found a register, modify the RTL for the register to
+ show the hard register, and mark that register live. */
+ if (reg_renumber[regno] >= 0)
+ {
+ REGNO (regno_reg_rtx[regno]) = reg_renumber[regno];
+ mark_home_live (regno);
+ }
+ }
+}
+
+/* Record a conflict between register REGNO
+ and everything currently live.
+ REGNO must not be a pseudo reg that was allocated
+ by local_alloc; such numbers must be translated through
+ reg_renumber before calling here. */
+
+static void
+record_one_conflict (regno)
+ int regno;
+{
+ register int j;
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ /* When a hard register becomes live,
+ record conflicts with live pseudo regs. */
+ for (j = 0; j < max_allocno; j++)
+ {
+ if (ALLOCNO_LIVE_P (j))
+ SET_HARD_REG_BIT (hard_reg_conflicts[j], regno);
+ }
+ else
+ /* When a pseudo-register becomes live,
+ record conflicts first with hard regs,
+ then with other pseudo regs. */
+ {
+ register int ialloc = reg_allocno[regno];
+ register int ialloc_prod = ialloc * allocno_row_words;
+ IOR_HARD_REG_SET (hard_reg_conflicts[ialloc], hard_regs_live);
+ for (j = allocno_row_words - 1; j >= 0; j--)
+ {
+#if 0
+ int k;
+ for (k = 0; k < n_no_conflict_pairs; k++)
+ if (! ((j == no_conflict_pairs[k].allocno1
+ && ialloc == no_conflict_pairs[k].allocno2)
+ ||
+ (j == no_conflict_pairs[k].allocno2
+ && ialloc == no_conflict_pairs[k].allocno1)))
+#endif /* 0 */
+ conflicts[ialloc_prod + j] |= allocnos_live[j];
+ }
+ }
+}
+
+/* Record all allocnos currently live as conflicting
+ with each other and with all hard regs currently live.
+ ALLOCNO_VEC is a vector of LEN allocnos, all allocnos that
+ are currently live. Their bits are also flagged in allocnos_live. */
+
+static void
+record_conflicts (allocno_vec, len)
+ register short *allocno_vec;
+ register int len;
+{
+ register int allocno;
+ register int j;
+ register int ialloc_prod;
+
+ while (--len >= 0)
+ {
+ allocno = allocno_vec[len];
+ ialloc_prod = allocno * allocno_row_words;
+ IOR_HARD_REG_SET (hard_reg_conflicts[allocno], hard_regs_live);
+ for (j = allocno_row_words - 1; j >= 0; j--)
+ conflicts[ialloc_prod + j] |= allocnos_live[j];
+ }
+}
+
+/* Handle the case where REG is set by the insn being scanned,
+ during the forward scan to accumulate conflicts.
+   Store a 1 in hard_regs_live or allocnos_live for this register, record how many
+ consecutive hardware registers it actually needs,
+ and record a conflict with all other registers already live.
+
+ Note that even if REG does not remain alive after this insn,
+ we must mark it here as live, to ensure a conflict between
+ REG and any other regs set in this insn that really do live.
+ This is because those other regs could be considered after this.
+
+ REG might actually be something other than a register;
+ if so, we do nothing.
+
+ SETTER is 0 if this register was modified by an auto-increment (i.e.,
+ a REG_INC note was found for it).
+
+ CLOBBERs are processed here by calling mark_reg_clobber. */
+
+static void
+mark_reg_store (orig_reg, setter)
+ rtx orig_reg, setter;
+{
+ register int regno;
+ register rtx reg = orig_reg;
+
+ /* WORD is which word of a multi-register group is being stored.
+ For the case where the store is actually into a SUBREG of REG.
+ Except we don't use it; I believe the entire REG needs to be
+ made live. */
+ int word = 0;
+
+ if (GET_CODE (reg) == SUBREG)
+ {
+ word = SUBREG_WORD (reg);
+ reg = SUBREG_REG (reg);
+ }
+
+ if (GET_CODE (reg) != REG)
+ return;
+
+ if (setter && GET_CODE (setter) == CLOBBER)
+ {
+ /* A clobber of a register should be processed here too. */
+ mark_reg_clobber (orig_reg, setter);
+ return;
+ }
+
+ regs_set[n_regs_set++] = reg;
+
+ if (setter)
+ set_preference (reg, SET_SRC (setter));
+
+ regno = REGNO (reg);
+
+ if (reg_renumber[regno] >= 0)
+ regno = reg_renumber[regno] /* + word */;
+
+ /* Either this is one of the max_allocno pseudo regs not allocated,
+ or it is or has a hardware reg. First handle the pseudo-regs. */
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ if (reg_allocno[regno] >= 0)
+ {
+ SET_ALLOCNO_LIVE (reg_allocno[regno]);
+ record_one_conflict (regno);
+ }
+ }
+ /* Handle hardware regs (and pseudos allocated to hard regs). */
+ else if (! fixed_regs[regno])
+ {
+ register int last = regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (regno < last)
+ {
+ record_one_conflict (regno);
+ SET_HARD_REG_BIT (hard_regs_live, regno);
+ regno++;
+ }
+ }
+}
+
+/* Like mark_reg_store except notice just CLOBBERs; ignore SETs.  */
+
+static void
+mark_reg_clobber (reg, setter)
+ rtx reg, setter;
+{
+ register int regno;
+
+ /* WORD is which word of a multi-register group is being stored.
+ For the case where the store is actually into a SUBREG of REG.
+ Except we don't use it; I believe the entire REG needs to be
+ made live. */
+ int word = 0;
+
+ if (GET_CODE (setter) != CLOBBER)
+ return;
+
+ if (GET_CODE (reg) == SUBREG)
+ {
+ word = SUBREG_WORD (reg);
+ reg = SUBREG_REG (reg);
+ }
+
+ if (GET_CODE (reg) != REG)
+ return;
+
+ regs_set[n_regs_set++] = reg;
+
+ regno = REGNO (reg);
+
+ if (reg_renumber[regno] >= 0)
+ regno = reg_renumber[regno] /* + word */;
+
+ /* Either this is one of the max_allocno pseudo regs not allocated,
+ or it is or has a hardware reg. First handle the pseudo-regs. */
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ if (reg_allocno[regno] >= 0)
+ {
+ SET_ALLOCNO_LIVE (reg_allocno[regno]);
+ record_one_conflict (regno);
+ }
+ }
+ /* Handle hardware regs (and pseudos allocated to hard regs). */
+ else if (! fixed_regs[regno])
+ {
+ register int last = regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (regno < last)
+ {
+ record_one_conflict (regno);
+ SET_HARD_REG_BIT (hard_regs_live, regno);
+ regno++;
+ }
+ }
+}
+
+/* Record that REG has conflicts with all the regs currently live.
+ Do not mark REG itself as live. */
+
+static void
+mark_reg_conflicts (reg)
+ rtx reg;
+{
+ register int regno;
+
+ if (GET_CODE (reg) == SUBREG)
+ reg = SUBREG_REG (reg);
+
+ if (GET_CODE (reg) != REG)
+ return;
+
+ regno = REGNO (reg);
+
+ if (reg_renumber[regno] >= 0)
+ regno = reg_renumber[regno];
+
+ /* Either this is one of the max_allocno pseudo regs not allocated,
+ or it is or has a hardware reg. First handle the pseudo-regs. */
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ if (reg_allocno[regno] >= 0)
+ record_one_conflict (regno);
+ }
+ /* Handle hardware regs (and pseudos allocated to hard regs). */
+ else if (! fixed_regs[regno])
+ {
+ register int last = regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (regno < last)
+ {
+ record_one_conflict (regno);
+ regno++;
+ }
+ }
+}
+
+/* Mark REG as being dead (following the insn being scanned now).
+   Store a 0 in hard_regs_live or allocnos_live for this register.  */
+
+static void
+mark_reg_death (reg)
+ rtx reg;
+{
+ register int regno = REGNO (reg);
+
+ /* For pseudo reg, see if it has been assigned a hardware reg. */
+ if (reg_renumber[regno] >= 0)
+ regno = reg_renumber[regno];
+
+ /* Either this is one of the max_allocno pseudo regs not allocated,
+ or it is a hardware reg. First handle the pseudo-regs. */
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ if (reg_allocno[regno] >= 0)
+ CLEAR_ALLOCNO_LIVE (reg_allocno[regno]);
+ }
+ /* Handle hardware regs (and pseudos allocated to hard regs). */
+ else if (! fixed_regs[regno])
+ {
+ /* Pseudo regs already assigned hardware regs are treated
+ almost the same as explicit hardware regs. */
+ register int last = regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (regno < last)
+ {
+ CLEAR_HARD_REG_BIT (hard_regs_live, regno);
+ regno++;
+ }
+ }
+}
+
+/* Mark hard reg REGNO as currently live, assuming machine mode MODE
+ for the value stored in it. MODE determines how many consecutive
+ registers are actually in use. Do not record conflicts;
+ it is assumed that the caller will do that. */
+
+static void
+mark_reg_live_nc (regno, mode)
+ register int regno;
+ enum machine_mode mode;
+{
+ register int last = regno + HARD_REGNO_NREGS (regno, mode);
+ while (regno < last)
+ {
+ SET_HARD_REG_BIT (hard_regs_live, regno);
+ regno++;
+ }
+}
+
+/* Try to set a preference for an allocno to a hard register.
+   We are passed DEST and SRC which are the operands of a SET (SRC itself
+   need not be a register).  If SRC or the first operand of SRC is a
+   register, try to set a preference.  If one of the two is a hard register
+   and the other is a pseudo-register, mark the preference.
+
+ Note that we are not as aggressive as local-alloc in trying to tie a
+ pseudo-register to a hard register. */
+
+static void
+set_preference (dest, src)
+ rtx dest, src;
+{
+ int src_regno, dest_regno;
+ /* Amount to add to the hard regno for SRC, or subtract from that for DEST,
+ to compensate for subregs in SRC or DEST. */
+ int offset = 0;
+ int i;
+ int copy = 1;
+
+ if (GET_RTX_FORMAT (GET_CODE (src))[0] == 'e')
+ src = XEXP (src, 0), copy = 0;
+
+ /* Get the reg number for both SRC and DEST.
+ If neither is a reg, give up. */
+
+ if (GET_CODE (src) == REG)
+ src_regno = REGNO (src);
+ else if (GET_CODE (src) == SUBREG && GET_CODE (SUBREG_REG (src)) == REG)
+ {
+ src_regno = REGNO (SUBREG_REG (src));
+ offset += SUBREG_WORD (src);
+ }
+ else
+ return;
+
+ if (GET_CODE (dest) == REG)
+ dest_regno = REGNO (dest);
+ else if (GET_CODE (dest) == SUBREG && GET_CODE (SUBREG_REG (dest)) == REG)
+ {
+ dest_regno = REGNO (SUBREG_REG (dest));
+ offset -= SUBREG_WORD (dest);
+ }
+ else
+ return;
+
+ /* Convert either or both to hard reg numbers. */
+
+ if (reg_renumber[src_regno] >= 0)
+ src_regno = reg_renumber[src_regno];
+
+ if (reg_renumber[dest_regno] >= 0)
+ dest_regno = reg_renumber[dest_regno];
+
+ /* Now if one is a hard reg and the other is a global pseudo
+ then give the other a preference. */
+
+ if (dest_regno < FIRST_PSEUDO_REGISTER && src_regno >= FIRST_PSEUDO_REGISTER
+ && reg_allocno[src_regno] >= 0)
+ {
+ dest_regno -= offset;
+ if (dest_regno >= 0 && dest_regno < FIRST_PSEUDO_REGISTER)
+ {
+ if (copy)
+ SET_REGBIT (hard_reg_copy_preferences,
+ reg_allocno[src_regno], dest_regno);
+
+ SET_REGBIT (hard_reg_preferences,
+ reg_allocno[src_regno], dest_regno);
+ for (i = dest_regno;
+ i < dest_regno + HARD_REGNO_NREGS (dest_regno, GET_MODE (dest));
+ i++)
+ SET_REGBIT (hard_reg_full_preferences, reg_allocno[src_regno], i);
+ }
+ }
+
+ if (src_regno < FIRST_PSEUDO_REGISTER && dest_regno >= FIRST_PSEUDO_REGISTER
+ && reg_allocno[dest_regno] >= 0)
+ {
+ src_regno += offset;
+ if (src_regno >= 0 && src_regno < FIRST_PSEUDO_REGISTER)
+ {
+ if (copy)
+ SET_REGBIT (hard_reg_copy_preferences,
+ reg_allocno[dest_regno], src_regno);
+
+ SET_REGBIT (hard_reg_preferences,
+ reg_allocno[dest_regno], src_regno);
+ for (i = src_regno;
+ i < src_regno + HARD_REGNO_NREGS (src_regno, GET_MODE (src));
+ i++)
+ SET_REGBIT (hard_reg_full_preferences, reg_allocno[dest_regno], i);
+ }
+ }
+}
+
+/* Indicate that hard register number FROM was eliminated and replaced with
+ an offset from hard register number TO. The status of hard registers live
+ at the start of a basic block is updated by replacing a use of FROM with
+ a use of TO. */
+
+void
+mark_elimination (from, to)
+ int from, to;
+{
+ int i;
+
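+  /* Register R lives in word R / REGSET_ELT_BITS of a regset,
+     at bit R % REGSET_ELT_BITS within that word.  */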
+ for (i = 0; i < n_basic_blocks; i++)
+ if ((basic_block_live_at_start[i][from / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1 << (from % REGSET_ELT_BITS))) != 0)
+ {
+ basic_block_live_at_start[i][from / REGSET_ELT_BITS]
+ &= ~ ((REGSET_ELT_TYPE) 1 << (from % REGSET_ELT_BITS));
+ basic_block_live_at_start[i][to / REGSET_ELT_BITS]
+ |= ((REGSET_ELT_TYPE) 1 << (to % REGSET_ELT_BITS));
+ }
+}
+
+/* Print debugging trace information if -greg switch is given,
+ showing the information on which the allocation decisions are based. */
+
+static void
+dump_conflicts (file)
+ FILE *file;
+{
+ register int i;
+ register int has_preferences;
+ fprintf (file, ";; %d regs to allocate:", max_allocno);
+ for (i = 0; i < max_allocno; i++)
+ {
+ int j;
+ fprintf (file, " %d", allocno_reg[allocno_order[i]]);
+ for (j = 0; j < max_regno; j++)
+ if (reg_allocno[j] == allocno_order[i]
+ && j != allocno_reg[allocno_order[i]])
+ fprintf (file, "+%d", j);
+ if (allocno_size[allocno_order[i]] != 1)
+ fprintf (file, " (%d)", allocno_size[allocno_order[i]]);
+ }
+ fprintf (file, "\n");
+
+ for (i = 0; i < max_allocno; i++)
+ {
+ register int j;
+ fprintf (file, ";; %d conflicts:", allocno_reg[i]);
+ for (j = 0; j < max_allocno; j++)
+ if (CONFLICTP (i, j) || CONFLICTP (j, i))
+ fprintf (file, " %d", allocno_reg[j]);
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (TEST_HARD_REG_BIT (hard_reg_conflicts[i], j))
+ fprintf (file, " %d", j);
+ fprintf (file, "\n");
+
+ has_preferences = 0;
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (TEST_HARD_REG_BIT (hard_reg_preferences[i], j))
+ has_preferences = 1;
+
+ if (! has_preferences)
+ continue;
+ fprintf (file, ";; %d preferences:", allocno_reg[i]);
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (TEST_HARD_REG_BIT (hard_reg_preferences[i], j))
+ fprintf (file, " %d", j);
+ fprintf (file, "\n");
+ }
+ fprintf (file, "\n");
+}
+
+void
+dump_global_regs (file)
+ FILE *file;
+{
+ register int i, j;
+
+ fprintf (file, ";; Register dispositions:\n");
+ for (i = FIRST_PSEUDO_REGISTER, j = 0; i < max_regno; i++)
+ if (reg_renumber[i] >= 0)
+ {
+ fprintf (file, "%d in %d ", i, reg_renumber[i]);
+ if (++j % 6 == 0)
+ fprintf (file, "\n");
+ }
+
+ fprintf (file, "\n\n;; Hard regs used: ");
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_ever_live[i])
+ fprintf (file, " %d", i);
+ fprintf (file, "\n\n");
+}
diff --git a/gnu/usr.bin/cc/cc_int/insn-attrtab.c b/gnu/usr.bin/cc/cc_int/insn-attrtab.c
new file mode 100644
index 0000000..0e86d1f
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/insn-attrtab.c
@@ -0,0 +1,14 @@
+/* Generated automatically by the program `genattrtab'
+from the machine description file `md'. */
+
+#include "config.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "regs.h"
+#include "real.h"
+#include "output.h"
+#include "insn-attr.h"
+
+#define operands recog_operand
+
diff --git a/gnu/usr.bin/cc/cc_int/insn-emit.c b/gnu/usr.bin/cc/cc_int/insn-emit.c
new file mode 100644
index 0000000..01463d0
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/insn-emit.c
@@ -0,0 +1,3973 @@
+/* Generated automatically by the program `genemit'
+from the machine description file `md'. */
+
+#include "config.h"
+#include "rtl.h"
+#include "expr.h"
+#include "real.h"
+#include "output.h"
+#include "insn-config.h"
+
+#include "insn-flags.h"
+
+#include "insn-codes.h"
+
+extern char *insn_operand_constraint[][MAX_RECOG_OPERANDS];
+
+extern rtx recog_operand[];
+#define operands emit_operand
+
+#define FAIL goto _fail
+
+#define DONE goto _done
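+
+/* In the expander bodies below, FAIL aborts the expansion and makes the
+   generator return 0, while DONE stops after whatever has already been
+   emitted, skipping the default emission that follows each md C body.  */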
+
+rtx
+gen_tstsi_1 (operand0)
+ rtx operand0;
+{
+ return gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ operand0);
+}
+
+rtx
+gen_tstsi (operand0)
+ rtx operand0;
+{
+ rtx operands[1];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+
+{
+ i386_compare_gen = gen_tstsi_1;
+ i386_compare_op0 = operands[0];
+ DONE;
+}
+ operand0 = operands[0];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ operand0));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
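+
+/* Note: in gen_tstsi above (and the similar expanders that follow), the
+   md file's C body always reaches DONE, so the default emit_insn call
+   after it can never execute; genemit emits it regardless.  */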
+
+rtx
+gen_tsthi_1 (operand0)
+ rtx operand0;
+{
+ return gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ operand0);
+}
+
+rtx
+gen_tsthi (operand0)
+ rtx operand0;
+{
+ rtx operands[1];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+
+{
+ i386_compare_gen = gen_tsthi_1;
+ i386_compare_op0 = operands[0];
+ DONE;
+}
+ operand0 = operands[0];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ operand0));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_tstqi_1 (operand0)
+ rtx operand0;
+{
+ return gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ operand0);
+}
+
+rtx
+gen_tstqi (operand0)
+ rtx operand0;
+{
+ rtx operands[1];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+
+{
+ i386_compare_gen = gen_tstqi_1;
+ i386_compare_op0 = operands[0];
+ DONE;
+}
+ operand0 = operands[0];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ operand0));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_tstsf_cc (operand0)
+ rtx operand0;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ operand0),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0))));
+}
+
+rtx
+gen_tstsf (operand0)
+ rtx operand0;
+{
+ rtx operands[1];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+
+{
+ i386_compare_gen = gen_tstsf_cc;
+ i386_compare_op0 = operands[0];
+ DONE;
+}
+ operand0 = operands[0];
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ operand0),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_tstdf_cc (operand0)
+ rtx operand0;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ operand0),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0))));
+}
+
+rtx
+gen_tstdf (operand0)
+ rtx operand0;
+{
+ rtx operands[1];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+
+{
+ i386_compare_gen = gen_tstdf_cc;
+ i386_compare_op0 = operands[0];
+ DONE;
+}
+ operand0 = operands[0];
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ operand0),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_tstxf_cc (operand0)
+ rtx operand0;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ operand0),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0))));
+}
+
+rtx
+gen_tstxf (operand0)
+ rtx operand0;
+{
+ rtx operands[1];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+
+{
+ i386_compare_gen = gen_tstxf_cc;
+ i386_compare_op0 = operands[0];
+ DONE;
+}
+ operand0 = operands[0];
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ operand0),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_cmpsi_1 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, VOIDmode,
+ operand0,
+ operand1));
+}
+
+rtx
+gen_cmpsi (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[0] = force_reg (SImode, operands[0]);
+
+ i386_compare_gen = gen_cmpsi_1;
+ i386_compare_op0 = operands[0];
+ i386_compare_op1 = operands[1];
+ DONE;
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, VOIDmode,
+ operand0,
+ operand1)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
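+
+/* When both compare operands are in memory, the expander body above first
+   forces one into a register: the 386 compare patterns allow at most one
+   memory operand.  */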
+
+rtx
+gen_cmphi_1 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, VOIDmode,
+ operand0,
+ operand1));
+}
+
+rtx
+gen_cmphi (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[0] = force_reg (HImode, operands[0]);
+
+ i386_compare_gen = gen_cmphi_1;
+ i386_compare_op0 = operands[0];
+ i386_compare_op1 = operands[1];
+ DONE;
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, VOIDmode,
+ operand0,
+ operand1)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_cmpqi_1 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, VOIDmode,
+ operand0,
+ operand1));
+}
+
+rtx
+gen_cmpqi (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[0] = force_reg (QImode, operands[0]);
+
+ i386_compare_gen = gen_cmpqi_1;
+ i386_compare_op0 = operands[0];
+ i386_compare_op1 = operands[1];
+ DONE;
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, VOIDmode,
+ operand0,
+ operand1)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_cmpsf_cc_1 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (GET_CODE (operand2), VOIDmode,
+ operand0,
+ operand1)),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0))));
+}
+
+rtx
+gen_cmpxf (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ i386_compare_gen = gen_cmpxf_cc;
+ i386_compare_gen_eq = gen_cmpxf_ccfpeq;
+ i386_compare_op0 = operands[0];
+ i386_compare_op1 = operands[1];
+ DONE;
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, VOIDmode,
+ operand0,
+ operand1)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_cmpdf (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ i386_compare_gen = gen_cmpdf_cc;
+ i386_compare_gen_eq = gen_cmpdf_ccfpeq;
+ i386_compare_op0 = operands[0];
+ i386_compare_op1 = operands[1];
+ DONE;
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, VOIDmode,
+ operand0,
+ operand1)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_cmpsf (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ i386_compare_gen = gen_cmpsf_cc;
+ i386_compare_gen_eq = gen_cmpsf_ccfpeq;
+ i386_compare_op0 = operands[0];
+ i386_compare_op1 = operands[1];
+ DONE;
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, VOIDmode,
+ operand0,
+ operand1)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_cmpxf_cc (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, VOIDmode,
+ operand0,
+ operand1)),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0))));
+}
+
+rtx
+gen_cmpxf_ccfpeq (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ if (! register_operand (operands[1], XFmode))
+ operands[1] = copy_to_mode_reg (XFmode, operands[1]);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, CCFPEQmode,
+ operand0,
+ operand1)),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_cmpdf_cc (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, VOIDmode,
+ operand0,
+ operand1)),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0))));
+}
+
+rtx
+gen_cmpdf_ccfpeq (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ if (! register_operand (operands[1], DFmode))
+ operands[1] = copy_to_mode_reg (DFmode, operands[1]);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, CCFPEQmode,
+ operand0,
+ operand1)),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_cmpsf_cc (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, VOIDmode,
+ operand0,
+ operand1)),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0))));
+}
+
+rtx
+gen_cmpsf_ccfpeq (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ if (! register_operand (operands[1], SFmode))
+ operands[1] = copy_to_mode_reg (SFmode, operands[1]);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, CCFPEQmode,
+ operand0,
+ operand1)),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_movsi (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ extern int flag_pic;
+
+ if (flag_pic && SYMBOLIC_CONST (operands[1]))
+ emit_pic_move (operands, SImode);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ operand1));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
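+
+/* movsi routes symbolic constants through emit_pic_move when compiling
+   position-independent code, since under PIC such constants must be
+   addressed relative to the PIC register rather than used directly.  */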
+
+rtx
+gen_movhi (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ operand1);
+}
+
+rtx
+gen_movstricthi (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ gen_rtx (STRICT_LOW_PART, VOIDmode,
+ operand0),
+ operand1);
+}
+
+rtx
+gen_movqi (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ operand1);
+}
+
+rtx
+gen_movstrictqi (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ gen_rtx (STRICT_LOW_PART, VOIDmode,
+ operand0),
+ operand1);
+}
+
+rtx
+gen_movsf (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ operand1);
+}
+
+rtx
+gen_swapdf (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ operand1),
+ gen_rtx (SET, VOIDmode,
+ operand1,
+ operand0)));
+}
+
+rtx
+gen_movdf (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ operand1);
+}
+
+rtx
+gen_swapxf (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ operand1),
+ gen_rtx (SET, VOIDmode,
+ operand1,
+ operand0)));
+}
+
+rtx
+gen_movxf (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ operand1);
+}
+
+rtx
+gen_movdi (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ operand1);
+}
+
+rtx
+gen_zero_extendhisi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ZERO_EXTEND, SImode,
+ operand1));
+}
+
+rtx
+gen_zero_extendqihi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ZERO_EXTEND, HImode,
+ operand1));
+}
+
+rtx
+gen_zero_extendqisi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ZERO_EXTEND, SImode,
+ operand1));
+}
+
+rtx
+gen_zero_extendsidi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ZERO_EXTEND, DImode,
+ operand1));
+}
+
+rtx
+gen_extendsidi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (SIGN_EXTEND, DImode,
+ operand1));
+}
+
+rtx
+gen_extendhisi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (SIGN_EXTEND, SImode,
+ operand1));
+}
+
+rtx
+gen_extendqihi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (SIGN_EXTEND, HImode,
+ operand1));
+}
+
+rtx
+gen_extendqisi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (SIGN_EXTEND, SImode,
+ operand1));
+}
+
+rtx
+gen_extendsfdf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FLOAT_EXTEND, DFmode,
+ operand1));
+}
+
+rtx
+gen_extenddfxf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FLOAT_EXTEND, XFmode,
+ operand1));
+}
+
+rtx
+gen_extendsfxf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FLOAT_EXTEND, XFmode,
+ operand1));
+}
+
+rtx
+gen_truncdfsf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operand2;
+ rtx operands[3];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ operands[2] = (rtx) assign_386_stack_local (SFmode, 0);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FLOAT_TRUNCATE, SFmode,
+ operand1)),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand2))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_truncxfsf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FLOAT_TRUNCATE, SFmode,
+ operand1));
+}
+
+rtx
+gen_truncxfdf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FLOAT_TRUNCATE, DFmode,
+ operand1));
+}
+
+rtx
+gen_fixuns_truncxfsi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operand2;
+ rtx operand3;
+ rtx operand4;
+ rtx operand5;
+ rtx operand6;
+ rtx operands[7];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = gen_lowpart (SImode, operands[2]);
+ operands[4] = gen_reg_rtx (XFmode);
+ operands[5] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[6] = (rtx) assign_386_stack_local (SImode, 1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ operand4 = operands[4];
+ operand5 = operands[5];
+ operand6 = operands[6];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand4,
+ operand1));
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (5,
+ gen_rtx (SET, VOIDmode,
+ operand2,
+ gen_rtx (FIX, DImode,
+ gen_rtx (FIX, XFmode,
+ operand4))),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand4),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand5),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand6),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0)))));
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ operand3));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_fixuns_truncdfsi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operand2;
+ rtx operand3;
+ rtx operand4;
+ rtx operand5;
+ rtx operand6;
+ rtx operands[7];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = gen_lowpart (SImode, operands[2]);
+ operands[4] = gen_reg_rtx (DFmode);
+ operands[5] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[6] = (rtx) assign_386_stack_local (SImode, 1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ operand4 = operands[4];
+ operand5 = operands[5];
+ operand6 = operands[6];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand4,
+ operand1));
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (5,
+ gen_rtx (SET, VOIDmode,
+ operand2,
+ gen_rtx (FIX, DImode,
+ gen_rtx (FIX, DFmode,
+ operand4))),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand4),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand5),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand6),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0)))));
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ operand3));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_fixuns_truncsfsi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operand2;
+ rtx operand3;
+ rtx operand4;
+ rtx operand5;
+ rtx operand6;
+ rtx operands[7];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = gen_lowpart (SImode, operands[2]);
+ operands[4] = gen_reg_rtx (SFmode);
+ operands[5] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[6] = (rtx) assign_386_stack_local (SImode, 1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ operand4 = operands[4];
+ operand5 = operands[5];
+ operand6 = operands[6];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand4,
+ operand1));
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (5,
+ gen_rtx (SET, VOIDmode,
+ operand2,
+ gen_rtx (FIX, DImode,
+ gen_rtx (FIX, SFmode,
+ operand4))),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand4),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand5),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand6),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0)))));
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ operand3));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_fix_truncxfdi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operand2;
+ rtx operand3;
+ rtx operand4;
+ rtx operands[5];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ operands[1] = copy_to_mode_reg (XFmode, operands[1]);
+ operands[2] = gen_reg_rtx (XFmode);
+ operands[3] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[4] = (rtx) assign_386_stack_local (SImode, 1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ operand4 = operands[4];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand2,
+ operand1));
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (5,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FIX, DImode,
+ gen_rtx (FIX, XFmode,
+ operand2))),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand2),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand3),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand4),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_fix_truncdfdi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operand2;
+ rtx operand3;
+ rtx operand4;
+ rtx operands[5];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ operands[1] = copy_to_mode_reg (DFmode, operands[1]);
+ operands[2] = gen_reg_rtx (DFmode);
+ operands[3] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[4] = (rtx) assign_386_stack_local (SImode, 1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ operand4 = operands[4];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand2,
+ operand1));
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (5,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FIX, DImode,
+ gen_rtx (FIX, DFmode,
+ operand2))),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand2),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand3),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand4),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_fix_truncsfdi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operand2;
+ rtx operand3;
+ rtx operand4;
+ rtx operands[5];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ operands[1] = copy_to_mode_reg (SFmode, operands[1]);
+ operands[2] = gen_reg_rtx (SFmode);
+ operands[3] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[4] = (rtx) assign_386_stack_local (SImode, 1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ operand4 = operands[4];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand2,
+ operand1));
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (5,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FIX, DImode,
+ gen_rtx (FIX, SFmode,
+ operand2))),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand2),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand3),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand4),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_fix_truncxfsi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operand2;
+ rtx operand3;
+ rtx operands[4];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ operands[2] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[3] = (rtx) assign_386_stack_local (SImode, 1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (4,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FIX, SImode,
+ gen_rtx (FIX, XFmode,
+ operand1))),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand2),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand3),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_fix_truncdfsi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operand2;
+ rtx operand3;
+ rtx operands[4];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ operands[2] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[3] = (rtx) assign_386_stack_local (SImode, 1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (4,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FIX, SImode,
+ gen_rtx (FIX, DFmode,
+ operand1))),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand2),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand3),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_fix_truncsfsi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operand2;
+ rtx operand3;
+ rtx operands[4];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ operands[2] = (rtx) assign_386_stack_local (SImode, 0);
+ operands[3] = (rtx) assign_386_stack_local (SImode, 1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (4,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FIX, SImode,
+ gen_rtx (FIX, SFmode,
+ operand1))),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand2),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand3),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_floatsisf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FLOAT, SFmode,
+ operand1));
+}
+
+rtx
+gen_floatdisf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FLOAT, SFmode,
+ operand1));
+}
+
+rtx
+gen_floatsidf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FLOAT, DFmode,
+ operand1));
+}
+
+rtx
+gen_floatdidf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FLOAT, DFmode,
+ operand1));
+}
+
+rtx
+gen_floatsixf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FLOAT, XFmode,
+ operand1));
+}
+
+rtx
+gen_floatdixf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (FLOAT, XFmode,
+ operand1));
+}
+
+rtx
+gen_adddi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (PLUS, DImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_addsi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (PLUS, SImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_addhi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (PLUS, HImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_addqi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (PLUS, QImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_addxf3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (PLUS, XFmode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_adddf3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (PLUS, DFmode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_addsf3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (PLUS, SFmode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_subdi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MINUS, DImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_subsi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MINUS, SImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_subhi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MINUS, HImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_subqi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MINUS, QImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_subxf3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MINUS, XFmode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_subdf3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MINUS, DFmode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_subsf3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MINUS, SFmode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_mulhi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MULT, HImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_mulsi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MULT, SImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_umulqihi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MULT, HImode,
+ gen_rtx (ZERO_EXTEND, HImode,
+ operand1),
+ gen_rtx (ZERO_EXTEND, HImode,
+ operand2)));
+}
+
+rtx
+gen_mulqihi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MULT, HImode,
+ gen_rtx (SIGN_EXTEND, HImode,
+ operand1),
+ gen_rtx (SIGN_EXTEND, HImode,
+ operand2)));
+}
+
+rtx
+gen_umulsidi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MULT, DImode,
+ gen_rtx (ZERO_EXTEND, DImode,
+ operand1),
+ gen_rtx (ZERO_EXTEND, DImode,
+ operand2)));
+}
+
+rtx
+gen_mulsidi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MULT, DImode,
+ gen_rtx (SIGN_EXTEND, DImode,
+ operand1),
+ gen_rtx (SIGN_EXTEND, DImode,
+ operand2)));
+}
+
+rtx
+gen_mulxf3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MULT, XFmode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_muldf3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MULT, DFmode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_mulsf3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MULT, SFmode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_divqi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (DIV, QImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_udivqi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (UDIV, QImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_divxf3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (DIV, XFmode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_divdf3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (DIV, DFmode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_divsf3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (DIV, SFmode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_divmodsi4 (operand0, operand1, operand2, operand3)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+ rtx operand3;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (DIV, SImode,
+ operand1,
+ operand2)),
+ gen_rtx (SET, VOIDmode,
+ operand3,
+ gen_rtx (MOD, SImode,
+ operand1,
+ operand2))));
+}
+
+rtx
+gen_divmodhi4 (operand0, operand1, operand2, operand3)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+ rtx operand3;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (DIV, HImode,
+ operand1,
+ operand2)),
+ gen_rtx (SET, VOIDmode,
+ operand3,
+ gen_rtx (MOD, HImode,
+ operand1,
+ operand2))));
+}
+
+rtx
+gen_udivmodsi4 (operand0, operand1, operand2, operand3)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+ rtx operand3;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (UDIV, SImode,
+ operand1,
+ operand2)),
+ gen_rtx (SET, VOIDmode,
+ operand3,
+ gen_rtx (UMOD, SImode,
+ operand1,
+ operand2))));
+}
+
+rtx
+gen_udivmodhi4 (operand0, operand1, operand2, operand3)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+ rtx operand3;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (UDIV, HImode,
+ operand1,
+ operand2)),
+ gen_rtx (SET, VOIDmode,
+ operand3,
+ gen_rtx (UMOD, HImode,
+ operand1,
+ operand2))));
+}
+
+rtx
+gen_andsi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (AND, SImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_andhi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (AND, HImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_andqi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (AND, QImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_iorsi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (IOR, SImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_iorhi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (IOR, HImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_iorqi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (IOR, QImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_xorsi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (XOR, SImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_xorhi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (XOR, HImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_xorqi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (XOR, QImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_negdi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (NEG, DImode,
+ operand1));
+}
+
+rtx
+gen_negsi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (NEG, SImode,
+ operand1));
+}
+
+rtx
+gen_neghi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (NEG, HImode,
+ operand1));
+}
+
+rtx
+gen_negqi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (NEG, QImode,
+ operand1));
+}
+
+rtx
+gen_negsf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (NEG, SFmode,
+ operand1));
+}
+
+rtx
+gen_negdf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (NEG, DFmode,
+ operand1));
+}
+
+rtx
+gen_negxf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (NEG, XFmode,
+ operand1));
+}
+
+rtx
+gen_abssf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ABS, SFmode,
+ operand1));
+}
+
+rtx
+gen_absdf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ABS, DFmode,
+ operand1));
+}
+
+rtx
+gen_absxf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ABS, XFmode,
+ operand1));
+}
+
+rtx
+gen_sqrtsf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (SQRT, SFmode,
+ operand1));
+}
+
+rtx
+gen_sqrtdf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (SQRT, DFmode,
+ operand1));
+}
+
+rtx
+gen_sqrtxf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (SQRT, XFmode,
+ operand1));
+}
+
+rtx
+gen_sindf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (UNSPEC, DFmode,
+ gen_rtvec (1,
+ operand1),
+ 1));
+}
+
+rtx
+gen_sinsf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (UNSPEC, SFmode,
+ gen_rtvec (1,
+ operand1),
+ 1));
+}
+
+rtx
+gen_cosdf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (UNSPEC, DFmode,
+ gen_rtvec (1,
+ operand1),
+ 2));
+}
+
+rtx
+gen_cossf2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (UNSPEC, SFmode,
+ gen_rtvec (1,
+ operand1),
+ 2));
+}
+
+rtx
+gen_one_cmplsi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (NOT, SImode,
+ operand1));
+}
+
+rtx
+gen_one_cmplhi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (NOT, HImode,
+ operand1));
+}
+
+rtx
+gen_one_cmplqi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (NOT, QImode,
+ operand1));
+}
+
+rtx
+gen_ashldi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ rtx operands[3];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+ operands[2] = operand2;
+
+{
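+ /* DImode shift: counts that are not constants acceptable to
+    constraint `J' (0..63 on the 386, by the md's definition) are
+    forced into a QImode register -- which reload will place in %cl --
+    and the variant that clobbers the count register is used.  */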
+ if (GET_CODE (operands[2]) != CONST_INT
+ || ! CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J'))
+ {
+ operands[2] = copy_to_mode_reg (QImode, operands[2]);
+ emit_insn (gen_ashldi3_non_const_int (operands[0], operands[1],
+ operands[2]));
+ }
+ else
+ emit_insn (gen_ashldi3_const_int (operands[0], operands[1], operands[2]));
+
+ DONE;
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ASHIFT, DImode,
+ operand1,
+ operand2)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_ashldi3_const_int (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ASHIFT, DImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_ashldi3_non_const_int (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ASHIFT, DImode,
+ operand1,
+ operand2)),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand2)));
+}
+
+rtx
+gen_ashlsi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ASHIFT, SImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_ashlhi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ASHIFT, HImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_ashlqi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ASHIFT, QImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_ashrdi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ rtx operands[3];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+ operands[2] = operand2;
+
+{
+ if (GET_CODE (operands[2]) != CONST_INT
+ || ! CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J'))
+ {
+ operands[2] = copy_to_mode_reg (QImode, operands[2]);
+ emit_insn (gen_ashrdi3_non_const_int (operands[0], operands[1],
+ operands[2]));
+ }
+ else
+ emit_insn (gen_ashrdi3_const_int (operands[0], operands[1], operands[2]));
+
+ DONE;
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ASHIFTRT, DImode,
+ operand1,
+ operand2)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_ashrdi3_const_int (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ASHIFTRT, DImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_ashrdi3_non_const_int (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ASHIFTRT, DImode,
+ operand1,
+ operand2)),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand2)));
+}
+
+rtx
+gen_ashrsi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ASHIFTRT, SImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_ashrhi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ASHIFTRT, HImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_ashrqi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ASHIFTRT, QImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_lshrdi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ rtx operands[3];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+ operands[2] = operand2;
+
+{
+ if (GET_CODE (operands[2]) != CONST_INT
+ || ! CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J'))
+ {
+ operands[2] = copy_to_mode_reg (QImode, operands[2]);
+ emit_insn (gen_lshrdi3_non_const_int (operands[0], operands[1],
+ operands[2]));
+ }
+ else
+ emit_insn (gen_lshrdi3_const_int (operands[0], operands[1], operands[2]));
+
+ DONE;
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (LSHIFTRT, DImode,
+ operand1,
+ operand2)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_lshrdi3_const_int (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (LSHIFTRT, DImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_lshrdi3_non_const_int (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (LSHIFTRT, DImode,
+ operand1,
+ operand2)),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand2)));
+}
+
+rtx
+gen_lshrsi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (LSHIFTRT, SImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_lshrhi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (LSHIFTRT, HImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_lshrqi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (LSHIFTRT, QImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_rotlsi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ROTATE, SImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_rotlhi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ROTATE, HImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_rotlqi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ROTATE, QImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_rotrsi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ROTATERT, SImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_rotrhi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ROTATERT, HImode,
+ operand1,
+ operand2));
+}
+
+rtx
+gen_rotrqi3 (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ return gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (ROTATERT, QImode,
+ operand1,
+ operand2));
+}
+
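+/* scc expanders.  Each emits the comparison recorded by the cmpM
+   pattern (through i386_compare_gen, or i386_compare_gen_eq for IEEE
+   equality tests on floating modes) and then stores the condition on
+   cc0 into a QImode result.  */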
+rtx
+gen_seq (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+
+{
+ if (TARGET_IEEE_FP
+ && GET_MODE_CLASS (GET_MODE (i386_compare_op0)) == MODE_FLOAT)
+ operands[1] = (*i386_compare_gen_eq)(i386_compare_op0, i386_compare_op1);
+ else
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (EQ, QImode,
+ cc0_rtx,
+ const0_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_sne (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+
+{
+ if (TARGET_IEEE_FP
+ && GET_MODE_CLASS (GET_MODE (i386_compare_op0)) == MODE_FLOAT)
+ operands[1] = (*i386_compare_gen_eq)(i386_compare_op0, i386_compare_op1);
+ else
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (NE, QImode,
+ cc0_rtx,
+ const0_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_sgt (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (GT, QImode,
+ cc0_rtx,
+ const0_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_sgtu (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (GTU, QImode,
+ cc0_rtx,
+ const0_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_slt (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (LT, QImode,
+ cc0_rtx,
+ const0_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_sltu (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (LTU, QImode,
+ cc0_rtx,
+ const0_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_sge (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (GE, QImode,
+ cc0_rtx,
+ const0_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_sgeu (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (GEU, QImode,
+ cc0_rtx,
+ const0_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_sle (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (LE, QImode,
+ cc0_rtx,
+ const0_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_sleu (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (LEU, QImode,
+ cc0_rtx,
+ const0_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
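+/* Conditional branch expanders: the same comparison scheme as the
+   scc patterns above, followed by a conditional jump on cc0.  */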
+rtx
+gen_beq (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+
+{
+ if (TARGET_IEEE_FP
+ && GET_MODE_CLASS (GET_MODE (i386_compare_op0)) == MODE_FLOAT)
+ operands[1] = (*i386_compare_gen_eq)(i386_compare_op0, i386_compare_op1);
+ else
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_jump_insn (gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (IF_THEN_ELSE, VOIDmode,
+ gen_rtx (EQ, VOIDmode,
+ cc0_rtx,
+ const0_rtx),
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand0),
+ pc_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_bne (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+
+{
+ if (TARGET_IEEE_FP
+ && GET_MODE_CLASS (GET_MODE (i386_compare_op0)) == MODE_FLOAT)
+ operands[1] = (*i386_compare_gen_eq)(i386_compare_op0, i386_compare_op1);
+ else
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_jump_insn (gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (IF_THEN_ELSE, VOIDmode,
+ gen_rtx (NE, VOIDmode,
+ cc0_rtx,
+ const0_rtx),
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand0),
+ pc_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_bgt (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_jump_insn (gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (IF_THEN_ELSE, VOIDmode,
+ gen_rtx (GT, VOIDmode,
+ cc0_rtx,
+ const0_rtx),
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand0),
+ pc_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_bgtu (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_jump_insn (gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (IF_THEN_ELSE, VOIDmode,
+ gen_rtx (GTU, VOIDmode,
+ cc0_rtx,
+ const0_rtx),
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand0),
+ pc_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_blt (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_jump_insn (gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (IF_THEN_ELSE, VOIDmode,
+ gen_rtx (LT, VOIDmode,
+ cc0_rtx,
+ const0_rtx),
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand0),
+ pc_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_bltu (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_jump_insn (gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (IF_THEN_ELSE, VOIDmode,
+ gen_rtx (LTU, VOIDmode,
+ cc0_rtx,
+ const0_rtx),
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand0),
+ pc_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_bge (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_jump_insn (gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (IF_THEN_ELSE, VOIDmode,
+ gen_rtx (GE, VOIDmode,
+ cc0_rtx,
+ const0_rtx),
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand0),
+ pc_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_bgeu (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_jump_insn (gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (IF_THEN_ELSE, VOIDmode,
+ gen_rtx (GEU, VOIDmode,
+ cc0_rtx,
+ const0_rtx),
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand0),
+ pc_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_ble (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_jump_insn (gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (IF_THEN_ELSE, VOIDmode,
+ gen_rtx (LE, VOIDmode,
+ cc0_rtx,
+ const0_rtx),
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand0),
+ pc_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_bleu (operand0)
+ rtx operand0;
+{
+ rtx operand1;
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = (*i386_compare_gen)(i386_compare_op0, i386_compare_op1);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand1);
+ emit_jump_insn (gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (IF_THEN_ELSE, VOIDmode,
+ gen_rtx (LEU, VOIDmode,
+ cc0_rtx,
+ const0_rtx),
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand0),
+ pc_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_jump (operand0)
+ rtx operand0;
+{
+ return gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand0));
+}
+
+rtx
+gen_indirect_jump (operand0)
+ rtx operand0;
+{
+ return gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ operand0);
+}
+
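+/* casesi (used when generating PIC): subtract the lower bound,
+   range-check against operand 2 (branching to the default label on
+   overflow), then jump through the dispatch table of offsets taken
+   relative to register 3 -- presumably %ebx, the PIC register, hence
+   the current_function_uses_pic_offset_table marking.  */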
+rtx
+gen_casesi (operand0, operand1, operand2, operand3, operand4)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+ rtx operand3;
+ rtx operand4;
+{
+ rtx operand5;
+ rtx operands[6];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+ operands[2] = operand2;
+ operands[3] = operand3;
+ operands[4] = operand4;
+
+{
+ operands[5] = gen_reg_rtx (SImode);
+ current_function_uses_pic_offset_table = 1;
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ operand4 = operands[4];
+ operand5 = operands[5];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand5,
+ gen_rtx (MINUS, SImode,
+ operand0,
+ operand1)));
+ emit_insn (gen_rtx (SET, VOIDmode,
+ cc0_rtx,
+ gen_rtx (COMPARE, CCmode,
+ operand5,
+ operand2)));
+ emit_jump_insn (gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (IF_THEN_ELSE, VOIDmode,
+ gen_rtx (GTU, VOIDmode,
+ cc0_rtx,
+ const0_rtx),
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand4),
+ pc_rtx)));
+ emit_jump_insn (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ gen_rtx (MINUS, SImode,
+ gen_rtx (REG, SImode,
+ 3),
+ gen_rtx (MEM, SImode,
+ gen_rtx (PLUS, SImode,
+ gen_rtx (MULT, SImode,
+ operand5,
+ GEN_INT (4)),
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand3))))),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_tablejump (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ return gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ pc_rtx,
+ operand0),
+ gen_rtx (USE, VOIDmode,
+ gen_rtx (LABEL_REF, VOIDmode,
+ operand1))));
+}
+
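+/* Call expanders.  Each marks the PIC offset table live when
+   generating PIC, forces the callee address into a register under
+   half-pic, and falls back to an indirect call through a register
+   when the address is not directly callable.  Register 7 in the
+   call_pop patterns is the stack pointer being adjusted by the
+   popped argument bytes.  */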
+rtx
+gen_call_pop (operand0, operand1, operand2, operand3)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+ rtx operand3;
+{
+ rtx operands[4];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+ operands[2] = operand2;
+ operands[3] = operand3;
+
+{
+ rtx addr;
+
+ if (flag_pic)
+ current_function_uses_pic_offset_table = 1;
+
+ /* With half-pic, force the address into a register. */
+ addr = XEXP (operands[0], 0);
+ if (GET_CODE (addr) != REG && HALF_PIC_P () && !CONSTANT_ADDRESS_P (addr))
+ XEXP (operands[0], 0) = force_reg (Pmode, addr);
+
+ if (! expander_call_insn_operand (operands[0], QImode))
+ operands[0]
+ = change_address (operands[0], VOIDmode,
+ copy_to_mode_reg (Pmode, XEXP (operands[0], 0)));
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ emit_call_insn (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (CALL, VOIDmode,
+ operand0,
+ operand1),
+ gen_rtx (SET, VOIDmode,
+ gen_rtx (REG, SImode,
+ 7),
+ gen_rtx (PLUS, SImode,
+ gen_rtx (REG, SImode,
+ 7),
+ operand3)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_call (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ rtx addr;
+
+ if (flag_pic)
+ current_function_uses_pic_offset_table = 1;
+
+ /* With half-pic, force the address into a register. */
+ addr = XEXP (operands[0], 0);
+ if (GET_CODE (addr) != REG && HALF_PIC_P () && !CONSTANT_ADDRESS_P (addr))
+ XEXP (operands[0], 0) = force_reg (Pmode, addr);
+
+ if (! expander_call_insn_operand (operands[0], QImode))
+ operands[0]
+ = change_address (operands[0], VOIDmode,
+ copy_to_mode_reg (Pmode, XEXP (operands[0], 0)));
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit_call_insn (gen_rtx (CALL, VOIDmode,
+ operand0,
+ operand1));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_call_value_pop (operand0, operand1, operand2, operand3, operand4)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+ rtx operand3;
+ rtx operand4;
+{
+ rtx operands[5];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+ operands[2] = operand2;
+ operands[3] = operand3;
+ operands[4] = operand4;
+
+{
+ rtx addr;
+
+ if (flag_pic)
+ current_function_uses_pic_offset_table = 1;
+
+ /* With half-pic, force the address into a register. */
+ addr = XEXP (operands[1], 0);
+ if (GET_CODE (addr) != REG && HALF_PIC_P () && !CONSTANT_ADDRESS_P (addr))
+ XEXP (operands[1], 0) = force_reg (Pmode, addr);
+
+ if (! expander_call_insn_operand (operands[1], QImode))
+ operands[1]
+ = change_address (operands[1], VOIDmode,
+ copy_to_mode_reg (Pmode, XEXP (operands[1], 0)));
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ operand4 = operands[4];
+ emit_call_insn (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (CALL, VOIDmode,
+ operand1,
+ operand2)),
+ gen_rtx (SET, VOIDmode,
+ gen_rtx (REG, SImode,
+ 7),
+ gen_rtx (PLUS, SImode,
+ gen_rtx (REG, SImode,
+ 7),
+ operand4)))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_call_value (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ rtx operands[3];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+ operands[2] = operand2;
+
+{
+ rtx addr;
+
+ if (flag_pic)
+ current_function_uses_pic_offset_table = 1;
+
+ /* With half-pic, force the address into a register. */
+ addr = XEXP (operands[1], 0);
+ if (GET_CODE (addr) != REG && HALF_PIC_P () && !CONSTANT_ADDRESS_P (addr))
+ XEXP (operands[1], 0) = force_reg (Pmode, addr);
+
+ if (! expander_call_insn_operand (operands[1], QImode))
+ operands[1]
+ = change_address (operands[1], VOIDmode,
+ copy_to_mode_reg (Pmode, XEXP (operands[1], 0)));
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ emit_call_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (CALL, VOIDmode,
+ operand1,
+ operand2)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_untyped_call (operand0, operand1, operand2)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+{
+ rtx operands[3];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+ operands[2] = operand2;
+
+{
+ rtx addr;
+
+ if (flag_pic)
+ current_function_uses_pic_offset_table = 1;
+
+ /* With half-pic, force the address into a register. */
+ addr = XEXP (operands[0], 0);
+ if (GET_CODE (addr) != REG && HALF_PIC_P () && !CONSTANT_ADDRESS_P (addr))
+ XEXP (operands[0], 0) = force_reg (Pmode, addr);
+
+ operands[1] = change_address (operands[1], DImode, XEXP (operands[1], 0));
+ if (! expander_call_insn_operand (operands[1], QImode))
+ operands[1]
+ = change_address (operands[1], VOIDmode,
+ copy_to_mode_reg (Pmode, XEXP (operands[1], 0)));
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ emit_call_insn (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (3,
+ gen_rtx (CALL, VOIDmode,
+ operand0,
+ const0_rtx),
+ operand1,
+ operand2)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_untyped_return (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operands[2];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+
+{
+ rtx valreg1 = gen_rtx (REG, SImode, 0);
+ rtx valreg2 = gen_rtx (REG, SImode, 1);
+ rtx result = operands[0];
+
+ /* Restore the FPU state. */
+ emit_insn (gen_update_return (change_address (result, SImode,
+ plus_constant (XEXP (result, 0),
+ 8))));
+
+ /* Reload the function value registers. */
+ emit_move_insn (valreg1, change_address (result, SImode, XEXP (result, 0)));
+ emit_move_insn (valreg2,
+ change_address (result, SImode,
+ plus_constant (XEXP (result, 0), 4)));
+
+ /* Put USE insns before the return. */
+ emit_insn (gen_rtx (USE, VOIDmode, valreg1));
+ emit_insn (gen_rtx (USE, VOIDmode, valreg2));
+
+ /* Construct the return. */
+ expand_null_return ();
+
+ DONE;
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ emit (operand0);
+ emit (operand1);
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_update_return (operand0)
+ rtx operand0;
+{
+ return gen_rtx (UNSPEC, SImode,
+ gen_rtvec (1,
+ operand0),
+ 0);
+}
+
+rtx
+gen_return ()
+{
+ return gen_rtx (RETURN, VOIDmode);
+}
+
+rtx
+gen_nop ()
+{
+ return const0_rtx;
+}
+
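+/* movstrsi: inline block copy.  Only constant lengths are handled;
+   otherwise the expander FAILs and the caller emits a library call.
+   The USEs pass the byte count and alignment to the matching insn.  */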
+rtx
+gen_movstrsi (operand0, operand1, operand2, operand3)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+ rtx operand3;
+{
+ rtx operand4;
+ rtx operand5;
+ rtx operand6;
+ rtx operands[7];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+ operands[2] = operand2;
+ operands[3] = operand3;
+
+{
+ rtx addr0, addr1;
+
+ if (GET_CODE (operands[2]) != CONST_INT)
+ FAIL;
+
+ addr0 = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
+ addr1 = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
+
+ operands[5] = addr0;
+ operands[6] = addr1;
+
+ operands[0] = gen_rtx (MEM, BLKmode, addr0);
+ operands[1] = gen_rtx (MEM, BLKmode, addr1);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ operand4 = operands[4];
+ operand5 = operands[5];
+ operand6 = operands[6];
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (6,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ operand1),
+ gen_rtx (USE, VOIDmode,
+ operand2),
+ gen_rtx (USE, VOIDmode,
+ operand3),
+ gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0)),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand5),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand6))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_cmpstrsi (operand0, operand1, operand2, operand3, operand4)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+ rtx operand3;
+ rtx operand4;
+{
+ rtx operand5;
+ rtx operand6;
+ rtx operands[7];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+ operands[2] = operand2;
+ operands[3] = operand3;
+ operands[4] = operand4;
+
+{
+ rtx addr1, addr2;
+
+ addr1 = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
+ addr2 = copy_to_mode_reg (Pmode, XEXP (operands[2], 0));
+ operands[3] = copy_to_mode_reg (SImode, operands[3]);
+
+ operands[5] = addr1;
+ operands[6] = addr2;
+
+ operands[1] = gen_rtx (MEM, BLKmode, addr1);
+ operands[2] = gen_rtx (MEM, BLKmode, addr2);
+
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ operand4 = operands[4];
+ operand5 = operands[5];
+ operand6 = operands[6];
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (6,
+ gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (COMPARE, SImode,
+ operand1,
+ operand2)),
+ gen_rtx (USE, VOIDmode,
+ operand3),
+ gen_rtx (USE, VOIDmode,
+ operand4),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand5),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand6),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand3))));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
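+/* ffs expanders: the insn pattern computes ffs(x) - 1 (what the 386
+   bsf instruction leaves in its destination), and the following add
+   produces the 1-based ffs value.  */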
+rtx
+gen_ffssi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operand2;
+ rtx operands[3];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+ operands[2] = gen_reg_rtx (SImode);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand2,
+ gen_rtx (PLUS, SImode,
+ gen_rtx (FFS, SImode,
+ operand1),
+ constm1_rtx)));
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (PLUS, SImode,
+ operand2,
+ const1_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_ffshi2 (operand0, operand1)
+ rtx operand0;
+ rtx operand1;
+{
+ rtx operand2;
+ rtx operands[3];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+ operands[2] = gen_reg_rtx (HImode);
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand2,
+ gen_rtx (PLUS, HImode,
+ gen_rtx (FFS, HImode,
+ operand1),
+ constm1_rtx)));
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (PLUS, HImode,
+ operand2,
+ const1_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+rtx
+gen_strlensi (operand0, operand1, operand2, operand3)
+ rtx operand0;
+ rtx operand1;
+ rtx operand2;
+ rtx operand3;
+{
+ rtx operand4;
+ rtx operand5;
+ rtx operands[6];
+ rtx _val = 0;
+ start_sequence ();
+ operands[0] = operand0;
+ operands[1] = operand1;
+ operands[2] = operand2;
+ operands[3] = operand3;
+
+{
+ operands[1] = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ operands[4] = gen_reg_rtx (SImode);
+ operands[5] = gen_reg_rtx (SImode);
+}
+ operand0 = operands[0];
+ operand1 = operands[1];
+ operand2 = operands[2];
+ operand3 = operands[3];
+ operand4 = operands[4];
+ operand5 = operands[5];
+ emit (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode,
+ operand4,
+ gen_rtx (UNSPEC, SImode,
+ gen_rtvec (3,
+ gen_rtx (MEM, BLKmode,
+ operand1),
+ operand2,
+ operand3),
+ 0)),
+ gen_rtx (CLOBBER, VOIDmode,
+ operand1))));
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand5,
+ gen_rtx (NOT, SImode,
+ operand4)));
+ emit_insn (gen_rtx (SET, VOIDmode,
+ operand0,
+ gen_rtx (MINUS, SImode,
+ operand5,
+ const1_rtx)));
+ _done:
+ _val = gen_sequence ();
+ _fail:
+ end_sequence ();
+ return _val;
+}
+
+
+
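+/* add_clobbers fills in the CLOBBER expressions that the pattern for
+   insn INSN_CODE_NUMBER requires; combine calls this when it has
+   matched a pattern with the clobbers stripped.  The case numbers
+   are insn codes from the md file.  */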
+void
+add_clobbers (pattern, insn_code_number)
+ rtx pattern;
+ int insn_code_number;
+{
+ int i;
+
+ switch (insn_code_number)
+ {
+ case 264:
+ XVECEXP (pattern, 0, 1) = gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0));
+ break;
+
+ case 95:
+ case 94:
+ case 93:
+ XVECEXP (pattern, 0, 3) = gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0));
+ break;
+
+ case 89:
+ case 88:
+ case 87:
+ XVECEXP (pattern, 0, 4) = gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, SImode, 0));
+ break;
+
+ case 33:
+ case 32:
+ case 31:
+ case 30:
+ case 29:
+ case 28:
+ case 27:
+ case 26:
+ case 25:
+ case 24:
+ case 23:
+ case 22:
+ case 21:
+ case 20:
+ case 19:
+ case 18:
+ case 10:
+ case 8:
+ case 6:
+ XVECEXP (pattern, 0, 1) = gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (SCRATCH, HImode, 0));
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+void
+init_mov_optab ()
+{
+#ifdef HAVE_movccfpeq
+ if (HAVE_movccfpeq)
+ mov_optab->handlers[(int) CCFPEQmode].insn_code = CODE_FOR_movccfpeq;
+#endif
+}
diff --git a/gnu/usr.bin/cc/cc_int/insn-extract.c b/gnu/usr.bin/cc/cc_int/insn-extract.c
new file mode 100644
index 0000000..7f6f1a4
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/insn-extract.c
@@ -0,0 +1,533 @@
+/* Generated automatically by the program `genextract'
+from the machine description file `md'. */
+
+#include "config.h"
+#include "rtl.h"
+
+static rtx junk;
+extern rtx recog_operand[];
+extern rtx *recog_operand_loc[];
+extern rtx *recog_dup_loc[];
+extern char recog_dup_num[];
+extern
+#ifdef __GNUC__
+__volatile__
+#endif
+void fatal_insn_not_found ();
+
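+/* insn_extract records the operands of INSN, located by its insn
+   code, in recog_operand[] with their addresses in
+   recog_operand_loc[]; match_dup locations are recorded in
+   recog_dup_loc[] and recog_dup_num[].  */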
+void
+insn_extract (insn)
+ rtx insn;
+{
+ register rtx *ro = recog_operand;
+ register rtx **ro_loc = recog_operand_loc;
+ rtx pat = PATTERN (insn);
+ switch (INSN_CODE (insn))
+ {
+ case -1:
+ fatal_insn_not_found (insn);
+
+ case 308:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 0), 0));
+ ro[2] = *(ro_loc[2] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 1));
+ ro[3] = *(ro_loc[3] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 2));
+ recog_dup_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0);
+ recog_dup_num[0] = 1;
+ break;
+
+ case 306:
+ case 303:
+ case 302:
+ case 300:
+ case 299:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 1), 0));
+ ro[3] = *(ro_loc[3] = &XEXP (pat, 1));
+ break;
+
+ case 305:
+ case 301:
+ case 298:
+ case 297:
+ case 295:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 1));
+ ro[3] = *(ro_loc[3] = &XEXP (pat, 1));
+ break;
+
+ case 304:
+ case 296:
+ case 294:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 1));
+ ro[3] = *(ro_loc[3] = &XEXP (pat, 1));
+ break;
+
+ case 289:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 1), 0));
+ ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 2), 0));
+ recog_dup_loc[0] = &XEXP (XVECEXP (pat, 0, 5), 0);
+ recog_dup_num[0] = 2;
+ recog_dup_loc[1] = &XEXP (XVECEXP (pat, 0, 4), 0);
+ recog_dup_num[1] = 1;
+ recog_dup_loc[2] = &XEXP (XVECEXP (pat, 0, 3), 0);
+ recog_dup_num[2] = 0;
+ break;
+
+ case 288:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0));
+ ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0));
+ ro[4] = *(ro_loc[4] = &XEXP (XVECEXP (pat, 0, 2), 0));
+ recog_dup_loc[0] = &XEXP (XVECEXP (pat, 0, 5), 0);
+ recog_dup_num[0] = 3;
+ recog_dup_loc[1] = &XEXP (XVECEXP (pat, 0, 4), 0);
+ recog_dup_num[1] = 2;
+ recog_dup_loc[2] = &XEXP (XVECEXP (pat, 0, 3), 0);
+ recog_dup_num[2] = 1;
+ break;
+
+ case 286:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 1), 0));
+ ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 2), 0));
+ ro[4] = *(ro_loc[4] = &XEXP (XVECEXP (pat, 0, 3), 0));
+ recog_dup_loc[0] = &XEXP (XVECEXP (pat, 0, 5), 0);
+ recog_dup_num[0] = 1;
+ recog_dup_loc[1] = &XEXP (XVECEXP (pat, 0, 4), 0);
+ recog_dup_num[1] = 0;
+ break;
+
+ case 284:
+ case 283:
+ break;
+
+ case 282:
+ ro[0] = *(ro_loc[0] = &XVECEXP (pat, 0, 0));
+ break;
+
+ case 280:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0));
+ ro[1] = *(ro_loc[1] = &XVECEXP (pat, 0, 1));
+ ro[2] = *(ro_loc[2] = &XVECEXP (pat, 0, 2));
+ break;
+
+ case 279:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0));
+ ro[1] = *(ro_loc[1] = &XVECEXP (pat, 0, 1));
+ ro[2] = *(ro_loc[2] = &XVECEXP (pat, 0, 2));
+ break;
+
+ case 277:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 1));
+ break;
+
+ case 274:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1));
+ ro[3] = const0_rtx;
+ ro_loc[3] = &junk;
+ ro[4] = *(ro_loc[4] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1));
+ break;
+
+ case 273:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1));
+ ro[3] = const0_rtx;
+ ro_loc[3] = &junk;
+ ro[4] = *(ro_loc[4] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1));
+ break;
+
+ case 268:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 0), 1));
+ ro[2] = const0_rtx;
+ ro_loc[2] = &junk;
+ ro[3] = *(ro_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1));
+ break;
+
+ case 267:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 0), 1));
+ ro[2] = const0_rtx;
+ ro_loc[2] = &junk;
+ ro[3] = *(ro_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1));
+ break;
+
+ case 265:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 1));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0));
+ break;
+
+ case 264:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0), 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0), 1), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 1), 0));
+ break;
+
+ case 261:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (pat, 1), 0));
+ break;
+
+ case 260:
+ case 259:
+ case 258:
+ case 257:
+ case 256:
+ case 255:
+ case 254:
+ case 253:
+ case 252:
+ case 251:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (pat, 1), 2), 0));
+ break;
+
+ case 250:
+ case 248:
+ case 246:
+ case 244:
+ case 242:
+ case 240:
+ case 238:
+ case 236:
+ case 234:
+ case 232:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (pat, 1), 1), 0));
+ break;
+
+ case 230:
+ case 228:
+ case 226:
+ case 224:
+ case 222:
+ case 220:
+ case 218:
+ case 216:
+ case 214:
+ case 212:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ break;
+
+ case 210:
+ case 209:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (pat, 1), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 1));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 2));
+ break;
+
+ case 208:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (pat, 1), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 2));
+ break;
+
+ case 207:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 1), 1));
+ break;
+
+ case 206:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 1));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 1));
+ break;
+
+ case 205:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (pat, 0), 0));
+ ro[1] = const0_rtx;
+ ro_loc[1] = &junk;
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 0), 2));
+ ro[3] = *(ro_loc[3] = &XEXP (pat, 1));
+ break;
+
+ case 195:
+ case 189:
+ case 183:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1));
+ recog_dup_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0);
+ recog_dup_num[0] = 2;
+ break;
+
+ case 177:
+ case 174:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (XEXP (pat, 1), 0, 0), 0));
+ break;
+
+ case 176:
+ case 175:
+ case 173:
+ case 172:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ ro[1] = *(ro_loc[1] = &XVECEXP (XEXP (pat, 1), 0, 0));
+ break;
+
+ case 293:
+ case 291:
+ case 171:
+ case 170:
+ case 168:
+ case 165:
+ case 163:
+ case 160:
+ case 158:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0));
+ break;
+
+ case 142:
+ case 141:
+ case 140:
+ case 139:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1));
+ ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0));
+ recog_dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0);
+ recog_dup_num[0] = 1;
+ recog_dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1);
+ recog_dup_num[1] = 2;
+ break;
+
+ case 130:
+ case 129:
+ case 128:
+ case 127:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 1), 0));
+ break;
+
+ case 276:
+ case 204:
+ case 203:
+ case 202:
+ case 201:
+ case 200:
+ case 199:
+ case 198:
+ case 197:
+ case 196:
+ case 194:
+ case 192:
+ case 191:
+ case 190:
+ case 188:
+ case 186:
+ case 185:
+ case 184:
+ case 182:
+ case 151:
+ case 150:
+ case 149:
+ case 148:
+ case 147:
+ case 146:
+ case 145:
+ case 144:
+ case 143:
+ case 135:
+ case 134:
+ case 126:
+ case 125:
+ case 124:
+ case 123:
+ case 119:
+ case 118:
+ case 117:
+ case 116:
+ case 111:
+ case 110:
+ case 109:
+ case 108:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 1));
+ break;
+
+ case 95:
+ case 94:
+ case 93:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 1), 0));
+ ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 2), 0));
+ ro[4] = *(ro_loc[4] = &XEXP (XVECEXP (pat, 0, 3), 0));
+ break;
+
+ case 89:
+ case 88:
+ case 87:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 2), 0));
+ ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 3), 0));
+ ro[4] = *(ro_loc[4] = &XEXP (XVECEXP (pat, 0, 4), 0));
+ recog_dup_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0);
+ recog_dup_num[0] = 1;
+ break;
+
+ case 78:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 1), 0));
+ break;
+
+ case 180:
+ case 179:
+ case 178:
+ case 169:
+ case 167:
+ case 166:
+ case 164:
+ case 162:
+ case 161:
+ case 159:
+ case 157:
+ case 156:
+ case 155:
+ case 154:
+ case 153:
+ case 152:
+ case 107:
+ case 106:
+ case 105:
+ case 104:
+ case 103:
+ case 102:
+ case 80:
+ case 79:
+ case 76:
+ case 75:
+ case 74:
+ case 73:
+ case 72:
+ case 71:
+ case 70:
+ case 69:
+ case 68:
+ case 67:
+ case 66:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0));
+ break;
+
+ case 62:
+ case 59:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 0), 1));
+ recog_dup_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0);
+ recog_dup_num[0] = 1;
+ recog_dup_loc[1] = &XEXP (XVECEXP (pat, 0, 1), 1);
+ recog_dup_num[1] = 0;
+ break;
+
+ case 271:
+ case 55:
+ case 52:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (pat, 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (pat, 1));
+ break;
+
+ case 270:
+ case 112:
+ case 65:
+ case 64:
+ case 63:
+ case 61:
+ case 60:
+ case 58:
+ case 57:
+ case 56:
+ case 54:
+ case 53:
+ case 51:
+ case 50:
+ case 49:
+ case 47:
+ case 46:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 0));
+ ro[1] = *(ro_loc[1] = &XEXP (pat, 1));
+ break;
+
+ case 33:
+ case 29:
+ case 23:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1));
+ ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 1), 0));
+ break;
+
+ case 32:
+ case 28:
+ case 26:
+ case 20:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1));
+ ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 0), 1));
+ ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0));
+ break;
+
+ case 31:
+ case 27:
+ case 25:
+ case 22:
+ case 21:
+ case 19:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0));
+ ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 0), 1));
+ ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0));
+ break;
+
+ case 30:
+ case 24:
+ case 18:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1));
+ ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 0), 1));
+ ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0));
+ break;
+
+ case 45:
+ case 44:
+ case 43:
+ case 16:
+ case 14:
+ case 12:
+ ro[0] = *(ro_loc[0] = &XEXP (XEXP (pat, 1), 0));
+ ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 1));
+ break;
+
+ case 10:
+ case 8:
+ case 6:
+ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 1));
+ ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 1), 0));
+ break;
+
+ case 262:
+ case 4:
+ case 2:
+ case 0:
+ ro[0] = *(ro_loc[0] = &XEXP (pat, 1));
+ break;
+
+ default:
+ abort ();
+ }
+}
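
The machine-generated switch above repeats one idiom: for each insn
code it copies the pattern's operand values into ro[] while recording
each operand's address in ro_loc[], and it notes duplicated operands
through recog_dup_loc[] and recog_dup_num[], so that later passes can
rewrite operands in place through the saved pointers.  As a rough
illustration of that idiom only (every type, code, and name below is
invented for the sketch and is not GCC's), a minimal self-contained C
program:

    #include <stdio.h>

    /* Invented stand-ins for rtx nodes and insn codes.  */
    struct node { int value; struct node *op[2]; };

    static int ro[2];       /* extracted operand values            */
    static int *ro_loc[2];  /* address of each operand in the tree */

    static void
    toy_extract (struct node *pat, int insn_code)
    {
      switch (insn_code)
        {
        case 0:  /* imagine a (set X Y) whose operands are leaves */
          ro[0] = *(ro_loc[0] = &pat->op[0]->value);
          ro[1] = *(ro_loc[1] = &pat->op[1]->value);
          break;

        default:  /* the generated extractor aborts here */
          break;
        }
    }

    int
    main (void)
    {
      struct node x = { 7, { NULL, NULL } };
      struct node y = { 42, { NULL, NULL } };
      struct node set = { 0, { &x, &y } };

      toy_extract (&set, 0);
      *ro_loc[1] = 99;  /* writing through ro_loc changes the tree */
      printf ("%d %d -> %d\n", ro[0], ro[1], y.value);  /* 7 42 -> 99 */
      return 0;
    }
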
diff --git a/gnu/usr.bin/cc/cc_int/insn-opinit.c b/gnu/usr.bin/cc/cc_int/insn-opinit.c
new file mode 100644
index 0000000..8ad7929
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/insn-opinit.c
@@ -0,0 +1,216 @@
+/* Generated automatically by the program `genopinit'
+from the machine description file `md'. */
+
+#include "config.h"
+#include "rtl.h"
+#include "flags.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "expr.h"
+#include "reload.h"
+
+void
+init_all_optabs ()
+{
+ tst_optab->handlers[(int) SImode].insn_code = CODE_FOR_tstsi;
+ tst_optab->handlers[(int) HImode].insn_code = CODE_FOR_tsthi;
+ tst_optab->handlers[(int) QImode].insn_code = CODE_FOR_tstqi;
+ if (HAVE_tstsf)
+ tst_optab->handlers[(int) SFmode].insn_code = CODE_FOR_tstsf;
+ if (HAVE_tstdf)
+ tst_optab->handlers[(int) DFmode].insn_code = CODE_FOR_tstdf;
+ if (HAVE_tstxf)
+ tst_optab->handlers[(int) XFmode].insn_code = CODE_FOR_tstxf;
+ cmp_optab->handlers[(int) SImode].insn_code = CODE_FOR_cmpsi;
+ cmp_optab->handlers[(int) HImode].insn_code = CODE_FOR_cmphi;
+ cmp_optab->handlers[(int) QImode].insn_code = CODE_FOR_cmpqi;
+ if (HAVE_cmpxf)
+ cmp_optab->handlers[(int) XFmode].insn_code = CODE_FOR_cmpxf;
+ if (HAVE_cmpdf)
+ cmp_optab->handlers[(int) DFmode].insn_code = CODE_FOR_cmpdf;
+ if (HAVE_cmpsf)
+ cmp_optab->handlers[(int) SFmode].insn_code = CODE_FOR_cmpsf;
+ mov_optab->handlers[(int) SImode].insn_code = CODE_FOR_movsi;
+ mov_optab->handlers[(int) HImode].insn_code = CODE_FOR_movhi;
+ movstrict_optab->handlers[(int) HImode].insn_code = CODE_FOR_movstricthi;
+ mov_optab->handlers[(int) QImode].insn_code = CODE_FOR_movqi;
+ movstrict_optab->handlers[(int) QImode].insn_code = CODE_FOR_movstrictqi;
+ mov_optab->handlers[(int) SFmode].insn_code = CODE_FOR_movsf;
+ mov_optab->handlers[(int) DFmode].insn_code = CODE_FOR_movdf;
+ mov_optab->handlers[(int) XFmode].insn_code = CODE_FOR_movxf;
+ mov_optab->handlers[(int) DImode].insn_code = CODE_FOR_movdi;
+ extendtab[(int) SImode][(int) HImode][1] = CODE_FOR_zero_extendhisi2;
+ extendtab[(int) HImode][(int) QImode][1] = CODE_FOR_zero_extendqihi2;
+ extendtab[(int) SImode][(int) QImode][1] = CODE_FOR_zero_extendqisi2;
+ extendtab[(int) DImode][(int) SImode][1] = CODE_FOR_zero_extendsidi2;
+ extendtab[(int) DImode][(int) SImode][0] = CODE_FOR_extendsidi2;
+ extendtab[(int) SImode][(int) HImode][0] = CODE_FOR_extendhisi2;
+ extendtab[(int) HImode][(int) QImode][0] = CODE_FOR_extendqihi2;
+ extendtab[(int) SImode][(int) QImode][0] = CODE_FOR_extendqisi2;
+ if (HAVE_extendsfdf2)
+ extendtab[(int) DFmode][(int) SFmode][0] = CODE_FOR_extendsfdf2;
+ if (HAVE_extenddfxf2)
+ extendtab[(int) XFmode][(int) DFmode][0] = CODE_FOR_extenddfxf2;
+ if (HAVE_extendsfxf2)
+ extendtab[(int) XFmode][(int) SFmode][0] = CODE_FOR_extendsfxf2;
+ if (HAVE_fixuns_truncxfsi2)
+ fixtrunctab[(int) XFmode][(int) SImode][1] = CODE_FOR_fixuns_truncxfsi2;
+ if (HAVE_fixuns_truncdfsi2)
+ fixtrunctab[(int) DFmode][(int) SImode][1] = CODE_FOR_fixuns_truncdfsi2;
+ if (HAVE_fixuns_truncsfsi2)
+ fixtrunctab[(int) SFmode][(int) SImode][1] = CODE_FOR_fixuns_truncsfsi2;
+ if (HAVE_fix_truncxfdi2)
+ fixtrunctab[(int) XFmode][(int) DImode][0] = CODE_FOR_fix_truncxfdi2;
+ if (HAVE_fix_truncdfdi2)
+ fixtrunctab[(int) DFmode][(int) DImode][0] = CODE_FOR_fix_truncdfdi2;
+ if (HAVE_fix_truncsfdi2)
+ fixtrunctab[(int) SFmode][(int) DImode][0] = CODE_FOR_fix_truncsfdi2;
+ if (HAVE_fix_truncxfsi2)
+ fixtrunctab[(int) XFmode][(int) SImode][0] = CODE_FOR_fix_truncxfsi2;
+ if (HAVE_fix_truncdfsi2)
+ fixtrunctab[(int) DFmode][(int) SImode][0] = CODE_FOR_fix_truncdfsi2;
+ if (HAVE_fix_truncsfsi2)
+ fixtrunctab[(int) SFmode][(int) SImode][0] = CODE_FOR_fix_truncsfsi2;
+ if (HAVE_floatsisf2)
+ floattab[(int) SFmode][(int) SImode][0] = CODE_FOR_floatsisf2;
+ if (HAVE_floatdisf2)
+ floattab[(int) SFmode][(int) DImode][0] = CODE_FOR_floatdisf2;
+ if (HAVE_floatsidf2)
+ floattab[(int) DFmode][(int) SImode][0] = CODE_FOR_floatsidf2;
+ if (HAVE_floatdidf2)
+ floattab[(int) DFmode][(int) DImode][0] = CODE_FOR_floatdidf2;
+ if (HAVE_floatsixf2)
+ floattab[(int) XFmode][(int) SImode][0] = CODE_FOR_floatsixf2;
+ if (HAVE_floatdixf2)
+ floattab[(int) XFmode][(int) DImode][0] = CODE_FOR_floatdixf2;
+ add_optab->handlers[(int) DImode].insn_code = CODE_FOR_adddi3;
+ add_optab->handlers[(int) SImode].insn_code = CODE_FOR_addsi3;
+ add_optab->handlers[(int) HImode].insn_code = CODE_FOR_addhi3;
+ add_optab->handlers[(int) QImode].insn_code = CODE_FOR_addqi3;
+ if (HAVE_addxf3)
+ add_optab->handlers[(int) XFmode].insn_code = CODE_FOR_addxf3;
+ if (HAVE_adddf3)
+ add_optab->handlers[(int) DFmode].insn_code = CODE_FOR_adddf3;
+ if (HAVE_addsf3)
+ add_optab->handlers[(int) SFmode].insn_code = CODE_FOR_addsf3;
+ sub_optab->handlers[(int) DImode].insn_code = CODE_FOR_subdi3;
+ sub_optab->handlers[(int) SImode].insn_code = CODE_FOR_subsi3;
+ sub_optab->handlers[(int) HImode].insn_code = CODE_FOR_subhi3;
+ sub_optab->handlers[(int) QImode].insn_code = CODE_FOR_subqi3;
+ if (HAVE_subxf3)
+ sub_optab->handlers[(int) XFmode].insn_code = CODE_FOR_subxf3;
+ if (HAVE_subdf3)
+ sub_optab->handlers[(int) DFmode].insn_code = CODE_FOR_subdf3;
+ if (HAVE_subsf3)
+ sub_optab->handlers[(int) SFmode].insn_code = CODE_FOR_subsf3;
+ smul_optab->handlers[(int) HImode].insn_code = CODE_FOR_mulhi3;
+ smul_optab->handlers[(int) SImode].insn_code = CODE_FOR_mulsi3;
+ umul_widen_optab->handlers[(int) HImode].insn_code = CODE_FOR_umulqihi3;
+ smul_widen_optab->handlers[(int) HImode].insn_code = CODE_FOR_mulqihi3;
+ umul_widen_optab->handlers[(int) DImode].insn_code = CODE_FOR_umulsidi3;
+ smul_widen_optab->handlers[(int) DImode].insn_code = CODE_FOR_mulsidi3;
+ if (HAVE_mulxf3)
+ smul_optab->handlers[(int) XFmode].insn_code = CODE_FOR_mulxf3;
+ if (HAVE_muldf3)
+ smul_optab->handlers[(int) DFmode].insn_code = CODE_FOR_muldf3;
+ if (HAVE_mulsf3)
+ smul_optab->handlers[(int) SFmode].insn_code = CODE_FOR_mulsf3;
+ sdiv_optab->handlers[(int) QImode].insn_code = CODE_FOR_divqi3;
+ udiv_optab->handlers[(int) QImode].insn_code = CODE_FOR_udivqi3;
+ if (HAVE_divxf3)
+ flodiv_optab->handlers[(int) XFmode].insn_code = CODE_FOR_divxf3;
+ if (HAVE_divdf3)
+ flodiv_optab->handlers[(int) DFmode].insn_code = CODE_FOR_divdf3;
+ if (HAVE_divsf3)
+ flodiv_optab->handlers[(int) SFmode].insn_code = CODE_FOR_divsf3;
+ sdivmod_optab->handlers[(int) SImode].insn_code = CODE_FOR_divmodsi4;
+ sdivmod_optab->handlers[(int) HImode].insn_code = CODE_FOR_divmodhi4;
+ udivmod_optab->handlers[(int) SImode].insn_code = CODE_FOR_udivmodsi4;
+ udivmod_optab->handlers[(int) HImode].insn_code = CODE_FOR_udivmodhi4;
+ and_optab->handlers[(int) SImode].insn_code = CODE_FOR_andsi3;
+ and_optab->handlers[(int) HImode].insn_code = CODE_FOR_andhi3;
+ and_optab->handlers[(int) QImode].insn_code = CODE_FOR_andqi3;
+ ior_optab->handlers[(int) SImode].insn_code = CODE_FOR_iorsi3;
+ ior_optab->handlers[(int) HImode].insn_code = CODE_FOR_iorhi3;
+ ior_optab->handlers[(int) QImode].insn_code = CODE_FOR_iorqi3;
+ xor_optab->handlers[(int) SImode].insn_code = CODE_FOR_xorsi3;
+ xor_optab->handlers[(int) HImode].insn_code = CODE_FOR_xorhi3;
+ xor_optab->handlers[(int) QImode].insn_code = CODE_FOR_xorqi3;
+ neg_optab->handlers[(int) DImode].insn_code = CODE_FOR_negdi2;
+ neg_optab->handlers[(int) SImode].insn_code = CODE_FOR_negsi2;
+ neg_optab->handlers[(int) HImode].insn_code = CODE_FOR_neghi2;
+ neg_optab->handlers[(int) QImode].insn_code = CODE_FOR_negqi2;
+ if (HAVE_negsf2)
+ neg_optab->handlers[(int) SFmode].insn_code = CODE_FOR_negsf2;
+ if (HAVE_negdf2)
+ neg_optab->handlers[(int) DFmode].insn_code = CODE_FOR_negdf2;
+ if (HAVE_negxf2)
+ neg_optab->handlers[(int) XFmode].insn_code = CODE_FOR_negxf2;
+ if (HAVE_abssf2)
+ abs_optab->handlers[(int) SFmode].insn_code = CODE_FOR_abssf2;
+ if (HAVE_absdf2)
+ abs_optab->handlers[(int) DFmode].insn_code = CODE_FOR_absdf2;
+ if (HAVE_absxf2)
+ abs_optab->handlers[(int) XFmode].insn_code = CODE_FOR_absxf2;
+ if (HAVE_sqrtsf2)
+ sqrt_optab->handlers[(int) SFmode].insn_code = CODE_FOR_sqrtsf2;
+ if (HAVE_sqrtdf2)
+ sqrt_optab->handlers[(int) DFmode].insn_code = CODE_FOR_sqrtdf2;
+ if (HAVE_sqrtxf2)
+ sqrt_optab->handlers[(int) XFmode].insn_code = CODE_FOR_sqrtxf2;
+ if (HAVE_sindf2)
+ sin_optab->handlers[(int) DFmode].insn_code = CODE_FOR_sindf2;
+ if (HAVE_sinsf2)
+ sin_optab->handlers[(int) SFmode].insn_code = CODE_FOR_sinsf2;
+ if (HAVE_cosdf2)
+ cos_optab->handlers[(int) DFmode].insn_code = CODE_FOR_cosdf2;
+ if (HAVE_cossf2)
+ cos_optab->handlers[(int) SFmode].insn_code = CODE_FOR_cossf2;
+ one_cmpl_optab->handlers[(int) SImode].insn_code = CODE_FOR_one_cmplsi2;
+ one_cmpl_optab->handlers[(int) HImode].insn_code = CODE_FOR_one_cmplhi2;
+ one_cmpl_optab->handlers[(int) QImode].insn_code = CODE_FOR_one_cmplqi2;
+ ashl_optab->handlers[(int) DImode].insn_code = CODE_FOR_ashldi3;
+ ashl_optab->handlers[(int) SImode].insn_code = CODE_FOR_ashlsi3;
+ ashl_optab->handlers[(int) HImode].insn_code = CODE_FOR_ashlhi3;
+ ashl_optab->handlers[(int) QImode].insn_code = CODE_FOR_ashlqi3;
+ ashr_optab->handlers[(int) DImode].insn_code = CODE_FOR_ashrdi3;
+ ashr_optab->handlers[(int) SImode].insn_code = CODE_FOR_ashrsi3;
+ ashr_optab->handlers[(int) HImode].insn_code = CODE_FOR_ashrhi3;
+ ashr_optab->handlers[(int) QImode].insn_code = CODE_FOR_ashrqi3;
+ lshr_optab->handlers[(int) DImode].insn_code = CODE_FOR_lshrdi3;
+ lshr_optab->handlers[(int) SImode].insn_code = CODE_FOR_lshrsi3;
+ lshr_optab->handlers[(int) HImode].insn_code = CODE_FOR_lshrhi3;
+ lshr_optab->handlers[(int) QImode].insn_code = CODE_FOR_lshrqi3;
+ rotl_optab->handlers[(int) SImode].insn_code = CODE_FOR_rotlsi3;
+ rotl_optab->handlers[(int) HImode].insn_code = CODE_FOR_rotlhi3;
+ rotl_optab->handlers[(int) QImode].insn_code = CODE_FOR_rotlqi3;
+ rotr_optab->handlers[(int) SImode].insn_code = CODE_FOR_rotrsi3;
+ rotr_optab->handlers[(int) HImode].insn_code = CODE_FOR_rotrhi3;
+ rotr_optab->handlers[(int) QImode].insn_code = CODE_FOR_rotrqi3;
+ setcc_gen_code[(int) EQ] = CODE_FOR_seq;
+ setcc_gen_code[(int) NE] = CODE_FOR_sne;
+ setcc_gen_code[(int) GT] = CODE_FOR_sgt;
+ setcc_gen_code[(int) GTU] = CODE_FOR_sgtu;
+ setcc_gen_code[(int) LT] = CODE_FOR_slt;
+ setcc_gen_code[(int) LTU] = CODE_FOR_sltu;
+ setcc_gen_code[(int) GE] = CODE_FOR_sge;
+ setcc_gen_code[(int) GEU] = CODE_FOR_sgeu;
+ setcc_gen_code[(int) LE] = CODE_FOR_sle;
+ setcc_gen_code[(int) LEU] = CODE_FOR_sleu;
+ bcc_gen_fctn[(int) EQ] = gen_beq;
+ bcc_gen_fctn[(int) NE] = gen_bne;
+ bcc_gen_fctn[(int) GT] = gen_bgt;
+ bcc_gen_fctn[(int) GTU] = gen_bgtu;
+ bcc_gen_fctn[(int) LT] = gen_blt;
+ bcc_gen_fctn[(int) LTU] = gen_bltu;
+ bcc_gen_fctn[(int) GE] = gen_bge;
+ bcc_gen_fctn[(int) GEU] = gen_bgeu;
+ bcc_gen_fctn[(int) LE] = gen_ble;
+ bcc_gen_fctn[(int) LEU] = gen_bleu;
+ movstr_optab[(int) SImode] = CODE_FOR_movstrsi;
+ ffs_optab->handlers[(int) SImode].insn_code = CODE_FOR_ffssi2;
+ ffs_optab->handlers[(int) HImode].insn_code = CODE_FOR_ffshi2;
+ strlen_optab->handlers[(int) SImode].insn_code = CODE_FOR_strlensi;
+}
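
init_all_optabs above fills per-mode handler tables: for each optab
and machine mode it records the insn code of the named pattern that
implements the operation, registering optional patterns only when the
corresponding HAVE_ macro is nonzero; entries never written keep
whatever "no pattern" default the tables were given elsewhere.  A toy
sketch of the same table-filling idiom (the modes, codes, and HAVE_
macros below are invented, not GCC's):

    #include <stdio.h>

    /* Invented modes, insn codes, and availability macros.  */
    enum toy_mode { QI, HI, SI, N_MODES };
    enum toy_code { NOTHING = -1, CODE_FOR_toyadd_si,
                    CODE_FOR_toyadd_hi, CODE_FOR_toyadd_qi };

    #define HAVE_toyadd_hi 1  /* pattern present on this target  */
    #define HAVE_toyadd_qi 0  /* pattern absent, entry untouched */

    static enum toy_code add_handlers[N_MODES]
      = { NOTHING, NOTHING, NOTHING };

    static void
    toy_init_optabs (void)
    {
      add_handlers[SI] = CODE_FOR_toyadd_si;   /* unconditional    */
      if (HAVE_toyadd_hi)
        add_handlers[HI] = CODE_FOR_toyadd_hi; /* guarded pattern  */
      if (HAVE_toyadd_qi)
        add_handlers[QI] = CODE_FOR_toyadd_qi;
    }

    int
    main (void)
    {
      toy_init_optabs ();
      printf ("QI=%d HI=%d SI=%d\n",
              add_handlers[QI], add_handlers[HI], add_handlers[SI]);
      /* prints: QI=-1 HI=1 SI=0 */
      return 0;
    }
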
diff --git a/gnu/usr.bin/cc/cc_int/insn-output.c b/gnu/usr.bin/cc/cc_int/insn-output.c
new file mode 100644
index 0000000..b354cf4
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/insn-output.c
@@ -0,0 +1,6865 @@
+/* Generated automatically by the program `genoutput'
+from the machine description file `md'. */
+
+#include "config.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+
+#include "conditions.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+
+#include "insn-codes.h"
+
+#include "recog.h"
+
+#include <stdio.h>
+#include "output.h"
+
+static char *
+output_0 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[0]))
+ return AS2 (test%L0,%0,%0);
+
+ operands[1] = const0_rtx;
+ return AS2 (cmp%L0,%1,%0);
+}
+}
+
+static char *
+output_2 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[0]))
+ return AS2 (test%W0,%0,%0);
+
+ operands[1] = const0_rtx;
+ return AS2 (cmp%W0,%1,%0);
+}
+}
+
+static char *
+output_4 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[0]))
+ return AS2 (test%B0,%0,%0);
+
+ operands[1] = const0_rtx;
+ return AS2 (cmp%B0,%1,%0);
+}
+}
+
+static char *
+output_6 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (! STACK_TOP_P (operands[0]))
+ abort ();
+
+ output_asm_insn ("ftst", operands);
+
+ if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+ output_asm_insn (AS1 (fstp,%y0), operands);
+
+ return (char *) output_fp_cc0_set (insn);
+}
+}
+
+static char *
+output_8 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (! STACK_TOP_P (operands[0]))
+ abort ();
+
+ output_asm_insn ("ftst", operands);
+
+ if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+ output_asm_insn (AS1 (fstp,%y0), operands);
+
+ return (char *) output_fp_cc0_set (insn);
+}
+}
+
+static char *
+output_10 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (! STACK_TOP_P (operands[0]))
+ abort ();
+
+ output_asm_insn ("ftst", operands);
+
+ if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+ output_asm_insn (AS1 (fstp,%y0), operands);
+
+ return (char *) output_fp_cc0_set (insn);
+}
+}
+
+static char *
+output_12 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (CONSTANT_P (operands[0]) || GET_CODE (operands[1]) == MEM)
+ {
+ cc_status.flags |= CC_REVERSED;
+ return AS2 (cmp%L0,%0,%1);
+ }
+ return AS2 (cmp%L0,%1,%0);
+}
+}
+
+static char *
+output_14 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (CONSTANT_P (operands[0]) || GET_CODE (operands[1]) == MEM)
+ {
+ cc_status.flags |= CC_REVERSED;
+ return AS2 (cmp%W0,%0,%1);
+ }
+ return AS2 (cmp%W0,%1,%0);
+}
+}
+
+static char *
+output_16 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (CONSTANT_P (operands[0]) || GET_CODE (operands[1]) == MEM)
+ {
+ cc_status.flags |= CC_REVERSED;
+ return AS2 (cmp%B0,%0,%1);
+ }
+ return AS2 (cmp%B0,%1,%0);
+}
+}
+
+static char *
+output_18 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_19 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_20 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_21 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_22 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_23 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_24 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_25 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_26 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_27 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_28 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_29 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_30 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_31 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_32 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_33 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_float_compare (insn, operands);
+}
+
+static char *
+output_43 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ /* For small integers, we may actually use testb. */
+ if (GET_CODE (operands[1]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
+ && (! REG_P (operands[0]) || QI_REG_P (operands[0])))
+ {
+ /* We may set the sign bit spuriously. */
+
+ if ((INTVAL (operands[1]) & ~0xff) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ return AS2 (test%B0,%1,%b0);
+ }
+
+ if ((INTVAL (operands[1]) & ~0xff00) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ operands[1] = GEN_INT (INTVAL (operands[1]) >> 8);
+
+ if (QI_REG_P (operands[0]))
+ return AS2 (test%B0,%1,%h0);
+ else
+ {
+ operands[0] = adj_offsettable_operand (operands[0], 1);
+ return AS2 (test%B0,%1,%b0);
+ }
+ }
+
+ if (GET_CODE (operands[0]) == MEM
+ && (INTVAL (operands[1]) & ~0xff0000) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ operands[1] = GEN_INT (INTVAL (operands[1]) >> 16);
+ operands[0] = adj_offsettable_operand (operands[0], 2);
+ return AS2 (test%B0,%1,%b0);
+ }
+
+ if (GET_CODE (operands[0]) == MEM
+ && (INTVAL (operands[1]) & ~0xff000000) == 0)
+ {
+ operands[1] = GEN_INT ((INTVAL (operands[1]) >> 24) & 0xff);
+ operands[0] = adj_offsettable_operand (operands[0], 3);
+ return AS2 (test%B0,%1,%b0);
+ }
+ }
+
+ if (CONSTANT_P (operands[1]) || GET_CODE (operands[0]) == MEM)
+ return AS2 (test%L0,%1,%0);
+
+ return AS2 (test%L1,%0,%1);
+}
+}
+
+static char *
+output_44 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (GET_CODE (operands[1]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
+ && (! REG_P (operands[0]) || QI_REG_P (operands[0])))
+ {
+ if ((INTVAL (operands[1]) & 0xff00) == 0)
+ {
+ /* ??? This might not be necessary. */
+ if (INTVAL (operands[1]) & 0xffff0000)
+ operands[1] = GEN_INT (INTVAL (operands[1]) & 0xff);
+
+ /* We may set the sign bit spuriously. */
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ return AS2 (test%B0,%1,%b0);
+ }
+
+ if ((INTVAL (operands[1]) & 0xff) == 0)
+ {
+ operands[1] = GEN_INT ((INTVAL (operands[1]) >> 8) & 0xff);
+
+ if (QI_REG_P (operands[0]))
+ return AS2 (test%B0,%1,%h0);
+ else
+ {
+ operands[0] = adj_offsettable_operand (operands[0], 1);
+ return AS2 (test%B0,%1,%b0);
+ }
+ }
+ }
+
+ if (CONSTANT_P (operands[1]) || GET_CODE (operands[0]) == MEM)
+ return AS2 (test%W0,%1,%0);
+
+ return AS2 (test%W1,%0,%1);
+}
+}
+
+static char *
+output_45 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (CONSTANT_P (operands[1]) || GET_CODE (operands[0]) == MEM)
+ return AS2 (test%B0,%1,%0);
+
+ return AS2 (test%B1,%0,%1);
+}
+}
+
+static char *
+output_49 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx link;
+ if (operands[1] == const0_rtx && REG_P (operands[0]))
+ return AS2 (xor%L0,%0,%0);
+
+ if (operands[1] == const1_rtx
+ && (link = find_reg_note (insn, REG_WAS_0, 0))
+ /* Make sure the insn that stored the 0 is still present. */
+ && ! INSN_DELETED_P (XEXP (link, 0))
+ && GET_CODE (XEXP (link, 0)) != NOTE
+ /* Make sure cross jumping didn't happen here. */
+ && no_labels_between_p (XEXP (link, 0), insn)
+ /* Make sure the reg hasn't been clobbered. */
+ && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+ /* Fastest way to change a 0 to a 1. */
+ return AS1 (inc%L0,%0);
+
+ return AS2 (mov%L0,%1,%0);
+}
+}
+
+static char *
+output_51 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx link;
+ if (REG_P (operands[0]) && operands[1] == const0_rtx)
+ return AS2 (xor%L0,%k0,%k0);
+
+ if (REG_P (operands[0]) && operands[1] == const1_rtx
+ && (link = find_reg_note (insn, REG_WAS_0, 0))
+ /* Make sure the insn that stored the 0 is still present. */
+ && ! INSN_DELETED_P (XEXP (link, 0))
+ && GET_CODE (XEXP (link, 0)) != NOTE
+ /* Make sure cross jumping didn't happen here. */
+ && no_labels_between_p (XEXP (link, 0), insn)
+ /* Make sure the reg hasn't been clobbered. */
+ && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+ /* Fastest way to change a 0 to a 1. */
+ return AS1 (inc%L0,%k0);
+
+ if (REG_P (operands[0]))
+ {
+ if (REG_P (operands[1]))
+ return AS2 (mov%L0,%k1,%k0);
+ else if (CONSTANT_P (operands[1]))
+ return AS2 (mov%L0,%1,%k0);
+ }
+
+ return AS2 (mov%W0,%1,%0);
+}
+}
+
+static char *
+output_52 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx link;
+ if (operands[1] == const0_rtx && REG_P (operands[0]))
+ return AS2 (xor%W0,%0,%0);
+
+ if (operands[1] == const1_rtx
+ && (link = find_reg_note (insn, REG_WAS_0, 0))
+ /* Make sure the insn that stored the 0 is still present. */
+ && ! INSN_DELETED_P (XEXP (link, 0))
+ && GET_CODE (XEXP (link, 0)) != NOTE
+ /* Make sure cross jumping didn't happen here. */
+ && no_labels_between_p (XEXP (link, 0), insn)
+ /* Make sure the reg hasn't been clobbered. */
+ && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+ /* Fastest way to change a 0 to a 1. */
+ return AS1 (inc%W0,%0);
+
+ return AS2 (mov%W0,%1,%0);
+}
+}
+
+static char *
+output_53 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ operands[1] = gen_rtx (REG, HImode, REGNO (operands[1]));
+ return AS1 (push%W0,%1);
+}
+}
+
+static char *
+output_54 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx link;
+ if (operands[1] == const0_rtx && REG_P (operands[0]))
+ return AS2 (xor%B0,%0,%0);
+
+ if (operands[1] == const1_rtx
+ && (link = find_reg_note (insn, REG_WAS_0, 0))
+ /* Make sure the insn that stored the 0 is still present. */
+ && ! INSN_DELETED_P (XEXP (link, 0))
+ && GET_CODE (XEXP (link, 0)) != NOTE
+ /* Make sure cross jumping didn't happen here. */
+ && no_labels_between_p (XEXP (link, 0), insn)
+ /* Make sure the reg hasn't been clobbered. */
+ && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+ /* Fastest way to change a 0 to a 1. */
+ return AS1 (inc%B0,%0);
+
+ /* If mov%B0 isn't allowed for one of these regs, use mov%L0. */
+ if (NON_QI_REG_P (operands[0]) || NON_QI_REG_P (operands[1]))
+ return (AS2 (mov%L0,%k1,%k0));
+
+ return (AS2 (mov%B0,%1,%0));
+}
+}
+
+static char *
+output_55 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx link;
+ if (operands[1] == const0_rtx && REG_P (operands[0]))
+ return AS2 (xor%B0,%0,%0);
+
+ if (operands[1] == const1_rtx
+ && (link = find_reg_note (insn, REG_WAS_0, 0))
+ /* Make sure the insn that stored the 0 is still present. */
+ && ! INSN_DELETED_P (XEXP (link, 0))
+ && GET_CODE (XEXP (link, 0)) != NOTE
+ /* Make sure cross jumping didn't happen here. */
+ && no_labels_between_p (XEXP (link, 0), insn)
+ /* Make sure the reg hasn't been clobbered. */
+ && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+ /* Fastest way to change a 0 to a 1. */
+ return AS1 (inc%B0,%0);
+
+ /* If mov%B0 isn't allowed for one of these regs, use mov%L0. */
+ if (NON_QI_REG_P (operands[0]) || NON_QI_REG_P (operands[1]))
+ {
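+      /* Presumably unreachable given the constraints; the return
+         after abort just keeps the compiler quiet.  */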
+ abort ();
+ return (AS2 (mov%L0,%k1,%k0));
+ }
+
+ return AS2 (mov%B0,%1,%0);
+}
+}
+
+static char *
+output_56 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (STACK_REG_P (operands[1]))
+ {
+ rtx xops[3];
+
+ if (! STACK_TOP_P (operands[1]))
+ abort ();
+
+ xops[0] = AT_SP (SFmode);
+ xops[1] = GEN_INT (4);
+ xops[2] = stack_pointer_rtx;
+
+ output_asm_insn (AS2 (sub%L2,%1,%2), xops);
+
+ if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+ output_asm_insn (AS1 (fstp%S0,%0), xops);
+ else
+ output_asm_insn (AS1 (fst%S0,%0), xops);
+ RET;
+ }
+ return AS1 (push%L1,%1);
+}
+}
+
+static char *
+output_57 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ /* First handle a `pop' insn or a `fld %st(0)' */
+
+ if (STACK_TOP_P (operands[0]) && STACK_TOP_P (operands[1]))
+ {
+ if (stack_top_dies)
+ return AS1 (fstp,%y0);
+ else
+ return AS1 (fld,%y0);
+ }
+
+ /* Handle a transfer between the 387 and a 386 register */
+
+ if (STACK_TOP_P (operands[0]) && NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fld%z0,%y1));
+ RET;
+ }
+
+ if (STACK_TOP_P (operands[1]) && NON_STACK_REG_P (operands[0]))
+ {
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+
+ /* Handle other kinds of writes from the 387 */
+
+ if (STACK_TOP_P (operands[1]))
+ {
+ if (stack_top_dies)
+ return AS1 (fstp%z0,%y0);
+ else
+ return AS1 (fst%z0,%y0);
+ }
+
+ /* Handle other kinds of reads to the 387 */
+
+ if (STACK_TOP_P (operands[0]) && GET_CODE (operands[1]) == CONST_DOUBLE)
+ return (char *) output_move_const_single (operands);
+
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fld%z1,%y1);
+
+ /* Handle all SFmode moves not involving the 387 */
+
+ return (char *) singlemove_string (operands);
+}
+}
+
+static char *
+output_58 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (STACK_REG_P (operands[1]))
+ {
+ rtx xops[3];
+
+ xops[0] = AT_SP (SFmode);
+ xops[1] = GEN_INT (8);
+ xops[2] = stack_pointer_rtx;
+
+ output_asm_insn (AS2 (sub%L2,%1,%2), xops);
+
+ if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+ output_asm_insn (AS1 (fstp%Q0,%0), xops);
+ else
+ output_asm_insn (AS1 (fst%Q0,%0), xops);
+
+ RET;
+ }
+ else
+ return (char *) output_move_double (operands);
+}
+}
+
+static char *
+output_59 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fxch,%1);
+ else
+ return AS1 (fxch,%0);
+}
+}
+
+static char *
+output_60 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ /* First handle a `pop' insn or a `fld %st(0)' */
+
+ if (STACK_TOP_P (operands[0]) && STACK_TOP_P (operands[1]))
+ {
+ if (stack_top_dies)
+ return AS1 (fstp,%y0);
+ else
+ return AS1 (fld,%y0);
+ }
+
+ /* Handle a transfer between the 387 and a 386 register */
+
+ if (STACK_TOP_P (operands[0]) && NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fld%z0,%y1));
+ RET;
+ }
+
+ if (STACK_TOP_P (operands[1]) && NON_STACK_REG_P (operands[0]))
+ {
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+
+ /* Handle other kinds of writes from the 387 */
+
+ if (STACK_TOP_P (operands[1]))
+ {
+ if (stack_top_dies)
+ return AS1 (fstp%z0,%y0);
+ else
+ return AS1 (fst%z0,%y0);
+ }
+
+ /* Handle other kinds of reads to the 387 */
+
+ if (STACK_TOP_P (operands[0]) && GET_CODE (operands[1]) == CONST_DOUBLE)
+ return (char *) output_move_const_single (operands);
+
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fld%z1,%y1);
+
+ /* Handle all DFmode moves not involving the 387 */
+
+ return (char *) output_move_double (operands);
+}
+}
+
+static char *
+output_61 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (STACK_REG_P (operands[1]))
+ {
+ rtx xops[3];
+
+ xops[0] = AT_SP (SFmode);
+ xops[1] = GEN_INT (12);
+ xops[2] = stack_pointer_rtx;
+
+ output_asm_insn (AS2 (sub%L2,%1,%2), xops);
+ output_asm_insn (AS1 (fstp%T0,%0), xops);
+ if (! find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
+ output_asm_insn (AS1 (fld%T0,%0), xops);
+
+ RET;
+ }
+ else
+ return (char *) output_move_double (operands);
+}
+}
+
+static char *
+output_62 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fxch,%1);
+ else
+ return AS1 (fxch,%0);
+}
+}
+
+static char *
+output_63 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ /* First handle a `pop' insn or a `fld %st(0)' */
+
+ if (STACK_TOP_P (operands[0]) && STACK_TOP_P (operands[1]))
+ {
+ if (stack_top_dies)
+ return AS1 (fstp,%y0);
+ else
+ return AS1 (fld,%y0);
+ }
+
+ /* Handle a transfer between the 387 and a 386 register */
+
+ if (STACK_TOP_P (operands[0]) && NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fld%z0,%y1));
+ RET;
+ }
+
+ if (STACK_TOP_P (operands[1]) && NON_STACK_REG_P (operands[0]))
+ {
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+
+ /* Handle other kinds of writes from the 387 */
+
+ if (STACK_TOP_P (operands[1]))
+ {
+ output_asm_insn (AS1 (fstp%z0,%y0), operands);
+ if (! stack_top_dies)
+ return AS1 (fld%z0,%y0);
+
+ RET;
+ }
+
+ /* Handle other kinds of reads to the 387 */
+
+ if (STACK_TOP_P (operands[0]) && GET_CODE (operands[1]) == CONST_DOUBLE)
+ return (char *) output_move_const_single (operands);
+
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fld%z1,%y1);
+
+ /* Handle all XFmode moves not involving the 387 */
+
+ return (char *) output_move_double (operands);
+}
+}
+
+static char *
+output_64 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ return (char *) output_move_double (operands);
+}
+}
+
+static char *
+output_65 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ return (char *) output_move_double (operands);
+}
+}
+
+static char *
+output_66 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if ((TARGET_486 || REGNO (operands[0]) == 0)
+ && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1]))
+ {
+ rtx xops[2];
+ xops[0] = operands[0];
+ xops[1] = GEN_INT (0xffff);
+ output_asm_insn (AS2 (and%L0,%1,%k0), xops);
+ RET;
+ }
+
+#ifdef INTEL_SYNTAX
+ return AS2 (movzx,%1,%0);
+#else
+ return AS2 (movz%W0%L0,%1,%0);
+#endif
+}
+}
+
+static char *
+output_67 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if ((TARGET_486 || REGNO (operands[0]) == 0)
+ && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1]))
+ {
+ rtx xops[2];
+ xops[0] = operands[0];
+ xops[1] = GEN_INT (0xff);
+ output_asm_insn (AS2 (and%L0,%1,%k0), xops);
+ RET;
+ }
+
+#ifdef INTEL_SYNTAX
+ return AS2 (movzx,%1,%0);
+#else
+ return AS2 (movz%B0%W0,%1,%0);
+#endif
+}
+}
+
+static char *
+output_68 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if ((TARGET_486 || REGNO (operands[0]) == 0)
+ && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1]))
+ {
+ rtx xops[2];
+ xops[0] = operands[0];
+ xops[1] = GEN_INT (0xff);
+ output_asm_insn (AS2 (and%L0,%1,%k0), xops);
+ RET;
+ }
+
+#ifdef INTEL_SYNTAX
+ return AS2 (movzx,%1,%0);
+#else
+ return AS2 (movz%B0%L0,%1,%0);
+#endif
+}
+}
+
+static char *
+output_69 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ operands[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
+ return AS2 (xor%L0,%0,%0);
+}
+}
+
+static char *
+output_70 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REGNO (operands[0]) == 0)
+ {
+ /* This used to be cwtl, but that extends HI to SI somehow. */
+#ifdef INTEL_SYNTAX
+ return "cdq";
+#else
+ return "cltd";
+#endif
+ }
+
+ operands[1] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
+ output_asm_insn (AS2 (mov%L0,%0,%1), operands);
+
+ operands[0] = GEN_INT (31);
+ return AS2 (sar%L1,%0,%1);
+}
+}
+
+static char *
+output_71 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REGNO (operands[0]) == 0
+ && REG_P (operands[1]) && REGNO (operands[1]) == 0)
+#ifdef INTEL_SYNTAX
+ return "cwde";
+#else
+ return "cwtl";
+#endif
+
+#ifdef INTEL_SYNTAX
+ return AS2 (movsx,%1,%0);
+#else
+ return AS2 (movs%W0%L0,%1,%0);
+#endif
+}
+}
+
+static char *
+output_72 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REGNO (operands[0]) == 0
+ && REG_P (operands[1]) && REGNO (operands[1]) == 0)
+ return "cbtw";
+
+#ifdef INTEL_SYNTAX
+ return AS2 (movsx,%1,%0);
+#else
+ return AS2 (movs%B0%W0,%1,%0);
+#endif
+}
+}
+
+static char *
+output_73 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+#ifdef INTEL_SYNTAX
+ return AS2 (movsx,%1,%0);
+#else
+ return AS2 (movs%B0%L0,%1,%0);
+#endif
+}
+}
+
+static char *
+output_74 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fld%z0,%y1));
+ RET;
+ }
+
+ if (NON_STACK_REG_P (operands[0]))
+ {
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fld%z1,%y1);
+
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (stack_top_dies)
+ return AS1 (fstp%z0,%y0);
+ else
+ return AS1 (fst%z0,%y0);
+ }
+
+ abort ();
+}
+}
+
+static char *
+output_75 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fld%z0,%y1));
+ RET;
+ }
+
+ if (NON_STACK_REG_P (operands[0]))
+ {
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fld%z1,%y1);
+
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ output_asm_insn (AS1 (fstp%z0,%y0), operands);
+ if (! stack_top_dies)
+ return AS1 (fld%z0,%y0);
+ RET;
+ }
+
+ abort ();
+}
+}
+
+static char *
+output_76 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fld%z0,%y1));
+ RET;
+ }
+
+ if (NON_STACK_REG_P (operands[0]))
+ {
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+
+ if (STACK_TOP_P (operands[0]))
+ return AS1 (fld%z1,%y1);
+
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ output_asm_insn (AS1 (fstp%z0,%y0), operands);
+ if (! stack_top_dies)
+ return AS1 (fld%z0,%y0);
+ RET;
+ }
+
+ abort ();
+}
+}
+
+static char *
+output_78 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (stack_top_dies)
+ return AS1 (fstp%z0,%0);
+ else
+ return AS1 (fst%z0,%0);
+ }
+ else if (STACK_TOP_P (operands[0]))
+ {
+ output_asm_insn (AS1 (fstp%z2,%y2), operands);
+ return AS1 (fld%z2,%y2);
+ }
+ else
+ abort ();
+}
+}
+
+static char *
+output_79 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (NON_STACK_REG_P (operands[0]))
+ {
+ if (stack_top_dies == 0)
+ {
+ output_asm_insn (AS1 (fld,%y1), operands);
+ stack_top_dies = 1;
+ }
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+ else if (GET_CODE (operands[0]) == MEM)
+ {
+ if (stack_top_dies)
+ return AS1 (fstp%z0,%0);
+ else
+ {
+ output_asm_insn (AS1 (fld,%y1), operands);
+ return AS1 (fstp%z0,%0);
+ }
+ }
+ else
+ abort ();
+}
+}
+
+static char *
+output_80 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (NON_STACK_REG_P (operands[0]))
+ {
+ if (stack_top_dies == 0)
+ {
+ output_asm_insn (AS1 (fld,%y1), operands);
+ stack_top_dies = 1;
+ }
+ output_to_reg (operands[0], stack_top_dies);
+ RET;
+ }
+ else if (GET_CODE (operands[0]) == MEM)
+ {
+ if (stack_top_dies)
+ return AS1 (fstp%z0,%0);
+ else
+ {
+ output_asm_insn (AS1 (fld,%y1), operands);
+ return AS1 (fstp%z0,%0);
+ }
+ }
+ else
+ abort ();
+}
+}
+
+static char *
+output_87 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_fix_trunc (insn, operands);
+}
+
+static char *
+output_88 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_fix_trunc (insn, operands);
+}
+
+static char *
+output_89 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_fix_trunc (insn, operands);
+}
+
+static char *
+output_93 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_fix_trunc (insn, operands);
+}
+
+static char *
+output_94 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_fix_trunc (insn, operands);
+}
+
+static char *
+output_95 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_fix_trunc (insn, operands);
+}
+
+static char *
+output_102 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fild%z0,%1));
+ RET;
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return AS1 (fild%z1,%1);
+ else
+ abort ();
+}
+}
+
+static char *
+output_103 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fild%z0,%1));
+ RET;
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return AS1 (fild%z1,%1);
+ else
+ abort ();
+}
+}
+
+static char *
+output_104 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fild%z0,%1));
+ RET;
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return AS1 (fild%z1,%1);
+ else
+ abort ();
+}
+}
+
+static char *
+output_105 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fild%z0,%1));
+ RET;
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return AS1 (fild%z1,%1);
+ else
+ abort ();
+}
+}
+
+static char *
+output_106 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fild%z0,%1));
+ RET;
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return AS1 (fild%z1,%1);
+ else
+ abort ();
+}
+}
+
+static char *
+output_107 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (NON_STACK_REG_P (operands[1]))
+ {
+ output_op_from_reg (operands[1], AS1 (fild%z0,%1));
+ RET;
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return AS1 (fild%z1,%1);
+ else
+ abort ();
+}
+}
+
+static char *
+output_108 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx low[3], high[3];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 3, low, high);
+
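+  /* DImode add: add%L the low words, then adc%L the carry into the
+     high words.  When the low half of the constant is zero no carry
+     can arise and the low words are unchanged, so only the high
+     words are added.  */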
+ if (GET_CODE (low[2]) != CONST_INT || INTVAL (low[2]) != 0)
+ {
+ output_asm_insn (AS2 (add%L0,%2,%0), low);
+ output_asm_insn (AS2 (adc%L0,%2,%0), high);
+ }
+ else
+ output_asm_insn (AS2 (add%L0,%2,%0), high);
+ RET;
+}
+}
+
+static char *
+output_109 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[0]) && REGNO (operands[0]) != REGNO (operands[1]))
+ {
+ if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
+ return AS2 (add%L0,%1,%0);
+
+ if (! TARGET_486 || ! REG_P (operands[2]))
+ {
+ CC_STATUS_INIT;
+
+ if (operands[2] == stack_pointer_rtx)
+ {
+ rtx temp;
+
+ temp = operands[1];
+ operands[1] = operands[2];
+ operands[2] = temp;
+ }
+ if (operands[2] != stack_pointer_rtx)
+ {
+ operands[1] = SET_SRC (PATTERN (insn));
+ return AS2 (lea%L0,%a1,%0);
+ }
+ }
+
+ output_asm_insn (AS2 (mov%L0,%1,%0), operands);
+ }
+
+ if (operands[2] == const1_rtx)
+ return AS1 (inc%L0,%0);
+
+ if (operands[2] == constm1_rtx)
+ return AS1 (dec%L0,%0);
+
+ return AS2 (add%L0,%2,%0);
+}
+}
+
+static char *
+output_110 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ /* ??? what about offsettable memory references? */
+ if (QI_REG_P (operands[0])
+ && GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) & 0xff) == 0)
+ {
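+      /* The constant's low byte is zero, so only the high-byte
+         register (%h0) has to change: add the constant's high byte
+         to it alone.  */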
+ int byteval = (INTVAL (operands[2]) >> 8) & 0xff;
+ CC_STATUS_INIT;
+
+ if (byteval == 1)
+ return AS1 (inc%B0,%h0);
+ else if (byteval == 255)
+ return AS1 (dec%B0,%h0);
+
+ operands[2] = GEN_INT (byteval);
+ return AS2 (add%B0,%2,%h0);
+ }
+
+ if (operands[2] == const1_rtx)
+ return AS1 (inc%W0,%0);
+
+ if (operands[2] == constm1_rtx
+ || (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 65535))
+ return AS1 (dec%W0,%0);
+
+ return AS2 (add%W0,%2,%0);
+}
+}
+
+static char *
+output_111 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (operands[2] == const1_rtx)
+ return AS1 (inc%B0,%0);
+
+ if (operands[2] == constm1_rtx
+ || (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 255))
+ return AS1 (dec%B0,%0);
+
+ return AS2 (add%B0,%2,%0);
+}
+}
+
+static char *
+output_112 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ CC_STATUS_INIT;
+ /* Adding a constant to a register is faster with an add. */
+ /* ??? can this ever happen? */
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && rtx_equal_p (operands[0], XEXP (operands[1], 0)))
+ {
+ operands[1] = XEXP (operands[1], 1);
+
+ if (operands[1] == const1_rtx)
+ return AS1 (inc%L0,%0);
+
+ if (operands[1] == constm1_rtx)
+ return AS1 (dec%L0,%0);
+
+ return AS2 (add%L0,%1,%0);
+ }
+ return AS2 (lea%L0,%a1,%0);
+}
+}
+
+static char *
+output_116 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx low[3], high[3];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 3, low, high);
+
+ if (GET_CODE (low[2]) != CONST_INT || INTVAL (low[2]) != 0)
+ {
+ output_asm_insn (AS2 (sub%L0,%2,%0), low);
+ output_asm_insn (AS2 (sbb%L0,%2,%0), high);
+ }
+ else
+ output_asm_insn (AS2 (sub%L0,%2,%0), high);
+
+ RET;
+}
+}
+
+static char *
+output_117 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return AS2 (sub%L0,%2,%0);
+}
+
+static char *
+output_118 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return AS2 (sub%W0,%2,%0);
+}
+
+static char *
+output_119 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return AS2 (sub%B0,%2,%0);
+}
+
+static char *
+output_123 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return AS2 (imul%W0,%2,%0);
+}
+
+static char *
+output_124 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[0])
+ && (GET_CODE (operands[2]) == MEM || GET_CODE (operands[2]) == REG))
+ /* Assembler has weird restrictions. */
+ return AS2 (imul%W0,%2,%0);
+ return AS3 (imul%W0,%2,%1,%0);
+}
+}
+
+static char *
+output_125 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return AS2 (imul%L0,%2,%0);
+}
+
+static char *
+output_126 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[0])
+ && (GET_CODE (operands[2]) == MEM || GET_CODE (operands[2]) == REG))
+ /* Assembler has weird restrictions. */
+ return AS2 (imul%L0,%2,%0);
+ return AS3 (imul%L0,%2,%1,%0);
+}
+}
+
+static char *
+output_139 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+#ifdef INTEL_SYNTAX
+ output_asm_insn ("cdq", operands);
+#else
+ output_asm_insn ("cltd", operands);
+#endif
+ return AS1 (idiv%L0,%2);
+}
+}
+
+static char *
+output_141 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ output_asm_insn (AS2 (xor%L3,%3,%3), operands);
+ return AS1 (div%L0,%2);
+}
+}
+
+static char *
+output_142 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ output_asm_insn (AS2 (xor%W0,%3,%3), operands);
+ return AS1 (div%W0,%2);
+}
+}
+
+static char *
+output_143 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ {
+ if (INTVAL (operands[2]) == 0xffff && REG_P (operands[0])
+ && (! REG_P (operands[1])
+ || REGNO (operands[0]) != 0 || REGNO (operands[1]) != 0)
+ && (! TARGET_486 || ! rtx_equal_p (operands[0], operands[1])))
+ {
+ /* ??? tege: Should forget CC_STATUS only if we clobber a
+ remembered operand. Fix that later. */
+ CC_STATUS_INIT;
+#ifdef INTEL_SYNTAX
+ return AS2 (movzx,%w1,%0);
+#else
+ return AS2 (movz%W0%L0,%w1,%0);
+#endif
+ }
+
+ if (INTVAL (operands[2]) == 0xff && REG_P (operands[0])
+ && !(REG_P (operands[1]) && NON_QI_REG_P (operands[1]))
+ && (! REG_P (operands[1])
+ || REGNO (operands[0]) != 0 || REGNO (operands[1]) != 0)
+ && (! TARGET_486 || ! rtx_equal_p (operands[0], operands[1])))
+ {
+ /* ??? tege: Should forget CC_STATUS only if we clobber a
+ remembered operand. Fix that later. */
+ CC_STATUS_INIT;
+#ifdef INTEL_SYNTAX
+ return AS2 (movzx,%b1,%0);
+#else
+ return AS2 (movz%B0%L0,%b1,%0);
+#endif
+ }
+
+ if (QI_REG_P (operands[0]) && ~(INTVAL (operands[2]) | 0xff) == 0)
+ {
+ CC_STATUS_INIT;
+
+ if (INTVAL (operands[2]) == 0xffffff00)
+ {
+ operands[2] = const0_rtx;
+ return AS2 (mov%B0,%2,%b0);
+ }
+
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xff);
+ return AS2 (and%B0,%2,%b0);
+ }
+
+ if (QI_REG_P (operands[0]) && ~(INTVAL (operands[2]) | 0xff00) == 0)
+ {
+ CC_STATUS_INIT;
+
+ if (INTVAL (operands[2]) == 0xffff00ff)
+ {
+ operands[2] = const0_rtx;
+ return AS2 (mov%B0,%2,%h0);
+ }
+
+ operands[2] = GEN_INT ((INTVAL (operands[2]) >> 8) & 0xff);
+ return AS2 (and%B0,%2,%h0);
+ }
+
+ if (GET_CODE (operands[0]) == MEM && INTVAL (operands[2]) == 0xffff0000)
+ {
+ operands[2] = const0_rtx;
+ return AS2 (mov%W0,%2,%w0);
+ }
+ }
+
+ return AS2 (and%L0,%2,%0);
+}
+}
+
+static char *
+output_144 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ {
+ /* Can we ignore the upper byte? */
+ if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
+ && (INTVAL (operands[2]) & 0xff00) == 0xff00)
+ {
+ CC_STATUS_INIT;
+
+ if ((INTVAL (operands[2]) & 0xff) == 0)
+ {
+ operands[2] = const0_rtx;
+ return AS2 (mov%B0,%2,%b0);
+ }
+
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xff);
+ return AS2 (and%B0,%2,%b0);
+ }
+
+ /* Can we ignore the lower byte? */
+ /* ??? what about offsettable memory references? */
+ if (QI_REG_P (operands[0]) && (INTVAL (operands[2]) & 0xff) == 0xff)
+ {
+ CC_STATUS_INIT;
+
+ if ((INTVAL (operands[2]) & 0xff00) == 0)
+ {
+ operands[2] = const0_rtx;
+ return AS2 (mov%B0,%2,%h0);
+ }
+
+ operands[2] = GEN_INT ((INTVAL (operands[2]) >> 8) & 0xff);
+ return AS2 (and%B0,%2,%h0);
+ }
+ }
+
+ return AS2 (and%W0,%2,%0);
+}
+}
+
+static char *
+output_145 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return AS2 (and%B0,%2,%0);
+}
+
+static char *
+output_146 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ {
+ if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
+ && (INTVAL (operands[2]) & ~0xff) == 0)
+ {
+ CC_STATUS_INIT;
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS2 (mov%B0,%2,%b0);
+
+ return AS2 (or%B0,%2,%b0);
+ }
+
+ if (QI_REG_P (operands[0]) && (INTVAL (operands[2]) & ~0xff00) == 0)
+ {
+ CC_STATUS_INIT;
+ operands[2] = GEN_INT (INTVAL (operands[2]) >> 8);
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS2 (mov%B0,%2,%h0);
+
+ return AS2 (or%B0,%2,%h0);
+ }
+ }
+
+ return AS2 (or%L0,%2,%0);
+}
+}
+
+static char *
+output_147 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ {
+ /* Can we ignore the upper byte? */
+ if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
+ && (INTVAL (operands[2]) & 0xff00) == 0)
+ {
+ CC_STATUS_INIT;
+ if (INTVAL (operands[2]) & 0xffff0000)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS2 (mov%B0,%2,%b0);
+
+ return AS2 (or%B0,%2,%b0);
+ }
+
+ /* Can we ignore the lower byte? */
+ /* ??? what about offsettable memory references? */
+ if (QI_REG_P (operands[0])
+ && (INTVAL (operands[2]) & 0xff) == 0)
+ {
+ CC_STATUS_INIT;
+ operands[2] = GEN_INT ((INTVAL (operands[2]) >> 8) & 0xff);
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS2 (mov%B0,%2,%h0);
+
+ return AS2 (or%B0,%2,%h0);
+ }
+ }
+
+ return AS2 (or%W0,%2,%0);
+}
+}
+
+static char *
+output_148 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return AS2 (or%B0,%2,%0);
+}
+
+static char *
+output_149 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ {
+ if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
+ && (INTVAL (operands[2]) & ~0xff) == 0)
+ {
+ CC_STATUS_INIT;
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS1 (not%B0,%b0);
+
+ return AS2 (xor%B0,%2,%b0);
+ }
+
+ if (QI_REG_P (operands[0]) && (INTVAL (operands[2]) & ~0xff00) == 0)
+ {
+ CC_STATUS_INIT;
+ operands[2] = GEN_INT (INTVAL (operands[2]) >> 8);
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS1 (not%B0,%h0);
+
+ return AS2 (xor%B0,%2,%h0);
+ }
+ }
+
+ return AS2 (xor%L0,%2,%0);
+}
+}
+
+static char *
+output_150 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ {
+ /* Can we ignore the upper byte? */
+ if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
+ && (INTVAL (operands[2]) & 0xff00) == 0)
+ {
+ CC_STATUS_INIT;
+ if (INTVAL (operands[2]) & 0xffff0000)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS1 (not%B0,%b0);
+
+ return AS2 (xor%B0,%2,%b0);
+ }
+
+ /* Can we ignore the lower byte? */
+ /* ??? what about offsettable memory references? */
+ if (QI_REG_P (operands[0])
+ && (INTVAL (operands[2]) & 0xff) == 0)
+ {
+ CC_STATUS_INIT;
+ operands[2] = GEN_INT ((INTVAL (operands[2]) >> 8) & 0xff);
+
+ if (INTVAL (operands[2]) == 0xff)
+ return AS1 (not%B0,%h0);
+
+ return AS2 (xor%B0,%2,%h0);
+ }
+ }
+
+ return AS2 (xor%W0,%2,%0);
+}
+}
+
+static char *
+output_151 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return AS2 (xor%B0,%2,%0);
+}
+
+static char *
+output_152 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[2], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = const0_rtx;
+ xops[1] = high[0];
+
+ output_asm_insn (AS1 (neg%L0,%0), low);
+ output_asm_insn (AS2 (adc%L1,%0,%1), xops);
+ output_asm_insn (AS1 (neg%L0,%0), high);
+ RET;
+}
+}
+
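+/* ashldi3 by a constant: counts of 32 or more move the low word
+   into the high word and clear the low word, then do any remaining
+   shift; smaller counts use shld followed by sal.  */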
+static char *
+output_182 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[4], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = operands[2];
+ xops[1] = const1_rtx;
+ xops[2] = low[0];
+ xops[3] = high[0];
+
+ if (INTVAL (xops[0]) > 31)
+ {
+ output_asm_insn (AS2 (mov%L3,%2,%3), xops); /* Fast shift by 32 */
+ output_asm_insn (AS2 (xor%L2,%2,%2), xops);
+
+ if (INTVAL (xops[0]) > 32)
+ {
+ xops[0] = GEN_INT (INTVAL (xops[0]) - 32);
+ output_asm_insn (AS2 (sal%L3,%0,%3), xops); /* Remaining shift */
+ }
+ }
+ else
+ {
+ output_asm_insn (AS3 (shld%L3,%0,%2,%3), xops);
+ output_asm_insn (AS2 (sal%L2,%0,%2), xops);
+ }
+ RET;
+}
+}
+
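+/* ashldi3 by a variable count: ror halves the count in place, the
+   shld/sal pair is issued twice to shift by 2*(count/2), then shr
+   recovers the saved low bit for a final shift by count & 1.  */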
+static char *
+output_183 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[4], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = operands[2];
+ xops[1] = const1_rtx;
+ xops[2] = low[0];
+ xops[3] = high[0];
+
+ output_asm_insn (AS2 (ror%B0,%1,%0), xops); /* shift count / 2 */
+
+ output_asm_insn (AS3_SHIFT_DOUBLE (shld%L3,%0,%2,%3), xops);
+ output_asm_insn (AS2 (sal%L2,%0,%2), xops);
+ output_asm_insn (AS3_SHIFT_DOUBLE (shld%L3,%0,%2,%3), xops);
+ output_asm_insn (AS2 (sal%L2,%0,%2), xops);
+
+ xops[1] = GEN_INT (7); /* shift count & 1 */
+
+ output_asm_insn (AS2 (shr%B0,%1,%0), xops);
+
+ output_asm_insn (AS3_SHIFT_DOUBLE (shld%L3,%0,%2,%3), xops);
+ output_asm_insn (AS2 (sal%L2,%0,%2), xops);
+
+ RET;
+}
+}
+
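+/* ashlsi3: a constant shift into a different register becomes an
+   lea with a scaled index (or mov/add for a shift of 1 on the 486);
+   otherwise sal, with add preferred for a register shifted by 1.  */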
+static char *
+output_184 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[0]) && REGNO (operands[0]) != REGNO (operands[1]))
+ {
+ if (TARGET_486 && INTVAL (operands[2]) == 1)
+ {
+ output_asm_insn (AS2 (mov%L0,%1,%0), operands);
+ return AS2 (add%L0,%1,%0);
+ }
+ else
+ {
+ CC_STATUS_INIT;
+
+ if (operands[1] == stack_pointer_rtx)
+ {
+ output_asm_insn (AS2 (mov%L0,%1,%0), operands);
+ operands[1] = operands[0];
+ }
+ operands[1] = gen_rtx (MULT, SImode, operands[1],
+ GEN_INT (1 << INTVAL (operands[2])));
+ return AS2 (lea%L0,%a1,%0);
+ }
+ }
+
+ if (REG_P (operands[2]))
+ return AS2 (sal%L0,%b2,%0);
+
+ if (REG_P (operands[0]) && operands[2] == const1_rtx)
+ return AS2 (add%L0,%0,%0);
+
+ return AS2 (sal%L0,%2,%0);
+}
+}
+
+static char *
+output_185 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (sal%W0,%b2,%0);
+
+ if (REG_P (operands[0]) && operands[2] == const1_rtx)
+ return AS2 (add%W0,%0,%0);
+
+ return AS2 (sal%W0,%2,%0);
+}
+}
+
+static char *
+output_186 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (sal%B0,%b2,%0);
+
+ if (REG_P (operands[0]) && operands[2] == const1_rtx)
+ return AS2 (add%B0,%0,%0);
+
+ return AS2 (sal%B0,%2,%0);
+}
+}
+
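+/* ashrdi3 by a constant: counts of 32 or more copy the high word
+   down and sar the high word by 31 to replicate the sign; smaller
+   counts use shrd followed by sar.  */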
+static char *
+output_188 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[4], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = operands[2];
+ xops[1] = const1_rtx;
+ xops[2] = low[0];
+ xops[3] = high[0];
+
+ if (INTVAL (xops[0]) > 31)
+ {
+ xops[1] = GEN_INT (31);
+ output_asm_insn (AS2 (mov%L2,%3,%2), xops);
+ output_asm_insn (AS2 (sar%L3,%1,%3), xops); /* shift by 32 */
+
+ if (INTVAL (xops[0]) > 32)
+ {
+ xops[0] = GEN_INT (INTVAL (xops[0]) - 32);
+ output_asm_insn (AS2 (sar%L2,%0,%2), xops); /* Remaining shift */
+ }
+ }
+ else
+ {
+ output_asm_insn (AS3 (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (sar%L3,%0,%3), xops);
+ }
+
+ RET;
+}
+}
+
+static char *
+output_189 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[4], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = operands[2];
+ xops[1] = const1_rtx;
+ xops[2] = low[0];
+ xops[3] = high[0];
+
+ output_asm_insn (AS2 (ror%B0,%1,%0), xops); /* shift count / 2 */
+
+ output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (sar%L3,%0,%3), xops);
+ output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (sar%L3,%0,%3), xops);
+
+ xops[1] = GEN_INT (7); /* shift count & 1 */
+
+ output_asm_insn (AS2 (shr%B0,%1,%0), xops);
+
+ output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (sar%L3,%0,%3), xops);
+
+ RET;
+}
+}
+
+static char *
+output_190 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (sar%L0,%b2,%0);
+ else
+ return AS2 (sar%L0,%2,%0);
+}
+}
+
+static char *
+output_191 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (sar%W0,%b2,%0);
+ else
+ return AS2 (sar%W0,%2,%0);
+}
+}
+
+static char *
+output_192 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (sar%B0,%b2,%0);
+ else
+ return AS2 (sar%B0,%2,%0);
+}
+}
+
+static char *
+output_194 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[4], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = operands[2];
+ xops[1] = const1_rtx;
+ xops[2] = low[0];
+ xops[3] = high[0];
+
+ if (INTVAL (xops[0]) > 31)
+ {
+ output_asm_insn (AS2 (mov%L2,%3,%2), xops); /* Fast shift by 32 */
+ output_asm_insn (AS2 (xor%L3,%3,%3), xops);
+
+ if (INTVAL (xops[0]) > 32)
+ {
+ xops[0] = GEN_INT (INTVAL (xops[0]) - 32);
+ output_asm_insn (AS2 (shr%L2,%0,%2), xops); /* Remaining shift */
+ }
+ }
+ else
+ {
+ output_asm_insn (AS3 (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (shr%L3,%0,%3), xops);
+ }
+
+ RET;
+}
+}
+
+static char *
+output_195 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[4], low[1], high[1];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 1, low, high);
+ xops[0] = operands[2];
+ xops[1] = const1_rtx;
+ xops[2] = low[0];
+ xops[3] = high[0];
+
+ output_asm_insn (AS2 (ror%B0,%1,%0), xops); /* shift count / 2 */
+
+ output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (shr%L3,%0,%3), xops);
+ output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (shr%L3,%0,%3), xops);
+
+ xops[1] = GEN_INT (7); /* shift count & 1 */
+
+ output_asm_insn (AS2 (shr%B0,%1,%0), xops);
+
+ output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
+ output_asm_insn (AS2 (shr%L3,%0,%3), xops);
+
+ RET;
+}
+}
+
+static char *
+output_196 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (shr%L0,%b2,%0);
+ else
+ return AS2 (shr%L0,%2,%0);
+}
+}
+
+static char *
+output_197 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (shr%W0,%b2,%0);
+ else
+ return AS2 (shr%W0,%2,%0);
+}
+}
+
+static char *
+output_198 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (shr%B0,%b2,%0);
+ else
+ return AS2 (shr%B0,%2,%0);
+}
+}
+
+static char *
+output_199 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (rol%L0,%b2,%0);
+ else
+ return AS2 (rol%L0,%2,%0);
+}
+}
+
+static char *
+output_200 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (rol%W0,%b2,%0);
+ else
+ return AS2 (rol%W0,%2,%0);
+}
+}
+
+static char *
+output_201 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (rol%B0,%b2,%0);
+ else
+ return AS2 (rol%B0,%2,%0);
+}
+}
+
+static char *
+output_202 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (ror%L0,%b2,%0);
+ else
+ return AS2 (ror%L0,%2,%0);
+}
+}
+
+static char *
+output_203 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (ror%W0,%b2,%0);
+ else
+ return AS2 (ror%W0,%2,%0);
+}
+}
+
+static char *
+output_204 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (REG_P (operands[2]))
+ return AS2 (ror%B0,%b2,%0);
+ else
+ return AS2 (ror%B0,%2,%0);
+}
+}
+
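+/* Single-bit store: bts sets the bit when the constant stored is 1,
+   btr clears it when the constant is 0.  */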
+static char *
+output_205 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ CC_STATUS_INIT;
+
+ if (INTVAL (operands[3]) == 1)
+ return AS2 (bts%L0,%2,%0);
+ else
+ return AS2 (btr%L0,%2,%0);
+}
+}
+
+static char *
+output_206 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ CC_STATUS_INIT;
+
+ return AS2 (btc%L0,%1,%0);
+}
+}
+
+static char *
+output_207 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ CC_STATUS_INIT;
+
+ return AS2 (btc%L0,%2,%0);
+}
+}
+
+static char *
+output_208 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ cc_status.flags |= CC_Z_IN_NOT_C;
+ return AS2 (bt%L0,%1,%0);
+}
+}
+
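+/* Bit-field test: rebuild the mask from the width and position
+   operands, then use a one-byte test (into the high byte, or at a
+   byte offset for memory) whenever the mask fits in one byte.  */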
+static char *
+output_209 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ unsigned int mask;
+
+ mask = ((1 << INTVAL (operands[1])) - 1) << INTVAL (operands[2]);
+ operands[1] = GEN_INT (mask);
+
+ if (QI_REG_P (operands[0]))
+ {
+ if ((mask & ~0xff) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ return AS2 (test%B0,%1,%b0);
+ }
+
+ if ((mask & ~0xff00) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ operands[1] = GEN_INT (mask >> 8);
+ return AS2 (test%B0,%1,%h0);
+ }
+ }
+
+ return AS2 (test%L0,%1,%0);
+}
+}
+
+static char *
+output_210 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ unsigned int mask;
+
+ mask = ((1 << INTVAL (operands[1])) - 1) << INTVAL (operands[2]);
+ operands[1] = GEN_INT (mask);
+
+ if (! REG_P (operands[0]) || QI_REG_P (operands[0]))
+ {
+ if ((mask & ~0xff) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ return AS2 (test%B0,%1,%b0);
+ }
+
+ if ((mask & ~0xff00) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ operands[1] = GEN_INT (mask >> 8);
+
+ if (QI_REG_P (operands[0]))
+ return AS2 (test%B0,%1,%h0);
+ else
+ {
+ operands[0] = adj_offsettable_operand (operands[0], 1);
+ return AS2 (test%B0,%1,%b0);
+ }
+ }
+
+ if (GET_CODE (operands[0]) == MEM && (mask & ~0xff0000) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ operands[1] = GEN_INT (mask >> 16);
+ operands[0] = adj_offsettable_operand (operands[0], 2);
+ return AS2 (test%B0,%1,%b0);
+ }
+
+ if (GET_CODE (operands[0]) == MEM && (mask & ~0xff000000) == 0)
+ {
+ cc_status.flags |= CC_NOT_NEGATIVE;
+ operands[1] = GEN_INT (mask >> 24);
+ operands[0] = adj_offsettable_operand (operands[0], 3);
+ return AS2 (test%B0,%1,%b0);
+ }
+ }
+
+ if (CONSTANT_P (operands[1]) || GET_CODE (operands[0]) == MEM)
+ return AS2 (test%L0,%1,%0);
+
+ return AS2 (test%L1,%0,%1);
+}
+}
+
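+/* scc patterns: after a bt insn the zero result lives in the carry
+   flag (CC_Z_IN_NOT_C), so test the carry instead of ZF.  */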
+static char *
+output_212 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ return AS1 (setnb,%0);
+ else
+ return AS1 (sete,%0);
+}
+}
+
+static char *
+output_214 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ return AS1 (setb,%0);
+ else
+ return AS1 (setne,%0);
+}
+}
+
+static char *
+output_216 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (sete,%0);
+
+ OUTPUT_JUMP ("setg %0", "seta %0", NULL_PTR);
+}
+}
+
+static char *
+output_218 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return "seta %0";
+}
+
+static char *
+output_220 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (sete,%0);
+
+ OUTPUT_JUMP ("setl %0", "setb %0", "sets %0");
+}
+}
+
+static char *
+output_222 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return "setb %0";
+}
+
+static char *
+output_224 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (sete,%0);
+
+ OUTPUT_JUMP ("setge %0", "setae %0", "setns %0");
+}
+}
+
+static char *
+output_226 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return "setae %0";
+}
+
+static char *
+output_228 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (setb,%0);
+
+ OUTPUT_JUMP ("setle %0", "setbe %0", NULL_PTR);
+}
+}
+
+static char *
+output_230 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return "setbe %0";
+}
+
+static char *
+output_232 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ return "jnc %l0";
+ else
+ return "je %l0";
+}
+}
+
+static char *
+output_234 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ return "jc %l0";
+ else
+ return "jne %l0";
+}
+}
+
+static char *
+output_236 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (je,%l0);
+
+ OUTPUT_JUMP ("jg %l0", "ja %l0", NULL_PTR);
+}
+}
+
+static char *
+output_240 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (je,%l0);
+
+ OUTPUT_JUMP ("jl %l0", "jb %l0", "js %l0");
+}
+}
+
+static char *
+output_244 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (je,%l0);
+
+ OUTPUT_JUMP ("jge %l0", "jae %l0", "jns %l0");
+}
+}
+
+static char *
+output_248 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (jb,%l0);
+
+ OUTPUT_JUMP ("jle %l0", "jbe %l0", NULL_PTR);
+}
+}
+
+static char *
+output_251 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ return "jc %l0";
+ else
+ return "jne %l0";
+}
+}
+
+static char *
+output_252 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ return "jnc %l0";
+ else
+ return "je %l0";
+}
+}
+
+static char *
+output_253 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (jne,%l0);
+
+ OUTPUT_JUMP ("jle %l0", "jbe %l0", NULL_PTR);
+}
+}
+
+static char *
+output_255 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (jne,%l0);
+
+ OUTPUT_JUMP ("jge %l0", "jae %l0", "jns %l0");
+}
+}
+
+static char *
+output_257 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (jne,%l0);
+
+ OUTPUT_JUMP ("jl %l0", "jb %l0", "js %l0");
+}
+}
+
+static char *
+output_259 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ return AS1 (jae,%l0);
+
+ OUTPUT_JUMP ("jg %l0", "ja %l0", NULL_PTR);
+}
+}
+
+static char *
+output_262 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ CC_STATUS_INIT;
+
+ return AS1 (jmp,%*%0);
+}
+}
+
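+/* PIC case-table dispatch: the table entries are presumably
+   GOT-relative, so the target is formed by subtracting the indexed
+   entry from the GOT pointer before the indirect jump.  */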
+static char *
+output_264 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[4];
+
+ xops[0] = operands[0];
+ xops[1] = operands[1];
+ xops[2] = operands[2];
+ xops[3] = pic_offset_table_rtx;
+
+ output_asm_insn (AS2 (mov%L2,%3,%2), xops);
+ output_asm_insn ("sub%L2 %l1@GOTOFF(%3,%0,4),%2", xops);
+ output_asm_insn (AS1 (jmp,%*%2), xops);
+ ASM_OUTPUT_ALIGN_CODE (asm_out_file);
+ RET;
+}
+}
+
+static char *
+output_265 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ CC_STATUS_INIT;
+
+ return AS1 (jmp,%*%0);
+}
+}
+
+static char *
+output_267 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (GET_CODE (operands[0]) == MEM
+ && ! CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+ {
+ operands[0] = XEXP (operands[0], 0);
+ return AS1 (call,%*%0);
+ }
+ else
+ return AS1 (call,%P0);
+}
+}
+
+static char *
+output_270 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (GET_CODE (operands[0]) == MEM
+ && ! CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+ {
+ operands[0] = XEXP (operands[0], 0);
+ return AS1 (call,%*%0);
+ }
+ else
+ return AS1 (call,%P0);
+}
+}
+
+static char *
+output_273 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (GET_CODE (operands[1]) == MEM
+ && ! CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+ {
+ operands[1] = XEXP (operands[1], 0);
+ output_asm_insn (AS1 (call,%*%1), operands);
+ }
+ else
+ output_asm_insn (AS1 (call,%P1), operands);
+
+ RET;
+}
+}
+
+static char *
+output_276 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ if (GET_CODE (operands[1]) == MEM
+ && ! CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+ {
+ operands[1] = XEXP (operands[1], 0);
+ output_asm_insn (AS1 (call,%*%1), operands);
+ }
+ else
+ output_asm_insn (AS1 (call,%P1), operands);
+
+ RET;
+}
+}
+
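+/* untyped_call: issue the call, store eax and edx into the result
+   block, then fnsave the 387 state after them.  */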
+static char *
+output_279 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx addr = operands[1];
+
+ if (GET_CODE (operands[0]) == MEM
+ && ! CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+ {
+ operands[0] = XEXP (operands[0], 0);
+ output_asm_insn (AS1 (call,%*%0), operands);
+ }
+ else
+ output_asm_insn (AS1 (call,%P0), operands);
+
+ operands[2] = gen_rtx (REG, SImode, 0);
+ output_asm_insn (AS2 (mov%L2,%2,%1), operands);
+
+ operands[2] = gen_rtx (REG, SImode, 1);
+ operands[1] = adj_offsettable_operand (addr, 4);
+ output_asm_insn (AS2 (mov%L2,%2,%1), operands);
+
+ operands[1] = adj_offsettable_operand (addr, 8);
+ return AS1 (fnsave,%1);
+}
+}
+
+static char *
+output_280 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx addr = operands[1];
+
+ output_asm_insn (AS1 (call,%P0), operands);
+
+ operands[2] = gen_rtx (REG, SImode, 0);
+ output_asm_insn (AS2 (mov%L2,%2,%1), operands);
+
+ operands[2] = gen_rtx (REG, SImode, 1);
+ operands[1] = adj_offsettable_operand (addr, 4);
+ output_asm_insn (AS2 (mov%L2,%2,%1), operands);
+
+ operands[1] = adj_offsettable_operand (addr, 8);
+ return AS1 (fnsave,%1);
+}
+}
+
+static char *
+output_283 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ function_epilogue (asm_out_file, get_frame_size ());
+ RET;
+}
+}
+
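+/* movstrsi: block move, constant lengths only; rep movsl for the
+   dword part, then movsw/movsb for the remaining bytes.  */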
+static char *
+output_286 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[2];
+
+ output_asm_insn ("cld", operands);
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (INTVAL (operands[2]) & ~0x03)
+ {
+ xops[0] = GEN_INT ((INTVAL (operands[2]) >> 2) & 0x3fffffff);
+ xops[1] = operands[4];
+
+ output_asm_insn (AS2 (mov%L1,%0,%1), xops);
+#ifdef INTEL_SYNTAX
+ output_asm_insn ("rep movsd", xops);
+#else
+ output_asm_insn ("rep\n\tmovsl", xops);
+#endif
+ }
+ if (INTVAL (operands[2]) & 0x02)
+ output_asm_insn ("movsw", operands);
+ if (INTVAL (operands[2]) & 0x01)
+ output_asm_insn ("movsb", operands);
+ }
+ else
+ abort ();
+ RET;
+}
+}
+
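+/* cmpstrsi: repz cmpsb compares the blocks; on a mismatch the two
+   differing bytes are reloaded (the string pointers have already
+   advanced past them) and subtracted for a signed result.  */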
+static char *
+output_288 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[4], label;
+
+ label = gen_label_rtx ();
+
+ output_asm_insn ("cld", operands);
+ output_asm_insn (AS2 (xor%L0,%0,%0), operands);
+ output_asm_insn ("repz\n\tcmps%B2", operands);
+ output_asm_insn ("je %l0", &label);
+
+ xops[0] = operands[0];
+ xops[1] = gen_rtx (MEM, QImode,
+ gen_rtx (PLUS, SImode, operands[1], constm1_rtx));
+ xops[2] = gen_rtx (MEM, QImode,
+ gen_rtx (PLUS, SImode, operands[2], constm1_rtx));
+ xops[3] = operands[3];
+
+ output_asm_insn (AS2 (movz%B1%L0,%1,%0), xops);
+ output_asm_insn (AS2 (movz%B2%L3,%2,%3), xops);
+
+ output_asm_insn (AS2 (sub%L0,%3,%0), xops);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (label));
+ RET;
+}
+}
+
+static char *
+output_289 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[2];
+
+ cc_status.flags |= CC_NOT_SIGNED;
+
+ xops[0] = gen_rtx (REG, QImode, 0);
+ xops[1] = CONST0_RTX (QImode);
+
+ output_asm_insn ("cld", operands);
+ output_asm_insn (AS2 (test%B0,%1,%0), xops);
+ return "repz\n\tcmps%B2";
+}
+}
+
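+/* ffssi2 kernel: bsf leaves the destination undefined when the
+   source is zero, so branch over a mov of -1 only when a bit was
+   found; a static counter keeps the local labels unique.  */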
+static char *
+output_291 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[3];
+ static int ffssi_label_number;
+ char buffer[30];
+
+ xops[0] = operands[0];
+ xops[1] = operands[1];
+ xops[2] = constm1_rtx;
+ /* Can there be a way to avoid the jump here? */
+ output_asm_insn (AS2 (bsf%L0,%1,%0), xops);
+#ifdef LOCAL_LABEL_PREFIX
+ sprintf (buffer, "jnz %sLFFSSI%d",
+ LOCAL_LABEL_PREFIX, ffssi_label_number);
+#else
+ sprintf (buffer, "jnz %sLFFSSI%d",
+ "", ffssi_label_number);
+#endif
+ output_asm_insn (buffer, xops);
+ output_asm_insn (AS2 (mov%L0,%2,%0), xops);
+#ifdef LOCAL_LABEL_PREFIX
+ sprintf (buffer, "%sLFFSSI%d:",
+ LOCAL_LABEL_PREFIX, ffssi_label_number);
+#else
+ sprintf (buffer, "%sLFFSSI%d:",
+ "", ffssi_label_number);
+#endif
+ output_asm_insn (buffer, xops);
+
+ ffssi_label_number++;
+ return "";
+}
+}
+
+static char *
+output_293 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[3];
+ static int ffshi_label_number;
+ char buffer[30];
+
+ xops[0] = operands[0];
+ xops[1] = operands[1];
+ xops[2] = constm1_rtx;
+ output_asm_insn (AS2 (bsf%W0,%1,%0), xops);
+#ifdef LOCAL_LABEL_PREFIX
+ sprintf (buffer, "jnz %sLFFSHI%d",
+ LOCAL_LABEL_PREFIX, ffshi_label_number);
+#else
+ sprintf (buffer, "jnz %sLFFSHI%d",
+ "", ffshi_label_number);
+#endif
+ output_asm_insn (buffer, xops);
+ output_asm_insn (AS2 (mov%W0,%2,%0), xops);
+#ifdef LOCAL_LABEL_PREFIX
+ sprintf (buffer, "%sLFFSHI%d:",
+ LOCAL_LABEL_PREFIX, ffshi_label_number);
+#else
+ sprintf (buffer, "%sLFFSHI%d:",
+ "", ffshi_label_number);
+#endif
+ output_asm_insn (buffer, xops);
+
+ ffshi_label_number++;
+ return "";
+}
+}
+
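+/* outputs 294 through 306 (the 387 add/sub/mul/div variants) all
+   defer to output_387_binary_op.  */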
+static char *
+output_294 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_387_binary_op (insn, operands);
+}
+
+static char *
+output_295 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_387_binary_op (insn, operands);
+}
+
+static char *
+output_296 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_387_binary_op (insn, operands);
+}
+
+static char *
+output_297 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_387_binary_op (insn, operands);
+}
+
+static char *
+output_298 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_387_binary_op (insn, operands);
+}
+
+static char *
+output_299 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_387_binary_op (insn, operands);
+}
+
+static char *
+output_300 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_387_binary_op (insn, operands);
+}
+
+static char *
+output_301 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_387_binary_op (insn, operands);
+}
+
+static char *
+output_302 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_387_binary_op (insn, operands);
+}
+
+static char *
+output_303 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_387_binary_op (insn, operands);
+}
+
+static char *
+output_304 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_387_binary_op (insn, operands);
+}
+
+static char *
+output_305 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_387_binary_op (insn, operands);
+}
+
+static char *
+output_306 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ return (char *) output_387_binary_op (insn, operands);
+}
+
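+/* strlensi kernel: scan for the terminator with repnz scasb,
+   starting the count register at -1; the expander turns the final
+   count back into a length.  */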
+static char *
+output_308 (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+
+{
+ rtx xops[2];
+
+ xops[0] = operands[0];
+ xops[1] = constm1_rtx;
+ output_asm_insn ("cld", operands);
+ output_asm_insn (AS2 (mov%L0,%1,%0), xops);
+ return "repnz\n\tscas%B2";
+}
+}
+
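+/* Fixed assembler template for each insn code, or 0 where the
+   output is computed at run time by a function in insn_outfun or
+   the pattern emits no code.  */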
+char * const insn_template[] =
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ "push%L0 %1",
+ "push%L0 %1",
+ 0,
+ 0,
+ "push%W0 %1",
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ "mul%B0 %2",
+ "imul%B0 %2",
+ "mul%L0 %2",
+ "imul%L0 %2",
+ 0,
+ 0,
+ 0,
+ "idiv%B0 %2",
+ "div%B0 %2",
+ 0,
+ 0,
+ 0,
+ 0,
+ "cwtd\n\tidiv%W0 %2",
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ "neg%L0 %0",
+ "neg%W0 %0",
+ "neg%B0 %0",
+ "fchs",
+ "fchs",
+ "fchs",
+ "fchs",
+ "fchs",
+ "fabs",
+ "fabs",
+ "fabs",
+ "fabs",
+ "fabs",
+ "fsqrt",
+ "fsqrt",
+ "fsqrt",
+ "fsqrt",
+ "fsqrt",
+ "fsqrt",
+ "fsin",
+ "fsin",
+ "fsin",
+ "fcos",
+ "fcos",
+ "fcos",
+ "not%L0 %0",
+ "not%W0 %0",
+ "not%B0 %0",
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ "ja %l0",
+ 0,
+ 0,
+ 0,
+ "jb %l0",
+ 0,
+ 0,
+ 0,
+ "jae %l0",
+ 0,
+ 0,
+ 0,
+ "jbe %l0",
+ 0,
+ 0,
+ 0,
+ "jbe %l0",
+ 0,
+ "jae %l0",
+ 0,
+ "jb %l0",
+ 0,
+ "ja %l0",
+ "jmp %l0",
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ "call %P0",
+ 0,
+ 0,
+ "call %P0",
+ 0,
+ 0,
+ "call %P1",
+ 0,
+ 0,
+ "call %P1",
+ 0,
+ 0,
+ 0,
+ 0,
+ "frstor %0",
+ 0,
+ "nop",
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ };
+
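+/* Output function for each insn code whose template is computed at
+   output time; 0 where insn_template holds a fixed string.  */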
+char *(*const insn_outfun[])() =
+ {
+ output_0,
+ 0,
+ output_2,
+ 0,
+ output_4,
+ 0,
+ output_6,
+ 0,
+ output_8,
+ 0,
+ output_10,
+ 0,
+ output_12,
+ 0,
+ output_14,
+ 0,
+ output_16,
+ 0,
+ output_18,
+ output_19,
+ output_20,
+ output_21,
+ output_22,
+ output_23,
+ output_24,
+ output_25,
+ output_26,
+ output_27,
+ output_28,
+ output_29,
+ output_30,
+ output_31,
+ output_32,
+ output_33,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ output_43,
+ output_44,
+ output_45,
+ 0,
+ 0,
+ 0,
+ output_49,
+ 0,
+ output_51,
+ output_52,
+ output_53,
+ output_54,
+ output_55,
+ output_56,
+ output_57,
+ output_58,
+ output_59,
+ output_60,
+ output_61,
+ output_62,
+ output_63,
+ output_64,
+ output_65,
+ output_66,
+ output_67,
+ output_68,
+ output_69,
+ output_70,
+ output_71,
+ output_72,
+ output_73,
+ output_74,
+ output_75,
+ output_76,
+ 0,
+ output_78,
+ output_79,
+ output_80,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ output_87,
+ output_88,
+ output_89,
+ 0,
+ 0,
+ 0,
+ output_93,
+ output_94,
+ output_95,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ output_102,
+ output_103,
+ output_104,
+ output_105,
+ output_106,
+ output_107,
+ output_108,
+ output_109,
+ output_110,
+ output_111,
+ output_112,
+ 0,
+ 0,
+ 0,
+ output_116,
+ output_117,
+ output_118,
+ output_119,
+ 0,
+ 0,
+ 0,
+ output_123,
+ output_124,
+ output_125,
+ output_126,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ output_139,
+ 0,
+ output_141,
+ output_142,
+ output_143,
+ output_144,
+ output_145,
+ output_146,
+ output_147,
+ output_148,
+ output_149,
+ output_150,
+ output_151,
+ output_152,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ output_182,
+ output_183,
+ output_184,
+ output_185,
+ output_186,
+ 0,
+ output_188,
+ output_189,
+ output_190,
+ output_191,
+ output_192,
+ 0,
+ output_194,
+ output_195,
+ output_196,
+ output_197,
+ output_198,
+ output_199,
+ output_200,
+ output_201,
+ output_202,
+ output_203,
+ output_204,
+ output_205,
+ output_206,
+ output_207,
+ output_208,
+ output_209,
+ output_210,
+ 0,
+ output_212,
+ 0,
+ output_214,
+ 0,
+ output_216,
+ 0,
+ output_218,
+ 0,
+ output_220,
+ 0,
+ output_222,
+ 0,
+ output_224,
+ 0,
+ output_226,
+ 0,
+ output_228,
+ 0,
+ output_230,
+ 0,
+ output_232,
+ 0,
+ output_234,
+ 0,
+ output_236,
+ 0,
+ 0,
+ 0,
+ output_240,
+ 0,
+ 0,
+ 0,
+ output_244,
+ 0,
+ 0,
+ 0,
+ output_248,
+ 0,
+ 0,
+ output_251,
+ output_252,
+ output_253,
+ 0,
+ output_255,
+ 0,
+ output_257,
+ 0,
+ output_259,
+ 0,
+ 0,
+ output_262,
+ 0,
+ output_264,
+ output_265,
+ 0,
+ output_267,
+ 0,
+ 0,
+ output_270,
+ 0,
+ 0,
+ output_273,
+ 0,
+ 0,
+ output_276,
+ 0,
+ 0,
+ output_279,
+ output_280,
+ 0,
+ 0,
+ output_283,
+ 0,
+ 0,
+ output_286,
+ 0,
+ output_288,
+ output_289,
+ 0,
+ output_291,
+ 0,
+ output_293,
+ output_294,
+ output_295,
+ output_296,
+ output_297,
+ output_298,
+ output_299,
+ output_300,
+ output_301,
+ output_302,
+ output_303,
+ output_304,
+ output_305,
+ output_306,
+ 0,
+ output_308,
+ };
+
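+/* gen_... function for each named pattern; 0 for unnamed ones.  */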
+rtx (*const insn_gen_function[]) () =
+ {
+ gen_tstsi_1,
+ gen_tstsi,
+ gen_tsthi_1,
+ gen_tsthi,
+ gen_tstqi_1,
+ gen_tstqi,
+ gen_tstsf_cc,
+ gen_tstsf,
+ gen_tstdf_cc,
+ gen_tstdf,
+ gen_tstxf_cc,
+ gen_tstxf,
+ gen_cmpsi_1,
+ gen_cmpsi,
+ gen_cmphi_1,
+ gen_cmphi,
+ gen_cmpqi_1,
+ gen_cmpqi,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ gen_cmpsf_cc_1,
+ 0,
+ 0,
+ 0,
+ gen_cmpxf,
+ gen_cmpdf,
+ gen_cmpsf,
+ gen_cmpxf_cc,
+ gen_cmpxf_ccfpeq,
+ gen_cmpdf_cc,
+ gen_cmpdf_ccfpeq,
+ gen_cmpsf_cc,
+ gen_cmpsf_ccfpeq,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ gen_movsi,
+ 0,
+ 0,
+ gen_movhi,
+ gen_movstricthi,
+ 0,
+ gen_movqi,
+ gen_movstrictqi,
+ 0,
+ gen_movsf,
+ 0,
+ gen_swapdf,
+ gen_movdf,
+ 0,
+ gen_swapxf,
+ gen_movxf,
+ 0,
+ gen_movdi,
+ gen_zero_extendhisi2,
+ gen_zero_extendqihi2,
+ gen_zero_extendqisi2,
+ gen_zero_extendsidi2,
+ gen_extendsidi2,
+ gen_extendhisi2,
+ gen_extendqihi2,
+ gen_extendqisi2,
+ gen_extendsfdf2,
+ gen_extenddfxf2,
+ gen_extendsfxf2,
+ gen_truncdfsf2,
+ 0,
+ gen_truncxfsf2,
+ gen_truncxfdf2,
+ gen_fixuns_truncxfsi2,
+ gen_fixuns_truncdfsi2,
+ gen_fixuns_truncsfsi2,
+ gen_fix_truncxfdi2,
+ gen_fix_truncdfdi2,
+ gen_fix_truncsfdi2,
+ 0,
+ 0,
+ 0,
+ gen_fix_truncxfsi2,
+ gen_fix_truncdfsi2,
+ gen_fix_truncsfsi2,
+ 0,
+ 0,
+ 0,
+ gen_floatsisf2,
+ gen_floatdisf2,
+ gen_floatsidf2,
+ gen_floatdidf2,
+ gen_floatsixf2,
+ gen_floatdixf2,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ gen_adddi3,
+ gen_addsi3,
+ gen_addhi3,
+ gen_addqi3,
+ 0,
+ gen_addxf3,
+ gen_adddf3,
+ gen_addsf3,
+ gen_subdi3,
+ gen_subsi3,
+ gen_subhi3,
+ gen_subqi3,
+ gen_subxf3,
+ gen_subdf3,
+ gen_subsf3,
+ 0,
+ gen_mulhi3,
+ 0,
+ gen_mulsi3,
+ gen_umulqihi3,
+ gen_mulqihi3,
+ gen_umulsidi3,
+ gen_mulsidi3,
+ gen_mulxf3,
+ gen_muldf3,
+ gen_mulsf3,
+ gen_divqi3,
+ gen_udivqi3,
+ gen_divxf3,
+ gen_divdf3,
+ gen_divsf3,
+ gen_divmodsi4,
+ gen_divmodhi4,
+ gen_udivmodsi4,
+ gen_udivmodhi4,
+ gen_andsi3,
+ gen_andhi3,
+ gen_andqi3,
+ gen_iorsi3,
+ gen_iorhi3,
+ gen_iorqi3,
+ gen_xorsi3,
+ gen_xorhi3,
+ gen_xorqi3,
+ gen_negdi2,
+ gen_negsi2,
+ gen_neghi2,
+ gen_negqi2,
+ gen_negsf2,
+ gen_negdf2,
+ 0,
+ gen_negxf2,
+ 0,
+ gen_abssf2,
+ gen_absdf2,
+ 0,
+ gen_absxf2,
+ 0,
+ gen_sqrtsf2,
+ gen_sqrtdf2,
+ 0,
+ gen_sqrtxf2,
+ 0,
+ 0,
+ gen_sindf2,
+ gen_sinsf2,
+ 0,
+ gen_cosdf2,
+ gen_cossf2,
+ 0,
+ gen_one_cmplsi2,
+ gen_one_cmplhi2,
+ gen_one_cmplqi2,
+ gen_ashldi3,
+ gen_ashldi3_const_int,
+ gen_ashldi3_non_const_int,
+ gen_ashlsi3,
+ gen_ashlhi3,
+ gen_ashlqi3,
+ gen_ashrdi3,
+ gen_ashrdi3_const_int,
+ gen_ashrdi3_non_const_int,
+ gen_ashrsi3,
+ gen_ashrhi3,
+ gen_ashrqi3,
+ gen_lshrdi3,
+ gen_lshrdi3_const_int,
+ gen_lshrdi3_non_const_int,
+ gen_lshrsi3,
+ gen_lshrhi3,
+ gen_lshrqi3,
+ gen_rotlsi3,
+ gen_rotlhi3,
+ gen_rotlqi3,
+ gen_rotrsi3,
+ gen_rotrhi3,
+ gen_rotrqi3,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ gen_seq,
+ 0,
+ gen_sne,
+ 0,
+ gen_sgt,
+ 0,
+ gen_sgtu,
+ 0,
+ gen_slt,
+ 0,
+ gen_sltu,
+ 0,
+ gen_sge,
+ 0,
+ gen_sgeu,
+ 0,
+ gen_sle,
+ 0,
+ gen_sleu,
+ 0,
+ gen_beq,
+ 0,
+ gen_bne,
+ 0,
+ gen_bgt,
+ 0,
+ gen_bgtu,
+ 0,
+ gen_blt,
+ 0,
+ gen_bltu,
+ 0,
+ gen_bge,
+ 0,
+ gen_bgeu,
+ 0,
+ gen_ble,
+ 0,
+ gen_bleu,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ gen_jump,
+ gen_indirect_jump,
+ gen_casesi,
+ 0,
+ gen_tablejump,
+ gen_call_pop,
+ 0,
+ 0,
+ gen_call,
+ 0,
+ 0,
+ gen_call_value_pop,
+ 0,
+ 0,
+ gen_call_value,
+ 0,
+ 0,
+ gen_untyped_call,
+ 0,
+ 0,
+ gen_untyped_return,
+ gen_update_return,
+ gen_return,
+ gen_nop,
+ gen_movstrsi,
+ 0,
+ gen_cmpstrsi,
+ 0,
+ 0,
+ gen_ffssi2,
+ 0,
+ gen_ffshi2,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ gen_strlensi,
+ 0,
+ };
+
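+/* Pattern names for debugging; unnamed patterns are written
+   relative to the nearest named neighbor (e.g. "cmpqi+1").  */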
+char *insn_name[] =
+ {
+ "tstsi_1",
+ "tstsi",
+ "tsthi_1",
+ "tsthi",
+ "tstqi_1",
+ "tstqi",
+ "tstsf_cc",
+ "tstsf",
+ "tstdf_cc",
+ "tstdf",
+ "tstxf_cc",
+ "tstxf",
+ "cmpsi_1",
+ "cmpsi",
+ "cmphi_1",
+ "cmphi",
+ "cmpqi_1",
+ "cmpqi",
+ "cmpqi+1",
+ "cmpqi+2",
+ "cmpqi+3",
+ "cmpqi+4",
+ "cmpqi+5",
+ "cmpqi+6",
+ "cmpsf_cc_1-6",
+ "cmpsf_cc_1-5",
+ "cmpsf_cc_1-4",
+ "cmpsf_cc_1-3",
+ "cmpsf_cc_1-2",
+ "cmpsf_cc_1-1",
+ "cmpsf_cc_1",
+ "cmpsf_cc_1+1",
+ "cmpsf_cc_1+2",
+ "cmpxf-1",
+ "cmpxf",
+ "cmpdf",
+ "cmpsf",
+ "cmpxf_cc",
+ "cmpxf_ccfpeq",
+ "cmpdf_cc",
+ "cmpdf_ccfpeq",
+ "cmpsf_cc",
+ "cmpsf_ccfpeq",
+ "cmpsf_ccfpeq+1",
+ "cmpsf_ccfpeq+2",
+ "cmpsf_ccfpeq+3",
+ "movsi-2",
+ "movsi-1",
+ "movsi",
+ "movsi+1",
+ "movhi-1",
+ "movhi",
+ "movstricthi",
+ "movstricthi+1",
+ "movqi",
+ "movstrictqi",
+ "movstrictqi+1",
+ "movsf",
+ "movsf+1",
+ "swapdf",
+ "movdf",
+ "movdf+1",
+ "swapxf",
+ "movxf",
+ "movxf+1",
+ "movdi",
+ "zero_extendhisi2",
+ "zero_extendqihi2",
+ "zero_extendqisi2",
+ "zero_extendsidi2",
+ "extendsidi2",
+ "extendhisi2",
+ "extendqihi2",
+ "extendqisi2",
+ "extendsfdf2",
+ "extenddfxf2",
+ "extendsfxf2",
+ "truncdfsf2",
+ "truncdfsf2+1",
+ "truncxfsf2",
+ "truncxfdf2",
+ "fixuns_truncxfsi2",
+ "fixuns_truncdfsi2",
+ "fixuns_truncsfsi2",
+ "fix_truncxfdi2",
+ "fix_truncdfdi2",
+ "fix_truncsfdi2",
+ "fix_truncsfdi2+1",
+ "fix_truncsfdi2+2",
+ "fix_truncxfsi2-1",
+ "fix_truncxfsi2",
+ "fix_truncdfsi2",
+ "fix_truncsfsi2",
+ "fix_truncsfsi2+1",
+ "fix_truncsfsi2+2",
+ "floatsisf2-1",
+ "floatsisf2",
+ "floatdisf2",
+ "floatsidf2",
+ "floatdidf2",
+ "floatsixf2",
+ "floatdixf2",
+ "floatdixf2+1",
+ "floatdixf2+2",
+ "floatdixf2+3",
+ "adddi3-3",
+ "adddi3-2",
+ "adddi3-1",
+ "adddi3",
+ "addsi3",
+ "addhi3",
+ "addqi3",
+ "addqi3+1",
+ "addxf3",
+ "adddf3",
+ "addsf3",
+ "subdi3",
+ "subsi3",
+ "subhi3",
+ "subqi3",
+ "subxf3",
+ "subdf3",
+ "subsf3",
+ "subsf3+1",
+ "mulhi3",
+ "mulhi3+1",
+ "mulsi3",
+ "umulqihi3",
+ "mulqihi3",
+ "umulsidi3",
+ "mulsidi3",
+ "mulxf3",
+ "muldf3",
+ "mulsf3",
+ "divqi3",
+ "udivqi3",
+ "divxf3",
+ "divdf3",
+ "divsf3",
+ "divmodsi4",
+ "divmodhi4",
+ "udivmodsi4",
+ "udivmodhi4",
+ "andsi3",
+ "andhi3",
+ "andqi3",
+ "iorsi3",
+ "iorhi3",
+ "iorqi3",
+ "xorsi3",
+ "xorhi3",
+ "xorqi3",
+ "negdi2",
+ "negsi2",
+ "neghi2",
+ "negqi2",
+ "negsf2",
+ "negdf2",
+ "negdf2+1",
+ "negxf2",
+ "negxf2+1",
+ "abssf2",
+ "absdf2",
+ "absdf2+1",
+ "absxf2",
+ "absxf2+1",
+ "sqrtsf2",
+ "sqrtdf2",
+ "sqrtdf2+1",
+ "sqrtxf2",
+ "sqrtxf2+1",
+ "sindf2-1",
+ "sindf2",
+ "sinsf2",
+ "sinsf2+1",
+ "cosdf2",
+ "cossf2",
+ "cossf2+1",
+ "one_cmplsi2",
+ "one_cmplhi2",
+ "one_cmplqi2",
+ "ashldi3",
+ "ashldi3_const_int",
+ "ashldi3_non_const_int",
+ "ashlsi3",
+ "ashlhi3",
+ "ashlqi3",
+ "ashrdi3",
+ "ashrdi3_const_int",
+ "ashrdi3_non_const_int",
+ "ashrsi3",
+ "ashrhi3",
+ "ashrqi3",
+ "lshrdi3",
+ "lshrdi3_const_int",
+ "lshrdi3_non_const_int",
+ "lshrsi3",
+ "lshrhi3",
+ "lshrqi3",
+ "rotlsi3",
+ "rotlhi3",
+ "rotlqi3",
+ "rotrsi3",
+ "rotrhi3",
+ "rotrqi3",
+ "rotrqi3+1",
+ "rotrqi3+2",
+ "rotrqi3+3",
+ "seq-3",
+ "seq-2",
+ "seq-1",
+ "seq",
+ "seq+1",
+ "sne",
+ "sne+1",
+ "sgt",
+ "sgt+1",
+ "sgtu",
+ "sgtu+1",
+ "slt",
+ "slt+1",
+ "sltu",
+ "sltu+1",
+ "sge",
+ "sge+1",
+ "sgeu",
+ "sgeu+1",
+ "sle",
+ "sle+1",
+ "sleu",
+ "sleu+1",
+ "beq",
+ "beq+1",
+ "bne",
+ "bne+1",
+ "bgt",
+ "bgt+1",
+ "bgtu",
+ "bgtu+1",
+ "blt",
+ "blt+1",
+ "bltu",
+ "bltu+1",
+ "bge",
+ "bge+1",
+ "bgeu",
+ "bgeu+1",
+ "ble",
+ "ble+1",
+ "bleu",
+ "bleu+1",
+ "bleu+2",
+ "bleu+3",
+ "bleu+4",
+ "bleu+5",
+ "bleu+6",
+ "jump-5",
+ "jump-4",
+ "jump-3",
+ "jump-2",
+ "jump-1",
+ "jump",
+ "indirect_jump",
+ "casesi",
+ "casesi+1",
+ "tablejump",
+ "call_pop",
+ "call_pop+1",
+ "call-1",
+ "call",
+ "call+1",
+ "call_value_pop-1",
+ "call_value_pop",
+ "call_value_pop+1",
+ "call_value-1",
+ "call_value",
+ "call_value+1",
+ "untyped_call-1",
+ "untyped_call",
+ "untyped_call+1",
+ "untyped_return-1",
+ "untyped_return",
+ "update_return",
+ "return",
+ "nop",
+ "movstrsi",
+ "movstrsi+1",
+ "cmpstrsi",
+ "cmpstrsi+1",
+ "ffssi2-1",
+ "ffssi2",
+ "ffssi2+1",
+ "ffshi2",
+ "ffshi2+1",
+ "ffshi2+2",
+ "ffshi2+3",
+ "ffshi2+4",
+ "ffshi2+5",
+ "ffshi2+6",
+ "ffshi2+7",
+ "strlensi-7",
+ "strlensi-6",
+ "strlensi-5",
+ "strlensi-4",
+ "strlensi-3",
+ "strlensi-2",
+ "strlensi-1",
+ "strlensi",
+ "strlensi+1",
+ };
+char **insn_name_ptr = insn_name;
+
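+/* Number of distinct operands in each pattern.  */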
+const int insn_n_operands[] =
+ {
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 4,
+ 4,
+ 4,
+ 4,
+ 4,
+ 3,
+ 4,
+ 4,
+ 4,
+ 4,
+ 4,
+ 3,
+ 4,
+ 4,
+ 4,
+ 3,
+ 2,
+ 2,
+ 2,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 3,
+ 2,
+ 2,
+ 8,
+ 8,
+ 8,
+ 6,
+ 6,
+ 6,
+ 5,
+ 5,
+ 5,
+ 5,
+ 5,
+ 5,
+ 5,
+ 5,
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 4,
+ 4,
+ 4,
+ 4,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 4,
+ 3,
+ 3,
+ 2,
+ 3,
+ 3,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 7,
+ 3,
+ 1,
+ 4,
+ 4,
+ 4,
+ 2,
+ 2,
+ 2,
+ 5,
+ 5,
+ 5,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 1,
+ 0,
+ 0,
+ 5,
+ 5,
+ 5,
+ 5,
+ 4,
+ 2,
+ 2,
+ 2,
+ 2,
+ 4,
+ 4,
+ 4,
+ 4,
+ 4,
+ 4,
+ 4,
+ 4,
+ 4,
+ 4,
+ 4,
+ 4,
+ 4,
+ 4,
+ 4,
+ };
+
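+/* Number of match_dup expressions in each pattern.  */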
+const int insn_n_dups[] =
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 2,
+ 0,
+ 0,
+ 2,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 0,
+ 0,
+ 0,
+ 7,
+ 7,
+ 7,
+ 5,
+ 5,
+ 5,
+ 1,
+ 1,
+ 1,
+ 2,
+ 2,
+ 2,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 2,
+ 2,
+ 2,
+ 2,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 3,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 2,
+ 2,
+ 3,
+ 3,
+ 3,
+ 2,
+ 0,
+ 2,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 5,
+ 1,
+ };
+
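+/* Constraint string for each operand of each pattern.  */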
+char *const insn_operand_constraint[][MAX_RECOG_OPERANDS] =
+ {
+ { "rm", },
+ { "", },
+ { "rm", },
+ { "", },
+ { "qm", },
+ { "", },
+ { "f", "=a", },
+ { "", "", },
+ { "f", "=a", },
+ { "", "", },
+ { "f", "=a", },
+ { "", "", },
+ { "mr,r", "ri,mr", },
+ { "", "", },
+ { "mr,r", "ri,mr", },
+ { "", "", },
+ { "q,mq", "qm,nq", },
+ { "", "", },
+ { "f", "f", "", "=a", },
+ { "f", "rm", "", "=a", },
+ { "rm", "f", "", "=a", },
+ { "f", "fm", "", "=a", },
+ { "f", "fm", "", "=a", },
+ { "f", "f", "=a", },
+ { "f,fm", "fm,f", "", "=a,a", },
+ { "f", "rm", "", "=a", },
+ { "rm", "f", "", "=a", },
+ { "f", "fm", "", "=a", },
+ { "fm", "f", "", "=a", },
+ { "f", "f", "=a", },
+ { "f,fm", "fm,f", "", "=a,a", },
+ { "f", "rm", "", "=a", },
+ { "rm", "f", "", "=a", },
+ { "f", "f", "=a", },
+ { "", "", },
+ { "", "", },
+ { "", "", },
+ { "", "", "", },
+ { "", "", "", },
+ { "", "", "", },
+ { "", "", "", },
+ { "", "", "", },
+ { "", "", "", },
+ { "%ro", "ri", },
+ { "%ro", "ri", },
+ { "%qm", "qi", },
+ { "=<", "g", },
+ { "=<", "ri", },
+ { "", "", },
+ { "=g,r", "ri,m", },
+ { "=<", "g", },
+ { "=g,r", "ri,m", },
+ { "+g,r", "ri,m", },
+ { "=<", "q", },
+ { "=q,*r,qm", "*g,q,qn", },
+ { "+qm,q", "*qn,m", },
+ { "=<,<", "gF,f", },
+ { "=*rfm,*rf,f,!*rm", "*rf,*rfm,fG,fF", },
+ { "=<,<", "gF,f", },
+ { "f", "f", },
+ { "=*rfm,*rf,f,!*rm", "*rf,*rfm,fG,fF", },
+ { "=<,<", "gF,f", },
+ { "f", "f", },
+ { "=f,fm,!*rf,!*rm", "fmG,f,*rfm,*rfF", },
+ { "=<", "roiF", },
+ { "=r,rm", "m,riF", },
+ { "=r", "rm", },
+ { "=r", "qm", },
+ { "=r", "qm", },
+ { "=r", "0", },
+ { "=r", "0", },
+ { "=r", "rm", },
+ { "=r", "qm", },
+ { "=r", "qm", },
+ { "=fm,f", "f,fm", },
+ { "=fm,f,f,!*r", "f,fm,!*r,f", },
+ { "=fm,f,f,!*r", "f,fm,!*r,f", },
+ { "", "", },
+ { "=f,m", "0,f", "m,m", },
+ { "=m,!*r", "f,f", },
+ { "=m,!*r", "f,f", },
+ { "", "", "", "", "", "", "", "", },
+ { "", "", "", "", "", "", "", "", },
+ { "", "", "", "", "", "", "", "", },
+ { "", "", "", "", "", "", },
+ { "", "", "", "", "", "", },
+ { "", "", "", "", "", "", },
+ { "=rm", "f", "m", "m", "=&q", },
+ { "=rm", "f", "m", "m", "=&q", },
+ { "=rm", "f", "m", "m", "=&q", },
+ { "", "", "", "", "", },
+ { "", "", "", "", "", },
+ { "", "", "", "", "", },
+ { "=rm", "f", "m", "m", "=&q", },
+ { "=rm", "f", "m", "m", "=&q", },
+ { "=rm", "f", "m", "m", "=&q", },
+ { "", "", },
+ { "", "", },
+ { "", "", },
+ { "", "", },
+ { "", "", },
+ { "", "", },
+ { "=f", "rm", },
+ { "=f", "rm", },
+ { "=f", "rm", },
+ { "=f", "rm", },
+ { "=f,f", "m,!*r", },
+ { "=f", "rm", },
+ { "=&r,ro", "%0,0", "o,riF", },
+ { "=?r,rm,r", "%r,0,0", "ri,ri,rm", },
+ { "=rm,r", "%0,0", "ri,rm", },
+ { "=qm,q", "%0,0", "qn,qmn", },
+ { "=r", "p", },
+ { "", "", "", },
+ { "", "", "", },
+ { "", "", "", },
+ { "=&r,ro", "0,0", "o,riF", },
+ { "=rm,r", "0,0", "ri,rm", },
+ { "=rm,r", "0,0", "ri,rm", },
+ { "=qm,q", "0,0", "qn,qmn", },
+ { "", "", "", },
+ { "", "", "", },
+ { "", "", "", },
+ { "=r", "%0", "r", },
+ { "=r,r", "%0,rm", "g,i", },
+ { "=r", "%0", "r", },
+ { "=r,r", "%0,rm", "g,i", },
+ { "=a", "%0", "qm", },
+ { "=a", "%0", "qm", },
+ { "=A", "%0", "rm", },
+ { "=A", "%0", "rm", },
+ { "", "", "", },
+ { "", "", "", },
+ { "", "", "", },
+ { "=a", "0", "qm", },
+ { "=a", "0", "qm", },
+ { "", "", "", },
+ { "", "", "", },
+ { "", "", "", },
+ { "=a", "0", "rm", "=&d", },
+ { "=a", "0", "rm", "=&d", },
+ { "=a", "0", "rm", "=&d", },
+ { "=a", "0", "rm", "=&d", },
+ { "=r,r,rm,r", "%rm,qm,0,0", "L,K,ri,rm", },
+ { "=rm,r", "%0,0", "ri,rm", },
+ { "=qm,q", "%0,0", "qn,qmn", },
+ { "=rm,r", "%0,0", "ri,rm", },
+ { "=rm,r", "%0,0", "ri,rm", },
+ { "=qm,q", "%0,0", "qn,qmn", },
+ { "=rm,r", "%0,0", "ri,rm", },
+ { "=rm,r", "%0,0", "ri,rm", },
+ { "=qm,q", "%0,0", "qn,qm", },
+ { "=&ro", "0", },
+ { "=rm", "0", },
+ { "=rm", "0", },
+ { "=qm", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=f", "0", },
+ { "=rm", "0", },
+ { "=rm", "0", },
+ { "=qm", "0", },
+ { "", "", "", },
+ { "=&r", "0", "J", },
+ { "=&r", "0", "c", },
+ { "=r,rm", "r,0", "M,cI", },
+ { "=rm", "0", "cI", },
+ { "=qm", "0", "cI", },
+ { "", "", "", },
+ { "=&r", "0", "J", },
+ { "=&r", "0", "c", },
+ { "=rm", "0", "cI", },
+ { "=rm", "0", "cI", },
+ { "=qm", "0", "cI", },
+ { "", "", "", },
+ { "=&r", "0", "J", },
+ { "=&r", "0", "c", },
+ { "=rm", "0", "cI", },
+ { "=rm", "0", "cI", },
+ { "=qm", "0", "cI", },
+ { "=rm", "0", "cI", },
+ { "=rm", "0", "cI", },
+ { "=qm", "0", "cI", },
+ { "=rm", "0", "cI", },
+ { "=rm", "0", "cI", },
+ { "=qm", "0", "cI", },
+ { "+rm", "", "r", "n", },
+ { "=rm", "r", "0", },
+ { "=rm", "0", "r", },
+ { "r", "r", },
+ { "r", "n", "n", },
+ { "rm", "n", "n", },
+ { "", },
+ { "=q", },
+ { "", },
+ { "=q", },
+ { "", },
+ { "=q", },
+ { "", },
+ { "=q", },
+ { "", },
+ { "=q", },
+ { "", },
+ { "=q", },
+ { "", },
+ { "=q", },
+ { "", },
+ { "=q", },
+ { "", },
+ { "=q", },
+ { "", },
+ { "=q", },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { "rm", },
+ { "", "", "", "", "", "", "", },
+ { "r", "", "=&r", },
+ { "rm", },
+ { "", "", "", "", },
+ { "m", "g", "", "i", },
+ { "", "g", "", "i", },
+ { "", "", },
+ { "m", "g", },
+ { "", "g", },
+ { "", "", "", "", "", },
+ { "=rf", "m", "g", "", "i", },
+ { "=rf", "", "g", "", "i", },
+ { "", "", "", },
+ { "=rf", "m", "g", },
+ { "=rf", "", "g", },
+ { "", "", "", },
+ { "m", "o", "", },
+ { "", "o", "", },
+ { "", "", },
+ { "m", },
+ { 0 },
+ { 0 },
+ { "", "", "", "", "", },
+ { "D", "S", "n", "i", "=&c", },
+ { "", "", "", "", "", },
+ { "=&r", "S", "D", "c", "i", },
+ { "S", "D", "c", "i", },
+ { "", "", },
+ { "=&r", "rm", },
+ { "", "", },
+ { "=&r", "rm", },
+ { "=f,f", "0,fm", "fm,0", "", },
+ { "=f", "rm", "0", "", },
+ { "=f,f", "0,f", "f,0", "", },
+ { "=f", "rm", "0", "", },
+ { "=f,f", "fm,0", "0,f", "", },
+ { "=f", "0", "rm", "", },
+ { "=f,f", "0,f", "fm,0", "", },
+ { "=f,f", "fm,0", "0,f", "", },
+ { "=f", "0", "rm", "", },
+ { "=f,f", "0,f", "fm,0", "", },
+ { "=f,f", "0,fm", "fm,0", "", },
+ { "=f", "rm", "0", "", },
+ { "=f", "0", "rm", "", },
+ { "", "", "", "", },
+ { "=&c", "D", "a", "i", },
+ };
+
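+/* Machine mode of each operand of each pattern.  */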
+const enum machine_mode insn_operand_mode[][MAX_RECOG_OPERANDS] =
+ {
+ { SImode, },
+ { SImode, },
+ { HImode, },
+ { HImode, },
+ { QImode, },
+ { QImode, },
+ { SFmode, HImode, },
+ { SFmode, HImode, },
+ { DFmode, HImode, },
+ { DFmode, HImode, },
+ { XFmode, HImode, },
+ { XFmode, HImode, },
+ { SImode, SImode, },
+ { SImode, SImode, },
+ { HImode, HImode, },
+ { HImode, HImode, },
+ { QImode, QImode, },
+ { QImode, QImode, },
+ { XFmode, XFmode, VOIDmode, HImode, },
+ { XFmode, SImode, VOIDmode, HImode, },
+ { SImode, XFmode, VOIDmode, HImode, },
+ { XFmode, DFmode, VOIDmode, HImode, },
+ { XFmode, SFmode, VOIDmode, HImode, },
+ { XFmode, XFmode, HImode, },
+ { DFmode, DFmode, VOIDmode, HImode, },
+ { DFmode, SImode, VOIDmode, HImode, },
+ { SImode, DFmode, VOIDmode, HImode, },
+ { DFmode, SFmode, VOIDmode, HImode, },
+ { SFmode, DFmode, VOIDmode, HImode, },
+ { DFmode, DFmode, HImode, },
+ { SFmode, SFmode, VOIDmode, HImode, },
+ { SFmode, SImode, VOIDmode, HImode, },
+ { SImode, SFmode, VOIDmode, HImode, },
+ { SFmode, SFmode, HImode, },
+ { XFmode, XFmode, },
+ { DFmode, DFmode, },
+ { SFmode, SFmode, },
+ { XFmode, XFmode, HImode, },
+ { XFmode, XFmode, HImode, },
+ { DFmode, DFmode, HImode, },
+ { DFmode, DFmode, HImode, },
+ { SFmode, SFmode, HImode, },
+ { SFmode, SFmode, HImode, },
+ { SImode, SImode, },
+ { HImode, HImode, },
+ { QImode, QImode, },
+ { SImode, SImode, },
+ { SImode, SImode, },
+ { SImode, SImode, },
+ { SImode, SImode, },
+ { HImode, HImode, },
+ { HImode, HImode, },
+ { HImode, HImode, },
+ { QImode, QImode, },
+ { QImode, QImode, },
+ { QImode, QImode, },
+ { SFmode, SFmode, },
+ { SFmode, SFmode, },
+ { DFmode, DFmode, },
+ { DFmode, DFmode, },
+ { DFmode, DFmode, },
+ { XFmode, XFmode, },
+ { XFmode, XFmode, },
+ { XFmode, XFmode, },
+ { DImode, DImode, },
+ { DImode, DImode, },
+ { SImode, HImode, },
+ { HImode, QImode, },
+ { SImode, QImode, },
+ { DImode, SImode, },
+ { DImode, SImode, },
+ { SImode, HImode, },
+ { HImode, QImode, },
+ { SImode, QImode, },
+ { DFmode, SFmode, },
+ { XFmode, DFmode, },
+ { XFmode, SFmode, },
+ { SFmode, DFmode, },
+ { SFmode, DFmode, SFmode, },
+ { SFmode, XFmode, },
+ { DFmode, XFmode, },
+ { SImode, XFmode, VOIDmode, VOIDmode, VOIDmode, VOIDmode, VOIDmode, SImode, },
+ { SImode, DFmode, VOIDmode, VOIDmode, VOIDmode, VOIDmode, VOIDmode, SImode, },
+ { SImode, SFmode, VOIDmode, VOIDmode, VOIDmode, VOIDmode, VOIDmode, SImode, },
+ { DImode, XFmode, VOIDmode, VOIDmode, VOIDmode, SImode, },
+ { DImode, DFmode, VOIDmode, VOIDmode, VOIDmode, SImode, },
+ { DImode, SFmode, VOIDmode, VOIDmode, VOIDmode, SImode, },
+ { DImode, XFmode, SImode, SImode, SImode, },
+ { DImode, DFmode, SImode, SImode, SImode, },
+ { DImode, SFmode, SImode, SImode, SImode, },
+ { SImode, XFmode, VOIDmode, VOIDmode, SImode, },
+ { SImode, DFmode, VOIDmode, VOIDmode, SImode, },
+ { SImode, SFmode, VOIDmode, VOIDmode, SImode, },
+ { SImode, XFmode, SImode, SImode, SImode, },
+ { SImode, DFmode, SImode, SImode, SImode, },
+ { SImode, SFmode, SImode, SImode, SImode, },
+ { SFmode, SImode, },
+ { SFmode, DImode, },
+ { DFmode, SImode, },
+ { DFmode, DImode, },
+ { XFmode, SImode, },
+ { XFmode, DImode, },
+ { XFmode, DImode, },
+ { DFmode, DImode, },
+ { SFmode, DImode, },
+ { DFmode, SImode, },
+ { XFmode, SImode, },
+ { SFmode, SImode, },
+ { DImode, DImode, DImode, },
+ { SImode, SImode, SImode, },
+ { HImode, HImode, HImode, },
+ { QImode, QImode, QImode, },
+ { SImode, QImode, },
+ { XFmode, XFmode, XFmode, },
+ { DFmode, DFmode, DFmode, },
+ { SFmode, SFmode, SFmode, },
+ { DImode, DImode, DImode, },
+ { SImode, SImode, SImode, },
+ { HImode, HImode, HImode, },
+ { QImode, QImode, QImode, },
+ { XFmode, XFmode, XFmode, },
+ { DFmode, DFmode, DFmode, },
+ { SFmode, SFmode, SFmode, },
+ { HImode, HImode, HImode, },
+ { HImode, HImode, HImode, },
+ { SImode, SImode, SImode, },
+ { SImode, SImode, SImode, },
+ { HImode, QImode, QImode, },
+ { HImode, QImode, QImode, },
+ { DImode, SImode, SImode, },
+ { DImode, SImode, SImode, },
+ { XFmode, XFmode, XFmode, },
+ { DFmode, DFmode, DFmode, },
+ { SFmode, SFmode, SFmode, },
+ { QImode, HImode, QImode, },
+ { QImode, HImode, QImode, },
+ { XFmode, XFmode, XFmode, },
+ { DFmode, DFmode, DFmode, },
+ { SFmode, SFmode, SFmode, },
+ { SImode, SImode, SImode, SImode, },
+ { HImode, HImode, HImode, HImode, },
+ { SImode, SImode, SImode, SImode, },
+ { HImode, HImode, HImode, HImode, },
+ { SImode, SImode, SImode, },
+ { HImode, HImode, HImode, },
+ { QImode, QImode, QImode, },
+ { SImode, SImode, SImode, },
+ { HImode, HImode, HImode, },
+ { QImode, QImode, QImode, },
+ { SImode, SImode, SImode, },
+ { HImode, HImode, HImode, },
+ { QImode, QImode, QImode, },
+ { DImode, DImode, },
+ { SImode, SImode, },
+ { HImode, HImode, },
+ { QImode, QImode, },
+ { SFmode, SFmode, },
+ { DFmode, DFmode, },
+ { DFmode, SFmode, },
+ { XFmode, XFmode, },
+ { XFmode, DFmode, },
+ { SFmode, SFmode, },
+ { DFmode, DFmode, },
+ { DFmode, SFmode, },
+ { XFmode, XFmode, },
+ { XFmode, DFmode, },
+ { SFmode, SFmode, },
+ { DFmode, DFmode, },
+ { DFmode, SFmode, },
+ { XFmode, XFmode, },
+ { XFmode, DFmode, },
+ { XFmode, SFmode, },
+ { DFmode, DFmode, },
+ { SFmode, SFmode, },
+ { DFmode, SFmode, },
+ { DFmode, DFmode, },
+ { SFmode, SFmode, },
+ { DFmode, SFmode, },
+ { SImode, SImode, },
+ { HImode, HImode, },
+ { QImode, QImode, },
+ { DImode, DImode, QImode, },
+ { DImode, DImode, QImode, },
+ { DImode, DImode, QImode, },
+ { SImode, SImode, SImode, },
+ { HImode, HImode, HImode, },
+ { QImode, QImode, QImode, },
+ { DImode, DImode, QImode, },
+ { DImode, DImode, QImode, },
+ { DImode, DImode, QImode, },
+ { SImode, SImode, SImode, },
+ { HImode, HImode, HImode, },
+ { QImode, QImode, QImode, },
+ { DImode, DImode, QImode, },
+ { DImode, DImode, QImode, },
+ { DImode, DImode, QImode, },
+ { SImode, SImode, SImode, },
+ { HImode, HImode, HImode, },
+ { QImode, QImode, QImode, },
+ { SImode, SImode, SImode, },
+ { HImode, HImode, HImode, },
+ { QImode, QImode, QImode, },
+ { SImode, SImode, SImode, },
+ { HImode, HImode, HImode, },
+ { QImode, QImode, QImode, },
+ { SImode, VOIDmode, SImode, SImode, },
+ { SImode, SImode, SImode, },
+ { SImode, SImode, SImode, },
+ { SImode, SImode, },
+ { SImode, SImode, SImode, },
+ { QImode, SImode, SImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { QImode, },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { VOIDmode },
+ { SImode, },
+ { SImode, SImode, SImode, VOIDmode, VOIDmode, VOIDmode, SImode, },
+ { SImode, VOIDmode, SImode, },
+ { SImode, },
+ { QImode, SImode, VOIDmode, SImode, },
+ { QImode, SImode, VOIDmode, SImode, },
+ { SImode, SImode, VOIDmode, SImode, },
+ { QImode, SImode, },
+ { QImode, SImode, },
+ { SImode, SImode, },
+ { VOIDmode, QImode, SImode, VOIDmode, SImode, },
+ { VOIDmode, QImode, SImode, VOIDmode, SImode, },
+ { VOIDmode, SImode, SImode, VOIDmode, SImode, },
+ { VOIDmode, QImode, SImode, },
+ { VOIDmode, QImode, SImode, },
+ { VOIDmode, SImode, SImode, },
+ { QImode, BLKmode, VOIDmode, },
+ { QImode, DImode, VOIDmode, },
+ { SImode, DImode, VOIDmode, },
+ { BLKmode, VOIDmode, },
+ { SImode, },
+ { VOIDmode },
+ { VOIDmode },
+ { BLKmode, BLKmode, SImode, SImode, SImode, },
+ { SImode, SImode, SImode, SImode, SImode, },
+ { SImode, BLKmode, BLKmode, SImode, SImode, },
+ { SImode, SImode, SImode, SImode, SImode, },
+ { SImode, SImode, SImode, SImode, },
+ { SImode, SImode, },
+ { SImode, SImode, },
+ { HImode, HImode, },
+ { HImode, SImode, },
+ { DFmode, DFmode, DFmode, DFmode, },
+ { DFmode, SImode, DFmode, DFmode, },
+ { XFmode, XFmode, XFmode, XFmode, },
+ { XFmode, SImode, XFmode, XFmode, },
+ { XFmode, SFmode, XFmode, XFmode, },
+ { XFmode, XFmode, SImode, XFmode, },
+ { XFmode, XFmode, SFmode, XFmode, },
+ { DFmode, SFmode, DFmode, DFmode, },
+ { DFmode, DFmode, SImode, DFmode, },
+ { DFmode, DFmode, SFmode, DFmode, },
+ { SFmode, SFmode, SFmode, SFmode, },
+ { SFmode, SImode, SFmode, SFmode, },
+ { SFmode, SFmode, SImode, SFmode, },
+ { SImode, BLKmode, QImode, SImode, },
+ { SImode, SImode, QImode, SImode, },
+ };
+
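+/* Nonzero for operands wrapped in strict_low_part.  */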
+const char insn_operand_strict_low[][MAX_RECOG_OPERANDS] =
+ {
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 1, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 1, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0, },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0, },
+ { 0, 0, 0, 0, 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, 0, },
+ { 0, 0, },
+ { 0, },
+ { 0 },
+ { 0 },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ { 0, 0, 0, 0, },
+ };
+
+extern int nonimmediate_operand ();
+extern int register_operand ();
+extern int scratch_operand ();
+extern int general_operand ();
+extern int VOIDmode_compare_op ();
+extern int push_operand ();
+extern int memory_operand ();
+extern int address_operand ();
+extern int nonmemory_operand ();
+extern int const_int_operand ();
+extern int indirect_operand ();
+extern int immediate_operand ();
+extern int call_insn_operand ();
+extern int symbolic_operand ();
+extern int binary_387_op ();
+
+int (*const insn_operand_predicate[][MAX_RECOG_OPERANDS])() =
+ {
+ { nonimmediate_operand, },
+ { nonimmediate_operand, },
+ { nonimmediate_operand, },
+ { nonimmediate_operand, },
+ { nonimmediate_operand, },
+ { nonimmediate_operand, },
+ { register_operand, scratch_operand, },
+ { register_operand, scratch_operand, },
+ { register_operand, scratch_operand, },
+ { register_operand, scratch_operand, },
+ { register_operand, scratch_operand, },
+ { register_operand, scratch_operand, },
+ { nonimmediate_operand, general_operand, },
+ { nonimmediate_operand, general_operand, },
+ { nonimmediate_operand, general_operand, },
+ { nonimmediate_operand, general_operand, },
+ { nonimmediate_operand, general_operand, },
+ { nonimmediate_operand, general_operand, },
+ { nonimmediate_operand, nonimmediate_operand, VOIDmode_compare_op, scratch_operand, },
+ { register_operand, nonimmediate_operand, VOIDmode_compare_op, scratch_operand, },
+ { nonimmediate_operand, register_operand, VOIDmode_compare_op, scratch_operand, },
+ { register_operand, nonimmediate_operand, VOIDmode_compare_op, scratch_operand, },
+ { register_operand, nonimmediate_operand, VOIDmode_compare_op, scratch_operand, },
+ { register_operand, register_operand, scratch_operand, },
+ { nonimmediate_operand, nonimmediate_operand, VOIDmode_compare_op, scratch_operand, },
+ { register_operand, nonimmediate_operand, VOIDmode_compare_op, scratch_operand, },
+ { nonimmediate_operand, register_operand, VOIDmode_compare_op, scratch_operand, },
+ { register_operand, nonimmediate_operand, VOIDmode_compare_op, scratch_operand, },
+ { nonimmediate_operand, register_operand, VOIDmode_compare_op, scratch_operand, },
+ { register_operand, register_operand, scratch_operand, },
+ { nonimmediate_operand, nonimmediate_operand, VOIDmode_compare_op, scratch_operand, },
+ { register_operand, nonimmediate_operand, VOIDmode_compare_op, scratch_operand, },
+ { nonimmediate_operand, register_operand, VOIDmode_compare_op, scratch_operand, },
+ { register_operand, register_operand, scratch_operand, },
+ { register_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, },
+ { register_operand, register_operand, scratch_operand, },
+ { register_operand, register_operand, scratch_operand, },
+ { register_operand, register_operand, scratch_operand, },
+ { register_operand, register_operand, scratch_operand, },
+ { register_operand, register_operand, scratch_operand, },
+ { register_operand, register_operand, scratch_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { push_operand, general_operand, },
+ { push_operand, general_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { push_operand, general_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { push_operand, general_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { push_operand, general_operand, },
+ { general_operand, general_operand, },
+ { push_operand, general_operand, },
+ { register_operand, register_operand, },
+ { general_operand, general_operand, },
+ { push_operand, general_operand, },
+ { register_operand, register_operand, },
+ { general_operand, general_operand, },
+ { push_operand, general_operand, },
+ { general_operand, general_operand, },
+ { general_operand, nonimmediate_operand, },
+ { general_operand, nonimmediate_operand, },
+ { general_operand, nonimmediate_operand, },
+ { register_operand, register_operand, },
+ { register_operand, register_operand, },
+ { general_operand, nonimmediate_operand, },
+ { general_operand, nonimmediate_operand, },
+ { general_operand, nonimmediate_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { nonimmediate_operand, register_operand, },
+ { nonimmediate_operand, register_operand, memory_operand, },
+ { general_operand, register_operand, },
+ { general_operand, register_operand, },
+ { general_operand, register_operand, 0, 0, 0, 0, 0, scratch_operand, },
+ { general_operand, register_operand, 0, 0, 0, 0, 0, scratch_operand, },
+ { general_operand, register_operand, 0, 0, 0, 0, 0, scratch_operand, },
+ { general_operand, register_operand, 0, 0, 0, scratch_operand, },
+ { general_operand, register_operand, 0, 0, 0, scratch_operand, },
+ { general_operand, register_operand, 0, 0, 0, scratch_operand, },
+ { general_operand, register_operand, memory_operand, memory_operand, scratch_operand, },
+ { general_operand, register_operand, memory_operand, memory_operand, scratch_operand, },
+ { general_operand, register_operand, memory_operand, memory_operand, scratch_operand, },
+ { general_operand, register_operand, 0, 0, scratch_operand, },
+ { general_operand, register_operand, 0, 0, scratch_operand, },
+ { general_operand, register_operand, 0, 0, scratch_operand, },
+ { general_operand, register_operand, memory_operand, memory_operand, scratch_operand, },
+ { general_operand, register_operand, memory_operand, memory_operand, scratch_operand, },
+ { general_operand, register_operand, memory_operand, memory_operand, scratch_operand, },
+ { register_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, },
+ { register_operand, general_operand, },
+ { register_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, },
+ { register_operand, general_operand, },
+ { register_operand, nonimmediate_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { register_operand, address_operand, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, nonimmediate_operand, nonimmediate_operand, },
+ { general_operand, nonimmediate_operand, nonimmediate_operand, },
+ { register_operand, register_operand, nonimmediate_operand, },
+ { register_operand, register_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, },
+ { register_operand, register_operand, general_operand, register_operand, },
+ { register_operand, register_operand, general_operand, register_operand, },
+ { register_operand, register_operand, general_operand, register_operand, },
+ { register_operand, register_operand, general_operand, register_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, register_operand, },
+ { register_operand, register_operand, },
+ { register_operand, register_operand, },
+ { register_operand, register_operand, },
+ { register_operand, register_operand, },
+ { register_operand, register_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { register_operand, register_operand, nonmemory_operand, },
+ { register_operand, register_operand, const_int_operand, },
+ { register_operand, register_operand, register_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { register_operand, register_operand, nonmemory_operand, },
+ { register_operand, register_operand, const_int_operand, },
+ { register_operand, register_operand, register_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { register_operand, register_operand, nonmemory_operand, },
+ { register_operand, register_operand, const_int_operand, },
+ { register_operand, register_operand, register_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { general_operand, general_operand, nonmemory_operand, },
+ { general_operand, 0, general_operand, const_int_operand, },
+ { general_operand, general_operand, general_operand, },
+ { general_operand, general_operand, general_operand, },
+ { register_operand, general_operand, },
+ { register_operand, const_int_operand, const_int_operand, },
+ { general_operand, const_int_operand, const_int_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { register_operand, },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { general_operand, },
+ { general_operand, general_operand, general_operand, 0, 0, 0, scratch_operand, },
+ { register_operand, 0, scratch_operand, },
+ { general_operand, },
+ { indirect_operand, general_operand, 0, immediate_operand, },
+ { call_insn_operand, general_operand, 0, immediate_operand, },
+ { symbolic_operand, general_operand, 0, immediate_operand, },
+ { indirect_operand, general_operand, },
+ { call_insn_operand, general_operand, },
+ { symbolic_operand, general_operand, },
+ { 0, indirect_operand, general_operand, 0, immediate_operand, },
+ { 0, call_insn_operand, general_operand, 0, immediate_operand, },
+ { 0, symbolic_operand, general_operand, 0, immediate_operand, },
+ { 0, indirect_operand, general_operand, },
+ { 0, call_insn_operand, general_operand, },
+ { 0, symbolic_operand, general_operand, },
+ { indirect_operand, memory_operand, 0, },
+ { call_insn_operand, memory_operand, 0, },
+ { symbolic_operand, memory_operand, 0, },
+ { memory_operand, 0, },
+ { memory_operand, },
+ { 0 },
+ { 0 },
+ { memory_operand, memory_operand, const_int_operand, const_int_operand, scratch_operand, },
+ { address_operand, address_operand, const_int_operand, immediate_operand, scratch_operand, },
+ { general_operand, general_operand, general_operand, general_operand, immediate_operand, },
+ { general_operand, address_operand, address_operand, register_operand, immediate_operand, },
+ { address_operand, address_operand, register_operand, immediate_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { general_operand, general_operand, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, binary_387_op, },
+ { register_operand, general_operand, general_operand, binary_387_op, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, binary_387_op, },
+ { register_operand, general_operand, general_operand, binary_387_op, },
+ { register_operand, general_operand, general_operand, binary_387_op, },
+ { register_operand, general_operand, general_operand, binary_387_op, },
+ { register_operand, general_operand, general_operand, binary_387_op, },
+ { register_operand, general_operand, general_operand, binary_387_op, },
+ { register_operand, general_operand, general_operand, binary_387_op, },
+ { register_operand, general_operand, general_operand, binary_387_op, },
+ { register_operand, nonimmediate_operand, nonimmediate_operand, binary_387_op, },
+ { register_operand, general_operand, general_operand, binary_387_op, },
+ { register_operand, general_operand, general_operand, binary_387_op, },
+ { register_operand, general_operand, register_operand, immediate_operand, },
+ { register_operand, address_operand, register_operand, immediate_operand, },
+ };
+
+const int insn_n_alternatives[] =
+ {
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 2,
+ 0,
+ 2,
+ 0,
+ 2,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 2,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 2,
+ 1,
+ 1,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 2,
+ 1,
+ 2,
+ 2,
+ 1,
+ 3,
+ 2,
+ 2,
+ 4,
+ 2,
+ 1,
+ 4,
+ 2,
+ 1,
+ 4,
+ 1,
+ 2,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 2,
+ 4,
+ 4,
+ 0,
+ 2,
+ 2,
+ 2,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 0,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 2,
+ 1,
+ 2,
+ 3,
+ 2,
+ 2,
+ 1,
+ 0,
+ 0,
+ 0,
+ 2,
+ 2,
+ 2,
+ 2,
+ 0,
+ 0,
+ 0,
+ 1,
+ 2,
+ 1,
+ 2,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 0,
+ 0,
+ 1,
+ 1,
+ 0,
+ 0,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 4,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 2,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 0,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 0,
+ 1,
+ 0,
+ 0,
+ 0,
+ 1,
+ 0,
+ 1,
+ 1,
+ 0,
+ 1,
+ 0,
+ 1,
+ 2,
+ 1,
+ 2,
+ 1,
+ 2,
+ 1,
+ 2,
+ 2,
+ 1,
+ 2,
+ 2,
+ 1,
+ 1,
+ 0,
+ 1,
+ };
diff --git a/gnu/usr.bin/cc/cc_int/insn-peep.c b/gnu/usr.bin/cc/cc_int/insn-peep.c
new file mode 100644
index 0000000..37136c4
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/insn-peep.c
@@ -0,0 +1,28 @@
+/* Generated automatically by the program `genpeep'
+from the machine description file `md'. */
+
+#include "config.h"
+#include "rtl.h"
+#include "regs.h"
+#include "output.h"
+#include "real.h"
+
+extern rtx peep_operand[];
+
+#define operands peep_operand
+
+rtx
+peephole (ins1)
+ rtx ins1;
+{
+ rtx insn, x, pat;
+ int i;
+
+ /* A peephole may not combine insns across a basic-block boundary,
+ so give up at once if the insn after INS1 is a barrier. */
+ if (NEXT_INSN (ins1)
+ && GET_CODE (NEXT_INSN (ins1)) == BARRIER)
+ return 0;
+
+ /* No peephole patterns were emitted for this machine description,
+ so recognition always fails. */
+ return 0;
+}
+
+rtx peep_operand[2];
diff --git a/gnu/usr.bin/cc/cc_int/insn-recog.c b/gnu/usr.bin/cc/cc_int/insn-recog.c
new file mode 100644
index 0000000..293dd7e
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/insn-recog.c
@@ -0,0 +1,7138 @@
+/* Generated automatically by the program `genrecog'
+from the machine description file `md'. */
+
+#include "config.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "real.h"
+#include "output.h"
+#include "flags.h"
+
+
+/* `recog' contains a decision tree
+ that recognizes whether the rtx X0 is a valid instruction.
+
+ recog returns -1 if the rtx is not valid.
+ If the rtx is valid, recog returns a nonnegative number
+ which is the insn code number for the pattern that matched.
+ This is the same as the order in the machine description of
+ the entry that matched. This number can be used as an index into various
+ insn_* tables, such as insn_templates, insn_outfun, and insn_n_operands
+ (found in insn-output.c).
+
+ The third argument to recog is an optional pointer to an int.
+ If present, recog will accept a pattern if it matches except for
+ missing CLOBBER expressions at the end. In that case, the value
+ pointed to by the optional pointer will be set to the number of
+ CLOBBERs that need to be added (it should be initialized to zero by
+ the caller). If it is set nonzero, the caller should allocate a
+ PARALLEL of the appropriate size, copy the initial entries, and call
+ add_clobbers (found in insn-emit.c) to fill in the CLOBBERs. */
+
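+/* Editorial usage sketch, not part of the generated file: roughly how
+ a caller such as combine.c uses the pnum_clobbers protocol described
+ above. Kept inside a comment so the compiled file is unchanged;
+ `pat' and `insn' are assumed to be in scope, and only the single-SET
+ case is shown (a PARALLEL source pattern would need its entries
+ copied individually before adding the CLOBBERs).
+
+ int num_clobbers = 0;
+ int icode = recog (pat, insn, &num_clobbers);
+
+ if (icode >= 0 && num_clobbers > 0)
+ {
+ rtx newpat = gen_rtx (PARALLEL, VOIDmode,
+ rtvec_alloc (num_clobbers + 1));
+
+ XVECEXP (newpat, 0, 0) = pat;
+ add_clobbers (newpat, icode);
+ pat = newpat;
+ }
+
+ The returned `icode' can likewise index the insn_* tables found in
+ insn-output.c, e.g. insn_n_operands[icode]. */
+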
+rtx recog_operand[MAX_RECOG_OPERANDS];
+
+rtx *recog_operand_loc[MAX_RECOG_OPERANDS];
+
+rtx *recog_dup_loc[MAX_DUP_OPERANDS];
+
+char recog_dup_num[MAX_DUP_OPERANDS];
+
+#define operands recog_operand
+
+int
+recog_1 (x0, insn, pnum_clobbers)
+ register rtx x0;
+ rtx insn;
+ int *pnum_clobbers;
+{
+ register rtx *ro = &recog_operand[0];
+ register rtx x1, x2, x3, x4, x5, x6;
+ int tem;
+
+ x1 = XEXP (x0, 1);
+ switch (GET_MODE (x1))
+ {
+ case HImode:
+ switch (GET_CODE (x1))
+ {
+ case ZERO_EXTEND:
+ goto L370;
+ case SIGN_EXTEND:
+ goto L390;
+ case PLUS:
+ goto L603;
+ case MINUS:
+ goto L626;
+ case MULT:
+ goto L660;
+ case AND:
+ goto L747;
+ case IOR:
+ goto L762;
+ case XOR:
+ goto L777;
+ case NEG:
+ goto L795;
+ case NOT:
+ goto L904;
+ case ASHIFT:
+ goto L930;
+ case ASHIFTRT:
+ goto L958;
+ case LSHIFTRT:
+ goto L986;
+ case ROTATE:
+ goto L1001;
+ case ROTATERT:
+ goto L1016;
+ }
+ }
+ if (general_operand (x1, HImode))
+ {
+ ro[1] = x1;
+ return 51;
+ }
+ goto ret0;
+
+ L370:
+ x2 = XEXP (x1, 0);
+ if (nonimmediate_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ return 67;
+ }
+ goto ret0;
+
+ L390:
+ x2 = XEXP (x1, 0);
+ if (nonimmediate_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ return 72;
+ }
+ goto ret0;
+
+ L603:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ goto L604;
+ }
+ goto ret0;
+
+ L604:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, HImode))
+ {
+ ro[2] = x2;
+ return 110;
+ }
+ goto ret0;
+
+ L626:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ goto L627;
+ }
+ goto ret0;
+
+ L627:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, HImode))
+ {
+ ro[2] = x2;
+ return 118;
+ }
+ goto ret0;
+
+ L660:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case HImode:
+ switch (GET_CODE (x2))
+ {
+ case ZERO_EXTEND:
+ goto L661;
+ case SIGN_EXTEND:
+ goto L668;
+ }
+ }
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ goto L637;
+ }
+ goto ret0;
+
+ L661:
+ x3 = XEXP (x2, 0);
+ if (nonimmediate_operand (x3, QImode))
+ {
+ ro[1] = x3;
+ goto L662;
+ }
+ goto ret0;
+
+ L662:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == HImode && GET_CODE (x2) == ZERO_EXTEND && 1)
+ goto L663;
+ goto ret0;
+
+ L663:
+ x3 = XEXP (x2, 0);
+ if (nonimmediate_operand (x3, QImode))
+ {
+ ro[2] = x3;
+ return 127;
+ }
+ goto ret0;
+
+ L668:
+ x3 = XEXP (x2, 0);
+ if (nonimmediate_operand (x3, QImode))
+ {
+ ro[1] = x3;
+ goto L669;
+ }
+ goto ret0;
+
+ L669:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == HImode && GET_CODE (x2) == SIGN_EXTEND && 1)
+ goto L670;
+ goto ret0;
+
+ L670:
+ x3 = XEXP (x2, 0);
+ if (nonimmediate_operand (x3, QImode))
+ {
+ ro[2] = x3;
+ return 128;
+ }
+ goto ret0;
+
+ L637:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, HImode))
+ goto L643;
+ goto ret0;
+
+ L643:
+ ro[2] = x2;
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 0x80)
+ return 123;
+ L644:
+ ro[2] = x2;
+ return 124;
+
+ L747:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ goto L748;
+ }
+ goto ret0;
+
+ L748:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, HImode))
+ {
+ ro[2] = x2;
+ return 144;
+ }
+ goto ret0;
+
+ L762:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ goto L763;
+ }
+ goto ret0;
+
+ L763:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, HImode))
+ {
+ ro[2] = x2;
+ return 147;
+ }
+ goto ret0;
+
+ L777:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ goto L778;
+ }
+ goto ret0;
+
+ L778:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, HImode))
+ {
+ ro[2] = x2;
+ return 150;
+ }
+ goto ret0;
+
+ L795:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ return 154;
+ }
+ goto ret0;
+
+ L904:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ return 179;
+ }
+ goto ret0;
+
+ L930:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ goto L931;
+ }
+ goto ret0;
+
+ L931:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, HImode))
+ {
+ ro[2] = x2;
+ return 185;
+ }
+ goto ret0;
+
+ L958:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ goto L959;
+ }
+ goto ret0;
+
+ L959:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, HImode))
+ {
+ ro[2] = x2;
+ return 191;
+ }
+ goto ret0;
+
+ L986:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ goto L987;
+ }
+ goto ret0;
+
+ L987:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, HImode))
+ {
+ ro[2] = x2;
+ return 197;
+ }
+ goto ret0;
+
+ L1001:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ goto L1002;
+ }
+ goto ret0;
+
+ L1002:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, HImode))
+ {
+ ro[2] = x2;
+ return 200;
+ }
+ goto ret0;
+
+ L1016:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ goto L1017;
+ }
+ goto ret0;
+
+ L1017:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, HImode))
+ {
+ ro[2] = x2;
+ return 203;
+ }
+ goto ret0;
+ ret0: return -1;
+}
+
+int
+recog_2 (x0, insn, pnum_clobbers)
+ register rtx x0;
+ rtx insn;
+ int *pnum_clobbers;
+{
+ register rtx *ro = &recog_operand[0];
+ register rtx x1, x2, x3, x4, x5, x6;
+ int tem;
+
+ x1 = XEXP (x0, 1);
+ switch (GET_MODE (x1))
+ {
+ case SImode:
+ if (nonimmediate_operand (x1, SImode))
+ {
+ ro[0] = x1;
+ return 0;
+ }
+ break;
+ case HImode:
+ if (nonimmediate_operand (x1, HImode))
+ {
+ ro[0] = x1;
+ return 2;
+ }
+ break;
+ case QImode:
+ if (nonimmediate_operand (x1, QImode))
+ {
+ ro[0] = x1;
+ return 4;
+ }
+ break;
+ case SFmode:
+ if (pnum_clobbers != 0 && register_operand (x1, SFmode))
+ {
+ ro[0] = x1;
+ if (TARGET_80387 && ! TARGET_IEEE_FP)
+ {
+ *pnum_clobbers = 1;
+ return 6;
+ }
+ }
+ break;
+ case DFmode:
+ if (pnum_clobbers != 0 && register_operand (x1, DFmode))
+ {
+ ro[0] = x1;
+ if (TARGET_80387 && ! TARGET_IEEE_FP)
+ {
+ *pnum_clobbers = 1;
+ return 8;
+ }
+ }
+ break;
+ case XFmode:
+ if (pnum_clobbers != 0 && register_operand (x1, XFmode))
+ {
+ ro[0] = x1;
+ if (TARGET_80387 && ! TARGET_IEEE_FP)
+ {
+ *pnum_clobbers = 1;
+ return 10;
+ }
+ }
+ }
+ switch (GET_CODE (x1))
+ {
+ case COMPARE:
+ goto L39;
+ case ZERO_EXTRACT:
+ goto L1046;
+ }
+ L61:
+ if (VOIDmode_compare_op (x1, VOIDmode))
+ {
+ ro[2] = x1;
+ goto L91;
+ }
+ L134:
+ switch (GET_MODE (x1))
+ {
+ case CCFPEQmode:
+ switch (GET_CODE (x1))
+ {
+ case COMPARE:
+ goto L135;
+ }
+ break;
+ case SImode:
+ switch (GET_CODE (x1))
+ {
+ case AND:
+ goto L282;
+ }
+ break;
+ case HImode:
+ switch (GET_CODE (x1))
+ {
+ case AND:
+ goto L287;
+ }
+ break;
+ case QImode:
+ if (GET_CODE (x1) == AND && 1)
+ goto L292;
+ }
+ goto ret0;
+
+ L39:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case SImode:
+ if (nonimmediate_operand (x2, SImode))
+ {
+ ro[0] = x2;
+ goto L40;
+ }
+ break;
+ case HImode:
+ if (nonimmediate_operand (x2, HImode))
+ {
+ ro[0] = x2;
+ goto L45;
+ }
+ break;
+ case QImode:
+ if (nonimmediate_operand (x2, QImode))
+ {
+ ro[0] = x2;
+ goto L50;
+ }
+ }
+ goto L61;
+
+ L40:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ if (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ return 12;
+ }
+ goto L61;
+
+ L45:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ if (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ return 14;
+ }
+ goto L61;
+
+ L50:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ if (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ return 16;
+ }
+ goto L61;
+
+ L1046:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case SImode:
+ if (register_operand (x2, SImode))
+ {
+ ro[0] = x2;
+ goto L1047;
+ }
+ break;
+ case QImode:
+ if (general_operand (x2, QImode))
+ {
+ ro[0] = x2;
+ goto L1059;
+ }
+ }
+ goto L61;
+
+ L1047:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) != CONST_INT)
+ {
+ goto L61;
+ }
+ if (XWINT (x2, 0) == 1 && 1)
+ goto L1048;
+ L1053:
+ ro[1] = x2;
+ goto L1054;
+
+ L1048:
+ x2 = XEXP (x1, 2);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ if (GET_CODE (operands[1]) != CONST_INT)
+ return 208;
+ }
+ x2 = XEXP (x1, 1);
+ goto L1053;
+
+ L1054:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == CONST_INT && 1)
+ {
+ ro[2] = x2;
+ return 209;
+ }
+ goto L61;
+
+ L1059:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && 1)
+ {
+ ro[1] = x2;
+ goto L1060;
+ }
+ goto L61;
+
+ L1060:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == CONST_INT && 1)
+ {
+ ro[2] = x2;
+ if (GET_CODE (operands[0]) != MEM || ! MEM_VOLATILE_P (operands[0]))
+ return 210;
+ }
+ goto L61;
+
+ L91:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case XFmode:
+ if (GET_CODE (x2) == FLOAT && 1)
+ goto L92;
+ if (nonimmediate_operand (x2, XFmode))
+ {
+ ro[0] = x2;
+ goto L63;
+ }
+ L76:
+ if (register_operand (x2, XFmode))
+ {
+ ro[0] = x2;
+ goto L77;
+ }
+ break;
+ case DFmode:
+ switch (GET_CODE (x2))
+ {
+ case FLOAT:
+ goto L178;
+ case FLOAT_EXTEND:
+ goto L208;
+ case SUBREG:
+ case REG:
+ case MEM:
+ if (nonimmediate_operand (x2, DFmode))
+ {
+ ro[0] = x2;
+ goto L149;
+ }
+ }
+ L162:
+ if (register_operand (x2, DFmode))
+ {
+ ro[0] = x2;
+ goto L163;
+ }
+ break;
+ case SFmode:
+ if (GET_CODE (x2) == FLOAT && 1)
+ goto L264;
+ if (nonimmediate_operand (x2, SFmode))
+ {
+ ro[0] = x2;
+ goto L235;
+ }
+ L248:
+ if (register_operand (x2, SFmode))
+ {
+ ro[0] = x2;
+ goto L249;
+ }
+ }
+ goto L134;
+
+ L92:
+ x3 = XEXP (x2, 0);
+ if (nonimmediate_operand (x3, SImode))
+ {
+ ro[0] = x3;
+ goto L93;
+ }
+ goto L134;
+
+ L93:
+ x2 = XEXP (x1, 1);
+ if (pnum_clobbers != 0 && register_operand (x2, XFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 20;
+ }
+ }
+ goto L134;
+
+ L63:
+ x2 = XEXP (x1, 1);
+ if (pnum_clobbers != 0 && nonimmediate_operand (x2, XFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))
+ {
+ *pnum_clobbers = 1;
+ return 18;
+ }
+ }
+ x2 = XEXP (x1, 0);
+ goto L76;
+
+ L77:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) != XFmode)
+ {
+ goto L134;
+ }
+ switch (GET_CODE (x2))
+ {
+ case FLOAT:
+ goto L78;
+ case FLOAT_EXTEND:
+ goto L108;
+ }
+ goto L134;
+
+ L78:
+ x3 = XEXP (x2, 0);
+ if (pnum_clobbers != 0 && nonimmediate_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 19;
+ }
+ }
+ goto L134;
+
+ L108:
+ x3 = XEXP (x2, 0);
+ switch (GET_MODE (x3))
+ {
+ case DFmode:
+ if (pnum_clobbers != 0 && nonimmediate_operand (x3, DFmode))
+ {
+ ro[1] = x3;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 21;
+ }
+ }
+ break;
+ case SFmode:
+ if (pnum_clobbers != 0 && nonimmediate_operand (x3, SFmode))
+ {
+ ro[1] = x3;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 22;
+ }
+ }
+ }
+ goto L134;
+
+ L178:
+ x3 = XEXP (x2, 0);
+ if (nonimmediate_operand (x3, SImode))
+ {
+ ro[0] = x3;
+ goto L179;
+ }
+ goto L134;
+
+ L179:
+ x2 = XEXP (x1, 1);
+ if (pnum_clobbers != 0 && register_operand (x2, DFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 26;
+ }
+ }
+ goto L134;
+
+ L208:
+ x3 = XEXP (x2, 0);
+ if (nonimmediate_operand (x3, SFmode))
+ {
+ ro[0] = x3;
+ goto L209;
+ }
+ goto L134;
+
+ L209:
+ x2 = XEXP (x1, 1);
+ if (pnum_clobbers != 0 && register_operand (x2, DFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 28;
+ }
+ }
+ goto L134;
+
+ L149:
+ x2 = XEXP (x1, 1);
+ if (pnum_clobbers != 0 && nonimmediate_operand (x2, DFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))
+ {
+ *pnum_clobbers = 1;
+ return 24;
+ }
+ }
+ x2 = XEXP (x1, 0);
+ goto L162;
+
+ L163:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) != DFmode)
+ {
+ goto L134;
+ }
+ switch (GET_CODE (x2))
+ {
+ case FLOAT:
+ goto L164;
+ case FLOAT_EXTEND:
+ goto L194;
+ }
+ goto L134;
+
+ L164:
+ x3 = XEXP (x2, 0);
+ if (pnum_clobbers != 0 && nonimmediate_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 25;
+ }
+ }
+ goto L134;
+
+ L194:
+ x3 = XEXP (x2, 0);
+ if (pnum_clobbers != 0 && nonimmediate_operand (x3, SFmode))
+ {
+ ro[1] = x3;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 27;
+ }
+ }
+ goto L134;
+
+ L264:
+ x3 = XEXP (x2, 0);
+ if (nonimmediate_operand (x3, SImode))
+ {
+ ro[0] = x3;
+ goto L265;
+ }
+ goto L134;
+
+ L265:
+ x2 = XEXP (x1, 1);
+ if (pnum_clobbers != 0 && register_operand (x2, SFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 32;
+ }
+ }
+ goto L134;
+
+ L235:
+ x2 = XEXP (x1, 1);
+ if (pnum_clobbers != 0 && nonimmediate_operand (x2, SFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))
+ {
+ *pnum_clobbers = 1;
+ return 30;
+ }
+ }
+ x2 = XEXP (x1, 0);
+ goto L248;
+
+ L249:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SFmode && GET_CODE (x2) == FLOAT && 1)
+ goto L250;
+ goto L134;
+
+ L250:
+ x3 = XEXP (x2, 0);
+ if (pnum_clobbers != 0 && nonimmediate_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 31;
+ }
+ }
+ goto L134;
+
+ L135:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case XFmode:
+ if (register_operand (x2, XFmode))
+ {
+ ro[0] = x2;
+ goto L136;
+ }
+ break;
+ case DFmode:
+ if (register_operand (x2, DFmode))
+ {
+ ro[0] = x2;
+ goto L222;
+ }
+ break;
+ case SFmode:
+ if (register_operand (x2, SFmode))
+ {
+ ro[0] = x2;
+ goto L278;
+ }
+ }
+ goto ret0;
+
+ L136:
+ x2 = XEXP (x1, 1);
+ if (pnum_clobbers != 0 && register_operand (x2, XFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 23;
+ }
+ }
+ goto ret0;
+
+ L222:
+ x2 = XEXP (x1, 1);
+ if (pnum_clobbers != 0 && register_operand (x2, DFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 29;
+ }
+ }
+ goto ret0;
+
+ L278:
+ x2 = XEXP (x1, 1);
+ if (pnum_clobbers != 0 && register_operand (x2, SFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 33;
+ }
+ }
+ goto ret0;
+
+ L282:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SImode))
+ {
+ ro[0] = x2;
+ goto L283;
+ }
+ goto ret0;
+
+ L283:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ return 43;
+ }
+ goto ret0;
+
+ L287:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[0] = x2;
+ goto L288;
+ }
+ goto ret0;
+
+ L288:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ return 44;
+ }
+ goto ret0;
+
+ L292:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, QImode))
+ {
+ ro[0] = x2;
+ goto L293;
+ }
+ goto ret0;
+
+ L293:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ return 45;
+ }
+ goto ret0;
+ ret0: return -1;
+}
+
+int
+recog_3 (x0, insn, pnum_clobbers)
+ register rtx x0;
+ rtx insn;
+ int *pnum_clobbers;
+{
+ register rtx *ro = &recog_operand[0];
+ register rtx x1, x2, x3, x4, x5, x6;
+ int tem;
+
+ x1 = XEXP (x0, 1);
+ x2 = XEXP (x1, 0);
+ switch (GET_CODE (x2))
+ {
+ case EQ:
+ goto L1115;
+ case NE:
+ goto L1124;
+ case GT:
+ goto L1133;
+ case GTU:
+ goto L1142;
+ case LT:
+ goto L1151;
+ case LTU:
+ goto L1160;
+ case GE:
+ goto L1169;
+ case GEU:
+ goto L1178;
+ case LE:
+ goto L1187;
+ case LEU:
+ goto L1196;
+ }
+ goto ret0;
+
+ L1115:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) == CC0 && 1)
+ goto L1116;
+ goto ret0;
+
+ L1116:
+ x3 = XEXP (x2, 1);
+ if (GET_CODE (x3) == CONST_INT && XWINT (x3, 0) == 0 && 1)
+ goto L1117;
+ goto ret0;
+
+ L1117:
+ x2 = XEXP (x1, 1);
+ switch (GET_CODE (x2))
+ {
+ case LABEL_REF:
+ goto L1118;
+ case PC:
+ goto L1208;
+ }
+ goto ret0;
+
+ L1118:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ goto L1119;
+
+ L1119:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == PC && 1)
+ return 232;
+ goto ret0;
+
+ L1208:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == LABEL_REF && 1)
+ goto L1209;
+ goto ret0;
+
+ L1209:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ return 251;
+
+ L1124:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) == CC0 && 1)
+ goto L1125;
+ goto ret0;
+
+ L1125:
+ x3 = XEXP (x2, 1);
+ if (GET_CODE (x3) == CONST_INT && XWINT (x3, 0) == 0 && 1)
+ goto L1126;
+ goto ret0;
+
+ L1126:
+ x2 = XEXP (x1, 1);
+ switch (GET_CODE (x2))
+ {
+ case LABEL_REF:
+ goto L1127;
+ case PC:
+ goto L1217;
+ }
+ goto ret0;
+
+ L1127:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ goto L1128;
+
+ L1128:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == PC && 1)
+ return 234;
+ goto ret0;
+
+ L1217:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == LABEL_REF && 1)
+ goto L1218;
+ goto ret0;
+
+ L1218:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ return 252;
+
+ L1133:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) == CC0 && 1)
+ goto L1134;
+ goto ret0;
+
+ L1134:
+ x3 = XEXP (x2, 1);
+ if (GET_CODE (x3) == CONST_INT && XWINT (x3, 0) == 0 && 1)
+ goto L1135;
+ goto ret0;
+
+ L1135:
+ x2 = XEXP (x1, 1);
+ switch (GET_CODE (x2))
+ {
+ case LABEL_REF:
+ goto L1136;
+ case PC:
+ goto L1226;
+ }
+ goto ret0;
+
+ L1136:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ goto L1137;
+
+ L1137:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == PC && 1)
+ return 236;
+ goto ret0;
+
+ L1226:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == LABEL_REF && 1)
+ goto L1227;
+ goto ret0;
+
+ L1227:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ return 253;
+
+ L1142:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) == CC0 && 1)
+ goto L1143;
+ goto ret0;
+
+ L1143:
+ x3 = XEXP (x2, 1);
+ if (GET_CODE (x3) == CONST_INT && XWINT (x3, 0) == 0 && 1)
+ goto L1144;
+ goto ret0;
+
+ L1144:
+ x2 = XEXP (x1, 1);
+ switch (GET_CODE (x2))
+ {
+ case LABEL_REF:
+ goto L1145;
+ case PC:
+ goto L1235;
+ }
+ goto ret0;
+
+ L1145:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ goto L1146;
+
+ L1146:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == PC && 1)
+ return 238;
+ goto ret0;
+
+ L1235:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == LABEL_REF && 1)
+ goto L1236;
+ goto ret0;
+
+ L1236:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ return 254;
+
+ L1151:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) == CC0 && 1)
+ goto L1152;
+ goto ret0;
+
+ L1152:
+ x3 = XEXP (x2, 1);
+ if (GET_CODE (x3) == CONST_INT && XWINT (x3, 0) == 0 && 1)
+ goto L1153;
+ goto ret0;
+
+ L1153:
+ x2 = XEXP (x1, 1);
+ switch (GET_CODE (x2))
+ {
+ case LABEL_REF:
+ goto L1154;
+ case PC:
+ goto L1244;
+ }
+ goto ret0;
+
+ L1154:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ goto L1155;
+
+ L1155:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == PC && 1)
+ return 240;
+ goto ret0;
+
+ L1244:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == LABEL_REF && 1)
+ goto L1245;
+ goto ret0;
+
+ L1245:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ return 255;
+
+ L1160:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) == CC0 && 1)
+ goto L1161;
+ goto ret0;
+
+ L1161:
+ x3 = XEXP (x2, 1);
+ if (GET_CODE (x3) == CONST_INT && XWINT (x3, 0) == 0 && 1)
+ goto L1162;
+ goto ret0;
+
+ L1162:
+ x2 = XEXP (x1, 1);
+ switch (GET_CODE (x2))
+ {
+ case LABEL_REF:
+ goto L1163;
+ case PC:
+ goto L1253;
+ }
+ goto ret0;
+
+ L1163:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ goto L1164;
+
+ L1164:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == PC && 1)
+ return 242;
+ goto ret0;
+
+ L1253:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == LABEL_REF && 1)
+ goto L1254;
+ goto ret0;
+
+ L1254:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ return 256;
+
+ L1169:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) == CC0 && 1)
+ goto L1170;
+ goto ret0;
+
+ L1170:
+ x3 = XEXP (x2, 1);
+ if (GET_CODE (x3) == CONST_INT && XWINT (x3, 0) == 0 && 1)
+ goto L1171;
+ goto ret0;
+
+ L1171:
+ x2 = XEXP (x1, 1);
+ switch (GET_CODE (x2))
+ {
+ case LABEL_REF:
+ goto L1172;
+ case PC:
+ goto L1262;
+ }
+ goto ret0;
+
+ L1172:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ goto L1173;
+
+ L1173:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == PC && 1)
+ return 244;
+ goto ret0;
+
+ L1262:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == LABEL_REF && 1)
+ goto L1263;
+ goto ret0;
+
+ L1263:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ return 257;
+
+ L1178:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) == CC0 && 1)
+ goto L1179;
+ goto ret0;
+
+ L1179:
+ x3 = XEXP (x2, 1);
+ if (GET_CODE (x3) == CONST_INT && XWINT (x3, 0) == 0 && 1)
+ goto L1180;
+ goto ret0;
+
+ L1180:
+ x2 = XEXP (x1, 1);
+ switch (GET_CODE (x2))
+ {
+ case LABEL_REF:
+ goto L1181;
+ case PC:
+ goto L1271;
+ }
+ goto ret0;
+
+ L1181:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ goto L1182;
+
+ L1182:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == PC && 1)
+ return 246;
+ goto ret0;
+
+ L1271:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == LABEL_REF && 1)
+ goto L1272;
+ goto ret0;
+
+ L1272:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ return 258;
+
+ L1187:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) == CC0 && 1)
+ goto L1188;
+ goto ret0;
+
+ L1188:
+ x3 = XEXP (x2, 1);
+ if (GET_CODE (x3) == CONST_INT && XWINT (x3, 0) == 0 && 1)
+ goto L1189;
+ goto ret0;
+
+ L1189:
+ x2 = XEXP (x1, 1);
+ switch (GET_CODE (x2))
+ {
+ case LABEL_REF:
+ goto L1190;
+ case PC:
+ goto L1280;
+ }
+ goto ret0;
+
+ L1190:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ goto L1191;
+
+ L1191:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == PC && 1)
+ return 248;
+ goto ret0;
+
+ L1280:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == LABEL_REF && 1)
+ goto L1281;
+ goto ret0;
+
+ L1281:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ return 259;
+
+ L1196:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) == CC0 && 1)
+ goto L1197;
+ goto ret0;
+
+ L1197:
+ x3 = XEXP (x2, 1);
+ if (GET_CODE (x3) == CONST_INT && XWINT (x3, 0) == 0 && 1)
+ goto L1198;
+ goto ret0;
+
+ L1198:
+ x2 = XEXP (x1, 1);
+ switch (GET_CODE (x2))
+ {
+ case LABEL_REF:
+ goto L1199;
+ case PC:
+ goto L1289;
+ }
+ goto ret0;
+
+ L1199:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ goto L1200;
+
+ L1200:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == PC && 1)
+ return 250;
+ goto ret0;
+
+ L1289:
+ x2 = XEXP (x1, 2);
+ if (GET_CODE (x2) == LABEL_REF && 1)
+ goto L1290;
+ goto ret0;
+
+ L1290:
+ x3 = XEXP (x2, 0);
+ ro[0] = x3;
+ return 260;
+ ret0: return -1;
+}
+
+int
+recog_4 (x0, insn, pnum_clobbers)
+ register rtx x0;
+ rtx insn;
+ int *pnum_clobbers;
+{
+ register rtx *ro = &recog_operand[0];
+ register rtx x1, x2, x3, x4, x5, x6;
+ int tem;
+
+ x1 = XEXP (x0, 0);
+ switch (GET_MODE (x1))
+ {
+ case SImode:
+ switch (GET_CODE (x1))
+ {
+ case MEM:
+ if (push_operand (x1, SImode))
+ {
+ ro[0] = x1;
+ goto L296;
+ }
+ break;
+ case ZERO_EXTRACT:
+ goto L1025;
+ }
+ L303:
+ if (general_operand (x1, SImode))
+ {
+ ro[0] = x1;
+ goto L365;
+ }
+ L611:
+ if (register_operand (x1, SImode))
+ {
+ ro[0] = x1;
+ goto L612;
+ }
+ L619:
+ if (general_operand (x1, SImode))
+ {
+ ro[0] = x1;
+ goto L620;
+ }
+ break;
+ case HImode:
+ if (GET_CODE (x1) == MEM && push_operand (x1, HImode))
+ {
+ ro[0] = x1;
+ goto L307;
+ }
+ L309:
+ if (general_operand (x1, HImode))
+ {
+ ro[0] = x1;
+ goto L369;
+ }
+ break;
+ case QImode:
+ if (GET_CODE (x1) == MEM && push_operand (x1, QImode))
+ {
+ ro[0] = x1;
+ goto L317;
+ }
+ L319:
+ if (general_operand (x1, QImode))
+ {
+ ro[0] = x1;
+ goto L607;
+ }
+ L1062:
+ if (register_operand (x1, QImode))
+ {
+ ro[0] = x1;
+ goto L1063;
+ }
+ break;
+ case SFmode:
+ if (GET_CODE (x1) == MEM && push_operand (x1, SFmode))
+ {
+ ro[0] = x1;
+ goto L327;
+ }
+ L329:
+ if (general_operand (x1, SFmode))
+ {
+ ro[0] = x1;
+ goto L416;
+ }
+ L575:
+ if (register_operand (x1, SFmode))
+ {
+ ro[0] = x1;
+ goto L576;
+ }
+ break;
+ case DFmode:
+ if (GET_CODE (x1) == MEM && push_operand (x1, DFmode))
+ {
+ ro[0] = x1;
+ goto L333;
+ }
+ L342:
+ if (general_operand (x1, DFmode))
+ {
+ ro[0] = x1;
+ goto L397;
+ }
+ L571:
+ if (register_operand (x1, DFmode))
+ {
+ ro[0] = x1;
+ goto L572;
+ }
+ break;
+ case XFmode:
+ if (GET_CODE (x1) == MEM && push_operand (x1, XFmode))
+ {
+ ro[0] = x1;
+ goto L346;
+ }
+ L355:
+ if (general_operand (x1, XFmode))
+ {
+ ro[0] = x1;
+ goto L401;
+ }
+ L567:
+ if (register_operand (x1, XFmode))
+ {
+ ro[0] = x1;
+ goto L568;
+ }
+ break;
+ case DImode:
+ if (GET_CODE (x1) == MEM && push_operand (x1, DImode))
+ {
+ ro[0] = x1;
+ goto L359;
+ }
+ L361:
+ if (general_operand (x1, DImode))
+ {
+ ro[0] = x1;
+ goto L592;
+ }
+ L376:
+ if (register_operand (x1, DImode))
+ {
+ ro[0] = x1;
+ goto L377;
+ }
+ }
+ switch (GET_CODE (x1))
+ {
+ case CC0:
+ goto L2;
+ case STRICT_LOW_PART:
+ goto L313;
+ case PC:
+ goto L1314;
+ }
+ L1380:
+ ro[0] = x1;
+ goto L1381;
+ L1460:
+ switch (GET_MODE (x1))
+ {
+ case SImode:
+ if (general_operand (x1, SImode))
+ {
+ ro[0] = x1;
+ goto L1461;
+ }
+ break;
+ case HImode:
+ if (general_operand (x1, HImode))
+ {
+ ro[0] = x1;
+ goto L1467;
+ }
+ break;
+ case DFmode:
+ if (register_operand (x1, DFmode))
+ {
+ ro[0] = x1;
+ goto L1473;
+ }
+ break;
+ case XFmode:
+ if (register_operand (x1, XFmode))
+ {
+ ro[0] = x1;
+ goto L1484;
+ }
+ break;
+ case SFmode:
+ if (register_operand (x1, SFmode))
+ {
+ ro[0] = x1;
+ goto L1531;
+ }
+ }
+ goto ret0;
+
+ L296:
+ x1 = XEXP (x0, 1);
+ if (general_operand (x1, SImode))
+ goto L300;
+ x1 = XEXP (x0, 0);
+ goto L303;
+
+ L300:
+ ro[1] = x1;
+ if (! TARGET_486)
+ return 46;
+ L301:
+ ro[1] = x1;
+ if (TARGET_486)
+ return 47;
+ x1 = XEXP (x0, 0);
+ goto L303;
+
+ L1025:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == SImode && general_operand (x2, SImode))
+ {
+ ro[0] = x2;
+ goto L1026;
+ }
+ goto L1380;
+
+ L1026:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 1 && 1)
+ goto L1027;
+ goto L1380;
+
+ L1027:
+ x2 = XEXP (x1, 2);
+ if (general_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L1028;
+ }
+ goto L1380;
+
+ L1028:
+ x1 = XEXP (x0, 1);
+ if (GET_CODE (x1) == CONST_INT && 1)
+ {
+ ro[3] = x1;
+ if (! TARGET_486 && GET_CODE (operands[2]) != CONST_INT)
+ return 205;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L365:
+ x1 = XEXP (x0, 1);
+ switch (GET_MODE (x1))
+ {
+ case SImode:
+ switch (GET_CODE (x1))
+ {
+ case ZERO_EXTEND:
+ goto L366;
+ case SIGN_EXTEND:
+ goto L386;
+ case PLUS:
+ goto L598;
+ }
+ }
+ if (general_operand (x1, SImode))
+ {
+ ro[1] = x1;
+ return 49;
+ }
+ x1 = XEXP (x0, 0);
+ goto L611;
+
+ L366:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case HImode:
+ if (nonimmediate_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ return 66;
+ }
+ break;
+ case QImode:
+ if (nonimmediate_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ return 68;
+ }
+ }
+ x1 = XEXP (x0, 0);
+ goto L611;
+
+ L386:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case HImode:
+ if (nonimmediate_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ return 71;
+ }
+ break;
+ case QImode:
+ if (nonimmediate_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ return 73;
+ }
+ }
+ x1 = XEXP (x0, 0);
+ goto L611;
+
+ L598:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ goto L599;
+ }
+ x1 = XEXP (x0, 0);
+ goto L611;
+
+ L599:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ return 109;
+ }
+ x1 = XEXP (x0, 0);
+ goto L611;
+
+ L612:
+ x1 = XEXP (x0, 1);
+ if (address_operand (x1, QImode))
+ {
+ ro[1] = x1;
+ return 112;
+ }
+ x1 = XEXP (x0, 0);
+ goto L619;
+
+ L620:
+ x1 = XEXP (x0, 1);
+ if (GET_MODE (x1) != SImode)
+ {
+ x1 = XEXP (x0, 0);
+ goto L1380;
+ }
+ switch (GET_CODE (x1))
+ {
+ case MINUS:
+ goto L621;
+ case MULT:
+ goto L648;
+ case AND:
+ goto L742;
+ case IOR:
+ goto L757;
+ case XOR:
+ goto L1032;
+ case NEG:
+ goto L791;
+ case NOT:
+ goto L900;
+ case ASHIFT:
+ goto L925;
+ case ASHIFTRT:
+ goto L953;
+ case LSHIFTRT:
+ goto L981;
+ case ROTATE:
+ goto L996;
+ case ROTATERT:
+ goto L1011;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L621:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ goto L622;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L622:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ return 117;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L648:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ goto L649;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L649:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, SImode))
+ goto L655;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L655:
+ ro[2] = x2;
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 0x80)
+ return 125;
+ L656:
+ ro[2] = x2;
+ return 126;
+
+ L742:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ goto L743;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L743:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ return 143;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L757:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ goto L758;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L758:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ return 146;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1032:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == ASHIFT && 1)
+ goto L1033;
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ goto L1040;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1033:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) == CONST_INT && XWINT (x3, 0) == 1 && 1)
+ goto L1034;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1034:
+ x3 = XEXP (x2, 1);
+ if (general_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ goto L1035;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1035:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ if (! TARGET_486 && GET_CODE (operands[1]) != CONST_INT)
+ return 206;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1040:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == ASHIFT && 1)
+ goto L1041;
+ if (general_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ return 149;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1041:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) == CONST_INT && XWINT (x3, 0) == 1 && 1)
+ goto L1042;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1042:
+ x3 = XEXP (x2, 1);
+ if (general_operand (x3, SImode))
+ {
+ ro[2] = x3;
+ if (! TARGET_486 && GET_CODE (operands[2]) != CONST_INT)
+ return 207;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L791:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ return 153;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L900:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ return 178;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L925:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ goto L926;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L926:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ return 184;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L953:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ goto L954;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L954:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ return 190;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L981:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ goto L982;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L982:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ return 196;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L996:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ goto L997;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L997:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ return 199;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1011:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ goto L1012;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1012:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ return 202;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L307:
+ x1 = XEXP (x0, 1);
+ if (general_operand (x1, HImode))
+ {
+ ro[1] = x1;
+ return 50;
+ }
+ x1 = XEXP (x0, 0);
+ goto L309;
+ L369:
+ tem = recog_1 (x0, insn, pnum_clobbers);
+ if (tem >= 0) return tem;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L317:
+ x1 = XEXP (x0, 1);
+ if (general_operand (x1, QImode))
+ {
+ ro[1] = x1;
+ return 53;
+ }
+ x1 = XEXP (x0, 0);
+ goto L319;
+
+ L607:
+ x1 = XEXP (x0, 1);
+ switch (GET_MODE (x1))
+ {
+ case QImode:
+ switch (GET_CODE (x1))
+ {
+ case PLUS:
+ goto L608;
+ case MINUS:
+ goto L631;
+ case DIV:
+ goto L688;
+ case UDIV:
+ goto L693;
+ case AND:
+ goto L752;
+ case IOR:
+ goto L767;
+ case XOR:
+ goto L782;
+ case NEG:
+ goto L799;
+ case NOT:
+ goto L908;
+ case ASHIFT:
+ goto L935;
+ case ASHIFTRT:
+ goto L963;
+ case LSHIFTRT:
+ goto L991;
+ case ROTATE:
+ goto L1006;
+ case ROTATERT:
+ goto L1021;
+ }
+ }
+ if (general_operand (x1, QImode))
+ {
+ ro[1] = x1;
+ return 54;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L608:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ goto L609;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L609:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, QImode))
+ {
+ ro[2] = x2;
+ return 111;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L631:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ goto L632;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L632:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, QImode))
+ {
+ ro[2] = x2;
+ return 119;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L688:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ goto L689;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L689:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, QImode))
+ {
+ ro[2] = x2;
+ return 134;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L693:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ goto L694;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L694:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, QImode))
+ {
+ ro[2] = x2;
+ return 135;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L752:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ goto L753;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L753:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, QImode))
+ {
+ ro[2] = x2;
+ return 145;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L767:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ goto L768;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L768:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, QImode))
+ {
+ ro[2] = x2;
+ return 148;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L782:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ goto L783;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L783:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, QImode))
+ {
+ ro[2] = x2;
+ return 151;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L799:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ return 155;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L908:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ return 180;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L935:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ goto L936;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L936:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, QImode))
+ {
+ ro[2] = x2;
+ return 186;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L963:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ goto L964;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L964:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, QImode))
+ {
+ ro[2] = x2;
+ return 192;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L991:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ goto L992;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L992:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, QImode))
+ {
+ ro[2] = x2;
+ return 198;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L1006:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ goto L1007;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L1007:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, QImode))
+ {
+ ro[2] = x2;
+ return 201;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L1021:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ goto L1022;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L1022:
+ x2 = XEXP (x1, 1);
+ if (nonmemory_operand (x2, QImode))
+ {
+ ro[2] = x2;
+ return 204;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1062;
+
+ L1063:
+ x1 = XEXP (x0, 1);
+ if (GET_MODE (x1) != QImode)
+ {
+ x1 = XEXP (x0, 0);
+ goto L1380;
+ }
+ switch (GET_CODE (x1))
+ {
+ case EQ:
+ goto L1064;
+ case NE:
+ goto L1069;
+ case GT:
+ goto L1074;
+ case GTU:
+ goto L1079;
+ case LT:
+ goto L1084;
+ case LTU:
+ goto L1089;
+ case GE:
+ goto L1094;
+ case GEU:
+ goto L1099;
+ case LE:
+ goto L1104;
+ case LEU:
+ goto L1109;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1064:
+ x2 = XEXP (x1, 0);
+ if (GET_CODE (x2) == CC0 && 1)
+ goto L1065;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1065:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 0 && 1)
+ return 212;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1069:
+ x2 = XEXP (x1, 0);
+ if (GET_CODE (x2) == CC0 && 1)
+ goto L1070;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1070:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 0 && 1)
+ return 214;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1074:
+ x2 = XEXP (x1, 0);
+ if (GET_CODE (x2) == CC0 && 1)
+ goto L1075;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1075:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 0 && 1)
+ return 216;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1079:
+ x2 = XEXP (x1, 0);
+ if (GET_CODE (x2) == CC0 && 1)
+ goto L1080;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1080:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 0 && 1)
+ return 218;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1084:
+ x2 = XEXP (x1, 0);
+ if (GET_CODE (x2) == CC0 && 1)
+ goto L1085;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1085:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 0 && 1)
+ return 220;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1089:
+ x2 = XEXP (x1, 0);
+ if (GET_CODE (x2) == CC0 && 1)
+ goto L1090;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1090:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 0 && 1)
+ return 222;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1094:
+ x2 = XEXP (x1, 0);
+ if (GET_CODE (x2) == CC0 && 1)
+ goto L1095;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1095:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 0 && 1)
+ return 224;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1099:
+ x2 = XEXP (x1, 0);
+ if (GET_CODE (x2) == CC0 && 1)
+ goto L1100;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1100:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 0 && 1)
+ return 226;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1104:
+ x2 = XEXP (x1, 0);
+ if (GET_CODE (x2) == CC0 && 1)
+ goto L1105;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1105:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 0 && 1)
+ return 228;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1109:
+ x2 = XEXP (x1, 0);
+ if (GET_CODE (x2) == CC0 && 1)
+ goto L1110;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1110:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 0 && 1)
+ return 230;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L327:
+ x1 = XEXP (x0, 1);
+ if (general_operand (x1, SFmode))
+ {
+ ro[1] = x1;
+ return 56;
+ }
+ x1 = XEXP (x0, 0);
+ goto L329;
+
+ L416:
+ x1 = XEXP (x0, 1);
+ if (GET_MODE (x1) == SFmode && GET_CODE (x1) == FLOAT_TRUNCATE && 1)
+ goto L417;
+ if (general_operand (x1, SFmode))
+ {
+ ro[1] = x1;
+ return 57;
+ }
+ x1 = XEXP (x0, 0);
+ goto L575;
+
+ L417:
+ x2 = XEXP (x1, 0);
+ if (register_operand (x2, XFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 79;
+ }
+ x1 = XEXP (x0, 0);
+ goto L575;
+
+ L576:
+ x1 = XEXP (x0, 1);
+ if (GET_MODE (x1) != SFmode)
+ {
+ x1 = XEXP (x0, 0);
+ goto L1380;
+ }
+ switch (GET_CODE (x1))
+ {
+ case FLOAT:
+ goto L577;
+ case NEG:
+ goto L803;
+ case ABS:
+ goto L825;
+ case SQRT:
+ goto L847;
+ case UNSPEC:
+ if (XINT (x1, 1) == 1 && XVECLEN (x1, 0) == 1 && 1)
+ goto L878;
+ if (XINT (x1, 1) == 2 && XVECLEN (x1, 0) == 1 && 1)
+ goto L891;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L577:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case DImode:
+ if (nonimmediate_operand (x2, DImode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 104;
+ }
+ break;
+ case SImode:
+ if (nonimmediate_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 107;
+ }
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L803:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 156;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L825:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 161;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L847:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SFmode))
+ {
+ ro[1] = x2;
+ if (! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) )
+ return 166;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L878:
+ x2 = XVECEXP (x1, 0, 0);
+ if (register_operand (x2, SFmode))
+ {
+ ro[1] = x2;
+ if (! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) )
+ return 173;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L891:
+ x2 = XVECEXP (x1, 0, 0);
+ if (register_operand (x2, SFmode))
+ {
+ ro[1] = x2;
+ if (! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) )
+ return 176;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L333:
+ x1 = XEXP (x0, 1);
+ if (general_operand (x1, DFmode))
+ {
+ ro[1] = x1;
+ return 58;
+ }
+ x1 = XEXP (x0, 0);
+ goto L342;
+
+ L397:
+ x1 = XEXP (x0, 1);
+ switch (GET_MODE (x1))
+ {
+ case DFmode:
+ switch (GET_CODE (x1))
+ {
+ case FLOAT_EXTEND:
+ goto L398;
+ case FLOAT_TRUNCATE:
+ goto L421;
+ }
+ }
+ if (general_operand (x1, DFmode))
+ {
+ ro[1] = x1;
+ return 60;
+ }
+ x1 = XEXP (x0, 0);
+ goto L571;
+
+ L398:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, SFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 74;
+ }
+ x1 = XEXP (x0, 0);
+ goto L571;
+
+ L421:
+ x2 = XEXP (x1, 0);
+ if (register_operand (x2, XFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 80;
+ }
+ x1 = XEXP (x0, 0);
+ goto L571;
+
+ L572:
+ x1 = XEXP (x0, 1);
+ if (GET_MODE (x1) != DFmode)
+ {
+ x1 = XEXP (x0, 0);
+ goto L1380;
+ }
+ switch (GET_CODE (x1))
+ {
+ case FLOAT:
+ goto L573;
+ case NEG:
+ goto L811;
+ case ABS:
+ goto L833;
+ case SQRT:
+ goto L855;
+ case UNSPEC:
+ if (XINT (x1, 1) == 1 && XVECLEN (x1, 0) == 1 && 1)
+ goto L882;
+ if (XINT (x1, 1) == 2 && XVECLEN (x1, 0) == 1 && 1)
+ goto L895;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L573:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case DImode:
+ if (nonimmediate_operand (x2, DImode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 103;
+ }
+ break;
+ case SImode:
+ if (nonimmediate_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 105;
+ }
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L811:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == DFmode && GET_CODE (x2) == FLOAT_EXTEND && 1)
+ goto L812;
+ if (general_operand (x2, DFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 157;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L812:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SFmode))
+ {
+ ro[1] = x3;
+ if (TARGET_80387)
+ return 158;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L833:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == DFmode && GET_CODE (x2) == FLOAT_EXTEND && 1)
+ goto L834;
+ if (general_operand (x2, DFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 162;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L834:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SFmode))
+ {
+ ro[1] = x3;
+ if (TARGET_80387)
+ return 163;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L855:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == DFmode && GET_CODE (x2) == FLOAT_EXTEND && 1)
+ goto L856;
+ if (general_operand (x2, DFmode))
+ {
+ ro[1] = x2;
+ if (! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) )
+ return 167;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L856:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SFmode))
+ {
+ ro[1] = x3;
+ if (! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) )
+ return 168;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L882:
+ x2 = XVECEXP (x1, 0, 0);
+ if (GET_MODE (x2) != DFmode)
+ {
+ x1 = XEXP (x0, 0);
+ goto L1380;
+ }
+ if (GET_CODE (x2) == FLOAT_EXTEND && 1)
+ goto L883;
+ if (register_operand (x2, DFmode))
+ {
+ ro[1] = x2;
+ if (! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) )
+ return 172;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L883:
+ x3 = XEXP (x2, 0);
+ if (register_operand (x3, SFmode))
+ {
+ ro[1] = x3;
+ if (! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) )
+ return 174;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L895:
+ x2 = XVECEXP (x1, 0, 0);
+ if (GET_MODE (x2) != DFmode)
+ {
+ x1 = XEXP (x0, 0);
+ goto L1380;
+ }
+ if (GET_CODE (x2) == FLOAT_EXTEND && 1)
+ goto L896;
+ if (register_operand (x2, DFmode))
+ {
+ ro[1] = x2;
+ if (! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) )
+ return 175;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L896:
+ x3 = XEXP (x2, 0);
+ if (register_operand (x3, SFmode))
+ {
+ ro[1] = x3;
+ if (! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) )
+ return 177;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L346:
+ x1 = XEXP (x0, 1);
+ if (general_operand (x1, XFmode))
+ {
+ ro[1] = x1;
+ return 61;
+ }
+ x1 = XEXP (x0, 0);
+ goto L355;
+
+ L401:
+ x1 = XEXP (x0, 1);
+ if (GET_MODE (x1) == XFmode && GET_CODE (x1) == FLOAT_EXTEND && 1)
+ goto L402;
+ if (general_operand (x1, XFmode))
+ {
+ ro[1] = x1;
+ return 63;
+ }
+ x1 = XEXP (x0, 0);
+ goto L567;
+
+ L402:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, DFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 75;
+ }
+ L406:
+ if (general_operand (x2, SFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 76;
+ }
+ x1 = XEXP (x0, 0);
+ goto L567;
+
+ L568:
+ x1 = XEXP (x0, 1);
+ if (GET_MODE (x1) != XFmode)
+ {
+ x1 = XEXP (x0, 0);
+ goto L1380;
+ }
+ switch (GET_CODE (x1))
+ {
+ case FLOAT:
+ goto L569;
+ case NEG:
+ goto L820;
+ case ABS:
+ goto L842;
+ case SQRT:
+ goto L864;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L569:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, DImode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 102;
+ }
+ L585:
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 106;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L820:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == XFmode && GET_CODE (x2) == FLOAT_EXTEND && 1)
+ goto L821;
+ if (general_operand (x2, XFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 159;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L821:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, DFmode))
+ {
+ ro[1] = x3;
+ if (TARGET_80387)
+ return 160;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L842:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == XFmode && GET_CODE (x2) == FLOAT_EXTEND && 1)
+ goto L843;
+ if (general_operand (x2, XFmode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387)
+ return 164;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L843:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, DFmode))
+ {
+ ro[1] = x3;
+ if (TARGET_80387)
+ return 165;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L864:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == XFmode && GET_CODE (x2) == FLOAT_EXTEND && 1)
+ goto L865;
+ if (general_operand (x2, XFmode))
+ {
+ ro[1] = x2;
+ if (! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) )
+ return 169;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L865:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, DFmode))
+ {
+ ro[1] = x3;
+ if (! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) )
+ return 170;
+ }
+ L870:
+ if (general_operand (x3, SFmode))
+ {
+ ro[1] = x3;
+ if (! TARGET_NO_FANCY_MATH_387 && TARGET_80387
+ && (TARGET_IEEE_FP || flag_fast_math) )
+ return 171;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L359:
+ x1 = XEXP (x0, 1);
+ if (general_operand (x1, DImode))
+ {
+ ro[1] = x1;
+ return 64;
+ }
+ x1 = XEXP (x0, 0);
+ goto L361;
+
+ L592:
+ x1 = XEXP (x0, 1);
+ switch (GET_MODE (x1))
+ {
+ case DImode:
+ switch (GET_CODE (x1))
+ {
+ case PLUS:
+ goto L593;
+ case MINUS:
+ goto L616;
+ case NEG:
+ goto L787;
+ }
+ }
+ if (general_operand (x1, DImode))
+ {
+ ro[1] = x1;
+ return 65;
+ }
+ x1 = XEXP (x0, 0);
+ goto L376;
+
+ L593:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, DImode))
+ {
+ ro[1] = x2;
+ goto L594;
+ }
+ x1 = XEXP (x0, 0);
+ goto L376;
+
+ L594:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, DImode))
+ {
+ ro[2] = x2;
+ return 108;
+ }
+ x1 = XEXP (x0, 0);
+ goto L376;
+
+ L616:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, DImode))
+ {
+ ro[1] = x2;
+ goto L617;
+ }
+ x1 = XEXP (x0, 0);
+ goto L376;
+
+ L617:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, DImode))
+ {
+ ro[2] = x2;
+ return 116;
+ }
+ x1 = XEXP (x0, 0);
+ goto L376;
+
+ L787:
+ x2 = XEXP (x1, 0);
+ if (general_operand (x2, DImode))
+ {
+ ro[1] = x2;
+ return 152;
+ }
+ x1 = XEXP (x0, 0);
+ goto L376;
+
+ L377:
+ x1 = XEXP (x0, 1);
+ if (GET_MODE (x1) != DImode)
+ {
+ x1 = XEXP (x0, 0);
+ goto L1380;
+ }
+ switch (GET_CODE (x1))
+ {
+ case ZERO_EXTEND:
+ goto L378;
+ case SIGN_EXTEND:
+ goto L382;
+ case MULT:
+ goto L674;
+ case ASHIFT:
+ goto L912;
+ case ASHIFTRT:
+ goto L940;
+ case LSHIFTRT:
+ goto L968;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L378:
+ x2 = XEXP (x1, 0);
+ if (register_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ return 69;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L382:
+ x2 = XEXP (x1, 0);
+ if (register_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ return 70;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L674:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) != DImode)
+ {
+ x1 = XEXP (x0, 0);
+ goto L1380;
+ }
+ switch (GET_CODE (x2))
+ {
+ case ZERO_EXTEND:
+ goto L675;
+ case SIGN_EXTEND:
+ goto L682;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L675:
+ x3 = XEXP (x2, 0);
+ if (register_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ goto L676;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L676:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND && 1)
+ goto L677;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L677:
+ x3 = XEXP (x2, 0);
+ if (nonimmediate_operand (x3, SImode))
+ {
+ ro[2] = x3;
+ return 129;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L682:
+ x3 = XEXP (x2, 0);
+ if (register_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ goto L683;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L683:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == DImode && GET_CODE (x2) == SIGN_EXTEND && 1)
+ goto L684;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L684:
+ x3 = XEXP (x2, 0);
+ if (nonimmediate_operand (x3, SImode))
+ {
+ ro[2] = x3;
+ return 130;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L912:
+ x2 = XEXP (x1, 0);
+ if (register_operand (x2, DImode))
+ {
+ ro[1] = x2;
+ goto L913;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L913:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && 1)
+ {
+ ro[2] = x2;
+ return 182;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L940:
+ x2 = XEXP (x1, 0);
+ if (register_operand (x2, DImode))
+ {
+ ro[1] = x2;
+ goto L941;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L941:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && 1)
+ {
+ ro[2] = x2;
+ return 188;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L968:
+ x2 = XEXP (x1, 0);
+ if (register_operand (x2, DImode))
+ {
+ ro[1] = x2;
+ goto L969;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L969:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && 1)
+ {
+ ro[2] = x2;
+ return 194;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L2:
+ tem = recog_2 (x0, insn, pnum_clobbers);
+ if (tem >= 0) return tem;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L313:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case HImode:
+ if (general_operand (x2, HImode))
+ {
+ ro[0] = x2;
+ goto L314;
+ }
+ break;
+ case QImode:
+ if (general_operand (x2, QImode))
+ {
+ ro[0] = x2;
+ goto L324;
+ }
+ }
+ goto L1380;
+
+ L314:
+ x1 = XEXP (x0, 1);
+ if (general_operand (x1, HImode))
+ {
+ ro[1] = x1;
+ return 52;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L324:
+ x1 = XEXP (x0, 1);
+ if (general_operand (x1, QImode))
+ {
+ ro[1] = x1;
+ return 55;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
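+  /* SETs of pc: the tablejump idiom (hard reg 3 minus a load from the
+     dispatch table), conditional branches through IF_THEN_ELSE, and
+     direct and indirect jumps.  */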
+ L1314:
+ x1 = XEXP (x0, 1);
+ switch (GET_CODE (x1))
+ {
+ case MINUS:
+ if (GET_MODE (x1) == SImode && 1)
+ goto L1315;
+ break;
+ case IF_THEN_ELSE:
+ goto L1114;
+ case LABEL_REF:
+ goto L1294;
+ }
+ L1297:
+ if (general_operand (x1, SImode))
+ {
+ ro[0] = x1;
+ return 262;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1315:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 3 && 1)
+ goto L1316;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1316:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == MEM && 1)
+ goto L1317;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1317:
+ x3 = XEXP (x2, 0);
+ if (GET_MODE (x3) == SImode && GET_CODE (x3) == PLUS && 1)
+ goto L1318;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1318:
+ x4 = XEXP (x3, 0);
+ if (GET_MODE (x4) == SImode && GET_CODE (x4) == MULT && 1)
+ goto L1319;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1319:
+ x5 = XEXP (x4, 0);
+ if (register_operand (x5, SImode))
+ {
+ ro[0] = x5;
+ goto L1320;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1320:
+ x5 = XEXP (x4, 1);
+ if (GET_CODE (x5) == CONST_INT && XWINT (x5, 0) == 4 && 1)
+ goto L1321;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1321:
+ x4 = XEXP (x3, 1);
+ if (GET_CODE (x4) == LABEL_REF && 1)
+ goto L1322;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1322:
+ x5 = XEXP (x4, 0);
+ if (pnum_clobbers != 0 && 1)
+ {
+ ro[1] = x5;
+ *pnum_clobbers = 1;
+ return 264;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1114:
+ tem = recog_3 (x0, insn, pnum_clobbers);
+ if (tem >= 0) return tem;
+ x1 = XEXP (x0, 0);
+ goto L1380;
+
+ L1294:
+ x2 = XEXP (x1, 0);
+ ro[0] = x2;
+ return 261;
+
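+  /* SETs whose source is a CALL: the call-value patterns, with a
+     separate path for direct symbolic calls, which are rejected when
+     half-pic is in effect.  */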
+ L1381:
+ x1 = XEXP (x0, 1);
+ if (GET_CODE (x1) == CALL && 1)
+ goto L1382;
+ x1 = XEXP (x0, 0);
+ goto L1460;
+
+ L1382:
+ x2 = XEXP (x1, 0);
+ if (call_insn_operand (x2, QImode))
+ {
+ ro[1] = x2;
+ goto L1383;
+ }
+ L1387:
+ if (GET_MODE (x2) == QImode && GET_CODE (x2) == MEM && 1)
+ goto L1388;
+ x1 = XEXP (x0, 0);
+ goto L1460;
+
+ L1383:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ return 276;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1387;
+
+ L1388:
+ x3 = XEXP (x2, 0);
+ if (symbolic_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ goto L1389;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1460;
+
+ L1389:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ if (!HALF_PIC_P ())
+ return 277;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1460;
+
+ L1461:
+ x1 = XEXP (x0, 1);
+ if (GET_MODE (x1) == SImode && GET_CODE (x1) == PLUS && 1)
+ goto L1462;
+ goto ret0;
+
+ L1462:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == FFS && 1)
+ goto L1463;
+ goto ret0;
+
+ L1463:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ goto L1464;
+ }
+ goto ret0;
+
+ L1464:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == -1 && 1)
+ return 291;
+ goto ret0;
+
+ L1467:
+ x1 = XEXP (x0, 1);
+ if (GET_MODE (x1) == HImode && GET_CODE (x1) == PLUS && 1)
+ goto L1468;
+ goto ret0;
+
+ L1468:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == HImode && GET_CODE (x2) == FFS && 1)
+ goto L1469;
+ goto ret0;
+
+ L1469:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ goto L1470;
+ }
+ goto ret0;
+
+ L1470:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == -1 && 1)
+ return 293;
+ goto ret0;
+
+ L1473:
+ x1 = XEXP (x0, 1);
+ if (binary_387_op (x1, DFmode))
+ {
+ ro[3] = x1;
+ goto L1479;
+ }
+ goto ret0;
+
+ L1479:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case DFmode:
+ switch (GET_CODE (x2))
+ {
+ case FLOAT:
+ goto L1480;
+ case FLOAT_EXTEND:
+ goto L1515;
+ case SUBREG:
+ case REG:
+ case MEM:
+ if (nonimmediate_operand (x2, DFmode))
+ {
+ ro[1] = x2;
+ goto L1475;
+ }
+ }
+ }
+ L1520:
+ if (general_operand (x2, DFmode))
+ {
+ ro[1] = x2;
+ goto L1521;
+ }
+ goto ret0;
+
+ L1480:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ goto L1481;
+ }
+ goto ret0;
+
+ L1481:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, DFmode))
+ {
+ ro[2] = x2;
+ if (TARGET_80387)
+ return 295;
+ }
+ goto ret0;
+
+ L1515:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SFmode))
+ {
+ ro[1] = x3;
+ goto L1516;
+ }
+ goto ret0;
+
+ L1516:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, DFmode))
+ {
+ ro[2] = x2;
+ if (TARGET_80387)
+ return 301;
+ }
+ goto ret0;
+
+ L1475:
+ x2 = XEXP (x1, 1);
+ if (nonimmediate_operand (x2, DFmode))
+ {
+ ro[2] = x2;
+ if (TARGET_80387)
+ return 294;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1520;
+
+ L1521:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) != DFmode)
+ goto ret0;
+ switch (GET_CODE (x2))
+ {
+ case FLOAT:
+ goto L1522;
+ case FLOAT_EXTEND:
+ goto L1528;
+ }
+ goto ret0;
+
+ L1522:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SImode))
+ {
+ ro[2] = x3;
+ if (TARGET_80387)
+ return 302;
+ }
+ goto ret0;
+
+ L1528:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SFmode))
+ {
+ ro[2] = x3;
+ if (TARGET_80387)
+ return 303;
+ }
+ goto ret0;
+
+ L1484:
+ x1 = XEXP (x0, 1);
+ if (binary_387_op (x1, XFmode))
+ {
+ ro[3] = x1;
+ goto L1490;
+ }
+ goto ret0;
+
+ L1490:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case XFmode:
+ switch (GET_CODE (x2))
+ {
+ case FLOAT:
+ goto L1491;
+ case FLOAT_EXTEND:
+ goto L1497;
+ case SUBREG:
+ case REG:
+ case MEM:
+ if (nonimmediate_operand (x2, XFmode))
+ {
+ ro[1] = x2;
+ goto L1486;
+ }
+ }
+ }
+ L1502:
+ if (general_operand (x2, XFmode))
+ {
+ ro[1] = x2;
+ goto L1503;
+ }
+ goto ret0;
+
+ L1491:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ goto L1492;
+ }
+ goto ret0;
+
+ L1492:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, XFmode))
+ {
+ ro[2] = x2;
+ if (TARGET_80387)
+ return 297;
+ }
+ goto ret0;
+
+ L1497:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SFmode))
+ {
+ ro[1] = x3;
+ goto L1498;
+ }
+ goto ret0;
+
+ L1498:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, XFmode))
+ {
+ ro[2] = x2;
+ if (TARGET_80387)
+ return 298;
+ }
+ goto ret0;
+
+ L1486:
+ x2 = XEXP (x1, 1);
+ if (nonimmediate_operand (x2, XFmode))
+ {
+ ro[2] = x2;
+ if (TARGET_80387)
+ return 296;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1502;
+
+ L1503:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) != XFmode)
+ goto ret0;
+ switch (GET_CODE (x2))
+ {
+ case FLOAT:
+ goto L1504;
+ case FLOAT_EXTEND:
+ goto L1510;
+ }
+ goto ret0;
+
+ L1504:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SImode))
+ {
+ ro[2] = x3;
+ if (TARGET_80387)
+ return 299;
+ }
+ goto ret0;
+
+ L1510:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SFmode))
+ {
+ ro[2] = x3;
+ if (TARGET_80387)
+ return 300;
+ }
+ goto ret0;
+
+ L1531:
+ x1 = XEXP (x0, 1);
+ if (binary_387_op (x1, SFmode))
+ {
+ ro[3] = x1;
+ goto L1537;
+ }
+ goto ret0;
+
+ L1537:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case SFmode:
+ if (GET_CODE (x2) == FLOAT && 1)
+ goto L1538;
+ if (nonimmediate_operand (x2, SFmode))
+ {
+ ro[1] = x2;
+ goto L1533;
+ }
+ }
+ L1543:
+ if (general_operand (x2, SFmode))
+ {
+ ro[1] = x2;
+ goto L1544;
+ }
+ goto ret0;
+
+ L1538:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ goto L1539;
+ }
+ goto ret0;
+
+ L1539:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, SFmode))
+ {
+ ro[2] = x2;
+ if (TARGET_80387)
+ return 305;
+ }
+ goto ret0;
+
+ L1533:
+ x2 = XEXP (x1, 1);
+ if (nonimmediate_operand (x2, SFmode))
+ {
+ ro[2] = x2;
+ if (TARGET_80387)
+ return 304;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1543;
+
+ L1544:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SFmode && GET_CODE (x2) == FLOAT && 1)
+ goto L1545;
+ goto ret0;
+
+ L1545:
+ x3 = XEXP (x2, 0);
+ if (general_operand (x3, SImode))
+ {
+ ro[2] = x3;
+ if (TARGET_80387)
+ return 306;
+ }
+ goto ret0;
+ ret0: return -1;
+}
+
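+/* Helper split out of `recog' by genrecog so that no one function grows
+   too large to compile.  Like `recog' itself, it returns the insn code
+   number of the pattern matching X0, or -1 if none matches.  This one
+   resumes the match at the source of a cc0-setting SET inside a
+   two-element PARALLEL: the floating point compare patterns, each of
+   which needs a trailing (clobber (match_scratch:HI ...)).  */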
+int
+recog_5 (x0, insn, pnum_clobbers)
+ register rtx x0;
+ rtx insn;
+ int *pnum_clobbers;
+{
+ register rtx *ro = &recog_operand[0];
+ register rtx x1, x2, x3, x4, x5, x6;
+ int tem;
+
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ x3 = XEXP (x2, 0);
+ switch (GET_MODE (x3))
+ {
+ case XFmode:
+ if (GET_CODE (x3) == FLOAT && 1)
+ goto L84;
+ if (nonimmediate_operand (x3, XFmode))
+ {
+ ro[0] = x3;
+ goto L56;
+ }
+ L68:
+ if (register_operand (x3, XFmode))
+ {
+ ro[0] = x3;
+ goto L69;
+ }
+ break;
+ case DFmode:
+ switch (GET_CODE (x3))
+ {
+ case FLOAT:
+ goto L170;
+ case FLOAT_EXTEND:
+ goto L200;
+ case SUBREG:
+ case REG:
+ case MEM:
+ if (nonimmediate_operand (x3, DFmode))
+ {
+ ro[0] = x3;
+ goto L142;
+ }
+ }
+ L154:
+ if (register_operand (x3, DFmode))
+ {
+ ro[0] = x3;
+ goto L155;
+ }
+ break;
+ case SFmode:
+ if (GET_CODE (x3) == FLOAT && 1)
+ goto L256;
+ if (nonimmediate_operand (x3, SFmode))
+ {
+ ro[0] = x3;
+ goto L228;
+ }
+ L240:
+ if (register_operand (x3, SFmode))
+ {
+ ro[0] = x3;
+ goto L241;
+ }
+ }
+ goto ret0;
+
+ L84:
+ x4 = XEXP (x3, 0);
+ if (nonimmediate_operand (x4, SImode))
+ {
+ ro[0] = x4;
+ goto L85;
+ }
+ goto ret0;
+
+ L85:
+ x3 = XEXP (x2, 1);
+ if (register_operand (x3, XFmode))
+ {
+ ro[1] = x3;
+ goto L86;
+ }
+ goto ret0;
+
+ L86:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L87;
+ goto ret0;
+
+ L87:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ return 20;
+ }
+ goto ret0;
+
+ L56:
+ x3 = XEXP (x2, 1);
+ if (nonimmediate_operand (x3, XFmode))
+ {
+ ro[1] = x3;
+ goto L57;
+ }
+ x3 = XEXP (x2, 0);
+ goto L68;
+
+ L57:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L58;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ x3 = XEXP (x2, 0);
+ goto L68;
+
+ L58:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))
+ return 18;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ x3 = XEXP (x2, 0);
+ goto L68;
+
+ L69:
+ x3 = XEXP (x2, 1);
+ if (GET_MODE (x3) != XFmode)
+ goto ret0;
+ switch (GET_CODE (x3))
+ {
+ case FLOAT:
+ goto L70;
+ case FLOAT_EXTEND:
+ goto L100;
+ }
+ goto ret0;
+
+ L70:
+ x4 = XEXP (x3, 0);
+ if (nonimmediate_operand (x4, SImode))
+ {
+ ro[1] = x4;
+ goto L71;
+ }
+ goto ret0;
+
+ L71:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L72;
+ goto ret0;
+
+ L72:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ return 19;
+ }
+ goto ret0;
+
+ L100:
+ x4 = XEXP (x3, 0);
+ switch (GET_MODE (x4))
+ {
+ case DFmode:
+ if (nonimmediate_operand (x4, DFmode))
+ {
+ ro[1] = x4;
+ goto L101;
+ }
+ break;
+ case SFmode:
+ if (nonimmediate_operand (x4, SFmode))
+ {
+ ro[1] = x4;
+ goto L116;
+ }
+ }
+ goto ret0;
+
+ L101:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L102;
+ goto ret0;
+
+ L102:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ return 21;
+ }
+ goto ret0;
+
+ L116:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L117;
+ goto ret0;
+
+ L117:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ return 22;
+ }
+ goto ret0;
+
+ L170:
+ x4 = XEXP (x3, 0);
+ if (nonimmediate_operand (x4, SImode))
+ {
+ ro[0] = x4;
+ goto L171;
+ }
+ goto ret0;
+
+ L171:
+ x3 = XEXP (x2, 1);
+ if (register_operand (x3, DFmode))
+ {
+ ro[1] = x3;
+ goto L172;
+ }
+ goto ret0;
+
+ L172:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L173;
+ goto ret0;
+
+ L173:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ return 26;
+ }
+ goto ret0;
+
+ L200:
+ x4 = XEXP (x3, 0);
+ if (nonimmediate_operand (x4, SFmode))
+ {
+ ro[0] = x4;
+ goto L201;
+ }
+ goto ret0;
+
+ L201:
+ x3 = XEXP (x2, 1);
+ if (register_operand (x3, DFmode))
+ {
+ ro[1] = x3;
+ goto L202;
+ }
+ goto ret0;
+
+ L202:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L203;
+ goto ret0;
+
+ L203:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ return 28;
+ }
+ goto ret0;
+
+ L142:
+ x3 = XEXP (x2, 1);
+ if (nonimmediate_operand (x3, DFmode))
+ {
+ ro[1] = x3;
+ goto L143;
+ }
+ x3 = XEXP (x2, 0);
+ goto L154;
+
+ L143:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L144;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ x3 = XEXP (x2, 0);
+ goto L154;
+
+ L144:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))
+ return 24;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ x3 = XEXP (x2, 0);
+ goto L154;
+
+ L155:
+ x3 = XEXP (x2, 1);
+ if (GET_MODE (x3) != DFmode)
+ goto ret0;
+ switch (GET_CODE (x3))
+ {
+ case FLOAT:
+ goto L156;
+ case FLOAT_EXTEND:
+ goto L186;
+ }
+ goto ret0;
+
+ L156:
+ x4 = XEXP (x3, 0);
+ if (nonimmediate_operand (x4, SImode))
+ {
+ ro[1] = x4;
+ goto L157;
+ }
+ goto ret0;
+
+ L157:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L158;
+ goto ret0;
+
+ L158:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ return 25;
+ }
+ goto ret0;
+
+ L186:
+ x4 = XEXP (x3, 0);
+ if (nonimmediate_operand (x4, SFmode))
+ {
+ ro[1] = x4;
+ goto L187;
+ }
+ goto ret0;
+
+ L187:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L188;
+ goto ret0;
+
+ L188:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ return 27;
+ }
+ goto ret0;
+
+ L256:
+ x4 = XEXP (x3, 0);
+ if (nonimmediate_operand (x4, SImode))
+ {
+ ro[0] = x4;
+ goto L257;
+ }
+ goto ret0;
+
+ L257:
+ x3 = XEXP (x2, 1);
+ if (register_operand (x3, SFmode))
+ {
+ ro[1] = x3;
+ goto L258;
+ }
+ goto ret0;
+
+ L258:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L259;
+ goto ret0;
+
+ L259:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ return 32;
+ }
+ goto ret0;
+
+ L228:
+ x3 = XEXP (x2, 1);
+ if (nonimmediate_operand (x3, SFmode))
+ {
+ ro[1] = x3;
+ goto L229;
+ }
+ x3 = XEXP (x2, 0);
+ goto L240;
+
+ L229:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L230;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ x3 = XEXP (x2, 0);
+ goto L240;
+
+ L230:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))
+ return 30;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ x3 = XEXP (x2, 0);
+ goto L240;
+
+ L241:
+ x3 = XEXP (x2, 1);
+ if (GET_MODE (x3) == SFmode && GET_CODE (x3) == FLOAT && 1)
+ goto L242;
+ goto ret0;
+
+ L242:
+ x4 = XEXP (x3, 0);
+ if (nonimmediate_operand (x4, SImode))
+ {
+ ro[1] = x4;
+ goto L243;
+ }
+ goto ret0;
+
+ L243:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L244;
+ goto ret0;
+
+ L244:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ return 31;
+ }
+ goto ret0;
+ ret0: return -1;
+}
+
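+/* Helper for two-element PARALLELs whose first element is a SET,
+   reached from `recog' through label L336.  Covers, among others, the
+   paired div/mod and udiv/umod patterns, the DImode shifts that clobber
+   their count register, the cc0 compares (partly via recog_5), jumps
+   through dispatch tables, and the call-value forms.  */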
+int
+recog_6 (x0, insn, pnum_clobbers)
+ register rtx x0;
+ rtx insn;
+ int *pnum_clobbers;
+{
+ register rtx *ro = &recog_operand[0];
+ register rtx x1, x2, x3, x4, x5, x6;
+ int tem;
+
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case DFmode:
+ if (register_operand (x2, DFmode))
+ {
+ ro[0] = x2;
+ goto L337;
+ }
+ break;
+ case XFmode:
+ if (register_operand (x2, XFmode))
+ {
+ ro[0] = x2;
+ goto L350;
+ }
+ break;
+ case SFmode:
+ if (nonimmediate_operand (x2, SFmode))
+ {
+ ro[0] = x2;
+ goto L410;
+ }
+ break;
+ case SImode:
+ if (register_operand (x2, SImode))
+ {
+ ro[0] = x2;
+ goto L698;
+ }
+ break;
+ case HImode:
+ if (register_operand (x2, HImode))
+ {
+ ro[0] = x2;
+ goto L709;
+ }
+ break;
+ case DImode:
+ if (register_operand (x2, DImode))
+ {
+ ro[0] = x2;
+ goto L917;
+ }
+ }
+ switch (GET_CODE (x2))
+ {
+ case CC0:
+ goto L12;
+ case PC:
+ goto L1301;
+ }
+ L1358:
+ ro[0] = x2;
+ goto L1359;
+
+ L1548:
+ if (register_operand (x2, SImode))
+ {
+ ro[0] = x2;
+ goto L1549;
+ }
+ goto ret0;
+
+ L337:
+ x2 = XEXP (x1, 1);
+ if (register_operand (x2, DFmode))
+ {
+ ro[1] = x2;
+ goto L338;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L338:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == SET && 1)
+ goto L339;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L339:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[1]) && 1)
+ goto L340;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L340:
+ x2 = XEXP (x1, 1);
+ if (rtx_equal_p (x2, ro[0]) && 1)
+ return 59;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L350:
+ x2 = XEXP (x1, 1);
+ if (register_operand (x2, XFmode))
+ {
+ ro[1] = x2;
+ goto L351;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L351:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == SET && 1)
+ goto L352;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L352:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[1]) && 1)
+ goto L353;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L353:
+ x2 = XEXP (x1, 1);
+ if (rtx_equal_p (x2, ro[0]) && 1)
+ return 62;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L410:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SFmode && GET_CODE (x2) == FLOAT_TRUNCATE && 1)
+ goto L411;
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L411:
+ x3 = XEXP (x2, 0);
+ if (register_operand (x3, DFmode))
+ {
+ ro[1] = x3;
+ goto L412;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L412:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L413;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L413:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SFmode))
+ {
+ ro[2] = x2;
+ if (TARGET_80387)
+ return 78;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L698:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) != SImode)
+ {
+ x2 = XEXP (x1, 0);
+ goto L1358;
+ }
+ switch (GET_CODE (x2))
+ {
+ case DIV:
+ goto L699;
+ case UDIV:
+ goto L721;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L699:
+ x3 = XEXP (x2, 0);
+ if (register_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ goto L700;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L700:
+ x3 = XEXP (x2, 1);
+ if (general_operand (x3, SImode))
+ {
+ ro[2] = x3;
+ goto L701;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L701:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == SET && 1)
+ goto L702;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L702:
+ x2 = XEXP (x1, 0);
+ if (register_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ goto L703;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L703:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == MOD && 1)
+ goto L704;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L704:
+ x3 = XEXP (x2, 0);
+ if (rtx_equal_p (x3, ro[1]) && 1)
+ goto L705;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L705:
+ x3 = XEXP (x2, 1);
+ if (rtx_equal_p (x3, ro[2]) && 1)
+ return 139;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L721:
+ x3 = XEXP (x2, 0);
+ if (register_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ goto L722;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L722:
+ x3 = XEXP (x2, 1);
+ if (general_operand (x3, SImode))
+ {
+ ro[2] = x3;
+ goto L723;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L723:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == SET && 1)
+ goto L724;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L724:
+ x2 = XEXP (x1, 0);
+ if (register_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ goto L725;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L725:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == UMOD && 1)
+ goto L726;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L726:
+ x3 = XEXP (x2, 0);
+ if (rtx_equal_p (x3, ro[1]) && 1)
+ goto L727;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L727:
+ x3 = XEXP (x2, 1);
+ if (rtx_equal_p (x3, ro[2]) && 1)
+ return 141;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L709:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) != HImode)
+ {
+ x2 = XEXP (x1, 0);
+ goto L1358;
+ }
+ switch (GET_CODE (x2))
+ {
+ case DIV:
+ goto L710;
+ case UDIV:
+ goto L732;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L710:
+ x3 = XEXP (x2, 0);
+ if (register_operand (x3, HImode))
+ {
+ ro[1] = x3;
+ goto L711;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L711:
+ x3 = XEXP (x2, 1);
+ if (general_operand (x3, HImode))
+ {
+ ro[2] = x3;
+ goto L712;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L712:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == SET && 1)
+ goto L713;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L713:
+ x2 = XEXP (x1, 0);
+ if (register_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ goto L714;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L714:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == HImode && GET_CODE (x2) == MOD && 1)
+ goto L715;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L715:
+ x3 = XEXP (x2, 0);
+ if (rtx_equal_p (x3, ro[1]) && 1)
+ goto L716;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L716:
+ x3 = XEXP (x2, 1);
+ if (rtx_equal_p (x3, ro[2]) && 1)
+ return 140;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L732:
+ x3 = XEXP (x2, 0);
+ if (register_operand (x3, HImode))
+ {
+ ro[1] = x3;
+ goto L733;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L733:
+ x3 = XEXP (x2, 1);
+ if (general_operand (x3, HImode))
+ {
+ ro[2] = x3;
+ goto L734;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L734:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == SET && 1)
+ goto L735;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L735:
+ x2 = XEXP (x1, 0);
+ if (register_operand (x2, HImode))
+ {
+ ro[3] = x2;
+ goto L736;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L736:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == HImode && GET_CODE (x2) == UMOD && 1)
+ goto L737;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L737:
+ x3 = XEXP (x2, 0);
+ if (rtx_equal_p (x3, ro[1]) && 1)
+ goto L738;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L738:
+ x3 = XEXP (x2, 1);
+ if (rtx_equal_p (x3, ro[2]) && 1)
+ return 142;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L917:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) != DImode)
+ {
+ x2 = XEXP (x1, 0);
+ goto L1358;
+ }
+ switch (GET_CODE (x2))
+ {
+ case ASHIFT:
+ goto L918;
+ case ASHIFTRT:
+ goto L946;
+ case LSHIFTRT:
+ goto L974;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L918:
+ x3 = XEXP (x2, 0);
+ if (register_operand (x3, DImode))
+ {
+ ro[1] = x3;
+ goto L919;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L919:
+ x3 = XEXP (x2, 1);
+ if (register_operand (x3, QImode))
+ {
+ ro[2] = x3;
+ goto L920;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L920:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L921;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L921:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[2]) && 1)
+ return 183;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L946:
+ x3 = XEXP (x2, 0);
+ if (register_operand (x3, DImode))
+ {
+ ro[1] = x3;
+ goto L947;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L947:
+ x3 = XEXP (x2, 1);
+ if (register_operand (x3, QImode))
+ {
+ ro[2] = x3;
+ goto L948;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L948:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L949;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L949:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[2]) && 1)
+ return 189;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L974:
+ x3 = XEXP (x2, 0);
+ if (register_operand (x3, DImode))
+ {
+ ro[1] = x3;
+ goto L975;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L975:
+ x3 = XEXP (x2, 1);
+ if (register_operand (x3, QImode))
+ {
+ ro[2] = x3;
+ goto L976;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L976:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L977;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L977:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[2]) && 1)
+ return 195;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
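+  /* cc0-setting SETs: plain register compares for the three floating
+     point modes, then the compare operators handled by recog_5, then
+     the CCFPEQmode compares.  */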
+ L12:
+ x2 = XEXP (x1, 1);
+ switch (GET_MODE (x2))
+ {
+ case SFmode:
+ if (register_operand (x2, SFmode))
+ {
+ ro[0] = x2;
+ goto L13;
+ }
+ break;
+ case DFmode:
+ if (register_operand (x2, DFmode))
+ {
+ ro[0] = x2;
+ goto L22;
+ }
+ break;
+ case XFmode:
+ if (register_operand (x2, XFmode))
+ {
+ ro[0] = x2;
+ goto L31;
+ }
+ }
+ L54:
+ if (VOIDmode_compare_op (x2, VOIDmode))
+ {
+ ro[2] = x2;
+ goto L83;
+ }
+ L127:
+ if (GET_MODE (x2) == CCFPEQmode && GET_CODE (x2) == COMPARE && 1)
+ goto L128;
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L13:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L14;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ goto L54;
+
+ L14:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387 && ! TARGET_IEEE_FP)
+ return 6;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ goto L54;
+
+ L22:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L23;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ goto L54;
+
+ L23:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387 && ! TARGET_IEEE_FP)
+ return 8;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ goto L54;
+
+ L31:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L32;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ goto L54;
+
+ L32:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[1] = x2;
+ if (TARGET_80387 && ! TARGET_IEEE_FP)
+ return 10;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ goto L54;
+
+ L83:
+ tem = recog_5 (x0, insn, pnum_clobbers);
+ if (tem >= 0) return tem;
+ goto L127;
+
+ L128:
+ x3 = XEXP (x2, 0);
+ switch (GET_MODE (x3))
+ {
+ case XFmode:
+ if (register_operand (x3, XFmode))
+ {
+ ro[0] = x3;
+ goto L129;
+ }
+ break;
+ case DFmode:
+ if (register_operand (x3, DFmode))
+ {
+ ro[0] = x3;
+ goto L215;
+ }
+ break;
+ case SFmode:
+ if (register_operand (x3, SFmode))
+ {
+ ro[0] = x3;
+ goto L271;
+ }
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L129:
+ x3 = XEXP (x2, 1);
+ if (register_operand (x3, XFmode))
+ {
+ ro[1] = x3;
+ goto L130;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L130:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L131;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L131:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[2] = x2;
+ if (TARGET_80387)
+ return 23;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L215:
+ x3 = XEXP (x2, 1);
+ if (register_operand (x3, DFmode))
+ {
+ ro[1] = x3;
+ goto L216;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L216:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L217;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L217:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[2] = x2;
+ if (TARGET_80387)
+ return 29;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L271:
+ x3 = XEXP (x2, 1);
+ if (register_operand (x3, SFmode))
+ {
+ ro[1] = x3;
+ goto L272;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L272:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L273;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L273:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, HImode))
+ {
+ ro[2] = x2;
+ if (TARGET_80387)
+ return 33;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L1301:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == MINUS && 1)
+ goto L1302;
+ if (general_operand (x2, SImode))
+ {
+ ro[0] = x2;
+ goto L1327;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L1302:
+ x3 = XEXP (x2, 0);
+ if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 3 && 1)
+ goto L1303;
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L1303:
+ x3 = XEXP (x2, 1);
+ if (GET_MODE (x3) == SImode && GET_CODE (x3) == MEM && 1)
+ goto L1304;
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L1304:
+ x4 = XEXP (x3, 0);
+ if (GET_MODE (x4) == SImode && GET_CODE (x4) == PLUS && 1)
+ goto L1305;
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L1305:
+ x5 = XEXP (x4, 0);
+ if (GET_MODE (x5) == SImode && GET_CODE (x5) == MULT && 1)
+ goto L1306;
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L1306:
+ x6 = XEXP (x5, 0);
+ if (register_operand (x6, SImode))
+ {
+ ro[0] = x6;
+ goto L1307;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L1307:
+ x6 = XEXP (x5, 1);
+ if (GET_CODE (x6) == CONST_INT && XWINT (x6, 0) == 4 && 1)
+ goto L1308;
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L1308:
+ x5 = XEXP (x4, 1);
+ if (GET_CODE (x5) == LABEL_REF && 1)
+ goto L1309;
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L1309:
+ x6 = XEXP (x5, 0);
+ ro[1] = x6;
+ goto L1310;
+
+ L1310:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L1311;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L1311:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ return 264;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L1327:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == USE && 1)
+ goto L1328;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L1328:
+ x2 = XEXP (x1, 0);
+ if (GET_CODE (x2) == LABEL_REF && 1)
+ goto L1329;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1358;
+
+ L1329:
+ x3 = XEXP (x2, 0);
+ ro[1] = x3;
+ return 265;
+
+ L1359:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CALL && 1)
+ goto L1371;
+ x2 = XEXP (x1, 0);
+ goto L1548;
+
+ L1371:
+ x3 = XEXP (x2, 0);
+ if (GET_MODE (x3) == QImode && GET_CODE (x3) == MEM && 1)
+ goto L1372;
+ L1360:
+ if (call_insn_operand (x3, QImode))
+ {
+ ro[1] = x3;
+ goto L1361;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1548;
+
+ L1372:
+ x4 = XEXP (x3, 0);
+ if (symbolic_operand (x4, SImode))
+ {
+ ro[1] = x4;
+ goto L1373;
+ }
+ goto L1360;
+
+ L1373:
+ x3 = XEXP (x2, 1);
+ if (general_operand (x3, SImode))
+ {
+ ro[2] = x3;
+ goto L1374;
+ }
+ x3 = XEXP (x2, 0);
+ goto L1360;
+
+ L1374:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == SET && 1)
+ goto L1375;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ x3 = XEXP (x2, 0);
+ goto L1360;
+
+ L1375:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7 && 1)
+ goto L1376;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ x3 = XEXP (x2, 0);
+ goto L1360;
+
+ L1376:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS && 1)
+ goto L1377;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ x3 = XEXP (x2, 0);
+ goto L1360;
+
+ L1377:
+ x3 = XEXP (x2, 0);
+ if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7 && 1)
+ goto L1378;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ x3 = XEXP (x2, 0);
+ goto L1360;
+
+ L1378:
+ x3 = XEXP (x2, 1);
+ if (immediate_operand (x3, SImode))
+ {
+ ro[4] = x3;
+ if (!HALF_PIC_P ())
+ return 274;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 1);
+ x3 = XEXP (x2, 0);
+ goto L1360;
+
+ L1361:
+ x3 = XEXP (x2, 1);
+ if (general_operand (x3, SImode))
+ {
+ ro[2] = x3;
+ goto L1362;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1548;
+
+ L1362:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == SET && 1)
+ goto L1363;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1548;
+
+ L1363:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7 && 1)
+ goto L1364;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1548;
+
+ L1364:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS && 1)
+ goto L1365;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1548;
+
+ L1365:
+ x3 = XEXP (x2, 0);
+ if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7 && 1)
+ goto L1366;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1548;
+
+ L1366:
+ x3 = XEXP (x2, 1);
+ if (immediate_operand (x3, SImode))
+ {
+ ro[4] = x3;
+ return 273;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1548;
+
+ L1549:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == UNSPEC && XINT (x2, 1) == 0 && XVECLEN (x2, 0) == 3 && 1)
+ goto L1550;
+ goto ret0;
+
+ L1550:
+ x3 = XVECEXP (x2, 0, 0);
+ if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM && 1)
+ goto L1551;
+ goto ret0;
+
+ L1551:
+ x4 = XEXP (x3, 0);
+ if (address_operand (x4, SImode))
+ {
+ ro[1] = x4;
+ goto L1552;
+ }
+ goto ret0;
+
+ L1552:
+ x3 = XVECEXP (x2, 0, 1);
+ if (register_operand (x3, QImode))
+ {
+ ro[2] = x3;
+ goto L1553;
+ }
+ goto ret0;
+
+ L1553:
+ x3 = XVECEXP (x2, 0, 2);
+ if (immediate_operand (x3, SImode))
+ {
+ ro[3] = x3;
+ goto L1554;
+ }
+ goto ret0;
+
+ L1554:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L1555;
+ goto ret0;
+
+ L1555:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[1]) && 1)
+ return 308;
+ goto ret0;
+ ret0: return -1;
+}
+
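+/* Helper for the four-element PARALLEL fix-truncation patterns: a
+   DImode or SImode SET of (fix (fix ...)) on an XF, DF or SF value,
+   followed by the clobbers those insns require.  Returns the insn code
+   number, or -1 if nothing matches.  */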
+int
+recog_7 (x0, insn, pnum_clobbers)
+ register rtx x0;
+ rtx insn;
+ int *pnum_clobbers;
+{
+ register rtx *ro = &recog_operand[0];
+ register rtx x1, x2, x3, x4, x5, x6;
+ int tem;
+
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case DImode:
+ if (general_operand (x2, DImode))
+ {
+ ro[0] = x2;
+ goto L439;
+ }
+ break;
+ case SImode:
+ if (general_operand (x2, SImode))
+ {
+ ro[0] = x2;
+ goto L503;
+ }
+ }
+ goto ret0;
+
+ L439:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == DImode && GET_CODE (x2) == FIX && 1)
+ goto L440;
+ goto ret0;
+
+ L440:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) != FIX)
+ goto ret0;
+ switch (GET_MODE (x3))
+ {
+ case XFmode:
+ goto L441;
+ case DFmode:
+ goto L467;
+ case SFmode:
+ goto L493;
+ }
+ goto ret0;
+
+ L441:
+ x4 = XEXP (x3, 0);
+ if (register_operand (x4, XFmode))
+ {
+ ro[1] = x4;
+ goto L442;
+ }
+ goto ret0;
+
+ L442:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L443;
+ goto ret0;
+
+ L443:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[1]) && 1)
+ goto L444;
+ goto ret0;
+
+ L444:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L445;
+ goto ret0;
+
+ L445:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L446;
+ }
+ goto ret0;
+
+ L446:
+ x1 = XVECEXP (x0, 0, 3);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L447;
+ goto ret0;
+
+ L447:
+ x2 = XEXP (x1, 0);
+ if (pnum_clobbers != 0 && memory_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 87;
+ }
+ }
+ goto ret0;
+
+ L467:
+ x4 = XEXP (x3, 0);
+ if (register_operand (x4, DFmode))
+ {
+ ro[1] = x4;
+ goto L468;
+ }
+ goto ret0;
+
+ L468:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L469;
+ goto ret0;
+
+ L469:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[1]) && 1)
+ goto L470;
+ goto ret0;
+
+ L470:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L471;
+ goto ret0;
+
+ L471:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L472;
+ }
+ goto ret0;
+
+ L472:
+ x1 = XVECEXP (x0, 0, 3);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L473;
+ goto ret0;
+
+ L473:
+ x2 = XEXP (x1, 0);
+ if (pnum_clobbers != 0 && memory_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 88;
+ }
+ }
+ goto ret0;
+
+ L493:
+ x4 = XEXP (x3, 0);
+ if (register_operand (x4, SFmode))
+ {
+ ro[1] = x4;
+ goto L494;
+ }
+ goto ret0;
+
+ L494:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L495;
+ goto ret0;
+
+ L495:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[1]) && 1)
+ goto L496;
+ goto ret0;
+
+ L496:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L497;
+ goto ret0;
+
+ L497:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L498;
+ }
+ goto ret0;
+
+ L498:
+ x1 = XVECEXP (x0, 0, 3);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L499;
+ goto ret0;
+
+ L499:
+ x2 = XEXP (x1, 0);
+ if (pnum_clobbers != 0 && memory_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 89;
+ }
+ }
+ goto ret0;
+
+ L503:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == FIX && 1)
+ goto L504;
+ goto ret0;
+
+ L504:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) != FIX)
+ goto ret0;
+ switch (GET_MODE (x3))
+ {
+ case XFmode:
+ goto L505;
+ case DFmode:
+ goto L527;
+ case SFmode:
+ goto L549;
+ }
+ goto ret0;
+
+ L505:
+ x4 = XEXP (x3, 0);
+ if (register_operand (x4, XFmode))
+ {
+ ro[1] = x4;
+ goto L506;
+ }
+ goto ret0;
+
+ L506:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L507;
+ goto ret0;
+
+ L507:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L508;
+ }
+ goto ret0;
+
+ L508:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L509;
+ goto ret0;
+
+ L509:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ goto L510;
+ }
+ goto ret0;
+
+ L510:
+ x1 = XVECEXP (x0, 0, 3);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L511;
+ goto ret0;
+
+ L511:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, SImode))
+ {
+ ro[4] = x2;
+ if (TARGET_80387)
+ return 93;
+ }
+ goto ret0;
+
+ L527:
+ x4 = XEXP (x3, 0);
+ if (register_operand (x4, DFmode))
+ {
+ ro[1] = x4;
+ goto L528;
+ }
+ goto ret0;
+
+ L528:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L529;
+ goto ret0;
+
+ L529:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L530;
+ }
+ goto ret0;
+
+ L530:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L531;
+ goto ret0;
+
+ L531:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ goto L532;
+ }
+ goto ret0;
+
+ L532:
+ x1 = XVECEXP (x0, 0, 3);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L533;
+ goto ret0;
+
+ L533:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, SImode))
+ {
+ ro[4] = x2;
+ if (TARGET_80387)
+ return 94;
+ }
+ goto ret0;
+
+ L549:
+ x4 = XEXP (x3, 0);
+ if (register_operand (x4, SFmode))
+ {
+ ro[1] = x4;
+ goto L550;
+ }
+ goto ret0;
+
+ L550:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L551;
+ goto ret0;
+
+ L551:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L552;
+ }
+ goto ret0;
+
+ L552:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L553;
+ goto ret0;
+
+ L553:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ goto L554;
+ }
+ goto ret0;
+
+ L554:
+ x1 = XVECEXP (x0, 0, 3);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L555;
+ goto ret0;
+
+ L555:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, SImode))
+ {
+ ro[4] = x2;
+ if (TARGET_80387)
+ return 95;
+ }
+ goto ret0;
+ ret0: return -1;
+}
+
+int
+recog (x0, insn, pnum_clobbers)
+ register rtx x0;
+ rtx insn;
+ int *pnum_clobbers;
+{
+ register rtx *ro = &recog_operand[0];
+ register rtx x1, x2, x3, x4, x5, x6;
+ int tem;
+
+ L1403:
+ switch (GET_CODE (x0))
+ {
+ case UNSPEC:
+ if (GET_MODE (x0) == SImode && XINT (x0, 1) == 0 && XVECLEN (x0, 0) == 1 && 1)
+ goto L1404;
+ break;
+ case SET:
+ goto L295;
+ case PARALLEL:
+ if (XVECLEN (x0, 0) == 2 && 1)
+ goto L10;
+ if (XVECLEN (x0, 0) == 5 && 1)
+ goto L423;
+ if (XVECLEN (x0, 0) == 4 && 1)
+ goto L437;
+ if (XVECLEN (x0, 0) == 3 && 1)
+ goto L513;
+ if (XVECLEN (x0, 0) == 6 && 1)
+ goto L1408;
+ break;
+ case CALL:
+ goto L1350;
+ case RETURN:
+ if (simple_386_epilogue ())
+ return 283;
+ break;
+ case CONST_INT:
+ if (XWINT (x0, 0) == 0 && 1)
+ return 284;
+ }
+ goto ret0;
+
+ L1404:
+ x1 = XVECEXP (x0, 0, 0);
+ if (memory_operand (x1, SImode))
+ {
+ ro[0] = x1;
+ return 282;
+ }
+ goto ret0;
+ L295:
+ return recog_4 (x0, insn, pnum_clobbers);
+
+ L10:
+ x1 = XVECEXP (x0, 0, 0);
+ switch (GET_CODE (x1))
+ {
+ case SET:
+ goto L336;
+ case CALL:
+ goto L1341;
+ }
+ goto ret0;
+ L336:
+ return recog_6 (x0, insn, pnum_clobbers);
+
+ L1341:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == QImode && GET_CODE (x2) == MEM && 1)
+ goto L1342;
+ L1332:
+ if (call_insn_operand (x2, QImode))
+ {
+ ro[0] = x2;
+ goto L1333;
+ }
+ goto ret0;
+
+ L1342:
+ x3 = XEXP (x2, 0);
+ if (symbolic_operand (x3, SImode))
+ {
+ ro[0] = x3;
+ goto L1343;
+ }
+ goto L1332;
+
+ L1343:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ goto L1344;
+ }
+ x2 = XEXP (x1, 0);
+ goto L1332;
+
+ L1344:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == SET && 1)
+ goto L1345;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1332;
+
+ L1345:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7 && 1)
+ goto L1346;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1332;
+
+ L1346:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS && 1)
+ goto L1347;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1332;
+
+ L1347:
+ x3 = XEXP (x2, 0);
+ if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7 && 1)
+ goto L1348;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1332;
+
+ L1348:
+ x3 = XEXP (x2, 1);
+ if (immediate_operand (x3, SImode))
+ {
+ ro[3] = x3;
+ if (!HALF_PIC_P ())
+ return 268;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1332;
+
+ L1333:
+ x2 = XEXP (x1, 1);
+ if (general_operand (x2, SImode))
+ {
+ ro[1] = x2;
+ goto L1334;
+ }
+ goto ret0;
+
+ L1334:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == SET && 1)
+ goto L1335;
+ goto ret0;
+
+ L1335:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7 && 1)
+ goto L1336;
+ goto ret0;
+
+ L1336:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS && 1)
+ goto L1337;
+ goto ret0;
+
+ L1337:
+ x3 = XEXP (x2, 0);
+ if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7 && 1)
+ goto L1338;
+ goto ret0;
+
+ L1338:
+ x3 = XEXP (x2, 1);
+ if (immediate_operand (x3, SImode))
+ {
+ ro[3] = x3;
+ return 267;
+ }
+ goto ret0;
+
+ L423:
+ x1 = XVECEXP (x0, 0, 0);
+ if (GET_CODE (x1) == SET && 1)
+ goto L424;
+ goto ret0;
+
+ L424:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == DImode && general_operand (x2, DImode))
+ {
+ ro[0] = x2;
+ goto L425;
+ }
+ goto ret0;
+
+ L425:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == DImode && GET_CODE (x2) == FIX && 1)
+ goto L426;
+ goto ret0;
+
+ L426:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) != FIX)
+ goto ret0;
+ switch (GET_MODE (x3))
+ {
+ case XFmode:
+ goto L427;
+ case DFmode:
+ goto L453;
+ case SFmode:
+ goto L479;
+ }
+ goto ret0;
+
+ L427:
+ x4 = XEXP (x3, 0);
+ if (register_operand (x4, XFmode))
+ {
+ ro[1] = x4;
+ goto L428;
+ }
+ goto ret0;
+
+ L428:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L429;
+ goto ret0;
+
+ L429:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[1]) && 1)
+ goto L430;
+ goto ret0;
+
+ L430:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L431;
+ goto ret0;
+
+ L431:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L432;
+ }
+ goto ret0;
+
+ L432:
+ x1 = XVECEXP (x0, 0, 3);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L433;
+ goto ret0;
+
+ L433:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ goto L434;
+ }
+ goto ret0;
+
+ L434:
+ x1 = XVECEXP (x0, 0, 4);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L435;
+ goto ret0;
+
+ L435:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, SImode))
+ {
+ ro[4] = x2;
+ if (TARGET_80387)
+ return 87;
+ }
+ goto ret0;
+
+ L453:
+ x4 = XEXP (x3, 0);
+ if (register_operand (x4, DFmode))
+ {
+ ro[1] = x4;
+ goto L454;
+ }
+ goto ret0;
+
+ L454:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L455;
+ goto ret0;
+
+ L455:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[1]) && 1)
+ goto L456;
+ goto ret0;
+
+ L456:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L457;
+ goto ret0;
+
+ L457:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L458;
+ }
+ goto ret0;
+
+ L458:
+ x1 = XVECEXP (x0, 0, 3);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L459;
+ goto ret0;
+
+ L459:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ goto L460;
+ }
+ goto ret0;
+
+ L460:
+ x1 = XVECEXP (x0, 0, 4);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L461;
+ goto ret0;
+
+ L461:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, SImode))
+ {
+ ro[4] = x2;
+ if (TARGET_80387)
+ return 88;
+ }
+ goto ret0;
+
+ L479:
+ x4 = XEXP (x3, 0);
+ if (register_operand (x4, SFmode))
+ {
+ ro[1] = x4;
+ goto L480;
+ }
+ goto ret0;
+
+ L480:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L481;
+ goto ret0;
+
+ L481:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[1]) && 1)
+ goto L482;
+ goto ret0;
+
+ L482:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L483;
+ goto ret0;
+
+ L483:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L484;
+ }
+ goto ret0;
+
+ L484:
+ x1 = XVECEXP (x0, 0, 3);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L485;
+ goto ret0;
+
+ L485:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ goto L486;
+ }
+ goto ret0;
+
+ L486:
+ x1 = XVECEXP (x0, 0, 4);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L487;
+ goto ret0;
+
+ L487:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, SImode))
+ {
+ ro[4] = x2;
+ if (TARGET_80387)
+ return 89;
+ }
+ goto ret0;
+
+ L437:
+ x1 = XVECEXP (x0, 0, 0);
+ if (GET_CODE (x1) == SET && 1)
+ goto L438;
+ goto ret0;
+ L438:
+ return recog_7 (x0, insn, pnum_clobbers);
+
+ L513:
+ x1 = XVECEXP (x0, 0, 0);
+ switch (GET_CODE (x1))
+ {
+ case SET:
+ goto L514;
+ case CALL:
+ goto L1398;
+ }
+ goto ret0;
+
+ L514:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == SImode && general_operand (x2, SImode))
+ {
+ ro[0] = x2;
+ goto L515;
+ }
+ goto ret0;
+
+ L515:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == FIX && 1)
+ goto L516;
+ goto ret0;
+
+ L516:
+ x3 = XEXP (x2, 0);
+ if (GET_CODE (x3) != FIX)
+ goto ret0;
+ switch (GET_MODE (x3))
+ {
+ case XFmode:
+ goto L517;
+ case DFmode:
+ goto L539;
+ case SFmode:
+ goto L561;
+ }
+ goto ret0;
+
+ L517:
+ x4 = XEXP (x3, 0);
+ if (register_operand (x4, XFmode))
+ {
+ ro[1] = x4;
+ goto L518;
+ }
+ goto ret0;
+
+ L518:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L519;
+ goto ret0;
+
+ L519:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L520;
+ }
+ goto ret0;
+
+ L520:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L521;
+ goto ret0;
+
+ L521:
+ x2 = XEXP (x1, 0);
+ if (pnum_clobbers != 0 && memory_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 93;
+ }
+ }
+ goto ret0;
+
+ L539:
+ x4 = XEXP (x3, 0);
+ if (register_operand (x4, DFmode))
+ {
+ ro[1] = x4;
+ goto L540;
+ }
+ goto ret0;
+
+ L540:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L541;
+ goto ret0;
+
+ L541:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L542;
+ }
+ goto ret0;
+
+ L542:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L543;
+ goto ret0;
+
+ L543:
+ x2 = XEXP (x1, 0);
+ if (pnum_clobbers != 0 && memory_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 94;
+ }
+ }
+ goto ret0;
+
+ L561:
+ x4 = XEXP (x3, 0);
+ if (register_operand (x4, SFmode))
+ {
+ ro[1] = x4;
+ goto L562;
+ }
+ goto ret0;
+
+ L562:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L563;
+ goto ret0;
+
+ L563:
+ x2 = XEXP (x1, 0);
+ if (memory_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L564;
+ }
+ goto ret0;
+
+ L564:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L565;
+ goto ret0;
+
+ L565:
+ x2 = XEXP (x1, 0);
+ if (pnum_clobbers != 0 && memory_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ if (TARGET_80387)
+ {
+ *pnum_clobbers = 1;
+ return 95;
+ }
+ }
+ goto ret0;
+
+ L1398:
+ x2 = XEXP (x1, 0);
+ if (GET_MODE (x2) == QImode && GET_CODE (x2) == MEM && 1)
+ goto L1399;
+ L1392:
+ if (call_insn_operand (x2, QImode))
+ {
+ ro[0] = x2;
+ goto L1393;
+ }
+ goto ret0;
+
+ L1399:
+ x3 = XEXP (x2, 0);
+ if (symbolic_operand (x3, SImode))
+ {
+ ro[0] = x3;
+ goto L1400;
+ }
+ goto L1392;
+
+ L1400:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 0 && 1)
+ goto L1401;
+ x2 = XEXP (x1, 0);
+ goto L1392;
+
+ L1401:
+ x1 = XVECEXP (x0, 0, 1);
+ if (memory_operand (x1, DImode))
+ {
+ ro[1] = x1;
+ goto L1402;
+ }
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1392;
+
+ L1402:
+ x1 = XVECEXP (x0, 0, 2);
+ ro[2] = x1;
+ if (!HALF_PIC_P ())
+ return 280;
+ x1 = XVECEXP (x0, 0, 0);
+ x2 = XEXP (x1, 0);
+ goto L1392;
+
+ L1393:
+ x2 = XEXP (x1, 1);
+ if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 0 && 1)
+ goto L1394;
+ goto ret0;
+
+ L1394:
+ x1 = XVECEXP (x0, 0, 1);
+ if (memory_operand (x1, DImode))
+ {
+ ro[1] = x1;
+ goto L1395;
+ }
+ goto ret0;
+
+ L1395:
+ x1 = XVECEXP (x0, 0, 2);
+ ro[2] = x1;
+ return 279;
+
+ L1408:
+ x1 = XVECEXP (x0, 0, 0);
+ if (GET_CODE (x1) == SET && 1)
+ goto L1409;
+ goto ret0;
+
+ L1409:
+ x2 = XEXP (x1, 0);
+ switch (GET_MODE (x2))
+ {
+ case BLKmode:
+ if (GET_CODE (x2) == MEM && 1)
+ goto L1410;
+ break;
+ case SImode:
+ if (general_operand (x2, SImode))
+ {
+ ro[0] = x2;
+ goto L1426;
+ }
+ }
+ if (GET_CODE (x2) == CC0 && 1)
+ goto L1444;
+ goto ret0;
+
+ L1410:
+ x3 = XEXP (x2, 0);
+ if (address_operand (x3, SImode))
+ {
+ ro[0] = x3;
+ goto L1411;
+ }
+ goto ret0;
+
+ L1411:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM && 1)
+ goto L1412;
+ goto ret0;
+
+ L1412:
+ x3 = XEXP (x2, 0);
+ if (address_operand (x3, SImode))
+ {
+ ro[1] = x3;
+ goto L1413;
+ }
+ goto ret0;
+
+ L1413:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == USE && 1)
+ goto L1414;
+ goto ret0;
+
+ L1414:
+ x2 = XEXP (x1, 0);
+ if (GET_CODE (x2) == CONST_INT && 1)
+ {
+ ro[2] = x2;
+ goto L1415;
+ }
+ goto ret0;
+
+ L1415:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == USE && 1)
+ goto L1416;
+ goto ret0;
+
+ L1416:
+ x2 = XEXP (x1, 0);
+ if (immediate_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ goto L1417;
+ }
+ goto ret0;
+
+ L1417:
+ x1 = XVECEXP (x0, 0, 3);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L1418;
+ goto ret0;
+
+ L1418:
+ x2 = XEXP (x1, 0);
+ if (scratch_operand (x2, SImode))
+ {
+ ro[4] = x2;
+ goto L1419;
+ }
+ goto ret0;
+
+ L1419:
+ x1 = XVECEXP (x0, 0, 4);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L1420;
+ goto ret0;
+
+ L1420:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[0]) && 1)
+ goto L1421;
+ goto ret0;
+
+ L1421:
+ x1 = XVECEXP (x0, 0, 5);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L1422;
+ goto ret0;
+
+ L1422:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[1]) && 1)
+ return 286;
+ goto ret0;
+
+ L1426:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == COMPARE && 1)
+ goto L1427;
+ goto ret0;
+
+ L1427:
+ x3 = XEXP (x2, 0);
+ if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM && 1)
+ goto L1428;
+ goto ret0;
+
+ L1428:
+ x4 = XEXP (x3, 0);
+ if (address_operand (x4, SImode))
+ {
+ ro[1] = x4;
+ goto L1429;
+ }
+ goto ret0;
+
+ L1429:
+ x3 = XEXP (x2, 1);
+ if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM && 1)
+ goto L1430;
+ goto ret0;
+
+ L1430:
+ x4 = XEXP (x3, 0);
+ if (address_operand (x4, SImode))
+ {
+ ro[2] = x4;
+ goto L1431;
+ }
+ goto ret0;
+
+ L1431:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == USE && 1)
+ goto L1432;
+ goto ret0;
+
+ L1432:
+ x2 = XEXP (x1, 0);
+ if (register_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ goto L1433;
+ }
+ goto ret0;
+
+ L1433:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == USE && 1)
+ goto L1434;
+ goto ret0;
+
+ L1434:
+ x2 = XEXP (x1, 0);
+ if (immediate_operand (x2, SImode))
+ {
+ ro[4] = x2;
+ goto L1435;
+ }
+ goto ret0;
+
+ L1435:
+ x1 = XVECEXP (x0, 0, 3);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L1436;
+ goto ret0;
+
+ L1436:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[1]) && 1)
+ goto L1437;
+ goto ret0;
+
+ L1437:
+ x1 = XVECEXP (x0, 0, 4);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L1438;
+ goto ret0;
+
+ L1438:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[2]) && 1)
+ goto L1439;
+ goto ret0;
+
+ L1439:
+ x1 = XVECEXP (x0, 0, 5);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L1440;
+ goto ret0;
+
+ L1440:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[3]) && 1)
+ return 288;
+ goto ret0;
+
+ L1444:
+ x2 = XEXP (x1, 1);
+ if (GET_MODE (x2) == SImode && GET_CODE (x2) == COMPARE && 1)
+ goto L1445;
+ goto ret0;
+
+ L1445:
+ x3 = XEXP (x2, 0);
+ if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM && 1)
+ goto L1446;
+ goto ret0;
+
+ L1446:
+ x4 = XEXP (x3, 0);
+ if (address_operand (x4, SImode))
+ {
+ ro[0] = x4;
+ goto L1447;
+ }
+ goto ret0;
+
+ L1447:
+ x3 = XEXP (x2, 1);
+ if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM && 1)
+ goto L1448;
+ goto ret0;
+
+ L1448:
+ x4 = XEXP (x3, 0);
+ if (address_operand (x4, SImode))
+ {
+ ro[1] = x4;
+ goto L1449;
+ }
+ goto ret0;
+
+ L1449:
+ x1 = XVECEXP (x0, 0, 1);
+ if (GET_CODE (x1) == USE && 1)
+ goto L1450;
+ goto ret0;
+
+ L1450:
+ x2 = XEXP (x1, 0);
+ if (register_operand (x2, SImode))
+ {
+ ro[2] = x2;
+ goto L1451;
+ }
+ goto ret0;
+
+ L1451:
+ x1 = XVECEXP (x0, 0, 2);
+ if (GET_CODE (x1) == USE && 1)
+ goto L1452;
+ goto ret0;
+
+ L1452:
+ x2 = XEXP (x1, 0);
+ if (immediate_operand (x2, SImode))
+ {
+ ro[3] = x2;
+ goto L1453;
+ }
+ goto ret0;
+
+ L1453:
+ x1 = XVECEXP (x0, 0, 3);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L1454;
+ goto ret0;
+
+ L1454:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[0]) && 1)
+ goto L1455;
+ goto ret0;
+
+ L1455:
+ x1 = XVECEXP (x0, 0, 4);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L1456;
+ goto ret0;
+
+ L1456:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[1]) && 1)
+ goto L1457;
+ goto ret0;
+
+ L1457:
+ x1 = XVECEXP (x0, 0, 5);
+ if (GET_CODE (x1) == CLOBBER && 1)
+ goto L1458;
+ goto ret0;
+
+ L1458:
+ x2 = XEXP (x1, 0);
+ if (rtx_equal_p (x2, ro[2]) && 1)
+ return 289;
+ goto ret0;
+
+ L1350:
+ x1 = XEXP (x0, 0);
+ if (call_insn_operand (x1, QImode))
+ {
+ ro[0] = x1;
+ goto L1351;
+ }
+ L1353:
+ if (GET_MODE (x1) == QImode && GET_CODE (x1) == MEM && 1)
+ goto L1354;
+ goto ret0;
+
+ L1351:
+ x1 = XEXP (x0, 1);
+ if (general_operand (x1, SImode))
+ {
+ ro[1] = x1;
+ return 270;
+ }
+ x1 = XEXP (x0, 0);
+ goto L1353;
+
+ L1354:
+ x2 = XEXP (x1, 0);
+ if (symbolic_operand (x2, SImode))
+ {
+ ro[0] = x2;
+ goto L1355;
+ }
+ goto ret0;
+
+ L1355:
+ x1 = XEXP (x0, 1);
+ if (general_operand (x1, SImode))
+ {
+ ro[1] = x1;
+ if (!HALF_PIC_P ())
+ return 271;
+ }
+ goto ret0;
+ ret0: return -1;
+}
+
+rtx
+split_insns (x0, insn)
+ register rtx x0;
+ rtx insn;
+{
+ register rtx *ro = &recog_operand[0];
+ register rtx x1, x2, x3, x4, x5, x6;
+ rtx tem;
+
+ goto ret0;
+ ret0: return 0;
+}
+
diff --git a/gnu/usr.bin/cc/cc_int/integrate.c b/gnu/usr.bin/cc/cc_int/integrate.c
new file mode 100644
index 0000000..63b150d
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/integrate.c
@@ -0,0 +1,3035 @@
+/* Procedure integration for GNU CC.
+ Copyright (C) 1988, 1991, 1993, 1994 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include <stdio.h>
+
+#include "config.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "insn-config.h"
+#include "insn-flags.h"
+#include "expr.h"
+#include "output.h"
+#include "integrate.h"
+#include "real.h"
+#include "function.h"
+#include "bytecode.h"
+
+#include "obstack.h"
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+extern struct obstack *function_maybepermanent_obstack;
+
+extern tree pushdecl ();
+extern tree poplevel ();
+
+/* Round VALUE up to the next multiple of ALIGN (assumed a power of 2). */
+#define CEIL_ROUND(VALUE,ALIGN) (((VALUE) + (ALIGN) - 1) & ~((ALIGN) - 1))
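+/* Worked example (editor's illustration): CEIL_ROUND (13, 8)
+   == (13 + 7) & ~7 == 16. */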
+
+/* Default max number of insns a function can have and still be inline.
+ This is overridden on RISC machines. */
+#ifndef INTEGRATE_THRESHOLD
+#define INTEGRATE_THRESHOLD(DECL) \
+ (8 * (8 + list_length (DECL_ARGUMENTS (DECL))))
+#endif
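+/* Worked example (editor's illustration): for a function with two
+   parameters, the default threshold is
+   8 * (8 + list_length (DECL_ARGUMENTS (DECL))) == 8 * (8 + 2) == 80 insns. */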
+
+static rtx initialize_for_inline PROTO((tree, int, int, int, int));
+static void finish_inline PROTO((tree, rtx));
+static void adjust_copied_decl_tree PROTO((tree));
+static tree copy_decl_list PROTO((tree));
+static tree copy_decl_tree PROTO((tree));
+static void copy_decl_rtls PROTO((tree));
+static void save_constants PROTO((rtx *));
+static void note_modified_parmregs PROTO((rtx, rtx));
+static rtx copy_for_inline PROTO((rtx));
+static void integrate_parm_decls PROTO((tree, struct inline_remap *, rtvec));
+static void integrate_decl_tree PROTO((tree, int, struct inline_remap *));
+static void subst_constants PROTO((rtx *, rtx, struct inline_remap *));
+static void restore_constants PROTO((rtx *));
+static void set_block_origin_self PROTO((tree));
+static void set_decl_origin_self PROTO((tree));
+static void set_block_abstract_flags PROTO((tree, int));
+
+void set_decl_abstract_flags PROTO((tree, int));
+
+/* Zero if the current function (whose FUNCTION_DECL is FNDECL)
+ is safe and reasonable to integrate into other functions.
+ Nonzero means value is a warning message with a single %s
+ for the function's name. */
+
+char *
+function_cannot_inline_p (fndecl)
+ register tree fndecl;
+{
+ register rtx insn;
+ tree last = tree_last (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
+ int max_insns = INTEGRATE_THRESHOLD (fndecl);
+ register int ninsns = 0;
+ register tree parms;
+
+ /* No inlines with varargs. `grokdeclarator' gives a warning
+ message about that if `inline' is specified. This code
+ is put in to catch the volunteers. */
+ if ((last && TREE_VALUE (last) != void_type_node)
+ || current_function_varargs)
+ return "varargs function cannot be inline";
+
+ if (current_function_calls_alloca)
+ return "function using alloca cannot be inline";
+
+ if (current_function_contains_functions)
+ return "function with nested functions cannot be inline";
+
+ /* If it's not even close, don't even look. */
+ if (!DECL_INLINE (fndecl) && get_max_uid () > 3 * max_insns)
+ return "function too large to be inline";
+
+#if 0
+ /* Large stacks are OK now that inlined functions can share them. */
+ /* Don't inline functions with large stack usage,
+ since they can make other recursive functions burn up stack. */
+ if (!DECL_INLINE (fndecl) && get_frame_size () > 100)
+ return "function stack frame for inlining";
+#endif
+
+#if 0
+ /* Don't inline functions which do not specify a function prototype and
+ have a BLKmode argument or take the address of a parameter. */
+ for (parms = DECL_ARGUMENTS (fndecl); parms; parms = TREE_CHAIN (parms))
+ {
+ if (TYPE_MODE (TREE_TYPE (parms)) == BLKmode)
+ TREE_ADDRESSABLE (parms) = 1;
+ if (last == NULL_TREE && TREE_ADDRESSABLE (parms))
+ return "no prototype, and parameter address used; cannot be inline";
+ }
+#endif
+
+ /* We can't inline functions that return structures
+ the old-fashioned PCC way, copying into a static block. */
+ if (current_function_returns_pcc_struct)
+ return "inline functions not supported for this return value type";
+
+ /* We can't inline functions that return structures of varying size. */
+ if (int_size_in_bytes (TREE_TYPE (TREE_TYPE (fndecl))) < 0)
+ return "function with varying-size return value cannot be inline";
+
+ /* Cannot inline a function with a varying size argument. */
+ for (parms = DECL_ARGUMENTS (fndecl); parms; parms = TREE_CHAIN (parms))
+ if (int_size_in_bytes (TREE_TYPE (parms)) < 0)
+ return "function with varying-size parameter cannot be inline";
+
+ if (!DECL_INLINE (fndecl) && get_max_uid () > max_insns)
+ {
+ for (ninsns = 0, insn = get_first_nonparm_insn (); insn && ninsns < max_insns;
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ ninsns++;
+ }
+
+ if (ninsns >= max_insns)
+ return "function too large to be inline";
+ }
+
+ /* We cannot inline this function if forced_labels is non-zero. This
+ implies that a label in this function was used as an initializer.
+ Because labels can not be duplicated, all labels in the function
+ will be renamed when it is inlined. However, there is no way to find
+ and fix all variables initialized with addresses of labels in this
+ function, hence inlining is impossible. */
+
+ if (forced_labels)
+ return "function with label addresses used in initializers cannot inline";
+
+ /* We cannot inline a nested function that jumps to a nonlocal label. */
+ if (current_function_has_nonlocal_goto)
+ return "function with nonlocal goto cannot be inline";
+
+ return 0;
+}
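+/* Illustrative use (editor's sketch, not part of the import): the
+   string returned above is a warning format with one %s for the
+   function's name, so a caller such as rest_of_compilation can do,
+   roughly:
+
+     char *lose = function_cannot_inline_p (fndecl);
+     if (lose)
+       warning_with_decl (fndecl, lose);  */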
+
+/* Variables used within save_for_inline. */
+
+/* Mapping from old pseudo-register to new pseudo-registers.
+ The first element of this map is reg_map[FIRST_PSEUDO_REGISTER].
+ It is allocated in `save_for_inline' and `expand_inline_function',
+ and deallocated on exit from each of those routines. */
+static rtx *reg_map;
+
+/* Mapping from old code-labels to new code-labels.
+ The first element of this map is label_map[min_labelno].
+ It is allocated in `save_for_inline' and `expand_inline_function',
+ and deallocated on exit from each of those routines. */
+static rtx *label_map;
+
+/* Mapping from old insn uid's to copied insns.
+ It is allocated in `save_for_inline' and `expand_inline_function',
+ and deallocated on exit from each of those routines. */
+static rtx *insn_map;
+
+/* Map pseudo reg number into the PARM_DECL for the parm living in the reg.
+ Zero for a reg that isn't a parm's home.
+ Only reg numbers less than max_parm_reg are mapped here. */
+static tree *parmdecl_map;
+
+/* Keep track of first pseudo-register beyond those that are parms. */
+static int max_parm_reg;
+
+/* When an insn is being copied by copy_for_inline,
+ this is nonzero if we have copied an ASM_OPERANDS.
+ In that case, it is the original input-operand vector. */
+static rtvec orig_asm_operands_vector;
+
+/* When an insn is being copied by copy_for_inline,
+ this is nonzero if we have copied an ASM_OPERANDS.
+ In that case, it is the copied input-operand vector. */
+static rtvec copy_asm_operands_vector;
+
+/* Likewise, this is the copied constraints vector. */
+static rtvec copy_asm_constraints_vector;
+
+/* In save_for_inline, nonzero if past the parm-initialization insns. */
+static int in_nonparm_insns;
+
+/* Subroutine for `save_for_inline{copying,nocopy}'. Performs initialization
+ needed to save FNDECL's insns and info for future inline expansion. */
+
+static rtx
+initialize_for_inline (fndecl, min_labelno, max_labelno, max_reg, copy)
+ tree fndecl;
+ int min_labelno;
+ int max_labelno;
+ int max_reg;
+ int copy;
+{
+ int function_flags, i;
+ rtvec arg_vector;
+ tree parms;
+
+ /* Compute the values of any flags we must restore when inlining this. */
+
+ function_flags
+ = (current_function_calls_alloca * FUNCTION_FLAGS_CALLS_ALLOCA
+ + current_function_calls_setjmp * FUNCTION_FLAGS_CALLS_SETJMP
+ + current_function_calls_longjmp * FUNCTION_FLAGS_CALLS_LONGJMP
+ + current_function_returns_struct * FUNCTION_FLAGS_RETURNS_STRUCT
+ + current_function_returns_pcc_struct * FUNCTION_FLAGS_RETURNS_PCC_STRUCT
+ + current_function_needs_context * FUNCTION_FLAGS_NEEDS_CONTEXT
+ + current_function_has_nonlocal_label * FUNCTION_FLAGS_HAS_NONLOCAL_LABEL
+ + current_function_returns_pointer * FUNCTION_FLAGS_RETURNS_POINTER
+ + current_function_uses_const_pool * FUNCTION_FLAGS_USES_CONST_POOL
+ + current_function_uses_pic_offset_table * FUNCTION_FLAGS_USES_PIC_OFFSET_TABLE);
+
+ /* Clear out PARMDECL_MAP. It was allocated in the caller's frame. */
+ bzero ((char *) parmdecl_map, max_parm_reg * sizeof (tree));
+ arg_vector = rtvec_alloc (list_length (DECL_ARGUMENTS (fndecl)));
+
+ for (parms = DECL_ARGUMENTS (fndecl), i = 0;
+ parms;
+ parms = TREE_CHAIN (parms), i++)
+ {
+ rtx p = DECL_RTL (parms);
+
+ if (GET_CODE (p) == MEM && copy)
+ {
+ /* Copy the rtl so that modifications of the addresses
+ later in compilation won't affect this arg_vector.
+ Virtual register instantiation can screw the address
+ of the rtl. */
+ rtx new = copy_rtx (p);
+
+ /* Don't leave the old copy anywhere in this decl. */
+ if (DECL_RTL (parms) == DECL_INCOMING_RTL (parms)
+ || (GET_CODE (DECL_RTL (parms)) == MEM
+ && GET_CODE (DECL_INCOMING_RTL (parms)) == MEM
+ && (XEXP (DECL_RTL (parms), 0)
+ == XEXP (DECL_INCOMING_RTL (parms), 0))))
+ DECL_INCOMING_RTL (parms) = new;
+ DECL_RTL (parms) = new;
+ }
+
+ RTVEC_ELT (arg_vector, i) = p;
+
+ if (GET_CODE (p) == REG)
+ parmdecl_map[REGNO (p)] = parms;
+ else if (GET_CODE (p) == CONCAT)
+ {
+ rtx preal = gen_realpart (GET_MODE (XEXP (p, 0)), p);
+ rtx pimag = gen_imagpart (GET_MODE (preal), p);
+
+ if (GET_CODE (preal) == REG)
+ parmdecl_map[REGNO (preal)] = parms;
+ if (GET_CODE (pimag) == REG)
+ parmdecl_map[REGNO (pimag)] = parms;
+ }
+
+ /* This flag is cleared later
+ if the function ever modifies the value of the parm. */
+ TREE_READONLY (parms) = 1;
+ }
+
+ /* Assume we start out in the insns that set up the parameters. */
+ in_nonparm_insns = 0;
+
+ /* The list of DECL_SAVED_INSNS starts off with a header which
+ contains the following information:
+
+ the first insn of the function (not including the insns that copy
+ parameters into registers),
+ the first parameter insn of the function,
+ the first label used by that function,
+ the last label used by that function,
+ the highest register number used for parameters,
+ the total number of registers used,
+ the size of the incoming stack area for parameters,
+ the number of bytes popped on return,
+ the stack slot list,
+ some flags that are used to restore compiler globals,
+ the value of current_function_outgoing_args_size,
+ the original argument vector,
+ and the original DECL_INITIAL. */
+
+ return gen_inline_header_rtx (NULL_RTX, NULL_RTX, min_labelno, max_labelno,
+ max_parm_reg, max_reg,
+ current_function_args_size,
+ current_function_pops_args,
+ stack_slot_list, function_flags,
+ current_function_outgoing_args_size,
+ arg_vector, (rtx) DECL_INITIAL (fndecl));
+}
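+/* Editor's note (illustrative): the header built above is read back
+   through accessor macros when the function is later expanded inline,
+   as in expand_inline_function below:
+
+     rtx insns = FIRST_FUNCTION_INSN (header);
+     rtx parm_insns = FIRST_PARM_INSN (header);
+     rtvec arg_vector = ORIGINAL_ARG_VECTOR (header);  */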
+
+/* Subroutine for `save_for_inline{copying,nocopy}'. Finishes up the
+ things that must be done to make FNDECL expandable as an inline function.
+ HEAD contains the chain of insns to which FNDECL will expand. */
+
+static void
+finish_inline (fndecl, head)
+ tree fndecl;
+ rtx head;
+{
+ NEXT_INSN (head) = get_first_nonparm_insn ();
+ FIRST_PARM_INSN (head) = get_insns ();
+ DECL_SAVED_INSNS (fndecl) = head;
+ DECL_FRAME_SIZE (fndecl) = get_frame_size ();
+ DECL_INLINE (fndecl) = 1;
+}
+
+/* Adjust the BLOCK_END_NOTE pointers in a given copied DECL tree so that
+ they all point to the new (copied) rtxs. */
+
+static void
+adjust_copied_decl_tree (block)
+ register tree block;
+{
+ register tree subblock;
+ register rtx original_end;
+
+ original_end = BLOCK_END_NOTE (block);
+ if (original_end)
+ {
+ BLOCK_END_NOTE (block) = (rtx) NOTE_SOURCE_FILE (original_end);
+ NOTE_SOURCE_FILE (original_end) = 0;
+ }
+
+ /* Process all subblocks. */
+ for (subblock = BLOCK_SUBBLOCKS (block);
+ subblock;
+ subblock = TREE_CHAIN (subblock))
+ adjust_copied_decl_tree (subblock);
+}
+
+/* Make the insns and PARM_DECLs of the current function permanent
+ and record other information in DECL_SAVED_INSNS to allow inlining
+ of this function in subsequent calls.
+
+ This function is called when we are going to immediately compile
+ the insns for FNDECL. The insns in maybepermanent_obstack cannot be
+ modified by the compilation process, so we copy all of them to
+ new storage and consider the new insns to be the insn chain to be
+ compiled. Our caller (rest_of_compilation) saves the original
+ DECL_INITIAL and DECL_ARGUMENTS; here we copy them. */
+
+void
+save_for_inline_copying (fndecl)
+ tree fndecl;
+{
+ rtx first_insn, last_insn, insn;
+ rtx head, copy;
+ int max_labelno, min_labelno, i, len;
+ int max_reg;
+ int max_uid;
+ rtx first_nonparm_insn;
+
+ /* Make and emit a return-label if we have not already done so.
+ Do this before recording the bounds on label numbers. */
+
+ if (return_label == 0)
+ {
+ return_label = gen_label_rtx ();
+ emit_label (return_label);
+ }
+
+ /* Get some bounds on the labels and registers used. */
+
+ max_labelno = max_label_num ();
+ min_labelno = get_first_label_num ();
+ max_reg = max_reg_num ();
+
+ /* Set up PARMDECL_MAP which maps pseudo-reg number to its PARM_DECL.
+ Later we set TREE_READONLY to 0 if the parm is modified inside the fn.
+ Also set up ARG_VECTOR, which holds the unmodified DECL_RTX values
+ for the parms, prior to elimination of virtual registers.
+ These values are needed for substituting parms properly. */
+
+ max_parm_reg = max_parm_reg_num ();
+ parmdecl_map = (tree *) alloca (max_parm_reg * sizeof (tree));
+
+ head = initialize_for_inline (fndecl, min_labelno, max_labelno, max_reg, 1);
+
+ if (current_function_uses_const_pool)
+ {
+ /* Replace any constant pool references with the actual constant. We
+ will put the constants back in the copy made below. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ save_constants (&PATTERN (insn));
+ if (REG_NOTES (insn))
+ save_constants (&REG_NOTES (insn));
+ }
+
+ /* Clear out the constant pool so that we can recreate it with the
+ copied constants below. */
+ init_const_rtx_hash_table ();
+ clear_const_double_mem ();
+ }
+
+ max_uid = INSN_UID (head);
+
+ /* We have now allocated all that needs to be allocated permanently
+ on the rtx obstack. Set our high-water mark, so that we
+ can free the rest of this when the time comes. */
+
+ preserve_data ();
+
+ /* Copy the chain insns of this function.
+ Install the copied chain as the insns of this function,
+ for continued compilation;
+ the original chain is recorded as the DECL_SAVED_INSNS
+ for inlining future calls. */
+
+ /* If there are insns that copy parms from the stack into pseudo registers,
+ those insns are not copied. `expand_inline_function' must
+ emit the correct code to handle such things. */
+
+ insn = get_insns ();
+ if (GET_CODE (insn) != NOTE)
+ abort ();
+ first_insn = rtx_alloc (NOTE);
+ NOTE_SOURCE_FILE (first_insn) = NOTE_SOURCE_FILE (insn);
+ NOTE_LINE_NUMBER (first_insn) = NOTE_LINE_NUMBER (insn);
+ INSN_UID (first_insn) = INSN_UID (insn);
+ PREV_INSN (first_insn) = NULL;
+ NEXT_INSN (first_insn) = NULL;
+ last_insn = first_insn;
+
+ /* Each pseudo-reg in the old insn chain must have a unique rtx in the copy.
+ Make these new rtx's now, and install them in regno_reg_rtx, so they
+ will be the official pseudo-reg rtx's for the rest of compilation. */
+
+ reg_map = (rtx *) alloca ((max_reg + 1) * sizeof (rtx));
+
+ len = sizeof (struct rtx_def) + (GET_RTX_LENGTH (REG) - 1) * sizeof (rtunion);
+ for (i = max_reg - 1; i > LAST_VIRTUAL_REGISTER; i--)
+ reg_map[i] = (rtx)obstack_copy (function_maybepermanent_obstack,
+ regno_reg_rtx[i], len);
+
+ bcopy ((char *) (reg_map + LAST_VIRTUAL_REGISTER + 1),
+ (char *) (regno_reg_rtx + LAST_VIRTUAL_REGISTER + 1),
+ (max_reg - (LAST_VIRTUAL_REGISTER + 1)) * sizeof (rtx));
+
+ /* Likewise each label rtx must have a unique rtx as its copy. */
+
+ label_map = (rtx *)alloca ((max_labelno - min_labelno) * sizeof (rtx));
+ label_map -= min_labelno;
+
+ for (i = min_labelno; i < max_labelno; i++)
+ label_map[i] = gen_label_rtx ();
+
+ /* Record the mapping of old insns to copied insns. */
+
+ insn_map = (rtx *) alloca (max_uid * sizeof (rtx));
+ bzero ((char *) insn_map, max_uid * sizeof (rtx));
+
+ /* Get the insn which signals the end of parameter setup code. */
+ first_nonparm_insn = get_first_nonparm_insn ();
+
+ /* Copy any entries in regno_reg_rtx or DECL_RTLs that reference MEM
+ (the former occurs when a variable has its address taken)
+ since these may be shared and can be changed by virtual
+ register instantiation. DECL_RTL values for our arguments
+ have already been copied by initialize_for_inline. */
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_reg; i++)
+ if (GET_CODE (regno_reg_rtx[i]) == MEM)
+ XEXP (regno_reg_rtx[i], 0)
+ = copy_for_inline (XEXP (regno_reg_rtx[i], 0));
+
+ /* Copy the tree of subblocks of the function, and the decls in them.
+ We will use the copy for compiling this function, then restore the original
+ subblocks and decls for use when inlining this function.
+
+ Several parts of the compiler modify BLOCK trees. In particular,
+ instantiate_virtual_regs will instantiate any virtual regs
+ mentioned in the DECL_RTLs of the decls, and loop
+ unrolling will replicate any BLOCK trees inside an unrolled loop.
+
+ The modified subblocks or DECL_RTLs would be incorrect for the original rtl
+ which we will use for inlining. The rtl might even contain pseudoregs
+ whose space has been freed. */
+
+ DECL_INITIAL (fndecl) = copy_decl_tree (DECL_INITIAL (fndecl));
+ DECL_ARGUMENTS (fndecl) = copy_decl_list (DECL_ARGUMENTS (fndecl));
+
+ /* Now copy each DECL_RTL which is a MEM,
+ so it is safe to modify their addresses. */
+ copy_decl_rtls (DECL_INITIAL (fndecl));
+
+ /* The fndecl node acts as its own progenitor, so mark it as such. */
+ DECL_ABSTRACT_ORIGIN (fndecl) = fndecl;
+
+ /* Now copy the chain of insns. Do this in two passes: the first pass
+ copies each insn itself and its body; the second copies the REG_NOTES.
+ This is because a REG_NOTE may have a forward pointer to another insn. */
+
+ for (insn = NEXT_INSN (insn); insn; insn = NEXT_INSN (insn))
+ {
+ orig_asm_operands_vector = 0;
+
+ if (insn == first_nonparm_insn)
+ in_nonparm_insns = 1;
+
+ switch (GET_CODE (insn))
+ {
+ case NOTE:
+ /* No need to keep these. */
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
+ continue;
+
+ copy = rtx_alloc (NOTE);
+ NOTE_LINE_NUMBER (copy) = NOTE_LINE_NUMBER (insn);
+ if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_BLOCK_END)
+ NOTE_SOURCE_FILE (copy) = NOTE_SOURCE_FILE (insn);
+ else
+ {
+ NOTE_SOURCE_FILE (insn) = (char *) copy;
+ NOTE_SOURCE_FILE (copy) = 0;
+ }
+ break;
+
+ case INSN:
+ case JUMP_INSN:
+ case CALL_INSN:
+ copy = rtx_alloc (GET_CODE (insn));
+
+ if (GET_CODE (insn) == CALL_INSN)
+ CALL_INSN_FUNCTION_USAGE (copy) =
+ copy_for_inline (CALL_INSN_FUNCTION_USAGE (insn));
+
+ PATTERN (copy) = copy_for_inline (PATTERN (insn));
+ INSN_CODE (copy) = -1;
+ LOG_LINKS (copy) = NULL_RTX;
+ RTX_INTEGRATED_P (copy) = RTX_INTEGRATED_P (insn);
+ break;
+
+ case CODE_LABEL:
+ copy = label_map[CODE_LABEL_NUMBER (insn)];
+ LABEL_NAME (copy) = LABEL_NAME (insn);
+ break;
+
+ case BARRIER:
+ copy = rtx_alloc (BARRIER);
+ break;
+
+ default:
+ abort ();
+ }
+ INSN_UID (copy) = INSN_UID (insn);
+ insn_map[INSN_UID (insn)] = copy;
+ NEXT_INSN (last_insn) = copy;
+ PREV_INSN (copy) = last_insn;
+ last_insn = copy;
+ }
+
+ adjust_copied_decl_tree (DECL_INITIAL (fndecl));
+
+ /* Now copy the REG_NOTES. */
+ for (insn = NEXT_INSN (get_insns ()); insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && insn_map[INSN_UID(insn)])
+ REG_NOTES (insn_map[INSN_UID (insn)])
+ = copy_for_inline (REG_NOTES (insn));
+
+ NEXT_INSN (last_insn) = NULL;
+
+ finish_inline (fndecl, head);
+
+ set_new_first_and_last_insn (first_insn, last_insn);
+}
+
+/* Return a copy of a chain of nodes, chained through the TREE_CHAIN field.
+ For example, this can copy a list made of TREE_LIST nodes. While copying,
+ for each node copied which doesn't already have its DECL_ABSTRACT_ORIGIN
+ set to some non-zero value, set the DECL_ABSTRACT_ORIGIN of the copy to
+ point to the corresponding (abstract) original node. */
+
+static tree
+copy_decl_list (list)
+ tree list;
+{
+ tree head;
+ register tree prev, next;
+
+ if (list == 0)
+ return 0;
+
+ head = prev = copy_node (list);
+ if (DECL_ABSTRACT_ORIGIN (head) == NULL_TREE)
+ DECL_ABSTRACT_ORIGIN (head) = list;
+ next = TREE_CHAIN (list);
+ while (next)
+ {
+ register tree copy;
+
+ copy = copy_node (next);
+ if (DECL_ABSTRACT_ORIGIN (copy) == NULL_TREE)
+ DECL_ABSTRACT_ORIGIN (copy) = next;
+ TREE_CHAIN (prev) = copy;
+ prev = copy;
+ next = TREE_CHAIN (next);
+ }
+ return head;
+}
+
+/* Make a copy of the entire tree of blocks BLOCK, and return it. */
+
+static tree
+copy_decl_tree (block)
+ tree block;
+{
+ tree t, vars, subblocks;
+
+ vars = copy_decl_list (BLOCK_VARS (block));
+ subblocks = 0;
+
+ /* Process all subblocks. */
+ for (t = BLOCK_SUBBLOCKS (block); t; t = TREE_CHAIN (t))
+ {
+ tree copy = copy_decl_tree (t);
+ TREE_CHAIN (copy) = subblocks;
+ subblocks = copy;
+ }
+
+ t = copy_node (block);
+ BLOCK_VARS (t) = vars;
+ BLOCK_SUBBLOCKS (t) = nreverse (subblocks);
+ /* If the BLOCK being cloned is already marked as having been instantiated
+ from something else, then leave that `origin' marking alone. Elsewise,
+ mark the clone as having originated from the BLOCK we are cloning. */
+ if (BLOCK_ABSTRACT_ORIGIN (t) == NULL_TREE)
+ BLOCK_ABSTRACT_ORIGIN (t) = block;
+ return t;
+}
+
+/* Copy DECL_RTLs in all decls in the given BLOCK node. */
+
+static void
+copy_decl_rtls (block)
+ tree block;
+{
+ tree t;
+
+ for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
+ if (DECL_RTL (t) && GET_CODE (DECL_RTL (t)) == MEM)
+ DECL_RTL (t) = copy_for_inline (DECL_RTL (t));
+
+ /* Process all subblocks. */
+ for (t = BLOCK_SUBBLOCKS (block); t; t = TREE_CHAIN (t))
+ copy_decl_rtls (t);
+}
+
+/* Make the insns and PARM_DECLs of the current function permanent
+ and record other information in DECL_SAVED_INSNS to allow inlining
+ of this function in subsequent calls.
+
+ This routine need not copy any insns because we are not going
+ to immediately compile the insns in the insn chain. There
+ are two cases when we would compile the insns for FNDECL:
+ (1) when FNDECL is expanded inline, and (2) when FNDECL needs to
+ be output at the end of other compilation, because somebody took
+ its address. In the first case, the insns of FNDECL are copied
+ as it is expanded inline, so FNDECL's saved insns are not
+ modified. In the second case, FNDECL is used for the last time,
+ so modifying the rtl is not a problem.
+
+ ??? Actually, we do not verify that FNDECL is not inline expanded
+ by other functions which must also be written down at the end
+ of compilation. We could set flag_no_inline to nonzero when
+ the time comes to write down such functions. */
+
+void
+save_for_inline_nocopy (fndecl)
+ tree fndecl;
+{
+ rtx insn;
+ rtx head;
+ rtx first_nonparm_insn;
+
+ /* Set up PARMDECL_MAP which maps pseudo-reg number to its PARM_DECL.
+ Later we set TREE_READONLY to 0 if the parm is modified inside the fn.
+ Also set up ARG_VECTOR, which holds the unmodified DECL_RTX values
+ for the parms, prior to elimination of virtual registers.
+ These values are needed for substituting parms properly. */
+
+ max_parm_reg = max_parm_reg_num ();
+ parmdecl_map = (tree *) alloca (max_parm_reg * sizeof (tree));
+
+ /* Make and emit a return-label if we have not already done so. */
+
+ if (return_label == 0)
+ {
+ return_label = gen_label_rtx ();
+ emit_label (return_label);
+ }
+
+ head = initialize_for_inline (fndecl, get_first_label_num (),
+ max_label_num (), max_reg_num (), 0);
+
+ /* If there are insns that copy parms from the stack into pseudo registers,
+ those insns are not copied. `expand_inline_function' must
+ emit the correct code to handle such things. */
+
+ insn = get_insns ();
+ if (GET_CODE (insn) != NOTE)
+ abort ();
+
+ /* Get the insn which signals the end of parameter setup code. */
+ first_nonparm_insn = get_first_nonparm_insn ();
+
+ /* Now just scan the chain of insns to see what happens to our
+ PARM_DECLs. If a PARM_DECL is used but never modified, we
+ can substitute its rtl directly when expanding inline (and
+ perform constant folding when its incoming value is constant).
+ Otherwise, we have to copy its value into a new register and track
+ the new register's life. */
+
+ for (insn = NEXT_INSN (insn); insn; insn = NEXT_INSN (insn))
+ {
+ if (insn == first_nonparm_insn)
+ in_nonparm_insns = 1;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ if (current_function_uses_const_pool)
+ {
+ /* Replace any constant pool references with the actual constant.
+ We will put the constant back if we need to write the
+ function out after all. */
+ save_constants (&PATTERN (insn));
+ if (REG_NOTES (insn))
+ save_constants (&REG_NOTES (insn));
+ }
+
+ /* Record what interesting things happen to our parameters. */
+ note_stores (PATTERN (insn), note_modified_parmregs);
+ }
+ }
+
+ /* We have now allocated all that needs to be allocated permanently
+ on the rtx obstack. Set our high-water mark, so that we
+ can free the rest of this when the time comes. */
+
+ preserve_data ();
+
+ finish_inline (fndecl, head);
+}
+
+/* Given PX, a pointer into an insn, search for references to the constant
+ pool. Replace each with a CONST that has the mode of the original
+ constant, contains the constant, and has RTX_INTEGRATED_P set.
+ Similarly, constant pool addresses not enclosed in a MEM are replaced
+ with an ADDRESS rtx which also gives the constant, mode, and has
+ RTX_INTEGRATED_P set. */
+
+static void
+save_constants (px)
+ rtx *px;
+{
+ rtx x;
+ int i, j;
+
+ again:
+ x = *px;
+
+ /* If this is a CONST_DOUBLE, don't try to fix things up in
+ CONST_DOUBLE_MEM, because doing so would recurse infinitely. */
+ if (GET_CODE (x) == CONST_DOUBLE)
+ return;
+ else if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x,0)))
+ {
+ enum machine_mode const_mode = get_pool_mode (XEXP (x, 0));
+ rtx new = gen_rtx (CONST, const_mode, get_pool_constant (XEXP (x, 0)));
+ RTX_INTEGRATED_P (new) = 1;
+
+ /* If the MEM was in a different mode than the constant (perhaps we
+ were only looking at the low-order part), surround it with a
+ SUBREG so we can save both modes. */
+
+ if (GET_MODE (x) != const_mode)
+ {
+ new = gen_rtx (SUBREG, GET_MODE (x), new, 0);
+ RTX_INTEGRATED_P (new) = 1;
+ }
+
+ *px = new;
+ save_constants (&XEXP (*px, 0));
+ }
+ else if (GET_CODE (x) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (x))
+ {
+ *px = gen_rtx (ADDRESS, get_pool_mode (x), get_pool_constant (x));
+ save_constants (&XEXP (*px, 0));
+ RTX_INTEGRATED_P (*px) = 1;
+ }
+
+ else
+ {
+ char *fmt = GET_RTX_FORMAT (GET_CODE (x));
+ int len = GET_RTX_LENGTH (GET_CODE (x));
+
+ for (i = len-1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ save_constants (&XVECEXP (x, i, j));
+ break;
+
+ case 'e':
+ if (XEXP (x, i) == 0)
+ continue;
+ if (i == 0)
+ {
+ /* Hack tail-recursion here. */
+ px = &XEXP (x, 0);
+ goto again;
+ }
+ save_constants (&XEXP (x, i));
+ break;
+ }
+ }
+ }
+}
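+/* Editor's illustration (not part of the import): after the pass
+   above, a pool reference such as
+     (mem:DF (symbol_ref ".LC0"))
+   has become
+     (const:DF <pool constant>)
+   with RTX_INTEGRATED_P set (wrapped in a SUBREG if the MEM's mode
+   differed from the constant's); copy_for_inline later rebuilds a
+   pool entry from it via force_const_mem. */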
+
+/* Note whether a parameter is modified or not. */
+
+static void
+note_modified_parmregs (reg, x)
+ rtx reg;
+ rtx x;
+{
+ if (GET_CODE (reg) == REG && in_nonparm_insns
+ && REGNO (reg) < max_parm_reg
+ && REGNO (reg) >= FIRST_PSEUDO_REGISTER
+ && parmdecl_map[REGNO (reg)] != 0)
+ TREE_READONLY (parmdecl_map[REGNO (reg)]) = 0;
+}
+
+/* Copy the rtx ORIG recursively, replacing pseudo-regs and labels
+ according to `reg_map' and `label_map'. The original rtl insns
+ will be saved for inlining; this is used to make a copy
+ which is used to finish compiling the inline function itself.
+
+ If we find a "saved" constant pool entry, one which was replaced with
+ the value of the constant, convert it back to a constant pool entry.
+ Since the pool wasn't touched, this should simply restore the old
+ address.
+
+ All other kinds of rtx are copied except those that can never be
+ changed during compilation. */
+
+static rtx
+copy_for_inline (orig)
+ rtx orig;
+{
+ register rtx x = orig;
+ register int i;
+ register enum rtx_code code;
+ register char *format_ptr;
+
+ if (x == 0)
+ return x;
+
+ code = GET_CODE (x);
+
+ /* These types may be freely shared. */
+
+ switch (code)
+ {
+ case QUEUED:
+ case CONST_INT:
+ case SYMBOL_REF:
+ case PC:
+ case CC0:
+ return x;
+
+ case CONST_DOUBLE:
+ /* We have to make a new CONST_DOUBLE to ensure that we account for
+ it correctly. Using the old CONST_DOUBLE_MEM data is wrong. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ {
+ REAL_VALUE_TYPE d;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+ return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
+ }
+ else
+ return immed_double_const (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x),
+ VOIDmode);
+
+ case CONST:
+ /* Get constant pool entry for constant in the pool. */
+ if (RTX_INTEGRATED_P (x))
+ return validize_mem (force_const_mem (GET_MODE (x),
+ copy_for_inline (XEXP (x, 0))));
+ break;
+
+ case SUBREG:
+ /* Get constant pool entry, but access in different mode. */
+ if (RTX_INTEGRATED_P (x))
+ {
+ rtx new
+ = force_const_mem (GET_MODE (SUBREG_REG (x)),
+ copy_for_inline (XEXP (SUBREG_REG (x), 0)));
+
+ PUT_MODE (new, GET_MODE (x));
+ return validize_mem (new);
+ }
+ break;
+
+ case ADDRESS:
+ /* If this is not the special constant-pool ADDRESS, it is an error.
+ Otherwise, return the constant pool address. */
+ if (! RTX_INTEGRATED_P (x))
+ abort ();
+
+ return XEXP (force_const_mem (GET_MODE (x),
+ copy_for_inline (XEXP (x, 0))), 0);
+
+ case ASM_OPERANDS:
+ /* If a single asm insn contains multiple output operands
+ then it contains multiple ASM_OPERANDS rtx's that share operand 3.
+ We must make sure that the copied insn continues to share it. */
+ if (orig_asm_operands_vector == XVEC (orig, 3))
+ {
+ x = rtx_alloc (ASM_OPERANDS);
+ x->volatil = orig->volatil;
+ XSTR (x, 0) = XSTR (orig, 0);
+ XSTR (x, 1) = XSTR (orig, 1);
+ XINT (x, 2) = XINT (orig, 2);
+ XVEC (x, 3) = copy_asm_operands_vector;
+ XVEC (x, 4) = copy_asm_constraints_vector;
+ XSTR (x, 5) = XSTR (orig, 5);
+ XINT (x, 6) = XINT (orig, 6);
+ return x;
+ }
+ break;
+
+ case MEM:
+ /* A MEM is usually allowed to be shared if its address is constant
+ or is a constant plus one of the special registers.
+
+ We do not allow sharing of addresses that are either a special
+ register or the sum of a constant and a special register because
+ it is possible for unshare_all_rtl to copy the address, into memory
+ that won't be saved. Although the MEM can safely be shared, and
+ won't be copied there, the address itself cannot be shared, and may
+ need to be copied.
+
+ There are also two exceptions with constants: The first is if the
+ constant is a LABEL_REF or the sum of the LABEL_REF
+ and an integer. This case can happen if we have an inline
+ function that supplies a constant operand to the call of another
+ inline function that uses it in a switch statement. In this case,
+ we will be replacing the LABEL_REF, so we have to replace this MEM
+ as well.
+
+ The second case is if we have a (const (plus (address ..) ...)).
+ In that case we need to put back the address of the constant pool
+ entry. */
+
+ if (CONSTANT_ADDRESS_P (XEXP (x, 0))
+ && GET_CODE (XEXP (x, 0)) != LABEL_REF
+ && ! (GET_CODE (XEXP (x, 0)) == CONST
+ && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
+ && ((GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
+ == LABEL_REF)
+ || (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
+ == ADDRESS)))))
+ return x;
+ break;
+
+ case LABEL_REF:
+ /* If this is a non-local label, just make a new LABEL_REF.
+ Otherwise, use the new label as well. */
+ x = gen_rtx (LABEL_REF, GET_MODE (orig),
+ LABEL_REF_NONLOCAL_P (orig) ? XEXP (orig, 0)
+ : label_map[CODE_LABEL_NUMBER (XEXP (orig, 0))]);
+ LABEL_REF_NONLOCAL_P (x) = LABEL_REF_NONLOCAL_P (orig);
+ LABEL_OUTSIDE_LOOP_P (x) = LABEL_OUTSIDE_LOOP_P (orig);
+ return x;
+
+ case REG:
+ if (REGNO (x) > LAST_VIRTUAL_REGISTER)
+ return reg_map [REGNO (x)];
+ else
+ return x;
+
+ case SET:
+ /* If a parm that gets modified lives in a pseudo-reg,
+ clear its TREE_READONLY to prevent certain optimizations. */
+ {
+ rtx dest = SET_DEST (x);
+
+ while (GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SUBREG)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (dest) == REG
+ && REGNO (dest) < max_parm_reg
+ && REGNO (dest) >= FIRST_PSEUDO_REGISTER
+ && parmdecl_map[REGNO (dest)] != 0
+ /* The insn to load an arg pseudo from a stack slot
+ does not count as modifying it. */
+ && in_nonparm_insns)
+ TREE_READONLY (parmdecl_map[REGNO (dest)]) = 0;
+ }
+ break;
+
+#if 0 /* This is a good idea, but here is the wrong place for it. */
+ /* Arrange that CONST_INTs always appear as the second operand
+ if they appear, and that `frame_pointer_rtx' or `arg_pointer_rtx'
+ always appear as the first. */
+ case PLUS:
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT
+ || (XEXP (x, 1) == frame_pointer_rtx
+ || (ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && XEXP (x, 1) == arg_pointer_rtx)))
+ {
+ rtx t = XEXP (x, 0);
+ XEXP (x, 0) = XEXP (x, 1);
+ XEXP (x, 1) = t;
+ }
+ break;
+#endif
+ }
+
+ /* Replace this rtx with a copy of itself. */
+
+ x = rtx_alloc (code);
+ bcopy ((char *) orig, (char *) x,
+ (sizeof (*x) - sizeof (x->fld)
+ + sizeof (x->fld[0]) * GET_RTX_LENGTH (code)));
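+ /* (Editor's note: the byte count above is the fixed rtx header plus
+ GET_RTX_LENGTH (code) operand slots, i.e. the same amount that
+ rtx_alloc just reserved for this code.) */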
+
+ /* Now scan the subexpressions recursively.
+ We can store any replaced subexpressions directly into X
+ since we know X is not shared! Any vectors in X
+ must be copied if X was copied. */
+
+ format_ptr = GET_RTX_FORMAT (code);
+
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ XEXP (x, i) = copy_for_inline (XEXP (x, i));
+ break;
+
+ case 'u':
+ /* Change any references to old-insns to point to the
+ corresponding copied insns. */
+ XEXP (x, i) = insn_map[INSN_UID (XEXP (x, i))];
+ break;
+
+ case 'E':
+ if (XVEC (x, i) != NULL && XVECLEN (x, i) != 0)
+ {
+ register int j;
+
+ XVEC (x, i) = gen_rtvec_v (XVECLEN (x, i), &XVECEXP (x, i, 0));
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j)
+ = copy_for_inline (XVECEXP (x, i, j));
+ }
+ break;
+ }
+ }
+
+ if (code == ASM_OPERANDS && orig_asm_operands_vector == 0)
+ {
+ orig_asm_operands_vector = XVEC (orig, 3);
+ copy_asm_operands_vector = XVEC (x, 3);
+ copy_asm_constraints_vector = XVEC (x, 4);
+ }
+
+ return x;
+}
+
+/* Unfortunately, we need a global copy of the const_equiv map for communication
+ with a function called from note_stores. Be *very* careful that this
+ is used properly in the presence of recursion. */
+
+rtx *global_const_equiv_map;
+int global_const_equiv_map_size;
+
+#define FIXED_BASE_PLUS_P(X) \
+ (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) >= FIRST_VIRTUAL_REGISTER \
+ && REGNO (XEXP (X, 0)) <= LAST_VIRTUAL_REGISTER)
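+/* Editor's illustration: FIXED_BASE_PLUS_P accepts an address of the
+   form (plus (reg N) (const_int K)) only when N lies in the virtual
+   register range, e.g. the virtual incoming-args pointer plus a
+   constant byte offset. */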
+
+/* Integrate the procedure defined by FNDECL. Note that this function
+ may wind up calling itself. Since the static variables are not
+ reentrant, we do not assign them until after the possibility
+ of recursion is eliminated.
+
+ If IGNORE is nonzero, do not produce a value.
+ Otherwise store the value in TARGET if it is nonzero and that is convenient.
+
+ Value is:
+ (rtx)-1 if we could not substitute the function
+ 0 if we substituted it and it does not produce a value
+ else an rtx for where the value is stored. */
+
+rtx
+expand_inline_function (fndecl, parms, target, ignore, type, structure_value_addr)
+ tree fndecl, parms;
+ rtx target;
+ int ignore;
+ tree type;
+ rtx structure_value_addr;
+{
+ tree formal, actual, block;
+ rtx header = DECL_SAVED_INSNS (fndecl);
+ rtx insns = FIRST_FUNCTION_INSN (header);
+ rtx parm_insns = FIRST_PARM_INSN (header);
+ tree *arg_trees;
+ rtx *arg_vals;
+ rtx insn;
+ int max_regno;
+ register int i;
+ int min_labelno = FIRST_LABELNO (header);
+ int max_labelno = LAST_LABELNO (header);
+ int nargs;
+ rtx local_return_label = 0;
+ rtx loc;
+ rtx temp;
+ struct inline_remap *map;
+ rtx cc0_insn = 0;
+ rtvec arg_vector = ORIGINAL_ARG_VECTOR (header);
+ rtx static_chain_value = 0;
+
+ /* Allow for equivalences of the pseudos we make for virtual fp and ap. */
+ max_regno = MAX_REGNUM (header) + 3;
+ if (max_regno < FIRST_PSEUDO_REGISTER)
+ abort ();
+
+ nargs = list_length (DECL_ARGUMENTS (fndecl));
+
+ /* Check that the parms' types match and that sufficient arguments were
+ passed. Since the appropriate conversions or default promotions have
+ already been applied, the machine modes should match exactly. */
+
+ for (formal = DECL_ARGUMENTS (fndecl),
+ actual = parms;
+ formal;
+ formal = TREE_CHAIN (formal),
+ actual = TREE_CHAIN (actual))
+ {
+ tree arg;
+ enum machine_mode mode;
+
+ if (actual == 0)
+ return (rtx) (HOST_WIDE_INT) -1;
+
+ arg = TREE_VALUE (actual);
+ mode = TYPE_MODE (DECL_ARG_TYPE (formal));
+
+ if (mode != TYPE_MODE (TREE_TYPE (arg))
+ /* If they are block mode, the types should match exactly.
+ They don't match exactly if TREE_TYPE (FORMAL) == ERROR_MARK_NODE,
+ which could happen if the parameter has incomplete type. */
+ || (mode == BLKmode && TREE_TYPE (arg) != TREE_TYPE (formal)))
+ return (rtx) (HOST_WIDE_INT) -1;
+ }
+
+ /* Extra arguments are valid, but will be ignored below, so we must
+ evaluate them here for side-effects. */
+ for (; actual; actual = TREE_CHAIN (actual))
+ expand_expr (TREE_VALUE (actual), const0_rtx,
+ TYPE_MODE (TREE_TYPE (TREE_VALUE (actual))), 0);
+
+ /* Make a binding contour to keep inline cleanups called at
+ outer function-scope level from looking like they are shadowing
+ parameter declarations. */
+ pushlevel (0);
+
+ /* Make a fresh binding contour that we can easily remove. */
+ pushlevel (0);
+ expand_start_bindings (0);
+ if (GET_CODE (parm_insns) == NOTE
+ && NOTE_LINE_NUMBER (parm_insns) > 0)
+ {
+ rtx note = emit_note (NOTE_SOURCE_FILE (parm_insns),
+ NOTE_LINE_NUMBER (parm_insns));
+ if (note)
+ RTX_INTEGRATED_P (note) = 1;
+ }
+
+ /* Expand the function arguments. Do this first so that any
+ new registers get created before we allocate the maps. */
+
+ arg_vals = (rtx *) alloca (nargs * sizeof (rtx));
+ arg_trees = (tree *) alloca (nargs * sizeof (tree));
+
+ for (formal = DECL_ARGUMENTS (fndecl), actual = parms, i = 0;
+ formal;
+ formal = TREE_CHAIN (formal), actual = TREE_CHAIN (actual), i++)
+ {
+ /* Actual parameter, converted to the type of the argument within the
+ function. */
+ tree arg = convert (TREE_TYPE (formal), TREE_VALUE (actual));
+ /* Mode of the variable used within the function. */
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (formal));
+
+ /* Make sure this formal has some correspondence in the user's code
+ before emitting any line notes for it. */
+ if (DECL_SOURCE_LINE (formal))
+ {
+ rtx note = emit_note (DECL_SOURCE_FILE (formal),
+ DECL_SOURCE_LINE (formal));
+ if (note)
+ RTX_INTEGRATED_P (note) = 1;
+ }
+
+ arg_trees[i] = arg;
+ loc = RTVEC_ELT (arg_vector, i);
+
+ /* If this is an object passed by invisible reference, we copy the
+ object into a stack slot and save its address. If this will go
+ into memory, we do nothing now. Otherwise, we just expand the
+ argument. */
+ if (GET_CODE (loc) == MEM && GET_CODE (XEXP (loc, 0)) == REG
+ && REGNO (XEXP (loc, 0)) > LAST_VIRTUAL_REGISTER)
+ {
+ rtx stack_slot
+ = assign_stack_temp (TYPE_MODE (TREE_TYPE (arg)),
+ int_size_in_bytes (TREE_TYPE (arg)), 1);
+
+ store_expr (arg, stack_slot, 0);
+
+ arg_vals[i] = XEXP (stack_slot, 0);
+ }
+ else if (GET_CODE (loc) != MEM)
+ {
+ if (GET_MODE (loc) != TYPE_MODE (TREE_TYPE (arg)))
+	    /* The mode of LOC and ARG can differ if LOC was a variable
+	       that had its mode promoted via PROMOTED_MODE. */
+ arg_vals[i] = convert_modes (GET_MODE (loc),
+ TYPE_MODE (TREE_TYPE (arg)),
+ expand_expr (arg, NULL_RTX, mode,
+ EXPAND_SUM),
+ TREE_UNSIGNED (TREE_TYPE (formal)));
+ else
+ arg_vals[i] = expand_expr (arg, NULL_RTX, mode, EXPAND_SUM);
+ }
+ else
+ arg_vals[i] = 0;
+
+ if (arg_vals[i] != 0
+ && (! TREE_READONLY (formal)
+ /* If the parameter is not read-only, copy our argument through
+ a register. Also, we cannot use ARG_VALS[I] if it overlaps
+ TARGET in any way. In the inline function, they will likely
+ be two different pseudos, and `safe_from_p' will make all
+ sorts of smart assumptions about their not conflicting.
+ But if ARG_VALS[I] overlaps TARGET, these assumptions are
+ wrong, so put ARG_VALS[I] into a fresh register. */
+ || (target != 0
+ && (GET_CODE (arg_vals[i]) == REG
+ || GET_CODE (arg_vals[i]) == SUBREG
+ || GET_CODE (arg_vals[i]) == MEM)
+ && reg_overlap_mentioned_p (arg_vals[i], target))
+ /* ??? We must always copy a SUBREG into a REG, because it might
+ get substituted into an address, and not all ports correctly
+ handle SUBREGs in addresses. */
+ || (GET_CODE (arg_vals[i]) == SUBREG)))
+ arg_vals[i] = copy_to_mode_reg (GET_MODE (loc), arg_vals[i]);
+ }
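+  /* Editorial illustration of the overlap hazard above: for a call such
+     as `x = f (x)', ARG_VALS[0] may be the very pseudo chosen as TARGET,
+     so the code just above forces it through a fresh register.  */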
+
+ /* Allocate the structures we use to remap things. */
+
+ map = (struct inline_remap *) alloca (sizeof (struct inline_remap));
+ map->fndecl = fndecl;
+
+ map->reg_map = (rtx *) alloca (max_regno * sizeof (rtx));
+ bzero ((char *) map->reg_map, max_regno * sizeof (rtx));
+
+ map->label_map = (rtx *)alloca ((max_labelno - min_labelno) * sizeof (rtx));
+ map->label_map -= min_labelno;
+
+ map->insn_map = (rtx *) alloca (INSN_UID (header) * sizeof (rtx));
+ bzero ((char *) map->insn_map, INSN_UID (header) * sizeof (rtx));
+ map->min_insnno = 0;
+ map->max_insnno = INSN_UID (header);
+
+ map->integrating = 1;
+
+ /* const_equiv_map maps pseudos in our routine to constants, so it needs to
+ be large enough for all our pseudos. This is the number we are currently
+ using plus the number in the called routine, plus 15 for each arg,
+ five to compute the virtual frame pointer, and five for the return value.
+ This should be enough for most cases. We do not reference entries
+ outside the range of the map.
+
+ ??? These numbers are quite arbitrary and were obtained by
+ experimentation. At some point, we should try to allocate the
+     table after all the parameters are set up so we can more accurately
+ estimate the number of pseudos we will need. */
+
+ map->const_equiv_map_size
+ = max_reg_num () + (max_regno - FIRST_PSEUDO_REGISTER) + 15 * nargs + 10;
+
+ map->const_equiv_map
+ = (rtx *)alloca (map->const_equiv_map_size * sizeof (rtx));
+ bzero ((char *) map->const_equiv_map,
+ map->const_equiv_map_size * sizeof (rtx));
+
+ map->const_age_map
+ = (unsigned *)alloca (map->const_equiv_map_size * sizeof (unsigned));
+ bzero ((char *) map->const_age_map,
+ map->const_equiv_map_size * sizeof (unsigned));
+ map->const_age = 0;
+
+ /* Record the current insn in case we have to set up pointers to frame
+ and argument memory blocks. */
+ map->insns_at_start = get_last_insn ();
+
+ /* Update the outgoing argument size to allow for those in the inlined
+ function. */
+ if (OUTGOING_ARGS_SIZE (header) > current_function_outgoing_args_size)
+ current_function_outgoing_args_size = OUTGOING_ARGS_SIZE (header);
+
+ /* If the inline function needs to make PIC references, that means
+ that this function's PIC offset table must be used. */
+ if (FUNCTION_FLAGS (header) & FUNCTION_FLAGS_USES_PIC_OFFSET_TABLE)
+ current_function_uses_pic_offset_table = 1;
+
+ /* If this function needs a context, set it up. */
+ if (FUNCTION_FLAGS (header) & FUNCTION_FLAGS_NEEDS_CONTEXT)
+ static_chain_value = lookup_static_chain (fndecl);
+
+ /* Process each argument. For each, set up things so that the function's
+ reference to the argument will refer to the argument being passed.
+ We only replace REG with REG here. Any simplifications are done
+ via const_equiv_map.
+
+ We make two passes: In the first, we deal with parameters that will
+ be placed into registers, since we need to ensure that the allocated
+ register number fits in const_equiv_map. Then we store all non-register
+ parameters into their memory location. */
+
+ /* Don't try to free temp stack slots here, because we may put one of the
+ parameters into a temp stack slot. */
+
+ for (i = 0; i < nargs; i++)
+ {
+ rtx copy = arg_vals[i];
+
+ loc = RTVEC_ELT (arg_vector, i);
+
+ /* There are three cases, each handled separately. */
+ if (GET_CODE (loc) == MEM && GET_CODE (XEXP (loc, 0)) == REG
+ && REGNO (XEXP (loc, 0)) > LAST_VIRTUAL_REGISTER)
+ {
+ /* This must be an object passed by invisible reference (it could
+ also be a variable-sized object, but we forbid inlining functions
+ with variable-sized arguments). COPY is the address of the
+ actual value (this computation will cause it to be copied). We
+ map that address for the register, noting the actual address as
+ an equivalent in case it can be substituted into the insns. */
+
+ if (GET_CODE (copy) != REG)
+ {
+ temp = copy_addr_to_reg (copy);
+ if ((CONSTANT_P (copy) || FIXED_BASE_PLUS_P (copy))
+ && REGNO (temp) < map->const_equiv_map_size)
+ {
+ map->const_equiv_map[REGNO (temp)] = copy;
+ map->const_age_map[REGNO (temp)] = CONST_AGE_PARM;
+ }
+ copy = temp;
+ }
+ map->reg_map[REGNO (XEXP (loc, 0))] = copy;
+ }
+ else if (GET_CODE (loc) == MEM)
+ {
+ /* This is the case of a parameter that lives in memory.
+ It will live in the block we allocate in the called routine's
+ frame that simulates the incoming argument area. Do nothing
+ now; we will call store_expr later. */
+ ;
+ }
+ else if (GET_CODE (loc) == REG)
+ {
+ /* This is the good case where the parameter is in a register.
+ If it is read-only and our argument is a constant, set up the
+ constant equivalence.
+
+ If LOC is REG_USERVAR_P, the usual case, COPY must also have
+ that flag set if it is a register.
+
+ Also, don't allow hard registers here; they might not be valid
+ when substituted into insns. */
+
+ if ((GET_CODE (copy) != REG && GET_CODE (copy) != SUBREG)
+ || (GET_CODE (copy) == REG && REG_USERVAR_P (loc)
+ && ! REG_USERVAR_P (copy))
+ || (GET_CODE (copy) == REG
+ && REGNO (copy) < FIRST_PSEUDO_REGISTER))
+ {
+ temp = copy_to_mode_reg (GET_MODE (loc), copy);
+ REG_USERVAR_P (temp) = REG_USERVAR_P (loc);
+ if ((CONSTANT_P (copy) || FIXED_BASE_PLUS_P (copy))
+ && REGNO (temp) < map->const_equiv_map_size)
+ {
+ map->const_equiv_map[REGNO (temp)] = copy;
+ map->const_age_map[REGNO (temp)] = CONST_AGE_PARM;
+ }
+ copy = temp;
+ }
+ map->reg_map[REGNO (loc)] = copy;
+ }
+ else if (GET_CODE (loc) == CONCAT)
+ {
+ /* This is the good case where the parameter is in a
+ pair of separate pseudos.
+ If it is read-only and our argument is a constant, set up the
+ constant equivalence.
+
+ If LOC is REG_USERVAR_P, the usual case, COPY must also have
+ that flag set if it is a register.
+
+ Also, don't allow hard registers here; they might not be valid
+ when substituted into insns. */
+ rtx locreal = gen_realpart (GET_MODE (XEXP (loc, 0)), loc);
+ rtx locimag = gen_imagpart (GET_MODE (XEXP (loc, 0)), loc);
+ rtx copyreal = gen_realpart (GET_MODE (locreal), copy);
+ rtx copyimag = gen_imagpart (GET_MODE (locimag), copy);
+
+ if ((GET_CODE (copyreal) != REG && GET_CODE (copyreal) != SUBREG)
+ || (GET_CODE (copyreal) == REG && REG_USERVAR_P (locreal)
+ && ! REG_USERVAR_P (copyreal))
+ || (GET_CODE (copyreal) == REG
+ && REGNO (copyreal) < FIRST_PSEUDO_REGISTER))
+ {
+ temp = copy_to_mode_reg (GET_MODE (locreal), copyreal);
+ REG_USERVAR_P (temp) = REG_USERVAR_P (locreal);
+ if ((CONSTANT_P (copyreal) || FIXED_BASE_PLUS_P (copyreal))
+ && REGNO (temp) < map->const_equiv_map_size)
+ {
+ map->const_equiv_map[REGNO (temp)] = copyreal;
+ map->const_age_map[REGNO (temp)] = CONST_AGE_PARM;
+ }
+ copyreal = temp;
+ }
+ map->reg_map[REGNO (locreal)] = copyreal;
+
+ if ((GET_CODE (copyimag) != REG && GET_CODE (copyimag) != SUBREG)
+ || (GET_CODE (copyimag) == REG && REG_USERVAR_P (locimag)
+ && ! REG_USERVAR_P (copyimag))
+ || (GET_CODE (copyimag) == REG
+ && REGNO (copyimag) < FIRST_PSEUDO_REGISTER))
+ {
+ temp = copy_to_mode_reg (GET_MODE (locimag), copyimag);
+ REG_USERVAR_P (temp) = REG_USERVAR_P (locimag);
+ if ((CONSTANT_P (copyimag) || FIXED_BASE_PLUS_P (copyimag))
+ && REGNO (temp) < map->const_equiv_map_size)
+ {
+ map->const_equiv_map[REGNO (temp)] = copyimag;
+ map->const_age_map[REGNO (temp)] = CONST_AGE_PARM;
+ }
+ copyimag = temp;
+ }
+ map->reg_map[REGNO (locimag)] = copyimag;
+ }
+ else
+ abort ();
+ }
+
+ /* Now do the parameters that will be placed in memory. */
+
+ for (formal = DECL_ARGUMENTS (fndecl), i = 0;
+ formal; formal = TREE_CHAIN (formal), i++)
+ {
+ loc = RTVEC_ELT (arg_vector, i);
+
+ if (GET_CODE (loc) == MEM
+ /* Exclude case handled above. */
+ && ! (GET_CODE (XEXP (loc, 0)) == REG
+ && REGNO (XEXP (loc, 0)) > LAST_VIRTUAL_REGISTER))
+ {
+ rtx note = emit_note (DECL_SOURCE_FILE (formal),
+ DECL_SOURCE_LINE (formal));
+ if (note)
+ RTX_INTEGRATED_P (note) = 1;
+
+ /* Compute the address in the area we reserved and store the
+ value there. */
+ temp = copy_rtx_and_substitute (loc, map);
+ subst_constants (&temp, NULL_RTX, map);
+ apply_change_group ();
+ if (! memory_address_p (GET_MODE (temp), XEXP (temp, 0)))
+ temp = change_address (temp, VOIDmode, XEXP (temp, 0));
+ store_expr (arg_trees[i], temp, 0);
+ }
+ }
+
+ /* Deal with the places that the function puts its result.
+ We are driven by what is placed into DECL_RESULT.
+
+     Initially, we assume that we don't need any special handling for
+     REG_FUNCTION_VALUE_P.  */
+
+ map->inline_target = 0;
+ loc = DECL_RTL (DECL_RESULT (fndecl));
+ if (TYPE_MODE (type) == VOIDmode)
+ /* There is no return value to worry about. */
+ ;
+ else if (GET_CODE (loc) == MEM)
+ {
+ if (! structure_value_addr || ! aggregate_value_p (DECL_RESULT (fndecl)))
+ abort ();
+
+ /* Pass the function the address in which to return a structure value.
+ Note that a constructor can cause someone to call us with
+ STRUCTURE_VALUE_ADDR, but the initialization takes place
+ via the first parameter, rather than the struct return address.
+
+ We have two cases: If the address is a simple register indirect,
+ use the mapping mechanism to point that register to our structure
+ return address. Otherwise, store the structure return value into
+ the place that it will be referenced from. */
+
+ if (GET_CODE (XEXP (loc, 0)) == REG)
+ {
+ temp = force_reg (Pmode, structure_value_addr);
+ map->reg_map[REGNO (XEXP (loc, 0))] = temp;
+ if ((CONSTANT_P (structure_value_addr)
+ || (GET_CODE (structure_value_addr) == PLUS
+ && XEXP (structure_value_addr, 0) == virtual_stack_vars_rtx
+ && GET_CODE (XEXP (structure_value_addr, 1)) == CONST_INT))
+ && REGNO (temp) < map->const_equiv_map_size)
+ {
+ map->const_equiv_map[REGNO (temp)] = structure_value_addr;
+ map->const_age_map[REGNO (temp)] = CONST_AGE_PARM;
+ }
+ }
+ else
+ {
+ temp = copy_rtx_and_substitute (loc, map);
+ subst_constants (&temp, NULL_RTX, map);
+ apply_change_group ();
+ emit_move_insn (temp, structure_value_addr);
+ }
+ }
+ else if (ignore)
+ /* We will ignore the result value, so don't look at its structure.
+ Note that preparations for an aggregate return value
+ do need to be made (above) even if it will be ignored. */
+ ;
+ else if (GET_CODE (loc) == REG)
+ {
+ /* The function returns an object in a register and we use the return
+ value. Set up our target for remapping. */
+
+      /* Machine mode the function was declared to return. */
+ enum machine_mode departing_mode = TYPE_MODE (type);
+ /* (Possibly wider) machine mode it actually computes
+ (for the sake of callers that fail to declare it right). */
+ enum machine_mode arriving_mode
+ = TYPE_MODE (TREE_TYPE (DECL_RESULT (fndecl)));
+ rtx reg_to_map;
+
+ /* Don't use MEMs as direct targets because on some machines
+ substituting a MEM for a REG makes invalid insns.
+ Let the combiner substitute the MEM if that is valid. */
+ if (target == 0 || GET_CODE (target) != REG
+ || GET_MODE (target) != departing_mode)
+ target = gen_reg_rtx (departing_mode);
+
+ /* If function's value was promoted before return,
+ avoid machine mode mismatch when we substitute INLINE_TARGET.
+ But TARGET is what we will return to the caller. */
+ if (arriving_mode != departing_mode)
+ reg_to_map = gen_rtx (SUBREG, arriving_mode, target, 0);
+ else
+ reg_to_map = target;
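+      /* Illustration (editorial): if the function was declared to return
+	 `short' (say HImode) but actually computes its result in SImode,
+	 REG_TO_MAP becomes (subreg:SI (reg:HI <target>) 0), where
+	 <target> stands for the register made above.  */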
+
+ /* Usually, the result value is the machine's return register.
+ Sometimes it may be a pseudo. Handle both cases. */
+ if (REG_FUNCTION_VALUE_P (loc))
+ map->inline_target = reg_to_map;
+ else
+ map->reg_map[REGNO (loc)] = reg_to_map;
+ }
+
+ /* Make new label equivalences for the labels in the called function. */
+ for (i = min_labelno; i < max_labelno; i++)
+ map->label_map[i] = gen_label_rtx ();
+
+ /* Perform postincrements before actually calling the function. */
+ emit_queue ();
+
+ /* Clean up stack so that variables might have smaller offsets. */
+ do_pending_stack_adjust ();
+
+ /* Save a copy of the location of const_equiv_map for mark_stores, called
+ via note_stores. */
+ global_const_equiv_map = map->const_equiv_map;
+ global_const_equiv_map_size = map->const_equiv_map_size;
+
+ /* Now copy the insns one by one. Do this in two passes, first the insns and
+ then their REG_NOTES, just like save_for_inline. */
+
+ /* This loop is very similar to the loop in copy_loop_body in unroll.c. */
+
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ {
+ rtx copy, pattern;
+
+ map->orig_asm_operands_vector = 0;
+
+ switch (GET_CODE (insn))
+ {
+ case INSN:
+ pattern = PATTERN (insn);
+ copy = 0;
+ if (GET_CODE (pattern) == USE
+ && GET_CODE (XEXP (pattern, 0)) == REG
+ && REG_FUNCTION_VALUE_P (XEXP (pattern, 0)))
+ /* The (USE (REG n)) at return from the function should
+ be ignored since we are changing (REG n) into
+ inline_target. */
+ break;
+
+ /* Ignore setting a function value that we don't want to use. */
+ if (map->inline_target == 0
+ && GET_CODE (pattern) == SET
+ && GET_CODE (SET_DEST (pattern)) == REG
+ && REG_FUNCTION_VALUE_P (SET_DEST (pattern)))
+ {
+ if (volatile_refs_p (SET_SRC (pattern)))
+ {
+ /* If we must not delete the source,
+ load it into a new temporary. */
+ copy = emit_insn (copy_rtx_and_substitute (pattern, map));
+ SET_DEST (PATTERN (copy))
+ = gen_reg_rtx (GET_MODE (SET_DEST (PATTERN (copy))));
+ }
+ else
+ break;
+ }
+ /* If this is setting the static chain pseudo, set it from
+ the value we want to give it instead. */
+ else if (static_chain_value != 0
+ && GET_CODE (pattern) == SET
+ && rtx_equal_p (SET_SRC (pattern),
+ static_chain_incoming_rtx))
+ {
+ rtx newdest = copy_rtx_and_substitute (SET_DEST (pattern), map);
+
+ copy = emit_insn (gen_rtx (SET, VOIDmode, newdest,
+ static_chain_value));
+
+ static_chain_value = 0;
+ }
+ else
+ copy = emit_insn (copy_rtx_and_substitute (pattern, map));
+ /* REG_NOTES will be copied later. */
+
+#ifdef HAVE_cc0
+ /* If this insn is setting CC0, it may need to look at
+ the insn that uses CC0 to see what type of insn it is.
+ In that case, the call to recog via validate_change will
+ fail. So don't substitute constants here. Instead,
+ do it when we emit the following insn.
+
+ For example, see the pyr.md file. That machine has signed and
+ unsigned compares. The compare patterns must check the
+	     following branch insn to see what kind of compare to
+ emit.
+
+ If the previous insn set CC0, substitute constants on it as
+ well. */
+ if (sets_cc0_p (PATTERN (copy)) != 0)
+ cc0_insn = copy;
+ else
+ {
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+ try_constants (copy, map);
+ }
+#else
+ try_constants (copy, map);
+#endif
+ break;
+
+ case JUMP_INSN:
+ if (GET_CODE (PATTERN (insn)) == RETURN)
+ {
+ if (local_return_label == 0)
+ local_return_label = gen_label_rtx ();
+ pattern = gen_jump (local_return_label);
+ }
+ else
+ pattern = copy_rtx_and_substitute (PATTERN (insn), map);
+
+ copy = emit_jump_insn (pattern);
+
+#ifdef HAVE_cc0
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+#endif
+ try_constants (copy, map);
+
+	  /* If this used to be a conditional jump insn whose branch
+	     direction is now known, we must do something special.  */
+ if (condjump_p (insn) && ! simplejump_p (insn) && map->last_pc_value)
+ {
+#ifdef HAVE_cc0
+ /* The previous insn set cc0 for us. So delete it. */
+ delete_insn (PREV_INSN (copy));
+#endif
+
+ /* If this is now a no-op, delete it. */
+ if (map->last_pc_value == pc_rtx)
+ {
+ delete_insn (copy);
+ copy = 0;
+ }
+ else
+	    /* Otherwise, this is an unconditional jump, so we must put a
+ BARRIER after it. We could do some dead code elimination
+ here, but jump.c will do it just as well. */
+ emit_barrier ();
+ }
+ break;
+
+ case CALL_INSN:
+ pattern = copy_rtx_and_substitute (PATTERN (insn), map);
+ copy = emit_call_insn (pattern);
+
+ /* Because the USAGE information potentially contains objects other
+ than hard registers, we need to copy it. */
+ CALL_INSN_FUNCTION_USAGE (copy) =
+ copy_rtx_and_substitute (CALL_INSN_FUNCTION_USAGE (insn), map);
+
+#ifdef HAVE_cc0
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+#endif
+ try_constants (copy, map);
+
+ /* Be lazy and assume CALL_INSNs clobber all hard registers. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ map->const_equiv_map[i] = 0;
+ break;
+
+ case CODE_LABEL:
+ copy = emit_label (map->label_map[CODE_LABEL_NUMBER (insn)]);
+ LABEL_NAME (copy) = LABEL_NAME (insn);
+ map->const_age++;
+ break;
+
+ case BARRIER:
+ copy = emit_barrier ();
+ break;
+
+ case NOTE:
+ /* It is important to discard function-end and function-beg notes,
+ so we have only one of each in the current function.
+ Also, NOTE_INSN_DELETED notes aren't useful (save_for_inline
+ deleted these in the copy used for continuing compilation,
+ not the copy used for inlining). */
+ if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_FUNCTION_END
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_FUNCTION_BEG
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED)
+ copy = emit_note (NOTE_SOURCE_FILE (insn), NOTE_LINE_NUMBER (insn));
+ else
+ copy = 0;
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+
+ if (copy)
+ RTX_INTEGRATED_P (copy) = 1;
+
+ map->insn_map[INSN_UID (insn)] = copy;
+ }
+
+ /* Now copy the REG_NOTES. Increment const_age, so that only constants
+ from parameters can be substituted in. These are the only ones that
+ are valid across the entire function. */
+ map->const_age++;
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && map->insn_map[INSN_UID (insn)]
+ && REG_NOTES (insn))
+ {
+ rtx tem = copy_rtx_and_substitute (REG_NOTES (insn), map);
+ /* We must also do subst_constants, in case one of our parameters
+ has const type and constant value. */
+ subst_constants (&tem, NULL_RTX, map);
+ apply_change_group ();
+ REG_NOTES (map->insn_map[INSN_UID (insn)]) = tem;
+ }
+
+ if (local_return_label)
+ emit_label (local_return_label);
+
+ /* Make copies of the decls of the symbols in the inline function, so that
+ the copies of the variables get declared in the current function. Set
+   up things so that lookup_static_chain knows to interpret registers
+ in SAVE_EXPRs for TYPE_SIZEs as local. */
+
+ inline_function_decl = fndecl;
+ integrate_parm_decls (DECL_ARGUMENTS (fndecl), map, arg_vector);
+ integrate_decl_tree ((tree) ORIGINAL_DECL_INITIAL (header), 0, map);
+ inline_function_decl = 0;
+
+ /* End the scope containing the copied formal parameter variables
+ and copied LABEL_DECLs. */
+
+ expand_end_bindings (getdecls (), 1, 1);
+ block = poplevel (1, 1, 0);
+ BLOCK_ABSTRACT_ORIGIN (block) = (DECL_ABSTRACT_ORIGIN (fndecl) == NULL
+ ? fndecl : DECL_ABSTRACT_ORIGIN (fndecl));
+ poplevel (0, 0, 0);
+ emit_line_note (input_filename, lineno);
+
+ if (structure_value_addr)
+ {
+ target = gen_rtx (MEM, TYPE_MODE (type),
+ memory_address (TYPE_MODE (type), structure_value_addr));
+ MEM_IN_STRUCT_P (target) = 1;
+ }
+ return target;
+}
+
+/* Given a chain of PARM_DECLs, ARGS, copy each decl into a VAR_DECL,
+ push all of those decls and give each one the corresponding home. */
+
+static void
+integrate_parm_decls (args, map, arg_vector)
+ tree args;
+ struct inline_remap *map;
+ rtvec arg_vector;
+{
+ register tree tail;
+ register int i;
+
+ for (tail = args, i = 0; tail; tail = TREE_CHAIN (tail), i++)
+ {
+ register tree decl = build_decl (VAR_DECL, DECL_NAME (tail),
+ TREE_TYPE (tail));
+ rtx new_decl_rtl
+ = copy_rtx_and_substitute (RTVEC_ELT (arg_vector, i), map);
+
+ DECL_ARG_TYPE (decl) = DECL_ARG_TYPE (tail);
+ /* We really should be setting DECL_INCOMING_RTL to something reasonable
+ here, but that's going to require some more work. */
+ /* DECL_INCOMING_RTL (decl) = ?; */
+ /* These args would always appear unused, if not for this. */
+ TREE_USED (decl) = 1;
+ /* Prevent warning for shadowing with these. */
+ DECL_ABSTRACT_ORIGIN (decl) = tail;
+ pushdecl (decl);
+ /* Fully instantiate the address with the equivalent form so that the
+ debugging information contains the actual register, instead of the
+ virtual register. Do this by not passing an insn to
+ subst_constants. */
+ subst_constants (&new_decl_rtl, NULL_RTX, map);
+ apply_change_group ();
+ DECL_RTL (decl) = new_decl_rtl;
+ }
+}
+
+/* Given a BLOCK node LET, push decls and levels so as to construct in the
+ current function a tree of contexts isomorphic to the one that is given.
+
+ LEVEL indicates how far down into the BLOCK tree is the node we are
+ currently traversing. It is always zero except for recursive calls.
+
+ MAP, if nonzero, is a pointer to an inline_remap map which indicates how
+ registers used in the DECL_RTL field should be remapped. If it is zero,
+ no mapping is necessary. */
+
+static void
+integrate_decl_tree (let, level, map)
+ tree let;
+ int level;
+ struct inline_remap *map;
+{
+ tree t, node;
+
+ if (level > 0)
+ pushlevel (0);
+
+ for (t = BLOCK_VARS (let); t; t = TREE_CHAIN (t))
+ {
+ tree d;
+
+ push_obstacks_nochange ();
+ saveable_allocation ();
+ d = copy_node (t);
+ pop_obstacks ();
+
+ if (DECL_RTL (t) != 0)
+ {
+ DECL_RTL (d) = copy_rtx_and_substitute (DECL_RTL (t), map);
+ /* Fully instantiate the address with the equivalent form so that the
+ debugging information contains the actual register, instead of the
+ virtual register. Do this by not passing an insn to
+ subst_constants. */
+ subst_constants (&DECL_RTL (d), NULL_RTX, map);
+ apply_change_group ();
+ }
+ /* These args would always appear unused, if not for this. */
+ TREE_USED (d) = 1;
+ /* Prevent warning for shadowing with these. */
+ DECL_ABSTRACT_ORIGIN (d) = t;
+
+ if (DECL_LANG_SPECIFIC (d))
+ copy_lang_decl (d);
+
+ pushdecl (d);
+ }
+
+ for (t = BLOCK_SUBBLOCKS (let); t; t = TREE_CHAIN (t))
+ integrate_decl_tree (t, level + 1, map);
+
+ if (level > 0)
+ {
+ node = poplevel (1, 0, 0);
+ if (node)
+ {
+ TREE_USED (node) = TREE_USED (let);
+ BLOCK_ABSTRACT_ORIGIN (node) = let;
+ }
+ }
+}
+
+/* Create a new copy of an rtx.
+ Recursively copies the operands of the rtx,
+ except for those few rtx codes that are sharable.
+
+ We always return an rtx that is similar to that incoming rtx, with the
+ exception of possibly changing a REG to a SUBREG or vice versa. No
+ rtl is ever emitted.
+
+ Handle constants that need to be placed in the constant pool by
+ calling `force_const_mem'. */
+
+rtx
+copy_rtx_and_substitute (orig, map)
+ register rtx orig;
+ struct inline_remap *map;
+{
+ register rtx copy, temp;
+ register int i, j;
+ register RTX_CODE code;
+ register enum machine_mode mode;
+ register char *format_ptr;
+ int regno;
+
+ if (orig == 0)
+ return 0;
+
+ code = GET_CODE (orig);
+ mode = GET_MODE (orig);
+
+ switch (code)
+ {
+ case REG:
+ /* If the stack pointer register shows up, it must be part of
+ stack-adjustments (*not* because we eliminated the frame pointer!).
+ Small hard registers are returned as-is. Pseudo-registers
+ go through their `reg_map'. */
+ regno = REGNO (orig);
+ if (regno <= LAST_VIRTUAL_REGISTER)
+ {
+ /* Some hard registers are also mapped,
+ but others are not translated. */
+ if (map->reg_map[regno] != 0)
+ return map->reg_map[regno];
+
+ /* If this is the virtual frame pointer, make space in current
+ function's stack frame for the stack frame of the inline function.
+
+ Copy the address of this area into a pseudo. Map
+ virtual_stack_vars_rtx to this pseudo and set up a constant
+ equivalence for it to be the address. This will substitute the
+ address into insns where it can be substituted and use the new
+ pseudo where it can't. */
+ if (regno == VIRTUAL_STACK_VARS_REGNUM)
+ {
+ rtx loc, seq;
+ int size = DECL_FRAME_SIZE (map->fndecl);
+ int rounded;
+
+ start_sequence ();
+ loc = assign_stack_temp (BLKmode, size, 1);
+ loc = XEXP (loc, 0);
+#ifdef FRAME_GROWS_DOWNWARD
+ /* In this case, virtual_stack_vars_rtx points to one byte
+ higher than the top of the frame area. So compute the offset
+ to one byte higher than our substitute frame.
+ Keep the fake frame pointer aligned like a real one. */
+ rounded = CEIL_ROUND (size, BIGGEST_ALIGNMENT / BITS_PER_UNIT);
+ loc = plus_constant (loc, rounded);
+#endif
+ map->reg_map[regno] = temp
+ = force_reg (Pmode, force_operand (loc, NULL_RTX));
+
+ if (REGNO (temp) < map->const_equiv_map_size)
+ {
+ map->const_equiv_map[REGNO (temp)] = loc;
+ map->const_age_map[REGNO (temp)] = CONST_AGE_PARM;
+ }
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, map->insns_at_start);
+ return temp;
+ }
+ else if (regno == VIRTUAL_INCOMING_ARGS_REGNUM)
+ {
+ /* Do the same for a block to contain any arguments referenced
+ in memory. */
+ rtx loc, seq;
+ int size = FUNCTION_ARGS_SIZE (DECL_SAVED_INSNS (map->fndecl));
+
+ start_sequence ();
+ loc = assign_stack_temp (BLKmode, size, 1);
+ loc = XEXP (loc, 0);
+ /* When arguments grow downward, the virtual incoming
+ args pointer points to the top of the argument block,
+ so the remapped location better do the same. */
+#ifdef ARGS_GROW_DOWNWARD
+ loc = plus_constant (loc, size);
+#endif
+ map->reg_map[regno] = temp
+ = force_reg (Pmode, force_operand (loc, NULL_RTX));
+
+ if (REGNO (temp) < map->const_equiv_map_size)
+ {
+ map->const_equiv_map[REGNO (temp)] = loc;
+ map->const_age_map[REGNO (temp)] = CONST_AGE_PARM;
+ }
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, map->insns_at_start);
+ return temp;
+ }
+ else if (REG_FUNCTION_VALUE_P (orig))
+ {
+ /* This is a reference to the function return value. If
+ the function doesn't have a return value, error. If the
+ mode doesn't agree, make a SUBREG. */
+ if (map->inline_target == 0)
+ /* Must be unrolling loops or replicating code if we
+ reach here, so return the register unchanged. */
+ return orig;
+ else if (mode != GET_MODE (map->inline_target))
+ return gen_lowpart (mode, map->inline_target);
+ else
+ return map->inline_target;
+ }
+ return orig;
+ }
+ if (map->reg_map[regno] == NULL)
+ {
+ map->reg_map[regno] = gen_reg_rtx (mode);
+ REG_USERVAR_P (map->reg_map[regno]) = REG_USERVAR_P (orig);
+ REG_LOOP_TEST_P (map->reg_map[regno]) = REG_LOOP_TEST_P (orig);
+ RTX_UNCHANGING_P (map->reg_map[regno]) = RTX_UNCHANGING_P (orig);
+ /* A reg with REG_FUNCTION_VALUE_P true will never reach here. */
+ }
+ return map->reg_map[regno];
+
+ case SUBREG:
+ copy = copy_rtx_and_substitute (SUBREG_REG (orig), map);
+ /* SUBREG is ordinary, but don't make nested SUBREGs. */
+ if (GET_CODE (copy) == SUBREG)
+ return gen_rtx (SUBREG, GET_MODE (orig), SUBREG_REG (copy),
+ SUBREG_WORD (orig) + SUBREG_WORD (copy));
+ else if (GET_CODE (copy) == CONCAT)
+ return (subreg_realpart_p (orig) ? XEXP (copy, 0) : XEXP (copy, 1));
+ else
+ return gen_rtx (SUBREG, GET_MODE (orig), copy,
+ SUBREG_WORD (orig));
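+      /* Example (editorial): if (reg:SI 70) is remapped to
+	 (subreg:SI (reg:DI 90) 1), then copying (subreg:HI (reg:SI 70) 0)
+	 yields (subreg:HI (reg:DI 90) 1); the word numbers add, so no
+	 nested SUBREG is created.  */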
+
+ case USE:
+ case CLOBBER:
+ /* USE and CLOBBER are ordinary, but we convert (use (subreg foo))
+ to (use foo) if the original insn didn't have a subreg.
+ Removing the subreg distorts the VAX movstrhi pattern
+ by changing the mode of an operand. */
+ copy = copy_rtx_and_substitute (XEXP (orig, 0), map);
+ if (GET_CODE (copy) == SUBREG && GET_CODE (XEXP (orig, 0)) != SUBREG)
+ copy = SUBREG_REG (copy);
+ return gen_rtx (code, VOIDmode, copy);
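+      /* Example (editorial): if (reg:SI 70) is remapped to
+	 (subreg:SI (reg:DI 90) 0), then (use (reg:SI 70)) becomes
+	 (use (reg:DI 90)) rather than (use (subreg ...)).  */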
+
+ case CODE_LABEL:
+ LABEL_PRESERVE_P (map->label_map[CODE_LABEL_NUMBER (orig)])
+ = LABEL_PRESERVE_P (orig);
+ return map->label_map[CODE_LABEL_NUMBER (orig)];
+
+ case LABEL_REF:
+ copy = gen_rtx (LABEL_REF, mode,
+ LABEL_REF_NONLOCAL_P (orig) ? XEXP (orig, 0)
+ : map->label_map[CODE_LABEL_NUMBER (XEXP (orig, 0))]);
+ LABEL_OUTSIDE_LOOP_P (copy) = LABEL_OUTSIDE_LOOP_P (orig);
+
+ /* The fact that this label was previously nonlocal does not mean
+ it still is, so we must check if it is within the range of
+ this function's labels. */
+ LABEL_REF_NONLOCAL_P (copy)
+ = (LABEL_REF_NONLOCAL_P (orig)
+ && ! (CODE_LABEL_NUMBER (XEXP (copy, 0)) >= get_first_label_num ()
+ && CODE_LABEL_NUMBER (XEXP (copy, 0)) < max_label_num ()));
+
+ /* If we have made a nonlocal label local, it means that this
+       inlined call will be referring to our nonlocal goto handler.
+ So make sure we create one for this block; we normally would
+ not since this is not otherwise considered a "call". */
+ if (LABEL_REF_NONLOCAL_P (orig) && ! LABEL_REF_NONLOCAL_P (copy))
+ function_call_count++;
+
+ return copy;
+
+ case PC:
+ case CC0:
+ case CONST_INT:
+ return orig;
+
+ case SYMBOL_REF:
+ /* Symbols which represent the address of a label stored in the constant
+ pool must be modified to point to a constant pool entry for the
+ remapped label. Otherwise, symbols are returned unchanged. */
+ if (CONSTANT_POOL_ADDRESS_P (orig))
+ {
+ rtx constant = get_pool_constant (orig);
+ if (GET_CODE (constant) == LABEL_REF)
+ return XEXP (force_const_mem (Pmode,
+ copy_rtx_and_substitute (constant,
+ map)),
+ 0);
+ }
+
+ return orig;
+
+ case CONST_DOUBLE:
+    /* We have to make a new copy of this CONST_DOUBLE because we don't want
+ to use the old value of CONST_DOUBLE_MEM. Also, this may be a
+ duplicate of a CONST_DOUBLE we have already seen. */
+ if (GET_MODE_CLASS (GET_MODE (orig)) == MODE_FLOAT)
+ {
+ REAL_VALUE_TYPE d;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, orig);
+ return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (orig));
+ }
+ else
+ return immed_double_const (CONST_DOUBLE_LOW (orig),
+ CONST_DOUBLE_HIGH (orig), VOIDmode);
+
+ case CONST:
+ /* Make new constant pool entry for a constant
+ that was in the pool of the inline function. */
+ if (RTX_INTEGRATED_P (orig))
+ {
+ /* If this was an address of a constant pool entry that itself
+ had to be placed in the constant pool, it might not be a
+ valid address. So the recursive call below might turn it
+ into a register. In that case, it isn't a constant any
+ more, so return it. This has the potential of changing a
+	 MEM into a REG, but we'll assume that it is safe. */
+ temp = copy_rtx_and_substitute (XEXP (orig, 0), map);
+ if (! CONSTANT_P (temp))
+ return temp;
+ return validize_mem (force_const_mem (GET_MODE (orig), temp));
+ }
+ break;
+
+ case ADDRESS:
+ /* If from constant pool address, make new constant pool entry and
+ return its address. */
+ if (! RTX_INTEGRATED_P (orig))
+ abort ();
+
+ temp = force_const_mem (GET_MODE (orig),
+ copy_rtx_and_substitute (XEXP (orig, 0), map));
+
+#if 0
+ /* Legitimizing the address here is incorrect.
+
+ The only ADDRESS rtx's that can reach here are ones created by
+ save_constants. Hence the operand of the ADDRESS is always legal
+ in this position of the instruction, since the original rtx without
+ the ADDRESS was legal.
+
+ The reason we don't legitimize the address here is that on the
+ Sparc, the caller may have a (high ...) surrounding this ADDRESS.
+ This code forces the operand of the address to a register, which
+ fails because we can not take the HIGH part of a register.
+
+ Also, change_address may create new registers. These registers
+ will not have valid reg_map entries. This can cause try_constants()
+     to fail because it assumes that all registers in the rtx have valid
+ reg_map entries, and it may end up replacing one of these new
+ registers with junk. */
+
+ if (! memory_address_p (GET_MODE (temp), XEXP (temp, 0)))
+ temp = change_address (temp, GET_MODE (temp), XEXP (temp, 0));
+#endif
+
+ return XEXP (temp, 0);
+
+ case ASM_OPERANDS:
+ /* If a single asm insn contains multiple output operands
+ then it contains multiple ASM_OPERANDS rtx's that share operand 3.
+ We must make sure that the copied insn continues to share it. */
+ if (map->orig_asm_operands_vector == XVEC (orig, 3))
+ {
+ copy = rtx_alloc (ASM_OPERANDS);
+ copy->volatil = orig->volatil;
+ XSTR (copy, 0) = XSTR (orig, 0);
+ XSTR (copy, 1) = XSTR (orig, 1);
+ XINT (copy, 2) = XINT (orig, 2);
+ XVEC (copy, 3) = map->copy_asm_operands_vector;
+ XVEC (copy, 4) = map->copy_asm_constraints_vector;
+ XSTR (copy, 5) = XSTR (orig, 5);
+ XINT (copy, 6) = XINT (orig, 6);
+ return copy;
+ }
+ break;
+
+ case CALL:
+ /* This is given special treatment because the first
+ operand of a CALL is a (MEM ...) which may get
+ forced into a register for cse. This is undesirable
+ if function-address cse isn't wanted or if we won't do cse. */
+#ifndef NO_FUNCTION_CSE
+ if (! (optimize && ! flag_no_function_cse))
+#endif
+ return gen_rtx (CALL, GET_MODE (orig),
+ gen_rtx (MEM, GET_MODE (XEXP (orig, 0)),
+ copy_rtx_and_substitute (XEXP (XEXP (orig, 0), 0), map)),
+ copy_rtx_and_substitute (XEXP (orig, 1), map));
+ break;
+
+#if 0
+ /* Must be ifdefed out for loop unrolling to work. */
+ case RETURN:
+ abort ();
+#endif
+
+ case SET:
+ /* If this is setting fp or ap, it means that we have a nonlocal goto.
+ Don't alter that.
+ If the nonlocal goto is into the current function,
+ this will result in unnecessarily bad code, but should work. */
+ if (SET_DEST (orig) == virtual_stack_vars_rtx
+ || SET_DEST (orig) == virtual_incoming_args_rtx)
+ return gen_rtx (SET, VOIDmode, SET_DEST (orig),
+ copy_rtx_and_substitute (SET_SRC (orig), map));
+ break;
+
+ case MEM:
+ copy = rtx_alloc (MEM);
+ PUT_MODE (copy, mode);
+ XEXP (copy, 0) = copy_rtx_and_substitute (XEXP (orig, 0), map);
+ MEM_IN_STRUCT_P (copy) = MEM_IN_STRUCT_P (orig);
+ MEM_VOLATILE_P (copy) = MEM_VOLATILE_P (orig);
+
+ /* If doing function inlining, this MEM might not be const in the
+ function that it is being inlined into, and thus may not be
+ unchanging after function inlining. Constant pool references are
+ handled elsewhere, so this doesn't lose RTX_UNCHANGING_P bits
+ for them. */
+ if (! map->integrating)
+ RTX_UNCHANGING_P (copy) = RTX_UNCHANGING_P (orig);
+
+ return copy;
+ }
+
+ copy = rtx_alloc (code);
+ PUT_MODE (copy, mode);
+ copy->in_struct = orig->in_struct;
+ copy->volatil = orig->volatil;
+ copy->unchanging = orig->unchanging;
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (copy));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case '0':
+ break;
+
+ case 'e':
+ XEXP (copy, i) = copy_rtx_and_substitute (XEXP (orig, i), map);
+ break;
+
+ case 'u':
+ /* Change any references to old-insns to point to the
+ corresponding copied insns. */
+ XEXP (copy, i) = map->insn_map[INSN_UID (XEXP (orig, i))];
+ break;
+
+ case 'E':
+ XVEC (copy, i) = XVEC (orig, i);
+ if (XVEC (orig, i) != NULL && XVECLEN (orig, i) != 0)
+ {
+ XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
+ for (j = 0; j < XVECLEN (copy, i); j++)
+ XVECEXP (copy, i, j)
+ = copy_rtx_and_substitute (XVECEXP (orig, i, j), map);
+ }
+ break;
+
+ case 'w':
+ XWINT (copy, i) = XWINT (orig, i);
+ break;
+
+ case 'i':
+ XINT (copy, i) = XINT (orig, i);
+ break;
+
+ case 's':
+ XSTR (copy, i) = XSTR (orig, i);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+
+ if (code == ASM_OPERANDS && map->orig_asm_operands_vector == 0)
+ {
+ map->orig_asm_operands_vector = XVEC (orig, 3);
+ map->copy_asm_operands_vector = XVEC (copy, 3);
+ map->copy_asm_constraints_vector = XVEC (copy, 4);
+ }
+
+ return copy;
+}
+
+/* Substitute known constant values into INSN, if that is valid. */
+
+void
+try_constants (insn, map)
+ rtx insn;
+ struct inline_remap *map;
+{
+ int i;
+
+ map->num_sets = 0;
+ subst_constants (&PATTERN (insn), insn, map);
+
+ /* Apply the changes if they are valid; otherwise discard them. */
+ apply_change_group ();
+
+ /* Show we don't know the value of anything stored or clobbered. */
+ note_stores (PATTERN (insn), mark_stores);
+ map->last_pc_value = 0;
+#ifdef HAVE_cc0
+ map->last_cc0_value = 0;
+#endif
+
+ /* Set up any constant equivalences made in this insn. */
+ for (i = 0; i < map->num_sets; i++)
+ {
+ if (GET_CODE (map->equiv_sets[i].dest) == REG)
+ {
+ int regno = REGNO (map->equiv_sets[i].dest);
+
+ if (regno < map->const_equiv_map_size
+ && (map->const_equiv_map[regno] == 0
+		/* The following clause is a hack to make the case work where
+		   GNU C++ reassigns a variable to make cse work right. */
+ || ! rtx_equal_p (map->const_equiv_map[regno],
+ map->equiv_sets[i].equiv)))
+ {
+ map->const_equiv_map[regno] = map->equiv_sets[i].equiv;
+ map->const_age_map[regno] = map->const_age;
+ }
+ }
+ else if (map->equiv_sets[i].dest == pc_rtx)
+ map->last_pc_value = map->equiv_sets[i].equiv;
+#ifdef HAVE_cc0
+ else if (map->equiv_sets[i].dest == cc0_rtx)
+ map->last_cc0_value = map->equiv_sets[i].equiv;
+#endif
+ }
+}
+
+/* Substitute known constants for pseudo regs in the contents of LOC,
+ which are part of INSN.
+ If INSN is zero, the substitution should always be done (this is used to
+ update DECL_RTL).
+ These changes are taken out by try_constants if the result is not valid.
+
+ Note that we are more concerned with determining when the result of a SET
+ is a constant, for further propagation, than actually inserting constants
+ into insns; cse will do the latter task better.
+
+ This function is also used to adjust address of items previously addressed
+ via the virtual stack variable or virtual incoming arguments registers. */
+
+static void
+subst_constants (loc, insn, map)
+ rtx *loc;
+ rtx insn;
+ struct inline_remap *map;
+{
+ rtx x = *loc;
+ register int i;
+ register enum rtx_code code;
+ register char *format_ptr;
+ int num_changes = num_validated_changes ();
+ rtx new = 0;
+ enum machine_mode op0_mode;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case PC:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CONST:
+ case LABEL_REF:
+ case ADDRESS:
+ return;
+
+#ifdef HAVE_cc0
+ case CC0:
+ validate_change (insn, loc, map->last_cc0_value, 1);
+ return;
+#endif
+
+ case USE:
+ case CLOBBER:
+ /* The only thing we can do with a USE or CLOBBER is possibly do
+ some substitutions in a MEM within it. */
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ subst_constants (&XEXP (XEXP (x, 0), 0), insn, map);
+ return;
+
+ case REG:
+ /* Substitute for parms and known constants. Don't replace
+ hard regs used as user variables with constants. */
+ {
+ int regno = REGNO (x);
+
+ if (! (regno < FIRST_PSEUDO_REGISTER && REG_USERVAR_P (x))
+ && regno < map->const_equiv_map_size
+ && map->const_equiv_map[regno] != 0
+ && map->const_age_map[regno] >= map->const_age)
+ validate_change (insn, loc, map->const_equiv_map[regno], 1);
+ return;
+ }
+
+ case SUBREG:
+ /* SUBREG applied to something other than a reg
+ should be treated as ordinary, since that must
+ be a special hack and we don't know how to treat it specially.
+ Consider for example mulsidi3 in m68k.md.
+ Ordinary SUBREG of a REG needs this special treatment. */
+ if (GET_CODE (SUBREG_REG (x)) == REG)
+ {
+ rtx inner = SUBREG_REG (x);
+ rtx new = 0;
+
+ /* We can't call subst_constants on &SUBREG_REG (x) because any
+	     constant or SUBREG wouldn't be valid inside our SUBREG.  Instead,
+ see what is inside, try to form the new SUBREG and see if that is
+ valid. We handle two cases: extracting a full word in an
+ integral mode and extracting the low part. */
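+	  /* Example (editorial): if INNER is (reg:DI 80) with the known
+	     equivalent (const_int 7), the low-part path below lets
+	     (subreg:SI (reg:DI 80) 0) be replaced by (const_int 7).  */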
+ subst_constants (&inner, NULL_RTX, map);
+
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
+ && GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD
+ && GET_MODE (SUBREG_REG (x)) != VOIDmode)
+ new = operand_subword (inner, SUBREG_WORD (x), 0,
+ GET_MODE (SUBREG_REG (x)));
+
+ if (new == 0 && subreg_lowpart_p (x))
+ new = gen_lowpart_common (GET_MODE (x), inner);
+
+ if (new)
+ validate_change (insn, loc, new, 1);
+
+ return;
+ }
+ break;
+
+ case MEM:
+ subst_constants (&XEXP (x, 0), insn, map);
+
+ /* If a memory address got spoiled, change it back. */
+ if (insn != 0 && num_validated_changes () != num_changes
+ && !memory_address_p (GET_MODE (x), XEXP (x, 0)))
+ cancel_changes (num_changes);
+ return;
+
+ case SET:
+ {
+ /* Substitute constants in our source, and in any arguments to a
+	 complex (e.g., ZERO_EXTRACT) destination, but not in the destination
+ itself. */
+ rtx *dest_loc = &SET_DEST (x);
+ rtx dest = *dest_loc;
+ rtx src, tem;
+
+ subst_constants (&SET_SRC (x), insn, map);
+ src = SET_SRC (x);
+
+ while (GET_CODE (*dest_loc) == ZERO_EXTRACT
+ /* By convention, we always use ZERO_EXTRACT in the dest. */
+/* || GET_CODE (*dest_loc) == SIGN_EXTRACT */
+ || GET_CODE (*dest_loc) == SUBREG
+ || GET_CODE (*dest_loc) == STRICT_LOW_PART)
+ {
+ if (GET_CODE (*dest_loc) == ZERO_EXTRACT)
+ {
+ subst_constants (&XEXP (*dest_loc, 1), insn, map);
+ subst_constants (&XEXP (*dest_loc, 2), insn, map);
+ }
+ dest_loc = &XEXP (*dest_loc, 0);
+ }
+
+ /* Do substitute in the address of a destination in memory. */
+ if (GET_CODE (*dest_loc) == MEM)
+ subst_constants (&XEXP (*dest_loc, 0), insn, map);
+
+	/* Check for the case where DEST is a SUBREG, both it and the underlying
+	   register are less than one word, and the SUBREG has the wider mode.
+	   In that case, we are really setting the underlying register to the
+ source converted to the mode of DEST. So indicate that. */
+ if (GET_CODE (dest) == SUBREG
+ && GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD
+ && GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
+ <= GET_MODE_SIZE (GET_MODE (dest)))
+ && (tem = gen_lowpart_if_possible (GET_MODE (SUBREG_REG (dest)),
+ src)))
+ src = tem, dest = SUBREG_REG (dest);
+
+	/* If storing a recognizable value, save it for later recording.  */
+ if ((map->num_sets < MAX_RECOG_OPERANDS)
+ && (CONSTANT_P (src)
+ || (GET_CODE (src) == PLUS
+ && GET_CODE (XEXP (src, 0)) == REG
+ && REGNO (XEXP (src, 0)) >= FIRST_VIRTUAL_REGISTER
+ && REGNO (XEXP (src, 0)) <= LAST_VIRTUAL_REGISTER
+ && CONSTANT_P (XEXP (src, 1)))
+ || GET_CODE (src) == COMPARE
+#ifdef HAVE_cc0
+ || dest == cc0_rtx
+#endif
+ || (dest == pc_rtx
+ && (src == pc_rtx || GET_CODE (src) == RETURN
+ || GET_CODE (src) == LABEL_REF))))
+ {
+ /* Normally, this copy won't do anything. But, if SRC is a COMPARE
+ it will cause us to save the COMPARE with any constants
+ substituted, which is what we want for later. */
+ map->equiv_sets[map->num_sets].equiv = copy_rtx (src);
+ map->equiv_sets[map->num_sets++].dest = dest;
+ }
+
+ return;
+ }
+ }
+
+ format_ptr = GET_RTX_FORMAT (code);
+
+ /* If the first operand is an expression, save its mode for later. */
+ if (*format_ptr == 'e')
+ op0_mode = GET_MODE (XEXP (x, 0));
+
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case '0':
+ break;
+
+ case 'e':
+ if (XEXP (x, i))
+ subst_constants (&XEXP (x, i), insn, map);
+ break;
+
+ case 'u':
+ case 'i':
+ case 's':
+ case 'w':
+ break;
+
+ case 'E':
+ if (XVEC (x, i) != NULL && XVECLEN (x, i) != 0)
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ subst_constants (&XVECEXP (x, i, j), insn, map);
+ }
+ break;
+
+ default:
+ abort ();
+ }
+ }
+
+ /* If this is a commutative operation, move a constant to the second
+ operand unless the second operand is already a CONST_INT. */
+ if ((GET_RTX_CLASS (code) == 'c' || code == NE || code == EQ)
+ && CONSTANT_P (XEXP (x, 0)) && GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ rtx tem = XEXP (x, 0);
+ validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1);
+ validate_change (insn, &XEXP (x, 1), tem, 1);
+ }
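+  /* Example (editorial): a (plus:SI (const_int 4) (reg:SI 65)) left
+     behind by substitution is canonicalized just above into
+     (plus:SI (reg:SI 65) (const_int 4)).  */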
+
+ /* Simplify the expression in case we put in some constants. */
+ switch (GET_RTX_CLASS (code))
+ {
+ case '1':
+ new = simplify_unary_operation (code, GET_MODE (x),
+ XEXP (x, 0), op0_mode);
+ break;
+
+ case '<':
+ {
+ enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
+ if (op_mode == VOIDmode)
+ op_mode = GET_MODE (XEXP (x, 1));
+ new = simplify_relational_operation (code, op_mode,
+ XEXP (x, 0), XEXP (x, 1));
+#ifdef FLOAT_STORE_FLAG_VALUE
+ if (new != 0 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ new = ((new == const0_rtx) ? CONST0_RTX (GET_MODE (x))
+ : CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
+ GET_MODE (x)));
+#endif
+ break;
+ }
+
+ case '2':
+ case 'c':
+ new = simplify_binary_operation (code, GET_MODE (x),
+ XEXP (x, 0), XEXP (x, 1));
+ break;
+
+ case 'b':
+ case '3':
+ new = simplify_ternary_operation (code, GET_MODE (x), op0_mode,
+ XEXP (x, 0), XEXP (x, 1), XEXP (x, 2));
+ break;
+ }
+
+ if (new)
+ validate_change (insn, loc, new, 1);
+}
+
+/* Show that the registers modified no longer contain known constants.  We are
+ called from note_stores with parts of the new insn. */
+
+void
+mark_stores (dest, x)
+ rtx dest;
+ rtx x;
+{
+ int regno = -1;
+ enum machine_mode mode;
+
+ /* DEST is always the innermost thing set, except in the case of
+ SUBREGs of hard registers. */
+
+ if (GET_CODE (dest) == REG)
+ regno = REGNO (dest), mode = GET_MODE (dest);
+ else if (GET_CODE (dest) == SUBREG && GET_CODE (SUBREG_REG (dest)) == REG)
+ {
+ regno = REGNO (SUBREG_REG (dest)) + SUBREG_WORD (dest);
+ mode = GET_MODE (SUBREG_REG (dest));
+ }
+
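+  /* For instance (editorial): a store to (subreg:SI (reg:DI 2) 0),
+     where 2 is a hard register and HARD_REGNO_NREGS gives 2 for DImode,
+     yields REGNO 2 and DImode here, so the loop below clears the
+     equivalence entries for hard regs 2 and 3.  */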
+ if (regno >= 0)
+ {
+ int last_reg = (regno >= FIRST_PSEUDO_REGISTER ? regno
+ : regno + HARD_REGNO_NREGS (regno, mode) - 1);
+ int i;
+
+ for (i = regno; i <= last_reg; i++)
+ if (i < global_const_equiv_map_size)
+ global_const_equiv_map[i] = 0;
+ }
+}
+
+/* If any CONST expressions with RTX_INTEGRATED_P are present in the rtx
+ pointed to by PX, they represent constants in the constant pool.
+ Replace these with a new memory reference obtained from force_const_mem.
+ Similarly, ADDRESS expressions with RTX_INTEGRATED_P represent the
+ address of a constant pool entry. Replace them with the address of
+ a new constant pool entry obtained from force_const_mem. */
+
+static void
+restore_constants (px)
+ rtx *px;
+{
+ rtx x = *px;
+ int i, j;
+ char *fmt;
+
+ if (x == 0)
+ return;
+
+ if (GET_CODE (x) == CONST_DOUBLE)
+ {
+ /* We have to make a new CONST_DOUBLE to ensure that we account for
+ it correctly. Using the old CONST_DOUBLE_MEM data is wrong. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ {
+ REAL_VALUE_TYPE d;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+ *px = CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
+ }
+ else
+ *px = immed_double_const (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x),
+ VOIDmode);
+ }
+
+ else if (RTX_INTEGRATED_P (x) && GET_CODE (x) == CONST)
+ {
+ restore_constants (&XEXP (x, 0));
+ *px = validize_mem (force_const_mem (GET_MODE (x), XEXP (x, 0)));
+ }
+ else if (RTX_INTEGRATED_P (x) && GET_CODE (x) == SUBREG)
+ {
+ /* This must be (subreg/i:M1 (const/i:M2 ...) 0). */
+ rtx new = XEXP (SUBREG_REG (x), 0);
+
+ restore_constants (&new);
+ new = force_const_mem (GET_MODE (SUBREG_REG (x)), new);
+ PUT_MODE (new, GET_MODE (x));
+ *px = validize_mem (new);
+ }
+ else if (RTX_INTEGRATED_P (x) && GET_CODE (x) == ADDRESS)
+ {
+ restore_constants (&XEXP (x, 0));
+ *px = XEXP (force_const_mem (GET_MODE (x), XEXP (x, 0)), 0);
+ }
+ else
+ {
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
+ {
+ switch (*fmt++)
+ {
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ restore_constants (&XVECEXP (x, i, j));
+ break;
+
+ case 'e':
+ restore_constants (&XEXP (x, i));
+ break;
+ }
+ }
+ }
+}
+
+/* Given a pointer to some BLOCK node, if the BLOCK_ABSTRACT_ORIGIN for the
+ given BLOCK node is NULL, set the BLOCK_ABSTRACT_ORIGIN for the node so
+ that it points to the node itself, thus indicating that the node is its
+ own (abstract) origin. Additionally, if the BLOCK_ABSTRACT_ORIGIN for
+ the given node is NULL, recursively descend the decl/block tree which
+ it is the root of, and for each other ..._DECL or BLOCK node contained
+ therein whose DECL_ABSTRACT_ORIGINs or BLOCK_ABSTRACT_ORIGINs are also
+ still NULL, set *their* DECL_ABSTRACT_ORIGIN or BLOCK_ABSTRACT_ORIGIN
+ values to point to themselves. */
+
+static void
+set_block_origin_self (stmt)
+ register tree stmt;
+{
+ if (BLOCK_ABSTRACT_ORIGIN (stmt) == NULL_TREE)
+ {
+ BLOCK_ABSTRACT_ORIGIN (stmt) = stmt;
+
+ {
+ register tree local_decl;
+
+ for (local_decl = BLOCK_VARS (stmt);
+ local_decl != NULL_TREE;
+ local_decl = TREE_CHAIN (local_decl))
+ set_decl_origin_self (local_decl); /* Potential recursion. */
+ }
+
+ {
+ register tree subblock;
+
+ for (subblock = BLOCK_SUBBLOCKS (stmt);
+ subblock != NULL_TREE;
+ subblock = BLOCK_CHAIN (subblock))
+ set_block_origin_self (subblock); /* Recurse. */
+ }
+ }
+}
+
+/* Given a pointer to some ..._DECL node, if the DECL_ABSTRACT_ORIGIN for
+ the given ..._DECL node is NULL, set the DECL_ABSTRACT_ORIGIN for the
+   node so that it points to the node itself, thus indicating that the
+ node represents its own (abstract) origin. Additionally, if the
+ DECL_ABSTRACT_ORIGIN for the given node is NULL, recursively descend
+   the decl/block tree of which the given node is the root, and for
+ each other ..._DECL or BLOCK node contained therein whose
+ DECL_ABSTRACT_ORIGINs or BLOCK_ABSTRACT_ORIGINs are also still NULL,
+ set *their* DECL_ABSTRACT_ORIGIN or BLOCK_ABSTRACT_ORIGIN values to
+ point to themselves. */
+
+static void
+set_decl_origin_self (decl)
+ register tree decl;
+{
+ if (DECL_ABSTRACT_ORIGIN (decl) == NULL_TREE)
+ {
+ DECL_ABSTRACT_ORIGIN (decl) = decl;
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ register tree arg;
+
+ for (arg = DECL_ARGUMENTS (decl); arg; arg = TREE_CHAIN (arg))
+ DECL_ABSTRACT_ORIGIN (arg) = arg;
+ if (DECL_INITIAL (decl) != NULL_TREE)
+ set_block_origin_self (DECL_INITIAL (decl));
+ }
+ }
+}
+
+/* Given a pointer to some BLOCK node, and a boolean value to set the
+ "abstract" flags to, set that value into the BLOCK_ABSTRACT flag for
+ the given block, and for all local decls and all local sub-blocks
+ (recursively) which are contained therein. */
+
+static void
+set_block_abstract_flags (stmt, setting)
+ register tree stmt;
+ register int setting;
+{
+ BLOCK_ABSTRACT (stmt) = setting;
+
+ {
+ register tree local_decl;
+
+ for (local_decl = BLOCK_VARS (stmt);
+ local_decl != NULL_TREE;
+ local_decl = TREE_CHAIN (local_decl))
+ set_decl_abstract_flags (local_decl, setting);
+ }
+
+ {
+ register tree subblock;
+
+ for (subblock = BLOCK_SUBBLOCKS (stmt);
+ subblock != NULL_TREE;
+ subblock = BLOCK_CHAIN (subblock))
+ set_block_abstract_flags (subblock, setting);
+ }
+}
+
+/* Given a pointer to some ..._DECL node, and a boolean value to set the
+ "abstract" flags to, set that value into the DECL_ABSTRACT flag for the
+ given decl, and (in the case where the decl is a FUNCTION_DECL) also
+ set the abstract flags for all of the parameters, local vars, local
+ blocks and sub-blocks (recursively) to the same setting. */
+
+void
+set_decl_abstract_flags (decl, setting)
+ register tree decl;
+ register int setting;
+{
+ DECL_ABSTRACT (decl) = setting;
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ register tree arg;
+
+ for (arg = DECL_ARGUMENTS (decl); arg; arg = TREE_CHAIN (arg))
+ DECL_ABSTRACT (arg) = setting;
+ if (DECL_INITIAL (decl) != NULL_TREE)
+ set_block_abstract_flags (DECL_INITIAL (decl), setting);
+ }
+}
+
+/* Output the assembly language code for the function FNDECL
+ from its DECL_SAVED_INSNS. Used for inline functions that are output
+ at end of compilation instead of where they came in the source. */
+
+void
+output_inline_function (fndecl)
+ tree fndecl;
+{
+ rtx head;
+ rtx last;
+
+ if (output_bytecode)
+ {
+ warning ("`inline' ignored for bytecode output");
+ return;
+ }
+
+ head = DECL_SAVED_INSNS (fndecl);
+ current_function_decl = fndecl;
+
+ /* This call is only used to initialize global variables. */
+ init_function_start (fndecl, "lossage", 1);
+
+ /* Redo parameter determinations in case the FUNCTION_...
+ macros took machine-specific actions that need to be redone. */
+ assign_parms (fndecl, 1);
+
+ /* Set stack frame size. */
+ assign_stack_local (BLKmode, DECL_FRAME_SIZE (fndecl), 0);
+
+ restore_reg_data (FIRST_PARM_INSN (head));
+
+ stack_slot_list = STACK_SLOT_LIST (head);
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_CALLS_ALLOCA)
+ current_function_calls_alloca = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_CALLS_SETJMP)
+ current_function_calls_setjmp = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_CALLS_LONGJMP)
+ current_function_calls_longjmp = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_RETURNS_STRUCT)
+ current_function_returns_struct = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_RETURNS_PCC_STRUCT)
+ current_function_returns_pcc_struct = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_NEEDS_CONTEXT)
+ current_function_needs_context = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_HAS_NONLOCAL_LABEL)
+ current_function_has_nonlocal_label = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_RETURNS_POINTER)
+ current_function_returns_pointer = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_USES_CONST_POOL)
+ current_function_uses_const_pool = 1;
+
+ if (FUNCTION_FLAGS (head) & FUNCTION_FLAGS_USES_PIC_OFFSET_TABLE)
+ current_function_uses_pic_offset_table = 1;
+
+ current_function_outgoing_args_size = OUTGOING_ARGS_SIZE (head);
+ current_function_pops_args = POPS_ARGS (head);
+
+ /* There is no need to output a return label again. */
+ return_label = 0;
+
+ expand_function_end (DECL_SOURCE_FILE (fndecl), DECL_SOURCE_LINE (fndecl), 0);
+
+ /* Find last insn and rebuild the constant pool. */
+ for (last = FIRST_PARM_INSN (head);
+ NEXT_INSN (last); last = NEXT_INSN (last))
+ {
+ if (GET_RTX_CLASS (GET_CODE (last)) == 'i')
+ {
+ restore_constants (&PATTERN (last));
+ restore_constants (&REG_NOTES (last));
+ }
+ }
+
+ set_new_first_and_last_insn (FIRST_PARM_INSN (head), last);
+ set_new_first_and_last_label_num (FIRST_LABELNO (head), LAST_LABELNO (head));
+
+ /* We must have already output DWARF debugging information for the
+ original (abstract) inline function declaration/definition, so
+ we want to make sure that the debugging information we generate
+ for this special instance of the inline function refers back to
+ the information we already generated. To make sure that happens,
+ we simply have to set the DECL_ABSTRACT_ORIGIN for the function
+ node (and for all of the local ..._DECL nodes which are its children)
+ so that they all point to themselves. */
+
+ set_decl_origin_self (fndecl);
+
+ /* We're not deferring this any longer. */
+ DECL_DEFER_OUTPUT (fndecl) = 0;
+
+ /* Compile this function all the way down to assembly code. */
+ rest_of_compilation (fndecl);
+
+ current_function_decl = 0;
+}
diff --git a/gnu/usr.bin/cc/cc_int/jump.c b/gnu/usr.bin/cc/cc_int/jump.c
new file mode 100644
index 0000000..0792f17
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/jump.c
@@ -0,0 +1,4395 @@
+/* Optimize jump instructions, for GNU compiler.
+ Copyright (C) 1987, 88, 89, 91, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This is the jump-optimization pass of the compiler.
+ It is run two or three times: once before cse, sometimes once after cse,
+ and once after reload (before final).
+
+ jump_optimize deletes unreachable code and labels that are not used.
+ It also deletes jumps that jump to the following insn,
+ and simplifies jumps around unconditional jumps and jumps
+ to unconditional jumps.
+
+ Each CODE_LABEL has a count of the times it is used
+ stored in the LABEL_NUSES internal field, and each JUMP_INSN
+ has one label that it refers to stored in the
+ JUMP_LABEL internal field. With this we can detect labels that
+ become unused because of the deletion of all the jumps that
+ formerly used them. The JUMP_LABEL info is sometimes looked
+ at by later passes.
+
+ Optionally, cross-jumping can be done. Currently it is done
+ only on the last pass (after reload and before final).
+ In fact, the code for cross-jumping now assumes that register
+ allocation has been done, since it uses `rtx_renumbered_equal_p'.
+
+ Jump optimization is done after cse when cse's constant-propagation
+ causes jumps to become unconditional or to be deleted.
+
+ Unreachable loops are not detected here, because the labels
+ have references and the insns appear reachable from the labels.
+ find_basic_blocks in flow.c finds and deletes such loops.
+
+ The subroutines delete_insn, redirect_jump, and invert_jump are used
+ from other passes as well. */
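+
+/* An editorial sketch of the two simplest transformations: a jump
+ whose target label immediately follows it is deleted outright, and
+ a jump to "L1: jmp L2" is redirected to jump straight to L2, after
+ which L1 and its jump can themselves be deleted once nothing else
+ refers to them. */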
+
+#include "config.h"
+#include "rtl.h"
+#include "flags.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "expr.h"
+#include "insn-config.h"
+#include "insn-flags.h"
+#include "real.h"
+
+/* ??? Eventually must record somehow the labels used by jumps
+ from nested functions. */
+/* Pre-record the next or previous real insn for each label?
+ No, this pass is very fast anyway. */
+/* Condense consecutive labels?
+ This would make life analysis faster, maybe. */
+/* Optimize jump y; x: ... y: jumpif... x?
+ Don't know if it is worth bothering with. */
+/* Optimize two cases of conditional jump to conditional jump?
+ This can never delete any instruction or make anything dead,
+ or even change what is live at any point.
+ So perhaps let combiner do it. */
+
+/* Vector indexed by uid.
+ For each CODE_LABEL, index by its uid to get first unconditional jump
+ that jumps to the label.
+ For each JUMP_INSN, index by its uid to get the next unconditional jump
+ that jumps to the same label.
+ Element 0 is the start of a chain of all return insns.
+ (It is safe to use element 0 because insn uid 0 is not used.) */
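+
+/* For illustration: if unconditional jumps J1 and then J2 both go to
+ label L, the chain reads jump_chain[uid (L)] == J2,
+ jump_chain[uid (J2)] == J1, and jump_chain[uid (J1)] == 0. */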
+
+static rtx *jump_chain;
+
+/* List of labels referred to from initializers.
+ These can never be deleted. */
+rtx forced_labels;
+
+/* Maximum index in jump_chain. */
+
+static int max_jump_chain;
+
+/* Set nonzero by jump_optimize if control can fall through
+ to the end of the function. */
+int can_reach_end;
+
+/* Indicates whether death notes are significant in cross jump analysis.
+ Normally they are not significant, because if A and B jump to C,
+ and R dies in A, it must die in B. But this might not be true after
+ stack register conversion, and we must compare death notes in that
+ case. */
+
+static int cross_jump_death_matters = 0;
+
+static int duplicate_loop_exit_test PROTO((rtx));
+static void find_cross_jump PROTO((rtx, rtx, int, rtx *, rtx *));
+static void do_cross_jump PROTO((rtx, rtx, rtx));
+static int jump_back_p PROTO((rtx, rtx));
+static int tension_vector_labels PROTO((rtx, int));
+static void mark_jump_label PROTO((rtx, rtx, int));
+static void delete_computation PROTO((rtx));
+static void delete_from_jump_chain PROTO((rtx));
+static int delete_labelref_insn PROTO((rtx, rtx, int));
+static void redirect_tablejump PROTO((rtx, rtx));
+
+/* Delete no-op jumps and optimize jumps to jumps
+ and jumps around jumps.
+ Delete unused labels and unreachable code.
+
+ If CROSS_JUMP is 1, detect matching code
+ before a jump and its destination and unify them.
+ If CROSS_JUMP is 2, do cross-jumping, but pay attention to death notes.
+
+ If NOOP_MOVES is nonzero, delete no-op move insns.
+
+ If AFTER_REGSCAN is nonzero, then this jump pass is being run immediately
+ after regscan, and it is safe to use regno_first_uid and regno_last_uid.
+
+ If `optimize' is zero, don't change any code,
+ just determine whether control drops off the end of the function.
+ This case occurs when we have -W and not -O.
+ It works because `delete_insn' checks the value of `optimize'
+ and refrains from actually deleting when that is 0. */
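+
+/* An illustrative invocation (an editorial sketch, not quoted from
+ the compiler driver): a first pass right after regscan, with no
+ cross-jumping but deleting no-op moves, would look like
+ jump_optimize (get_insns (), 0, 1, 1); the final pass could pass
+ CROSS_JUMP == 2 so that death notes are compared. */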
+
+void
+jump_optimize (f, cross_jump, noop_moves, after_regscan)
+ rtx f;
+ int cross_jump;
+ int noop_moves;
+ int after_regscan;
+{
+ register rtx insn, next, note;
+ int changed;
+ int first = 1;
+ int max_uid = 0;
+ rtx last_insn;
+
+ cross_jump_death_matters = (cross_jump == 2);
+
+ /* Initialize LABEL_NUSES and JUMP_LABEL fields. Delete any REG_LABEL
+ notes whose labels don't occur in the insn any more. */
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ LABEL_NUSES (insn) = (LABEL_PRESERVE_P (insn) != 0);
+ else if (GET_CODE (insn) == JUMP_INSN)
+ JUMP_LABEL (insn) = 0;
+ else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
+ for (note = REG_NOTES (insn); note; note = next)
+ {
+ next = XEXP (note, 1);
+ if (REG_NOTE_KIND (note) == REG_LABEL
+ && ! reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
+ remove_note (insn, note);
+ }
+
+ if (INSN_UID (insn) > max_uid)
+ max_uid = INSN_UID (insn);
+ }
+
+ max_uid++;
+
+ /* Delete insns following barriers, up to next label. */
+
+ for (insn = f; insn;)
+ {
+ if (GET_CODE (insn) == BARRIER)
+ {
+ insn = NEXT_INSN (insn);
+ while (insn != 0 && GET_CODE (insn) != CODE_LABEL)
+ {
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_FUNCTION_END)
+ insn = NEXT_INSN (insn);
+ else
+ insn = delete_insn (insn);
+ }
+ /* INSN is now the code_label. */
+ }
+ else
+ insn = NEXT_INSN (insn);
+ }
+
+ /* Leave some extra room for labels and duplicate exit test insns
+ we make. */
+ max_jump_chain = max_uid * 14 / 10;
+ jump_chain = (rtx *) alloca (max_jump_chain * sizeof (rtx));
+ bzero ((char *) jump_chain, max_jump_chain * sizeof (rtx));
+
+ /* Mark the label each jump jumps to.
+ Combine consecutive labels, and count uses of labels.
+
+ For each label, make a chain (using `jump_chain')
+ of all the *unconditional* jumps that jump to it;
+ also make a chain of all returns. */
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && ! INSN_DELETED_P (insn))
+ {
+ mark_jump_label (PATTERN (insn), insn, cross_jump);
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ if (JUMP_LABEL (insn) != 0 && simplejump_p (insn))
+ {
+ jump_chain[INSN_UID (insn)]
+ = jump_chain[INSN_UID (JUMP_LABEL (insn))];
+ jump_chain[INSN_UID (JUMP_LABEL (insn))] = insn;
+ }
+ if (GET_CODE (PATTERN (insn)) == RETURN)
+ {
+ jump_chain[INSN_UID (insn)] = jump_chain[0];
+ jump_chain[0] = insn;
+ }
+ }
+ }
+
+ /* Keep track of labels used from static data;
+ they cannot ever be deleted. */
+
+ for (insn = forced_labels; insn; insn = XEXP (insn, 1))
+ LABEL_NUSES (XEXP (insn, 0))++;
+
+ /* Delete all labels that are already unreferenced.
+ Also find the last insn. */
+
+ last_insn = 0;
+ for (insn = f; insn; )
+ {
+ if (GET_CODE (insn) == CODE_LABEL && LABEL_NUSES (insn) == 0)
+ insn = delete_insn (insn);
+ else
+ {
+ last_insn = insn;
+ insn = NEXT_INSN (insn);
+ }
+ }
+
+ if (!optimize)
+ {
+ /* See if there is still a NOTE_INSN_FUNCTION_END in this function.
+ If so, record that this function can drop off the end. */
+
+ insn = last_insn;
+ {
+ int n_labels = 1;
+ while (insn
+ /* One label can follow the end-note: the return label. */
+ && ((GET_CODE (insn) == CODE_LABEL && n_labels-- > 0)
+ /* Ordinary insns can follow it if returning a structure. */
+ || GET_CODE (insn) == INSN
+ /* If the machine uses explicit RETURN insns and no epilogue,
+ then one of them follows the note. */
+ || (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) == RETURN)
+ /* Other kinds of notes can follow also. */
+ || (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_FUNCTION_END)))
+ insn = PREV_INSN (insn);
+ }
+
+ /* Report if control can fall through at the end of the function. */
+ if (insn && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_END
+ && ! INSN_DELETED_P (insn))
+ can_reach_end = 1;
+
+ /* Zero the "deleted" flag of all the "deleted" insns. */
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ INSN_DELETED_P (insn) = 0;
+ return;
+ }
+
+#ifdef HAVE_return
+ if (HAVE_return)
+ {
+ /* If we fall through to the epilogue, see if we can insert a RETURN insn
+ in front of it. If the machine allows it at this point (we might be
+ after reload for a leaf routine), it will improve optimization for it
+ to be there. */
+ insn = get_last_insn ();
+ while (insn && GET_CODE (insn) == NOTE)
+ insn = PREV_INSN (insn);
+
+ if (insn && GET_CODE (insn) != BARRIER)
+ {
+ emit_jump_insn (gen_return ());
+ emit_barrier ();
+ }
+ }
+#endif
+
+ if (noop_moves)
+ for (insn = f; insn; )
+ {
+ next = NEXT_INSN (insn);
+
+ if (GET_CODE (insn) == INSN)
+ {
+ register rtx body = PATTERN (insn);
+
+/* Combine stack_adjusts with following push_insns. */
+#ifdef PUSH_ROUNDING
+ if (GET_CODE (body) == SET
+ && SET_DEST (body) == stack_pointer_rtx
+ && GET_CODE (SET_SRC (body)) == PLUS
+ && XEXP (SET_SRC (body), 0) == stack_pointer_rtx
+ && GET_CODE (XEXP (SET_SRC (body), 1)) == CONST_INT
+ && INTVAL (XEXP (SET_SRC (body), 1)) > 0)
+ {
+ rtx p;
+ rtx stack_adjust_insn = insn;
+ int stack_adjust_amount = INTVAL (XEXP (SET_SRC (body), 1));
+ int total_pushed = 0;
+ int pushes = 0;
+
+ /* Find all successive push insns. */
+ p = insn;
+ /* Don't convert more than three pushes;
+ that starts adding too many displaced addresses
+ and the whole thing starts becoming a losing
+ proposition. */
+ while (pushes < 3)
+ {
+ rtx pbody, dest;
+ p = next_nonnote_insn (p);
+ if (p == 0 || GET_CODE (p) != INSN)
+ break;
+ pbody = PATTERN (p);
+ if (GET_CODE (pbody) != SET)
+ break;
+ dest = SET_DEST (pbody);
+ /* Allow a no-op move between the adjust and the push. */
+ if (GET_CODE (dest) == REG
+ && GET_CODE (SET_SRC (pbody)) == REG
+ && REGNO (dest) == REGNO (SET_SRC (pbody)))
+ continue;
+ if (! (GET_CODE (dest) == MEM
+ && GET_CODE (XEXP (dest, 0)) == POST_INC
+ && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
+ break;
+ pushes++;
+ if (total_pushed + GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)))
+ > stack_adjust_amount)
+ break;
+ total_pushed += GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)));
+ }
+
+ /* Discard the amount pushed from the stack adjust;
+ maybe eliminate it entirely. */
+ if (total_pushed >= stack_adjust_amount)
+ {
+ delete_computation (stack_adjust_insn);
+ total_pushed = stack_adjust_amount;
+ }
+ else
+ XEXP (SET_SRC (PATTERN (stack_adjust_insn)), 1)
+ = GEN_INT (stack_adjust_amount - total_pushed);
+
+ /* Change the appropriate push insns to ordinary stores. */
+ p = insn;
+ while (total_pushed > 0)
+ {
+ rtx pbody, dest;
+ p = next_nonnote_insn (p);
+ if (GET_CODE (p) != INSN)
+ break;
+ pbody = PATTERN (p);
+ if (GET_CODE (pbody) != SET)
+ break;
+ dest = SET_DEST (pbody);
+ if (! (GET_CODE (dest) == MEM
+ && GET_CODE (XEXP (dest, 0)) == POST_INC
+ && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
+ break;
+ total_pushed -= GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)));
+ /* If this push doesn't fully fit in the space
+ of the stack adjust that we deleted,
+ make another stack adjust here for what we
+ didn't use up. There should be peepholes
+ to recognize the resulting sequence of insns. */
+ if (total_pushed < 0)
+ {
+ emit_insn_before (gen_add2_insn (stack_pointer_rtx,
+ GEN_INT (- total_pushed)),
+ p);
+ break;
+ }
+ XEXP (dest, 0)
+ = plus_constant (stack_pointer_rtx, total_pushed);
+ }
+ }
+#endif
+
+ /* Detect and delete no-op move instructions
+ resulting from not allocating a parameter in a register. */
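+ /* E.g. (set (mem (plus fp N)) (mem (plus fp N))), as can be left
+ behind when a parameter stays in its stack slot (an editorial
+ example). */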
+
+ if (GET_CODE (body) == SET
+ && (SET_DEST (body) == SET_SRC (body)
+ || (GET_CODE (SET_DEST (body)) == MEM
+ && GET_CODE (SET_SRC (body)) == MEM
+ && rtx_equal_p (SET_SRC (body), SET_DEST (body))))
+ && ! (GET_CODE (SET_DEST (body)) == MEM
+ && MEM_VOLATILE_P (SET_DEST (body)))
+ && ! (GET_CODE (SET_SRC (body)) == MEM
+ && MEM_VOLATILE_P (SET_SRC (body))))
+ delete_computation (insn);
+
+ /* Detect and ignore no-op move instructions
+ resulting from smart or fortuitous register allocation. */
+
+ else if (GET_CODE (body) == SET)
+ {
+ int sreg = true_regnum (SET_SRC (body));
+ int dreg = true_regnum (SET_DEST (body));
+
+ if (sreg == dreg && sreg >= 0)
+ delete_insn (insn);
+ else if (sreg >= 0 && dreg >= 0)
+ {
+ rtx trial;
+ rtx tem = find_equiv_reg (NULL_RTX, insn, 0,
+ sreg, NULL_PTR, dreg,
+ GET_MODE (SET_SRC (body)));
+
+#ifdef PRESERVE_DEATH_INFO_REGNO_P
+ /* Deleting insn could lose a death-note for SREG or DREG
+ so don't do it if final needs accurate death-notes. */
+ if (! PRESERVE_DEATH_INFO_REGNO_P (sreg)
+ && ! PRESERVE_DEATH_INFO_REGNO_P (dreg))
+#endif
+ {
+ /* DREG may have been the target of a REG_DEAD note in
+ the insn which makes INSN redundant. If so, reorg
+ would still think it is dead. So search for such a
+ note and delete it if we find it. */
+ for (trial = prev_nonnote_insn (insn);
+ trial && GET_CODE (trial) != CODE_LABEL;
+ trial = prev_nonnote_insn (trial))
+ if (find_regno_note (trial, REG_DEAD, dreg))
+ {
+ remove_death (dreg, trial);
+ break;
+ }
+
+ if (tem != 0
+ && GET_MODE (tem) == GET_MODE (SET_DEST (body)))
+ delete_insn (insn);
+ }
+ }
+ else if (dreg >= 0 && CONSTANT_P (SET_SRC (body))
+ && find_equiv_reg (SET_SRC (body), insn, 0, dreg,
+ NULL_PTR, 0,
+ GET_MODE (SET_DEST (body))))
+ {
+ /* This handles the case where we have two consecutive
+ assignments of the same constant to pseudos that didn't
+ get a hard reg. Each SET from the constant will be
+ converted into a SET of the spill register and an
+ output reload will be made following it. This produces
+ two loads of the same constant into the same spill
+ register. */
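+
+ /* Sketch (editorial): (set (reg 3) (const_int 5)) ...
+ (set (reg 3) (const_int 5)), where reg 3 is the spill register
+ used by both; the second load is the INSN deleted below. */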
+
+ rtx in_insn = insn;
+
+ /* Look back for a death note for the first reg.
+ If there is one, it is no longer accurate. */
+ while (in_insn && GET_CODE (in_insn) != CODE_LABEL)
+ {
+ if ((GET_CODE (in_insn) == INSN
+ || GET_CODE (in_insn) == JUMP_INSN)
+ && find_regno_note (in_insn, REG_DEAD, dreg))
+ {
+ remove_death (dreg, in_insn);
+ break;
+ }
+ in_insn = PREV_INSN (in_insn);
+ }
+
+ /* Delete the second load of the value. */
+ delete_insn (insn);
+ }
+ }
+ else if (GET_CODE (body) == PARALLEL)
+ {
+ /* If each part is a set between two identical registers or
+ a USE or CLOBBER, delete the insn. */
+ int i, sreg, dreg;
+ rtx tem;
+
+ for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
+ {
+ tem = XVECEXP (body, 0, i);
+ if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER)
+ continue;
+
+ if (GET_CODE (tem) != SET
+ || (sreg = true_regnum (SET_SRC (tem))) < 0
+ || (dreg = true_regnum (SET_DEST (tem))) < 0
+ || dreg != sreg)
+ break;
+ }
+
+ if (i < 0)
+ delete_insn (insn);
+ }
+#if !BYTES_BIG_ENDIAN /* Not worth the hair to detect this
+ in the big-endian case. */
+ /* Also delete insns to store bit fields if they are no-ops. */
+ else if (GET_CODE (body) == SET
+ && GET_CODE (SET_DEST (body)) == ZERO_EXTRACT
+ && XEXP (SET_DEST (body), 2) == const0_rtx
+ && XEXP (SET_DEST (body), 0) == SET_SRC (body)
+ && ! (GET_CODE (SET_SRC (body)) == MEM
+ && MEM_VOLATILE_P (SET_SRC (body))))
+ delete_insn (insn);
+#endif /* not BYTES_BIG_ENDIAN */
+ }
+ insn = next;
+ }
+
+ /* If we haven't yet gotten to reload and we have just run regscan,
+ delete any insn that sets a register that isn't used elsewhere.
+ This helps some of the optimizations below by having less insns
+ being jumped around. */
+
+ if (! reload_completed && after_regscan)
+ for (insn = f; insn; insn = next)
+ {
+ rtx set = single_set (insn);
+
+ next = NEXT_INSN (insn);
+
+ if (set && GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
+ && regno_first_uid[REGNO (SET_DEST (set))] == INSN_UID (insn)
+ /* We use regno_last_note_uid so as not to delete the setting
+ of a reg that's used in notes. A subsequent optimization
+ might arrange to use that reg for real. */
+ && regno_last_note_uid[REGNO (SET_DEST (set))] == INSN_UID (insn)
+ && ! side_effects_p (SET_SRC (set))
+ && ! find_reg_note (insn, REG_RETVAL, 0))
+ delete_insn (insn);
+ }
+
+ /* Now iterate optimizing jumps until nothing changes over one pass. */
+ changed = 1;
+ while (changed)
+ {
+ changed = 0;
+
+ for (insn = f; insn; insn = next)
+ {
+ rtx reallabelprev;
+ rtx temp, temp1, temp2, temp3, temp4, temp5, temp6;
+ rtx nlabel;
+ int this_is_simplejump, this_is_condjump, reversep;
+ int this_is_condjump_in_parallel;
+#if 0
+ /* If NOT the first iteration, if this is the last jump pass
+ (just before final), do the special peephole optimizations.
+ Avoiding the first iteration gives ordinary jump opts
+ a chance to work before peephole opts. */
+
+ if (reload_completed && !first && !flag_no_peephole)
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ peephole (insn);
+#endif
+
+ /* That could have deleted some insns after INSN, so check now
+ what the following insn is. */
+
+ next = NEXT_INSN (insn);
+
+ /* See if this is a NOTE_INSN_LOOP_BEG followed by an unconditional
+ jump. Try to optimize by duplicating the loop exit test if so.
+ This is only safe immediately after regscan, because it uses
+ the values of regno_first_uid and regno_last_uid. */
+ if (after_regscan && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
+ && (temp1 = next_nonnote_insn (insn)) != 0
+ && simplejump_p (temp1))
+ {
+ temp = PREV_INSN (insn);
+ if (duplicate_loop_exit_test (insn))
+ {
+ changed = 1;
+ next = NEXT_INSN (temp);
+ continue;
+ }
+ }
+
+ if (GET_CODE (insn) != JUMP_INSN)
+ continue;
+
+ this_is_simplejump = simplejump_p (insn);
+ this_is_condjump = condjump_p (insn);
+ this_is_condjump_in_parallel = condjump_in_parallel_p (insn);
+
+ /* Tension the labels in dispatch tables. */
+
+ if (GET_CODE (PATTERN (insn)) == ADDR_VEC)
+ changed |= tension_vector_labels (PATTERN (insn), 0);
+ if (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
+ changed |= tension_vector_labels (PATTERN (insn), 1);
+
+ /* If a dispatch table always goes to the same place,
+ get rid of it and replace the insn that uses it. */
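+
+ /* E.g. a switch whose cases all reach the same code: every element
+ of the table is (label_ref L), so the tablejump is redirected
+ straight to L and the dead table can then be removed (an
+ editorial example). */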
+
+ if (GET_CODE (PATTERN (insn)) == ADDR_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
+ {
+ int i;
+ rtx pat = PATTERN (insn);
+ int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC;
+ int len = XVECLEN (pat, diff_vec_p);
+ rtx dispatch = prev_real_insn (insn);
+
+ for (i = 0; i < len; i++)
+ if (XEXP (XVECEXP (pat, diff_vec_p, i), 0)
+ != XEXP (XVECEXP (pat, diff_vec_p, 0), 0))
+ break;
+ if (i == len
+ && dispatch != 0
+ && GET_CODE (dispatch) == JUMP_INSN
+ && JUMP_LABEL (dispatch) != 0
+ /* Don't mess with a casesi insn. */
+ && !(GET_CODE (PATTERN (dispatch)) == SET
+ && (GET_CODE (SET_SRC (PATTERN (dispatch)))
+ == IF_THEN_ELSE))
+ && next_real_insn (JUMP_LABEL (dispatch)) == insn)
+ {
+ redirect_tablejump (dispatch,
+ XEXP (XVECEXP (pat, diff_vec_p, 0), 0));
+ changed = 1;
+ }
+ }
+
+ reallabelprev = prev_active_insn (JUMP_LABEL (insn));
+
+ /* If a jump references the end of the function, try to turn
+ it into a RETURN insn, possibly a conditional one. */
+ if (JUMP_LABEL (insn)
+ && (next_active_insn (JUMP_LABEL (insn)) == 0
+ || GET_CODE (PATTERN (next_active_insn (JUMP_LABEL (insn))))
+ == RETURN))
+ changed |= redirect_jump (insn, NULL_RTX);
+
+ /* Detect jump to following insn. */
+ if (reallabelprev == insn && condjump_p (insn))
+ {
+ delete_jump (insn);
+ changed = 1;
+ continue;
+ }
+
+ /* If we have an unconditional jump preceded by a USE, try to put
+ the USE before the target and jump there. This simplifies many
+ of the optimizations below since we don't have to worry about
+ dealing with these USE insns. We only do this if the label
+ being branched to already has the identical USE or if code
+ never falls through to that label. */
+
+ if (this_is_simplejump
+ && (temp = prev_nonnote_insn (insn)) != 0
+ && GET_CODE (temp) == INSN && GET_CODE (PATTERN (temp)) == USE
+ && (temp1 = prev_nonnote_insn (JUMP_LABEL (insn))) != 0
+ && (GET_CODE (temp1) == BARRIER
+ || (GET_CODE (temp1) == INSN
+ && rtx_equal_p (PATTERN (temp), PATTERN (temp1)))))
+ {
+ if (GET_CODE (temp1) == BARRIER)
+ {
+ emit_insn_after (PATTERN (temp), temp1);
+ temp1 = NEXT_INSN (temp1);
+ }
+
+ delete_insn (temp);
+ redirect_jump (insn, get_label_before (temp1));
+ reallabelprev = prev_real_insn (temp1);
+ changed = 1;
+ }
+
+ /* Simplify if (...) x = a; else x = b; by converting it
+ to x = b; if (...) x = a;
+ provided B is sufficiently simple, the test doesn't involve X,
+ and nothing in the test modifies B or X.
+
+ If we have small register classes, we also can't do this if X
+ is a hard register.
+
+ If the "x = b;" insn has any REG_NOTES, we don't do this because
+ of the possibility that we are running after CSE and there is a
+ REG_EQUAL note that is only valid if the branch has already been
+ taken. If we move the insn with the REG_EQUAL note, we may
+ fold the comparison to always be false in a later CSE pass.
+ (We could also delete the REG_NOTES when moving the insn, but it
+ seems simpler to not move it.) An exception is that we can move
+ the insn if the only note is a REG_EQUAL or REG_EQUIV whose
+ value is the same as "b".
+
+ INSN is the branch over the `else' part.
+
+ We set:
+
+ TEMP to the jump insn preceding "x = a;"
+ TEMP1 to X
+ TEMP2 to the insn that sets "x = b;"
+ TEMP3 to the insn that sets "x = a;"
+ TEMP4 to the set of "x = b;". */
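+
+ /* Insn-stream shape being matched (an editorial sketch):
+
+ TEMP: if (cond) jump L1
+ TEMP3: x = a;
+ INSN: jump L2
+ L1:
+ TEMP2: x = b;
+ L2: ...
+
+ which in effect becomes "x = b; if (cond) jump L2; x = a;",
+ with INSN, TEMP2, and L1 going away. */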
+
+ if (this_is_simplejump
+ && (temp3 = prev_active_insn (insn)) != 0
+ && GET_CODE (temp3) == INSN
+ && (temp4 = single_set (temp3)) != 0
+ && GET_CODE (temp1 = SET_DEST (temp4)) == REG
+#ifdef SMALL_REGISTER_CLASSES
+ && REGNO (temp1) >= FIRST_PSEUDO_REGISTER
+#endif
+ && (temp2 = next_active_insn (insn)) != 0
+ && GET_CODE (temp2) == INSN
+ && (temp4 = single_set (temp2)) != 0
+ && rtx_equal_p (SET_DEST (temp4), temp1)
+ && (GET_CODE (SET_SRC (temp4)) == REG
+ || GET_CODE (SET_SRC (temp4)) == SUBREG
+ || CONSTANT_P (SET_SRC (temp4)))
+ && (REG_NOTES (temp2) == 0
+ || ((REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUAL
+ || REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUIV)
+ && XEXP (REG_NOTES (temp2), 1) == 0
+ && rtx_equal_p (XEXP (REG_NOTES (temp2), 0),
+ SET_SRC (temp4))))
+ && (temp = prev_active_insn (temp3)) != 0
+ && condjump_p (temp) && ! simplejump_p (temp)
+ /* TEMP must skip over the "x = a;" insn */
+ && prev_real_insn (JUMP_LABEL (temp)) == insn
+ && no_labels_between_p (insn, JUMP_LABEL (temp))
+ /* There must be no other entries to the "x = b;" insn. */
+ && no_labels_between_p (JUMP_LABEL (temp), temp2)
+ /* INSN must either branch to the insn after TEMP2 or the insn
+ after TEMP2 must branch to the same place as INSN. */
+ && (reallabelprev == temp2
+ || ((temp5 = next_active_insn (temp2)) != 0
+ && simplejump_p (temp5)
+ && JUMP_LABEL (temp5) == JUMP_LABEL (insn))))
+ {
+ /* The test expression, X, may be a complicated test with
+ multiple branches. See if we can find all the uses of
+ the label that TEMP branches to without hitting a CALL_INSN
+ or a jump to somewhere else. */
+ rtx target = JUMP_LABEL (temp);
+ int nuses = LABEL_NUSES (target);
+ rtx p, q;
+
+ /* Set P to the first jump insn that goes around "x = a;". */
+ for (p = temp; nuses && p; p = prev_nonnote_insn (p))
+ {
+ if (GET_CODE (p) == JUMP_INSN)
+ {
+ if (condjump_p (p) && ! simplejump_p (p)
+ && JUMP_LABEL (p) == target)
+ {
+ nuses--;
+ if (nuses == 0)
+ break;
+ }
+ else
+ break;
+ }
+ else if (GET_CODE (p) == CALL_INSN)
+ break;
+ }
+
+#ifdef HAVE_cc0
+ /* We cannot insert anything between a set of cc and its use
+ so if P uses cc0, we must back up to the previous insn. */
+ q = prev_nonnote_insn (p);
+ if (q && GET_RTX_CLASS (GET_CODE (q)) == 'i'
+ && sets_cc0_p (PATTERN (q)))
+ p = q;
+#endif
+
+ if (p)
+ p = PREV_INSN (p);
+
+ /* If we found all the uses and there was no data conflict, we
+ can move the assignment unless we can branch into the middle
+ from somewhere. */
+ if (nuses == 0 && p
+ && no_labels_between_p (p, insn)
+ && ! reg_referenced_between_p (temp1, p, NEXT_INSN (temp3))
+ && ! reg_set_between_p (temp1, p, temp3)
+ && (GET_CODE (SET_SRC (temp4)) == CONST_INT
+ || ! reg_set_between_p (SET_SRC (temp4), p, temp2)))
+ {
+ emit_insn_after_with_line_notes (PATTERN (temp2), p, temp2);
+ delete_insn (temp2);
+
+ /* Set NEXT to an insn that we know won't go away. */
+ next = next_active_insn (insn);
+
+ /* Delete the jump around the set. Note that we must do
+ this before we redirect the test jumps so that it won't
+ delete the code immediately following the assignment
+ we moved (which might be a jump). */
+
+ delete_insn (insn);
+
+ /* We either have two consecutive labels or a jump to
+ a jump, so adjust all the JUMP_INSNs to branch to where
+ INSN branches to. */
+ for (p = NEXT_INSN (p); p != next; p = NEXT_INSN (p))
+ if (GET_CODE (p) == JUMP_INSN)
+ redirect_jump (p, target);
+
+ changed = 1;
+ continue;
+ }
+ }
+
+#ifndef HAVE_cc0
+ /* If we have if (...) x = exp; and branches are expensive,
+ EXP is a single insn, does not have any side effects, cannot
+ trap, and is not too costly, convert this to
+ t = exp; if (...) x = t;
+
+ Don't do this when we have CC0 because it is unlikely to help
+ and we'd need to worry about where to place the new insn and
+ the potential for conflicts. We also can't do this when we have
+ notes on the insn for the same reason as above.
+
+ We set:
+
+ TEMP to the "x = exp;" insn.
+ TEMP1 to the single set in the "x = exp;" insn.
+ TEMP2 to "x". */
+
+ if (! reload_completed
+ && this_is_condjump && ! this_is_simplejump
+ && BRANCH_COST >= 3
+ && (temp = next_nonnote_insn (insn)) != 0
+ && GET_CODE (temp) == INSN
+ && REG_NOTES (temp) == 0
+ && (reallabelprev == temp
+ || ((temp2 = next_active_insn (temp)) != 0
+ && simplejump_p (temp2)
+ && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
+ && (temp1 = single_set (temp)) != 0
+ && (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
+ && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
+#ifdef SMALL_REGISTER_CLASSES
+ && REGNO (temp2) >= FIRST_PSEUDO_REGISTER
+#endif
+ && GET_CODE (SET_SRC (temp1)) != REG
+ && GET_CODE (SET_SRC (temp1)) != SUBREG
+ && GET_CODE (SET_SRC (temp1)) != CONST_INT
+ && ! side_effects_p (SET_SRC (temp1))
+ && ! may_trap_p (SET_SRC (temp1))
+ && rtx_cost (SET_SRC (temp1)) < 10)
+ {
+ rtx new = gen_reg_rtx (GET_MODE (temp2));
+
+ if (validate_change (temp, &SET_DEST (temp1), new, 0))
+ {
+ next = emit_insn_after (gen_move_insn (temp2, new), insn);
+ emit_insn_after_with_line_notes (PATTERN (temp),
+ PREV_INSN (insn), temp);
+ delete_insn (temp);
+ reallabelprev = prev_active_insn (JUMP_LABEL (insn));
+ }
+ }
+
+ /* Similarly, if it takes two insns to compute EXP but they
+ have the same destination. Here TEMP3 will be the second
+ insn and TEMP4 the SET from that insn. */
+
+ if (! reload_completed
+ && this_is_condjump && ! this_is_simplejump
+ && BRANCH_COST >= 4
+ && (temp = next_nonnote_insn (insn)) != 0
+ && GET_CODE (temp) == INSN
+ && REG_NOTES (temp) == 0
+ && (temp3 = next_nonnote_insn (temp)) != 0
+ && GET_CODE (temp3) == INSN
+ && REG_NOTES (temp3) == 0
+ && (reallabelprev == temp3
+ || ((temp2 = next_active_insn (temp3)) != 0
+ && simplejump_p (temp2)
+ && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
+ && (temp1 = single_set (temp)) != 0
+ && (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
+ && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
+#ifdef SMALL_REGISTER_CLASSES
+ && REGNO (temp2) >= FIRST_PSEUDO_REGISTER
+#endif
+ && ! side_effects_p (SET_SRC (temp1))
+ && ! may_trap_p (SET_SRC (temp1))
+ && rtx_cost (SET_SRC (temp1)) < 10
+ && (temp4 = single_set (temp3)) != 0
+ && rtx_equal_p (SET_DEST (temp4), temp2)
+ && ! side_effects_p (SET_SRC (temp4))
+ && ! may_trap_p (SET_SRC (temp4))
+ && rtx_cost (SET_SRC (temp4)) < 10)
+ {
+ rtx new = gen_reg_rtx (GET_MODE (temp2));
+
+ if (validate_change (temp, &SET_DEST (temp1), new, 0))
+ {
+ next = emit_insn_after (gen_move_insn (temp2, new), insn);
+ emit_insn_after_with_line_notes (PATTERN (temp),
+ PREV_INSN (insn), temp);
+ emit_insn_after_with_line_notes
+ (replace_rtx (PATTERN (temp3), temp2, new),
+ PREV_INSN (insn), temp3);
+ delete_insn (temp);
+ delete_insn (temp3);
+ reallabelprev = prev_active_insn (JUMP_LABEL (insn));
+ }
+ }
+
+ /* Finally, handle the case where two insns are used to
+ compute EXP but a temporary register is used. Here we must
+ ensure that the temporary register is not used anywhere else. */
+
+ if (! reload_completed
+ && after_regscan
+ && this_is_condjump && ! this_is_simplejump
+ && BRANCH_COST >= 4
+ && (temp = next_nonnote_insn (insn)) != 0
+ && GET_CODE (temp) == INSN
+ && REG_NOTES (temp) == 0
+ && (temp3 = next_nonnote_insn (temp)) != 0
+ && GET_CODE (temp3) == INSN
+ && REG_NOTES (temp3) == 0
+ && (reallabelprev == temp3
+ || ((temp2 = next_active_insn (temp3)) != 0
+ && simplejump_p (temp2)
+ && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
+ && (temp1 = single_set (temp)) != 0
+ && (temp5 = SET_DEST (temp1),
+ (GET_CODE (temp5) == REG
+ || (GET_CODE (temp5) == SUBREG
+ && (temp5 = SUBREG_REG (temp5),
+ GET_CODE (temp5) == REG))))
+ && REGNO (temp5) >= FIRST_PSEUDO_REGISTER
+ && regno_first_uid[REGNO (temp5)] == INSN_UID (temp)
+ && regno_last_uid[REGNO (temp5)] == INSN_UID (temp3)
+ && ! side_effects_p (SET_SRC (temp1))
+ && ! may_trap_p (SET_SRC (temp1))
+ && rtx_cost (SET_SRC (temp1)) < 10
+ && (temp4 = single_set (temp3)) != 0
+ && (temp2 = SET_DEST (temp4), GET_CODE (temp2) == REG)
+ && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
+#ifdef SMALL_REGISTER_CLASSES
+ && REGNO (temp2) >= FIRST_PSEUDO_REGISTER
+#endif
+ && rtx_equal_p (SET_DEST (temp4), temp2)
+ && ! side_effects_p (SET_SRC (temp4))
+ && ! may_trap_p (SET_SRC (temp4))
+ && rtx_cost (SET_SRC (temp4)) < 10)
+ {
+ rtx new = gen_reg_rtx (GET_MODE (temp2));
+
+ if (validate_change (temp3, &SET_DEST (temp4), new, 0))
+ {
+ next = emit_insn_after (gen_move_insn (temp2, new), insn);
+ emit_insn_after_with_line_notes (PATTERN (temp),
+ PREV_INSN (insn), temp);
+ emit_insn_after_with_line_notes (PATTERN (temp3),
+ PREV_INSN (insn), temp3);
+ delete_insn (temp);
+ delete_insn (temp3);
+ reallabelprev = prev_active_insn (JUMP_LABEL (insn));
+ }
+ }
+#endif /* not HAVE_cc0 */
+
+ /* We deal with five cases:
+
+ 1) x = a; if (...) x = b; and either A or B is zero,
+ 2) if (...) x = 0; and jumps are expensive,
+ 3) x = a; if (...) x = b; and A and B are constants where all the
+ set bits in A are also set in B and jumps are expensive,
+ 4) x = a; if (...) x = b; and A and B are non-zero, and jumps are
+ more expensive, and
+ 5) if (...) x = b; and jumps are even more expensive.
+
+ In each of these try to use a store-flag insn to avoid the jump.
+ (If the jump would be faster, the machine should not have
+ defined the scc insns!). These cases are often made by the
+ previous optimization.
+
+ INSN here is the jump around the store. We set:
+
+ TEMP to the "x = b;" insn.
+ TEMP1 to X.
+ TEMP2 to B (const0_rtx in the second case).
+ TEMP3 to A (X in the second case).
+ TEMP4 to the condition being tested.
+ TEMP5 to the earliest insn used to find the condition. */
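+
+ /* For instance (an editorial sketch), case 2 turns the source-level
+ "if (a != b) x = 0;" -- compiled as a jump over "x = 0" taken
+ when a == b -- into "x &= -(a == b);": one store-flag insn
+ normalized to -1 and no branch at all. */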
+
+ if (/* We can't do this after reload has completed. */
+ ! reload_completed
+ && this_is_condjump && ! this_is_simplejump
+ /* Set TEMP to the "x = b;" insn. */
+ && (temp = next_nonnote_insn (insn)) != 0
+ && GET_CODE (temp) == INSN
+ && GET_CODE (PATTERN (temp)) == SET
+ && GET_CODE (temp1 = SET_DEST (PATTERN (temp))) == REG
+#ifdef SMALL_REGISTER_CLASSES
+ && REGNO (temp1) >= FIRST_PSEUDO_REGISTER
+#endif
+ && GET_MODE_CLASS (GET_MODE (temp1)) == MODE_INT
+ && (GET_CODE (temp2 = SET_SRC (PATTERN (temp))) == REG
+ || GET_CODE (temp2) == SUBREG
+ || GET_CODE (temp2) == CONST_INT)
+ /* Allow either form, but prefer the former if both apply.
+ There is no point in using the old value of TEMP1 if
+ it is a register, since cse will alias them. It can
+ lose if the old value was a hard register, since CSE
+ won't replace hard registers. */
+ && (((temp3 = reg_set_last (temp1, insn)) != 0
+ && GET_CODE (temp3) == CONST_INT)
+ /* Make the latter case look like x = x; if (...) x = 0; */
+ || (temp3 = temp1,
+ ((BRANCH_COST >= 2
+ && temp2 == const0_rtx)
+#ifdef HAVE_conditional_move
+ || HAVE_conditional_move
+#endif
+ || BRANCH_COST >= 3)))
+ /* INSN must either branch to the insn after TEMP or the insn
+ after TEMP must branch to the same place as INSN. */
+ && (reallabelprev == temp
+ || ((temp4 = next_active_insn (temp)) != 0
+ && simplejump_p (temp4)
+ && JUMP_LABEL (temp4) == JUMP_LABEL (insn)))
+ && (temp4 = get_condition (insn, &temp5)) != 0
+ /* We must be comparing objects whose modes imply the size.
+ We could handle BLKmode if (1) emit_store_flag could
+ and (2) we could find the size reliably. */
+ && GET_MODE (XEXP (temp4, 0)) != BLKmode
+
+ /* If B is zero, OK; if A is zero, can only do (1) if we
+ can reverse the condition. See if (3) applies possibly
+ by reversing the condition. Prefer reversing to (4) when
+ branches are very expensive. */
+ && ((reversep = 0, temp2 == const0_rtx)
+ || (temp3 == const0_rtx
+ && (reversep = can_reverse_comparison_p (temp4, insn)))
+ || (BRANCH_COST >= 2
+ && GET_CODE (temp2) == CONST_INT
+ && GET_CODE (temp3) == CONST_INT
+ && ((INTVAL (temp2) & INTVAL (temp3)) == INTVAL (temp2)
+ || ((INTVAL (temp2) & INTVAL (temp3)) == INTVAL (temp3)
+ && (reversep = can_reverse_comparison_p (temp4,
+ insn)))))
+#ifdef HAVE_conditional_move
+ || HAVE_conditional_move
+#endif
+ || BRANCH_COST >= 3)
+#ifdef HAVE_cc0
+ /* If the previous insn sets CC0 and something else, we can't
+ do this since we are going to delete that insn. */
+
+ && ! ((temp6 = prev_nonnote_insn (insn)) != 0
+ && GET_CODE (temp6) == INSN
+ && (sets_cc0_p (PATTERN (temp6)) == -1
+ || (sets_cc0_p (PATTERN (temp6)) == 1
+ && FIND_REG_INC_NOTE (temp6, NULL_RTX))))
+#endif
+ )
+ {
+ enum rtx_code code = GET_CODE (temp4);
+ rtx uval, cval, var = temp1;
+ int normalizep;
+ rtx target;
+
+ /* If necessary, reverse the condition. */
+ if (reversep)
+ code = reverse_condition (code), uval = temp2, cval = temp3;
+ else
+ uval = temp3, cval = temp2;
+
+ /* See if we can do this with a store-flag insn. */
+ start_sequence ();
+
+ /* If CVAL is non-zero, normalize to -1. Otherwise,
+ if UVAL is the constant 1, it is best to just compute
+ the result directly. If UVAL is constant and STORE_FLAG_VALUE
+ includes all of its bits, it is best to compute the flag
+ value unnormalized and `and' it with UVAL. Otherwise,
+ normalize to -1 and `and' with UVAL. */
+ normalizep = (cval != const0_rtx ? -1
+ : (uval == const1_rtx ? 1
+ : (GET_CODE (uval) == CONST_INT
+ && (INTVAL (uval) & ~STORE_FLAG_VALUE) == 0)
+ ? 0 : -1));
+
+ /* We will be putting the store-flag insn immediately in
+ front of the comparison that was originally being done,
+ so we know all the variables in TEMP4 will be valid.
+ However, this might be in front of the assignment of
+ A to VAR. If it is, it would clobber the store-flag
+ we will be emitting.
+
+ Therefore, emit into a temporary which will be copied to
+ VAR immediately after TEMP. */
+
+ target = emit_store_flag (gen_reg_rtx (GET_MODE (var)), code,
+ XEXP (temp4, 0), XEXP (temp4, 1),
+ VOIDmode,
+ (code == LTU || code == LEU
+ || code == GEU || code == GTU),
+ normalizep);
+ if (target)
+ {
+ rtx before = insn;
+ rtx seq;
+
+ /* Put the store-flag insns in front of the first insn
+ used to compute the condition to ensure that we
+ use the same values of them as the current
+ comparison. However, the remainder of the insns we
+ generate will be placed directly in front of the
+ jump insn, in case any of the pseudos we use
+ are modified earlier. */
+
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, temp5);
+
+ start_sequence ();
+
+ /* Both CVAL and UVAL are non-zero. */
+ if (cval != const0_rtx && uval != const0_rtx)
+ {
+ rtx tem1, tem2;
+
+ tem1 = expand_and (uval, target, NULL_RTX);
+ if (GET_CODE (cval) == CONST_INT
+ && GET_CODE (uval) == CONST_INT
+ && (INTVAL (cval) & INTVAL (uval)) == INTVAL (cval))
+ tem2 = cval;
+ else
+ {
+ tem2 = expand_unop (GET_MODE (var), one_cmpl_optab,
+ target, NULL_RTX, 0);
+ tem2 = expand_and (cval, tem2,
+ (GET_CODE (tem2) == REG
+ ? tem2 : 0));
+ }
+
+ /* If we usually make new pseudos, do so here. This
+ turns out to help machines that have conditional
+ move insns. */
+
+ if (flag_expensive_optimizations)
+ target = 0;
+
+ target = expand_binop (GET_MODE (var), ior_optab,
+ tem1, tem2, target,
+ 1, OPTAB_WIDEN);
+ }
+ else if (normalizep != 1)
+ {
+ /* We know that either CVAL or UVAL is zero. If
+ UVAL is zero, negate TARGET and `and' with CVAL.
+ Otherwise, `and' with UVAL. */
+ if (uval == const0_rtx)
+ {
+ target = expand_unop (GET_MODE (var), one_cmpl_optab,
+ target, NULL_RTX, 0);
+ uval = cval;
+ }
+
+ target = expand_and (uval, target,
+ (GET_CODE (target) == REG
+ && ! preserve_subexpressions_p ()
+ ? target : NULL_RTX));
+ }
+
+ emit_move_insn (var, target);
+ seq = get_insns ();
+ end_sequence ();
+
+#ifdef HAVE_cc0
+ /* If INSN uses CC0, we must not separate it from the
+ insn that sets cc0. */
+
+ if (reg_mentioned_p (cc0_rtx, PATTERN (before)))
+ before = prev_nonnote_insn (before);
+#endif
+
+ emit_insns_before (seq, before);
+
+ delete_insn (temp);
+ next = NEXT_INSN (insn);
+
+ delete_jump (insn);
+ changed = 1;
+ continue;
+ }
+ else
+ end_sequence ();
+ }
+
+ /* If branches are expensive, convert
+ if (foo) bar++; to bar += (foo != 0);
+ and similarly for "bar--;"
+
+ INSN is the conditional branch around the arithmetic. We set:
+
+ TEMP is the arithmetic insn.
+ TEMP1 is the SET doing the arithmetic.
+ TEMP2 is the operand being incremented or decremented.
+ TEMP3 to the condition being tested.
+ TEMP4 to the earliest insn used to find the condition. */
+
+ if ((BRANCH_COST >= 2
+#ifdef HAVE_incscc
+ || HAVE_incscc
+#endif
+#ifdef HAVE_decscc
+ || HAVE_decscc
+#endif
+ )
+ && ! reload_completed
+ && this_is_condjump && ! this_is_simplejump
+ && (temp = next_nonnote_insn (insn)) != 0
+ && (temp1 = single_set (temp)) != 0
+ && (temp2 = SET_DEST (temp1),
+ GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT)
+ && GET_CODE (SET_SRC (temp1)) == PLUS
+ && (XEXP (SET_SRC (temp1), 1) == const1_rtx
+ || XEXP (SET_SRC (temp1), 1) == constm1_rtx)
+ && rtx_equal_p (temp2, XEXP (SET_SRC (temp1), 0))
+ && ! side_effects_p (temp2)
+ && ! may_trap_p (temp2)
+ /* INSN must either branch to the insn after TEMP or the insn
+ after TEMP must branch to the same place as INSN. */
+ && (reallabelprev == temp
+ || ((temp3 = next_active_insn (temp)) != 0
+ && simplejump_p (temp3)
+ && JUMP_LABEL (temp3) == JUMP_LABEL (insn)))
+ && (temp3 = get_condition (insn, &temp4)) != 0
+ /* We must be comparing objects whose modes imply the size.
+ We could handle BLKmode if (1) emit_store_flag could
+ and (2) we could find the size reliably. */
+ && GET_MODE (XEXP (temp3, 0)) != BLKmode
+ && can_reverse_comparison_p (temp3, insn))
+ {
+ rtx temp6, target = 0, seq, init_insn = 0, init = temp2;
+ enum rtx_code code = reverse_condition (GET_CODE (temp3));
+
+ start_sequence ();
+
+ /* It must be the case that TEMP2 is not modified in the range
+ [TEMP4, INSN). The one exception we make is if the insn
+ before INSN sets TEMP2 to something which is also unchanged
+ in that range. In that case, we can move the initialization
+ into our sequence. */
+
+ if ((temp5 = prev_active_insn (insn)) != 0
+ && GET_CODE (temp5) == INSN
+ && (temp6 = single_set (temp5)) != 0
+ && rtx_equal_p (temp2, SET_DEST (temp6))
+ && (CONSTANT_P (SET_SRC (temp6))
+ || GET_CODE (SET_SRC (temp6)) == REG
+ || GET_CODE (SET_SRC (temp6)) == SUBREG))
+ {
+ emit_insn (PATTERN (temp5));
+ init_insn = temp5;
+ init = SET_SRC (temp6);
+ }
+
+ if (CONSTANT_P (init)
+ || ! reg_set_between_p (init, PREV_INSN (temp4), insn))
+ target = emit_store_flag (gen_reg_rtx (GET_MODE (temp2)), code,
+ XEXP (temp3, 0), XEXP (temp3, 1),
+ VOIDmode,
+ (code == LTU || code == LEU
+ || code == GTU || code == GEU), 1);
+
+ /* If we can do the store-flag, do the addition or
+ subtraction. */
+
+ if (target)
+ target = expand_binop (GET_MODE (temp2),
+ (XEXP (SET_SRC (temp1), 1) == const1_rtx
+ ? add_optab : sub_optab),
+ temp2, target, temp2, 0, OPTAB_WIDEN);
+
+ if (target != 0)
+ {
+ /* Put the result back in temp2 in case it isn't already.
+ Then replace the jump, possibly a CC0-setting insn in
+ front of the jump, and TEMP, with the sequence we have
+ made. */
+
+ if (target != temp2)
+ emit_move_insn (temp2, target);
+
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_insns_before (seq, temp4);
+ delete_insn (temp);
+
+ if (init_insn)
+ delete_insn (init_insn);
+
+ next = NEXT_INSN (insn);
+#ifdef HAVE_cc0
+ delete_insn (prev_nonnote_insn (insn));
+#endif
+ delete_insn (insn);
+ changed = 1;
+ continue;
+ }
+ else
+ end_sequence ();
+ }
+
+ /* Simplify if (...) x = 1; else {...} if (x) ...
+ We recognize this case scanning backwards as well.
+
+ TEMP is the assignment to x;
+ TEMP1 is the label at the head of the second if. */
+ /* ?? This should call get_condition to find the values being
+ compared, instead of looking for a COMPARE insn when HAVE_cc0
+ is not defined. This would allow it to work on the m88k. */
+ /* ?? This optimization is only safe before cse is run if HAVE_cc0
+ is not defined and the condition is tested by a separate compare
+ insn. This is because the code below assumes that the result
+ of the compare dies in the following branch.
+
+ Not only that, but there might be other insns between the
+ compare and branch whose results are live. Those insns need
+ to be executed.
+
+ A way to fix this is to move the insns at JUMP_LABEL (insn)
+ to before INSN. If we are running before flow, they will
+ be deleted if they aren't needed. But this doesn't work
+ well after flow.
+
+ This is really a special-case of jump threading, anyway. The
+ right thing to do is to replace this and jump threading with
+ much simpler code in cse.
+
+ This code has been turned off in the non-cc0 case in the
+ meantime. */
+
+#ifdef HAVE_cc0
+ else if (this_is_simplejump
+ /* Safe to skip USE and CLOBBER insns here
+ since they will not be deleted. */
+ && (temp = prev_active_insn (insn))
+ && no_labels_between_p (temp, insn)
+ && GET_CODE (temp) == INSN
+ && GET_CODE (PATTERN (temp)) == SET
+ && GET_CODE (SET_DEST (PATTERN (temp))) == REG
+ && CONSTANT_P (SET_SRC (PATTERN (temp)))
+ && (temp1 = next_active_insn (JUMP_LABEL (insn)))
+ /* If we find that the next value tested is `x'
+ (TEMP1 is the insn where this happens), win. */
+ && GET_CODE (temp1) == INSN
+ && GET_CODE (PATTERN (temp1)) == SET
+#ifdef HAVE_cc0
+ /* Does temp1 `tst' the value of x? */
+ && SET_SRC (PATTERN (temp1)) == SET_DEST (PATTERN (temp))
+ && SET_DEST (PATTERN (temp1)) == cc0_rtx
+ && (temp1 = next_nonnote_insn (temp1))
+#else
+ /* Does temp1 compare the value of x against zero? */
+ && GET_CODE (SET_SRC (PATTERN (temp1))) == COMPARE
+ && XEXP (SET_SRC (PATTERN (temp1)), 1) == const0_rtx
+ && (XEXP (SET_SRC (PATTERN (temp1)), 0)
+ == SET_DEST (PATTERN (temp)))
+ && GET_CODE (SET_DEST (PATTERN (temp1))) == REG
+ && (temp1 = find_next_ref (SET_DEST (PATTERN (temp1)), temp1))
+#endif
+ && condjump_p (temp1))
+ {
+ /* Get the if_then_else from the condjump. */
+ rtx choice = SET_SRC (PATTERN (temp1));
+ if (GET_CODE (choice) == IF_THEN_ELSE)
+ {
+ enum rtx_code code = GET_CODE (XEXP (choice, 0));
+ rtx val = SET_SRC (PATTERN (temp));
+ rtx cond
+ = simplify_relational_operation (code, GET_MODE (SET_DEST (PATTERN (temp))),
+ val, const0_rtx);
+ rtx ultimate;
+
+ if (cond == const_true_rtx)
+ ultimate = XEXP (choice, 1);
+ else if (cond == const0_rtx)
+ ultimate = XEXP (choice, 2);
+ else
+ ultimate = 0;
+
+ if (ultimate == pc_rtx)
+ ultimate = get_label_after (temp1);
+ else if (ultimate && GET_CODE (ultimate) != RETURN)
+ ultimate = XEXP (ultimate, 0);
+
+ if (ultimate)
+ changed |= redirect_jump (insn, ultimate);
+ }
+ }
+#endif
+
+#if 0
+ /* @@ This needs a bit of work before it will be right.
+
+ Any type of comparison can be accepted for the first and
+ second compare. When rewriting the first jump, we must
+ compute the what conditions can reach label3, and use the
+ appropriate code. We can not simply reverse/swap the code
+ of the first jump. In some cases, the second jump must be
+ rewritten also.
+
+ For example,
+ < == converts to > ==
+ < != converts to == >
+ etc.
+
+ If the code is written to only accept an '==' test for the second
+ compare, then all that needs to be done is to swap the condition
+ of the first branch.
+
+ It is questionable whether we want this optimization anyway,
+ since if the user wrote code like this because he/she knew that
+ the jump to label1 is taken most of the time, then rewriting
+ this gives slower code. */
+ /* @@ This should call get_condition to find the values being
+ compared, instead of looking for a COMPARE insn when HAVE_cc0
+ is not defined. This would allow it to work on the m88k. */
+ /* @@ This optimization is only safe before cse is run if HAVE_cc0
+ is not defined and the condition is tested by a separate compare
+ insn. This is because the code below assumes that the result
+ of the compare dies in the following branch. */
+
+ /* Simplify test a ~= b
+ condjump label1;
+ test a == b
+ condjump label2;
+ jump label3;
+ label1:
+
+ rewriting as
+ test a ~~= b
+ condjump label3
+ test a == b
+ condjump label2
+ label1:
+
+ where ~= is an inequality, e.g. >, and ~~= is the swapped
+ inequality, e.g. <.
+
+ We recognize this case scanning backwards.
+
+ TEMP is the conditional jump to `label2';
+ TEMP1 is the test for `a == b';
+ TEMP2 is the conditional jump to `label1';
+ TEMP3 is the test for `a ~= b'. */
+ else if (this_is_simplejump
+ && (temp = prev_active_insn (insn))
+ && no_labels_between_p (temp, insn)
+ && condjump_p (temp)
+ && (temp1 = prev_active_insn (temp))
+ && no_labels_between_p (temp1, temp)
+ && GET_CODE (temp1) == INSN
+ && GET_CODE (PATTERN (temp1)) == SET
+#ifdef HAVE_cc0
+ && sets_cc0_p (PATTERN (temp1)) == 1
+#else
+ && GET_CODE (SET_SRC (PATTERN (temp1))) == COMPARE
+ && GET_CODE (SET_DEST (PATTERN (temp1))) == REG
+ && (temp == find_next_ref (SET_DEST (PATTERN (temp1)), temp1))
+#endif
+ && (temp2 = prev_active_insn (temp1))
+ && no_labels_between_p (temp2, temp1)
+ && condjump_p (temp2)
+ && JUMP_LABEL (temp2) == next_nonnote_insn (NEXT_INSN (insn))
+ && (temp3 = prev_active_insn (temp2))
+ && no_labels_between_p (temp3, temp2)
+ && GET_CODE (PATTERN (temp3)) == SET
+ && rtx_equal_p (SET_DEST (PATTERN (temp3)),
+ SET_DEST (PATTERN (temp1)))
+ && rtx_equal_p (SET_SRC (PATTERN (temp1)),
+ SET_SRC (PATTERN (temp3)))
+ && ! inequality_comparisons_p (PATTERN (temp))
+ && inequality_comparisons_p (PATTERN (temp2)))
+ {
+ rtx fallthrough_label = JUMP_LABEL (temp2);
+
+ ++LABEL_NUSES (fallthrough_label);
+ if (swap_jump (temp2, JUMP_LABEL (insn)))
+ {
+ delete_insn (insn);
+ changed = 1;
+ }
+
+ if (--LABEL_NUSES (fallthrough_label) == 0)
+ delete_insn (fallthrough_label);
+ }
+#endif
+ /* Simplify if (...) {... x = 1;} if (x) ...
+
+ We recognize this case backwards.
+
+ TEMP is the test of `x';
+ TEMP1 is the assignment to `x' at the end of the
+ previous statement. */
+ /* @@ This should call get_condition to find the values being
+ compared, instead of looking for a COMPARE insn when HAVE_cc0
+ is not defined. This would allow it to work on the m88k. */
+ /* @@ This optimization is only safe before cse is run if HAVE_cc0
+ is not defined and the condition is tested by a separate compare
+ insn. This is because the code below assumes that the result
+ of the compare dies in the following branch. */
+
+ /* ??? This has to be turned off. The problem is that the
+ unconditional jump might indirectly end up branching to the
+ label between TEMP1 and TEMP. We can't detect this, in general,
+ since it may become a jump to there after further optimizations.
+ If such a jump is made, it will be deleted, so we will retry
+ this optimization on the next pass, producing an infinite loop.
+
+ The present code prevents this by putting the jump after the
+ label, but this is not logically correct. */
+#if 0
+ else if (this_is_condjump
+ /* Safe to skip USE and CLOBBER insns here
+ since they will not be deleted. */
+ && (temp = prev_active_insn (insn))
+ && no_labels_between_p (temp, insn)
+ && GET_CODE (temp) == INSN
+ && GET_CODE (PATTERN (temp)) == SET
+#ifdef HAVE_cc0
+ && sets_cc0_p (PATTERN (temp)) == 1
+ && GET_CODE (SET_SRC (PATTERN (temp))) == REG
+#else
+ /* TEMP must be a compare insn; we cannot accept a
+ register-to-register move here, since it may not be simply a
+ tst insn. */
+ && GET_CODE (SET_SRC (PATTERN (temp))) == COMPARE
+ && XEXP (SET_SRC (PATTERN (temp)), 1) == const0_rtx
+ && GET_CODE (XEXP (SET_SRC (PATTERN (temp)), 0)) == REG
+ && GET_CODE (SET_DEST (PATTERN (temp))) == REG
+ && insn == find_next_ref (SET_DEST (PATTERN (temp)), temp)
+#endif
+ /* May skip USE or CLOBBER insns here
+ for checking for opportunity, since we
+ take care of them later. */
+ && (temp1 = prev_active_insn (temp))
+ && GET_CODE (temp1) == INSN
+ && GET_CODE (PATTERN (temp1)) == SET
+#ifdef HAVE_cc0
+ && SET_SRC (PATTERN (temp)) == SET_DEST (PATTERN (temp1))
+#else
+ && (XEXP (SET_SRC (PATTERN (temp)), 0)
+ == SET_DEST (PATTERN (temp1)))
+#endif
+ && CONSTANT_P (SET_SRC (PATTERN (temp1)))
+ /* If this isn't true, cse will do the job. */
+ && ! no_labels_between_p (temp1, temp))
+ {
+ /* Get the if_then_else from the condjump. */
+ rtx choice = SET_SRC (PATTERN (insn));
+ if (GET_CODE (choice) == IF_THEN_ELSE
+ && (GET_CODE (XEXP (choice, 0)) == EQ
+ || GET_CODE (XEXP (choice, 0)) == NE))
+ {
+ int want_nonzero = (GET_CODE (XEXP (choice, 0)) == NE);
+ rtx last_insn;
+ rtx ultimate;
+ rtx p;
+
+ /* Get the place that condjump will jump to
+ if it is reached from here. */
+ if ((SET_SRC (PATTERN (temp1)) != const0_rtx)
+ == want_nonzero)
+ ultimate = XEXP (choice, 1);
+ else
+ ultimate = XEXP (choice, 2);
+ /* Get it as a CODE_LABEL. */
+ if (ultimate == pc_rtx)
+ ultimate = get_label_after (insn);
+ else
+ /* Get the label out of the LABEL_REF. */
+ ultimate = XEXP (ultimate, 0);
+
+ /* Insert the jump immediately before TEMP, specifically
+ after the label that is between TEMP1 and TEMP. */
+ last_insn = PREV_INSN (temp);
+
+ /* If we would be branching to the next insn, the jump
+ would immediately be deleted and then re-inserted in
+ a subsequent pass over the code. So don't do anything
+ in that case. */
+ if (next_active_insn (last_insn)
+ != next_active_insn (ultimate))
+ {
+ emit_barrier_after (last_insn);
+ p = emit_jump_insn_after (gen_jump (ultimate),
+ last_insn);
+ JUMP_LABEL (p) = ultimate;
+ ++LABEL_NUSES (ultimate);
+ if (INSN_UID (ultimate) < max_jump_chain
+ && INSN_CODE (p) < max_jump_chain)
+ {
+ jump_chain[INSN_UID (p)]
+ = jump_chain[INSN_UID (ultimate)];
+ jump_chain[INSN_UID (ultimate)] = p;
+ }
+ changed = 1;
+ continue;
+ }
+ }
+ }
+#endif
+ /* Detect a conditional jump going to the same place
+ as an immediately following unconditional jump. */
+ else if (this_is_condjump
+ && (temp = next_active_insn (insn)) != 0
+ && simplejump_p (temp)
+ && (next_active_insn (JUMP_LABEL (insn))
+ == next_active_insn (JUMP_LABEL (temp))))
+ {
+ delete_jump (insn);
+ changed = 1;
+ continue;
+ }
+ /* Detect a conditional jump jumping over an unconditional jump. */
+
+ else if ((this_is_condjump || this_is_condjump_in_parallel)
+ && ! this_is_simplejump
+ && reallabelprev != 0
+ && GET_CODE (reallabelprev) == JUMP_INSN
+ && prev_active_insn (reallabelprev) == insn
+ && no_labels_between_p (insn, reallabelprev)
+ && simplejump_p (reallabelprev))
+ {
+ /* When we invert the conditional jump INSN, we will be
+ decrementing the usage count of its old label.
+ Make sure that we don't delete it now because that
+ might cause the following code to be deleted. */
+ rtx prev_uses = prev_nonnote_insn (reallabelprev);
+ rtx prev_label = JUMP_LABEL (insn);
+
+ if (prev_label)
+ ++LABEL_NUSES (prev_label);
+
+ if (invert_jump (insn, JUMP_LABEL (reallabelprev)))
+ {
+ /* It is very likely that if there are USE insns before
+ this jump, they hold REG_DEAD notes. These REG_DEAD
+ notes are no longer valid due to this optimization,
+ and will cause the life-analysis that following passes
+ (notably delayed-branch scheduling) to think that
+ these registers are dead when they are not.
+
+ To prevent this trouble, we just remove the USE insns
+ from the insn chain. */
+
+ while (prev_uses && GET_CODE (prev_uses) == INSN
+ && GET_CODE (PATTERN (prev_uses)) == USE)
+ {
+ rtx useless = prev_uses;
+ prev_uses = prev_nonnote_insn (prev_uses);
+ delete_insn (useless);
+ }
+
+ delete_insn (reallabelprev);
+ next = insn;
+ changed = 1;
+ }
+
+ /* We can now safely delete the label if it is unreferenced
+ since the delete_insn above has deleted the BARRIER. */
+ if (prev_label && --LABEL_NUSES (prev_label) == 0)
+ delete_insn (prev_label);
+ continue;
+ }
+ else
+ {
+ /* Detect a jump to a jump. */
+
+ nlabel = follow_jumps (JUMP_LABEL (insn));
+ if (nlabel != JUMP_LABEL (insn)
+ && redirect_jump (insn, nlabel))
+ {
+ changed = 1;
+ next = insn;
+ }
+
+ /* Look for if (foo) bar; else break; */
+ /* The insns look like this:
+ insn = condjump label1;
+ ...range1 (some insns)...
+ jump label2;
+ label1:
+ ...range2 (some insns)...
+ jump somewhere unconditionally
+ label2: */
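+
+ /* After inverting INSN and exchanging the two ranges the stream
+ becomes (an editorial sketch):
+ insn = inverted condjump label1;
+ ...range2 (some insns)...
+ jump somewhere unconditionally
+ label1:
+ ...range1 (some insns)...
+ jump label2;
+ label2:
+ where "jump label2" now targets the immediately following
+ label and is deleted on a later pass. */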
+ {
+ rtx label1 = next_label (insn);
+ rtx range1end = label1 ? prev_active_insn (label1) : 0;
+ /* Don't do this optimization on the first round, so that
+ jump-around-a-jump gets simplified before we ask here
+ whether a jump is unconditional.
+
+ Also don't do it when we are called after reload since
+ it will confuse reorg. */
+ if (! first
+ && (reload_completed ? ! flag_delayed_branch : 1)
+ /* Make sure INSN is something we can invert. */
+ && condjump_p (insn)
+ && label1 != 0
+ && JUMP_LABEL (insn) == label1
+ && LABEL_NUSES (label1) == 1
+ && GET_CODE (range1end) == JUMP_INSN
+ && simplejump_p (range1end))
+ {
+ rtx label2 = next_label (label1);
+ rtx range2end = label2 ? prev_active_insn (label2) : 0;
+ if (range1end != range2end
+ && JUMP_LABEL (range1end) == label2
+ && GET_CODE (range2end) == JUMP_INSN
+ && GET_CODE (NEXT_INSN (range2end)) == BARRIER
+ /* Invert the jump condition, so we
+ still execute the same insns in each case. */
+ && invert_jump (insn, label1))
+ {
+ rtx range1beg = next_active_insn (insn);
+ rtx range2beg = next_active_insn (label1);
+ rtx range1after, range2after;
+ rtx range1before, range2before;
+ rtx rangenext;
+
+ /* Include in each range any notes before it, to be
+ sure that we get the line number note if any, even
+ if there are other notes here. */
+ while (PREV_INSN (range1beg)
+ && GET_CODE (PREV_INSN (range1beg)) == NOTE)
+ range1beg = PREV_INSN (range1beg);
+
+ while (PREV_INSN (range2beg)
+ && GET_CODE (PREV_INSN (range2beg)) == NOTE)
+ range2beg = PREV_INSN (range2beg);
+
+ /* Don't move NOTEs for blocks or loops; shift them
+ outside the ranges, where they'll stay put. */
+ range1beg = squeeze_notes (range1beg, range1end);
+ range2beg = squeeze_notes (range2beg, range2end);
+
+ /* Get current surrounds of the 2 ranges. */
+ range1before = PREV_INSN (range1beg);
+ range2before = PREV_INSN (range2beg);
+ range1after = NEXT_INSN (range1end);
+ range2after = NEXT_INSN (range2end);
+
+ /* Splice range2 where range1 was. */
+ NEXT_INSN (range1before) = range2beg;
+ PREV_INSN (range2beg) = range1before;
+ NEXT_INSN (range2end) = range1after;
+ PREV_INSN (range1after) = range2end;
+ /* Splice range1 where range2 was. */
+ NEXT_INSN (range2before) = range1beg;
+ PREV_INSN (range1beg) = range2before;
+ NEXT_INSN (range1end) = range2after;
+ PREV_INSN (range2after) = range1end;
+
+ /* Check for a loop end note between the end of
+ range2, and the next code label. If there is one,
+ then what we have really seen is
+ if (foo) break; end_of_loop;
+ and moved the break sequence outside the loop.
+ We must move the LOOP_END note to where the
+ loop really ends now, or we will confuse loop
+ optimization. */
+ for (;range2after != label2; range2after = rangenext)
+ {
+ rangenext = NEXT_INSN (range2after);
+ if (GET_CODE (range2after) == NOTE
+ && (NOTE_LINE_NUMBER (range2after)
+ == NOTE_INSN_LOOP_END))
+ {
+ NEXT_INSN (PREV_INSN (range2after))
+ = rangenext;
+ PREV_INSN (rangenext)
+ = PREV_INSN (range2after);
+ PREV_INSN (range2after)
+ = PREV_INSN (range1beg);
+ NEXT_INSN (range2after) = range1beg;
+ NEXT_INSN (PREV_INSN (range1beg))
+ = range2after;
+ PREV_INSN (range1beg) = range2after;
+ }
+ }
+ changed = 1;
+ continue;
+ }
+ }
+ }
+
+ /* Now that the jump has been tensioned,
+ try cross jumping: check for identical code
+ before the jump and before its target label. */
+
+ /* First, cross jumping of conditional jumps: */
+
+ if (cross_jump && condjump_p (insn))
+ {
+ rtx newjpos, newlpos;
+ rtx x = prev_real_insn (JUMP_LABEL (insn));
+
+ /* A conditional jump may be crossjumped
+ only if the place it jumps to follows
+ an opposing jump that comes back here. */
+
+ if (x != 0 && ! jump_back_p (x, insn))
+ /* We have no opposing jump;
+ cannot cross jump this insn. */
+ x = 0;
+
+ newjpos = 0;
+	      /* X is nonzero if it is ok to cross jump
+		 to code before X.  If so, see if the insns match.  */
+ if (x != 0)
+ find_cross_jump (insn, x, 2,
+ &newjpos, &newlpos);
+
+ if (newjpos != 0)
+ {
+ do_cross_jump (insn, newjpos, newlpos);
+ /* Make the old conditional jump
+ into an unconditional one. */
+ SET_SRC (PATTERN (insn))
+ = gen_rtx (LABEL_REF, VOIDmode, JUMP_LABEL (insn));
+ INSN_CODE (insn) = -1;
+ emit_barrier_after (insn);
+ /* Add to jump_chain unless this is a new label
+ whose UID is too large. */
+ if (INSN_UID (JUMP_LABEL (insn)) < max_jump_chain)
+ {
+ jump_chain[INSN_UID (insn)]
+ = jump_chain[INSN_UID (JUMP_LABEL (insn))];
+ jump_chain[INSN_UID (JUMP_LABEL (insn))] = insn;
+ }
+ changed = 1;
+ next = insn;
+ }
+ }
+
+ /* Cross jumping of unconditional jumps:
+ a few differences. */
+
+ if (cross_jump && simplejump_p (insn))
+ {
+ rtx newjpos, newlpos;
+ rtx target;
+
+ newjpos = 0;
+
+	      /* See if the insns before this jump match the insns
+		 before its target label; if so, they can be merged.  */
+ find_cross_jump (insn, JUMP_LABEL (insn), 1,
+ &newjpos, &newlpos);
+
+	      /* If we cannot cross jump to code before the label,
+		 see if we can cross jump to another jump to
+		 the same label.  */
+ /* Try each other jump to this label. */
+ if (INSN_UID (JUMP_LABEL (insn)) < max_uid)
+ for (target = jump_chain[INSN_UID (JUMP_LABEL (insn))];
+ target != 0 && newjpos == 0;
+ target = jump_chain[INSN_UID (target)])
+ if (target != insn
+ && JUMP_LABEL (target) == JUMP_LABEL (insn)
+ /* Ignore TARGET if it's deleted. */
+ && ! INSN_DELETED_P (target))
+ find_cross_jump (insn, target, 2,
+ &newjpos, &newlpos);
+
+ if (newjpos != 0)
+ {
+ do_cross_jump (insn, newjpos, newlpos);
+ changed = 1;
+ next = insn;
+ }
+ }
+
+ /* This code was dead in the previous jump.c! */
+ if (cross_jump && GET_CODE (PATTERN (insn)) == RETURN)
+ {
+ /* Return insns all "jump to the same place"
+ so we can cross-jump between any two of them. */
+
+ rtx newjpos, newlpos, target;
+
+ newjpos = 0;
+
+	      /* If we cannot cross jump to code before the label,
+		 see if we can cross jump to another jump to
+		 the same label.  */
+ /* Try each other jump to this label. */
+ for (target = jump_chain[0];
+ target != 0 && newjpos == 0;
+ target = jump_chain[INSN_UID (target)])
+ if (target != insn
+ && ! INSN_DELETED_P (target)
+ && GET_CODE (PATTERN (target)) == RETURN)
+ find_cross_jump (insn, target, 2,
+ &newjpos, &newlpos);
+
+ if (newjpos != 0)
+ {
+ do_cross_jump (insn, newjpos, newlpos);
+ changed = 1;
+ next = insn;
+ }
+ }
+ }
+ }
+
+ first = 0;
+ }
+
+ /* Delete extraneous line number notes.
+ Note that two consecutive notes for different lines are not really
+ extraneous. There should be some indication where that line belonged,
+ even if it became empty. */
+
+ {
+ rtx last_note = 0;
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0)
+ {
+ /* Delete this note if it is identical to previous note. */
+ if (last_note
+ && NOTE_SOURCE_FILE (insn) == NOTE_SOURCE_FILE (last_note)
+ && NOTE_LINE_NUMBER (insn) == NOTE_LINE_NUMBER (last_note))
+ {
+ delete_insn (insn);
+ continue;
+ }
+
+ last_note = insn;
+ }
+ }
+
+#ifdef HAVE_return
+ if (HAVE_return)
+ {
+ /* If we fall through to the epilogue, see if we can insert a RETURN insn
+ in front of it. If the machine allows it at this point (we might be
+ after reload for a leaf routine), it will improve optimization for it
+ to be there. We do this both here and at the start of this pass since
+ the RETURN might have been deleted by some of our optimizations. */
+ insn = get_last_insn ();
+ while (insn && GET_CODE (insn) == NOTE)
+ insn = PREV_INSN (insn);
+
+ if (insn && GET_CODE (insn) != BARRIER)
+ {
+ emit_jump_insn (gen_return ());
+ emit_barrier ();
+ }
+ }
+#endif
+
+ /* See if there is still a NOTE_INSN_FUNCTION_END in this function.
+ If so, delete it, and record that this function can drop off the end. */
+
+ insn = last_insn;
+ {
+ int n_labels = 1;
+ while (insn
+ /* One label can follow the end-note: the return label. */
+ && ((GET_CODE (insn) == CODE_LABEL && n_labels-- > 0)
+ /* Ordinary insns can follow it if returning a structure. */
+ || GET_CODE (insn) == INSN
+	   /* If the machine uses explicit RETURN insns and no epilogue,
+	      then one of them follows the note.  */
+ || (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) == RETURN)
+ /* Other kinds of notes can follow also. */
+ || (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_FUNCTION_END)))
+ insn = PREV_INSN (insn);
+ }
+
+ /* Report if control can fall through at the end of the function. */
+ if (insn && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_END)
+ {
+ can_reach_end = 1;
+ delete_insn (insn);
+ }
+
+ /* Show JUMP_CHAIN no longer valid. */
+ jump_chain = 0;
+}
+
+/* LOOP_START is a NOTE_INSN_LOOP_BEG note that is followed by an unconditional
+ jump. Assume that this unconditional jump is to the exit test code. If
+ the code is sufficiently simple, make a copy of it before INSN,
+ followed by a jump to the exit of the loop. Then delete the unconditional
+ jump after INSN.
+
+ Note that it is possible we can get confused here if the jump immediately
+ after the loop start branches outside the loop but within an outer loop.
+ If we are near the exit of that loop, we will copy its exit test. This
+ will not generate incorrect code, but could suppress some optimizations.
+ However, such cases are degenerate loops anyway.
+
+ Return 1 if we made the change, else 0.
+
+ This is only safe immediately after a regscan pass because it uses the
+ values of regno_first_uid and regno_last_uid. */
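+
+/* Schematically, a loop laid out as
+
+	NOTE_INSN_LOOP_BEG
+	jump L2
+   L1:	...loop body...
+   L2:	...exit test...
+	condjump L1
+	NOTE_INSN_LOOP_END
+   L3:
+
+   becomes, roughly,
+
+	...copy of exit test...
+	condjump L1
+	jump L3
+	NOTE_INSN_LOOP_BEG
+   L1:	...loop body...
+   L2:	...exit test...
+	condjump L1
+	NOTE_INSN_LOOP_END
+   L3:
+
+   so the exit test runs once before the loop is entered and the
+   unconditional jump at the top of the loop is gone.  */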
+
+static int
+duplicate_loop_exit_test (loop_start)
+ rtx loop_start;
+{
+ rtx insn, set, reg, p, link;
+ rtx copy = 0;
+ int num_insns = 0;
+ rtx exitcode = NEXT_INSN (JUMP_LABEL (next_nonnote_insn (loop_start)));
+ rtx lastexit;
+ int max_reg = max_reg_num ();
+ rtx *reg_map = 0;
+
+ /* Scan the exit code. We do not perform this optimization if any insn:
+
+ is a CALL_INSN
+ is a CODE_LABEL
+ has a REG_RETVAL or REG_LIBCALL note (hard to adjust)
+ is a NOTE_INSN_LOOP_BEG because this means we have a nested loop
+     is a NOTE_INSN_BLOCK_{BEG,END} because duplicating these notes
+	  is not valid
+
+ Also, don't do this if the exit code is more than 20 insns. */
+
+ for (insn = exitcode;
+ insn
+ && ! (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END);
+ insn = NEXT_INSN (insn))
+ {
+ switch (GET_CODE (insn))
+ {
+ case CODE_LABEL:
+ case CALL_INSN:
+ return 0;
+ case NOTE:
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END)
+ return 0;
+ break;
+ case JUMP_INSN:
+ case INSN:
+ if (++num_insns > 20
+ || find_reg_note (insn, REG_RETVAL, NULL_RTX)
+ || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
+ return 0;
+ break;
+ }
+ }
+
+ /* Unless INSN is zero, we can do the optimization. */
+ if (insn == 0)
+ return 0;
+
+ lastexit = insn;
+
+ /* See if any insn sets a register only used in the loop exit code and
+ not a user variable. If so, replace it with a new register. */
+ for (insn = exitcode; insn != lastexit; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN
+ && (set = single_set (insn)) != 0
+ && ((reg = SET_DEST (set), GET_CODE (reg) == REG)
+ || (GET_CODE (reg) == SUBREG
+ && (reg = SUBREG_REG (reg), GET_CODE (reg) == REG)))
+ && REGNO (reg) >= FIRST_PSEUDO_REGISTER
+ && regno_first_uid[REGNO (reg)] == INSN_UID (insn))
+ {
+ for (p = NEXT_INSN (insn); p != lastexit; p = NEXT_INSN (p))
+ if (regno_last_uid[REGNO (reg)] == INSN_UID (p))
+ break;
+
+ if (p != lastexit)
+ {
+ /* We can do the replacement. Allocate reg_map if this is the
+ first replacement we found. */
+ if (reg_map == 0)
+ {
+ reg_map = (rtx *) alloca (max_reg * sizeof (rtx));
+ bzero ((char *) reg_map, max_reg * sizeof (rtx));
+ }
+
+ REG_LOOP_TEST_P (reg) = 1;
+
+ reg_map[REGNO (reg)] = gen_reg_rtx (GET_MODE (reg));
+ }
+ }
+
+ /* Now copy each insn. */
+ for (insn = exitcode; insn != lastexit; insn = NEXT_INSN (insn))
+ switch (GET_CODE (insn))
+ {
+ case BARRIER:
+ copy = emit_barrier_before (loop_start);
+ break;
+ case NOTE:
+ /* Only copy line-number notes. */
+ if (NOTE_LINE_NUMBER (insn) >= 0)
+ {
+ copy = emit_note_before (NOTE_LINE_NUMBER (insn), loop_start);
+ NOTE_SOURCE_FILE (copy) = NOTE_SOURCE_FILE (insn);
+ }
+ break;
+
+ case INSN:
+ copy = emit_insn_before (copy_rtx (PATTERN (insn)), loop_start);
+ if (reg_map)
+ replace_regs (PATTERN (copy), reg_map, max_reg, 1);
+
+ mark_jump_label (PATTERN (copy), copy, 0);
+
+ /* Copy all REG_NOTES except REG_LABEL since mark_jump_label will
+ make them. */
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) != REG_LABEL)
+ REG_NOTES (copy)
+ = copy_rtx (gen_rtx (EXPR_LIST, REG_NOTE_KIND (link),
+ XEXP (link, 0), REG_NOTES (copy)));
+ if (reg_map && REG_NOTES (copy))
+ replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
+ break;
+
+ case JUMP_INSN:
+ copy = emit_jump_insn_before (copy_rtx (PATTERN (insn)), loop_start);
+ if (reg_map)
+ replace_regs (PATTERN (copy), reg_map, max_reg, 1);
+ mark_jump_label (PATTERN (copy), copy, 0);
+ if (REG_NOTES (insn))
+ {
+ REG_NOTES (copy) = copy_rtx (REG_NOTES (insn));
+ if (reg_map)
+ replace_regs (REG_NOTES (copy), reg_map, max_reg, 1);
+ }
+
+ /* If this is a simple jump, add it to the jump chain. */
+
+ if (INSN_UID (copy) < max_jump_chain && JUMP_LABEL (copy)
+ && simplejump_p (copy))
+ {
+ jump_chain[INSN_UID (copy)]
+ = jump_chain[INSN_UID (JUMP_LABEL (copy))];
+ jump_chain[INSN_UID (JUMP_LABEL (copy))] = copy;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Now clean up by emitting a jump to the end label and deleting the jump
+ at the start of the loop. */
+ if (! copy || GET_CODE (copy) != BARRIER)
+ {
+ copy = emit_jump_insn_before (gen_jump (get_label_after (insn)),
+ loop_start);
+ mark_jump_label (PATTERN (copy), copy, 0);
+ if (INSN_UID (copy) < max_jump_chain
+ && INSN_UID (JUMP_LABEL (copy)) < max_jump_chain)
+ {
+ jump_chain[INSN_UID (copy)]
+ = jump_chain[INSN_UID (JUMP_LABEL (copy))];
+ jump_chain[INSN_UID (JUMP_LABEL (copy))] = copy;
+ }
+ emit_barrier_before (loop_start);
+ }
+
+ delete_insn (next_nonnote_insn (loop_start));
+
+ /* Mark the exit code as the virtual top of the converted loop. */
+ emit_note_before (NOTE_INSN_LOOP_VTOP, exitcode);
+
+ return 1;
+}
+
+/* Move all block-beg, block-end, loop-beg, loop-cont, loop-vtop, and
+ loop-end notes between START and END out before START. Assume that
+ END is not such a note. START may be such a note. Returns the value
+ of the new starting insn, which may be different if the original start
+ was such a note. */
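+
+/* For example, if the range is
+
+	NOTE_INSN_LOOP_BEG, insn-1, NOTE_INSN_BLOCK_END, insn-2
+
+   both notes end up in front of the range, the range itself becomes
+   insn-1, insn-2, and insn-1 is returned as the new start.  */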
+
+rtx
+squeeze_notes (start, end)
+ rtx start, end;
+{
+ rtx insn;
+ rtx next;
+
+ for (insn = start; insn != end; insn = next)
+ {
+ next = NEXT_INSN (insn);
+ if (GET_CODE (insn) == NOTE
+ && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_VTOP))
+ {
+ if (insn == start)
+ start = next;
+ else
+ {
+ rtx prev = PREV_INSN (insn);
+ PREV_INSN (insn) = PREV_INSN (start);
+ NEXT_INSN (insn) = start;
+ NEXT_INSN (PREV_INSN (insn)) = insn;
+ PREV_INSN (NEXT_INSN (insn)) = insn;
+ NEXT_INSN (prev) = next;
+ PREV_INSN (next) = prev;
+ }
+ }
+ }
+
+ return start;
+}
+
+/* Compare the instructions before insn E1 with those before E2
+ to find an opportunity for cross jumping.
+ (This means detecting identical sequences of insns followed by
+ jumps to the same place, or followed by a label and a jump
+ to that label, and replacing one with a jump to the other.)
+
+ Assume E1 is a jump that jumps to label E2
+ (that is not always true but it might as well be).
+ Find the longest possible equivalent sequences
+ and store the first insns of those sequences into *F1 and *F2.
+ Store zero there if no equivalent preceding instructions are found.
+
+ We give up if we find a label in stream 1.
+ Actually we could transfer that label into stream 2. */
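+
+/* For example, in the stream
+
+	...sequence A...
+	jump L
+	...
+	...sequence B, identical to A...
+   L:
+
+   with E1 the jump and E2 the label L, the matching sequences are A
+   and B, so *F1 and *F2 get the first insns of A and B; do_cross_jump
+   can then delete A and redirect the jump to a new label before B.  */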
+
+static void
+find_cross_jump (e1, e2, minimum, f1, f2)
+ rtx e1, e2;
+ int minimum;
+ rtx *f1, *f2;
+{
+ register rtx i1 = e1, i2 = e2;
+ register rtx p1, p2;
+ int lose = 0;
+
+ rtx last1 = 0, last2 = 0;
+ rtx afterlast1 = 0, afterlast2 = 0;
+ rtx prev1;
+
+ *f1 = 0;
+ *f2 = 0;
+
+ while (1)
+ {
+ i1 = prev_nonnote_insn (i1);
+
+ i2 = PREV_INSN (i2);
+ while (i2 && (GET_CODE (i2) == NOTE || GET_CODE (i2) == CODE_LABEL))
+ i2 = PREV_INSN (i2);
+
+ if (i1 == 0)
+ break;
+
+ /* Don't allow the range of insns preceding E1 or E2
+ to include the other (E2 or E1). */
+ if (i2 == e1 || i1 == e2)
+ break;
+
+ /* If we will get to this code by jumping, those jumps will be
+ tensioned to go directly to the new label (before I2),
+ so this cross-jumping won't cost extra. So reduce the minimum. */
+ if (GET_CODE (i1) == CODE_LABEL)
+ {
+ --minimum;
+ break;
+ }
+
+ if (i2 == 0 || GET_CODE (i1) != GET_CODE (i2))
+ break;
+
+ p1 = PATTERN (i1);
+ p2 = PATTERN (i2);
+
+ /* If this is a CALL_INSN, compare register usage information.
+ If we don't check this on stack register machines, the two
+ CALL_INSNs might be merged leaving reg-stack.c with mismatching
+ numbers of stack registers in the same basic block.
+ If we don't check this on machines with delay slots, a delay slot may
+ be filled that clobbers a parameter expected by the subroutine.
+
+ ??? We take the simple route for now and assume that if they're
+ equal, they were constructed identically. */
+
+ if (GET_CODE (i1) == CALL_INSN
+ && ! rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
+ CALL_INSN_FUNCTION_USAGE (i2)))
+ lose = 1;
+
+#ifdef STACK_REGS
+ /* If cross_jump_death_matters is not 0, the insn's mode
+ indicates whether or not the insn contains any stack-like
+ regs. */
+
+ if (!lose && cross_jump_death_matters && GET_MODE (i1) == QImode)
+ {
+ /* If register stack conversion has already been done, then
+ death notes must also be compared before it is certain that
+ the two instruction streams match. */
+
+ rtx note;
+ HARD_REG_SET i1_regset, i2_regset;
+
+ CLEAR_HARD_REG_SET (i1_regset);
+ CLEAR_HARD_REG_SET (i2_regset);
+
+ for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && STACK_REG_P (XEXP (note, 0)))
+ SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
+
+ for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && STACK_REG_P (XEXP (note, 0)))
+ SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
+
+ GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
+
+ lose = 1;
+
+ done:
+ ;
+ }
+#endif
+
+ if (lose || GET_CODE (p1) != GET_CODE (p2)
+ || ! rtx_renumbered_equal_p (p1, p2))
+ {
+ /* The following code helps take care of G++ cleanups. */
+ rtx equiv1;
+ rtx equiv2;
+
+ if (!lose && GET_CODE (p1) == GET_CODE (p2)
+ && ((equiv1 = find_reg_note (i1, REG_EQUAL, NULL_RTX)) != 0
+ || (equiv1 = find_reg_note (i1, REG_EQUIV, NULL_RTX)) != 0)
+ && ((equiv2 = find_reg_note (i2, REG_EQUAL, NULL_RTX)) != 0
+ || (equiv2 = find_reg_note (i2, REG_EQUIV, NULL_RTX)) != 0)
+ /* If the equivalences are not to a constant, they may
+ reference pseudos that no longer exist, so we can't
+ use them. */
+ && CONSTANT_P (XEXP (equiv1, 0))
+ && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
+ {
+ rtx s1 = single_set (i1);
+ rtx s2 = single_set (i2);
+ if (s1 != 0 && s2 != 0
+ && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
+ {
+ validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
+ validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
+ if (! rtx_renumbered_equal_p (p1, p2))
+ cancel_changes (0);
+ else if (apply_change_group ())
+ goto win;
+ }
+ }
+
+ /* Insns fail to match; cross jumping is limited to the following
+ insns. */
+
+#ifdef HAVE_cc0
+ /* Don't allow the insn after a compare to be shared by
+ cross-jumping unless the compare is also shared.
+ Here, if either of these non-matching insns is a compare,
+ exclude the following insn from possible cross-jumping. */
+ if (sets_cc0_p (p1) || sets_cc0_p (p2))
+ last1 = afterlast1, last2 = afterlast2, ++minimum;
+#endif
+
+ /* If cross-jumping here will feed a jump-around-jump
+ optimization, this jump won't cost extra, so reduce
+ the minimum. */
+ if (GET_CODE (i1) == JUMP_INSN
+ && JUMP_LABEL (i1)
+ && prev_real_insn (JUMP_LABEL (i1)) == e1)
+ --minimum;
+ break;
+ }
+
+ win:
+ if (GET_CODE (p1) != USE && GET_CODE (p1) != CLOBBER)
+ {
+ /* Ok, this insn is potentially includable in a cross-jump here. */
+ afterlast1 = last1, afterlast2 = last2;
+ last1 = i1, last2 = i2, --minimum;
+ }
+ }
+
+ if (minimum <= 0 && last1 != 0 && last1 != e1)
+ *f1 = last1, *f2 = last2;
+}
+
+static void
+do_cross_jump (insn, newjpos, newlpos)
+ rtx insn, newjpos, newlpos;
+{
+ /* Find an existing label at this point
+ or make a new one if there is none. */
+ register rtx label = get_label_before (newlpos);
+
+ /* Make the same jump insn jump to the new point. */
+ if (GET_CODE (PATTERN (insn)) == RETURN)
+ {
+ /* Remove from jump chain of returns. */
+ delete_from_jump_chain (insn);
+ /* Change the insn. */
+ PATTERN (insn) = gen_jump (label);
+ INSN_CODE (insn) = -1;
+ JUMP_LABEL (insn) = label;
+ LABEL_NUSES (label)++;
+      /* Add to the jump chain of the new label.  */
+ if (INSN_UID (label) < max_jump_chain
+ && INSN_UID (insn) < max_jump_chain)
+ {
+ jump_chain[INSN_UID (insn)] = jump_chain[INSN_UID (label)];
+ jump_chain[INSN_UID (label)] = insn;
+ }
+ }
+ else
+ redirect_jump (insn, label);
+
+ /* Delete the matching insns before the jump. Also, remove any REG_EQUAL
+ or REG_EQUIV note in the NEWLPOS stream that isn't also present in
+ the NEWJPOS stream. */
+
+ while (newjpos != insn)
+ {
+ rtx lnote;
+
+ for (lnote = REG_NOTES (newlpos); lnote; lnote = XEXP (lnote, 1))
+ if ((REG_NOTE_KIND (lnote) == REG_EQUAL
+ || REG_NOTE_KIND (lnote) == REG_EQUIV)
+ && ! find_reg_note (newjpos, REG_EQUAL, XEXP (lnote, 0))
+ && ! find_reg_note (newjpos, REG_EQUIV, XEXP (lnote, 0)))
+ remove_note (newlpos, lnote);
+
+ delete_insn (newjpos);
+ newjpos = next_real_insn (newjpos);
+ newlpos = next_real_insn (newlpos);
+ }
+}
+
+/* Return the label before INSN, or put a new label there. */
+
+rtx
+get_label_before (insn)
+ rtx insn;
+{
+ rtx label;
+
+ /* Find an existing label at this point
+ or make a new one if there is none. */
+ label = prev_nonnote_insn (insn);
+
+ if (label == 0 || GET_CODE (label) != CODE_LABEL)
+ {
+ rtx prev = PREV_INSN (insn);
+
+ label = gen_label_rtx ();
+ emit_label_after (label, prev);
+ LABEL_NUSES (label) = 0;
+ }
+ return label;
+}
+
+/* Return the label after INSN, or put a new label there. */
+
+rtx
+get_label_after (insn)
+ rtx insn;
+{
+ rtx label;
+
+ /* Find an existing label at this point
+ or make a new one if there is none. */
+ label = next_nonnote_insn (insn);
+
+ if (label == 0 || GET_CODE (label) != CODE_LABEL)
+ {
+ label = gen_label_rtx ();
+ emit_label_after (label, insn);
+ LABEL_NUSES (label) = 0;
+ }
+ return label;
+}
+
+/* Return 1 if INSN is a jump that jumps to right after TARGET
+ only on the condition that TARGET itself would drop through.
+ Assumes that TARGET is a conditional jump. */
+
+static int
+jump_back_p (insn, target)
+ rtx insn, target;
+{
+ rtx cinsn, ctarget;
+ enum rtx_code codei, codet;
+
+ if (simplejump_p (insn) || ! condjump_p (insn)
+ || simplejump_p (target)
+ || target != prev_real_insn (JUMP_LABEL (insn)))
+ return 0;
+
+ cinsn = XEXP (SET_SRC (PATTERN (insn)), 0);
+ ctarget = XEXP (SET_SRC (PATTERN (target)), 0);
+
+ codei = GET_CODE (cinsn);
+ codet = GET_CODE (ctarget);
+
+ if (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx)
+ {
+ if (! can_reverse_comparison_p (cinsn, insn))
+ return 0;
+ codei = reverse_condition (codei);
+ }
+
+ if (XEXP (SET_SRC (PATTERN (target)), 2) == pc_rtx)
+ {
+ if (! can_reverse_comparison_p (ctarget, target))
+ return 0;
+ codet = reverse_condition (codet);
+ }
+
+ return (codei == codet
+ && rtx_renumbered_equal_p (XEXP (cinsn, 0), XEXP (ctarget, 0))
+ && rtx_renumbered_equal_p (XEXP (cinsn, 1), XEXP (ctarget, 1)));
+}
+
+/* Given a comparison, COMPARISON, inside a conditional jump insn, INSN,
+   return non-zero if it is safe to reverse this comparison.  It is safe
+   if our floating-point format is not IEEE, if this is an NE or EQ
+   comparison, or if this is known to be an integer comparison.  */
+
+int
+can_reverse_comparison_p (comparison, insn)
+ rtx comparison;
+ rtx insn;
+{
+ rtx arg0;
+
+ /* If this is not actually a comparison, we can't reverse it. */
+ if (GET_RTX_CLASS (GET_CODE (comparison)) != '<')
+ return 0;
+
+ if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ /* If this is an NE comparison, it is safe to reverse it to an EQ
+ comparison and vice versa, even for floating point. If no operands
+ are NaNs, the reversal is valid. If some operand is a NaN, EQ is
+ always false and NE is always true, so the reversal is also valid. */
+ || flag_fast_math
+ || GET_CODE (comparison) == NE
+ || GET_CODE (comparison) == EQ)
+ return 1;
+
+ arg0 = XEXP (comparison, 0);
+
+ /* Make sure ARG0 is one of the actual objects being compared. If we
+ can't do this, we can't be sure the comparison can be reversed.
+
+ Handle cc0 and a MODE_CC register. */
+ if ((GET_CODE (arg0) == REG && GET_MODE_CLASS (GET_MODE (arg0)) == MODE_CC)
+#ifdef HAVE_cc0
+ || arg0 == cc0_rtx
+#endif
+ )
+ {
+ rtx prev = prev_nonnote_insn (insn);
+ rtx set = single_set (prev);
+
+ if (set == 0 || SET_DEST (set) != arg0)
+ return 0;
+
+ arg0 = SET_SRC (set);
+
+ if (GET_CODE (arg0) == COMPARE)
+ arg0 = XEXP (arg0, 0);
+ }
+
+ /* We can reverse this if ARG0 is a CONST_INT or if its mode is
+ not VOIDmode and neither a MODE_CC nor MODE_FLOAT type. */
+ return (GET_CODE (arg0) == CONST_INT
+ || (GET_MODE (arg0) != VOIDmode
+ && GET_MODE_CLASS (GET_MODE (arg0)) != MODE_CC
+ && GET_MODE_CLASS (GET_MODE (arg0)) != MODE_FLOAT));
+}
+
+/* Given an rtx-code for a comparison, return the code
+ for the negated comparison.
+ WATCH OUT! reverse_condition is not safe to use on a jump
+ that might be acting on the results of an IEEE floating point comparison,
+   because of the special treatment of non-signaling NaNs in comparisons.
+ Use can_reverse_comparison_p to be sure. */
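+
+/* For example, under IEEE arithmetic both (GT x y) and (LE x y) are
+   false when either operand is a NaN, so treating one as the reverse
+   of the other would change the meaning of the jump.  */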
+
+enum rtx_code
+reverse_condition (code)
+ enum rtx_code code;
+{
+ switch (code)
+ {
+ case EQ:
+ return NE;
+
+ case NE:
+ return EQ;
+
+ case GT:
+ return LE;
+
+ case GE:
+ return LT;
+
+ case LT:
+ return GE;
+
+ case LE:
+ return GT;
+
+ case GTU:
+ return LEU;
+
+ case GEU:
+ return LTU;
+
+ case LTU:
+ return GEU;
+
+ case LEU:
+ return GTU;
+
+ default:
+ abort ();
+ return UNKNOWN;
+ }
+}
+
+/* Similar, but return the code when two operands of a comparison are swapped.
+ This IS safe for IEEE floating-point. */
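+
+/* For example, (GT x y) and (LT y x) always have the same truth value,
+   even when an operand is a NaN (both are then false).  */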
+
+enum rtx_code
+swap_condition (code)
+ enum rtx_code code;
+{
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ return code;
+
+ case GT:
+ return LT;
+
+ case GE:
+ return LE;
+
+ case LT:
+ return GT;
+
+ case LE:
+ return GE;
+
+ case GTU:
+ return LTU;
+
+ case GEU:
+ return LEU;
+
+ case LTU:
+ return GTU;
+
+ case LEU:
+ return GEU;
+
+ default:
+ abort ();
+ return UNKNOWN;
+ }
+}
+
+/* Given a comparison CODE, return the corresponding unsigned comparison.
+ If CODE is an equality comparison or already an unsigned comparison,
+ CODE is returned. */
+
+enum rtx_code
+unsigned_condition (code)
+ enum rtx_code code;
+{
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ case GTU:
+ case GEU:
+ case LTU:
+ case LEU:
+ return code;
+
+ case GT:
+ return GTU;
+
+ case GE:
+ return GEU;
+
+ case LT:
+ return LTU;
+
+ case LE:
+ return LEU;
+
+ default:
+ abort ();
+ }
+}
+
+/* Similarly, return the signed version of a comparison. */
+
+enum rtx_code
+signed_condition (code)
+ enum rtx_code code;
+{
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ case GT:
+ case GE:
+ case LT:
+ case LE:
+ return code;
+
+ case GTU:
+ return GT;
+
+ case GEU:
+ return GE;
+
+ case LTU:
+ return LT;
+
+ case LEU:
+ return LE;
+
+ default:
+ abort ();
+ }
+}
+
+/* Return non-zero if CODE1 is more strict than CODE2, i.e., if the
+ truth of CODE1 implies the truth of CODE2. */
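+
+/* For example, EQ dominates GE and LE: if x == y is known true, then
+   x >= y and x <= y are certainly true.  Likewise LT dominates both
+   LE and NE.  */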
+
+int
+comparison_dominates_p (code1, code2)
+ enum rtx_code code1, code2;
+{
+ if (code1 == code2)
+ return 1;
+
+ switch (code1)
+ {
+ case EQ:
+ if (code2 == LE || code2 == LEU || code2 == GE || code2 == GEU)
+ return 1;
+ break;
+
+ case LT:
+ if (code2 == LE || code2 == NE)
+ return 1;
+ break;
+
+ case GT:
+ if (code2 == GE || code2 == NE)
+ return 1;
+ break;
+
+ case LTU:
+ if (code2 == LEU || code2 == NE)
+ return 1;
+ break;
+
+ case GTU:
+ if (code2 == GEU || code2 == NE)
+ return 1;
+ break;
+ }
+
+ return 0;
+}
+
+/* Return 1 if INSN is an unconditional jump and nothing else. */
+
+int
+simplejump_p (insn)
+ rtx insn;
+{
+ return (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && GET_CODE (SET_DEST (PATTERN (insn))) == PC
+ && GET_CODE (SET_SRC (PATTERN (insn))) == LABEL_REF);
+}
+
+/* Return nonzero if INSN is a (possibly) conditional jump
+ and nothing more. */
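+
+/* The patterns accepted here are
+
+	(set (pc) (label_ref L))
+	(set (pc) (if_then_else COND (label_ref L) (pc)))
+	(set (pc) (if_then_else COND (pc) (label_ref L)))
+
+   where the label_ref arm may also be a (return).  */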
+
+int
+condjump_p (insn)
+ rtx insn;
+{
+ register rtx x = PATTERN (insn);
+ if (GET_CODE (x) != SET)
+ return 0;
+ if (GET_CODE (SET_DEST (x)) != PC)
+ return 0;
+ if (GET_CODE (SET_SRC (x)) == LABEL_REF)
+ return 1;
+ if (GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
+ return 0;
+ if (XEXP (SET_SRC (x), 2) == pc_rtx
+ && (GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF
+ || GET_CODE (XEXP (SET_SRC (x), 1)) == RETURN))
+ return 1;
+ if (XEXP (SET_SRC (x), 1) == pc_rtx
+ && (GET_CODE (XEXP (SET_SRC (x), 2)) == LABEL_REF
+ || GET_CODE (XEXP (SET_SRC (x), 2)) == RETURN))
+ return 1;
+ return 0;
+}
+
+/* Return nonzero if INSN is a (possibly) conditional jump inside
+   a PARALLEL and nothing more.  */
+
+int
+condjump_in_parallel_p (insn)
+ rtx insn;
+{
+ register rtx x = PATTERN (insn);
+
+ if (GET_CODE (x) != PARALLEL)
+ return 0;
+ else
+ x = XVECEXP (x, 0, 0);
+
+ if (GET_CODE (x) != SET)
+ return 0;
+ if (GET_CODE (SET_DEST (x)) != PC)
+ return 0;
+ if (GET_CODE (SET_SRC (x)) == LABEL_REF)
+ return 1;
+ if (GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
+ return 0;
+ if (XEXP (SET_SRC (x), 2) == pc_rtx
+ && (GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF
+ || GET_CODE (XEXP (SET_SRC (x), 1)) == RETURN))
+ return 1;
+ if (XEXP (SET_SRC (x), 1) == pc_rtx
+ && (GET_CODE (XEXP (SET_SRC (x), 2)) == LABEL_REF
+ || GET_CODE (XEXP (SET_SRC (x), 2)) == RETURN))
+ return 1;
+ return 0;
+}
+
+/* Return 1 if X is an RTX that does nothing but set the condition codes
+   and CLOBBER or USE registers.
+   Return -1 if X does explicitly set the condition codes,
+   but also does other things.
+   Return 0 if X does not set the condition codes at all.  */
+
+int
+sets_cc0_p (x)
+ rtx x;
+{
+#ifdef HAVE_cc0
+ if (GET_CODE (x) == SET && SET_DEST (x) == cc0_rtx)
+ return 1;
+ if (GET_CODE (x) == PARALLEL)
+ {
+ int i;
+ int sets_cc0 = 0;
+ int other_things = 0;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ if (GET_CODE (XVECEXP (x, 0, i)) == SET
+ && SET_DEST (XVECEXP (x, 0, i)) == cc0_rtx)
+ sets_cc0 = 1;
+ else if (GET_CODE (XVECEXP (x, 0, i)) == SET)
+ other_things = 1;
+ }
+ return ! sets_cc0 ? 0 : other_things ? -1 : 1;
+ }
+ return 0;
+#else
+ abort ();
+#endif
+}
+
+/* Follow any unconditional jump at LABEL;
+ return the ultimate label reached by any such chain of jumps.
+ If LABEL is not followed by a jump, return LABEL.
+   If the chain loops or we can't find the end, return LABEL,
+ since that tells caller to avoid changing the insn.
+
+ If RELOAD_COMPLETED is 0, we do not chain across a NOTE_INSN_LOOP_BEG or
+ a USE or CLOBBER. */
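+
+/* For example, given the stream
+
+   L1:	jump L2
+	...
+   L2:	jump L3
+	...
+   L3:	(some active insn)
+
+   follow_jumps (L1) returns L3, so a jump aimed at L1 can be
+   redirected straight to L3.  */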
+
+rtx
+follow_jumps (label)
+ rtx label;
+{
+ register rtx insn;
+ register rtx next;
+ register rtx value = label;
+ register int depth;
+
+ for (depth = 0;
+ (depth < 10
+ && (insn = next_active_insn (value)) != 0
+ && GET_CODE (insn) == JUMP_INSN
+ && (JUMP_LABEL (insn) != 0 || GET_CODE (PATTERN (insn)) == RETURN)
+ && (next = NEXT_INSN (insn))
+ && GET_CODE (next) == BARRIER);
+ depth++)
+ {
+ /* Don't chain through the insn that jumps into a loop
+ from outside the loop,
+ since that would create multiple loop entry jumps
+ and prevent loop optimization. */
+ rtx tem;
+ if (!reload_completed)
+ for (tem = value; tem != insn; tem = NEXT_INSN (tem))
+ if (GET_CODE (tem) == NOTE
+ && NOTE_LINE_NUMBER (tem) == NOTE_INSN_LOOP_BEG)
+ return value;
+
+ /* If we have found a cycle, make the insn jump to itself. */
+ if (JUMP_LABEL (insn) == label)
+ return label;
+
+ tem = next_active_insn (JUMP_LABEL (insn));
+ if (tem && (GET_CODE (PATTERN (tem)) == ADDR_VEC
+ || GET_CODE (PATTERN (tem)) == ADDR_DIFF_VEC))
+ break;
+
+ value = JUMP_LABEL (insn);
+ }
+ if (depth == 10)
+ return label;
+ return value;
+}
+
+/* Assuming that field IDX of X is a vector of label_refs,
+ replace each of them by the ultimate label reached by it.
+ Return nonzero if a change is made.
+   (Before reload is complete, follow_jumps will not chain across a
+   NOTE_INSN_LOOP_BEG.)  */
+
+static int
+tension_vector_labels (x, idx)
+ register rtx x;
+ register int idx;
+{
+ int changed = 0;
+ register int i;
+ for (i = XVECLEN (x, idx) - 1; i >= 0; i--)
+ {
+ register rtx olabel = XEXP (XVECEXP (x, idx, i), 0);
+ register rtx nlabel = follow_jumps (olabel);
+ if (nlabel && nlabel != olabel)
+ {
+ XEXP (XVECEXP (x, idx, i), 0) = nlabel;
+ ++LABEL_NUSES (nlabel);
+ if (--LABEL_NUSES (olabel) == 0)
+ delete_insn (olabel);
+ changed = 1;
+ }
+ }
+ return changed;
+}
+
+/* Find all CODE_LABELs referred to in X, and increment their use counts.
+ If INSN is a JUMP_INSN and there is at least one CODE_LABEL referenced
+ in INSN, then store one of them in JUMP_LABEL (INSN).
+ If INSN is an INSN or a CALL_INSN and there is at least one CODE_LABEL
+ referenced in INSN, add a REG_LABEL note containing that label to INSN.
+ Also, when there are consecutive labels, canonicalize on the last of them.
+
+ Note that two labels separated by a loop-beginning note
+ must be kept distinct if we have not yet done loop-optimization,
+ because the gap between them is where loop-optimize
+   will want to move invariant code to.  A nonzero CROSS_JUMP tells us
+   that loop optimization has already been done.
+
+ Once reload has completed (CROSS_JUMP non-zero), we need not consider
+ two labels distinct if they are separated by only USE or CLOBBER insns. */
+
+static void
+mark_jump_label (x, insn, cross_jump)
+ register rtx x;
+ rtx insn;
+ int cross_jump;
+{
+ register RTX_CODE code = GET_CODE (x);
+ register int i;
+ register char *fmt;
+
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case REG:
+ case SUBREG:
+ case CONST_INT:
+ case SYMBOL_REF:
+ case CONST_DOUBLE:
+ case CLOBBER:
+ case CALL:
+ return;
+
+ case MEM:
+ /* If this is a constant-pool reference, see if it is a label. */
+ if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ mark_jump_label (get_pool_constant (XEXP (x, 0)), insn, cross_jump);
+ break;
+
+ case LABEL_REF:
+ {
+ rtx label = XEXP (x, 0);
+ rtx olabel = label;
+ rtx note;
+ rtx next;
+
+ if (GET_CODE (label) != CODE_LABEL)
+ abort ();
+
+ /* Ignore references to labels of containing functions. */
+ if (LABEL_REF_NONLOCAL_P (x))
+ break;
+
+ /* If there are other labels following this one,
+ replace it with the last of the consecutive labels. */
+ for (next = NEXT_INSN (label); next; next = NEXT_INSN (next))
+ {
+ if (GET_CODE (next) == CODE_LABEL)
+ label = next;
+ else if (cross_jump && GET_CODE (next) == INSN
+ && (GET_CODE (PATTERN (next)) == USE
+ || GET_CODE (PATTERN (next)) == CLOBBER))
+ continue;
+ else if (GET_CODE (next) != NOTE)
+ break;
+ else if (! cross_jump
+ && (NOTE_LINE_NUMBER (next) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (next) == NOTE_INSN_FUNCTION_END))
+ break;
+ }
+
+ XEXP (x, 0) = label;
+ ++LABEL_NUSES (label);
+
+ if (insn)
+ {
+ if (GET_CODE (insn) == JUMP_INSN)
+ JUMP_LABEL (insn) = label;
+
+ /* If we've changed OLABEL and we had a REG_LABEL note
+ for it, update it as well. */
+ else if (label != olabel
+ && (note = find_reg_note (insn, REG_LABEL, olabel)) != 0)
+ XEXP (note, 0) = label;
+
+ /* Otherwise, add a REG_LABEL note for LABEL unless there already
+ is one. */
+ else if (! find_reg_note (insn, REG_LABEL, label))
+ {
+ rtx next = next_real_insn (label);
+ /* Don't record labels that refer to dispatch tables.
+ This is not necessary, since the tablejump
+ references the same label.
+ And if we did record them, flow.c would make worse code. */
+ if (next == 0
+ || ! (GET_CODE (next) == JUMP_INSN
+ && (GET_CODE (PATTERN (next)) == ADDR_VEC
+ || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL, label,
+ REG_NOTES (insn));
+ }
+ }
+ return;
+ }
+
+ /* Do walk the labels in a vector, but not the first operand of an
+ ADDR_DIFF_VEC. Don't set the JUMP_LABEL of a vector. */
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ {
+ int eltnum = code == ADDR_DIFF_VEC ? 1 : 0;
+
+ for (i = 0; i < XVECLEN (x, eltnum); i++)
+ mark_jump_label (XVECEXP (x, eltnum, i), NULL_RTX, cross_jump);
+ return;
+ }
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ mark_jump_label (XEXP (x, i), insn, cross_jump);
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_jump_label (XVECEXP (x, i, j), insn, cross_jump);
+ }
+ }
+}
+
+/* If all INSN does is set the pc, delete it,
+   and delete the insn that set the condition codes for it
+   if the previous insn did just that.  */
+
+void
+delete_jump (insn)
+ rtx insn;
+{
+ register rtx set = single_set (insn);
+
+ if (set && GET_CODE (SET_DEST (set)) == PC)
+ delete_computation (insn);
+}
+
+/* Delete INSN and recursively delete insns that compute values used only
+ by INSN. This uses the REG_DEAD notes computed during flow analysis.
+ If we are running before flow.c, we need do nothing since flow.c will
+ delete dead code. We also can't know if the registers being used are
+ dead or not at this point.
+
+ Otherwise, look at all our REG_DEAD notes. If a previous insn does
+ nothing other than set a register that dies in this insn, we can delete
+ that insn as well.
+
+ On machines with CC0, if CC0 is used in this insn, we may be able to
+ delete the insn that set it. */
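+
+/* For example, if INSN is a jump carrying a REG_DEAD note for (reg 66)
+   and the previous insn is
+
+	(set (reg 66) (plus (reg 65) (const_int 1)))
+
+   then deleting the jump deletes the addition as well, since the only
+   use of its result died with the jump.  */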
+
+static void
+delete_computation (insn)
+ rtx insn;
+{
+ rtx note, next;
+
+#ifdef HAVE_cc0
+ if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
+ {
+ rtx prev = prev_nonnote_insn (insn);
+ /* We assume that at this stage
+ CC's are always set explicitly
+ and always immediately before the jump that
+ will use them. So if the previous insn
+ exists to set the CC's, delete it
+ (unless it performs auto-increments, etc.). */
+ if (prev && GET_CODE (prev) == INSN
+ && sets_cc0_p (PATTERN (prev)))
+ {
+ if (sets_cc0_p (PATTERN (prev)) > 0
+ && !FIND_REG_INC_NOTE (prev, NULL_RTX))
+ delete_computation (prev);
+ else
+ /* Otherwise, show that cc0 won't be used. */
+ REG_NOTES (prev) = gen_rtx (EXPR_LIST, REG_UNUSED,
+ cc0_rtx, REG_NOTES (prev));
+ }
+ }
+#endif
+
+ for (note = REG_NOTES (insn); note; note = next)
+ {
+ rtx our_prev;
+
+ next = XEXP (note, 1);
+
+ if (REG_NOTE_KIND (note) != REG_DEAD
+ /* Verify that the REG_NOTE is legitimate. */
+ || GET_CODE (XEXP (note, 0)) != REG)
+ continue;
+
+ for (our_prev = prev_nonnote_insn (insn);
+ our_prev && GET_CODE (our_prev) == INSN;
+ our_prev = prev_nonnote_insn (our_prev))
+ {
+ /* If we reach a SEQUENCE, it is too complex to try to
+ do anything with it, so give up. */
+ if (GET_CODE (PATTERN (our_prev)) == SEQUENCE)
+ break;
+
+ if (GET_CODE (PATTERN (our_prev)) == USE
+ && GET_CODE (XEXP (PATTERN (our_prev), 0)) == INSN)
+ /* reorg creates USEs that look like this. We leave them
+ alone because reorg needs them for its own purposes. */
+ break;
+
+ if (reg_set_p (XEXP (note, 0), PATTERN (our_prev)))
+ {
+ if (FIND_REG_INC_NOTE (our_prev, NULL_RTX))
+ break;
+
+ if (GET_CODE (PATTERN (our_prev)) == PARALLEL)
+ {
+ /* If we find a SET of something else, we can't
+ delete the insn. */
+
+ int i;
+
+ for (i = 0; i < XVECLEN (PATTERN (our_prev), 0); i++)
+ {
+ rtx part = XVECEXP (PATTERN (our_prev), 0, i);
+
+ if (GET_CODE (part) == SET
+ && SET_DEST (part) != XEXP (note, 0))
+ break;
+ }
+
+ if (i == XVECLEN (PATTERN (our_prev), 0))
+ delete_computation (our_prev);
+ }
+ else if (GET_CODE (PATTERN (our_prev)) == SET
+ && SET_DEST (PATTERN (our_prev)) == XEXP (note, 0))
+ delete_computation (our_prev);
+
+ break;
+ }
+
+ /* If OUR_PREV references the register that dies here, it is an
+ additional use. Hence any prior SET isn't dead. However, this
+ insn becomes the new place for the REG_DEAD note. */
+ if (reg_overlap_mentioned_p (XEXP (note, 0),
+ PATTERN (our_prev)))
+ {
+ XEXP (note, 1) = REG_NOTES (our_prev);
+ REG_NOTES (our_prev) = note;
+ break;
+ }
+ }
+ }
+
+ delete_insn (insn);
+}
+
+/* Delete insn INSN from the chain of insns and update label ref counts.
+ May delete some following insns as a consequence; may even delete
+ a label elsewhere and insns that follow it.
+
+ Returns the first insn after INSN that was not deleted. */
+
+rtx
+delete_insn (insn)
+ register rtx insn;
+{
+ register rtx next = NEXT_INSN (insn);
+ register rtx prev = PREV_INSN (insn);
+ register int was_code_label = (GET_CODE (insn) == CODE_LABEL);
+ register int dont_really_delete = 0;
+
+ while (next && INSN_DELETED_P (next))
+ next = NEXT_INSN (next);
+
+ /* This insn is already deleted => return first following nondeleted. */
+ if (INSN_DELETED_P (insn))
+ return next;
+
+ /* Don't delete user-declared labels. Convert them to special NOTEs
+ instead. */
+ if (was_code_label && LABEL_NAME (insn) != 0
+ && optimize && ! dont_really_delete)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED_LABEL;
+ NOTE_SOURCE_FILE (insn) = 0;
+ dont_really_delete = 1;
+ }
+ else
+ /* Mark this insn as deleted. */
+ INSN_DELETED_P (insn) = 1;
+
+ /* If this is an unconditional jump, delete it from the jump chain. */
+ if (simplejump_p (insn))
+ delete_from_jump_chain (insn);
+
+ /* If instruction is followed by a barrier,
+ delete the barrier too. */
+
+ if (next != 0 && GET_CODE (next) == BARRIER)
+ {
+ INSN_DELETED_P (next) = 1;
+ next = NEXT_INSN (next);
+ }
+
+  /* Patch out INSN (and the barrier, if any).  */
+
+ if (optimize && ! dont_really_delete)
+ {
+ if (prev)
+ {
+ NEXT_INSN (prev) = next;
+ if (GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SEQUENCE)
+ NEXT_INSN (XVECEXP (PATTERN (prev), 0,
+ XVECLEN (PATTERN (prev), 0) - 1)) = next;
+ }
+
+ if (next)
+ {
+ PREV_INSN (next) = prev;
+ if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == SEQUENCE)
+ PREV_INSN (XVECEXP (PATTERN (next), 0, 0)) = prev;
+ }
+
+ if (prev && NEXT_INSN (prev) == 0)
+ set_last_insn (prev);
+ }
+
+ /* If deleting a jump, decrement the count of the label,
+ and delete the label if it is now unused. */
+
+ if (GET_CODE (insn) == JUMP_INSN && JUMP_LABEL (insn))
+ if (--LABEL_NUSES (JUMP_LABEL (insn)) == 0)
+ {
+ /* This can delete NEXT or PREV,
+ either directly if NEXT is JUMP_LABEL (INSN),
+ or indirectly through more levels of jumps. */
+ delete_insn (JUMP_LABEL (insn));
+ /* I feel a little doubtful about this loop,
+ but I see no clean and sure alternative way
+ to find the first insn after INSN that is not now deleted.
+ I hope this works. */
+ while (next && INSN_DELETED_P (next))
+ next = NEXT_INSN (next);
+ return next;
+ }
+
+ while (prev && (INSN_DELETED_P (prev) || GET_CODE (prev) == NOTE))
+ prev = PREV_INSN (prev);
+
+ /* If INSN was a label and a dispatch table follows it,
+ delete the dispatch table. The tablejump must have gone already.
+ It isn't useful to fall through into a table. */
+
+ if (was_code_label
+ && NEXT_INSN (insn) != 0
+ && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
+ && (GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_VEC
+ || GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_DIFF_VEC))
+ next = delete_insn (NEXT_INSN (insn));
+
+ /* If INSN was a label, delete insns following it if now unreachable. */
+
+ if (was_code_label && prev && GET_CODE (prev) == BARRIER)
+ {
+ register RTX_CODE code;
+ while (next != 0
+ && (GET_RTX_CLASS (code = GET_CODE (next)) == 'i'
+ || code == NOTE
+ || (code == CODE_LABEL && INSN_DELETED_P (next))))
+ {
+ if (code == NOTE
+ && NOTE_LINE_NUMBER (next) != NOTE_INSN_FUNCTION_END)
+ next = NEXT_INSN (next);
+ /* Keep going past other deleted labels to delete what follows. */
+ else if (code == CODE_LABEL && INSN_DELETED_P (next))
+ next = NEXT_INSN (next);
+ else
+ /* Note: if this deletes a jump, it can cause more
+ deletion of unreachable code, after a different label.
+ As long as the value from this recursive call is correct,
+ this invocation functions correctly. */
+ next = delete_insn (next);
+ }
+ }
+
+ return next;
+}
+
+/* Advance from INSN until reaching something not deleted;
+   then return that.  May return INSN itself.  */
+
+rtx
+next_nondeleted_insn (insn)
+ rtx insn;
+{
+ while (INSN_DELETED_P (insn))
+ insn = NEXT_INSN (insn);
+ return insn;
+}
+
+/* Delete a range of insns from FROM to TO, inclusive.
+ This is for the sake of peephole optimization, so assume
+ that whatever these insns do will still be done by a new
+ peephole insn that will replace them. */
+
+void
+delete_for_peephole (from, to)
+ register rtx from, to;
+{
+ register rtx insn = from;
+
+ while (1)
+ {
+ register rtx next = NEXT_INSN (insn);
+ register rtx prev = PREV_INSN (insn);
+
+ if (GET_CODE (insn) != NOTE)
+ {
+ INSN_DELETED_P (insn) = 1;
+
+ /* Patch this insn out of the chain. */
+ /* We don't do this all at once, because we
+ must preserve all NOTEs. */
+ if (prev)
+ NEXT_INSN (prev) = next;
+
+ if (next)
+ PREV_INSN (next) = prev;
+ }
+
+ if (insn == to)
+ break;
+ insn = next;
+ }
+
+ /* Note that if TO is an unconditional jump
+ we *do not* delete the BARRIER that follows,
+ since the peephole that replaces this sequence
+ is also an unconditional jump in that case. */
+}
+
+/* Invert the condition of the jump JUMP, and make it jump
+ to label NLABEL instead of where it jumps now. */
+
+int
+invert_jump (jump, nlabel)
+ rtx jump, nlabel;
+{
+ /* We have to either invert the condition and change the label or
+ do neither. Either operation could fail. We first try to invert
+ the jump. If that succeeds, we try changing the label. If that fails,
+ we invert the jump back to what it was. */
+
+ if (! invert_exp (PATTERN (jump), jump))
+ return 0;
+
+ if (redirect_jump (jump, nlabel))
+ return 1;
+
+ if (! invert_exp (PATTERN (jump), jump))
+ /* This should just be putting it back the way it was. */
+ abort ();
+
+ return 0;
+}
+
+/* Invert the jump condition of rtx X contained in jump insn, INSN.
+
+ Return 1 if we can do so, 0 if we cannot find a way to do so that
+ matches a pattern. */
+
+int
+invert_exp (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register RTX_CODE code;
+ register int i;
+ register char *fmt;
+
+ code = GET_CODE (x);
+
+ if (code == IF_THEN_ELSE)
+ {
+ register rtx comp = XEXP (x, 0);
+ register rtx tem;
+
+ /* We can do this in two ways: The preferable way, which can only
+ be done if this is not an integer comparison, is to reverse
+ the comparison code. Otherwise, swap the THEN-part and ELSE-part
+ of the IF_THEN_ELSE. If we can't do either, fail. */
+
+ if (can_reverse_comparison_p (comp, insn)
+ && validate_change (insn, &XEXP (x, 0),
+ gen_rtx (reverse_condition (GET_CODE (comp)),
+ GET_MODE (comp), XEXP (comp, 0),
+ XEXP (comp, 1)), 0))
+ return 1;
+
+ tem = XEXP (x, 1);
+ validate_change (insn, &XEXP (x, 1), XEXP (x, 2), 1);
+ validate_change (insn, &XEXP (x, 2), tem, 1);
+ return apply_change_group ();
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ if (! invert_exp (XEXP (x, i), insn))
+ return 0;
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (!invert_exp (XVECEXP (x, i, j), insn))
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/* Make jump JUMP jump to label NLABEL instead of where it jumps now.
+ If the old jump target label is unused as a result,
+ it and the code following it may be deleted.
+
+ If NLABEL is zero, we are to turn the jump into a (possibly conditional)
+ RETURN insn.
+
+ The return value will be 1 if the change was made, 0 if it wasn't (this
+ can only occur for NLABEL == 0). */
+
+int
+redirect_jump (jump, nlabel)
+ rtx jump, nlabel;
+{
+ register rtx olabel = JUMP_LABEL (jump);
+
+ if (nlabel == olabel)
+ return 1;
+
+ if (! redirect_exp (&PATTERN (jump), olabel, nlabel, jump))
+ return 0;
+
+ /* If this is an unconditional branch, delete it from the jump_chain of
+ OLABEL and add it to the jump_chain of NLABEL (assuming both labels
+ have UID's in range and JUMP_CHAIN is valid). */
+ if (jump_chain && (simplejump_p (jump)
+ || GET_CODE (PATTERN (jump)) == RETURN))
+ {
+ int label_index = nlabel ? INSN_UID (nlabel) : 0;
+
+ delete_from_jump_chain (jump);
+ if (label_index < max_jump_chain
+ && INSN_UID (jump) < max_jump_chain)
+ {
+ jump_chain[INSN_UID (jump)] = jump_chain[label_index];
+ jump_chain[label_index] = jump;
+ }
+ }
+
+ JUMP_LABEL (jump) = nlabel;
+ if (nlabel)
+ ++LABEL_NUSES (nlabel);
+
+ if (olabel && --LABEL_NUSES (olabel) == 0)
+ delete_insn (olabel);
+
+ return 1;
+}
+
+/* Delete the instruction JUMP from any jump chain it might be on. */
+
+static void
+delete_from_jump_chain (jump)
+ rtx jump;
+{
+ int index;
+ rtx olabel = JUMP_LABEL (jump);
+
+ /* Handle unconditional jumps. */
+ if (jump_chain && olabel != 0
+ && INSN_UID (olabel) < max_jump_chain
+ && simplejump_p (jump))
+ index = INSN_UID (olabel);
+ /* Handle return insns. */
+ else if (jump_chain && GET_CODE (PATTERN (jump)) == RETURN)
+ index = 0;
+  else
+    return;
+
+ if (jump_chain[index] == jump)
+ jump_chain[index] = jump_chain[INSN_UID (jump)];
+ else
+ {
+ rtx insn;
+
+ for (insn = jump_chain[index];
+ insn != 0;
+ insn = jump_chain[INSN_UID (insn)])
+ if (jump_chain[INSN_UID (insn)] == jump)
+ {
+ jump_chain[INSN_UID (insn)] = jump_chain[INSN_UID (jump)];
+ break;
+ }
+ }
+}
+
+/* If NLABEL is nonzero, throughout the rtx at LOC,
+ alter (LABEL_REF OLABEL) to (LABEL_REF NLABEL). If OLABEL is
+ zero, alter (RETURN) to (LABEL_REF NLABEL).
+
+ If NLABEL is zero, alter (LABEL_REF OLABEL) to (RETURN) and check
+ validity with validate_change. Convert (set (pc) (label_ref olabel))
+ to (return).
+
+ Return 0 if we found a change we would like to make but it is invalid.
+ Otherwise, return 1. */
+
+int
+redirect_exp (loc, olabel, nlabel, insn)
+ rtx *loc;
+ rtx olabel, nlabel;
+ rtx insn;
+{
+ register rtx x = *loc;
+ register RTX_CODE code = GET_CODE (x);
+ register int i;
+ register char *fmt;
+
+ if (code == LABEL_REF)
+ {
+ if (XEXP (x, 0) == olabel)
+ {
+ if (nlabel)
+ XEXP (x, 0) = nlabel;
+ else
+ return validate_change (insn, loc, gen_rtx (RETURN, VOIDmode), 0);
+ return 1;
+ }
+ }
+ else if (code == RETURN && olabel == 0)
+ {
+ x = gen_rtx (LABEL_REF, VOIDmode, nlabel);
+ if (loc == &PATTERN (insn))
+ x = gen_rtx (SET, VOIDmode, pc_rtx, x);
+ return validate_change (insn, loc, x, 0);
+ }
+
+ if (code == SET && nlabel == 0 && SET_DEST (x) == pc_rtx
+ && GET_CODE (SET_SRC (x)) == LABEL_REF
+ && XEXP (SET_SRC (x), 0) == olabel)
+ return validate_change (insn, loc, gen_rtx (RETURN, VOIDmode), 0);
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ if (! redirect_exp (&XEXP (x, i), olabel, nlabel, insn))
+ return 0;
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (! redirect_exp (&XVECEXP (x, i, j), olabel, nlabel, insn))
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/* Make jump JUMP jump to label NLABEL, assuming it used to be a tablejump.
+
+ If the old jump target label (before the dispatch table) becomes unused,
+ it and the dispatch table may be deleted. In that case, find the insn
+   before the jump that references that label, and delete it and its
+   logical successors too.  */
+
+static void
+redirect_tablejump (jump, nlabel)
+ rtx jump, nlabel;
+{
+ register rtx olabel = JUMP_LABEL (jump);
+
+ /* Add this jump to the jump_chain of NLABEL. */
+ if (jump_chain && INSN_UID (nlabel) < max_jump_chain
+ && INSN_UID (jump) < max_jump_chain)
+ {
+ jump_chain[INSN_UID (jump)] = jump_chain[INSN_UID (nlabel)];
+ jump_chain[INSN_UID (nlabel)] = jump;
+ }
+
+ PATTERN (jump) = gen_jump (nlabel);
+ JUMP_LABEL (jump) = nlabel;
+ ++LABEL_NUSES (nlabel);
+ INSN_CODE (jump) = -1;
+
+ if (--LABEL_NUSES (olabel) == 0)
+ {
+ delete_labelref_insn (jump, olabel, 0);
+ delete_insn (olabel);
+ }
+}
+
+/* Find the insn referencing LABEL that is a logical predecessor of INSN.
+ If we found one, delete it and then delete this insn if DELETE_THIS is
+ non-zero. Return non-zero if INSN or a predecessor references LABEL. */
+
+static int
+delete_labelref_insn (insn, label, delete_this)
+ rtx insn, label;
+ int delete_this;
+{
+ int deleted = 0;
+ rtx link;
+
+ if (GET_CODE (insn) != NOTE
+ && reg_mentioned_p (label, PATTERN (insn)))
+ {
+ if (delete_this)
+ {
+ delete_insn (insn);
+ deleted = 1;
+ }
+ else
+ return 1;
+ }
+
+ for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
+ if (delete_labelref_insn (XEXP (link, 0), label, 1))
+ {
+ if (delete_this)
+ {
+ delete_insn (insn);
+ deleted = 1;
+ }
+ else
+ return 1;
+ }
+
+ return deleted;
+}
+
+/* Like rtx_equal_p except that it considers two REGs as equal
+ if they renumber to the same value and considers two commutative
+ operations to be the same if the order of the operands has been
+ reversed. */
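+
+/* For example, (plus:SI (reg 65) (reg 66)) matches
+   (plus:SI (reg 66) (reg 65)), and two REGs match if reg_renumber
+   maps them to the same hard register.  */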
+
+int
+rtx_renumbered_equal_p (x, y)
+ rtx x, y;
+{
+ register int i;
+ register RTX_CODE code = GET_CODE (x);
+ register char *fmt;
+
+ if (x == y)
+ return 1;
+
+ if ((code == REG || (code == SUBREG && GET_CODE (SUBREG_REG (x)) == REG))
+ && (GET_CODE (y) == REG || (GET_CODE (y) == SUBREG
+ && GET_CODE (SUBREG_REG (y)) == REG)))
+ {
+ int reg_x = -1, reg_y = -1;
+ int word_x = 0, word_y = 0;
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ /* If we haven't done any renumbering, don't
+ make any assumptions. */
+ if (reg_renumber == 0)
+ return rtx_equal_p (x, y);
+
+ if (code == SUBREG)
+ {
+ reg_x = REGNO (SUBREG_REG (x));
+ word_x = SUBREG_WORD (x);
+
+ if (reg_renumber[reg_x] >= 0)
+ {
+ reg_x = reg_renumber[reg_x] + word_x;
+ word_x = 0;
+ }
+ }
+
+ else
+ {
+ reg_x = REGNO (x);
+ if (reg_renumber[reg_x] >= 0)
+ reg_x = reg_renumber[reg_x];
+ }
+
+ if (GET_CODE (y) == SUBREG)
+ {
+ reg_y = REGNO (SUBREG_REG (y));
+ word_y = SUBREG_WORD (y);
+
+ if (reg_renumber[reg_y] >= 0)
+ {
+ reg_y = reg_renumber[reg_y];
+ word_y = 0;
+ }
+ }
+
+ else
+ {
+ reg_y = REGNO (y);
+ if (reg_renumber[reg_y] >= 0)
+ reg_y = reg_renumber[reg_y];
+ }
+
+ return reg_x >= 0 && reg_x == reg_y && word_x == word_y;
+ }
+
+ /* Now we have disposed of all the cases
+ in which different rtx codes can match. */
+ if (code != GET_CODE (y))
+ return 0;
+
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return 0;
+
+ case CONST_INT:
+ return INTVAL (x) == INTVAL (y);
+
+ case LABEL_REF:
+ /* We can't assume nonlocal labels have their following insns yet. */
+ if (LABEL_REF_NONLOCAL_P (x) || LABEL_REF_NONLOCAL_P (y))
+ return XEXP (x, 0) == XEXP (y, 0);
+
+ /* Two label-refs are equivalent if they point at labels
+ in the same position in the instruction stream. */
+ return (next_real_insn (XEXP (x, 0))
+ == next_real_insn (XEXP (y, 0)));
+
+ case SYMBOL_REF:
+ return XSTR (x, 0) == XSTR (y, 0);
+ }
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+  /* For commutative operations, the RTXs match if the operands match in
+     either order.  Also handle the simple binary and unary cases without
+     a loop.  */
+ if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
+ return ((rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 0))
+ && rtx_renumbered_equal_p (XEXP (x, 1), XEXP (y, 1)))
+ || (rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 1))
+ && rtx_renumbered_equal_p (XEXP (x, 1), XEXP (y, 0))));
+ else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == '2')
+ return (rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 0))
+ && rtx_renumbered_equal_p (XEXP (x, 1), XEXP (y, 1)));
+ else if (GET_RTX_CLASS (code) == '1')
+ return rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 0));
+
+  /* Compare the elements.  If any pair of corresponding elements
+     fails to match, return 0 for the whole thing.  */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ register int j;
+ switch (fmt[i])
+ {
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 's':
+ if (strcmp (XSTR (x, i), XSTR (y, i)))
+ return 0;
+ break;
+
+ case 'e':
+ if (! rtx_renumbered_equal_p (XEXP (x, i), XEXP (y, i)))
+ return 0;
+ break;
+
+ case 'u':
+ if (XEXP (x, i) != XEXP (y, i))
+ return 0;
+ /* fall through. */
+ case '0':
+ break;
+
+ case 'E':
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (!rtx_renumbered_equal_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
+ return 0;
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ return 1;
+}
+
+/* If X is a hard register or equivalent to one or a subregister of one,
+ return the hard register number. If X is a pseudo register that was not
+ assigned a hard register, return the pseudo register number. Otherwise,
+ return -1. Any rtx is valid for X. */
+
+int
+true_regnum (x)
+ rtx x;
+{
+ if (GET_CODE (x) == REG)
+ {
+ if (REGNO (x) >= FIRST_PSEUDO_REGISTER && reg_renumber[REGNO (x)] >= 0)
+ return reg_renumber[REGNO (x)];
+ return REGNO (x);
+ }
+ if (GET_CODE (x) == SUBREG)
+ {
+ int base = true_regnum (SUBREG_REG (x));
+ if (base >= 0 && base < FIRST_PSEUDO_REGISTER)
+ return SUBREG_WORD (x) + base;
+ }
+ return -1;
+}
+
+/* Optimize code of the form:
+
+ for (x = a[i]; x; ...)
+ ...
+ for (x = a[i]; x; ...)
+ ...
+ foo:
+
+ Loop optimize will change the above code into
+
+ if (x = a[i])
+ for (;;)
+ { ...; if (! (x = ...)) break; }
+ if (x = a[i])
+ for (;;)
+ { ...; if (! (x = ...)) break; }
+ foo:
+
+ In general, if the first test fails, the program can branch
+ directly to `foo' and skip the second try which is doomed to fail.
+ We run this after loop optimization and before flow analysis. */
+
+/* When comparing the insn patterns, we track the fact that different
+ pseudo-register numbers may have been used in each computation.
+ The following array stores an equivalence -- same_regs[I] == J means
+ that pseudo register I was used in the first set of tests in a context
+   where J was used in the second set.  We also count the number of such
+   pending equivalences.  If that count is nonzero when we finish, the
+   expressions really aren't the same.  */
+
+static int *same_regs;
+
+static int num_same_regs;
+
+/* Track any registers modified between the target of the first jump and
+ the second jump. They never compare equal. */
+
+static char *modified_regs;
+
+/* Record if memory was modified. */
+
+static int modified_mem;
+
+/* Called via note_stores on each insn between the target of the first
+ branch and the second branch. It marks any changed registers. */
+
+static void
+mark_modified_reg (dest, x)
+ rtx dest;
+ rtx x;
+{
+ int regno, i;
+
+ if (GET_CODE (dest) == SUBREG)
+ dest = SUBREG_REG (dest);
+
+ if (GET_CODE (dest) == MEM)
+ modified_mem = 1;
+
+ if (GET_CODE (dest) != REG)
+ return;
+
+ regno = REGNO (dest);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ modified_regs[regno] = 1;
+ else
+ for (i = 0; i < HARD_REGNO_NREGS (regno, GET_MODE (dest)); i++)
+ modified_regs[regno + i] = 1;
+}
+
+/* F is the first insn in the chain of insns. */
+
+void
+thread_jumps (f, max_reg, flag_before_loop)
+ rtx f;
+ int max_reg;
+ int flag_before_loop;
+{
+ /* Basic algorithm is to find a conditional branch,
+ the label it may branch to, and the branch after
+ that label. If the two branches test the same condition,
+ walk back from both branch paths until the insn patterns
+ differ, or code labels are hit. If we make it back to
+ the target of the first branch, then we know that the first branch
+ will either always succeed or always fail depending on the relative
+ senses of the two branches. So adjust the first branch accordingly
+ in this case. */
+
+ rtx label, b1, b2, t1, t2;
+ enum rtx_code code1, code2;
+ rtx b1op0, b1op1, b2op0, b2op1;
+ int changed = 1;
+ int i;
+ int *all_reset;
+
+ /* Allocate register tables and quick-reset table. */
+ modified_regs = (char *) alloca (max_reg * sizeof (char));
+ same_regs = (int *) alloca (max_reg * sizeof (int));
+ all_reset = (int *) alloca (max_reg * sizeof (int));
+ for (i = 0; i < max_reg; i++)
+ all_reset[i] = -1;
+
+ while (changed)
+ {
+ changed = 0;
+
+ for (b1 = f; b1; b1 = NEXT_INSN (b1))
+ {
+ /* Get to a candidate branch insn. */
+ if (GET_CODE (b1) != JUMP_INSN
+ || ! condjump_p (b1) || simplejump_p (b1)
+ || JUMP_LABEL (b1) == 0)
+ continue;
+
+ bzero (modified_regs, max_reg * sizeof (char));
+ modified_mem = 0;
+
+ bcopy ((char *) all_reset, (char *) same_regs,
+ max_reg * sizeof (int));
+ num_same_regs = 0;
+
+ label = JUMP_LABEL (b1);
+
+ /* Look for a branch after the target. Record any registers and
+ memory modified between the target and the branch. Stop when we
+ get to a label since we can't know what was changed there. */
+ for (b2 = NEXT_INSN (label); b2; b2 = NEXT_INSN (b2))
+ {
+ if (GET_CODE (b2) == CODE_LABEL)
+ break;
+
+ else if (GET_CODE (b2) == JUMP_INSN)
+ {
+ /* If this is an unconditional jump and is the only use of
+ its target label, we can follow it. */
+ if (simplejump_p (b2)
+ && JUMP_LABEL (b2) != 0
+ && LABEL_NUSES (JUMP_LABEL (b2)) == 1)
+ {
+ b2 = JUMP_LABEL (b2);
+ continue;
+ }
+ else
+ break;
+ }
+
+ if (GET_CODE (b2) != CALL_INSN && GET_CODE (b2) != INSN)
+ continue;
+
+ if (GET_CODE (b2) == CALL_INSN)
+ {
+ modified_mem = 1;
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i] && ! fixed_regs[i]
+ && i != STACK_POINTER_REGNUM
+ && i != FRAME_POINTER_REGNUM
+ && i != HARD_FRAME_POINTER_REGNUM
+ && i != ARG_POINTER_REGNUM)
+ modified_regs[i] = 1;
+ }
+
+ note_stores (PATTERN (b2), mark_modified_reg);
+ }
+
+ /* Check the next candidate branch insn from the label
+ of the first. */
+ if (b2 == 0
+ || GET_CODE (b2) != JUMP_INSN
+ || b2 == b1
+ || ! condjump_p (b2)
+ || simplejump_p (b2))
+ continue;
+
+ /* Get the comparison codes and operands, reversing the
+ codes if appropriate. If we don't have comparison codes,
+ we can't do anything. */
+ b1op0 = XEXP (XEXP (SET_SRC (PATTERN (b1)), 0), 0);
+ b1op1 = XEXP (XEXP (SET_SRC (PATTERN (b1)), 0), 1);
+ code1 = GET_CODE (XEXP (SET_SRC (PATTERN (b1)), 0));
+ if (XEXP (SET_SRC (PATTERN (b1)), 1) == pc_rtx)
+ code1 = reverse_condition (code1);
+
+ b2op0 = XEXP (XEXP (SET_SRC (PATTERN (b2)), 0), 0);
+ b2op1 = XEXP (XEXP (SET_SRC (PATTERN (b2)), 0), 1);
+ code2 = GET_CODE (XEXP (SET_SRC (PATTERN (b2)), 0));
+ if (XEXP (SET_SRC (PATTERN (b2)), 1) == pc_rtx)
+ code2 = reverse_condition (code2);
+
+ /* If they test the same things and knowing that B1 branches
+ tells us whether or not B2 branches, check if we
+ can thread the branch. */
+ if (rtx_equal_for_thread_p (b1op0, b2op0, b2)
+ && rtx_equal_for_thread_p (b1op1, b2op1, b2)
+ && (comparison_dominates_p (code1, code2)
+ || comparison_dominates_p (code1, reverse_condition (code2))))
+ {
+ t1 = prev_nonnote_insn (b1);
+ t2 = prev_nonnote_insn (b2);
+
+ while (t1 != 0 && t2 != 0)
+ {
+ if (t2 == label)
+ {
+ /* We have reached the target of the first branch.
+ If there are no pending register equivalents,
+ we know that this branch will either always
+ succeed (if the senses of the two branches are
+ the same) or always fail (if not). */
+ rtx new_label;
+
+ if (num_same_regs != 0)
+ break;
+
+ if (comparison_dominates_p (code1, code2))
+ new_label = JUMP_LABEL (b2);
+ else
+ new_label = get_label_after (b2);
+
+ if (JUMP_LABEL (b1) != new_label)
+ {
+ rtx prev = PREV_INSN (new_label);
+
+ if (flag_before_loop
+ && NOTE_LINE_NUMBER (prev) == NOTE_INSN_LOOP_BEG)
+ {
+ /* Don't thread to the loop label. If a loop
+ label is reused, loop optimization will
+ be disabled for that loop. */
+ new_label = gen_label_rtx ();
+ emit_label_after (new_label, PREV_INSN (prev));
+ }
+ changed |= redirect_jump (b1, new_label);
+ }
+ break;
+ }
+
+ /* If either of these is not a normal insn (it might be
+ a JUMP_INSN, CALL_INSN, or CODE_LABEL) we fail. (NOTEs
+ have already been skipped above.) Similarly, fail
+ if the insns are different. */
+ if (GET_CODE (t1) != INSN || GET_CODE (t2) != INSN
+ || recog_memoized (t1) != recog_memoized (t2)
+ || ! rtx_equal_for_thread_p (PATTERN (t1),
+ PATTERN (t2), t2))
+ break;
+
+ t1 = prev_nonnote_insn (t1);
+ t2 = prev_nonnote_insn (t2);
+ }
+ }
+ }
+ }
+}
+
+/* This is like RTX_EQUAL_P except that it knows about our handling of
+ possibly equivalent registers and knows to consider volatile and
+ modified objects as not equal.
+
+ YINSN is the insn containing Y. */
+
+int
+rtx_equal_for_thread_p (x, y, yinsn)
+ rtx x, y;
+ rtx yinsn;
+{
+ register int i;
+ register int j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ code = GET_CODE (x);
+ /* Rtx's of different codes cannot be equal. */
+ if (code != GET_CODE (y))
+ return 0;
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
+ (REG:SI x) and (REG:HI x) are NOT equivalent. */
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ /* For commutative operations, the RTXs match if the operands match in
+ either order. Also handle the simple binary and unary cases without a loop. */
+ if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
+ return ((rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn)
+ && rtx_equal_for_thread_p (XEXP (x, 1), XEXP (y, 1), yinsn))
+ || (rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 1), yinsn)
+ && rtx_equal_for_thread_p (XEXP (x, 1), XEXP (y, 0), yinsn)));
+ else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == '2')
+ return (rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn)
+ && rtx_equal_for_thread_p (XEXP (x, 1), XEXP (y, 1), yinsn));
+ else if (GET_RTX_CLASS (code) == '1')
+ return rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn);
+
+ /* Handle special-cases first. */
+ switch (code)
+ {
+ case REG:
+ if (REGNO (x) == REGNO (y) && ! modified_regs[REGNO (x)])
+ return 1;
+
+ /* If neither is a user variable nor a hard register, check for possible
+ equivalence. */
+ if (REG_USERVAR_P (x) || REG_USERVAR_P (y)
+ || REGNO (x) < FIRST_PSEUDO_REGISTER
+ || REGNO (y) < FIRST_PSEUDO_REGISTER)
+ return 0;
+
+ if (same_regs[REGNO (x)] == -1)
+ {
+ same_regs[REGNO (x)] = REGNO (y);
+ num_same_regs++;
+
+ /* If this is the first time we are seeing a register on the `Y'
+ side, see if it is the last use. If not, we can't thread the
+ jump, so mark it as not equivalent. */
+ if (regno_last_uid[REGNO (y)] != INSN_UID (yinsn))
+ return 0;
+
+ return 1;
+ }
+ else
+ return (same_regs[REGNO (x)] == REGNO (y));
+
+ break;
+
+ case MEM:
+ /* If memory modified or either volatile, not equivalent.
+ Else, check address. */
+ if (modified_mem || MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
+ return 0;
+
+ return rtx_equal_for_thread_p (XEXP (x, 0), XEXP (y, 0), yinsn);
+
+ case ASM_INPUT:
+ if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
+ return 0;
+
+ break;
+
+ case SET:
+ /* Cancel a pending `same_regs' if setting equivalenced registers.
+ Then process source. */
+ if (GET_CODE (SET_DEST (x)) == REG
+ && GET_CODE (SET_DEST (y)) == REG)
+ {
+ if (same_regs[REGNO (SET_DEST (x))] == REGNO (SET_DEST (y)))
+ {
+ same_regs[REGNO (SET_DEST (x))] = -1;
+ num_same_regs--;
+ }
+ else if (REGNO (SET_DEST (x)) != REGNO (SET_DEST (y)))
+ return 0;
+ }
+ else
+ if (rtx_equal_for_thread_p (SET_DEST (x), SET_DEST (y), yinsn) == 0)
+ return 0;
+
+ return rtx_equal_for_thread_p (SET_SRC (x), SET_SRC (y), yinsn);
+
+ case LABEL_REF:
+ return XEXP (x, 0) == XEXP (y, 0);
+
+ case SYMBOL_REF:
+ return XSTR (x, 0) == XSTR (y, 0);
+ }
+
+ if (x == y)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case 'n':
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'V':
+ case 'E':
+ /* Two vectors must have the same length. */
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+
+ /* And the corresponding elements must match. */
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (rtx_equal_for_thread_p (XVECEXP (x, i, j),
+ XVECEXP (y, i, j), yinsn) == 0)
+ return 0;
+ break;
+
+ case 'e':
+ if (rtx_equal_for_thread_p (XEXP (x, i), XEXP (y, i), yinsn) == 0)
+ return 0;
+ break;
+
+ case 'S':
+ case 's':
+ if (strcmp (XSTR (x, i), XSTR (y, i)))
+ return 0;
+ break;
+
+ case 'u':
+ /* These are just backpointers, so they don't matter. */
+ break;
+
+ case '0':
+ break;
+
+ /* It is believed that rtx's at this level will never
+ contain anything but integers and other rtx's,
+ except for within LABEL_REFs and SYMBOL_REFs. */
+ default:
+ abort ();
+ }
+ }
+ return 1;
+}
diff --git a/gnu/usr.bin/cc/cc_int/local-alloc.c b/gnu/usr.bin/cc/cc_int/local-alloc.c
new file mode 100644
index 0000000..3b2d81e
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/local-alloc.c
@@ -0,0 +1,2355 @@
+/* Allocate registers within a basic block, for GNU compiler.
+ Copyright (C) 1987, 1988, 1991, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Allocation of hard register numbers to pseudo registers is done in
+ two passes. In this pass we consider only regs that are born and
+ die once within one basic block. We do this one basic block at a
+ time. Then the next pass allocates the registers that remain.
+ Two passes are used because this pass uses methods that work only
+ on linear code, but that do a better job than the general methods
+ used in global_alloc, and more quickly too.
+
+ The assignments made are recorded in the vector reg_renumber
+ whose space is allocated here. The rtl code itself is not altered.
+
+ We assign each instruction in the basic block a number
+ which is its order from the beginning of the block.
+ Then we can represent the lifetime of a pseudo register with
+ a pair of numbers, and check for conflicts easily.
+ We can record the availability of hard registers with a
+ HARD_REG_SET for each instruction. The HARD_REG_SET
+ contains 0 or 1 for each hard reg.
+
+ To avoid register shuffling, we tie registers together when one
+ dies by being copied into another, or dies in an instruction that
+ does arithmetic to produce another. The tied registers are
+ allocated as one. Registers with different reg class preferences
+ can never be tied unless the class preferred by one is a subclass
+ of the one preferred by the other.
+
+ Tying is represented with "quantity numbers".
+ A non-tied register is given a new quantity number.
+ Tied registers have the same quantity number.
+
+ We make provision to exempt registers that are contained within the
+ block but can be tied to others that are not, so that global_alloc
+ could process them both and tie them then. But this is currently
+ disabled, since tying in global_alloc is not yet implemented. */
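+
+/* For example, in
+
+ r101 = r100 + r102 (r100 dies here)
+ ... r101 used, then dies ...
+
+ combine_regs can tie r100 and r101 into one quantity, so both get the
+ same hard register and no extra copy is needed. (Register numbers are
+ illustrative only.) */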
+
+#include <stdio.h>
+#include "config.h"
+#include "rtl.h"
+#include "flags.h"
+#include "basic-block.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "output.h"
+
+/* Pseudos allocated here cannot be reallocated by global.c if the hard
+ register is used as a spill register. So we don't allocate such pseudos
+ here if their preferred class is likely to be used by spills.
+
+ On most machines, the appropriate test is if the class has one
+ register, so we default to that. */
+
+#ifndef CLASS_LIKELY_SPILLED_P
+#define CLASS_LIKELY_SPILLED_P(CLASS) (reg_class_size[(int) (CLASS)] == 1)
+#endif
+
+/* Next quantity number available for allocation. */
+
+static int next_qty;
+
+/* All of the following vectors are indexed by quantity number. */
+
+/* Element Q is the hard reg number chosen for quantity Q,
+ or -1 if none was found. */
+
+static short *qty_phys_reg;
+
+/* We maintain two hard register sets that indicate suggested hard registers
+ for each quantity. The first, qty_phys_copy_sugg, contains hard registers
+ that are tied to the quantity by a simple copy. The second contains all
+ hard registers that are tied to the quantity via an arithmetic operation.
+
+ The former register set is given priority for allocation. This tends to
+ eliminate copy insns. */
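+
+/* E.g., a copy insn "r105 = r3" suggests hard reg 3 for r105's quantity
+ via qty_phys_copy_sugg; had r105 instead been set by "r105 = r3 + r104",
+ reg 3 would only land in qty_phys_sugg and be tried second. (Register
+ numbers are illustrative only.) */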
+
+/* Element Q is a set of hard registers that are suggested for quantity Q by
+ copy insns. */
+
+static HARD_REG_SET *qty_phys_copy_sugg;
+
+/* Element Q is a set of hard registers that are suggested for quantity Q by
+ arithmetic insns. */
+
+static HARD_REG_SET *qty_phys_sugg;
+
+/* Element Q is the number of suggested registers in qty_phys_copy_sugg. */
+
+static short *qty_phys_num_copy_sugg;
+
+/* Element Q is the number of suggested registers in qty_phys_sugg. */
+
+static short *qty_phys_num_sugg;
+
+/* Element Q is the number of refs to quantity Q. */
+
+static int *qty_n_refs;
+
+/* Element Q is a reg class contained in (smaller than) the
+ preferred classes of all the pseudo regs that are tied in quantity Q.
+ This is the preferred class for allocating that quantity. */
+
+static enum reg_class *qty_min_class;
+
+/* Insn number (counting from head of basic block)
+ where quantity Q was born. -1 if birth has not been recorded. */
+
+static int *qty_birth;
+
+/* Insn number (counting from head of basic block)
+ where quantity Q died. Due to the way tying is done,
+ and the fact that we consider in this pass only regs that die but once,
+ a quantity can die only once. Each quantity's life span
+ is a set of consecutive insns. -1 if death has not been recorded. */
+
+static int *qty_death;
+
+/* Number of words needed to hold the data in quantity Q.
+ This depends on its machine mode. It is used for these purposes:
+ 1. It is used in computing the relative importances of qtys,
+ which determines the order in which we look for regs for them.
+ 2. It is used in rules that prevent tying several registers of
+ different sizes in a way that is geometrically impossible
+ (see combine_regs). */
+
+static int *qty_size;
+
+/* This holds the mode of the registers that are tied to qty Q,
+ or VOIDmode if registers with differing modes are tied together. */
+
+static enum machine_mode *qty_mode;
+
+/* Number of times a reg tied to qty Q lives across a CALL_INSN. */
+
+static int *qty_n_calls_crossed;
+
+/* Register class within which we allocate qty Q if we can't get
+ its preferred class. */
+
+static enum reg_class *qty_alternate_class;
+
+/* Element Q is the SCRATCH expression for which this quantity is being
+ allocated, or 0 if this quantity is for pseudo registers. */
+
+static rtx *qty_scratch_rtx;
+
+/* Element Q is the register number of one pseudo register whose
+ reg_qty value is Q, or -1 if this quantity is for a SCRATCH. This
+ register should be the head of the chain maintained in reg_next_in_qty. */
+
+static int *qty_first_reg;
+
+/* If (REG N) has been assigned a quantity number, element N is the
+ register number of another register assigned the same quantity number,
+ or -1 for the end of the chain. qty_first_reg points to the head of
+ this chain. */
+
+static int *reg_next_in_qty;
+
+/* reg_qty[N] (where N is a pseudo reg number) is the qty number of that reg
+ if it is >= 0,
+ or -1 if this register cannot be allocated by local-alloc,
+ or -2 if not known yet.
+
+ Note that if we see a use or death of pseudo register N with
+ reg_qty[N] == -2, register N must be local to the current block. If
+ it were used in more than one block, we would have reg_qty[N] == -1.
+ This relies on the fact that if reg_basic_block[N] is >= 0, register N
+ will not appear in any other block. We save a considerable number of
+ tests by exploiting this.
+
+ If N is < FIRST_PSEUDO_REGISTER, reg_qty[N] is undefined and should not
+ be referenced. */
+
+static int *reg_qty;
+
+/* The offset (in words) of register N within its quantity.
+ This can be nonzero if register N is SImode, and has been tied
+ to a subreg of a DImode register. */
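+
+/* E.g., tying (reg:SI 101) to (subreg:SI (reg:DI 100) 1) gives
+ reg_offset[101] == 1, so r101 is assigned the second word of the
+ quantity's hard registers. (Register numbers are illustrative only.) */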
+
+static char *reg_offset;
+
+/* Vector of substitutions of register numbers,
+ used to map pseudo regs into hardware regs.
+ This is set up as a result of register allocation.
+ Element N is the hard reg assigned to pseudo reg N,
+ or is -1 if no hard reg was assigned.
+ If N is a hard reg number, element N is N. */
+
+short *reg_renumber;
+
+/* Set of hard registers live at the current point in the scan
+ of the instructions in a basic block. */
+
+static HARD_REG_SET regs_live;
+
+/* Each set of hard registers indicates registers live at a particular
+ point in the basic block. For N even, regs_live_at[N] says which
+ hard registers are needed *after* insn N/2 (i.e., they may not
+ conflict with the outputs of insn N/2 or the inputs of insn N/2 + 1).
+
+ If an object is to conflict with the inputs of insn J but not the
+ outputs of insn J - 1, we say it is born at index J*2 - 1. Similarly,
+ if it is to conflict with the outputs of insn J but not the inputs of
+ insn J + 1, it is said to die at index J*2 + 1. */
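+
+/* E.g., a SCRATCH in insn J is given birth index J*2 - 1 and death index
+ J*2 + 1 (see alloc_qty_for_scratch below), so it conflicts with both
+ the inputs and the outputs of insn J, but not with the outputs of
+ insn J - 1 or the inputs of insn J + 1. */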
+
+static HARD_REG_SET *regs_live_at;
+
+int *scratch_block;
+rtx *scratch_list;
+int scratch_list_length;
+static int scratch_index;
+
+/* Communicate local vars `insn_number' and `insn'
+ from `block_alloc' to `reg_is_set', `wipe_dead_reg', and `alloc_qty'. */
+static int this_insn_number;
+static rtx this_insn;
+
+static void alloc_qty PROTO((int, enum machine_mode, int, int));
+static void alloc_qty_for_scratch PROTO((rtx, int, rtx, int, int));
+static void validate_equiv_mem_from_store PROTO((rtx, rtx));
+static int validate_equiv_mem PROTO((rtx, rtx, rtx));
+static int memref_referenced_p PROTO((rtx, rtx));
+static int memref_used_between_p PROTO((rtx, rtx, rtx));
+static void optimize_reg_copy_1 PROTO((rtx, rtx, rtx));
+static void optimize_reg_copy_2 PROTO((rtx, rtx, rtx));
+static void update_equiv_regs PROTO((void));
+static void block_alloc PROTO((int));
+static int qty_sugg_compare PROTO((int, int));
+static int qty_sugg_compare_1 PROTO((int *, int *));
+static int qty_compare PROTO((int, int));
+static int qty_compare_1 PROTO((int *, int *));
+static int combine_regs PROTO((rtx, rtx, int, int, rtx, int));
+static int reg_meets_class_p PROTO((int, enum reg_class));
+static int reg_classes_overlap_p PROTO((enum reg_class, enum reg_class,
+ int));
+static void update_qty_class PROTO((int, int));
+static void reg_is_set PROTO((rtx, rtx));
+static void reg_is_born PROTO((rtx, int));
+static void wipe_dead_reg PROTO((rtx, int));
+static int find_free_reg PROTO((enum reg_class, enum machine_mode,
+ int, int, int, int, int));
+static void mark_life PROTO((int, enum machine_mode, int));
+static void post_mark_life PROTO((int, enum machine_mode, int, int, int));
+static int no_conflict_p PROTO((rtx, rtx, rtx));
+static int requires_inout PROTO((char *));
+
+/* Allocate a new quantity (new within current basic block)
+ for register number REGNO which is born at index BIRTH
+ within the block. MODE and SIZE are info on reg REGNO. */
+
+static void
+alloc_qty (regno, mode, size, birth)
+ int regno;
+ enum machine_mode mode;
+ int size, birth;
+{
+ register int qty = next_qty++;
+
+ reg_qty[regno] = qty;
+ reg_offset[regno] = 0;
+ reg_next_in_qty[regno] = -1;
+
+ qty_first_reg[qty] = regno;
+ qty_size[qty] = size;
+ qty_mode[qty] = mode;
+ qty_birth[qty] = birth;
+ qty_n_calls_crossed[qty] = reg_n_calls_crossed[regno];
+ qty_min_class[qty] = reg_preferred_class (regno);
+ qty_alternate_class[qty] = reg_alternate_class (regno);
+ qty_n_refs[qty] = reg_n_refs[regno];
+}
+
+/* Similar to `alloc_qty', but allocates a quantity for a SCRATCH rtx
+ used as operand N in INSN. We assume here that the SCRATCH is used in
+ a CLOBBER. */
+
+static void
+alloc_qty_for_scratch (scratch, n, insn, insn_code_num, insn_number)
+ rtx scratch;
+ int n;
+ rtx insn;
+ int insn_code_num, insn_number;
+{
+ register int qty;
+ enum reg_class class;
+ char *p, c;
+ int i;
+
+#ifdef REGISTER_CONSTRAINTS
+ /* If we haven't yet computed which alternative will be used, do so now.
+ Then set P to the constraints for that alternative. */
+ if (which_alternative == -1)
+ if (! constrain_operands (insn_code_num, 0))
+ return;
+
+ for (p = insn_operand_constraint[insn_code_num][n], i = 0;
+ *p && i < which_alternative; p++)
+ if (*p == ',')
+ i++;
+
+ /* Compute the class required for this SCRATCH. If we don't need a
+ register, the class will remain NO_REGS. If we guessed the alternative
+ number incorrectly, reload will fix things up for us. */
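+
+ /* E.g., with constraint string "=X,&r" and which_alternative == 1, P
+ now points at "&r": the '&' says nothing about the class and the 'r'
+ unions GENERAL_REGS into CLASS. (The constraint string is
+ illustrative only.) */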
+
+ class = NO_REGS;
+ while ((c = *p++) != '\0' && c != ',')
+ switch (c)
+ {
+ case '=': case '+': case '?':
+ case '#': case '&': case '!':
+ case '*': case '%':
+ case '0': case '1': case '2': case '3': case '4':
+ case 'm': case '<': case '>': case 'V': case 'o':
+ case 'E': case 'F': case 'G': case 'H':
+ case 's': case 'i': case 'n':
+ case 'I': case 'J': case 'K': case 'L':
+ case 'M': case 'N': case 'O': case 'P':
+#ifdef EXTRA_CONSTRAINT
+ case 'Q': case 'R': case 'S': case 'T': case 'U':
+#endif
+ case 'p':
+ /* These don't say anything we care about. */
+ break;
+
+ case 'X':
+ /* We don't need to allocate this SCRATCH. */
+ return;
+
+ case 'g': case 'r':
+ class = reg_class_subunion[(int) class][(int) GENERAL_REGS];
+ break;
+
+ default:
+ class
+ = reg_class_subunion[(int) class][(int) REG_CLASS_FROM_LETTER (c)];
+ break;
+ }
+
+ if (class == NO_REGS)
+ return;
+
+#else /* REGISTER_CONSTRAINTS */
+
+ class = GENERAL_REGS;
+#endif
+
+
+ qty = next_qty++;
+
+ qty_first_reg[qty] = -1;
+ qty_scratch_rtx[qty] = scratch;
+ qty_size[qty] = GET_MODE_SIZE (GET_MODE (scratch));
+ qty_mode[qty] = GET_MODE (scratch);
+ qty_birth[qty] = 2 * insn_number - 1;
+ qty_death[qty] = 2 * insn_number + 1;
+ qty_n_calls_crossed[qty] = 0;
+ qty_min_class[qty] = class;
+ qty_alternate_class[qty] = NO_REGS;
+ qty_n_refs[qty] = 1;
+}
+
+/* Main entry point of this file. */
+
+void
+local_alloc ()
+{
+ register int b, i;
+ int max_qty;
+
+ /* Leaf functions and non-leaf functions have different needs.
+ If defined, let the machine say what kind of ordering we
+ should use. */
+#ifdef ORDER_REGS_FOR_LOCAL_ALLOC
+ ORDER_REGS_FOR_LOCAL_ALLOC;
+#endif
+
+ /* Promote REG_EQUAL notes to REG_EQUIV notes and adjust status of affected
+ registers. */
+ update_equiv_regs ();
+
+ /* This sets the maximum number of quantities we can have. Quantity
+ numbers start at zero and we can have one for each pseudo plus the
+ number of SCRATCHes in the largest block, in the worst case. */
+ max_qty = (max_regno - FIRST_PSEUDO_REGISTER) + max_scratch;
+
+ /* Allocate vectors of temporary data.
+ See the declarations of these variables, above,
+ for what they mean. */
+
+ /* There can be up to MAX_SCRATCH * N_BASIC_BLOCKS SCRATCHes to allocate.
+ Instead of allocating this much memory from now until the end of
+ reload, only allocate space for MAX_QTY SCRATCHes. If there are more,
+ reload will allocate them. */
+
+ scratch_list_length = max_qty;
+ scratch_list = (rtx *) xmalloc (scratch_list_length * sizeof (rtx));
+ bzero ((char *) scratch_list, scratch_list_length * sizeof (rtx));
+ scratch_block = (int *) xmalloc (scratch_list_length * sizeof (int));
+ bzero ((char *) scratch_block, scratch_list_length * sizeof (int));
+ scratch_index = 0;
+
+ qty_phys_reg = (short *) alloca (max_qty * sizeof (short));
+ qty_phys_copy_sugg
+ = (HARD_REG_SET *) alloca (max_qty * sizeof (HARD_REG_SET));
+ qty_phys_num_copy_sugg = (short *) alloca (max_qty * sizeof (short));
+ qty_phys_sugg = (HARD_REG_SET *) alloca (max_qty * sizeof (HARD_REG_SET));
+ qty_phys_num_sugg = (short *) alloca (max_qty * sizeof (short));
+ qty_birth = (int *) alloca (max_qty * sizeof (int));
+ qty_death = (int *) alloca (max_qty * sizeof (int));
+ qty_scratch_rtx = (rtx *) alloca (max_qty * sizeof (rtx));
+ qty_first_reg = (int *) alloca (max_qty * sizeof (int));
+ qty_size = (int *) alloca (max_qty * sizeof (int));
+ qty_mode
+ = (enum machine_mode *) alloca (max_qty * sizeof (enum machine_mode));
+ qty_n_calls_crossed = (int *) alloca (max_qty * sizeof (int));
+ qty_min_class
+ = (enum reg_class *) alloca (max_qty * sizeof (enum reg_class));
+ qty_alternate_class
+ = (enum reg_class *) alloca (max_qty * sizeof (enum reg_class));
+ qty_n_refs = (int *) alloca (max_qty * sizeof (int));
+
+ reg_qty = (int *) alloca (max_regno * sizeof (int));
+ reg_offset = (char *) alloca (max_regno * sizeof (char));
+ reg_next_in_qty = (int *) alloca (max_regno * sizeof (int));
+
+ reg_renumber = (short *) oballoc (max_regno * sizeof (short));
+ for (i = 0; i < max_regno; i++)
+ reg_renumber[i] = -1;
+
+ /* Determine which pseudo-registers can be allocated by local-alloc.
+ In general, these are the registers used only in a single block and
+ which only die once. However, if a register's preferred class has only
+ a few entries, don't allocate this register here unless it is "preferred
+ or nothing" (its alternate class is NO_REGS), since retry_global_alloc
+ won't be able to move it to GENERAL_REGS if a reload register of this
+ class is needed.
+
+ We need not be concerned with which block actually uses the register
+ since we will never see it outside that block. */
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ {
+ if (reg_basic_block[i] >= 0 && reg_n_deaths[i] == 1
+ && (reg_alternate_class (i) == NO_REGS
+ || ! CLASS_LIKELY_SPILLED_P (reg_preferred_class (i))))
+ reg_qty[i] = -2;
+ else
+ reg_qty[i] = -1;
+ }
+
+ /* Force loop below to initialize entire quantity array. */
+ next_qty = max_qty;
+
+ /* Allocate each block's local registers, block by block. */
+
+ for (b = 0; b < n_basic_blocks; b++)
+ {
+ /* NEXT_QTY indicates which elements of the `qty_...'
+ vectors might need to be initialized because they were used
+ for the previous block; it is set to cover the entire array before
+ block 0. Initialize those, with an explicit loop if there are few,
+ else with bzero. Do not initialize vectors that are
+ explicitly set by `alloc_qty'. */
+
+ if (next_qty < 6)
+ {
+ for (i = 0; i < next_qty; i++)
+ {
+ qty_scratch_rtx[i] = 0;
+ CLEAR_HARD_REG_SET (qty_phys_copy_sugg[i]);
+ qty_phys_num_copy_sugg[i] = 0;
+ CLEAR_HARD_REG_SET (qty_phys_sugg[i]);
+ qty_phys_num_sugg[i] = 0;
+ }
+ }
+ else
+ {
+#define CLEAR(vector) \
+ bzero ((char *) (vector), (sizeof (*(vector))) * next_qty);
+
+ CLEAR (qty_scratch_rtx);
+ CLEAR (qty_phys_copy_sugg);
+ CLEAR (qty_phys_num_copy_sugg);
+ CLEAR (qty_phys_sugg);
+ CLEAR (qty_phys_num_sugg);
+ }
+
+ next_qty = 0;
+
+ block_alloc (b);
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ }
+}
+
+/* Depth of loops we are in while in update_equiv_regs. */
+static int loop_depth;
+
+/* Used for communication between the following two functions: contains
+ a MEM that we wish to ensure remains unchanged. */
+static rtx equiv_mem;
+
+/* Set nonzero if EQUIV_MEM is modified. */
+static int equiv_mem_modified;
+
+/* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified.
+ Called via note_stores. */
+
+static void
+validate_equiv_mem_from_store (dest, set)
+ rtx dest;
+ rtx set;
+{
+ if ((GET_CODE (dest) == REG
+ && reg_overlap_mentioned_p (dest, equiv_mem))
+ || (GET_CODE (dest) == MEM
+ && true_dependence (dest, equiv_mem)))
+ equiv_mem_modified = 1;
+}
+
+/* Verify that no store between START and the death of REG invalidates
+ MEMREF. MEMREF is invalidated by modifying a register used in MEMREF,
+ by storing into an overlapping memory location, or with a non-const
+ CALL_INSN.
+
+ Return 1 if MEMREF remains valid. */
+
+static int
+validate_equiv_mem (start, reg, memref)
+ rtx start;
+ rtx reg;
+ rtx memref;
+{
+ rtx insn;
+ rtx note;
+
+ equiv_mem = memref;
+ equiv_mem_modified = 0;
+
+ /* If the memory reference has side effects or is volatile, it isn't a
+ valid equivalence. */
+ if (side_effects_p (memref))
+ return 0;
+
+ for (insn = start; insn && ! equiv_mem_modified; insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ continue;
+
+ if (find_reg_note (insn, REG_DEAD, reg))
+ return 1;
+
+ if (GET_CODE (insn) == CALL_INSN && ! RTX_UNCHANGING_P (memref)
+ && ! CONST_CALL_P (insn))
+ return 0;
+
+ note_stores (PATTERN (insn), validate_equiv_mem_from_store);
+
+ /* If a register mentioned in MEMREF is modified via an
+ auto-increment, we lose the equivalence. Do the same if one
+ dies; although we could extend the life, it doesn't seem worth
+ the trouble. */
+
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if ((REG_NOTE_KIND (note) == REG_INC
+ || REG_NOTE_KIND (note) == REG_DEAD)
+ && GET_CODE (XEXP (note, 0)) == REG
+ && reg_overlap_mentioned_p (XEXP (note, 0), memref))
+ return 0;
+ }
+
+ return 0;
+}
+
+/* TRUE if X references a memory location that would be affected by a store
+ to MEMREF. */
+
+static int
+memref_referenced_p (memref, x)
+ rtx memref;
+ rtx x;
+{
+ int i, j;
+ char *fmt;
+ enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+ case CONST_INT:
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_DOUBLE:
+ case PC:
+ case CC0:
+ case HIGH:
+ case LO_SUM:
+ return 0;
+
+ case MEM:
+ if (true_dependence (memref, x))
+ return 1;
+ break;
+
+ case SET:
+ /* If we are setting a MEM, it doesn't count (its address does), but any
+ other SET_DEST that has a MEM in it is referencing the MEM. */
+ if (GET_CODE (SET_DEST (x)) == MEM)
+ {
+ if (memref_referenced_p (memref, XEXP (SET_DEST (x), 0)))
+ return 1;
+ }
+ else if (memref_referenced_p (memref, SET_DEST (x)))
+ return 1;
+
+ return memref_referenced_p (memref, SET_SRC (x));
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ switch (fmt[i])
+ {
+ case 'e':
+ if (memref_referenced_p (memref, XEXP (x, i)))
+ return 1;
+ break;
+ case 'E':
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (memref_referenced_p (memref, XVECEXP (x, i, j)))
+ return 1;
+ break;
+ }
+
+ return 0;
+}
+
+/* TRUE if some insn in the range (START, END] references a memory location
+ that would be affected by a store to MEMREF. */
+
+static int
+memref_used_between_p (memref, start, end)
+ rtx memref;
+ rtx start;
+ rtx end;
+{
+ rtx insn;
+
+ for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
+ insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && memref_referenced_p (memref, PATTERN (insn)))
+ return 1;
+
+ return 0;
+}
+
+/* INSN is a copy from SRC to DEST, both registers, and SRC does not die
+ in INSN.
+
+ Search forward to see if SRC dies before either it or DEST is modified,
+ but don't scan past the end of a basic block. If so, we can replace SRC
+ with DEST and let SRC die in INSN.
+
+ This will reduce the number of registers live in that range and may enable
+ DEST to be tied to SRC, thus often saving one register in addition to a
+ register-register copy. */
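+
+/* For example, given
+
+ INSN: r108 = r101 (r101 does not die here)
+ ... insns modifying neither r101 nor r108 ...
+ P: use of r101 (r101 dies here)
+
+ we rewrite the uses of r101 after INSN as r108 and move r101's death
+ note to INSN, shortening r101's life. (Register numbers are
+ illustrative only.) */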
+
+static void
+optimize_reg_copy_1 (insn, dest, src)
+ rtx insn;
+ rtx dest;
+ rtx src;
+{
+ rtx p, q;
+ rtx note;
+ rtx dest_death = 0;
+ int sregno = REGNO (src);
+ int dregno = REGNO (dest);
+
+ if (sregno == dregno
+#ifdef SMALL_REGISTER_CLASSES
+ /* We don't want to mess with hard regs if register classes are small. */
+ || sregno < FIRST_PSEUDO_REGISTER || dregno < FIRST_PSEUDO_REGISTER
+#endif
+ /* We don't see all updates to SP if they are in an auto-inc memory
+ reference, so we must disallow this optimization on them. */
+ || sregno == STACK_POINTER_REGNUM || dregno == STACK_POINTER_REGNUM)
+ return;
+
+ for (p = NEXT_INSN (insn); p; p = NEXT_INSN (p))
+ {
+ if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
+ || (GET_CODE (p) == NOTE
+ && (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)))
+ break;
+
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+
+ if (reg_set_p (src, p) || reg_set_p (dest, p)
+ /* Don't change a USE of a register. */
+ || (GET_CODE (PATTERN (p)) == USE
+ && reg_overlap_mentioned_p (src, XEXP (PATTERN (p), 0))))
+ break;
+
+ /* See if all of SRC dies in P. This test is slightly more
+ conservative than it needs to be. */
+ if ((note = find_regno_note (p, REG_DEAD, sregno)) != 0
+ && GET_MODE (XEXP (note, 0)) == GET_MODE (src))
+ {
+ int failed = 0;
+ int length = 0;
+ int d_length = 0;
+ int n_calls = 0;
+ int d_n_calls = 0;
+
+ /* We can do the optimization. Scan forward from INSN again,
+ replacing regs as we go. Set FAILED if a replacement can't
+ be done. In that case, we can't move the death note for SRC.
+ This should be rare. */
+
+ /* Scan the real insns from just after INSN up to and including P. */
+ for (q = next_real_insn (insn);
+ q != next_real_insn (p);
+ q = next_real_insn (q))
+ {
+ if (reg_overlap_mentioned_p (src, PATTERN (q)))
+ {
+ /* If SRC is a hard register, we might miss some
+ overlapping registers with validate_replace_rtx,
+ so we would have to undo it. We can't if DEST is
+ present in the insn, so fail in that combination
+ of cases. */
+ if (sregno < FIRST_PSEUDO_REGISTER
+ && reg_mentioned_p (dest, PATTERN (q)))
+ failed = 1;
+
+ /* Replace all uses and make sure that the register
+ isn't still present. */
+ else if (validate_replace_rtx (src, dest, q)
+ && (sregno >= FIRST_PSEUDO_REGISTER
+ || ! reg_overlap_mentioned_p (src,
+ PATTERN (q))))
+ {
+ /* We assume that a register is used exactly once per
+ insn in the updates below. If this is not correct,
+ no great harm is done. */
+ if (sregno >= FIRST_PSEUDO_REGISTER)
+ reg_n_refs[sregno] -= loop_depth;
+ if (dregno >= FIRST_PSEUDO_REGISTER)
+ reg_n_refs[dregno] += loop_depth;
+ }
+ else
+ {
+ validate_replace_rtx (dest, src, q);
+ failed = 1;
+ }
+ }
+
+ /* Count the insns and CALL_INSNs passed. If we passed the
+ death note of DEST, show increased live length. */
+ length++;
+ if (dest_death)
+ d_length++;
+
+ /* If the insn in which SRC dies is a CALL_INSN, don't count it
+ as a call that has been crossed. Otherwise, count it. */
+ if (q != p && GET_CODE (q) == CALL_INSN)
+ {
+ n_calls++;
+ if (dest_death)
+ d_n_calls++;
+ }
+
+ /* If DEST dies here, remove the death note and save it for
+ later. Make sure ALL of DEST dies here; again, this is
+ overly conservative. */
+ if (dest_death == 0
+ && (dest_death = find_regno_note (q, REG_DEAD, dregno)) != 0
+ && GET_MODE (XEXP (dest_death, 0)) == GET_MODE (dest))
+ remove_note (q, dest_death);
+ }
+
+ if (! failed)
+ {
+ if (sregno >= FIRST_PSEUDO_REGISTER)
+ {
+ reg_live_length[sregno] -= length;
+ /* reg_live_length is only an approximation after combine
+ if sched is not run, so make sure that we still have
+ a reasonable value. */
+ if (reg_live_length[sregno] < 2)
+ reg_live_length[sregno] = 2;
+ reg_n_calls_crossed[sregno] -= n_calls;
+ }
+
+ if (dregno >= FIRST_PSEUDO_REGISTER)
+ {
+ reg_live_length[dregno] += d_length;
+ reg_n_calls_crossed[dregno] += d_n_calls;
+ }
+
+ /* Move death note of SRC from P to INSN. */
+ remove_note (p, note);
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ }
+
+ /* Put death note of DEST on P if we saw it die. */
+ if (dest_death)
+ {
+ XEXP (dest_death, 1) = REG_NOTES (p);
+ REG_NOTES (p) = dest_death;
+ }
+
+ return;
+ }
+
+ /* If SRC is a hard register which is set or killed in some other
+ way, we can't do this optimization. */
+ else if (sregno < FIRST_PSEUDO_REGISTER
+ && dead_or_set_p (p, src))
+ break;
+ }
+}
+
+/* INSN is a copy of SRC to DEST, in which SRC dies. See if we now have
+ a sequence of insns that modify DEST followed by an insn that sets
+ SRC to DEST in which DEST dies, with no prior modification of DEST.
+ (There is no need to check if the insns in between actually modify
+ DEST. We should not have cases where DEST is not modified, but
+ the optimization is safe if no such modification is detected.)
+ In that case, we can replace all uses of DEST, starting with INSN and
+ ending with the set of SRC to DEST, with SRC. We do not do this
+ optimization if a CALL_INSN is crossed unless SRC already crosses a
+ call.
+
+ It is assumed that DEST and SRC are pseudos; it is too complicated to do
+ this for hard registers since the substitutions we may make might fail. */
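+
+/* For example, given
+
+ INSN: r105 = r100 (r100 dies here)
+ r105 = r105 + 1
+ P: r100 = r105 (r105 dies here)
+
+ every use of r105 from INSN through P is replaced by r100, turning
+ both copies into trivial self-copies. (Register numbers are
+ illustrative only.) */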
+
+static void
+optimize_reg_copy_2 (insn, dest, src)
+ rtx insn;
+ rtx dest;
+ rtx src;
+{
+ rtx p, q;
+ rtx set;
+ int sregno = REGNO (src);
+ int dregno = REGNO (dest);
+
+ for (p = NEXT_INSN (insn); p; p = NEXT_INSN (p))
+ {
+ if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
+ || (GET_CODE (p) == NOTE
+ && (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG
+ || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)))
+ break;
+
+ if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
+ continue;
+
+ set = single_set (p);
+ if (set && SET_SRC (set) == dest && SET_DEST (set) == src
+ && find_reg_note (p, REG_DEAD, dest))
+ {
+ /* We can do the optimization. Scan forward from INSN again,
+ replacing regs as we go. */
+
+ /* Scan the insns from INSN up to and including P. */
+ for (q = insn; q != NEXT_INSN (p); q = NEXT_INSN (q))
+ if (GET_RTX_CLASS (GET_CODE (q)) == 'i')
+ {
+ if (reg_mentioned_p (dest, PATTERN (q)))
+ {
+ PATTERN (q) = replace_rtx (PATTERN (q), dest, src);
+
+ /* We assume that a register is used exactly once per
+ insn in the updates below. If this is not correct,
+ no great harm is done. */
+ reg_n_refs[dregno] -= loop_depth;
+ reg_n_refs[sregno] += loop_depth;
+ }
+
+
+ if (GET_CODE (q) == CALL_INSN)
+ {
+ reg_n_calls_crossed[dregno]--;
+ reg_n_calls_crossed[sregno]++;
+ }
+ }
+
+ remove_note (p, find_reg_note (p, REG_DEAD, dest));
+ reg_n_deaths[dregno]--;
+ remove_note (insn, find_reg_note (insn, REG_DEAD, src));
+ reg_n_deaths[sregno]--;
+ return;
+ }
+
+ if (reg_set_p (src, p)
+ || (GET_CODE (p) == CALL_INSN && reg_n_calls_crossed[sregno] == 0))
+ break;
+ }
+}
+
+/* Find registers that are equivalent to a single value throughout the
+ compilation (either because they can be referenced in memory or are set once
+ from a single constant). Lower their priority for getting a hard register.
+
+ If such a register is only referenced once, try substituting its value
+ into the using insn. If it succeeds, we can eliminate the register
+ completely. */
+
+static void
+update_equiv_regs ()
+{
+ rtx *reg_equiv_init_insn = (rtx *) alloca (max_regno * sizeof (rtx));
+ rtx *reg_equiv_replacement = (rtx *) alloca (max_regno * sizeof (rtx));
+ rtx insn;
+
+ bzero ((char *) reg_equiv_init_insn, max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_replacement, max_regno * sizeof (rtx));
+
+ init_alias_analysis ();
+
+ loop_depth = 1;
+
+ /* Scan the insns and find which registers have equivalences. Do this
+ in a separate scan of the insns because (due to -fcse-follow-jumps)
+ a register can be set below its use. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ rtx note;
+ rtx set = single_set (insn);
+ rtx dest;
+ int regno;
+
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+
+ /* If this insn contains more (or fewer) than a single SET, ignore it. */
+ if (set == 0)
+ continue;
+
+ dest = SET_DEST (set);
+
+ /* If this sets a MEM to the contents of a REG that is only used
+ in a single basic block, see if the register is always equivalent
+ to that memory location and if moving the store from INSN to the
+ insn that set REG is safe. If so, put a REG_EQUIV note on the
+ initializing insn. */
+
+ if (GET_CODE (dest) == MEM && GET_CODE (SET_SRC (set)) == REG
+ && (regno = REGNO (SET_SRC (set))) >= FIRST_PSEUDO_REGISTER
+ && reg_basic_block[regno] >= 0
+ && reg_equiv_init_insn[regno] != 0
+ && validate_equiv_mem (reg_equiv_init_insn[regno], SET_SRC (set),
+ dest)
+ && ! memref_used_between_p (SET_DEST (set),
+ reg_equiv_init_insn[regno], insn))
+ REG_NOTES (reg_equiv_init_insn[regno])
+ = gen_rtx (EXPR_LIST, REG_EQUIV, dest,
+ REG_NOTES (reg_equiv_init_insn[regno]));
+
+ /* If this is a register-register copy where SRC is not dead, see if we
+ can optimize it. */
+ if (flag_expensive_optimizations && GET_CODE (dest) == REG
+ && GET_CODE (SET_SRC (set)) == REG
+ && ! find_reg_note (insn, REG_DEAD, SET_SRC (set)))
+ optimize_reg_copy_1 (insn, dest, SET_SRC (set));
+
+ /* Similarly for a pseudo-pseudo copy when SRC is dead. */
+ else if (flag_expensive_optimizations && GET_CODE (dest) == REG
+ && REGNO (dest) >= FIRST_PSEUDO_REGISTER
+ && GET_CODE (SET_SRC (set)) == REG
+ && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
+ && find_reg_note (insn, REG_DEAD, SET_SRC (set)))
+ optimize_reg_copy_2 (insn, dest, SET_SRC (set));
+
+ /* Otherwise, we only handle the case of a pseudo register being set
+ once. */
+ if (GET_CODE (dest) != REG
+ || (regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER
+ || reg_n_sets[regno] != 1)
+ continue;
+
+ note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+
+ /* Record this insn as initializing this register. */
+ reg_equiv_init_insn[regno] = insn;
+
+ /* If this register is known to be equal to a constant, record that
+ it is always equivalent to the constant. */
+ if (note && CONSTANT_P (XEXP (note, 0)))
+ PUT_MODE (note, (enum machine_mode) REG_EQUIV);
+
+ /* If this insn introduces a "constant" register, decrease the priority
+ of that register. Record this insn if the register is only used once
+ more and the equivalence value is the same as our source.
+
+ The latter condition is checked for two reasons: First, it is an
+ indication that it may be more efficient to actually emit the insn
+ as written (if no registers are available, reload will substitute
+ the equivalence). Secondly, it avoids problems with any registers
+ dying in this insn whose death notes would be missed.
+
+ If we don't have a REG_EQUIV note, see if this insn is loading
+ a register used only in one basic block from a MEM. If so, and the
+ MEM remains unchanged for the life of the register, add a REG_EQUIV
+ note. */
+
+ note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
+
+ if (note == 0 && reg_basic_block[regno] >= 0
+ && GET_CODE (SET_SRC (set)) == MEM
+ && validate_equiv_mem (insn, dest, SET_SRC (set)))
+ REG_NOTES (insn) = note = gen_rtx (EXPR_LIST, REG_EQUIV, SET_SRC (set),
+ REG_NOTES (insn));
+
+ /* Don't mess with things live during setjmp. */
+ if (note && reg_live_length[regno] >= 0)
+ {
+ int regno = REGNO (dest);
+
+ /* Note that the statement below does not affect the priority
+ in local-alloc! */
+ reg_live_length[regno] *= 2;
+
+ /* If the register is referenced exactly twice, meaning it is set
+ once and used once, indicate that the reference may be replaced
+ by the equivalence we computed above. If the register is only
+ used in one basic block, this can't succeed or combine would
+ have done it.
+
+ It would be nice to use "loop_depth * 2" in the compare
+ below. Unfortunately, LOOP_DEPTH need not be constant within
+ a basic block so this would be too complicated.
+
+ This case normally occurs when a parameter is read from memory
+ and then used exactly once, not in a loop. */
+
+ if (reg_n_refs[regno] == 2
+ && reg_basic_block[regno] < 0
+ && rtx_equal_p (XEXP (note, 0), SET_SRC (set)))
+ reg_equiv_replacement[regno] = SET_SRC (set);
+ }
+ }
+
+ /* Now scan all regs killed in an insn to see if any of them are
+ registers only used once. If so, see if we can replace the reference
+ with the equivalent form. If we can, delete the initializing reference
+ and this register will go away. */
+ for (insn = next_active_insn (get_insns ());
+ insn;
+ insn = next_active_insn (insn))
+ {
+ rtx link;
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_DEAD
+ /* Make sure this insn still refers to the register. */
+ && reg_mentioned_p (XEXP (link, 0), PATTERN (insn)))
+ {
+ int regno = REGNO (XEXP (link, 0));
+
+ if (reg_equiv_replacement[regno]
+ && validate_replace_rtx (regno_reg_rtx[regno],
+ reg_equiv_replacement[regno], insn))
+ {
+ rtx equiv_insn = reg_equiv_init_insn[regno];
+
+ remove_death (regno, insn);
+ reg_n_refs[regno] = 0;
+ PUT_CODE (equiv_insn, NOTE);
+ NOTE_LINE_NUMBER (equiv_insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (equiv_insn) = 0;
+ }
+ }
+ }
+}
+
+/* Allocate hard regs to the pseudo regs used only within block number B.
+ Only the pseudos that die but once can be handled. */
+
+static void
+block_alloc (b)
+ int b;
+{
+ register int i, q;
+ register rtx insn;
+ rtx note;
+ int insn_number = 0;
+ int insn_count = 0;
+ int max_uid = get_max_uid ();
+ int *qty_order;
+ int no_conflict_combined_regno = -1;
+ /* Counter to prevent allocating more SCRATCHes than can be stored
+ in SCRATCH_LIST. */
+ int scratches_allocated = scratch_index;
+
+ /* Count the instructions in the basic block. */
+
+ insn = basic_block_end[b];
+ while (1)
+ {
+ if (GET_CODE (insn) != NOTE)
+ if (++insn_count > max_uid)
+ abort ();
+ if (insn == basic_block_head[b])
+ break;
+ insn = PREV_INSN (insn);
+ }
+
+ /* +2 to leave room for a post_mark_life at the last insn and for
+ the birth of a CLOBBER in the first insn. */
+ regs_live_at = (HARD_REG_SET *) alloca ((2 * insn_count + 2)
+ * sizeof (HARD_REG_SET));
+ bzero ((char *) regs_live_at, (2 * insn_count + 2) * sizeof (HARD_REG_SET));
+
+ /* Initialize table of hardware registers currently live. */
+
+#ifdef HARD_REG_SET
+ regs_live = *basic_block_live_at_start[b];
+#else
+ COPY_HARD_REG_SET (regs_live, basic_block_live_at_start[b]);
+#endif
+
+ /* This loop scans the instructions of the basic block
+ and assigns quantities to registers.
+ It computes which registers to tie. */
+
+ insn = basic_block_head[b];
+ while (1)
+ {
+ register rtx body = PATTERN (insn);
+
+ if (GET_CODE (insn) != NOTE)
+ insn_number++;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ register rtx link, set;
+ register int win = 0;
+ register rtx r0, r1;
+ int combined_regno = -1;
+ int i;
+ int insn_code_number = recog_memoized (insn);
+
+ this_insn_number = insn_number;
+ this_insn = insn;
+
+ if (insn_code_number >= 0)
+ insn_extract (insn);
+ which_alternative = -1;
+
+ /* Is this insn suitable for tying two registers?
+ If so, try doing that.
+ Suitable insns are those with at least two operands and where
+ operand 0 is an output that is a register that is not
+ earlyclobber.
+
+ We can tie operand 0 with some operand that dies in this insn.
+ First look for operands that are required to be in the same
+ register as operand 0. If we find such, only try tying that
+ operand or one that can be put into that operand if the
+ operation is commutative. If we don't find an operand
+ that is required to be in the same register as operand 0,
+ we can tie with any operand.
+
+ Subregs in place of regs are also ok.
+
+ If tying is done, WIN is set nonzero. */
+
+ if (insn_code_number >= 0
+#ifdef REGISTER_CONSTRAINTS
+ && insn_n_operands[insn_code_number] > 1
+ && insn_operand_constraint[insn_code_number][0][0] == '='
+ && insn_operand_constraint[insn_code_number][0][1] != '&'
+#else
+ && GET_CODE (PATTERN (insn)) == SET
+ && rtx_equal_p (SET_DEST (PATTERN (insn)), recog_operand[0])
+#endif
+ )
+ {
+#ifdef REGISTER_CONSTRAINTS
+ /* If non-negative, is an operand that must match operand 0. */
+ int must_match_0 = -1;
+ /* Counts number of alternatives that require a match with
+ operand 0. */
+ int n_matching_alts = 0;
+
+ for (i = 1; i < insn_n_operands[insn_code_number]; i++)
+ {
+ char *p = insn_operand_constraint[insn_code_number][i];
+ int this_match = (requires_inout (p));
+
+ n_matching_alts += this_match;
+ if (this_match == insn_n_alternatives[insn_code_number])
+ must_match_0 = i;
+ }
+#endif
+
+ r0 = recog_operand[0];
+ for (i = 1; i < insn_n_operands[insn_code_number]; i++)
+ {
+#ifdef REGISTER_CONSTRAINTS
+ /* Skip this operand if we found an operand that
+ must match operand 0 and this operand isn't it
+ and can't be made to be it by commutativity. */
+
+ if (must_match_0 >= 0 && i != must_match_0
+ && ! (i == must_match_0 + 1
+ && insn_operand_constraint[insn_code_number][i-1][0] == '%')
+ && ! (i == must_match_0 - 1
+ && insn_operand_constraint[insn_code_number][i][0] == '%'))
+ continue;
+
+ /* Likewise if each alternative has some operand that
+ must match operand zero. In that case, skip any
+ operand that doesn't list operand 0 since we know that
+ the operand always conflicts with operand 0. We
+ ignore commutativity in this case to keep things simple. */
+ if (n_matching_alts == insn_n_alternatives[insn_code_number]
+ && (0 == requires_inout
+ (insn_operand_constraint[insn_code_number][i])))
+ continue;
+#endif
+
+ r1 = recog_operand[i];
+
+ /* If the operand is an address, find a register in it.
+ There may be more than one register, but we only try one
+ of them. */
+ if (
+#ifdef REGISTER_CONSTRAINTS
+ insn_operand_constraint[insn_code_number][i][0] == 'p'
+#else
+ insn_operand_address_p[insn_code_number][i]
+#endif
+ )
+ while (GET_CODE (r1) == PLUS || GET_CODE (r1) == MULT)
+ r1 = XEXP (r1, 0);
+
+ if (GET_CODE (r0) == REG || GET_CODE (r0) == SUBREG)
+ {
+ /* We have two priorities for hard register preferences.
+ If we have a move insn or an insn whose first input
+ can only be in the same register as the output, give
+ priority to an equivalence found from that insn. */
+ int may_save_copy
+ = ((SET_DEST (body) == r0 && SET_SRC (body) == r1)
+#ifdef REGISTER_CONSTRAINTS
+ || (r1 == recog_operand[i] && must_match_0 >= 0)
+#endif
+ );
+
+ if (GET_CODE (r1) == REG || GET_CODE (r1) == SUBREG)
+ win = combine_regs (r1, r0, may_save_copy,
+ insn_number, insn, 0);
+ }
+ }
+ }
+
+ /* Recognize an insn sequence with an ultimate result
+ which can safely overlap one of the inputs.
+ The sequence begins with a CLOBBER of its result,
+ and ends with an insn that copies the result to itself
+ and has a REG_EQUAL note for an equivalent formula.
+ That note indicates what the inputs are.
+ The result and the input can overlap if each insn in
+ the sequence either doesn't mention the input
+ or has a REG_NO_CONFLICT note to inhibit the conflict.
+
+ We do the combining test at the CLOBBER so that the
+ destination register won't have had a quantity number
+ assigned, since that would prevent combining. */
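+
+ /* Such a sequence typically looks like:
+
+ (clobber r105) [REG_LIBCALL -> last insn]
+ ... insns computing into r105, each with a REG_NO_CONFLICT note ...
+ r105 = r105 [REG_RETVAL, REG_EQUAL (plus:DI r100 r101)]
+
+ so here R0 is r105 and R1 can be r100 or r101. (Register numbers
+ are illustrative only.) */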
+
+ if (GET_CODE (PATTERN (insn)) == CLOBBER
+ && (r0 = XEXP (PATTERN (insn), 0),
+ GET_CODE (r0) == REG)
+ && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
+ && XEXP (link, 0) != 0
+ && GET_CODE (XEXP (link, 0)) == INSN
+ && (set = single_set (XEXP (link, 0))) != 0
+ && SET_DEST (set) == r0 && SET_SRC (set) == r0
+ && (note = find_reg_note (XEXP (link, 0), REG_EQUAL,
+ NULL_RTX)) != 0)
+ {
+ if (r1 = XEXP (note, 0), GET_CODE (r1) == REG
+ /* Check that we have such a sequence. */
+ && no_conflict_p (insn, r0, r1))
+ win = combine_regs (r1, r0, 1, insn_number, insn, 1);
+ else if (GET_RTX_FORMAT (GET_CODE (XEXP (note, 0)))[0] == 'e'
+ && (r1 = XEXP (XEXP (note, 0), 0),
+ GET_CODE (r1) == REG || GET_CODE (r1) == SUBREG)
+ && no_conflict_p (insn, r0, r1))
+ win = combine_regs (r1, r0, 0, insn_number, insn, 1);
+
+ /* Here we care if the operation to be computed is
+ commutative. */
+ else if ((GET_CODE (XEXP (note, 0)) == EQ
+ || GET_CODE (XEXP (note, 0)) == NE
+ || GET_RTX_CLASS (GET_CODE (XEXP (note, 0))) == 'c')
+ && (r1 = XEXP (XEXP (note, 0), 1),
+ (GET_CODE (r1) == REG || GET_CODE (r1) == SUBREG))
+ && no_conflict_p (insn, r0, r1))
+ win = combine_regs (r1, r0, 0, insn_number, insn, 1);
+
+ /* If we did combine something, show the register number
+ in question so that we know to ignore its death. */
+ if (win)
+ no_conflict_combined_regno = REGNO (r1);
+ }
+
+ /* If registers were just tied, set COMBINED_REGNO
+ to the number of the register used in this insn
+ that was tied to the register set in this insn.
+ This register's qty should not be "killed". */
+
+ if (win)
+ {
+ while (GET_CODE (r1) == SUBREG)
+ r1 = SUBREG_REG (r1);
+ combined_regno = REGNO (r1);
+ }
+
+ /* Mark the death of everything that dies in this instruction,
+ except for anything that was just combined. */
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_DEAD
+ && GET_CODE (XEXP (link, 0)) == REG
+ && combined_regno != REGNO (XEXP (link, 0))
+ && (no_conflict_combined_regno != REGNO (XEXP (link, 0))
+ || ! find_reg_note (insn, REG_NO_CONFLICT, XEXP (link, 0))))
+ wipe_dead_reg (XEXP (link, 0), 0);
+
+ /* Allocate qty numbers for all registers local to this block
+ that are born (set) in this instruction.
+ A pseudo that already has a qty is not changed. */
+
+ note_stores (PATTERN (insn), reg_is_set);
+
+ /* If anything is set in this insn and then unused, mark it as dying
+ after this insn, so it will conflict with our outputs. This
+ can't match with something that combined, and it doesn't matter
+ if it did. Do this after the calls to reg_is_set since these
+ die after, not during, the current insn. */
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_UNUSED
+ && GET_CODE (XEXP (link, 0)) == REG)
+ wipe_dead_reg (XEXP (link, 0), 1);
+
+ /* Allocate quantities for any SCRATCH operands of this insn. */
+
+ if (insn_code_number >= 0)
+ for (i = 0; i < insn_n_operands[insn_code_number]; i++)
+ if (GET_CODE (recog_operand[i]) == SCRATCH
+ && scratches_allocated++ < scratch_list_length)
+ alloc_qty_for_scratch (recog_operand[i], i, insn,
+ insn_code_number, insn_number);
+
+ /* If this is an insn that has a REG_RETVAL note pointing at a
+ CLOBBER insn, we have reached the end of a REG_NO_CONFLICT
+ block, so clear any register number that combined within it. */
+ if ((note = find_reg_note (insn, REG_RETVAL, NULL_RTX)) != 0
+ && GET_CODE (XEXP (note, 0)) == INSN
+ && GET_CODE (PATTERN (XEXP (note, 0))) == CLOBBER)
+ no_conflict_combined_regno = -1;
+ }
+
+ /* Set the registers live after INSN_NUMBER. Note that we never
+ record the registers live before the block's first insn, since no
+ pseudos we care about are live before that insn. */
+
+ IOR_HARD_REG_SET (regs_live_at[2 * insn_number], regs_live);
+ IOR_HARD_REG_SET (regs_live_at[2 * insn_number + 1], regs_live);
+
+ if (insn == basic_block_end[b])
+ break;
+
+ insn = NEXT_INSN (insn);
+ }
+
+ /* Now every register that is local to this basic block
+ should have been given a quantity, or else -1 meaning ignore it.
+ Every quantity should have a known birth and death.
+
+ Order the qtys so we assign them registers in order of the
+ number of suggested registers they need so we allocate those with
+ the most restrictive needs first. */
+
+ qty_order = (int *) alloca (next_qty * sizeof (int));
+ for (i = 0; i < next_qty; i++)
+ qty_order[i] = i;
+
+#define EXCHANGE(I1, I2) \
+ { i = qty_order[I1]; qty_order[I1] = qty_order[I2]; qty_order[I2] = i; }
+
+ switch (next_qty)
+ {
+ case 3:
+ /* Make qty_order[2] be the one to allocate last. */
+ if (qty_sugg_compare (0, 1) > 0)
+ EXCHANGE (0, 1);
+ if (qty_sugg_compare (1, 2) > 0)
+ EXCHANGE (2, 1);
+
+ /* ... Fall through ... */
+ case 2:
+ /* Put the best one to allocate in qty_order[0]. */
+ if (qty_sugg_compare (0, 1) > 0)
+ EXCHANGE (0, 1);
+
+ /* ... Fall through ... */
+
+ case 1:
+ case 0:
+ /* Nothing to do here. */
+ break;
+
+ default:
+ qsort (qty_order, next_qty, sizeof (int), qty_sugg_compare_1);
+ }
+
+ /* Try to put each quantity in a suggested physical register, if it has one.
+ This may cause registers to be allocated that otherwise wouldn't be, but
+ this seems acceptable in local allocation (unlike global allocation). */
+ for (i = 0; i < next_qty; i++)
+ {
+ q = qty_order[i];
+ if (qty_phys_num_sugg[q] != 0 || qty_phys_num_copy_sugg[q] != 0)
+ qty_phys_reg[q] = find_free_reg (qty_min_class[q], qty_mode[q], q,
+ 0, 1, qty_birth[q], qty_death[q]);
+ else
+ qty_phys_reg[q] = -1;
+ }
+
+ /* Order the qtys so we assign them registers in order of
+ decreasing length of life. Normally call qsort, but if we
+ have only a very small number of quantities, sort them ourselves. */
+
+ for (i = 0; i < next_qty; i++)
+ qty_order[i] = i;
+
+#define EXCHANGE(I1, I2) \
+ { i = qty_order[I1]; qty_order[I1] = qty_order[I2]; qty_order[I2] = i; }
+
+ switch (next_qty)
+ {
+ case 3:
+ /* Make qty_order[2] be the one to allocate last. */
+ if (qty_compare (0, 1) > 0)
+ EXCHANGE (0, 1);
+ if (qty_compare (1, 2) > 0)
+ EXCHANGE (2, 1);
+
+ /* ... Fall through ... */
+ case 2:
+ /* Put the best one to allocate in qty_order[0]. */
+ if (qty_compare (0, 1) > 0)
+ EXCHANGE (0, 1);
+
+ /* ... Fall through ... */
+
+ case 1:
+ case 0:
+ /* Nothing to do here. */
+ break;
+
+ default:
+ qsort (qty_order, next_qty, sizeof (int), qty_compare_1);
+ }
+
+ /* Now for each qty that is not a hardware register,
+ look for a hardware register to put it in.
+ First try the register class that is cheapest for this qty,
+ if there is more than one class. */
+
+ for (i = 0; i < next_qty; i++)
+ {
+ q = qty_order[i];
+ if (qty_phys_reg[q] < 0)
+ {
+ if (N_REG_CLASSES > 1)
+ {
+ qty_phys_reg[q] = find_free_reg (qty_min_class[q],
+ qty_mode[q], q, 0, 0,
+ qty_birth[q], qty_death[q]);
+ if (qty_phys_reg[q] >= 0)
+ continue;
+ }
+
+ if (qty_alternate_class[q] != NO_REGS)
+ qty_phys_reg[q] = find_free_reg (qty_alternate_class[q],
+ qty_mode[q], q, 0, 0,
+ qty_birth[q], qty_death[q]);
+ }
+ }
+
+ /* Now propagate the register assignments
+ to the pseudo regs belonging to the qtys. */
+
+ for (q = 0; q < next_qty; q++)
+ if (qty_phys_reg[q] >= 0)
+ {
+ for (i = qty_first_reg[q]; i >= 0; i = reg_next_in_qty[i])
+ reg_renumber[i] = qty_phys_reg[q] + reg_offset[i];
+ if (qty_scratch_rtx[q])
+ {
+ if (GET_CODE (qty_scratch_rtx[q]) == REG)
+ abort ();
+ PUT_CODE (qty_scratch_rtx[q], REG);
+ REGNO (qty_scratch_rtx[q]) = qty_phys_reg[q];
+
+ scratch_block[scratch_index] = b;
+ scratch_list[scratch_index++] = qty_scratch_rtx[q];
+
+ /* Must clear the USED field, because it will have been set by
+ copy_rtx_if_shared, but the leaf_register code expects that
+ it is zero in all REG rtx. copy_rtx_if_shared does not set the
+ used bit for REGs, but does for SCRATCHes. */
+ qty_scratch_rtx[q]->used = 0;
+ }
+ }
+}
+
+/* Compare two quantities' priority for getting real registers.
+ We give shorter-lived quantities higher priority.
+   Quantities with more references are also preferred, as are quantities
+   that require multiple registers.  This is identical to the
+   prioritization done by global-alloc.
+
+ We used to give preference to registers with *longer* lives, but using
+ the same algorithm in both local- and global-alloc can speed up execution
+ of some programs by as much as a factor of three! */
+
+static int
+qty_compare (q1, q2)
+ int q1, q2;
+{
+ /* Note that the quotient will never be bigger than
+ the value of floor_log2 times the maximum number of
+ times a register can occur in one insn (surely less than 100).
+ Multiplying this by 10000 can't overflow. */
+ register int pri1
+ = (((double) (floor_log2 (qty_n_refs[q1]) * qty_n_refs[q1] * qty_size[q1])
+ / (qty_death[q1] - qty_birth[q1]))
+ * 10000);
+ register int pri2
+ = (((double) (floor_log2 (qty_n_refs[q2]) * qty_n_refs[q2] * qty_size[q2])
+ / (qty_death[q2] - qty_birth[q2]))
+ * 10000);
+ return pri2 - pri1;
+}
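+
+/* A worked example of the formula above, with hypothetical numbers: a
+   qty with qty_n_refs == 4, qty_size == 1, born at 2 and dying at 10
+   gets pri = (floor_log2 (4) * 4 * 1) / (10 - 2) * 10000 = 10000, while
+   one with 2 refs, size 1 and a lifespan of 40 gets
+   (1 * 2 * 1) / 40 * 10000 = 500.  Since qty_compare returns
+   pri2 - pri1, the shorter-lived, more heavily referenced qty sorts
+   first and so is allocated first.  */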
+
+static int
+qty_compare_1 (q1, q2)
+ int *q1, *q2;
+{
+ register int tem;
+
+ /* Note that the quotient will never be bigger than
+ the value of floor_log2 times the maximum number of
+ times a register can occur in one insn (surely less than 100).
+ Multiplying this by 10000 can't overflow. */
+ register int pri1
+ = (((double) (floor_log2 (qty_n_refs[*q1]) * qty_n_refs[*q1]
+ * qty_size[*q1])
+ / (qty_death[*q1] - qty_birth[*q1]))
+ * 10000);
+ register int pri2
+ = (((double) (floor_log2 (qty_n_refs[*q2]) * qty_n_refs[*q2]
+ * qty_size[*q2])
+ / (qty_death[*q2] - qty_birth[*q2]))
+ * 10000);
+
+ tem = pri2 - pri1;
+ if (tem != 0) return tem;
+ /* If qtys are equally good, sort by qty number,
+ so that the results of qsort leave nothing to chance. */
+ return *q1 - *q2;
+}
+
+/* Compare two quantities' priority for getting real registers. This version
+ is called for quantities that have suggested hard registers. First priority
+ goes to quantities that have copy preferences, then to those that have
+ normal preferences. Within those groups, quantities with the lower
+   number of preferences have the highest priority.  Of those, we use the same
+ algorithm as above. */
+
+static int
+qty_sugg_compare (q1, q2)
+ int q1, q2;
+{
+ register int sugg1 = (qty_phys_num_copy_sugg[q1]
+ ? qty_phys_num_copy_sugg[q1]
+ : qty_phys_num_sugg[q1] * FIRST_PSEUDO_REGISTER);
+ register int sugg2 = (qty_phys_num_copy_sugg[q2]
+ ? qty_phys_num_copy_sugg[q2]
+ : qty_phys_num_sugg[q2] * FIRST_PSEUDO_REGISTER);
+ /* Note that the quotient will never be bigger than
+ the value of floor_log2 times the maximum number of
+ times a register can occur in one insn (surely less than 100).
+ Multiplying this by 10000 can't overflow. */
+ register int pri1
+ = (((double) (floor_log2 (qty_n_refs[q1]) * qty_n_refs[q1] * qty_size[q1])
+ / (qty_death[q1] - qty_birth[q1]))
+ * 10000);
+ register int pri2
+ = (((double) (floor_log2 (qty_n_refs[q2]) * qty_n_refs[q2] * qty_size[q2])
+ / (qty_death[q2] - qty_birth[q2]))
+ * 10000);
+
+ if (sugg1 != sugg2)
+ return sugg1 - sugg2;
+
+ return pri2 - pri1;
+}
+
+static int
+qty_sugg_compare_1 (q1, q2)
+ int *q1, *q2;
+{
+ register int sugg1 = (qty_phys_num_copy_sugg[*q1]
+ ? qty_phys_num_copy_sugg[*q1]
+ : qty_phys_num_sugg[*q1] * FIRST_PSEUDO_REGISTER);
+ register int sugg2 = (qty_phys_num_copy_sugg[*q2]
+ ? qty_phys_num_copy_sugg[*q2]
+ : qty_phys_num_sugg[*q2] * FIRST_PSEUDO_REGISTER);
+
+ /* Note that the quotient will never be bigger than
+ the value of floor_log2 times the maximum number of
+ times a register can occur in one insn (surely less than 100).
+ Multiplying this by 10000 can't overflow. */
+ register int pri1
+ = (((double) (floor_log2 (qty_n_refs[*q1]) * qty_n_refs[*q1]
+ * qty_size[*q1])
+ / (qty_death[*q1] - qty_birth[*q1]))
+ * 10000);
+ register int pri2
+ = (((double) (floor_log2 (qty_n_refs[*q2]) * qty_n_refs[*q2]
+ * qty_size[*q2])
+ / (qty_death[*q2] - qty_birth[*q2]))
+ * 10000);
+
+ if (sugg1 != sugg2)
+ return sugg1 - sugg2;
+
+ if (pri1 != pri2)
+ return pri2 - pri1;
+
+ /* If qtys are equally good, sort by qty number,
+ so that the results of qsort leave nothing to chance. */
+ return *q1 - *q2;
+}
+
+/* Attempt to combine the two registers (rtx's) USEDREG and SETREG.
+   Returns 1 if it has done so, or 0 if it cannot.
+
+   Combining registers means marking them as having the same quantity
+   and adjusting the offsets within the quantity if either of
+   them is a SUBREG.
+
+ We don't actually combine a hard reg with a pseudo; instead
+ we just record the hard reg as the suggestion for the pseudo's quantity.
+ If we really combined them, we could lose if the pseudo lives
+ across an insn that clobbers the hard reg (eg, movstr).
+
+ ALREADY_DEAD is non-zero if USEDREG is known to be dead even though
+ there is no REG_DEAD note on INSN. This occurs during the processing
+ of REG_NO_CONFLICT blocks.
+
+   MAY_SAVE_COPY is non-zero if this insn is simply copying USEDREG to
+ SETREG or if the input and output must share a register.
+ In that case, we record a hard reg suggestion in QTY_PHYS_COPY_SUGG.
+
+ There are elaborate checks for the validity of combining. */
+
+
+static int
+combine_regs (usedreg, setreg, may_save_copy, insn_number, insn, already_dead)
+ rtx usedreg, setreg;
+ int may_save_copy;
+ int insn_number;
+ rtx insn;
+ int already_dead;
+{
+ register int ureg, sreg;
+ register int offset = 0;
+ int usize, ssize;
+ register int sqty;
+
+ /* Determine the numbers and sizes of registers being used. If a subreg
+ is present that does not change the entire register, don't consider
+ this a copy insn. */
+
+ while (GET_CODE (usedreg) == SUBREG)
+ {
+ if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (usedreg))) > UNITS_PER_WORD)
+ may_save_copy = 0;
+ offset += SUBREG_WORD (usedreg);
+ usedreg = SUBREG_REG (usedreg);
+ }
+ if (GET_CODE (usedreg) != REG)
+ return 0;
+ ureg = REGNO (usedreg);
+ usize = REG_SIZE (usedreg);
+
+ while (GET_CODE (setreg) == SUBREG)
+ {
+ if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (setreg))) > UNITS_PER_WORD)
+ may_save_copy = 0;
+ offset -= SUBREG_WORD (setreg);
+ setreg = SUBREG_REG (setreg);
+ }
+ if (GET_CODE (setreg) != REG)
+ return 0;
+ sreg = REGNO (setreg);
+ ssize = REG_SIZE (setreg);
+
+ /* If UREG is a pseudo-register that hasn't already been assigned a
+ quantity number, it means that it is not local to this block or dies
+ more than once. In either event, we can't do anything with it. */
+ if ((ureg >= FIRST_PSEUDO_REGISTER && reg_qty[ureg] < 0)
+ /* Do not combine registers unless one fits within the other. */
+ || (offset > 0 && usize + offset > ssize)
+ || (offset < 0 && usize + offset < ssize)
+ /* Do not combine with a smaller already-assigned object
+ if that smaller object is already combined with something bigger. */
+ || (ssize > usize && ureg >= FIRST_PSEUDO_REGISTER
+ && usize < qty_size[reg_qty[ureg]])
+ /* Can't combine if SREG is not a register we can allocate. */
+ || (sreg >= FIRST_PSEUDO_REGISTER && reg_qty[sreg] == -1)
+ /* Don't combine with a pseudo mentioned in a REG_NO_CONFLICT note.
+ These have already been taken care of. This probably wouldn't
+ combine anyway, but don't take any chances. */
+ || (ureg >= FIRST_PSEUDO_REGISTER
+ && find_reg_note (insn, REG_NO_CONFLICT, usedreg))
+ /* Don't tie something to itself. In most cases it would make no
+ difference, but it would screw up if the reg being tied to itself
+ also dies in this insn. */
+ || ureg == sreg
+ /* Don't try to connect two different hardware registers. */
+ || (ureg < FIRST_PSEUDO_REGISTER && sreg < FIRST_PSEUDO_REGISTER)
+ /* Don't connect two different machine modes if they have different
+ implications as to which registers may be used. */
+ || !MODES_TIEABLE_P (GET_MODE (usedreg), GET_MODE (setreg)))
+ return 0;
+
+ /* Now, if UREG is a hard reg and SREG is a pseudo, record the hard reg in
+ qty_phys_sugg for the pseudo instead of tying them.
+
+ Return "failure" so that the lifespan of UREG is terminated here;
+ that way the two lifespans will be disjoint and nothing will prevent
+ the pseudo reg from being given this hard reg. */
+
+ if (ureg < FIRST_PSEUDO_REGISTER)
+ {
+ /* Allocate a quantity number so we have a place to put our
+ suggestions. */
+ if (reg_qty[sreg] == -2)
+ reg_is_born (setreg, 2 * insn_number);
+
+ if (reg_qty[sreg] >= 0)
+ {
+ if (may_save_copy
+ && ! TEST_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[sreg]], ureg))
+ {
+ SET_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[sreg]], ureg);
+ qty_phys_num_copy_sugg[reg_qty[sreg]]++;
+ }
+ else if (! TEST_HARD_REG_BIT (qty_phys_sugg[reg_qty[sreg]], ureg))
+ {
+ SET_HARD_REG_BIT (qty_phys_sugg[reg_qty[sreg]], ureg);
+ qty_phys_num_sugg[reg_qty[sreg]]++;
+ }
+ }
+ return 0;
+ }
+
+ /* Similarly for SREG a hard register and UREG a pseudo register. */
+
+ if (sreg < FIRST_PSEUDO_REGISTER)
+ {
+ if (may_save_copy
+ && ! TEST_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[ureg]], sreg))
+ {
+ SET_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[ureg]], sreg);
+ qty_phys_num_copy_sugg[reg_qty[ureg]]++;
+ }
+ else if (! TEST_HARD_REG_BIT (qty_phys_sugg[reg_qty[ureg]], sreg))
+ {
+ SET_HARD_REG_BIT (qty_phys_sugg[reg_qty[ureg]], sreg);
+ qty_phys_num_sugg[reg_qty[ureg]]++;
+ }
+ return 0;
+ }
+
+ /* At this point we know that SREG and UREG are both pseudos.
+ Do nothing if SREG already has a quantity or is a register that we
+ don't allocate. */
+ if (reg_qty[sreg] >= -1
+ /* If we are not going to let any regs live across calls,
+ don't tie a call-crossing reg to a non-call-crossing reg. */
+ || (current_function_has_nonlocal_label
+ && ((reg_n_calls_crossed[ureg] > 0)
+ != (reg_n_calls_crossed[sreg] > 0))))
+ return 0;
+
+ /* We don't already know about SREG, so tie it to UREG
+ if this is the last use of UREG, provided the classes they want
+ are compatible. */
+
+ if ((already_dead || find_regno_note (insn, REG_DEAD, ureg))
+ && reg_meets_class_p (sreg, qty_min_class[reg_qty[ureg]]))
+ {
+ /* Add SREG to UREG's quantity. */
+ sqty = reg_qty[ureg];
+ reg_qty[sreg] = sqty;
+ reg_offset[sreg] = reg_offset[ureg] + offset;
+ reg_next_in_qty[sreg] = qty_first_reg[sqty];
+ qty_first_reg[sqty] = sreg;
+
+ /* If SREG's reg class is smaller, set qty_min_class[SQTY]. */
+ update_qty_class (sqty, sreg);
+
+ /* Update info about quantity SQTY. */
+ qty_n_calls_crossed[sqty] += reg_n_calls_crossed[sreg];
+ qty_n_refs[sqty] += reg_n_refs[sreg];
+ if (usize < ssize)
+ {
+ register int i;
+
+ for (i = qty_first_reg[sqty]; i >= 0; i = reg_next_in_qty[i])
+ reg_offset[i] -= offset;
+
+ qty_size[sqty] = ssize;
+ qty_mode[sqty] = GET_MODE (setreg);
+ }
+ }
+ else
+ return 0;
+
+ return 1;
+}
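+
+/* A hypothetical example of a successful combination: given the insn
+   (set (reg 105) (reg 103)) where both regs are pseudos of compatible
+   classes and reg 103 dies in the insn, reg 105 is added to reg 103's
+   quantity.  Both pseudos then receive the same hard register, so the
+   copy becomes a move from a register to itself, which later passes can
+   delete.  */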
+
+/* Return 1 if the preferred class of REG allows it to be tied
+ to a quantity or register whose class is CLASS.
+ True if REG's reg class either contains or is contained in CLASS. */
+
+static int
+reg_meets_class_p (reg, class)
+ int reg;
+ enum reg_class class;
+{
+ register enum reg_class rclass = reg_preferred_class (reg);
+ return (reg_class_subset_p (rclass, class)
+ || reg_class_subset_p (class, rclass));
+}
+
+/* Return 1 if the two specified classes have registers in common.
+ If CALL_SAVED, then consider only call-saved registers. */
+
+static int
+reg_classes_overlap_p (c1, c2, call_saved)
+ register enum reg_class c1;
+ register enum reg_class c2;
+ int call_saved;
+{
+ HARD_REG_SET c;
+ int i;
+
+ COPY_HARD_REG_SET (c, reg_class_contents[(int) c1]);
+ AND_HARD_REG_SET (c, reg_class_contents[(int) c2]);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (c, i)
+ && (! call_saved || ! call_used_regs[i]))
+ return 1;
+
+ return 0;
+}
+
+/* Update the class of QTY assuming that REG is being tied to it. */
+
+static void
+update_qty_class (qty, reg)
+ int qty;
+ int reg;
+{
+ enum reg_class rclass = reg_preferred_class (reg);
+ if (reg_class_subset_p (rclass, qty_min_class[qty]))
+ qty_min_class[qty] = rclass;
+
+ rclass = reg_alternate_class (reg);
+ if (reg_class_subset_p (rclass, qty_alternate_class[qty]))
+ qty_alternate_class[qty] = rclass;
+}
+
+/* Handle something which alters the value of an rtx REG.
+
+ REG is whatever is set or clobbered. SETTER is the rtx that
+ is modifying the register.
+
+ If it is not really a register, we do nothing.
+ The file-global variables `this_insn' and `this_insn_number'
+ carry info from `block_alloc'. */
+
+static void
+reg_is_set (reg, setter)
+ rtx reg;
+ rtx setter;
+{
+ /* Note that note_stores will only pass us a SUBREG if it is a SUBREG of
+ a hard register. These may actually not exist any more. */
+
+ if (GET_CODE (reg) != SUBREG
+ && GET_CODE (reg) != REG)
+ return;
+
+ /* Mark this register as being born. If it is used in a CLOBBER, mark
+ it as being born halfway between the previous insn and this insn so that
+ it conflicts with our inputs but not the outputs of the previous insn. */
+
+ reg_is_born (reg, 2 * this_insn_number - (GET_CODE (setter) == CLOBBER));
+}
+
+/* Handle beginning of the life of register REG.
+ BIRTH is the index at which this is happening. */
+
+static void
+reg_is_born (reg, birth)
+ rtx reg;
+ int birth;
+{
+ register int regno;
+
+ if (GET_CODE (reg) == SUBREG)
+ regno = REGNO (SUBREG_REG (reg)) + SUBREG_WORD (reg);
+ else
+ regno = REGNO (reg);
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ mark_life (regno, GET_MODE (reg), 1);
+
+      /* If the register was to have been born earlier than the present
+ insn, mark it as live where it is actually born. */
+ if (birth < 2 * this_insn_number)
+ post_mark_life (regno, GET_MODE (reg), 1, birth, 2 * this_insn_number);
+ }
+ else
+ {
+ if (reg_qty[regno] == -2)
+ alloc_qty (regno, GET_MODE (reg), PSEUDO_REGNO_SIZE (regno), birth);
+
+ /* If this register has a quantity number, show that it isn't dead. */
+ if (reg_qty[regno] >= 0)
+ qty_death[reg_qty[regno]] = -1;
+ }
+}
+
+/* Record the death of REG in the current insn. If OUTPUT_P is non-zero,
+ REG is an output that is dying (i.e., it is never used), otherwise it
+ is an input (the normal case).
+ If OUTPUT_P is 1, then we extend the life past the end of this insn. */
+
+static void
+wipe_dead_reg (reg, output_p)
+ register rtx reg;
+ int output_p;
+{
+ register int regno = REGNO (reg);
+
+ /* If this insn has multiple results,
+ and the dead reg is used in one of the results,
+ extend its life to after this insn,
+ so it won't get allocated together with any other result of this insn. */
+ if (GET_CODE (PATTERN (this_insn)) == PARALLEL
+ && !single_set (this_insn))
+ {
+ int i;
+ for (i = XVECLEN (PATTERN (this_insn), 0) - 1; i >= 0; i--)
+ {
+ rtx set = XVECEXP (PATTERN (this_insn), 0, i);
+ if (GET_CODE (set) == SET
+ && GET_CODE (SET_DEST (set)) != REG
+ && !rtx_equal_p (reg, SET_DEST (set))
+ && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ output_p = 1;
+ }
+ }
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ mark_life (regno, GET_MODE (reg), 0);
+
+ /* If a hard register is dying as an output, mark it as in use at
+ the beginning of this insn (the above statement would cause this
+ not to happen). */
+ if (output_p)
+ post_mark_life (regno, GET_MODE (reg), 1,
+			 2 * this_insn_number, 2 * this_insn_number + 1);
+ }
+
+ else if (reg_qty[regno] >= 0)
+ qty_death[reg_qty[regno]] = 2 * this_insn_number + output_p;
+}
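+
+/* An illustration of the doubled insn-number timeline used above, for a
+   hypothetical insn number N: inputs of insn N die at time 2*N, while
+   an output that is never used (REG_UNUSED) dies at 2*N + 1, just after
+   the insn, so it still conflicts with the insn's other outputs.
+   Likewise, reg_is_set records an ordinary output as born at 2*N but a
+   CLOBBER as born at 2*N - 1, halfway between insn N-1 and insn N.  */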
+
+/* Find a contiguous block of hard regs in reg_class CLASS
+   big enough to hold something of machine-mode MODE
+   (but actually we test only the first reg of the block for holding MODE)
+   and that is still free between insn BORN_INDEX and insn DEAD_INDEX,
+   and return the number of the first of them.
+ Return -1 if such a block cannot be found.
+ If QTY crosses calls, insist on a register preserved by calls,
+ unless ACCEPT_CALL_CLOBBERED is nonzero.
+
+ If JUST_TRY_SUGGESTED is non-zero, only try to see if the suggested
+ register is available. If not, return -1. */
+
+static int
+find_free_reg (class, mode, qty, accept_call_clobbered, just_try_suggested,
+ born_index, dead_index)
+ enum reg_class class;
+ enum machine_mode mode;
+ int qty;
+ int accept_call_clobbered;
+ int just_try_suggested;
+ int born_index, dead_index;
+{
+ register int i, ins;
+#ifdef HARD_REG_SET
+ register /* Declare it register if it's a scalar. */
+#endif
+ HARD_REG_SET used, first_used;
+#ifdef ELIMINABLE_REGS
+ static struct {int from, to; } eliminables[] = ELIMINABLE_REGS;
+#endif
+
+ /* Validate our parameters. */
+ if (born_index < 0 || born_index > dead_index)
+ abort ();
+
+ /* Don't let a pseudo live in a reg across a function call
+ if we might get a nonlocal goto. */
+ if (current_function_has_nonlocal_label
+ && qty_n_calls_crossed[qty] > 0)
+ return -1;
+
+ if (accept_call_clobbered)
+ COPY_HARD_REG_SET (used, call_fixed_reg_set);
+ else if (qty_n_calls_crossed[qty] == 0)
+ COPY_HARD_REG_SET (used, fixed_reg_set);
+ else
+ COPY_HARD_REG_SET (used, call_used_reg_set);
+
+ for (ins = born_index; ins < dead_index; ins++)
+ IOR_HARD_REG_SET (used, regs_live_at[ins]);
+
+ IOR_COMPL_HARD_REG_SET (used, reg_class_contents[(int) class]);
+
+ /* Don't use the frame pointer reg in local-alloc even if
+ we may omit the frame pointer, because if we do that and then we
+ need a frame pointer, reload won't know how to move the pseudo
+ to another hard reg. It can move only regs made by global-alloc.
+
+ This is true of any register that can be eliminated. */
+#ifdef ELIMINABLE_REGS
+ for (i = 0; i < sizeof eliminables / sizeof eliminables[0]; i++)
+ SET_HARD_REG_BIT (used, eliminables[i].from);
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ /* If FRAME_POINTER_REGNUM is not a real register, then protect the one
+ that it might be eliminated into. */
+ SET_HARD_REG_BIT (used, HARD_FRAME_POINTER_REGNUM);
+#endif
+#else
+ SET_HARD_REG_BIT (used, FRAME_POINTER_REGNUM);
+#endif
+
+ /* Normally, the registers that can be used for the first register in
+ a multi-register quantity are the same as those that can be used for
+ subsequent registers. However, if just trying suggested registers,
+ restrict our consideration to them. If there are copy-suggested
+     registers, try them.  Otherwise, try the arithmetic-suggested
+ registers. */
+ COPY_HARD_REG_SET (first_used, used);
+
+ if (just_try_suggested)
+ {
+ if (qty_phys_num_copy_sugg[qty] != 0)
+ IOR_COMPL_HARD_REG_SET (first_used, qty_phys_copy_sugg[qty]);
+ else
+ IOR_COMPL_HARD_REG_SET (first_used, qty_phys_sugg[qty]);
+ }
+
+ /* If all registers are excluded, we can't do anything. */
+ GO_IF_HARD_REG_SUBSET (reg_class_contents[(int) ALL_REGS], first_used, fail);
+
+ /* If at least one would be suitable, test each hard reg. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+#ifdef REG_ALLOC_ORDER
+ int regno = reg_alloc_order[i];
+#else
+ int regno = i;
+#endif
+ if (! TEST_HARD_REG_BIT (first_used, regno)
+ && HARD_REGNO_MODE_OK (regno, mode))
+ {
+ register int j;
+ register int size1 = HARD_REGNO_NREGS (regno, mode);
+ for (j = 1; j < size1 && ! TEST_HARD_REG_BIT (used, regno + j); j++);
+ if (j == size1)
+ {
+ /* Mark that this register is in use between its birth and death
+ insns. */
+ post_mark_life (regno, mode, 1, born_index, dead_index);
+ return regno;
+ }
+#ifndef REG_ALLOC_ORDER
+ i += j; /* Skip starting points we know will lose */
+#endif
+ }
+ }
+
+ fail:
+
+  /* If we are just trying suggested registers, we have just tried copy-
+ suggested registers, and there are arithmetic-suggested registers,
+ try them. */
+
+  if (just_try_suggested && qty_phys_num_copy_sugg[qty] != 0
+      && qty_phys_num_sugg[qty] != 0)
+    {
+      /* Don't try the copy-suggested regs again.  */
+      qty_phys_num_copy_sugg[qty] = 0;
+      return find_free_reg (class, mode, qty, accept_call_clobbered, 1,
+			    born_index, dead_index);
+    }
+
+  /* If it would be profitable to allocate a call-clobbered register
+     and save and restore it around calls, do that.  We need not check
+     whether the current function has nonlocal labels, because we don't
+     put any pseudos that are live over calls in registers in that case.  */
+
+ if (! accept_call_clobbered
+ && flag_caller_saves
+ && ! just_try_suggested
+ && qty_n_calls_crossed[qty] != 0
+ && CALLER_SAVE_PROFITABLE (qty_n_refs[qty], qty_n_calls_crossed[qty]))
+ {
+ i = find_free_reg (class, mode, qty, 1, 0, born_index, dead_index);
+ if (i >= 0)
+ caller_save_needed = 1;
+ return i;
+ }
+ return -1;
+}
+
+/* Mark that REGNO with machine-mode MODE is live starting from the current
+ insn (if LIFE is non-zero) or dead starting at the current insn (if LIFE
+ is zero). */
+
+static void
+mark_life (regno, mode, life)
+ register int regno;
+ enum machine_mode mode;
+ int life;
+{
+ register int j = HARD_REGNO_NREGS (regno, mode);
+ if (life)
+ while (--j >= 0)
+ SET_HARD_REG_BIT (regs_live, regno + j);
+ else
+ while (--j >= 0)
+ CLEAR_HARD_REG_BIT (regs_live, regno + j);
+}
+
+/* Mark register number REGNO (with machine-mode MODE) as live (if LIFE
+ is non-zero) or dead (if LIFE is zero) from insn number BIRTH (inclusive)
+ to insn number DEATH (exclusive). */
+
+static void
+post_mark_life (regno, mode, life, birth, death)
+ int regno;
+ enum machine_mode mode;
+ int life, birth, death;
+{
+ register int j = HARD_REGNO_NREGS (regno, mode);
+#ifdef HARD_REG_SET
+ register /* Declare it register if it's a scalar. */
+#endif
+ HARD_REG_SET this_reg;
+
+ CLEAR_HARD_REG_SET (this_reg);
+ while (--j >= 0)
+ SET_HARD_REG_BIT (this_reg, regno + j);
+
+ if (life)
+ while (birth < death)
+ {
+ IOR_HARD_REG_SET (regs_live_at[birth], this_reg);
+ birth++;
+ }
+ else
+ while (birth < death)
+ {
+ AND_COMPL_HARD_REG_SET (regs_live_at[birth], this_reg);
+ birth++;
+ }
+}
+
+/* INSN is the CLOBBER insn that starts a REG_NO_CONFLICT block, R0
+ is the register being clobbered, and R1 is a register being used in
+ the equivalent expression.
+
+ If R1 dies in the block and has a REG_NO_CONFLICT note on every insn
+ in which it is used, return 1.
+
+ Otherwise, return 0. */
+
+static int
+no_conflict_p (insn, r0, r1)
+ rtx insn, r0, r1;
+{
+ int ok = 0;
+ rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
+ rtx p, last;
+
+ /* If R1 is a hard register, return 0 since we handle this case
+ when we scan the insns that actually use it. */
+
+ if (note == 0
+ || (GET_CODE (r1) == REG && REGNO (r1) < FIRST_PSEUDO_REGISTER)
+ || (GET_CODE (r1) == SUBREG && GET_CODE (SUBREG_REG (r1)) == REG
+ && REGNO (SUBREG_REG (r1)) < FIRST_PSEUDO_REGISTER))
+ return 0;
+
+ last = XEXP (note, 0);
+
+ for (p = NEXT_INSN (insn); p && p != last; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ {
+ if (find_reg_note (p, REG_DEAD, r1))
+ ok = 1;
+
+ if (reg_mentioned_p (r1, PATTERN (p))
+ && ! find_reg_note (p, REG_NO_CONFLICT, r1))
+ return 0;
+ }
+
+ return ok;
+}
+
+#ifdef REGISTER_CONSTRAINTS
+
+/* Return the number of alternatives for which the constraint string P
+ indicates that the operand must be equal to operand 0 and that no register
+ is acceptable. */
+
+static int
+requires_inout (p)
+ char *p;
+{
+ char c;
+ int found_zero = 0;
+ int reg_allowed = 0;
+ int num_matching_alts = 0;
+
+ while (c = *p++)
+ switch (c)
+ {
+ case '=': case '+': case '?':
+ case '#': case '&': case '!':
+ case '*': case '%':
+ case '1': case '2': case '3': case '4':
+ case 'm': case '<': case '>': case 'V': case 'o':
+ case 'E': case 'F': case 'G': case 'H':
+ case 's': case 'i': case 'n':
+ case 'I': case 'J': case 'K': case 'L':
+ case 'M': case 'N': case 'O': case 'P':
+#ifdef EXTRA_CONSTRAINT
+ case 'Q': case 'R': case 'S': case 'T': case 'U':
+#endif
+ case 'X':
+ /* These don't say anything we care about. */
+ break;
+
+ case ',':
+ if (found_zero && ! reg_allowed)
+ num_matching_alts++;
+
+ found_zero = reg_allowed = 0;
+ break;
+
+ case '0':
+ found_zero = 1;
+ break;
+
+ case 'p':
+ case 'g': case 'r':
+ default:
+ reg_allowed = 1;
+ break;
+ }
+
+ if (found_zero && ! reg_allowed)
+ num_matching_alts++;
+
+ return num_matching_alts;
+}
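+
+/* Hypothetical examples: for the constraint string "0", the single
+   alternative requires a match with operand 0 and allows no register,
+   so requires_inout returns 1; for "r,0" only the second alternative
+   qualifies, so it also returns 1; for "=r" it returns 0.  */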
+#endif /* REGISTER_CONSTRAINTS */
+
+void
+dump_local_alloc (file)
+ FILE *file;
+{
+ register int i;
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_renumber[i] != -1)
+ fprintf (file, ";; Register %d in %d.\n", i, reg_renumber[i]);
+}
diff --git a/gnu/usr.bin/cc/cc_int/loop.c b/gnu/usr.bin/cc/cc_int/loop.c
new file mode 100644
index 0000000..c6caefe
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/loop.c
@@ -0,0 +1,6587 @@
+/* Move constant computations out of loops.
+ Copyright (C) 1987, 88, 89, 91, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This is the loop optimization pass of the compiler.
+ It finds invariant computations within loops and moves them
+ to the beginning of the loop. Then it identifies basic and
+ general induction variables. Strength reduction is applied to the general
+ induction variables, and induction variable elimination is applied to
+ the basic induction variables.
+
+ It also finds cases where
+ a register is set within the loop by zero-extending a narrower value
+ and changes these to zero the entire register once before the loop
+ and merely copy the low part within the loop.
+
+   Most of the complexity is in heuristics to decide when it is
+   worthwhile to do these things.  */
+
+#include <stdio.h>
+#include "config.h"
+#include "rtl.h"
+#include "obstack.h"
+#include "expr.h"
+#include "insn-config.h"
+#include "insn-flags.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "recog.h"
+#include "flags.h"
+#include "real.h"
+#include "loop.h"
+
+/* Vector mapping INSN_UIDs to luids.
+   The luids are like uids but always increase monotonically.
+ We use them to see whether a jump comes from outside a given loop. */
+
+int *uid_luid;
+
+/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
+ number the insn is contained in. */
+
+int *uid_loop_num;
+
+/* 1 + largest uid of any insn. */
+
+int max_uid_for_loop;
+
+/* 1 + luid of last insn. */
+
+static int max_luid;
+
+/* Number of loops detected in current function. Used as index to the
+ next few tables. */
+
+static int max_loop_num;
+
+/* Indexed by loop number, contains the first and last insn of each loop. */
+
+static rtx *loop_number_loop_starts, *loop_number_loop_ends;
+
+/* For each loop, gives the containing loop number, -1 if none. */
+
+int *loop_outer_loop;
+
+/* Indexed by loop number, contains a nonzero value if the "loop" isn't
+ really a loop (an insn outside the loop branches into it). */
+
+static char *loop_invalid;
+
+/* Indexed by loop number, links together all LABEL_REFs which refer to
+ code labels outside the loop. Used by routines that need to know all
+ loop exits, such as final_biv_value and final_giv_value.
+
+ This does not include loop exits due to return instructions. This is
+ because all bivs and givs are pseudos, and hence must be dead after a
+   return, so the presence of a return does not affect any of the
+ optimizations that use this info. It is simpler to just not include return
+ instructions on this list. */
+
+rtx *loop_number_exit_labels;
+
+/* Holds the number of loop iterations. It is zero if the number could not be
+ calculated. Must be unsigned since the number of iterations can
+ be as high as 2^wordsize-1. For loops with a wider iterator, this number
+   will be zero if the number of loop iterations is too large for an
+ unsigned integer to hold. */
+
+unsigned HOST_WIDE_INT loop_n_iterations;
+
+/* Nonzero if there is a subroutine call in the current loop.
+ (unknown_address_altered is also nonzero in this case.) */
+
+static int loop_has_call;
+
+/* Nonzero if there is a volatile memory reference in the current
+ loop. */
+
+static int loop_has_volatile;
+
+/* loop_continue is the NOTE_INSN_LOOP_CONT of the current loop.
+   A continue statement will generate a branch to
+ NEXT_INSN (loop_continue). */
+
+static rtx loop_continue;
+
+/* Indexed by register number, contains the number of times the reg
+ is set during the loop being scanned.
+ During code motion, a negative value indicates a reg that has been
+   made a candidate; in particular -2 means that it is a candidate that
+   we know is equal to a constant and -1 means that it is a candidate
+ not known equal to a constant.
+ After code motion, regs moved have 0 (which is accurate now)
+ while the failed candidates have the original number of times set.
+
+ Therefore, at all times, == 0 indicates an invariant register;
+ < 0 a conditionally invariant one. */
+
+static short *n_times_set;
+
+/* Original value of n_times_set; same except that this value
+ is not set negative for a reg whose sets have been made candidates
+ and not set to 0 for a reg that is moved. */
+
+static short *n_times_used;
+
+/* Indexed by register number; 1 indicates that the register
+ cannot be moved or strength reduced. */
+
+static char *may_not_optimize;
+
+/* Nonzero means reg N has already been moved out of one loop.
+ This reduces the desire to move it out of another. */
+
+static char *moved_once;
+
+/* Array of MEMs that are stored in this loop. If there are too many to fit
+ here, we just turn on unknown_address_altered. */
+
+#define NUM_STORES 20
+static rtx loop_store_mems[NUM_STORES];
+
+/* Index of first available slot in above array. */
+static int loop_store_mems_idx;
+
+/* Nonzero if we don't know what MEMs were changed in the current loop.
+ This happens if the loop contains a call (in which case `loop_has_call'
+ will also be set) or if we store into more than NUM_STORES MEMs. */
+
+static int unknown_address_altered;
+
+/* Count of movable (i.e. invariant) instructions discovered in the loop. */
+static int num_movables;
+
+/* Count of memory write instructions discovered in the loop. */
+static int num_mem_sets;
+
+/* Number of loops contained within the current one, including itself. */
+static int loops_enclosed;
+
+/* Bound on pseudo register number before loop optimization.
+ A pseudo has valid regscan info if its number is < max_reg_before_loop. */
+int max_reg_before_loop;
+
+/* This obstack is used in product_cheap_p to allocate its rtl. It
+ may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
+ If we used the same obstack that it did, we would be deallocating
+ that array. */
+
+static struct obstack temp_obstack;
+
+/* This is where the pointer to the obstack being used for RTL is stored. */
+
+extern struct obstack *rtl_obstack;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+extern char *oballoc ();
+
+/* During the analysis of a loop, a chain of `struct movable's
+ is made to record all the movable insns found.
+ Then the entire chain can be scanned to decide which to move. */
+
+struct movable
+{
+ rtx insn; /* A movable insn */
+ rtx set_src; /* The expression this reg is set from. */
+ rtx set_dest; /* The destination of this SET. */
+ rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
+ of any registers used within the LIBCALL. */
+ int consec; /* Number of consecutive following insns
+ that must be moved with this one. */
+ int regno; /* The register it sets */
+ short lifetime; /* lifetime of that register;
+ may be adjusted when matching movables
+ that load the same value are found. */
+ short savings; /* Number of insns we can move for this reg,
+ including other movables that force this
+ or match this one. */
+ unsigned int cond : 1; /* 1 if only conditionally movable */
+ unsigned int force : 1; /* 1 means MUST move this insn */
+ unsigned int global : 1; /* 1 means reg is live outside this loop */
+ /* If PARTIAL is 1, GLOBAL means something different:
+ that the reg is live outside the range from where it is set
+ to the following label. */
+ unsigned int done : 1; /* 1 inhibits further processing of this */
+
+ unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
+ In particular, moving it does not make it
+ invariant. */
+ unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
+ load SRC, rather than copying INSN. */
+ unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
+ enum machine_mode savemode; /* Nonzero means it is a mode for a low part
+ that we should avoid changing when clearing
+ the rest of the reg. */
+ struct movable *match; /* First entry for same value */
+ struct movable *forces; /* An insn that must be moved if this is */
+ struct movable *next;
+};
+
+FILE *loop_dump_stream;
+
+/* Forward declarations. */
+
+static void find_and_verify_loops ();
+static void mark_loop_jump ();
+static void prescan_loop ();
+static int reg_in_basic_block_p ();
+static int consec_sets_invariant_p ();
+static rtx libcall_other_reg ();
+static int labels_in_range_p ();
+static void count_loop_regs_set ();
+static void note_addr_stored ();
+static int loop_reg_used_before_p ();
+static void scan_loop ();
+static void replace_call_address ();
+static rtx skip_consec_insns ();
+static int libcall_benefit ();
+static void ignore_some_movables ();
+static void force_movables ();
+static void combine_movables ();
+static int rtx_equal_for_loop_p ();
+static void move_movables ();
+static void strength_reduce ();
+static int valid_initial_value_p ();
+static void find_mem_givs ();
+static void record_biv ();
+static void check_final_value ();
+static void record_giv ();
+static void update_giv_derive ();
+static int basic_induction_var ();
+static rtx simplify_giv_expr ();
+static int general_induction_var ();
+static int consec_sets_giv ();
+static int check_dbra_loop ();
+static rtx express_from ();
+static int combine_givs_p ();
+static void combine_givs ();
+static int product_cheap_p ();
+static int maybe_eliminate_biv ();
+static int maybe_eliminate_biv_1 ();
+static int last_use_this_basic_block ();
+static void record_initial ();
+static void update_reg_last_use ();
+
+/* Relative gain of eliminating various kinds of operations. */
+int add_cost;
+#if 0
+int shift_cost;
+int mult_cost;
+#endif
+
+/* Benefit penalty if a giv is not replaceable, i.e. if we must emit an
+   insn to copy the value of the strength-reduced giv to its original
+   register.  */
+int copy_cost;
+
+void
+init_loop ()
+{
+ char *free_point = (char *) oballoc (1);
+ rtx reg = gen_rtx (REG, word_mode, 0);
+
+ add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);
+
+ /* We multiply by 2 to reconcile the difference in scale between
+ these two ways of computing costs. Otherwise the cost of a copy
+ will be far less than the cost of an add. */
+
+ copy_cost = 2 * 2;
+
+ /* Free the objects we just allocated. */
+ obfree (free_point);
+
+ /* Initialize the obstack used for rtl in product_cheap_p. */
+ gcc_obstack_init (&temp_obstack);
+}
+
+/* Entry point of this file. Perform loop optimization
+ on the current function. F is the first insn of the function
+ and DUMPFILE is a stream for output of a trace of actions taken
+ (or 0 if none should be output). */
+
+void
+loop_optimize (f, dumpfile)
+ /* f is the first instruction of a chain of insns for one function */
+ rtx f;
+ FILE *dumpfile;
+{
+ register rtx insn;
+ register int i;
+ rtx last_insn;
+
+ loop_dump_stream = dumpfile;
+
+ init_recog_no_volatile ();
+ init_alias_analysis ();
+
+ max_reg_before_loop = max_reg_num ();
+
+ moved_once = (char *) alloca (max_reg_before_loop);
+ bzero (moved_once, max_reg_before_loop);
+
+ regs_may_share = 0;
+
+ /* Count the number of loops. */
+
+ max_loop_num = 0;
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ max_loop_num++;
+ }
+
+ /* Don't waste time if no loops. */
+ if (max_loop_num == 0)
+ return;
+
+ /* Get size to use for tables indexed by uids.
+ Leave some space for labels allocated by find_and_verify_loops. */
+ max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
+
+ uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
+ uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));
+
+ bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
+ bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));
+
+ /* Allocate tables for recording each loop. We set each entry, so they need
+ not be zeroed. */
+ loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
+ loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
+ loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
+ loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
+ loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
+
+ /* Find and process each loop.
+ First, find them, and record them in order of their beginnings. */
+ find_and_verify_loops (f);
+
+ /* Now find all register lifetimes. This must be done after
+ find_and_verify_loops, because it might reorder the insns in the
+ function. */
+ reg_scan (f, max_reg_num (), 1);
+
+ /* See if we went too far. */
+ if (get_max_uid () > max_uid_for_loop)
+ abort ();
+
+ /* Compute the mapping from uids to luids.
+ LUIDs are numbers assigned to insns, like uids,
+ except that luids increase monotonically through the code.
+ Don't assign luids to line-number NOTEs, so that the distance in luids
+ between two insns is not affected by -g. */
+
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ {
+ last_insn = insn;
+ if (GET_CODE (insn) != NOTE
+ || NOTE_LINE_NUMBER (insn) <= 0)
+ uid_luid[INSN_UID (insn)] = ++i;
+ else
+ /* Give a line number note the same luid as preceding insn. */
+ uid_luid[INSN_UID (insn)] = i;
+ }
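+
+  /* For illustration (hypothetical uids): insns with uids 7, 3 and 12
+     appearing in that order receive luids 1, 2 and 3, while a
+     line-number note between them shares the luid of the preceding
+     insn, so the luid distance between two insns is the same with or
+     without -g.  */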
+
+ max_luid = i + 1;
+
+ /* Don't leave gaps in uid_luid for insns that have been
+ deleted. It is possible that the first or last insn
+ using some register has been deleted by cross-jumping.
+ Make sure that uid_luid for that former insn's uid
+ points to the general area where that insn used to be. */
+ for (i = 0; i < max_uid_for_loop; i++)
+ {
+ uid_luid[0] = uid_luid[i];
+ if (uid_luid[0] != 0)
+ break;
+ }
+ for (i = 0; i < max_uid_for_loop; i++)
+ if (uid_luid[i] == 0)
+ uid_luid[i] = uid_luid[i - 1];
+
+ /* Create a mapping from loops to BLOCK tree nodes. */
+ if (flag_unroll_loops && write_symbols != NO_DEBUG)
+ find_loop_tree_blocks ();
+
+ /* Now scan the loops, last ones first, since this means inner ones are done
+ before outer ones. */
+ for (i = max_loop_num-1; i >= 0; i--)
+ if (! loop_invalid[i] && loop_number_loop_ends[i])
+ scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
+ max_reg_num ());
+
+ /* If debugging and unrolling loops, we must replicate the tree nodes
+     corresponding to the blocks inside the loop, so that the original
+     one-to-one mapping will remain.  */
+ if (flag_unroll_loops && write_symbols != NO_DEBUG)
+ unroll_block_trees ();
+}
+
+/* Optimize one loop whose start is LOOP_START and end is END.
+ LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
+ NOTE_INSN_LOOP_END. */
+
+/* ??? Could also move memory writes out of loops if the destination address
+ is invariant, the source is invariant, the memory write is not volatile,
+ and if we can prove that no read inside the loop can read this address
+ before the write occurs. If there is a read of this address after the
+ write, then we can also mark the memory read as invariant. */
+
+static void
+scan_loop (loop_start, end, nregs)
+ rtx loop_start, end;
+ int nregs;
+{
+ register int i;
+ register rtx p;
+ /* 1 if we are scanning insns that could be executed zero times. */
+ int maybe_never = 0;
+ /* 1 if we are scanning insns that might never be executed
+ due to a subroutine call which might exit before they are reached. */
+ int call_passed = 0;
+ /* For a rotated loop that is entered near the bottom,
+ this is the label at the top. Otherwise it is zero. */
+ rtx loop_top = 0;
+ /* Jump insn that enters the loop, or 0 if control drops in. */
+ rtx loop_entry_jump = 0;
+ /* Place in the loop where control enters. */
+ rtx scan_start;
+ /* Number of insns in the loop. */
+ int insn_count;
+ int in_libcall = 0;
+ int tem;
+ rtx temp;
+ /* The SET from an insn, if it is the only SET in the insn. */
+ rtx set, set1;
+ /* Chain describing insns movable in current loop. */
+ struct movable *movables = 0;
+ /* Last element in `movables' -- so we can add elements at the end. */
+ struct movable *last_movable = 0;
+ /* Ratio of extra register life span we can justify
+ for saving an instruction. More if loop doesn't call subroutines
+ since in that case saving an insn makes more difference
+ and more registers are available. */
+ int threshold;
+ /* If we have calls, contains the insn in which a register was used
+ if it was used exactly once; contains const0_rtx if it was used more
+ than once. */
+ rtx *reg_single_usage = 0;
+ /* Nonzero if we are scanning instructions in a sub-loop. */
+ int loop_depth = 0;
+
+ n_times_set = (short *) alloca (nregs * sizeof (short));
+ n_times_used = (short *) alloca (nregs * sizeof (short));
+ may_not_optimize = (char *) alloca (nregs);
+
+ /* Determine whether this loop starts with a jump down to a test at
+ the end. This will occur for a small number of loops with a test
+ that is too complex to duplicate in front of the loop.
+
+ We search for the first insn or label in the loop, skipping NOTEs.
+ However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
+ (because we might have a loop executed only once that contains a
+ loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
+ (in case we have a degenerate loop).
+
+ Note that if we mistakenly think that a loop is entered at the top
+ when, in fact, it is entered at the exit test, the only effect will be
+ slightly poorer optimization. Making the opposite error can generate
+ incorrect code. Since very few loops now start with a jump to the
+ exit test, the code here to detect that case is very conservative. */
+
+ for (p = NEXT_INSN (loop_start);
+ p != end
+ && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
+ && (GET_CODE (p) != NOTE
+ || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
+ && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
+ p = NEXT_INSN (p))
+ ;
+
+ scan_start = p;
+
+ /* Set up variables describing this loop. */
+ prescan_loop (loop_start, end);
+ threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
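+
+  /* A hypothetical example of the threshold computation: on a machine
+     with 31 non-fixed registers, a loop without calls gets
+     threshold = 2 * (1 + 31) = 64, while a loop containing calls gets
+     only 32, since in that case a saved insn makes less difference and
+     fewer registers are available.  */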
+
+ /* If loop has a jump before the first label,
+ the true entry is the target of that jump.
+ Start scan from there.
+ But record in LOOP_TOP the place where the end-test jumps
+ back to so we can scan that after the end of the loop. */
+ if (GET_CODE (p) == JUMP_INSN)
+ {
+ loop_entry_jump = p;
+
+      /* Loop entry must be an unconditional jump (and not a RETURN).  */
+ if (simplejump_p (p)
+ && JUMP_LABEL (p) != 0
+ /* Check to see whether the jump actually
+ jumps out of the loop (meaning it's no loop).
+ This case can happen for things like
+ do {..} while (0). If this label was generated previously
+ by loop, we can't tell anything about it and have to reject
+ the loop. */
+ && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
+ && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
+ && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
+ {
+ loop_top = next_label (scan_start);
+ scan_start = JUMP_LABEL (p);
+ }
+ }
+
+ /* If SCAN_START was an insn created by loop, we don't know its luid
+ as required by loop_reg_used_before_p. So skip such loops. (This
+ test may never be true, but it's best to play it safe.)
+
+ Also, skip loops where we do not start scanning at a label. This
+ test also rejects loops starting with a JUMP_INSN that failed the
+ test above. */
+
+ if (INSN_UID (scan_start) >= max_uid_for_loop
+ || GET_CODE (scan_start) != CODE_LABEL)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
+ INSN_UID (loop_start), INSN_UID (end));
+ return;
+ }
+
+ /* Count number of times each reg is set during this loop.
+ Set may_not_optimize[I] if it is not safe to move out
+ the setting of register I. If this loop has calls, set
+ reg_single_usage[I]. */
+
+ bzero ((char *) n_times_set, nregs * sizeof (short));
+ bzero (may_not_optimize, nregs);
+
+ if (loop_has_call)
+ {
+ reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
+ bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
+ }
+
+ count_loop_regs_set (loop_top ? loop_top : loop_start, end,
+ may_not_optimize, reg_single_usage, &insn_count, nregs);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ may_not_optimize[i] = 1, n_times_set[i] = 1;
+ bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (short));
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
+ INSN_UID (loop_start), INSN_UID (end), insn_count);
+ if (loop_continue)
+ fprintf (loop_dump_stream, "Continue at insn %d.\n",
+ INSN_UID (loop_continue));
+ }
+
+ /* Scan through the loop finding insns that are safe to move.
+ Set n_times_set negative for the reg being set, so that
+ this reg will be considered invariant for subsequent insns.
+ We consider whether subsequent insns use the reg
+ in deciding whether it is worth actually moving.
+
+ MAYBE_NEVER is nonzero if we have passed a conditional jump insn
+ and therefore it is possible that the insns we are scanning
+ would never be executed. At such times, we must make sure
+ that it is safe to execute the insn once instead of zero times.
+ When MAYBE_NEVER is 0, all insns will be executed at least once
+ so that is not a problem. */
+
+ p = scan_start;
+ while (1)
+ {
+ p = NEXT_INSN (p);
+ /* At end of a straight-in loop, we are done.
+ At end of a loop entered at the bottom, scan the top. */
+ if (p == scan_start)
+ break;
+ if (p == end)
+ {
+ if (loop_top != 0)
+ p = loop_top;
+ else
+ break;
+ if (p == scan_start)
+ break;
+ }
+
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && find_reg_note (p, REG_LIBCALL, NULL_RTX))
+ in_libcall = 1;
+ else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && find_reg_note (p, REG_RETVAL, NULL_RTX))
+ in_libcall = 0;
+
+ if (GET_CODE (p) == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG
+ && ! may_not_optimize[REGNO (SET_DEST (set))])
+ {
+ int tem1 = 0;
+ int tem2 = 0;
+ int move_insn = 0;
+ rtx src = SET_SRC (set);
+ rtx dependencies = 0;
+
+ /* Figure out what to use as a source of this insn. If a REG_EQUIV
+ note is given or if a REG_EQUAL note with a constant operand is
+ specified, use it as the source and mark that we should move
+	     this insn by calling emit_move_insn rather than duplicating the
+ insn.
+
+ Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
+ is present. */
+ temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
+ if (temp)
+ src = XEXP (temp, 0), move_insn = 1;
+ else
+ {
+ temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
+ if (temp && CONSTANT_P (XEXP (temp, 0)))
+ src = XEXP (temp, 0), move_insn = 1;
+ if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
+ {
+ src = XEXP (temp, 0);
+ /* A libcall block can use regs that don't appear in
+ the equivalent expression. To move the libcall,
+ we must move those regs too. */
+ dependencies = libcall_other_reg (p, src);
+ }
+ }
+
+ /* Don't try to optimize a register that was made
+ by loop-optimization for an inner loop.
+ We don't know its life-span, so we can't compute the benefit. */
+ if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
+ ;
+ /* In order to move a register, we need to have one of three cases:
+ (1) it is used only in the same basic block as the set
+ (2) it is not a user variable and it is not used in the
+ exit test (this can cause the variable to be used
+ before it is set just like a user-variable).
+ (3) the set is guaranteed to be executed once the loop starts,
+ and the reg is not used until after that. */
+ else if (! ((! maybe_never
+ && ! loop_reg_used_before_p (set, p, loop_start,
+ scan_start, end))
+ || (! REG_USERVAR_P (SET_DEST (set))
+ && ! REG_LOOP_TEST_P (SET_DEST (set)))
+ || reg_in_basic_block_p (p, SET_DEST (set))))
+ ;
+ else if ((tem = invariant_p (src))
+ && (dependencies == 0
+ || (tem2 = invariant_p (dependencies)) != 0)
+ && (n_times_set[REGNO (SET_DEST (set))] == 1
+ || (tem1
+ = consec_sets_invariant_p (SET_DEST (set),
+ n_times_set[REGNO (SET_DEST (set))],
+ p)))
+ /* If the insn can cause a trap (such as divide by zero),
+ can't move it unless it's guaranteed to be executed
+ once loop is entered. Even a function call might
+ prevent the trap insn from being reached
+ (since it might exit!) */
+ && ! ((maybe_never || call_passed)
+ && may_trap_p (src)))
+ {
+ register struct movable *m;
+ register int regno = REGNO (SET_DEST (set));
+
+	      /* A potential lossage is a case where two insns
+ can be combined as long as they are both in the loop, but
+ we move one of them outside the loop. For large loops,
+ this can lose. The most common case of this is the address
+ of a function being called.
+
+ Therefore, if this register is marked as being used exactly
+ once if we are in a loop with calls (a "large loop"), see if
+ we can replace the usage of this register with the source
+ of this SET. If we can, delete this insn.
+
+ Don't do this if P has a REG_RETVAL note or if we have
+ SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
+
+ if (reg_single_usage && reg_single_usage[regno] != 0
+ && reg_single_usage[regno] != const0_rtx
+ && regno_first_uid[regno] == INSN_UID (p)
+ && (regno_last_uid[regno]
+ == INSN_UID (reg_single_usage[regno]))
+ && n_times_set[REGNO (SET_DEST (set))] == 1
+ && ! side_effects_p (SET_SRC (set))
+ && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
+#ifdef SMALL_REGISTER_CLASSES
+ && ! (GET_CODE (SET_SRC (set)) == REG
+ && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)
+#endif
+ /* This test is not redundant; SET_SRC (set) might be
+ a call-clobbered register and the life of REGNO
+ might span a call. */
+ && ! modified_between_p (SET_SRC (set), p,
+ reg_single_usage[regno])
+ && no_labels_between_p (p, reg_single_usage[regno])
+ && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
+ reg_single_usage[regno]))
+ {
+ /* Replace any usage in a REG_EQUAL note. */
+ REG_NOTES (reg_single_usage[regno])
+ = replace_rtx (REG_NOTES (reg_single_usage[regno]),
+ SET_DEST (set), SET_SRC (set));
+
+ PUT_CODE (p, NOTE);
+ NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (p) = 0;
+ n_times_set[regno] = 0;
+ continue;
+ }
+
+ m = (struct movable *) alloca (sizeof (struct movable));
+ m->next = 0;
+ m->insn = p;
+ m->set_src = src;
+ m->dependencies = dependencies;
+ m->set_dest = SET_DEST (set);
+ m->force = 0;
+ m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
+ m->done = 0;
+ m->forces = 0;
+ m->partial = 0;
+ m->move_insn = move_insn;
+ m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
+ m->savemode = VOIDmode;
+ m->regno = regno;
+ /* Set M->cond if either invariant_p or consec_sets_invariant_p
+ returned 2 (only conditionally invariant). */
+ m->cond = ((tem | tem1 | tem2) > 1);
+ m->global = (uid_luid[regno_last_uid[regno]] > INSN_LUID (end)
+ || uid_luid[regno_first_uid[regno]] < INSN_LUID (loop_start));
+ m->match = 0;
+ m->lifetime = (uid_luid[regno_last_uid[regno]]
+ - uid_luid[regno_first_uid[regno]]);
+ m->savings = n_times_used[regno];
+ if (find_reg_note (p, REG_RETVAL, NULL_RTX))
+ m->savings += libcall_benefit (p);
+ n_times_set[regno] = move_insn ? -2 : -1;
+ /* Add M to the end of the chain MOVABLES. */
+ if (movables == 0)
+ movables = m;
+ else
+ last_movable->next = m;
+ last_movable = m;
+
+ if (m->consec > 0)
+ {
+ /* Skip this insn, not checking REG_LIBCALL notes. */
+ p = next_nonnote_insn (p);
+ /* Skip the consecutive insns, if there are any. */
+ p = skip_consec_insns (p, m->consec);
+ /* Back up to the last insn of the consecutive group. */
+ p = prev_nonnote_insn (p);
+
+ /* We must now reset m->move_insn, m->is_equiv, and possibly
+ m->set_src to correspond to the effects of all the
+ insns. */
+ temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
+ if (temp)
+ m->set_src = XEXP (temp, 0), m->move_insn = 1;
+ else
+ {
+ temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
+ if (temp && CONSTANT_P (XEXP (temp, 0)))
+ m->set_src = XEXP (temp, 0), m->move_insn = 1;
+ else
+ m->move_insn = 0;
+
+ }
+ m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
+ }
+ }
+ /* If this register is always set within a STRICT_LOW_PART
+ or set to zero, then its high bytes are constant.
+ So clear them outside the loop and within the loop
+ just load the low bytes.
+ We must check that the machine has an instruction to do so.
+ Also, if the value loaded into the register
+ depends on the same register, this cannot be done. */
+ else if (SET_SRC (set) == const0_rtx
+ && GET_CODE (NEXT_INSN (p)) == INSN
+ && (set1 = single_set (NEXT_INSN (p)))
+ && GET_CODE (set1) == SET
+ && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
+ && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
+ && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
+ == SET_DEST (set))
+ && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
+ {
+ register int regno = REGNO (SET_DEST (set));
+ if (n_times_set[regno] == 2)
+ {
+ register struct movable *m;
+ m = (struct movable *) alloca (sizeof (struct movable));
+ m->next = 0;
+ m->insn = p;
+ m->set_dest = SET_DEST (set);
+ m->dependencies = 0;
+ m->force = 0;
+ m->consec = 0;
+ m->done = 0;
+ m->forces = 0;
+ m->move_insn = 0;
+ m->partial = 1;
+ /* If the insn may not be executed on some cycles,
+ we can't clear the whole reg; clear just high part.
+ Not even if the reg is used only within this loop.
+ Consider this:
+ while (1)
+ while (s != t) {
+ if (foo ()) x = *s;
+ use (x);
+ }
+ Clearing x before the inner loop could clobber a value
+ being saved from the last time around the outer loop.
+ However, if the reg is not used outside this loop
+ and all uses of the register are in the same
+ basic block as the store, there is no problem.
+
+ If this insn was made by loop, we don't know its
+ INSN_LUID and hence must make a conservative
+ assumption. */
+ m->global = (INSN_UID (p) >= max_uid_for_loop
+ || (uid_luid[regno_last_uid[regno]]
+ > INSN_LUID (end))
+ || (uid_luid[regno_first_uid[regno]]
+ < INSN_LUID (p))
+ || (labels_in_range_p
+ (p, uid_luid[regno_first_uid[regno]])));
+ if (maybe_never && m->global)
+ m->savemode = GET_MODE (SET_SRC (set1));
+ else
+ m->savemode = VOIDmode;
+ m->regno = regno;
+ m->cond = 0;
+ m->match = 0;
+ m->lifetime = (uid_luid[regno_last_uid[regno]]
+ - uid_luid[regno_first_uid[regno]]);
+ m->savings = 1;
+ n_times_set[regno] = -1;
+ /* Add M to the end of the chain MOVABLES. */
+ if (movables == 0)
+ movables = m;
+ else
+ last_movable->next = m;
+ last_movable = m;
+ }
+ }
+ }
+ /* Past a call insn, we get to insns which might not be executed
+ because the call might exit. This matters for insns that trap.
+ Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
+ so they don't count. */
+ else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
+ call_passed = 1;
+ /* Past a label or a jump, we get to insns for which we
+ can't count on whether or how many times they will be
+ executed during each iteration. Therefore, we can
+ only move out sets of trivial variables
+ (those not used after the loop). */
+ /* This code appears in three places, once in scan_loop, and twice
+ in strength_reduce. */
+ else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
+ /* If we enter the loop in the middle, and scan around to the
+ beginning, don't set maybe_never for that. This must be an
+ unconditional jump, otherwise the code at the top of the
+ loop might never be executed. Unconditional jumps are
+	     followed by a barrier and then the loop end.  */
+ && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
+ && NEXT_INSN (NEXT_INSN (p)) == end
+ && simplejump_p (p)))
+ maybe_never = 1;
+ else if (GET_CODE (p) == NOTE)
+ {
+ /* At the virtual top of a converted loop, insns are again known to
+ be executed: logically, the loop begins here even though the exit
+ code has been duplicated. */
+ if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
+ maybe_never = call_passed = 0;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+ }
+
+ /* If one movable subsumes another, ignore that other. */
+
+ ignore_some_movables (movables);
+
+ /* For each movable insn, see if the reg that it loads
+   leads, when it dies, right into another conditionally movable insn.
+ If so, record that the second insn "forces" the first one,
+ since the second can be moved only if the first is. */
+
+ force_movables (movables);
+
+ /* See if there are multiple movable insns that load the same value.
+ If there are, make all but the first point at the first one
+ through the `match' field, and add the priorities of them
+ all together as the priority of the first. */
+
+ combine_movables (movables, nregs);
+
+ /* Now consider each movable insn to decide whether it is worth moving.
+ Store 0 in n_times_set for each reg that is moved. */
+
+ move_movables (movables, threshold,
+ insn_count, loop_start, end, nregs);
+
+ /* Now candidates that still are negative are those not moved.
+ Change n_times_set to indicate that those are not actually invariant. */
+ for (i = 0; i < nregs; i++)
+ if (n_times_set[i] < 0)
+ n_times_set[i] = n_times_used[i];
+
+ if (flag_strength_reduce)
+ strength_reduce (scan_start, end, loop_top,
+ insn_count, loop_start, end);
+}
+
+/* Add elements to *OUTPUT to record all the pseudo-regs
+ mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
+
+void
+record_excess_regs (in_this, not_in_this, output)
+ rtx in_this, not_in_this;
+ rtx *output;
+{
+ enum rtx_code code;
+ char *fmt;
+ int i;
+
+ code = GET_CODE (in_this);
+
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return;
+
+ case REG:
+ if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
+ && ! reg_mentioned_p (in_this, not_in_this))
+ *output = gen_rtx (EXPR_LIST, VOIDmode, in_this, *output);
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ int j;
+
+ switch (fmt[i])
+ {
+ case 'E':
+ for (j = 0; j < XVECLEN (in_this, i); j++)
+ record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
+ break;
+
+ case 'e':
+ record_excess_regs (XEXP (in_this, i), not_in_this, output);
+ break;
+ }
+ }
+}
+
+/* Check what regs are referred to in the libcall block ending with INSN,
+ aside from those mentioned in the equivalent value.
+ If there are none, return 0.
+ If there are one or more, return an EXPR_LIST containing all of them. */
+
+static rtx
+libcall_other_reg (insn, equiv)
+ rtx insn, equiv;
+{
+ rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
+ rtx p = XEXP (note, 0);
+ rtx output = 0;
+
+ /* First, find all the regs used in the libcall block
+ that are not mentioned as inputs to the result. */
+
+ while (p != insn)
+ {
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ record_excess_regs (PATTERN (p), equiv, &output);
+ p = NEXT_INSN (p);
+ }
+
+ return output;
+}
+
+/* Return 1 if all uses of REG
+ are between INSN and the end of the basic block. */
+
+static int
+reg_in_basic_block_p (insn, reg)
+ rtx insn, reg;
+{
+ int regno = REGNO (reg);
+ rtx p;
+
+ if (regno_first_uid[regno] != INSN_UID (insn))
+ return 0;
+
+ /* Search this basic block for the already recorded last use of the reg. */
+ for (p = insn; p; p = NEXT_INSN (p))
+ {
+ switch (GET_CODE (p))
+ {
+ case NOTE:
+ break;
+
+ case INSN:
+ case CALL_INSN:
+ /* Ordinary insn: if this is the last use, we win. */
+ if (regno_last_uid[regno] == INSN_UID (p))
+ return 1;
+ break;
+
+ case JUMP_INSN:
+ /* Jump insn: if this is the last use, we win. */
+ if (regno_last_uid[regno] == INSN_UID (p))
+ return 1;
+ /* Otherwise, it's the end of the basic block, so we lose. */
+ return 0;
+
+ case CODE_LABEL:
+ case BARRIER:
+ /* It's the end of the basic block, so we lose. */
+ return 0;
+ }
+ }
+
+ /* The "last use" doesn't follow the "first use"?? */
+ abort ();
+}
+
+/* Compute the benefit of eliminating the insns in the block whose
+ last insn is LAST. This may be a group of insns used to compute a
+ value directly or can contain a library call. */
+
+static int
+libcall_benefit (last)
+ rtx last;
+{
+ rtx insn;
+ int benefit = 0;
+
+ for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
+ insn != last; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CALL_INSN)
+ benefit += 10; /* Assume at least this many insns in a library
+ routine. */
+ else if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER)
+ benefit++;
+ }
+
+ return benefit;
+}
+
+/* Skip COUNT insns from INSN, counting library calls as 1 insn. */
+
+static rtx
+skip_consec_insns (insn, count)
+ rtx insn;
+ int count;
+{
+ for (; count > 0; count--)
+ {
+ rtx temp;
+
+ /* If first insn of libcall sequence, skip to end. */
+ /* Do this at start of loop, since INSN is guaranteed to
+ be an insn here. */
+ if (GET_CODE (insn) != NOTE
+ && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
+ insn = XEXP (temp, 0);
+
+ do insn = NEXT_INSN (insn);
+ while (GET_CODE (insn) == NOTE);
+ }
+
+ return insn;
+}
+
+/* Ignore any movable whose insn falls within a libcall
+ which is part of another movable.
+ We make use of the fact that the movable for the libcall value
+ was made later and so appears later on the chain. */
+
+static void
+ignore_some_movables (movables)
+ struct movable *movables;
+{
+ register struct movable *m, *m1;
+
+ for (m = movables; m; m = m->next)
+ {
+ /* Is this a movable for the value of a libcall? */
+ rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
+ if (note)
+ {
+ rtx insn;
+ /* Check for earlier movables inside that range,
+ and mark them invalid. We cannot use LUIDs here because
+ insns created by loop.c for prior loops don't have LUIDs.
+ Rather than reject all such insns from movables, we just
+ explicitly check each insn in the libcall (since invariant
+ libcalls aren't that common). */
+ for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
+ for (m1 = movables; m1 != m; m1 = m1->next)
+ if (m1->insn == insn)
+ m1->done = 1;
+ }
+ }
+}
+
+/* For each movable insn, see if the reg that it loads
+   leads, when it dies, right into another conditionally movable insn.
+ If so, record that the second insn "forces" the first one,
+ since the second can be moved only if the first is. */
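+
+/* For instance (a hypothetical example): if movable M1 loads (reg 101)
+   and a later, conditionally movable insn M computes its value from
+   (reg 101) at the point where that reg dies, M is recorded as forcing
+   M1, and M1 inherits M's lifetime and savings as extra priority.  */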
+
+static void
+force_movables (movables)
+ struct movable *movables;
+{
+ register struct movable *m, *m1;
+ for (m1 = movables; m1; m1 = m1->next)
+ /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
+ if (!m1->partial && !m1->done)
+ {
+ int regno = m1->regno;
+ for (m = m1->next; m; m = m->next)
+ /* ??? Could this be a bug? What if CSE caused the
+ register of M1 to be used after this insn?
+ Since CSE does not update regno_last_uid,
+ this insn M->insn might not be where it dies.
+ But very likely this doesn't matter; what matters is
+ that M's reg is computed from M1's reg. */
+ if (INSN_UID (m->insn) == regno_last_uid[regno]
+ && !m->done)
+ break;
+ if (m != 0 && m->set_src == m1->set_dest
+ /* If m->consec, m->set_src isn't valid. */
+ && m->consec == 0)
+ m = 0;
+
+      /* Increase the priority of moving the first insn,
+ since it permits the second to be moved as well. */
+ if (m != 0)
+ {
+ m->forces = m1;
+ m1->lifetime += m->lifetime;
+	  m1->savings += m->savings;
+ }
+ }
+}
+
+/* Find invariant expressions that are equal and can be combined into
+ one register. */
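+
+/* For example (hypothetical): if the loop contains both
+	(set (reg 102) (const_int 5))
+   and	(set (reg 103) (const_int 5))
+   and neither reg is used outside the loop or partial, the second is
+   marked as matching the first, so only one register need be loaded.  */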
+
+static void
+combine_movables (movables, nregs)
+ struct movable *movables;
+ int nregs;
+{
+ register struct movable *m;
+ char *matched_regs = (char *) alloca (nregs);
+ enum machine_mode mode;
+
+ /* Regs that are set more than once are not allowed to match
+ or be matched. I'm no longer sure why not. */
+ /* Perhaps testing m->consec_sets would be more appropriate here? */
+
+ for (m = movables; m; m = m->next)
+ if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
+ {
+ register struct movable *m1;
+ int regno = m->regno;
+
+ bzero (matched_regs, nregs);
+ matched_regs[regno] = 1;
+
+ for (m1 = movables; m1; m1 = m1->next)
+ if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
+ /* A reg used outside the loop mustn't be eliminated. */
+ && !m1->global
+ /* A reg used for zero-extending mustn't be eliminated. */
+ && !m1->partial
+ && (matched_regs[m1->regno]
+ ||
+ (
+ /* Can combine regs with different modes loaded from the
+ same constant only if the modes are the same or
+ if both are integer modes with M wider or the same
+ width as M1. The check for integer is redundant, but
+ safe, since the only case of differing destination
+ modes with equal sources is when both sources are
+ VOIDmode, i.e., CONST_INT. */
+ (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
+ || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
+ && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
+ && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
+ >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
+ /* See if the source of M1 says it matches M. */
+ && ((GET_CODE (m1->set_src) == REG
+ && matched_regs[REGNO (m1->set_src)])
+ || rtx_equal_for_loop_p (m->set_src, m1->set_src,
+ movables))))
+ && ((m->dependencies == m1->dependencies)
+ || rtx_equal_p (m->dependencies, m1->dependencies)))
+ {
+ m->lifetime += m1->lifetime;
+ m->savings += m1->savings;
+ m1->done = 1;
+ m1->match = m;
+ matched_regs[m1->regno] = 1;
+ }
+ }
+
+ /* Now combine the regs used for zero-extension.
+ This can be done for those not marked `global'
+ provided their lives don't overlap. */
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ register struct movable *m0 = 0;
+
+ /* Combine all the registers for extension from mode MODE.
+ Don't combine any that are used outside this loop. */
+ for (m = movables; m; m = m->next)
+ if (m->partial && ! m->global
+ && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
+ {
+ register struct movable *m1;
+ int first = uid_luid[regno_first_uid[m->regno]];
+ int last = uid_luid[regno_last_uid[m->regno]];
+
+ if (m0 == 0)
+ {
+ /* First one: don't check for overlap, just record it. */
+ m0 = m;
+ continue;
+ }
+
+ /* Make sure they extend to the same mode.
+ (Almost always true.) */
+ if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
+ continue;
+
+ /* We already have one: check for overlap with those
+ already combined together. */
+ for (m1 = movables; m1 != m; m1 = m1->next)
+ if (m1 == m0 || (m1->partial && m1->match == m0))
+ if (! (uid_luid[regno_first_uid[m1->regno]] > last
+ || uid_luid[regno_last_uid[m1->regno]] < first))
+ goto overlap;
+
+ /* No overlap: we can combine this with the others. */
+ m0->lifetime += m->lifetime;
+ m0->savings += m->savings;
+ m->done = 1;
+ m->match = m0;
+
+ overlap: ;
+ }
+ }
+}
+
+/* Return 1 if regs X and Y will become the same if moved. */
+
+static int
+regs_match_p (x, y, movables)
+ rtx x, y;
+ struct movable *movables;
+{
+ int xn = REGNO (x);
+ int yn = REGNO (y);
+ struct movable *mx, *my;
+
+ for (mx = movables; mx; mx = mx->next)
+ if (mx->regno == xn)
+ break;
+
+ for (my = movables; my; my = my->next)
+ if (my->regno == yn)
+ break;
+
+ return (mx && my
+ && ((mx->match == my->match && mx->match != 0)
+ || mx->match == my
+ || mx == my->match));
+}
+
+/* Return 1 if X and Y are identical-looking rtx's.
+ This is the Lisp function EQUAL for rtx arguments.
+
+ If two registers are matching movables or a movable register and an
+ equivalent constant, consider them equal. */
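+
+/* Thus (a hypothetical case) a pseudo such as (reg 104) compares equal
+   to (const_int 5) here when the movable for register 104 is to be
+   re-generated as a move from that constant (its n_times_set entry is
+   -2 and its set_src matches).  */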
+
+static int
+rtx_equal_for_loop_p (x, y, movables)
+ rtx x, y;
+ struct movable *movables;
+{
+ register int i;
+ register int j;
+ register struct movable *m;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == y)
+ return 1;
+ if (x == 0 || y == 0)
+ return 0;
+
+ code = GET_CODE (x);
+
+ /* If we have a register and a constant, they may sometimes be
+ equal. */
+ if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
+ && CONSTANT_P (y))
+ for (m = movables; m; m = m->next)
+ if (m->move_insn && m->regno == REGNO (x)
+ && rtx_equal_p (m->set_src, y))
+ return 1;
+
+ else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
+ && CONSTANT_P (x))
+ for (m = movables; m; m = m->next)
+ if (m->move_insn && m->regno == REGNO (y)
+ && rtx_equal_p (m->set_src, x))
+ return 1;
+
+ /* Otherwise, rtx's of different codes cannot be equal. */
+ if (code != GET_CODE (y))
+ return 0;
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
+ (REG:SI x) and (REG:HI x) are NOT equivalent. */
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ /* These three types of rtx's can be compared nonrecursively. */
+ if (code == REG)
+ return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
+
+ if (code == LABEL_REF)
+ return XEXP (x, 0) == XEXP (y, 0);
+ if (code == SYMBOL_REF)
+ return XSTR (x, 0) == XSTR (y, 0);
+
+ /* Compare the elements. If any pair of corresponding elements
+   fail to match, return 0 for the whole thing.  */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'E':
+ /* Two vectors must have the same length. */
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+
+ /* And the corresponding elements must match. */
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
+ return 0;
+ break;
+
+ case 'e':
+ if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
+ return 0;
+ break;
+
+ case 's':
+ if (strcmp (XSTR (x, i), XSTR (y, i)))
+ return 0;
+ break;
+
+ case 'u':
+ /* These are just backpointers, so they don't matter. */
+ break;
+
+ case '0':
+ break;
+
+ /* It is believed that rtx's at this level will never
+ contain anything but integers and other rtx's,
+ except for within LABEL_REFs and SYMBOL_REFs. */
+ default:
+ abort ();
+ }
+ }
+ return 1;
+}
+
+/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
+   insns in INSNS which use that reference.  */
+
+static void
+add_label_notes (x, insns)
+ rtx x;
+ rtx insns;
+{
+ enum rtx_code code = GET_CODE (x);
+ int i, j;
+ char *fmt;
+ rtx insn;
+
+ if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
+ {
+ rtx next = next_real_insn (XEXP (x, 0));
+
+ /* Don't record labels that refer to dispatch tables.
+ This is not necessary, since the tablejump references the same label.
+ And if we did record them, flow.c would make worse code. */
+ if (next == 0
+ || ! (GET_CODE (next) == JUMP_INSN
+ && (GET_CODE (PATTERN (next)) == ADDR_VEC
+ || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
+ {
+ for (insn = insns; insn; insn = NEXT_INSN (insn))
+ if (reg_mentioned_p (XEXP (x, 0), insn))
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL, XEXP (x, 0),
+ REG_NOTES (insn));
+ }
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ add_label_notes (XEXP (x, i), insns);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ add_label_notes (XVECEXP (x, i, j), insns);
+ }
+}
+
+/* Scan MOVABLES, and move the insns that deserve to be moved.
+ If two matching movables are combined, replace one reg with the
+ other throughout. */
+
+static void
+move_movables (movables, threshold, insn_count, loop_start, end, nregs)
+ struct movable *movables;
+ int threshold;
+ int insn_count;
+ rtx loop_start;
+ rtx end;
+ int nregs;
+{
+ rtx new_start = 0;
+ register struct movable *m;
+ register rtx p;
+ /* Map of pseudo-register replacements to handle combining
+ when we move several insns that load the same value
+ into different pseudo-registers. */
+ rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
+ char *already_moved = (char *) alloca (nregs);
+
+ bzero (already_moved, nregs);
+ bzero ((char *) reg_map, nregs * sizeof (rtx));
+
+ num_movables = 0;
+
+ for (m = movables; m; m = m->next)
+ {
+ /* Describe this movable insn. */
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
+ INSN_UID (m->insn), m->regno, m->lifetime);
+ if (m->consec > 0)
+ fprintf (loop_dump_stream, "consec %d, ", m->consec);
+ if (m->cond)
+ fprintf (loop_dump_stream, "cond ");
+ if (m->force)
+ fprintf (loop_dump_stream, "force ");
+ if (m->global)
+ fprintf (loop_dump_stream, "global ");
+ if (m->done)
+ fprintf (loop_dump_stream, "done ");
+ if (m->move_insn)
+ fprintf (loop_dump_stream, "move-insn ");
+ if (m->match)
+ fprintf (loop_dump_stream, "matches %d ",
+ INSN_UID (m->match->insn));
+ if (m->forces)
+ fprintf (loop_dump_stream, "forces %d ",
+ INSN_UID (m->forces->insn));
+ }
+
+ /* Count movables. Value used in heuristics in strength_reduce. */
+ num_movables++;
+
+ /* Ignore the insn if it's already done (it matched something else).
+ Otherwise, see if it is now safe to move. */
+
+ if (!m->done
+ && (! m->cond
+ || (1 == invariant_p (m->set_src)
+ && (m->dependencies == 0
+ || 1 == invariant_p (m->dependencies))
+ && (m->consec == 0
+ || 1 == consec_sets_invariant_p (m->set_dest,
+ m->consec + 1,
+ m->insn))))
+ && (! m->forces || m->forces->done))
+ {
+ register int regno;
+ register rtx p;
+ int savings = m->savings;
+
+ /* We have an insn that is safe to move.
+ Compute its desirability. */
+
+ p = m->insn;
+ regno = m->regno;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "savings %d ", savings);
+
+ if (moved_once[regno])
+ {
+ insn_count *= 2;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "halved since already moved ");
+ }
+
+ /* An insn MUST be moved if we already moved something else
+ which is safe only if this one is moved too: that is,
+ if already_moved[REGNO] is nonzero. */
+
+ /* An insn is desirable to move if the new lifetime of the
+ register is no more than THRESHOLD times the old lifetime.
+ If it's not desirable, it means the loop is so big
+ that moving won't speed things up much,
+ and it is liable to make register usage worse. */
+
+ /* It is also desirable to move if it can be moved at no
+ extra cost because something else was already moved. */
+
+ if (already_moved[regno]
+ || (threshold * savings * m->lifetime) >= insn_count
+ || (m->forces && m->forces->done
+ && n_times_used[m->forces->regno] == 1))
+ {
+ int count;
+ register struct movable *m1;
+ rtx first;
+
+ /* Now move the insns that set the reg. */
+
+ if (m->partial && m->match)
+ {
+ rtx newpat, i1;
+ rtx r1, r2;
+ /* Find the end of this chain of matching regs.
+ Thus, we load each reg in the chain from that one reg.
+ And that reg is loaded with 0 directly,
+ since it has ->match == 0. */
+ for (m1 = m; m1->match; m1 = m1->match);
+ newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
+ SET_DEST (PATTERN (m1->insn)));
+ i1 = emit_insn_before (newpat, loop_start);
+
+ /* Mark the moved, invariant reg as being allowed to
+ share a hard reg with the other matching invariant. */
+ REG_NOTES (i1) = REG_NOTES (m->insn);
+ r1 = SET_DEST (PATTERN (m->insn));
+ r2 = SET_DEST (PATTERN (m1->insn));
+ regs_may_share = gen_rtx (EXPR_LIST, VOIDmode, r1,
+ gen_rtx (EXPR_LIST, VOIDmode, r2,
+ regs_may_share));
+ delete_insn (m->insn);
+
+ if (new_start == 0)
+ new_start = i1;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
+ }
+ /* If we are to re-generate the item being moved with a
+ new move insn, first delete what we have and then emit
+ the move insn before the loop. */
+ else if (m->move_insn)
+ {
+ rtx i1, temp;
+
+ for (count = m->consec; count >= 0; count--)
+ {
+ /* If this is the first insn of a library call sequence,
+ skip to the end. */
+ if (GET_CODE (p) != NOTE
+ && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
+ p = XEXP (temp, 0);
+
+ /* If this is the last insn of a libcall sequence, then
+ delete every insn in the sequence except the last.
+ The last insn is handled in the normal manner. */
+ if (GET_CODE (p) != NOTE
+ && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
+ {
+ temp = XEXP (temp, 0);
+ while (temp != p)
+ temp = delete_insn (temp);
+ }
+
+ p = delete_insn (p);
+ }
+
+ start_sequence ();
+ emit_move_insn (m->set_dest, m->set_src);
+ temp = get_insns ();
+ end_sequence ();
+
+ add_label_notes (m->set_src, temp);
+
+ i1 = emit_insns_before (temp, loop_start);
+ if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
+ REG_NOTES (i1)
+ = gen_rtx (EXPR_LIST,
+ m->is_equiv ? REG_EQUIV : REG_EQUAL,
+ m->set_src, REG_NOTES (i1));
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
+
+ /* The more regs we move, the less we like moving them. */
+ threshold -= 3;
+ }
+ else
+ {
+ for (count = m->consec; count >= 0; count--)
+ {
+ rtx i1, temp;
+
+ /* If first insn of libcall sequence, skip to end. */
+ /* Do this at start of loop, since p is guaranteed to
+ be an insn here. */
+ if (GET_CODE (p) != NOTE
+ && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
+ p = XEXP (temp, 0);
+
+ /* If last insn of libcall sequence, move all
+ insns except the last before the loop. The last
+ insn is handled in the normal manner. */
+ if (GET_CODE (p) != NOTE
+ && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
+ {
+ rtx fn_address = 0;
+ rtx fn_reg = 0;
+ rtx fn_address_insn = 0;
+
+ first = 0;
+ for (temp = XEXP (temp, 0); temp != p;
+ temp = NEXT_INSN (temp))
+ {
+ rtx body;
+ rtx n;
+ rtx next;
+
+ if (GET_CODE (temp) == NOTE)
+ continue;
+
+ body = PATTERN (temp);
+
+ /* Find the next insn after TEMP,
+ not counting USE or NOTE insns. */
+ for (next = NEXT_INSN (temp); next != p;
+ next = NEXT_INSN (next))
+ if (! (GET_CODE (next) == INSN
+ && GET_CODE (PATTERN (next)) == USE)
+ && GET_CODE (next) != NOTE)
+ break;
+
+ /* If that is the call, this may be the insn
+ that loads the function address.
+
+ Extract the function address from the insn
+ that loads it into a register.
+ If this insn was cse'd, we get incorrect code.
+
+ So emit a new move insn that copies the
+ function address into the register that the
+ call insn will use. flow.c will delete any
+ redundant stores that we have created. */
+ if (GET_CODE (next) == CALL_INSN
+ && GET_CODE (body) == SET
+ && GET_CODE (SET_DEST (body)) == REG
+ && (n = find_reg_note (temp, REG_EQUAL,
+ NULL_RTX)))
+ {
+ fn_reg = SET_SRC (body);
+ if (GET_CODE (fn_reg) != REG)
+ fn_reg = SET_DEST (body);
+ fn_address = XEXP (n, 0);
+ fn_address_insn = temp;
+ }
+ /* We have the call insn.
+ If it uses the register we suspect it might,
+ load it with the correct address directly. */
+ if (GET_CODE (temp) == CALL_INSN
+ && fn_address != 0
+ && reg_referenced_p (fn_reg, body))
+ emit_insn_after (gen_move_insn (fn_reg,
+ fn_address),
+ fn_address_insn);
+
+ if (GET_CODE (temp) == CALL_INSN)
+ {
+ i1 = emit_call_insn_before (body, loop_start);
+ /* Because the USAGE information potentially
+ contains objects other than hard registers
+ we need to copy it. */
+ CALL_INSN_FUNCTION_USAGE (i1) =
+ copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
+ }
+ else
+ i1 = emit_insn_before (body, loop_start);
+ if (first == 0)
+ first = i1;
+ if (temp == fn_address_insn)
+ fn_address_insn = i1;
+ REG_NOTES (i1) = REG_NOTES (temp);
+ delete_insn (temp);
+ }
+ }
+ if (m->savemode != VOIDmode)
+ {
+ /* P sets REG to zero; but we should clear only
+ the bits that are not covered by the mode
+ m->savemode. */
+ rtx reg = m->set_dest;
+ rtx sequence;
+ rtx tem;
+
+ start_sequence ();
+ tem = expand_binop
+ (GET_MODE (reg), and_optab, reg,
+ GEN_INT ((((HOST_WIDE_INT) 1
+ << GET_MODE_BITSIZE (m->savemode)))
+ - 1),
+ reg, 1, OPTAB_LIB_WIDEN);
+ if (tem == 0)
+ abort ();
+ if (tem != reg)
+ emit_move_insn (reg, tem);
+ sequence = gen_sequence ();
+ end_sequence ();
+ i1 = emit_insn_before (sequence, loop_start);
+ }
+ else if (GET_CODE (p) == CALL_INSN)
+ {
+ i1 = emit_call_insn_before (PATTERN (p), loop_start);
+ /* Because the USAGE information potentially
+ contains objects other than hard registers
+ we need to copy it. */
+ CALL_INSN_FUNCTION_USAGE (i1) =
+ copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
+ }
+ else
+ i1 = emit_insn_before (PATTERN (p), loop_start);
+
+ REG_NOTES (i1) = REG_NOTES (p);
+
+ /* If there is a REG_EQUAL note present whose value is
+ not loop invariant, then delete it, since it may
+ cause problems with later optimization passes.
+ It is possible for cse to create such notes
+ like this as a result of record_jump_cond. */
+
+ if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
+ && ! invariant_p (XEXP (temp, 0)))
+ remove_note (i1, temp);
+
+ if (new_start == 0)
+ new_start = i1;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, " moved to %d",
+ INSN_UID (i1));
+
+#if 0
+ /* This isn't needed because REG_NOTES is copied
+ below and is wrong since P might be a PARALLEL. */
+ if (REG_NOTES (i1) == 0
+ && ! m->partial /* But not if it's a zero-extend clr. */
+ && ! m->global /* and not if used outside the loop
+ (since it might get set outside). */
+ && CONSTANT_P (SET_SRC (PATTERN (p))))
+ REG_NOTES (i1)
+ = gen_rtx (EXPR_LIST, REG_EQUAL,
+ SET_SRC (PATTERN (p)), REG_NOTES (i1));
+#endif
+
+ /* If library call, now fix the REG_NOTES that contain
+ insn pointers, namely REG_LIBCALL on FIRST
+ and REG_RETVAL on I1. */
+ if (temp = find_reg_note (i1, REG_RETVAL, NULL_RTX))
+ {
+ XEXP (temp, 0) = first;
+ temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
+ XEXP (temp, 0) = i1;
+ }
+
+ delete_insn (p);
+ do p = NEXT_INSN (p);
+ while (p && GET_CODE (p) == NOTE);
+ }
+
+ /* The more regs we move, the less we like moving them. */
+ threshold -= 3;
+ }
+
+ /* Any other movable that loads the same register
+ MUST be moved. */
+ already_moved[regno] = 1;
+
+ /* This reg has been moved out of one loop. */
+ moved_once[regno] = 1;
+
+ /* The reg set here is now invariant. */
+ if (! m->partial)
+ n_times_set[regno] = 0;
+
+ m->done = 1;
+
+ /* Change the length-of-life info for the register
+ to say it lives at least the full length of this loop.
+ This will help guide optimizations in outer loops. */
+
+ if (uid_luid[regno_first_uid[regno]] > INSN_LUID (loop_start))
+ /* This is the old insn before all the moved insns.
+ We can't use the moved insn because it is out of range
+ in uid_luid. Only the old insns have luids. */
+ regno_first_uid[regno] = INSN_UID (loop_start);
+ if (uid_luid[regno_last_uid[regno]] < INSN_LUID (end))
+ regno_last_uid[regno] = INSN_UID (end);
+
+ /* Combine with this moved insn any other matching movables. */
+
+ if (! m->partial)
+ for (m1 = movables; m1; m1 = m1->next)
+ if (m1->match == m)
+ {
+ rtx temp;
+
+ /* Schedule the reg loaded by M1
+		       for replacement so that it shares the reg of M.
+		       If the modes differ (only possible in restricted
+		       circumstances), make a SUBREG.  */
+ if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
+ reg_map[m1->regno] = m->set_dest;
+ else
+ reg_map[m1->regno]
+ = gen_lowpart_common (GET_MODE (m1->set_dest),
+ m->set_dest);
+
+ /* Get rid of the matching insn
+ and prevent further processing of it. */
+ m1->done = 1;
+
+		    /* If library call, delete all insns except the last,
+		       which is deleted below.  */
+ if (temp = find_reg_note (m1->insn, REG_RETVAL,
+ NULL_RTX))
+ {
+ for (temp = XEXP (temp, 0); temp != m1->insn;
+ temp = NEXT_INSN (temp))
+ delete_insn (temp);
+ }
+ delete_insn (m1->insn);
+
+ /* Any other movable that loads the same register
+ MUST be moved. */
+ already_moved[m1->regno] = 1;
+
+ /* The reg merged here is now invariant,
+ if the reg it matches is invariant. */
+ if (! m->partial)
+ n_times_set[m1->regno] = 0;
+ }
+ }
+ else if (loop_dump_stream)
+ fprintf (loop_dump_stream, "not desirable");
+ }
+ else if (loop_dump_stream && !m->match)
+ fprintf (loop_dump_stream, "not safe");
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "\n");
+ }
+
+ if (new_start == 0)
+ new_start = loop_start;
+
+ /* Go through all the instructions in the loop, making
+ all the register substitutions scheduled in REG_MAP. */
+ for (p = new_start; p != end; p = NEXT_INSN (p))
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ {
+ replace_regs (PATTERN (p), reg_map, nregs, 0);
+ replace_regs (REG_NOTES (p), reg_map, nregs, 0);
+ INSN_CODE (p) = -1;
+ }
+}
+
+#if 0
+/* Scan X and replace the address of any MEM in it with ADDR.
+ REG is the address that MEM should have before the replacement. */
+
+static void
+replace_call_address (x, reg, addr)
+ rtx x, reg, addr;
+{
+ register enum rtx_code code;
+ register int i;
+ register char *fmt;
+
+ if (x == 0)
+ return;
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case REG:
+ return;
+
+ case SET:
+ /* Short cut for very common case. */
+ replace_call_address (XEXP (x, 1), reg, addr);
+ return;
+
+ case CALL:
+ /* Short cut for very common case. */
+ replace_call_address (XEXP (x, 0), reg, addr);
+ return;
+
+ case MEM:
+ /* If this MEM uses a reg other than the one we expected,
+ something is wrong. */
+ if (XEXP (x, 0) != reg)
+ abort ();
+ XEXP (x, 0) = addr;
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ replace_call_address (XEXP (x, i), reg, addr);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ replace_call_address (XVECEXP (x, i, j), reg, addr);
+ }
+ }
+}
+#endif
+
+/* Return the number of memory refs to addresses that vary
+ in the rtx X. */
+
+static int
+count_nonfixed_reads (x)
+ rtx x;
+{
+ register enum rtx_code code;
+ register int i;
+ register char *fmt;
+ int value;
+
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case REG:
+ return 0;
+
+ case MEM:
+ return ((invariant_p (XEXP (x, 0)) != 1)
+ + count_nonfixed_reads (XEXP (x, 0)));
+ }
+
+ value = 0;
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ value += count_nonfixed_reads (XEXP (x, i));
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ value += count_nonfixed_reads (XVECEXP (x, i, j));
+ }
+ }
+ return value;
+}
+
+
+#if 0
+/* P is an instruction that sets a register to the result of a ZERO_EXTEND.
+ Replace it with an instruction to load just the low bytes
+ if the machine supports such an instruction,
+ and insert above LOOP_START an instruction to clear the register. */
+
+static void
+constant_high_bytes (p, loop_start)
+ rtx p, loop_start;
+{
+ register rtx new;
+ register int insn_code_number;
+
+ /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
+ to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
+
+ new = gen_rtx (SET, VOIDmode,
+ gen_rtx (STRICT_LOW_PART, VOIDmode,
+ gen_rtx (SUBREG, GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
+ SET_DEST (PATTERN (p)),
+ 0)),
+ XEXP (SET_SRC (PATTERN (p)), 0));
+ insn_code_number = recog (new, p);
+
+ if (insn_code_number)
+ {
+ register int i;
+
+ /* Clear destination register before the loop. */
+ emit_insn_before (gen_rtx (SET, VOIDmode,
+ SET_DEST (PATTERN (p)),
+ const0_rtx),
+ loop_start);
+
+ /* Inside the loop, just load the low part. */
+ PATTERN (p) = new;
+ }
+}
+#endif
+
+/* Scan a loop setting the variables `unknown_address_altered',
+   `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
+ and `loop_has_volatile'.
+ Also, fill in the array `loop_store_mems'. */
+
+static void
+prescan_loop (start, end)
+ rtx start, end;
+{
+ register int level = 1;
+ register rtx insn;
+
+ unknown_address_altered = 0;
+ loop_has_call = 0;
+ loop_has_volatile = 0;
+ loop_store_mems_idx = 0;
+
+ num_mem_sets = 0;
+ loops_enclosed = 1;
+ loop_continue = 0;
+
+ for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ {
+ ++level;
+ /* Count number of loops contained in this one. */
+ loops_enclosed++;
+ }
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ {
+ --level;
+ if (level == 0)
+ {
+ end = insn;
+ break;
+ }
+ }
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
+ {
+ if (level == 1)
+ loop_continue = insn;
+ }
+ }
+ else if (GET_CODE (insn) == CALL_INSN)
+ {
+ unknown_address_altered = 1;
+ loop_has_call = 1;
+ }
+ else
+ {
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ {
+ if (volatile_refs_p (PATTERN (insn)))
+ loop_has_volatile = 1;
+
+ note_stores (PATTERN (insn), note_addr_stored);
+ }
+ }
+ }
+}
+
+/* Scan the function looking for loops. Record the start and end of each loop.
+ Also mark as invalid loops any loops that contain a setjmp or are branched
+ to from outside the loop. */
+
+static void
+find_and_verify_loops (f)
+ rtx f;
+{
+ rtx insn, label;
+ int current_loop = -1;
+ int next_loop = -1;
+ int loop;
+
+ /* If there are jumps to undefined labels,
+ treat them as jumps out of any/all loops.
+ This also avoids writing past end of tables when there are no loops. */
+ uid_loop_num[0] = -1;
+
+ /* Find boundaries of loops, mark which loops are contained within
+ loops, and invalidate loops that have setjmp. */
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ switch (NOTE_LINE_NUMBER (insn))
+ {
+ case NOTE_INSN_LOOP_BEG:
+ loop_number_loop_starts[++next_loop] = insn;
+ loop_number_loop_ends[next_loop] = 0;
+ loop_outer_loop[next_loop] = current_loop;
+ loop_invalid[next_loop] = 0;
+ loop_number_exit_labels[next_loop] = 0;
+ current_loop = next_loop;
+ break;
+
+ case NOTE_INSN_SETJMP:
+ /* In this case, we must invalidate our current loop and any
+ enclosing loop. */
+ for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
+ {
+ loop_invalid[loop] = 1;
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "\nLoop at %d ignored due to setjmp.\n",
+ INSN_UID (loop_number_loop_starts[loop]));
+ }
+ break;
+
+ case NOTE_INSN_LOOP_END:
+ if (current_loop == -1)
+ abort ();
+
+ loop_number_loop_ends[current_loop] = insn;
+ current_loop = loop_outer_loop[current_loop];
+ break;
+
+ }
+
+ /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
+ enclosing loop, but this doesn't matter. */
+ uid_loop_num[INSN_UID (insn)] = current_loop;
+ }
+
+ /* Any loop containing a label used in an initializer must be invalidated,
+ because it can be jumped into from anywhere. */
+
+ for (label = forced_labels; label; label = XEXP (label, 1))
+ {
+ int loop_num;
+
+ for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
+ loop_num != -1;
+ loop_num = loop_outer_loop[loop_num])
+ loop_invalid[loop_num] = 1;
+ }
+
+ /* Now scan all insn's in the function. If any JUMP_INSN branches into a
+ loop that it is not contained within, that loop is marked invalid.
+ If any INSN or CALL_INSN uses a label's address, then the loop containing
+ that label is marked invalid, because it could be jumped into from
+ anywhere.
+
+ Also look for blocks of code ending in an unconditional branch that
+ exits the loop. If such a block is surrounded by a conditional
+ branch around the block, move the block elsewhere (see below) and
+ invert the jump to point to the code block. This may eliminate a
+ label in our loop and will simplify processing by both us and a
+ possible second cse pass. */
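+
+/* Schematically (a hypothetical sketch): a sequence like
+	(cond) jump L1 ; jump Lexit ; L1: ...
+   inside the loop is rewritten by inverting the conditional jump to
+   target the exit block at its new location, which can remove the
+   label L1 from the loop entirely.  */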
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ int this_loop_num = uid_loop_num[INSN_UID (insn)];
+
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
+ {
+ rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
+ if (note)
+ {
+ int loop_num;
+
+ for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
+ loop_num != -1;
+ loop_num = loop_outer_loop[loop_num])
+ loop_invalid[loop_num] = 1;
+ }
+ }
+
+ if (GET_CODE (insn) != JUMP_INSN)
+ continue;
+
+ mark_loop_jump (PATTERN (insn), this_loop_num);
+
+ /* See if this is an unconditional branch outside the loop. */
+ if (this_loop_num != -1
+ && (GET_CODE (PATTERN (insn)) == RETURN
+ || (simplejump_p (insn)
+ && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
+ != this_loop_num)))
+ && get_max_uid () < max_uid_for_loop)
+ {
+ rtx p;
+ rtx our_next = next_real_insn (insn);
+
+ /* Go backwards until we reach the start of the loop, a label,
+ or a JUMP_INSN. */
+ for (p = PREV_INSN (insn);
+ GET_CODE (p) != CODE_LABEL
+ && ! (GET_CODE (p) == NOTE
+ && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
+ && GET_CODE (p) != JUMP_INSN;
+ p = PREV_INSN (p))
+ ;
+
+ /* If we stopped on a JUMP_INSN to the next insn after INSN,
+ we have a block of code to try to move.
+
+ We look backward and then forward from the target of INSN
+ to find a BARRIER at the same loop depth as the target.
+ If we find such a BARRIER, we make a new label for the start
+ of the block, invert the jump in P and point it to that label,
+ and move the block of code to the spot we found. */
+
+ if (GET_CODE (p) == JUMP_INSN
+ && JUMP_LABEL (p) != 0
+ /* Just ignore jumps to labels that were never emitted.
+ These always indicate compilation errors. */
+ && INSN_UID (JUMP_LABEL (p)) != 0
+ && condjump_p (p)
+ && ! simplejump_p (p)
+ && next_real_insn (JUMP_LABEL (p)) == our_next)
+ {
+ rtx target
+ = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
+ int target_loop_num = uid_loop_num[INSN_UID (target)];
+ rtx loc;
+
+ for (loc = target; loc; loc = PREV_INSN (loc))
+ if (GET_CODE (loc) == BARRIER
+ && uid_loop_num[INSN_UID (loc)] == target_loop_num)
+ break;
+
+ if (loc == 0)
+ for (loc = target; loc; loc = NEXT_INSN (loc))
+ if (GET_CODE (loc) == BARRIER
+ && uid_loop_num[INSN_UID (loc)] == target_loop_num)
+ break;
+
+ if (loc)
+ {
+ rtx cond_label = JUMP_LABEL (p);
+ rtx new_label = get_label_after (p);
+
+ /* Ensure our label doesn't go away. */
+ LABEL_NUSES (cond_label)++;
+
+ /* Verify that uid_loop_num is large enough and that
+ we can invert P. */
+ if (invert_jump (p, new_label))
+ {
+ rtx q, r;
+
+ /* Include the BARRIER after INSN and copy the
+ block after LOC. */
+ new_label = squeeze_notes (new_label, NEXT_INSN (insn));
+ reorder_insns (new_label, NEXT_INSN (insn), loc);
+
+ /* All those insns are now in TARGET_LOOP_NUM. */
+ for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
+ q = NEXT_INSN (q))
+ uid_loop_num[INSN_UID (q)] = target_loop_num;
+
+ /* The label jumped to by INSN is no longer a loop exit.
+ Unless INSN does not have a label (e.g., it is a
+ RETURN insn), search loop_number_exit_labels to find
+ its label_ref, and remove it. Also turn off
+ LABEL_OUTSIDE_LOOP_P bit. */
+ if (JUMP_LABEL (insn))
+ {
+ for (q = 0,
+ r = loop_number_exit_labels[this_loop_num];
+ r; q = r, r = LABEL_NEXTREF (r))
+ if (XEXP (r, 0) == JUMP_LABEL (insn))
+ {
+ LABEL_OUTSIDE_LOOP_P (r) = 0;
+ if (q)
+ LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
+ else
+ loop_number_exit_labels[this_loop_num]
+ = LABEL_NEXTREF (r);
+ break;
+ }
+
+ /* If we didn't find it, then something is wrong. */
+ if (! r)
+ abort ();
+ }
+
+ /* P is now a jump outside the loop, so it must be put
+ in loop_number_exit_labels, and marked as such.
+ The easiest way to do this is to just call
+ mark_loop_jump again for P. */
+ mark_loop_jump (PATTERN (p), this_loop_num);
+
+ /* If INSN now jumps to the insn after it,
+ delete INSN. */
+ if (JUMP_LABEL (insn) != 0
+ && (next_real_insn (JUMP_LABEL (insn))
+ == next_real_insn (insn)))
+ delete_insn (insn);
+ }
+
+ /* Continue the loop after where the conditional
+ branch used to jump, since the only branch insn
+ in the block (if it still remains) is an inter-loop
+ branch and hence needs no processing. */
+ insn = NEXT_INSN (cond_label);
+
+ if (--LABEL_NUSES (cond_label) == 0)
+ delete_insn (cond_label);
+
+ /* This loop will be continued with NEXT_INSN (insn). */
+ insn = PREV_INSN (insn);
+ }
+ }
+ }
+ }
+}
+
+/* If any label in X jumps to a loop different from LOOP_NUM and any of the
+ loops it is contained in, mark the target loop invalid.
+
+ For speed, we assume that X is part of a pattern of a JUMP_INSN. */
+
+static void
+mark_loop_jump (x, loop_num)
+ rtx x;
+ int loop_num;
+{
+ int dest_loop;
+ int outer_loop;
+ int i;
+
+ switch (GET_CODE (x))
+ {
+ case PC:
+ case USE:
+ case CLOBBER:
+ case REG:
+ case MEM:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case RETURN:
+ return;
+
+ case CONST:
+ /* There could be a label reference in here. */
+ mark_loop_jump (XEXP (x, 0), loop_num);
+ return;
+
+ case PLUS:
+ case MINUS:
+ case MULT:
+ mark_loop_jump (XEXP (x, 0), loop_num);
+ mark_loop_jump (XEXP (x, 1), loop_num);
+ return;
+
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ mark_loop_jump (XEXP (x, 0), loop_num);
+ return;
+
+ case LABEL_REF:
+ dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
+
+ /* Link together all labels that branch outside the loop. This
+ is used by final_[bg]iv_value and the loop unrolling code. Also
+ mark this LABEL_REF so we know that this branch should predict
+ false. */
+
+ if (dest_loop != loop_num && loop_num != -1)
+ {
+ LABEL_OUTSIDE_LOOP_P (x) = 1;
+ LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
+ loop_number_exit_labels[loop_num] = x;
+ }
+
+ /* If this is inside a loop, but not in the current loop or one enclosed
+ by it, it invalidates at least one loop. */
+
+ if (dest_loop == -1)
+ return;
+
+ /* We must invalidate every nested loop containing the target of this
+ label, except those that also contain the jump insn. */
+
+ for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
+ {
+ /* Stop when we reach a loop that also contains the jump insn. */
+ for (outer_loop = loop_num; outer_loop != -1;
+ outer_loop = loop_outer_loop[outer_loop])
+ if (dest_loop == outer_loop)
+ return;
+
+ /* If we get here, we know we need to invalidate a loop. */
+ if (loop_dump_stream && ! loop_invalid[dest_loop])
+ fprintf (loop_dump_stream,
+ "\nLoop at %d ignored due to multiple entry points.\n",
+ INSN_UID (loop_number_loop_starts[dest_loop]));
+
+ loop_invalid[dest_loop] = 1;
+ }
+ return;
+
+ case SET:
+ /* If this is not setting pc, ignore. */
+ if (SET_DEST (x) == pc_rtx)
+ mark_loop_jump (SET_SRC (x), loop_num);
+ return;
+
+ case IF_THEN_ELSE:
+ mark_loop_jump (XEXP (x, 1), loop_num);
+ mark_loop_jump (XEXP (x, 2), loop_num);
+ return;
+
+ case PARALLEL:
+ case ADDR_VEC:
+ for (i = 0; i < XVECLEN (x, 0); i++)
+ mark_loop_jump (XVECEXP (x, 0, i), loop_num);
+ return;
+
+ case ADDR_DIFF_VEC:
+ for (i = 0; i < XVECLEN (x, 1); i++)
+ mark_loop_jump (XVECEXP (x, 1, i), loop_num);
+ return;
+
+ default:
+ /* Treat anything else (such as a symbol_ref)
+ as a branch out of this loop, but not into any loop. */
+
+ if (loop_num != -1)
+ {
+ LABEL_OUTSIDE_LOOP_P (x) = 1;
+ LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
+ loop_number_exit_labels[loop_num] = x;
+ }
+
+ return;
+ }
+}
+
+/* Return nonzero if there is a label in the range from
+   insn INSN to and including the insn whose luid is END.
+ INSN must have an assigned luid (i.e., it must not have
+ been previously created by loop.c). */
+
+static int
+labels_in_range_p (insn, end)
+ rtx insn;
+ int end;
+{
+ while (insn && INSN_LUID (insn) <= end)
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ return 1;
+ insn = NEXT_INSN (insn);
+ }
+
+ return 0;
+}
+
+/* Record that a memory reference X is being set. */
+
+static void
+note_addr_stored (x)
+ rtx x;
+{
+ register int i;
+
+ if (x == 0 || GET_CODE (x) != MEM)
+ return;
+
+ /* Count number of memory writes.
+ This affects heuristics in strength_reduce. */
+ num_mem_sets++;
+
+ /* BLKmode MEM means all memory is clobbered. */
+ if (GET_MODE (x) == BLKmode)
+ unknown_address_altered = 1;
+
+ if (unknown_address_altered)
+ return;
+
+ for (i = 0; i < loop_store_mems_idx; i++)
+ if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
+ && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
+ {
+ /* We are storing at the same address as previously noted. Save the
+ wider reference. */
+ if (GET_MODE_SIZE (GET_MODE (x))
+ > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
+ loop_store_mems[i] = x;
+ break;
+ }
+
+ if (i == NUM_STORES)
+ unknown_address_altered = 1;
+
+ else if (i == loop_store_mems_idx)
+ loop_store_mems[loop_store_mems_idx++] = x;
+}
+
+/* Return nonzero if the rtx X is invariant over the current loop.
+
+ The value is 2 if we refer to something only conditionally invariant.
+
+ If `unknown_address_altered' is nonzero, no memory ref is invariant.
+ Otherwise, a memory ref is invariant if it does not conflict with
+ anything stored in `loop_store_mems'. */
+
+int
+invariant_p (x)
+ register rtx x;
+{
+ register int i;
+ register enum rtx_code code;
+ register char *fmt;
+ int conditional = 0;
+
+ if (x == 0)
+ return 1;
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CONST:
+ return 1;
+
+ case LABEL_REF:
+ /* A LABEL_REF is normally invariant, however, if we are unrolling
+ loops, and this label is inside the loop, then it isn't invariant.
+ This is because each unrolled copy of the loop body will have
+ a copy of this label. If this was invariant, then an insn loading
+ the address of this label into a register might get moved outside
+ the loop, and then each loop body would end up using the same label.
+
+ We don't know the loop bounds here though, so just fail for all
+ labels. */
+ if (flag_unroll_loops)
+ return 0;
+ else
+ return 1;
+
+ case PC:
+ case CC0:
+ case UNSPEC_VOLATILE:
+ return 0;
+
+ case REG:
+ /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
+ since the reg might be set by initialization within the loop. */
+ if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
+ || x == arg_pointer_rtx)
+ return 1;
+ if (loop_has_call
+ && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
+ return 0;
+ if (n_times_set[REGNO (x)] < 0)
+ return 2;
+ return n_times_set[REGNO (x)] == 0;
+
+ case MEM:
+ /* Read-only items (such as constants in a constant pool) are
+ invariant if their address is. */
+ if (RTX_UNCHANGING_P (x))
+ break;
+
+ /* If we filled the table (or had a subroutine call), any location
+ in memory could have been clobbered. */
+ if (unknown_address_altered
+ /* Don't mess with volatile memory references. */
+ || MEM_VOLATILE_P (x))
+ return 0;
+
+ /* See if there is any dependence between a store and this load. */
+ for (i = loop_store_mems_idx - 1; i >= 0; i--)
+ if (true_dependence (loop_store_mems[i], x))
+ return 0;
+
+ /* It's not invalidated by a store in memory
+ but we must still verify the address is invariant. */
+ break;
+
+ case ASM_OPERANDS:
+ /* Don't mess with insns declared volatile. */
+ if (MEM_VOLATILE_P (x))
+ return 0;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ int tem = invariant_p (XEXP (x, i));
+ if (tem == 0)
+ return 0;
+ if (tem == 2)
+ conditional = 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ int tem = invariant_p (XVECEXP (x, i, j));
+ if (tem == 0)
+ return 0;
+ if (tem == 2)
+ conditional = 1;
+ }
+
+ }
+ }
+
+ return 1 + conditional;
+}
+
+
+/* Return nonzero if all the insns in the loop that set REG
+ are INSN and the immediately following insns,
+ and if each of those insns sets REG in an invariant way
+ (not counting uses of REG in them).
+
+ The value is 2 if some of these insns are only conditionally invariant.
+
+ We assume that INSN itself is the first set of REG
+ and that its source is invariant. */
+
+static int
+consec_sets_invariant_p (reg, n_sets, insn)
+ int n_sets;
+ rtx reg, insn;
+{
+ register rtx p = insn;
+ register int regno = REGNO (reg);
+ rtx temp;
+ /* Number of sets we have to insist on finding after INSN. */
+ int count = n_sets - 1;
+ int old = n_times_set[regno];
+ int value = 0;
+ int this;
+
+ /* If N_SETS hit the limit, we can't rely on its value. */
+ if (n_sets == 127)
+ return 0;
+
+ n_times_set[regno] = 0;
+
+ while (count > 0)
+ {
+ register enum rtx_code code;
+ rtx set;
+
+ p = NEXT_INSN (p);
+ code = GET_CODE (p);
+
+      /* If library call, skip to end of it.  */
+ if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
+ p = XEXP (temp, 0);
+
+ this = 0;
+ if (code == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) == regno)
+ {
+ this = invariant_p (SET_SRC (set));
+ if (this != 0)
+ value |= this;
+ else if (temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
+ {
+ /* If this is a libcall, then any invariant REG_EQUAL note is OK.
+ If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
+ notes are OK. */
+ this = (CONSTANT_P (XEXP (temp, 0))
+ || (find_reg_note (p, REG_RETVAL, NULL_RTX)
+ && invariant_p (XEXP (temp, 0))));
+ if (this != 0)
+ value |= this;
+ }
+ }
+ if (this != 0)
+ count--;
+ else if (code != NOTE)
+ {
+ n_times_set[regno] = old;
+ return 0;
+ }
+ }
+
+ n_times_set[regno] = old;
+ /* If invariant_p ever returned 2, we return 2. */
+ return 1 + (value & 2);
+}
+
+#if 0
+/* I don't think this condition is sufficient to allow INSN
+ to be moved, so we no longer test it. */
+
+/* Return 1 if all insns in the basic block of INSN and following INSN
+ that set REG are invariant according to TABLE. */
+
+static int
+all_sets_invariant_p (reg, insn, table)
+ rtx reg, insn;
+ short *table;
+{
+ register rtx p = insn;
+ register int regno = REGNO (reg);
+
+ while (1)
+ {
+ register enum rtx_code code;
+ p = NEXT_INSN (p);
+ code = GET_CODE (p);
+ if (code == CODE_LABEL || code == JUMP_INSN)
+ return 1;
+ if (code == INSN && GET_CODE (PATTERN (p)) == SET
+ && GET_CODE (SET_DEST (PATTERN (p))) == REG
+ && REGNO (SET_DEST (PATTERN (p))) == regno)
+ {
+ if (!invariant_p (SET_SRC (PATTERN (p)), table))
+ return 0;
+ }
+ }
+}
+#endif /* 0 */
+
+/* Look at all uses (not sets) of registers in X. For each, if it is
+ the single use, set USAGE[REGNO] to INSN; if there was a previous use in
+ a different insn, set USAGE[REGNO] to const0_rtx. */
+
+static void
+find_single_use_in_loop (insn, x, usage)
+ rtx insn;
+ rtx x;
+ rtx *usage;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt = GET_RTX_FORMAT (code);
+ int i, j;
+
+ if (code == REG)
+ usage[REGNO (x)]
+ = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
+ ? const0_rtx : insn;
+
+ else if (code == SET)
+ {
+ /* Don't count SET_DEST if it is a REG; otherwise count things
+ in SET_DEST because if a register is partially modified, it won't
+ show up as a potential movable so we don't care how USAGE is set
+ for it. */
+ if (GET_CODE (SET_DEST (x)) != REG)
+ find_single_use_in_loop (insn, SET_DEST (x), usage);
+ find_single_use_in_loop (insn, SET_SRC (x), usage);
+ }
+ else
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e' && XEXP (x, i) != 0)
+ find_single_use_in_loop (insn, XEXP (x, i), usage);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
+ }
+}
+
+/* Increment N_TIMES_SET at the index of each register
+ that is modified by an insn between FROM and TO.
+ If the value of an element of N_TIMES_SET becomes 127 or more,
+ stop incrementing it, to avoid overflow.
+
+ Store in SINGLE_USAGE[I] the single insn in which register I is
+ used, if it is only used once. Otherwise, it is set to 0 (for no
+ uses) or const0_rtx for more than one use. This parameter may be zero,
+ in which case this processing is not done.
+
+   Store in *COUNT_PTR the number of actual instructions
+ in the loop. We use this to decide what is worth moving out. */
+
+/* last_set[n] is nonzero iff reg n has been set in the current basic block.
+ In that case, it is the insn that last set reg n. */
+
+static void
+count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
+ register rtx from, to;
+ char *may_not_move;
+ rtx *single_usage;
+ int *count_ptr;
+ int nregs;
+{
+ register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
+ register rtx insn;
+ register int count = 0;
+ register rtx dest;
+
+ bzero ((char *) last_set, nregs * sizeof (rtx));
+ for (insn = from; insn != to; insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ ++count;
+
+ /* If requested, record registers that have exactly one use. */
+ if (single_usage)
+ {
+ find_single_use_in_loop (insn, PATTERN (insn), single_usage);
+
+ /* Include uses in REG_EQUAL notes. */
+ if (REG_NOTES (insn))
+ find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
+ }
+
+ if (GET_CODE (PATTERN (insn)) == CLOBBER
+ && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
+ /* Don't move a reg that has an explicit clobber.
+ We might do so sometimes, but it's not worth the pain. */
+ may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
+
+ if (GET_CODE (PATTERN (insn)) == SET
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ {
+ dest = SET_DEST (PATTERN (insn));
+ while (GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+ if (GET_CODE (dest) == REG)
+ {
+ register int regno = REGNO (dest);
+ /* If this is the first setting of this reg
+ in current basic block, and it was set before,
+ it must be set in two basic blocks, so it cannot
+ be moved out of the loop. */
+ if (n_times_set[regno] > 0 && last_set[regno] == 0)
+ may_not_move[regno] = 1;
+ /* If this is not first setting in current basic block,
+ see if reg was used in between previous one and this.
+ If so, neither one can be moved. */
+ if (last_set[regno] != 0
+ && reg_used_between_p (dest, last_set[regno], insn))
+ may_not_move[regno] = 1;
+ if (n_times_set[regno] < 127)
+ ++n_times_set[regno];
+ last_set[regno] = insn;
+ }
+ }
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ {
+ register rtx x = XVECEXP (PATTERN (insn), 0, i);
+ if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
+ /* Don't move a reg that has an explicit clobber.
+ It's not worth the pain to try to do it correctly. */
+ may_not_move[REGNO (XEXP (x, 0))] = 1;
+
+ if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
+ {
+ dest = SET_DEST (x);
+ while (GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+ if (GET_CODE (dest) == REG)
+ {
+ register int regno = REGNO (dest);
+ if (n_times_set[regno] > 0 && last_set[regno] == 0)
+ may_not_move[regno] = 1;
+ if (last_set[regno] != 0
+ && reg_used_between_p (dest, last_set[regno], insn))
+ may_not_move[regno] = 1;
+ if (n_times_set[regno] < 127)
+ ++n_times_set[regno];
+ last_set[regno] = insn;
+ }
+ }
+ }
+ }
+ }
+
+ if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
+ bzero ((char *) last_set, nregs * sizeof (rtx));
+ }
+ *count_ptr = count;
+}
+
+/* Given a loop that is bounded by LOOP_START and LOOP_END
+ and that is entered at SCAN_START,
+ return 1 if the register set in SET (contained in insn INSN) is used by
+ any insn that precedes INSN in cyclic order, starting
+ from the loop entry point.
+
+ We don't want to use INSN_LUID here because if we restrict INSN to those
+ that have a valid INSN_LUID, it means we cannot move an invariant out
+ from an inner loop past two loops. */
+
+static int
+loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
+ rtx set, insn, loop_start, scan_start, loop_end;
+{
+ rtx reg = SET_DEST (set);
+ rtx p;
+
+ /* Scan forward checking for register usage. If we hit INSN, we
+ are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
+ for (p = scan_start; p != insn; p = NEXT_INSN (p))
+ {
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && reg_overlap_mentioned_p (reg, PATTERN (p)))
+ return 1;
+
+ if (p == loop_end)
+ p = loop_start;
+ }
+
+ return 0;
+}
+
+/* A "basic induction variable" or biv is a pseudo reg that is set
+ (within this loop) only by incrementing or decrementing it. */
+/* A "general induction variable" or giv is a pseudo reg whose
+ value is a linear function of a biv. */
+
+/* Bivs are recognized by `basic_induction_var';
+ Givs by `general_induction_var'. */
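+
+/* For example, in a hypothetical source fragment
+
+ for (i = 0; i < n; i++)
+   x += a[i];
+
+ the counter `i' is a biv, since it is set only by the increment
+ i = i + 1, and the address a + 4*i computed for `a[i]' is a giv:
+ the linear function 4*i + a of the biv (assuming 4-byte array
+ elements). */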
+
+/* Indexed by register number, indicates whether or not register is an
+ induction variable, and if so what type. */
+
+enum iv_mode *reg_iv_type;
+
+/* Indexed by register number, contains pointer to `struct induction'
+ if register is an induction variable. This holds general info for
+ all induction variables. */
+
+struct induction **reg_iv_info;
+
+/* Indexed by register number, contains pointer to `struct iv_class'
+ if register is a basic induction variable. This holds info describing
+ the class (a related group) of induction variables that the biv belongs
+ to. */
+
+struct iv_class **reg_biv_class;
+
+/* The head of a list which links together (via the next field)
+ every iv class for the current loop. */
+
+struct iv_class *loop_iv_list;
+
+/* Communication with routines called via `note_stores'. */
+
+static rtx note_insn;
+
+/* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
+
+static rtx addr_placeholder;
+
+/* ??? Unfinished optimizations, and possible future optimizations,
+ for the strength reduction code. */
+
+/* ??? There is one more optimization you might be interested in doing: to
+ allocate pseudo registers for frequently-accessed memory locations.
+ If the same memory location is referenced each time around, it might
+ be possible to copy it into a register before the loop and back out after.
+ This is especially useful when the memory location is a variable which
+ is in a stack slot because somewhere its address is taken. If the
+ loop doesn't contain a function call and the variable isn't volatile,
+ it is safe to keep the value in a register for the duration of the
+ loop. One tricky thing is that the copying of the value back from the
+ register has to be done on all exits from the loop. You need to check that
+ all the exits from the loop go to the same place. */
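+
+/* A hypothetical source-level sketch of that transformation, assuming
+ the loop contains no calls and *p is not volatile:
+
+ while (cond)
+   *p = *p + 1;
+
+ becomes
+
+ reg = *p;
+ while (cond)
+   reg = reg + 1;
+ *p = reg;
+
+ where the copy back into *p must be emitted on every exit from the
+ loop. */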
+
+/* ??? The interaction of biv elimination, and recognition of 'constant'
+ bivs, may cause problems. */
+
+/* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
+ performance problems.
+
+ Perhaps don't eliminate things that can be combined with an addressing
+ mode. Find all givs that have the same biv, mult_val, and add_val;
+ then for each giv, check to see if its only use dies in a following
+ memory address. If so, generate a new memory address and check to see
+ if it is valid. If it is valid, then store the modified memory address,
+ otherwise, mark the giv as not done so that it will get its own iv. */
+
+/* ??? Could try to optimize branches when it is known that a biv is always
+ positive. */
+
+/* ??? When replacing a biv in a compare insn, we should replace it with the
+ closest giv so that an optimized branch can still be recognized by the
+ combiner, e.g. the VAX acb insn. */
+
+/* ??? Many of the checks involving uid_luid could be simplified if regscan
+ was rerun in loop_optimize whenever a register was added or moved.
+ Also, some of the optimizations could be a little less conservative. */
+
+/* Perform strength reduction and induction variable elimination. */
+
+/* Pseudo registers created during this function will be beyond the last
+ valid index in several tables including n_times_set and regno_last_uid.
+ This does not cause a problem here, because the added registers cannot be
+ givs outside of their loop, and hence will never be reconsidered.
+ But scan_loop must check regnos to make sure they are in bounds. */
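+
+/* A hypothetical source-level illustration of both transformations:
+
+ for (i = 0; i < 100; i++)
+   a[i] = 0;
+
+ The multiply hidden in the address a + 4*i (a giv of the biv i) is
+ strength-reduced to a new register p that is incremented by 4 each
+ iteration, as if the source had been
+
+ for (p = a; p < a + 100; p++)
+   *p = 0;
+
+ and the biv i, no longer needed once the endtest is rewritten in
+ terms of p, is eliminated. */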
+
+static void
+strength_reduce (scan_start, end, loop_top, insn_count,
+ loop_start, loop_end)
+ rtx scan_start;
+ rtx end;
+ rtx loop_top;
+ int insn_count;
+ rtx loop_start;
+ rtx loop_end;
+{
+ rtx p;
+ rtx set;
+ rtx inc_val;
+ rtx mult_val;
+ rtx dest_reg;
+ /* This is 1 if the current insn is not known to be executed at least
+ once for every loop iteration. */
+ int not_every_iteration = 0;
+ /* This is 1 if the current insn may be executed more than once for every
+ loop iteration. */
+ int maybe_multiple = 0;
+ /* Temporary list pointers for traversing loop_iv_list. */
+ struct iv_class *bl, **backbl;
+ /* Ratio of extra register life span we can justify
+ for saving an instruction. More if loop doesn't call subroutines
+ since in that case saving an insn makes more difference
+ and more registers are available. */
+ /* ??? could set this to last value of threshold in move_movables */
+ int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
+ /* Map of pseudo-register replacements. */
+ rtx *reg_map;
+ int call_seen;
+ rtx test;
+ rtx end_insert_before;
+ int loop_depth = 0;
+
+ reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
+ * sizeof (enum iv_mode));
+ bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode));
+ reg_iv_info = (struct induction **)
+ alloca (max_reg_before_loop * sizeof (struct induction *));
+ bzero ((char *) reg_iv_info, (max_reg_before_loop
+ * sizeof (struct induction *)));
+ reg_biv_class = (struct iv_class **)
+ alloca (max_reg_before_loop * sizeof (struct iv_class *));
+ bzero ((char *) reg_biv_class, (max_reg_before_loop
+ * sizeof (struct iv_class *)));
+
+ loop_iv_list = 0;
+ addr_placeholder = gen_reg_rtx (Pmode);
+
+ /* Save insn immediately after the loop_end. Insns inserted after loop_end
+ must be put before this insn, so that they will appear in the right
+ order (i.e. loop order).
+
+ If loop_end is the end of the current function, then emit a
+ NOTE_INSN_DELETED after loop_end and set end_insert_before to the
+ dummy note insn. */
+ if (NEXT_INSN (loop_end) != 0)
+ end_insert_before = NEXT_INSN (loop_end);
+ else
+ end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
+
+ /* Scan through loop to find all possible bivs. */
+
+ p = scan_start;
+ while (1)
+ {
+ p = NEXT_INSN (p);
+ /* At end of a straight-in loop, we are done.
+ At end of a loop entered at the bottom, scan the top. */
+ if (p == scan_start)
+ break;
+ if (p == end)
+ {
+ if (loop_top != 0)
+ p = loop_top;
+ else
+ break;
+ if (p == scan_start)
+ break;
+ }
+
+ if (GET_CODE (p) == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG)
+ {
+ dest_reg = SET_DEST (set);
+ if (REGNO (dest_reg) < max_reg_before_loop
+ && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
+ && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
+ {
+ if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
+ dest_reg, p, &inc_val, &mult_val))
+ {
+ /* It is a possible basic induction variable.
+ Create and initialize an induction structure for it. */
+
+ struct induction *v
+ = (struct induction *) alloca (sizeof (struct induction));
+
+ record_biv (v, p, dest_reg, inc_val, mult_val,
+ not_every_iteration, maybe_multiple);
+ reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
+ }
+ else if (REGNO (dest_reg) < max_reg_before_loop)
+ reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
+ }
+ }
+
+ /* Past CODE_LABEL, we get to insns that may be executed multiple
+ times. The only way we can be sure that they can't is if every
+ jump insn between here and the end of the loop either
+ returns, exits the loop, or is a forward jump. */
+
+ if (GET_CODE (p) == CODE_LABEL)
+ {
+ rtx insn = p;
+
+ maybe_multiple = 0;
+
+ while (1)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == scan_start)
+ break;
+ if (insn == end)
+ {
+ if (loop_top != 0)
+ insn = loop_top;
+ else
+ break;
+ if (insn == scan_start)
+ break;
+ }
+
+ if (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) != RETURN
+ && (! condjump_p (insn)
+ || (JUMP_LABEL (insn) != 0
+ && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
+ || INSN_UID (insn) >= max_uid_for_loop
+ || (INSN_LUID (JUMP_LABEL (insn))
+ < INSN_LUID (insn))))))
+ {
+ maybe_multiple = 1;
+ break;
+ }
+ }
+ }
+
+ /* Past a label or a jump, we get to insns for which we can't count
+ on whether or how many times they will be executed during each
+ iteration. */
+ /* This code appears in three places, once in scan_loop, and twice
+ in strength_reduce. */
+ if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
+ /* If we enter the loop in the middle, and scan around to the
+ beginning, don't set not_every_iteration for that.
+ This can be any kind of jump, since we want to know if insns
+ will be executed if the loop is executed. */
+ && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
+ && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
+ || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
+ not_every_iteration = 1;
+
+ else if (GET_CODE (p) == NOTE)
+ {
+ /* At the virtual top of a converted loop, insns are again known to
+ be executed each iteration: logically, the loop begins here
+ even though the exit code has been duplicated. */
+ if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
+ not_every_iteration = 0;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+
+ /* Unlike in the code motion pass where MAYBE_NEVER indicates that
+ an insn may never be executed, NOT_EVERY_ITERATION indicates whether
+ or not an insn is known to be executed each iteration of the
+ loop, whether or not any iterations are known to occur.
+
+ Therefore, if we have just passed a label and have no more labels
+ between here and the test insn of the loop, we know these insns
+ will be executed each iteration. This can also happen if we
+ have just passed a jump, for example, when there are nested loops. */
+
+ if (not_every_iteration && GET_CODE (p) == CODE_LABEL
+ && no_labels_between_p (p, loop_end))
+ not_every_iteration = 0;
+ }
+
+ /* Scan loop_iv_list to remove all regs that proved not to be bivs.
+ Make a sanity check against n_times_set. */
+ for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
+ {
+ if (reg_iv_type[bl->regno] != BASIC_INDUCT
+ /* The above happens if the register was modified via a subreg, etc.,
+ in which case it must not be recognized as a basic induction var: */
+ || n_times_set[bl->regno] != bl->biv_count
+ /* If it was never incremented, it is an invariant that we decided not
+ to move. So leave it alone. */
+ || ! bl->incremented)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
+ bl->regno,
+ (reg_iv_type[bl->regno] != BASIC_INDUCT
+ ? "not induction variable"
+ : (! bl->incremented ? "never incremented"
+ : "count error")));
+
+ reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
+ *backbl = bl->next;
+ }
+ else
+ {
+ backbl = &bl->next;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
+ }
+ }
+
+ /* Exit if there are no bivs. */
+ if (! loop_iv_list)
+ {
+ /* We can still unroll the loop anyway, but indicate that there is no
+ strength reduction info available. */
+ if (flag_unroll_loops)
+ unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
+
+ return;
+ }
+
+ /* Find initial value for each biv by searching backwards from loop_start,
+ halting at first label. Also record any test condition. */
+
+ call_seen = 0;
+ for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
+ {
+ note_insn = p;
+
+ if (GET_CODE (p) == CALL_INSN)
+ call_seen = 1;
+
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ note_stores (PATTERN (p), record_initial);
+
+ /* Record any test of a biv that branches around the loop if there is no
+ store between it and the start of the loop. We only care about tests
+ with constants and registers, and only certain of those. */
+ if (GET_CODE (p) == JUMP_INSN
+ && JUMP_LABEL (p) != 0
+ && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
+ && (test = get_condition_for_loop (p)) != 0
+ && GET_CODE (XEXP (test, 0)) == REG
+ && REGNO (XEXP (test, 0)) < max_reg_before_loop
+ && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
+ && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
+ && bl->init_insn == 0)
+ {
+ /* If an NE test, we have an initial value! */
+ if (GET_CODE (test) == NE)
+ {
+ bl->init_insn = p;
+ bl->init_set = gen_rtx (SET, VOIDmode,
+ XEXP (test, 0), XEXP (test, 1));
+ }
+ else
+ bl->initial_test = test;
+ }
+ }
+
+ /* Look at each biv and see if we can say anything better about its
+ initial value from any initializing insns set up above. (This is done
+ in two passes to avoid missing SETs in a PARALLEL.) */
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ rtx src;
+
+ if (! bl->init_insn)
+ continue;
+
+ src = SET_SRC (bl->init_set);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Biv %d initialized at insn %d: initial value ",
+ bl->regno, INSN_UID (bl->init_insn));
+
+ if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
+ || GET_MODE (src) == VOIDmode)
+ && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
+ {
+ bl->initial_value = src;
+
+ if (loop_dump_stream)
+ {
+ if (GET_CODE (src) == CONST_INT)
+ fprintf (loop_dump_stream, "%d\n", INTVAL (src));
+ else
+ {
+ print_rtl (loop_dump_stream, src);
+ fprintf (loop_dump_stream, "\n");
+ }
+ }
+ }
+ else
+ {
+ /* The biv's initial value is not a simple move,
+ so let it keep its initial value of "itself". */
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "is complex\n");
+ }
+ }
+
+ /* Search the loop for general induction variables. */
+
+ /* A register is a giv if: it is only set once, it is a function of a
+ biv and a constant (or invariant), and it is not a biv. */
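+
+ /* For instance (a hypothetical source-level example), an insn computing
+ j = 4*i + 1, where i is a biv and j is set nowhere else in the loop,
+ makes j a giv. */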
+
+ not_every_iteration = 0;
+ loop_depth = 0;
+ p = scan_start;
+ while (1)
+ {
+ p = NEXT_INSN (p);
+ /* At end of a straight-in loop, we are done.
+ At end of a loop entered at the bottom, scan the top. */
+ if (p == scan_start)
+ break;
+ if (p == end)
+ {
+ if (loop_top != 0)
+ p = loop_top;
+ else
+ break;
+ if (p == scan_start)
+ break;
+ }
+
+ /* Look for a general induction variable in a register. */
+ if (GET_CODE (p) == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG
+ && ! may_not_optimize[REGNO (SET_DEST (set))])
+ {
+ rtx src_reg;
+ rtx add_val;
+ rtx mult_val;
+ int benefit;
+ rtx regnote = 0;
+
+ dest_reg = SET_DEST (set);
+ if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
+ continue;
+
+ if (/* SET_SRC is a giv. */
+ ((benefit = general_induction_var (SET_SRC (set),
+ &src_reg, &add_val,
+ &mult_val))
+ /* Equivalent expression is a giv. */
+ || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
+ && (benefit = general_induction_var (XEXP (regnote, 0),
+ &src_reg,
+ &add_val, &mult_val))))
+ /* Don't try to handle any regs made by loop optimization.
+ We have nothing on them in regno_first_uid, etc. */
+ && REGNO (dest_reg) < max_reg_before_loop
+ /* Don't recognize a BASIC_INDUCT_VAR here. */
+ && dest_reg != src_reg
+ /* This must be the only place where the register is set. */
+ && (n_times_set[REGNO (dest_reg)] == 1
+ /* or all sets must be consecutive and make a giv. */
+ || (benefit = consec_sets_giv (benefit, p,
+ src_reg, dest_reg,
+ &add_val, &mult_val))))
+ {
+ int count;
+ struct induction *v
+ = (struct induction *) alloca (sizeof (struct induction));
+ rtx temp;
+
+ /* If this is a library call, increase benefit. */
+ if (find_reg_note (p, REG_RETVAL, NULL_RTX))
+ benefit += libcall_benefit (p);
+
+ /* Skip the consecutive insns, if there are any. */
+ for (count = n_times_set[REGNO (dest_reg)] - 1;
+ count > 0; count--)
+ {
+ /* If first insn of libcall sequence, skip to end.
+ Do this at start of loop, since INSN is guaranteed to
+ be an insn here. */
+ if (GET_CODE (p) != NOTE
+ && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
+ p = XEXP (temp, 0);
+
+ do p = NEXT_INSN (p);
+ while (GET_CODE (p) == NOTE);
+ }
+
+ record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
+ DEST_REG, not_every_iteration, NULL_PTR, loop_start,
+ loop_end);
+
+ }
+ }
+
+#ifndef DONT_REDUCE_ADDR
+ /* Look for givs which are memory addresses. */
+ /* This resulted in worse code on a VAX 8600. I wonder if it
+ still does. */
+ if (GET_CODE (p) == INSN)
+ find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
+ loop_end);
+#endif
+
+ /* Update the status of whether giv can derive other givs. This can
+ change when we pass a label or an insn that updates a biv. */
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CODE_LABEL)
+ update_giv_derive (p);
+
+ /* Past a label or a jump, we get to insns for which we can't count
+ on whether or how many times they will be executed during each
+ iteration. */
+ /* This code appears in three places, once in scan_loop, and twice
+ in strength_reduce. */
+ if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
+ /* If we enter the loop in the middle, and scan around
+ to the beginning, don't set not_every_iteration for that.
+ This can be any kind of jump, since we want to know if insns
+ will be executed if the loop is executed. */
+ && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
+ && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
+ || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
+ not_every_iteration = 1;
+
+ else if (GET_CODE (p) == NOTE)
+ {
+ /* At the virtual top of a converted loop, insns are again known to
+ be executed each iteration: logically, the loop begins here
+ even though the exit code has been duplicated. */
+ if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
+ not_every_iteration = 0;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
+ loop_depth++;
+ else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
+ loop_depth--;
+ }
+
+ /* Unlike in the code motion pass where MAYBE_NEVER indicates that
+ an insn may never be executed, NOT_EVERY_ITERATION indicates whether
+ or not an insn is known to be executed each iteration of the
+ loop, whether or not any iterations are known to occur.
+
+ Therefore, if we have just passed a label and have no more labels
+ between here and the test insn of the loop, we know these insns
+ will be executed each iteration. */
+
+ if (not_every_iteration && GET_CODE (p) == CODE_LABEL
+ && no_labels_between_p (p, loop_end))
+ not_every_iteration = 0;
+ }
+
+ /* Try to calculate and save the number of loop iterations. This is
+ set to zero if the actual number can not be calculated. This must
+ be called after all giv's have been identified, since otherwise it may
+ fail if the iteration variable is a giv. */
+
+ loop_n_iterations = loop_iterations (loop_start, loop_end);
+
+ /* Now for each giv for which we still don't know whether or not it is
+ replaceable, check to see if it is replaceable because its final value
+ can be calculated. This must be done after loop_iterations is called,
+ so that final_giv_value will work correctly. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ struct induction *v;
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (! v->replaceable && ! v->not_replaceable)
+ check_final_value (v, loop_start, loop_end);
+ }
+
+ /* Try to prove that the loop counter variable (if any) is always
+ nonnegative; if so, record that fact with a REG_NONNEG note
+ so that a "decrement and branch until zero" insn can be used. */
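+ /* For example, a countdown loop such as `for (i = 100; i != 0; i--)'
+ whose counter is known nonnegative can then be compiled into a single
+ decrement-and-branch insn on machines that have one (e.g. the m68k
+ dbra, which counts down to -1, hence the need to know the counter
+ never starts below zero). */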
+ check_dbra_loop (loop_end, insn_count, loop_start);
+
+ /* Create reg_map to hold substitutions for replaceable giv regs. */
+ reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
+ bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
+
+ /* Examine each iv class for feasibility of strength reduction/induction
+ variable elimination. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ struct induction *v;
+ int benefit;
+ int all_reduced;
+ rtx final_value = 0;
+
+ /* Test whether it will be possible to eliminate this biv
+ provided all givs are reduced. This is possible if either
+ the reg is not used outside the loop, or we can compute
+ what its final value will be.
+
+ For architectures with a decrement_and_branch_until_zero insn,
+ don't do this if we put a REG_NONNEG note on the endtest for
+ this biv. */
+
+ /* Compare against bl->init_insn rather than loop_start.
+ We aren't concerned with any uses of the biv between
+ init_insn and loop_start since these won't be affected
+ by the value of the biv elsewhere in the function, so
+ long as init_insn doesn't use the biv itself.
+ March 14, 1989 -- self@bayes.arc.nasa.gov */
+
+ if ((uid_luid[regno_last_uid[bl->regno]] < INSN_LUID (loop_end)
+ && bl->init_insn
+ && INSN_UID (bl->init_insn) < max_uid_for_loop
+ && uid_luid[regno_first_uid[bl->regno]] >= INSN_LUID (bl->init_insn)
+#ifdef HAVE_decrement_and_branch_until_zero
+ && ! bl->nonneg
+#endif
+ && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
+ || ((final_value = final_biv_value (bl, loop_start, loop_end))
+#ifdef HAVE_decrement_and_branch_until_zero
+ && ! bl->nonneg
+#endif
+ ))
+ bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
+ threshold, insn_count);
+ else
+ {
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream,
+ "Cannot eliminate biv %d.\n",
+ bl->regno);
+ fprintf (loop_dump_stream,
+ "First use: insn %d, last use: insn %d.\n",
+ regno_first_uid[bl->regno],
+ regno_last_uid[bl->regno]);
+ }
+ }
+
+ /* Combine all giv's for this iv_class. */
+ combine_givs (bl);
+
+ /* This will be true at the end, if all givs which depend on this
+ biv have been strength reduced.
+ We can't (currently) eliminate the biv unless this is so. */
+ all_reduced = 1;
+
+ /* Check each giv in this class to see if we will benefit by reducing
+ it. Skip giv's combined with others. */
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ struct induction *tv;
+
+ if (v->ignore || v->same)
+ continue;
+
+ benefit = v->benefit;
+
+ /* Reduce benefit if not replaceable, since we will insert
+ a move-insn to replace the insn that calculates this giv.
+ Don't do this unless the giv is a user variable, since it
+ will often be marked non-replaceable because of the duplication
+ of the exit code outside the loop. In such a case, the copies
+ we insert are dead and will be deleted. So they don't have
+ a cost. Similar situations exist. */
+ /* ??? The new final_[bg]iv_value code does a much better job
+ of finding replaceable giv's, and hence this code may no longer
+ be necessary. */
+ if (! v->replaceable && ! bl->eliminable
+ && REG_USERVAR_P (v->dest_reg))
+ benefit -= copy_cost;
+
+ /* Decrease the benefit to count the add-insns that we will
+ insert to increment the reduced reg for the giv. */
+ benefit -= add_cost * bl->biv_count;
+
+ /* Decide whether to strength-reduce this giv or to leave the code
+ unchanged (recompute it from the biv each time it is used).
+ This decision can be made independently for each giv. */
+
+ /* ??? Perhaps attempt to guess whether autoincrement will handle
+ some of the new add insns; if so, can increase BENEFIT
+ (undo the subtraction of add_cost that was done above). */
+
+ /* If an insn is not to be strength reduced, then set its ignore
+ flag, and clear all_reduced. */
+
+ /* A giv that depends on a reversed biv must be reduced if it is
+ used after the loop exit; otherwise, it would have the wrong
+ value after the loop exit. To keep things simple, just reduce
+ all such giv's whether or not we know they are used after the loop
+ exit. */
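+
+ /* As a worked example with made-up numbers: in a 20-insn loop with
+ no calls and 13 non-fixed registers, threshold is 2 * (3 + 13) = 32,
+ so a giv with lifetime 1 and benefit 1 scores 1 * 32 * 1 = 32 >= 20
+ and is reduced, while one whose benefit was whittled down to 0
+ scores 0 < 20 and is ignored. */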
+
+ if (v->lifetime * threshold * benefit < insn_count
+ && ! bl->reversed)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "giv of insn %d not worth while, %d vs %d.\n",
+ INSN_UID (v->insn),
+ v->lifetime * threshold * benefit, insn_count);
+ v->ignore = 1;
+ all_reduced = 0;
+ }
+ else
+ {
+ /* Check that we can increment the reduced giv without a
+ multiply insn. If not, reject it. */
+
+ for (tv = bl->biv; tv; tv = tv->next_iv)
+ if (tv->mult_val == const1_rtx
+ && ! product_cheap_p (tv->add_val, v->mult_val))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "giv of insn %d: would need a multiply.\n",
+ INSN_UID (v->insn));
+ v->ignore = 1;
+ all_reduced = 0;
+ break;
+ }
+ }
+ }
+
+ /* Reduce each giv that we decided to reduce. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ struct induction *tv;
+ if (! v->ignore && v->same == 0)
+ {
+ v->new_reg = gen_reg_rtx (v->mode);
+
+ /* For each place where the biv is incremented,
+ add an insn to increment the new, reduced reg for the giv. */
+ for (tv = bl->biv; tv; tv = tv->next_iv)
+ {
+ if (tv->mult_val == const1_rtx)
+ emit_iv_add_mult (tv->add_val, v->mult_val,
+ v->new_reg, v->new_reg, tv->insn);
+ else /* tv->mult_val == const0_rtx */
+ /* A multiply is acceptable here
+ since this is presumed to be seldom executed. */
+ emit_iv_add_mult (tv->add_val, v->mult_val,
+ v->add_val, v->new_reg, tv->insn);
+ }
+
+ /* Add code at loop start to initialize giv's reduced reg. */
+
+ emit_iv_add_mult (bl->initial_value, v->mult_val,
+ v->add_val, v->new_reg, loop_start);
+ }
+ }
+
+ /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
+ as not reduced.
+
+ For each giv register that can be reduced now: if replaceable,
+ substitute reduced reg wherever the old giv occurs;
+ else add new move insn "giv_reg = reduced_reg".
+
+ Also check for givs whose first use is their definition and whose
+ last use is the definition of another giv. If so, it is likely
+ dead and should not be used to eliminate a biv. */
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ if (v->same && v->same->ignore)
+ v->ignore = 1;
+
+ if (v->ignore)
+ continue;
+
+ if (v->giv_type == DEST_REG
+ && regno_first_uid[REGNO (v->dest_reg)] == INSN_UID (v->insn))
+ {
+ struct induction *v1;
+
+ for (v1 = bl->giv; v1; v1 = v1->next_iv)
+ if (regno_last_uid[REGNO (v->dest_reg)] == INSN_UID (v1->insn))
+ v->maybe_dead = 1;
+ }
+
+ /* Update expression if this was combined, in case other giv was
+ replaced. */
+ if (v->same)
+ v->new_reg = replace_rtx (v->new_reg,
+ v->same->dest_reg, v->same->new_reg);
+
+ if (v->giv_type == DEST_ADDR)
+ /* Store reduced reg as the address in the memref where we found
+ this giv. */
+ validate_change (v->insn, v->location, v->new_reg, 0);
+ else if (v->replaceable)
+ {
+ reg_map[REGNO (v->dest_reg)] = v->new_reg;
+
+#if 0
+ /* I can no longer duplicate the original problem. Perhaps
+ this is unnecessary now? */
+
+ /* Replaceable; it isn't strictly necessary to delete the old
+ insn and emit a new one, because v->dest_reg is now dead.
+
+ However, especially when unrolling loops, the special
+ handling for (set REG0 REG1) in the second cse pass may
+ make v->dest_reg live again. To avoid this problem, emit
+ an insn to set the original giv reg from the reduced giv.
+ We can not delete the original insn, since it may be part
+ of a LIBCALL, and the code in flow that eliminates dead
+ libcalls will fail if it is deleted. */
+ emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
+ v->insn);
+#endif
+ }
+ else
+ {
+ /* Not replaceable; emit an insn to set the original giv reg from
+ the reduced giv, same as above. */
+ emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
+ v->insn);
+ }
+
+ /* When a loop is reversed, givs which depend on the reversed
+ biv, and which are live outside the loop, must be set to their
+ correct final value. This insn is only needed if the giv is
+ not replaceable. The correct final value is the same as the
+ value that the giv starts the reversed loop with. */
+ if (bl->reversed && ! v->replaceable)
+ emit_iv_add_mult (bl->initial_value, v->mult_val,
+ v->add_val, v->dest_reg, end_insert_before);
+ else if (v->final_value)
+ {
+ rtx insert_before;
+
+ /* If the loop has multiple exits, emit the insn before the
+ loop to ensure that it will always be executed no matter
+ how the loop exits. Otherwise, emit the insn after the loop,
+ since this is slightly more efficient. */
+ if (loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
+ insert_before = loop_start;
+ else
+ insert_before = end_insert_before;
+ emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
+ insert_before);
+
+#if 0
+ /* If the insn to set the final value of the giv was emitted
+ before the loop, then we must delete the insn inside the loop
+ that sets it. If this is a LIBCALL, then we must delete
+ every insn in the libcall. Note, however, that
+ final_giv_value will only succeed when there are multiple
+ exits if the giv is dead at each exit, hence it does not
+ matter that the original insn remains because it is dead
+ anyways. */
+ /* Delete the insn inside the loop that sets the giv since
+ the giv is now set before (or after) the loop. */
+ delete_insn (v->insn);
+#endif
+ }
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream, "giv at %d reduced to ",
+ INSN_UID (v->insn));
+ print_rtl (loop_dump_stream, v->new_reg);
+ fprintf (loop_dump_stream, "\n");
+ }
+ }
+
+ /* All the givs based on the biv bl have been reduced if they
+ merit it. */
+
+ /* For each giv not marked as maybe dead that has been combined with a
+ second giv, clear any "maybe dead" mark on that second giv.
+ v->new_reg will either be or refer to the register of the giv it
+ combined with.
+
+ Doing this clearing avoids problems in biv elimination where a
+ giv's new_reg is a complex value that can't be put in the insn but
+ the giv combined with (with a reg as new_reg) is marked maybe_dead.
+ Since the register will be used in either case, we'd prefer it be
+ used from the simpler giv. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (! v->maybe_dead && v->same)
+ v->same->maybe_dead = 0;
+
+ /* Try to eliminate the biv, if it is a candidate.
+ This won't work if ! all_reduced,
+ since the givs we planned to use might not have been reduced.
+
+ We have to be careful that we didn't initially think we could eliminate
+ this biv because of a giv that we now think may be dead and shouldn't
+ be used as a biv replacement.
+
+ Also, there is the possibility that we may have a giv that looks
+ like it can be used to eliminate a biv, but the resulting insn
+ isn't valid. This can happen, for example, on the 88k, where a
+ JUMP_INSN can compare a register only with zero. Attempts to
+ replace it with a compare with a constant will fail.
+
+ Note that in cases where this call fails, we may have replaced some
+ of the occurrences of the biv with a giv, but no harm was done in
+ doing so in the rare cases where it can occur. */
+
+ if (all_reduced == 1 && bl->eliminable
+ && maybe_eliminate_biv (bl, loop_start, end, 1,
+ threshold, insn_count))
+
+ {
+ /* ?? If we created a new test to bypass the loop entirely,
+ or otherwise drop straight in, based on this test, then
+ we might want to rewrite it also. This way some later
+ pass has more hope of removing the initialization of this
+ biv entirely. */
+
+ /* If final_value != 0, then the biv may be used after loop end
+ and we must emit an insn to set it just in case.
+
+ Reversed bivs already have an insn after the loop setting their
+ value, so we don't need another one. We can't calculate the
+ proper final value for such a biv here anyways. */
+ if (final_value != 0 && ! bl->reversed)
+ {
+ rtx insert_before;
+
+ /* If the loop has multiple exits, emit the insn before the
+ loop to ensure that it will always be executed no matter
+ how the loop exits. Otherwise, emit the insn after the
+ loop, since this is slightly more efficient. */
+ if (loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
+ insert_before = loop_start;
+ else
+ insert_before = end_insert_before;
+
+ emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
+ insert_before);
+ }
+
+#if 0
+ /* Delete all of the instructions inside the loop which set
+ the biv, as they are all dead. It is safe to delete them,
+ because an insn setting a biv will never be part of a libcall. */
+ /* However, deleting them will invalidate the regno_last_uid info,
+ so keeping them around is more convenient. Final_biv_value
+ will only succeed when there are multiple exits if the biv
+ is dead at each exit, hence it does not matter that the original
+ insn remains, because it is dead anyways. */
+ for (v = bl->biv; v; v = v->next_iv)
+ delete_insn (v->insn);
+#endif
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
+ bl->regno);
+ }
+ }
+
+ /* Go through all the instructions in the loop, making all the
+ register substitutions scheduled in REG_MAP. */
+
+ for (p = loop_start; p != end; p = NEXT_INSN (p))
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ {
+ replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
+ replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
+ INSN_CODE (p) = -1;
+ }
+
+ /* Unroll loops from within strength reduction so that we can use the
+ induction variable information that strength_reduce has already
+ collected. */
+
+ if (flag_unroll_loops)
+ unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "\n");
+}
+
+/* Return 1 if X is a valid source for an initial value (or as a value to be
+ compared against in an initial test).
+
+ X must be either a register or constant and must not be clobbered between
+ the current insn and the start of the loop.
+
+ INSN is the insn containing X. */
+
+static int
+valid_initial_value_p (x, insn, call_seen, loop_start)
+ rtx x;
+ rtx insn;
+ int call_seen;
+ rtx loop_start;
+{
+ if (CONSTANT_P (x))
+ return 1;
+
+ /* Only consider pseudos we know about initialized in insns whose luids
+ we know. */
+ if (GET_CODE (x) != REG
+ || REGNO (x) >= max_reg_before_loop)
+ return 0;
+
+ /* Don't use a call-clobbered register across a call which clobbers it. On
+ some machines, don't use any hard registers at all. */
+ if (REGNO (x) < FIRST_PSEUDO_REGISTER
+#ifndef SMALL_REGISTER_CLASSES
+ && call_used_regs[REGNO (x)] && call_seen
+#endif
+ )
+ return 0;
+
+ /* Don't use registers that have been clobbered before the start of the
+ loop. */
+ if (reg_set_between_p (x, insn, loop_start))
+ return 0;
+
+ return 1;
+}
+
+/* Scan X for memory refs and check each memory address
+ as a possible giv. INSN is the insn whose pattern X comes from.
+ NOT_EVERY_ITERATION is 1 if the insn might not be executed during
+ every loop iteration. */
+
+static void
+find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
+ rtx x;
+ rtx insn;
+ int not_every_iteration;
+ rtx loop_start, loop_end;
+{
+ register int i, j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case REG:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case PC:
+ case CC0:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case USE:
+ case CLOBBER:
+ return;
+
+ case MEM:
+ {
+ rtx src_reg;
+ rtx add_val;
+ rtx mult_val;
+ int benefit;
+
+ benefit = general_induction_var (XEXP (x, 0),
+ &src_reg, &add_val, &mult_val);
+
+ /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
+ Such a giv isn't useful. */
+ if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
+ {
+ /* Found one; record it. */
+ struct induction *v
+ = (struct induction *) oballoc (sizeof (struct induction));
+
+ record_giv (v, insn, src_reg, addr_placeholder, mult_val,
+ add_val, benefit, DEST_ADDR, not_every_iteration,
+ &XEXP (x, 0), loop_start, loop_end);
+
+ v->mem_mode = GET_MODE (x);
+ }
+ return;
+ }
+ }
+
+ /* Recursively scan the subexpressions for other mem refs. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
+ loop_end);
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
+ loop_start, loop_end);
+}
+
+/* Fill in the data about one biv update.
+ V is the `struct induction' in which we record the biv. (It is
+ allocated by the caller, with alloca.)
+ INSN is the insn that sets it.
+ DEST_REG is the biv's reg.
+
+ MULT_VAL is const1_rtx if the biv is being incremented here, in which case
+ INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
+ being set to INC_VAL.
+
+ NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
+ executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
+ can be executed more than once per iteration. If MAYBE_MULTIPLE
+ and NOT_EVERY_ITERATION are both zero, we know that the biv update is
+ executed exactly once per iteration. */
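+
+/* For example (hypothetical insns): a biv update i = i + 4 arrives here
+ with MULT_VAL == const1_rtx and INC_VAL == (const_int 4), whereas a
+ plain assignment i = 10 inside an innermost loop arrives with
+ MULT_VAL == const0_rtx and INC_VAL == (const_int 10). */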
+
+static void
+record_biv (v, insn, dest_reg, inc_val, mult_val,
+ not_every_iteration, maybe_multiple)
+ struct induction *v;
+ rtx insn;
+ rtx dest_reg;
+ rtx inc_val;
+ rtx mult_val;
+ int not_every_iteration;
+ int maybe_multiple;
+{
+ struct iv_class *bl;
+
+ v->insn = insn;
+ v->src_reg = dest_reg;
+ v->dest_reg = dest_reg;
+ v->mult_val = mult_val;
+ v->add_val = inc_val;
+ v->mode = GET_MODE (dest_reg);
+ v->always_computable = ! not_every_iteration;
+ v->maybe_multiple = maybe_multiple;
+
+ /* Add this to the reg's iv_class, creating a class
+ if this is the first incrementation of the reg. */
+
+ bl = reg_biv_class[REGNO (dest_reg)];
+ if (bl == 0)
+ {
+ /* Create and initialize new iv_class. */
+
+ bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
+
+ bl->regno = REGNO (dest_reg);
+ bl->biv = 0;
+ bl->giv = 0;
+ bl->biv_count = 0;
+ bl->giv_count = 0;
+
+ /* Set initial value to the reg itself. */
+ bl->initial_value = dest_reg;
+ /* We haven't seen the initializing insn yet. */
+ bl->init_insn = 0;
+ bl->init_set = 0;
+ bl->initial_test = 0;
+ bl->incremented = 0;
+ bl->eliminable = 0;
+ bl->nonneg = 0;
+ bl->reversed = 0;
+ bl->total_benefit = 0;
+
+ /* Add this class to loop_iv_list. */
+ bl->next = loop_iv_list;
+ loop_iv_list = bl;
+
+ /* Put it in the array of biv register classes. */
+ reg_biv_class[REGNO (dest_reg)] = bl;
+ }
+
+ /* Update IV_CLASS entry for this biv. */
+ v->next_iv = bl->biv;
+ bl->biv = v;
+ bl->biv_count++;
+ if (mult_val == const1_rtx)
+ bl->incremented = 1;
+
+ if (loop_dump_stream)
+ {
+ fprintf (loop_dump_stream,
+ "Insn %d: possible biv, reg %d,",
+ INSN_UID (insn), REGNO (dest_reg));
+ if (GET_CODE (inc_val) == CONST_INT)
+ fprintf (loop_dump_stream, " const = %d\n",
+ INTVAL (inc_val));
+ else
+ {
+ fprintf (loop_dump_stream, " const = ");
+ print_rtl (loop_dump_stream, inc_val);
+ fprintf (loop_dump_stream, "\n");
+ }
+ }
+}
+
+/* Fill in the data about one giv.
+ V is the `struct induction' in which we record the giv. (It is
+ allocated by the caller, with alloca.)
+ INSN is the insn that sets it.
+ BENEFIT estimates the savings from deleting this insn.
+ TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
+ into a register or is used as a memory address.
+
+ SRC_REG is the biv reg which the giv is computed from.
+ DEST_REG is the giv's reg (if the giv is stored in a reg).
+ MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
+ LOCATION points to the place where this giv's value appears in INSN. */
+
+static void
+record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
+ type, not_every_iteration, location, loop_start, loop_end)
+ struct induction *v;
+ rtx insn;
+ rtx src_reg;
+ rtx dest_reg;
+ rtx mult_val, add_val;
+ int benefit;
+ enum g_types type;
+ int not_every_iteration;
+ rtx *location;
+ rtx loop_start, loop_end;
+{
+ struct induction *b;
+ struct iv_class *bl;
+ rtx set = single_set (insn);
+ rtx p;
+
+ v->insn = insn;
+ v->src_reg = src_reg;
+ v->giv_type = type;
+ v->dest_reg = dest_reg;
+ v->mult_val = mult_val;
+ v->add_val = add_val;
+ v->benefit = benefit;
+ v->location = location;
+ v->cant_derive = 0;
+ v->combined_with = 0;
+ v->maybe_multiple = 0;
+ v->maybe_dead = 0;
+ v->derive_adjustment = 0;
+ v->same = 0;
+ v->ignore = 0;
+ v->new_reg = 0;
+ v->final_value = 0;
+
+ /* The v->always_computable field is used in update_giv_derive, to
+ determine whether a giv can be used to derive another giv. For a
+ DEST_REG giv, INSN computes a new value for the giv, so its value
+ isn't computable if INSN isn't executed every iteration.
+ However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
+ it does not compute a new value. Hence the value is always computable
+ regardless of whether INSN is executed each iteration. */
+
+ if (type == DEST_ADDR)
+ v->always_computable = 1;
+ else
+ v->always_computable = ! not_every_iteration;
+
+ if (type == DEST_ADDR)
+ {
+ v->mode = GET_MODE (*location);
+ v->lifetime = 1;
+ v->times_used = 1;
+ }
+ else /* type == DEST_REG */
+ {
+ v->mode = GET_MODE (SET_DEST (set));
+
+ v->lifetime = (uid_luid[regno_last_uid[REGNO (dest_reg)]]
+ - uid_luid[regno_first_uid[REGNO (dest_reg)]]);
+
+ v->times_used = n_times_used[REGNO (dest_reg)];
+
+ /* If the lifetime is zero, it means that this register is
+ really a dead store. So mark this as a giv that can be
+ ignored. This will not prevent the biv from being eliminated. */
+ if (v->lifetime == 0)
+ v->ignore = 1;
+
+ reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
+ reg_iv_info[REGNO (dest_reg)] = v;
+ }
+
+ /* Add the giv to the class of givs computed from one biv. */
+
+ bl = reg_biv_class[REGNO (src_reg)];
+ if (bl)
+ {
+ v->next_iv = bl->giv;
+ bl->giv = v;
+ /* Don't count DEST_ADDR. This is supposed to count the number of
+ insns that calculate givs. */
+ if (type == DEST_REG)
+ bl->giv_count++;
+ bl->total_benefit += benefit;
+ }
+ else
+ /* Fatal error, biv missing for this giv? */
+ abort ();
+
+ if (type == DEST_ADDR)
+ v->replaceable = 1;
+ else
+ {
+ /* The giv can be replaced outright by the reduced register only if all
+ of the following conditions are true:
+ - the insn that sets the giv is always executed on any iteration
+ on which the giv is used at all
+ (there are two ways to deduce this:
+ either the insn is executed on every iteration,
+ or all uses follow that insn in the same basic block),
+ - the giv is not used outside the loop
+ - no assignments to the biv occur during the giv's lifetime. */
+
+ if (regno_first_uid[REGNO (dest_reg)] == INSN_UID (insn)
+ /* Previous line always fails if INSN was moved by loop opt. */
+ && uid_luid[regno_last_uid[REGNO (dest_reg)]] < INSN_LUID (loop_end)
+ && (! not_every_iteration
+ || last_use_this_basic_block (dest_reg, insn)))
+ {
+ /* Now check that there are no assignments to the biv within the
+ giv's lifetime. This requires two separate checks. */
+
+ /* Check each biv update, and fail if any are between the first
+ and last use of the giv.
+
+ If this loop contains an inner loop that was unrolled, then
+ the insn modifying the biv may have been emitted by the loop
+ unrolling code, and hence does not have a valid luid. Just
+ mark the giv as not replaceable in this case. Such a biv is not
+ very useful anyway, because it is used in two different loops.
+ It is very unlikely that we would be able to optimize the giv
+ using this biv anyway. */
+
+ v->replaceable = 1;
+ for (b = bl->biv; b; b = b->next_iv)
+ {
+ if (INSN_UID (b->insn) >= max_uid_for_loop
+ || ((uid_luid[INSN_UID (b->insn)]
+ >= uid_luid[regno_first_uid[REGNO (dest_reg)]])
+ && (uid_luid[INSN_UID (b->insn)]
+ <= uid_luid[regno_last_uid[REGNO (dest_reg)]])))
+ {
+ v->replaceable = 0;
+ v->not_replaceable = 1;
+ break;
+ }
+ }
+
+ /* Check each insn between the first and last use of the giv,
+ and fail if any of them are branches that jump to a named label
+ outside this range, but still inside the loop. This catches
+ cases of spaghetti code where the execution order of insns
+ is not linear, and hence the above test fails. For example,
+ in the following code, j is not replaceable:
+ for (i = 0; i < 100; ) {
+ L0: j = 4*i; goto L1;
+ L2: k = j; goto L3;
+ L1: i++; goto L2;
+ L3: ; }
+ printf ("k = %d\n", k);
+ This test is conservative, but this test succeeds rarely enough
+ that it isn't a problem. See also check_final_value below. */
+
+ if (v->replaceable)
+ for (p = insn;
+ INSN_UID (p) >= max_uid_for_loop
+ || INSN_LUID (p) < uid_luid[regno_last_uid[REGNO (dest_reg)]];
+ p = NEXT_INSN (p))
+ {
+ if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
+ && LABEL_NAME (JUMP_LABEL (p))
+ && ((INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start)
+ && (INSN_LUID (JUMP_LABEL (p))
+ < uid_luid[regno_first_uid[REGNO (dest_reg)]]))
+ || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end)
+ && (INSN_LUID (JUMP_LABEL (p))
+ > uid_luid[regno_last_uid[REGNO (dest_reg)]]))))
+ {
+ v->replaceable = 0;
+ v->not_replaceable = 1;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Found branch outside giv lifetime.\n");
+
+ break;
+ }
+ }
+ }
+ else
+ {
+ /* May still be replaceable, we don't have enough info here to
+ decide. */
+ v->replaceable = 0;
+ v->not_replaceable = 0;
+ }
+ }
+
+ if (loop_dump_stream)
+ {
+ if (type == DEST_REG)
+ fprintf (loop_dump_stream, "Insn %d: giv reg %d",
+ INSN_UID (insn), REGNO (dest_reg));
+ else
+ fprintf (loop_dump_stream, "Insn %d: dest address",
+ INSN_UID (insn));
+
+ fprintf (loop_dump_stream, " src reg %d benefit %d",
+ REGNO (src_reg), v->benefit);
+ fprintf (loop_dump_stream, " used %d lifetime %d",
+ v->times_used, v->lifetime);
+
+ if (v->replaceable)
+ fprintf (loop_dump_stream, " replaceable");
+
+ if (GET_CODE (mult_val) == CONST_INT)
+ fprintf (loop_dump_stream, " mult %d",
+ INTVAL (mult_val));
+ else
+ {
+ fprintf (loop_dump_stream, " mult ");
+ print_rtl (loop_dump_stream, mult_val);
+ }
+
+ if (GET_CODE (add_val) == CONST_INT)
+ fprintf (loop_dump_stream, " add %d",
+ INTVAL (add_val));
+ else
+ {
+ fprintf (loop_dump_stream, " add ");
+ print_rtl (loop_dump_stream, add_val);
+ }
+
+ fprintf (loop_dump_stream, "\n");
+ }
+}
+
+
+/* All this does is determine whether a giv can be made replaceable because
+ its final value can be calculated. This code can not be part of record_giv
+ above, because final_giv_value requires that the number of loop iterations
+ be known, and that can not be accurately calculated until after all givs
+ have been identified. */
+
+static void
+check_final_value (v, loop_start, loop_end)
+ struct induction *v;
+ rtx loop_start, loop_end;
+{
+ struct iv_class *bl;
+ rtx final_value = 0;
+
+ bl = reg_biv_class[REGNO (v->src_reg)];
+
+ /* DEST_ADDR givs will never reach here, because they are always marked
+ replaceable above in record_giv. */
+
+ /* The giv can be replaced outright by the reduced register only if all
+ of the following conditions are true:
+ - the insn that sets the giv is always executed on any iteration
+ on which the giv is used at all
+ (there are two ways to deduce this:
+ either the insn is executed on every iteration,
+ or all uses follow that insn in the same basic block),
+ - its final value can be calculated (this condition differs from the
+ one above in record_giv)
+ - no assignments to the biv occur during the giv's lifetime. */
+
+#if 0
+ /* This is only called now when replaceable is known to be false. */
+ /* Clear replaceable, so that it won't confuse final_giv_value. */
+ v->replaceable = 0;
+#endif
+
+ if ((final_value = final_giv_value (v, loop_start, loop_end))
+ && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
+ {
+ int biv_increment_seen = 0;
+ rtx p = v->insn;
+ rtx last_giv_use;
+
+ v->replaceable = 1;
+
+ /* When trying to determine whether or not a biv increment occurs
+ during the lifetime of the giv, we can ignore uses of the variable
+ outside the loop because final_value is true. Hence we can not
+ use regno_last_uid and regno_first_uid as above in record_giv. */
+
+ /* Search the loop to determine whether any assignments to the
+ biv occur during the giv's lifetime. Start with the insn
+ that sets the giv, and search around the loop until we come
+ back to that insn again.
+
+ Also fail if there is a jump within the giv's lifetime that jumps
+ to somewhere outside the lifetime but still within the loop. This
+ catches spaghetti code where the execution order is not linear, and
+ hence the above test fails. Here we assume that the giv lifetime
+ does not extend from one iteration of the loop to the next, so as
+ to make the test easier. Since the lifetime isn't known yet,
+ this requires two loops. See also record_giv above. */
+
+ last_giv_use = v->insn;
+
+ while (1)
+ {
+ p = NEXT_INSN (p);
+ if (p == loop_end)
+ p = NEXT_INSN (loop_start);
+ if (p == v->insn)
+ break;
+
+ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
+ || GET_CODE (p) == CALL_INSN)
+ {
+ if (biv_increment_seen)
+ {
+ if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
+ {
+ v->replaceable = 0;
+ v->not_replaceable = 1;
+ break;
+ }
+ }
+ else if (GET_CODE (PATTERN (p)) == SET
+ && SET_DEST (PATTERN (p)) == v->src_reg)
+ biv_increment_seen = 1;
+ else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
+ last_giv_use = p;
+ }
+ }
+
+ /* Now that the lifetime of the giv is known, check for branches
+ from within the lifetime to outside the lifetime if it is still
+ replaceable. */
+
+ if (v->replaceable)
+ {
+ p = v->insn;
+ while (1)
+ {
+ p = NEXT_INSN (p);
+ if (p == loop_end)
+ p = NEXT_INSN (loop_start);
+ if (p == last_giv_use)
+ break;
+
+ if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
+ && LABEL_NAME (JUMP_LABEL (p))
+ && ((INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
+ && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
+ || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
+ && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
+ {
+ v->replaceable = 0;
+ v->not_replaceable = 1;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Found branch outside giv lifetime.\n");
+
+ break;
+ }
+ }
+ }
+
+ /* If it is replaceable, then save the final value. */
+ if (v->replaceable)
+ v->final_value = final_value;
+ }
+
+ if (loop_dump_stream && v->replaceable)
+ fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
+ INSN_UID (v->insn), REGNO (v->dest_reg));
+}
+
+/* Update the status of whether a giv can derive other givs.
+
+ We need to do something special if there is or may be an update to the biv
+ between the time the giv is defined and the time it is used to derive
+ another giv.
+
+ In addition, a giv that is only conditionally set is not allowed to
+ derive another giv once a label has been passed.
+
+ The cases we look at are when a label or an update to a biv is passed. */
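+
+/* For example, if a giv g was defined as g = m * b + a and a known biv
+ update b = b + inc intervenes before g is used to derive another giv,
+ the derived value must be compensated by m * inc; that product is
+ what accumulates in derive_adjustment below. */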
+
+static void
+update_giv_derive (p)
+ rtx p;
+{
+ struct iv_class *bl;
+ struct induction *biv, *giv;
+ rtx tem;
+ int dummy;
+
+ /* Search all IV classes, then all bivs, and finally all givs.
+
+ There are three cases we are concerned with. First we have the situation
+ of a giv that is only updated conditionally. In that case, it may not
+ derive any givs after a label is passed.
+
+ The second case is when a biv update occurs, or may occur, after the
+ definition of a giv. For certain biv updates (see below) that are
+ known to occur between the giv definition and use, we can adjust the
+ giv definition. For others, or when the biv update is conditional,
+ we must prevent the giv from deriving any other givs. There are two
+ sub-cases within this case.
+
+ If this is a label, we are concerned with any biv update that is done
+ conditionally, since it may be done after the giv is defined followed by
+ a branch here (actually, we need to pass both a jump and a label, but
+ this extra tracking doesn't seem worth it).
+
+ If this is a jump, we are concerned about any biv update that may be
+ executed multiple times. We are actually only concerned about
+ backward jumps, but it is probably not worth performing the test
+ on the jump again here.
+
+ If this is a biv update, we must adjust the giv status to show that a
+ subsequent biv update was performed. If this adjustment cannot be done,
+ the giv cannot derive further givs. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ for (biv = bl->biv; biv; biv = biv->next_iv)
+ if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
+ || biv->insn == p)
+ {
+ for (giv = bl->giv; giv; giv = giv->next_iv)
+ {
+ /* If cant_derive is already true, there is no point in
+ checking all of these conditions again. */
+ if (giv->cant_derive)
+ continue;
+
+ /* If this giv is conditionally set and we have passed a label,
+ it cannot derive anything. */
+ if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
+ giv->cant_derive = 1;
+
+ /* Skip givs that have mult_val == 0, since
+ they are really invariants. Also skip those that are
+ replaceable, since we know their lifetime doesn't contain
+ any biv update. */
+ else if (giv->mult_val == const0_rtx || giv->replaceable)
+ continue;
+
+ /* The only way we can allow this giv to derive another
+ is if this is a biv increment and we can form the product
+ of biv->add_val and giv->mult_val. In this case, we will
+ be able to compute a compensation. */
+ else if (biv->insn == p)
+ {
+ tem = 0;
+
+ if (biv->mult_val == const1_rtx)
+ tem = simplify_giv_expr (gen_rtx (MULT, giv->mode,
+ biv->add_val,
+ giv->mult_val),
+ &dummy);
+
+ if (tem && giv->derive_adjustment)
+ tem = simplify_giv_expr (gen_rtx (PLUS, giv->mode, tem,
+ giv->derive_adjustment),
+ &dummy);
+ if (tem)
+ giv->derive_adjustment = tem;
+ else
+ giv->cant_derive = 1;
+ }
+ else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
+ || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
+ giv->cant_derive = 1;
+ }
+ }
+}
+
+/* Check whether an insn is an increment legitimate for a basic induction var.
+ X is the source of insn P, or a part of it.
+ MODE is the mode in which X should be interpreted.
+
+ DEST_REG is the putative biv, also the destination of the insn.
+ We accept patterns of these forms:
+ REG = REG + INVARIANT (includes REG = REG - CONSTANT)
+ REG = INVARIANT + REG
+
+ If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
+ and store the additive term into *INC_VAL.
+
+ If X is an assignment of an invariant into DEST_REG, we set
+ *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
+
+ We also want to detect a BIV when it corresponds to a variable
+ whose mode was promoted via PROMOTED_MODE. In that case, an increment
+ of the variable may be a PLUS that adds a SUBREG of that variable to
+ an invariant and then sign- or zero-extends the result of the PLUS
+ into the variable.
+
+ Most GIVs in such cases will be in the promoted mode, since that is
+ probably the natural computation mode (and almost certainly the mode
+ used for addresses) on the machine. So we view the pseudo-reg containing
+ the variable as the BIV, as if it were simply incremented.
+
+ Note that treating the entire pseudo as a BIV will result in making
+ simple increments to any GIVs based on it. However, if the variable
+ overflows in its declared mode but not its promoted mode, the result will
+ be incorrect. This is acceptable if the variable is signed, since
+ overflows in such cases are undefined, but not if it is unsigned, since
+ those overflows are defined. So we only check for SIGN_EXTEND and
+ not ZERO_EXTEND.
+
+ If we cannot find a biv, we return 0. */
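+
+/* For example, the pattern (set (reg i) (plus (reg i) (const_int -1))),
+ i.e. i = i - 1 at the source level, yields *MULT_VAL == const1_rtx and
+ *INC_VAL == (const_int -1), while (set (reg i) (const_int 0)) in an
+ innermost loop yields *MULT_VAL == const0_rtx and
+ *INC_VAL == (const_int 0). */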
+
+static int
+basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
+ register rtx x;
+ enum machine_mode mode;
+ rtx dest_reg;
+ rtx p;
+ rtx *inc_val;
+ rtx *mult_val;
+{
+ register enum rtx_code code;
+ rtx arg;
+ rtx insn, set = 0;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case PLUS:
+ if (XEXP (x, 0) == dest_reg
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
+ && SUBREG_REG (XEXP (x, 0)) == dest_reg))
+ arg = XEXP (x, 1);
+ else if (XEXP (x, 1) == dest_reg
+ || (GET_CODE (XEXP (x, 1)) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
+ && SUBREG_REG (XEXP (x, 1)) == dest_reg))
+ arg = XEXP (x, 0);
+ else
+ return 0;
+
+ if (invariant_p (arg) != 1)
+ return 0;
+
+ *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
+ *mult_val = const1_rtx;
+ return 1;
+
+ case SUBREG:
+ /* If this is a SUBREG for a promoted variable, check the inner
+ value. */
+ if (SUBREG_PROMOTED_VAR_P (x))
+ return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
+ dest_reg, p, inc_val, mult_val);
+
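+ /* ... fall through ... */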
+ case REG:
+ /* If this register is assigned in the previous insn, look at its
+ source, but don't go outside the loop or past a label. */
+
+ for (insn = PREV_INSN (p);
+ (insn && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
+ insn = PREV_INSN (insn))
+ ;
+
+ if (insn)
+ set = single_set (insn);
+
+ if (set != 0
+ && (SET_DEST (set) == x
+ || (GET_CODE (SET_DEST (set)) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
+ <= UNITS_PER_WORD)
+ && SUBREG_REG (SET_DEST (set)) == x)))
+ return basic_induction_var (SET_SRC (set),
+ (GET_MODE (SET_SRC (set)) == VOIDmode
+ ? GET_MODE (x)
+ : GET_MODE (SET_SRC (set))),
+ dest_reg, insn,
+ inc_val, mult_val);
+ /* ... fall through ... */
+
+ /* Can accept constant setting of biv only when inside innermost loop.
+ Otherwise, a biv of an inner loop may be incorrectly recognized
+ as a biv of the outer loop,
+ causing code to be moved INTO the inner loop. */
+ case MEM:
+ if (invariant_p (x) != 1)
+ return 0;
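+ /* ... fall through ... */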
+ case CONST_INT:
+ case SYMBOL_REF:
+ case CONST:
+ if (loops_enclosed == 1)
+ {
+ /* Possible bug here? Perhaps we don't know the mode of X. */
+ *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
+ *mult_val = const0_rtx;
+ return 1;
+ }
+ else
+ return 0;
+
+ case SIGN_EXTEND:
+ return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
+ dest_reg, p, inc_val, mult_val);
+ case ASHIFTRT:
+ /* Similar, since this can be a sign extension. */
+ for (insn = PREV_INSN (p);
+ (insn && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
+ insn = PREV_INSN (insn))
+ ;
+
+ if (insn)
+ set = single_set (insn);
+
+ if (set && SET_DEST (set) == XEXP (x, 0)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && GET_CODE (SET_SRC (set)) == ASHIFT
+ && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
+ return basic_induction_var (XEXP (SET_SRC (set), 0),
+ GET_MODE (XEXP (x, 0)),
+ dest_reg, insn, inc_val, mult_val);
+ return 0;
+
+ default:
+ return 0;
+ }
+}
+
+/* A general induction variable (giv) is any quantity that is a linear
+ function of a basic induction variable,
+ i.e. giv = biv * mult_val + add_val.
+ The coefficients can be any loop invariant quantity.
+ A giv need not be computed directly from the biv;
+ it can be computed by way of other givs. */
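+
+ /* For example, if I is a biv stepped on each iteration and A is
+ loop-invariant, the address computation A + 4 * I (say, of a 4-byte
+ array element) is a giv with mult_val 4 and add_val A. */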
+
+/* Determine whether X computes a giv.
+ If it does, return a nonzero value
+ which is the benefit from eliminating the computation of X;
+ set *SRC_REG to the register of the biv that it is computed from;
+ set *ADD_VAL and *MULT_VAL to the coefficients,
+ such that the value of X is biv * mult + add; */
+
+static int
+general_induction_var (x, src_reg, add_val, mult_val)
+ rtx x;
+ rtx *src_reg;
+ rtx *add_val;
+ rtx *mult_val;
+{
+ rtx orig_x = x;
+ int benefit = 0;
+ char *storage;
+
+ /* If this is an invariant, forget it, it isn't a giv. */
+ if (invariant_p (x) == 1)
+ return 0;
+
+ /* See if the expression could be a giv and get its form.
+ Mark our place on the obstack in case we don't find a giv. */
+ storage = (char *) oballoc (0);
+ x = simplify_giv_expr (x, &benefit);
+ if (x == 0)
+ {
+ obfree (storage);
+ return 0;
+ }
+
+ switch (GET_CODE (x))
+ {
+ case USE:
+ case CONST_INT:
+ /* Since this is now an invariant and wasn't before, it must be a giv
+ with MULT_VAL == 0. It doesn't matter which BIV we associate this
+ with. */
+ *src_reg = loop_iv_list->biv->dest_reg;
+ *mult_val = const0_rtx;
+ *add_val = x;
+ break;
+
+ case REG:
+ /* This is equivalent to a BIV. */
+ *src_reg = x;
+ *mult_val = const1_rtx;
+ *add_val = const0_rtx;
+ break;
+
+ case PLUS:
+ /* Either (plus (biv) (invar)) or
+ (plus (mult (biv) (invar_1)) (invar_2)). */
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ *src_reg = XEXP (XEXP (x, 0), 0);
+ *mult_val = XEXP (XEXP (x, 0), 1);
+ }
+ else
+ {
+ *src_reg = XEXP (x, 0);
+ *mult_val = const1_rtx;
+ }
+ *add_val = XEXP (x, 1);
+ break;
+
+ case MULT:
+ /* ADD_VAL is zero. */
+ *src_reg = XEXP (x, 0);
+ *mult_val = XEXP (x, 1);
+ *add_val = const0_rtx;
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
+ one unless they are CONST_INT). */
+ if (GET_CODE (*add_val) == USE)
+ *add_val = XEXP (*add_val, 0);
+ if (GET_CODE (*mult_val) == USE)
+ *mult_val = XEXP (*mult_val, 0);
+
+ benefit += rtx_cost (orig_x, SET);
+
+ /* Always return some benefit if this is a giv so it will be detected
+ as such. This allows elimination of bivs that might otherwise
+ not be eliminated. */
+ return benefit == 0 ? 1 : benefit;
+}
+
+/* Given an expression, X, try to form it as a linear function of a biv.
+ We will canonicalize it to be of the form
+ (plus (mult (BIV) (invar_1))
+ (invar_2))
+ with possible degeneracies.
+
+ The invariant expressions must each be of a form that can be used as a
+ machine operand. We surround them with a USE rtx (a hack, but localized
+ and certainly unambiguous!) if not a CONST_INT for simplicity in this
+ routine; it is the caller's responsibility to strip them.
+
+ If no such canonicalization is possible (i.e., two biv's are used or an
+ expression that is neither invariant nor a biv or giv), this routine
+ returns 0.
+
+ For a non-zero return, the result will have a code of CONST_INT, USE,
+ REG (for a BIV), PLUS, or MULT. No other codes will occur.
+
+ *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
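+
+ /* For example, if B is a biv, the expression
+ (plus (plus B (const_int 4)) (mult B (const_int 2)))
+ canonicalizes to
+ (plus (mult B (const_int 3)) (const_int 4)). */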
+
+static rtx
+simplify_giv_expr (x, benefit)
+ rtx x;
+ int *benefit;
+{
+ enum machine_mode mode = GET_MODE (x);
+ rtx arg0, arg1;
+ rtx tem;
+
+ /* If this is not an integer mode, or if we cannot do arithmetic in this
+ mode, this can't be a giv. */
+ if (mode != VOIDmode
+ && (GET_MODE_CLASS (mode) != MODE_INT
+ || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
+ return 0;
+
+ switch (GET_CODE (x))
+ {
+ case PLUS:
+ arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
+ arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
+ if (arg0 == 0 || arg1 == 0)
+ return 0;
+
+ /* Put constant last, CONST_INT last if both constant. */
+ if ((GET_CODE (arg0) == USE
+ || GET_CODE (arg0) == CONST_INT)
+ && GET_CODE (arg1) != CONST_INT)
+ tem = arg0, arg0 = arg1, arg1 = tem;
+
+ /* Handle addition of zero, then addition of an invariant. */
+ if (arg1 == const0_rtx)
+ return arg0;
+ else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
+ switch (GET_CODE (arg0))
+ {
+ case CONST_INT:
+ case USE:
+ /* Both invariant. Only valid if sum is machine operand.
+ First strip off possible USE on first operand. */
+ if (GET_CODE (arg0) == USE)
+ arg0 = XEXP (arg0, 0);
+
+ tem = 0;
+ if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
+ {
+ tem = plus_constant (arg0, INTVAL (arg1));
+ if (GET_CODE (tem) != CONST_INT)
+ tem = gen_rtx (USE, mode, tem);
+ }
+
+ return tem;
+
+ case REG:
+ case MULT:
+ /* biv + invar or mult + invar. Return sum. */
+ return gen_rtx (PLUS, mode, arg0, arg1);
+
+ case PLUS:
+ /* (a + invar_1) + invar_2. Associate. */
+ return simplify_giv_expr (gen_rtx (PLUS, mode,
+ XEXP (arg0, 0),
+ gen_rtx (PLUS, mode,
+ XEXP (arg0, 1), arg1)),
+ benefit);
+
+ default:
+ abort ();
+ }
+
+ /* Each argument must be either REG, PLUS, or MULT. Convert REG to
+ MULT to reduce cases. */
+ if (GET_CODE (arg0) == REG)
+ arg0 = gen_rtx (MULT, mode, arg0, const1_rtx);
+ if (GET_CODE (arg1) == REG)
+ arg1 = gen_rtx (MULT, mode, arg1, const1_rtx);
+
+ /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
+ Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
+ Recurse to associate the second PLUS. */
+ if (GET_CODE (arg1) == MULT)
+ tem = arg0, arg0 = arg1, arg1 = tem;
+
+ if (GET_CODE (arg1) == PLUS)
+ return simplify_giv_expr (gen_rtx (PLUS, mode,
+ gen_rtx (PLUS, mode,
+ arg0, XEXP (arg1, 0)),
+ XEXP (arg1, 1)),
+ benefit);
+
+ /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
+ if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
+ abort ();
+
+ if (XEXP (arg0, 0) != XEXP (arg1, 0))
+ return 0;
+
+ return simplify_giv_expr (gen_rtx (MULT, mode,
+ XEXP (arg0, 0),
+ gen_rtx (PLUS, mode,
+ XEXP (arg0, 1),
+ XEXP (arg1, 1))),
+ benefit);
+
+ case MINUS:
+ /* Handle "a - b" as "a + b * (-1)". */
+ return simplify_giv_expr (gen_rtx (PLUS, mode,
+ XEXP (x, 0),
+ gen_rtx (MULT, mode,
+ XEXP (x, 1), constm1_rtx)),
+ benefit);
+
+ case MULT:
+ arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
+ arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
+ if (arg0 == 0 || arg1 == 0)
+ return 0;
+
+ /* Put constant last, CONST_INT last if both constant. */
+ if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
+ && GET_CODE (arg1) != CONST_INT)
+ tem = arg0, arg0 = arg1, arg1 = tem;
+
+ /* If second argument is not now constant, not giv. */
+ if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
+ return 0;
+
+ /* Handle multiply by 0 or 1. */
+ if (arg1 == const0_rtx)
+ return const0_rtx;
+
+ else if (arg1 == const1_rtx)
+ return arg0;
+
+ switch (GET_CODE (arg0))
+ {
+ case REG:
+ /* biv * invar. Done. */
+ return gen_rtx (MULT, mode, arg0, arg1);
+
+ case CONST_INT:
+ /* Product of two constants. */
+ return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
+
+ case USE:
+ /* invar * invar. Not giv. */
+ return 0;
+
+ case MULT:
+ /* (a * invar_1) * invar_2. Associate. */
+ return simplify_giv_expr (gen_rtx (MULT, mode,
+ XEXP (arg0, 0),
+ gen_rtx (MULT, mode,
+ XEXP (arg0, 1), arg1)),
+ benefit);
+
+ case PLUS:
+ /* (a + invar_1) * invar_2. Distribute. */
+ return simplify_giv_expr (gen_rtx (PLUS, mode,
+ gen_rtx (MULT, mode,
+ XEXP (arg0, 0), arg1),
+ gen_rtx (MULT, mode,
+ XEXP (arg0, 1), arg1)),
+ benefit);
+
+ default:
+ abort ();
+ }
+
+ case ASHIFT:
+ /* Shift by constant is multiply by power of two. */
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ return 0;
+
+ return simplify_giv_expr (gen_rtx (MULT, mode,
+ XEXP (x, 0),
+ GEN_INT ((HOST_WIDE_INT) 1
+ << INTVAL (XEXP (x, 1)))),
+ benefit);
+
+ case NEG:
+ /* "-a" is "a * (-1)" */
+ return simplify_giv_expr (gen_rtx (MULT, mode, XEXP (x, 0), constm1_rtx),
+ benefit);
+
+ case NOT:
+ /* "~a" is "-a - 1". Silly, but easy. */
+ return simplify_giv_expr (gen_rtx (MINUS, mode,
+ gen_rtx (NEG, mode, XEXP (x, 0)),
+ const1_rtx),
+ benefit);
+
+ case USE:
+ /* Already in proper form for invariant. */
+ return x;
+
+ case REG:
+ /* If this is a new register, we can't deal with it. */
+ if (REGNO (x) >= max_reg_before_loop)
+ return 0;
+
+ /* Check for biv or giv. */
+ switch (reg_iv_type[REGNO (x)])
+ {
+ case BASIC_INDUCT:
+ return x;
+ case GENERAL_INDUCT:
+ {
+ struct induction *v = reg_iv_info[REGNO (x)];
+
+ /* Form expression from giv and add benefit. Ensure this giv
+ can derive another and subtract any needed adjustment if so. */
+ *benefit += v->benefit;
+ if (v->cant_derive)
+ return 0;
+
+ tem = gen_rtx (PLUS, mode, gen_rtx (MULT, mode,
+ v->src_reg, v->mult_val),
+ v->add_val);
+ if (v->derive_adjustment)
+ tem = gen_rtx (MINUS, mode, tem, v->derive_adjustment);
+ return simplify_giv_expr (tem, benefit);
+ }
+ }
+
+ /* Fall through to general case. */
+ default:
+ /* If invariant, return as USE (unless CONST_INT).
+ Otherwise, not giv. */
+ if (GET_CODE (x) == USE)
+ x = XEXP (x, 0);
+
+ if (invariant_p (x) == 1)
+ {
+ if (GET_CODE (x) == CONST_INT)
+ return x;
+ else
+ return gen_rtx (USE, mode, x);
+ }
+ else
+ return 0;
+ }
+}
+
+/* Help detect a giv that is calculated by several consecutive insns;
+ for example,
+ giv = biv * M
+ giv = giv + A
+ The caller has already identified the first insn P as having a giv as dest;
+ we check that all other insns that set the same register follow
+ immediately after P, that they alter nothing else,
+ and that the result of the last is still a giv.
+
+ The value is 0 if the reg set in P is not really a giv.
+ Otherwise, the value is the amount gained by eliminating
+ all the consecutive insns that compute the value.
+
+ FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
+ SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
+
+ The coefficients of the ultimate giv value are stored in
+ *MULT_VAL and *ADD_VAL. */
+
+static int
+consec_sets_giv (first_benefit, p, src_reg, dest_reg,
+ add_val, mult_val)
+ int first_benefit;
+ rtx p;
+ rtx src_reg;
+ rtx dest_reg;
+ rtx *add_val;
+ rtx *mult_val;
+{
+ int count;
+ enum rtx_code code;
+ int benefit;
+ rtx temp;
+ rtx set;
+
+ /* Indicate that this is a giv so that we can update the value produced in
+ each insn of the multi-insn sequence.
+
+ This induction structure will be used only by the call to
+ general_induction_var below, so we can allocate it on our stack.
+ If this is a giv, our caller will replace the induct var entry with
+ a new induction structure. */
+ struct induction *v
+ = (struct induction *) alloca (sizeof (struct induction));
+ v->src_reg = src_reg;
+ v->mult_val = *mult_val;
+ v->add_val = *add_val;
+ v->benefit = first_benefit;
+ v->cant_derive = 0;
+ v->derive_adjustment = 0;
+
+ reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
+ reg_iv_info[REGNO (dest_reg)] = v;
+
+ count = n_times_set[REGNO (dest_reg)] - 1;
+
+ while (count > 0)
+ {
+ p = NEXT_INSN (p);
+ code = GET_CODE (p);
+
+ /* If libcall, skip to end of call sequence. */
+ if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
+ p = XEXP (temp, 0);
+
+ if (code == INSN
+ && (set = single_set (p))
+ && GET_CODE (SET_DEST (set)) == REG
+ && SET_DEST (set) == dest_reg
+ && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
+ add_val, mult_val))
+ /* Giv created by equivalent expression. */
+ || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
+ && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
+ add_val, mult_val))))
+ && src_reg == v->src_reg)
+ {
+ if (find_reg_note (p, REG_RETVAL, NULL_RTX))
+ benefit += libcall_benefit (p);
+
+ count--;
+ v->mult_val = *mult_val;
+ v->add_val = *add_val;
+ v->benefit = benefit;
+ }
+ else if (code != NOTE)
+ {
+ /* Allow insns that set something other than this giv to a
+ constant. Such insns are needed on machines which cannot
+ load long constants in one insn and should not disqualify a giv. */
+ if (code == INSN
+ && (set = single_set (p))
+ && SET_DEST (set) != dest_reg
+ && CONSTANT_P (SET_SRC (set)))
+ continue;
+
+ reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
+ return 0;
+ }
+ }
+
+ return v->benefit;
+}
+
+/* Return an rtx, if any, that expresses giv G2 as a function of the register
+ represented by G1. If no such expression can be found, or it is clear that
+ it cannot possibly be a valid address, 0 is returned.
+
+ To perform the computation, we note that
+ G1 = a * v + b and
+ G2 = c * v + d
+ where `v' is the biv.
+
+ So G2 = (c/a) * G1 + (d - b*c/a) */
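+
+ /* For instance, with G1 = 4*v + 8 and G2 = 8*v + 4, we get c/a = 2 and
+ d - b*c/a = 4 - 16 = -12, so G2 = 2 * G1 - 12. */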
+
+#ifdef ADDRESS_COST
+static rtx
+express_from (g1, g2)
+ struct induction *g1, *g2;
+{
+ rtx mult, add;
+
+ /* The value that G1 will be multiplied by must be a constant integer. Also,
+ the only chance we have of getting a valid address is if b*c/a (see above
+ for notation) is also an integer. */
+ if (GET_CODE (g1->mult_val) != CONST_INT
+ || GET_CODE (g2->mult_val) != CONST_INT
+ || GET_CODE (g1->add_val) != CONST_INT
+ || g1->mult_val == const0_rtx
+ || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
+ return 0;
+
+ mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
+ add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));
+
+ /* Form simplified final result. */
+ if (mult == const0_rtx)
+ return add;
+ else if (mult == const1_rtx)
+ mult = g1->dest_reg;
+ else
+ mult = gen_rtx (MULT, g2->mode, g1->dest_reg, mult);
+
+ if (add == const0_rtx)
+ return mult;
+ else
+ return gen_rtx (PLUS, g2->mode, mult, add);
+}
+#endif
+
+/* Return 1 if giv G2 can be combined with G1. This means that G2 can use
+ (either directly or via an address expression) a register used to represent
+ G1. Set g2->new_reg to a representation of G1 (normally just
+ g1->dest_reg). */
+
+static int
+combine_givs_p (g1, g2)
+ struct induction *g1, *g2;
+{
+ rtx tem;
+
+ /* If these givs are identical, they can be combined. */
+ if (rtx_equal_p (g1->mult_val, g2->mult_val)
+ && rtx_equal_p (g1->add_val, g2->add_val))
+ {
+ g2->new_reg = g1->dest_reg;
+ return 1;
+ }
+
+#ifdef ADDRESS_COST
+ /* If G2 can be expressed as a function of G1 and that function is valid
+ as an address and no more expensive than using a register for G2,
+ the expression of G2 in terms of G1 can be used. */
+ if (g2->giv_type == DEST_ADDR
+ && (tem = express_from (g1, g2)) != 0
+ && memory_address_p (g2->mem_mode, tem)
+ && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
+ {
+ g2->new_reg = tem;
+ return 1;
+ }
+#endif
+
+ return 0;
+}
+
+/* Check all pairs of givs for iv_class BL and see if any can be combined with
+ any other. If so, point SAME to the giv it is combined with and set NEW_REG
+ be an expression (in terms of the other giv's DEST_REG) equivalent to the
+ giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
+
+static void
+combine_givs (bl)
+ struct iv_class *bl;
+{
+ struct induction *g1, *g2;
+ int pass;
+
+ for (g1 = bl->giv; g1; g1 = g1->next_iv)
+ for (pass = 0; pass <= 1; pass++)
+ for (g2 = bl->giv; g2; g2 = g2->next_iv)
+ if (g1 != g2
+ /* First try to combine with replaceable givs, then all givs. */
+ && (g1->replaceable || pass == 1)
+ /* If either has already been combined or is to be ignored, can't
+ combine. */
+ && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
+ /* If something has been based on G2, G2 cannot itself be based
+ on something else. */
+ && ! g2->combined_with
+ && combine_givs_p (g1, g2))
+ {
+ /* g2->new_reg set by `combine_givs_p' */
+ g2->same = g1;
+ g1->combined_with = 1;
+ g1->benefit += g2->benefit;
+ /* ??? The new final_[bg]iv_value code does a much better job
+ of finding replaceable giv's, and hence this code may no
+ longer be necessary. */
+ if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
+ g1->benefit -= copy_cost;
+ g1->lifetime += g2->lifetime;
+ g1->times_used += g2->times_used;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
+ INSN_UID (g2->insn), INSN_UID (g1->insn));
+ }
+}
+
+ /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
+
+void
+emit_iv_add_mult (b, m, a, reg, insert_before)
+ rtx b; /* initial value of basic induction variable */
+ rtx m; /* multiplicative constant */
+ rtx a; /* additive constant */
+ rtx reg; /* destination register */
+ rtx insert_before;
+{
+ rtx seq;
+ rtx result;
+
+ /* Prevent unexpected sharing of these rtx. */
+ a = copy_rtx (a);
+ b = copy_rtx (b);
+
+ /* Increase the lifetime of any invariants moved further in code. */
+ update_reg_last_use (a, insert_before);
+ update_reg_last_use (b, insert_before);
+ update_reg_last_use (m, insert_before);
+
+ start_sequence ();
+ result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
+ if (reg != result)
+ emit_move_insn (reg, result);
+ seq = gen_sequence ();
+ end_sequence ();
+
+ emit_insn_before (seq, insert_before);
+}
+
+/* Test whether A * B can be computed without
+ an actual multiply insn. Value is 1 if so. */
+
+static int
+product_cheap_p (a, b)
+ rtx a;
+ rtx b;
+{
+ int i;
+ rtx tmp;
+ struct obstack *old_rtl_obstack = rtl_obstack;
+ char *storage = (char *) obstack_alloc (&temp_obstack, 0);
+ int win = 1;
+
+ /* If only one is constant, make it B. */
+ if (GET_CODE (a) == CONST_INT)
+ tmp = a, a = b, b = tmp;
+
+ /* If first constant, both constant, so don't need multiply. */
+ if (GET_CODE (a) == CONST_INT)
+ return 1;
+
+ /* If second not constant, neither is constant, so would need multiply. */
+ if (GET_CODE (b) != CONST_INT)
+ return 0;
+
+ /* One operand is constant, so might not need multiply insn. Generate the
+ code for the multiply and see if a call or multiply, or long sequence
+ of insns is generated. */
+
+ rtl_obstack = &temp_obstack;
+ start_sequence ();
+ expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
+ tmp = gen_sequence ();
+ end_sequence ();
+
+ if (GET_CODE (tmp) == SEQUENCE)
+ {
+ if (XVEC (tmp, 0) == 0)
+ win = 1;
+ else if (XVECLEN (tmp, 0) > 3)
+ win = 0;
+ else
+ for (i = 0; i < XVECLEN (tmp, 0); i++)
+ {
+ rtx insn = XVECEXP (tmp, 0, i);
+
+ if (GET_CODE (insn) != INSN
+ || (GET_CODE (PATTERN (insn)) == SET
+ && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
+ || (GET_CODE (PATTERN (insn)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
+ {
+ win = 0;
+ break;
+ }
+ }
+ }
+ else if (GET_CODE (tmp) == SET
+ && GET_CODE (SET_SRC (tmp)) == MULT)
+ win = 0;
+ else if (GET_CODE (tmp) == PARALLEL
+ && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
+ win = 0;
+
+ /* Free any storage we obtained in generating this multiply and restore rtl
+ allocation to its normal obstack. */
+ obstack_free (&temp_obstack, storage);
+ rtl_obstack = old_rtl_obstack;
+
+ return win;
+}
+
+/* Check to see if loop can be terminated by a "decrement and branch until
+ zero" instruction. If so, add a REG_NONNEG note to the branch insn.
+ Also try reversing an increment loop to a decrement loop
+ to see if the optimization can be performed.
+ Value is nonzero if optimization was performed. */
+
+/* This is useful even if the architecture doesn't have such an insn,
+ because it might change a loop which increments from 0 to n into a loop
+ which decrements from n to 0. A loop that decrements to zero is usually
+ faster than one that increments from zero. */
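+
+ /* For example, a counting loop such as
+ for (i = 0; i < 100; i++) body;
+ whose index is used only to count iterations can be rewritten as
+ for (i = 100; --i >= 0; ) body;
+ so that the exit test compares the biv against zero. */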
+
+/* ??? This could be rewritten to use some of the loop unrolling procedures,
+ such as approx_final_value, biv_total_increment, loop_iterations, and
+ final_[bg]iv_value. */
+
+static int
+check_dbra_loop (loop_end, insn_count, loop_start)
+ rtx loop_end;
+ int insn_count;
+ rtx loop_start;
+{
+ struct iv_class *bl;
+ rtx reg;
+ rtx jump_label;
+ rtx final_value;
+ rtx start_value;
+ rtx new_add_val;
+ rtx comparison;
+ rtx before_comparison;
+ rtx p;
+
+ /* If last insn is a conditional branch, and the insn before tests a
+ register value, try to optimize it. Otherwise, we can't do anything. */
+
+ comparison = get_condition_for_loop (PREV_INSN (loop_end));
+ if (comparison == 0)
+ return 0;
+
+ /* Check all of the bivs to see if the compare uses one of them.
+ Skip biv's set more than once because we can't guarantee that
+ it will be zero on the last iteration. Also skip if the biv is
+ used between its update and the test insn. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ if (bl->biv_count == 1
+ && bl->biv->dest_reg == XEXP (comparison, 0)
+ && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
+ PREV_INSN (PREV_INSN (loop_end))))
+ break;
+ }
+
+ if (! bl)
+ return 0;
+
+ /* Look for the case where the basic induction variable is always
+ nonnegative, and equals zero on the last iteration.
+ In this case, add a reg_note REG_NONNEG, which allows the
+ m68k DBRA instruction to be used. */
+
+ if (((GET_CODE (comparison) == GT
+ && GET_CODE (XEXP (comparison, 1)) == CONST_INT
+ && INTVAL (XEXP (comparison, 1)) == -1)
+ || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
+ && GET_CODE (bl->biv->add_val) == CONST_INT
+ && INTVAL (bl->biv->add_val) < 0)
+ {
+ /* The initial value must be greater than 0, and
+ init_val % -dec_value == 0 must hold so that the biv equals zero on
+ the last iteration. */
+
+ if (GET_CODE (bl->initial_value) == CONST_INT
+ && INTVAL (bl->initial_value) > 0
+ && (INTVAL (bl->initial_value) %
+ (-INTVAL (bl->biv->add_val))) == 0)
+ {
+ /* register always nonnegative, add REG_NOTE to branch */
+ REG_NOTES (PREV_INSN (loop_end))
+ = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
+ REG_NOTES (PREV_INSN (loop_end)));
+ bl->nonneg = 1;
+
+ return 1;
+ }
+
+ /* If the decrement is 1 and the value was tested as >= 0 before
+ the loop, then we can safely optimize. */
+ for (p = loop_start; p; p = PREV_INSN (p))
+ {
+ if (GET_CODE (p) == CODE_LABEL)
+ break;
+ if (GET_CODE (p) != JUMP_INSN)
+ continue;
+
+ before_comparison = get_condition_for_loop (p);
+ if (before_comparison
+ && XEXP (before_comparison, 0) == bl->biv->dest_reg
+ && GET_CODE (before_comparison) == LT
+ && XEXP (before_comparison, 1) == const0_rtx
+ && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
+ && INTVAL (bl->biv->add_val) == -1)
+ {
+ REG_NOTES (PREV_INSN (loop_end))
+ = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
+ REG_NOTES (PREV_INSN (loop_end)));
+ bl->nonneg = 1;
+
+ return 1;
+ }
+ }
+ }
+ else if (num_mem_sets <= 1)
+ {
+ /* Try to change inc to dec, so can apply above optimization. */
+ /* Can do this if:
+ all registers modified are induction variables or invariant,
+ all memory references have non-overlapping addresses
+ (obviously true if there is only one write);
+ we allow 2 insns for the compare/jump at the end of the loop. */
+ int num_nonfixed_reads = 0;
+ /* 1 if the iteration var is used only to count iterations. */
+ int no_use_except_counting = 0;
+ /* 1 if the loop has no memory store, or it has a single memory store
+ which is reversible. */
+ int reversible_mem_store = 1;
+
+ for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
+
+ if (bl->giv_count == 0
+ && ! loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
+ {
+ rtx bivreg = regno_reg_rtx[bl->regno];
+
+ /* If there are no givs for this biv, and the only exit is the
+ fall through at the end of the loop, then
+ see if perhaps there are no uses except to count. */
+ no_use_except_counting = 1;
+ for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ {
+ rtx set = single_set (p);
+
+ if (set && GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) == bl->regno)
+ /* An insn that sets the biv is okay. */
+ ;
+ else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
+ || p == prev_nonnote_insn (loop_end))
+ /* Don't bother about the end test. */
+ ;
+ else if (reg_mentioned_p (bivreg, PATTERN (p)))
+ /* Any other use of the biv is no good. */
+ {
+ no_use_except_counting = 0;
+ break;
+ }
+ }
+ }
+
+ /* If the loop has a single store, and the destination address is
+ invariant, then we can't reverse the loop, because this address
+ might then have the wrong value at loop exit.
+ This would work if the source was invariant also, however, in that
+ case, the insn should have been moved out of the loop. */
+
+ if (num_mem_sets == 1)
+ reversible_mem_store
+ = (! unknown_address_altered
+ && ! invariant_p (XEXP (loop_store_mems[0], 0)));
+
+ /* This code only acts for innermost loops. Also it simplifies
+ the memory address check by only reversing loops with
+ zero or one memory access.
+ Two memory accesses could involve parts of the same array,
+ and that can't be reversed. */
+
+ if (num_nonfixed_reads <= 1
+ && !loop_has_call
+ && !loop_has_volatile
+ && reversible_mem_store
+ && (no_use_except_counting
+ || (bl->giv_count + bl->biv_count + num_mem_sets
+ + num_movables + 2 == insn_count)))
+ {
+ rtx tem;
+
+ /* Loop can be reversed. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Can reverse loop\n");
+
+ /* Now check other conditions:
+ initial_value must be zero,
+ final_value % add_val == 0, so that when reversed, the
+ biv will be zero on the last iteration.
+
+ This test can probably be improved since +/- 1 in the constant
+ can be obtained by changing LT to LE and vice versa; this is
+ confusing. */
+
+ if (comparison && bl->initial_value == const0_rtx
+ && GET_CODE (XEXP (comparison, 1)) == CONST_INT
+ /* LE gets turned into LT */
+ && GET_CODE (comparison) == LT
+ && (INTVAL (XEXP (comparison, 1))
+ % INTVAL (bl->biv->add_val)) == 0)
+ {
+ /* Register will always be nonnegative, with value
+ 0 on last iteration if loop reversed */
+
+ /* Save some info needed to produce the new insns. */
+ reg = bl->biv->dest_reg;
+ jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
+ new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
+
+ final_value = XEXP (comparison, 1);
+ start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
+ - INTVAL (bl->biv->add_val));
+
+ /* Initialize biv to start_value before loop start.
+ The old initializing insn will be deleted as a
+ dead store by flow.c. */
+ emit_insn_before (gen_move_insn (reg, start_value), loop_start);
+
+ /* Add insn to decrement register, and delete insn
+ that incremented the register. */
+ p = emit_insn_before (gen_add2_insn (reg, new_add_val),
+ bl->biv->insn);
+ delete_insn (bl->biv->insn);
+
+ /* Update biv info to reflect its new status. */
+ bl->biv->insn = p;
+ bl->initial_value = start_value;
+ bl->biv->add_val = new_add_val;
+
+ /* Inc LABEL_NUSES so that delete_insn will
+ not delete the label. */
+ LABEL_NUSES (XEXP (jump_label, 0)) ++;
+
+ /* Emit an insn after the end of the loop to set the biv's
+ proper exit value if it is used anywhere outside the loop. */
+ if ((regno_last_uid[bl->regno]
+ != INSN_UID (PREV_INSN (PREV_INSN (loop_end))))
+ || ! bl->init_insn
+ || regno_first_uid[bl->regno] != INSN_UID (bl->init_insn))
+ emit_insn_after (gen_move_insn (reg, final_value),
+ loop_end);
+
+ /* Delete compare/branch at end of loop. */
+ delete_insn (PREV_INSN (loop_end));
+ delete_insn (PREV_INSN (loop_end));
+
+ /* Add new compare/branch insn at end of loop. */
+ start_sequence ();
+ emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
+ GET_MODE (reg), 0, 0);
+ emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
+ tem = gen_sequence ();
+ end_sequence ();
+ emit_jump_insn_before (tem, loop_end);
+
+ for (tem = PREV_INSN (loop_end);
+ tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
+ ;
+ if (tem)
+ {
+ JUMP_LABEL (tem) = XEXP (jump_label, 0);
+
+ /* Increment of LABEL_NUSES done above. */
+ /* Register is now always nonnegative,
+ so add REG_NONNEG note to the branch. */
+ REG_NOTES (tem) = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
+ REG_NOTES (tem));
+ }
+
+ bl->nonneg = 1;
+
+ /* Mark that this biv has been reversed. Each giv which depends
+ on this biv, and which is also live past the end of the loop,
+ will have to be fixed up. */
+
+ bl->reversed = 1;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Reversed loop and added reg_nonneg\n");
+
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Verify whether the biv BL appears to be eliminable,
+ based on the insns in the loop that refer to it.
+ LOOP_START is the first insn of the loop, and END is the end insn.
+
+ If ELIMINATE_P is non-zero, actually do the elimination.
+
+ THRESHOLD and INSN_COUNT are from loop_optimize and are used to
+ determine whether invariant insns should be placed inside or at the
+ start of the loop. */
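+
+ /* For example, if the only remaining use of biv I inside the loop is
+ the exit test I < 100, and the giv G = 4*I has been reduced, the
+ test can be replaced by G < 400 and I eliminated. */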
+
+static int
+maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
+ struct iv_class *bl;
+ rtx loop_start;
+ rtx end;
+ int eliminate_p;
+ int threshold, insn_count;
+{
+ rtx reg = bl->biv->dest_reg;
+ rtx p;
+
+ /* Scan all insns in the loop, stopping if we find one that uses the
+ biv in a way that we cannot eliminate. */
+
+ for (p = loop_start; p != end; p = NEXT_INSN (p))
+ {
+ enum rtx_code code = GET_CODE (p);
+ rtx where = threshold >= insn_count ? loop_start : p;
+
+ if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
+ && reg_mentioned_p (reg, PATTERN (p))
+ && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Cannot eliminate biv %d: biv used in insn %d.\n",
+ bl->regno, INSN_UID (p));
+ break;
+ }
+ }
+
+ if (p == end)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
+ bl->regno, eliminate_p ? "was" : "can be");
+ return 1;
+ }
+
+ return 0;
+}
+
+ /* If the biv of class BL appears in X (part of the pattern of INSN),
+ see if we can eliminate its use. If so, return 1. If not, return 0.
+
+ If the biv does not appear in X, return 1.
+
+ If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
+ where extra insns should be added. Depending on how many items have been
+ moved out of the loop, it will either be before INSN or at the start of
+ the loop. */
+
+static int
+maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
+ rtx x, insn;
+ struct iv_class *bl;
+ int eliminate_p;
+ rtx where;
+{
+ enum rtx_code code = GET_CODE (x);
+ rtx reg = bl->biv->dest_reg;
+ enum machine_mode mode = GET_MODE (reg);
+ struct induction *v;
+ rtx arg, new, tem;
+ int arg_operand;
+ char *fmt;
+ int i, j;
+
+ switch (code)
+ {
+ case REG:
+ /* If we haven't already been able to do something with this BIV,
+ we can't eliminate it. */
+ if (x == reg)
+ return 0;
+ return 1;
+
+ case SET:
+ /* If this sets the BIV, it is not a problem. */
+ if (SET_DEST (x) == reg)
+ return 1;
+
+ /* If this is an insn that defines a giv, it is also ok because
+ it will go away when the giv is reduced. */
+ for (v = bl->giv; v; v = v->next_iv)
+ if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
+ return 1;
+
+#ifdef HAVE_cc0
+ if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
+ {
+ /* Can replace with any giv that was reduced and
+ that has (MULT_VAL != 0) and (ADD_VAL == 0).
+ Require a constant for MULT_VAL, so we know it's nonzero. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
+ && v->add_val == const0_rtx
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode)
+ {
+ if (! eliminate_p)
+ return 1;
+
+ /* If the giv has the opposite direction of change,
+ then reverse the comparison. */
+ if (INTVAL (v->mult_val) < 0)
+ new = gen_rtx (COMPARE, GET_MODE (v->new_reg),
+ const0_rtx, v->new_reg);
+ else
+ new = v->new_reg;
+
+ /* We can probably test that giv's reduced reg. */
+ if (validate_change (insn, &SET_SRC (x), new, 0))
+ return 1;
+ }
+
+ /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
+ replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
+ Require a constant for MULT_VAL, so we know it's nonzero. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode)
+ {
+ if (! eliminate_p)
+ return 1;
+
+ /* If the giv has the opposite direction of change,
+ then reverse the comparison. */
+ if (INTVAL (v->mult_val) < 0)
+ new = gen_rtx (COMPARE, VOIDmode, copy_rtx (v->add_val),
+ v->new_reg);
+ else
+ new = gen_rtx (COMPARE, VOIDmode, v->new_reg,
+ copy_rtx (v->add_val));
+
+ /* Replace biv with the giv's reduced register. */
+ update_reg_last_use (v->add_val, insn);
+ if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
+ return 1;
+
+ /* Insn doesn't support that constant or invariant. Copy it
+ into a register (it will be a loop invariant). */
+ tem = gen_reg_rtx (GET_MODE (v->new_reg));
+
+ emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
+ where);
+
+ if (validate_change (insn, &SET_SRC (PATTERN (insn)),
+ gen_rtx (COMPARE, VOIDmode,
+ v->new_reg, tem), 0))
+ return 1;
+ }
+ }
+#endif
+ break;
+
+ case COMPARE:
+ case EQ: case NE:
+ case GT: case GE: case GTU: case GEU:
+ case LT: case LE: case LTU: case LEU:
+ /* See if either argument is the biv. */
+ if (XEXP (x, 0) == reg)
+ arg = XEXP (x, 1), arg_operand = 1;
+ else if (XEXP (x, 1) == reg)
+ arg = XEXP (x, 0), arg_operand = 0;
+ else
+ break;
+
+ if (CONSTANT_P (arg))
+ {
+ /* First try to replace with any giv that has constant positive
+ mult_val and constant add_val. We might be able to support
+ negative mult_val, but it seems complex to do it in general. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
+ && CONSTANT_P (v->add_val)
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode)
+ {
+ if (! eliminate_p)
+ return 1;
+
+ /* Replace biv with the giv's reduced reg. */
+ XEXP (x, 1-arg_operand) = v->new_reg;
+
+ /* If all constants are actually constant integers and
+ the derived constant can be directly placed in the COMPARE,
+ do so. */
+ if (GET_CODE (arg) == CONST_INT
+ && GET_CODE (v->mult_val) == CONST_INT
+ && GET_CODE (v->add_val) == CONST_INT
+ && validate_change (insn, &XEXP (x, arg_operand),
+ GEN_INT (INTVAL (arg)
+ * INTVAL (v->mult_val)
+ + INTVAL (v->add_val)), 0))
+ return 1;
+
+ /* Otherwise, load it into a register. */
+ tem = gen_reg_rtx (mode);
+ emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
+ if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
+ return 1;
+
+ /* If that failed, put back the change we made above. */
+ XEXP (x, 1-arg_operand) = reg;
+ }
+
+ /* Look for giv with positive constant mult_val and nonconst add_val.
+ Insert insns to calculate new compare value. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode)
+ {
+ rtx tem;
+
+ if (! eliminate_p)
+ return 1;
+
+ tem = gen_reg_rtx (mode);
+
+ /* Replace biv with giv's reduced register. */
+ validate_change (insn, &XEXP (x, 1 - arg_operand),
+ v->new_reg, 1);
+
+ /* Compute value to compare against. */
+ emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
+ /* Use it in this insn. */
+ validate_change (insn, &XEXP (x, arg_operand), tem, 1);
+ if (apply_change_group ())
+ return 1;
+ }
+ }
+ else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
+ {
+ if (invariant_p (arg) == 1)
+ {
+ /* Look for giv with constant positive mult_val and nonconst
+ add_val. Insert insns to compute new compare value. */
+
+ for (v = bl->giv; v; v = v->next_iv)
+ if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
+ && ! v->ignore && ! v->maybe_dead && v->always_computable
+ && v->mode == mode)
+ {
+ rtx tem;
+
+ if (! eliminate_p)
+ return 1;
+
+ tem = gen_reg_rtx (mode);
+
+ /* Replace biv with giv's reduced register. */
+ validate_change (insn, &XEXP (x, 1 - arg_operand),
+ v->new_reg, 1);
+
+ /* Compute value to compare against. */
+ emit_iv_add_mult (arg, v->mult_val, v->add_val,
+ tem, where);
+ validate_change (insn, &XEXP (x, arg_operand), tem, 1);
+ if (apply_change_group ())
+ return 1;
+ }
+ }
+
+ /* This code has problems. Basically, when deciding whether we will
+ eliminate BL, we can't know whether a particular giv
+ of ARG will be reduced. If it isn't going to be reduced,
+ we can't eliminate BL. We could try forcing it to be reduced,
+ but that can generate poor code.
+
+ The problem is that the benefit of reducing TV, below, should
+ be increased if BL can actually be eliminated, but this means
+ we might have to do a topological sort of the order in which
+ we try to process bivs. It doesn't seem worthwhile to do
+ this sort of thing now. */
+
+#if 0
+ /* Otherwise the reg compared with had better be a biv. */
+ if (GET_CODE (arg) != REG
+ || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
+ return 0;
+
+ /* Look for a pair of givs, one for each biv,
+ with identical coefficients. */
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ struct induction *tv;
+
+ if (v->ignore || v->maybe_dead || v->mode != mode)
+ continue;
+
+ for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
+ if (! tv->ignore && ! tv->maybe_dead
+ && rtx_equal_p (tv->mult_val, v->mult_val)
+ && rtx_equal_p (tv->add_val, v->add_val)
+ && tv->mode == mode)
+ {
+ if (! eliminate_p)
+ return 1;
+
+ /* Replace biv with its giv's reduced reg. */
+ XEXP (x, 1-arg_operand) = v->new_reg;
+ /* Replace other operand with the other giv's
+ reduced reg. */
+ XEXP (x, arg_operand) = tv->new_reg;
+ return 1;
+ }
+ }
+#endif
+ }
+
+ /* If we get here, the biv can't be eliminated. */
+ return 0;
+
+ case MEM:
+ /* If this address is a DEST_ADDR giv, it doesn't matter if the
+ biv is used in it, since it will be replaced. */
+ for (v = bl->giv; v; v = v->next_iv)
+ if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
+ return 1;
+ break;
+ }
+
+ /* See if any subexpression fails elimination. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'e':
+ if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
+ eliminate_p, where))
+ return 0;
+ break;
+
+ case 'E':
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
+ eliminate_p, where))
+ return 0;
+ break;
+ }
+ }
+
+ return 1;
+}
+
+ /* Return nonzero if the last use of REG
+ is in an insn at or after INSN in the same basic block. */
+
+static int
+last_use_this_basic_block (reg, insn)
+ rtx reg;
+ rtx insn;
+{
+ rtx n;
+ for (n = insn;
+ n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
+ n = NEXT_INSN (n))
+ {
+ if (regno_last_uid[REGNO (reg)] == INSN_UID (n))
+ return 1;
+ }
+ return 0;
+}
+
+/* Called via `note_stores' to record the initial value of a biv. Here we
+ just record the location of the set and process it later. */
+
+static void
+record_initial (dest, set)
+ rtx dest;
+ rtx set;
+{
+ struct iv_class *bl;
+
+ if (GET_CODE (dest) != REG
+ || REGNO (dest) >= max_reg_before_loop
+ || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
+ return;
+
+ bl = reg_biv_class[REGNO (dest)];
+
+ /* If this is the first set found, record it. */
+ if (bl->init_insn == 0)
+ {
+ bl->init_insn = note_insn;
+ bl->init_set = set;
+ }
+}
+
+/* If any of the registers in X are "old" and currently have a last use earlier
+ than INSN, update them to have a last use of INSN. Their actual last use
+ will be the previous insn but it will not have a valid uid_luid so we can't
+ use it. */
+
+static void
+update_reg_last_use (x, insn)
+ rtx x;
+ rtx insn;
+{
+ /* Check for the case where INSN does not have a valid luid. In this case,
+ there is no need to modify the regno_last_uid, as this can only happen
+ when code is inserted after the loop_end to set a pseudo's final value,
+ and hence this insn will never be the last use of x. */
+ if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
+ && INSN_UID (insn) < max_uid_for_loop
+ && uid_luid[regno_last_uid[REGNO (x)]] < uid_luid[INSN_UID (insn)])
+ regno_last_uid[REGNO (x)] = INSN_UID (insn);
+ else
+ {
+ register int i, j;
+ register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ update_reg_last_use (XEXP (x, i), insn);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ update_reg_last_use (XVECEXP (x, i, j), insn);
+ }
+ }
+}
+
+/* Given a jump insn JUMP, return the condition that will cause it to branch
+ to its JUMP_LABEL. If the condition cannot be understood, or is an
+ inequality floating-point comparison which needs to be reversed, 0 will
+ be returned.
+
+ If EARLIEST is non-zero, it is a pointer to a place where the earliest
+ insn used in locating the condition was found. If a replacement test
+ of the condition is desired, it should be placed in front of that
+ insn and we will be sure that the inputs are still valid.
+
+ The condition will be returned in a canonical form to simplify testing by
+ callers. Specifically:
+
+ (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
+ (2) Both operands will be machine operands; (cc0) will have been replaced.
+ (3) If an operand is a constant, it will be the second operand.
+ (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
+ for GE, GEU, and LEU. */
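+
+ /* Thus (le (reg X) (const_int 4)) is returned as
+ (lt (reg X) (const_int 5)). */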
+
+rtx
+get_condition (jump, earliest)
+ rtx jump;
+ rtx *earliest;
+{
+ enum rtx_code code;
+ rtx prev = jump;
+ rtx set;
+ rtx tem;
+ rtx op0, op1;
+ int reverse_code = 0;
+ int did_reverse_condition = 0;
+
+ /* If this is not a standard conditional jump, we can't parse it. */
+ if (GET_CODE (jump) != JUMP_INSN
+ || ! condjump_p (jump) || simplejump_p (jump))
+ return 0;
+
+ code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
+ op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
+ op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
+
+ if (earliest)
+ *earliest = jump;
+
+ /* If this branches to JUMP_LABEL when the condition is false, reverse
+ the condition. */
+ if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
+ && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
+ code = reverse_condition (code), did_reverse_condition ^= 1;
+
+ /* If we are comparing a register with zero, see if the register is set
+ in the previous insn to a COMPARE or a comparison operation. Perform
+ the same tests, as a function of STORE_FLAG_VALUE, that
+ find_comparison_args in cse.c performs. */
+
+ while (GET_RTX_CLASS (code) == '<' && op1 == const0_rtx)
+ {
+ /* Set non-zero when we find something of interest. */
+ rtx x = 0;
+
+#ifdef HAVE_cc0
+ /* If comparison with cc0, import actual comparison from compare
+ insn. */
+ if (op0 == cc0_rtx)
+ {
+ if ((prev = prev_nonnote_insn (prev)) == 0
+ || GET_CODE (prev) != INSN
+ || (set = single_set (prev)) == 0
+ || SET_DEST (set) != cc0_rtx)
+ return 0;
+
+ op0 = SET_SRC (set);
+ op1 = CONST0_RTX (GET_MODE (op0));
+ if (earliest)
+ *earliest = prev;
+ }
+#endif
+
+ /* If this is a COMPARE, pick up the two things being compared. */
+ if (GET_CODE (op0) == COMPARE)
+ {
+ op1 = XEXP (op0, 1);
+ op0 = XEXP (op0, 0);
+ continue;
+ }
+ else if (GET_CODE (op0) != REG)
+ break;
+
+ /* Go back to the previous insn. Stop if it is not an INSN. We also
+ stop if it isn't a single set or if it has a REG_INC note because
+ we don't want to bother dealing with it. */
+
+ if ((prev = prev_nonnote_insn (prev)) == 0
+ || GET_CODE (prev) != INSN
+ || FIND_REG_INC_NOTE (prev, 0)
+ || (set = single_set (prev)) == 0)
+ break;
+
+ /* If this is setting OP0, get what it sets it to if it looks
+ relevant. */
+ if (SET_DEST (set) == op0)
+ {
+ enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
+
+ if ((GET_CODE (SET_SRC (set)) == COMPARE
+ || (((code == NE
+ || (code == LT
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == LT
+ && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ ))
+ && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')))
+ x = SET_SRC (set);
+ else if (((code == EQ
+ || (code == GE
+ && (GET_MODE_BITSIZE (inner_mode)
+ <= HOST_BITS_PER_WIDE_INT)
+ && GET_MODE_CLASS (inner_mode) == MODE_INT
+ && (STORE_FLAG_VALUE
+ & ((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (inner_mode) - 1))))
+#ifdef FLOAT_STORE_FLAG_VALUE
+ || (code == GE
+ && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
+ && FLOAT_STORE_FLAG_VALUE < 0)
+#endif
+ ))
+ && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')
+ {
+ /* We might have reversed a LT to get a GE here. But this wasn't
+ actually the comparison of data, so we don't flag that we
+ have had to reverse the condition. */
+ did_reverse_condition ^= 1;
+ reverse_code = 1;
+ x = SET_SRC (set);
+ }
+ else
+ break;
+ }
+
+ else if (reg_set_p (op0, prev))
+ /* If this sets OP0, but not directly, we have to give up. */
+ break;
+
+ if (x)
+ {
+ if (GET_RTX_CLASS (GET_CODE (x)) == '<')
+ code = GET_CODE (x);
+ if (reverse_code)
+ {
+ code = reverse_condition (code);
+ did_reverse_condition ^= 1;
+ reverse_code = 0;
+ }
+
+ op0 = XEXP (x, 0), op1 = XEXP (x, 1);
+ if (earliest)
+ *earliest = prev;
+ }
+ }
+
+ /* If constant is first, put it last. */
+ if (CONSTANT_P (op0))
+ code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
+
+ /* If OP0 is the result of a comparison, we weren't able to find what
+ was really being compared, so fail. */
+ if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
+ return 0;
+
+ /* Canonicalize any ordered comparison with an integer constant that
+ involves equality (LE, GE, LEU, GEU) into a strict comparison,
+ if we can do computations in the relevant mode and we do not
+ overflow. */
+
+ if (GET_CODE (op1) == CONST_INT
+ && GET_MODE (op0) != VOIDmode
+ && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
+ {
+ HOST_WIDE_INT const_val = INTVAL (op1);
+ unsigned HOST_WIDE_INT uconst_val = const_val;
+ unsigned HOST_WIDE_INT max_val
+ = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
+
+ switch (code)
+ {
+ case LE:
+ if (const_val != max_val >> 1)
+ code = LT, op1 = GEN_INT (const_val + 1);
+ break;
+
+ case GE:
+ if (const_val
+ != (((HOST_WIDE_INT) 1
+ << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
+ code = GT, op1 = GEN_INT (const_val - 1);
+ break;
+
+ case LEU:
+ if (uconst_val != max_val)
+ code = LTU, op1 = GEN_INT (uconst_val + 1);
+ break;
+
+ case GEU:
+ if (uconst_val != 0)
+ code = GTU, op1 = GEN_INT (uconst_val - 1);
+ break;
+ }
+ }
+
+ /* If this was floating-point and we reversed anything other than an
+ EQ or NE, return zero. */
+ if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ && did_reverse_condition && code != NE && code != EQ
+ && ! flag_fast_math
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
+ return 0;
+
+#ifdef HAVE_cc0
+ /* Never return CC0; return zero instead. */
+ if (op0 == cc0_rtx)
+ return 0;
+#endif
+
+ return gen_rtx (code, VOIDmode, op0, op1);
+}
+
+/* Similar to above routine, except that we also put an invariant last
+ unless both operands are invariants. */
+
+rtx
+get_condition_for_loop (x)
+ rtx x;
+{
+ rtx comparison = get_condition (x, NULL_PTR);
+
+ if (comparison == 0
+ || ! invariant_p (XEXP (comparison, 0))
+ || invariant_p (XEXP (comparison, 1)))
+ return comparison;
+
+ return gen_rtx (swap_condition (GET_CODE (comparison)), VOIDmode,
+ XEXP (comparison, 1), XEXP (comparison, 0));
+}
diff --git a/gnu/usr.bin/cc/cc_int/obstack.c b/gnu/usr.bin/cc/cc_int/obstack.c
new file mode 100644
index 0000000..a8a4500
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/obstack.c
@@ -0,0 +1,485 @@
+/* obstack.c - subroutines used implicitly by object stack macros
+ Copyright (C) 1988, 89, 90, 91, 92, 93, 94 Free Software Foundation, Inc.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include "obstack.h"
+
+/* This is just to get __GNU_LIBRARY__ defined. */
+#include <stdio.h>
+
+/* Comment out all this code if we are using the GNU C Library, and are not
+ actually compiling the library itself. This code is part of the GNU C
+ Library, but also included in many other GNU distributions. Compiling
+ and linking in this code is a waste when using the GNU C library
+ (especially if it is a shared library). Rather than having every GNU
+ program understand `configure --with-gnu-libc' and omit the object files,
+ it is simpler to just do this in the source for each such file. */
+
+#if defined (_LIBC) || !defined (__GNU_LIBRARY__)
+
+
+#ifdef __STDC__
+#define POINTER void *
+#else
+#define POINTER char *
+#endif
+
+/* Determine default alignment. */
+struct fooalign {char x; double d;};
+#define DEFAULT_ALIGNMENT \
+ ((PTR_INT_TYPE) ((char *)&((struct fooalign *) 0)->d - (char *)0))
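+ /* The expression above computes the offset of D in struct fooalign,
+ i.e. the alignment the compiler requires for a double after a char,
+ which is taken as the strictest alignment needed. */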
+/* If malloc were really smart, it would round addresses to DEFAULT_ALIGNMENT.
+ But in fact it might be less smart and round addresses to as much as
+ DEFAULT_ROUNDING. So we prepare for it to do that. */
+union fooround {long x; double d;};
+#define DEFAULT_ROUNDING (sizeof (union fooround))
+
+/* When we copy a long block of data, this is the unit to do it with.
+ On some machines, copying successive ints does not work;
+ in such a case, redefine COPYING_UNIT to `long' (if that works)
+ or `char' as a last resort. */
+#ifndef COPYING_UNIT
+#define COPYING_UNIT int
+#endif
+
+/* The non-GNU-C macros copy the obstack into this global variable
+ to avoid multiple evaluation. */
+
+struct obstack *_obstack;
+
+/* Define a macro that either calls functions with the traditional malloc/free
+ calling interface, or calls functions with the mmalloc/mfree interface
+ (that adds an extra first argument), based on the state of use_extra_arg.
+ For free, do not use ?:, since some compilers, like the MIPS compilers,
+ do not allow (expr) ? void : void. */
+
+#define CALL_CHUNKFUN(h, size) \
+ (((h) -> use_extra_arg) \
+ ? (*(h)->chunkfun) ((h)->extra_arg, (size)) \
+ : (*(h)->chunkfun) ((size)))
+
+#define CALL_FREEFUN(h, old_chunk) \
+ do { \
+ if ((h) -> use_extra_arg) \
+ (*(h)->freefun) ((h)->extra_arg, (old_chunk)); \
+ else \
+ (*(h)->freefun) ((old_chunk)); \
+ } while (0)
+
+
+/* Initialize an obstack H for use. Specify chunk size SIZE (0 means default).
+ Objects start on multiples of ALIGNMENT (0 means use default).
+ CHUNKFUN is the function to use to allocate chunks,
+ and FREEFUN the function to free them.
+
+ Return nonzero if successful, zero if out of memory.
+ To recover from an out of memory error,
+ free up some memory, then call this again. */
+
+int
+_obstack_begin (h, size, alignment, chunkfun, freefun)
+ struct obstack *h;
+ int size;
+ int alignment;
+ POINTER (*chunkfun) ();
+ void (*freefun) ();
+{
+ register struct _obstack_chunk* chunk; /* points to new chunk */
+
+ if (alignment == 0)
+ alignment = DEFAULT_ALIGNMENT;
+ if (size == 0)
+ /* Default size is what GNU malloc can fit in a 4096-byte block. */
+ {
+ /* 12 is sizeof (mhead) and 4 is EXTRA from GNU malloc.
+ Use the values for range checking, because if range checking is off,
+ the extra bytes won't be missed terribly, but if range checking is on
+ and we used a larger request, a whole extra 4096 bytes would be
+ allocated.
+
+ These numbers are irrelevant to the new GNU malloc. I suspect it is
+ less sensitive to the size of the request. */
+ int extra = ((((12 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1))
+ + 4 + DEFAULT_ROUNDING - 1)
+ & ~(DEFAULT_ROUNDING - 1));
+ size = 4096 - extra;
+ }
+
+ h->chunkfun = (struct _obstack_chunk * (*)()) chunkfun;
+ h->freefun = freefun;
+ h->chunk_size = size;
+ h->alignment_mask = alignment - 1;
+ h->use_extra_arg = 0;
+
+ chunk = h->chunk = CALL_CHUNKFUN (h, h -> chunk_size);
+ if (!chunk)
+ {
+ h->alloc_failed = 1;
+ return 0;
+ }
+ h->alloc_failed = 0;
+ h->next_free = h->object_base = chunk->contents;
+ h->chunk_limit = chunk->limit
+ = (char *) chunk + h->chunk_size;
+ chunk->prev = 0;
+ /* The initial chunk now contains no empty object. */
+ h->maybe_empty_object = 0;
+ return 1;
+}
+
+int
+_obstack_begin_1 (h, size, alignment, chunkfun, freefun, arg)
+ struct obstack *h;
+ int size;
+ int alignment;
+ POINTER (*chunkfun) ();
+ void (*freefun) ();
+ POINTER arg;
+{
+ register struct _obstack_chunk* chunk; /* points to new chunk */
+
+ if (alignment == 0)
+ alignment = DEFAULT_ALIGNMENT;
+ if (size == 0)
+ /* Default size is what GNU malloc can fit in a 4096-byte block. */
+ {
+ /* 12 is sizeof (mhead) and 4 is EXTRA from GNU malloc.
+ Use the values for range checking, because if range checking is off,
+ the extra bytes won't be missed terribly, but if range checking is on
+ and we used a larger request, a whole extra 4096 bytes would be
+ allocated.
+
+ These numbers are irrelevant to the new GNU malloc. I suspect it is
+ less sensitive to the size of the request. */
+ int extra = ((((12 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1))
+ + 4 + DEFAULT_ROUNDING - 1)
+ & ~(DEFAULT_ROUNDING - 1));
+ size = 4096 - extra;
+ }
+
+ h->chunkfun = (struct _obstack_chunk * (*)()) chunkfun;
+ h->freefun = freefun;
+ h->chunk_size = size;
+ h->alignment_mask = alignment - 1;
+ h->extra_arg = arg;
+ h->use_extra_arg = 1;
+
+ chunk = h->chunk = CALL_CHUNKFUN (h, h -> chunk_size);
+ if (!chunk)
+ {
+ h->alloc_failed = 1;
+ return 0;
+ }
+ h->alloc_failed = 0;
+ h->next_free = h->object_base = chunk->contents;
+ h->chunk_limit = chunk->limit
+ = (char *) chunk + h->chunk_size;
+ chunk->prev = 0;
+ /* The initial chunk now contains no empty object. */
+ h->maybe_empty_object = 0;
+ return 1;
+}
+
+/* Allocate a new current chunk for the obstack *H
+ on the assumption that LENGTH bytes need to be added
+ to the current object, or a new object of length LENGTH allocated.
+ Copies any partial object from the end of the old chunk
+ to the beginning of the new one. */
+
+void
+_obstack_newchunk (h, length)
+ struct obstack *h;
+ int length;
+{
+ register struct _obstack_chunk* old_chunk = h->chunk;
+ register struct _obstack_chunk* new_chunk;
+ register long new_size;
+ register int obj_size = h->next_free - h->object_base;
+ register int i;
+ int already;
+
+ /* Compute size for new chunk. */
+ new_size = (obj_size + length) + (obj_size >> 3) + 100;
+ if (new_size < h->chunk_size)
+ new_size = h->chunk_size;
+
+ /* Allocate and initialize the new chunk. */
+ new_chunk = CALL_CHUNKFUN (h, new_size);
+ if (!new_chunk)
+ {
+ h->alloc_failed = 1;
+ return;
+ }
+ h->alloc_failed = 0;
+ h->chunk = new_chunk;
+ new_chunk->prev = old_chunk;
+ new_chunk->limit = h->chunk_limit = (char *) new_chunk + new_size;
+
+ /* Move the existing object to the new chunk.
+ Word at a time is fast and is safe if the object
+ is sufficiently aligned. */
+ if (h->alignment_mask + 1 >= DEFAULT_ALIGNMENT)
+ {
+ for (i = obj_size / sizeof (COPYING_UNIT) - 1;
+ i >= 0; i--)
+ ((COPYING_UNIT *)new_chunk->contents)[i]
+ = ((COPYING_UNIT *)h->object_base)[i];
+ /* We used to copy the odd few remaining bytes as one extra COPYING_UNIT,
+ but that can cross a page boundary on a machine
+ which does not do strict alignment for COPYING_UNITS. */
+ already = obj_size / sizeof (COPYING_UNIT) * sizeof (COPYING_UNIT);
+ }
+ else
+ already = 0;
+ /* Copy remaining bytes one by one. */
+ for (i = already; i < obj_size; i++)
+ new_chunk->contents[i] = h->object_base[i];
+
+ /* If the object just copied was the only data in OLD_CHUNK,
+ free that chunk and remove it from the chain.
+ But not if that chunk might contain an empty object. */
+ if (h->object_base == old_chunk->contents && ! h->maybe_empty_object)
+ {
+ new_chunk->prev = old_chunk->prev;
+ CALL_FREEFUN (h, old_chunk);
+ }
+
+ h->object_base = new_chunk->contents;
+ h->next_free = h->object_base + obj_size;
+ /* The new chunk certainly contains no empty object yet. */
+ h->maybe_empty_object = 0;
+}
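
For concreteness, one worked instance of the size computation above
(numbers illustrative):

    obj_size = 200, length = 50:
    new_size = (200 + 50) + (200 >> 3) + 100 = 375

Since 375 is less than the default chunk_size (about 4072), the new chunk
is allocated at chunk_size instead, so small growths still get full-sized
chunks.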
+
+/* Return nonzero if object OBJ has been allocated from obstack H.
+ This is here for debugging.
+ If you use it in a program, you are probably losing. */
+
+#ifdef __STDC__
+/* Suppress -Wmissing-prototypes warning. We don't want to declare this in
+ obstack.h because it is just for debugging. */
+int _obstack_allocated_p (struct obstack *h, POINTER obj);
+#endif
+
+int
+_obstack_allocated_p (h, obj)
+ struct obstack *h;
+ POINTER obj;
+{
+ register struct _obstack_chunk* lp; /* below addr of any objects in this chunk */
+ register struct _obstack_chunk* plp; /* point to previous chunk if any */
+
+ lp = (h)->chunk;
+ /* We use >= rather than > since the object cannot be exactly at
+ the beginning of the chunk but might be an empty object exactly
+ at the end of an adjacent chunk. */
+ while (lp != 0 && ((POINTER)lp >= obj || (POINTER)(lp)->limit < obj))
+ {
+ plp = lp->prev;
+ lp = plp;
+ }
+ return lp != 0;
+}
+
+/* Free objects in obstack H, including OBJ and everything allocated
+   more recently than OBJ.  If OBJ is zero, free everything in H.  */
+
+#undef obstack_free
+
+/* This function has two names with identical definitions.
+ This is the first one, called from non-ANSI code. */
+
+void
+_obstack_free (h, obj)
+ struct obstack *h;
+ POINTER obj;
+{
+ register struct _obstack_chunk* lp; /* below addr of any objects in this chunk */
+ register struct _obstack_chunk* plp; /* point to previous chunk if any */
+
+ lp = h->chunk;
+ /* We use >= because there cannot be an object at the beginning of a chunk.
+ But there can be an empty object at that address
+ at the end of another chunk. */
+ while (lp != 0 && ((POINTER)lp >= obj || (POINTER)(lp)->limit < obj))
+ {
+ plp = lp->prev;
+ CALL_FREEFUN (h, lp);
+ lp = plp;
+ /* If we switch chunks, we can't tell whether the new current
+ chunk contains an empty object, so assume that it may. */
+ h->maybe_empty_object = 1;
+ }
+ if (lp)
+ {
+ h->object_base = h->next_free = (char *)(obj);
+ h->chunk_limit = lp->limit;
+ h->chunk = lp;
+ }
+ else if (obj != 0)
+ /* obj is not in any of the chunks! */
+ abort ();
+}
+
+/* This function is used from ANSI code. */
+
+void
+obstack_free (h, obj)
+ struct obstack *h;
+ POINTER obj;
+{
+ register struct _obstack_chunk* lp; /* below addr of any objects in this chunk */
+ register struct _obstack_chunk* plp; /* point to previous chunk if any */
+
+ lp = h->chunk;
+ /* We use >= because there cannot be an object at the beginning of a chunk.
+ But there can be an empty object at that address
+ at the end of another chunk. */
+ while (lp != 0 && ((POINTER)lp >= obj || (POINTER)(lp)->limit < obj))
+ {
+ plp = lp->prev;
+ CALL_FREEFUN (h, lp);
+ lp = plp;
+ /* If we switch chunks, we can't tell whether the new current
+ chunk contains an empty object, so assume that it may. */
+ h->maybe_empty_object = 1;
+ }
+ if (lp)
+ {
+ h->object_base = h->next_free = (char *)(obj);
+ h->chunk_limit = lp->limit;
+ h->chunk = lp;
+ }
+ else if (obj != 0)
+ /* obj is not in any of the chunks! */
+ abort ();
+}
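
A short usage sketch of the rollback semantics just described (OB is a
hypothetical obstack, initialized as in the earlier sketch):

    char *mark;

    mark = (char *) obstack_alloc (&ob, 16);    /* oldest of the three */
    (void) obstack_alloc (&ob, 100);
    (void) obstack_alloc (&ob, 200);

    obstack_free (&ob, mark);    /* frees MARK and both newer objects */
    obstack_free (&ob, 0);       /* frees everything remaining in OB */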
+
+#if 0
+/* These are now turned off because the applications do not use them,
+   and obstack_grow uses bcopy, which causes trouble on sysV.  */
+
+/* Now define the functional versions of the obstack macros.
+ Define them to simply use the corresponding macros to do the job. */
+
+#ifdef __STDC__
+/* These function definitions do not work with non-ANSI preprocessors;
+ they won't pass through the macro names in parentheses. */
+
+/* The function names appear in parentheses in order to prevent
+ the macro-definitions of the names from being expanded there. */
+
+POINTER (obstack_base) (obstack)
+ struct obstack *obstack;
+{
+ return obstack_base (obstack);
+}
+
+POINTER (obstack_next_free) (obstack)
+ struct obstack *obstack;
+{
+ return obstack_next_free (obstack);
+}
+
+int (obstack_object_size) (obstack)
+ struct obstack *obstack;
+{
+ return obstack_object_size (obstack);
+}
+
+int (obstack_room) (obstack)
+ struct obstack *obstack;
+{
+ return obstack_room (obstack);
+}
+
+void (obstack_grow) (obstack, pointer, length)
+ struct obstack *obstack;
+ POINTER pointer;
+ int length;
+{
+ obstack_grow (obstack, pointer, length);
+}
+
+void (obstack_grow0) (obstack, pointer, length)
+ struct obstack *obstack;
+ POINTER pointer;
+ int length;
+{
+ obstack_grow0 (obstack, pointer, length);
+}
+
+void (obstack_1grow) (obstack, character)
+ struct obstack *obstack;
+ int character;
+{
+ obstack_1grow (obstack, character);
+}
+
+void (obstack_blank) (obstack, length)
+ struct obstack *obstack;
+ int length;
+{
+ obstack_blank (obstack, length);
+}
+
+void (obstack_1grow_fast) (obstack, character)
+ struct obstack *obstack;
+ int character;
+{
+ obstack_1grow_fast (obstack, character);
+}
+
+void (obstack_blank_fast) (obstack, length)
+ struct obstack *obstack;
+ int length;
+{
+ obstack_blank_fast (obstack, length);
+}
+
+POINTER (obstack_finish) (obstack)
+ struct obstack *obstack;
+{
+ return obstack_finish (obstack);
+}
+
+POINTER (obstack_alloc) (obstack, length)
+ struct obstack *obstack;
+ int length;
+{
+ return obstack_alloc (obstack, length);
+}
+
+POINTER (obstack_copy) (obstack, pointer, length)
+ struct obstack *obstack;
+ POINTER pointer;
+ int length;
+{
+ return obstack_copy (obstack, pointer, length);
+}
+
+POINTER (obstack_copy0) (obstack, pointer, length)
+ struct obstack *obstack;
+ POINTER pointer;
+ int length;
+{
+ return obstack_copy0 (obstack, pointer, length);
+}
+
+#endif /* __STDC__ */
+
+#endif /* 0 */
+
+#endif /* _LIBC or not __GNU_LIBRARY__. */
diff --git a/gnu/usr.bin/cc/cc_int/optabs.c b/gnu/usr.bin/cc/cc_int/optabs.c
new file mode 100644
index 0000000..427ccf4
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/optabs.c
@@ -0,0 +1,4100 @@
+/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
+ Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+#include "expr.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "reload.h"
+#include <ctype.h>
+
+/* Each optab contains info on how this target machine
+ can perform a particular operation
+ for all sizes and kinds of operands.
+
+ The operation to be performed is often specified
+ by passing one of these optabs as an argument.
+
+ See expr.h for documentation of these optabs. */
+
+optab add_optab;
+optab sub_optab;
+optab smul_optab;
+optab smul_highpart_optab;
+optab umul_highpart_optab;
+optab smul_widen_optab;
+optab umul_widen_optab;
+optab sdiv_optab;
+optab sdivmod_optab;
+optab udiv_optab;
+optab udivmod_optab;
+optab smod_optab;
+optab umod_optab;
+optab flodiv_optab;
+optab ftrunc_optab;
+optab and_optab;
+optab ior_optab;
+optab xor_optab;
+optab ashl_optab;
+optab lshr_optab;
+optab ashr_optab;
+optab rotl_optab;
+optab rotr_optab;
+optab smin_optab;
+optab smax_optab;
+optab umin_optab;
+optab umax_optab;
+
+optab mov_optab;
+optab movstrict_optab;
+
+optab neg_optab;
+optab abs_optab;
+optab one_cmpl_optab;
+optab ffs_optab;
+optab sqrt_optab;
+optab sin_optab;
+optab cos_optab;
+
+optab cmp_optab;
+optab ucmp_optab; /* Used only for libcalls for unsigned comparisons. */
+optab tst_optab;
+
+optab strlen_optab;
+
+/* Tables of patterns for extending one integer mode to another. */
+enum insn_code extendtab[MAX_MACHINE_MODE][MAX_MACHINE_MODE][2];
+
+/* Tables of patterns for converting between fixed and floating point. */
+enum insn_code fixtab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
+enum insn_code fixtrunctab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
+enum insn_code floattab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
+
+/* Contains the optab used for each rtx code. */
+optab code_to_optab[NUM_RTX_CODE + 1];
+
+/* SYMBOL_REF rtx's for the library functions that are called
+ implicitly and not via optabs. */
+
+rtx extendsfdf2_libfunc;
+rtx extendsfxf2_libfunc;
+rtx extendsftf2_libfunc;
+rtx extenddfxf2_libfunc;
+rtx extenddftf2_libfunc;
+
+rtx truncdfsf2_libfunc;
+rtx truncxfsf2_libfunc;
+rtx trunctfsf2_libfunc;
+rtx truncxfdf2_libfunc;
+rtx trunctfdf2_libfunc;
+
+rtx memcpy_libfunc;
+rtx bcopy_libfunc;
+rtx memcmp_libfunc;
+rtx bcmp_libfunc;
+rtx memset_libfunc;
+rtx bzero_libfunc;
+
+rtx eqsf2_libfunc;
+rtx nesf2_libfunc;
+rtx gtsf2_libfunc;
+rtx gesf2_libfunc;
+rtx ltsf2_libfunc;
+rtx lesf2_libfunc;
+
+rtx eqdf2_libfunc;
+rtx nedf2_libfunc;
+rtx gtdf2_libfunc;
+rtx gedf2_libfunc;
+rtx ltdf2_libfunc;
+rtx ledf2_libfunc;
+
+rtx eqxf2_libfunc;
+rtx nexf2_libfunc;
+rtx gtxf2_libfunc;
+rtx gexf2_libfunc;
+rtx ltxf2_libfunc;
+rtx lexf2_libfunc;
+
+rtx eqtf2_libfunc;
+rtx netf2_libfunc;
+rtx gttf2_libfunc;
+rtx getf2_libfunc;
+rtx lttf2_libfunc;
+rtx letf2_libfunc;
+
+rtx floatsisf_libfunc;
+rtx floatdisf_libfunc;
+rtx floattisf_libfunc;
+
+rtx floatsidf_libfunc;
+rtx floatdidf_libfunc;
+rtx floattidf_libfunc;
+
+rtx floatsixf_libfunc;
+rtx floatdixf_libfunc;
+rtx floattixf_libfunc;
+
+rtx floatsitf_libfunc;
+rtx floatditf_libfunc;
+rtx floattitf_libfunc;
+
+rtx fixsfsi_libfunc;
+rtx fixsfdi_libfunc;
+rtx fixsfti_libfunc;
+
+rtx fixdfsi_libfunc;
+rtx fixdfdi_libfunc;
+rtx fixdfti_libfunc;
+
+rtx fixxfsi_libfunc;
+rtx fixxfdi_libfunc;
+rtx fixxfti_libfunc;
+
+rtx fixtfsi_libfunc;
+rtx fixtfdi_libfunc;
+rtx fixtfti_libfunc;
+
+rtx fixunssfsi_libfunc;
+rtx fixunssfdi_libfunc;
+rtx fixunssfti_libfunc;
+
+rtx fixunsdfsi_libfunc;
+rtx fixunsdfdi_libfunc;
+rtx fixunsdfti_libfunc;
+
+rtx fixunsxfsi_libfunc;
+rtx fixunsxfdi_libfunc;
+rtx fixunsxfti_libfunc;
+
+rtx fixunstfsi_libfunc;
+rtx fixunstfdi_libfunc;
+rtx fixunstfti_libfunc;
+
+/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
+ gives the gen_function to make a branch to test that condition. */
+
+rtxfun bcc_gen_fctn[NUM_RTX_CODE];
+
+/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
+ gives the insn code to make a store-condition insn
+ to test that condition. */
+
+enum insn_code setcc_gen_code[NUM_RTX_CODE];
+
+static int add_equal_note PROTO((rtx, rtx, enum rtx_code, rtx, rtx));
+static rtx widen_operand PROTO((rtx, enum machine_mode,
+ enum machine_mode, int, int));
+static enum insn_code can_fix_p PROTO((enum machine_mode, enum machine_mode,
+ int, int *));
+static enum insn_code can_float_p PROTO((enum machine_mode, enum machine_mode,
+ int));
+static rtx ftruncify PROTO((rtx));
+static optab init_optab PROTO((enum rtx_code));
+static void init_libfuncs PROTO((optab, int, int, char *, int));
+static void init_integral_libfuncs PROTO((optab, char *, int));
+static void init_floating_libfuncs PROTO((optab, char *, int));
+static void init_complex_libfuncs PROTO((optab, char *, int));
+
+/* Add a REG_EQUAL note to the last insn in SEQ. TARGET is being set to
+ the result of operation CODE applied to OP0 (and OP1 if it is a binary
+ operation).
+
+ If the last insn does not set TARGET, don't do anything, but return 1.
+
+ If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
+ don't add the REG_EQUAL note but return 0. Our caller can then try
+ again, ensuring that TARGET is not one of the operands. */
+
+static int
+add_equal_note (seq, target, code, op0, op1)
+ rtx seq;
+ rtx target;
+ enum rtx_code code;
+ rtx op0, op1;
+{
+ rtx set;
+ int i;
+ rtx note;
+
+ if ((GET_RTX_CLASS (code) != '1' && GET_RTX_CLASS (code) != '2'
+ && GET_RTX_CLASS (code) != 'c' && GET_RTX_CLASS (code) != '<')
+ || GET_CODE (seq) != SEQUENCE
+ || (set = single_set (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1))) == 0
+ || GET_CODE (target) == ZERO_EXTRACT
+ || (! rtx_equal_p (SET_DEST (set), target)
+ /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
+ SUBREG. */
+ && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
+ || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set), 0)),
+ target))))
+ return 1;
+
+ /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
+ besides the last insn. */
+ if (reg_overlap_mentioned_p (target, op0)
+ || (op1 && reg_overlap_mentioned_p (target, op1)))
+ for (i = XVECLEN (seq, 0) - 2; i >= 0; i--)
+ if (reg_set_p (target, XVECEXP (seq, 0, i)))
+ return 0;
+
+ if (GET_RTX_CLASS (code) == '1')
+ note = gen_rtx (code, GET_MODE (target), copy_rtx (op0));
+ else
+ note = gen_rtx (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
+
+ REG_NOTES (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1))
+ = gen_rtx (EXPR_LIST, REG_EQUAL, note,
+ REG_NOTES (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1)));
+
+ return 1;
+}
+
+/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
+   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
+   not actually do a sign-extend or zero-extend, but can leave the
+   higher-order bits of the result rtx undefined, as is the case for
+   logical operations, for example, but not for right shifts.  */
+
+static rtx
+widen_operand (op, mode, oldmode, unsignedp, no_extend)
+ rtx op;
+ enum machine_mode mode, oldmode;
+ int unsignedp;
+ int no_extend;
+{
+ rtx result;
+
+  /* If we must extend, do so.  If OP is either a constant or a SUBREG
+ for a promoted object, also extend since it will be more efficient to
+ do so. */
+ if (! no_extend
+ || GET_MODE (op) == VOIDmode
+ || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)))
+ return convert_modes (mode, oldmode, op, unsignedp);
+
+ /* If MODE is no wider than a single word, we return a paradoxical
+ SUBREG. */
+ if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
+ return gen_rtx (SUBREG, mode, force_reg (GET_MODE (op), op), 0);
+
+ /* Otherwise, get an object of MODE, clobber it, and set the low-order
+ part to OP. */
+
+ result = gen_reg_rtx (mode);
+ emit_insn (gen_rtx (CLOBBER, VOIDmode, result));
+ emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
+ return result;
+}
+
+/* Generate code to perform an operation specified by BINOPTAB
+ on operands OP0 and OP1, with result having machine-mode MODE.
+
+ UNSIGNEDP is for the case where we have to widen the operands
+ to perform the operation. It says to use zero-extension.
+
+ If TARGET is nonzero, the value
+ is generated there, if it is convenient to do so.
+ In all cases an rtx is returned for the locus of the value;
+ this may or may not be TARGET. */
+
+rtx
+expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
+ enum machine_mode mode;
+ optab binoptab;
+ rtx op0, op1;
+ rtx target;
+ int unsignedp;
+ enum optab_methods methods;
+{
+ enum optab_methods next_methods
+ = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
+ ? OPTAB_WIDEN : methods);
+ enum mode_class class;
+ enum machine_mode wider_mode;
+ register rtx temp;
+ int commutative_op = 0;
+ int shift_op = (binoptab->code == ASHIFT
+ || binoptab->code == ASHIFTRT
+ || binoptab->code == LSHIFTRT
+ || binoptab->code == ROTATE
+ || binoptab->code == ROTATERT);
+ rtx entry_last = get_last_insn ();
+ rtx last;
+
+ class = GET_MODE_CLASS (mode);
+
+ op0 = protect_from_queue (op0, 0);
+ op1 = protect_from_queue (op1, 0);
+ if (target)
+ target = protect_from_queue (target, 1);
+
+ if (flag_force_mem)
+ {
+ op0 = force_not_mem (op0);
+ op1 = force_not_mem (op1);
+ }
+
+ /* If subtracting an integer constant, convert this into an addition of
+ the negated constant. */
+
+ if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
+ {
+ op1 = negate_rtx (mode, op1);
+ binoptab = add_optab;
+ }
+
+ /* If we are inside an appropriately-short loop and one operand is an
+ expensive constant, force it into a register. */
+ if (CONSTANT_P (op0) && preserve_subexpressions_p ()
+ && rtx_cost (op0, binoptab->code) > 2)
+ op0 = force_reg (mode, op0);
+
+ if (CONSTANT_P (op1) && preserve_subexpressions_p ()
+ && rtx_cost (op1, binoptab->code) > 2)
+ op1 = force_reg (shift_op ? word_mode : mode, op1);
+
+ /* Record where to delete back to if we backtrack. */
+ last = get_last_insn ();
+
+ /* If operation is commutative,
+ try to make the first operand a register.
+ Even better, try to make it the same as the target.
+ Also try to make the last operand a constant. */
+ if (GET_RTX_CLASS (binoptab->code) == 'c'
+ || binoptab == smul_widen_optab
+ || binoptab == umul_widen_optab
+ || binoptab == smul_highpart_optab
+ || binoptab == umul_highpart_optab)
+ {
+ commutative_op = 1;
+
+ if (((target == 0 || GET_CODE (target) == REG)
+ ? ((GET_CODE (op1) == REG
+ && GET_CODE (op0) != REG)
+ || target == op1)
+ : rtx_equal_p (op1, target))
+ || GET_CODE (op0) == CONST_INT)
+ {
+ temp = op1;
+ op1 = op0;
+ op0 = temp;
+ }
+ }
+
+ /* If we can do it with a three-operand insn, do so. */
+
+ if (methods != OPTAB_MUST_WIDEN
+ && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ int icode = (int) binoptab->handlers[(int) mode].insn_code;
+ enum machine_mode mode0 = insn_operand_mode[icode][1];
+ enum machine_mode mode1 = insn_operand_mode[icode][2];
+ rtx pat;
+ rtx xop0 = op0, xop1 = op1;
+
+ if (target)
+ temp = target;
+ else
+ temp = gen_reg_rtx (mode);
+
+ /* If it is a commutative operator and the modes would match
+ if we would swap the operands, we can save the conversions. */
+ if (commutative_op)
+ {
+ if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
+ && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
+ {
+ register rtx tmp;
+
+ tmp = op0; op0 = op1; op1 = tmp;
+ tmp = xop0; xop0 = xop1; xop1 = tmp;
+ }
+ }
+
+ /* In case the insn wants input operands in modes different from
+ the result, convert the operands. */
+
+ if (GET_MODE (op0) != VOIDmode
+ && GET_MODE (op0) != mode0)
+ xop0 = convert_to_mode (mode0, xop0, unsignedp);
+
+ if (GET_MODE (xop1) != VOIDmode
+ && GET_MODE (xop1) != mode1)
+ xop1 = convert_to_mode (mode1, xop1, unsignedp);
+
+ /* Now, if insn's predicates don't allow our operands, put them into
+ pseudo regs. */
+
+ if (! (*insn_operand_predicate[icode][1]) (xop0, mode0))
+ xop0 = copy_to_mode_reg (mode0, xop0);
+
+ if (! (*insn_operand_predicate[icode][2]) (xop1, mode1))
+ xop1 = copy_to_mode_reg (mode1, xop1);
+
+ if (! (*insn_operand_predicate[icode][0]) (temp, mode))
+ temp = gen_reg_rtx (mode);
+
+ pat = GEN_FCN (icode) (temp, xop0, xop1);
+ if (pat)
+ {
+ /* If PAT is a multi-insn sequence, try to add an appropriate
+ REG_EQUAL note to it. If we can't because TEMP conflicts with an
+ operand, call ourselves again, this time without a target. */
+ if (GET_CODE (pat) == SEQUENCE
+ && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
+ {
+ delete_insns_since (last);
+ return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
+ unsignedp, methods);
+ }
+
+ emit_insn (pat);
+ return temp;
+ }
+ else
+ delete_insns_since (last);
+ }
+
+ /* If this is a multiply, see if we can do a widening operation that
+ takes operands of this mode and makes a wider mode. */
+
+ if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
+ && (((unsignedp ? umul_widen_optab : smul_widen_optab)
+ ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
+ != CODE_FOR_nothing))
+ {
+ temp = expand_binop (GET_MODE_WIDER_MODE (mode),
+ unsignedp ? umul_widen_optab : smul_widen_optab,
+ op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
+
+ if (temp != 0)
+ {
+ if (GET_MODE_CLASS (mode) == MODE_INT)
+ return gen_lowpart (mode, temp);
+ else
+ return convert_to_mode (mode, temp, unsignedp);
+ }
+ }
+
+ /* Look for a wider mode of the same class for which we think we
+ can open-code the operation. Check for a widening multiply at the
+ wider mode as well. */
+
+ if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
+ && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
+ || (binoptab == smul_optab
+ && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
+ && (((unsignedp ? umul_widen_optab : smul_widen_optab)
+ ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
+ != CODE_FOR_nothing)))
+ {
+ rtx xop0 = op0, xop1 = op1;
+ int no_extend = 0;
+
+ /* For certain integer operations, we need not actually extend
+ the narrow operands, as long as we will truncate
+ the results to the same narrowness. */
+
+ if ((binoptab == ior_optab || binoptab == and_optab
+ || binoptab == xor_optab
+ || binoptab == add_optab || binoptab == sub_optab
+ || binoptab == smul_optab || binoptab == ashl_optab)
+ && class == MODE_INT)
+ no_extend = 1;
+
+ xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
+
+ /* The second operand of a shift must always be extended. */
+ xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
+ no_extend && binoptab != ashl_optab);
+
+ temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
+ unsignedp, OPTAB_DIRECT);
+ if (temp)
+ {
+ if (class != MODE_INT)
+ {
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ convert_move (target, temp, 0);
+ return target;
+ }
+ else
+ return gen_lowpart (mode, temp);
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+
+ /* These can be done a word at a time. */
+ if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
+ && class == MODE_INT
+ && GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
+ {
+ int i;
+ rtx insns;
+ rtx equiv_value;
+
+ /* If TARGET is the same as one of the operands, the REG_EQUAL note
+ won't be accurate, so use a new target. */
+ if (target == 0 || target == op0 || target == op1)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ /* Do the actual arithmetic. */
+ for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
+ {
+ rtx target_piece = operand_subword (target, i, 1, mode);
+ rtx x = expand_binop (word_mode, binoptab,
+ operand_subword_force (op0, i, mode),
+ operand_subword_force (op1, i, mode),
+ target_piece, unsignedp, next_methods);
+
+ if (x == 0)
+ break;
+
+ if (target_piece != x)
+ emit_move_insn (target_piece, x);
+ }
+
+ insns = get_insns ();
+ end_sequence ();
+
+ if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
+ {
+ if (binoptab->code != UNKNOWN)
+ equiv_value
+ = gen_rtx (binoptab->code, mode, copy_rtx (op0), copy_rtx (op1));
+ else
+ equiv_value = 0;
+
+ emit_no_conflict_block (insns, target, op0, op1, equiv_value);
+ return target;
+ }
+ }
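
In C terms the loop above computes the obvious per-word result.  A sketch
for a double-word AND, assuming 32-bit words (the types are hypothetical):

    typedef unsigned int word;            /* assumed: BITS_PER_WORD == 32 */
    struct dword { word w[2]; };

    static struct dword
    dword_and (struct dword a, struct dword b)
    {
      struct dword r;
      r.w[0] = a.w[0] & b.w[0];           /* one word_mode AND per subword */
      r.w[1] = a.w[1] & b.w[1];
      return r;
    }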
+
+ /* Synthesize double word shifts from single word shifts. */
+ if ((binoptab == lshr_optab || binoptab == ashl_optab
+ || binoptab == ashr_optab)
+ && class == MODE_INT
+ && GET_CODE (op1) == CONST_INT
+ && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
+ && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
+ && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
+ && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
+ {
+ rtx insns, inter, equiv_value;
+ rtx into_target, outof_target;
+ rtx into_input, outof_input;
+ int shift_count, left_shift, outof_word;
+
+ /* If TARGET is the same as one of the operands, the REG_EQUAL note
+ won't be accurate, so use a new target. */
+ if (target == 0 || target == op0 || target == op1)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ shift_count = INTVAL (op1);
+
+      /* OUTOF_* is the word we are shifting bits away from, and
+	 INTO_* is the word that we are shifting bits towards; thus
+	 they differ depending on the direction of the shift and
+	 WORDS_BIG_ENDIAN.  */
+
+ left_shift = binoptab == ashl_optab;
+ outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
+
+ outof_target = operand_subword (target, outof_word, 1, mode);
+ into_target = operand_subword (target, 1 - outof_word, 1, mode);
+
+ outof_input = operand_subword_force (op0, outof_word, mode);
+ into_input = operand_subword_force (op0, 1 - outof_word, mode);
+
+ if (shift_count >= BITS_PER_WORD)
+ {
+ inter = expand_binop (word_mode, binoptab,
+ outof_input,
+ GEN_INT (shift_count - BITS_PER_WORD),
+ into_target, unsignedp, next_methods);
+
+ if (inter != 0 && inter != into_target)
+ emit_move_insn (into_target, inter);
+
+ /* For a signed right shift, we must fill the word we are shifting
+ out of with copies of the sign bit. Otherwise it is zeroed. */
+ if (inter != 0 && binoptab != ashr_optab)
+ inter = CONST0_RTX (word_mode);
+ else if (inter != 0)
+ inter = expand_binop (word_mode, binoptab,
+ outof_input,
+ GEN_INT (BITS_PER_WORD - 1),
+ outof_target, unsignedp, next_methods);
+
+ if (inter != 0 && inter != outof_target)
+ emit_move_insn (outof_target, inter);
+ }
+ else
+ {
+ rtx carries;
+ optab reverse_unsigned_shift, unsigned_shift;
+
+	  /* For a shift of less than BITS_PER_WORD, to compute the carry,
+ we must do a logical shift in the opposite direction of the
+ desired shift. */
+
+ reverse_unsigned_shift = (left_shift ? lshr_optab : ashl_optab);
+
+	  /* For a shift of less than BITS_PER_WORD, to compute the word
+	     shifted towards, we do an unsigned shift of the original value
+	     of that word.  */
+
+ unsigned_shift = (left_shift ? ashl_optab : lshr_optab);
+
+ carries = expand_binop (word_mode, reverse_unsigned_shift,
+ outof_input,
+ GEN_INT (BITS_PER_WORD - shift_count),
+ 0, unsignedp, next_methods);
+
+ if (carries == 0)
+ inter = 0;
+ else
+ inter = expand_binop (word_mode, binoptab, outof_input,
+ op1, outof_target, unsignedp, next_methods);
+
+ if (inter != 0 && inter != outof_target)
+ emit_move_insn (outof_target, inter);
+
+ if (inter != 0)
+ inter = expand_binop (word_mode, unsigned_shift, into_input,
+ op1, 0, unsignedp, next_methods);
+
+ if (inter != 0)
+ inter = expand_binop (word_mode, ior_optab, carries, inter,
+ into_target, unsignedp, next_methods);
+
+ if (inter != 0 && inter != into_target)
+ emit_move_insn (into_target, inter);
+ }
+
+ insns = get_insns ();
+ end_sequence ();
+
+ if (inter != 0)
+ {
+ if (binoptab->code != UNKNOWN)
+ equiv_value = gen_rtx (binoptab->code, mode, op0, op1);
+ else
+ equiv_value = 0;
+
+ emit_no_conflict_block (insns, target, op0, op1, equiv_value);
+ return target;
+ }
+ }
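
The two branches above correspond to this portable sketch of a double-word
left shift, assuming 32-bit words, little-endian word order, and a
constant count with 0 < count < 64:

    typedef unsigned int word;            /* assumed: BITS_PER_WORD == 32 */

    static void
    dword_shl (word *hi, word *lo, int count)
    {
      if (count >= 32)
        {
          /* The outof word supplies all remaining bits; it is zeroed
             because this is not a signed right shift.  */
          *hi = *lo << (count - 32);
          *lo = 0;
        }
      else
        {
          /* The carries cross words via a logical shift in the
             opposite direction of the desired shift.  */
          word carries = *lo >> (32 - count);
          *hi = (*hi << count) | carries;
          *lo <<= count;
        }
    }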
+
+ /* Synthesize double word rotates from single word shifts. */
+ if ((binoptab == rotl_optab || binoptab == rotr_optab)
+ && class == MODE_INT
+ && GET_CODE (op1) == CONST_INT
+ && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
+ && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
+ && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
+ {
+ rtx insns, equiv_value;
+ rtx into_target, outof_target;
+ rtx into_input, outof_input;
+ rtx inter;
+ int shift_count, left_shift, outof_word;
+
+ /* If TARGET is the same as one of the operands, the REG_EQUAL note
+ won't be accurate, so use a new target. */
+ if (target == 0 || target == op0 || target == op1)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ shift_count = INTVAL (op1);
+
+      /* OUTOF_* is the word we are shifting bits away from, and
+	 INTO_* is the word that we are shifting bits towards; thus
+	 they differ depending on the direction of the shift and
+	 WORDS_BIG_ENDIAN.  */
+
+ left_shift = (binoptab == rotl_optab);
+ outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
+
+ outof_target = operand_subword (target, outof_word, 1, mode);
+ into_target = operand_subword (target, 1 - outof_word, 1, mode);
+
+ outof_input = operand_subword_force (op0, outof_word, mode);
+ into_input = operand_subword_force (op0, 1 - outof_word, mode);
+
+ if (shift_count == BITS_PER_WORD)
+ {
+ /* This is just a word swap. */
+ emit_move_insn (outof_target, into_input);
+ emit_move_insn (into_target, outof_input);
+ inter = const0_rtx;
+ }
+ else
+ {
+ rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
+ rtx first_shift_count, second_shift_count;
+ optab reverse_unsigned_shift, unsigned_shift;
+
+ reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
+ ? lshr_optab : ashl_optab);
+
+ unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
+ ? ashl_optab : lshr_optab);
+
+ if (shift_count > BITS_PER_WORD)
+ {
+ first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
+ second_shift_count = GEN_INT (2*BITS_PER_WORD - shift_count);
+ }
+ else
+ {
+ first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
+ second_shift_count = GEN_INT (shift_count);
+ }
+
+ into_temp1 = expand_binop (word_mode, unsigned_shift,
+ outof_input, first_shift_count,
+ NULL_RTX, unsignedp, next_methods);
+ into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
+ into_input, second_shift_count,
+ into_target, unsignedp, next_methods);
+
+ if (into_temp1 != 0 && into_temp2 != 0)
+ inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
+ into_target, unsignedp, next_methods);
+ else
+ inter = 0;
+
+ if (inter != 0 && inter != into_target)
+ emit_move_insn (into_target, inter);
+
+ outof_temp1 = expand_binop (word_mode, unsigned_shift,
+ into_input, first_shift_count,
+ NULL_RTX, unsignedp, next_methods);
+ outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
+ outof_input, second_shift_count,
+ outof_target, unsignedp, next_methods);
+
+ if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
+ inter = expand_binop (word_mode, ior_optab,
+ outof_temp1, outof_temp2,
+ outof_target, unsignedp, next_methods);
+
+ if (inter != 0 && inter != outof_target)
+ emit_move_insn (outof_target, inter);
+ }
+
+ insns = get_insns ();
+ end_sequence ();
+
+ if (inter != 0)
+ {
+ if (binoptab->code != UNKNOWN)
+ equiv_value = gen_rtx (binoptab->code, mode, op0, op1);
+ else
+ equiv_value = 0;
+
+	  /* We can't make this a no-conflict block if this is a word swap,
+ because the word swap case fails if the input and output values
+ are in the same register. */
+ if (shift_count != BITS_PER_WORD)
+ emit_no_conflict_block (insns, target, op0, op1, equiv_value);
+ else
+ emit_insns (insns);
+
+ return target;
+ }
+ }
+
+ /* These can be done a word at a time by propagating carries. */
+ if ((binoptab == add_optab || binoptab == sub_optab)
+ && class == MODE_INT
+ && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
+ && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
+ {
+ int i;
+ rtx carry_tmp = gen_reg_rtx (word_mode);
+ optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
+ int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
+ rtx carry_in, carry_out;
+ rtx xop0, xop1;
+
+    /* We can handle either a 1 or -1 value for the carry.  If
+       STORE_FLAG_VALUE is one of those, use it.  Otherwise, use 1,
+       since it is the easiest one to get.  */
+#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
+ int normalizep = STORE_FLAG_VALUE;
+#else
+ int normalizep = 1;
+#endif
+
+ /* Prepare the operands. */
+ xop0 = force_reg (mode, op0);
+ xop1 = force_reg (mode, op1);
+
+ if (target == 0 || GET_CODE (target) != REG
+ || target == xop0 || target == xop1)
+ target = gen_reg_rtx (mode);
+
+ /* Indicate for flow that the entire target reg is being set. */
+ if (GET_CODE (target) == REG)
+ emit_insn (gen_rtx (CLOBBER, VOIDmode, target));
+
+ /* Do the actual arithmetic. */
+ for (i = 0; i < nwords; i++)
+ {
+ int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
+ rtx target_piece = operand_subword (target, index, 1, mode);
+ rtx op0_piece = operand_subword_force (xop0, index, mode);
+ rtx op1_piece = operand_subword_force (xop1, index, mode);
+ rtx x;
+
+ /* Main add/subtract of the input operands. */
+ x = expand_binop (word_mode, binoptab,
+ op0_piece, op1_piece,
+ target_piece, unsignedp, next_methods);
+ if (x == 0)
+ break;
+
+ if (i + 1 < nwords)
+ {
+ /* Store carry from main add/subtract. */
+ carry_out = gen_reg_rtx (word_mode);
+ carry_out = emit_store_flag (carry_out,
+ binoptab == add_optab ? LTU : GTU,
+ x, op0_piece,
+ word_mode, 1, normalizep);
+ if (carry_out == 0)
+ break;
+ }
+
+ if (i > 0)
+ {
+ /* Add/subtract previous carry to main result. */
+ x = expand_binop (word_mode,
+ normalizep == 1 ? binoptab : otheroptab,
+ x, carry_in,
+ target_piece, 1, next_methods);
+ if (x == 0)
+ break;
+ else if (target_piece != x)
+ emit_move_insn (target_piece, x);
+
+ if (i + 1 < nwords)
+ {
+ /* THIS CODE HAS NOT BEEN TESTED. */
+ /* Get out carry from adding/subtracting carry in. */
+ carry_tmp = emit_store_flag (carry_tmp,
+ binoptab == add_optab
+ ? LTU : GTU,
+ x, carry_in,
+ word_mode, 1, normalizep);
+
+		  /* Logical-ior the two possible carries together.  */
+ carry_out = expand_binop (word_mode, ior_optab,
+ carry_out, carry_tmp,
+ carry_out, 0, next_methods);
+ if (carry_out == 0)
+ break;
+ }
+ }
+
+ carry_in = carry_out;
+ }
+
+ if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
+ {
+ rtx temp = emit_move_insn (target, target);
+
+ REG_NOTES (temp) = gen_rtx (EXPR_LIST, REG_EQUAL,
+ gen_rtx (binoptab->code, mode,
+ copy_rtx (xop0),
+ copy_rtx (xop1)),
+ REG_NOTES (temp));
+ return target;
+ }
+ else
+ delete_insns_since (last);
+ }
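
The LTU store-flag above is the RTL form of the usual C carry test; a
sketch for a two-word add (the types are hypothetical):

    typedef unsigned int word;            /* assumed: BITS_PER_WORD == 32 */

    static void
    dword_add (word *lo, word *hi, word alo, word ahi, word blo, word bhi)
    {
      word carry;

      *lo = alo + blo;
      carry = (*lo < alo);   /* LTU: the sum wrapped, so a carry came out */
      *hi = ahi + bhi + carry;
    }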
+
+ /* If we want to multiply two two-word values and have normal and widening
+ multiplies of single-word values, we can do this with three smaller
+ multiplications. Note that we do not make a REG_NO_CONFLICT block here
+ because we are not operating on one word at a time.
+
+ The multiplication proceeds as follows:
+ _______________________
+ [__op0_high_|__op0_low__]
+ _______________________
+ * [__op1_high_|__op1_low__]
+ _______________________________________________
+ _______________________
+ (1) [__op0_low__*__op1_low__]
+ _______________________
+ (2a) [__op0_low__*__op1_high_]
+ _______________________
+ (2b) [__op0_high_*__op1_low__]
+ _______________________
+ (3) [__op0_high_*__op1_high_]
+
+
+ This gives a 4-word result. Since we are only interested in the
+ lower 2 words, partial result (3) and the upper words of (2a) and
+ (2b) don't need to be calculated. Hence (2a) and (2b) can be
+ calculated using non-widening multiplication.
+
+ (1), however, needs to be calculated with an unsigned widening
+ multiplication. If this operation is not directly supported we
+ try using a signed widening multiplication and adjust the result.
+ This adjustment works as follows:
+
+ If both operands are positive then no adjustment is needed.
+
+ If the operands have different signs, for example op0_low < 0 and
+ op1_low >= 0, the instruction treats the most significant bit of
+ op0_low as a sign bit instead of a bit with significance
+ 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
+ with 2**BITS_PER_WORD - op0_low, and two's complements the
+ result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
+ the result.
+
+ Similarly, if both operands are negative, we need to add
+ (op0_low + op1_low) * 2**BITS_PER_WORD.
+
+ We use a trick to adjust quickly. We logically shift op0_low right
+ (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
+ op0_high (op1_high) before it is used to calculate 2b (2a). If no
+ logical shift exists, we do an arithmetic right shift and subtract
+ the 0 or -1. */
+
+ if (binoptab == smul_optab
+ && class == MODE_INT
+ && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
+ && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
+ && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
+ && ((umul_widen_optab->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing)
+ || (smul_widen_optab->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing)))
+ {
+ int low = (WORDS_BIG_ENDIAN ? 1 : 0);
+ int high = (WORDS_BIG_ENDIAN ? 0 : 1);
+ rtx op0_high = operand_subword_force (op0, high, mode);
+ rtx op0_low = operand_subword_force (op0, low, mode);
+ rtx op1_high = operand_subword_force (op1, high, mode);
+ rtx op1_low = operand_subword_force (op1, low, mode);
+ rtx product = 0;
+ rtx op0_xhigh;
+ rtx op1_xhigh;
+
+ /* If the target is the same as one of the inputs, don't use it. This
+ prevents problems with the REG_EQUAL note. */
+ if (target == op0 || target == op1)
+ target = 0;
+
+ /* Multiply the two lower words to get a double-word product.
+ If unsigned widening multiplication is available, use that;
+ otherwise use the signed form and compensate. */
+
+ if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
+ target, 1, OPTAB_DIRECT);
+
+ /* If we didn't succeed, delete everything we did so far. */
+ if (product == 0)
+ delete_insns_since (last);
+ else
+ op0_xhigh = op0_high, op1_xhigh = op1_high;
+ }
+
+ if (product == 0
+ && smul_widen_optab->handlers[(int) mode].insn_code
+ != CODE_FOR_nothing)
+ {
+ rtx wordm1 = GEN_INT (BITS_PER_WORD - 1);
+ product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
+ target, 1, OPTAB_DIRECT);
+ op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
+ NULL_RTX, 1, next_methods);
+ if (op0_xhigh)
+ op0_xhigh = expand_binop (word_mode, add_optab, op0_high,
+ op0_xhigh, op0_xhigh, 0, next_methods);
+ else
+ {
+ op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
+ NULL_RTX, 0, next_methods);
+ if (op0_xhigh)
+ op0_xhigh = expand_binop (word_mode, sub_optab, op0_high,
+ op0_xhigh, op0_xhigh, 0,
+ next_methods);
+ }
+
+ op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
+ NULL_RTX, 1, next_methods);
+ if (op1_xhigh)
+ op1_xhigh = expand_binop (word_mode, add_optab, op1_high,
+ op1_xhigh, op1_xhigh, 0, next_methods);
+ else
+ {
+ op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
+ NULL_RTX, 0, next_methods);
+ if (op1_xhigh)
+ op1_xhigh = expand_binop (word_mode, sub_optab, op1_high,
+ op1_xhigh, op1_xhigh, 0,
+ next_methods);
+ }
+ }
+
+ /* If we have been able to directly compute the product of the
+ low-order words of the operands and perform any required adjustments
+ of the operands, we proceed by trying two more multiplications
+ and then computing the appropriate sum.
+
+ We have checked above that the required addition is provided.
+     Full-word addition will normally succeed if it is provided at all,
+     so we don't worry about its failure.  The
+ multiplication may well fail, however, so we do handle that. */
+
+ if (product && op0_xhigh && op1_xhigh)
+ {
+ rtx product_high = operand_subword (product, high, 1, mode);
+ rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
+ NULL_RTX, 0, OPTAB_DIRECT);
+
+ if (temp != 0)
+ temp = expand_binop (word_mode, add_optab, temp, product_high,
+ product_high, 0, next_methods);
+
+ if (temp != 0 && temp != product_high)
+ emit_move_insn (product_high, temp);
+
+ if (temp != 0)
+ temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh,
+ NULL_RTX, 0, OPTAB_DIRECT);
+
+ if (temp != 0)
+ temp = expand_binop (word_mode, add_optab, temp,
+ product_high, product_high,
+ 0, next_methods);
+
+ if (temp != 0 && temp != product_high)
+ emit_move_insn (product_high, temp);
+
+ if (temp != 0)
+ {
+ temp = emit_move_insn (product, product);
+ REG_NOTES (temp) = gen_rtx (EXPR_LIST, REG_EQUAL,
+ gen_rtx (MULT, mode, copy_rtx (op0),
+ copy_rtx (op1)),
+ REG_NOTES (temp));
+
+ return product;
+ }
+ }
+
+ /* If we get here, we couldn't do it for some reason even though we
+ originally thought we could. Delete anything we've emitted in
+ trying to do it. */
+
+ delete_insns_since (last);
+ }
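
As a check on the scheme described in the long comment above, here is the
same decomposition in plain C, with 16-bit words and a 32-bit product
(the type widths are assumptions):

    typedef unsigned short uword16;       /* assumed 16 bits */
    typedef unsigned int uword32;         /* assumed 32 bits */

    static uword32
    mul_low (uword32 a, uword32 b)
    {
      uword16 a_lo = (uword16) a, a_hi = (uword16) (a >> 16);
      uword16 b_lo = (uword16) b, b_hi = (uword16) (b >> 16);

      uword32 p1  = (uword32) a_lo * b_lo;  /* (1): widening multiply */
      uword32 p2a = (uword32) a_lo * b_hi;  /* (2a): only low half matters */
      uword32 p2b = (uword32) a_hi * b_lo;  /* (2b): only low half matters */

      /* (3) and the high halves of (2a)/(2b) never reach the low words. */
      return p1 + (((p2a + p2b) & 0xffff) << 16);
    }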
+
+  /* We need to open-code the complex-type operations '+', '-', '*' and '/'.  */
+
+ /* At this point we allow operations between two similar complex
+ numbers, and also if one of the operands is not a complex number
+ but rather of MODE_FLOAT or MODE_INT. However, the caller
+ must make sure that the MODE of the non-complex operand matches
+ the SUBMODE of the complex operand. */
+
+ if (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)
+ {
+ rtx real0 = 0, imag0 = 0;
+ rtx real1 = 0, imag1 = 0;
+ rtx realr, imagr, res;
+ rtx seq;
+ rtx equiv_value;
+ int ok = 0;
+
+ /* Find the correct mode for the real and imaginary parts */
+ enum machine_mode submode
+ = mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT,
+ class == MODE_COMPLEX_INT ? MODE_INT : MODE_FLOAT,
+ 0);
+
+ if (submode == BLKmode)
+ abort ();
+
+ if (! target)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ realr = gen_realpart (submode, target);
+ imagr = gen_imagpart (submode, target);
+
+ if (GET_MODE (op0) == mode)
+ {
+ real0 = gen_realpart (submode, op0);
+ imag0 = gen_imagpart (submode, op0);
+ }
+ else
+ real0 = op0;
+
+ if (GET_MODE (op1) == mode)
+ {
+ real1 = gen_realpart (submode, op1);
+ imag1 = gen_imagpart (submode, op1);
+ }
+ else
+ real1 = op1;
+
+      if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0))
+ abort ();
+
+ switch (binoptab->code)
+ {
+ case PLUS:
+ /* (a+ib) + (c+id) = (a+c) + i(b+d) */
+ case MINUS:
+ /* (a+ib) - (c+id) = (a-c) + i(b-d) */
+ res = expand_binop (submode, binoptab, real0, real1,
+ realr, unsignedp, methods);
+
+ if (res == 0)
+ break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
+
+ if (imag0 && imag1)
+ res = expand_binop (submode, binoptab, imag0, imag1,
+ imagr, unsignedp, methods);
+ else if (imag0)
+ res = imag0;
+ else if (binoptab->code == MINUS)
+ res = expand_unop (submode, neg_optab, imag1, imagr, unsignedp);
+ else
+ res = imag1;
+
+ if (res == 0)
+ break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
+
+ ok = 1;
+ break;
+
+ case MULT:
+ /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
+
+ if (imag0 && imag1)
+ {
+ rtx temp1, temp2;
+
+ /* Don't fetch these from memory more than once. */
+ real0 = force_reg (submode, real0);
+ real1 = force_reg (submode, real1);
+ imag0 = force_reg (submode, imag0);
+ imag1 = force_reg (submode, imag1);
+
+ temp1 = expand_binop (submode, binoptab, real0, real1, NULL_RTX,
+ unsignedp, methods);
+
+ temp2 = expand_binop (submode, binoptab, imag0, imag1, NULL_RTX,
+ unsignedp, methods);
+
+ if (temp1 == 0 || temp2 == 0)
+ break;
+
+ res = expand_binop (submode, sub_optab, temp1, temp2,
+ realr, unsignedp, methods);
+
+ if (res == 0)
+ break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
+
+ temp1 = expand_binop (submode, binoptab, real0, imag1,
+ NULL_RTX, unsignedp, methods);
+
+ temp2 = expand_binop (submode, binoptab, real1, imag0,
+ NULL_RTX, unsignedp, methods);
+
+		  if (temp1 == 0 || temp2 == 0)
+		    break;
+
+		  res = expand_binop (submode, add_optab, temp1, temp2,
+				      imagr, unsignedp, methods);
+
+ if (res == 0)
+ break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
+
+ ok = 1;
+ }
+ else
+ {
+ /* Don't fetch these from memory more than once. */
+ real0 = force_reg (submode, real0);
+ real1 = force_reg (submode, real1);
+
+ res = expand_binop (submode, binoptab, real0, real1,
+ realr, unsignedp, methods);
+ if (res == 0)
+ break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
+
+ if (imag0 != 0)
+ res = expand_binop (submode, binoptab,
+ real1, imag0, imagr, unsignedp, methods);
+ else
+ res = expand_binop (submode, binoptab,
+ real0, imag1, imagr, unsignedp, methods);
+
+ if (res == 0)
+ break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
+
+ ok = 1;
+ }
+ break;
+
+ case DIV:
+ /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
+
+ if (imag1 == 0)
+ {
+ /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
+
+ /* Don't fetch these from memory more than once. */
+ real1 = force_reg (submode, real1);
+
+ /* Simply divide the real and imaginary parts by `c' */
+ if (class == MODE_COMPLEX_FLOAT)
+ res = expand_binop (submode, binoptab, real0, real1,
+ realr, unsignedp, methods);
+ else
+ res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
+ real0, real1, realr, unsignedp);
+
+ if (res == 0)
+ break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
+
+ if (class == MODE_COMPLEX_FLOAT)
+ res = expand_binop (submode, binoptab, imag0, real1,
+ imagr, unsignedp, methods);
+ else
+ res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
+ imag0, real1, imagr, unsignedp);
+
+ if (res == 0)
+ break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
+
+ ok = 1;
+ }
+ else
+ {
+	      /* Divisor is of complex type:
+		 X / (c+id) */
+ rtx divisor;
+ rtx real_t, imag_t;
+ rtx lhs, rhs;
+ rtx temp1, temp2;
+
+ /* Don't fetch these from memory more than once. */
+ real0 = force_reg (submode, real0);
+ real1 = force_reg (submode, real1);
+
+ if (imag0 != 0)
+ imag0 = force_reg (submode, imag0);
+
+ imag1 = force_reg (submode, imag1);
+
+ /* Divisor: c*c + d*d */
+ temp1 = expand_binop (submode, smul_optab, real1, real1,
+ NULL_RTX, unsignedp, methods);
+
+ temp2 = expand_binop (submode, smul_optab, imag1, imag1,
+ NULL_RTX, unsignedp, methods);
+
+ if (temp1 == 0 || temp2 == 0)
+ break;
+
+ divisor = expand_binop (submode, add_optab, temp1, temp2,
+ NULL_RTX, unsignedp, methods);
+ if (divisor == 0)
+ break;
+
+ if (imag0 == 0)
+ {
+ /* ((a)(c-id))/divisor */
+ /* (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)) */
+
+ /* Calculate the dividend */
+ real_t = expand_binop (submode, smul_optab, real0, real1,
+ NULL_RTX, unsignedp, methods);
+
+ imag_t = expand_binop (submode, smul_optab, real0, imag1,
+ NULL_RTX, unsignedp, methods);
+
+ if (real_t == 0 || imag_t == 0)
+ break;
+
+ imag_t = expand_unop (submode, neg_optab, imag_t,
+ NULL_RTX, unsignedp);
+ }
+ else
+ {
+		  /* ((a+ib)(c-id))/divisor */
+ /* Calculate the dividend */
+ temp1 = expand_binop (submode, smul_optab, real0, real1,
+ NULL_RTX, unsignedp, methods);
+
+ temp2 = expand_binop (submode, smul_optab, imag0, imag1,
+ NULL_RTX, unsignedp, methods);
+
+ if (temp1 == 0 || temp2 == 0)
+ break;
+
+ real_t = expand_binop (submode, add_optab, temp1, temp2,
+ NULL_RTX, unsignedp, methods);
+
+ temp1 = expand_binop (submode, smul_optab, imag0, real1,
+ NULL_RTX, unsignedp, methods);
+
+ temp2 = expand_binop (submode, smul_optab, real0, imag1,
+ NULL_RTX, unsignedp, methods);
+
+ if (temp1 == 0 || temp2 == 0)
+ break;
+
+ imag_t = expand_binop (submode, sub_optab, temp1, temp2,
+ NULL_RTX, unsignedp, methods);
+
+ if (real_t == 0 || imag_t == 0)
+ break;
+ }
+
+ if (class == MODE_COMPLEX_FLOAT)
+ res = expand_binop (submode, binoptab, real_t, divisor,
+ realr, unsignedp, methods);
+ else
+ res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
+ real_t, divisor, realr, unsignedp);
+
+ if (res == 0)
+ break;
+ else if (res != realr)
+ emit_move_insn (realr, res);
+
+ if (class == MODE_COMPLEX_FLOAT)
+ res = expand_binop (submode, binoptab, imag_t, divisor,
+ imagr, unsignedp, methods);
+ else
+ res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
+ imag_t, divisor, imagr, unsignedp);
+
+ if (res == 0)
+ break;
+ else if (res != imagr)
+ emit_move_insn (imagr, res);
+
+ ok = 1;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ seq = get_insns ();
+ end_sequence ();
+
+ if (ok)
+ {
+ if (binoptab->code != UNKNOWN)
+ equiv_value
+ = gen_rtx (binoptab->code, mode, copy_rtx (op0), copy_rtx (op1));
+ else
+ equiv_value = 0;
+
+ emit_no_conflict_block (seq, target, op0, op1, equiv_value);
+
+ return target;
+ }
+ }
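
Written out as scalar C, the identities implemented above for the
MODE_COMPLEX_FLOAT case look as follows (a sketch, with a double for
each part):

    typedef struct { double re, im; } cplx;

    /* (a+ib) * (c+id) = (ac - bd) + i(ad + cb) */
    static cplx
    cmul (cplx x, cplx y)
    {
      cplx r;
      r.re = x.re * y.re - x.im * y.im;
      r.im = x.re * y.im + x.im * y.re;
      return r;
    }

    /* (a+ib) / (c+id) = ((ac + bd) + i(bc - ad)) / (cc + dd) */
    static cplx
    cdiv (cplx x, cplx y)
    {
      cplx r;
      double d = y.re * y.re + y.im * y.im;    /* divisor: c*c + d*d */
      r.re = (x.re * y.re + x.im * y.im) / d;
      r.im = (x.im * y.re - x.re * y.im) / d;
      return r;
    }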
+
+ /* It can't be open-coded in this mode.
+ Use a library call if one is available and caller says that's ok. */
+
+ if (binoptab->handlers[(int) mode].libfunc
+ && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
+ {
+ rtx insns;
+ rtx funexp = binoptab->handlers[(int) mode].libfunc;
+ rtx op1x = op1;
+ enum machine_mode op1_mode = mode;
+ rtx value;
+
+ start_sequence ();
+
+ if (shift_op)
+ {
+ op1_mode = word_mode;
+ /* Specify unsigned here,
+ since negative shift counts are meaningless. */
+ op1x = convert_to_mode (word_mode, op1, 1);
+ }
+
+ if (GET_MODE (op0) != mode)
+ op0 = convert_to_mode (mode, op0, unsignedp);
+
+ /* Pass 1 for NO_QUEUE so we don't lose any increments
+ if the libcall is cse'd or moved. */
+ value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
+ NULL_RTX, 1, mode, 2,
+ op0, mode, op1x, op1_mode);
+
+ insns = get_insns ();
+ end_sequence ();
+
+ target = gen_reg_rtx (mode);
+ emit_libcall_block (insns, target, value,
+ gen_rtx (binoptab->code, mode, op0, op1));
+
+ return target;
+ }
+
+ delete_insns_since (last);
+
+ /* It can't be done in this mode. Can we do it in a wider mode? */
+
+ if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
+ || methods == OPTAB_MUST_WIDEN))
+ {
+      /* Caller says don't even try.  */
+ delete_insns_since (entry_last);
+ return 0;
+ }
+
+ /* Compute the value of METHODS to pass to recursive calls.
+ Don't allow widening to be tried recursively. */
+
+ methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
+
+ /* Look for a wider mode of the same class for which it appears we can do
+ the operation. */
+
+ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
+ {
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if ((binoptab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ || (methods == OPTAB_LIB
+ && binoptab->handlers[(int) wider_mode].libfunc))
+ {
+ rtx xop0 = op0, xop1 = op1;
+ int no_extend = 0;
+
+ /* For certain integer operations, we need not actually extend
+ the narrow operands, as long as we will truncate
+ the results to the same narrowness. */
+
+ if ((binoptab == ior_optab || binoptab == and_optab
+ || binoptab == xor_optab
+ || binoptab == add_optab || binoptab == sub_optab
+ || binoptab == smul_optab || binoptab == ashl_optab)
+ && class == MODE_INT)
+ no_extend = 1;
+
+ xop0 = widen_operand (xop0, wider_mode, mode,
+ unsignedp, no_extend);
+
+ /* The second operand of a shift must always be extended. */
+ xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
+ no_extend && binoptab != ashl_optab);
+
+ temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
+ unsignedp, methods);
+ if (temp)
+ {
+ if (class != MODE_INT)
+ {
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ convert_move (target, temp, 0);
+ return target;
+ }
+ else
+ return gen_lowpart (mode, temp);
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+ }
+
+ delete_insns_since (entry_last);
+ return 0;
+}
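
A hedged sketch of a typical call, using only names that appear in this
file or in expr.h; the operand rtxes X and Y are hypothetical:

    /* Emit RTL for DEST = X + Y in SImode, widening or falling back to
       a library call if the target has no direct addsi3 pattern.  */
    rtx dest = gen_reg_rtx (SImode);
    rtx sum = expand_binop (SImode, add_optab, x, y, dest,
                            0 /* signed */, OPTAB_LIB_WIDEN);

    if (sum == 0)
      abort ();                  /* every strategy failed */
    else if (sum != dest)
      emit_move_insn (dest, sum);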
+
+/* Expand a binary operator which has both signed and unsigned forms.
+ UOPTAB is the optab for unsigned operations, and SOPTAB is for
+ signed operations.
+
+ If we widen unsigned operands, we may use a signed wider operation instead
+ of an unsigned wider operation, since the result would be the same. */
+
+rtx
+sign_expand_binop (mode, uoptab, soptab, op0, op1, target, unsignedp, methods)
+ enum machine_mode mode;
+ optab uoptab, soptab;
+ rtx op0, op1, target;
+ int unsignedp;
+ enum optab_methods methods;
+{
+ register rtx temp;
+ optab direct_optab = unsignedp ? uoptab : soptab;
+ struct optab wide_soptab;
+
+ /* Do it without widening, if possible. */
+ temp = expand_binop (mode, direct_optab, op0, op1, target,
+ unsignedp, OPTAB_DIRECT);
+ if (temp || methods == OPTAB_DIRECT)
+ return temp;
+
+ /* Try widening to a signed int. Make a fake signed optab that
+ hides any signed insn for direct use. */
+ wide_soptab = *soptab;
+ wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
+ wide_soptab.handlers[(int) mode].libfunc = 0;
+
+ temp = expand_binop (mode, &wide_soptab, op0, op1, target,
+ unsignedp, OPTAB_WIDEN);
+
+ /* For unsigned operands, try widening to an unsigned int. */
+ if (temp == 0 && unsignedp)
+ temp = expand_binop (mode, uoptab, op0, op1, target,
+ unsignedp, OPTAB_WIDEN);
+ if (temp || methods == OPTAB_WIDEN)
+ return temp;
+
+ /* Use the right width lib call if that exists. */
+ temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
+ if (temp || methods == OPTAB_LIB)
+ return temp;
+
+  /* Must widen and use a lib call, using either the signed or unsigned form.  */
+ temp = expand_binop (mode, &wide_soptab, op0, op1, target,
+ unsignedp, methods);
+ if (temp != 0)
+ return temp;
+ if (unsignedp)
+ return expand_binop (mode, uoptab, op0, op1, target,
+ unsignedp, methods);
+ return 0;
+}
+
+/* Generate code to perform an operation specified by BINOPTAB
+ on operands OP0 and OP1, with two results to TARG1 and TARG2.
+ We assume that the order of the operands for the instruction
+ is TARG0, OP0, OP1, TARG1, which would fit a pattern like
+ [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
+
+   Either TARG0 or TARG1 may be zero; that means the corresponding
+   result is not actually wanted.  We will generate it into
+   a dummy pseudo-reg and discard it.  They may not both be zero.
+
+ Returns 1 if this operation can be performed; 0 if not. */
+
+int
+expand_twoval_binop (binoptab, op0, op1, targ0, targ1, unsignedp)
+ optab binoptab;
+ rtx op0, op1;
+ rtx targ0, targ1;
+ int unsignedp;
+{
+ enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
+ enum mode_class class;
+ enum machine_mode wider_mode;
+ rtx entry_last = get_last_insn ();
+ rtx last;
+
+ class = GET_MODE_CLASS (mode);
+
+ op0 = protect_from_queue (op0, 0);
+ op1 = protect_from_queue (op1, 0);
+
+ if (flag_force_mem)
+ {
+ op0 = force_not_mem (op0);
+ op1 = force_not_mem (op1);
+ }
+
+ /* If we are inside an appropriately-short loop and one operand is an
+ expensive constant, force it into a register. */
+ if (CONSTANT_P (op0) && preserve_subexpressions_p ()
+ && rtx_cost (op0, binoptab->code) > 2)
+ op0 = force_reg (mode, op0);
+
+ if (CONSTANT_P (op1) && preserve_subexpressions_p ()
+ && rtx_cost (op1, binoptab->code) > 2)
+ op1 = force_reg (mode, op1);
+
+ if (targ0)
+ targ0 = protect_from_queue (targ0, 1);
+ else
+ targ0 = gen_reg_rtx (mode);
+ if (targ1)
+ targ1 = protect_from_queue (targ1, 1);
+ else
+ targ1 = gen_reg_rtx (mode);
+
+ /* Record where to go back to if we fail. */
+ last = get_last_insn ();
+
+ if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ int icode = (int) binoptab->handlers[(int) mode].insn_code;
+ enum machine_mode mode0 = insn_operand_mode[icode][1];
+ enum machine_mode mode1 = insn_operand_mode[icode][2];
+ rtx pat;
+ rtx xop0 = op0, xop1 = op1;
+
+ /* In case this insn wants input operands in modes different from the
+ result, convert the operands. */
+ if (GET_MODE (op0) != VOIDmode && GET_MODE (op0) != mode0)
+ xop0 = convert_to_mode (mode0, xop0, unsignedp);
+
+ if (GET_MODE (op1) != VOIDmode && GET_MODE (op1) != mode1)
+ xop1 = convert_to_mode (mode1, xop1, unsignedp);
+
+ /* Now, if insn doesn't accept these operands, put them into pseudos. */
+ if (! (*insn_operand_predicate[icode][1]) (xop0, mode0))
+ xop0 = copy_to_mode_reg (mode0, xop0);
+
+ if (! (*insn_operand_predicate[icode][2]) (xop1, mode1))
+ xop1 = copy_to_mode_reg (mode1, xop1);
+
+ /* We could handle this, but we should always be called with a pseudo
+ for our targets and all insns should take them as outputs. */
+ if (! (*insn_operand_predicate[icode][0]) (targ0, mode)
+ || ! (*insn_operand_predicate[icode][3]) (targ1, mode))
+ abort ();
+
+ pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
+ if (pat)
+ {
+ emit_insn (pat);
+ return 1;
+ }
+ else
+ delete_insns_since (last);
+ }
+
+ /* It can't be done in this mode. Can we do it in a wider mode? */
+
+ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
+ {
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if (binoptab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ {
+ register rtx t0 = gen_reg_rtx (wider_mode);
+ register rtx t1 = gen_reg_rtx (wider_mode);
+
+ if (expand_twoval_binop (binoptab,
+ convert_modes (wider_mode, mode, op0,
+ unsignedp),
+ convert_modes (wider_mode, mode, op1,
+ unsignedp),
+ t0, t1, unsignedp))
+ {
+ convert_move (targ0, t0, unsignedp);
+ convert_move (targ1, t1, unsignedp);
+ return 1;
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+ }
+
+ delete_insns_since (entry_last);
+ return 0;
+}
+
+/* Generate code to perform an operation specified by UNOPTAB
+ on operand OP0, with result having machine-mode MODE.
+
+ UNSIGNEDP is for the case where we have to widen the operands
+ to perform the operation. It says to use zero-extension.
+
+ If TARGET is nonzero, the value
+ is generated there, if it is convenient to do so.
+ In all cases an rtx is returned for the locus of the value;
+ this may or may not be TARGET. */
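+
+/* A typical call (a sketch; OP0 is assumed to be valid in MODE already):
+
+	temp = expand_unop (mode, neg_optab, op0, target, 0);
+
+ TEMP then holds the negated value, which may or may not be TARGET. */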
+
+rtx
+expand_unop (mode, unoptab, op0, target, unsignedp)
+ enum machine_mode mode;
+ optab unoptab;
+ rtx op0;
+ rtx target;
+ int unsignedp;
+{
+ enum mode_class class;
+ enum machine_mode wider_mode;
+ register rtx temp;
+ rtx last = get_last_insn ();
+ rtx pat;
+
+ class = GET_MODE_CLASS (mode);
+
+ op0 = protect_from_queue (op0, 0);
+
+ if (flag_force_mem)
+ {
+ op0 = force_not_mem (op0);
+ }
+
+ if (target)
+ target = protect_from_queue (target, 1);
+
+ if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ int icode = (int) unoptab->handlers[(int) mode].insn_code;
+ enum machine_mode mode0 = insn_operand_mode[icode][1];
+ rtx xop0 = op0;
+
+ if (target)
+ temp = target;
+ else
+ temp = gen_reg_rtx (mode);
+
+ if (GET_MODE (xop0) != VOIDmode
+ && GET_MODE (xop0) != mode0)
+ xop0 = convert_to_mode (mode0, xop0, unsignedp);
+
+ /* Now, if insn doesn't accept our operand, put it into a pseudo. */
+
+ if (! (*insn_operand_predicate[icode][1]) (xop0, mode0))
+ xop0 = copy_to_mode_reg (mode0, xop0);
+
+ if (! (*insn_operand_predicate[icode][0]) (temp, mode))
+ temp = gen_reg_rtx (mode);
+
+ pat = GEN_FCN (icode) (temp, xop0);
+ if (pat)
+ {
+ if (GET_CODE (pat) == SEQUENCE
+ && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
+ {
+ delete_insns_since (last);
+ return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
+ }
+
+ emit_insn (pat);
+
+ return temp;
+ }
+ else
+ delete_insns_since (last);
+ }
+
+ /* It can't be done in this mode. Can we open-code it in a wider mode? */
+
+ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
+ {
+ rtx xop0 = op0;
+
+ /* For certain operations, we need not actually extend
+ the narrow operand, as long as we will truncate the
+ results to the same narrowness. */
+
+ xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
+ (unoptab == neg_optab
+ || unoptab == one_cmpl_optab)
+ && class == MODE_INT);
+
+ temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
+ unsignedp);
+
+ if (temp)
+ {
+ if (class != MODE_INT)
+ {
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ convert_move (target, temp, 0);
+ return target;
+ }
+ else
+ return gen_lowpart (mode, temp);
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+
+ /* These can be done a word at a time. */
+ if (unoptab == one_cmpl_optab
+ && class == MODE_INT
+ && GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
+ {
+ int i;
+ rtx insns;
+
+ if (target == 0 || target == op0)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ /* Do the actual arithmetic. */
+ for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
+ {
+ rtx target_piece = operand_subword (target, i, 1, mode);
+ rtx x = expand_unop (word_mode, unoptab,
+ operand_subword_force (op0, i, mode),
+ target_piece, unsignedp);
+ if (target_piece != x)
+ emit_move_insn (target_piece, x);
+ }
+
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_no_conflict_block (insns, target, op0, NULL_RTX,
+ gen_rtx (unoptab->code, mode, copy_rtx (op0)));
+ return target;
+ }
+
+ /* Open-code the complex negation operation. */
+ else if (unoptab == neg_optab
+ && (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT))
+ {
+ rtx target_piece;
+ rtx x;
+ rtx seq;
+
+ /* Find the correct mode for the real and imaginary parts. */
+ enum machine_mode submode
+ = mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT,
+ class == MODE_COMPLEX_INT ? MODE_INT : MODE_FLOAT,
+ 0);
+
+ if (submode == BLKmode)
+ abort ();
+
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+
+ start_sequence ();
+
+ target_piece = gen_imagpart (submode, target);
+ x = expand_unop (submode, unoptab,
+ gen_imagpart (submode, op0),
+ target_piece, unsignedp);
+ if (target_piece != x)
+ emit_move_insn (target_piece, x);
+
+ target_piece = gen_realpart (submode, target);
+ x = expand_unop (submode, unoptab,
+ gen_realpart (submode, op0),
+ target_piece, unsignedp);
+ if (target_piece != x)
+ emit_move_insn (target_piece, x);
+
+ seq = get_insns ();
+ end_sequence ();
+
+ emit_no_conflict_block (seq, target, op0, 0,
+ gen_rtx (unoptab->code, mode, copy_rtx (op0)));
+ return target;
+ }
+
+ /* Now try a library call in this mode. */
+ if (unoptab->handlers[(int) mode].libfunc)
+ {
+ rtx insns;
+ rtx funexp = unoptab->handlers[(int) mode].libfunc;
+ rtx value;
+
+ start_sequence ();
+
+ /* Pass 1 for NO_QUEUE so we don't lose any increments
+ if the libcall is cse'd or moved. */
+ value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
+ NULL_RTX, 1, mode, 1, op0, mode);
+ insns = get_insns ();
+ end_sequence ();
+
+ target = gen_reg_rtx (mode);
+ emit_libcall_block (insns, target, value,
+ gen_rtx (unoptab->code, mode, op0));
+
+ return target;
+ }
+
+ /* It can't be done in this mode. Can we do it in a wider mode? */
+
+ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
+ {
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if ((unoptab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ || unoptab->handlers[(int) wider_mode].libfunc)
+ {
+ rtx xop0 = op0;
+
+ /* For certain operations, we need not actually extend
+ the narrow operand, as long as we will truncate the
+ results to the same narrowness. */
+
+ xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
+ (unoptab == neg_optab
+ || unoptab == one_cmpl_optab)
+ && class == MODE_INT);
+
+ temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
+ unsignedp);
+
+ if (temp)
+ {
+ if (class != MODE_INT)
+ {
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+ convert_move (target, temp, 0);
+ return target;
+ }
+ else
+ return gen_lowpart (mode, temp);
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Emit code to compute the absolute value of OP0, with result to
+ TARGET if convenient. (TARGET may be 0.) The return value says
+ where the result actually is to be found.
+
+ MODE is the mode of the operand; the mode of the result is
+ different but can be deduced from MODE.
+
+ UNSIGNEDP is relevant for complex integer modes. */
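+
+/* For instance, an SCmode operand (a complex pair of SFmode parts)
+ produces an SFmode result; SUBMODE below is deduced accordingly. */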
+
+rtx
+expand_complex_abs (mode, op0, target, unsignedp)
+ enum machine_mode mode;
+ rtx op0;
+ rtx target;
+ int unsignedp;
+{
+ enum mode_class class = GET_MODE_CLASS (mode);
+ enum machine_mode wider_mode;
+ register rtx temp;
+ rtx entry_last = get_last_insn ();
+ rtx last;
+ rtx pat;
+
+ /* Find the correct mode for the real and imaginary parts. */
+ enum machine_mode submode
+ = mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT,
+ class == MODE_COMPLEX_INT ? MODE_INT : MODE_FLOAT,
+ 0);
+
+ if (submode == BLKmode)
+ abort ();
+
+ op0 = protect_from_queue (op0, 0);
+
+ if (flag_force_mem)
+ {
+ op0 = force_not_mem (op0);
+ }
+
+ last = get_last_insn ();
+
+ if (target)
+ target = protect_from_queue (target, 1);
+
+ if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ int icode = (int) abs_optab->handlers[(int) mode].insn_code;
+ enum machine_mode mode0 = insn_operand_mode[icode][1];
+ rtx xop0 = op0;
+
+ if (target)
+ temp = target;
+ else
+ temp = gen_reg_rtx (submode);
+
+ if (GET_MODE (xop0) != VOIDmode
+ && GET_MODE (xop0) != mode0)
+ xop0 = convert_to_mode (mode0, xop0, unsignedp);
+
+ /* Now, if insn doesn't accept our operand, put it into a pseudo. */
+
+ if (! (*insn_operand_predicate[icode][1]) (xop0, mode0))
+ xop0 = copy_to_mode_reg (mode0, xop0);
+
+ if (! (*insn_operand_predicate[icode][0]) (temp, submode))
+ temp = gen_reg_rtx (submode);
+
+ pat = GEN_FCN (icode) (temp, xop0);
+ if (pat)
+ {
+ if (GET_CODE (pat) == SEQUENCE
+ && ! add_equal_note (pat, temp, abs_optab->code, xop0, NULL_RTX))
+ {
+ delete_insns_since (last);
+ return expand_unop (mode, abs_optab, op0, NULL_RTX, unsignedp);
+ }
+
+ emit_insn (pat);
+
+ return temp;
+ }
+ else
+ delete_insns_since (last);
+ }
+
+ /* It can't be done in this mode. Can we open-code it in a wider mode? */
+
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if (abs_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
+ {
+ rtx xop0 = op0;
+
+ xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
+ temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);
+
+ if (temp)
+ {
+ if (class != MODE_COMPLEX_INT)
+ {
+ if (target == 0)
+ target = gen_reg_rtx (submode);
+ convert_move (target, temp, 0);
+ return target;
+ }
+ else
+ return gen_lowpart (submode, temp);
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+
+ /* Open-code the complex absolute-value operation
+ if we can open-code sqrt. Otherwise it's not worthwhile. */
+ if (sqrt_optab->handlers[(int) submode].insn_code != CODE_FOR_nothing)
+ {
+ rtx real, imag, total;
+
+ real = gen_realpart (submode, op0);
+ imag = gen_imagpart (submode, op0);
+
+ /* Square both parts. */
+ real = expand_mult (submode, real, real, NULL_RTX, 0);
+ imag = expand_mult (submode, imag, imag, NULL_RTX, 0);
+
+ /* Sum the parts. */
+ total = expand_binop (submode, add_optab, real, imag, NULL_RTX,
+ 0, OPTAB_LIB_WIDEN);
+
+ /* Get sqrt in TARGET. Set TARGET to where the result is. */
+ target = expand_unop (submode, sqrt_optab, total, target, 0);
+ if (target == 0)
+ delete_insns_since (last);
+ else
+ return target;
+ }
+
+ /* Now try a library call in this mode. */
+ if (abs_optab->handlers[(int) mode].libfunc)
+ {
+ rtx insns;
+ rtx funexp = abs_optab->handlers[(int) mode].libfunc;
+ rtx value;
+
+ start_sequence ();
+
+ /* Pass 1 for NO_QUEUE so we don't lose any increments
+ if the libcall is cse'd or moved. */
+ value = emit_library_call_value (abs_optab->handlers[(int) mode].libfunc,
+ NULL_RTX, 1, submode, 1, op0, mode);
+ insns = get_insns ();
+ end_sequence ();
+
+ target = gen_reg_rtx (submode);
+ emit_libcall_block (insns, target, value,
+ gen_rtx (abs_optab->code, mode, op0));
+
+ return target;
+ }
+
+ /* It can't be done in this mode. Can we do it in a wider mode? */
+
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if ((abs_optab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ || abs_optab->handlers[(int) wider_mode].libfunc)
+ {
+ rtx xop0 = op0;
+
+ xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
+
+ temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);
+
+ if (temp)
+ {
+ if (class != MODE_COMPLEX_INT)
+ {
+ if (target == 0)
+ target = gen_reg_rtx (submode);
+ convert_move (target, temp, 0);
+ return target;
+ }
+ else
+ return gen_lowpart (submode, temp);
+ }
+ else
+ delete_insns_since (last);
+ }
+ }
+
+ delete_insns_since (entry_last);
+ return 0;
+}
+
+/* Generate an instruction whose insn-code is INSN_CODE,
+ with two operands: an output TARGET and an input OP0.
+ TARGET *must* be nonzero, and the output is always stored there.
+ CODE is an rtx code such that (CODE OP0) is an rtx that describes
+ the value that is stored into TARGET. */
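+
+/* expand_float, below, uses this to emit the actual conversion insn:
+
+	emit_unop_insn (icode, target, from, FLOAT);
+
+ A REG_EQUAL note is attached when the pattern expands to a sequence. */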
+
+void
+emit_unop_insn (icode, target, op0, code)
+ int icode;
+ rtx target;
+ rtx op0;
+ enum rtx_code code;
+{
+ register rtx temp;
+ enum machine_mode mode0 = insn_operand_mode[icode][1];
+ rtx pat;
+
+ temp = target = protect_from_queue (target, 1);
+
+ op0 = protect_from_queue (op0, 0);
+
+ if (flag_force_mem)
+ op0 = force_not_mem (op0);
+
+ /* Now, if insn does not accept our operands, put them into pseudos. */
+
+ if (! (*insn_operand_predicate[icode][1]) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ if (! (*insn_operand_predicate[icode][0]) (temp, GET_MODE (temp))
+ || (flag_force_mem && GET_CODE (temp) == MEM))
+ temp = gen_reg_rtx (GET_MODE (temp));
+
+ pat = GEN_FCN (icode) (temp, op0);
+
+ if (GET_CODE (pat) == SEQUENCE && code != UNKNOWN)
+ add_equal_note (pat, temp, code, op0, NULL_RTX);
+
+ emit_insn (pat);
+
+ if (temp != target)
+ emit_move_insn (target, temp);
+}
+
+/* Emit code to perform a series of operations on a multi-word quantity, one
+ word at a time.
+
+ Such a block is preceded by a CLOBBER of the output, consists of multiple
+ insns, each setting one word of the output, and followed by a SET copying
+ the output to itself.
+
+ Each of the insns setting words of the output receives a REG_NO_CONFLICT
+ note indicating that it doesn't conflict with the (also multi-word)
+ inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
+ notes.
+
+ INSNS is a block of code generated to perform the operation, not including
+ the CLOBBER and final copy. All insns that compute intermediate values
+ are first emitted, followed by the block as described above. Only
+ INSNs are allowed in the block; no library calls or jumps may be
+ present.
+
+ TARGET, OP0, and OP1 are the output and inputs of the operations,
+ respectively. OP1 may be zero for a unary operation.
+
+ EQUIV, if non-zero, is an expression to be placed into a REG_EQUAL note
+ on the last insn.
+
+ If TARGET is not a register, INSNS is simply emitted with no special
+ processing.
+
+ The final insn emitted is returned. */
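+
+/* For a two-word operation, the emitted block looks roughly like this
+ (a sketch; the notes follow the description above):
+
+	(clobber TARGET)
+	(set (word 0 of TARGET) ...)	;; REG_NO_CONFLICT: OP0, OP1
+	(set (word 1 of TARGET) ...)	;; REG_NO_CONFLICT: OP0, OP1
+	(set TARGET TARGET)		;; REG_EQUAL: EQUIV
+
+ with REG_LIBCALL on the first insn and REG_RETVAL on the last. */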
+
+rtx
+emit_no_conflict_block (insns, target, op0, op1, equiv)
+ rtx insns;
+ rtx target;
+ rtx op0, op1;
+ rtx equiv;
+{
+ rtx prev, next, first, last, insn;
+
+ if (GET_CODE (target) != REG || reload_in_progress)
+ return emit_insns (insns);
+
+ /* First emit all insns that do not store into words of the output and remove
+ these from the list. */
+ for (insn = insns; insn; insn = next)
+ {
+ rtx set = 0;
+ int i;
+
+ next = NEXT_INSN (insn);
+
+ if (GET_CODE (insn) != INSN)
+ abort ();
+
+ if (GET_CODE (PATTERN (insn)) == SET)
+ set = PATTERN (insn);
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
+ {
+ set = XVECEXP (PATTERN (insn), 0, i);
+ break;
+ }
+ }
+
+ if (set == 0)
+ abort ();
+
+ if (! reg_overlap_mentioned_p (target, SET_DEST (set)))
+ {
+ if (PREV_INSN (insn))
+ NEXT_INSN (PREV_INSN (insn)) = next;
+ else
+ insns = next;
+
+ if (next)
+ PREV_INSN (next) = PREV_INSN (insn);
+
+ add_insn (insn);
+ }
+ }
+
+ prev = get_last_insn ();
+
+ /* Now write the CLOBBER of the output, followed by the setting of each
+ of the words, followed by the final copy. */
+ if (target != op0 && target != op1)
+ emit_insn (gen_rtx (CLOBBER, VOIDmode, target));
+
+ for (insn = insns; insn; insn = next)
+ {
+ next = NEXT_INSN (insn);
+ add_insn (insn);
+
+ if (op1 && GET_CODE (op1) == REG)
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_NO_CONFLICT, op1,
+ REG_NOTES (insn));
+
+ if (op0 && GET_CODE (op0) == REG)
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_NO_CONFLICT, op0,
+ REG_NOTES (insn));
+ }
+
+ if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
+ != CODE_FOR_nothing)
+ {
+ last = emit_move_insn (target, target);
+ if (equiv)
+ REG_NOTES (last)
+ = gen_rtx (EXPR_LIST, REG_EQUAL, equiv, REG_NOTES (last));
+ }
+ else
+ last = get_last_insn ();
+
+ if (prev == 0)
+ first = get_insns ();
+ else
+ first = NEXT_INSN (prev);
+
+ /* Encapsulate the block so it gets manipulated as a unit. */
+ REG_NOTES (first) = gen_rtx (INSN_LIST, REG_LIBCALL, last,
+ REG_NOTES (first));
+ REG_NOTES (last) = gen_rtx (INSN_LIST, REG_RETVAL, first, REG_NOTES (last));
+
+ return last;
+}
+
+/* Emit code to make a call to a constant function or a library call.
+
+ INSNS is a list containing all insns emitted in the call.
+ These insns leave the result in RESULT. Our job is to copy RESULT
+ to TARGET, which is logically equivalent to EQUIV.
+
+ We first emit any insns that set a pseudo on the assumption that these are
+ loading constants into registers; doing so allows them to be safely cse'ed
+ between blocks. Then we emit all the other insns in the block, followed by
+ an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
+ note with an operand of EQUIV.
+
+ Moving assignments to pseudos outside of the block is done to improve
+ the generated code, but is not required to generate correct code,
+ hence being unable to move an assignment is not grounds for not making
+ a libcall block. There are two reasons why it is safe to leave these
+ insns inside the block: First, we know that these pseudos cannot be
+ used in generated RTL outside the block since they are created for
+ temporary purposes within the block. Second, CSE will not record the
+ values of anything set inside a libcall block, so we know they must
+ be dead at the end of the block.
+
+ Except for the first group of insns (the ones setting pseudos), the
+ block is delimited by REG_RETVAL and REG_LIBCALL notes. */
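+
+/* expand_unop, above, wraps a unary libcall this way:
+
+	emit_libcall_block (insns, target, value,
+			    gen_rtx (unoptab->code, mode, op0));  */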
+
+void
+emit_libcall_block (insns, target, result, equiv)
+ rtx insns;
+ rtx target;
+ rtx result;
+ rtx equiv;
+{
+ rtx prev, next, first, last, insn;
+
+ /* First emit all insns that set pseudos. Remove them from the list as
+ we go. Avoid insns that set pseudos which were referenced in previous
+ insns. These can be generated by move_by_pieces, for example,
+ to update an address. Similarly, avoid insns that reference things
+ set in previous insns. */
+
+ for (insn = insns; insn; insn = next)
+ {
+ rtx set = single_set (insn);
+
+ next = NEXT_INSN (insn);
+
+ if (set != 0 && GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
+ && (insn == insns
+ || (! reg_mentioned_p (SET_DEST (set), PATTERN (insns))
+ && ! reg_used_between_p (SET_DEST (set), insns, insn)
+ && ! modified_in_p (SET_SRC (set), insns)
+ && ! modified_between_p (SET_SRC (set), insns, insn))))
+ {
+ if (PREV_INSN (insn))
+ NEXT_INSN (PREV_INSN (insn)) = next;
+ else
+ insns = next;
+
+ if (next)
+ PREV_INSN (next) = PREV_INSN (insn);
+
+ add_insn (insn);
+ }
+ }
+
+ prev = get_last_insn ();
+
+ /* Write the remaining insns followed by the final copy. */
+
+ for (insn = insns; insn; insn = next)
+ {
+ next = NEXT_INSN (insn);
+
+ add_insn (insn);
+ }
+
+ last = emit_move_insn (target, result);
+ REG_NOTES (last) = gen_rtx (EXPR_LIST,
+ REG_EQUAL, copy_rtx (equiv), REG_NOTES (last));
+
+ if (prev == 0)
+ first = get_insns ();
+ else
+ first = NEXT_INSN (prev);
+
+ /* Encapsulate the block so it gets manipulated as a unit. */
+ REG_NOTES (first) = gen_rtx (INSN_LIST, REG_LIBCALL, last,
+ REG_NOTES (first));
+ REG_NOTES (last) = gen_rtx (INSN_LIST, REG_RETVAL, first, REG_NOTES (last));
+}
+
+/* Generate code to store zero in X. */
+
+void
+emit_clr_insn (x)
+ rtx x;
+{
+ emit_move_insn (x, const0_rtx);
+}
+
+/* Generate code to store 1 in X
+ assuming it contains zero beforehand. */
+
+void
+emit_0_to_1_insn (x)
+ rtx x;
+{
+ emit_move_insn (x, const1_rtx);
+}
+
+/* Generate code to compare X with Y
+ so that the condition codes are set.
+
+ MODE is the mode of the inputs (in case they are const_int).
+ UNSIGNEDP nonzero says that X and Y are unsigned;
+ this matters if they need to be widened.
+
+ If they have mode BLKmode, then SIZE specifies the size of both X and Y,
+ and ALIGN specifies the known shared alignment of X and Y.
+
+ COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
+ It is ignored for fixed-point and block comparisons;
+ it is used only for floating-point comparisons. */
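+
+/* A typical fixed-point use, as in expand_float below, sets the
+ condition codes for a following conditional branch:
+
+	emit_cmp_insn (from, const0_rtx, GE, NULL_RTX, imode, 0, 0);
+	emit_jump_insn (gen_blt (neglabel));  */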
+
+void
+emit_cmp_insn (x, y, comparison, size, mode, unsignedp, align)
+ rtx x, y;
+ enum rtx_code comparison;
+ rtx size;
+ enum machine_mode mode;
+ int unsignedp;
+ int align;
+{
+ enum mode_class class;
+ enum machine_mode wider_mode;
+
+ class = GET_MODE_CLASS (mode);
+
+ /* They could both be VOIDmode if both args are immediate constants,
+ but we should fold that at an earlier stage.
+ With no special code here, this will call abort,
+ reminding the programmer to implement such folding. */
+
+ if (mode != BLKmode && flag_force_mem)
+ {
+ x = force_not_mem (x);
+ y = force_not_mem (y);
+ }
+
+ /* If we are inside an appropriately-short loop and one operand is an
+ expensive constant, force it into a register. */
+ if (CONSTANT_P (x) && preserve_subexpressions_p () && rtx_cost (x, COMPARE) > 2)
+ x = force_reg (mode, x);
+
+ if (CONSTANT_P (y) && preserve_subexpressions_p () && rtx_cost (y, COMPARE) > 2)
+ y = force_reg (mode, y);
+
+ /* Don't let both operands fail to indicate the mode. */
+ if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
+ x = force_reg (mode, x);
+
+ /* Handle all BLKmode compares. */
+
+ if (mode == BLKmode)
+ {
+ emit_queue ();
+ x = protect_from_queue (x, 0);
+ y = protect_from_queue (y, 0);
+
+ if (size == 0)
+ abort ();
+#ifdef HAVE_cmpstrqi
+ if (HAVE_cmpstrqi
+ && GET_CODE (size) == CONST_INT
+ && INTVAL (size) < (1 << GET_MODE_BITSIZE (QImode)))
+ {
+ enum machine_mode result_mode
+ = insn_operand_mode[(int) CODE_FOR_cmpstrqi][0];
+ rtx result = gen_reg_rtx (result_mode);
+ emit_insn (gen_cmpstrqi (result, x, y, size, GEN_INT (align)));
+ emit_cmp_insn (result, const0_rtx, comparison, NULL_RTX,
+ result_mode, 0, 0);
+ }
+ else
+#endif
+#ifdef HAVE_cmpstrhi
+ if (HAVE_cmpstrhi
+ && GET_CODE (size) == CONST_INT
+ && INTVAL (size) < (1 << GET_MODE_BITSIZE (HImode)))
+ {
+ enum machine_mode result_mode
+ = insn_operand_mode[(int) CODE_FOR_cmpstrhi][0];
+ rtx result = gen_reg_rtx (result_mode);
+ emit_insn (gen_cmpstrhi (result, x, y, size, GEN_INT (align)));
+ emit_cmp_insn (result, const0_rtx, comparison, NULL_RTX,
+ result_mode, 0, 0);
+ }
+ else
+#endif
+#ifdef HAVE_cmpstrsi
+ if (HAVE_cmpstrsi)
+ {
+ enum machine_mode result_mode
+ = insn_operand_mode[(int) CODE_FOR_cmpstrsi][0];
+ rtx result = gen_reg_rtx (result_mode);
+ size = protect_from_queue (size, 0);
+ emit_insn (gen_cmpstrsi (result, x, y,
+ convert_to_mode (SImode, size, 1),
+ GEN_INT (align)));
+ emit_cmp_insn (result, const0_rtx, comparison, NULL_RTX,
+ result_mode, 0, 0);
+ }
+ else
+#endif
+ {
+#ifdef TARGET_MEM_FUNCTIONS
+ emit_library_call (memcmp_libfunc, 0,
+ TYPE_MODE (integer_type_node), 3,
+ XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
+ size, Pmode);
+#else
+ emit_library_call (bcmp_libfunc, 0,
+ TYPE_MODE (integer_type_node), 3,
+ XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
+ size, Pmode);
+#endif
+ emit_cmp_insn (hard_libcall_value (TYPE_MODE (integer_type_node)),
+ const0_rtx, comparison, NULL_RTX,
+ TYPE_MODE (integer_type_node), 0, 0);
+ }
+ return;
+ }
+
+ /* Handle some compares against zero. */
+
+ if (y == CONST0_RTX (mode)
+ && tst_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ int icode = (int) tst_optab->handlers[(int) mode].insn_code;
+
+ emit_queue ();
+ x = protect_from_queue (x, 0);
+ y = protect_from_queue (y, 0);
+
+ /* Now, if insn doesn't accept these operands, put them into pseudos. */
+ if (! (*insn_operand_predicate[icode][0])
+ (x, insn_operand_mode[icode][0]))
+ x = copy_to_mode_reg (insn_operand_mode[icode][0], x);
+
+ emit_insn (GEN_FCN (icode) (x));
+ return;
+ }
+
+ /* Handle compares for which there is a directly suitable insn. */
+
+ if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
+ {
+ int icode = (int) cmp_optab->handlers[(int) mode].insn_code;
+
+ emit_queue ();
+ x = protect_from_queue (x, 0);
+ y = protect_from_queue (y, 0);
+
+ /* Now, if insn doesn't accept these operands, put them into pseudos. */
+ if (! (*insn_operand_predicate[icode][0])
+ (x, insn_operand_mode[icode][0]))
+ x = copy_to_mode_reg (insn_operand_mode[icode][0], x);
+
+ if (! (*insn_operand_predicate[icode][1])
+ (y, insn_operand_mode[icode][1]))
+ y = copy_to_mode_reg (insn_operand_mode[icode][1], y);
+
+ emit_insn (GEN_FCN (icode) (x, y));
+ return;
+ }
+
+ /* Try widening if we can find a direct insn that way. */
+
+ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
+ {
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if (cmp_optab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ {
+ x = protect_from_queue (x, 0);
+ y = protect_from_queue (y, 0);
+ x = convert_modes (wider_mode, mode, x, unsignedp);
+ y = convert_modes (wider_mode, mode, y, unsignedp);
+ emit_cmp_insn (x, y, comparison, NULL_RTX,
+ wider_mode, unsignedp, align);
+ return;
+ }
+ }
+ }
+
+ /* Handle a lib call just for the mode we are using. */
+
+ if (cmp_optab->handlers[(int) mode].libfunc
+ && class != MODE_FLOAT)
+ {
+ rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
+ /* If we want unsigned, and this mode has a distinct unsigned
+ comparison routine, use that. */
+ if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
+ libfunc = ucmp_optab->handlers[(int) mode].libfunc;
+
+ emit_library_call (libfunc, 1,
+ word_mode, 2, x, mode, y, mode);
+
+ /* Integer comparison returns a result that must be compared against 1,
+ so that even if we do an unsigned compare afterward,
+ there is still a value that can represent the result "less than". */
+
+ emit_cmp_insn (hard_libcall_value (word_mode), const1_rtx,
+ comparison, NULL_RTX, word_mode, unsignedp, 0);
+ return;
+ }
+
+ if (class == MODE_FLOAT)
+ emit_float_lib_cmp (x, y, comparison);
+
+ else
+ abort ();
+}
+
+/* Nonzero if a compare of mode MODE can be done straightforwardly
+ (without splitting it into pieces). */
+
+int
+can_compare_p (mode)
+ enum machine_mode mode;
+{
+ do
+ {
+ if (cmp_optab->handlers[(int)mode].insn_code != CODE_FOR_nothing)
+ return 1;
+ mode = GET_MODE_WIDER_MODE (mode);
+ } while (mode != VOIDmode);
+
+ return 0;
+}
+
+/* Emit a library call comparison between floating point X and Y.
+ COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
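+
+/* E.g. an SFmode EQ comparison calls the eqsf2_libfunc routine
+ (presumably named `__eqsf2') and then compares its word_mode
+ result against zero. */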
+
+void
+emit_float_lib_cmp (x, y, comparison)
+ rtx x, y;
+ enum rtx_code comparison;
+{
+ enum machine_mode mode = GET_MODE (x);
+ rtx libfunc = 0;
+
+ if (mode == SFmode)
+ switch (comparison)
+ {
+ case EQ:
+ libfunc = eqsf2_libfunc;
+ break;
+
+ case NE:
+ libfunc = nesf2_libfunc;
+ break;
+
+ case GT:
+ libfunc = gtsf2_libfunc;
+ break;
+
+ case GE:
+ libfunc = gesf2_libfunc;
+ break;
+
+ case LT:
+ libfunc = ltsf2_libfunc;
+ break;
+
+ case LE:
+ libfunc = lesf2_libfunc;
+ break;
+ }
+ else if (mode == DFmode)
+ switch (comparison)
+ {
+ case EQ:
+ libfunc = eqdf2_libfunc;
+ break;
+
+ case NE:
+ libfunc = nedf2_libfunc;
+ break;
+
+ case GT:
+ libfunc = gtdf2_libfunc;
+ break;
+
+ case GE:
+ libfunc = gedf2_libfunc;
+ break;
+
+ case LT:
+ libfunc = ltdf2_libfunc;
+ break;
+
+ case LE:
+ libfunc = ledf2_libfunc;
+ break;
+ }
+ else if (mode == XFmode)
+ switch (comparison)
+ {
+ case EQ:
+ libfunc = eqxf2_libfunc;
+ break;
+
+ case NE:
+ libfunc = nexf2_libfunc;
+ break;
+
+ case GT:
+ libfunc = gtxf2_libfunc;
+ break;
+
+ case GE:
+ libfunc = gexf2_libfunc;
+ break;
+
+ case LT:
+ libfunc = ltxf2_libfunc;
+ break;
+
+ case LE:
+ libfunc = lexf2_libfunc;
+ break;
+ }
+ else if (mode == TFmode)
+ switch (comparison)
+ {
+ case EQ:
+ libfunc = eqtf2_libfunc;
+ break;
+
+ case NE:
+ libfunc = netf2_libfunc;
+ break;
+
+ case GT:
+ libfunc = gttf2_libfunc;
+ break;
+
+ case GE:
+ libfunc = getf2_libfunc;
+ break;
+
+ case LT:
+ libfunc = lttf2_libfunc;
+ break;
+
+ case LE:
+ libfunc = letf2_libfunc;
+ break;
+ }
+ else
+ {
+ enum machine_mode wider_mode;
+
+ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ {
+ if ((cmp_optab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ || (cmp_optab->handlers[(int) wider_mode].libfunc != 0))
+ {
+ x = protect_from_queue (x, 0);
+ y = protect_from_queue (y, 0);
+ x = convert_to_mode (wider_mode, x, 0);
+ y = convert_to_mode (wider_mode, y, 0);
+ emit_float_lib_cmp (x, y, comparison);
+ return;
+ }
+ }
+ abort ();
+ }
+
+ if (libfunc == 0)
+ abort ();
+
+ emit_library_call (libfunc, 1,
+ word_mode, 2, x, mode, y, mode);
+
+ emit_cmp_insn (hard_libcall_value (word_mode), const0_rtx, comparison,
+ NULL_RTX, word_mode, 0, 0);
+}
+
+/* Generate code to indirectly jump to a location given in the rtx LOC. */
+
+void
+emit_indirect_jump (loc)
+ rtx loc;
+{
+ if (! ((*insn_operand_predicate[(int)CODE_FOR_indirect_jump][0])
+ (loc, Pmode)))
+ loc = copy_to_mode_reg (Pmode, loc);
+
+ emit_jump_insn (gen_indirect_jump (loc));
+ emit_barrier ();
+}
+
+/* These three functions generate an insn body and return it
+ rather than emitting the insn.
+
+ They do not protect from queued increments,
+ because they may be used 1) in protect_from_queue itself
+ and 2) in other passes where there is no queue. */
+
+/* Generate and return an insn body to add Y to X. */
+
+rtx
+gen_add2_insn (x, y)
+ rtx x, y;
+{
+ int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
+
+ if (! (*insn_operand_predicate[icode][0]) (x, insn_operand_mode[icode][0])
+ || ! (*insn_operand_predicate[icode][1]) (x, insn_operand_mode[icode][1])
+ || ! (*insn_operand_predicate[icode][2]) (y, insn_operand_mode[icode][2]))
+ abort ();
+
+ return (GEN_FCN (icode) (x, x, y));
+}
+
+int
+have_add2_insn (mode)
+ enum machine_mode mode;
+{
+ return add_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing;
+}
+
+/* Generate and return an insn body to subtract Y from X. */
+
+rtx
+gen_sub2_insn (x, y)
+ rtx x, y;
+{
+ int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
+
+ if (! (*insn_operand_predicate[icode][0]) (x, insn_operand_mode[icode][0])
+ || ! (*insn_operand_predicate[icode][1]) (x, insn_operand_mode[icode][1])
+ || ! (*insn_operand_predicate[icode][2]) (y, insn_operand_mode[icode][2]))
+ abort ();
+
+ return (GEN_FCN (icode) (x, x, y));
+}
+
+int
+have_sub2_insn (mode)
+ enum machine_mode mode;
+{
+ return sub_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing;
+}
+
+/* Generate the body of an instruction to copy Y into X.
+ It may be a SEQUENCE, if one insn isn't enough. */
+
+rtx
+gen_move_insn (x, y)
+ rtx x, y;
+{
+ register enum machine_mode mode = GET_MODE (x);
+ enum insn_code insn_code;
+ rtx seq;
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (y);
+
+ insn_code = mov_optab->handlers[(int) mode].insn_code;
+
+ /* Handle MODE_CC modes: If we don't have a special move insn for this mode,
+ find a mode to do it in. If we have a movcc, use it. Otherwise,
+ find the MODE_INT mode of the same width. */
+
+ if (GET_MODE_CLASS (mode) == MODE_CC && insn_code == CODE_FOR_nothing)
+ {
+ enum machine_mode tmode = VOIDmode;
+ rtx x1 = x, y1 = y;
+
+ if (mode != CCmode
+ && mov_optab->handlers[(int) CCmode].insn_code != CODE_FOR_nothing)
+ tmode = CCmode;
+ else
+ for (tmode = QImode; tmode != VOIDmode;
+ tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) == GET_MODE_SIZE (mode))
+ break;
+
+ if (tmode == VOIDmode)
+ abort ();
+
+ /* Get X and Y in TMODE. We can't use gen_lowpart here because it
+ may call change_address which is not appropriate if we were
+ called when a reload was in progress. We don't have to worry
+ about changing the address since the size in bytes is supposed to
+ be the same. Copy the MEM to change the mode and move any
+ substitutions from the old MEM to the new one. */
+
+ if (reload_in_progress)
+ {
+ x = gen_lowpart_common (tmode, x1);
+ if (x == 0 && GET_CODE (x1) == MEM)
+ {
+ x = gen_rtx (MEM, tmode, XEXP (x1, 0));
+ RTX_UNCHANGING_P (x) = RTX_UNCHANGING_P (x1);
+ MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (x1);
+ MEM_VOLATILE_P (x) = MEM_VOLATILE_P (x1);
+ copy_replacements (x1, x);
+ }
+
+ y = gen_lowpart_common (tmode, y1);
+ if (y == 0 && GET_CODE (y1) == MEM)
+ {
+ y = gen_rtx (MEM, tmode, XEXP (y1, 0));
+ RTX_UNCHANGING_P (y) = RTX_UNCHANGING_P (y1);
+ MEM_IN_STRUCT_P (y) = MEM_IN_STRUCT_P (y1);
+ MEM_VOLATILE_P (y) = MEM_VOLATILE_P (y1);
+ copy_replacements (y1, y);
+ }
+ }
+ else
+ {
+ x = gen_lowpart (tmode, x);
+ y = gen_lowpart (tmode, y);
+ }
+
+ insn_code = mov_optab->handlers[(int) tmode].insn_code;
+ return (GEN_FCN (insn_code) (x, y));
+ }
+
+ start_sequence ();
+ emit_move_insn_1 (x, y);
+ seq = gen_sequence ();
+ end_sequence ();
+ return seq;
+}
+
+/* Return the insn code used to extend FROM_MODE to TO_MODE.
+ UNSIGNEDP specifies zero-extension instead of sign-extension. If
+ no such operation exists, CODE_FOR_nothing will be returned. */
+
+enum insn_code
+can_extend_p (to_mode, from_mode, unsignedp)
+ enum machine_mode to_mode, from_mode;
+ int unsignedp;
+{
+ return extendtab[(int) to_mode][(int) from_mode][unsignedp];
+}
+
+/* Generate the body of an insn to extend Y (with mode MFROM)
+ into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
+
+rtx
+gen_extend_insn (x, y, mto, mfrom, unsignedp)
+ rtx x, y;
+ enum machine_mode mto, mfrom;
+ int unsignedp;
+{
+ return (GEN_FCN (extendtab[(int) mto][(int) mfrom][unsignedp]) (x, y));
+}
+
+/* can_fix_p and can_float_p say whether the target machine
+ can directly convert a given fixed point type to
+ a given floating point type, or vice versa.
+ The returned value is the CODE_FOR_... value to use,
+ or CODE_FOR_nothing if these modes cannot be directly converted.
+
+ *TRUNCP_PTR is set to 1 if it is necessary to output
+ an explicit FTRUNC insn before the fix insn; otherwise 0. */
+
+static enum insn_code
+can_fix_p (fixmode, fltmode, unsignedp, truncp_ptr)
+ enum machine_mode fltmode, fixmode;
+ int unsignedp;
+ int *truncp_ptr;
+{
+ *truncp_ptr = 0;
+ if (fixtrunctab[(int) fltmode][(int) fixmode][unsignedp] != CODE_FOR_nothing)
+ return fixtrunctab[(int) fltmode][(int) fixmode][unsignedp];
+
+ if (ftrunc_optab->handlers[(int) fltmode].insn_code != CODE_FOR_nothing)
+ {
+ *truncp_ptr = 1;
+ return fixtab[(int) fltmode][(int) fixmode][unsignedp];
+ }
+ return CODE_FOR_nothing;
+}
+
+static enum insn_code
+can_float_p (fltmode, fixmode, unsignedp)
+ enum machine_mode fixmode, fltmode;
+ int unsignedp;
+{
+ return floattab[(int) fltmode][(int) fixmode][unsignedp];
+}
+
+/* Generate code to convert FROM to floating point
+ and store in TO. FROM must be fixed point and not VOIDmode.
+ UNSIGNEDP nonzero means regard FROM as unsigned.
+ Normally this is done by correcting the final value
+ if it is negative. */
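+
+/* E.g. a negative SImode FROM, converted as if signed, is corrected
+ by adding 2**32, which gives the floating equivalent of the
+ unsigned interpretation of FROM. */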
+
+void
+expand_float (to, from, unsignedp)
+ rtx to, from;
+ int unsignedp;
+{
+ enum insn_code icode;
+ register rtx target = to;
+ enum machine_mode fmode, imode;
+
+ /* Crash now, because we won't be able to decide which mode to use. */
+ if (GET_MODE (from) == VOIDmode)
+ abort ();
+
+ /* Look for an insn to do the conversion. Do it in the specified
+ modes if possible; otherwise convert either input, output or both to
+ wider mode. If the integer mode is wider than the mode of FROM,
+ we can do the conversion signed even if the input is unsigned. */
+
+ for (imode = GET_MODE (from); imode != VOIDmode;
+ imode = GET_MODE_WIDER_MODE (imode))
+ for (fmode = GET_MODE (to); fmode != VOIDmode;
+ fmode = GET_MODE_WIDER_MODE (fmode))
+ {
+ int doing_unsigned = unsignedp;
+
+ icode = can_float_p (fmode, imode, unsignedp);
+ if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
+ icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;
+
+ if (icode != CODE_FOR_nothing)
+ {
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (imode != GET_MODE (from))
+ from = convert_to_mode (imode, from, unsignedp);
+
+ if (fmode != GET_MODE (to))
+ target = gen_reg_rtx (fmode);
+
+ emit_unop_insn (icode, target, from,
+ doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
+
+ if (target != to)
+ convert_move (to, target, 0);
+ return;
+ }
+ }
+
+#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+
+ /* Unsigned integer, and no way to convert directly.
+ Convert as signed, then conditionally adjust the result. */
+ if (unsignedp)
+ {
+ rtx label = gen_label_rtx ();
+ rtx temp;
+ REAL_VALUE_TYPE offset;
+
+ emit_queue ();
+
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (flag_force_mem)
+ from = force_not_mem (from);
+
+ /* Look for a usable floating mode FMODE wider than the source and at
+ least as wide as the target. Using FMODE will avoid rounding woes
+ with unsigned values greater than the signed maximum value. */
+
+ for (fmode = GET_MODE (to); fmode != VOIDmode;
+ fmode = GET_MODE_WIDER_MODE (fmode))
+ if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
+ && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
+ break;
+
+ if (fmode == VOIDmode)
+ {
+ /* There is no such mode. Pretend the target is wide enough. */
+ fmode = GET_MODE (to);
+
+ /* Avoid double-rounding when TO is narrower than FROM. */
+ if ((significand_size (fmode) + 1)
+ < GET_MODE_BITSIZE (GET_MODE (from)))
+ {
+ rtx temp1;
+ rtx neglabel = gen_label_rtx ();
+
+ /* Don't use TARGET if it isn't a register, is a hard register,
+ or is the wrong mode. */
+ if (GET_CODE (target) != REG
+ || REGNO (target) < FIRST_PSEUDO_REGISTER
+ || GET_MODE (target) != fmode)
+ target = gen_reg_rtx (fmode);
+
+ imode = GET_MODE (from);
+ do_pending_stack_adjust ();
+
+ /* Test whether the sign bit is set. */
+ emit_cmp_insn (from, const0_rtx, GE, NULL_RTX, imode, 0, 0);
+ emit_jump_insn (gen_blt (neglabel));
+
+ /* The sign bit is not set. Convert as signed. */
+ expand_float (target, from, 0);
+ emit_jump_insn (gen_jump (label));
+
+ /* The sign bit is set.
+ Convert to a usable (positive signed) value by shifting right
+ one bit, while remembering if a nonzero bit was shifted
+ out; i.e., compute (from & 1) | (from >> 1). */
+
+ emit_label (neglabel);
+ temp = expand_binop (imode, and_optab, from, const1_rtx,
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
+ NULL_RTX, 1);
+ temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
+ OPTAB_LIB_WIDEN);
+ expand_float (target, temp, 0);
+
+ /* Multiply by 2 to undo the shift above. */
+ target = expand_binop (fmode, add_optab, target, target,
+ target, 0, OPTAB_LIB_WIDEN);
+ do_pending_stack_adjust ();
+ emit_label (label);
+ goto done;
+ }
+ }
+
+ /* If we are about to do some arithmetic to correct for an
+ unsigned operand, do it in a pseudo-register. */
+
+ if (GET_MODE (to) != fmode
+ || GET_CODE (to) != REG || REGNO (to) < FIRST_PSEUDO_REGISTER)
+ target = gen_reg_rtx (fmode);
+
+ /* Convert as signed integer to floating. */
+ expand_float (target, from, 0);
+
+ /* If FROM is negative (and therefore TO is negative),
+ correct its value by 2**bitwidth. */
+
+ do_pending_stack_adjust ();
+ emit_cmp_insn (from, const0_rtx, GE, NULL_RTX, GET_MODE (from), 0, 0);
+ emit_jump_insn (gen_bge (label));
+
+ /* On SCO 3.2.1, ldexp rejects values outside [0.5, 1).
+ Rather than setting up a dconst_dot_5, let's hope SCO
+ fixes the bug. */
+ offset = REAL_VALUE_LDEXP (dconst1, GET_MODE_BITSIZE (GET_MODE (from)));
+ temp = expand_binop (fmode, add_optab, target,
+ CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
+ target, 0, OPTAB_LIB_WIDEN);
+ if (temp != target)
+ emit_move_insn (target, temp);
+
+ do_pending_stack_adjust ();
+ emit_label (label);
+ goto done;
+ }
+#endif
+
+ /* No hardware instruction available; call a library routine to convert from
+ SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
+ {
+ rtx libfcn;
+ rtx insns;
+ rtx value;
+
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
+ from = convert_to_mode (SImode, from, unsignedp);
+
+ if (flag_force_mem)
+ from = force_not_mem (from);
+
+ if (GET_MODE (to) == SFmode)
+ {
+ if (GET_MODE (from) == SImode)
+ libfcn = floatsisf_libfunc;
+ else if (GET_MODE (from) == DImode)
+ libfcn = floatdisf_libfunc;
+ else if (GET_MODE (from) == TImode)
+ libfcn = floattisf_libfunc;
+ else
+ abort ();
+ }
+ else if (GET_MODE (to) == DFmode)
+ {
+ if (GET_MODE (from) == SImode)
+ libfcn = floatsidf_libfunc;
+ else if (GET_MODE (from) == DImode)
+ libfcn = floatdidf_libfunc;
+ else if (GET_MODE (from) == TImode)
+ libfcn = floattidf_libfunc;
+ else
+ abort ();
+ }
+ else if (GET_MODE (to) == XFmode)
+ {
+ if (GET_MODE (from) == SImode)
+ libfcn = floatsixf_libfunc;
+ else if (GET_MODE (from) == DImode)
+ libfcn = floatdixf_libfunc;
+ else if (GET_MODE (from) == TImode)
+ libfcn = floattixf_libfunc;
+ else
+ abort ();
+ }
+ else if (GET_MODE (to) == TFmode)
+ {
+ if (GET_MODE (from) == SImode)
+ libfcn = floatsitf_libfunc;
+ else if (GET_MODE (from) == DImode)
+ libfcn = floatditf_libfunc;
+ else if (GET_MODE (from) == TImode)
+ libfcn = floattitf_libfunc;
+ else
+ abort ();
+ }
+ else
+ abort ();
+
+ start_sequence ();
+
+ value = emit_library_call_value (libfcn, NULL_RTX, 1,
+ GET_MODE (to),
+ 1, from, GET_MODE (from));
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_libcall_block (insns, target, value,
+ gen_rtx (FLOAT, GET_MODE (to), from));
+ }
+
+ done:
+
+ /* Copy result to requested destination
+ if we have been computing in a temp location. */
+
+ if (target != to)
+ {
+ if (GET_MODE (target) == GET_MODE (to))
+ emit_move_insn (to, target);
+ else
+ convert_move (to, target, 0);
+ }
+}
+
+/* expand_fix: generate code to convert FROM to fixed point
+ and store in TO. FROM must be floating point. */
+
+static rtx
+ftruncify (x)
+ rtx x;
+{
+ rtx temp = gen_reg_rtx (GET_MODE (x));
+ return expand_unop (GET_MODE (x), ftrunc_optab, x, temp, 0);
+}
+
+void
+expand_fix (to, from, unsignedp)
+ register rtx to, from;
+ int unsignedp;
+{
+ enum insn_code icode;
+ register rtx target = to;
+ enum machine_mode fmode, imode;
+ int must_trunc = 0;
+ rtx libfcn = 0;
+
+ /* We first try to find a pair of modes, one real and one integer, at
+ least as wide as FROM and TO, respectively, in which we can open-code
+ this conversion. If the integer mode is wider than the mode of TO,
+ we can do the conversion either signed or unsigned. */
+
+ for (imode = GET_MODE (to); imode != VOIDmode;
+ imode = GET_MODE_WIDER_MODE (imode))
+ for (fmode = GET_MODE (from); fmode != VOIDmode;
+ fmode = GET_MODE_WIDER_MODE (fmode))
+ {
+ int doing_unsigned = unsignedp;
+
+ icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
+ if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
+ icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
+
+ if (icode != CODE_FOR_nothing)
+ {
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (fmode != GET_MODE (from))
+ from = convert_to_mode (fmode, from, 0);
+
+ if (must_trunc)
+ from = ftruncify (from);
+
+ if (imode != GET_MODE (to))
+ target = gen_reg_rtx (imode);
+
+ emit_unop_insn (icode, target, from,
+ doing_unsigned ? UNSIGNED_FIX : FIX);
+ if (target != to)
+ convert_move (to, target, unsignedp);
+ return;
+ }
+ }
+
+#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+ /* For an unsigned conversion, there is one more way to do it.
+ If we have a signed conversion, we generate code that compares
+ the real value to the largest representable positive number. If it
+ is smaller, the conversion is done normally. Otherwise, subtract
+ one plus the highest signed number, convert, and add it back.
+
+ We only need to check all real modes, since we know we didn't find
+ anything with a wider integer mode. */
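+
+  /* E.g. for a 32-bit TO: if FROM >= 2**31, compute FIX (FROM - 2**31)
+     and then restore the sign bit with XOR 0x80000000. */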
+
+ if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
+ for (fmode = GET_MODE (from); fmode != VOIDmode;
+ fmode = GET_MODE_WIDER_MODE (fmode))
+ /* Make sure we won't lose significant bits doing this. */
+ if (GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))
+ && CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
+ &must_trunc))
+ {
+ int bitsize;
+ REAL_VALUE_TYPE offset;
+ rtx limit, lab1, lab2, insn;
+
+ bitsize = GET_MODE_BITSIZE (GET_MODE (to));
+ offset = REAL_VALUE_LDEXP (dconst1, bitsize - 1);
+ limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
+ lab1 = gen_label_rtx ();
+ lab2 = gen_label_rtx ();
+
+ emit_queue ();
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (flag_force_mem)
+ from = force_not_mem (from);
+
+ if (fmode != GET_MODE (from))
+ from = convert_to_mode (fmode, from, 0);
+
+ /* See if we need to do the subtraction. */
+ do_pending_stack_adjust ();
+ emit_cmp_insn (from, limit, GE, NULL_RTX, GET_MODE (from), 0, 0);
+ emit_jump_insn (gen_bge (lab1));
+
+ /* If not, do the signed "fix" and branch around fixup code. */
+ expand_fix (to, from, 0);
+ emit_jump_insn (gen_jump (lab2));
+ emit_barrier ();
+
+ /* Otherwise, subtract 2**(N-1), convert to signed number,
+ then add 2**(N-1). Do the addition using XOR since this
+ will often generate better code. */
+ emit_label (lab1);
+ target = expand_binop (GET_MODE (from), sub_optab, from, limit,
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+ expand_fix (to, target, 0);
+ target = expand_binop (GET_MODE (to), xor_optab, to,
+ GEN_INT ((HOST_WIDE_INT) 1 << (bitsize - 1)),
+ to, 1, OPTAB_LIB_WIDEN);
+
+ if (target != to)
+ emit_move_insn (to, target);
+
+ emit_label (lab2);
+
+ /* Make a place for a REG_NOTE and add it. */
+ insn = emit_move_insn (to, to);
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL,
+ gen_rtx (UNSIGNED_FIX, GET_MODE (to),
+ copy_rtx (from)),
+ REG_NOTES (insn));
+
+ return;
+ }
+#endif
+
+ /* We can't do it with an insn, so use a library call. But first ensure
+ that the mode of TO is at least as wide as SImode, since those are the
+ only library calls we know about. */
+
+ if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
+ {
+ target = gen_reg_rtx (SImode);
+
+ expand_fix (target, from, unsignedp);
+ }
+ else if (GET_MODE (from) == SFmode)
+ {
+ if (GET_MODE (to) == SImode)
+ libfcn = unsignedp ? fixunssfsi_libfunc : fixsfsi_libfunc;
+ else if (GET_MODE (to) == DImode)
+ libfcn = unsignedp ? fixunssfdi_libfunc : fixsfdi_libfunc;
+ else if (GET_MODE (to) == TImode)
+ libfcn = unsignedp ? fixunssfti_libfunc : fixsfti_libfunc;
+ else
+ abort ();
+ }
+ else if (GET_MODE (from) == DFmode)
+ {
+ if (GET_MODE (to) == SImode)
+ libfcn = unsignedp ? fixunsdfsi_libfunc : fixdfsi_libfunc;
+ else if (GET_MODE (to) == DImode)
+ libfcn = unsignedp ? fixunsdfdi_libfunc : fixdfdi_libfunc;
+ else if (GET_MODE (to) == TImode)
+ libfcn = unsignedp ? fixunsdfti_libfunc : fixdfti_libfunc;
+ else
+ abort ();
+ }
+ else if (GET_MODE (from) == XFmode)
+ {
+ if (GET_MODE (to) == SImode)
+ libfcn = unsignedp ? fixunsxfsi_libfunc : fixxfsi_libfunc;
+ else if (GET_MODE (to) == DImode)
+ libfcn = unsignedp ? fixunsxfdi_libfunc : fixxfdi_libfunc;
+ else if (GET_MODE (to) == TImode)
+ libfcn = unsignedp ? fixunsxfti_libfunc : fixxfti_libfunc;
+ else
+ abort ();
+ }
+ else if (GET_MODE (from) == TFmode)
+ {
+ if (GET_MODE (to) == SImode)
+ libfcn = unsignedp ? fixunstfsi_libfunc : fixtfsi_libfunc;
+ else if (GET_MODE (to) == DImode)
+ libfcn = unsignedp ? fixunstfdi_libfunc : fixtfdi_libfunc;
+ else if (GET_MODE (to) == TImode)
+ libfcn = unsignedp ? fixunstfti_libfunc : fixtfti_libfunc;
+ else
+ abort ();
+ }
+ else
+ abort ();
+
+ if (libfcn)
+ {
+ rtx insns;
+
+ to = protect_from_queue (to, 1);
+ from = protect_from_queue (from, 0);
+
+ if (flag_force_mem)
+ from = force_not_mem (from);
+
+ start_sequence ();
+
+ emit_library_call (libfcn, 1, GET_MODE (to), 1, from, GET_MODE (from));
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_libcall_block (insns, target, hard_libcall_value (GET_MODE (to)),
+ gen_rtx (unsignedp ? UNSIGNED_FIX : FIX,
+ GET_MODE (to), from));
+ }
+
+ if (GET_MODE (to) == GET_MODE (target))
+ emit_move_insn (to, target);
+ else
+ convert_move (to, target, 0);
+}
+
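+/* Create a new optab whose generic rtx code is CODE. Every handler
+ starts out as CODE_FOR_nothing with no libfunc; if CODE is known,
+ the optab is also recorded in code_to_optab. */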
+static optab
+init_optab (code)
+ enum rtx_code code;
+{
+ int i;
+ optab op = (optab) xmalloc (sizeof (struct optab));
+ op->code = code;
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ {
+ op->handlers[i].insn_code = CODE_FOR_nothing;
+ op->handlers[i].libfunc = 0;
+ }
+
+ if (code != UNKNOWN)
+ code_to_optab[(int) code] = op;
+
+ return op;
+}
+
+/* Initialize the libfunc fields of an entire group of entries in some
+ optab. Each entry is set equal to a string consisting of a leading
+ pair of underscores followed by a generic operation name followed by
+ a mode name (downshifted to lower case) followed by a single character
+ representing the number of operands for the given operation (which is
+ usually one of the characters '2', '3', or '4').
+
+ OPTABLE is the table in which libfunc fields are to be initialized.
+ FIRST_MODE is the first machine mode index in the given optab to
+ initialize.
+ LAST_MODE is the last machine mode index in the given optab to
+ initialize.
+ OPNAME is the generic (string) name of the operation.
+ SUFFIX is the character which specifies the number of operands for
+ the given generic operation.
+*/
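+
+/* For example, with OPNAME "add" and SUFFIX '3', the SImode entry of
+ OPTABLE gets the symbol "__addsi3". */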
+
+static void
+init_libfuncs (optable, first_mode, last_mode, opname, suffix)
+ register optab optable;
+ register int first_mode;
+ register int last_mode;
+ register char *opname;
+ register char suffix;
+{
+ register int mode;
+ register unsigned opname_len = strlen (opname);
+
+ for (mode = first_mode; (int) mode <= (int) last_mode;
+ mode = (enum machine_mode) ((int) mode + 1))
+ {
+ register char *mname = mode_name[(int) mode];
+ register unsigned mname_len = strlen (mname);
+ register char *libfunc_name
+ = (char *) xmalloc (2 + opname_len + mname_len + 1 + 1);
+ register char *p;
+ register char *q;
+
+ p = libfunc_name;
+ *p++ = '_';
+ *p++ = '_';
+ for (q = opname; *q; )
+ *p++ = *q++;
+ for (q = mname; *q; q++)
+ *p++ = tolower (*q);
+ *p++ = suffix;
+ *p++ = '\0';
+ optable->handlers[(int) mode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, libfunc_name);
+ }
+}
+
+/* Initialize the libfunc fields of an entire group of entries in some
+ optab which correspond to all integer mode operations. The parameters
+ have the same meaning as similarly named ones for the `init_libfuncs'
+ routine. (See above). */
+
+static void
+init_integral_libfuncs (optable, opname, suffix)
+ register optab optable;
+ register char *opname;
+ register char suffix;
+{
+ init_libfuncs (optable, SImode, TImode, opname, suffix);
+}
+
+/* Initialize the libfunc fields of an entire group of entries in some
+ optab which correspond to all real mode operations. The parameters
+ have the same meaning as similarly named ones for the `init_libfuncs'
+ routine. (See above). */
+
+static void
+init_floating_libfuncs (optable, opname, suffix)
+ register optab optable;
+ register char *opname;
+ register char suffix;
+{
+ init_libfuncs (optable, SFmode, TFmode, opname, suffix);
+}
+
+/* Initialize the libfunc fields of an entire group of entries in some
+ optab which correspond to all complex floating modes. The parameters
+ have the same meaning as similarly named ones for the `init_libfuncs'
+ routine. (See above). */
+
+static void
+init_complex_libfuncs (optable, opname, suffix)
+ register optab optable;
+ register char *opname;
+ register char suffix;
+{
+ init_libfuncs (optable, SCmode, TCmode, opname, suffix);
+}
+
+/* Call this once to initialize the contents of the optabs
+ appropriately for the current target machine. */
+
+void
+init_optabs ()
+{
+ int i, j;
+ enum insn_code *p;
+
+ /* Start by initializing all tables to contain CODE_FOR_nothing. */
+
+ for (p = fixtab[0][0];
+ p < fixtab[0][0] + sizeof fixtab / sizeof (fixtab[0][0][0]);
+ p++)
+ *p = CODE_FOR_nothing;
+
+ for (p = fixtrunctab[0][0];
+ p < fixtrunctab[0][0] + sizeof fixtrunctab / sizeof (fixtrunctab[0][0][0]);
+ p++)
+ *p = CODE_FOR_nothing;
+
+ for (p = floattab[0][0];
+ p < floattab[0][0] + sizeof floattab / sizeof (floattab[0][0][0]);
+ p++)
+ *p = CODE_FOR_nothing;
+
+ for (p = extendtab[0][0];
+ p < extendtab[0][0] + sizeof extendtab / sizeof extendtab[0][0][0];
+ p++)
+ *p = CODE_FOR_nothing;
+
+ for (i = 0; i < NUM_RTX_CODE; i++)
+ setcc_gen_code[i] = CODE_FOR_nothing;
+
+ add_optab = init_optab (PLUS);
+ sub_optab = init_optab (MINUS);
+ smul_optab = init_optab (MULT);
+ smul_highpart_optab = init_optab (UNKNOWN);
+ umul_highpart_optab = init_optab (UNKNOWN);
+ smul_widen_optab = init_optab (UNKNOWN);
+ umul_widen_optab = init_optab (UNKNOWN);
+ sdiv_optab = init_optab (DIV);
+ sdivmod_optab = init_optab (UNKNOWN);
+ udiv_optab = init_optab (UDIV);
+ udivmod_optab = init_optab (UNKNOWN);
+ smod_optab = init_optab (MOD);
+ umod_optab = init_optab (UMOD);
+ flodiv_optab = init_optab (DIV);
+ ftrunc_optab = init_optab (UNKNOWN);
+ and_optab = init_optab (AND);
+ ior_optab = init_optab (IOR);
+ xor_optab = init_optab (XOR);
+ ashl_optab = init_optab (ASHIFT);
+ ashr_optab = init_optab (ASHIFTRT);
+ lshr_optab = init_optab (LSHIFTRT);
+ rotl_optab = init_optab (ROTATE);
+ rotr_optab = init_optab (ROTATERT);
+ smin_optab = init_optab (SMIN);
+ smax_optab = init_optab (SMAX);
+ umin_optab = init_optab (UMIN);
+ umax_optab = init_optab (UMAX);
+ mov_optab = init_optab (UNKNOWN);
+ movstrict_optab = init_optab (UNKNOWN);
+ cmp_optab = init_optab (UNKNOWN);
+ ucmp_optab = init_optab (UNKNOWN);
+ tst_optab = init_optab (UNKNOWN);
+ neg_optab = init_optab (NEG);
+ abs_optab = init_optab (ABS);
+ one_cmpl_optab = init_optab (NOT);
+ ffs_optab = init_optab (FFS);
+ sqrt_optab = init_optab (SQRT);
+ sin_optab = init_optab (UNKNOWN);
+ cos_optab = init_optab (UNKNOWN);
+ strlen_optab = init_optab (UNKNOWN);
+
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ {
+ movstr_optab[i] = CODE_FOR_nothing;
+
+#ifdef HAVE_SECONDARY_RELOADS
+ reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
+#endif
+ }
+
+ /* Fill in the optabs with the insns we support. */
+ init_all_optabs ();
+
+#ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
+ /* This flag says the same insns that convert to a signed fixnum
+ also convert validly to an unsigned one. */
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ for (j = 0; j < NUM_MACHINE_MODES; j++)
+ fixtrunctab[i][j][1] = fixtrunctab[i][j][0];
+#endif
+
+#ifdef EXTRA_CC_MODES
+ init_mov_optab ();
+#endif
+
+ /* Initialize the optabs with the names of the library functions. */
+ init_integral_libfuncs (add_optab, "add", '3');
+ init_floating_libfuncs (add_optab, "add", '3');
+ init_integral_libfuncs (sub_optab, "sub", '3');
+ init_floating_libfuncs (sub_optab, "sub", '3');
+ init_integral_libfuncs (smul_optab, "mul", '3');
+ init_floating_libfuncs (smul_optab, "mul", '3');
+ init_integral_libfuncs (sdiv_optab, "div", '3');
+ init_integral_libfuncs (udiv_optab, "udiv", '3');
+ init_integral_libfuncs (sdivmod_optab, "divmod", '4');
+ init_integral_libfuncs (udivmod_optab, "udivmod", '4');
+ init_integral_libfuncs (smod_optab, "mod", '3');
+ init_integral_libfuncs (umod_optab, "umod", '3');
+ init_floating_libfuncs (flodiv_optab, "div", '3');
+ init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
+ init_integral_libfuncs (and_optab, "and", '3');
+ init_integral_libfuncs (ior_optab, "ior", '3');
+ init_integral_libfuncs (xor_optab, "xor", '3');
+ init_integral_libfuncs (ashl_optab, "ashl", '3');
+ init_integral_libfuncs (ashr_optab, "ashr", '3');
+ init_integral_libfuncs (lshr_optab, "lshr", '3');
+ init_integral_libfuncs (rotl_optab, "rotl", '3');
+ init_integral_libfuncs (rotr_optab, "rotr", '3');
+ init_integral_libfuncs (smin_optab, "min", '3');
+ init_floating_libfuncs (smin_optab, "min", '3');
+ init_integral_libfuncs (smax_optab, "max", '3');
+ init_floating_libfuncs (smax_optab, "max", '3');
+ init_integral_libfuncs (umin_optab, "umin", '3');
+ init_integral_libfuncs (umax_optab, "umax", '3');
+ init_integral_libfuncs (neg_optab, "neg", '2');
+ init_floating_libfuncs (neg_optab, "neg", '2');
+ init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
+ init_integral_libfuncs (ffs_optab, "ffs", '2');
+
+ /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
+ init_integral_libfuncs (cmp_optab, "cmp", '2');
+ init_integral_libfuncs (ucmp_optab, "ucmp", '2');
+ init_floating_libfuncs (cmp_optab, "cmp", '2');
+
+#ifdef MULSI3_LIBCALL
+ smul_optab->handlers[(int) SImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, MULSI3_LIBCALL);
+#endif
+#ifdef MULDI3_LIBCALL
+ smul_optab->handlers[(int) DImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, MULDI3_LIBCALL);
+#endif
+#ifdef MULTI3_LIBCALL
+ smul_optab->handlers[(int) TImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, MULTI3_LIBCALL);
+#endif
+
+#ifdef DIVSI3_LIBCALL
+ sdiv_optab->handlers[(int) SImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, DIVSI3_LIBCALL);
+#endif
+#ifdef DIVDI3_LIBCALL
+ sdiv_optab->handlers[(int) DImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, DIVDI3_LIBCALL);
+#endif
+#ifdef DIVTI3_LIBCALL
+ sdiv_optab->handlers[(int) TImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, DIVTI3_LIBCALL);
+#endif
+
+#ifdef UDIVSI3_LIBCALL
+ udiv_optab->handlers[(int) SImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, UDIVSI3_LIBCALL);
+#endif
+#ifdef UDIVDI3_LIBCALL
+ udiv_optab->handlers[(int) DImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, UDIVDI3_LIBCALL);
+#endif
+#ifdef UDIVTI3_LIBCALL
+ udiv_optab->handlers[(int) TImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, UDIVTI3_LIBCALL);
+#endif
+
+
+#ifdef MODSI3_LIBCALL
+ smod_optab->handlers[(int) SImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, MODSI3_LIBCALL);
+#endif
+#ifdef MODDI3_LIBCALL
+ smod_optab->handlers[(int) DImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, MODDI3_LIBCALL);
+#endif
+#ifdef MODTI3_LIBCALL
+ smod_optab->handlers[(int) TImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, MODTI3_LIBCALL);
+#endif
+
+
+#ifdef UMODSI3_LIBCALL
+ umod_optab->handlers[(int) SImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, UMODSI3_LIBCALL);
+#endif
+#ifdef UMODDI3_LIBCALL
+ umod_optab->handlers[(int) DImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, UMODDI3_LIBCALL);
+#endif
+#ifdef UMODTI3_LIBCALL
+ umod_optab->handlers[(int) TImode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, UMODTI3_LIBCALL);
+#endif
+
+/* Define library calls for quad FP instructions */
+#ifdef ADDTF3_LIBCALL
+ add_optab->handlers[(int) TFmode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, ADDTF3_LIBCALL);
+#endif
+#ifdef SUBTF3_LIBCALL
+ sub_optab->handlers[(int) TFmode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, SUBTF3_LIBCALL);
+#endif
+#ifdef MULTF3_LIBCALL
+ smul_optab->handlers[(int) TFmode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, MULTF3_LIBCALL);
+#endif
+#ifdef DIVTF3_LIBCALL
+ flodiv_optab->handlers[(int) TFmode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, DIVTF3_LIBCALL);
+#endif
+#ifdef SQRTTF2_LIBCALL
+ sqrt_optab->handlers[(int) TFmode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, SQRTTF2_LIBCALL);
+#endif
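+
+/* (Targets with quad-FP support libraries typically define the macros
+   above to point at them, e.g. SPARC-style names such as "_Q_add" --
+   illustrative, not taken from any particular header.)  */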
+
+ /* Use cabs for DC complex abs, since systems generally have cabs.
+ Don't define any libcall for SCmode, so that cabs will be used. */
+ abs_optab->handlers[(int) DCmode].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, "cabs");
+
+ /* The ffs function operates on `int'. */
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+  ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
+ = gen_rtx (SYMBOL_REF, Pmode, "ffs");
+
+ extendsfdf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__extendsfdf2");
+ extendsfxf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__extendsfxf2");
+ extendsftf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__extendsftf2");
+ extenddfxf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__extenddfxf2");
+ extenddftf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__extenddftf2");
+
+ truncdfsf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__truncdfsf2");
+ truncxfsf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__truncxfsf2");
+ trunctfsf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__trunctfsf2");
+ truncxfdf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__truncxfdf2");
+ trunctfdf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__trunctfdf2");
+
+ memcpy_libfunc = gen_rtx (SYMBOL_REF, Pmode, "memcpy");
+ bcopy_libfunc = gen_rtx (SYMBOL_REF, Pmode, "bcopy");
+ memcmp_libfunc = gen_rtx (SYMBOL_REF, Pmode, "memcmp");
+ bcmp_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__gcc_bcmp");
+ memset_libfunc = gen_rtx (SYMBOL_REF, Pmode, "memset");
+ bzero_libfunc = gen_rtx (SYMBOL_REF, Pmode, "bzero");
+
+ eqsf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__eqsf2");
+ nesf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__nesf2");
+ gtsf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__gtsf2");
+ gesf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__gesf2");
+ ltsf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__ltsf2");
+ lesf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__lesf2");
+
+ eqdf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__eqdf2");
+ nedf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__nedf2");
+ gtdf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__gtdf2");
+ gedf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__gedf2");
+ ltdf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__ltdf2");
+ ledf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__ledf2");
+
+ eqxf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__eqxf2");
+ nexf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__nexf2");
+ gtxf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__gtxf2");
+ gexf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__gexf2");
+ ltxf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__ltxf2");
+ lexf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__lexf2");
+
+ eqtf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__eqtf2");
+ netf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__netf2");
+ gttf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__gttf2");
+ getf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__getf2");
+ lttf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__lttf2");
+ letf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__letf2");
+
+/* Define library calls for quad FP instructions */
+#ifdef EQTF2_LIBCALL
+ eqtf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, EQTF2_LIBCALL);
+#endif
+#ifdef NETF2_LIBCALL
+ netf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, NETF2_LIBCALL);
+#endif
+#ifdef GTTF2_LIBCALL
+ gttf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, GTTF2_LIBCALL);
+#endif
+#ifdef GETF2_LIBCALL
+ getf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, GETF2_LIBCALL);
+#endif
+#ifdef LTTF2_LIBCALL
+ lttf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, LTTF2_LIBCALL);
+#endif
+#ifdef LETF2_LIBCALL
+ letf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, LETF2_LIBCALL);
+#endif
+
+ floatsisf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floatsisf");
+ floatdisf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floatdisf");
+ floattisf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floattisf");
+
+ floatsidf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floatsidf");
+ floatdidf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floatdidf");
+ floattidf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floattidf");
+
+ floatsixf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floatsixf");
+ floatdixf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floatdixf");
+ floattixf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floattixf");
+
+ floatsitf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floatsitf");
+ floatditf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floatditf");
+ floattitf_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__floattitf");
+
+ fixsfsi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixsfsi");
+ fixsfdi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixsfdi");
+ fixsfti_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixsfti");
+
+ fixdfsi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixdfsi");
+ fixdfdi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixdfdi");
+ fixdfti_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixdfti");
+
+ fixxfsi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixxfsi");
+ fixxfdi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixxfdi");
+ fixxfti_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixxfti");
+
+ fixtfsi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixtfsi");
+ fixtfdi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixtfdi");
+ fixtfti_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixtfti");
+
+ fixunssfsi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunssfsi");
+ fixunssfdi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunssfdi");
+ fixunssfti_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunssfti");
+
+ fixunsdfsi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunsdfsi");
+ fixunsdfdi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunsdfdi");
+ fixunsdfti_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunsdfti");
+
+ fixunsxfsi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunsxfsi");
+ fixunsxfdi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunsxfdi");
+ fixunsxfti_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunsxfti");
+
+ fixunstfsi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunstfsi");
+ fixunstfdi_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunstfdi");
+ fixunstfti_libfunc = gen_rtx (SYMBOL_REF, Pmode, "__fixunstfti");
+
+/* Define library calls for quad FP instructions */
+#ifdef TRUNCTFSF2_LIBCALL
+ trunctfsf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, TRUNCTFSF2_LIBCALL);
+#endif
+#ifdef TRUNCTFDF2_LIBCALL
+ trunctfdf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, TRUNCTFDF2_LIBCALL);
+#endif
+#ifdef EXTENDSFTF2_LIBCALL
+ extendsftf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, EXTENDSFTF2_LIBCALL);
+#endif
+#ifdef EXTENDDFTF2_LIBCALL
+ extenddftf2_libfunc = gen_rtx (SYMBOL_REF, Pmode, EXTENDDFTF2_LIBCALL);
+#endif
+#ifdef FLOATSITF2_LIBCALL
+ floatsitf_libfunc = gen_rtx (SYMBOL_REF, Pmode, FLOATSITF2_LIBCALL);
+#endif
+#ifdef FIX_TRUNCTFSI2_LIBCALL
+ fixtfsi_libfunc = gen_rtx (SYMBOL_REF, Pmode, FIX_TRUNCTFSI2_LIBCALL);
+#endif
+#ifdef FIXUNS_TRUNCTFSI2_LIBCALL
+ fixunstfsi_libfunc = gen_rtx (SYMBOL_REF, Pmode, FIXUNS_TRUNCTFSI2_LIBCALL);
+#endif
+
+#ifdef INIT_TARGET_OPTABS
+ /* Allow the target to add more libcalls or rename some, etc. */
+ INIT_TARGET_OPTABS;
+#endif
+}
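+
+/* For reference: a target can rename individual libcalls through the
+   *_LIBCALL macros tested above, or hook INIT_TARGET_OPTABS, e.g. with
+   a tm.h fragment such as:
+
+	#define UDIVSI3_LIBCALL ".udiv"
+	#define INIT_TARGET_OPTABS init_target_libfuncs ()
+
+   The names here are illustrative only, not taken from any particular
+   target header.  */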
+
+#ifdef BROKEN_LDEXP
+
+/* SCO 3.2 apparently has a broken ldexp. */
+
+double
+ldexp(x,n)
+ double x;
+ int n;
+{
+  /* Only positive exponents are scaled here; a negative N returns X
+     unchanged.  */
+  if (n > 0)
+ while (n--)
+ x *= 2;
+
+ return x;
+}
+#endif /* BROKEN_LDEXP */
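+
+/* For reference only -- a fuller replacement would also scale down for
+   negative N, which the minimal workaround above does not attempt.
+   Illustrative sketch, deliberately not compiled in:  */
+#if 0
+double
+ldexp (x, n)
+     double x;
+     int n;
+{
+  if (n > 0)
+    while (n--)
+      x *= 2;
+  else
+    while (n++)
+      x /= 2;
+  return x;
+}
+#endif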
diff --git a/gnu/usr.bin/cc/cc_int/print-rtl.c b/gnu/usr.bin/cc/cc_int/print-rtl.c
new file mode 100644
index 0000000..5570639
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/print-rtl.c
@@ -0,0 +1,328 @@
+/* Print RTL for GNU C Compiler.
+ Copyright (C) 1987, 1988, 1992 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include <ctype.h>
+#include <stdio.h>
+#include "rtl.h"
+
+
+/* How to print out a register name.
+ We don't use PRINT_REG because some definitions of PRINT_REG
+ don't work here. */
+#ifndef DEBUG_PRINT_REG
+#define DEBUG_PRINT_REG(RTX, CODE, FILE) \
+ fprintf ((FILE), "%d %s", REGNO (RTX), reg_names[REGNO (RTX)])
+#endif
+
+/* Array containing all of the register names */
+
+#ifdef DEBUG_REGISTER_NAMES
+static char *reg_names[] = DEBUG_REGISTER_NAMES;
+#else
+static char *reg_names[] = REGISTER_NAMES;
+#endif
+
+static FILE *outfile;
+
+char spaces[] = " ";
+
+static int sawclose = 0;
+
+/* Names for patterns. Non-zero only when linked with insn-output.c. */
+
+extern char **insn_name_ptr;
+
+/* Print IN_RTX onto OUTFILE. This is the recursive part of printing. */
+
+static void
+print_rtx (in_rtx)
+ register rtx in_rtx;
+{
+ static int indent;
+ register int i, j;
+ register char *format_ptr;
+ register int is_insn;
+
+ if (sawclose)
+ {
+ fprintf (outfile, "\n%s",
+ (spaces + (sizeof spaces - 1 - indent * 2)));
+ sawclose = 0;
+ }
+
+ if (in_rtx == 0)
+ {
+ fprintf (outfile, "(nil)");
+ sawclose = 1;
+ return;
+ }
+
+ /* print name of expression code */
+ fprintf (outfile, "(%s", GET_RTX_NAME (GET_CODE (in_rtx)));
+
+ if (in_rtx->in_struct)
+ fprintf (outfile, "/s");
+
+ if (in_rtx->volatil)
+ fprintf (outfile, "/v");
+
+ if (in_rtx->unchanging)
+ fprintf (outfile, "/u");
+
+ if (in_rtx->integrated)
+ fprintf (outfile, "/i");
+
+ if (GET_MODE (in_rtx) != VOIDmode)
+ {
+ /* Print REG_NOTE names for EXPR_LIST and INSN_LIST. */
+ if (GET_CODE (in_rtx) == EXPR_LIST || GET_CODE (in_rtx) == INSN_LIST)
+ fprintf (outfile, ":%s", GET_REG_NOTE_NAME (GET_MODE (in_rtx)));
+ else
+ fprintf (outfile, ":%s", GET_MODE_NAME (GET_MODE (in_rtx)));
+ }
+
+ is_insn = (GET_RTX_CLASS (GET_CODE (in_rtx)) == 'i');
+ format_ptr = GET_RTX_FORMAT (GET_CODE (in_rtx));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (in_rtx)); i++)
+ switch (*format_ptr++)
+ {
+ case 'S':
+ case 's':
+ if (XSTR (in_rtx, i) == 0)
+ fprintf (outfile, " \"\"");
+ else
+ fprintf (outfile, " (\"%s\")", XSTR (in_rtx, i));
+ sawclose = 1;
+ break;
+
+ /* 0 indicates a field for internal use that should not be printed. */
+ case '0':
+ break;
+
+ case 'e':
+ indent += 2;
+ if (!sawclose)
+ fprintf (outfile, " ");
+ print_rtx (XEXP (in_rtx, i));
+ indent -= 2;
+ break;
+
+ case 'E':
+ case 'V':
+ indent += 2;
+ if (sawclose)
+ {
+ fprintf (outfile, "\n%s",
+ (spaces + (sizeof spaces - 1 - indent * 2)));
+ sawclose = 0;
+ }
+ fprintf (outfile, "[ ");
+ if (NULL != XVEC (in_rtx, i))
+ {
+ indent += 2;
+ if (XVECLEN (in_rtx, i))
+ sawclose = 1;
+
+ for (j = 0; j < XVECLEN (in_rtx, i); j++)
+ print_rtx (XVECEXP (in_rtx, i, j));
+
+ indent -= 2;
+ }
+ if (sawclose)
+ fprintf (outfile, "\n%s",
+ (spaces + (sizeof spaces - 1 - indent * 2)));
+
+ fprintf (outfile, "] ");
+ sawclose = 1;
+ indent -= 2;
+ break;
+
+ case 'w':
+ fprintf (outfile,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ " %d",
+#else
+ " %ld",
+#endif
+ XWINT (in_rtx, i));
+ break;
+
+ case 'i':
+ {
+ register int value = XINT (in_rtx, i);
+
+ if (GET_CODE (in_rtx) == REG && value < FIRST_PSEUDO_REGISTER)
+ {
+ fputc (' ', outfile);
+ DEBUG_PRINT_REG (in_rtx, 0, outfile);
+ }
+ else
+ fprintf (outfile, " %d", value);
+ }
+ if (is_insn && &INSN_CODE (in_rtx) == &XINT (in_rtx, i)
+ && insn_name_ptr
+ && XINT (in_rtx, i) >= 0)
+ fprintf (outfile, " {%s}", insn_name_ptr[XINT (in_rtx, i)]);
+ sawclose = 0;
+ break;
+
+ /* Print NOTE_INSN names rather than integer codes. */
+
+ case 'n':
+ if (XINT (in_rtx, i) <= 0)
+ fprintf (outfile, " %s", GET_NOTE_INSN_NAME (XINT (in_rtx, i)));
+ else
+ fprintf (outfile, " %d", XINT (in_rtx, i));
+ sawclose = 0;
+ break;
+
+ case 'u':
+ if (XEXP (in_rtx, i) != NULL)
+ fprintf (outfile, " %d", INSN_UID (XEXP (in_rtx, i)));
+ else
+ fprintf (outfile, " 0");
+ sawclose = 0;
+ break;
+
+ case '*':
+ fprintf (outfile, " Unknown");
+ sawclose = 0;
+ break;
+
+ default:
+ fprintf (stderr,
+ "switch format wrong in rtl.print_rtx(). format was: %c.\n",
+ format_ptr[-1]);
+ abort ();
+ }
+
+ fprintf (outfile, ")");
+ sawclose = 1;
+}
+
+/* Call this function from the debugger to see what X looks like. */
+
+void
+debug_rtx (x)
+ rtx x;
+{
+ outfile = stderr;
+ print_rtx (x);
+ fprintf (stderr, "\n");
+}
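+
+/* Typical output (illustrative): a hard register prints as
+   "(reg:SI 0 ax)" -- expression code, machine mode, then register
+   number and name via DEBUG_PRINT_REG in the 'i' case above.  */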
+
+/* Count of rtx's to print with debug_rtx_list.
+   This global exists because gdb user-defined commands have no arguments.  */
+
+int debug_rtx_count = 0; /* 0 is treated as equivalent to 1 */
+
+/* Call this function to print list from X on.
+
+ N is a count of the rtx's to print. Positive values print from the specified
+ rtx on. Negative values print a window around the rtx.
+   E.g., -5 prints 2 rtx's on either side (in addition to the specified rtx). */
+
+void
+debug_rtx_list (x, n)
+ rtx x;
+ int n;
+{
+ int i,count;
+ rtx insn;
+
+ count = n == 0 ? 1 : n < 0 ? -n : n;
+
+ /* If we are printing a window, back up to the start. */
+
+ if (n < 0)
+ for (i = count / 2; i > 0; i--)
+ {
+ if (PREV_INSN (x) == 0)
+ break;
+ x = PREV_INSN (x);
+ }
+
+ for (i = count, insn = x; i > 0 && insn != 0; i--, insn = NEXT_INSN (insn))
+ debug_rtx (insn);
+}
+
+/* Call this function to search an rtx list to find one with insn uid UID,
+ and then call debug_rtx_list to print it, using DEBUG_RTX_COUNT.
+ The found insn is returned to enable further debugging analysis. */
+
+rtx
+debug_rtx_find(x, uid)
+ rtx x;
+ int uid;
+{
+ while (x != 0 && INSN_UID (x) != uid)
+ x = NEXT_INSN (x);
+ if (x != 0)
+ {
+ debug_rtx_list (x, debug_rtx_count);
+ return x;
+ }
+ else
+ {
+ fprintf (stderr, "insn uid %d not found\n", uid);
+ return 0;
+ }
+}
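+
+/* Example debugger usage (illustrative): with debug_rtx_count = -5,
+   "call debug_rtx_find (get_insns (), 42)" prints insn uid 42 together
+   with the two insns on either side of it.  */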
+
+/* External entry point for printing a chain of insns
+ starting with RTX_FIRST onto file OUTF.
+ A blank line separates insns.
+
+ If RTX_FIRST is not an insn, then it alone is printed, with no newline. */
+
+void
+print_rtl (outf, rtx_first)
+ FILE *outf;
+ rtx rtx_first;
+{
+ register rtx tmp_rtx;
+
+ outfile = outf;
+ sawclose = 0;
+
+ if (rtx_first == 0)
+ fprintf (outf, "(nil)\n");
+ else
+ switch (GET_CODE (rtx_first))
+ {
+ case INSN:
+ case JUMP_INSN:
+ case CALL_INSN:
+ case NOTE:
+ case CODE_LABEL:
+ case BARRIER:
+ for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx))
+ {
+ print_rtx (tmp_rtx);
+ fprintf (outfile, "\n");
+ }
+ break;
+
+ default:
+ print_rtx (rtx_first);
+ }
+}
diff --git a/gnu/usr.bin/cc/cc_int/print-tree.c b/gnu/usr.bin/cc/cc_int/print-tree.c
new file mode 100644
index 0000000..f4f878f
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/print-tree.c
@@ -0,0 +1,642 @@
+/* Prints out tree in human readable form - GNU C-compiler
+ Copyright (C) 1990, 1991, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include "tree.h"
+#include <stdio.h>
+
+extern char **tree_code_name;
+
+extern char *mode_name[];
+
+void print_node ();
+void indent_to ();
+
+/* Define the hash table of nodes already seen.
+ Such nodes are not repeated; brief cross-references are used. */
+
+#define HASH_SIZE 37
+
+struct bucket
+{
+ tree node;
+ struct bucket *next;
+};
+
+static struct bucket **table;
+
+/* Print the node NODE on standard error, for debugging.
+ Most nodes referred to by this one are printed recursively
+ down to a depth of six. */
+
+void
+debug_tree (node)
+ tree node;
+{
+ char *object = (char *) oballoc (0);
+
+ table = (struct bucket **) oballoc (HASH_SIZE * sizeof (struct bucket *));
+ bzero ((char *) table, HASH_SIZE * sizeof (struct bucket *));
+ print_node (stderr, "", node, 0);
+ table = 0;
+ obfree (object);
+ fprintf (stderr, "\n");
+}
+
+/* Print a node in brief fashion, with just the code, address and name. */
+
+void
+print_node_brief (file, prefix, node, indent)
+ FILE *file;
+ char *prefix;
+ tree node;
+ int indent;
+{
+ char class;
+
+ if (node == 0)
+ return;
+
+ class = TREE_CODE_CLASS (TREE_CODE (node));
+
+ /* Always print the slot this node is in, and its code, address and
+ name if any. */
+ if (indent > 0)
+ fprintf (file, " ");
+ fprintf (file, "%s <%s ", prefix, tree_code_name[(int) TREE_CODE (node)]);
+ fprintf (file, HOST_PTR_PRINTF, (HOST_WIDE_INT) node);
+
+ if (class == 'd')
+ {
+ if (DECL_NAME (node))
+ fprintf (file, " %s", IDENTIFIER_POINTER (DECL_NAME (node)));
+ }
+ else if (class == 't')
+ {
+ if (TYPE_NAME (node))
+ {
+ if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE)
+ fprintf (file, " %s", IDENTIFIER_POINTER (TYPE_NAME (node)));
+ else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (node)))
+ fprintf (file, " %s",
+ IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (node))));
+ }
+ }
+ if (TREE_CODE (node) == IDENTIFIER_NODE)
+ fprintf (file, " %s", IDENTIFIER_POINTER (node));
+ /* We might as well always print the value of an integer. */
+ if (TREE_CODE (node) == INTEGER_CST)
+ {
+ if (TREE_CONSTANT_OVERFLOW (node))
+ fprintf (file, " overflow");
+
+ if (TREE_INT_CST_HIGH (node) == 0)
+ fprintf (file, " %1u", TREE_INT_CST_LOW (node));
+ else if (TREE_INT_CST_HIGH (node) == -1
+ && TREE_INT_CST_LOW (node) != 0)
+ fprintf (file, " -%1u", -TREE_INT_CST_LOW (node));
+ else
+ fprintf (file,
+#if HOST_BITS_PER_WIDE_INT == 64
+#if HOST_BITS_PER_WIDE_INT != HOST_BITS_PER_INT
+ " 0x%lx%016lx",
+#else
+ " 0x%x%016x",
+#endif
+#else
+#if HOST_BITS_PER_WIDE_INT != HOST_BITS_PER_INT
+ " 0x%lx%08lx",
+#else
+ " 0x%x%08x",
+#endif
+#endif
+ TREE_INT_CST_HIGH (node), TREE_INT_CST_LOW (node));
+ }
+ if (TREE_CODE (node) == REAL_CST)
+ {
+#ifndef REAL_IS_NOT_DOUBLE
+ fprintf (file, " %e", TREE_REAL_CST (node));
+#else
+ {
+ int i;
+ unsigned char *p = (unsigned char *) &TREE_REAL_CST (node);
+ fprintf (file, " 0x");
+ for (i = 0; i < sizeof TREE_REAL_CST (node); i++)
+ fprintf (file, "%02x", *p++);
+ fprintf (file, "");
+ }
+#endif /* REAL_IS_NOT_DOUBLE */
+ }
+
+ fprintf (file, ">");
+}
+
+void
+indent_to (file, column)
+ FILE *file;
+ int column;
+{
+ int i;
+
+ /* Since this is the long way, indent to desired column. */
+ if (column > 0)
+ fprintf (file, "\n");
+ for (i = 0; i < column; i++)
+ fprintf (file, " ");
+}
+
+/* Print the node NODE in full on file FILE, preceded by PREFIX,
+ starting in column INDENT. */
+
+void
+print_node (file, prefix, node, indent)
+ FILE *file;
+ char *prefix;
+ tree node;
+ int indent;
+{
+ int hash;
+ struct bucket *b;
+ enum machine_mode mode;
+ char class;
+ int len;
+ int first_rtl;
+ int i;
+
+ if (node == 0)
+ return;
+
+ class = TREE_CODE_CLASS (TREE_CODE (node));
+
+ /* Don't get too deep in nesting. If the user wants to see deeper,
+ it is easy to use the address of a lowest-level node
+ as an argument in another call to debug_tree. */
+
+ if (indent > 24)
+ {
+ print_node_brief (file, prefix, node, indent);
+ return;
+ }
+
+ if (indent > 8 && (class == 't' || class == 'd'))
+ {
+ print_node_brief (file, prefix, node, indent);
+ return;
+ }
+
+  /* It is unsafe to look at any other fields of an ERROR_MARK node. */
+ if (TREE_CODE (node) == ERROR_MARK)
+ {
+ print_node_brief (file, prefix, node, indent);
+ return;
+ }
+
+ hash = ((unsigned HOST_WIDE_INT) node) % HASH_SIZE;
+
+ /* If node is in the table, just mention its address. */
+ for (b = table[hash]; b; b = b->next)
+ if (b->node == node)
+ {
+ print_node_brief (file, prefix, node, indent);
+ return;
+ }
+
+ /* Add this node to the table. */
+ b = (struct bucket *) oballoc (sizeof (struct bucket));
+ b->node = node;
+ b->next = table[hash];
+ table[hash] = b;
+
+ /* Indent to the specified column, since this is the long form. */
+ indent_to (file, indent);
+
+ /* Print the slot this node is in, and its code, and address. */
+ fprintf (file, "%s <%s ", prefix, tree_code_name[(int) TREE_CODE (node)]);
+ fprintf (file, HOST_PTR_PRINTF, (HOST_WIDE_INT) node);
+
+ /* Print the name, if any. */
+ if (class == 'd')
+ {
+ if (DECL_NAME (node))
+ fprintf (file, " %s", IDENTIFIER_POINTER (DECL_NAME (node)));
+ }
+ else if (class == 't')
+ {
+ if (TYPE_NAME (node))
+ {
+ if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE)
+ fprintf (file, " %s", IDENTIFIER_POINTER (TYPE_NAME (node)));
+ else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (node)))
+ fprintf (file, " %s",
+ IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (node))));
+ }
+ }
+ if (TREE_CODE (node) == IDENTIFIER_NODE)
+ fprintf (file, " %s", IDENTIFIER_POINTER (node));
+
+ if (TREE_CODE (node) == INTEGER_CST)
+ {
+ if (indent <= 4)
+ print_node_brief (file, "type", TREE_TYPE (node), indent + 4);
+ }
+ else
+ {
+ print_node (file, "type", TREE_TYPE (node), indent + 4);
+ if (TREE_TYPE (node))
+ indent_to (file, indent + 3);
+
+ print_obstack_name ((char *) node, file, "");
+ indent_to (file, indent + 3);
+ }
+
+ /* If a permanent object is in the wrong obstack, or the reverse, warn. */
+ if (object_permanent_p (node) != TREE_PERMANENT (node))
+ {
+ if (TREE_PERMANENT (node))
+ fputs (" !!permanent object in non-permanent obstack!!", file);
+ else
+ fputs (" !!non-permanent object in permanent obstack!!", file);
+ indent_to (file, indent + 3);
+ }
+
+ if (TREE_SIDE_EFFECTS (node))
+ fputs (" side-effects", file);
+ if (TREE_READONLY (node))
+ fputs (" readonly", file);
+ if (TREE_CONSTANT (node))
+ fputs (" constant", file);
+ if (TREE_ADDRESSABLE (node))
+ fputs (" addressable", file);
+ if (TREE_THIS_VOLATILE (node))
+ fputs (" volatile", file);
+ if (TREE_UNSIGNED (node))
+ fputs (" unsigned", file);
+ if (TREE_ASM_WRITTEN (node))
+ fputs (" asm_written", file);
+ if (TREE_USED (node))
+ fputs (" used", file);
+ if (TREE_RAISES (node))
+ fputs (" raises", file);
+ if (TREE_PERMANENT (node))
+ fputs (" permanent", file);
+ if (TREE_PUBLIC (node))
+ fputs (" public", file);
+ if (TREE_STATIC (node))
+ fputs (" static", file);
+ if (TREE_LANG_FLAG_0 (node))
+ fputs (" tree_0", file);
+ if (TREE_LANG_FLAG_1 (node))
+ fputs (" tree_1", file);
+ if (TREE_LANG_FLAG_2 (node))
+ fputs (" tree_2", file);
+ if (TREE_LANG_FLAG_3 (node))
+ fputs (" tree_3", file);
+ if (TREE_LANG_FLAG_4 (node))
+ fputs (" tree_4", file);
+ if (TREE_LANG_FLAG_5 (node))
+ fputs (" tree_5", file);
+ if (TREE_LANG_FLAG_6 (node))
+ fputs (" tree_6", file);
+
+ /* DECL_ nodes have additional attributes. */
+
+ switch (TREE_CODE_CLASS (TREE_CODE (node)))
+ {
+ case 'd':
+ mode = DECL_MODE (node);
+
+ if (DECL_EXTERNAL (node))
+ fputs (" external", file);
+ if (DECL_NONLOCAL (node))
+ fputs (" nonlocal", file);
+ if (DECL_REGISTER (node))
+ fputs (" regdecl", file);
+ if (DECL_INLINE (node))
+ fputs (" inline", file);
+ if (DECL_BIT_FIELD (node))
+ fputs (" bit-field", file);
+ if (DECL_VIRTUAL_P (node))
+ fputs (" virtual", file);
+ if (DECL_IGNORED_P (node))
+ fputs (" ignored", file);
+ if (DECL_IN_SYSTEM_HEADER (node))
+ fputs (" in_system_header", file);
+ if (DECL_LANG_FLAG_0 (node))
+ fputs (" decl_0", file);
+ if (DECL_LANG_FLAG_1 (node))
+ fputs (" decl_1", file);
+ if (DECL_LANG_FLAG_2 (node))
+ fputs (" decl_2", file);
+ if (DECL_LANG_FLAG_3 (node))
+ fputs (" decl_3", file);
+ if (DECL_LANG_FLAG_4 (node))
+ fputs (" decl_4", file);
+ if (DECL_LANG_FLAG_5 (node))
+ fputs (" decl_5", file);
+ if (DECL_LANG_FLAG_6 (node))
+ fputs (" decl_6", file);
+ if (DECL_LANG_FLAG_7 (node))
+ fputs (" decl_7", file);
+
+ fprintf (file, " %s", mode_name[(int) mode]);
+
+ fprintf (file, " file %s line %d",
+ DECL_SOURCE_FILE (node), DECL_SOURCE_LINE (node));
+
+ print_node (file, "size", DECL_SIZE (node), indent + 4);
+ indent_to (file, indent + 3);
+ if (TREE_CODE (node) != FUNCTION_DECL)
+ fprintf (file, " align %d", DECL_ALIGN (node));
+ else if (DECL_INLINE (node))
+ fprintf (file, " frame_size %d", DECL_FRAME_SIZE (node));
+ else if (DECL_BUILT_IN (node))
+ fprintf (file, " built-in code %d", DECL_FUNCTION_CODE (node));
+ if (TREE_CODE (node) == FIELD_DECL)
+ print_node (file, "bitpos", DECL_FIELD_BITPOS (node), indent + 4);
+ print_node_brief (file, "context", DECL_CONTEXT (node), indent + 4);
+ print_node_brief (file, "abstract_origin",
+ DECL_ABSTRACT_ORIGIN (node), indent + 4);
+
+ print_node (file, "arguments", DECL_ARGUMENTS (node), indent + 4);
+ print_node (file, "result", DECL_RESULT (node), indent + 4);
+ print_node_brief (file, "initial", DECL_INITIAL (node), indent + 4);
+
+ print_lang_decl (file, node, indent);
+
+ if (DECL_RTL (node) != 0)
+ {
+ indent_to (file, indent + 4);
+ print_rtl (file, DECL_RTL (node));
+ }
+
+ if (DECL_SAVED_INSNS (node) != 0)
+ {
+ indent_to (file, indent + 4);
+ if (TREE_CODE (node) == PARM_DECL)
+ {
+ fprintf (file, "incoming-rtl ");
+ print_rtl (file, DECL_INCOMING_RTL (node));
+ }
+ else if (TREE_CODE (node) == FUNCTION_DECL)
+ {
+ fprintf (file, "saved-insns ");
+ fprintf (file, HOST_PTR_PRINTF,
+ (HOST_WIDE_INT) DECL_SAVED_INSNS (node));
+ }
+ }
+
+ /* Print the decl chain only if decl is at second level. */
+ if (indent == 4)
+ print_node (file, "chain", TREE_CHAIN (node), indent + 4);
+ else
+ print_node_brief (file, "chain", TREE_CHAIN (node), indent + 4);
+ break;
+
+ case 't':
+ if (TYPE_NO_FORCE_BLK (node))
+ fputs (" no_force_blk", file);
+ if (TYPE_LANG_FLAG_0 (node))
+ fputs (" type_0", file);
+ if (TYPE_LANG_FLAG_1 (node))
+ fputs (" type_1", file);
+ if (TYPE_LANG_FLAG_2 (node))
+ fputs (" type_2", file);
+ if (TYPE_LANG_FLAG_3 (node))
+ fputs (" type_3", file);
+ if (TYPE_LANG_FLAG_4 (node))
+ fputs (" type_4", file);
+ if (TYPE_LANG_FLAG_5 (node))
+ fputs (" type_5", file);
+ if (TYPE_LANG_FLAG_6 (node))
+ fputs (" type_6", file);
+
+ mode = TYPE_MODE (node);
+ fprintf (file, " %s", mode_name[(int) mode]);
+
+ print_node (file, "size", TYPE_SIZE (node), indent + 4);
+ indent_to (file, indent + 3);
+
+ fprintf (file, " align %d", TYPE_ALIGN (node));
+ fprintf (file, " symtab %d", TYPE_SYMTAB_ADDRESS (node));
+
+ print_node (file, "attributes", TYPE_ATTRIBUTES (node), indent + 4);
+
+ if (TREE_CODE (node) == ARRAY_TYPE || TREE_CODE (node) == SET_TYPE)
+ print_node (file, "domain", TYPE_DOMAIN (node), indent + 4);
+ else if (TREE_CODE (node) == INTEGER_TYPE
+ || TREE_CODE (node) == BOOLEAN_TYPE
+ || TREE_CODE (node) == CHAR_TYPE)
+ {
+ fprintf (file, " precision %d", TYPE_PRECISION (node));
+ print_node (file, "min", TYPE_MIN_VALUE (node), indent + 4);
+ print_node (file, "max", TYPE_MAX_VALUE (node), indent + 4);
+ }
+ else if (TREE_CODE (node) == ENUMERAL_TYPE)
+ {
+ fprintf (file, " precision %d", TYPE_PRECISION (node));
+ print_node (file, "min", TYPE_MIN_VALUE (node), indent + 4);
+ print_node (file, "max", TYPE_MAX_VALUE (node), indent + 4);
+ print_node (file, "values", TYPE_VALUES (node), indent + 4);
+ }
+ else if (TREE_CODE (node) == REAL_TYPE)
+ fprintf (file, " precision %d", TYPE_PRECISION (node));
+ else if (TREE_CODE (node) == RECORD_TYPE
+ || TREE_CODE (node) == UNION_TYPE
+ || TREE_CODE (node) == QUAL_UNION_TYPE)
+ print_node (file, "fields", TYPE_FIELDS (node), indent + 4);
+ else if (TREE_CODE (node) == FUNCTION_TYPE || TREE_CODE (node) == METHOD_TYPE)
+ {
+ if (TYPE_METHOD_BASETYPE (node))
+ print_node_brief (file, "method basetype", TYPE_METHOD_BASETYPE (node), indent + 4);
+ print_node (file, "arg-types", TYPE_ARG_TYPES (node), indent + 4);
+ }
+ if (TYPE_CONTEXT (node))
+ print_node_brief (file, "context", TYPE_CONTEXT (node), indent + 4);
+
+ print_lang_type (file, node, indent);
+
+ if (TYPE_POINTER_TO (node) || TREE_CHAIN (node))
+ indent_to (file, indent + 3);
+ print_node_brief (file, "pointer_to_this", TYPE_POINTER_TO (node), indent + 4);
+ print_node_brief (file, "reference_to_this", TYPE_REFERENCE_TO (node), indent + 4);
+ print_node_brief (file, "chain", TREE_CHAIN (node), indent + 4);
+ break;
+
+ case 'b':
+ print_node (file, "vars", BLOCK_VARS (node), indent + 4);
+ print_node (file, "tags", BLOCK_TYPE_TAGS (node), indent + 4);
+ print_node (file, "supercontext", BLOCK_SUPERCONTEXT (node), indent + 4);
+ print_node (file, "subblocks", BLOCK_SUBBLOCKS (node), indent + 4);
+ print_node (file, "chain", BLOCK_CHAIN (node), indent + 4);
+ print_node (file, "abstract_origin",
+ BLOCK_ABSTRACT_ORIGIN (node), indent + 4);
+ return;
+
+ case 'e':
+ case '<':
+ case '1':
+ case '2':
+ case 'r':
+ case 's':
+ switch (TREE_CODE (node))
+ {
+ case BIND_EXPR:
+ print_node (file, "vars", TREE_OPERAND (node, 0), indent + 4);
+ print_node (file, "body", TREE_OPERAND (node, 1), indent + 4);
+ print_node (file, "block", TREE_OPERAND (node, 2), indent + 4);
+ return;
+ }
+
+ first_rtl = len = tree_code_length[(int) TREE_CODE (node)];
+ /* These kinds of nodes contain rtx's, not trees,
+ after a certain point. Print the rtx's as rtx's. */
+ switch (TREE_CODE (node))
+ {
+ case SAVE_EXPR:
+ first_rtl = 2;
+ break;
+ case CALL_EXPR:
+ first_rtl = 2;
+ break;
+ case METHOD_CALL_EXPR:
+ first_rtl = 3;
+ break;
+ case WITH_CLEANUP_EXPR:
+ /* Should be defined to be 2. */
+ first_rtl = 1;
+ break;
+ case RTL_EXPR:
+ first_rtl = 0;
+ }
+ for (i = 0; i < len; i++)
+ {
+ if (i >= first_rtl)
+ {
+ indent_to (file, indent + 4);
+ fprintf (file, "rtl %d ", i);
+ if (TREE_OPERAND (node, i))
+ print_rtl (file, (struct rtx_def *) TREE_OPERAND (node, i));
+ else
+ fprintf (file, "(nil)");
+ fprintf (file, "\n");
+ }
+ else
+ {
+ char temp[10];
+
+ sprintf (temp, "arg %d", i);
+ print_node (file, temp, TREE_OPERAND (node, i), indent + 4);
+ }
+ }
+ break;
+
+ case 'c':
+ case 'x':
+ switch (TREE_CODE (node))
+ {
+ case INTEGER_CST:
+ if (TREE_CONSTANT_OVERFLOW (node))
+ fprintf (file, " overflow");
+
+ if (TREE_INT_CST_HIGH (node) == 0)
+ fprintf (file, " %1u", TREE_INT_CST_LOW (node));
+ else if (TREE_INT_CST_HIGH (node) == -1
+ && TREE_INT_CST_LOW (node) != 0)
+ fprintf (file, " -%1u", -TREE_INT_CST_LOW (node));
+ else
+ fprintf (file,
+#if HOST_BITS_PER_WIDE_INT == 64
+#if HOST_BITS_PER_WIDE_INT != HOST_BITS_PER_INT
+ " 0x%lx%016lx",
+#else
+ " 0x%x%016x",
+#endif
+#else
+#if HOST_BITS_PER_WIDE_INT != HOST_BITS_PER_INT
+ " 0x%lx%08lx",
+#else
+ " 0x%x%08x",
+#endif
+#endif
+ TREE_INT_CST_HIGH (node), TREE_INT_CST_LOW (node));
+ break;
+
+ case REAL_CST:
+#ifndef REAL_IS_NOT_DOUBLE
+ fprintf (file, " %e", TREE_REAL_CST (node));
+#else
+ {
+ char *p = (char *) &TREE_REAL_CST (node);
+ fprintf (file, " 0x");
+ for (i = 0; i < sizeof TREE_REAL_CST (node); i++)
+ fprintf (file, "%02x", *p++);
+ fprintf (file, "");
+ }
+#endif /* REAL_IS_NOT_DOUBLE */
+ break;
+
+ case COMPLEX_CST:
+ print_node (file, "real", TREE_REALPART (node), indent + 4);
+ print_node (file, "imag", TREE_IMAGPART (node), indent + 4);
+ break;
+
+ case STRING_CST:
+ fprintf (file, " \"%s\"", TREE_STRING_POINTER (node));
+ /* Print the chain at second level. */
+ if (indent == 4)
+ print_node (file, "chain", TREE_CHAIN (node), indent + 4);
+ else
+ print_node_brief (file, "chain", TREE_CHAIN (node), indent + 4);
+ break;
+
+ case IDENTIFIER_NODE:
+ print_lang_identifier (file, node, indent);
+ break;
+
+ case TREE_LIST:
+ print_node (file, "purpose", TREE_PURPOSE (node), indent + 4);
+ print_node (file, "value", TREE_VALUE (node), indent + 4);
+ print_node (file, "chain", TREE_CHAIN (node), indent + 4);
+ break;
+
+ case TREE_VEC:
+ len = TREE_VEC_LENGTH (node);
+ for (i = 0; i < len; i++)
+ if (TREE_VEC_ELT (node, i))
+ {
+ char temp[10];
+ sprintf (temp, "elt %d", i);
+ indent_to (file, indent + 4);
+ print_node_brief (file, temp, TREE_VEC_ELT (node, i), 0);
+ }
+ break;
+
+ case OP_IDENTIFIER:
+ print_node (file, "op1", TREE_PURPOSE (node), indent + 4);
+ print_node (file, "op2", TREE_VALUE (node), indent + 4);
+ }
+
+ break;
+ }
+
+ fprintf (file, ">");
+}
diff --git a/gnu/usr.bin/cc/cc_int/real.c b/gnu/usr.bin/cc/cc_int/real.c
new file mode 100644
index 0000000..cad4343
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/real.c
@@ -0,0 +1,5969 @@
+/* real.c - implementation of REAL_ARITHMETIC, REAL_VALUE_ATOF,
+ and support for XFmode IEEE extended real floating point arithmetic.
+ Copyright (C) 1993, 1994 Free Software Foundation, Inc.
+ Contributed by Stephen L. Moshier (moshier@world.std.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <stdio.h>
+#include <errno.h>
+#include "config.h"
+#include "tree.h"
+
+#ifndef errno
+extern int errno;
+#endif
+
+/* To enable support of XFmode extended real floating point, define
+LONG_DOUBLE_TYPE_SIZE 96 in the tm.h file (m68k.h or i386.h).
+
+To support cross compilation between IEEE, VAX and IBM floating
+point formats, define REAL_ARITHMETIC in the tm.h file.
+
+In either case the machine files (tm.h) must not contain any code
+that tries to use host floating point arithmetic to convert
+REAL_VALUE_TYPEs from `double' to `float', pass them to fprintf,
+etc. In cross-compile situations a REAL_VALUE_TYPE may not
+be intelligible to the host computer's native arithmetic.
+
+The emulator defaults to the host's floating point format so that
+its decimal conversion functions can be used if desired (see
+real.h).
+
+The first part of this file interfaces gcc to ieee.c, which is a
+floating point arithmetic suite that was not written with gcc in
+mind. The interface is followed by ieee.c itself and related
+items. Avoid changing ieee.c unless you have suitable test
+programs available. A special version of the PARANOIA floating
+point arithmetic tester, modified for this purpose, can be found
+on usc.edu : /pub/C-numanal/ieeetest.zoo. Some tutorial
+information on ieee.c is given in my book: S. L. Moshier,
+_Methods and Programs for Mathematical Functions_, Prentice-Hall
+or Simon & Schuster Int'l, 1989. A library of XFmode elementary
+transcendental functions can be obtained by ftp from
+research.att.com: netlib/cephes/ldouble.shar.Z */
+
+/* Type of computer arithmetic.
+ Only one of DEC, IBM, MIEEE, IBMPC, or UNK should get defined.
+
+ `MIEEE' refers generically to big-endian IEEE floating-point data
+ structure. This definition should work in SFmode `float' type and
+ DFmode `double' type on virtually all big-endian IEEE machines.
+ If LONG_DOUBLE_TYPE_SIZE has been defined to be 96, then MIEEE
+ also invokes the particular XFmode (`long double' type) data
+ structure used by the Motorola 680x0 series processors.
+
+ `IBMPC' refers generally to little-endian IEEE machines. In this
+ case, if LONG_DOUBLE_TYPE_SIZE has been defined to be 96, then
+ IBMPC also invokes the particular XFmode `long double' data
+ structure used by the Intel 80x86 series processors.
+
+ `DEC' refers specifically to the Digital Equipment Corp PDP-11
+ and VAX floating point data structure. This model currently
+ supports no type wider than DFmode.
+
+ `IBM' refers specifically to the IBM System/370 and compatible
+ floating point data structure. This model currently supports
+ no type wider than DFmode. The IBM conversions were contributed by
+ frank@atom.ansto.gov.au (Frank Crawford).
+
+ If LONG_DOUBLE_TYPE_SIZE = 64 (the default, unless tm.h defines it)
+ then `long double' and `double' are both implemented, but they
+ both mean DFmode. In this case, the software floating-point
+ support available here is activated by writing
+ #define REAL_ARITHMETIC
+ in tm.h.
+
+ The case LONG_DOUBLE_TYPE_SIZE = 128 activates TFmode support
+ and may deactivate XFmode since `long double' is used to refer
+ to both modes.
+
+ The macros FLOAT_WORDS_BIG_ENDIAN, HOST_FLOAT_WORDS_BIG_ENDIAN,
+ contributed by Richard Earnshaw <Richard.Earnshaw@cl.cam.ac.uk>,
+ separate the floating point unit's endian-ness from that of
+ the integer addressing. This permits one to define a big-endian
+ FPU on a little-endian machine (e.g., ARM). An extension to
+ BYTES_BIG_ENDIAN may be required for some machines in the future.
+ These optional macros may be defined in tm.h. In real.h, they
+ default to WORDS_BIG_ENDIAN, etc., so there is no need to define
+ them for any normal host or target machine on which the floats
+ and the integers have the same endian-ness. */
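+
+/* For example, an m68k or i386 configuration enabling XFmode support
+   and the emulator would contain, in its tm.h (fragment):
+
+	#define LONG_DOUBLE_TYPE_SIZE 96
+	#define REAL_ARITHMETIC
+ */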
+
+
+/* The following converts gcc macros into the ones used by this file. */
+
+/* REAL_ARITHMETIC defined means that macros in real.h are
+ defined to call emulator functions. */
+#ifdef REAL_ARITHMETIC
+
+#if TARGET_FLOAT_FORMAT == VAX_FLOAT_FORMAT
+/* PDP-11, Pro350, VAX: */
+#define DEC 1
+#else /* it's not VAX */
+#if TARGET_FLOAT_FORMAT == IBM_FLOAT_FORMAT
+/* IBM System/370 style */
+#define IBM 1
+#else /* it's also not an IBM */
+#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+#if FLOAT_WORDS_BIG_ENDIAN
+/* Motorola IEEE, high order words come first (Sun workstation): */
+#define MIEEE 1
+#else /* not big-endian */
+/* Intel IEEE, low order words come first:
+ */
+#define IBMPC 1
+#endif /* big-endian */
+#else /* it's not IEEE either */
+/* UNKnown arithmetic. We don't support this and can't go on. */
+unknown arithmetic type
+#define UNK 1
+#endif /* not IEEE */
+#endif /* not IBM */
+#endif /* not VAX */
+
+#else
+/* REAL_ARITHMETIC not defined means that the *host's* data
+ structure will be used. It may differ by endian-ness from the
+ target machine's structure and will get its ends swapped
+ accordingly (but not here). Probably only the decimal <-> binary
+ functions in this file will actually be used in this case. */
+
+#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
+#define DEC 1
+#else /* it's not VAX */
+#if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
+/* IBM System/370 style */
+#define IBM 1
+#else /* it's also not an IBM */
+#if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+#if HOST_FLOAT_WORDS_BIG_ENDIAN
+#define MIEEE 1
+#else /* not big-endian */
+#define IBMPC 1
+#endif /* big-endian */
+#else /* it's not IEEE either */
+unknown arithmetic type
+#define UNK 1
+#endif /* not IEEE */
+#endif /* not IBM */
+#endif /* not VAX */
+
+#endif /* REAL_ARITHMETIC not defined */
+
+/* Define INFINITY for support of infinity.
+ Define NANS for support of Not-a-Number's (NaN's). */
+#if !defined(DEC) && !defined(IBM)
+#define INFINITY
+#define NANS
+#endif
+
+/* Support of NaNs requires support of infinity. */
+#ifdef NANS
+#ifndef INFINITY
+#define INFINITY
+#endif
+#endif
+
+/* Find a host integer type that is at least 16 bits wide,
+ and another type at least twice whatever that size is. */
+
+#if HOST_BITS_PER_CHAR >= 16
+#define EMUSHORT char
+#define EMUSHORT_SIZE HOST_BITS_PER_CHAR
+#define EMULONG_SIZE (2 * HOST_BITS_PER_CHAR)
+#else
+#if HOST_BITS_PER_SHORT >= 16
+#define EMUSHORT short
+#define EMUSHORT_SIZE HOST_BITS_PER_SHORT
+#define EMULONG_SIZE (2 * HOST_BITS_PER_SHORT)
+#else
+#if HOST_BITS_PER_INT >= 16
+#define EMUSHORT int
+#define EMUSHORT_SIZE HOST_BITS_PER_INT
+#define EMULONG_SIZE (2 * HOST_BITS_PER_INT)
+#else
+#if HOST_BITS_PER_LONG >= 16
+#define EMUSHORT long
+#define EMUSHORT_SIZE HOST_BITS_PER_LONG
+#define EMULONG_SIZE (2 * HOST_BITS_PER_LONG)
+#else
+/* You will have to modify this program to have a smaller unit size. */
+#define EMU_NON_COMPILE
+#endif
+#endif
+#endif
+#endif
+
+#if HOST_BITS_PER_SHORT >= EMULONG_SIZE
+#define EMULONG short
+#else
+#if HOST_BITS_PER_INT >= EMULONG_SIZE
+#define EMULONG int
+#else
+#if HOST_BITS_PER_LONG >= EMULONG_SIZE
+#define EMULONG long
+#else
+#if HOST_BITS_PER_LONG_LONG >= EMULONG_SIZE
+#define EMULONG long long int
+#else
+/* You will have to modify this program to have a smaller unit size. */
+#define EMU_NON_COMPILE
+#endif
+#endif
+#endif
+#endif
+
+
+/* The host interface doesn't work if no 16-bit size exists. */
+#if EMUSHORT_SIZE != 16
+#define EMU_NON_COMPILE
+#endif
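+
+/* On a typical host with 16-bit shorts and 32-bit ints, the cascade
+   above selects EMUSHORT = short and EMULONG = int.  */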
+
+/* OK to continue compilation. */
+#ifndef EMU_NON_COMPILE
+
+/* Construct macros to translate between REAL_VALUE_TYPE and e type.
+ In GET_REAL and PUT_REAL, r and e are pointers.
+ A REAL_VALUE_TYPE is guaranteed to occupy contiguous locations
+ in memory, with no holes. */
+
+#if LONG_DOUBLE_TYPE_SIZE == 96
+/* Number of 16 bit words in external e type format */
+#define NE 6
+#define MAXDECEXP 4932
+#define MINDECEXP -4956
+#define GET_REAL(r,e) bcopy (r, e, 2*NE)
+#define PUT_REAL(e,r) bcopy (e, r, 2*NE)
+#else /* no XFmode */
+#if LONG_DOUBLE_TYPE_SIZE == 128
+#define NE 10
+#define MAXDECEXP 4932
+#define MINDECEXP -4977
+#define GET_REAL(r,e) bcopy (r, e, 2*NE)
+#define PUT_REAL(e,r) bcopy (e, r, 2*NE)
+#else
+#define NE 6
+#define MAXDECEXP 4932
+#define MINDECEXP -4956
+#ifdef REAL_ARITHMETIC
+/* Emulator uses target format internally
+ but host stores it in host endian-ness. */
+
+#if HOST_FLOAT_WORDS_BIG_ENDIAN == FLOAT_WORDS_BIG_ENDIAN
+#define GET_REAL(r,e) e53toe ((unsigned EMUSHORT*) (r), (e))
+#define PUT_REAL(e,r) etoe53 ((e), (unsigned EMUSHORT *) (r))
+
+#else /* endian-ness differs */
+/* emulator uses target endian-ness internally */
+#define GET_REAL(r,e) \
+do { unsigned EMUSHORT w[4]; \
+ w[3] = ((EMUSHORT *) r)[0]; \
+ w[2] = ((EMUSHORT *) r)[1]; \
+ w[1] = ((EMUSHORT *) r)[2]; \
+ w[0] = ((EMUSHORT *) r)[3]; \
+ e53toe (w, (e)); } while (0)
+
+#define PUT_REAL(e,r) \
+do { unsigned EMUSHORT w[4]; \
+ etoe53 ((e), w); \
+ *((EMUSHORT *) r) = w[3]; \
+ *((EMUSHORT *) r + 1) = w[2]; \
+ *((EMUSHORT *) r + 2) = w[1]; \
+ *((EMUSHORT *) r + 3) = w[0]; } while (0)
+
+#endif /* endian-ness differs */
+
+#else /* not REAL_ARITHMETIC */
+
+/* emulator uses host format */
+#define GET_REAL(r,e) e53toe ((unsigned EMUSHORT *) (r), (e))
+#define PUT_REAL(e,r) etoe53 ((e), (unsigned EMUSHORT *) (r))
+
+#endif /* not REAL_ARITHMETIC */
+#endif /* not TFmode */
+#endif /* no XFmode */
+
+
+/* Number of 16 bit words in internal format */
+#define NI (NE+3)
+
+/* Array offset to exponent */
+#define E 1
+
+/* Array offset to high guard word */
+#define M 2
+
+/* Number of bits of precision */
+#define NBITS ((NI-4)*16)
+
+/* Maximum number of decimal digits in ASCII conversion
+ * = NBITS*log10(2).  (8/27 = 0.296 approximates log10(2) = 0.301
+ * from below, so NDEC slightly underestimates.)
+ */
+#define NDEC (NBITS*8/27)
+
+/* The exponent of 1.0 */
+#define EXONE (0x3fff)
+
+extern int extra_warnings;
+extern unsigned EMUSHORT ezero[], ehalf[], eone[], etwo[];
+extern unsigned EMUSHORT elog2[], esqrt2[];
+
+static void endian PROTO((unsigned EMUSHORT *, long *,
+ enum machine_mode));
+static void eclear PROTO((unsigned EMUSHORT *));
+static void emov PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void eabs PROTO((unsigned EMUSHORT *));
+static void eneg PROTO((unsigned EMUSHORT *));
+static int eisneg PROTO((unsigned EMUSHORT *));
+static int eisinf PROTO((unsigned EMUSHORT *));
+static int eisnan PROTO((unsigned EMUSHORT *));
+static void einfin PROTO((unsigned EMUSHORT *));
+static void enan PROTO((unsigned EMUSHORT *, int));
+static void emovi PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void emovo PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void ecleaz PROTO((unsigned EMUSHORT *));
+static void ecleazs PROTO((unsigned EMUSHORT *));
+static void emovz PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void einan PROTO((unsigned EMUSHORT *));
+static int eiisnan PROTO((unsigned EMUSHORT *));
+static int eiisneg PROTO((unsigned EMUSHORT *));
+static void eiinfin PROTO((unsigned EMUSHORT *));
+static int eiisinf PROTO((unsigned EMUSHORT *));
+static int ecmpm PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void eshdn1 PROTO((unsigned EMUSHORT *));
+static void eshup1 PROTO((unsigned EMUSHORT *));
+static void eshdn8 PROTO((unsigned EMUSHORT *));
+static void eshup8 PROTO((unsigned EMUSHORT *));
+static void eshup6 PROTO((unsigned EMUSHORT *));
+static void eshdn6 PROTO((unsigned EMUSHORT *));
+static void eaddm PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void esubm PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void m16m PROTO((unsigned int, unsigned short *,
+ unsigned short *));
+static int edivm PROTO((unsigned short *, unsigned short *));
+static int emulm PROTO((unsigned short *, unsigned short *));
+static void emdnorm PROTO((unsigned EMUSHORT *, int, int, EMULONG, int));
+static void esub PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ unsigned EMUSHORT *));
+static void eadd PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ unsigned EMUSHORT *));
+static void eadd1 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ unsigned EMUSHORT *));
+static void ediv PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ unsigned EMUSHORT *));
+static void emul PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ unsigned EMUSHORT *));
+static void e53toe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void e64toe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void e113toe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void e24toe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etoe113 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void toe113 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etoe64 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void toe64 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etoe53 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void toe53 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etoe24 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void toe24 PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static int ecmp PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void eround PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void ltoe PROTO((HOST_WIDE_INT *, unsigned EMUSHORT *));
+static void ultoe PROTO((unsigned HOST_WIDE_INT *, unsigned EMUSHORT *));
+static void eifrac PROTO((unsigned EMUSHORT *, HOST_WIDE_INT *,
+ unsigned EMUSHORT *));
+static void euifrac PROTO((unsigned EMUSHORT *, unsigned HOST_WIDE_INT *,
+ unsigned EMUSHORT *));
+static int eshift PROTO((unsigned EMUSHORT *, int));
+static int enormlz PROTO((unsigned EMUSHORT *));
+static void e24toasc PROTO((unsigned EMUSHORT *, char *, int));
+static void e53toasc PROTO((unsigned EMUSHORT *, char *, int));
+static void e64toasc PROTO((unsigned EMUSHORT *, char *, int));
+static void e113toasc PROTO((unsigned EMUSHORT *, char *, int));
+static void etoasc PROTO((unsigned EMUSHORT *, char *, int));
+static void asctoe24 PROTO((char *, unsigned EMUSHORT *));
+static void asctoe53 PROTO((char *, unsigned EMUSHORT *));
+static void asctoe64 PROTO((char *, unsigned EMUSHORT *));
+static void asctoe113 PROTO((char *, unsigned EMUSHORT *));
+static void asctoe PROTO((char *, unsigned EMUSHORT *));
+static void asctoeg PROTO((char *, unsigned EMUSHORT *, int));
+static void efloor PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void efrexp PROTO((unsigned EMUSHORT *, int *,
+ unsigned EMUSHORT *));
+static void eldexp PROTO((unsigned EMUSHORT *, int, unsigned EMUSHORT *));
+static void eremain PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ unsigned EMUSHORT *));
+static void eiremain PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void mtherr PROTO((char *, int));
+static void dectoe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etodec PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void todec PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void ibmtoe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ enum machine_mode));
+static void etoibm PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ enum machine_mode));
+static void toibm PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *,
+ enum machine_mode));
+static void make_nan PROTO((unsigned EMUSHORT *, int, enum machine_mode));
+static void uditoe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void ditoe PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etoudi PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void etodi PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+static void esqrt PROTO((unsigned EMUSHORT *, unsigned EMUSHORT *));
+
+/* Copy 32-bit numbers obtained from array containing 16-bit numbers,
+ swapping ends if required, into output array of longs. The
+ result is normally passed to fprintf by the ASM_OUTPUT_ macros. */
+
+static void
+endian (e, x, mode)
+ unsigned EMUSHORT e[];
+ long x[];
+ enum machine_mode mode;
+{
+ unsigned long th, t;
+
+#if FLOAT_WORDS_BIG_ENDIAN
+ switch (mode)
+ {
+
+ case TFmode:
+ /* Swap halfwords in the fourth long. */
+ th = (unsigned long) e[6] & 0xffff;
+ t = (unsigned long) e[7] & 0xffff;
+ t |= th << 16;
+      x[3] = (long) t;
+      /* fall into the XFmode case */
+
+ case XFmode:
+
+ /* Swap halfwords in the third long. */
+ th = (unsigned long) e[4] & 0xffff;
+ t = (unsigned long) e[5] & 0xffff;
+ t |= th << 16;
+ x[2] = (long) t;
+ /* fall into the double case */
+
+ case DFmode:
+
+ /* swap halfwords in the second word */
+ th = (unsigned long) e[2] & 0xffff;
+ t = (unsigned long) e[3] & 0xffff;
+ t |= th << 16;
+ x[1] = (long) t;
+ /* fall into the float case */
+
+ case HFmode:
+ case SFmode:
+
+ /* swap halfwords in the first word */
+ th = (unsigned long) e[0] & 0xffff;
+ t = (unsigned long) e[1] & 0xffff;
+ t |= th << 16;
+ x[0] = t;
+ break;
+
+ default:
+ abort ();
+ }
+
+#else
+
+ /* Pack the output array without swapping. */
+
+ switch (mode)
+ {
+
+ case TFmode:
+
+ /* Pack the fourth long. */
+ th = (unsigned long) e[7] & 0xffff;
+ t = (unsigned long) e[6] & 0xffff;
+ t |= th << 16;
+      x[3] = (long) t;
+      /* fall into the XFmode case */
+
+ case XFmode:
+
+ /* Pack the third long.
+ Each element of the input REAL_VALUE_TYPE array has 16 useful bits
+ in it. */
+ th = (unsigned long) e[5] & 0xffff;
+ t = (unsigned long) e[4] & 0xffff;
+ t |= th << 16;
+ x[2] = (long) t;
+ /* fall into the double case */
+
+ case DFmode:
+
+ /* pack the second long */
+ th = (unsigned long) e[3] & 0xffff;
+ t = (unsigned long) e[2] & 0xffff;
+ t |= th << 16;
+ x[1] = (long) t;
+ /* fall into the float case */
+
+ case HFmode:
+ case SFmode:
+
+ /* pack the first long */
+ th = (unsigned long) e[1] & 0xffff;
+ t = (unsigned long) e[0] & 0xffff;
+ t |= th << 16;
+ x[0] = t;
+ break;
+
+ default:
+ abort ();
+ }
+
+#endif
+}
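+
+/* Worked example: for SFmode with big-endian float words, the two
+   16-bit halves e[0] (high) and e[1] (low) are packed as
+   x[0] = (e[0] << 16) | e[1].  */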
+
+
+/* This is the implementation of the REAL_ARITHMETIC macro. */
+
+void
+earith (value, icode, r1, r2)
+ REAL_VALUE_TYPE *value;
+ int icode;
+ REAL_VALUE_TYPE *r1;
+ REAL_VALUE_TYPE *r2;
+{
+ unsigned EMUSHORT d1[NE], d2[NE], v[NE];
+ enum tree_code code;
+
+ GET_REAL (r1, d1);
+ GET_REAL (r2, d2);
+#ifdef NANS
+/* Return NaN input back to the caller. */
+ if (eisnan (d1))
+ {
+ PUT_REAL (d1, value);
+ return;
+ }
+ if (eisnan (d2))
+ {
+ PUT_REAL (d2, value);
+ return;
+ }
+#endif
+ code = (enum tree_code) icode;
+ switch (code)
+ {
+ case PLUS_EXPR:
+ eadd (d2, d1, v);
+ break;
+
+ case MINUS_EXPR:
+ esub (d2, d1, v); /* d1 - d2 */
+ break;
+
+ case MULT_EXPR:
+ emul (d2, d1, v);
+ break;
+
+ case RDIV_EXPR:
+#ifndef REAL_INFINITY
+ if (ecmp (d2, ezero) == 0)
+ {
+#ifdef NANS
+ enan (v, eisneg (d1) ^ eisneg (d2));
+ break;
+#else
+ abort ();
+#endif
+ }
+#endif
+ ediv (d2, d1, v); /* d1/d2 */
+ break;
+
+ case MIN_EXPR: /* min (d1,d2) */
+ if (ecmp (d1, d2) < 0)
+ emov (d1, v);
+ else
+ emov (d2, v);
+ break;
+
+ case MAX_EXPR: /* max (d1,d2) */
+ if (ecmp (d1, d2) > 0)
+ emov (d1, v);
+ else
+ emov (d2, v);
+ break;
+ default:
+ emov (ezero, v);
+ break;
+ }
+  PUT_REAL (v, value);
+}
+
+
+/* Truncate REAL_VALUE_TYPE toward zero to signed HOST_WIDE_INT;
+   implements REAL_VALUE_RNDZINT (x) (etrunci (x)). */
+
+REAL_VALUE_TYPE
+etrunci (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT f[NE], g[NE];
+ REAL_VALUE_TYPE r;
+ HOST_WIDE_INT l;
+
+ GET_REAL (&x, g);
+#ifdef NANS
+ if (eisnan (g))
+ return (x);
+#endif
+ eifrac (g, &l, f);
+ ltoe (&l, g);
+ PUT_REAL (g, &r);
+ return (r);
+}
+
+
+/* Truncate REAL_VALUE_TYPE toward zero to unsigned HOST_WIDE_INT;
+ implements REAL_VALUE_UNSIGNED_RNDZINT (x) (etruncui (x)). */
+
+REAL_VALUE_TYPE
+etruncui (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT f[NE], g[NE];
+ REAL_VALUE_TYPE r;
+ unsigned HOST_WIDE_INT l;
+
+ GET_REAL (&x, g);
+#ifdef NANS
+ if (eisnan (g))
+ return (x);
+#endif
+ euifrac (g, &l, f);
+ ultoe (&l, g);
+ PUT_REAL (g, &r);
+ return (r);
+}
+
+
+/* This is the REAL_VALUE_ATOF function. It converts a decimal string to
+ binary, rounding off as indicated by the machine_mode argument. Then it
+ promotes the rounded value to REAL_VALUE_TYPE. */
+
+REAL_VALUE_TYPE
+ereal_atof (s, t)
+ char *s;
+ enum machine_mode t;
+{
+ unsigned EMUSHORT tem[NE], e[NE];
+ REAL_VALUE_TYPE r;
+
+ switch (t)
+ {
+ case HFmode:
+ case SFmode:
+ asctoe24 (s, tem);
+ e24toe (tem, e);
+ break;
+ case DFmode:
+ asctoe53 (s, tem);
+ e53toe (tem, e);
+ break;
+ case XFmode:
+ asctoe64 (s, tem);
+ e64toe (tem, e);
+ break;
+ case TFmode:
+ asctoe113 (s, tem);
+ e113toe (tem, e);
+ break;
+ default:
+ asctoe (s, e);
+ }
+ PUT_REAL (e, &r);
+ return (r);
+}
+
+
+/* Expansion of REAL_NEGATE. */
+
+REAL_VALUE_TYPE
+ereal_negate (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT e[NE];
+ REAL_VALUE_TYPE r;
+
+ GET_REAL (&x, e);
+ eneg (e);
+ PUT_REAL (e, &r);
+ return (r);
+}
+
+
+/* Round real toward zero to HOST_WIDE_INT;
+ implements REAL_VALUE_FIX (x). */
+
+HOST_WIDE_INT
+efixi (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT f[NE], g[NE];
+ HOST_WIDE_INT l;
+
+ GET_REAL (&x, f);
+#ifdef NANS
+ if (eisnan (f))
+ {
+ warning ("conversion from NaN to int");
+ return (-1);
+ }
+#endif
+ eifrac (f, &l, g);
+ return l;
+}
+
+/* Round real toward zero to unsigned HOST_WIDE_INT;
+ implements REAL_VALUE_UNSIGNED_FIX (x).
+ Negative input returns zero. */
+
+unsigned HOST_WIDE_INT
+efixui (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT f[NE], g[NE];
+ unsigned HOST_WIDE_INT l;
+
+ GET_REAL (&x, f);
+#ifdef NANS
+ if (eisnan (f))
+ {
+ warning ("conversion from NaN to unsigned int");
+ return (-1);
+ }
+#endif
+ euifrac (f, &l, g);
+ return l;
+}
+
+
+/* REAL_VALUE_FROM_INT macro. */
+
+void
+ereal_from_int (d, i, j)
+ REAL_VALUE_TYPE *d;
+ HOST_WIDE_INT i, j;
+{
+ unsigned EMUSHORT df[NE], dg[NE];
+ HOST_WIDE_INT low, high;
+ int sign;
+
+ sign = 0;
+ low = i;
+ if ((high = j) < 0)
+ {
+ sign = 1;
+ /* complement and add 1 */
+ high = ~high;
+ if (low)
+ low = -low;
+ else
+ high += 1;
+ }
+ eldexp (eone, HOST_BITS_PER_WIDE_INT, df);
+ ultoe ((unsigned HOST_WIDE_INT *) &high, dg);
+ emul (dg, df, dg);
+ ultoe ((unsigned HOST_WIDE_INT *) &low, df);
+ eadd (df, dg, dg);
+ if (sign)
+ eneg (dg);
+ PUT_REAL (dg, d);
+}
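+
+
+/* Illustrative sketch (an editorial addition, not from the import):
+ the sign handling above is the double-word two's complement identity
+ -(high:low) == (~high + (low == 0)) : (-low).  The same step in
+ isolation: */
+#if 0 /* example only */
+void
+example_negate_pair (high, low)
+ unsigned long *high, *low;
+{
+ *high = ~(*high);
+ if (*low)
+ *low = -(*low); /* negating a nonzero low word cannot carry */
+ else
+ *high += 1; /* a zero low word propagates a carry upward */
+}
+#endif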
+
+
+/* REAL_VALUE_FROM_UNSIGNED_INT macro. */
+
+void
+ereal_from_uint (d, i, j)
+ REAL_VALUE_TYPE *d;
+ unsigned HOST_WIDE_INT i, j;
+{
+ unsigned EMUSHORT df[NE], dg[NE];
+ unsigned HOST_WIDE_INT low, high;
+
+ low = i;
+ high = j;
+ eldexp (eone, HOST_BITS_PER_WIDE_INT, df);
+ ultoe (&high, dg);
+ emul (dg, df, dg);
+ ultoe (&low, df);
+ eadd (df, dg, dg);
+ PUT_REAL (dg, d);
+}
+
+
+/* REAL_VALUE_TO_INT macro. */
+
+void
+ereal_to_int (low, high, rr)
+ HOST_WIDE_INT *low, *high;
+ REAL_VALUE_TYPE rr;
+{
+ unsigned EMUSHORT d[NE], df[NE], dg[NE], dh[NE];
+ int s;
+
+ GET_REAL (&rr, d);
+#ifdef NANS
+ if (eisnan (d))
+ {
+ warning ("conversion from NaN to int");
+ *low = -1;
+ *high = -1;
+ return;
+ }
+#endif
+ /* convert positive value */
+ s = 0;
+ if (eisneg (d))
+ {
+ eneg (d);
+ s = 1;
+ }
+ eldexp (eone, HOST_BITS_PER_WIDE_INT, df);
+ ediv (df, d, dg); /* dg = d / 2^HOST_BITS_PER_WIDE_INT is the high word */
+ euifrac (dg, (unsigned HOST_WIDE_INT *) high, dh);
+ emul (df, dh, dg); /* fractional part is the low word */
+ euifrac (dg, (unsigned HOST_WIDE_INT *)low, dh);
+ if (s)
+ {
+ /* complement and add 1 */
+ *high = ~(*high);
+ if (*low)
+ *low = -(*low);
+ else
+ *high += 1;
+ }
+}
+
+
+/* REAL_VALUE_LDEXP macro. */
+
+REAL_VALUE_TYPE
+ereal_ldexp (x, n)
+ REAL_VALUE_TYPE x;
+ int n;
+{
+ unsigned EMUSHORT e[NE], y[NE];
+ REAL_VALUE_TYPE r;
+
+ GET_REAL (&x, e);
+#ifdef NANS
+ if (eisnan (e))
+ return (x);
+#endif
+ eldexp (e, n, y);
+ PUT_REAL (y, &r);
+ return (r);
+}
+
+/* These routines are conditionally compiled because functions
+ of the same names may be defined in fold-const.c. */
+
+#ifdef REAL_ARITHMETIC
+
+/* Check for infinity in a REAL_VALUE_TYPE. */
+
+int
+target_isinf (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT e[NE];
+
+#ifdef INFINITY
+ GET_REAL (&x, e);
+ return (eisinf (e));
+#else
+ return 0;
+#endif
+}
+
+
+/* Check whether a REAL_VALUE_TYPE item is a NaN. */
+
+int
+target_isnan (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT e[NE];
+
+#ifdef NANS
+ GET_REAL (&x, e);
+ return (eisnan (e));
+#else
+ return (0);
+#endif
+}
+
+
+/* Check for a negative REAL_VALUE_TYPE number.
+ This just checks the sign bit, so that -0 counts as negative. */
+
+int
+target_negative (x)
+ REAL_VALUE_TYPE x;
+{
+ return ereal_isneg (x);
+}
+
+/* Expansion of REAL_VALUE_TRUNCATE.
+ The result is in floating point, rounded to nearest or even. */
+
+REAL_VALUE_TYPE
+real_value_truncate (mode, arg)
+ enum machine_mode mode;
+ REAL_VALUE_TYPE arg;
+{
+ unsigned EMUSHORT e[NE], t[NE];
+ REAL_VALUE_TYPE r;
+
+ GET_REAL (&arg, e);
+#ifdef NANS
+ if (eisnan (e))
+ return (arg);
+#endif
+ eclear (t);
+ switch (mode)
+ {
+ case TFmode:
+ etoe113 (e, t);
+ e113toe (t, t);
+ break;
+
+ case XFmode:
+ etoe64 (e, t);
+ e64toe (t, t);
+ break;
+
+ case DFmode:
+ etoe53 (e, t);
+ e53toe (t, t);
+ break;
+
+ case HFmode:
+ case SFmode:
+ etoe24 (e, t);
+ e24toe (t, t);
+ break;
+
+ case SImode:
+ r = etrunci (arg);
+ return (r);
+
+ /* If an unsupported type was requested, presume that
+ the machine files know something useful to do with
+ the unmodified value. */
+
+ default:
+ return (arg);
+ }
+ PUT_REAL (t, &r);
+ return (r);
+}
+
+#endif /* REAL_ARITHMETIC defined */
+
+/* Used for debugging--print the value of R in human-readable format
+ on stderr. */
+
+void
+debug_real (r)
+ REAL_VALUE_TYPE r;
+{
+ char dstr[30];
+
+ REAL_VALUE_TO_DECIMAL (r, "%.20g", dstr);
+ fprintf (stderr, "%s", dstr);
+}
+
+
+/* Target values are arrays of host longs. A long is guaranteed
+ to be at least 32 bits wide. */
+
+/* 128-bit long double */
+
+void
+etartdouble (r, l)
+ REAL_VALUE_TYPE r;
+ long l[];
+{
+ unsigned EMUSHORT e[NE];
+
+ GET_REAL (&r, e);
+ etoe113 (e, e);
+ endian (e, l, TFmode);
+}
+
+/* 80-bit long double */
+
+void
+etarldouble (r, l)
+ REAL_VALUE_TYPE r;
+ long l[];
+{
+ unsigned EMUSHORT e[NE];
+
+ GET_REAL (&r, e);
+ etoe64 (e, e);
+ endian (e, l, XFmode);
+}
+
+void
+etardouble (r, l)
+ REAL_VALUE_TYPE r;
+ long l[];
+{
+ unsigned EMUSHORT e[NE];
+
+ GET_REAL (&r, e);
+ etoe53 (e, e);
+ endian (e, l, DFmode);
+}
+
+long
+etarsingle (r)
+ REAL_VALUE_TYPE r;
+{
+ unsigned EMUSHORT e[NE];
+ long l;
+
+ GET_REAL (&r, e);
+ etoe24 (e, e);
+ endian (e, &l, SFmode);
+ return ((long) l);
+}
+
+void
+ereal_to_decimal (x, s)
+ REAL_VALUE_TYPE x;
+ char *s;
+{
+ unsigned EMUSHORT e[NE];
+
+ GET_REAL (&x, e);
+ etoasc (e, s, 20);
+}
+
+int
+ereal_cmp (x, y)
+ REAL_VALUE_TYPE x, y;
+{
+ unsigned EMUSHORT ex[NE], ey[NE];
+
+ GET_REAL (&x, ex);
+ GET_REAL (&y, ey);
+ return (ecmp (ex, ey));
+}
+
+int
+ereal_isneg (x)
+ REAL_VALUE_TYPE x;
+{
+ unsigned EMUSHORT ex[NE];
+
+ GET_REAL (&x, ex);
+ return (eisneg (ex));
+}
+
+/* End of REAL_ARITHMETIC interface */
+
+/*
+ Extended precision IEEE binary floating point arithmetic routines
+
+ Numbers are stored in C language as arrays of 16-bit unsigned
+ short integers. The arguments of the routines are pointers to
+ the arrays.
+
+ External e type data structure, simulates Intel 8087 chip
+ temporary real format but possibly with a larger significand:
+
+ NE-1 significand words (least significant word first,
+ most significant bit is normally set)
+ exponent (value = EXONE for 1.0,
+ top bit is the sign)
+
+
+ Internal data structure of a number (a "word" is 16 bits):
+
+ ei[0] sign word (0 for positive, 0xffff for negative)
+ ei[1] biased exponent (value = EXONE for the number 1.0)
+ ei[2] high guard word (always zero after normalization)
+ ei[3]
+ to ei[NI-2] significand (NI-4 significand words,
+ most significant word first,
+ most significant bit is set)
+ ei[NI-1] low guard word (0x8000 bit is rounding place)
+
+
+
+ Routines for external format numbers
+
+ asctoe (string, e) ASCII string to extended double e type
+ asctoe64 (string, &d) ASCII string to long double
+ asctoe53 (string, &d) ASCII string to double
+ asctoe24 (string, &f) ASCII string to single
+ asctoeg (string, e, prec) ASCII string to specified precision
+ e24toe (&f, e) IEEE single precision to e type
+ e53toe (&d, e) IEEE double precision to e type
+ e64toe (&d, e) IEEE long double precision to e type
+ e113toe (&d, e) 128-bit long double precision to e type
+ eabs (e) absolute value
+ eadd (a, b, c) c = b + a
+ eclear (e) e = 0
+ ecmp (a, b) Returns 1 if a > b, 0 if a == b,
+ -1 if a < b, -2 if either a or b is a NaN.
+ ediv (a, b, c) c = b / a
+ efloor (a, b) truncate to integer, toward -infinity
+ efrexp (a, exp, s) extract exponent and significand
+ eifrac (e, &l, frac) e to HOST_WIDE_INT and e type fraction
+ euifrac (e, &l, frac) e to unsigned HOST_WIDE_INT and e type fraction
+ einfin (e) set e to infinity, leaving its sign alone
+ eldexp (a, n, b) multiply by 2**n
+ emov (a, b) b = a
+ emul (a, b, c) c = b * a
+ eneg (e) e = -e
+ eround (a, b) b = nearest integer value to a
+ esub (a, b, c) c = b - a
+ e24toasc (&f, str, n) single to ASCII string, n digits after decimal
+ e53toasc (&d, str, n) double to ASCII string, n digits after decimal
+ e64toasc (&d, str, n) 80-bit long double to ASCII string
+ e113toasc (&d, str, n) 128-bit long double to ASCII string
+ etoasc (e, str, n) e to ASCII string, n digits after decimal
+ etoe24 (e, &f) convert e type to IEEE single precision
+ etoe53 (e, &d) convert e type to IEEE double precision
+ etoe64 (e, &d) convert e type to IEEE long double precision
+ ltoe (&l, e) HOST_WIDE_INT to e type
+ ultoe (&l, e) unsigned HOST_WIDE_INT to e type
+ eisneg (e) 1 if sign bit of e != 0, else 0
+ eisinf (e) 1 if e has maximum exponent (non-IEEE)
+ or is infinite (IEEE)
+ eisnan (e) 1 if e is a NaN
+
+
+ Routines for internal format numbers
+
+ eaddm (ai, bi) add significands, bi = bi + ai
+ ecleaz (ei) ei = 0
+ ecleazs (ei) set ei = 0 but leave its sign alone
+ ecmpm (ai, bi) compare significands, return 1, 0, or -1
+ edivm (ai, bi) divide significands, bi = bi / ai
+ emdnorm (ai,l,s,exp) normalize and round off
+ emovi (a, ai) convert external a to internal ai
+ emovo (ai, a) convert internal ai to external a
+ emovz (ai, bi) bi = ai, low guard word of bi = 0
+ emulm (ai, bi) multiply significands, bi = bi * ai
+ enormlz (ei) left-justify the significand
+ eshdn1 (ai) shift significand and guards down 1 bit
+ eshdn8 (ai) shift down 8 bits
+ eshdn6 (ai) shift down 16 bits
+ eshift (ai, n) shift ai n bits up (or down if n < 0)
+ eshup1 (ai) shift significand and guards up 1 bit
+ eshup8 (ai) shift up 8 bits
+ eshup6 (ai) shift up 16 bits
+ esubm (ai, bi) subtract significands, bi = bi - ai
+ eiisinf (ai) 1 if infinite
+ eiisnan (ai) 1 if a NaN
+ eiisneg (ai) 1 if sign bit of ai != 0, else 0
+ einan (ai) set ai = NaN
+ eiinfin (ai) set ai = infinity
+
+ The result is always normalized and rounded to NI-4 word precision
+ after each arithmetic operation.
+
+ Exception flags are NOT fully supported.
+
+ Signaling NaN's are NOT supported; they are treated the same
+ as quiet NaN's.
+
+ Define INFINITY for support of infinity; otherwise a
+ saturation arithmetic is implemented.
+
+ Define NANS for support of Not-a-Number items; otherwise the
+ arithmetic will never produce a NaN output, and might be confused
+ by a NaN input.
+ If NaN's are supported, the output of `ecmp (a,b)' is -2 if
+ either a or b is a NaN. This means asking `if (ecmp (a,b) < 0)'
+ may not be legitimate. Use `if (ecmp (a,b) == -1)' for `less than'
+ if in doubt.
+
+ Denormals are always supported here where appropriate (e.g., not
+ for conversion to DEC numbers). */
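+
+/* Illustrative sketch (an editorial addition, not from the import):
+ building the external pattern for 1.0 by hand, following the layout
+ described above.  With NE == 6 this reproduces the eone constant
+ defined below; 0x3fff is the exponent bias EXONE. */
+#if 0 /* example only */
+void
+example_build_one (e)
+ unsigned EMUSHORT e[];
+{
+ int i;
+
+ for (i = 0; i < NE - 1; i++)
+ e[i] = 0;
+ e[NE - 2] = 0x8000; /* significand 1.000..., MSB of the top word */
+ e[NE - 1] = 0x3fff; /* sign bit 0, biased exponent EXONE */
+}
+#endif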
+
+/* Definitions for error codes that are passed to the common error handling
+ routine mtherr.
+
+ For Digital Equipment PDP-11 and VAX computers, certain
+ IBM systems, and others that use numbers with a 56-bit
+ significand, the symbol DEC should be defined. In this
+ mode, most floating point constants are given as arrays
+ of octal integers to eliminate decimal to binary conversion
+ errors that might be introduced by the compiler.
+
+ For computers, such as IBM PC, that follow the IEEE
+ Standard for Binary Floating Point Arithmetic (ANSI/IEEE
+ Std 754-1985), the symbol IBMPC or MIEEE should be defined.
+ These numbers have 53-bit significands. In this mode, constants
+ are provided as arrays of hexadecimal 16 bit integers.
+
+ To accommodate other types of computer arithmetic, all
+ constants are also provided in a normal decimal radix
+ which one can hope are correctly converted to a suitable
+ format by the available C language compiler. To invoke
+ this mode, the symbol UNK is defined.
+
+ An important difference among these modes is a predefined
+ set of machine arithmetic constants for each. The numbers
+ MACHEP (the machine roundoff error), MAXNUM (largest number
+ represented), and several other parameters are preset by
+ the configuration symbol. Check the file const.c to
+ ensure that these values are correct for your computer.
+
+ For ANSI C compatibility, define ANSIC equal to 1. Currently
+ this affects only the atan2 function and others that use it. */
+
+/* Constant definitions for math error conditions. */
+
+#define DOMAIN 1 /* argument domain error */
+#define SING 2 /* argument singularity */
+#define OVERFLOW 3 /* overflow range error */
+#define UNDERFLOW 4 /* underflow range error */
+#define TLOSS 5 /* total loss of precision */
+#define PLOSS 6 /* partial loss of precision */
+#define INVALID 7 /* NaN-producing operation */
+
+/* e type constants used by high precision check routines */
+
+#if LONG_DOUBLE_TYPE_SIZE == 128
+/* 0.0 */
+unsigned EMUSHORT ezero[NE] =
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,};
+extern unsigned EMUSHORT ezero[];
+
+/* 5.0E-1 */
+unsigned EMUSHORT ehalf[NE] =
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x3ffe,};
+extern unsigned EMUSHORT ehalf[];
+
+/* 1.0E0 */
+unsigned EMUSHORT eone[NE] =
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x3fff,};
+extern unsigned EMUSHORT eone[];
+
+/* 2.0E0 */
+unsigned EMUSHORT etwo[NE] =
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x4000,};
+extern unsigned EMUSHORT etwo[];
+
+/* 3.2E1 */
+unsigned EMUSHORT e32[NE] =
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x8000, 0x4004,};
+extern unsigned EMUSHORT e32[];
+
+/* 6.93147180559945309417232121458176568075500134360255E-1 */
+unsigned EMUSHORT elog2[NE] =
+ {0x40f3, 0xf6af, 0x03f2, 0xb398,
+ 0xc9e3, 0x79ab, 0150717, 0013767, 0130562, 0x3ffe,};
+extern unsigned EMUSHORT elog2[];
+
+/* 1.41421356237309504880168872420969807856967187537695E0 */
+unsigned EMUSHORT esqrt2[NE] =
+ {0x1d6f, 0xbe9f, 0x754a, 0x89b3,
+ 0x597d, 0x6484, 0174736, 0171463, 0132404, 0x3fff,};
+extern unsigned EMUSHORT esqrt2[];
+
+/* 3.14159265358979323846264338327950288419716939937511E0 */
+unsigned EMUSHORT epi[NE] =
+ {0x2902, 0x1cd1, 0x80dc, 0x628b,
+ 0xc4c6, 0xc234, 0020550, 0155242, 0144417, 0040000,};
+extern unsigned EMUSHORT epi[];
+
+#else
+/* LONG_DOUBLE_TYPE_SIZE is other than 128 */
+unsigned EMUSHORT ezero[NE] =
+ {0, 0000000, 0000000, 0000000, 0000000, 0000000,};
+unsigned EMUSHORT ehalf[NE] =
+ {0, 0000000, 0000000, 0000000, 0100000, 0x3ffe,};
+unsigned EMUSHORT eone[NE] =
+ {0, 0000000, 0000000, 0000000, 0100000, 0x3fff,};
+unsigned EMUSHORT etwo[NE] =
+ {0, 0000000, 0000000, 0000000, 0100000, 0040000,};
+unsigned EMUSHORT e32[NE] =
+ {0, 0000000, 0000000, 0000000, 0100000, 0040004,};
+unsigned EMUSHORT elog2[NE] =
+ {0xc9e4, 0x79ab, 0150717, 0013767, 0130562, 0x3ffe,};
+unsigned EMUSHORT esqrt2[NE] =
+ {0x597e, 0x6484, 0174736, 0171463, 0132404, 0x3fff,};
+unsigned EMUSHORT epi[NE] =
+ {0xc4c6, 0xc234, 0020550, 0155242, 0144417, 0040000,};
+#endif
+
+
+
+/* Control register for rounding precision.
+ This can be set to 113 (if NE=10), 80 (if NE=6), 64, 56, 53, or 24 bits. */
+
+int rndprc = NBITS;
+extern int rndprc;
+
+/* Clear out entire external format number. */
+
+static void
+eclear (x)
+ register unsigned EMUSHORT *x;
+{
+ register int i;
+
+ for (i = 0; i < NE; i++)
+ *x++ = 0;
+}
+
+
+
+/* Move external format number from a to b. */
+
+static void
+emov (a, b)
+ register unsigned EMUSHORT *a, *b;
+{
+ register int i;
+
+ for (i = 0; i < NE; i++)
+ *b++ = *a++;
+}
+
+
+/* Absolute value of external format number. */
+
+static void
+eabs (x)
+ unsigned EMUSHORT x[];
+{
+ /* sign is top bit of last word of external format */
+ x[NE - 1] &= 0x7fff;
+}
+
+/* Negate external format number. */
+
+static void
+eneg (x)
+ unsigned EMUSHORT x[];
+{
+
+ x[NE - 1] ^= 0x8000; /* Toggle the sign bit */
+}
+
+
+
+/* Return 1 if sign bit of external format number is nonzero, else zero. */
+
+static int
+eisneg (x)
+ unsigned EMUSHORT x[];
+{
+
+ if (x[NE - 1] & 0x8000)
+ return (1);
+ else
+ return (0);
+}
+
+
+/* Return 1 if external format number is infinity, else return zero. */
+
+static int
+eisinf (x)
+ unsigned EMUSHORT x[];
+{
+
+#ifdef NANS
+ if (eisnan (x))
+ return (0);
+#endif
+ if ((x[NE - 1] & 0x7fff) == 0x7fff)
+ return (1);
+ else
+ return (0);
+}
+
+
+/* Check if e-type number is not a number. The bit pattern is one that we
+ defined, so we know for sure how to detect it. */
+
+static int
+eisnan (x)
+ unsigned EMUSHORT x[];
+{
+#ifdef NANS
+ int i;
+
+ /* NaN has maximum exponent */
+ if ((x[NE - 1] & 0x7fff) != 0x7fff)
+ return (0);
+ /* ... and non-zero significand field. */
+ for (i = 0; i < NE - 1; i++)
+ {
+ if (*x++ != 0)
+ return (1);
+ }
+#endif
+
+ return (0);
+}
+
+/* Fill external format number with infinity pattern (IEEE)
+ or largest possible number (non-IEEE). */
+
+static void
+einfin (x)
+ register unsigned EMUSHORT *x;
+{
+ register int i;
+
+#ifdef INFINITY
+ for (i = 0; i < NE - 1; i++)
+ *x++ = 0;
+ *x |= 32767;
+#else
+ for (i = 0; i < NE - 1; i++)
+ *x++ = 0xffff;
+ *x |= 32766;
+ if (rndprc < NBITS)
+ {
+ if (rndprc == 113)
+ {
+ *(x - 9) = 0;
+ *(x - 8) = 0;
+ }
+ if (rndprc == 64)
+ {
+ *(x - 5) = 0;
+ }
+ if (rndprc == 53)
+ {
+ *(x - 4) = 0xf800;
+ }
+ else
+ {
+ *(x - 4) = 0;
+ *(x - 3) = 0;
+ *(x - 2) = 0xff00;
+ }
+ }
+#endif
+}
+
+
+/* Output an e-type NaN.
+ This generates Intel's quiet NaN pattern for extended real.
+ The exponent is 7fff, the leading mantissa word is c000. */
+
+static void
+enan (x, sign)
+ register unsigned EMUSHORT *x;
+ int sign;
+{
+ register int i;
+
+ for (i = 0; i < NE - 2; i++)
+ *x++ = 0;
+ *x++ = 0xc000;
+ *x = (sign << 15) | 0x7fff;
+}
+
+
+/* Move in external format number, converting it to internal format. */
+
+static void
+emovi (a, b)
+ unsigned EMUSHORT *a, *b;
+{
+ register unsigned EMUSHORT *p, *q;
+ int i;
+
+ q = b;
+ p = a + (NE - 1); /* point to last word of external number */
+ /* get the sign bit */
+ if (*p & 0x8000)
+ *q++ = 0xffff;
+ else
+ *q++ = 0;
+ /* get the exponent */
+ *q = *p--;
+ *q++ &= 0x7fff; /* delete the sign bit */
+#ifdef INFINITY
+ if ((*(q - 1) & 0x7fff) == 0x7fff)
+ {
+#ifdef NANS
+ if (eisnan (a))
+ {
+ *q++ = 0;
+ for (i = 3; i < NI; i++)
+ *q++ = *p--;
+ return;
+ }
+#endif
+
+ for (i = 2; i < NI; i++)
+ *q++ = 0;
+ return;
+ }
+#endif
+
+ /* clear high guard word */
+ *q++ = 0;
+ /* move in the significand */
+ for (i = 0; i < NE - 1; i++)
+ *q++ = *p--;
+ /* clear low guard word */
+ *q = 0;
+}
+
+
+/* Move internal format number out, converting it to external format. */
+
+static void
+emovo (a, b)
+ unsigned EMUSHORT *a, *b;
+{
+ register unsigned EMUSHORT *p, *q;
+ unsigned EMUSHORT i;
+ int j;
+
+ p = a;
+ q = b + (NE - 1); /* point to output exponent */
+ /* combine sign and exponent */
+ i = *p++;
+ if (i)
+ *q-- = *p++ | 0x8000;
+ else
+ *q-- = *p++;
+#ifdef INFINITY
+ if (*(p - 1) == 0x7fff)
+ {
+#ifdef NANS
+ if (eiisnan (a))
+ {
+ enan (b, eiisneg (a));
+ return;
+ }
+#endif
+ einfin (b);
+ return;
+ }
+#endif
+ /* skip over guard word */
+ ++p;
+ /* move the significand */
+ for (j = 0; j < NE - 1; j++)
+ *q-- = *p++;
+}
+
+/* Clear out internal format number. */
+
+static void
+ecleaz (xi)
+ register unsigned EMUSHORT *xi;
+{
+ register int i;
+
+ for (i = 0; i < NI; i++)
+ *xi++ = 0;
+}
+
+
+/* Same, but don't touch the sign. */
+
+static void
+ecleazs (xi)
+ register unsigned EMUSHORT *xi;
+{
+ register int i;
+
+ ++xi;
+ for (i = 0; i < NI - 1; i++)
+ *xi++ = 0;
+}
+
+
+
+/* Move internal format number from a to b. */
+
+static void
+emovz (a, b)
+ register unsigned EMUSHORT *a, *b;
+{
+ register int i;
+
+ for (i = 0; i < NI - 1; i++)
+ *b++ = *a++;
+ /* clear low guard word */
+ *b = 0;
+}
+
+/* Generate internal format NaN.
+ The explicit pattern for this is maximum exponent and
+ top two significand bits set. */
+
+static void
+einan (x)
+ unsigned EMUSHORT x[];
+{
+
+ ecleaz (x);
+ x[E] = 0x7fff;
+ x[M + 1] = 0xc000;
+}
+
+/* Return nonzero if internal format number is a NaN. */
+
+static int
+eiisnan (x)
+ unsigned EMUSHORT x[];
+{
+ int i;
+
+ if ((x[E] & 0x7fff) == 0x7fff)
+ {
+ for (i = M + 1; i < NI; i++)
+ {
+ if (x[i] != 0)
+ return (1);
+ }
+ }
+ return (0);
+}
+
+/* Return nonzero if sign of internal format number is nonzero. */
+
+static int
+eiisneg (x)
+ unsigned EMUSHORT x[];
+{
+
+ return x[0] != 0;
+}
+
+/* Fill internal format number with infinity pattern.
+ This has maximum exponent and significand all zeros. */
+
+static void
+eiinfin (x)
+ unsigned EMUSHORT x[];
+{
+
+ ecleaz (x);
+ x[E] = 0x7fff;
+}
+
+/* Return nonzero if internal format number is infinite. */
+
+static int
+eiisinf (x)
+ unsigned EMUSHORT x[];
+{
+
+#ifdef NANS
+ if (eiisnan (x))
+ return (0);
+#endif
+ if ((x[E] & 0x7fff) == 0x7fff)
+ return (1);
+ return (0);
+}
+
+
+/* Compare significands of numbers in internal format.
+ Guard words are included in the comparison.
+
+ Returns +1 if a > b
+ 0 if a == b
+ -1 if a < b */
+
+static int
+ecmpm (a, b)
+ register unsigned EMUSHORT *a, *b;
+{
+ int i;
+
+ a += M; /* skip up to significand area */
+ b += M;
+ for (i = M; i < NI; i++)
+ {
+ if (*a++ != *b++)
+ goto difrnt;
+ }
+ return (0);
+
+ difrnt:
+ if (*(--a) > *(--b))
+ return (1);
+ else
+ return (-1);
+}
+
+
+/* Shift significand down by 1 bit. */
+
+static void
+eshdn1 (x)
+ register unsigned EMUSHORT *x;
+{
+ register unsigned EMUSHORT bits;
+ int i;
+
+ x += M; /* point to significand area */
+
+ bits = 0;
+ for (i = M; i < NI; i++)
+ {
+ if (*x & 1)
+ bits |= 1;
+ *x >>= 1;
+ if (bits & 2)
+ *x |= 0x8000;
+ bits <<= 1;
+ ++x;
+ }
+}
+
+
+
+/* Shift significand up by 1 bit. */
+
+static void
+eshup1 (x)
+ register unsigned EMUSHORT *x;
+{
+ register unsigned EMUSHORT bits;
+ int i;
+
+ x += NI - 1;
+ bits = 0;
+
+ for (i = M; i < NI; i++)
+ {
+ if (*x & 0x8000)
+ bits |= 1;
+ *x <<= 1;
+ if (bits & 2)
+ *x |= 1;
+ bits <<= 1;
+ --x;
+ }
+}
+
+
+/* Shift significand down by 8 bits. */
+
+static void
+eshdn8 (x)
+ register unsigned EMUSHORT *x;
+{
+ register unsigned EMUSHORT newbyt, oldbyt;
+ int i;
+
+ x += M;
+ oldbyt = 0;
+ for (i = M; i < NI; i++)
+ {
+ newbyt = *x << 8;
+ *x >>= 8;
+ *x |= oldbyt;
+ oldbyt = newbyt;
+ ++x;
+ }
+}
+
+/* Shift significand up by 8 bits. */
+
+static void
+eshup8 (x)
+ register unsigned EMUSHORT *x;
+{
+ int i;
+ register unsigned EMUSHORT newbyt, oldbyt;
+
+ x += NI - 1;
+ oldbyt = 0;
+
+ for (i = M; i < NI; i++)
+ {
+ newbyt = *x >> 8;
+ *x <<= 8;
+ *x |= oldbyt;
+ oldbyt = newbyt;
+ --x;
+ }
+}
+
+/* Shift significand up by 16 bits. */
+
+static void
+eshup6 (x)
+ register unsigned EMUSHORT *x;
+{
+ int i;
+ register unsigned EMUSHORT *p;
+
+ p = x + M;
+ x += M + 1;
+
+ for (i = M; i < NI - 1; i++)
+ *p++ = *x++;
+
+ *p = 0;
+}
+
+/* Shift significand down by 16 bits. */
+
+static void
+eshdn6 (x)
+ register unsigned EMUSHORT *x;
+{
+ int i;
+ register unsigned EMUSHORT *p;
+
+ x += NI - 1;
+ p = x + 1;
+
+ for (i = M; i < NI - 1; i++)
+ *(--p) = *(--x);
+
+ *(--p) = 0;
+}
+
+/* Add significands. x + y replaces y. */
+
+static void
+eaddm (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ register unsigned EMULONG a;
+ int i;
+ unsigned int carry;
+
+ x += NI - 1;
+ y += NI - 1;
+ carry = 0;
+ for (i = M; i < NI; i++)
+ {
+ a = (unsigned EMULONG) (*x) + (unsigned EMULONG) (*y) + carry;
+ if (a & 0x10000)
+ carry = 1;
+ else
+ carry = 0;
+ *y = (unsigned EMUSHORT) a;
+ --x;
+ --y;
+ }
+}
+
+/* Subtract significands. y - x replaces y. */
+
+static void
+esubm (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ unsigned EMULONG a;
+ int i;
+ unsigned int carry;
+
+ x += NI - 1;
+ y += NI - 1;
+ carry = 0;
+ for (i = M; i < NI; i++)
+ {
+ a = (unsigned EMULONG) (*y) - (unsigned EMULONG) (*x) - carry;
+ if (a & 0x10000)
+ carry = 1;
+ else
+ carry = 0;
+ *y = (unsigned EMUSHORT) a;
+ --x;
+ --y;
+ }
+}
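+
+
+/* Illustrative sketch (an editorial addition, not from the import):
+ both loops above do 16-bit digit arithmetic in the wider EMULONG
+ type, so bit 0x10000 of the wide result is exactly the carry out of
+ an add, or the borrow out of a subtract. */
+#if 0 /* example only */
+#include <assert.h>
+void
+example_digit_carry ()
+{
+ unsigned long a;
+
+ a = (unsigned long) 0xffff + (unsigned long) 0x0001;
+ assert ((a & 0x10000L) != 0); /* carry out of 16 bits */
+ a = (unsigned long) 0x0000 - (unsigned long) 0x0001;
+ assert ((a & 0x10000L) != 0); /* borrow shows up the same way */
+}
+#endif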
+
+
+static unsigned EMUSHORT equot[NI];
+
+
+#if 0
+/* Radix 2 shift-and-add versions of multiply and divide */
+
+
+/* Divide significands */
+
+int
+edivm (den, num)
+ unsigned EMUSHORT den[], num[];
+{
+ int i;
+ register unsigned EMUSHORT *p, *q;
+ unsigned EMUSHORT j;
+
+ p = &equot[0];
+ *p++ = num[0];
+ *p++ = num[1];
+
+ for (i = M; i < NI; i++)
+ {
+ *p++ = 0;
+ }
+
+ /* Use faster compare and subtraction if denominator has only 15 bits of
+ significance. */
+
+ p = &den[M + 2];
+ if (*p++ == 0)
+ {
+ for (i = M + 3; i < NI; i++)
+ {
+ if (*p++ != 0)
+ goto fulldiv;
+ }
+ if ((den[M + 1] & 1) != 0)
+ goto fulldiv;
+ eshdn1 (num);
+ eshdn1 (den);
+
+ p = &den[M + 1];
+ q = &num[M + 1];
+
+ for (i = 0; i < NBITS + 2; i++)
+ {
+ if (*p <= *q)
+ {
+ *q -= *p;
+ j = 1;
+ }
+ else
+ {
+ j = 0;
+ }
+ eshup1 (equot);
+ equot[NI - 2] |= j;
+ eshup1 (num);
+ }
+ goto divdon;
+ }
+
+ /* The number of quotient bits to calculate is NBITS, plus 1 scaling
+ guard bit, plus 1 roundoff bit. */
+
+ fulldiv:
+
+ p = &equot[NI - 2];
+ for (i = 0; i < NBITS + 2; i++)
+ {
+ if (ecmpm (den, num) <= 0)
+ {
+ esubm (den, num);
+ j = 1; /* quotient bit = 1 */
+ }
+ else
+ j = 0;
+ eshup1 (equot);
+ *p |= j;
+ eshup1 (num);
+ }
+
+ divdon:
+
+ eshdn1 (equot);
+ eshdn1 (equot);
+
+ /* test for nonzero remainder after roundoff bit */
+ p = &num[M];
+ j = 0;
+ for (i = M; i < NI; i++)
+ {
+ j |= *p++;
+ }
+ if (j)
+ j = 1;
+
+
+ for (i = 0; i < NI; i++)
+ num[i] = equot[i];
+ return ((int) j);
+}
+
+
+/* Multiply significands */
+int
+emulm (a, b)
+ unsigned EMUSHORT a[], b[];
+{
+ unsigned EMUSHORT *p, *q;
+ int i, j, k;
+
+ equot[0] = b[0];
+ equot[1] = b[1];
+ for (i = M; i < NI; i++)
+ equot[i] = 0;
+
+ p = &a[NI - 2];
+ k = NBITS;
+ while (*p == 0) /* significand is not supposed to be zero */
+ {
+ eshdn6 (a);
+ k -= 16;
+ }
+ if ((*p & 0xff) == 0)
+ {
+ eshdn8 (a);
+ k -= 8;
+ }
+
+ q = &equot[NI - 1];
+ j = 0;
+ for (i = 0; i < k; i++)
+ {
+ if (*p & 1)
+ eaddm (b, equot);
+ /* remember if there were any nonzero bits shifted out */
+ if (*q & 1)
+ j |= 1;
+ eshdn1 (a);
+ eshdn1 (equot);
+ }
+
+ for (i = 0; i < NI; i++)
+ b[i] = equot[i];
+
+ /* return flag for lost nonzero bits */
+ return (j);
+}
+
+#else
+
+/* Radix 65536 versions of multiply and divide */
+
+
+/* Multiply significand of e-type number b
+ by 16-bit quantity a, e-type result to c. */
+
+static void
+m16m (a, b, c)
+ unsigned int a;
+ unsigned short b[], c[];
+{
+ register unsigned short *pp;
+ register unsigned long carry;
+ unsigned short *ps;
+ unsigned short p[NI];
+ unsigned long aa, m;
+ int i;
+
+ aa = a;
+ pp = &p[NI-2];
+ *pp++ = 0;
+ *pp = 0;
+ ps = &b[NI-1];
+
+ for (i=M+1; i<NI; i++)
+ {
+ if (*ps == 0)
+ {
+ --ps;
+ --pp;
+ *(pp-1) = 0;
+ }
+ else
+ {
+ m = (unsigned long) aa * *ps--;
+ carry = (m & 0xffff) + *pp;
+ *pp-- = (unsigned short)carry;
+ carry = (carry >> 16) + (m >> 16) + *pp;
+ *pp = (unsigned short)carry;
+ *(pp-1) = carry >> 16;
+ }
+ }
+ for (i=M; i<NI; i++)
+ c[i] = p[i];
+}
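+
+
+/* Illustrative sketch (an editorial addition, not from the import):
+ m16m is one row of base-65536 schoolbook multiplication.  The same
+ step in its simplest form, most significant digit first, one 16-bit
+ multiplier digit, with explicit carry propagation: */
+#if 0 /* example only */
+void
+example_mul_digit (num, n, digit, result)
+ unsigned short *num;
+ int n;
+ unsigned int digit; /* must fit in 16 bits */
+ unsigned short *result; /* holds n+1 digits */
+{
+ unsigned long t, carry;
+ int i;
+
+ carry = 0;
+ for (i = n - 1; i >= 0; i--)
+ {
+ t = (unsigned long) num[i] * digit + carry;
+ result[i + 1] = (unsigned short) t; /* keep the low 16 bits */
+ carry = t >> 16; /* the high 16 bits carry up */
+ }
+ result[0] = (unsigned short) carry;
+}
+#endif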
+
+
+/* Divide significands. Neither the numerator nor the denominator
+ is permitted to have its high guard word nonzero. */
+
+static int
+edivm (den, num)
+ unsigned short den[], num[];
+{
+ int i;
+ register unsigned short *p;
+ unsigned long tnum;
+ unsigned short j, tdenm, tquot;
+ unsigned short tprod[NI+1];
+
+ p = &equot[0];
+ *p++ = num[0];
+ *p++ = num[1];
+
+ for (i=M; i<NI; i++)
+ {
+ *p++ = 0;
+ }
+ eshdn1 (num);
+ tdenm = den[M+1];
+ for (i=M; i<NI; i++)
+ {
+ /* Find trial quotient digit (the radix is 65536). */
+ tnum = (((unsigned long) num[M]) << 16) + num[M+1];
+
+ /* Do not execute the divide instruction if it will overflow. */
+ if ((tdenm * 0xffffL) < tnum)
+ tquot = 0xffff;
+ else
+ tquot = tnum / tdenm;
+ /* Multiply denominator by trial quotient digit. */
+ m16m ((unsigned int)tquot, den, tprod);
+ /* The quotient digit may have been overestimated. */
+ if (ecmpm (tprod, num) > 0)
+ {
+ tquot -= 1;
+ esubm (den, tprod);
+ if (ecmpm (tprod, num) > 0)
+ {
+ tquot -= 1;
+ esubm (den, tprod);
+ }
+ }
+ esubm (tprod, num);
+ equot[i] = tquot;
+ eshup6(num);
+ }
+ /* test for nonzero remainder after roundoff bit */
+ p = &num[M];
+ j = 0;
+ for (i=M; i<NI; i++)
+ {
+ j |= *p++;
+ }
+ if (j)
+ j = 1;
+
+ for (i=0; i<NI; i++)
+ num[i] = equot[i];
+
+ return ((int)j);
+}
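+
+
+/* Illustrative sketch (an editorial addition, not from the import):
+ the trial quotient step above in isolation.  One base-65536 quotient
+ digit is estimated from the two leading numerator digits and the
+ leading denominator digit, clamped at 0xffff.  With a normalized
+ divisor the classic long-division analysis (Knuth, vol. 2) bounds
+ the overestimate by 2, which is why edivm corrects with at most two
+ subtractions. */
+#if 0 /* example only */
+unsigned short
+example_trial_digit (num_hi, num_lo, den_hi)
+ unsigned short num_hi, num_lo, den_hi;
+{
+ unsigned long tnum;
+
+ tnum = (((unsigned long) num_hi) << 16) + num_lo;
+ if ((unsigned long) den_hi * 0xffffL < tnum)
+ return 0xffff; /* the divide instruction would overflow */
+ return (unsigned short) (tnum / den_hi);
+}
+#endif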
+
+
+
+/* Multiply significands */
+static int
+emulm (a, b)
+ unsigned short a[], b[];
+{
+ unsigned short *p, *q;
+ unsigned short pprod[NI];
+ unsigned short j;
+ int i;
+
+ equot[0] = b[0];
+ equot[1] = b[1];
+ for (i=M; i<NI; i++)
+ equot[i] = 0;
+
+ j = 0;
+ p = &a[NI-1];
+ q = &equot[NI-1];
+ for (i=M+1; i<NI; i++)
+ {
+ if (*p == 0)
+ {
+ --p;
+ }
+ else
+ {
+ m16m ((unsigned int) *p--, b, pprod);
+ eaddm(pprod, equot);
+ }
+ j |= *q;
+ eshdn6(equot);
+ }
+
+ for (i=0; i<NI; i++)
+ b[i] = equot[i];
+
+ /* return flag for lost nonzero bits */
+ return ((int)j);
+}
+#endif
+
+
+/* Normalize and round off.
+
+ The internal format number to be rounded is "s".
+ Input "lost" indicates whether or not the number is exact.
+ This is the so-called sticky bit.
+
+ Input "subflg" indicates whether the number was obtained
+ by a subtraction operation. In that case if lost is nonzero
+ then the number is slightly smaller than indicated.
+
+ Input "exp" is the biased exponent, which may be negative.
+ The exponent field of "s" is ignored but is replaced by
+ "exp" as adjusted by normalization and rounding.
+
+ Input "rcntrl" is the rounding control.
+
+ For future reference: In order for emdnorm to round off denormal
+ significands at the right point, the input exponent must be
+ adjusted to be the actual value it would have after conversion to
+ the final floating point type. This adjustment has been
+ implemented for all type conversions (etoe53, etc.) and decimal
+ conversions, but not for the arithmetic functions (eadd, etc.).
+ Data types having standard 15-bit exponents are not affected by
+ this, but SFmode and DFmode are affected. For example, ediv with
+ rndprc = 24 will not round correctly to 24-bit precision if the
+ result is denormal. */
+
+static int rlast = -1;
+static int rw = 0;
+static unsigned EMUSHORT rmsk = 0;
+static unsigned EMUSHORT rmbit = 0;
+static unsigned EMUSHORT rebit = 0;
+static int re = 0;
+static unsigned EMUSHORT rbit[NI];
+
+static void
+emdnorm (s, lost, subflg, exp, rcntrl)
+ unsigned EMUSHORT s[];
+ int lost;
+ int subflg;
+ EMULONG exp;
+ int rcntrl;
+{
+ int i, j;
+ unsigned EMUSHORT r;
+
+ /* Normalize */
+ j = enormlz (s);
+
+ /* a blank significand could mean either zero or infinity. */
+#ifndef INFINITY
+ if (j > NBITS)
+ {
+ ecleazs (s);
+ return;
+ }
+#endif
+ exp -= j;
+#ifndef INFINITY
+ if (exp >= 32767L)
+ goto overf;
+#else
+ if ((j > NBITS) && (exp < 32767))
+ {
+ ecleazs (s);
+ return;
+ }
+#endif
+ if (exp < 0L)
+ {
+ if (exp > (EMULONG) (-NBITS - 1))
+ {
+ j = (int) exp;
+ i = eshift (s, j);
+ if (i)
+ lost = 1;
+ }
+ else
+ {
+ ecleazs (s);
+ return;
+ }
+ }
+ /* Round off, unless told not to by rcntrl. */
+ if (rcntrl == 0)
+ goto mdfin;
+ /* Set up rounding parameters if the control register changed. */
+ if (rndprc != rlast)
+ {
+ ecleaz (rbit);
+ switch (rndprc)
+ {
+ default:
+ case NBITS:
+ rw = NI - 1; /* low guard word */
+ rmsk = 0xffff;
+ rmbit = 0x8000;
+ re = rw - 1;
+ rebit = 1;
+ break;
+ case 113:
+ rw = 10;
+ rmsk = 0x7fff;
+ rmbit = 0x4000;
+ rebit = 0x8000;
+ re = rw;
+ break;
+ case 64:
+ rw = 7;
+ rmsk = 0xffff;
+ rmbit = 0x8000;
+ re = rw - 1;
+ rebit = 1;
+ break;
+ /* For DEC or IBM arithmetic */
+ case 56:
+ rw = 6;
+ rmsk = 0xff;
+ rmbit = 0x80;
+ rebit = 0x100;
+ re = rw;
+ break;
+ case 53:
+ rw = 6;
+ rmsk = 0x7ff;
+ rmbit = 0x0400;
+ rebit = 0x800;
+ re = rw;
+ break;
+ case 24:
+ rw = 4;
+ rmsk = 0xff;
+ rmbit = 0x80;
+ rebit = 0x100;
+ re = rw;
+ break;
+ }
+ rbit[re] = rebit;
+ rlast = rndprc;
+ }
+
+ /* Shift down 1 temporarily if the data structure has an implied
+ most significant bit and the number is denormal. */
+ if ((exp <= 0) && (rndprc != 64) && (rndprc != NBITS))
+ {
+ lost |= s[NI - 1] & 1;
+ eshdn1 (s);
+ }
+ /* Clear out all bits below the rounding bit,
+ remembering in r if any were nonzero. */
+ r = s[rw] & rmsk;
+ if (rndprc < NBITS)
+ {
+ i = rw + 1;
+ while (i < NI)
+ {
+ if (s[i])
+ r |= 1;
+ s[i] = 0;
+ ++i;
+ }
+ }
+ s[rw] &= ~rmsk;
+ if ((r & rmbit) != 0)
+ {
+ if (r == rmbit)
+ {
+ if (lost == 0)
+ { /* round to even */
+ if ((s[re] & rebit) == 0)
+ goto mddone;
+ }
+ else
+ {
+ if (subflg != 0)
+ goto mddone;
+ }
+ }
+ eaddm (rbit, s);
+ }
+ mddone:
+ if ((exp <= 0) && (rndprc != 64) && (rndprc != NBITS))
+ {
+ eshup1 (s);
+ }
+ if (s[2] != 0)
+ { /* overflow on roundoff */
+ eshdn1 (s);
+ exp += 1;
+ }
+ mdfin:
+ s[NI - 1] = 0;
+ if (exp >= 32767L)
+ {
+#ifndef INFINITY
+ overf:
+#endif
+#ifdef INFINITY
+ s[1] = 32767;
+ for (i = 2; i < NI - 1; i++)
+ s[i] = 0;
+ if (extra_warnings)
+ warning ("floating point overflow");
+#else
+ s[1] = 32766;
+ s[2] = 0;
+ for (i = M + 1; i < NI - 1; i++)
+ s[i] = 0xffff;
+ s[NI - 1] = 0;
+ if ((rndprc < 64) || (rndprc == 113))
+ {
+ s[rw] &= ~rmsk;
+ if (rndprc == 24)
+ {
+ s[5] = 0;
+ s[6] = 0;
+ }
+ }
+#endif
+ return;
+ }
+ if (exp < 0)
+ s[1] = 0;
+ else
+ s[1] = (unsigned EMUSHORT) exp;
+}
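+
+
+/* Illustrative sketch (an editorial addition, not from the import):
+ the rounding policy emdnorm implements is IEEE round to nearest,
+ ties to even.  The same decision on a plain 32-bit significand,
+ keeping the bits above "drop" (drop > 0 assumed) and folding
+ everything below the round bit into a sticky flag, as the "lost"
+ argument does above: */
+#if 0 /* example only */
+unsigned long
+example_round_nearest_even (sig, drop, lost)
+ unsigned long sig;
+ int drop, lost;
+{
+ unsigned long kept, round, below;
+
+ kept = sig >> drop;
+ round = (sig >> (drop - 1)) & 1; /* first discarded bit */
+ below = sig & ((((unsigned long) 1) << (drop - 1)) - 1);
+ if (round && (below || lost || (kept & 1)))
+ kept += 1; /* round up; exact ties go to the even value */
+ return kept;
+}
+#endif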
+
+
+
+/* Subtract external format numbers. */
+
+static int subflg = 0;
+
+static void
+esub (a, b, c)
+ unsigned EMUSHORT *a, *b, *c;
+{
+
+#ifdef NANS
+ if (eisnan (a))
+ {
+ emov (a, c);
+ return;
+ }
+ if (eisnan (b))
+ {
+ emov (b, c);
+ return;
+ }
+/* Infinity minus infinity is a NaN.
+ Test for subtracting infinities of the same sign. */
+ if (eisinf (a) && eisinf (b)
+ && ((eisneg (a) ^ eisneg (b)) == 0))
+ {
+ mtherr ("esub", INVALID);
+ enan (c, 0);
+ return;
+ }
+#endif
+ subflg = 1;
+ eadd1 (a, b, c);
+}
+
+
+/* Add. */
+
+static void
+eadd (a, b, c)
+ unsigned EMUSHORT *a, *b, *c;
+{
+
+#ifdef NANS
+/* NaN plus anything is a NaN. */
+ if (eisnan (a))
+ {
+ emov (a, c);
+ return;
+ }
+ if (eisnan (b))
+ {
+ emov (b, c);
+ return;
+ }
+/* Infinity minus infinity is a NaN.
+ Test for adding infinities of opposite signs. */
+ if (eisinf (a) && eisinf (b)
+ && ((eisneg (a) ^ eisneg (b)) != 0))
+ {
+ mtherr ("esub", INVALID);
+ enan (c, 0);
+ return;
+ }
+#endif
+ subflg = 0;
+ eadd1 (a, b, c);
+}
+
+static void
+eadd1 (a, b, c)
+ unsigned EMUSHORT *a, *b, *c;
+{
+ unsigned EMUSHORT ai[NI], bi[NI], ci[NI];
+ int i, lost, j, k;
+ EMULONG lt, lta, ltb;
+
+#ifdef INFINITY
+ if (eisinf (a))
+ {
+ emov (a, c);
+ if (subflg)
+ eneg (c);
+ return;
+ }
+ if (eisinf (b))
+ {
+ emov (b, c);
+ return;
+ }
+#endif
+ emovi (a, ai);
+ emovi (b, bi);
+ if (subflg)
+ ai[0] = ~ai[0];
+
+ /* compare exponents */
+ lta = ai[E];
+ ltb = bi[E];
+ lt = lta - ltb;
+ if (lt > 0L)
+ { /* put the larger number in bi */
+ emovz (bi, ci);
+ emovz (ai, bi);
+ emovz (ci, ai);
+ ltb = bi[E];
+ lt = -lt;
+ }
+ lost = 0;
+ if (lt != 0L)
+ {
+ if (lt < (EMULONG) (-NBITS - 1))
+ goto done; /* answer same as larger addend */
+ k = (int) lt;
+ lost = eshift (ai, k); /* shift the smaller number down */
+ }
+ else
+ {
+ /* exponents were the same, so must compare significands */
+ i = ecmpm (ai, bi);
+ if (i == 0)
+ { /* the numbers are identical in magnitude */
+ /* if different signs, result is zero */
+ if (ai[0] != bi[0])
+ {
+ eclear (c);
+ return;
+ }
+ /* if same sign, result is double */
+ /* double a denormalized tiny number */
+ if ((bi[E] == 0) && ((bi[3] & 0x8000) == 0))
+ {
+ eshup1 (bi);
+ goto done;
+ }
+ /* add 1 to exponent unless both are zero! */
+ for (j = 1; j < NI - 1; j++)
+ {
+ if (bi[j] != 0)
+ {
+ /* This could overflow, but let emovo take care of that. */
+ ltb += 1;
+ break;
+ }
+ }
+ bi[E] = (unsigned EMUSHORT) ltb;
+ goto done;
+ }
+ if (i > 0)
+ { /* put the larger number in bi */
+ emovz (bi, ci);
+ emovz (ai, bi);
+ emovz (ci, ai);
+ }
+ }
+ if (ai[0] == bi[0])
+ {
+ eaddm (ai, bi);
+ subflg = 0;
+ }
+ else
+ {
+ esubm (ai, bi);
+ subflg = 1;
+ }
+ emdnorm (bi, lost, subflg, ltb, 64);
+
+ done:
+ emovo (bi, c);
+}
+
+
+
+/* Divide. */
+
+static void
+ediv (a, b, c)
+ unsigned EMUSHORT *a, *b, *c;
+{
+ unsigned EMUSHORT ai[NI], bi[NI];
+ int i;
+ EMULONG lt, lta, ltb;
+
+#ifdef NANS
+/* Return any NaN input. */
+ if (eisnan (a))
+ {
+ emov (a, c);
+ return;
+ }
+ if (eisnan (b))
+ {
+ emov (b, c);
+ return;
+ }
+/* Zero over zero, or infinity over infinity, is a NaN. */
+ if (((ecmp (a, ezero) == 0) && (ecmp (b, ezero) == 0))
+ || (eisinf (a) && eisinf (b)))
+ {
+ mtherr ("ediv", INVALID);
+ enan (c, eisneg (a) ^ eisneg (b));
+ return;
+ }
+#endif
+/* Infinity over anything else is infinity. */
+#ifdef INFINITY
+ if (eisinf (b))
+ {
+ if (eisneg (a) ^ eisneg (b))
+ *(c + (NE - 1)) = 0x8000;
+ else
+ *(c + (NE - 1)) = 0;
+ einfin (c);
+ return;
+ }
+/* Anything else over infinity is zero. */
+ if (eisinf (a))
+ {
+ eclear (c);
+ return;
+ }
+#endif
+ emovi (a, ai);
+ emovi (b, bi);
+ lta = ai[E];
+ ltb = bi[E];
+ if (bi[E] == 0)
+ { /* See if numerator is zero. */
+ for (i = 1; i < NI - 1; i++)
+ {
+ if (bi[i] != 0)
+ {
+ ltb -= enormlz (bi);
+ goto dnzro1;
+ }
+ }
+ eclear (c);
+ return;
+ }
+ dnzro1:
+
+ if (ai[E] == 0)
+ { /* possible divide by zero */
+ for (i = 1; i < NI - 1; i++)
+ {
+ if (ai[i] != 0)
+ {
+ lta -= enormlz (ai);
+ goto dnzro2;
+ }
+ }
+ if (ai[0] == bi[0])
+ *(c + (NE - 1)) = 0;
+ else
+ *(c + (NE - 1)) = 0x8000;
+/* Divide by zero is not an invalid operation.
+ It is a divide-by-zero operation! */
+ einfin (c);
+ mtherr ("ediv", SING);
+ return;
+ }
+ dnzro2:
+
+ i = edivm (ai, bi);
+ /* calculate exponent */
+ lt = ltb - lta + EXONE;
+ emdnorm (bi, i, 0, lt, 64);
+ /* set the sign */
+ if (ai[0] == bi[0])
+ bi[0] = 0;
+ else
+ bi[0] = 0xffff;
+ emovo (bi, c);
+}
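+
+
+/* Illustrative sketch (an editorial addition, not from the import):
+ the exponent computation above, lt = ltb - lta + EXONE, is plain
+ re-biasing.  With lta = ea + EXONE and ltb = eb + EXONE, the true
+ quotient exponent is eb - ea (emdnorm absorbs the final
+ normalization), so the biased result is (eb - ea) + EXONE.  A check
+ with host doubles: */
+#if 0 /* example only */
+#include <math.h>
+#include <assert.h>
+void
+example_quotient_exponent ()
+{
+ int ea, eb, eq;
+
+ frexp (48.0, &ea); /* 48 = 0.75 * 2^6 */
+ frexp (3.0, &eb); /* 3 = 0.75 * 2^2 */
+ frexp (3.0 / 48.0, &eq); /* 1/16 = 0.5 * 2^-3 */
+ assert (eq == eb - ea + 1); /* the +1 is the normalization step */
+}
+#endif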
+
+
+
+/* Multiply. */
+
+static void
+emul (a, b, c)
+ unsigned EMUSHORT *a, *b, *c;
+{
+ unsigned EMUSHORT ai[NI], bi[NI];
+ int i, j;
+ EMULONG lt, lta, ltb;
+
+#ifdef NANS
+/* NaN times anything is the same NaN. */
+ if (eisnan (a))
+ {
+ emov (a, c);
+ return;
+ }
+ if (eisnan (b))
+ {
+ emov (b, c);
+ return;
+ }
+/* Zero times infinity is a NaN. */
+ if ((eisinf (a) && (ecmp (b, ezero) == 0))
+ || (eisinf (b) && (ecmp (a, ezero) == 0)))
+ {
+ mtherr ("emul", INVALID);
+ enan (c, eisneg (a) ^ eisneg (b));
+ return;
+ }
+#endif
+/* Infinity times anything else is infinity. */
+#ifdef INFINITY
+ if (eisinf (a) || eisinf (b))
+ {
+ if (eisneg (a) ^ eisneg (b))
+ *(c + (NE - 1)) = 0x8000;
+ else
+ *(c + (NE - 1)) = 0;
+ einfin (c);
+ return;
+ }
+#endif
+ emovi (a, ai);
+ emovi (b, bi);
+ lta = ai[E];
+ ltb = bi[E];
+ if (ai[E] == 0)
+ {
+ for (i = 1; i < NI - 1; i++)
+ {
+ if (ai[i] != 0)
+ {
+ lta -= enormlz (ai);
+ goto mnzer1;
+ }
+ }
+ eclear (c);
+ return;
+ }
+ mnzer1:
+
+ if (bi[E] == 0)
+ {
+ for (i = 1; i < NI - 1; i++)
+ {
+ if (bi[i] != 0)
+ {
+ ltb -= enormlz (bi);
+ goto mnzer2;
+ }
+ }
+ eclear (c);
+ return;
+ }
+ mnzer2:
+
+ /* Multiply significands */
+ j = emulm (ai, bi);
+ /* calculate exponent */
+ lt = lta + ltb - (EXONE - 1);
+ emdnorm (bi, j, 0, lt, 64);
+ /* calculate sign of product */
+ if (ai[0] == bi[0])
+ bi[0] = 0;
+ else
+ bi[0] = 0xffff;
+ emovo (bi, c);
+}
+
+
+
+
+/* Convert IEEE double precision to e type. */
+
+static void
+e53toe (pe, y)
+ unsigned EMUSHORT *pe, *y;
+{
+#ifdef DEC
+
+ dectoe (pe, y); /* see etodec.c */
+
+#else
+#ifdef IBM
+
+ ibmtoe (pe, y, DFmode);
+
+#else
+ register unsigned EMUSHORT r;
+ register unsigned EMUSHORT *e, *p;
+ unsigned EMUSHORT yy[NI];
+ int denorm, k;
+
+ e = pe;
+ denorm = 0; /* flag if denormalized number */
+ ecleaz (yy);
+#ifdef IBMPC
+ e += 3;
+#endif
+ r = *e;
+ yy[0] = 0;
+ if (r & 0x8000)
+ yy[0] = 0xffff;
+ yy[M] = (r & 0x0f) | 0x10;
+ r &= ~0x800f; /* strip sign and 4 significand bits */
+#ifdef INFINITY
+ if (r == 0x7ff0)
+ {
+#ifdef NANS
+#ifdef IBMPC
+ if (((pe[3] & 0xf) != 0) || (pe[2] != 0)
+ || (pe[1] != 0) || (pe[0] != 0))
+ {
+ enan (y, yy[0] != 0);
+ return;
+ }
+#else
+ if (((pe[0] & 0xf) != 0) || (pe[1] != 0)
+ || (pe[2] != 0) || (pe[3] != 0))
+ {
+ enan (y, yy[0] != 0);
+ return;
+ }
+#endif
+#endif /* NANS */
+ eclear (y);
+ einfin (y);
+ if (yy[0])
+ eneg (y);
+ return;
+ }
+#endif /* INFINITY */
+ r >>= 4;
+ /* If zero exponent, then the significand is denormalized.
+ So take back the understood high significand bit. */
+
+ if (r == 0)
+ {
+ denorm = 1;
+ yy[M] &= ~0x10;
+ }
+ r += EXONE - 01777;
+ yy[E] = r;
+ p = &yy[M + 1];
+#ifdef IBMPC
+ *p++ = *(--e);
+ *p++ = *(--e);
+ *p++ = *(--e);
+#endif
+#ifdef MIEEE
+ ++e;
+ *p++ = *e++;
+ *p++ = *e++;
+ *p++ = *e++;
+#endif
+ eshift (yy, -5);
+ if (denorm)
+ { /* if zero exponent, then normalize the significand */
+ if ((k = enormlz (yy)) > NBITS)
+ ecleazs (yy);
+ else
+ yy[E] -= (unsigned EMUSHORT) (k - 1);
+ }
+ emovo (yy, y);
+#endif /* not IBM */
+#endif /* not DEC */
+}
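+
+/* Illustrative sketch (an editorial addition, not from the import):
+ the masking above takes apart the most significant 16 bits of an
+ IEEE double: 1 sign bit, 11 exponent bits (bias 1023, the octal
+ 01777 used above), and the top 4 of the 52 explicit significand
+ bits. */
+#if 0 /* example only */
+void
+example_double_top_word (w, sign, biased_exp, sig_top4)
+ unsigned short w;
+ int *sign, *biased_exp, *sig_top4;
+{
+ *sign = (w >> 15) & 1;
+ *biased_exp = (w >> 4) & 0x7ff; /* 0 = denormal, 0x7ff = inf or NaN */
+ *sig_top4 = w & 0x0f; /* the implied leading 1 is added separately */
+}
+#endif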
+
+static void
+e64toe (pe, y)
+ unsigned EMUSHORT *pe, *y;
+{
+ unsigned EMUSHORT yy[NI];
+ unsigned EMUSHORT *e, *p, *q;
+ int i;
+
+ e = pe;
+ p = yy;
+ for (i = 0; i < NE - 5; i++)
+ *p++ = 0;
+#ifdef IBMPC
+ for (i = 0; i < 5; i++)
+ *p++ = *e++;
+#endif
+/* This precision is not ordinarily supported on DEC or IBM. */
+#ifdef DEC
+ for (i = 0; i < 5; i++)
+ *p++ = *e++;
+#endif
+#ifdef IBM
+ p = &yy[0] + (NE - 1);
+ *p-- = *e++;
+ ++e;
+ for (i = 0; i < 5; i++)
+ *p-- = *e++;
+#endif
+#ifdef MIEEE
+ p = &yy[0] + (NE - 1);
+ *p-- = *e++;
+ ++e;
+ for (i = 0; i < 4; i++)
+ *p-- = *e++;
+#endif
+ p = yy;
+ q = y;
+#ifdef INFINITY
+ if (*p == 0x7fff)
+ {
+#ifdef NANS
+#ifdef IBMPC
+ for (i = 0; i < 4; i++)
+ {
+ if (pe[i] != 0)
+ {
+ enan (y, (*p & 0x8000) != 0);
+ return;
+ }
+ }
+#else
+ for (i = 1; i <= 4; i++)
+ {
+ if (pe[i] != 0)
+ {
+ enan (y, (*p & 0x8000) != 0);
+ return;
+ }
+ }
+#endif
+#endif /* NANS */
+ eclear (y);
+ einfin (y);
+ if (*p & 0x8000)
+ eneg (y);
+ return;
+ }
+#endif /* INFINITY */
+ for (i = 0; i < NE; i++)
+ *q++ = *p++;
+}
+
+
+static void
+e113toe (pe, y)
+ unsigned EMUSHORT *pe, *y;
+{
+ register unsigned EMUSHORT r;
+ unsigned EMUSHORT *e, *p;
+ unsigned EMUSHORT yy[NI];
+ int denorm, i;
+
+ e = pe;
+ denorm = 0;
+ ecleaz (yy);
+#ifdef IBMPC
+ e += 7;
+#endif
+ r = *e;
+ yy[0] = 0;
+ if (r & 0x8000)
+ yy[0] = 0xffff;
+ r &= 0x7fff;
+#ifdef INFINITY
+ if (r == 0x7fff)
+ {
+#ifdef NANS
+#ifdef IBMPC
+ for (i = 0; i < 7; i++)
+ {
+ if (pe[i] != 0)
+ {
+ enan (y, yy[0] != 0);
+ return;
+ }
+ }
+#else
+ for (i = 1; i < 8; i++)
+ {
+ if (pe[i] != 0)
+ {
+ enan (y, yy[0] != 0);
+ return;
+ }
+ }
+#endif
+#endif /* NANS */
+ eclear (y);
+ einfin (y);
+ if (yy[0])
+ eneg (y);
+ return;
+ }
+#endif /* INFINITY */
+ yy[E] = r;
+ p = &yy[M + 1];
+#ifdef IBMPC
+ for (i = 0; i < 7; i++)
+ *p++ = *(--e);
+#endif
+#ifdef MIEEE
+ ++e;
+ for (i = 0; i < 7; i++)
+ *p++ = *e++;
+#endif
+/* If denormal, there is no implied bit; else insert it and shift down 1. */
+ if (r == 0)
+ {
+ yy[M] = 0;
+ }
+ else
+ {
+ yy[M] = 1;
+ eshift (yy, -1);
+ }
+ emovo (yy, y);
+}
+
+
+/* Convert IEEE single precision to e type. */
+
+static void
+e24toe (pe, y)
+ unsigned EMUSHORT *pe, *y;
+{
+#ifdef IBM
+
+ ibmtoe (pe, y, SFmode);
+
+#else
+ register unsigned EMUSHORT r;
+ register unsigned EMUSHORT *e, *p;
+ unsigned EMUSHORT yy[NI];
+ int denorm, k;
+
+ e = pe;
+ denorm = 0; /* flag if denormalized number */
+ ecleaz (yy);
+#ifdef IBMPC
+ e += 1;
+#endif
+#ifdef DEC
+ e += 1;
+#endif
+ r = *e;
+ yy[0] = 0;
+ if (r & 0x8000)
+ yy[0] = 0xffff;
+ yy[M] = (r & 0x7f) | 0200;
+ r &= ~0x807f; /* strip sign and 7 significand bits */
+#ifdef INFINITY
+ if (r == 0x7f80)
+ {
+#ifdef NANS
+#ifdef MIEEE
+ if (((pe[0] & 0x7f) != 0) || (pe[1] != 0))
+ {
+ enan (y, yy[0] != 0);
+ return;
+ }
+#else
+ if (((pe[1] & 0x7f) != 0) || (pe[0] != 0))
+ {
+ enan (y, yy[0] != 0);
+ return;
+ }
+#endif
+#endif /* NANS */
+ eclear (y);
+ einfin (y);
+ if (yy[0])
+ eneg (y);
+ return;
+ }
+#endif /* INFINITY */
+ r >>= 7;
+ /* If zero exponent, then the significand is denormalized.
+ So take back the understood high significand bit. */
+ if (r == 0)
+ {
+ denorm = 1;
+ yy[M] &= ~0200;
+ }
+ r += EXONE - 0177;
+ yy[E] = r;
+ p = &yy[M + 1];
+#ifdef IBMPC
+ *p++ = *(--e);
+#endif
+#ifdef DEC
+ *p++ = *(--e);
+#endif
+#ifdef MIEEE
+ ++e;
+ *p++ = *e++;
+#endif
+ eshift (yy, -8);
+ if (denorm)
+ { /* if zero exponent, then normalize the significand */
+ if ((k = enormlz (yy)) > NBITS)
+ ecleazs (yy);
+ else
+ yy[E] -= (unsigned EMUSHORT) (k - 1);
+ }
+ emovo (yy, y);
+#endif /* not IBM */
+}
+
+
+static void
+etoe113 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG exp;
+ int rndsav;
+
+#ifdef NANS
+ if (eisnan (x))
+ {
+ make_nan (e, eisneg (x), TFmode);
+ return;
+ }
+#endif
+ emovi (x, xi);
+ exp = (EMULONG) xi[E];
+#ifdef INFINITY
+ if (eisinf (x))
+ goto nonorm;
+#endif
+ /* round off to nearest or even */
+ rndsav = rndprc;
+ rndprc = 113;
+ emdnorm (xi, 0, 0, exp, 64);
+ rndprc = rndsav;
+ nonorm:
+ toe113 (xi, e);
+}
+
+/* Move out internal format to ieee long double */
+
+static void
+toe113 (a, b)
+ unsigned EMUSHORT *a, *b;
+{
+ register unsigned EMUSHORT *p, *q;
+ unsigned EMUSHORT i;
+
+#ifdef NANS
+ if (eiisnan (a))
+ {
+ make_nan (b, eiisneg (a), TFmode);
+ return;
+ }
+#endif
+ p = a;
+#ifdef MIEEE
+ q = b;
+#else
+ q = b + 7; /* point to output exponent */
+#endif
+
+ /* If not denormal, delete the implied bit. */
+ if (a[E] != 0)
+ {
+ eshup1 (a);
+ }
+ /* combine sign and exponent */
+ i = *p++;
+#ifdef MIEEE
+ if (i)
+ *q++ = *p++ | 0x8000;
+ else
+ *q++ = *p++;
+#else
+ if (i)
+ *q-- = *p++ | 0x8000;
+ else
+ *q-- = *p++;
+#endif
+ /* skip over guard word */
+ ++p;
+ /* move the significand */
+#ifdef MIEEE
+ for (i = 0; i < 7; i++)
+ *q++ = *p++;
+#else
+ for (i = 0; i < 7; i++)
+ *q-- = *p++;
+#endif
+}
+
+static void
+etoe64 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG exp;
+ int rndsav;
+
+#ifdef NANS
+ if (eisnan (x))
+ {
+ make_nan (e, eisneg (x), XFmode);
+ return;
+ }
+#endif
+ emovi (x, xi);
+ /* adjust exponent for offset */
+ exp = (EMULONG) xi[E];
+#ifdef INFINITY
+ if (eisinf (x))
+ goto nonorm;
+#endif
+ /* round off to nearest or even */
+ rndsav = rndprc;
+ rndprc = 64;
+ emdnorm (xi, 0, 0, exp, 64);
+ rndprc = rndsav;
+ nonorm:
+ toe64 (xi, e);
+}
+
+
+/* Move out internal format to ieee long double. */
+
+static void
+toe64 (a, b)
+ unsigned EMUSHORT *a, *b;
+{
+ register unsigned EMUSHORT *p, *q;
+ unsigned EMUSHORT i;
+
+#ifdef NANS
+ if (eiisnan (a))
+ {
+ make_nan (b, eiisneg (a), XFmode);
+ return;
+ }
+#endif
+ p = a;
+#if defined(MIEEE) || defined(IBM)
+ q = b;
+#else
+ q = b + 4; /* point to output exponent */
+#if LONG_DOUBLE_TYPE_SIZE == 96
+ /* Clear the last two bytes of 12-byte Intel format */
+ *(q+1) = 0;
+#endif
+#endif
+
+ /* combine sign and exponent */
+ i = *p++;
+#if defined(MIEEE) || defined(IBM)
+ if (i)
+ *q++ = *p++ | 0x8000;
+ else
+ *q++ = *p++;
+ *q++ = 0;
+#else
+ if (i)
+ *q-- = *p++ | 0x8000;
+ else
+ *q-- = *p++;
+#endif
+ /* skip over guard word */
+ ++p;
+ /* move the significand */
+#if defined(MIEEE) || defined(IBM)
+ for (i = 0; i < 4; i++)
+ *q++ = *p++;
+#else
+ for (i = 0; i < 4; i++)
+ *q-- = *p++;
+#endif
+}
+
+
+/* e type to IEEE double precision. */
+
+#ifdef DEC
+
+static void
+etoe53 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ etodec (x, e); /* see etodec.c */
+}
+
+static void
+toe53 (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ todec (x, y);
+}
+
+#else
+#ifdef IBM
+
+static void
+etoe53 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ etoibm (x, e, DFmode);
+}
+
+static void
+toe53 (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ toibm (x, y, DFmode);
+}
+
+#else /* it's neither DEC nor IBM */
+
+static void
+etoe53 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG exp;
+ int rndsav;
+
+#ifdef NANS
+ if (eisnan (x))
+ {
+ make_nan (e, eisneg (x), DFmode);
+ return;
+ }
+#endif
+ emovi (x, xi);
+ /* adjust exponent for offsets */
+ exp = (EMULONG) xi[E] - (EXONE - 0x3ff);
+#ifdef INFINITY
+ if (eisinf (x))
+ goto nonorm;
+#endif
+ /* round off to nearest or even */
+ rndsav = rndprc;
+ rndprc = 53;
+ emdnorm (xi, 0, 0, exp, 64);
+ rndprc = rndsav;
+ nonorm:
+ toe53 (xi, e);
+}
+
+
+static void
+toe53 (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ unsigned EMUSHORT i;
+ unsigned EMUSHORT *p;
+
+#ifdef NANS
+ if (eiisnan (x))
+ {
+ make_nan (y, eiisneg (x), DFmode);
+ return;
+ }
+#endif
+ p = &x[0];
+#ifdef IBMPC
+ y += 3;
+#endif
+ *y = 0; /* output high order */
+ if (*p++)
+ *y = 0x8000; /* output sign bit */
+
+ i = *p++;
+ if (i >= (unsigned int) 2047)
+ { /* Saturate at largest number less than infinity. */
+#ifdef INFINITY
+ *y |= 0x7ff0;
+#ifdef IBMPC
+ *(--y) = 0;
+ *(--y) = 0;
+ *(--y) = 0;
+#endif
+#ifdef MIEEE
+ ++y;
+ *y++ = 0;
+ *y++ = 0;
+ *y++ = 0;
+#endif
+#else
+ *y |= (unsigned EMUSHORT) 0x7fef;
+#ifdef IBMPC
+ *(--y) = 0xffff;
+ *(--y) = 0xffff;
+ *(--y) = 0xffff;
+#endif
+#ifdef MIEEE
+ ++y;
+ *y++ = 0xffff;
+ *y++ = 0xffff;
+ *y++ = 0xffff;
+#endif
+#endif
+ return;
+ }
+ if (i == 0)
+ {
+ eshift (x, 4);
+ }
+ else
+ {
+ i <<= 4;
+ eshift (x, 5);
+ }
+ i |= *p++ & (unsigned EMUSHORT) 0x0f; /* *p = xi[M] */
+ *y |= (unsigned EMUSHORT) i; /* high order output already has sign bit set */
+#ifdef IBMPC
+ *(--y) = *p++;
+ *(--y) = *p++;
+ *(--y) = *p;
+#endif
+#ifdef MIEEE
+ ++y;
+ *y++ = *p++;
+ *y++ = *p++;
+ *y++ = *p++;
+#endif
+}
+
+#endif /* not IBM */
+#endif /* not DEC */
+
+
+
+/* e type to IEEE single precision. */
+
+#ifdef IBM
+
+static void
+etoe24 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ etoibm (x, e, SFmode);
+}
+
+static void
+toe24 (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ toibm (x, y, SFmode);
+}
+
+#else
+
+static void
+etoe24 (x, e)
+ unsigned EMUSHORT *x, *e;
+{
+ EMULONG exp;
+ unsigned EMUSHORT xi[NI];
+ int rndsav;
+
+#ifdef NANS
+ if (eisnan (x))
+ {
+ make_nan (e, eisneg (x), SFmode);
+ return;
+ }
+#endif
+ emovi (x, xi);
+ /* adjust exponent for offsets */
+ exp = (EMULONG) xi[E] - (EXONE - 0177);
+#ifdef INFINITY
+ if (eisinf (x))
+ goto nonorm;
+#endif
+ /* round off to nearest or even */
+ rndsav = rndprc;
+ rndprc = 24;
+ emdnorm (xi, 0, 0, exp, 64);
+ rndprc = rndsav;
+ nonorm:
+ toe24 (xi, e);
+}
+
+static void
+toe24 (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ unsigned EMUSHORT i;
+ unsigned EMUSHORT *p;
+
+#ifdef NANS
+ if (eiisnan (x))
+ {
+ make_nan (y, eiisneg (x), SFmode);
+ return;
+ }
+#endif
+ p = &x[0];
+#ifdef IBMPC
+ y += 1;
+#endif
+#ifdef DEC
+ y += 1;
+#endif
+ *y = 0; /* output high order */
+ if (*p++)
+ *y = 0x8000; /* output sign bit */
+
+ i = *p++;
+/* Handle overflow cases. */
+ if (i >= 255)
+ {
+#ifdef INFINITY
+ *y |= (unsigned EMUSHORT) 0x7f80;
+#ifdef IBMPC
+ *(--y) = 0;
+#endif
+#ifdef DEC
+ *(--y) = 0;
+#endif
+#ifdef MIEEE
+ ++y;
+ *y = 0;
+#endif
+#else /* no INFINITY */
+ *y |= (unsigned EMUSHORT) 0x7f7f;
+#ifdef IBMPC
+ *(--y) = 0xffff;
+#endif
+#ifdef DEC
+ *(--y) = 0xffff;
+#endif
+#ifdef MIEEE
+ ++y;
+ *y = 0xffff;
+#endif
+#ifdef ERANGE
+ errno = ERANGE;
+#endif
+#endif /* no INFINITY */
+ return;
+ }
+ if (i == 0)
+ {
+ eshift (x, 7);
+ }
+ else
+ {
+ i <<= 7;
+ eshift (x, 8);
+ }
+ i |= *p++ & (unsigned EMUSHORT) 0x7f; /* *p = xi[M] */
+ *y |= i; /* high order output already has sign bit set */
+#ifdef IBMPC
+ *(--y) = *p;
+#endif
+#ifdef DEC
+ *(--y) = *p;
+#endif
+#ifdef MIEEE
+ ++y;
+ *y = *p;
+#endif
+}
+#endif /* not IBM */
+
+/* Compare two e type numbers.
+ Return +1 if a > b
+ 0 if a == b
+ -1 if a < b
+ -2 if either a or b is a NaN. */
+
+static int
+ecmp (a, b)
+ unsigned EMUSHORT *a, *b;
+{
+ unsigned EMUSHORT ai[NI], bi[NI];
+ register unsigned EMUSHORT *p, *q;
+ register int i;
+ int msign;
+
+#ifdef NANS
+ if (eisnan (a) || eisnan (b))
+ return (-2);
+#endif
+ emovi (a, ai);
+ p = ai;
+ emovi (b, bi);
+ q = bi;
+
+ if (*p != *q)
+ { /* the signs are different */
+ /* -0 equals + 0 */
+ for (i = 1; i < NI - 1; i++)
+ {
+ if (ai[i] != 0)
+ goto nzro;
+ if (bi[i] != 0)
+ goto nzro;
+ }
+ return (0);
+ nzro:
+ if (*p == 0)
+ return (1);
+ else
+ return (-1);
+ }
+ /* both are the same sign */
+ if (*p == 0)
+ msign = 1;
+ else
+ msign = -1;
+ i = NI - 1;
+ do
+ {
+ if (*p++ != *q++)
+ {
+ goto diff;
+ }
+ }
+ while (--i > 0);
+
+ return (0); /* equality */
+
+
+
+ diff:
+
+ if (*(--p) > *(--q))
+ return (msign); /* p is bigger */
+ else
+ return (-msign); /* p is smaller */
+}
+
+
+
+
+/* Find nearest integer to x = floor (x + 0.5). */
+
+static void
+eround (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ eadd (ehalf, x, y);
+ efloor (y, y);
+}
+
+
+
+
+/* Convert HOST_WIDE_INT to e type. */
+
+static void
+ltoe (lp, y)
+ HOST_WIDE_INT *lp;
+ unsigned EMUSHORT *y;
+{
+ unsigned EMUSHORT yi[NI];
+ unsigned HOST_WIDE_INT ll;
+ int k;
+
+ ecleaz (yi);
+ if (*lp < 0)
+ {
+ /* make it positive */
+ ll = (unsigned HOST_WIDE_INT) (-(*lp));
+ yi[0] = 0xffff; /* put correct sign in the e type number */
+ }
+ else
+ {
+ ll = (unsigned HOST_WIDE_INT) (*lp);
+ }
+ /* move the long integer to yi significand area */
+#if HOST_BITS_PER_WIDE_INT == 64
+ yi[M] = (unsigned EMUSHORT) (ll >> 48);
+ yi[M + 1] = (unsigned EMUSHORT) (ll >> 32);
+ yi[M + 2] = (unsigned EMUSHORT) (ll >> 16);
+ yi[M + 3] = (unsigned EMUSHORT) ll;
+ yi[E] = EXONE + 47; /* exponent if normalize shift count were 0 */
+#else
+ yi[M] = (unsigned EMUSHORT) (ll >> 16);
+ yi[M + 1] = (unsigned EMUSHORT) ll;
+ yi[E] = EXONE + 15; /* exponent if normalize shift count were 0 */
+#endif
+
+ if ((k = enormlz (yi)) > NBITS)/* normalize the significand */
+ ecleaz (yi); /* it was zero */
+ else
+ yi[E] -= (unsigned EMUSHORT) k;/* subtract shift count from exponent */
+ emovo (yi, y); /* output the answer */
+}
+
+/* Convert unsigned HOST_WIDE_INT to e type. */
+
+static void
+ultoe (lp, y)
+ unsigned HOST_WIDE_INT *lp;
+ unsigned EMUSHORT *y;
+{
+ unsigned EMUSHORT yi[NI];
+ unsigned HOST_WIDE_INT ll;
+ int k;
+
+ ecleaz (yi);
+ ll = *lp;
+
+ /* move the long integer to yi significand area */
+#if HOST_BITS_PER_WIDE_INT == 64
+ yi[M] = (unsigned EMUSHORT) (ll >> 48);
+ yi[M + 1] = (unsigned EMUSHORT) (ll >> 32);
+ yi[M + 2] = (unsigned EMUSHORT) (ll >> 16);
+ yi[M + 3] = (unsigned EMUSHORT) ll;
+ yi[E] = EXONE + 47; /* exponent if normalize shift count were 0 */
+#else
+ yi[M] = (unsigned EMUSHORT) (ll >> 16);
+ yi[M + 1] = (unsigned EMUSHORT) ll;
+ yi[E] = EXONE + 15; /* exponent if normalize shift count were 0 */
+#endif
+
+ if ((k = enormlz (yi)) > NBITS)/* normalize the significand */
+ ecleaz (yi); /* it was zero */
+ else
+ yi[E] -= (unsigned EMUSHORT) k; /* subtract shift count from exponent */
+ emovo (yi, y); /* output the answer */
+}
+
+
+/* Find signed HOST_WIDE_INT integer and floating point fractional
+ parts of e-type (packed internal format) floating point input X.
+ The integer output I has the sign of the input, except that
+ positive overflow is permitted if FIXUNS_TRUNC_LIKE_FIX_TRUNC.
+ The output e-type fraction FRAC is the positive fractional
+ part of abs (X). */
+
+static void
+eifrac (x, i, frac)
+ unsigned EMUSHORT *x;
+ HOST_WIDE_INT *i;
+ unsigned EMUSHORT *frac;
+{
+ unsigned EMUSHORT xi[NI];
+ int j, k;
+ unsigned HOST_WIDE_INT ll;
+
+ emovi (x, xi);
+ k = (int) xi[E] - (EXONE - 1);
+ if (k <= 0)
+ {
+ /* if exponent <= 0, integer = 0 and real output is fraction */
+ *i = 0L;
+ emovo (xi, frac);
+ return;
+ }
+ if (k > (HOST_BITS_PER_WIDE_INT - 1))
+ {
+ /* long integer overflow: output large integer
+ and correct fraction */
+ if (xi[0])
+ *i = ((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1);
+ else
+ {
+#ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
+ /* In this case, let it overflow and convert as if unsigned. */
+ euifrac (x, &ll, frac);
+ *i = (HOST_WIDE_INT) ll;
+ return;
+#else
+ /* In other cases, return the largest positive integer. */
+ *i = (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1;
+#endif
+ }
+ eshift (xi, k);
+ if (extra_warnings)
+ warning ("overflow on truncation to integer");
+ }
+ else if (k > 16)
+ {
+ /* Shift more than 16 bits: first shift up by k mod 16 bits,
+ then by whole 16-bit words. */
+ j = k - ((k >> 4) << 4);
+ eshift (xi, j);
+ ll = xi[M];
+ k -= j;
+ do
+ {
+ eshup6 (xi);
+ ll = (ll << 16) | xi[M];
+ }
+ while ((k -= 16) > 0);
+ *i = ll;
+ if (xi[0])
+ *i = -(*i);
+ }
+ else
+ {
+ /* shift not more than 16 bits */
+ eshift (xi, k);
+ *i = (HOST_WIDE_INT) xi[M] & 0xffff;
+ if (xi[0])
+ *i = -(*i);
+ }
+ xi[0] = 0;
+ xi[E] = EXONE - 1;
+ xi[M] = 0;
+ if ((k = enormlz (xi)) > NBITS)
+ ecleaz (xi);
+ else
+ xi[E] -= (unsigned EMUSHORT) k;
+
+ emovo (xi, frac);
+}
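+
+
+/* Illustrative sketch (an editorial addition, not from the import): in
+ host terms eifrac behaves like modf plus a truncating cast, except
+ that the fraction it returns is always the positive fractional part
+ of abs (x), per the comment above: */
+#if 0 /* example only */
+#include <math.h>
+void
+example_ifrac (x, i, frac)
+ double x;
+ long *i;
+ double *frac;
+{
+ double ipart;
+
+ *frac = fabs (modf (x, &ipart)); /* positive fractional part */
+ *i = (long) ipart; /* truncates toward zero */
+}
+#endif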
+
+
+/* Find unsigned HOST_WIDE_INT integer and floating point fractional parts.
+ A negative e type input yields integer output = 0
+ but correct fraction. */
+
+static void
+euifrac (x, i, frac)
+ unsigned EMUSHORT *x;
+ unsigned HOST_WIDE_INT *i;
+ unsigned EMUSHORT *frac;
+{
+ unsigned HOST_WIDE_INT ll;
+ unsigned EMUSHORT xi[NI];
+ int j, k;
+
+ emovi (x, xi);
+ k = (int) xi[E] - (EXONE - 1);
+ if (k <= 0)
+ {
+ /* if exponent <= 0, integer = 0 and argument is fraction */
+ *i = 0L;
+ emovo (xi, frac);
+ return;
+ }
+ if (k > HOST_BITS_PER_WIDE_INT)
+ {
+ /* Long integer overflow: output large integer
+ and correct fraction.
+ Note, the BSD microvax compiler says that ~(0UL)
+ is a syntax error. */
+ *i = ~(0L);
+ eshift (xi, k);
+ if (extra_warnings)
+ warning ("overflow on truncation to unsigned integer");
+ }
+ else if (k > 16)
+ {
+ /* Shift more than 16 bits: first shift up k-16 mod 16,
+ then shift up by 16's. */
+ j = k - ((k >> 4) << 4);
+ eshift (xi, j);
+ ll = xi[M];
+ k -= j;
+ do
+ {
+ eshup6 (xi);
+ ll = (ll << 16) | xi[M];
+ }
+ while ((k -= 16) > 0);
+ *i = ll;
+ }
+ else
+ {
+ /* shift not more than 16 bits */
+ eshift (xi, k);
+ *i = (HOST_WIDE_INT) xi[M] & 0xffff;
+ }
+
+ if (xi[0]) /* A negative value yields unsigned integer 0. */
+ *i = 0L;
+
+ xi[0] = 0;
+ xi[E] = EXONE - 1;
+ xi[M] = 0;
+ if ((k = enormlz (xi)) > NBITS)
+ ecleaz (xi);
+ else
+ xi[E] -= (unsigned EMUSHORT) k;
+
+ emovo (xi, frac);
+}
+
+
+
+/* Shift significand area up or down by the number of bits given by SC. */
+
+static int
+eshift (x, sc)
+ unsigned EMUSHORT *x;
+ int sc;
+{
+ unsigned EMUSHORT lost;
+ unsigned EMUSHORT *p;
+
+ if (sc == 0)
+ return (0);
+
+ lost = 0;
+ p = x + NI - 1;
+
+ if (sc < 0)
+ {
+ sc = -sc;
+ while (sc >= 16)
+ {
+ lost |= *p; /* remember lost bits */
+ eshdn6 (x);
+ sc -= 16;
+ }
+
+ while (sc >= 8)
+ {
+ lost |= *p & 0xff;
+ eshdn8 (x);
+ sc -= 8;
+ }
+
+ while (sc > 0)
+ {
+ lost |= *p & 1;
+ eshdn1 (x);
+ sc -= 1;
+ }
+ }
+ else
+ {
+ while (sc >= 16)
+ {
+ eshup6 (x);
+ sc -= 16;
+ }
+
+ while (sc >= 8)
+ {
+ eshup8 (x);
+ sc -= 8;
+ }
+
+ while (sc > 0)
+ {
+ eshup1 (x);
+ sc -= 1;
+ }
+ }
+ if (lost)
+ lost = 1;
+ return ((int) lost);
+}
+
+
+
+/* Shift normalize the significand area pointed to by argument.
+ Shift count (up = positive) is returned. */
+
+static int
+enormlz (x)
+ unsigned EMUSHORT x[];
+{
+ register unsigned EMUSHORT *p;
+ int sc;
+
+ sc = 0;
+ p = &x[M];
+ if (*p != 0)
+ goto normdn;
+ ++p;
+ if (*p & 0x8000)
+ return (0); /* already normalized */
+ while (*p == 0)
+ {
+ eshup6 (x);
+ sc += 16;
+
+ /* With the guard word, there are NBITS+16 bits available.
+ Return an oversized shift count (> NBITS) if all of them are zero. */
+ if (sc > NBITS)
+ return (sc);
+ }
+ /* see if high byte is zero */
+ while ((*p & 0xff00) == 0)
+ {
+ eshup8 (x);
+ sc += 8;
+ }
+ /* now shift 1 bit at a time */
+ while ((*p & 0x8000) == 0)
+ {
+ eshup1 (x);
+ sc += 1;
+ if (sc > NBITS)
+ {
+ mtherr ("enormlz", UNDERFLOW);
+ return (sc);
+ }
+ }
+ return (sc);
+
+ /* Normalize by shifting down out of the high guard word
+ of the significand */
+ normdn:
+
+ if (*p & 0xff00)
+ {
+ eshdn8 (x);
+ sc -= 8;
+ }
+ while (*p != 0)
+ {
+ eshdn1 (x);
+ sc -= 1;
+
+ if (sc < -NBITS)
+ {
+ mtherr ("enormlz", OVERFLOW);
+ return (sc);
+ }
+ }
+ return (sc);
+}
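+
+/* Illustrative sketch, not part of the original source: the same
+ normalization on a single 32-bit word. Shift left until the top
+ bit is set and return the shift count, which the caller subtracts
+ from the exponent just as ultoe and ditoe do. Assumes a 32-bit
+ unsigned long; the name is hypothetical. */
+#if 0 /* example only */
+static int
+normlz32 (x)
+ unsigned long *x;
+{
+ int sc = 0;
+
+ if (*x == 0)
+ return 32; /* out-of-range count signals an all-zero word */
+ while ((*x >> 31) == 0)
+ {
+ *x <<= 1;
+ sc += 1;
+ }
+ return sc;
+}
+#endif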
+
+
+
+
+/* Convert e type number to decimal format ASCII string.
+ The constants are for 64 bit precision. */
+
+#define NTEN 12
+#define MAXP 4096
+
+#if LONG_DOUBLE_TYPE_SIZE == 128
+static unsigned EMUSHORT etens[NTEN + 1][NE] =
+{
+ {0x6576, 0x4a92, 0x804a, 0x153f,
+ 0xc94c, 0x979a, 0x8a20, 0x5202, 0xc460, 0x7525,}, /* 10**4096 */
+ {0x6a32, 0xce52, 0x329a, 0x28ce,
+ 0xa74d, 0x5de4, 0xc53d, 0x3b5d, 0x9e8b, 0x5a92,}, /* 10**2048 */
+ {0x526c, 0x50ce, 0xf18b, 0x3d28,
+ 0x650d, 0x0c17, 0x8175, 0x7586, 0xc976, 0x4d48,},
+ {0x9c66, 0x58f8, 0xbc50, 0x5c54,
+ 0xcc65, 0x91c6, 0xa60e, 0xa0ae, 0xe319, 0x46a3,},
+ {0x851e, 0xeab7, 0x98fe, 0x901b,
+ 0xddbb, 0xde8d, 0x9df9, 0xebfb, 0xaa7e, 0x4351,},
+ {0x0235, 0x0137, 0x36b1, 0x336c,
+ 0xc66f, 0x8cdf, 0x80e9, 0x47c9, 0x93ba, 0x41a8,},
+ {0x50f8, 0x25fb, 0xc76b, 0x6b71,
+ 0x3cbf, 0xa6d5, 0xffcf, 0x1f49, 0xc278, 0x40d3,},
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0xf020, 0xb59d, 0x2b70, 0xada8, 0x9dc5, 0x4069,},
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0400, 0xc9bf, 0x8e1b, 0x4034,},
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x2000, 0xbebc, 0x4019,},
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x9c40, 0x400c,},
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0xc800, 0x4005,},
+ {0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0xa000, 0x4002,}, /* 10**1 */
+};
+
+static unsigned EMUSHORT emtens[NTEN + 1][NE] =
+{
+ {0x2030, 0xcffc, 0xa1c3, 0x8123,
+ 0x2de3, 0x9fde, 0xd2ce, 0x04c8, 0xa6dd, 0x0ad8,}, /* 10**-4096 */
+ {0x8264, 0xd2cb, 0xf2ea, 0x12d4,
+ 0x4925, 0x2de4, 0x3436, 0x534f, 0xceae, 0x256b,}, /* 10**-2048 */
+ {0xf53f, 0xf698, 0x6bd3, 0x0158,
+ 0x87a6, 0xc0bd, 0xda57, 0x82a5, 0xa2a6, 0x32b5,},
+ {0xe731, 0x04d4, 0xe3f2, 0xd332,
+ 0x7132, 0xd21c, 0xdb23, 0xee32, 0x9049, 0x395a,},
+ {0xa23e, 0x5308, 0xfefb, 0x1155,
+ 0xfa91, 0x1939, 0x637a, 0x4325, 0xc031, 0x3cac,},
+ {0xe26d, 0xdbde, 0xd05d, 0xb3f6,
+ 0xac7c, 0xe4a0, 0x64bc, 0x467c, 0xddd0, 0x3e55,},
+ {0x2a20, 0x6224, 0x47b3, 0x98d7,
+ 0x3f23, 0xe9a5, 0xa539, 0xea27, 0xa87f, 0x3f2a,},
+ {0x0b5b, 0x4af2, 0xa581, 0x18ed,
+ 0x67de, 0x94ba, 0x4539, 0x1ead, 0xcfb1, 0x3f94,},
+ {0xbf71, 0xa9b3, 0x7989, 0xbe68,
+ 0x4c2e, 0xe15b, 0xc44d, 0x94be, 0xe695, 0x3fc9,},
+ {0x3d4d, 0x7c3d, 0x36ba, 0x0d2b,
+ 0xfdc2, 0xcefc, 0x8461, 0x7711, 0xabcc, 0x3fe4,},
+ {0xc155, 0xa4a8, 0x404e, 0x6113,
+ 0xd3c3, 0x652b, 0xe219, 0x1758, 0xd1b7, 0x3ff1,},
+ {0xd70a, 0x70a3, 0x0a3d, 0xa3d7,
+ 0x3d70, 0xd70a, 0x70a3, 0x0a3d, 0xa3d7, 0x3ff8,},
+ {0xcccd, 0xcccc, 0xcccc, 0xcccc,
+ 0xcccc, 0xcccc, 0xcccc, 0xcccc, 0xcccc, 0x3ffb,}, /* 10**-1 */
+};
+#else
+/* LONG_DOUBLE_TYPE_SIZE is other than 128 */
+static unsigned EMUSHORT etens[NTEN + 1][NE] =
+{
+ {0xc94c, 0x979a, 0x8a20, 0x5202, 0xc460, 0x7525,}, /* 10**4096 */
+ {0xa74d, 0x5de4, 0xc53d, 0x3b5d, 0x9e8b, 0x5a92,}, /* 10**2048 */
+ {0x650d, 0x0c17, 0x8175, 0x7586, 0xc976, 0x4d48,},
+ {0xcc65, 0x91c6, 0xa60e, 0xa0ae, 0xe319, 0x46a3,},
+ {0xddbc, 0xde8d, 0x9df9, 0xebfb, 0xaa7e, 0x4351,},
+ {0xc66f, 0x8cdf, 0x80e9, 0x47c9, 0x93ba, 0x41a8,},
+ {0x3cbf, 0xa6d5, 0xffcf, 0x1f49, 0xc278, 0x40d3,},
+ {0xf020, 0xb59d, 0x2b70, 0xada8, 0x9dc5, 0x4069,},
+ {0x0000, 0x0000, 0x0400, 0xc9bf, 0x8e1b, 0x4034,},
+ {0x0000, 0x0000, 0x0000, 0x2000, 0xbebc, 0x4019,},
+ {0x0000, 0x0000, 0x0000, 0x0000, 0x9c40, 0x400c,},
+ {0x0000, 0x0000, 0x0000, 0x0000, 0xc800, 0x4005,},
+ {0x0000, 0x0000, 0x0000, 0x0000, 0xa000, 0x4002,}, /* 10**1 */
+};
+
+static unsigned EMUSHORT emtens[NTEN + 1][NE] =
+{
+ {0x2de4, 0x9fde, 0xd2ce, 0x04c8, 0xa6dd, 0x0ad8,}, /* 10**-4096 */
+ {0x4925, 0x2de4, 0x3436, 0x534f, 0xceae, 0x256b,}, /* 10**-2048 */
+ {0x87a6, 0xc0bd, 0xda57, 0x82a5, 0xa2a6, 0x32b5,},
+ {0x7133, 0xd21c, 0xdb23, 0xee32, 0x9049, 0x395a,},
+ {0xfa91, 0x1939, 0x637a, 0x4325, 0xc031, 0x3cac,},
+ {0xac7d, 0xe4a0, 0x64bc, 0x467c, 0xddd0, 0x3e55,},
+ {0x3f24, 0xe9a5, 0xa539, 0xea27, 0xa87f, 0x3f2a,},
+ {0x67de, 0x94ba, 0x4539, 0x1ead, 0xcfb1, 0x3f94,},
+ {0x4c2f, 0xe15b, 0xc44d, 0x94be, 0xe695, 0x3fc9,},
+ {0xfdc2, 0xcefc, 0x8461, 0x7711, 0xabcc, 0x3fe4,},
+ {0xd3c3, 0x652b, 0xe219, 0x1758, 0xd1b7, 0x3ff1,},
+ {0x3d71, 0xd70a, 0x70a3, 0x0a3d, 0xa3d7, 0x3ff8,},
+ {0xcccd, 0xcccc, 0xcccc, 0xcccc, 0xcccc, 0x3ffb,}, /* 10**-1 */
+};
+#endif
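+
+/* Illustrative sketch, not part of the original source: how these
+ tables are meant to be used. etens[k] holds 10**(2**(12-k)), so
+ 10**n is formed by multiplying together the entries picked out by
+ the binary digits of n -- binary exponentiation, as in the loops of
+ etoasc and asctoeg below. Host-double sketch, limited to the range
+ of a double; the name is hypothetical. */
+#if 0 /* example only */
+static double
+pow10i (n)
+ int n; /* assumes 0 <= n <= 308 so the result fits in a double */
+{
+ static double tens[9] =
+ {1e256, 1e128, 1e64, 1e32, 1e16, 1e8, 1e4, 1e2, 1e1,};
+ double r = 1.0;
+ double *p = tens;
+ int bit;
+
+ for (bit = 256; bit != 0; bit >>= 1)
+ {
+ if (n & bit) /* this power of ten is in n's binary expansion */
+ r *= *p;
+ ++p;
+ }
+ return r;
+}
+#endif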
+
+static void
+e24toasc (x, string, ndigs)
+ unsigned EMUSHORT x[];
+ char *string;
+ int ndigs;
+{
+ unsigned EMUSHORT w[NI];
+
+ e24toe (x, w);
+ etoasc (w, string, ndigs);
+}
+
+
+static void
+e53toasc (x, string, ndigs)
+ unsigned EMUSHORT x[];
+ char *string;
+ int ndigs;
+{
+ unsigned EMUSHORT w[NI];
+
+ e53toe (x, w);
+ etoasc (w, string, ndigs);
+}
+
+
+static void
+e64toasc (x, string, ndigs)
+ unsigned EMUSHORT x[];
+ char *string;
+ int ndigs;
+{
+ unsigned EMUSHORT w[NI];
+
+ e64toe (x, w);
+ etoasc (w, string, ndigs);
+}
+
+static void
+e113toasc (x, string, ndigs)
+ unsigned EMUSHORT x[];
+ char *string;
+ int ndigs;
+{
+ unsigned EMUSHORT w[NI];
+
+ e113toe (x, w);
+ etoasc (w, string, ndigs);
+}
+
+
+static char wstring[80]; /* working storage for ASCII output */
+
+static void
+etoasc (x, string, ndigs)
+ unsigned EMUSHORT x[];
+ char *string;
+ int ndigs;
+{
+ EMUSHORT digit;
+ unsigned EMUSHORT y[NI], t[NI], u[NI], w[NI];
+ unsigned EMUSHORT *p, *r, *ten;
+ unsigned EMUSHORT sign;
+ int i, j, k, expon, rndsav;
+ char *s, *ss;
+ unsigned EMUSHORT m;
+
+
+ rndsav = rndprc;
+ ss = string;
+ s = wstring;
+ *ss = '\0';
+ *s = '\0';
+#ifdef NANS
+ if (eisnan (x))
+ {
+ sprintf (wstring, " NaN ");
+ goto bxit;
+ }
+#endif
+ rndprc = NBITS; /* set to full precision */
+ emov (x, y); /* retain external format */
+ if (y[NE - 1] & 0x8000)
+ {
+ sign = 0xffff;
+ y[NE - 1] &= 0x7fff;
+ }
+ else
+ {
+ sign = 0;
+ }
+ expon = 0;
+ ten = &etens[NTEN][0];
+ emov (eone, t);
+ /* Test for zero exponent */
+ if (y[NE - 1] == 0)
+ {
+ for (k = 0; k < NE - 1; k++)
+ {
+ if (y[k] != 0)
+ goto tnzro; /* denormalized number */
+ }
+ goto isone; /* legal all zeros */
+ }
+ tnzro:
+
+ /* Test for infinity. */
+ if (y[NE - 1] == 0x7fff)
+ {
+ if (sign)
+ sprintf (wstring, " -Infinity ");
+ else
+ sprintf (wstring, " Infinity ");
+ goto bxit;
+ }
+
+ /* Test for exponent nonzero but significand denormalized.
+ * This is an error condition.
+ */
+ if ((y[NE - 1] != 0) && ((y[NE - 2] & 0x8000) == 0))
+ {
+ mtherr ("etoasc", DOMAIN);
+ sprintf (wstring, "NaN");
+ goto bxit;
+ }
+
+ /* Compare to 1.0 */
+ i = ecmp (eone, y);
+ if (i == 0)
+ goto isone;
+
+ if (i == -2)
+ abort ();
+
+ if (i < 0)
+ { /* Number is greater than 1 */
+ /* Convert significand to an integer and strip trailing decimal zeros. */
+ emov (y, u);
+ u[NE - 1] = EXONE + NBITS - 1;
+
+ p = &etens[NTEN - 4][0];
+ m = 16;
+ do
+ {
+ ediv (p, u, t);
+ efloor (t, w);
+ for (j = 0; j < NE - 1; j++)
+ {
+ if (t[j] != w[j])
+ goto noint;
+ }
+ emov (t, u);
+ expon += (int) m;
+ noint:
+ p += NE;
+ m >>= 1;
+ }
+ while (m != 0);
+
+ /* Rescale from integer significand */
+ u[NE - 1] += y[NE - 1] - (unsigned int) (EXONE + NBITS - 1);
+ emov (u, y);
+ /* Find power of 10 */
+ emov (eone, t);
+ m = MAXP;
+ p = &etens[0][0];
+ /* An unordered compare result shouldn't happen here. */
+ while (ecmp (ten, u) <= 0)
+ {
+ if (ecmp (p, u) <= 0)
+ {
+ ediv (p, u, u);
+ emul (p, t, t);
+ expon += (int) m;
+ }
+ m >>= 1;
+ if (m == 0)
+ break;
+ p += NE;
+ }
+ }
+ else
+ { /* Number is less than 1.0 */
+ /* Pad significand with trailing decimal zeros. */
+ if (y[NE - 1] == 0)
+ {
+ while ((y[NE - 2] & 0x8000) == 0)
+ {
+ emul (ten, y, y);
+ expon -= 1;
+ }
+ }
+ else
+ {
+ emovi (y, w);
+ for (i = 0; i < NDEC + 1; i++)
+ {
+ if ((w[NI - 1] & 0x7) != 0)
+ break;
+ /* multiply by 10 */
+ emovz (w, u);
+ eshdn1 (u);
+ eshdn1 (u);
+ eaddm (w, u);
+ u[1] += 3;
+ while (u[2] != 0)
+ {
+ eshdn1 (u);
+ u[1] += 1;
+ }
+ if (u[NI - 1] != 0)
+ break;
+ if (eone[NE - 1] <= u[1])
+ break;
+ emovz (u, w);
+ expon -= 1;
+ }
+ emovo (w, y);
+ }
+ k = -MAXP;
+ p = &emtens[0][0];
+ r = &etens[0][0];
+ emov (y, w);
+ emov (eone, t);
+ while (ecmp (eone, w) > 0)
+ {
+ if (ecmp (p, w) >= 0)
+ {
+ emul (r, w, w);
+ emul (r, t, t);
+ expon += k;
+ }
+ k /= 2;
+ if (k == 0)
+ break;
+ p += NE;
+ r += NE;
+ }
+ ediv (t, eone, t);
+ }
+ isone:
+ /* Find the first (leading) digit. */
+ emovi (t, w);
+ emovz (w, t);
+ emovi (y, w);
+ emovz (w, y);
+ eiremain (t, y);
+ digit = equot[NI - 1];
+ while ((digit == 0) && (ecmp (y, ezero) != 0))
+ {
+ eshup1 (y);
+ emovz (y, u);
+ eshup1 (u);
+ eshup1 (u);
+ eaddm (u, y);
+ eiremain (t, y);
+ digit = equot[NI - 1];
+ expon -= 1;
+ }
+ s = wstring;
+ if (sign)
+ *s++ = '-';
+ else
+ *s++ = ' ';
+ /* Examine number of digits requested by caller. */
+ if (ndigs < 0)
+ ndigs = 0;
+ if (ndigs > NDEC)
+ ndigs = NDEC;
+ if (digit == 10)
+ {
+ *s++ = '1';
+ *s++ = '.';
+ if (ndigs > 0)
+ {
+ *s++ = '0';
+ ndigs -= 1;
+ }
+ expon += 1;
+ }
+ else
+ {
+ *s++ = (char)digit + '0';
+ *s++ = '.';
+ }
+ /* Generate digits after the decimal point. */
+ for (k = 0; k <= ndigs; k++)
+ {
+ /* multiply current number by 10, without normalizing */
+ eshup1 (y);
+ emovz (y, u);
+ eshup1 (u);
+ eshup1 (u);
+ eaddm (u, y);
+ eiremain (t, y);
+ *s++ = (char) equot[NI - 1] + '0';
+ }
+ digit = equot[NI - 1];
+ --s;
+ ss = s;
+ /* round off the ASCII string */
+ if (digit > 4)
+ {
+ /* Test for critical rounding case in ASCII output. */
+ if (digit == 5)
+ {
+ emovo (y, t);
+ if (ecmp (t, ezero) != 0)
+ goto roun; /* round to nearest */
+ if ((*(s - 1) & 1) == 0)
+ goto doexp; /* round to even */
+ }
+ /* Round up and propagate carry-outs */
+ roun:
+ --s;
+ k = *s & 0x7f;
+ /* Carry out to most significant digit? */
+ if (k == '.')
+ {
+ --s;
+ k = *s;
+ k += 1;
+ *s = (char) k;
+ /* Most significant digit carries to 10? */
+ if (k > '9')
+ {
+ expon += 1;
+ *s = '1';
+ }
+ goto doexp;
+ }
+ /* Round up and carry out from less significant digits */
+ k += 1;
+ *s = (char) k;
+ if (k > '9')
+ {
+ *s = '0';
+ goto roun;
+ }
+ }
+ doexp:
+ /*
+ if (expon >= 0)
+ sprintf (ss, "e+%d", expon);
+ else
+ sprintf (ss, "e%d", expon);
+ */
+ sprintf (ss, "e%d", expon);
+ bxit:
+ rndprc = rndsav;
+ /* copy out the working string */
+ s = string;
+ ss = wstring;
+ while (*ss == ' ') /* strip possible leading space */
+ ++ss;
+ while ((*s++ = *ss++) != '\0')
+ ;
+}
+
+
+/* Convert ASCII string to quadruple precision floating point
+
+ Numeric input is free field decimal number with max of 15 digits with or
+ without decimal point entered as ASCII from teletype. Entering E after
+ the number followed by a second number causes the second number to be
+ interpreted as a power of 10 to be multiplied by the first number
+ (i.e., "scientific" notation). */
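+
+/* Illustrative sketch, not part of the original source: the overall
+ parse that asctoeg below performs, using a host double as the
+ accumulator instead of the yy[] register. Rounding, overflow and
+ error handling are omitted; the name is hypothetical. */
+#if 0 /* example only */
+static double
+simple_asctod (s)
+ char *s;
+{
+ double v = 0.0;
+ int nexp = 0, exp = 0, esign = 1, sign = 1, decflg = 0;
+
+ if (*s == '-')
+ sign = -1, ++s;
+ else if (*s == '+')
+ ++s;
+ for (; (*s >= '0' && *s <= '9') || *s == '.'; ++s)
+ {
+ if (*s == '.')
+ {
+ decflg = 1;
+ continue;
+ }
+ v = v * 10.0 + (*s - '0'); /* multiply by 10, add next digit */
+ if (decflg)
+ nexp += 1; /* count digits after the decimal point */
+ }
+ if (*s == 'e' || *s == 'E')
+ {
+ ++s;
+ if (*s == '-')
+ esign = -1, ++s;
+ else if (*s == '+')
+ ++s;
+ while (*s >= '0' && *s <= '9')
+ exp = exp * 10 + (*s++ - '0');
+ }
+ /* Net power of ten, as in the `nexp = exp - nexp' step below. */
+ exp = esign * exp - nexp;
+ while (exp > 0)
+ v *= 10.0, exp -= 1;
+ while (exp < 0)
+ v /= 10.0, exp += 1;
+ return sign * v;
+}
+#endif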
+
+/* ASCII to single */
+
+static void
+asctoe24 (s, y)
+ char *s;
+ unsigned EMUSHORT *y;
+{
+ asctoeg (s, y, 24);
+}
+
+
+/* ASCII to double */
+
+static void
+asctoe53 (s, y)
+ char *s;
+ unsigned EMUSHORT *y;
+{
+#if defined(DEC) || defined(IBM)
+ asctoeg (s, y, 56);
+#else
+ asctoeg (s, y, 53);
+#endif
+}
+
+
+/* ASCII to long double */
+
+static void
+asctoe64 (s, y)
+ char *s;
+ unsigned EMUSHORT *y;
+{
+ asctoeg (s, y, 64);
+}
+
+/* ASCII to 128-bit long double */
+
+static void
+asctoe113 (s, y)
+ char *s;
+ unsigned EMUSHORT *y;
+{
+ asctoeg (s, y, 113);
+}
+
+/* ASCII to super double */
+
+static void
+asctoe (s, y)
+ char *s;
+ unsigned EMUSHORT *y;
+{
+ asctoeg (s, y, NBITS);
+}
+
+
+/* ASCII to e type, with specified rounding precision = oprec. */
+
+static void
+asctoeg (ss, y, oprec)
+ char *ss;
+ unsigned EMUSHORT *y;
+ int oprec;
+{
+ unsigned EMUSHORT yy[NI], xt[NI], tt[NI];
+ int esign, decflg, sgnflg, nexp, exp, prec, lost;
+ int k, trail, c, rndsav;
+ EMULONG lexp;
+ unsigned EMUSHORT nsign, *p;
+ char *sp, *s, *lstr;
+
+ /* Copy the input string. */
+ lstr = (char *) alloca (strlen (ss) + 1);
+ s = ss;
+ while (*s == ' ') /* skip leading spaces */
+ ++s;
+ sp = lstr;
+ while ((*sp++ = *s++) != '\0')
+ ;
+ s = lstr;
+
+ rndsav = rndprc;
+ rndprc = NBITS; /* Set to full precision */
+ lost = 0;
+ nsign = 0;
+ decflg = 0;
+ sgnflg = 0;
+ nexp = 0;
+ exp = 0;
+ prec = 0;
+ ecleaz (yy);
+ trail = 0;
+
+ nxtcom:
+ k = *s - '0';
+ if ((k >= 0) && (k <= 9))
+ {
+ /* Ignore leading zeros */
+ if ((prec == 0) && (decflg == 0) && (k == 0))
+ goto donchr;
+ /* Identify and strip trailing zeros after the decimal point. */
+ if ((trail == 0) && (decflg != 0))
+ {
+ sp = s;
+ while ((*sp >= '0') && (*sp <= '9'))
+ ++sp;
+ /* Check for syntax error */
+ c = *sp & 0x7f;
+ if ((c != 'e') && (c != 'E') && (c != '\0')
+ && (c != '\n') && (c != '\r') && (c != ' ')
+ && (c != ','))
+ goto error;
+ --sp;
+ while (*sp == '0')
+ *sp-- = 'z';
+ trail = 1;
+ if (*s == 'z')
+ goto donchr;
+ }
+
+ /* If enough digits were given to more than fill up the yy register,
+ continuing until overflow into the high guard word yy[2]
+ guarantees that there will be a roundoff bit at the top
+ of the low guard word after normalization. */
+
+ if (yy[2] == 0)
+ {
+ if (decflg)
+ nexp += 1; /* count digits after decimal point */
+ eshup1 (yy); /* multiply current number by 10 */
+ emovz (yy, xt);
+ eshup1 (xt);
+ eshup1 (xt);
+ eaddm (xt, yy);
+ ecleaz (xt);
+ xt[NI - 2] = (unsigned EMUSHORT) k;
+ eaddm (xt, yy);
+ }
+ else
+ {
+ /* Mark any lost non-zero digit. */
+ lost |= k;
+ /* Count lost digits before the decimal point. */
+ if (decflg == 0)
+ nexp -= 1;
+ }
+ prec += 1;
+ goto donchr;
+ }
+
+ switch (*s)
+ {
+ case 'z':
+ break;
+ case 'E':
+ case 'e':
+ goto expnt;
+ case '.': /* decimal point */
+ if (decflg)
+ goto error;
+ ++decflg;
+ break;
+ case '-':
+ nsign = 0xffff;
+ if (sgnflg)
+ goto error;
+ ++sgnflg;
+ break;
+ case '+':
+ if (sgnflg)
+ goto error;
+ ++sgnflg;
+ break;
+ case ',':
+ case ' ':
+ case '\0':
+ case '\n':
+ case '\r':
+ goto daldone;
+ case 'i':
+ case 'I':
+ goto infinite;
+ default:
+ error:
+#ifdef NANS
+ einan (yy);
+#else
+ mtherr ("asctoe", DOMAIN);
+ eclear (yy);
+#endif
+ goto aexit;
+ }
+ donchr:
+ ++s;
+ goto nxtcom;
+
+ /* Exponent interpretation */
+ expnt:
+
+ esign = 1;
+ exp = 0;
+ ++s;
+ /* check for + or - */
+ if (*s == '-')
+ {
+ esign = -1;
+ ++s;
+ }
+ if (*s == '+')
+ ++s;
+ while ((*s >= '0') && (*s <= '9'))
+ {
+ exp *= 10;
+ exp += *s++ - '0';
+ if (exp > -(MINDECEXP))
+ {
+ if (esign < 0)
+ goto zero;
+ else
+ goto infinite;
+ }
+ }
+ if (esign < 0)
+ exp = -exp;
+ if (exp > MAXDECEXP)
+ {
+ infinite:
+ ecleaz (yy);
+ yy[E] = 0x7fff; /* infinity */
+ goto aexit;
+ }
+ if (exp < MINDECEXP)
+ {
+ zero:
+ ecleaz (yy);
+ goto aexit;
+ }
+
+ daldone:
+ nexp = exp - nexp;
+ /* Pad trailing zeros to minimize power of 10, per IEEE spec. */
+ while ((nexp > 0) && (yy[2] == 0))
+ {
+ emovz (yy, xt);
+ eshup1 (xt);
+ eshup1 (xt);
+ eaddm (yy, xt);
+ eshup1 (xt);
+ if (xt[2] != 0)
+ break;
+ nexp -= 1;
+ emovz (xt, yy);
+ }
+ if ((k = enormlz (yy)) > NBITS)
+ {
+ ecleaz (yy);
+ goto aexit;
+ }
+ lexp = (EXONE - 1 + NBITS) - k;
+ emdnorm (yy, lost, 0, lexp, 64);
+
+ /* Convert to external format:
+
+ Multiply by 10**nexp. If precision is 64 bits,
+ the maximum relative error incurred in forming 10**n
+ for 0 <= n <= 324 is 8.2e-20, at 10**180.
+ For 0 <= n <= 999, the peak relative error is 1.4e-19 at 10**947.
+ For 0 >= n >= -999, it is -1.55e-19 at 10**-435. */
+
+ lexp = yy[E];
+ if (nexp == 0)
+ {
+ k = 0;
+ goto expdon;
+ }
+ esign = 1;
+ if (nexp < 0)
+ {
+ nexp = -nexp;
+ esign = -1;
+ if (nexp > 4096)
+ {
+ /* Punt. Can't handle this without 2 divides. */
+ emovi (etens[0], tt);
+ lexp -= tt[E];
+ k = edivm (tt, yy);
+ lexp += EXONE;
+ nexp -= 4096;
+ }
+ }
+ p = &etens[NTEN][0];
+ emov (eone, xt);
+ exp = 1;
+ do
+ {
+ if (exp & nexp)
+ emul (p, xt, xt);
+ p -= NE;
+ exp = exp + exp;
+ }
+ while (exp <= MAXP);
+
+ emovi (xt, tt);
+ if (esign < 0)
+ {
+ lexp -= tt[E];
+ k = edivm (tt, yy);
+ lexp += EXONE;
+ }
+ else
+ {
+ lexp += tt[E];
+ k = emulm (tt, yy);
+ lexp -= EXONE - 1;
+ }
+
+ expdon:
+
+ /* Round and convert directly to the destination type */
+ if (oprec == 53)
+ lexp -= EXONE - 0x3ff;
+#ifdef IBM
+ else if (oprec == 24 || oprec == 56)
+ lexp -= EXONE - (0x41 << 2);
+#else
+ else if (oprec == 24)
+ lexp -= EXONE - 0177;
+#endif
+#ifdef DEC
+ else if (oprec == 56)
+ lexp -= EXONE - 0201;
+#endif
+ rndprc = oprec;
+ emdnorm (yy, k, 0, lexp, 64);
+
+ aexit:
+
+ rndprc = rndsav;
+ yy[0] = nsign;
+ switch (oprec)
+ {
+#ifdef DEC
+ case 56:
+ todec (yy, y); /* see etodec.c */
+ break;
+#endif
+#ifdef IBM
+ case 56:
+ toibm (yy, y, DFmode);
+ break;
+#endif
+ case 53:
+ toe53 (yy, y);
+ break;
+ case 24:
+ toe24 (yy, y);
+ break;
+ case 64:
+ toe64 (yy, y);
+ break;
+ case 113:
+ toe113 (yy, y);
+ break;
+ case NBITS:
+ emovo (yy, y);
+ break;
+ }
+}
+
+
+
+/* y = largest integer not greater than x (truncated toward minus infinity) */
+
+static unsigned EMUSHORT bmask[] =
+{
+ 0xffff,
+ 0xfffe,
+ 0xfffc,
+ 0xfff8,
+ 0xfff0,
+ 0xffe0,
+ 0xffc0,
+ 0xff80,
+ 0xff00,
+ 0xfe00,
+ 0xfc00,
+ 0xf800,
+ 0xf000,
+ 0xe000,
+ 0xc000,
+ 0x8000,
+ 0x0000,
+};
+
+static void
+efloor (x, y)
+ unsigned EMUSHORT x[], y[];
+{
+ register unsigned EMUSHORT *p;
+ int e, expon, i;
+ unsigned EMUSHORT f[NE];
+
+ emov (x, f); /* leave in external format */
+ expon = (int) f[NE - 1];
+ e = (expon & 0x7fff) - (EXONE - 1);
+ if (e <= 0)
+ {
+ eclear (y);
+ goto isitneg;
+ }
+ /* number of bits to clear out */
+ e = NBITS - e;
+ emov (f, y);
+ if (e <= 0)
+ return;
+
+ p = &y[0];
+ while (e >= 16)
+ {
+ *p++ = 0;
+ e -= 16;
+ }
+ /* clear the remaining bits */
+ *p &= bmask[e];
+ /* truncate negatives toward minus infinity */
+ isitneg:
+
+ if ((unsigned EMUSHORT) expon & (unsigned EMUSHORT) 0x8000)
+ {
+ for (i = 0; i < NE - 1; i++)
+ {
+ if (f[i] != y[i])
+ {
+ esub (eone, y, y);
+ break;
+ }
+ }
+ }
+}
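+
+/* Illustrative sketch, not part of the original source: the same
+ mask-off-the-fraction idea on a 16.16 fixed-point value. Because
+ the e-type is sign-magnitude, efloor truncates the magnitude toward
+ zero and then subtracts one for a negative input whose fraction
+ bits were nonzero. Assumes a 32-bit long; the name is hypothetical. */
+#if 0 /* example only */
+static long
+fix16_floor (x)
+ long x; /* value scaled by 2**16 */
+{
+ long mag = x < 0 ? -x : x; /* sign-magnitude form, like e-type */
+ long y = mag & ~0xffffL; /* clear the fraction bits (cf. bmask) */
+
+ if (x < 0)
+ {
+ if (y != mag) /* the fraction part was nonzero */
+ y += 0x10000L; /* go one more toward minus infinity */
+ y = -y;
+ }
+ return y;
+}
+#endif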
+
+
+/* Returns s and exp such that s * 2**exp = x and .5 <= s < 1.
+ For example, 1.1 = 0.55 * 2**1
+ Handles denormalized numbers properly using long integer exp. */
+
+static void
+efrexp (x, exp, s)
+ unsigned EMUSHORT x[];
+ int *exp;
+ unsigned EMUSHORT s[];
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG li;
+
+ emovi (x, xi);
+ li = (EMULONG) ((EMUSHORT) xi[1]);
+
+ if (li == 0)
+ {
+ li -= enormlz (xi);
+ }
+ xi[1] = 0x3ffe;
+ emovo (xi, s);
+ *exp = (int) (li - 0x3ffe);
+}
+
+
+
+/* Return y = x * 2**pwr2. */
+
+static void
+eldexp (x, pwr2, y)
+ unsigned EMUSHORT x[];
+ int pwr2;
+ unsigned EMUSHORT y[];
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG li;
+ int i;
+
+ emovi (x, xi);
+ li = xi[1];
+ li += pwr2;
+ i = 0;
+ emdnorm (xi, i, i, li, 64);
+ emovo (xi, y);
+}
+
+
+/* c = remainder after dividing b by a
+ Least significant integer quotient bits left in equot[]. */
+
+static void
+eremain (a, b, c)
+ unsigned EMUSHORT a[], b[], c[];
+{
+ unsigned EMUSHORT den[NI], num[NI];
+
+#ifdef NANS
+ if (eisinf (b)
+ || (ecmp (a, ezero) == 0)
+ || eisnan (a)
+ || eisnan (b))
+ {
+ enan (c, 0);
+ return;
+ }
+#endif
+ if (ecmp (a, ezero) == 0)
+ {
+ mtherr ("eremain", SING);
+ eclear (c);
+ return;
+ }
+ emovi (a, den);
+ emovi (b, num);
+ eiremain (den, num);
+ /* Sign of remainder = sign of quotient */
+ if (a[0] == b[0])
+ num[0] = 0;
+ else
+ num[0] = 0xffff;
+ emovo (num, c);
+}
+
+static void
+eiremain (den, num)
+ unsigned EMUSHORT den[], num[];
+{
+ EMULONG ld, ln;
+ unsigned EMUSHORT j;
+
+ ld = den[E];
+ ld -= enormlz (den);
+ ln = num[E];
+ ln -= enormlz (num);
+ ecleaz (equot);
+ while (ln >= ld)
+ {
+ if (ecmpm (den, num) <= 0)
+ {
+ esubm (den, num);
+ j = 1;
+ }
+ else
+ {
+ j = 0;
+ }
+ eshup1 (equot);
+ equot[NI - 1] |= j;
+ eshup1 (num);
+ ln -= 1;
+ }
+ emdnorm (num, 0, 0, ln, 0);
+}
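+
+/* Illustrative sketch, not part of the original source: the restoring
+ shift-and-subtract loop of eiremain on plain unsigned integers.
+ Each step tries the denominator against the numerator's leading
+ bits, subtracts when it fits, and records a quotient bit, just as
+ eiremain accumulates bits in equot[]. DEN must be nonzero; the
+ name is hypothetical and a 32-bit unsigned long is assumed. */
+#if 0 /* example only */
+static unsigned long
+urem_longhand (num, den, quot)
+ unsigned long num, den, *quot;
+{
+ unsigned long q = 0;
+ int k;
+
+ for (k = 31; k >= 0; k--)
+ {
+ q <<= 1;
+ if ((num >> k) >= den) /* does den fit at this position? */
+ {
+ num -= den << k; /* subtract it out */
+ q |= 1; /* and record a quotient bit */
+ }
+ }
+ *quot = q;
+ return num; /* what is left over is the remainder */
+}
+#endif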
+
+/* This routine may be called to report one of the following
+ error conditions (in the include file mconf.h).
+
+ Mnemonic Value Significance
+
+ DOMAIN 1 argument domain error
+ SING 2 function singularity
+ OVERFLOW 3 overflow range error
+ UNDERFLOW 4 underflow range error
+ TLOSS 5 total loss of precision
+ PLOSS 6 partial loss of precision
+ INVALID 7 NaN-producing operation
+ EDOM 33 Unix domain error code
+ ERANGE 34 Unix range error code
+
+ The default version of the file prints the function name,
+ passed to it by the pointer fctnam, followed by the
+ error condition. The display is directed to the standard
+ output device. The routine then returns to the calling
+ program. Users may wish to modify the program to abort by
+ calling exit under severe error conditions such as domain
+ errors.
+
+ Since all error conditions pass control to this function,
+ the display may be easily changed, eliminated, or directed
+ to an error logging device. */
+
+/* Note: the order of appearance of the following messages is bound to the
+ error codes defined above. */
+
+#define NMSGS 8
+static char *ermsg[NMSGS] =
+{
+ "unknown", /* error code 0 */
+ "domain", /* error code 1 */
+ "singularity", /* et seq. */
+ "overflow",
+ "underflow",
+ "total loss of precision",
+ "partial loss of precision",
+ "invalid operation"
+};
+
+int merror = 0;
+extern int merror;
+
+static void
+mtherr (name, code)
+ char *name;
+ int code;
+{
+ char errstr[80];
+
+ /* Display string passed by calling program, which is supposed to be the
+ name of the function in which the error occurred.
+
+ Display error message defined by the code argument. */
+
+ if ((code <= 0) || (code >= NMSGS))
+ code = 0;
+ sprintf (errstr, " %s %s error", name, ermsg[code]);
+ if (extra_warnings)
+ warning (errstr);
+ /* Set global error message word */
+ merror = code + 1;
+}
+
+#ifdef DEC
+/* Convert DEC double precision to e type. */
+
+static void
+dectoe (d, e)
+ unsigned EMUSHORT *d;
+ unsigned EMUSHORT *e;
+{
+ unsigned EMUSHORT y[NI];
+ register unsigned EMUSHORT r, *p;
+
+ ecleaz (y); /* start with a zero */
+ p = y; /* point to our number */
+ r = *d; /* get DEC exponent word */
+ if (*d & (unsigned int) 0x8000)
+ *p = 0xffff; /* fill in our sign */
+ ++p; /* bump pointer to our exponent word */
+ r &= 0x7fff; /* strip the sign bit */
+ if (r == 0) /* answer = 0 if high order DEC word = 0 */
+ goto done;
+
+
+ r >>= 7; /* shift exponent word down 7 bits */
+ r += EXONE - 0201; /* subtract DEC exponent offset */
+ /* add our e type exponent offset */
+ *p++ = r; /* to form our exponent */
+
+ r = *d++; /* now do the high order mantissa */
+ r &= 0177; /* strip off the DEC exponent and sign bits */
+ r |= 0200; /* the DEC understood high order mantissa bit */
+ *p++ = r; /* put result in our high guard word */
+
+ *p++ = *d++; /* fill in the rest of our mantissa */
+ *p++ = *d++;
+ *p = *d;
+
+ eshdn8 (y); /* shift our mantissa down 8 bits */
+ done:
+ emovo (y, e);
+}
+
+
+
+/* Convert e type to DEC double precision.
+ Usage:
+ double d;
+ EMUSHORT e[NE];
+ etodec (e, &d); */
+
+static void
+etodec (x, d)
+ unsigned EMUSHORT *x, *d;
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG exp;
+ int rndsav;
+
+ emovi (x, xi);
+ exp = (EMULONG) xi[E] - (EXONE - 0201); /* adjust exponent for offsets */
+/* round off to nearest or even */
+ rndsav = rndprc;
+ rndprc = 56;
+ emdnorm (xi, 0, 0, exp, 64);
+ rndprc = rndsav;
+ todec (xi, d);
+}
+
+static void
+todec (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ unsigned EMUSHORT i;
+ unsigned EMUSHORT *p;
+
+ p = x;
+ *y = 0;
+ if (*p++)
+ *y = 0100000;
+ i = *p++;
+ if (i == 0)
+ {
+ *y++ = 0;
+ *y++ = 0;
+ *y++ = 0;
+ *y++ = 0;
+ return;
+ }
+ if (i > 0377)
+ {
+ *y++ |= 077777;
+ *y++ = 0xffff;
+ *y++ = 0xffff;
+ *y++ = 0xffff;
+#ifdef ERANGE
+ errno = ERANGE;
+#endif
+ return;
+ }
+ i &= 0377;
+ i <<= 7;
+ eshup8 (x);
+ x[M] &= 0177;
+ i |= x[M];
+ *y++ |= i;
+ *y++ = x[M + 1];
+ *y++ = x[M + 2];
+ *y++ = x[M + 3];
+}
+#endif /* DEC */
+
+#ifdef IBM
+/* Convert IBM single/double precision to e type. */
+
+static void
+ibmtoe (d, e, mode)
+ unsigned EMUSHORT *d;
+ unsigned EMUSHORT *e;
+ enum machine_mode mode;
+{
+ unsigned EMUSHORT y[NI];
+ register unsigned EMUSHORT r, *p;
+ int rndsav;
+
+ ecleaz (y); /* start with a zero */
+ p = y; /* point to our number */
+ r = *d; /* get IBM exponent word */
+ if (*d & (unsigned int) 0x8000)
+ *p = 0xffff; /* fill in our sign */
+ ++p; /* bump pointer to our exponent word */
+ r &= 0x7f00; /* strip the sign bit */
+ r >>= 6; /* shift exponent word down 6 bits */
+ /* in fact shift by 8 right and 2 left */
+ r += EXONE - (0x41 << 2); /* subtract IBM exponent offset */
+ /* add our e type exponent offset */
+ *p++ = r; /* to form our exponent */
+
+ *p++ = *d++ & 0xff; /* now do the high order mantissa */
+ /* strip off the IBM exponent and sign bits */
+ if (mode != SFmode) /* there are only 2 words in SFmode */
+ {
+ *p++ = *d++; /* fill in the rest of our mantissa */
+ *p++ = *d++;
+ }
+ *p = *d;
+
+ if (y[M] == 0 && y[M+1] == 0 && y[M+2] == 0 && y[M+3] == 0)
+ y[0] = y[E] = 0;
+ else
+ y[E] -= 5 + enormlz (y); /* now normalise the mantissa */
+ /* handle change in RADIX */
+ emovo (y, e);
+}
+
+
+
+/* Convert e type to IBM single/double precision. */
+
+static void
+etoibm (x, d, mode)
+ unsigned EMUSHORT *x, *d;
+ enum machine_mode mode;
+{
+ unsigned EMUSHORT xi[NI];
+ EMULONG exp;
+ int rndsav;
+
+ emovi (x, xi);
+ exp = (EMULONG) xi[E] - (EXONE - (0x41 << 2)); /* adjust exponent for offsets */
+ /* round off to nearest or even */
+ rndsav = rndprc;
+ rndprc = 56;
+ emdnorm (xi, 0, 0, exp, 64);
+ rndprc = rndsav;
+ toibm (xi, d, mode);
+}
+
+static void
+toibm (x, y, mode)
+ unsigned EMUSHORT *x, *y;
+ enum machine_mode mode;
+{
+ unsigned EMUSHORT i;
+ unsigned EMUSHORT *p;
+ int r;
+
+ p = x;
+ *y = 0;
+ if (*p++)
+ *y = 0x8000;
+ i = *p++;
+ if (i == 0)
+ {
+ *y++ = 0;
+ *y++ = 0;
+ if (mode != SFmode)
+ {
+ *y++ = 0;
+ *y++ = 0;
+ }
+ return;
+ }
+ r = i & 0x3;
+ i >>= 2;
+ if (i > 0x7f)
+ {
+ *y++ |= 0x7fff;
+ *y++ = 0xffff;
+ if (mode != SFmode)
+ {
+ *y++ = 0xffff;
+ *y++ = 0xffff;
+ }
+#ifdef ERANGE
+ errno = ERANGE;
+#endif
+ return;
+ }
+ i &= 0x7f;
+ *y |= (i << 8);
+ eshift (x, r + 5);
+ *y++ |= x[M];
+ *y++ = x[M + 1];
+ if (mode != SFmode)
+ {
+ *y++ = x[M + 2];
+ *y++ = x[M + 3];
+ }
+}
+#endif /* IBM */
+
+/* Output a binary NaN bit pattern in the target machine's format. */
+
+/* If special NaN bit patterns are required, define them in tm.h
+ as arrays of unsigned 16-bit shorts. Otherwise, use the default
+ patterns here. */
+#ifdef TFMODE_NAN
+TFMODE_NAN;
+#else
+#ifdef MIEEE
+unsigned EMUSHORT TFnan[8] =
+ {0x7fff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff};
+#endif
+#ifdef IBMPC
+unsigned EMUSHORT TFnan[8] = {0, 0, 0, 0, 0, 0, 0x8000, 0xffff};
+#endif
+#endif
+
+#ifdef XFMODE_NAN
+XFMODE_NAN;
+#else
+#ifdef MIEEE
+unsigned EMUSHORT XFnan[6] = {0x7fff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff};
+#endif
+#ifdef IBMPC
+unsigned EMUSHORT XFnan[6] = {0, 0, 0, 0xc000, 0xffff, 0};
+#endif
+#endif
+
+#ifdef DFMODE_NAN
+DFMODE_NAN;
+#else
+#ifdef MIEEE
+unsigned EMUSHORT DFnan[4] = {0x7fff, 0xffff, 0xffff, 0xffff};
+#endif
+#ifdef IBMPC
+unsigned EMUSHORT DFnan[4] = {0, 0, 0, 0xfff8};
+#endif
+#endif
+
+#ifdef SFMODE_NAN
+SFMODE_NAN;
+#else
+#ifdef MIEEE
+unsigned EMUSHORT SFnan[2] = {0x7fff, 0xffff};
+#endif
+#ifdef IBMPC
+unsigned EMUSHORT SFnan[2] = {0, 0xffc0};
+#endif
+#endif
+
+
+static void
+make_nan (nan, sign, mode)
+ unsigned EMUSHORT *nan;
+ int sign;
+ enum machine_mode mode;
+{
+ int n;
+ unsigned EMUSHORT *p;
+
+ switch (mode)
+ {
+/* Possibly the `reserved operand' patterns on a VAX can be
+ used like NaN's, but probably not in the same way as IEEE. */
+#if !defined(DEC) && !defined(IBM)
+ case TFmode:
+ n = 8;
+ p = TFnan;
+ break;
+ case XFmode:
+ n = 6;
+ p = XFnan;
+ break;
+ case DFmode:
+ n = 4;
+ p = DFnan;
+ break;
+ case HFmode:
+ case SFmode:
+ n = 2;
+ p = SFnan;
+ break;
+#endif
+ default:
+ abort ();
+ }
+#ifdef MIEEE
+ *nan++ = (sign << 15) | *p++;
+#endif
+ while (--n != 0)
+ *nan++ = *p++;
+#ifndef MIEEE
+ *nan = (sign << 15) | *p;
+#endif
+}
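+
+/* Illustrative sketch, not part of the original source: the same
+ pattern-building idea for a host IEEE double. A quiet NaN has an
+ all-ones exponent and a nonzero fraction; the sign is ORed into the
+ high word, as make_nan does above. Assumes IEEE host doubles and
+ 32-bit unsigned longs; the name is hypothetical. */
+#if 0 /* example only */
+static double
+host_nan (sign)
+ int sign;
+{
+ union { double d; unsigned long w[2]; } u;
+ int hi;
+
+ u.d = 1.0; /* 1.0 = 0x3ff00000 00000000; only its high word is nonzero */
+ hi = (u.w[0] != 0) ? 0 : 1; /* locate the high word at run time */
+ u.w[hi] = 0x7ff80000L | ((unsigned long) sign << 31);
+ u.w[1 - hi] = 0;
+ return u.d;
+}
+#endif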
+
+/* Convert an SFmode target `float' value to a REAL_VALUE_TYPE.
+ This is the inverse of the function `etarsingle' invoked by
+ REAL_VALUE_TO_TARGET_SINGLE. */
+
+REAL_VALUE_TYPE
+ereal_from_float (f)
+ HOST_WIDE_INT f;
+{
+ REAL_VALUE_TYPE r;
+ unsigned EMUSHORT s[2];
+ unsigned EMUSHORT e[NE];
+
+ /* Convert 32 bit integer to array of 16 bit pieces in target machine order.
+ This is the inverse operation to what the function `endian' does. */
+#if FLOAT_WORDS_BIG_ENDIAN
+ s[0] = (unsigned EMUSHORT) (f >> 16);
+ s[1] = (unsigned EMUSHORT) f;
+#else
+ s[0] = (unsigned EMUSHORT) f;
+ s[1] = (unsigned EMUSHORT) (f >> 16);
+#endif
+ /* Convert and promote the target float to E-type. */
+ e24toe (s, e);
+ /* Output E-type to REAL_VALUE_TYPE. */
+ PUT_REAL (e, &r);
+ return r;
+}
+
+
+/* Convert a DFmode target `double' value to a REAL_VALUE_TYPE.
+ This is the inverse of the function `etardouble' invoked by
+ REAL_VALUE_TO_TARGET_DOUBLE.
+
+ The DFmode is stored as an array of HOST_WIDE_INT in the target's
+ data format, with no holes in the bit packing. The first element
+ of the input array holds the bits that would come first in the
+ target computer's memory. */
+
+REAL_VALUE_TYPE
+ereal_from_double (d)
+ HOST_WIDE_INT d[];
+{
+ REAL_VALUE_TYPE r;
+ unsigned EMUSHORT s[4];
+ unsigned EMUSHORT e[NE];
+
+ /* Convert array of HOST_WIDE_INT to equivalent array of 16-bit pieces. */
+#if FLOAT_WORDS_BIG_ENDIAN
+ s[0] = (unsigned EMUSHORT) (d[0] >> 16);
+ s[1] = (unsigned EMUSHORT) d[0];
+#if HOST_BITS_PER_WIDE_INT == 32
+ s[2] = (unsigned EMUSHORT) (d[1] >> 16);
+ s[3] = (unsigned EMUSHORT) d[1];
+#else
+ /* In this case the entire target double is contained in the
+ first array element. The second element of the input is ignored. */
+ s[2] = (unsigned EMUSHORT) (d[0] >> 48);
+ s[3] = (unsigned EMUSHORT) (d[0] >> 32);
+#endif
+#else
+/* Target float words are little-endian. */
+ s[0] = (unsigned EMUSHORT) d[0];
+ s[1] = (unsigned EMUSHORT) (d[0] >> 16);
+#if HOST_BITS_PER_WIDE_INT == 32
+ s[2] = (unsigned EMUSHORT) d[1];
+ s[3] = (unsigned EMUSHORT) (d[1] >> 16);
+#else
+ s[2] = (unsigned EMUSHORT) (d[0] >> 32);
+ s[3] = (unsigned EMUSHORT) (d[0] >> 48);
+#endif
+#endif
+ /* Convert target double to E-type. */
+ e53toe (s, e);
+ /* Output E-type to REAL_VALUE_TYPE. */
+ PUT_REAL (e, &r);
+ return r;
+}
+
+
+/* Convert target computer unsigned 64-bit integer to e-type.
+ The endian-ness of DImode follows the convention for integers,
+ so we use WORDS_BIG_ENDIAN here, not FLOAT_WORDS_BIG_ENDIAN. */
+
+static void
+uditoe (di, e)
+ unsigned EMUSHORT *di; /* Address of the 64-bit int. */
+ unsigned EMUSHORT *e;
+{
+ unsigned EMUSHORT yi[NI];
+ int k;
+
+ ecleaz (yi);
+#if WORDS_BIG_ENDIAN
+ for (k = M; k < M + 4; k++)
+ yi[k] = *di++;
+#else
+ for (k = M + 3; k >= M; k--)
+ yi[k] = *di++;
+#endif
+ yi[E] = EXONE + 47; /* exponent if normalize shift count were 0 */
+ if ((k = enormlz (yi)) > NBITS) /* normalize the significand */
+ ecleaz (yi); /* it was zero */
+ else
+ yi[E] -= (unsigned EMUSHORT) k; /* subtract shift count from exponent */
+ emovo (yi, e);
+}
+
+/* Convert target computer signed 64-bit integer to e-type. */
+
+static void
+ditoe (di, e)
+ unsigned EMUSHORT *di; /* Address of the 64-bit int. */
+ unsigned EMUSHORT *e;
+{
+ unsigned EMULONG acc;
+ unsigned EMUSHORT yi[NI];
+ unsigned EMUSHORT carry;
+ int k, sign;
+
+ ecleaz (yi);
+#if WORDS_BIG_ENDIAN
+ for (k = M; k < M + 4; k++)
+ yi[k] = *di++;
+#else
+ for (k = M + 3; k >= M; k--)
+ yi[k] = *di++;
+#endif
+ /* Take absolute value */
+ sign = 0;
+ if (yi[M] & 0x8000)
+ {
+ sign = 1;
+ carry = 0;
+ for (k = M + 3; k >= M; k--)
+ {
+ acc = (unsigned EMULONG) (~yi[k] & 0xffff) + carry;
+ yi[k] = acc;
+ carry = 0;
+ if (acc & 0x10000)
+ carry = 1;
+ }
+ }
+ yi[E] = EXONE + 47; /* exponent if normalize shift count were 0 */
+ if ((k = enormlz (yi)) > NBITS) /* normalize the significand */
+ ecleaz (yi); /* it was zero */
+ else
+ yi[E] -= (unsigned EMUSHORT) k; /* subtract shift count from exponent */
+ emovo (yi, e);
+ if (sign)
+ eneg (e);
+}
+
+
+/* Convert e-type to unsigned 64-bit int. */
+
+static void
+etoudi (x, i)
+ unsigned EMUSHORT *x;
+ unsigned EMUSHORT *i;
+{
+ unsigned EMUSHORT xi[NI];
+ int j, k;
+
+ emovi (x, xi);
+ if (xi[0])
+ {
+ xi[M] = 0;
+ goto noshift;
+ }
+ k = (int) xi[E] - (EXONE - 1);
+ if (k <= 0)
+ {
+ for (j = 0; j < 4; j++)
+ *i++ = 0;
+ return;
+ }
+ if (k > 64)
+ {
+ for (j = 0; j < 4; j++)
+ *i++ = 0xffff;
+ if (extra_warnings)
+ warning ("overflow on truncation to integer");
+ return;
+ }
+ if (k > 16)
+ {
+ /* Shift more than 16 bits: first shift up k-16 mod 16,
+ then shift up by 16's. */
+ j = k - ((k >> 4) << 4);
+ if (j == 0)
+ j = 16;
+ eshift (xi, j);
+#if WORDS_BIG_ENDIAN
+ *i++ = xi[M];
+#else
+ i += 3;
+ *i-- = xi[M];
+#endif
+ k -= j;
+ do
+ {
+ eshup6 (xi);
+#if WORDS_BIG_ENDIAN
+ *i++ = xi[M];
+#else
+ *i-- = xi[M];
+#endif
+ }
+ while ((k -= 16) > 0);
+ }
+ else
+ {
+ /* shift not more than 16 bits */
+ eshift (xi, k);
+
+noshift:
+
+#if WORDS_BIG_ENDIAN
+ i += 3;
+ *i-- = xi[M];
+ *i-- = 0;
+ *i-- = 0;
+ *i = 0;
+#else
+ *i++ = xi[M];
+ *i++ = 0;
+ *i++ = 0;
+ *i = 0;
+#endif
+ }
+}
+
+
+/* Convert e-type to signed 64-bit int. */
+
+static void
+etodi (x, i)
+ unsigned EMUSHORT *x;
+ unsigned EMUSHORT *i;
+{
+ unsigned EMULONG acc;
+ unsigned EMUSHORT xi[NI];
+ unsigned EMUSHORT carry;
+ unsigned EMUSHORT *isave;
+ int j, k;
+
+ emovi (x, xi);
+ k = (int) xi[E] - (EXONE - 1);
+ if (k <= 0)
+ {
+ for (j = 0; j < 4; j++)
+ *i++ = 0;
+ return;
+ }
+ if (k > 64)
+ {
+ for (j = 0; j < 4; j++)
+ *i++ = 0xffff;
+ if (extra_warnings)
+ warning ("overflow on truncation to integer");
+ return;
+ }
+ isave = i;
+ if (k > 16)
+ {
+ /* Shift more than 16 bits: first shift up k-16 mod 16,
+ then shift up by 16's. */
+ j = k - ((k >> 4) << 4);
+ if (j == 0)
+ j = 16;
+ eshift (xi, j);
+#if WORDS_BIG_ENDIAN
+ *i++ = xi[M];
+#else
+ i += 3;
+ *i-- = xi[M];
+#endif
+ k -= j;
+ do
+ {
+ eshup6 (xi);
+#if WORDS_BIG_ENDIAN
+ *i++ = xi[M];
+#else
+ *i-- = xi[M];
+#endif
+ }
+ while ((k -= 16) > 0);
+ }
+ else
+ {
+ /* shift not more than 16 bits */
+ eshift (xi, k);
+
+#if WORDS_BIG_ENDIAN
+ i += 3;
+ *i = xi[M];
+ *i-- = 0;
+ *i-- = 0;
+ *i = 0;
+#else
+ *i++ = xi[M];
+ *i++ = 0;
+ *i++ = 0;
+ *i = 0;
+#endif
+ }
+ /* Negate if negative */
+ if (xi[0])
+ {
+ carry = 0;
+#if WORDS_BIG_ENDIAN
+ isave += 3;
+#endif
+ for (k = 0; k < 4; k++)
+ {
+ acc = (unsigned EMULONG) (~(*isave) & 0xffff) + carry;
+#if WORDS_BIG_ENDIAN
+ *isave-- = acc;
+#else
+ *isave++ = acc;
+#endif
+ carry = 0;
+ if (acc & 0x10000)
+ carry = 1;
+ }
+ }
+}
+
+
+/* Longhand square root routine. */
+
+
+static int esqinited = 0;
+static unsigned short sqrndbit[NI];
+
+static void
+esqrt (x, y)
+ unsigned EMUSHORT *x, *y;
+{
+ unsigned EMUSHORT temp[NI], num[NI], sq[NI], xx[NI];
+ EMULONG m, exp;
+ int i, j, k, n, nlups;
+
+ if (esqinited == 0)
+ {
+ ecleaz (sqrndbit);
+ sqrndbit[NI - 2] = 1;
+ esqinited = 1;
+ }
+ /* Check for arg <= 0 */
+ i = ecmp (x, ezero);
+ if (i <= 0)
+ {
+ if (i == -1)
+ {
+ mtherr ("esqrt", DOMAIN);
+ eclear (y);
+ }
+ else
+ emov (x, y);
+ return;
+ }
+
+#ifdef INFINITY
+ if (eisinf (x))
+ {
+ eclear (y);
+ einfin (y);
+ return;
+ }
+#endif
+ /* Bring in the arg and renormalize if it is denormal. */
+ emovi (x, xx);
+ m = (EMULONG) xx[1]; /* local long word exponent */
+ if (m == 0)
+ m -= enormlz (xx);
+
+ /* Divide exponent by 2 */
+ m -= 0x3ffe;
+ exp = (unsigned short) ((m / 2) + 0x3ffe);
+
+ /* Adjust if exponent odd */
+ if ((m & 1) != 0)
+ {
+ if (m > 0)
+ exp += 1;
+ eshdn1 (xx);
+ }
+
+ ecleaz (sq);
+ ecleaz (num);
+ n = 8; /* get 8 bits of result per inner loop */
+ nlups = rndprc;
+ j = 0;
+
+ while (nlups > 0)
+ {
+ /* bring in next word of arg */
+ if (j < NE)
+ num[NI - 1] = xx[j + 3];
+ /* Do additional bit on last outer loop, for roundoff. */
+ if (nlups <= 8)
+ n = nlups + 1;
+ for (i = 0; i < n; i++)
+ {
+ /* Next 2 bits of arg */
+ eshup1 (num);
+ eshup1 (num);
+ /* Shift up answer */
+ eshup1 (sq);
+ /* Make trial divisor */
+ for (k = 0; k < NI; k++)
+ temp[k] = sq[k];
+ eshup1 (temp);
+ eaddm (sqrndbit, temp);
+ /* Subtract and insert answer bit if it goes in */
+ if (ecmpm (temp, num) <= 0)
+ {
+ esubm (temp, num);
+ sq[NI - 2] |= 1;
+ }
+ }
+ nlups -= n;
+ j += 1;
+ }
+
+ /* Adjust for extra, roundoff loop done. */
+ exp += (NBITS - 1) - rndprc;
+
+ /* Sticky bit = 1 if the remainder is nonzero. */
+ k = 0;
+ for (i = 3; i < NI; i++)
+ k |= (int) num[i];
+
+ /* Renormalize and round off. */
+ emdnorm (sq, k, 0, exp, 64);
+ emovo (sq, y);
+}
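+
+/* Illustrative sketch, not part of the original source: the same
+ longhand method on a 32-bit integer. Two bits of the argument come
+ down per step, and the trial divisor is twice the answer so far
+ plus a one bit, mirroring the eshup1/sqrndbit construction above.
+ Assumes a 32-bit unsigned long; the name is hypothetical. */
+#if 0 /* example only */
+static unsigned long
+isqrt32 (v)
+ unsigned long v;
+{
+ unsigned long sq = 0; /* answer bits so far */
+ unsigned long rem = 0; /* running remainder */
+ int i;
+
+ for (i = 0; i < 16; i++)
+ {
+ /* Bring down the next two bits of the argument. */
+ rem = (rem << 2) | (v >> 30);
+ v <<= 2;
+ /* Trial divisor: the answer so far, doubled, plus a one bit. */
+ sq <<= 1;
+ if (rem >= (sq << 1) + 1)
+ {
+ rem -= (sq << 1) + 1; /* subtract; the answer bit goes in */
+ sq |= 1;
+ }
+ }
+ return sq; /* floor of the square root of the original argument */
+}
+#endif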
+#endif /* EMU_NON_COMPILE not defined */
+
+/* Return the binary precision of the significand for a given
+ floating point mode. The mode can hold an integer value
+ that many bits wide, without losing any bits. */
+
+int
+significand_size (mode)
+ enum machine_mode mode;
+{
+
+ switch (mode)
+ case SFmode:
+ return 24;
+
+ case DFmode:
+#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+ return 53;
+#else
+#if TARGET_FLOAT_FORMAT == IBM_FLOAT_FORMAT
+ return 56;
+#else
+#if TARGET_FLOAT_FORMAT == VAX_FLOAT_FORMAT
+ return 56;
+#else
+ abort ();
+#endif
+#endif
+#endif
+
+ case XFmode:
+ return 64;
+ case TFmode:
+ return 113;
+
+ default:
+ abort ();
+ }
+}
diff --git a/gnu/usr.bin/cc/cc_int/recog.c b/gnu/usr.bin/cc/cc_int/recog.c
new file mode 100644
index 0000000..1072fe9
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/recog.c
@@ -0,0 +1,1970 @@
+/* Subroutines used by or related to instruction recognition.
+ Copyright (C) 1987, 88, 91, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include "rtl.h"
+#include <stdio.h>
+#include "insn-config.h"
+#include "insn-attr.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+#include "recog.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "real.h"
+
+#ifndef STACK_PUSH_CODE
+#ifdef STACK_GROWS_DOWNWARD
+#define STACK_PUSH_CODE PRE_DEC
+#else
+#define STACK_PUSH_CODE PRE_INC
+#endif
+#endif
+
+/* Import from final.c: */
+extern rtx alter_subreg ();
+
+int strict_memory_address_p ();
+int memory_address_p ();
+
+/* Nonzero means allow operands to be volatile.
+ This should be 0 if you are generating rtl, such as if you are calling
+ the functions in optabs.c and expmed.c (most of the time).
+ This should be 1 if all valid insns need to be recognized,
+ such as in regclass.c and final.c and reload.c.
+
+ init_recog and init_recog_no_volatile are responsible for setting this. */
+
+int volatile_ok;
+
+/* On return from `constrain_operands', indicate which alternative
+ was satisfied. */
+
+int which_alternative;
+
+/* Nonzero after end of reload pass.
+ Set to 1 or 0 by toplev.c.
+ Controls the significance of (SUBREG (MEM)). */
+
+int reload_completed;
+
+/* Initialize data used by the function `recog'.
+ This must be called once in the compilation of a function
+ before any insn recognition may be done in the function. */
+
+void
+init_recog_no_volatile ()
+{
+ volatile_ok = 0;
+}
+
+void
+init_recog ()
+{
+ volatile_ok = 1;
+}
+
+/* Try recognizing the instruction INSN,
+ and return the code number that results.
+ Remember the code so that repeated calls do not
+ need to spend the time for actual rerecognition.
+
+ This function is the normal interface to instruction recognition.
+ The automatically-generated function `recog' is normally called
+ through this one. (The only exception is in combine.c.) */
+
+int
+recog_memoized (insn)
+ rtx insn;
+{
+ if (INSN_CODE (insn) < 0)
+ INSN_CODE (insn) = recog (PATTERN (insn), insn, NULL_PTR);
+ return INSN_CODE (insn);
+}
+
+/* Check that X is an insn-body for an `asm' with operands
+ and that the operands mentioned in it are legitimate. */
+
+int
+check_asm_operands (x)
+ rtx x;
+{
+ int noperands = asm_noperands (x);
+ rtx *operands;
+ int i;
+
+ if (noperands < 0)
+ return 0;
+ if (noperands == 0)
+ return 1;
+
+ operands = (rtx *) alloca (noperands * sizeof (rtx));
+ decode_asm_operands (x, operands, NULL_PTR, NULL_PTR, NULL_PTR);
+
+ for (i = 0; i < noperands; i++)
+ if (!general_operand (operands[i], VOIDmode))
+ return 0;
+
+ return 1;
+}
+
+/* Static data for the next two routines.
+
+ The maximum number of changes supported is defined as the maximum
+ number of operands times 5. This allows for repeated substitutions
+ inside a complex indexed address or, alternatively, changes in up
+ to 5 insns. */
+
+#define MAX_CHANGE_LOCS (MAX_RECOG_OPERANDS * 5)
+
+static rtx change_objects[MAX_CHANGE_LOCS];
+static int change_old_codes[MAX_CHANGE_LOCS];
+static rtx *change_locs[MAX_CHANGE_LOCS];
+static rtx change_olds[MAX_CHANGE_LOCS];
+
+static int num_changes = 0;
+
+/* Validate a proposed change to OBJECT. LOC is the location in the rtl
+ at which NEW will be placed. If OBJECT is zero, no validation is done,
+ the change is simply made.
+
+ Two types of objects are supported: If OBJECT is a MEM, memory_address_p
+ will be called with the address and mode as parameters. If OBJECT is
+ an INSN, CALL_INSN, or JUMP_INSN, the insn will be re-recognized with
+ the change in place.
+
+ IN_GROUP is non-zero if this is part of a group of changes that must be
+ performed as a group. In that case, the changes will be stored. The
+ function `apply_change_group' will validate and apply the changes.
+
+ If IN_GROUP is zero, this is a single change. Try to recognize the insn
+ or validate the memory reference with the change applied. If the result
+ is not valid for the machine, suppress the change and return zero.
+ Otherwise, perform the change and return 1. */
+
+int
+validate_change (object, loc, new, in_group)
+ rtx object;
+ rtx *loc;
+ rtx new;
+ int in_group;
+{
+ rtx old = *loc;
+
+ if (old == new || rtx_equal_p (old, new))
+ return 1;
+
+ if (num_changes >= MAX_CHANGE_LOCS
+ || (in_group == 0 && num_changes != 0))
+ abort ();
+
+ *loc = new;
+
+ /* Save the information describing this change. */
+ change_objects[num_changes] = object;
+ change_locs[num_changes] = loc;
+ change_olds[num_changes] = old;
+
+ if (object && GET_CODE (object) != MEM)
+ {
+ /* Set INSN_CODE to force rerecognition of insn. Save old code in
+ case invalid. */
+ change_old_codes[num_changes] = INSN_CODE (object);
+ INSN_CODE (object) = -1;
+ }
+
+ num_changes++;
+
+ /* If we are making a group of changes, return 1. Otherwise, validate the
+ change group we made. */
+
+ if (in_group)
+ return 1;
+ else
+ return apply_change_group ();
+}
+
+/* Apply a group of changes previously issued with `validate_change'.
+ Return 1 if all changes are valid, zero otherwise. */
+
+int
+apply_change_group ()
+{
+ int i;
+
+ /* The changes have been applied and all INSN_CODEs have been reset to force
+ rerecognition.
+
+ The changes are valid if we aren't given an object, or if we are
+ given a MEM and it still is a valid address, or if this is an insn
+ and it is recognized. In the latter case, if reload has completed,
+ we also require that the operands meet the constraints for
+ the insn. We do not allow modifying an ASM_OPERANDS after reload
+ has completed because verifying the constraints is too difficult. */
+
+ for (i = 0; i < num_changes; i++)
+ {
+ rtx object = change_objects[i];
+
+ if (object == 0)
+ continue;
+
+ if (GET_CODE (object) == MEM)
+ {
+ if (! memory_address_p (GET_MODE (object), XEXP (object, 0)))
+ break;
+ }
+ else if ((recog_memoized (object) < 0
+ && (asm_noperands (PATTERN (object)) < 0
+ || ! check_asm_operands (PATTERN (object))
+ || reload_completed))
+ || (reload_completed
+ && (insn_extract (object),
+ ! constrain_operands (INSN_CODE (object), 1))))
+ {
+ rtx pat = PATTERN (object);
+
+ /* Perhaps we couldn't recognize the insn because there were
+ extra CLOBBERs at the end. If so, try to re-recognize
+ without the last CLOBBER (later iterations will cause each of
+ them to be eliminated, in turn). But don't do this if we
+ have an ASM_OPERAND. */
+ if (GET_CODE (pat) == PARALLEL
+ && GET_CODE (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1)) == CLOBBER
+ && asm_noperands (PATTERN (object)) < 0)
+ {
+ rtx newpat;
+
+ if (XVECLEN (pat, 0) == 2)
+ newpat = XVECEXP (pat, 0, 0);
+ else
+ {
+ int j;
+
+ newpat = gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (XVECLEN (pat, 0) - 1));
+ for (j = 0; j < XVECLEN (newpat, 0); j++)
+ XVECEXP (newpat, 0, j) = XVECEXP (pat, 0, j);
+ }
+
+ /* Add a new change to this group to replace the pattern
+ with this new pattern. Then consider this change
+ as having succeeded. The change we added will
+ cause the entire call to fail if things remain invalid.
+
+ Note that this can lose if a later change than the one
+ we are processing specified &XVECEXP (PATTERN (object), 0, X)
+ but this shouldn't occur. */
+
+ validate_change (object, &PATTERN (object), newpat, 1);
+ }
+ else if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ /* If this insn is a CLOBBER or USE, it is always valid, but is
+ never recognized. */
+ continue;
+ else
+ break;
+ }
+ }
+
+ if (i == num_changes)
+ {
+ num_changes = 0;
+ return 1;
+ }
+ else
+ {
+ cancel_changes (0);
+ return 0;
+ }
+}
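+
+/* Illustrative sketch, not part of the original source: typical use of
+ the grouping interface above. Several replacements are queued with
+ IN_GROUP nonzero and then validated atomically; if the modified insn
+ fails to re-recognize, apply_change_group backs all of them out.
+ The function below is hypothetical. */
+#if 0 /* example only */
+static int
+try_swap_operands (insn)
+ rtx insn;
+{
+ rtx set, op0, op1;
+
+ set = single_set (insn);
+ if (set == 0
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (set))) != 'c')
+ return 0; /* only a commutative source is worth swapping */
+ op0 = XEXP (SET_SRC (set), 0);
+ op1 = XEXP (SET_SRC (set), 1);
+ /* Queue both replacements; nothing is validated yet. */
+ validate_change (insn, &XEXP (SET_SRC (set), 0), op1, 1);
+ validate_change (insn, &XEXP (SET_SRC (set), 1), op0, 1);
+ /* Now validate the whole group; on failure every change is undone. */
+ return apply_change_group ();
+}
+#endif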
+
+/* Return the number of changes so far in the current group. */
+
+int
+num_validated_changes ()
+{
+ return num_changes;
+}
+
+/* Retract the changes numbered NUM and up. */
+
+void
+cancel_changes (num)
+ int num;
+{
+ int i;
+
+ /* Back out all the changes. Do this in the opposite order in which
+ they were made. */
+ for (i = num_changes - 1; i >= num; i--)
+ {
+ *change_locs[i] = change_olds[i];
+ if (change_objects[i] && GET_CODE (change_objects[i]) != MEM)
+ INSN_CODE (change_objects[i]) = change_old_codes[i];
+ }
+ num_changes = num;
+}
+
+/* Replace every occurrence of FROM in X with TO. Mark each change with
+ validate_change passing OBJECT. */
+
+static void
+validate_replace_rtx_1 (loc, from, to, object)
+ rtx *loc;
+ rtx from, to, object;
+{
+ register int i, j;
+ register char *fmt;
+ register rtx x = *loc;
+ enum rtx_code code = GET_CODE (x);
+
+ /* X matches FROM if it is the same rtx or they are both referring to the
+ same register in the same mode. Avoid calling rtx_equal_p unless the
+ operands look similar. */
+
+ if (x == from
+ || (GET_CODE (x) == REG && GET_CODE (from) == REG
+ && GET_MODE (x) == GET_MODE (from)
+ && REGNO (x) == REGNO (from))
+ || (GET_CODE (x) == GET_CODE (from) && GET_MODE (x) == GET_MODE (from)
+ && rtx_equal_p (x, from)))
+ {
+ validate_change (object, loc, to, 1);
+ return;
+ }
+
+ /* For commutative or comparison operations, try replacing each argument
+ separately and see whether any changes were made. If so, put a constant
+ argument last. */
+ if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == 'c')
+ {
+ int prev_changes = num_changes;
+
+ validate_replace_rtx_1 (&XEXP (x, 0), from, to, object);
+ validate_replace_rtx_1 (&XEXP (x, 1), from, to, object);
+ if (prev_changes != num_changes && CONSTANT_P (XEXP (x, 0)))
+ {
+ validate_change (object, loc,
+ gen_rtx (GET_RTX_CLASS (code) == 'c' ? code
+ : swap_condition (code),
+ GET_MODE (x), XEXP (x, 1), XEXP (x, 0)),
+ 1);
+ x = *loc;
+ code = GET_CODE (x);
+ }
+ }
+
+ switch (code)
+ {
+ case PLUS:
+ /* If we have a PLUS whose second operand is now a CONST_INT, use
+ plus_constant to try to simplify it. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT && XEXP (x, 1) == to)
+ validate_change (object, loc,
+ plus_constant (XEXP (x, 0), INTVAL (XEXP (x, 1))), 1);
+ return;
+
+ case ZERO_EXTEND:
+ case SIGN_EXTEND:
+ /* In these cases, the operation to be performed depends on the mode
+ of the operand. If we are replacing the operand with a VOIDmode
+ constant, we lose the information. So try to simplify the operation
+ in that case. If it fails, substitute in something that we know
+ won't be recognized. */
+ if (GET_MODE (to) == VOIDmode
+ && (XEXP (x, 0) == from
+ || (GET_CODE (XEXP (x, 0)) == REG && GET_CODE (from) == REG
+ && GET_MODE (XEXP (x, 0)) == GET_MODE (from)
+ && REGNO (XEXP (x, 0)) == REGNO (from))))
+ {
+ rtx new = simplify_unary_operation (code, GET_MODE (x), to,
+ GET_MODE (from));
+ if (new == 0)
+ new = gen_rtx (CLOBBER, GET_MODE (x), const0_rtx);
+
+ validate_change (object, loc, new, 1);
+ return;
+ }
+ break;
+
+ case SUBREG:
+ /* If we have a SUBREG of a register that we are replacing and we are
+ replacing it with a MEM, make a new MEM and try replacing the
+ SUBREG with it. Don't do this if the MEM has a mode-dependent address
+ or if we would be widening it. */
+
+ if (SUBREG_REG (x) == from
+ && GET_CODE (from) == REG
+ && GET_CODE (to) == MEM
+ && ! mode_dependent_address_p (XEXP (to, 0))
+ && ! MEM_VOLATILE_P (to)
+ && GET_MODE_SIZE (GET_MODE (x)) <= GET_MODE_SIZE (GET_MODE (to)))
+ {
+ int offset = SUBREG_WORD (x) * UNITS_PER_WORD;
+ enum machine_mode mode = GET_MODE (x);
+ rtx new;
+
+#if BYTES_BIG_ENDIAN
+ offset += (MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode)));
+#endif
+
+ new = gen_rtx (MEM, mode, plus_constant (XEXP (to, 0), offset));
+ MEM_VOLATILE_P (new) = MEM_VOLATILE_P (to);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (to);
+ MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (to);
+ validate_change (object, loc, new, 1);
+ return;
+ }
+ break;
+
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ /* If we are replacing a register with memory, try to change the memory
+ to be the mode required for memory in extract operations (this isn't
+ likely to be an insertion operation; if it was, nothing bad will
+ happen, we might just fail in some cases). */
+
+ if (XEXP (x, 0) == from && GET_CODE (from) == REG && GET_CODE (to) == MEM
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 2)) == CONST_INT
+ && ! mode_dependent_address_p (XEXP (to, 0))
+ && ! MEM_VOLATILE_P (to))
+ {
+ enum machine_mode wanted_mode = VOIDmode;
+ enum machine_mode is_mode = GET_MODE (to);
+ int width = INTVAL (XEXP (x, 1));
+ int pos = INTVAL (XEXP (x, 2));
+
+#ifdef HAVE_extzv
+ if (code == ZERO_EXTRACT)
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_extzv][1];
+#endif
+#ifdef HAVE_extv
+ if (code == SIGN_EXTRACT)
+ wanted_mode = insn_operand_mode[(int) CODE_FOR_extv][1];
+#endif
+
+ /* If we have a narrower mode, we can do something. */
+ if (wanted_mode != VOIDmode
+ && GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode))
+ {
+ int offset = pos / BITS_PER_UNIT;
+ rtx newmem;
+
+ /* If the bytes and bits are counted differently, we
+ must adjust the offset. */
+#if BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
+ offset = (GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (wanted_mode)
+ - offset);
+#endif
+
+ pos %= GET_MODE_BITSIZE (wanted_mode);
+
+ newmem = gen_rtx (MEM, wanted_mode,
+ plus_constant (XEXP (to, 0), offset));
+ RTX_UNCHANGING_P (newmem) = RTX_UNCHANGING_P (to);
+ MEM_VOLATILE_P (newmem) = MEM_VOLATILE_P (to);
+ MEM_IN_STRUCT_P (newmem) = MEM_IN_STRUCT_P (to);
+
+ validate_change (object, &XEXP (x, 2), GEN_INT (pos), 1);
+ validate_change (object, &XEXP (x, 0), newmem, 1);
+ }
+ }
+
+ break;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ validate_replace_rtx_1 (&XEXP (x, i), from, to, object);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ validate_replace_rtx_1 (&XVECEXP (x, i, j), from, to, object);
+ }
+}
+
+/* Try replacing every occurrence of FROM in INSN with TO. After all
+ changes have been made, validate by seeing if INSN is still valid. */
+
+int
+validate_replace_rtx (from, to, insn)
+ rtx from, to, insn;
+{
+ validate_replace_rtx_1 (&PATTERN (insn), from, to, insn);
+ return apply_change_group ();
+}
+
+#ifdef HAVE_cc0
+/* Return 1 if the insn using CC0 set by INSN does not contain
+ any ordered tests applied to the condition codes.
+ EQ and NE tests do not count. */
+
+int
+next_insn_tests_no_inequality (insn)
+ rtx insn;
+{
+ register rtx next = next_cc0_user (insn);
+
+ /* If there is no next insn, we have to take the conservative choice. */
+ if (next == 0)
+ return 0;
+
+ return ((GET_CODE (next) == JUMP_INSN
+ || GET_CODE (next) == INSN
+ || GET_CODE (next) == CALL_INSN)
+ && ! inequality_comparisons_p (PATTERN (next)));
+}
+
+#if 0 /* This is useless since the insn that sets the cc's
+ must be followed immediately by the use of them. */
+/* Return 1 if the CC value set up by INSN is not used. */
+
+int
+next_insns_test_no_inequality (insn)
+ rtx insn;
+{
+ register rtx next = NEXT_INSN (insn);
+
+ for (; next != 0; next = NEXT_INSN (next))
+ {
+ if (GET_CODE (next) == CODE_LABEL
+ || GET_CODE (next) == BARRIER)
+ return 1;
+ if (GET_CODE (next) == NOTE)
+ continue;
+ if (inequality_comparisons_p (PATTERN (next)))
+ return 0;
+ if (sets_cc0_p (PATTERN (next)) == 1)
+ return 1;
+ if (! reg_mentioned_p (cc0_rtx, PATTERN (next)))
+ return 1;
+ }
+ return 1;
+}
+#endif
+#endif
+
+/* This is used by find_single_use to locate an rtx that contains exactly one
+ use of DEST, which is typically either a REG or CC0. It returns a
+ pointer to the innermost rtx expression containing DEST. Appearances of
+ DEST that are being used to totally replace it are not counted. */
+
+static rtx *
+find_single_use_1 (dest, loc)
+ rtx dest;
+ rtx *loc;
+{
+ rtx x = *loc;
+ enum rtx_code code = GET_CODE (x);
+ rtx *result = 0;
+ rtx *this_result;
+ int i;
+ char *fmt;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_DOUBLE:
+ case CLOBBER:
+ return 0;
+
+ case SET:
+ /* If the destination is anything other than CC0, PC, a REG or a SUBREG
+ of a REG that occupies all of the REG, the insn uses DEST if
+ it is mentioned in the destination or the source. Otherwise, we
+ need only check the source. */
+ if (GET_CODE (SET_DEST (x)) != CC0
+ && GET_CODE (SET_DEST (x)) != PC
+ && GET_CODE (SET_DEST (x)) != REG
+ && ! (GET_CODE (SET_DEST (x)) == SUBREG
+ && GET_CODE (SUBREG_REG (SET_DEST (x))) == REG
+ && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
+ break;
+
+ return find_single_use_1 (dest, &SET_SRC (x));
+
+ case MEM:
+ case SUBREG:
+ return find_single_use_1 (dest, &XEXP (x, 0));
+ }
+
+ /* If it wasn't one of the common cases above, check each expression and
+ vector of this code. Look for a unique usage of DEST. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (dest == XEXP (x, i)
+ || (GET_CODE (dest) == REG && GET_CODE (XEXP (x, i)) == REG
+ && REGNO (dest) == REGNO (XEXP (x, i))))
+ this_result = loc;
+ else
+ this_result = find_single_use_1 (dest, &XEXP (x, i));
+
+ if (result == 0)
+ result = this_result;
+ else if (this_result)
+ /* Duplicate usage. */
+ return 0;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ {
+ if (XVECEXP (x, i, j) == dest
+ || (GET_CODE (dest) == REG
+ && GET_CODE (XVECEXP (x, i, j)) == REG
+ && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
+ this_result = loc;
+ else
+ this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
+
+ if (result == 0)
+ result = this_result;
+ else if (this_result)
+ return 0;
+ }
+ }
+ }
+
+ return result;
+}
+
+/* See if DEST, produced in INSN, is used only a single time in the
+ sequel. If so, return a pointer to the innermost rtx expression in which
+ it is used.
+
+ If PLOC is non-zero, *PLOC is set to the insn containing the single use.
+
+ This routine will usually return zero either before flow is called (because
+ there will be no LOG_LINKS notes) or after reload (because the REG_DEAD
+ note can't be trusted).
+
+ If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
+ care about REG_DEAD notes or LOG_LINKS.
+
+ Otherwise, we find the single use by finding an insn that has a
+ LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
+ only referenced once in that insn, we know that it must be the first
+ and last insn referencing DEST. */
+
+rtx *
+find_single_use (dest, insn, ploc)
+ rtx dest;
+ rtx insn;
+ rtx *ploc;
+{
+ rtx next;
+ rtx *result;
+ rtx link;
+
+#ifdef HAVE_cc0
+ if (dest == cc0_rtx)
+ {
+ next = NEXT_INSN (insn);
+ if (next == 0
+ || (GET_CODE (next) != INSN && GET_CODE (next) != JUMP_INSN))
+ return 0;
+
+ result = find_single_use_1 (dest, &PATTERN (next));
+ if (result && ploc)
+ *ploc = next;
+ return result;
+ }
+#endif
+
+ if (reload_completed || reload_in_progress || GET_CODE (dest) != REG)
+ return 0;
+
+ for (next = next_nonnote_insn (insn);
+ next != 0 && GET_CODE (next) != CODE_LABEL;
+ next = next_nonnote_insn (next))
+ if (GET_RTX_CLASS (GET_CODE (next)) == 'i' && dead_or_set_p (next, dest))
+ {
+ for (link = LOG_LINKS (next); link; link = XEXP (link, 1))
+ if (XEXP (link, 0) == insn)
+ break;
+
+ if (link)
+ {
+ result = find_single_use_1 (dest, &PATTERN (next));
+ if (ploc)
+ *ploc = next;
+ return result;
+ }
+ }
+
+ return 0;
+}
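+
+/* An illustrative usage sketch (DEST, INSN and USE_INSN are
+ hypothetical values):
+
+ rtx use_insn;
+ rtx *use = find_single_use (dest, insn, &use_insn);
+ if (use)
+ ... *USE is the sole reference to DEST, found inside USE_INSN ...
+
+ combine-style passes use this to decide whether the value computed by
+ INSN can safely be folded into its one consumer. */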
+
+/* Return 1 if OP is a valid general operand for machine mode MODE.
+ This is either a register reference, a memory reference,
+ or a constant. In the case of a memory reference, the address
+ is checked for general validity for the target machine.
+
+ Register and memory references must have mode MODE in order to be valid,
+ but some constants have no machine mode and are valid for any mode.
+
+ If MODE is VOIDmode, OP is checked for validity for whatever mode
+ it has.
+
+ The main use of this function is as a predicate in match_operand
+ expressions in the machine description.
+
+ For an explanation of this function's behavior for registers of
+ class NO_REGS, see the comment for `register_operand'. */
+
+int
+general_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ register enum rtx_code code = GET_CODE (op);
+ int mode_altering_drug = 0;
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+
+ /* Don't accept CONST_INT or anything similar
+ if the caller wants something floating. */
+ if (GET_MODE (op) == VOIDmode && mode != VOIDmode
+ && GET_MODE_CLASS (mode) != MODE_INT
+ && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
+ return 0;
+
+ if (CONSTANT_P (op))
+ return ((GET_MODE (op) == VOIDmode || GET_MODE (op) == mode)
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))
+#endif
+ && LEGITIMATE_CONSTANT_P (op));
+
+ /* Except for certain constants with VOIDmode, already checked for,
+ OP's mode must match MODE if MODE specifies a mode. */
+
+ if (GET_MODE (op) != mode)
+ return 0;
+
+ if (code == SUBREG)
+ {
+#ifdef INSN_SCHEDULING
+ /* On machines that have insn scheduling, we want all memory
+ references to be explicit, so outlaw paradoxical SUBREGs. */
+ if (GET_CODE (SUBREG_REG (op)) == MEM
+ && GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
+ return 0;
+#endif
+
+ op = SUBREG_REG (op);
+ code = GET_CODE (op);
+#if 0
+ /* No longer needed, since (SUBREG (MEM...))
+ will load the MEM into a reload reg in the MEM's own mode. */
+ mode_altering_drug = 1;
+#endif
+ }
+
+ if (code == REG)
+ /* A register whose class is NO_REGS is not a general operand. */
+ return (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS);
+
+ if (code == MEM)
+ {
+ register rtx y = XEXP (op, 0);
+ if (! volatile_ok && MEM_VOLATILE_P (op))
+ return 0;
+ /* Use the mem's mode, since it will be reloaded thus. */
+ mode = GET_MODE (op);
+ GO_IF_LEGITIMATE_ADDRESS (mode, y, win);
+ }
+ return 0;
+
+ win:
+ if (mode_altering_drug)
+ return ! mode_dependent_address_p (XEXP (op, 0));
+ return 1;
+}
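+
+/* For example (an illustrative machine-description fragment, not taken
+ from any real .md file):
+
+ (define_insn "addsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (plus:SI (match_operand:SI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "g")))]
+ ""
+ "...")
+
+ accepts any register, valid memory reference or legitimate constant
+ for each operand, exactly the cases tested above. */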
+
+/* Return 1 if OP is a valid memory address for a memory reference
+ of mode MODE.
+
+ The main use of this function is as a predicate in match_operand
+ expressions in the machine description. */
+
+int
+address_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return memory_address_p (mode, op);
+}
+
+/* Return 1 if OP is a register reference of mode MODE.
+ If MODE is VOIDmode, accept a register in any mode.
+
+ The main use of this function is as a predicate in match_operand
+ expressions in the machine description.
+
+ As a special exception, registers whose class is NO_REGS are
+ not accepted by `register_operand'. The reason for this change
+ is to allow the representation of special architecture artifacts
+ (such as a condition code register) without extending the rtl
+ definitions. Since registers of class NO_REGS cannot be used
+ as registers in any case where register classes are examined,
+ it is most consistent to keep this function from accepting them. */
+
+int
+register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ {
+ /* Before reload, we can allow (SUBREG (MEM...)) as a register operand
+ because it is guaranteed to be reloaded into one.
+ Just make sure the MEM is valid in itself.
+ (Ideally, (SUBREG (MEM)...) should not exist after reload,
+ but currently it does result from (SUBREG (REG)...) where the
+ reg went on the stack.) */
+ if (! reload_completed && GET_CODE (SUBREG_REG (op)) == MEM)
+ return general_operand (op, mode);
+ op = SUBREG_REG (op);
+ }
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
+
+/* Return 1 if OP should match a MATCH_SCRATCH, i.e., if it is a SCRATCH
+ or a hard register. */
+
+int
+scratch_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return (GET_MODE (op) == mode
+ && (GET_CODE (op) == SCRATCH
+ || (GET_CODE (op) == REG
+ && REGNO (op) < FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return 1 if OP is a valid immediate operand for mode MODE.
+
+ The main use of this function is as a predicate in match_operand
+ expressions in the machine description. */
+
+int
+immediate_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ /* Don't accept CONST_INT or anything similar
+ if the caller wants something floating. */
+ if (GET_MODE (op) == VOIDmode && mode != VOIDmode
+ && GET_MODE_CLASS (mode) != MODE_INT
+ && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
+ return 0;
+
+ return (CONSTANT_P (op)
+ && (GET_MODE (op) == mode || mode == VOIDmode
+ || GET_MODE (op) == VOIDmode)
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))
+#endif
+ && LEGITIMATE_CONSTANT_P (op));
+}
+
+/* Returns 1 if OP is an operand that is a CONST_INT. */
+
+int
+const_int_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return GET_CODE (op) == CONST_INT;
+}
+
+/* Returns 1 if OP is an operand that is a constant integer or constant
+ floating-point number. */
+
+int
+const_double_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ /* Don't accept CONST_INT or anything similar
+ if the caller wants something floating. */
+ if (GET_MODE (op) == VOIDmode && mode != VOIDmode
+ && GET_MODE_CLASS (mode) != MODE_INT
+ && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
+ return 0;
+
+ return ((GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT)
+ && (mode == VOIDmode || GET_MODE (op) == mode
+ || GET_MODE (op) == VOIDmode));
+}
+
+/* Return 1 if OP is a general operand that is not an immediate operand. */
+
+int
+nonimmediate_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return (general_operand (op, mode) && ! CONSTANT_P (op));
+}
+
+/* Return 1 if OP is a register reference or immediate value of mode MODE. */
+
+int
+nonmemory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (CONSTANT_P (op))
+ {
+ /* Don't accept CONST_INT or anything similar
+ if the caller wants something floating. */
+ if (GET_MODE (op) == VOIDmode && mode != VOIDmode
+ && GET_MODE_CLASS (mode) != MODE_INT
+ && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
+ return 0;
+
+ return ((GET_MODE (op) == VOIDmode || GET_MODE (op) == mode)
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))
+#endif
+ && LEGITIMATE_CONSTANT_P (op));
+ }
+
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ {
+ /* Before reload, we can allow (SUBREG (MEM...)) as a register operand
+ because it is guaranteed to be reloaded into one.
+ Just make sure the MEM is valid in itself.
+ (Ideally, (SUBREG (MEM)...) should not exist after reload,
+ but currently it does result from (SUBREG (REG)...) where the
+ reg went on the stack.) */
+ if (! reload_completed && GET_CODE (SUBREG_REG (op)) == MEM)
+ return general_operand (op, mode);
+ op = SUBREG_REG (op);
+ }
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
+
+/* Return 1 if OP is a valid operand that stands for pushing a
+ value of mode MODE onto the stack.
+
+ The main use of this function is as a predicate in match_operand
+ expressions in the machine description. */
+
+int
+push_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != MEM)
+ return 0;
+
+ if (GET_MODE (op) != mode)
+ return 0;
+
+ op = XEXP (op, 0);
+
+ if (GET_CODE (op) != STACK_PUSH_CODE)
+ return 0;
+
+ return XEXP (op, 0) == stack_pointer_rtx;
+}
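+
+/* E.g. (illustrative): on a machine where STACK_PUSH_CODE is PRE_DEC,
+ push_operand accepts
+ (mem:SI (pre_dec:SI (reg:SI sp)))
+ but rejects a plain (mem:SI (reg:SI sp)), which merely stores to the
+ current top of stack without adjusting it. */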
+
+/* Return 1 if ADDR is a valid memory address for mode MODE. */
+
+int
+memory_address_p (mode, addr)
+ enum machine_mode mode;
+ register rtx addr;
+{
+ GO_IF_LEGITIMATE_ADDRESS (mode, addr, win);
+ return 0;
+
+ win:
+ return 1;
+}
+
+/* Return 1 if OP is a valid memory reference with mode MODE,
+ including a valid address.
+
+ The main use of this function is as a predicate in match_operand
+ expressions in the machine description. */
+
+int
+memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ rtx inner;
+
+ if (! reload_completed)
+ /* Note that no SUBREG is a memory operand before the end of the reload pass,
+ because (SUBREG (MEM...)) forces reloading into a register. */
+ return GET_CODE (op) == MEM && general_operand (op, mode);
+
+ if (mode != VOIDmode && GET_MODE (op) != mode)
+ return 0;
+
+ inner = op;
+ if (GET_CODE (inner) == SUBREG)
+ inner = SUBREG_REG (inner);
+
+ return (GET_CODE (inner) == MEM && general_operand (op, mode));
+}
+
+/* Return 1 if OP is a valid indirect memory reference with mode MODE;
+ that is, a memory reference whose address is a general_operand. */
+
+int
+indirect_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ /* Before reload, a SUBREG isn't in memory (see memory_operand, above). */
+ if (! reload_completed
+ && GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == MEM)
+ {
+ register int offset = SUBREG_WORD (op) * UNITS_PER_WORD;
+ rtx inner = SUBREG_REG (op);
+
+#if BYTES_BIG_ENDIAN
+ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (op)))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (inner))));
+#endif
+
+ if (mode != VOIDmode && GET_MODE (op) != mode)
+ return 0;
+
+ /* The only way that we can have a general_operand as the resulting
+ address is if OFFSET is zero and the address already is an operand
+ or if the address is (plus Y (const_int -OFFSET)) and Y is an
+ operand. */
+
+ return ((offset == 0 && general_operand (XEXP (inner, 0), Pmode))
+ || (GET_CODE (XEXP (inner, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (inner, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (inner, 0), 1)) == -offset
+ && general_operand (XEXP (XEXP (inner, 0), 0), Pmode)));
+ }
+
+ return (GET_CODE (op) == MEM
+ && memory_operand (op, mode)
+ && general_operand (XEXP (op, 0), Pmode));
+}
+
+/* Return 1 if this is a comparison operator. This allows the use of
+ MATCH_OPERATOR to recognize all the branch insns. */
+
+int
+comparison_operator (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return ((mode == VOIDmode || GET_MODE (op) == mode)
+ && GET_RTX_CLASS (GET_CODE (op)) == '<');
+}
+
+/* If BODY is an insn body that uses ASM_OPERANDS,
+ return the number of operands (both input and output) in the insn.
+ Otherwise return -1. */
+
+int
+asm_noperands (body)
+ rtx body;
+{
+ if (GET_CODE (body) == ASM_OPERANDS)
+ /* No output operands: return number of input operands. */
+ return ASM_OPERANDS_INPUT_LENGTH (body);
+ if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS)
+ /* Single output operand: BODY is (set OUTPUT (asm_operands ...)). */
+ return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (body)) + 1;
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == SET
+ && GET_CODE (SET_SRC (XVECEXP (body, 0, 0))) == ASM_OPERANDS)
+ {
+ /* Multiple output operands, or 1 output plus some clobbers:
+ body is [(set OUTPUT (asm_operands ...))... (clobber (reg ...))...]. */
+ int i;
+ int n_sets;
+
+ /* Count backwards through CLOBBERs to determine number of SETs. */
+ for (i = XVECLEN (body, 0); i > 0; i--)
+ {
+ if (GET_CODE (XVECEXP (body, 0, i - 1)) == SET)
+ break;
+ if (GET_CODE (XVECEXP (body, 0, i - 1)) != CLOBBER)
+ return -1;
+ }
+
+ /* N_SETS is now number of output operands. */
+ n_sets = i;
+
+ /* Verify that all the SETs we have
+ came from a single original asm_operands insn
+ (so that invalid combinations are blocked). */
+ for (i = 0; i < n_sets; i++)
+ {
+ rtx elt = XVECEXP (body, 0, i);
+ if (GET_CODE (elt) != SET)
+ return -1;
+ if (GET_CODE (SET_SRC (elt)) != ASM_OPERANDS)
+ return -1;
+ /* If these ASM_OPERANDS rtx's came from different original insns
+ then they aren't allowed together. */
+ if (ASM_OPERANDS_INPUT_VEC (SET_SRC (elt))
+ != ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP (body, 0, 0))))
+ return -1;
+ }
+ return (ASM_OPERANDS_INPUT_LENGTH (SET_SRC (XVECEXP (body, 0, 0)))
+ + n_sets);
+ }
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
+ {
+ /* 0 outputs, but some clobbers:
+ body is [(asm_operands ...) (clobber (reg ...))...]. */
+ int i;
+
+ /* Make sure all the other parallel things really are clobbers. */
+ for (i = XVECLEN (body, 0) - 1; i > 0; i--)
+ if (GET_CODE (XVECEXP (body, 0, i)) != CLOBBER)
+ return -1;
+
+ return ASM_OPERANDS_INPUT_LENGTH (XVECEXP (body, 0, 0));
+ }
+ else
+ return -1;
+}
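+
+/* For instance (illustrative), given the body
+
+ (parallel [(set (reg:SI 100) (asm_operands ...))
+ (set (reg:SI 101) (asm_operands ...))
+ (clobber (reg:QI 68))])
+
+ where both SETs came from one asm statement with two inputs,
+ asm_noperands returns 4: two outputs plus two inputs, with the
+ CLOBBER not counted. */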
+
+/* Assuming BODY is an insn body that uses ASM_OPERANDS,
+ copy its operands (both input and output) into the vector OPERANDS,
+ the locations of the operands within the insn into the vector OPERAND_LOCS,
+ and the constraints for the operands into CONSTRAINTS.
+ Write the modes of the operands into MODES.
+ Return the assembler-template.
+
+ If MODES, OPERAND_LOCS, CONSTRAINTS or OPERANDS is 0,
+ we don't store that info. */
+
+char *
+decode_asm_operands (body, operands, operand_locs, constraints, modes)
+ rtx body;
+ rtx *operands;
+ rtx **operand_locs;
+ char **constraints;
+ enum machine_mode *modes;
+{
+ register int i;
+ int noperands;
+ char *template = 0;
+
+ if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS)
+ {
+ rtx asmop = SET_SRC (body);
+ /* Single output operand: BODY is (set OUTPUT (asm_operands ....)). */
+
+ noperands = ASM_OPERANDS_INPUT_LENGTH (asmop) + 1;
+
+ for (i = 1; i < noperands; i++)
+ {
+ if (operand_locs)
+ operand_locs[i] = &ASM_OPERANDS_INPUT (asmop, i - 1);
+ if (operands)
+ operands[i] = ASM_OPERANDS_INPUT (asmop, i - 1);
+ if (constraints)
+ constraints[i] = ASM_OPERANDS_INPUT_CONSTRAINT (asmop, i - 1);
+ if (modes)
+ modes[i] = ASM_OPERANDS_INPUT_MODE (asmop, i - 1);
+ }
+
+ /* The output is in the SET.
+ Its constraint is in the ASM_OPERANDS itself. */
+ if (operands)
+ operands[0] = SET_DEST (body);
+ if (operand_locs)
+ operand_locs[0] = &SET_DEST (body);
+ if (constraints)
+ constraints[0] = ASM_OPERANDS_OUTPUT_CONSTRAINT (asmop);
+ if (modes)
+ modes[0] = GET_MODE (SET_DEST (body));
+ template = ASM_OPERANDS_TEMPLATE (asmop);
+ }
+ else if (GET_CODE (body) == ASM_OPERANDS)
+ {
+ rtx asmop = body;
+ /* No output operands: BODY is (asm_operands ....). */
+
+ noperands = ASM_OPERANDS_INPUT_LENGTH (asmop);
+
+ /* The input operands are found in the 1st element vector. */
+ /* Constraints for inputs are in the 2nd element vector. */
+ for (i = 0; i < noperands; i++)
+ {
+ if (operand_locs)
+ operand_locs[i] = &ASM_OPERANDS_INPUT (asmop, i);
+ if (operands)
+ operands[i] = ASM_OPERANDS_INPUT (asmop, i);
+ if (constraints)
+ constraints[i] = ASM_OPERANDS_INPUT_CONSTRAINT (asmop, i);
+ if (modes)
+ modes[i] = ASM_OPERANDS_INPUT_MODE (asmop, i);
+ }
+ template = ASM_OPERANDS_TEMPLATE (asmop);
+ }
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == SET)
+ {
+ rtx asmop = SET_SRC (XVECEXP (body, 0, 0));
+ int nparallel = XVECLEN (body, 0); /* Includes CLOBBERs. */
+ int nin = ASM_OPERANDS_INPUT_LENGTH (asmop);
+ int nout = 0; /* Does not include CLOBBERs. */
+
+ /* At least one output, plus some CLOBBERs. */
+
+ /* The outputs are in the SETs.
+ Their constraints are in the ASM_OPERANDS itself. */
+ for (i = 0; i < nparallel; i++)
+ {
+ if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
+ break; /* Past last SET */
+
+ if (operands)
+ operands[i] = SET_DEST (XVECEXP (body, 0, i));
+ if (operand_locs)
+ operand_locs[i] = &SET_DEST (XVECEXP (body, 0, i));
+ if (constraints)
+ constraints[i] = XSTR (SET_SRC (XVECEXP (body, 0, i)), 1);
+ if (modes)
+ modes[i] = GET_MODE (SET_DEST (XVECEXP (body, 0, i)));
+ nout++;
+ }
+
+ for (i = 0; i < nin; i++)
+ {
+ if (operand_locs)
+ operand_locs[i + nout] = &ASM_OPERANDS_INPUT (asmop, i);
+ if (operands)
+ operands[i + nout] = ASM_OPERANDS_INPUT (asmop, i);
+ if (constraints)
+ constraints[i + nout] = ASM_OPERANDS_INPUT_CONSTRAINT (asmop, i);
+ if (modes)
+ modes[i + nout] = ASM_OPERANDS_INPUT_MODE (asmop, i);
+ }
+
+ template = ASM_OPERANDS_TEMPLATE (asmop);
+ }
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
+ {
+ /* No outputs, but some CLOBBERs. */
+
+ rtx asmop = XVECEXP (body, 0, 0);
+ int nin = ASM_OPERANDS_INPUT_LENGTH (asmop);
+
+ for (i = 0; i < nin; i++)
+ {
+ if (operand_locs)
+ operand_locs[i] = &ASM_OPERANDS_INPUT (asmop, i);
+ if (operands)
+ operands[i] = ASM_OPERANDS_INPUT (asmop, i);
+ if (constraints)
+ constraints[i] = ASM_OPERANDS_INPUT_CONSTRAINT (asmop, i);
+ if (modes)
+ modes[i] = ASM_OPERANDS_INPUT_MODE (asmop, i);
+ }
+
+ template = ASM_OPERANDS_TEMPLATE (asmop);
+ }
+
+ return template;
+}
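+
+/* An illustrative usage sketch: a caller interested only in the
+ operands and their constraints passes 0 for the vectors it does not
+ need:
+
+ rtx operands[MAX_RECOG_OPERANDS];
+ char *constraints[MAX_RECOG_OPERANDS];
+ char *tmpl = decode_asm_operands (body, operands, 0, constraints, 0);
+
+ where BODY came from an insn for which asm_noperands returned a
+ non-negative count. */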
+
+/* Given an rtx *P, if it is a sum containing an integer constant term,
+ return the location (type rtx *) of the pointer to that constant term.
+ Otherwise, return a null pointer. */
+
+static rtx *
+find_constant_term_loc (p)
+ rtx *p;
+{
+ register rtx *tem;
+ register enum rtx_code code = GET_CODE (*p);
+
+ /* If *P IS such a constant term, P is its location. */
+
+ if (code == CONST_INT || code == SYMBOL_REF || code == LABEL_REF
+ || code == CONST)
+ return p;
+
+ /* Otherwise, if not a sum, it has no constant term. */
+
+ if (GET_CODE (*p) != PLUS)
+ return 0;
+
+ /* If both summands are constant, the whole sum is a constant term; P is its location. */
+
+ if (XEXP (*p, 0) && CONSTANT_P (XEXP (*p, 0))
+ && XEXP (*p, 1) && CONSTANT_P (XEXP (*p, 1)))
+ return p;
+
+ /* Otherwise, check each summand for containing a constant term. */
+
+ if (XEXP (*p, 0) != 0)
+ {
+ tem = find_constant_term_loc (&XEXP (*p, 0));
+ if (tem != 0)
+ return tem;
+ }
+
+ if (XEXP (*p, 1) != 0)
+ {
+ tem = find_constant_term_loc (&XEXP (*p, 1));
+ if (tem != 0)
+ return tem;
+ }
+
+ return 0;
+}
+
+/* Return 1 if OP is a memory reference
+ whose address contains no side effects
+ and remains valid after the addition
+ of a positive integer less than the
+ size of the object being referenced.
+
+ We assume that the original address is valid and do not check it.
+
+ This uses strict_memory_address_p as a subroutine, so
+ don't use it before reload. */
+
+int
+offsettable_memref_p (op)
+ rtx op;
+{
+ return ((GET_CODE (op) == MEM)
+ && offsettable_address_p (1, GET_MODE (op), XEXP (op, 0)));
+}
+
+/* Similar, but don't require a strictly valid mem ref:
+ consider pseudo-regs valid as index or base regs. */
+
+int
+offsettable_nonstrict_memref_p (op)
+ rtx op;
+{
+ return ((GET_CODE (op) == MEM)
+ && offsettable_address_p (0, GET_MODE (op), XEXP (op, 0)));
+}
+
+/* Return 1 if Y is a memory address which contains no side effects
+ and would remain valid after the addition of a positive integer
+ less than the size of that mode.
+
+ We assume that the original address is valid and do not check it.
+ We do check that it is valid for narrower modes.
+
+ If STRICTP is nonzero, we require a strictly valid address,
+ for the sake of use in reload.c. */
+
+int
+offsettable_address_p (strictp, mode, y)
+ int strictp;
+ enum machine_mode mode;
+ register rtx y;
+{
+ register enum rtx_code ycode = GET_CODE (y);
+ register rtx z;
+ rtx y1 = y;
+ rtx *y2;
+ int (*addressp) () = (strictp ? strict_memory_address_p : memory_address_p);
+
+ if (CONSTANT_ADDRESS_P (y))
+ return 1;
+
+ /* Adjusting an offsettable address involves changing to a narrower mode.
+ Make sure that's OK. */
+
+ if (mode_dependent_address_p (y))
+ return 0;
+
+ /* If the expression contains a constant term,
+ see if it remains valid when max possible offset is added. */
+
+ if ((ycode == PLUS) && (y2 = find_constant_term_loc (&y1)))
+ {
+ int good;
+
+ y1 = *y2;
+ *y2 = plus_constant (*y2, GET_MODE_SIZE (mode) - 1);
+ /* Use QImode because an odd displacement may be automatically invalid
+ for any wider mode. But it should be valid for a single byte. */
+ good = (*addressp) (QImode, y);
+
+ /* In any case, restore old contents of memory. */
+ *y2 = y1;
+ return good;
+ }
+
+ if (ycode == PRE_DEC || ycode == PRE_INC
+ || ycode == POST_DEC || ycode == POST_INC)
+ return 0;
+
+ /* The offset added here is chosen as the maximum offset that
+ any instruction could need to add when operating on something
+ of the specified mode. We assume that if Y and Y+c are
+ valid addresses then so is Y+d for all 0<d<c. */
+
+ z = plus_constant_for_output (y, GET_MODE_SIZE (mode) - 1);
+
+ /* Use QImode because an odd displacement may be automatically invalid
+ for any wider mode. But it should be valid for a single byte. */
+ return (*addressp) (QImode, z);
+}
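+
+/* A worked example (illustrative): checking a 4-byte SImode reference
+ to (plus (reg X) (const_int 20)) asks whether
+ (plus (reg X) (const_int 23)) is also a valid QImode address; if it
+ is, every byte offset from 20 through 23 is presumed reachable. */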
+
+/* Return 1 if ADDR is an address-expression whose effect depends
+ on the mode of the memory reference it is used in.
+
+ Autoincrement addressing is a typical example of mode-dependence
+ because the amount of the increment depends on the mode. */
+
+int
+mode_dependent_address_p (addr)
+ rtx addr;
+{
+ GO_IF_MODE_DEPENDENT_ADDRESS (addr, win);
+ return 0;
+ win:
+ return 1;
+}
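+
+/* E.g. (illustrative): (post_inc:SI (reg:SI sp)) is mode-dependent,
+ because the amount added after the access equals the size of the mode
+ being referenced. */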
+
+/* Return 1 if OP is a general operand
+ other than a memory ref with a mode dependent address. */
+
+int
+mode_independent_operand (op, mode)
+ enum machine_mode mode;
+ rtx op;
+{
+ rtx addr;
+
+ if (! general_operand (op, mode))
+ return 0;
+
+ if (GET_CODE (op) != MEM)
+ return 1;
+
+ addr = XEXP (op, 0);
+ GO_IF_MODE_DEPENDENT_ADDRESS (addr, lose);
+ return 1;
+ lose:
+ return 0;
+}
+
+/* Given an operand OP that is a valid memory reference
+ which satisfies offsettable_memref_p,
+ return a new memory reference whose address has been adjusted by OFFSET.
+ OFFSET should be positive and less than the size of the object referenced.
+*/
+
+rtx
+adj_offsettable_operand (op, offset)
+ rtx op;
+ int offset;
+{
+ register enum rtx_code code = GET_CODE (op);
+
+ if (code == MEM)
+ {
+ register rtx y = XEXP (op, 0);
+ register rtx new;
+
+ if (CONSTANT_ADDRESS_P (y))
+ {
+ new = gen_rtx (MEM, GET_MODE (op), plus_constant_for_output (y, offset));
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (op);
+ return new;
+ }
+
+ if (GET_CODE (y) == PLUS)
+ {
+ rtx z = y;
+ register rtx *const_loc;
+
+ op = copy_rtx (op);
+ z = XEXP (op, 0);
+ const_loc = find_constant_term_loc (&z);
+ if (const_loc)
+ {
+ *const_loc = plus_constant_for_output (*const_loc, offset);
+ return op;
+ }
+ }
+
+ new = gen_rtx (MEM, GET_MODE (op), plus_constant_for_output (y, offset));
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (op);
+ return new;
+ }
+ abort ();
+}
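+
+/* E.g. (illustrative): adjusting
+ (mem:SI (plus:SI (reg:SI x) (const_int 4)))
+ by an offset of 2 yields
+ (mem:SI (plus:SI (reg:SI x) (const_int 6)))
+ leaving the original rtx unmodified. */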
+
+#ifdef REGISTER_CONSTRAINTS
+
+/* Check the operands of an insn (found in recog_operands)
+ against the insn's operand constraints (found via INSN_CODE_NUM)
+ and return 1 if they are valid.
+
+ WHICH_ALTERNATIVE is set to a number which indicates which
+ alternative of constraints was matched: 0 for the first alternative,
+ 1 for the next, etc.
+
+ In addition, when two operands are required to match
+ and it happens that the output operand is (reg) while the
+ input operand is --(reg) or ++(reg) (a pre-inc or pre-dec),
+ make the output operand look like the input.
+ This is because the output operand is the one the template will print.
+
+ This is used in final, just before printing the assembler code and by
+ the routines that determine an insn's attribute.
+
+ If STRICT is a positive non-zero value, it means that we have been
+ called after reload has been completed. In that case, we must
+ do all checks strictly. If it is zero, it means that we have been called
+ before reload has completed. In that case, we first try to see if we can
+ find an alternative that matches strictly. If not, we try again, this
+ time assuming that reload will fix up the insn. This provides a "best
+ guess" for the alternative and is used to compute attributes of insns prior
+ to reload. A negative value of STRICT is used for this internal call. */
+
+struct funny_match
+{
+ int this, other;
+};
+
+int
+constrain_operands (insn_code_num, strict)
+ int insn_code_num;
+ int strict;
+{
+ char *constraints[MAX_RECOG_OPERANDS];
+ int matching_operands[MAX_RECOG_OPERANDS];
+ enum op_type {OP_IN, OP_OUT, OP_INOUT} op_types[MAX_RECOG_OPERANDS];
+ int earlyclobber[MAX_RECOG_OPERANDS];
+ register int c;
+ int noperands = insn_n_operands[insn_code_num];
+
+ struct funny_match funny_match[MAX_RECOG_OPERANDS];
+ int funny_match_index;
+ int nalternatives = insn_n_alternatives[insn_code_num];
+
+ if (noperands == 0 || nalternatives == 0)
+ return 1;
+
+ for (c = 0; c < noperands; c++)
+ {
+ constraints[c] = insn_operand_constraint[insn_code_num][c];
+ matching_operands[c] = -1;
+ op_types[c] = OP_IN;
+ }
+
+ which_alternative = 0;
+
+ while (which_alternative < nalternatives)
+ {
+ register int opno;
+ int lose = 0;
+ funny_match_index = 0;
+
+ for (opno = 0; opno < noperands; opno++)
+ {
+ register rtx op = recog_operand[opno];
+ enum machine_mode mode = GET_MODE (op);
+ register char *p = constraints[opno];
+ int offset = 0;
+ int win = 0;
+ int val;
+
+ earlyclobber[opno] = 0;
+
+ if (GET_CODE (op) == SUBREG)
+ {
+ if (GET_CODE (SUBREG_REG (op)) == REG
+ && REGNO (SUBREG_REG (op)) < FIRST_PSEUDO_REGISTER)
+ offset = SUBREG_WORD (op);
+ op = SUBREG_REG (op);
+ }
+
+ /* An empty constraint or empty alternative
+ allows anything which matched the pattern. */
+ if (*p == 0 || *p == ',')
+ win = 1;
+
+ while (*p && (c = *p++) != ',')
+ switch (c)
+ {
+ case '?':
+ case '!':
+ case '*':
+ case '%':
+ break;
+
+ case '#':
+ /* Ignore rest of this alternative as far as
+ constraint checking is concerned. */
+ while (*p && *p != ',')
+ p++;
+ break;
+
+ case '=':
+ op_types[opno] = OP_OUT;
+ break;
+
+ case '+':
+ op_types[opno] = OP_INOUT;
+ break;
+
+ case '&':
+ earlyclobber[opno] = 1;
+ break;
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ /* This operand must be the same as a previous one.
+ This kind of constraint is used for instructions such
+ as add when they take only two operands.
+
+ Note that the lower-numbered operand is passed first.
+
+ If we are not testing strictly, assume that this constraint
+ will be satisfied. */
+ if (strict < 0)
+ val = 1;
+ else
+ val = operands_match_p (recog_operand[c - '0'],
+ recog_operand[opno]);
+
+ matching_operands[opno] = c - '0';
+ matching_operands[c - '0'] = opno;
+
+ if (val != 0)
+ win = 1;
+ /* If output is *x and input is *--x,
+ arrange later to change the output to *--x as well,
+ since the output op is the one that will be printed. */
+ if (val == 2 && strict > 0)
+ {
+ funny_match[funny_match_index].this = opno;
+ funny_match[funny_match_index++].other = c - '0';
+ }
+ break;
+
+ case 'p':
+ /* p is used for address_operands. When we are called by
+ gen_input_reload, no one will have checked that the
+ address is strictly valid, i.e., that all pseudos
+ requiring hard regs have gotten them. */
+ if (strict <= 0
+ || (strict_memory_address_p
+ (insn_operand_mode[insn_code_num][opno], op)))
+ win = 1;
+ break;
+
+ /* No need to check general_operand again;
+ it was done in insn-recog.c. */
+ case 'g':
+ /* Anything goes unless it is a REG and really has a hard reg
+ but the hard reg is not in the class GENERAL_REGS. */
+ if (strict < 0
+ || GENERAL_REGS == ALL_REGS
+ || GET_CODE (op) != REG
+ || (reload_in_progress
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ || reg_fits_class_p (op, GENERAL_REGS, offset, mode))
+ win = 1;
+ break;
+
+ case 'r':
+ if (strict < 0
+ || (strict == 0
+ && GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ || (strict == 0 && GET_CODE (op) == SCRATCH)
+ || (GET_CODE (op) == REG
+ && ((GENERAL_REGS == ALL_REGS
+ && REGNO (op) < FIRST_PSEUDO_REGISTER)
+ || reg_fits_class_p (op, GENERAL_REGS,
+ offset, mode))))
+ win = 1;
+ break;
+
+ case 'X':
+ /* This is used for a MATCH_SCRATCH in the cases when we
+ don't actually need anything. So anything goes any time. */
+ win = 1;
+ break;
+
+ case 'm':
+ if (GET_CODE (op) == MEM
+ /* Before reload, accept what reload can turn into mem. */
+ || (strict < 0 && CONSTANT_P (op))
+ /* During reload, accept a pseudo */
+ || (reload_in_progress && GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER))
+ win = 1;
+ break;
+
+ case '<':
+ if (GET_CODE (op) == MEM
+ && (GET_CODE (XEXP (op, 0)) == PRE_DEC
+ || GET_CODE (XEXP (op, 0)) == POST_DEC))
+ win = 1;
+ break;
+
+ case '>':
+ if (GET_CODE (op) == MEM
+ && (GET_CODE (XEXP (op, 0)) == PRE_INC
+ || GET_CODE (XEXP (op, 0)) == POST_INC))
+ win = 1;
+ break;
+
+ case 'E':
+ /* Match any CONST_DOUBLE, but only if
+ we can examine the bits of it reliably. */
+ if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
+ || HOST_BITS_PER_WIDE_INT != BITS_PER_WORD)
+ && GET_MODE (op) != VOIDmode && ! flag_pretend_float)
+ break;
+ if (GET_CODE (op) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'F':
+ if (GET_CODE (op) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'G':
+ case 'H':
+ if (GET_CODE (op) == CONST_DOUBLE
+ && CONST_DOUBLE_OK_FOR_LETTER_P (op, c))
+ win = 1;
+ break;
+
+ case 's':
+ if (GET_CODE (op) == CONST_INT
+ || (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE (op) == VOIDmode))
+ break;
+ /* Fall through */
+ case 'i':
+ if (CONSTANT_P (op))
+ win = 1;
+ break;
+
+ case 'n':
+ if (GET_CODE (op) == CONST_INT
+ || (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE (op) == VOIDmode))
+ win = 1;
+ break;
+
+ case 'I':
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ case 'N':
+ case 'O':
+ case 'P':
+ if (GET_CODE (op) == CONST_INT
+ && CONST_OK_FOR_LETTER_P (INTVAL (op), c))
+ win = 1;
+ break;
+
+#ifdef EXTRA_CONSTRAINT
+ case 'Q':
+ case 'R':
+ case 'S':
+ case 'T':
+ case 'U':
+ if (EXTRA_CONSTRAINT (op, c))
+ win = 1;
+ break;
+#endif
+
+ case 'V':
+ if (GET_CODE (op) == MEM
+ && ! offsettable_memref_p (op))
+ win = 1;
+ break;
+
+ case 'o':
+ if ((strict > 0 && offsettable_memref_p (op))
+ || (strict == 0 && offsettable_nonstrict_memref_p (op))
+ /* Before reload, accept what reload can handle. */
+ || (strict < 0
+ && (CONSTANT_P (op) || GET_CODE (op) == MEM))
+ /* During reload, accept a pseudo */
+ || (reload_in_progress && GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER))
+ win = 1;
+ break;
+
+ default:
+ if (strict < 0
+ || (strict == 0
+ && GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ || (strict == 0 && GET_CODE (op) == SCRATCH)
+ || (GET_CODE (op) == REG
+ && reg_fits_class_p (op, REG_CLASS_FROM_LETTER (c),
+ offset, mode)))
+ win = 1;
+ }
+
+ constraints[opno] = p;
+ /* If this operand did not win somehow,
+ this alternative loses. */
+ if (! win)
+ lose = 1;
+ }
+ /* This alternative won; the operands are ok.
+ Change whichever operands this alternative says to change. */
+ if (! lose)
+ {
+ int opno, eopno;
+
+ /* See if any earlyclobber operand conflicts with some other
+ operand. */
+
+ if (strict > 0)
+ for (eopno = 0; eopno < noperands; eopno++)
+ /* Ignore earlyclobber operands now in memory,
+ because we would often report failure when we have
+ two memory operands, one of which was formerly a REG. */
+ if (earlyclobber[eopno]
+ && GET_CODE (recog_operand[eopno]) == REG)
+ for (opno = 0; opno < noperands; opno++)
+ if ((GET_CODE (recog_operand[opno]) == MEM
+ || op_types[opno] != OP_OUT)
+ && opno != eopno
+ /* Ignore things like match_operator operands. */
+ && *constraints[opno] != 0
+ && ! (matching_operands[opno] == eopno
+ && rtx_equal_p (recog_operand[opno],
+ recog_operand[eopno]))
+ && ! safe_from_earlyclobber (recog_operand[opno],
+ recog_operand[eopno]))
+ lose = 1;
+
+ if (! lose)
+ {
+ while (--funny_match_index >= 0)
+ {
+ recog_operand[funny_match[funny_match_index].other]
+ = recog_operand[funny_match[funny_match_index].this];
+ }
+
+ return 1;
+ }
+ }
+
+ which_alternative++;
+ }
+
+ /* If we are about to reject this, but we are not to test strictly,
+ try a very loose test. Only return failure if it fails also. */
+ if (strict == 0)
+ return constrain_operands (insn_code_num, -1);
+ else
+ return 0;
+}
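+
+/* An illustrative usage sketch: after reload, final can check an insn
+ with
+
+ if (! constrain_operands (INSN_CODE (insn), 1))
+ ... no alternative matches; the insn is malformed ...
+
+ while the attribute routines pass STRICT == 0 before reload and rely
+ on the internal retry above with STRICT == -1. */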
+
+/* Return 1 iff OPERAND (assumed to be a REG rtx)
+ is a hard reg in class CLASS when its regno is offset by OFFSET
+ and changed to mode MODE.
+ If REG occupies multiple hard regs, all of them must be in CLASS. */
+
+int
+reg_fits_class_p (operand, class, offset, mode)
+ rtx operand;
+ register enum reg_class class;
+ int offset;
+ enum machine_mode mode;
+{
+ register int regno = REGNO (operand);
+ if (regno < FIRST_PSEUDO_REGISTER
+ && TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ regno + offset))
+ {
+ register int sr;
+ regno += offset;
+ for (sr = HARD_REGNO_NREGS (regno, mode) - 1;
+ sr > 0; sr--)
+ if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ regno + sr))
+ break;
+ return sr == 0;
+ }
+
+ return 0;
+}
+
+#endif /* REGISTER_CONSTRAINTS */
diff --git a/gnu/usr.bin/cc/cc_int/reg-stack.c b/gnu/usr.bin/cc/cc_int/reg-stack.c
new file mode 100644
index 0000000..dd30344
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/reg-stack.c
@@ -0,0 +1,3008 @@
+/* Register to Stack convert for GNU compiler.
+ Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* This pass converts stack-like registers from the "flat register
+ file" model that gcc uses, to a stack convention that the 387 uses.
+
+ * The form of the input:
+
+ On input, the function consists of insns that have had their
+ registers fully allocated to a set of "virtual" registers. Note that
+ the word "virtual" is used differently here than elsewhere in gcc: for
+ each virtual stack reg, there is a hard reg, but the mapping between
+ them is not known until this pass is run. On output, hard register
+ numbers have been substituted, and various pop and exchange insns have
+ been emitted. The hard register numbers and the virtual register
+ numbers completely overlap - before this pass, all stack register
+ numbers are virtual, and afterward they are all hard.
+
+ The virtual registers can be manipulated normally by gcc, and their
+ semantics are the same as for normal registers. After the hard
+ register numbers are substituted, the semantics of an insn containing
+ stack-like regs are not the same as for an insn with normal regs: for
+ instance, it is not safe to delete an insn that appears to be a no-op
+ move. In general, no insn containing hard regs should be changed
+ after this pass is done.
+
+ * The form of the output:
+
+ After this pass, hard register numbers represent the distance from
+ the current top of stack to the desired register. A reference to
+ FIRST_STACK_REG references the top of stack, FIRST_STACK_REG + 1
+ represents the register just below that, and so forth. Also, REG_DEAD
+ notes indicate whether or not a stack register should be popped.
+
+ A "swap" insn looks like a parallel of two patterns, where each
+ pattern is a SET: one sets A to B, the other B to A.
+
+ A "push" or "load" insn is a SET whose SET_DEST is FIRST_STACK_REG
+ and whose SET_DEST is REG or MEM. Any other SET_DEST, such as PLUS,
+ will replace the existing stack top, not push a new value.
+
+ A store insn is a SET whose SET_DEST is FIRST_STACK_REG, and whose
+ SET_SRC is REG or MEM.
+
+ The case where the SET_SRC and SET_DEST are both FIRST_STACK_REG
+ appears ambiguous. As a special case, the presence of a REG_DEAD note
+ for FIRST_STACK_REG differentiates between a load insn and a pop.
+
+ If a REG_DEAD is present, the insn represents a "pop" that discards
+ the top of the register stack. If there is no REG_DEAD note, then the
+ insn represents a "dup" or a push of the current top of stack onto the
+ stack.
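+
+ For example (illustrative): taking reg 8 as FIRST_STACK_REG, the
+ pattern (set (reg:DF 8) (reg:DF 8)) carrying a REG_DEAD note for
+ reg 8 is a pop that discards the top, while the same pattern with no
+ such note pushes a duplicate of the top.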
+
+ * Methodology:
+
+ Existing REG_DEAD and REG_UNUSED notes for stack registers are
+ deleted and recreated from scratch. REG_DEAD is never created for a
+ SET_DEST, only REG_UNUSED.
+
+ Before life analysis, the mode of each insn is set based on whether
+ or not any stack registers are mentioned within that insn. VOIDmode
+ means that no stack regs are mentioned anywhere, and QImode means that at
+ least one pattern within the insn mentions stack registers. This
+ information is valid until after reg_to_stack returns, and is used
+ from jump_optimize.
+
+ * asm_operands:
+
+ There are several rules on the usage of stack-like regs in
+ asm_operands insns. These rules apply only to the operands that are
+ stack-like regs:
+
+ 1. Given a set of input regs that die in an asm_operands, it is
+ necessary to know which are implicitly popped by the asm, and
+ which must be explicitly popped by gcc.
+
+ An input reg that is implicitly popped by the asm must be
+ explicitly clobbered, unless it is constrained to match an
+ output operand.
+
+ 2. For any input reg that is implicitly popped by an asm, it is
+ necessary to know how to adjust the stack to compensate for the pop.
+ If any non-popped input is closer to the top of the reg-stack than
+ the implicitly popped reg, it would not be possible to know what the
+ stack looked like - it's not clear how the rest of the stack "slides
+ up".
+
+ All implicitly popped input regs must be closer to the top of
+ the reg-stack than any input that is not implicitly popped.
+
+ 3. It is possible that if an input dies in an insn, reload might
+ use the input reg for an output reload. Consider this example:
+
+ asm ("foo" : "=t" (a) : "f" (b));
+
+ This asm says that input B is not popped by the asm, and that
+ the asm pushes a result onto the reg-stack, i.e., the stack is one
+ deeper after the asm than it was before. But, it is possible that
+ reload will think that it can use the same reg for both the input and
+ the output, if input B dies in this insn.
+
+ If any input operand uses the "f" constraint, all output reg
+ constraints must use the "&" earlyclobber.
+
+ The asm above would be written as
+
+ asm ("foo" : "=&t" (a) : "f" (b));
+
+ 4. Some operands need to be in particular places on the stack. All
+ output operands fall in this category - there is no other way to
+ know which regs the outputs appear in unless the user indicates
+ this in the constraints.
+
+ Output operands must specifically indicate which reg an output
+ appears in after an asm. "=f" is not allowed: the operand
+ constraints must select a class with a single reg.
+
+ 5. Output operands may not be "inserted" between existing stack regs.
+ Since no 387 opcode uses a read/write operand, all output operands
+ are dead before the asm_operands, and are pushed by the asm_operands.
+ It makes no sense to push anywhere but the top of the reg-stack.
+
+ Output operands must start at the top of the reg-stack: output
+ operands may not "skip" a reg.
+
+ 6. Some asm statements may need extra stack space for internal
+ calculations. This can be guaranteed by clobbering stack registers
+ unrelated to the inputs and outputs.
+
+ Here are a couple of reasonable asms to want to write. This asm
+ takes one input, which is internally popped, and produces two outputs.
+
+ asm ("fsincos" : "=t" (cos), "=u" (sin) : "0" (inp));
+
+ This asm takes two inputs, which are popped by the fyl2xp1 opcode,
+ and replaces them with one output. The user must code the "st(1)"
+ clobber for reg-stack.c to know that fyl2xp1 pops both inputs.
+
+ asm ("fyl2xp1" : "=t" (result) : "0" (x), "u" (y) : "st(1)");
+
+ */
+
+#include <stdio.h>
+#include "config.h"
+#include "tree.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+
+#ifdef STACK_REGS
+
+#define REG_STACK_SIZE (LAST_STACK_REG - FIRST_STACK_REG + 1)
+
+/* True if the current function returns a real value. */
+static int current_function_returns_real;
+
+/* This is the basic stack record. TOP is an index into REG[] such
+ that REG[TOP] is the top of stack. If TOP is -1 the stack is empty.
+
+ If TOP is -2, REG[] is not yet initialized. Stack initialization
+ consists of placing each live reg in array `reg' and setting `top'
+ appropriately.
+
+ REG_SET indicates which registers are live. */
+
+typedef struct stack_def
+{
+ int top; /* index to top stack element */
+ HARD_REG_SET reg_set; /* set of live registers */
+ char reg[REG_STACK_SIZE]; /* register - stack mapping */
+} *stack;
+
+/* highest instruction uid */
+static int max_uid = 0;
+
+/* Number of basic blocks in the current function. */
+static int blocks;
+
+/* Element N is first insn in basic block N.
+ This info lasts until we finish compiling the function. */
+static rtx *block_begin;
+
+/* Element N is last insn in basic block N.
+ This info lasts until we finish compiling the function. */
+static rtx *block_end;
+
+/* Element N is nonzero if control can drop into basic block N */
+static char *block_drops_in;
+
+/* Element N says all about the stack on entry to block N */
+static stack block_stack_in;
+
+/* Element N says all about the stack life at the end of block N */
+static HARD_REG_SET *block_out_reg_set;
+
+/* This is where the BLOCK_NUM values are really stored. This is set
+ up by find_blocks and used there and in life_analysis. It can be used
+ later, but only to look up an insn that is the head or tail of some
+ block. life_analysis and the stack register conversion process can
+ add insns within a block. */
+static int *block_number;
+
+/* This is the register file for all registers after conversion. */
+static rtx FP_mode_reg[FIRST_PSEUDO_REGISTER][(int) MAX_MACHINE_MODE];
+
+/* Get the basic block number of an insn. See the note at the
+ block_number definition regarding the validity of this information. */
+
+#define BLOCK_NUM(INSN) \
+ (((INSN_UID (INSN) > max_uid) \
+ ? (int *)(abort() , 0) \
+ : block_number)[INSN_UID (INSN)])
+
+extern rtx forced_labels;
+extern rtx gen_jump ();
+extern rtx gen_movdf (), gen_movxf ();
+extern rtx find_regno_note ();
+extern rtx emit_jump_insn_before ();
+extern rtx emit_label_after ();
+
+/* Forward declarations */
+
+static void find_blocks ();
+static int uses_reg_or_mem ();
+static void stack_reg_life_analysis ();
+static void change_stack ();
+static void convert_regs ();
+static void dump_stack_info ();
+
+/* Return non-zero if any stack register is mentioned somewhere within PAT. */
+
+int
+stack_regs_mentioned_p (pat)
+ rtx pat;
+{
+ register char *fmt;
+ register int i;
+
+ if (STACK_REG_P (pat))
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (pat));
+ for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
+ if (stack_regs_mentioned_p (XVECEXP (pat, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && stack_regs_mentioned_p (XEXP (pat, i)))
+ return 1;
+ }
+
+ return 0;
+}
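+
+/* E.g. (illustrative, again taking reg 8 as the first stack reg): this
+ returns 1 for (set (reg:DF 8) (mem:DF (reg:SI 0))), since reg 8 is a
+ stack reg, and 0 for a pattern touching only general regs and memory. */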
+
+/* Convert register usage from "flat" register file usage to a "stack
+ register file". FIRST is the first insn in the function, FILE is the
+ dump file, if used.
+
+ First compute the beginning and end of each basic block. Do a
+ register life analysis on the stack registers, recording the result
+ for the head and tail of each basic block. Then convert each insn one
+ by one. Run a last jump_optimize() pass, if optimizing, to eliminate
+ any cross-jumping created when the converter inserts pop insns. */
+
+void
+reg_to_stack (first, file)
+ rtx first;
+ FILE *file;
+{
+ register rtx insn;
+ register int i;
+ int stack_reg_seen = 0;
+ enum machine_mode mode;
+
+ current_function_returns_real
+ = TREE_CODE (TREE_TYPE (DECL_RESULT (current_function_decl))) == REAL_TYPE;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ FP_mode_reg[i][(int) mode] = gen_rtx (REG, mode, i);
+
+ /* Count the basic blocks. Also find maximum insn uid. */
+ {
+ register RTX_CODE prev_code = BARRIER;
+ register RTX_CODE code;
+
+ max_uid = 0;
+ blocks = 0;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ /* Note that this loop must select the same block boundaries
+ as code in find_blocks. Also note that this code is not the
+ same as that used in flow.c. */
+
+ if (INSN_UID (insn) > max_uid)
+ max_uid = INSN_UID (insn);
+
+ code = GET_CODE (insn);
+
+ if (code == CODE_LABEL
+ || (prev_code != INSN
+ && prev_code != CALL_INSN
+ && prev_code != CODE_LABEL
+ && GET_RTX_CLASS (code) == 'i'))
+ blocks++;
+
+ /* Remember whether or not this insn mentions any FP regs.
+ Check JUMP_INSNs too, in case someone creates a funny PARALLEL. */
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && stack_regs_mentioned_p (PATTERN (insn)))
+ {
+ stack_reg_seen = 1;
+ PUT_MODE (insn, QImode);
+ }
+ else
+ PUT_MODE (insn, VOIDmode);
+
+ if (code == CODE_LABEL)
+ LABEL_REFS (insn) = insn; /* delete old chain */
+
+ if (code != NOTE)
+ prev_code = code;
+ }
+ }
+
+ /* If no stack register reference exists in this function, there isn't
+ anything to convert. */
+
+ if (! stack_reg_seen)
+ return;
+
+ /* If there are stack registers, there must be at least one block. */
+
+ if (! blocks)
+ abort ();
+
+ /* Allocate some tables that last till end of compiling this function
+ and some needed only in find_blocks and life_analysis. */
+
+ block_begin = (rtx *) alloca (blocks * sizeof (rtx));
+ block_end = (rtx *) alloca (blocks * sizeof (rtx));
+ block_drops_in = (char *) alloca (blocks);
+
+ block_stack_in = (stack) alloca (blocks * sizeof (struct stack_def));
+ block_out_reg_set = (HARD_REG_SET *) alloca (blocks * sizeof (HARD_REG_SET));
+ bzero (block_stack_in, blocks * sizeof (struct stack_def));
+ bzero (block_out_reg_set, blocks * sizeof (HARD_REG_SET));
+
+ block_number = (int *) alloca ((max_uid + 1) * sizeof (int));
+
+ find_blocks (first);
+ stack_reg_life_analysis (first);
+
+ /* Dump the life analysis debug information before jump
+ optimization, as that will destroy the LABEL_REFS we keep the
+ information in. */
+
+ if (file)
+ dump_stack_info (file);
+
+ convert_regs ();
+
+ if (optimize)
+ jump_optimize (first, 2, 0, 0);
+}
+
+/* Check PAT, which is in INSN, for LABEL_REFs. Add INSN to the
+ label's chain of references, and note which insn contains each
+ reference. */
+
+static void
+record_label_references (insn, pat)
+ rtx insn, pat;
+{
+ register enum rtx_code code = GET_CODE (pat);
+ register int i;
+ register char *fmt;
+
+ if (code == LABEL_REF)
+ {
+ register rtx label = XEXP (pat, 0);
+ register rtx ref;
+
+ if (GET_CODE (label) != CODE_LABEL)
+ abort ();
+
+ /* Don't make a duplicate in the code_label's chain. */
+
+ for (ref = LABEL_REFS (label);
+ ref && ref != label;
+ ref = LABEL_NEXTREF (ref))
+ if (CONTAINING_INSN (ref) == insn)
+ return;
+
+ CONTAINING_INSN (pat) = insn;
+ LABEL_NEXTREF (pat) = LABEL_REFS (label);
+ LABEL_REFS (label) = pat;
+
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ record_label_references (insn, XEXP (pat, i));
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (pat, i); j++)
+ record_label_references (insn, XVECEXP (pat, i, j));
+ }
+ }
+}
+
+/* Return a pointer to the REG expression within PAT. If PAT is not a
+ REG, possibly enclosed in a conversion rtx, return the inner part of
+ PAT that stopped the search. */
+
+static rtx *
+get_true_reg (pat)
+ rtx *pat;
+{
+ while (GET_CODE (*pat) == SUBREG
+ || GET_CODE (*pat) == FLOAT
+ || GET_CODE (*pat) == FIX
+ || GET_CODE (*pat) == FLOAT_EXTEND)
+ pat = & XEXP (*pat, 0);
+
+ return pat;
+}
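+
+/* E.g. (illustrative): given a pointer to
+ (float_extend:DF (reg:SF 9))
+ get_true_reg returns the address of the inner (reg:SF 9); given a
+ pointer to a MEM it returns that pointer unchanged, since MEM is not
+ one of the conversions stripped above. */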
+
+/* Scan the OPERANDS and OPERAND_CONSTRAINTS of an asm_operands.
+ N_OPERANDS is the total number of operands. Return which alternative
+ matched, or -1 if no alternative matches.
+
+ OPERAND_MATCHES is an array which indicates, for each operand, which
+ operand it matches due to the constraints, or -1 if no match is required.
+ If two operands match by coincidence, but are not required to match by
+ the constraints, -1 is stored.
+
+ OPERAND_CLASS is an array which indicates the smallest class
+ required by the constraints. If the alternative that matches calls
+ for some class `class', and the operand matches a subclass of `class',
+ OPERAND_CLASS is set to `class' as required by the constraints, not to
+ the subclass. If an alternative allows more than one class,
+ OPERAND_CLASS is set to the smallest class that is a union of the
+ allowed classes. */
+
+static int
+constrain_asm_operands (n_operands, operands, operand_constraints,
+ operand_matches, operand_class)
+ int n_operands;
+ rtx *operands;
+ char **operand_constraints;
+ int *operand_matches;
+ enum reg_class *operand_class;
+{
+ char **constraints = (char **) alloca (n_operands * sizeof (char *));
+ char *q;
+ int this_alternative, this_operand;
+ int n_alternatives;
+ int j;
+
+ for (j = 0; j < n_operands; j++)
+ constraints[j] = operand_constraints[j];
+
+ /* Compute the number of alternatives in the operands. reload has
+ already guaranteed that all operands have the same number of
+ alternatives. */
+
+ n_alternatives = 1;
+ for (q = constraints[0]; *q; q++)
+ n_alternatives += (*q == ',');
+
+ this_alternative = 0;
+ while (this_alternative < n_alternatives)
+ {
+ int lose = 0;
+ int i;
+
+ /* No operands match, no narrow class requirements yet. */
+ for (i = 0; i < n_operands; i++)
+ {
+ operand_matches[i] = -1;
+ operand_class[i] = NO_REGS;
+ }
+
+ for (this_operand = 0; this_operand < n_operands; this_operand++)
+ {
+ rtx op = operands[this_operand];
+ enum machine_mode mode = GET_MODE (op);
+ char *p = constraints[this_operand];
+ int offset = 0;
+ int win = 0;
+ int c;
+
+ if (GET_CODE (op) == SUBREG)
+ {
+ if (GET_CODE (SUBREG_REG (op)) == REG
+ && REGNO (SUBREG_REG (op)) < FIRST_PSEUDO_REGISTER)
+ offset = SUBREG_WORD (op);
+ op = SUBREG_REG (op);
+ }
+
+ /* An empty constraint or empty alternative
+ allows anything which matched the pattern. */
+ if (*p == 0 || *p == ',')
+ win = 1;
+
+ while (*p && (c = *p++) != ',')
+ switch (c)
+ {
+ case '=':
+ case '+':
+ case '?':
+ case '&':
+ case '!':
+ case '*':
+ case '%':
+ /* Ignore these. */
+ break;
+
+ case '#':
+ /* Ignore rest of this alternative. */
+ while (*p && *p != ',') p++;
+ break;
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ /* This operand must be the same as a previous one.
+ This kind of constraint is used for instructions such
+ as add when they take only two operands.
+
+ Note that the lower-numbered operand is passed first. */
+
+ if (operands_match_p (operands[c - '0'],
+ operands[this_operand]))
+ {
+ operand_matches[this_operand] = c - '0';
+ win = 1;
+ }
+ break;
+
+ case 'p':
+ /* p is used for address_operands. Since this is an asm,
+ just make sure that the operand is valid for Pmode. */
+
+ if (strict_memory_address_p (Pmode, op))
+ win = 1;
+ break;
+
+ case 'g':
+ /* Anything goes unless it is a REG and really has a hard reg
+ but the hard reg is not in the class GENERAL_REGS. */
+ if (GENERAL_REGS == ALL_REGS
+ || GET_CODE (op) != REG
+ || reg_fits_class_p (op, GENERAL_REGS, offset, mode))
+ {
+ if (GET_CODE (op) == REG)
+ operand_class[this_operand]
+ = reg_class_subunion[(int) operand_class[this_operand]][(int) GENERAL_REGS];
+ win = 1;
+ }
+ break;
+
+ case 'r':
+ if (GET_CODE (op) == REG
+ && (GENERAL_REGS == ALL_REGS
+ || reg_fits_class_p (op, GENERAL_REGS, offset, mode)))
+ {
+ operand_class[this_operand]
+ = reg_class_subunion[(int) operand_class[this_operand]][(int) GENERAL_REGS];
+ win = 1;
+ }
+ break;
+
+ case 'X':
+ /* This is used for a MATCH_SCRATCH in the cases when we
+ don't actually need anything. So anything goes any time. */
+ win = 1;
+ break;
+
+ case 'm':
+ if (GET_CODE (op) == MEM)
+ win = 1;
+ break;
+
+ case '<':
+ if (GET_CODE (op) == MEM
+ && (GET_CODE (XEXP (op, 0)) == PRE_DEC
+ || GET_CODE (XEXP (op, 0)) == POST_DEC))
+ win = 1;
+ break;
+
+ case '>':
+ if (GET_CODE (op) == MEM
+ && (GET_CODE (XEXP (op, 0)) == PRE_INC
+ || GET_CODE (XEXP (op, 0)) == POST_INC))
+ win = 1;
+ break;
+
+ case 'E':
+ /* Match any CONST_DOUBLE, but only if
+ we can examine the bits of it reliably. */
+ if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
+ || HOST_BITS_PER_WIDE_INT != BITS_PER_WORD)
+ && GET_MODE (op) != VOIDmode && ! flag_pretend_float)
+ break;
+ if (GET_CODE (op) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'F':
+ if (GET_CODE (op) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'G':
+ case 'H':
+ if (GET_CODE (op) == CONST_DOUBLE
+ && CONST_DOUBLE_OK_FOR_LETTER_P (op, c))
+ win = 1;
+ break;
+
+ case 's':
+ if (GET_CODE (op) == CONST_INT
+ || (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE (op) == VOIDmode))
+ break;
+ /* Fall through */
+ case 'i':
+ if (CONSTANT_P (op))
+ win = 1;
+ break;
+
+ case 'n':
+ if (GET_CODE (op) == CONST_INT
+ || (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE (op) == VOIDmode))
+ win = 1;
+ break;
+
+ case 'I':
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ case 'N':
+ case 'O':
+ case 'P':
+ if (GET_CODE (op) == CONST_INT
+ && CONST_OK_FOR_LETTER_P (INTVAL (op), c))
+ win = 1;
+ break;
+
+#ifdef EXTRA_CONSTRAINT
+ case 'Q':
+ case 'R':
+ case 'S':
+ case 'T':
+ case 'U':
+ if (EXTRA_CONSTRAINT (op, c))
+ win = 1;
+ break;
+#endif
+
+ case 'V':
+ if (GET_CODE (op) == MEM && ! offsettable_memref_p (op))
+ win = 1;
+ break;
+
+ case 'o':
+ if (offsettable_memref_p (op))
+ win = 1;
+ break;
+
+ default:
+ if (GET_CODE (op) == REG
+ && reg_fits_class_p (op, REG_CLASS_FROM_LETTER (c),
+ offset, mode))
+ {
+ operand_class[this_operand]
+ = reg_class_subunion[(int)operand_class[this_operand]][(int) REG_CLASS_FROM_LETTER (c)];
+ win = 1;
+ }
+ }
+
+ constraints[this_operand] = p;
+ /* If this operand did not win somehow,
+ this alternative loses. */
+ if (! win)
+ lose = 1;
+ }
+ /* This alternative won; the operands are ok.
+ Change whichever operands this alternative says to change. */
+ if (! lose)
+ break;
+
+ this_alternative++;
+ }
+
+ /* For operands constrained to match another operand, copy the other
+ operand's class to this operand's class. */
+ for (j = 0; j < n_operands; j++)
+ if (operand_matches[j] >= 0)
+ operand_class[j] = operand_class[operand_matches[j]];
+
+ return this_alternative == n_alternatives ? -1 : this_alternative;
+}
+
+/* Record the life info of each stack reg in INSN, updating REGSTACK.
+ N_INPUTS is the number of inputs; N_OUTPUTS the number of outputs.
+ CONSTRAINTS is an array of the constraint strings used in the asm
+ statement. OPERANDS is an array of all operands for the insn, and is
+ assumed to contain all output operands, then all input operands.
+
+ There are many rules that an asm statement for stack-like regs must
+ follow. Those rules are explained at the top of this file: the rule
+ numbers below refer to that explanation. */
+
+static void
+record_asm_reg_life (insn, regstack, operands, constraints,
+ n_inputs, n_outputs)
+ rtx insn;
+ stack regstack;
+ rtx *operands;
+ char **constraints;
+ int n_inputs, n_outputs;
+{
+ int i;
+ int n_operands = n_inputs + n_outputs;
+ int first_input = n_outputs;
+ int n_clobbers;
+ int malformed_asm = 0;
+ rtx body = PATTERN (insn);
+
+ int *operand_matches = (int *) alloca (n_operands * sizeof (int));
+
+ enum reg_class *operand_class
+ = (enum reg_class *) alloca (n_operands * sizeof (enum reg_class));
+
+ int reg_used_as_output[FIRST_PSEUDO_REGISTER];
+ int implicitly_dies[FIRST_PSEUDO_REGISTER];
+
+ rtx *clobber_reg;
+
+ /* Find out what the constraints require. If no constraint
+ alternative matches, this asm is malformed. */
+ i = constrain_asm_operands (n_operands, operands, constraints,
+ operand_matches, operand_class);
+ if (i < 0)
+ malformed_asm = 1;
+
+ /* Strip SUBREGs here to make the following code simpler. */
+ for (i = 0; i < n_operands; i++)
+ if (GET_CODE (operands[i]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[i])) == REG)
+ operands[i] = SUBREG_REG (operands[i]);
+
+ /* Set up CLOBBER_REG. */
+
+ n_clobbers = 0;
+
+ if (GET_CODE (body) == PARALLEL)
+ {
+ clobber_reg = (rtx *) alloca (XVECLEN (body, 0) * sizeof (rtx *));
+
+ for (i = 0; i < XVECLEN (body, 0); i++)
+ if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
+ {
+ rtx clobber = XVECEXP (body, 0, i);
+ rtx reg = XEXP (clobber, 0);
+
+ if (GET_CODE (reg) == SUBREG && GET_CODE (SUBREG_REG (reg)) == REG)
+ reg = SUBREG_REG (reg);
+
+ if (STACK_REG_P (reg))
+ {
+ clobber_reg[n_clobbers] = reg;
+ n_clobbers++;
+ }
+ }
+ }
+
+ /* Enforce rule #4: Output operands must specifically indicate which
+ reg an output appears in after an asm. "=f" is not allowed: the
+ operand constraints must select a class with a single reg.
+
+ Also enforce rule #5: Output operands must start at the top of
+ the reg-stack: output operands may not "skip" a reg. */
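+ /* E.g. (hypothetical): "=t" (%st(0)) and "=u" (%st(1)) name
+ single-reg classes and are acceptable; "=f" names all of
+ FLOAT_REGS and draws the error below; and an output in %st(1)
+ with none in %st(0) would "skip" a reg. */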
+
+ bzero (reg_used_as_output, sizeof (reg_used_as_output));
+ for (i = 0; i < n_outputs; i++)
+ if (STACK_REG_P (operands[i]))
+ if (reg_class_size[(int) operand_class[i]] != 1)
+ {
+ error_for_asm
+ (insn, "Output constraint %d must specify a single register", i);
+ malformed_asm = 1;
+ }
+ else
+ reg_used_as_output[REGNO (operands[i])] = 1;
+
+
+ /* Find the first reg that is not used as an output. */
+ for (i = FIRST_STACK_REG; i < LAST_STACK_REG + 1; i++)
+ if (! reg_used_as_output[i])
+ break;
+
+ /* If any reg beyond that is used as an output, the outputs "skip" a reg, which is an error. */
+ for (; i < LAST_STACK_REG + 1; i++)
+ if (reg_used_as_output[i])
+ break;
+
+ if (i != LAST_STACK_REG + 1)
+ {
+ error_for_asm (insn, "Output regs must be grouped at top of stack");
+ malformed_asm = 1;
+ }
+
+ /* Enforce rule #2: All implicitly popped input regs must be closer
+ to the top of the reg-stack than any input that is not implicitly
+ popped. */
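+ /* E.g. (hypothetical): if %st(0) and %st(1) are both inputs and
+ exactly one is popped (tied to an output, or clobbered), it must
+ be %st(0); popping only %st(1) would leave live %st(0) above a
+ popped reg. */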
+
+ bzero (implicitly_dies, sizeof (implicitly_dies));
+ for (i = first_input; i < first_input + n_inputs; i++)
+ if (STACK_REG_P (operands[i]))
+ {
+ /* An input reg is implicitly popped if it is tied to an
+ output, or if there is a CLOBBER for it. */
+ int j;
+
+ for (j = 0; j < n_clobbers; j++)
+ if (operands_match_p (clobber_reg[j], operands[i]))
+ break;
+
+ if (j < n_clobbers || operand_matches[i] >= 0)
+ implicitly_dies[REGNO (operands[i])] = 1;
+ }
+
+ /* Search for first non-popped reg. */
+ for (i = FIRST_STACK_REG; i < LAST_STACK_REG + 1; i++)
+ if (! implicitly_dies[i])
+ break;
+
+ /* If there are any other popped regs, that's an error. */
+ for (; i < LAST_STACK_REG + 1; i++)
+ if (implicitly_dies[i])
+ break;
+
+ if (i != LAST_STACK_REG + 1)
+ {
+ error_for_asm (insn,
+ "Implicitly popped regs must be grouped at top of stack");
+ malformed_asm = 1;
+ }
+
+ /* Enforce rule #3: If any input operand uses the "f" constraint, all
+ output constraints must use the "&" earlyclobber.
+
+ ??? Detect this more deterministically by having constrain_asm_operands
+ record any earlyclobber. */
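+ /* E.g. (hypothetical): an asm with an "f" input whose reg happens
+ to match an output it is not tied to must write that output as
+ "=&t" or similar; otherwise the error below is given. */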
+
+ for (i = first_input; i < first_input + n_inputs; i++)
+ if (operand_matches[i] == -1)
+ {
+ int j;
+
+ for (j = 0; j < n_outputs; j++)
+ if (operands_match_p (operands[j], operands[i]))
+ {
+ error_for_asm (insn,
+ "Output operand %d must use `&' constraint", j);
+ malformed_asm = 1;
+ }
+ }
+
+ if (malformed_asm)
+ {
+ /* Avoid further trouble with this insn. */
+ PATTERN (insn) = gen_rtx (USE, VOIDmode, const0_rtx);
+ PUT_MODE (insn, VOIDmode);
+ return;
+ }
+
+ /* Process all outputs */
+ for (i = 0; i < n_outputs; i++)
+ {
+ rtx op = operands[i];
+
+ if (! STACK_REG_P (op))
+ if (stack_regs_mentioned_p (op))
+ abort ();
+ else
+ continue;
+
+ /* Each destination is dead before this insn. If the
+ destination is not used after this insn, record this with
+ REG_UNUSED. */
+
+ if (! TEST_HARD_REG_BIT (regstack->reg_set, REGNO (op)))
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_UNUSED, op,
+ REG_NOTES (insn));
+
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (op));
+ }
+
+ /* Process all inputs */
+ for (i = first_input; i < first_input + n_inputs; i++)
+ {
+ if (! STACK_REG_P (operands[i]))
+ if (stack_regs_mentioned_p (operands[i]))
+ abort ();
+ else
+ continue;
+
+ /* If an input is dead after the insn, record a death note.
+ But don't record a death note if there is already a death note,
+ or if the input is also an output. */
+
+ if (! TEST_HARD_REG_BIT (regstack->reg_set, REGNO (operands[i]))
+ && operand_matches[i] == -1
+ && find_regno_note (insn, REG_DEAD, REGNO (operands[i])) == NULL_RTX)
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_DEAD, operands[i],
+ REG_NOTES (insn));
+
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (operands[i]));
+ }
+}
+
+/* Scan PAT, which is part of INSN, and record registers appearing in
+ a SET_DEST in DEST, and other registers in SRC.
+
+ This function does not know about SET_DESTs that are both input and
+ output (such as ZERO_EXTRACT) - this cannot happen on a 387. */
+
+void
+record_reg_life_pat (pat, src, dest)
+ rtx pat;
+ HARD_REG_SET *src, *dest;
+{
+ register char *fmt;
+ register int i;
+
+ if (STACK_REG_P (pat))
+ {
+ if (src)
+ SET_HARD_REG_BIT (*src, REGNO (pat));
+
+ if (dest)
+ SET_HARD_REG_BIT (*dest, REGNO (pat));
+
+ return;
+ }
+
+ if (GET_CODE (pat) == SET)
+ {
+ record_reg_life_pat (XEXP (pat, 0), NULL_PTR, dest);
+ record_reg_life_pat (XEXP (pat, 1), src, NULL_PTR);
+ return;
+ }
+
+ /* We don't need to consider either of these cases. */
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ return;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (pat));
+ for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
+ record_reg_life_pat (XVECEXP (pat, i, j), src, dest);
+ }
+ else if (fmt[i] == 'e')
+ record_reg_life_pat (XEXP (pat, i), src, dest);
+ }
+}
+
+/* Calculate the number of inputs and outputs in BODY, an
+ asm_operands. N_OPERANDS is the total number of operands, and
+ N_INPUTS and N_OUTPUTS are pointers to ints into which the results are
+ placed. */
+
+static void
+get_asm_operand_lengths (body, n_operands, n_inputs, n_outputs)
+ rtx body;
+ int n_operands;
+ int *n_inputs, *n_outputs;
+{
+ if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS)
+ *n_inputs = ASM_OPERANDS_INPUT_LENGTH (SET_SRC (body));
+
+ else if (GET_CODE (body) == ASM_OPERANDS)
+ *n_inputs = ASM_OPERANDS_INPUT_LENGTH (body);
+
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == SET)
+ *n_inputs = ASM_OPERANDS_INPUT_LENGTH (SET_SRC (XVECEXP (body, 0, 0)));
+
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
+ *n_inputs = ASM_OPERANDS_INPUT_LENGTH (XVECEXP (body, 0, 0));
+ else
+ abort ();
+
+ *n_outputs = n_operands - *n_inputs;
+}
+
+/* Scan INSN, which is in BLOCK, and record the life & death of stack
+ registers in REGSTACK. This function is called to process insns from
+ the last insn in a block to the first. The actual scanning is done in
+ record_reg_life_pat.
+
+ If a register is live after a CALL_INSN, but is not a return value
+ register for that CALL_INSN, then code is emitted to initialize that
+ register. The block_end[] data is kept accurate.
+
+ Existing death and unused notes for stack registers are deleted
+ before processing the insn. */
+
+static void
+record_reg_life (insn, block, regstack)
+ rtx insn;
+ int block;
+ stack regstack;
+{
+ rtx note, *note_link;
+ int n_operands;
+
+ if ((GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
+ || INSN_DELETED_P (insn))
+ return;
+
+ /* Strip death notes for stack regs from this insn */
+
+ note_link = &REG_NOTES(insn);
+ for (note = *note_link; note; note = XEXP (note, 1))
+ if (STACK_REG_P (XEXP (note, 0))
+ && (REG_NOTE_KIND (note) == REG_DEAD
+ || REG_NOTE_KIND (note) == REG_UNUSED))
+ *note_link = XEXP (note, 1);
+ else
+ note_link = &XEXP (note, 1);
+
+ /* Process all patterns in the insn. */
+
+ n_operands = asm_noperands (PATTERN (insn));
+ if (n_operands >= 0)
+ {
+ /* This insn is an `asm' with operands. Decode the operands,
+ decide how many are inputs, and record the life information. */
+
+ rtx operands[MAX_RECOG_OPERANDS];
+ rtx body = PATTERN (insn);
+ int n_inputs, n_outputs;
+ char **constraints = (char **) alloca (n_operands * sizeof (char *));
+
+ decode_asm_operands (body, operands, NULL_PTR, constraints, NULL_PTR);
+ get_asm_operand_lengths (body, n_operands, &n_inputs, &n_outputs);
+ record_asm_reg_life (insn, regstack, operands, constraints,
+ n_inputs, n_outputs);
+ return;
+ }
+
+ /* An insn referencing a stack reg has a mode of QImode. */
+ if (GET_MODE (insn) == QImode)
+ {
+ HARD_REG_SET src, dest;
+ int regno;
+
+ CLEAR_HARD_REG_SET (src);
+ CLEAR_HARD_REG_SET (dest);
+ record_reg_life_pat (PATTERN (insn), &src, &dest);
+
+ for (regno = FIRST_STACK_REG; regno <= LAST_STACK_REG; regno++)
+ if (! TEST_HARD_REG_BIT (regstack->reg_set, regno))
+ {
+ if (TEST_HARD_REG_BIT (src, regno)
+ && ! TEST_HARD_REG_BIT (dest, regno))
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_DEAD,
+ FP_mode_reg[regno][(int) DFmode],
+ REG_NOTES (insn));
+ else if (TEST_HARD_REG_BIT (dest, regno))
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_UNUSED,
+ FP_mode_reg[regno][(int) DFmode],
+ REG_NOTES (insn));
+ }
+
+ AND_COMPL_HARD_REG_SET (regstack->reg_set, dest);
+ IOR_HARD_REG_SET (regstack->reg_set, src);
+ }
+
+ /* There might be a reg that is live after a function call.
+ Initialize it to zero so that the program does not crash. See comment
+ towards the end of stack_reg_life_analysis(). */
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ int reg = FIRST_FLOAT_REG;
+
+ /* If a stack reg is mentioned in a CALL_INSN, it must be as the
+ return value. */
+
+ if (stack_regs_mentioned_p (PATTERN (insn)))
+ reg++;
+
+ for (; reg <= LAST_STACK_REG; reg++)
+ if (TEST_HARD_REG_BIT (regstack->reg_set, reg))
+ {
+ rtx init, pat;
+
+ /* The insn will use virtual register numbers, and so
+ convert_regs is expected to process these. But BLOCK_NUM
+ cannot be used on these insns, because they do not appear in
+ block_number[]. */
+
+ pat = gen_rtx (SET, VOIDmode, FP_mode_reg[reg][(int) DFmode],
+ CONST0_RTX (DFmode));
+ init = emit_insn_after (pat, insn);
+ PUT_MODE (init, QImode);
+
+ CLEAR_HARD_REG_BIT (regstack->reg_set, reg);
+
+ /* If the CALL_INSN was the end of a block, move the
+ block_end to point to the new insn. */
+
+ if (block_end[block] == insn)
+ block_end[block] = init;
+ }
+
+ /* Some regs do not survive a CALL */
+
+ AND_COMPL_HARD_REG_SET (regstack->reg_set, call_used_reg_set);
+ }
+}
+
+/* Find all basic blocks of the function, which starts with FIRST.
+ For each JUMP_INSN, build the chain of LABEL_REFS on each CODE_LABEL. */
+
+static void
+find_blocks (first)
+ rtx first;
+{
+ register rtx insn;
+ register int block;
+ register RTX_CODE prev_code = BARRIER;
+ register RTX_CODE code;
+ rtx label_value_list = 0;
+
+ /* Record where all the blocks start and end.
+ Record which basic blocks control can drop in to. */
+
+ block = -1;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ /* Note that this loop must select the same block boundaries
+ as the code in reg_to_stack, though these are not the same
+ as those selected in flow.c. */
+
+ code = GET_CODE (insn);
+
+ if (code == CODE_LABEL
+ || (prev_code != INSN
+ && prev_code != CALL_INSN
+ && prev_code != CODE_LABEL
+ && GET_RTX_CLASS (code) == 'i'))
+ {
+ block_begin[++block] = insn;
+ block_end[block] = insn;
+ block_drops_in[block] = prev_code != BARRIER;
+ }
+ else if (GET_RTX_CLASS (code) == 'i')
+ block_end[block] = insn;
+
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ rtx note;
+
+ /* Make a list of all labels referred to other than by jumps. */
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_LABEL)
+ label_value_list = gen_rtx (EXPR_LIST, VOIDmode, XEXP (note, 0),
+ label_value_list);
+ }
+
+ BLOCK_NUM (insn) = block;
+
+ if (code != NOTE)
+ prev_code = code;
+ }
+
+ if (block + 1 != blocks)
+ abort ();
+
+ /* generate all label references to the corresponding jump insn */
+ for (block = 0; block < blocks; block++)
+ {
+ insn = block_end[block];
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ rtx pat = PATTERN (insn);
+ int computed_jump = 0;
+ rtx x;
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ int len = XVECLEN (pat, 0);
+ int has_use_labelref = 0;
+ int i;
+
+ for (i = len - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (pat, 0, i)) == USE
+ && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == LABEL_REF)
+ has_use_labelref = 1;
+
+ if (! has_use_labelref)
+ for (i = len - 1; i >= 0; i--)
+ if (GET_CODE (XVECEXP (pat, 0, i)) == SET
+ && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
+ && uses_reg_or_mem (SET_SRC (XVECEXP (pat, 0, i))))
+ computed_jump = 1;
+ }
+ else if (GET_CODE (pat) == SET
+ && SET_DEST (pat) == pc_rtx
+ && uses_reg_or_mem (SET_SRC (pat)))
+ computed_jump = 1;
+
+ if (computed_jump)
+ {
+ for (x = label_value_list; x; x = XEXP (x, 1))
+ record_label_references (insn,
+ gen_rtx (LABEL_REF, VOIDmode,
+ XEXP (x, 0)));
+
+ for (x = forced_labels; x; x = XEXP (x, 1))
+ record_label_references (insn,
+ gen_rtx (LABEL_REF, VOIDmode,
+ XEXP (x, 0)));
+ }
+
+ record_label_references (insn, pat);
+ }
+ }
+}
+
+/* Return 1 if X contains a REG or MEM that is not in the constant pool. */
+
+static int
+uses_reg_or_mem (x)
+ rtx x;
+{
+ enum rtx_code code = GET_CODE (x);
+ int i, j;
+ char *fmt;
+
+ if (code == REG
+ || (code == MEM
+ && ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))))
+ return 1;
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e'
+ && uses_reg_or_mem (XEXP (x, i)))
+ return 1;
+
+ if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (uses_reg_or_mem (XVECEXP (x, i, j)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* If current function returns its result in an fp stack register,
+ return the register number. Otherwise return -1. */
+
+static int
+stack_result_p (decl)
+ tree decl;
+{
+ rtx result = DECL_RTL (DECL_RESULT (decl));
+
+ if (result != 0
+ && !(GET_CODE (result) == REG
+ && REGNO (result) < FIRST_PSEUDO_REGISTER))
+ {
+#ifdef FUNCTION_OUTGOING_VALUE
+ result
+ = FUNCTION_OUTGOING_VALUE (TREE_TYPE (DECL_RESULT (decl)), decl);
+#else
+ result = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (decl)), decl);
+#endif
+ }
+
+ return STACK_REG_P (result) ? REGNO (result) : -1;
+}
+
+/* Determine which registers are live at the start of each basic
+ block of the function whose first insn is FIRST.
+
+ First, if the function returns a real_type, mark the return value
+ register as live at each return point, as the RTL may not give any
+ hint that the register is live.
+
+ Then, start with the last block and work back to the first block.
+ Similarly, work backwards within each block, insn by insn, recording
+ which regs die and which are used (and therefore live) in the
+ hard reg set of block_stack_in[].
+
+ After processing each basic block, if there is a label at the start
+ of the block, propagate the live registers to all jumps to this block.
+
+ As a special case, if there are regs live in this block, that are
+ not live in a block containing a jump to this label, and the block
+ containing the jump has already been processed, we must propagate this
+ block's entry register life back to the block containing the jump, and
+ restart life analysis from there.
+
+ In the worst case, this function may traverse the insns
+ REG_STACK_SIZE times. This is necessary, since a jump towards the end
+ of the insns may not know that a reg is live at a target that is early
+ in the insns. So we back up and start over with the new reg live.
+
+ If there are registers that are live at the start of the function,
+ insns are emitted to initialize these registers. Something similar is
+ done after CALL_INSNs in record_reg_life. */
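+ /* E.g. (hypothetical): a loop latch block J that jumps back to its
+ header B, with J later in the layout, is processed before B; if
+ B's entry life includes regs not yet recorded live out of J, the
+ scan backs up to J and runs again. */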
+
+static void
+stack_reg_life_analysis (first)
+ rtx first;
+{
+ int reg, block;
+ struct stack_def regstack;
+
+ if (current_function_returns_real
+ && stack_result_p (current_function_decl) >= 0)
+ {
+ /* Find all RETURN insns and mark them. */
+
+ int value_regno = stack_result_p (current_function_decl);
+
+ for (block = blocks - 1; block >= 0; block--)
+ if (GET_CODE (block_end[block]) == JUMP_INSN
+ && GET_CODE (PATTERN (block_end[block])) == RETURN)
+ SET_HARD_REG_BIT (block_out_reg_set[block], value_regno);
+
+ /* Mark the end of the last block if we "fall off" the end of the
+ function into the epilogue. */
+
+ if (GET_CODE (block_end[blocks-1]) != JUMP_INSN
+ || GET_CODE (PATTERN (block_end[blocks-1])) == RETURN)
+ SET_HARD_REG_BIT (block_out_reg_set[blocks-1], value_regno);
+ }
+
+ /* now scan all blocks backward for stack register use */
+
+ block = blocks - 1;
+ while (block >= 0)
+ {
+ register rtx insn, prev;
+
+ /* current register status at last instruction */
+
+ COPY_HARD_REG_SET (regstack.reg_set, block_out_reg_set[block]);
+
+ prev = block_end[block];
+ do
+ {
+ insn = prev;
+ prev = PREV_INSN (insn);
+
+ /* If the insn is a CALL_INSN, we need to ensure that
+ everything dies. But otherwise don't process unless there
+ are some stack regs present. */
+
+ if (GET_MODE (insn) == QImode || GET_CODE (insn) == CALL_INSN)
+ record_reg_life (insn, block, &regstack);
+
+ } while (insn != block_begin[block]);
+
+ /* Set the state at the start of the block. Mark that no
+ register mapping information is known yet. */
+
+ COPY_HARD_REG_SET (block_stack_in[block].reg_set, regstack.reg_set);
+ block_stack_in[block].top = -2;
+
+ /* If there is a label, propagate our register life to all jumps
+ to this label. */
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ register rtx label;
+ int must_restart = 0;
+
+ for (label = LABEL_REFS (insn); label != insn;
+ label = LABEL_NEXTREF (label))
+ {
+ int jump_block = BLOCK_NUM (CONTAINING_INSN (label));
+
+ if (jump_block < block)
+ IOR_HARD_REG_SET (block_out_reg_set[jump_block],
+ block_stack_in[block].reg_set);
+ else
+ {
+ /* The block containing the jump has already been
+ processed. If there are registers that were not known
+ to be live then, but are live now, we must back up
+ and restart life analysis from that point with the new
+ life information. */
+
+ GO_IF_HARD_REG_SUBSET (block_stack_in[block].reg_set,
+ block_out_reg_set[jump_block],
+ win);
+
+ IOR_HARD_REG_SET (block_out_reg_set[jump_block],
+ block_stack_in[block].reg_set);
+
+ block = jump_block;
+ must_restart = 1;
+
+ win:
+ ;
+ }
+ }
+ if (must_restart)
+ continue;
+ }
+
+ if (block_drops_in[block])
+ IOR_HARD_REG_SET (block_out_reg_set[block-1],
+ block_stack_in[block].reg_set);
+
+ block -= 1;
+ }
+
+ {
+ /* If any reg is live at the start of the first block of a
+ function, then we must guarantee that the reg holds some value by
+ generating our own "load" of that register. Otherwise a 387 would
+ fault trying to access an empty register. */
+
+ HARD_REG_SET empty_regs;
+ CLEAR_HARD_REG_SET (empty_regs);
+ GO_IF_HARD_REG_SUBSET (block_stack_in[0].reg_set, empty_regs,
+ no_live_regs);
+ }
+
+ /* Load zero into each live register. The fact that a register
+ appears live at the function start does not necessarily imply an error
+ in the user program: it merely means that we could not determine that
+ there wasn't such an error, just as -Wunused sometimes gives
+ "incorrect" warnings. In those cases, these initializations will do
+ no harm.
+
+ Note that we are inserting virtual register references here:
+ these insns must be processed by convert_regs later. Also, these
+ insns will not be in block_number, so BLOCK_NUM() will fail for them. */
+
+ for (reg = LAST_STACK_REG; reg >= FIRST_STACK_REG; reg--)
+ if (TEST_HARD_REG_BIT (block_stack_in[0].reg_set, reg))
+ {
+ rtx init_rtx;
+
+ init_rtx = gen_rtx (SET, VOIDmode, FP_mode_reg[reg][(int) DFmode],
+ CONST0_RTX (DFmode));
+ block_begin[0] = emit_insn_after (init_rtx, first);
+ PUT_MODE (block_begin[0], QImode);
+
+ CLEAR_HARD_REG_BIT (block_stack_in[0].reg_set, reg);
+ }
+
+ no_live_regs:
+ ;
+}
+
+/*****************************************************************************
+ This section deals with stack register substitution, and forms the second
+ pass over the RTL.
+ *****************************************************************************/
+
+/* Replace REG, which is a pointer to a stack reg RTX, with an RTX for
+ the desired hard REGNO. */
+
+static void
+replace_reg (reg, regno)
+ rtx *reg;
+ int regno;
+{
+ if (regno < FIRST_STACK_REG || regno > LAST_STACK_REG
+ || ! STACK_REG_P (*reg))
+ abort ();
+
+ if (GET_MODE_CLASS (GET_MODE (*reg)) != MODE_FLOAT)
+ abort ();
+
+ *reg = FP_mode_reg[regno][(int) GET_MODE (*reg)];
+}
+
+/* Remove a note of type NOTE, which must be found, for register
+ number REGNO from INSN. Remove only one such note. */
+
+static void
+remove_regno_note (insn, note, regno)
+ rtx insn;
+ enum reg_note note;
+ int regno;
+{
+ register rtx *note_link, this;
+
+ note_link = &REG_NOTES(insn);
+ for (this = *note_link; this; this = XEXP (this, 1))
+ if (REG_NOTE_KIND (this) == note
+ && REG_P (XEXP (this, 0)) && REGNO (XEXP (this, 0)) == regno)
+ {
+ *note_link = XEXP (this, 1);
+ return;
+ }
+ else
+ note_link = &XEXP (this, 1);
+
+ abort ();
+}
+
+/* Find the hard register number of virtual register REG in REGSTACK.
+ The hard register number is relative to the top of the stack. -1 is
+ returned if the register is not found. */
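+ /* E.g., the virtual reg at regstack->reg[regstack->top] maps to
+ FIRST_STACK_REG, the one below it to FIRST_STACK_REG + 1, and
+ so on. */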
+
+static int
+get_hard_regnum (regstack, reg)
+ stack regstack;
+ rtx reg;
+{
+ int i;
+
+ if (! STACK_REG_P (reg))
+ abort ();
+
+ for (i = regstack->top; i >= 0; i--)
+ if (regstack->reg[i] == REGNO (reg))
+ break;
+
+ return i >= 0 ? (FIRST_STACK_REG + regstack->top - i) : -1;
+}
+
+/* Delete INSN from the RTL. Mark the insn, but don't remove it from
+ the chain of insns. Doing so could confuse block_begin and block_end
+ if this were the only insn in the block. */
+
+static void
+delete_insn_for_stacker (insn)
+ rtx insn;
+{
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ INSN_DELETED_P (insn) = 1;
+}
+
+/* Emit an insn to pop virtual register REG before or after INSN.
+ REGSTACK is the stack state after INSN and is updated to reflect this
+ pop. WHEN is either emit_insn_before or emit_insn_after. A pop insn
+ is represented as a SET whose destination is the register to be popped
+ and source is the top of stack. A death note for the top of stack
+ causes the movdf pattern to pop. */
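+ /* Schematically (illustrative only), popping the reg sitting at
+ %st(2) looks like
+ (set (reg:DF st2) (reg:DF st0))
+ with a REG_DEAD note for st0, which the movdf pattern can render
+ as `fstp %st(2)'. */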
+
+static rtx
+emit_pop_insn (insn, regstack, reg, when)
+ rtx insn;
+ stack regstack;
+ rtx reg;
+ rtx (*when)();
+{
+ rtx pop_insn, pop_rtx;
+ int hard_regno;
+
+ hard_regno = get_hard_regnum (regstack, reg);
+
+ if (hard_regno < FIRST_STACK_REG)
+ abort ();
+
+ pop_rtx = gen_rtx (SET, VOIDmode, FP_mode_reg[hard_regno][(int) DFmode],
+ FP_mode_reg[FIRST_STACK_REG][(int) DFmode]);
+
+ pop_insn = (*when) (pop_rtx, insn);
+ /* ??? This used to be VOIDmode, but that seems wrong. */
+ PUT_MODE (pop_insn, QImode);
+
+ REG_NOTES (pop_insn) = gen_rtx (EXPR_LIST, REG_DEAD,
+ FP_mode_reg[FIRST_STACK_REG][(int) DFmode],
+ REG_NOTES (pop_insn));
+
+ regstack->reg[regstack->top - (hard_regno - FIRST_STACK_REG)]
+ = regstack->reg[regstack->top];
+ regstack->top -= 1;
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (reg));
+
+ return pop_insn;
+}
+
+/* Emit an insn before or after INSN to swap virtual register REG with
+ the top of stack. REGSTACK is the stack state before the swap, and is
+ updated to reflect the swap. A swap insn is represented as a PARALLEL
+ of two patterns:
+ each pattern moves one reg to the other.
+
+ If REG is already at the top of the stack, no insn is emitted. */
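+ /* Schematically (illustrative only), a swap of REG with %st(0):
+ (parallel [(set REG (reg:DF st0))
+ (set (reg:DF st0) REG)])
+ i.e. a single fxch. */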
+
+static void
+emit_swap_insn (insn, regstack, reg)
+ rtx insn;
+ stack regstack;
+ rtx reg;
+{
+ int hard_regno;
+ rtx gen_swapdf();
+ rtx swap_rtx, swap_insn;
+ int tmp, other_reg; /* swap regno temps */
+ rtx i1; /* the stack-reg insn prior to INSN */
+ rtx i1set = NULL_RTX; /* the SET rtx within I1 */
+
+ hard_regno = get_hard_regnum (regstack, reg);
+
+ if (hard_regno < FIRST_STACK_REG)
+ abort ();
+ if (hard_regno == FIRST_STACK_REG)
+ return;
+
+ other_reg = regstack->top - (hard_regno - FIRST_STACK_REG);
+
+ tmp = regstack->reg[other_reg];
+ regstack->reg[other_reg] = regstack->reg[regstack->top];
+ regstack->reg[regstack->top] = tmp;
+
+ /* Find the previous insn involving stack regs, but don't go past
+ any labels, calls or jumps. */
+ i1 = prev_nonnote_insn (insn);
+ while (i1 && GET_CODE (i1) == INSN && GET_MODE (i1) != QImode)
+ i1 = prev_nonnote_insn (i1);
+
+ if (i1)
+ i1set = single_set (i1);
+
+ if (i1set)
+ {
+ rtx i2; /* the stack-reg insn prior to I1 */
+ rtx i1src = *get_true_reg (&SET_SRC (i1set));
+ rtx i1dest = *get_true_reg (&SET_DEST (i1set));
+
+ /* If the previous register stack push was from the reg we are to
+ swap with, omit the swap. */
+
+ if (GET_CODE (i1dest) == REG && REGNO (i1dest) == FIRST_STACK_REG
+ && GET_CODE (i1src) == REG && REGNO (i1src) == hard_regno - 1
+ && find_regno_note (i1, REG_DEAD, FIRST_STACK_REG) == NULL_RTX)
+ return;
+
+ /* If the previous insn wrote to the reg we are to swap with,
+ omit the swap. */
+
+ if (GET_CODE (i1dest) == REG && REGNO (i1dest) == hard_regno
+ && GET_CODE (i1src) == REG && REGNO (i1src) == FIRST_STACK_REG
+ && find_regno_note (i1, REG_DEAD, FIRST_STACK_REG) == NULL_RTX)
+ return;
+ }
+
+ if (i1 != NULL_RTX
+ && GET_RTX_CLASS (GET_CODE (i1)) == 'i' && sets_cc0_p (PATTERN (i1)))
+ {
+ i1 = next_nonnote_insn (i1);
+ if (i1 == insn)
+ abort ();
+ }
+
+ swap_rtx = gen_swapdf (FP_mode_reg[hard_regno][(int) DFmode],
+ FP_mode_reg[FIRST_STACK_REG][(int) DFmode]);
+ /* If there was no prior stack-reg insn, emit the swap before INSN. */
+ if (i1 != NULL_RTX)
+ swap_insn = emit_insn_after (swap_rtx, i1);
+ else
+ swap_insn = emit_insn_before (swap_rtx, insn);
+ /* ??? This used to be VOIDmode, but that seems wrong. */
+ PUT_MODE (swap_insn, QImode);
+}
+
+/* Handle a move to or from a stack register in PAT, which is in INSN.
+ REGSTACK is the current stack. */
+
+static void
+move_for_stack_reg (insn, regstack, pat)
+ rtx insn;
+ stack regstack;
+ rtx pat;
+{
+ rtx *src = get_true_reg (&SET_SRC (pat));
+ rtx *dest = get_true_reg (&SET_DEST (pat));
+ rtx note;
+
+ if (STACK_REG_P (*src) && STACK_REG_P (*dest))
+ {
+ /* Write from one stack reg to another. If SRC dies here, then
+ just change the register mapping and delete the insn. */
+
+ note = find_regno_note (insn, REG_DEAD, REGNO (*src));
+ if (note)
+ {
+ int i;
+
+ /* If this is a no-op move, there must not be a REG_DEAD note. */
+ if (REGNO (*src) == REGNO (*dest))
+ abort ();
+
+ for (i = regstack->top; i >= 0; i--)
+ if (regstack->reg[i] == REGNO (*src))
+ break;
+
+ /* The source must be live, and the dest must be dead. */
+ if (i < 0 || get_hard_regnum (regstack, *dest) >= FIRST_STACK_REG)
+ abort ();
+
+ /* It is possible that the dest is unused after this insn.
+ If so, just pop the src. */
+
+ if (find_regno_note (insn, REG_UNUSED, REGNO (*dest)))
+ {
+ emit_pop_insn (insn, regstack, *src, emit_insn_after);
+
+ delete_insn_for_stacker (insn);
+ return;
+ }
+
+ regstack->reg[i] = REGNO (*dest);
+
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (*src));
+
+ delete_insn_for_stacker (insn);
+
+ return;
+ }
+
+ /* The source reg does not die. */
+
+ /* If this appears to be a no-op move, delete it, or else it
+ will confuse the machine description output patterns. But if
+ it is REG_UNUSED, we must pop the reg now, as per-insn processing
+ for REG_UNUSED will not work for deleted insns. */
+
+ if (REGNO (*src) == REGNO (*dest))
+ {
+ if (find_regno_note (insn, REG_UNUSED, REGNO (*dest)))
+ emit_pop_insn (insn, regstack, *dest, emit_insn_after);
+
+ delete_insn_for_stacker (insn);
+ return;
+ }
+
+ /* The destination ought to be dead */
+ if (get_hard_regnum (regstack, *dest) >= FIRST_STACK_REG)
+ abort ();
+
+ replace_reg (src, get_hard_regnum (regstack, *src));
+
+ regstack->reg[++regstack->top] = REGNO (*dest);
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, FIRST_STACK_REG);
+ }
+ else if (STACK_REG_P (*src))
+ {
+ /* Save from a stack reg to MEM, or possibly integer reg. Since
+ only top of stack may be saved, emit an exchange first if
+ need be. */
+
+ emit_swap_insn (insn, regstack, *src);
+
+ note = find_regno_note (insn, REG_DEAD, REGNO (*src));
+ if (note)
+ {
+ replace_reg (&XEXP (note, 0), FIRST_STACK_REG);
+ regstack->top--;
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (*src));
+ }
+ else if (GET_MODE (*src) == XFmode && regstack->top != REG_STACK_SIZE)
+ {
+ /* A 387 cannot write an XFmode value to a MEM without
+ clobbering the source reg. The output code can handle
+ this by reading back the value from the MEM.
+ But it is more efficient to use a temp register if one is
+ available. Push the source value here if the register
+ stack is not full, and then write the value to memory via
+ a pop. */
+ rtx push_rtx, push_insn;
+ rtx top_stack_reg = FP_mode_reg[FIRST_STACK_REG][(int) XFmode];
+
+ push_rtx = gen_movxf (top_stack_reg, top_stack_reg);
+ push_insn = emit_insn_before (push_rtx, insn);
+ PUT_MODE (push_insn, QImode);
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_DEAD, top_stack_reg,
+ REG_NOTES (insn));
+ }
+
+ replace_reg (src, FIRST_STACK_REG);
+ }
+ else if (STACK_REG_P (*dest))
+ {
+ /* Load from MEM, or possibly integer REG or constant, into the
+ stack regs. The actual target is always the top of the
+ stack. The stack mapping is changed to reflect that DEST is
+ now at top of stack. */
+
+ /* The destination ought to be dead */
+ if (get_hard_regnum (regstack, *dest) >= FIRST_STACK_REG)
+ abort ();
+
+ if (regstack->top >= REG_STACK_SIZE)
+ abort ();
+
+ regstack->reg[++regstack->top] = REGNO (*dest);
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, FIRST_STACK_REG);
+ }
+ else
+ abort ();
+}
+
+void
+swap_rtx_condition (pat)
+ rtx pat;
+{
+ register char *fmt;
+ register int i;
+
+ if (GET_RTX_CLASS (GET_CODE (pat)) == '<')
+ {
+ PUT_CODE (pat, swap_condition (GET_CODE (pat)));
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (GET_CODE (pat));
+ for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
+ swap_rtx_condition (XVECEXP (pat, i, j));
+ }
+ else if (fmt[i] == 'e')
+ swap_rtx_condition (XEXP (pat, i));
+ }
+}
+
+/* Handle a comparison. Special care needs to be taken to avoid
+ causing comparisons that a 387 cannot do correctly, such as EQ.
+
+ Also, a pop insn may need to be emitted. The 387 does have an
+ `fcompp' insn that can pop two regs, but it is sometimes too expensive
+ to do this - a `fcomp' followed by a `fstpl %st(0)' may be easier to
+ set up. */
+
+static void
+compare_for_stack_reg (insn, regstack, pat)
+ rtx insn;
+ stack regstack;
+ rtx pat;
+{
+ rtx *src1, *src2;
+ rtx src1_note, src2_note;
+
+ src1 = get_true_reg (&XEXP (SET_SRC (pat), 0));
+ src2 = get_true_reg (&XEXP (SET_SRC (pat), 1));
+
+ /* ??? If fxch turns out to be cheaper than fstp, give priority to
+ registers that die in this insn - move those to stack top first. */
+ if (! STACK_REG_P (*src1)
+ || (STACK_REG_P (*src2)
+ && get_hard_regnum (regstack, *src2) == FIRST_STACK_REG))
+ {
+ rtx temp, next;
+
+ temp = XEXP (SET_SRC (pat), 0);
+ XEXP (SET_SRC (pat), 0) = XEXP (SET_SRC (pat), 1);
+ XEXP (SET_SRC (pat), 1) = temp;
+
+ src1 = get_true_reg (&XEXP (SET_SRC (pat), 0));
+ src2 = get_true_reg (&XEXP (SET_SRC (pat), 1));
+
+ next = next_cc0_user (insn);
+ if (next == NULL_RTX)
+ abort ();
+
+ swap_rtx_condition (PATTERN (next));
+ INSN_CODE (next) = -1;
+ INSN_CODE (insn) = -1;
+ }
+
+ /* We will fix any death note later. */
+
+ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
+
+ if (STACK_REG_P (*src2))
+ src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2));
+ else
+ src2_note = NULL_RTX;
+
+ emit_swap_insn (insn, regstack, *src1);
+
+ replace_reg (src1, FIRST_STACK_REG);
+
+ if (STACK_REG_P (*src2))
+ replace_reg (src2, get_hard_regnum (regstack, *src2));
+
+ if (src1_note)
+ {
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (XEXP (src1_note, 0)));
+ replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG);
+ regstack->top--;
+ }
+
+ /* If the second operand dies, handle that. But if the operands are
+ the same stack register, don't bother, because only one death is
+ needed, and it was just handled. */
+
+ if (src2_note
+ && ! (STACK_REG_P (*src1) && STACK_REG_P (*src2)
+ && REGNO (*src1) == REGNO (*src2)))
+ {
+ /* As a special case, two regs may die in this insn if src2 is
+ next to top of stack and the top of stack also dies. Since
+ we have already popped src1, "next to top of stack" is really
+ at top (FIRST_STACK_REG) now. */
+
+ if (get_hard_regnum (regstack, XEXP (src2_note, 0)) == FIRST_STACK_REG
+ && src1_note)
+ {
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (XEXP (src2_note, 0)));
+ replace_reg (&XEXP (src2_note, 0), FIRST_STACK_REG + 1);
+ regstack->top--;
+ }
+ else
+ {
+ /* The 386 can only represent death of the first operand in
+ the case handled above. In all other cases, emit a separate
+ pop and remove the death note from here. */
+
+ link_cc0_insns (insn);
+
+ remove_regno_note (insn, REG_DEAD, REGNO (XEXP (src2_note, 0)));
+
+ emit_pop_insn (insn, regstack, XEXP (src2_note, 0),
+ emit_insn_after);
+ }
+ }
+}
+
+/* Substitute new registers in PAT, which is part of INSN. REGSTACK
+ is the current register layout. */
+
+static void
+subst_stack_regs_pat (insn, regstack, pat)
+ rtx insn;
+ stack regstack;
+ rtx pat;
+{
+ rtx *dest, *src;
+ rtx *src1 = (rtx *) NULL_PTR, *src2;
+ rtx src1_note, src2_note;
+
+ if (GET_CODE (pat) != SET)
+ return;
+
+ dest = get_true_reg (&SET_DEST (pat));
+ src = get_true_reg (&SET_SRC (pat));
+
+ /* See if this is a `movM' pattern, and handle elsewhere if so. */
+
+ if (*dest != cc0_rtx
+ && (STACK_REG_P (*src)
+ || (STACK_REG_P (*dest)
+ && (GET_CODE (*src) == REG || GET_CODE (*src) == MEM
+ || GET_CODE (*src) == CONST_DOUBLE))))
+ move_for_stack_reg (insn, regstack, pat);
+ else
+ switch (GET_CODE (SET_SRC (pat)))
+ {
+ case COMPARE:
+ compare_for_stack_reg (insn, regstack, pat);
+ break;
+
+ case CALL:
+ regstack->reg[++regstack->top] = REGNO (*dest);
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, FIRST_STACK_REG);
+ break;
+
+ case REG:
+ /* This is a `tstM2' case. */
+ if (*dest != cc0_rtx)
+ abort ();
+
+ src1 = src;
+
+ /* Fall through. */
+
+ case FLOAT_TRUNCATE:
+ case SQRT:
+ case ABS:
+ case NEG:
+ /* These insns only operate on the top of the stack. DEST might
+ be cc0_rtx if we're processing a tstM pattern. Also, it's
+ possible that the tstM case results in a REG_DEAD note on the
+ source. */
+
+ if (src1 == 0)
+ src1 = get_true_reg (&XEXP (SET_SRC (pat), 0));
+
+ emit_swap_insn (insn, regstack, *src1);
+
+ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
+
+ if (STACK_REG_P (*dest))
+ replace_reg (dest, FIRST_STACK_REG);
+
+ if (src1_note)
+ {
+ replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG);
+ regstack->top--;
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (*src1));
+ }
+
+ replace_reg (src1, FIRST_STACK_REG);
+
+ break;
+
+ case MINUS:
+ case DIV:
+ /* On i386, reversed forms of subM3 and divM3 exist for
+ MODE_FLOAT, so the same code that works for addM3 and mulM3
+ can be used. */
+ case MULT:
+ case PLUS:
+ /* These insns can accept the top of stack as a destination
+ from a stack reg or mem, or can use the top of stack as a
+ source and some other stack register (possibly top of stack)
+ as a destination. */
+
+ src1 = get_true_reg (&XEXP (SET_SRC (pat), 0));
+ src2 = get_true_reg (&XEXP (SET_SRC (pat), 1));
+
+ /* We will fix any death note later. */
+
+ if (STACK_REG_P (*src1))
+ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
+ else
+ src1_note = NULL_RTX;
+ if (STACK_REG_P (*src2))
+ src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2));
+ else
+ src2_note = NULL_RTX;
+
+ /* If either operand is not a stack register, then the dest
+ must be top of stack. */
+
+ if (! STACK_REG_P (*src1) || ! STACK_REG_P (*src2))
+ emit_swap_insn (insn, regstack, *dest);
+ else
+ {
+ /* Both operands are REG. If neither operand is already
+ at the top of stack, choose to make the one that is the dest
+ the new top of stack. */
+
+ int src1_hard_regnum, src2_hard_regnum;
+
+ src1_hard_regnum = get_hard_regnum (regstack, *src1);
+ src2_hard_regnum = get_hard_regnum (regstack, *src2);
+ if (src1_hard_regnum == -1 || src2_hard_regnum == -1)
+ abort ();
+
+ if (src1_hard_regnum != FIRST_STACK_REG
+ && src2_hard_regnum != FIRST_STACK_REG)
+ emit_swap_insn (insn, regstack, *dest);
+ }
+
+ if (STACK_REG_P (*src1))
+ replace_reg (src1, get_hard_regnum (regstack, *src1));
+ if (STACK_REG_P (*src2))
+ replace_reg (src2, get_hard_regnum (regstack, *src2));
+
+ if (src1_note)
+ {
+ /* If the register that dies is at the top of stack, then
+ the destination is somewhere else - merely substitute it.
+ But if the reg that dies is not at top of stack, then
+ move the top of stack to the dead reg, as though we had
+ done the insn and then a store-with-pop. */
+
+ if (REGNO (XEXP (src1_note, 0)) == regstack->reg[regstack->top])
+ {
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, get_hard_regnum (regstack, *dest));
+ }
+ else
+ {
+ int regno = get_hard_regnum (regstack, XEXP (src1_note, 0));
+
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, regno);
+
+ regstack->reg[regstack->top - (regno - FIRST_STACK_REG)]
+ = regstack->reg[regstack->top];
+ }
+
+ CLEAR_HARD_REG_BIT (regstack->reg_set,
+ REGNO (XEXP (src1_note, 0)));
+ replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG);
+ regstack->top--;
+ }
+ else if (src2_note)
+ {
+ if (REGNO (XEXP (src2_note, 0)) == regstack->reg[regstack->top])
+ {
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, get_hard_regnum (regstack, *dest));
+ }
+ else
+ {
+ int regno = get_hard_regnum (regstack, XEXP (src2_note, 0));
+
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, regno);
+
+ regstack->reg[regstack->top - (regno - FIRST_STACK_REG)]
+ = regstack->reg[regstack->top];
+ }
+
+ CLEAR_HARD_REG_BIT (regstack->reg_set,
+ REGNO (XEXP (src2_note, 0)));
+ replace_reg (&XEXP (src2_note, 0), FIRST_STACK_REG);
+ regstack->top--;
+ }
+ else
+ {
+ SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
+ replace_reg (dest, get_hard_regnum (regstack, *dest));
+ }
+
+ break;
+
+ case UNSPEC:
+ switch (XINT (SET_SRC (pat), 1))
+ {
+ case 1: /* sin */
+ case 2: /* cos */
+ /* These insns only operate on the top of the stack. */
+
+ src1 = get_true_reg (&XVECEXP (SET_SRC (pat), 0, 0));
+
+ emit_swap_insn (insn, regstack, *src1);
+
+ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
+
+ if (STACK_REG_P (*dest))
+ replace_reg (dest, FIRST_STACK_REG);
+
+ if (src1_note)
+ {
+ replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG);
+ regstack->top--;
+ CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (*src1));
+ }
+
+ replace_reg (src1, FIRST_STACK_REG);
+
+ break;
+
+ default:
+ abort ();
+ }
+ break;
+
+ default:
+ abort ();
+ }
+}
+
+/* Substitute hard regnums for any stack regs in INSN, which has
+ N_INPUTS inputs and N_OUTPUTS outputs. REGSTACK is the stack info
+ before the insn, and is updated with changes made here. CONSTRAINTS is
+ an array of the constraint strings used in the asm statement.
+
+ OPERANDS is an array of the operands, and OPERANDS_LOC is a
+ parallel array of where the operands were found. The output operands
+ all precede the input operands.
+
+ There are several requirements and assumptions about the use of
+ stack-like regs in asm statements. These rules are enforced by
+ record_asm_reg_life; see comments there for details. Any
+ asm_operands left in the RTL at this point may be assumed to meet the
+ requirements, since record_asm_reg_life removes any problem asm. */
+
+static void
+subst_asm_stack_regs (insn, regstack, operands, operands_loc, constraints,
+ n_inputs, n_outputs)
+ rtx insn;
+ stack regstack;
+ rtx *operands, **operands_loc;
+ char **constraints;
+ int n_inputs, n_outputs;
+{
+ int n_operands = n_inputs + n_outputs;
+ int first_input = n_outputs;
+ rtx body = PATTERN (insn);
+
+ int *operand_matches = (int *) alloca (n_operands * sizeof (int));
+ enum reg_class *operand_class
+ = (enum reg_class *) alloca (n_operands * sizeof (enum reg_class));
+
+ rtx *note_reg; /* Array of note contents */
+ rtx **note_loc; /* Address of REG field of each note */
+ enum reg_note *note_kind; /* The type of each note */
+
+ rtx *clobber_reg;
+ rtx **clobber_loc;
+
+ struct stack_def temp_stack;
+ int n_notes;
+ int n_clobbers;
+ rtx note;
+ int i;
+
+ /* Find out what the constraints require. If no constraint
+ alternative matches, that is a compiler bug: we should have caught
+ such an insn during the life analysis pass (and reload should have
+ caught it regardless). */
+
+ i = constrain_asm_operands (n_operands, operands, constraints,
+ operand_matches, operand_class);
+ if (i < 0)
+ abort ();
+
+ /* Strip SUBREGs here to make the following code simpler. */
+ for (i = 0; i < n_operands; i++)
+ if (GET_CODE (operands[i]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[i])) == REG)
+ {
+ operands_loc[i] = & SUBREG_REG (operands[i]);
+ operands[i] = SUBREG_REG (operands[i]);
+ }
+
+ /* Set up NOTE_REG, NOTE_LOC and NOTE_KIND. */
+
+ for (i = 0, note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ i++;
+
+ note_reg = (rtx *) alloca (i * sizeof (rtx));
+ note_loc = (rtx **) alloca (i * sizeof (rtx *));
+ note_kind = (enum reg_note *) alloca (i * sizeof (enum reg_note));
+
+ n_notes = 0;
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ {
+ rtx reg = XEXP (note, 0);
+ rtx *loc = & XEXP (note, 0);
+
+ if (GET_CODE (reg) == SUBREG && GET_CODE (SUBREG_REG (reg)) == REG)
+ {
+ loc = & SUBREG_REG (reg);
+ reg = SUBREG_REG (reg);
+ }
+
+ if (STACK_REG_P (reg)
+ && (REG_NOTE_KIND (note) == REG_DEAD
+ || REG_NOTE_KIND (note) == REG_UNUSED))
+ {
+ note_reg[n_notes] = reg;
+ note_loc[n_notes] = loc;
+ note_kind[n_notes] = REG_NOTE_KIND (note);
+ n_notes++;
+ }
+ }
+
+ /* Set up CLOBBER_REG and CLOBBER_LOC. */
+
+ n_clobbers = 0;
+
+ if (GET_CODE (body) == PARALLEL)
+ {
+ clobber_reg = (rtx *) alloca (XVECLEN (body, 0) * sizeof (rtx *));
+ clobber_loc = (rtx **) alloca (XVECLEN (body, 0) * sizeof (rtx **));
+
+ for (i = 0; i < XVECLEN (body, 0); i++)
+ if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
+ {
+ rtx clobber = XVECEXP (body, 0, i);
+ rtx reg = XEXP (clobber, 0);
+ rtx *loc = & XEXP (clobber, 0);
+
+ if (GET_CODE (reg) == SUBREG && GET_CODE (SUBREG_REG (reg)) == REG)
+ {
+ loc = & SUBREG_REG (reg);
+ reg = SUBREG_REG (reg);
+ }
+
+ if (STACK_REG_P (reg))
+ {
+ clobber_reg[n_clobbers] = reg;
+ clobber_loc[n_clobbers] = loc;
+ n_clobbers++;
+ }
+ }
+ }
+
+ bcopy (regstack, &temp_stack, sizeof (temp_stack));
+
+ /* Put the input regs into the desired place in TEMP_STACK. */
+
+ for (i = first_input; i < first_input + n_inputs; i++)
+ if (STACK_REG_P (operands[i])
+ && reg_class_subset_p (operand_class[i], FLOAT_REGS)
+ && operand_class[i] != FLOAT_REGS)
+ {
+ /* If an operand needs to be in a particular reg in
+ FLOAT_REGS, the constraint was either 't' or 'u'. Since
+ these constraints are for single register classes, and reload
+ guaranteed that operand[i] is already in that class, we can
+ just use REGNO (operands[i]) to know which actual reg this
+ operand needs to be in. */
+
+ int regno = get_hard_regnum (&temp_stack, operands[i]);
+
+ if (regno < 0)
+ abort ();
+
+ if (regno != REGNO (operands[i]))
+ {
+ /* operands[i] is not in the right place. Find it
+ and swap it with whatever is already in I's place.
+ K is where operands[i] is now. J is where it should
+ be. */
+ int j, k, temp;
+
+ k = temp_stack.top - (regno - FIRST_STACK_REG);
+ j = (temp_stack.top
+ - (REGNO (operands[i]) - FIRST_STACK_REG));
+
+ temp = temp_stack.reg[k];
+ temp_stack.reg[k] = temp_stack.reg[j];
+ temp_stack.reg[j] = temp;
+ }
+ }
+
+ /* emit insns before INSN to make sure the reg-stack is in the right
+ order. */
+
+ change_stack (insn, regstack, &temp_stack, emit_insn_before);
+
+ /* Make the needed input register substitutions. Do death notes and
+ clobbers too, because these are for inputs, not outputs. */
+
+ for (i = first_input; i < first_input + n_inputs; i++)
+ if (STACK_REG_P (operands[i]))
+ {
+ int regnum = get_hard_regnum (regstack, operands[i]);
+
+ if (regnum < 0)
+ abort ();
+
+ replace_reg (operands_loc[i], regnum);
+ }
+
+ for (i = 0; i < n_notes; i++)
+ if (note_kind[i] == REG_DEAD)
+ {
+ int regnum = get_hard_regnum (regstack, note_reg[i]);
+
+ if (regnum < 0)
+ abort ();
+
+ replace_reg (note_loc[i], regnum);
+ }
+
+ for (i = 0; i < n_clobbers; i++)
+ {
+ /* It's OK for a CLOBBER to reference a reg that is not live.
+ Don't try to replace it in that case. */
+ int regnum = get_hard_regnum (regstack, clobber_reg[i]);
+
+ if (regnum >= 0)
+ {
+ /* Sigh - clobbers always have QImode. But replace_reg knows
+ that these regs can't be MODE_INT and will abort. Just put
+ the right reg there without calling replace_reg. */
+
+ *clobber_loc[i] = FP_mode_reg[regnum][(int) DFmode];
+ }
+ }
+
+ /* Now remove from REGSTACK any inputs that the asm implicitly popped. */
+
+ for (i = first_input; i < first_input + n_inputs; i++)
+ if (STACK_REG_P (operands[i]))
+ {
+ /* An input reg is implicitly popped if it is tied to an
+ output, or if there is a CLOBBER for it. */
+ int j;
+
+ for (j = 0; j < n_clobbers; j++)
+ if (operands_match_p (clobber_reg[j], operands[i]))
+ break;
+
+ if (j < n_clobbers || operand_matches[i] >= 0)
+ {
+ /* operands[i] might not be at the top of stack. But that's OK,
+ because all we need to do is pop the right number of regs
+ off of the top of the reg-stack. record_asm_reg_life
+ guaranteed that all implicitly popped regs were grouped
+ at the top of the reg-stack. */
+
+ CLEAR_HARD_REG_BIT (regstack->reg_set,
+ regstack->reg[regstack->top]);
+ regstack->top--;
+ }
+ }
+
+ /* Now add to REGSTACK any outputs that the asm implicitly pushed.
+ Note that there isn't any need to substitute register numbers.
+ ??? Explain why this is true. */
+
+ for (i = LAST_STACK_REG; i >= FIRST_STACK_REG; i--)
+ {
+ /* See if there is an output for this hard reg. */
+ int j;
+
+ for (j = 0; j < n_outputs; j++)
+ if (STACK_REG_P (operands[j]) && REGNO (operands[j]) == i)
+ {
+ regstack->reg[++regstack->top] = i;
+ SET_HARD_REG_BIT (regstack->reg_set, i);
+ break;
+ }
+ }
+
+ /* Now emit a pop insn for any REG_UNUSED output, or any REG_DEAD
+ input that the asm didn't implicitly pop. If the asm didn't
+ implicitly pop an input reg, that reg will still be live.
+
+ Note that we can't use find_regno_note here: the register numbers
+ in the death notes have already been substituted. */
+
+ for (i = 0; i < n_outputs; i++)
+ if (STACK_REG_P (operands[i]))
+ {
+ int j;
+
+ for (j = 0; j < n_notes; j++)
+ if (REGNO (operands[i]) == REGNO (note_reg[j])
+ && note_kind[j] == REG_UNUSED)
+ {
+ insn = emit_pop_insn (insn, regstack, operands[i],
+ emit_insn_after);
+ break;
+ }
+ }
+
+ for (i = first_input; i < first_input + n_inputs; i++)
+ if (STACK_REG_P (operands[i]))
+ {
+ int j;
+
+ for (j = 0; j < n_notes; j++)
+ if (REGNO (operands[i]) == REGNO (note_reg[j])
+ && note_kind[j] == REG_DEAD
+ && TEST_HARD_REG_BIT (regstack->reg_set, REGNO (operands[i])))
+ {
+ insn = emit_pop_insn (insn, regstack, operands[i],
+ emit_insn_after);
+ break;
+ }
+ }
+}
+
+/* Substitute stack hard reg numbers for stack virtual registers in
+ INSN. Non-stack register numbers are not changed. REGSTACK is the
+ current stack content. Insns may be emitted as needed to arrange the
+ stack for the 387 based on the contents of the insn. */
+
+static void
+subst_stack_regs (insn, regstack)
+ rtx insn;
+ stack regstack;
+{
+ register rtx *note_link, note;
+ register int i;
+ int n_operands;
+
+ if ((GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
+ || INSN_DELETED_P (insn))
+ return;
+
+ /* The stack should be empty at a call. */
+
+ if (GET_CODE (insn) == CALL_INSN)
+ for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
+ if (TEST_HARD_REG_BIT (regstack->reg_set, i))
+ abort ();
+
+ /* Do the actual substitution if any stack regs are mentioned.
+ Since we only record whether the entire insn mentions stack regs, and
+ subst_stack_regs_pat only works for patterns that contain stack regs,
+ we must check each pattern in a parallel here. A call_value_pop could
+ fail otherwise. */
+
+ if (GET_MODE (insn) == QImode)
+ {
+ n_operands = asm_noperands (PATTERN (insn));
+ if (n_operands >= 0)
+ {
+ /* This insn is an `asm' with operands. Decode the operands,
+ decide how many are inputs, and do register substitution.
+ Any REG_UNUSED notes will be handled by subst_asm_stack_regs. */
+
+ rtx operands[MAX_RECOG_OPERANDS];
+ rtx *operands_loc[MAX_RECOG_OPERANDS];
+ rtx body = PATTERN (insn);
+ int n_inputs, n_outputs;
+ char **constraints
+ = (char **) alloca (n_operands * sizeof (char *));
+
+ decode_asm_operands (body, operands, operands_loc,
+ constraints, NULL_PTR);
+ get_asm_operand_lengths (body, n_operands, &n_inputs, &n_outputs);
+ subst_asm_stack_regs (insn, regstack, operands, operands_loc,
+ constraints, n_inputs, n_outputs);
+ return;
+ }
+
+ if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ {
+ if (stack_regs_mentioned_p (XVECEXP (PATTERN (insn), 0, i)))
+ subst_stack_regs_pat (insn, regstack,
+ XVECEXP (PATTERN (insn), 0, i));
+ }
+ else
+ subst_stack_regs_pat (insn, regstack, PATTERN (insn));
+ }
+
+ /* subst_stack_regs_pat may have deleted a no-op insn. If so, any
+ REG_UNUSED will already have been dealt with, so just return. */
+
+ if (INSN_DELETED_P (insn))
+ return;
+
+ /* If there is a REG_UNUSED note on a stack register on this insn,
+ the indicated reg must be popped. The REG_UNUSED note is removed,
+ since the form of the newly emitted pop insn references the reg,
+ making it no longer `unused'. */
+
+ note_link = &REG_NOTES(insn);
+ for (note = *note_link; note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_UNUSED && STACK_REG_P (XEXP (note, 0)))
+ {
+ *note_link = XEXP (note, 1);
+ insn = emit_pop_insn (insn, regstack, XEXP (note, 0), emit_insn_after);
+ }
+ else
+ note_link = &XEXP (note, 1);
+}
+
+/* Change the organization of the stack so that it fits a new basic
+ block. Some registers might have to be popped, but there can never be
+ a register live in the new block that is not now live.
+
+ Insert any needed insns before or after INSN. WHEN is emit_insn_before
+ or emit_insn_after. OLD is the original stack layout, and NEW is
+ the desired form. OLD is updated to reflect the code emitted, i.e., it
+ will be the same as NEW upon return.
+
+ This function will not preserve block_end[]. But that information
+ is no longer needed once this has executed. */
+
+static void
+change_stack (insn, old, new, when)
+ rtx insn;
+ stack old;
+ stack new;
+ rtx (*when)();
+{
+ int reg;
+
+ /* We will be inserting new insns "backwards", by calling emit_insn_before.
+ If we are to insert after INSN, find the next insn, and insert before
+ it. */
+
+ if (when == emit_insn_after)
+ insn = NEXT_INSN (insn);
+
+ /* Pop any registers that are not needed in the new block. */
+
+ for (reg = old->top; reg >= 0; reg--)
+ if (! TEST_HARD_REG_BIT (new->reg_set, old->reg[reg]))
+ emit_pop_insn (insn, old, FP_mode_reg[old->reg[reg]][(int) DFmode],
+ emit_insn_before);
+
+ if (new->top == -2)
+ {
+ /* If the new block has never been processed, then it can inherit
+ the old stack order. */
+
+ new->top = old->top;
+ bcopy (old->reg, new->reg, sizeof (new->reg));
+ }
+ else
+ {
+ /* This block has been entered before, and we must match the
+ previously selected stack order. */
+
+ /* By now, the only difference should be the order of the regs
+ within the stack, not their depth or liveness. */
+
+ GO_IF_HARD_REG_EQUAL (old->reg_set, new->reg_set, win);
+
+ abort ();
+
+ win:
+
+ if (old->top != new->top)
+ abort ();
+
+ /* Loop here emitting swaps until the stack is correct. The
+ worst case number of swaps emitted is N + 2, where N is the
+ depth of the stack. In some cases, the reg at the top of
+ stack may be correct, but swapped anyway in order to fix
+ other regs. But since we never swap any other reg away from
+ its correct slot, this algorithm will converge. */
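+ /* E.g. (hypothetical), for a two-deep stack: if OLD holds a,b
+ (b on top) and NEW wants b,a, one swap of a with the top puts
+ both regs in place. */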
+
+ do
+ {
+ /* Swap the reg at top of stack into the position it is
+ supposed to be in, until the correct top of stack appears. */
+
+ while (old->reg[old->top] != new->reg[new->top])
+ {
+ for (reg = new->top; reg >= 0; reg--)
+ if (new->reg[reg] == old->reg[old->top])
+ break;
+
+ if (reg == -1)
+ abort ();
+
+ emit_swap_insn (insn, old,
+ FP_mode_reg[old->reg[reg]][(int) DFmode]);
+ }
+
+ /* See if any regs remain incorrect. If so, bring an
+ incorrect reg to the top of stack, and let the while loop
+ above fix it. */
+
+ for (reg = new->top; reg >= 0; reg--)
+ if (new->reg[reg] != old->reg[reg])
+ {
+ emit_swap_insn (insn, old,
+ FP_mode_reg[old->reg[reg]][(int) DFmode]);
+ break;
+ }
+ } while (reg >= 0);
+
+ /* At this point there must be no differences. */
+
+ for (reg = old->top; reg >= 0; reg--)
+ if (old->reg[reg] != new->reg[reg])
+ abort ();
+ }
+}
+
+/* Check PAT, which points to RTL in INSN, for a LABEL_REF. If it is
+ found, ensure that a jump from INSN to the code_label to which the
+ label_ref points ends up with the same stack as that at the
+ code_label. Do this by inserting insns just before the code_label to
+ pop and rotate the stack until it is in the correct order. REGSTACK
+ is the order of the register stack in INSN.
+
+ Any code that is emitted here must not be later processed as part
+ of any block, as it will already contain hard register numbers. */
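+ /* Schematically (illustrative only): a jump to L1 that needs
+ fix-ups is redirected to a new label L2 emitted just before L1,
+ shielded from fall-through by a direct jump to L1; the pops and
+ swaps emitted after L2 fall through into L1 with the stack in
+ L1's order. */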
+
+static void
+goto_block_pat (insn, regstack, pat)
+ rtx insn;
+ stack regstack;
+ rtx pat;
+{
+ rtx label;
+ rtx new_jump, new_label, new_barrier;
+ rtx *ref;
+ stack label_stack;
+ struct stack_def temp_stack;
+ int reg;
+
+ if (GET_CODE (pat) != LABEL_REF)
+ {
+ int i, j;
+ char *fmt = GET_RTX_FORMAT (GET_CODE (pat));
+
+ for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ goto_block_pat (insn, regstack, XEXP (pat, i));
+ if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (pat, i); j++)
+ goto_block_pat (insn, regstack, XVECEXP (pat, i, j));
+ }
+ return;
+ }
+
+ label = XEXP (pat, 0);
+ if (GET_CODE (label) != CODE_LABEL)
+ abort ();
+
+ /* First, see if in fact anything needs to be done to the stack at all. */
+ if (INSN_UID (label) <= 0)
+ return;
+
+ label_stack = &block_stack_in[BLOCK_NUM (label)];
+
+ if (label_stack->top == -2)
+ {
+ /* If the target block hasn't had a stack order selected, then
+ we need merely ensure that no pops are needed. */
+
+ for (reg = regstack->top; reg >= 0; reg--)
+ if (! TEST_HARD_REG_BIT (label_stack->reg_set, regstack->reg[reg]))
+ break;
+
+ if (reg == -1)
+ {
+ /* change_stack will not emit any code in this case. */
+
+ change_stack (label, regstack, label_stack, emit_insn_after);
+ return;
+ }
+ }
+ else if (label_stack->top == regstack->top)
+ {
+ for (reg = label_stack->top; reg >= 0; reg--)
+ if (label_stack->reg[reg] != regstack->reg[reg])
+ break;
+
+ if (reg == -1)
+ return;
+ }
+
+ /* At least one insn will need to be inserted before label. Insert
+ a jump around the code we are about to emit. Emit a label for the new
+ code, and point the original insn at this new label. We can't use
+ redirect_jump here, because we're using fld[4] of the code labels as
+ LABEL_REF chains, not NUSES counters. */
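+
+ /* The resulting layout is roughly:
+
+        jmp label      (the old fall-through path skips the fixup code)
+        barrier
+    new_label:         (INSN's jump is redirected here)
+        <pops and swaps emitted by change_stack below>
+    label:
+ */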
+
+ new_jump = emit_jump_insn_before (gen_jump (label), label);
+ record_label_references (new_jump, PATTERN (new_jump));
+ JUMP_LABEL (new_jump) = label;
+
+ new_barrier = emit_barrier_after (new_jump);
+
+ new_label = gen_label_rtx ();
+ emit_label_after (new_label, new_barrier);
+ LABEL_REFS (new_label) = new_label;
+
+ /* The old label_ref will no longer point to the code_label it now uses,
+ so strip the label_ref from that code_label's chain of references. */
+
+ for (ref = &LABEL_REFS (label); *ref != label; ref = &LABEL_NEXTREF (*ref))
+ if (*ref == pat)
+ break;
+
+ if (*ref == label)
+ abort ();
+
+ *ref = LABEL_NEXTREF (*ref);
+
+ XEXP (pat, 0) = new_label;
+ record_label_references (insn, PATTERN (insn));
+
+ if (JUMP_LABEL (insn) == label)
+ JUMP_LABEL (insn) = new_label;
+
+ /* Now emit the needed code. */
+
+ temp_stack = *regstack;
+
+ change_stack (new_label, &temp_stack, label_stack, emit_insn_after);
+}
+
+/* Traverse all basic blocks in a function, converting the register
+ references in each insn from the "flat" register file that gcc uses, to
+ the stack-like registers the 387 uses. */
+
+static void
+convert_regs ()
+{
+ register int block, reg;
+ register rtx insn, next;
+ struct stack_def regstack;
+
+ for (block = 0; block < blocks; block++)
+ {
+ if (block_stack_in[block].top == -2)
+ {
+ /* This block has not been previously encountered. Choose a
+ default mapping for any stack regs live on entry. */
+
+ block_stack_in[block].top = -1;
+
+ for (reg = LAST_STACK_REG; reg >= FIRST_STACK_REG; reg--)
+ if (TEST_HARD_REG_BIT (block_stack_in[block].reg_set, reg))
+ block_stack_in[block].reg[++block_stack_in[block].top] = reg;
+ }
+
+ /* Process all insns in this block. Keep track of `next' here,
+ so that we don't process any insns emitted while making
+ substitutions in INSN. */
+
+ next = block_begin[block];
+ regstack = block_stack_in[block];
+ do
+ {
+ insn = next;
+ next = NEXT_INSN (insn);
+
+ /* Don't bother processing unless there is a stack reg
+ mentioned.
+
+ ??? For now, process CALL_INSNs too to make sure that the
+ stack regs are dead after a call. Remove this eventually. */
+
+ if (GET_MODE (insn) == QImode || GET_CODE (insn) == CALL_INSN)
+ subst_stack_regs (insn, &regstack);
+
+ } while (insn != block_end[block]);
+
+ /* Something failed if the stack life doesn't match. */
+
+ GO_IF_HARD_REG_EQUAL (regstack.reg_set, block_out_reg_set[block], win);
+
+ abort ();
+
+ win:
+
+ /* Adjust the stack of this block on exit to match the stack of
+ the target block, or copy stack information into stack of
+ jump target if the target block's stack order hasn't been set
+ yet. */
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ goto_block_pat (insn, &regstack, PATTERN (insn));
+
+ /* Likewise handle the case where we fall into the next block. */
+
+ if ((block < blocks - 1) && block_drops_in[block+1])
+ change_stack (insn, &regstack, &block_stack_in[block+1],
+ emit_insn_after);
+ }
+
+ /* If the last basic block is the end of a loop, and that loop has
+ regs live at its start, then the last basic block will have regs live
+ at its end that need to be popped before the function returns. */
+
+ for (reg = regstack.top; reg >= 0; reg--)
+ if (! current_function_returns_real
+ || regstack.reg[reg] != FIRST_STACK_REG)
+ insn = emit_pop_insn (insn, &regstack,
+ FP_mode_reg[regstack.reg[reg]][(int) DFmode],
+ emit_insn_after);
+}
+
+/* Check expression PAT, which is in INSN, for label references. If
+ one is found, print the block number of the destination to FILE. */
+
+static void
+print_blocks (file, insn, pat)
+ FILE *file;
+ rtx insn, pat;
+{
+ register RTX_CODE code = GET_CODE (pat);
+ register int i;
+ register char *fmt;
+
+ if (code == LABEL_REF)
+ {
+ register rtx label = XEXP (pat, 0);
+
+ if (GET_CODE (label) != CODE_LABEL)
+ abort ();
+
+ fprintf (file, " %d", BLOCK_NUM (label));
+
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ print_blocks (file, insn, XEXP (pat, i));
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (pat, i); j++)
+ print_blocks (file, insn, XVECEXP (pat, i, j));
+ }
+ }
+}
+
+/* Write information about stack registers and stack blocks into FILE.
+ This is part of making a debugging dump. */
+static void
+dump_stack_info (file)
+ FILE *file;
+{
+ register int block;
+
+ fprintf (file, "\n%d stack blocks.\n", blocks);
+ for (block = 0; block < blocks; block++)
+ {
+ register rtx head, jump, end;
+ register int regno;
+
+ fprintf (file, "\nStack block %d: first insn %d, last %d.\n",
+ block, INSN_UID (block_begin[block]),
+ INSN_UID (block_end[block]));
+
+ head = block_begin[block];
+
+ fprintf (file, "Reached from blocks: ");
+ if (GET_CODE (head) == CODE_LABEL)
+ for (jump = LABEL_REFS (head);
+ jump != head;
+ jump = LABEL_NEXTREF (jump))
+ {
+ register int from_block = BLOCK_NUM (CONTAINING_INSN (jump));
+ fprintf (file, " %d", from_block);
+ }
+ if (block_drops_in[block])
+ fprintf (file, " previous");
+
+ fprintf (file, "\nlive stack registers on block entry: ");
+ for (regno = FIRST_STACK_REG; regno <= LAST_STACK_REG ; regno++)
+ {
+ if (TEST_HARD_REG_BIT (block_stack_in[block].reg_set, regno))
+ fprintf (file, "%d ", regno);
+ }
+
+ fprintf (file, "\nlive stack registers on block exit: ");
+ for (regno = FIRST_STACK_REG; regno <= LAST_STACK_REG ; regno++)
+ {
+ if (TEST_HARD_REG_BIT (block_out_reg_set[block], regno))
+ fprintf (file, "%d ", regno);
+ }
+
+ end = block_end[block];
+
+ fprintf (file, "\nJumps to blocks: ");
+ if (GET_CODE (end) == JUMP_INSN)
+ print_blocks (file, end, PATTERN (end));
+
+ if (block + 1 < blocks && block_drops_in[block+1])
+ fprintf (file, " next");
+ else if (block + 1 == blocks
+ || (GET_CODE (end) == JUMP_INSN
+ && GET_CODE (PATTERN (end)) == RETURN))
+ fprintf (file, " return");
+
+ fprintf (file, "\n");
+ }
+}
+#endif /* STACK_REGS */
diff --git a/gnu/usr.bin/cc/cc_int/regclass.c b/gnu/usr.bin/cc/cc_int/regclass.c
new file mode 100644
index 0000000..d4636d5
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/regclass.c
@@ -0,0 +1,1856 @@
+/* Compute register class preferences for pseudo-registers.
+ Copyright (C) 1987, 88, 91, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file contains two passes of the compiler: reg_scan and reg_class.
+ It also defines some tables of information about the hardware registers
+ and a function init_reg_sets to initialize the tables. */
+
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "basic-block.h"
+#include "regs.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "reload.h"
+#include "real.h"
+#include "bytecode.h"
+
+#ifndef REGISTER_MOVE_COST
+#define REGISTER_MOVE_COST(x, y) 2
+#endif
+
+#ifndef MEMORY_MOVE_COST
+#define MEMORY_MOVE_COST(x) 4
+#endif
+
+/* If we have auto-increment or auto-decrement and we can have secondary
+ reloads, we are not allowed to use classes requiring secondary reloads
+ for pseudos that are auto-incremented, since reload can't handle it. */
+
+#ifdef AUTO_INC_DEC
+#if defined(SECONDARY_INPUT_RELOAD_CLASS) || defined(SECONDARY_OUTPUT_RELOAD_CLASS)
+#define FORBIDDEN_INC_DEC_CLASSES
+#endif
+#endif
+
+/* Register tables used by many passes. */
+
+/* Indexed by hard register number, contains 1 for registers
+ that are fixed use (stack pointer, pc, frame pointer, etc.).
+ These are the registers that cannot be used to allocate
+ a pseudo reg whose life does not cross calls. */
+
+char fixed_regs[FIRST_PSEUDO_REGISTER];
+
+/* Same info as a HARD_REG_SET. */
+
+HARD_REG_SET fixed_reg_set;
+
+/* Data for initializing the above. */
+
+static char initial_fixed_regs[] = FIXED_REGISTERS;
+
+/* Indexed by hard register number, contains 1 for registers
+ that are fixed use or are clobbered by function calls.
+ These are the registers that cannot be used to allocate
+ a pseudo reg whose life crosses calls. */
+
+char call_used_regs[FIRST_PSEUDO_REGISTER];
+
+/* Same info as a HARD_REG_SET. */
+
+HARD_REG_SET call_used_reg_set;
+
+/* Data for initializing the above. */
+
+static char initial_call_used_regs[] = CALL_USED_REGISTERS;
+
+/* Indexed by hard register number, contains 1 for registers that are
+ fixed use -- i.e. in fixed_regs -- or a function value return register
+ or STRUCT_VALUE_REGNUM or STATIC_CHAIN_REGNUM. These are the
+ registers that cannot hold quantities across calls even if we are
+ willing to save and restore them. */
+
+char call_fixed_regs[FIRST_PSEUDO_REGISTER];
+
+/* The same info as a HARD_REG_SET. */
+
+HARD_REG_SET call_fixed_reg_set;
+
+/* Number of non-fixed registers. */
+
+int n_non_fixed_regs;
+
+/* Indexed by hard register number, contains 1 for registers
+ that are being used for global register decls.
+ These must be exempt from ordinary flow analysis
+ and are also considered fixed. */
+
+char global_regs[FIRST_PSEUDO_REGISTER];
+
+/* Table of register numbers in the order in which to try to use them. */
+#ifdef REG_ALLOC_ORDER
+int reg_alloc_order[FIRST_PSEUDO_REGISTER] = REG_ALLOC_ORDER;
+#endif
+
+/* For each reg class, a HARD_REG_SET saying which registers are in it. */
+
+HARD_REG_SET reg_class_contents[N_REG_CLASSES];
+
+/* The same information, but as an array of unsigned ints. We copy from
+ these unsigned ints to the table above. We do this so the tm.h files
+ do not have to be aware of the wordsize for machines with <= 64 regs. */
+
+#define N_REG_INTS \
+ ((FIRST_PSEUDO_REGISTER + (HOST_BITS_PER_INT - 1)) / HOST_BITS_PER_INT)
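+
+/* For example, with FIRST_PSEUDO_REGISTER == 64 and HOST_BITS_PER_INT == 32,
+ N_REG_INTS is (64 + 31) / 32 == 2, and each class below is then
+ initialized from two unsigned ints. */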
+
+static unsigned int_reg_class_contents[N_REG_CLASSES][N_REG_INTS]
+ = REG_CLASS_CONTENTS;
+
+/* For each reg class, number of regs it contains. */
+
+int reg_class_size[N_REG_CLASSES];
+
+/* For each reg class, table listing all the containing classes. */
+
+enum reg_class reg_class_superclasses[N_REG_CLASSES][N_REG_CLASSES];
+
+/* For each reg class, table listing all the classes contained in it. */
+
+enum reg_class reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES];
+
+/* For each pair of reg classes,
+ a largest reg class contained in their union. */
+
+enum reg_class reg_class_subunion[N_REG_CLASSES][N_REG_CLASSES];
+
+/* For each pair of reg classes,
+ the smallest reg class containing their union. */
+
+enum reg_class reg_class_superunion[N_REG_CLASSES][N_REG_CLASSES];
+
+/* Array containing all of the register names */
+
+char *reg_names[] = REGISTER_NAMES;
+
+/* For each hard register, the widest mode object that it can contain.
+ This will be a MODE_INT mode if the register can hold integers. Otherwise
+ it will be a MODE_FLOAT or a MODE_CC mode, whichever is valid for the
+ register. */
+
+enum machine_mode reg_raw_mode[FIRST_PSEUDO_REGISTER];
+
+/* Indexed by n, gives number of times (REG n) is set or clobbered.
+ This information remains valid for the rest of the compilation
+ of the current function; it is used to control register allocation.
+
+ This information applies to both hard registers and pseudo registers,
+ unlike much of the information above. */
+
+short *reg_n_sets;
+
+/* Maximum cost of moving from a register in one class to a register in
+ another class. Based on REGISTER_MOVE_COST. */
+
+static int move_cost[N_REG_CLASSES][N_REG_CLASSES];
+
+/* Similar, but here we don't have to move if the first index is a subset
+ of the second, so in that case the cost is zero. */
+
+static int may_move_cost[N_REG_CLASSES][N_REG_CLASSES];
+
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+
+/* These are the classes that regs which are auto-incremented or decremented
+ cannot be put in. */
+
+static int forbidden_inc_dec_class[N_REG_CLASSES];
+
+/* Indexed by n, is non-zero if (REG n) is used in an auto-inc or auto-dec
+ context. */
+
+static char *in_inc_dec;
+
+#endif /* FORBIDDEN_INC_DEC_CLASSES */
+
+/* Function called only once to initialize the above data on reg usage.
+ Once this is done, various switches may override. */
+
+void
+init_reg_sets ()
+{
+ register int i, j;
+
+ /* First copy the register information from the initial int form into
+ the regsets. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ CLEAR_HARD_REG_SET (reg_class_contents[i]);
+
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (int_reg_class_contents[i][j / HOST_BITS_PER_INT]
+ & ((unsigned) 1 << (j % HOST_BITS_PER_INT)))
+ SET_HARD_REG_BIT (reg_class_contents[i], j);
+ }
+
+ bcopy (initial_fixed_regs, fixed_regs, sizeof fixed_regs);
+ bcopy (initial_call_used_regs, call_used_regs, sizeof call_used_regs);
+ bzero (global_regs, sizeof global_regs);
+
+ /* Compute number of hard regs in each class. */
+
+ bzero ((char *) reg_class_size, sizeof reg_class_size);
+ for (i = 0; i < N_REG_CLASSES; i++)
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[i], j))
+ reg_class_size[i]++;
+
+ /* Initialize the table of subunions.
+ reg_class_subunion[I][J] gets the largest-numbered reg-class
+ that is contained in the union of classes I and J. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ for (j = 0; j < N_REG_CLASSES; j++)
+ {
+#ifdef HARD_REG_SET
+ register /* Declare it register if it's a scalar. */
+#endif
+ HARD_REG_SET c;
+ register int k;
+
+ COPY_HARD_REG_SET (c, reg_class_contents[i]);
+ IOR_HARD_REG_SET (c, reg_class_contents[j]);
+ for (k = 0; k < N_REG_CLASSES; k++)
+ {
+ GO_IF_HARD_REG_SUBSET (reg_class_contents[k], c,
+ subclass1);
+ continue;
+
+ subclass1:
+ /* keep the largest subclass */ /* SPEE 900308 */
+ GO_IF_HARD_REG_SUBSET (reg_class_contents[k],
+ reg_class_contents[(int) reg_class_subunion[i][j]],
+ subclass2);
+ reg_class_subunion[i][j] = (enum reg_class) k;
+ subclass2:
+ ;
+ }
+ }
+ }
+
+ /* Initialize the table of superunions.
+ reg_class_superunion[I][J] gets the smallest-numbered reg-class
+ containing the union of classes I and J. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ for (j = 0; j < N_REG_CLASSES; j++)
+ {
+#ifdef HARD_REG_SET
+ register /* Declare it register if it's a scalar. */
+#endif
+ HARD_REG_SET c;
+ register int k;
+
+ COPY_HARD_REG_SET (c, reg_class_contents[i]);
+ IOR_HARD_REG_SET (c, reg_class_contents[j]);
+ for (k = 0; k < N_REG_CLASSES; k++)
+ GO_IF_HARD_REG_SUBSET (c, reg_class_contents[k], superclass);
+
+ superclass:
+ reg_class_superunion[i][j] = (enum reg_class) k;
+ }
+ }
+
+ /* Initialize the tables of subclasses and superclasses of each reg class.
+ First clear the whole table, then add the elements as they are found. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ for (j = 0; j < N_REG_CLASSES; j++)
+ {
+ reg_class_superclasses[i][j] = LIM_REG_CLASSES;
+ reg_class_subclasses[i][j] = LIM_REG_CLASSES;
+ }
+ }
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ if (i == (int) NO_REGS)
+ continue;
+
+ for (j = i + 1; j < N_REG_CLASSES; j++)
+ {
+ enum reg_class *p;
+
+ GO_IF_HARD_REG_SUBSET (reg_class_contents[i], reg_class_contents[j],
+ subclass);
+ continue;
+ subclass:
+ /* Reg class I is a subclass of J.
+ Add J to the table of superclasses of I. */
+ p = &reg_class_superclasses[i][0];
+ while (*p != LIM_REG_CLASSES) p++;
+ *p = (enum reg_class) j;
+ /* Add I to the table of superclasses of J. */
+ p = &reg_class_subclasses[j][0];
+ while (*p != LIM_REG_CLASSES) p++;
+ *p = (enum reg_class) i;
+ }
+ }
+
+ /* Initialize the move cost table. Find every subset of each class
+ and take the maximum cost of moving any subset to any other. */
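+ /* (Taking the maximum over subclasses is conservative: a pseudo given
+ class I may be allocated to any register of any subclass of I, so the
+ recorded cost has to cover the worst placement.) */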
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ for (j = 0; j < N_REG_CLASSES; j++)
+ {
+ int cost = i == j ? 2 : REGISTER_MOVE_COST (i, j);
+ enum reg_class *p1, *p2;
+
+ for (p2 = &reg_class_subclasses[j][0]; *p2 != LIM_REG_CLASSES; p2++)
+ if (*p2 != i)
+ cost = MAX (cost, REGISTER_MOVE_COST (i, *p2));
+
+ for (p1 = &reg_class_subclasses[i][0]; *p1 != LIM_REG_CLASSES; p1++)
+ {
+ if (*p1 != j)
+ cost = MAX (cost, REGISTER_MOVE_COST (*p1, j));
+
+ for (p2 = &reg_class_subclasses[j][0];
+ *p2 != LIM_REG_CLASSES; p2++)
+ if (*p1 != *p2)
+ cost = MAX (cost, REGISTER_MOVE_COST (*p1, *p2));
+ }
+
+ move_cost[i][j] = cost;
+
+ if (reg_class_subset_p (i, j))
+ cost = 0;
+
+ may_move_cost[i][j] = cost;
+ }
+}
+
+/* After switches have been processed, which perhaps alter
+ `fixed_regs' and `call_used_regs', convert them to HARD_REG_SETs. */
+
+static void
+init_reg_sets_1 ()
+{
+ register int i;
+
+ /* This macro allows the fixed or call-used registers
+ to depend on target flags. */
+
+#ifdef CONDITIONAL_REGISTER_USAGE
+ CONDITIONAL_REGISTER_USAGE;
+#endif
+
+ /* Initialize "constant" tables. */
+
+ CLEAR_HARD_REG_SET (fixed_reg_set);
+ CLEAR_HARD_REG_SET (call_used_reg_set);
+ CLEAR_HARD_REG_SET (call_fixed_reg_set);
+
+ bcopy (fixed_regs, call_fixed_regs, sizeof call_fixed_regs);
+
+ n_non_fixed_regs = 0;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (fixed_regs[i])
+ SET_HARD_REG_BIT (fixed_reg_set, i);
+ else
+ n_non_fixed_regs++;
+
+ if (call_used_regs[i])
+ SET_HARD_REG_BIT (call_used_reg_set, i);
+ if (call_fixed_regs[i])
+ SET_HARD_REG_BIT (call_fixed_reg_set, i);
+ }
+}
+
+/* Compute the table of register modes.
+ These values are used to record death information for individual registers
+ (as opposed to a multi-register mode). */
+
+static void
+init_reg_modes ()
+{
+ register int i;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ reg_raw_mode[i] = choose_hard_reg_mode (i, 1);
+
+ /* If we couldn't find a valid mode, fall back to `word_mode'.
+ ??? We assume `word_mode' has already been initialized.
+ ??? One situation in which we need to do this is on the mips where
+ HARD_REGNO_NREGS (fpreg, [SD]Fmode) returns 2. Ideally we'd like
+ to use DF mode for the even registers and VOIDmode for the odd
+ (for the cpu models where the odd ones are inaccessible). */
+ if (reg_raw_mode[i] == VOIDmode)
+ reg_raw_mode[i] = word_mode;
+ }
+}
+
+/* Finish initializing the register sets and
+ initialize the register modes. */
+
+void
+init_regs ()
+{
+ /* This finishes what was started by init_reg_sets, but couldn't be done
+ until after register usage was specified. */
+ if (!output_bytecode)
+ init_reg_sets_1 ();
+
+ init_reg_modes ();
+}
+
+/* Return a machine mode that is legitimate for hard reg REGNO and large
+ enough to save nregs. If we can't find one, return VOIDmode. */
+
+enum machine_mode
+choose_hard_reg_mode (regno, nregs)
+ int regno;
+ int nregs;
+{
+ enum machine_mode found_mode = VOIDmode, mode;
+
+ /* We first look for the largest integer mode that can be validly
+ held in REGNO. If none, we look for the largest floating-point mode.
+ If we still didn't find a valid mode, try CCmode. */
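+ /* (On a typical 32-bit target, for example, a request for nregs == 1
+ would settle on SImode, and nregs == 2 on DImode or, failing that,
+ DFmode.) */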
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (HARD_REGNO_NREGS (regno, mode) == nregs
+ && HARD_REGNO_MODE_OK (regno, mode))
+ found_mode = mode;
+
+ if (found_mode != VOIDmode)
+ return found_mode;
+
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (HARD_REGNO_NREGS (regno, mode) == nregs
+ && HARD_REGNO_MODE_OK (regno, mode))
+ found_mode = mode;
+
+ if (found_mode != VOIDmode)
+ return found_mode;
+
+ if (HARD_REGNO_NREGS (regno, CCmode) == nregs
+ && HARD_REGNO_MODE_OK (regno, CCmode))
+ return CCmode;
+
+ /* We can't find a mode valid for this register. */
+ return VOIDmode;
+}
+
+/* Specify the usage characteristics of the register named NAME.
+ It should be a fixed register if FIXED and a
+ call-used register if CALL_USED. */
+
+void
+fix_register (name, fixed, call_used)
+ char *name;
+ int fixed, call_used;
+{
+ int i;
+
+ if (output_bytecode)
+ {
+ warning ("request to mark `%s' as %s ignored by bytecode compiler",
+ name, call_used ? "call-used" : "fixed");
+ return;
+ }
+
+ /* Decode the name and update the primary form of
+ the register info. */
+
+ if ((i = decode_reg_name (name)) >= 0)
+ {
+ fixed_regs[i] = fixed;
+ call_used_regs[i] = call_used;
+ }
+ else
+ {
+ warning ("unknown register name: %s", name);
+ }
+}
+
+/* Mark register number I as global. */
+
+void
+globalize_reg (i)
+ int i;
+{
+ if (global_regs[i])
+ {
+ warning ("register used for two global register variables");
+ return;
+ }
+
+ if (call_used_regs[i] && ! fixed_regs[i])
+ warning ("call-clobbered register used for global register variable");
+
+ global_regs[i] = 1;
+
+ /* If already fixed, nothing else to do. */
+ if (fixed_regs[i])
+ return;
+
+ fixed_regs[i] = call_used_regs[i] = call_fixed_regs[i] = 1;
+ n_non_fixed_regs--;
+
+ SET_HARD_REG_BIT (fixed_reg_set, i);
+ SET_HARD_REG_BIT (call_used_reg_set, i);
+ SET_HARD_REG_BIT (call_fixed_reg_set, i);
+}
+
+/* Now the data and code for the `regclass' pass, which happens
+ just before local-alloc. */
+
+/* The `costs' struct records the cost of using a hard register of each class
+ and of using memory for each pseudo. We use this data to set up
+ register class preferences. */
+
+struct costs
+{
+ int cost[N_REG_CLASSES];
+ int mem_cost;
+};
+
+/* Record the cost of each class for each pseudo. */
+
+static struct costs *costs;
+
+/* Record the same data by operand number, accumulated for each alternative
+ in an insn. The contribution to a pseudo is that of the minimum-cost
+ alternative. */
+
+static struct costs op_costs[MAX_RECOG_OPERANDS];
+
+/* (enum reg_class) prefclass[R] is the preferred class for pseudo number R.
+ This is available after `regclass' is run. */
+
+static char *prefclass;
+
+/* altclass[R] is a register class that we should use for allocating
+ pseudo number R if no register in the preferred class is available.
+ If no register in this class is available, memory is preferred.
+
+ It might appear to be more general to have a bitmask of classes here,
+ but since it is recommended that there be a class corresponding to the
+ union of most major pairs of classes, that generality is not required.
+
+ This is available after `regclass' is run. */
+
+static char *altclass;
+
+/* Record the depth of loops that we are in. */
+
+static int loop_depth;
+
+/* Account for the fact that insns within a loop are executed very commonly,
+ but don't keep doing this as loops go too deep. */
+
+static int loop_cost;
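+
+/* Concretely, regclass () below sets loop_cost to 1 << (2 * MIN (depth, 5)),
+ so the multiplier runs 1, 4, 16, 64, 256 and is capped at 1024 for loops
+ nested five or more deep. */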
+
+static void record_reg_classes PROTO((int, int, rtx *, enum machine_mode *,
+ char **, rtx));
+static int copy_cost PROTO((rtx, enum machine_mode,
+ enum reg_class, int));
+static void record_address_regs PROTO((rtx, enum reg_class, int));
+static int auto_inc_dec_reg_p PROTO((rtx, enum machine_mode));
+static void reg_scan_mark_refs PROTO((rtx, rtx, int));
+
+/* Return the reg_class in which pseudo reg number REGNO is best allocated.
+ This function is sometimes called before the info has been computed.
+ When that happens, just return GENERAL_REGS, which is innocuous. */
+
+enum reg_class
+reg_preferred_class (regno)
+ int regno;
+{
+ if (prefclass == 0)
+ return GENERAL_REGS;
+ return (enum reg_class) prefclass[regno];
+}
+
+enum reg_class
+reg_alternate_class (regno)
+ int regno;
+{
+ if (prefclass == 0)
+ return ALL_REGS;
+
+ return (enum reg_class) altclass[regno];
+}
+
+/* This prevents dump_flow_info from losing if called
+ before regclass is run. */
+
+void
+regclass_init ()
+{
+ prefclass = 0;
+}
+
+/* This is a pass of the compiler that scans all instructions
+ and calculates the preferred class for each pseudo-register.
+ This information can be accessed later by calling `reg_preferred_class'.
+ This pass comes just before local register allocation. */
+
+void
+regclass (f, nregs)
+ rtx f;
+ int nregs;
+{
+#ifdef REGISTER_CONSTRAINTS
+ register rtx insn;
+ register int i, j;
+ struct costs init_cost;
+ rtx set;
+ int pass;
+
+ init_recog ();
+
+ costs = (struct costs *) alloca (nregs * sizeof (struct costs));
+
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+
+ in_inc_dec = (char *) alloca (nregs);
+
+ /* Initialize information about which register classes can be used for
+ pseudos that are auto-incremented or auto-decremented. It would
+ seem better to put this in init_reg_sets, but we need to be able
+ to allocate rtx, which we can't do that early. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ rtx r = gen_rtx (REG, VOIDmode, 0);
+ enum machine_mode m;
+
+ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[i], j))
+ {
+ REGNO (r) = j;
+
+ for (m = VOIDmode; (int) m < (int) MAX_MACHINE_MODE;
+ m = (enum machine_mode) ((int) m + 1))
+ if (HARD_REGNO_MODE_OK (j, m))
+ {
+ PUT_MODE (r, m);
+
+ /* If a register is not directly suitable for an
+ auto-increment or decrement addressing mode and
+ requires secondary reloads, disallow its class from
+ being used in such addresses. */
+
+ if ((0
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ || (SECONDARY_INPUT_RELOAD_CLASS (BASE_REG_CLASS, m, r)
+ != NO_REGS)
+#endif
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+ || (SECONDARY_OUTPUT_RELOAD_CLASS (BASE_REG_CLASS, m, r)
+ != NO_REGS)
+#endif
+ )
+ && ! auto_inc_dec_reg_p (r, m))
+ forbidden_inc_dec_class[i] = 1;
+ }
+ }
+ }
+#endif /* FORBIDDEN_INC_DEC_CLASSES */
+
+ init_cost.mem_cost = 10000;
+ for (i = 0; i < N_REG_CLASSES; i++)
+ init_cost.cost[i] = 10000;
+
+ /* Normally we scan the insns once and determine the best class to use for
+ each register. However, if -fexpensive-optimizations is on, we do so
+ twice, the second time using the tentative best classes to guide the
+ selection. */
+
+ for (pass = 0; pass <= flag_expensive_optimizations; pass++)
+ {
+ /* Zero out our accumulation of the cost of each class for each reg. */
+
+ bzero ((char *) costs, nregs * sizeof (struct costs));
+
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+ bzero (in_inc_dec, nregs);
+#endif
+
+ loop_depth = 0, loop_cost = 1;
+
+ /* Scan the instructions and record each time it would
+ save code to put a certain register in a certain class. */
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ {
+ char *constraints[MAX_RECOG_OPERANDS];
+ enum machine_mode modes[MAX_RECOG_OPERANDS];
+ int nalternatives;
+ int noperands;
+
+ /* Show that an insn inside a loop is likely to be executed about four
+ times more, per level of nesting, than insns outside a loop (see the
+ loop_cost computation just below). This is much more aggressive than
+ the assumptions made elsewhere and is being tried as an experiment. */
+
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ loop_depth++, loop_cost = 1 << (2 * MIN (loop_depth, 5));
+ else if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ loop_depth--, loop_cost = 1 << (2 * MIN (loop_depth, 5));
+
+ else if ((GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER
+ && GET_CODE (PATTERN (insn)) != ASM_INPUT)
+ || (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
+ || GET_CODE (insn) == CALL_INSN)
+ {
+ if (GET_CODE (insn) == INSN
+ && (noperands = asm_noperands (PATTERN (insn))) >= 0)
+ {
+ decode_asm_operands (PATTERN (insn), recog_operand, NULL_PTR,
+ constraints, modes);
+ nalternatives = (noperands == 0 ? 0
+ : n_occurrences (',', constraints[0]) + 1);
+ }
+ else
+ {
+ int insn_code_number = recog_memoized (insn);
+ rtx note;
+
+ set = single_set (insn);
+ insn_extract (insn);
+
+ nalternatives = insn_n_alternatives[insn_code_number];
+ noperands = insn_n_operands[insn_code_number];
+
+ /* If this insn loads a parameter from its stack slot, then
+ it represents a savings, rather than a cost, if the
+ parameter is stored in memory. Record this fact. */
+
+ if (set != 0 && GET_CODE (SET_DEST (set)) == REG
+ && GET_CODE (SET_SRC (set)) == MEM
+ && (note = find_reg_note (insn, REG_EQUIV,
+ NULL_RTX)) != 0
+ && GET_CODE (XEXP (note, 0)) == MEM)
+ {
+ costs[REGNO (SET_DEST (set))].mem_cost
+ -= (MEMORY_MOVE_COST (GET_MODE (SET_DEST (set)))
+ * loop_cost);
+ record_address_regs (XEXP (SET_SRC (set), 0),
+ BASE_REG_CLASS, loop_cost * 2);
+ continue;
+ }
+
+ /* Improve handling of two-address insns such as
+ (set X (ashift CONST Y)) where CONST must be made to
+ match X. Change it into two insns: (set X CONST)
+ (set X (ashift X Y)). If we left this for reloading, it
+ would probably get three insns because X and Y might go
+ in the same place. This prevents X and Y from receiving
+ the same hard reg.
+
+ We can only do this if the modes of operands 0 and 1
+ (which might not be the same) are tieable and we only need
+ do this during our first pass. */
+
+ if (pass == 0 && optimize
+ && noperands >= 3
+ && insn_operand_constraint[insn_code_number][1][0] == '0'
+ && insn_operand_constraint[insn_code_number][1][1] == 0
+ && CONSTANT_P (recog_operand[1])
+ && ! rtx_equal_p (recog_operand[0], recog_operand[1])
+ && ! rtx_equal_p (recog_operand[0], recog_operand[2])
+ && GET_CODE (recog_operand[0]) == REG
+ && MODES_TIEABLE_P (GET_MODE (recog_operand[0]),
+ insn_operand_mode[insn_code_number][1]))
+ {
+ rtx previnsn = prev_real_insn (insn);
+ rtx dest
+ = gen_lowpart (insn_operand_mode[insn_code_number][1],
+ recog_operand[0]);
+ rtx newinsn
+ = emit_insn_before (gen_move_insn (dest,
+ recog_operand[1]),
+ insn);
+
+ /* If this insn was the start of a basic block,
+ include the new insn in that block.
+ We need not check for code_label here;
+ while a basic block can start with a code_label,
+ INSN could not be at the beginning of that block. */
+ if (previnsn == 0 || GET_CODE (previnsn) == JUMP_INSN)
+ {
+ int b;
+ for (b = 0; b < n_basic_blocks; b++)
+ if (insn == basic_block_head[b])
+ basic_block_head[b] = newinsn;
+ }
+
+ /* This makes one more setting of the new insn's dest. */
+ reg_n_sets[REGNO (recog_operand[0])]++;
+
+ *recog_operand_loc[1] = recog_operand[0];
+ for (i = insn_n_dups[insn_code_number] - 1; i >= 0; i--)
+ if (recog_dup_num[i] == 1)
+ *recog_dup_loc[i] = recog_operand[0];
+
+ insn = PREV_INSN (newinsn);
+ continue;
+ }
+
+ for (i = 0; i < noperands; i++)
+ {
+ constraints[i]
+ = insn_operand_constraint[insn_code_number][i];
+ modes[i] = insn_operand_mode[insn_code_number][i];
+ }
+ }
+
+ /* If we get here, we are set up to record the costs of all the
+ operands for this insn. Start by initializing the costs.
+ Then handle any address registers. Finally record the desired
+ classes for any pseudos, doing it twice if some pair of
+ operands are commutative. */
+
+ for (i = 0; i < noperands; i++)
+ {
+ op_costs[i] = init_cost;
+
+ if (GET_CODE (recog_operand[i]) == SUBREG)
+ recog_operand[i] = SUBREG_REG (recog_operand[i]);
+
+ if (GET_CODE (recog_operand[i]) == MEM)
+ record_address_regs (XEXP (recog_operand[i], 0),
+ BASE_REG_CLASS, loop_cost * 2);
+ else if (constraints[i][0] == 'p')
+ record_address_regs (recog_operand[i],
+ BASE_REG_CLASS, loop_cost * 2);
+ }
+
+ /* Check for commutative operands in a separate loop so that everything
+ will have been initialized first. We must do this even if one operand
+ is a constant--see addsi3 in m68k.md. */
+
+ for (i = 0; i < noperands - 1; i++)
+ if (constraints[i][0] == '%')
+ {
+ char *xconstraints[MAX_RECOG_OPERANDS];
+ int j;
+
+ /* Handle commutative operands by swapping the constraints.
+ We assume the modes are the same. */
+
+ for (j = 0; j < noperands; j++)
+ xconstraints[j] = constraints[j];
+
+ xconstraints[i] = constraints[i+1];
+ xconstraints[i+1] = constraints[i];
+ record_reg_classes (nalternatives, noperands,
+ recog_operand, modes, xconstraints,
+ insn);
+ }
+
+ record_reg_classes (nalternatives, noperands, recog_operand,
+ modes, constraints, insn);
+
+ /* Now add the cost for each operand to the total costs for
+ its register. */
+
+ for (i = 0; i < noperands; i++)
+ if (GET_CODE (recog_operand[i]) == REG
+ && REGNO (recog_operand[i]) >= FIRST_PSEUDO_REGISTER)
+ {
+ int regno = REGNO (recog_operand[i]);
+ struct costs *p = &costs[regno], *q = &op_costs[i];
+
+ p->mem_cost += q->mem_cost * loop_cost;
+ for (j = 0; j < N_REG_CLASSES; j++)
+ p->cost[j] += q->cost[j] * loop_cost;
+ }
+ }
+ }
+
+ /* Now for each register look at how desirable each class is
+ and find which class is preferred. Store that in
+ `prefclass[REGNO]'. Record in `altclass[REGNO]' the largest register
+ class any of whose registers is better than memory. */
+
+ if (pass == 0)
+ {
+ prefclass = (char *) oballoc (nregs);
+ altclass = (char *) oballoc (nregs);
+ }
+
+ for (i = FIRST_PSEUDO_REGISTER; i < nregs; i++)
+ {
+ register int best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
+ enum reg_class best = ALL_REGS, alt = NO_REGS;
+ /* This is an enum reg_class, but we call it an int
+ to save lots of casts. */
+ register int class;
+ register struct costs *p = &costs[i];
+
+ for (class = (int) ALL_REGS - 1; class > 0; class--)
+ {
+ /* Ignore classes that are too small for this operand or
+ invalid for an operand that was auto-incremented. */
+ if (CLASS_MAX_NREGS (class, PSEUDO_REGNO_MODE (i))
+ > reg_class_size[class]
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+ || (in_inc_dec[i] && forbidden_inc_dec_class[class])
+#endif
+ )
+ ;
+ else if (p->cost[class] < best_cost)
+ {
+ best_cost = p->cost[class];
+ best = (enum reg_class) class;
+ }
+ else if (p->cost[class] == best_cost)
+ best = reg_class_subunion[(int)best][class];
+ }
+
+ /* Record the alternate register class; i.e., a class for which
+ every register in it is better than using memory. If adding a
+ class would make a smaller class (i.e., no union of just those
+ classes exists), skip that class. The major unions of classes
+ should be provided as a register class. Don't do this if we
+ will be doing it again later. */
+
+ if (pass == 1 || ! flag_expensive_optimizations)
+ for (class = 0; class < N_REG_CLASSES; class++)
+ if (p->cost[class] < p->mem_cost
+ && (reg_class_size[(int) reg_class_subunion[(int) alt][class]]
+ > reg_class_size[(int) alt])
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+ && ! (in_inc_dec[i] && forbidden_inc_dec_class[class])
+#endif
+ )
+ alt = reg_class_subunion[(int) alt][class];
+
+ /* If we don't add any classes, nothing to try. */
+ if (alt == best)
+ alt = (int) NO_REGS;
+
+ /* We cast to (int) because (char) hits bugs in some compilers. */
+ prefclass[i] = (int) best;
+ altclass[i] = (int) alt;
+ }
+ }
+#endif /* REGISTER_CONSTRAINTS */
+}
+
+#ifdef REGISTER_CONSTRAINTS
+
+/* Record the cost of using memory or registers of various classes for
+ the operands in INSN.
+
+ N_ALTS is the number of alternatives.
+
+ N_OPS is the number of operands.
+
+ OPS is an array of the operands.
+
+ MODES are the modes of the operands, in case any are VOIDmode.
+
+ CONSTRAINTS are the constraints to use for the operands. This array
+ is modified by this procedure.
+
+ This procedure works alternative by alternative. For each alternative
+ we assume that we will be able to allocate all pseudos to their ideal
+ register class and calculate the cost of using that alternative. Then
+ we compute for each operand that is a pseudo-register, the cost of
+ having the pseudo allocated to each register class and using it in that
+ alternative. To this cost is added the cost of the alternative.
+
+ The cost of each class for this insn is its lowest cost among all the
+ alternatives. */
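+
+/* For example, a pseudo operand whose constraint is "rm" yields, for that
+ alternative, class costs may_move_cost[class][GENERAL_REGS] and memory
+ cost MEMORY_MOVE_COST (mode) - 1, the -1 reflecting that no separate
+ load insn is needed when memory is allowed directly. */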
+
+static void
+record_reg_classes (n_alts, n_ops, ops, modes, constraints, insn)
+ int n_alts;
+ int n_ops;
+ rtx *ops;
+ enum machine_mode *modes;
+ char **constraints;
+ rtx insn;
+{
+ int alt;
+ enum op_type {OP_READ, OP_WRITE, OP_READ_WRITE} op_types[MAX_RECOG_OPERANDS];
+ int i, j;
+
+ /* By default, each operand is an input operand. */
+
+ for (i = 0; i < n_ops; i++)
+ op_types[i] = OP_READ;
+
+ /* Process each alternative, each time minimizing an operand's cost with
+ the cost for each operand in that alternative. */
+
+ for (alt = 0; alt < n_alts; alt++)
+ {
+ struct costs this_op_costs[MAX_RECOG_OPERANDS];
+ int alt_fail = 0;
+ int alt_cost = 0;
+ enum reg_class classes[MAX_RECOG_OPERANDS];
+ int class;
+
+ for (i = 0; i < n_ops; i++)
+ {
+ char *p = constraints[i];
+ rtx op = ops[i];
+ enum machine_mode mode = modes[i];
+ int allows_mem = 0;
+ int win = 0;
+ char c;
+
+ /* If this operand has no constraints at all, we can conclude
+ nothing about it since anything is valid. */
+
+ if (*p == 0)
+ {
+ if (GET_CODE (op) == REG && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ bzero ((char *) &this_op_costs[i], sizeof this_op_costs[i]);
+
+ continue;
+ }
+
+ if (*p == '%')
+ p++;
+
+ /* If this alternative is only relevant when this operand
+ matches a previous operand, we do different things depending
+ on whether this operand is a pseudo-reg or not. */
+
+ if (p[0] >= '0' && p[0] <= '0' + i && (p[1] == ',' || p[1] == 0))
+ {
+ j = p[0] - '0';
+ classes[i] = classes[j];
+
+ if (GET_CODE (op) != REG || REGNO (op) < FIRST_PSEUDO_REGISTER)
+ {
+ /* If this matches the other operand, we have no added
+ cost and we win. */
+ if (rtx_equal_p (ops[j], op))
+ win = 1;
+
+ /* If we can put the other operand into a register, add to
+ the cost of this alternative the cost to copy this
+ operand to the register used for the other operand. */
+
+ else if (classes[j] != NO_REGS)
+ alt_cost += copy_cost (op, mode, classes[j], 1), win = 1;
+ }
+ else if (GET_CODE (ops[j]) != REG
+ || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
+ {
+ /* This op is a pseudo but the one it matches is not. */
+
+ /* If we can't put the other operand into a register, this
+ alternative can't be used. */
+
+ if (classes[j] == NO_REGS)
+ alt_fail = 1;
+
+ /* Otherwise, add to the cost of this alternative the cost
+ to copy the other operand to the register used for this
+ operand. */
+
+ else
+ alt_cost += copy_cost (ops[j], mode, classes[j], 1);
+ }
+ else
+ {
+ /* The costs of this operand are the same as that of the
+ other operand. However, if we cannot tie them, this
+ alternative needs to do a copy, which is one
+ instruction. */
+
+ this_op_costs[i] = this_op_costs[j];
+ if (REGNO (ops[i]) != REGNO (ops[j])
+ && ! find_reg_note (insn, REG_DEAD, op))
+ alt_cost += 2;
+
+ /* This is in place of ordinary cost computation
+ for this operand, so skip to the end of the
+ alternative (should be just one character). */
+ while (*p && *p++ != ',')
+ ;
+
+ constraints[i] = p;
+ continue;
+ }
+ }
+
+ /* Scan all the constraint letters. See if the operand matches
+ any of the constraints. Collect the valid register classes
+ and see if this operand accepts memory. */
+
+ classes[i] = NO_REGS;
+ while (*p && (c = *p++) != ',')
+ switch (c)
+ {
+ case '=':
+ op_types[i] = OP_WRITE;
+ break;
+
+ case '+':
+ op_types[i] = OP_READ_WRITE;
+ break;
+
+ case '*':
+ /* Ignore the next letter for this pass. */
+ p++;
+ break;
+
+ case '%':
+ case '?': case '!': case '#':
+ case '&':
+ case '0': case '1': case '2': case '3': case '4':
+ case 'p':
+ break;
+
+ case 'm': case 'o': case 'V':
+ /* It doesn't seem worth distinguishing between offsettable
+ and non-offsettable addresses here. */
+ allows_mem = 1;
+ if (GET_CODE (op) == MEM)
+ win = 1;
+ break;
+
+ case '<':
+ if (GET_CODE (op) == MEM
+ && (GET_CODE (XEXP (op, 0)) == PRE_DEC
+ || GET_CODE (XEXP (op, 0)) == POST_DEC))
+ win = 1;
+ break;
+
+ case '>':
+ if (GET_CODE (op) == MEM
+ && (GET_CODE (XEXP (op, 0)) == PRE_INC
+ || GET_CODE (XEXP (op, 0)) == POST_INC))
+ win = 1;
+ break;
+
+ case 'E':
+ /* Match any floating double constant, but only if
+ we can examine the bits of it reliably. */
+ if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
+ || HOST_BITS_PER_WIDE_INT != BITS_PER_WORD)
+ && GET_MODE (op) != VOIDmode && ! flag_pretend_float)
+ break;
+ if (GET_CODE (op) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'F':
+ if (GET_CODE (op) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'G':
+ case 'H':
+ if (GET_CODE (op) == CONST_DOUBLE
+ && CONST_DOUBLE_OK_FOR_LETTER_P (op, c))
+ win = 1;
+ break;
+
+ case 's':
+ if (GET_CODE (op) == CONST_INT
+ || (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE (op) == VOIDmode))
+ break;
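+ /* ... fall through ... */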
+ case 'i':
+ if (CONSTANT_P (op)
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))
+#endif
+ )
+ win = 1;
+ break;
+
+ case 'n':
+ if (GET_CODE (op) == CONST_INT
+ || (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE (op) == VOIDmode))
+ win = 1;
+ break;
+
+ case 'I':
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ case 'N':
+ case 'O':
+ case 'P':
+ if (GET_CODE (op) == CONST_INT
+ && CONST_OK_FOR_LETTER_P (INTVAL (op), c))
+ win = 1;
+ break;
+
+ case 'X':
+ win = 1;
+ break;
+
+#ifdef EXTRA_CONSTRAINT
+ case 'Q':
+ case 'R':
+ case 'S':
+ case 'T':
+ case 'U':
+ if (EXTRA_CONSTRAINT (op, c))
+ win = 1;
+ break;
+#endif
+
+ case 'g':
+ if (GET_CODE (op) == MEM
+ || (CONSTANT_P (op)
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))
+#endif
+ ))
+ win = 1;
+ allows_mem = 1;
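+ /* ... fall through ... */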
+ case 'r':
+ classes[i]
+ = reg_class_subunion[(int) classes[i]][(int) GENERAL_REGS];
+ break;
+
+ default:
+ classes[i]
+ = reg_class_subunion[(int) classes[i]]
+ [(int) REG_CLASS_FROM_LETTER (c)];
+ }
+
+ constraints[i] = p;
+
+ /* How we account for this operand now depends on whether it is a
+ pseudo register or not. If it is, we first check if any
+ register classes are valid. If not, we ignore this alternative,
+ since we want to assume that all pseudos get allocated for
+ register preferencing. If some register class is valid, compute
+ the costs of moving the pseudo into that class. */
+
+ if (GET_CODE (op) == REG && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+ {
+ if (classes[i] == NO_REGS)
+ alt_fail = 1;
+ else
+ {
+ struct costs *pp = &this_op_costs[i];
+
+ for (class = 0; class < N_REG_CLASSES; class++)
+ pp->cost[class] = may_move_cost[class][(int) classes[i]];
+
+ /* If the alternative actually allows memory, make things
+ a bit cheaper since we won't need an extra insn to
+ load it. */
+
+ pp->mem_cost = MEMORY_MOVE_COST (mode) - allows_mem;
+
+ /* If we have assigned a class to this register in our
+ first pass, add a cost to this alternative corresponding
+ to what we would add if this register were not in the
+ appropriate class. */
+
+ if (prefclass)
+ alt_cost
+ += may_move_cost[prefclass[REGNO (op)]][(int) classes[i]];
+ }
+ }
+
+ /* Otherwise, if this alternative wins, either because we have
+ already determined that or because we have a hard register of
+ the proper class, there is no cost for this alternative. */
+
+ else if (win
+ || (GET_CODE (op) == REG
+ && reg_fits_class_p (op, classes[i], 0, GET_MODE (op))))
+ ;
+
+ /* If registers are valid, the cost of this alternative includes
+ copying the object to and/or from a register. */
+
+ else if (classes[i] != NO_REGS)
+ {
+ if (op_types[i] != OP_WRITE)
+ alt_cost += copy_cost (op, mode, classes[i], 1);
+
+ if (op_types[i] != OP_READ)
+ alt_cost += copy_cost (op, mode, classes[i], 0);
+ }
+
+ /* The only other way this alternative can be used is if this is a
+ constant that could be placed into memory. */
+
+ else if (CONSTANT_P (op) && allows_mem)
+ alt_cost += MEMORY_MOVE_COST (mode);
+ else
+ alt_fail = 1;
+ }
+
+ if (alt_fail)
+ continue;
+
+ /* Finally, update the costs with the information we've calculated
+ about this alternative. */
+
+ for (i = 0; i < n_ops; i++)
+ if (GET_CODE (ops[i]) == REG
+ && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
+ {
+ struct costs *pp = &op_costs[i], *qq = &this_op_costs[i];
+ int scale = 1 + (op_types[i] == OP_READ_WRITE);
+
+ pp->mem_cost = MIN (pp->mem_cost,
+ (qq->mem_cost + alt_cost) * scale);
+
+ for (class = 0; class < N_REG_CLASSES; class++)
+ pp->cost[class] = MIN (pp->cost[class],
+ (qq->cost[class] + alt_cost) * scale);
+ }
+ }
+}
+
+/* Compute the cost of loading X into (if TO_P is non-zero) or from (if
+ TO_P is zero) a register of class CLASS in mode MODE.
+
+ X must not be a pseudo. */
+
+static int
+copy_cost (x, mode, class, to_p)
+ rtx x;
+ enum machine_mode mode;
+ enum reg_class class;
+ int to_p;
+{
+ enum reg_class secondary_class = NO_REGS;
+
+ /* If X is a SCRATCH, there is actually nothing to move since we are
+ assuming optimal allocation. */
+
+ if (GET_CODE (x) == SCRATCH)
+ return 0;
+
+ /* Get the class we will actually use for a reload. */
+ class = PREFERRED_RELOAD_CLASS (x, class);
+
+#ifdef HAVE_SECONDARY_RELOADS
+ /* If we need a secondary reload (we assume here that we are using
+ the secondary reload as an intermediate, not a scratch register), the
+ cost is that of loading the input into the intermediate register plus
+ that of the copy out of it. We use a special value of TO_P to avoid
+ infinite recursion. */
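+
+ /* Concretely, TO_P is 1 for an input copy and 0 for an output copy; the
+ recursive call below passes 2, which matches neither test, so a second
+ secondary-reload lookup cannot occur. */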
+
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ if (to_p == 1)
+ secondary_class = SECONDARY_INPUT_RELOAD_CLASS (class, mode, x);
+#endif
+
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+ if (! to_p)
+ secondary_class = SECONDARY_OUTPUT_RELOAD_CLASS (class, mode, x);
+#endif
+
+ if (secondary_class != NO_REGS)
+ return (move_cost[(int) secondary_class][(int) class]
+ + copy_cost (x, mode, secondary_class, 2));
+#endif /* HAVE_SECONDARY_RELOADS */
+
+ /* For memory, use the memory move cost, for (hard) registers, use the
+ cost to move between the register classes, and use 2 for everything
+ else (constants). */
+
+ if (GET_CODE (x) == MEM || class == NO_REGS)
+ return MEMORY_MOVE_COST (mode);
+
+ else if (GET_CODE (x) == REG)
+ return move_cost[(int) REGNO_REG_CLASS (REGNO (x))][(int) class];
+
+ else
+ /* If this is a constant, we may eventually want to call rtx_cost here. */
+ return 2;
+}
+
+/* Record the pseudo registers we must reload into hard registers
+ in a subexpression of a memory address, X.
+
+ CLASS is the class that the register needs to be in and is either
+ BASE_REG_CLASS or INDEX_REG_CLASS.
+
+ SCALE is twice the amount to multiply the cost by (it is twice so we
+ can represent half-cost adjustments). */
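+
+/* For instance, the callers in this file pass loop_cost * 2 for full
+ weight, and the ambiguous two-register PLUS case below recurses with
+ scale / 2 so that each candidate register gets half the weight. */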
+
+static void
+record_address_regs (x, class, scale)
+ rtx x;
+ enum reg_class class;
+ int scale;
+{
+ register enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST:
+ case CC0:
+ case PC:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return;
+
+ case PLUS:
+ /* When we have an address that is a sum,
+ we must determine whether registers are "base" or "index" regs.
+ If there is a sum of two registers, we must choose one to be
+ the "base". Luckily, we can use the REGNO_POINTER_FLAG
+ to make a good choice most of the time. We only need to do this
+ on machines that can have two registers in an address and where
+ the base and index register classes are different.
+
+ ??? This code used to set REGNO_POINTER_FLAG in some cases, but
+ that seems bogus since it should only be set when we are sure
+ the register is being used as a pointer. */
+
+ {
+ rtx arg0 = XEXP (x, 0);
+ rtx arg1 = XEXP (x, 1);
+ register enum rtx_code code0 = GET_CODE (arg0);
+ register enum rtx_code code1 = GET_CODE (arg1);
+
+ /* Look inside subregs. */
+ if (code0 == SUBREG)
+ arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
+ if (code1 == SUBREG)
+ arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);
+
+ /* If this machine only allows one register per address, it must
+ be in the first operand. */
+
+ if (MAX_REGS_PER_ADDRESS == 1)
+ record_address_regs (arg0, class, scale);
+
+ /* If index and base registers are the same on this machine, just
+ record registers in any non-constant operands. We assume here,
+ as well as in the tests below, that all addresses are in
+ canonical form. */
+
+ else if (INDEX_REG_CLASS == BASE_REG_CLASS)
+ {
+ record_address_regs (arg0, class, scale);
+ if (! CONSTANT_P (arg1))
+ record_address_regs (arg1, class, scale);
+ }
+
+ /* If the second operand is a constant integer, it doesn't change
+ what class the first operand must be. */
+
+ else if (code1 == CONST_INT || code1 == CONST_DOUBLE)
+ record_address_regs (arg0, class, scale);
+
+ /* If the second operand is a symbolic constant, the first operand
+ must be an index register. */
+
+ else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
+ record_address_regs (arg0, INDEX_REG_CLASS, scale);
+
+ /* If this is the sum of two registers where the first is known to be a
+ pointer, it must be a base register with the second an index. */
+
+ else if (code0 == REG && code1 == REG
+ && REGNO_POINTER_FLAG (REGNO (arg0)))
+ {
+ record_address_regs (arg0, BASE_REG_CLASS, scale);
+ record_address_regs (arg1, INDEX_REG_CLASS, scale);
+ }
+
+ /* If this is the sum of two registers and neither is known to
+ be a pointer, count equal chances that each might be a base
+ or index register. This case should be rare. */
+
+ else if (code0 == REG && code1 == REG
+ && ! REGNO_POINTER_FLAG (REGNO (arg0))
+ && ! REGNO_POINTER_FLAG (REGNO (arg1)))
+ {
+ record_address_regs (arg0, BASE_REG_CLASS, scale / 2);
+ record_address_regs (arg0, INDEX_REG_CLASS, scale / 2);
+ record_address_regs (arg1, BASE_REG_CLASS, scale / 2);
+ record_address_regs (arg1, INDEX_REG_CLASS, scale / 2);
+ }
+
+ /* In all other cases, the first operand is an index and the
+ second is the base. */
+
+ else
+ {
+ record_address_regs (arg0, INDEX_REG_CLASS, scale);
+ record_address_regs (arg1, BASE_REG_CLASS, scale);
+ }
+ }
+ break;
+
+ case POST_INC:
+ case PRE_INC:
+ case POST_DEC:
+ case PRE_DEC:
+ /* Double the importance of a pseudo register that is incremented
+ or decremented, since it would take two extra insns
+ if it ends up in the wrong place. If the operand is a pseudo,
+ show it is being used in an INC_DEC context. */
+
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+ if (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) >= FIRST_PSEUDO_REGISTER)
+ in_inc_dec[REGNO (XEXP (x, 0))] = 1;
+#endif
+
+ record_address_regs (XEXP (x, 0), class, 2 * scale);
+ break;
+
+ case REG:
+ {
+ register struct costs *pp = &costs[REGNO (x)];
+ register int i;
+
+ pp->mem_cost += (MEMORY_MOVE_COST (Pmode) * scale) / 2;
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ pp->cost[i] += (may_move_cost[i][(int) class] * scale) / 2;
+ }
+ break;
+
+ default:
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ record_address_regs (XEXP (x, i), class, scale);
+ }
+ }
+}
+
+#ifdef FORBIDDEN_INC_DEC_CLASSES
+
+/* Return 1 if REG is valid as an auto-increment memory reference
+ to an object of MODE. */
+
+static int
+auto_inc_dec_reg_p (reg, mode)
+ rtx reg;
+ enum machine_mode mode;
+{
+#ifdef HAVE_POST_INCREMENT
+ if (memory_address_p (mode, gen_rtx (POST_INC, Pmode, reg)))
+ return 1;
+#endif
+
+#ifdef HAVE_POST_DECREMENT
+ if (memory_address_p (mode, gen_rtx (POST_DEC, Pmode, reg)))
+ return 1;
+#endif
+
+#ifdef HAVE_PRE_INCREMENT
+ if (memory_address_p (mode, gen_rtx (PRE_INC, Pmode, reg)))
+ return 1;
+#endif
+
+#ifdef HAVE_PRE_DECREMENT
+ if (memory_address_p (mode, gen_rtx (PRE_DEC, Pmode, reg)))
+ return 1;
+#endif
+
+ return 0;
+}
+#endif
+
+#endif /* REGISTER_CONSTRAINTS */
+
+/* This is the `regscan' pass of the compiler, run just before cse
+ and again just before loop.
+
+ It finds the first and last use of each pseudo-register
+ and records them in the vectors regno_first_uid and regno_last_uid,
+ and counts the number of sets of each register in the vector reg_n_sets.
+
+ REPEAT is nonzero the second time this is called. */
+
+/* Indexed by pseudo register number, gives uid of first insn using the reg
+ (as of the time reg_scan is called). */
+
+int *regno_first_uid;
+
+/* Indexed by pseudo register number, gives uid of last insn using the reg
+ (as of the time reg_scan is called). */
+
+int *regno_last_uid;
+
+/* Indexed by pseudo register number, gives uid of last insn using the reg
+ or mentioning it in a note (as of the time reg_scan is called). */
+
+int *regno_last_note_uid;
+
+/* Record the number of registers we used when we allocated the above
+ tables. If we are called again with more than this, we must re-allocate
+ the tables. */
+
+static int highest_regno_in_uid_map;
+
+/* Maximum number of parallel sets and clobbers in any insn in this fn.
+ Always at least 3, since the combiner could put that many together,
+ and we want this to remain correct for all the remaining passes. */
+
+int max_parallel;
+
+void
+reg_scan (f, nregs, repeat)
+ rtx f;
+ int nregs;
+ int repeat;
+{
+ register rtx insn;
+
+ if (!repeat || nregs > highest_regno_in_uid_map)
+ {
+ /* Leave some spare space in case more regs are allocated. */
+ highest_regno_in_uid_map = nregs + nregs / 20;
+ regno_first_uid
+ = (int *) oballoc (highest_regno_in_uid_map * sizeof (int));
+ regno_last_uid
+ = (int *) oballoc (highest_regno_in_uid_map * sizeof (int));
+ regno_last_note_uid
+ = (int *) oballoc (highest_regno_in_uid_map * sizeof (int));
+ reg_n_sets
+ = (short *) oballoc (highest_regno_in_uid_map * sizeof (short));
+ }
+
+ bzero ((char *) regno_first_uid, highest_regno_in_uid_map * sizeof (int));
+ bzero ((char *) regno_last_uid, highest_regno_in_uid_map * sizeof (int));
+ bzero ((char *) regno_last_note_uid,
+ highest_regno_in_uid_map * sizeof (int));
+ bzero ((char *) reg_n_sets, highest_regno_in_uid_map * sizeof (short));
+
+ max_parallel = 3;
+
+ for (insn = f; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN
+ || GET_CODE (insn) == CALL_INSN
+ || GET_CODE (insn) == JUMP_INSN)
+ {
+ if (GET_CODE (PATTERN (insn)) == PARALLEL
+ && XVECLEN (PATTERN (insn), 0) > max_parallel)
+ max_parallel = XVECLEN (PATTERN (insn), 0);
+ reg_scan_mark_refs (PATTERN (insn), insn, 0);
+
+ if (REG_NOTES (insn))
+ reg_scan_mark_refs (REG_NOTES (insn), insn, 1);
+ }
+}
+
+/* X is the expression to scan. INSN is the insn it appears in.
+ NOTE_FLAG is nonzero if X is from INSN's notes rather than its body. */
+
+static void
+reg_scan_mark_refs (x, insn, note_flag)
+ rtx x;
+ rtx insn;
+ int note_flag;
+{
+ register enum rtx_code code = GET_CODE (x);
+ register rtx dest;
+ register rtx note;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case CC0:
+ case PC:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return;
+
+ case REG:
+ {
+ register int regno = REGNO (x);
+
+ regno_last_note_uid[regno] = INSN_UID (insn);
+ if (!note_flag)
+ regno_last_uid[regno] = INSN_UID (insn);
+ if (regno_first_uid[regno] == 0)
+ regno_first_uid[regno] = INSN_UID (insn);
+ }
+ break;
+
+ case EXPR_LIST:
+ if (XEXP (x, 0))
+ reg_scan_mark_refs (XEXP (x, 0), insn, note_flag);
+ if (XEXP (x, 1))
+ reg_scan_mark_refs (XEXP (x, 1), insn, note_flag);
+ break;
+
+ case INSN_LIST:
+ if (XEXP (x, 1))
+ reg_scan_mark_refs (XEXP (x, 1), insn, note_flag);
+ break;
+
+ case SET:
+ /* Count a set of the destination if it is a register. */
+ for (dest = SET_DEST (x);
+ GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == ZERO_EXTEND;
+ dest = XEXP (dest, 0))
+ ;
+
+ if (GET_CODE (dest) == REG)
+ reg_n_sets[REGNO (dest)]++;
+
+ /* If this is setting a pseudo from another pseudo or the sum of a
+ pseudo and a constant integer and the other pseudo is known to be
+ a pointer, set the destination to be a pointer as well.
+
+ Likewise if it is setting the destination from an address or from a
+ value equivalent to an address or to the sum of an address and
+ something else.
+
+ But don't do any of this if the pseudo corresponds to a user
+ variable since it should have already been set as a pointer based
+ on the type. */
+
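+      /* Editor's note: an illustrative case, not from the sources.  A set
+	 such as (set (reg:SI 105) (plus:SI (reg:SI 104) (const_int 4)))
+	 marks pseudo 105 as a pointer when pseudo 104 already has
+	 REGNO_POINTER_FLAG set and 105 is not a user variable.  */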
+ if (GET_CODE (SET_DEST (x)) == REG
+ && REGNO (SET_DEST (x)) >= FIRST_PSEUDO_REGISTER
+ && ! REG_USERVAR_P (SET_DEST (x))
+ && ! REGNO_POINTER_FLAG (REGNO (SET_DEST (x)))
+ && ((GET_CODE (SET_SRC (x)) == REG
+ && REGNO_POINTER_FLAG (REGNO (SET_SRC (x))))
+ || ((GET_CODE (SET_SRC (x)) == PLUS
+ || GET_CODE (SET_SRC (x)) == LO_SUM)
+ && GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT
+ && GET_CODE (XEXP (SET_SRC (x), 0)) == REG
+ && REGNO_POINTER_FLAG (REGNO (XEXP (SET_SRC (x), 0))))
+ || GET_CODE (SET_SRC (x)) == CONST
+ || GET_CODE (SET_SRC (x)) == SYMBOL_REF
+ || GET_CODE (SET_SRC (x)) == LABEL_REF
+ || (GET_CODE (SET_SRC (x)) == HIGH
+ && (GET_CODE (XEXP (SET_SRC (x), 0)) == CONST
+ || GET_CODE (XEXP (SET_SRC (x), 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (SET_SRC (x), 0)) == LABEL_REF))
+ || ((GET_CODE (SET_SRC (x)) == PLUS
+ || GET_CODE (SET_SRC (x)) == LO_SUM)
+ && (GET_CODE (XEXP (SET_SRC (x), 1)) == CONST
+ || GET_CODE (XEXP (SET_SRC (x), 1)) == SYMBOL_REF
+ || GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF))
+ || ((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
+ && (GET_CODE (XEXP (note, 0)) == CONST
+ || GET_CODE (XEXP (note, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (note, 0)) == LABEL_REF))))
+ REGNO_POINTER_FLAG (REGNO (SET_DEST (x))) = 1;
+
+ /* ... fall through ... */
+
+ default:
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ reg_scan_mark_refs (XEXP (x, i), insn, note_flag);
+ else if (fmt[i] == 'E' && XVEC (x, i) != 0)
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ reg_scan_mark_refs (XVECEXP (x, i, j), insn, note_flag);
+ }
+ }
+ }
+ }
+}
+
+/* Return nonzero if C1 is a subset of C2, i.e., if every register in C1
+ is also in C2. */
+
+int
+reg_class_subset_p (c1, c2)
+ register enum reg_class c1;
+ register enum reg_class c2;
+{
+ if (c1 == c2) return 1;
+
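+  /* Editor's note: GO_IF_HARD_REG_SUBSET below jumps to `win' when every
+     register in C1 is also in C2; the label sits inside the ALL_REGS test
+     so that both success paths share the same return statement.  */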
+ if (c2 == ALL_REGS)
+ win:
+ return 1;
+ GO_IF_HARD_REG_SUBSET (reg_class_contents[(int)c1],
+ reg_class_contents[(int)c2],
+ win);
+ return 0;
+}
+
+/* Return nonzero if there is a register that is in both C1 and C2. */
+
+int
+reg_classes_intersect_p (c1, c2)
+ register enum reg_class c1;
+ register enum reg_class c2;
+{
+#ifdef HARD_REG_SET
+ register
+#endif
+ HARD_REG_SET c;
+
+ if (c1 == c2) return 1;
+
+ if (c1 == ALL_REGS || c2 == ALL_REGS)
+ return 1;
+
+ COPY_HARD_REG_SET (c, reg_class_contents[(int) c1]);
+ AND_HARD_REG_SET (c, reg_class_contents[(int) c2]);
+
+ GO_IF_HARD_REG_SUBSET (c, reg_class_contents[(int) NO_REGS], lose);
+ return 1;
+
+ lose:
+ return 0;
+}
+
diff --git a/gnu/usr.bin/cc/cc_int/reload.c b/gnu/usr.bin/cc/cc_int/reload.c
new file mode 100644
index 0000000..b9a1c27
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/reload.c
@@ -0,0 +1,5650 @@
+/* Search an insn for pseudo regs that must be in hard regs and are not.
+ Copyright (C) 1987, 88, 89, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file contains subroutines used only from the file reload1.c.
+ It knows how to scan one insn for operands and values
+ that need to be copied into registers to make valid code.
+ It also finds other operands and values which are valid
+ but for which equivalent values in registers exist and
+ ought to be used instead.
+
+ Before processing the first insn of the function, call `init_reload'.
+
+ To scan an insn, call `find_reloads'. This does two things:
+ 1. sets up tables describing which values must be reloaded
+ for this insn, and what kind of hard regs they must be reloaded into;
+ 2. optionally record the locations where those values appear in
+ the data, so they can be replaced properly later.
+ This is done only if the second arg to `find_reloads' is nonzero.
+
+ The third arg to `find_reloads' specifies the number of levels
+ of indirect addressing supported by the machine. If it is zero,
+ indirect addressing is not valid. If it is one, (MEM (REG n))
+ is valid even if (REG n) did not get a hard register; if it is two,
+ (MEM (MEM (REG n))) is also valid even if (REG n) did not get a
+ hard register, and similarly for higher values.
+
+ Then you must choose the hard regs to reload those pseudo regs into,
+ and generate appropriate load insns before this insn and perhaps
+ also store insns after this insn. Set up the array `reload_reg_rtx'
+ to contain the REG rtx's for the registers you used. In some
+ cases `find_reloads' will return a nonzero value in `reload_reg_rtx'
+ for certain reloads. Then that tells you which register to use,
+ so you do not need to allocate one. But you still do need to add extra
+ instructions to copy the value into and out of that register.
+
+ Finally you must call `subst_reloads' to substitute the reload reg rtx's
+ into the locations already recorded.
+
+NOTE SIDE EFFECTS:
+
+ find_reloads can alter the operands of the instruction it is called on.
+
+ 1. Two operands of any sort may be interchanged, if they are in a
+ commutative instruction.
+ This happens only if find_reloads thinks the instruction will compile
+ better that way.
+
+ 2. Pseudo-registers that are equivalent to constants are replaced
+ with those constants if they are not in hard registers.
+
+1 happens every time find_reloads is called.
+2 happens only when REPLACE is 1, which is only when
+actually doing the reloads, not when just counting them.
+
+
+Using a reload register for several reloads in one insn:
+
+When an insn has reloads, it is considered as having three parts:
+the input reloads, the insn itself after reloading, and the output reloads.
+Reloads of values used in memory addresses are often needed for only one part.
+
+When this is so, reload_when_needed records which part needs the reload.
+Two reloads for different parts of the insn can share the same reload
+register.
+
+When a reload is used for addresses in multiple parts, or when it is
+an ordinary operand, it is classified as RELOAD_OTHER, and cannot share
+a register with any other reload. */
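+
+/* Editor's note: a minimal sketch of the calling protocol described above,
+   assuming the reload1.c driver shape; the real caller handles spilling,
+   register elimination and much else omitted here.
+
+	init_reload ();
+	for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+	  if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+	    {
+	      find_reloads (insn, 1, ind_levels, live_known, reload_reg_p);
+	      ... choose a hard reg for each reload whose reload_reg_rtx
+	      is still zero, emit loads before INSN and stores after it ...
+	      subst_reloads ();
+	    }
+*/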
+
+#define REG_OK_STRICT
+
+#include "config.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "insn-codes.h"
+#include "recog.h"
+#include "reload.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "real.h"
+
+#ifndef REGISTER_MOVE_COST
+#define REGISTER_MOVE_COST(x, y) 2
+#endif
+
+/* The variables set up by `find_reloads' are:
+
+ n_reloads number of distinct reloads needed; max reload # + 1
+ tables indexed by reload number
+ reload_in rtx for value to reload from
+ reload_out rtx for where to store reload-reg afterward if necessary
+ (often the same as reload_in)
+ reload_reg_class enum reg_class, saying what regs to reload into
+ reload_inmode enum machine_mode; mode this operand should have
+ when reloaded, on input.
+ reload_outmode enum machine_mode; mode this operand should have
+ when reloaded, on output.
+ reload_optional char, nonzero for an optional reload.
+ Optional reloads are ignored unless the
+ value is already sitting in a register.
+ reload_inc int, positive amount to increment or decrement by if
+ reload_in is a PRE_DEC, PRE_INC, POST_DEC, POST_INC.
+ Ignored otherwise (don't assume it is zero).
+ reload_in_reg rtx. A reg for which reload_in is the equivalent.
+ If reload_in is a symbol_ref which came from
+ reg_equiv_constant, then this is the pseudo
+ which has that symbol_ref as equivalent.
+ reload_reg_rtx rtx. This is the register to reload into.
+ If it is zero when `find_reloads' returns,
+ you must find a suitable register in the class
+ specified by reload_reg_class, and store here
+ an rtx for that register with mode from
+ reload_inmode or reload_outmode.
+ reload_nocombine char, nonzero if this reload shouldn't be
+ combined with another reload.
+ reload_opnum int, operand number being reloaded. This is
+ used to group related reloads and need not always
+ be equal to the actual operand number in the insn,
+ though it currently will be; for in-out operands, it
+ is one of the two operand numbers.
+ reload_when_needed enum, classifies reload as needed either for
+ addressing an input reload, addressing an output,
+ for addressing a non-reloaded mem ref,
+ or for unspecified purposes (i.e., more than one
+ of the above).
+ reload_secondary_p int, 1 if this is a secondary register for one
+ or more reloads.
+ reload_secondary_in_reload
+ reload_secondary_out_reload
+ int, gives the reload number of a secondary
+ reload, when needed; otherwise -1
+ reload_secondary_in_icode
+ reload_secondary_out_icode
+ enum insn_code, if a secondary reload is required,
+ gives the INSN_CODE that uses the secondary
+ reload as a scratch register, or CODE_FOR_nothing
+ if the secondary reload register is to be an
+ intermediate register. */
+int n_reloads;
+
+rtx reload_in[MAX_RELOADS];
+rtx reload_out[MAX_RELOADS];
+enum reg_class reload_reg_class[MAX_RELOADS];
+enum machine_mode reload_inmode[MAX_RELOADS];
+enum machine_mode reload_outmode[MAX_RELOADS];
+rtx reload_reg_rtx[MAX_RELOADS];
+char reload_optional[MAX_RELOADS];
+int reload_inc[MAX_RELOADS];
+rtx reload_in_reg[MAX_RELOADS];
+char reload_nocombine[MAX_RELOADS];
+int reload_opnum[MAX_RELOADS];
+enum reload_type reload_when_needed[MAX_RELOADS];
+int reload_secondary_p[MAX_RELOADS];
+int reload_secondary_in_reload[MAX_RELOADS];
+int reload_secondary_out_reload[MAX_RELOADS];
+enum insn_code reload_secondary_in_icode[MAX_RELOADS];
+enum insn_code reload_secondary_out_icode[MAX_RELOADS];
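+
+/* Editor's note: an illustrative example, not from the sources.  For an
+   insn (set (reg:SI 100) (mem:SI (reg:SI 101))) in which pseudo 100 got
+   no hard register, find_reloads might record a single reload with
+   reload_in = (mem:SI (reg:SI 101)), reload_out = 0,
+   reload_reg_class = GENERAL_REGS, reload_inmode = SImode,
+   reload_outmode = VOIDmode and reload_when_needed = RELOAD_FOR_INPUT.  */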
+
+/* All the "earlyclobber" operands of the current insn
+ are recorded here. */
+int n_earlyclobbers;
+rtx reload_earlyclobbers[MAX_RECOG_OPERANDS];
+
+int reload_n_operands;
+
+/* Replacing reloads.
+
+ If `replace_reloads' is nonzero, then as each reload is recorded
+ an entry is made for it in the table `replacements'.
+ Then later `subst_reloads' can look through that table and
+ perform all the replacements needed. */
+
+/* Nonzero means record the places to replace. */
+static int replace_reloads;
+
+/* Each replacement is recorded with a structure like this. */
+struct replacement
+{
+ rtx *where; /* Location to store in */
+ rtx *subreg_loc; /* Location of SUBREG if WHERE is inside
+ a SUBREG; 0 otherwise. */
+ int what; /* which reload this is for */
+ enum machine_mode mode; /* mode it must have */
+};
+
+static struct replacement replacements[MAX_RECOG_OPERANDS * ((MAX_REGS_PER_ADDRESS * 2) + 1)];
+
+/* Number of replacements currently recorded. */
+static int n_replacements;
+
+/* Used to track what is modified by an operand. */
+struct decomposition
+{
+ int reg_flag; /* Nonzero if referencing a register. */
+ int safe; /* Nonzero if this can't conflict with anything. */
+ rtx base; /* Base address for MEM. */
+ HOST_WIDE_INT start; /* Starting offset or register number. */
+ HOST_WIDE_INT end; /* Ending offset or register number. */
+};
+
+/* MEM-rtx's created for pseudo-regs in stack slots not directly addressable;
+ (see reg_equiv_address). */
+static rtx memlocs[MAX_RECOG_OPERANDS * ((MAX_REGS_PER_ADDRESS * 2) + 1)];
+static int n_memlocs;
+
+#ifdef SECONDARY_MEMORY_NEEDED
+
+/* Save MEMs needed to copy from one class of registers to another. One MEM
+ is used per mode, but normally only one or two modes are ever used.
+
+ We keep two versions, before and after register elimination. The one
+ after register elimination is recorded separately for each operand. This
+ is done in case the address is not valid, to be sure that we reload
+ each operand separately. */
+
+static rtx secondary_memlocs[NUM_MACHINE_MODES];
+static rtx secondary_memlocs_elim[NUM_MACHINE_MODES][MAX_RECOG_OPERANDS];
+#endif
+
+/* The instruction we are doing reloads for;
+ so we can test whether a register dies in it. */
+static rtx this_insn;
+
+/* Nonzero if this instruction is a user-specified asm with operands. */
+static int this_insn_is_asm;
+
+/* If hard_regs_live_known is nonzero,
+ we can tell which hard regs are currently live,
+ at least enough to succeed in choosing dummy reloads. */
+static int hard_regs_live_known;
+
+/* Indexed by hard reg number,
+ element is nonnegative if hard reg has been spilled.
+ This vector is passed to `find_reloads' as an argument
+ and is not changed here. */
+static short *static_reload_reg_p;
+
+/* Set to 1 in subst_reg_equivs if it changes anything. */
+static int subst_reg_equivs_changed;
+
+/* On return from push_reload, holds the reload-number for the OUT
+ operand, which can differ from the reload-number for the input operand. */
+static int output_reloadnum;
+
+ /* Compare two RTX's. */
+#define MATCHES(x, y) \
+ (x == y || (x != 0 && (GET_CODE (x) == REG \
+ ? GET_CODE (y) == REG && REGNO (x) == REGNO (y) \
+ : rtx_equal_p (x, y) && ! side_effects_p (x))))
+
+ /* Indicates if two reload purposes are for similar enough things that we
+ can merge their reloads. */
+#define MERGABLE_RELOADS(when1, when2, op1, op2) \
+ ((when1) == RELOAD_OTHER || (when2) == RELOAD_OTHER \
+ || ((when1) == (when2) && (op1) == (op2)) \
+ || ((when1) == RELOAD_FOR_INPUT && (when2) == RELOAD_FOR_INPUT) \
+ || ((when1) == RELOAD_FOR_OPERAND_ADDRESS \
+ && (when2) == RELOAD_FOR_OPERAND_ADDRESS) \
+ || ((when1) == RELOAD_FOR_OTHER_ADDRESS \
+ && (when2) == RELOAD_FOR_OTHER_ADDRESS))
+
+ /* Nonzero if these two reload purposes produce RELOAD_OTHER when merged. */
+#define MERGE_TO_OTHER(when1, when2, op1, op2) \
+ ((when1) != (when2) \
+ || ! ((op1) == (op2) \
+ || (when1) == RELOAD_FOR_INPUT \
+ || (when1) == RELOAD_FOR_OPERAND_ADDRESS \
+ || (when1) == RELOAD_FOR_OTHER_ADDRESS))
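+
+/* Editor's note: a worked example of the two macros above.  Two
+   RELOAD_FOR_INPUT reloads for different operands pass MERGABLE_RELOADS,
+   and MERGE_TO_OTHER is false for them, so the merged reload stays
+   RELOAD_FOR_INPUT.  Two RELOAD_FOR_INPUT_ADDRESS reloads for different
+   operands fail MERGABLE_RELOADS and are not merged at all, while any
+   reload merged with a RELOAD_OTHER reload becomes RELOAD_OTHER.  */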
+
+static int push_secondary_reload PROTO((int, rtx, int, int, enum reg_class,
+ enum machine_mode, enum reload_type,
+ enum insn_code *));
+static int push_reload PROTO((rtx, rtx, rtx *, rtx *, enum reg_class,
+ enum machine_mode, enum machine_mode,
+ int, int, int, enum reload_type));
+static void push_replacement PROTO((rtx *, int, enum machine_mode));
+static void combine_reloads PROTO((void));
+static rtx find_dummy_reload PROTO((rtx, rtx, rtx *, rtx *,
+ enum machine_mode, enum machine_mode,
+ enum reg_class, int));
+static int earlyclobber_operand_p PROTO((rtx));
+static int hard_reg_set_here_p PROTO((int, int, rtx));
+static struct decomposition decompose PROTO((rtx));
+static int immune_p PROTO((rtx, rtx, struct decomposition));
+static int alternative_allows_memconst PROTO((char *, int));
+static rtx find_reloads_toplev PROTO((rtx, int, enum reload_type, int, int));
+static rtx make_memloc PROTO((rtx, int));
+static int find_reloads_address PROTO((enum machine_mode, rtx *, rtx, rtx *,
+ int, enum reload_type, int));
+static rtx subst_reg_equivs PROTO((rtx));
+static rtx subst_indexed_address PROTO((rtx));
+static int find_reloads_address_1 PROTO((rtx, int, rtx *, int,
+ enum reload_type,int));
+static void find_reloads_address_part PROTO((rtx, rtx *, enum reg_class,
+ enum machine_mode, int,
+ enum reload_type, int));
+static int find_inc_amount PROTO((rtx, rtx));
+
+#ifdef HAVE_SECONDARY_RELOADS
+
+/* Determine if any secondary reloads are needed for loading (if IN_P is
+ non-zero) or storing (if IN_P is zero) X to or from a reload register of
+ register class RELOAD_CLASS in mode RELOAD_MODE. If secondary reloads
+ are needed, push them.
+
+ Return the reload number of the secondary reload we made, or -1 if
+ we didn't need one. *PICODE is set to the insn_code to use if we do
+ need a secondary reload. */
+
+static int
+push_secondary_reload (in_p, x, opnum, optional, reload_class, reload_mode,
+ type, picode)
+ int in_p;
+ rtx x;
+ int opnum;
+ int optional;
+ enum reg_class reload_class;
+ enum machine_mode reload_mode;
+ enum reload_type type;
+ enum insn_code *picode;
+{
+ enum reg_class class = NO_REGS;
+ enum machine_mode mode = reload_mode;
+ enum insn_code icode = CODE_FOR_nothing;
+ enum reg_class t_class = NO_REGS;
+ enum machine_mode t_mode = VOIDmode;
+ enum insn_code t_icode = CODE_FOR_nothing;
+ enum reload_type secondary_type;
+ int i;
+ int s_reload, t_reload = -1;
+
+ if (type == RELOAD_FOR_INPUT_ADDRESS || type == RELOAD_FOR_OUTPUT_ADDRESS)
+ secondary_type = type;
+ else
+ secondary_type = in_p ? RELOAD_FOR_INPUT_ADDRESS : RELOAD_FOR_OUTPUT_ADDRESS;
+
+ *picode = CODE_FOR_nothing;
+
+ /* If X is a pseudo-register that has an equivalent MEM (actually, if it
+ is still a pseudo-register by now, it *must* have an equivalent MEM
+ but we don't want to assume that), use that equivalent when seeing if
+ a secondary reload is needed since whether or not a reload is needed
+ might be sensitive to the form of the MEM. */
+
+ if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_mem[REGNO (x)] != 0)
+ x = reg_equiv_mem[REGNO (x)];
+
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ if (in_p)
+ class = SECONDARY_INPUT_RELOAD_CLASS (reload_class, reload_mode, x);
+#endif
+
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+ if (! in_p)
+ class = SECONDARY_OUTPUT_RELOAD_CLASS (reload_class, reload_mode, x);
+#endif
+
+ /* If we don't need any secondary registers, done. */
+ if (class == NO_REGS)
+ return -1;
+
+ /* Get a possible insn to use. If the predicate doesn't accept X, don't
+ use the insn. */
+
+ icode = (in_p ? reload_in_optab[(int) reload_mode]
+ : reload_out_optab[(int) reload_mode]);
+
+ if (icode != CODE_FOR_nothing
+ && insn_operand_predicate[(int) icode][in_p]
+ && (! (insn_operand_predicate[(int) icode][in_p]) (x, reload_mode)))
+ icode = CODE_FOR_nothing;
+
+ /* If we will be using an insn, see if it can directly handle the reload
+ register we will be using. If it can, the secondary reload is for a
+ scratch register. If it can't, we will use the secondary reload for
+ an intermediate register and require a tertiary reload for the scratch
+ register. */
+
+ if (icode != CODE_FOR_nothing)
+ {
+ /* If IN_P is non-zero, the reload register will be the output in
+ operand 0. If IN_P is zero, the reload register will be the input
+ in operand 1. Outputs should have an initial "=", which we must
+ skip. */
+
+ char insn_letter = insn_operand_constraint[(int) icode][!in_p][in_p];
+ enum reg_class insn_class
+ = (insn_letter == 'r' ? GENERAL_REGS
+ : REG_CLASS_FROM_LETTER (insn_letter));
+
+ if (insn_class == NO_REGS
+ || (in_p && insn_operand_constraint[(int) icode][!in_p][0] != '=')
+ /* The scratch register's constraint must start with "=&". */
+ || insn_operand_constraint[(int) icode][2][0] != '='
+ || insn_operand_constraint[(int) icode][2][1] != '&')
+ abort ();
+
+ if (reg_class_subset_p (reload_class, insn_class))
+ mode = insn_operand_mode[(int) icode][2];
+ else
+ {
+ char t_letter = insn_operand_constraint[(int) icode][2][2];
+ class = insn_class;
+ t_mode = insn_operand_mode[(int) icode][2];
+ t_class = (t_letter == 'r' ? GENERAL_REGS
+ : REG_CLASS_FROM_LETTER (t_letter));
+ t_icode = icode;
+ icode = CODE_FOR_nothing;
+ }
+ }
+
+ /* This case isn't valid, so fail. Reload is allowed to use the same
+ register for RELOAD_FOR_INPUT_ADDRESS and RELOAD_FOR_INPUT reloads, but
+ in the case of a secondary register, we actually need two different
+ registers for correct code. We fail here to prevent the possibility of
+ silently generating incorrect code later.
+
+ The convention is that secondary input reloads are valid only if the
+ secondary_class is different from class. If you have such a case, you
+ cannot use secondary reloads; you must work around the problem some
+ other way.
+
+ Allow this when MODE is not reload_mode and assume that the generated
+ code handles this case (it does on the Alpha, which is the only place
+ this currently happens). */
+
+ if (in_p && class == reload_class && mode == reload_mode)
+ abort ();
+
+ /* If we need a tertiary reload, see if we have one we can reuse or else
+ make a new one. */
+
+ if (t_class != NO_REGS)
+ {
+ for (t_reload = 0; t_reload < n_reloads; t_reload++)
+ if (reload_secondary_p[t_reload]
+ && (reg_class_subset_p (t_class, reload_reg_class[t_reload])
+ || reg_class_subset_p (reload_reg_class[t_reload], t_class))
+ && ((in_p && reload_inmode[t_reload] == t_mode)
+ || (! in_p && reload_outmode[t_reload] == t_mode))
+ && ((in_p && (reload_secondary_in_icode[t_reload]
+ == CODE_FOR_nothing))
+ || (! in_p && (reload_secondary_out_icode[t_reload]
+ == CODE_FOR_nothing)))
+ && (reg_class_size[(int) t_class] == 1
+#ifdef SMALL_REGISTER_CLASSES
+ || 1
+#endif
+ )
+ && MERGABLE_RELOADS (secondary_type,
+ reload_when_needed[t_reload],
+ opnum, reload_opnum[t_reload]))
+ {
+ if (in_p)
+ reload_inmode[t_reload] = t_mode;
+ if (! in_p)
+ reload_outmode[t_reload] = t_mode;
+
+ if (reg_class_subset_p (t_class, reload_reg_class[t_reload]))
+ reload_reg_class[t_reload] = t_class;
+
+ reload_opnum[t_reload] = MIN (reload_opnum[t_reload], opnum);
+ reload_optional[t_reload] &= optional;
+ reload_secondary_p[t_reload] = 1;
+ if (MERGE_TO_OTHER (secondary_type, reload_when_needed[t_reload],
+ opnum, reload_opnum[t_reload]))
+ reload_when_needed[t_reload] = RELOAD_OTHER;
+ }
+
+ if (t_reload == n_reloads)
+ {
+ /* We need to make a new tertiary reload for this register class. */
+ reload_in[t_reload] = reload_out[t_reload] = 0;
+ reload_reg_class[t_reload] = t_class;
+ reload_inmode[t_reload] = in_p ? t_mode : VOIDmode;
+ reload_outmode[t_reload] = ! in_p ? t_mode : VOIDmode;
+ reload_reg_rtx[t_reload] = 0;
+ reload_optional[t_reload] = optional;
+ reload_inc[t_reload] = 0;
+ /* Maybe we could combine these, but it seems too tricky. */
+ reload_nocombine[t_reload] = 1;
+ reload_in_reg[t_reload] = 0;
+ reload_opnum[t_reload] = opnum;
+ reload_when_needed[t_reload] = secondary_type;
+ reload_secondary_in_reload[t_reload] = -1;
+ reload_secondary_out_reload[t_reload] = -1;
+ reload_secondary_in_icode[t_reload] = CODE_FOR_nothing;
+ reload_secondary_out_icode[t_reload] = CODE_FOR_nothing;
+ reload_secondary_p[t_reload] = 1;
+
+ n_reloads++;
+ }
+ }
+
+ /* See if we can reuse an existing secondary reload. */
+ for (s_reload = 0; s_reload < n_reloads; s_reload++)
+ if (reload_secondary_p[s_reload]
+ && (reg_class_subset_p (class, reload_reg_class[s_reload])
+ || reg_class_subset_p (reload_reg_class[s_reload], class))
+ && ((in_p && reload_inmode[s_reload] == mode)
+ || (! in_p && reload_outmode[s_reload] == mode))
+ && ((in_p && reload_secondary_in_reload[s_reload] == t_reload)
+ || (! in_p && reload_secondary_out_reload[s_reload] == t_reload))
+ && ((in_p && reload_secondary_in_icode[s_reload] == t_icode)
+ || (! in_p && reload_secondary_out_icode[s_reload] == t_icode))
+ && (reg_class_size[(int) class] == 1
+#ifdef SMALL_REGISTER_CLASSES
+ || 1
+#endif
+ )
+ && MERGABLE_RELOADS (secondary_type, reload_when_needed[s_reload],
+ opnum, reload_opnum[s_reload]))
+ {
+ if (in_p)
+ reload_inmode[s_reload] = mode;
+ if (! in_p)
+ reload_outmode[s_reload] = mode;
+
+ if (reg_class_subset_p (class, reload_reg_class[s_reload]))
+ reload_reg_class[s_reload] = class;
+
+ reload_opnum[s_reload] = MIN (reload_opnum[s_reload], opnum);
+ reload_optional[s_reload] &= optional;
+ reload_secondary_p[s_reload] = 1;
+ if (MERGE_TO_OTHER (secondary_type, reload_when_needed[s_reload],
+ opnum, reload_opnum[s_reload]))
+ reload_when_needed[s_reload] = RELOAD_OTHER;
+ }
+
+ if (s_reload == n_reloads)
+ {
+ /* We need to make a new secondary reload for this register class. */
+ reload_in[s_reload] = reload_out[s_reload] = 0;
+ reload_reg_class[s_reload] = class;
+
+ reload_inmode[s_reload] = in_p ? mode : VOIDmode;
+ reload_outmode[s_reload] = ! in_p ? mode : VOIDmode;
+ reload_reg_rtx[s_reload] = 0;
+ reload_optional[s_reload] = optional;
+ reload_inc[s_reload] = 0;
+ /* Maybe we could combine these, but it seems too tricky. */
+ reload_nocombine[s_reload] = 1;
+ reload_in_reg[s_reload] = 0;
+ reload_opnum[s_reload] = opnum;
+ reload_when_needed[s_reload] = secondary_type;
+ reload_secondary_in_reload[s_reload] = in_p ? t_reload : -1;
+ reload_secondary_out_reload[s_reload] = ! in_p ? t_reload : -1;
+ reload_secondary_in_icode[s_reload] = in_p ? t_icode : CODE_FOR_nothing;
+ reload_secondary_out_icode[s_reload]
+ = ! in_p ? t_icode : CODE_FOR_nothing;
+ reload_secondary_p[s_reload] = 1;
+
+ n_reloads++;
+
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* If we need a memory location to copy between the two reload regs,
+ set it up now. */
+
+ if (in_p && icode == CODE_FOR_nothing
+ && SECONDARY_MEMORY_NEEDED (class, reload_class, reload_mode))
+ get_secondary_mem (x, reload_mode, opnum, type);
+
+ if (! in_p && icode == CODE_FOR_nothing
+ && SECONDARY_MEMORY_NEEDED (reload_class, class, reload_mode))
+ get_secondary_mem (x, reload_mode, opnum, type);
+#endif
+ }
+
+ *picode = icode;
+ return s_reload;
+}
+#endif /* HAVE_SECONDARY_RELOADS */
+
+#ifdef SECONDARY_MEMORY_NEEDED
+
+/* Return a memory location that will be used to copy X in mode MODE.
+ If we haven't already made a location for this mode in this insn,
+ call find_reloads_address on the location being returned. */
+
+rtx
+get_secondary_mem (x, mode, opnum, type)
+ rtx x;
+ enum machine_mode mode;
+ int opnum;
+ enum reload_type type;
+{
+ rtx loc;
+ int mem_valid;
+
+ /* By default, if MODE is narrower than a word, widen it to a word.
+ This is required because most machines that require these memory
+ locations do not support short loads and stores from all registers
+ (e.g., FP registers). */
+
+#ifdef SECONDARY_MEMORY_NEEDED_MODE
+ mode = SECONDARY_MEMORY_NEEDED_MODE (mode);
+#else
+ if (GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
+ mode = mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0);
+#endif
+
+ /* If we already have made a MEM for this operand in MODE, return it. */
+ if (secondary_memlocs_elim[(int) mode][opnum] != 0)
+ return secondary_memlocs_elim[(int) mode][opnum];
+
+ /* If this is the first time we've tried to get a MEM for this mode,
+ allocate a new one. `something_changed' in reload will get set
+ by noticing that the frame size has changed. */
+
+ if (secondary_memlocs[(int) mode] == 0)
+ {
+#ifdef SECONDARY_MEMORY_NEEDED_RTX
+ secondary_memlocs[(int) mode] = SECONDARY_MEMORY_NEEDED_RTX (mode);
+#else
+ secondary_memlocs[(int) mode]
+ = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
+#endif
+ }
+
+ /* Get a version of the address doing any eliminations needed. If that
+ didn't give us a new MEM, make a new one if it isn't valid. */
+
+ loc = eliminate_regs (secondary_memlocs[(int) mode], VOIDmode, NULL_RTX);
+ mem_valid = strict_memory_address_p (mode, XEXP (loc, 0));
+
+ if (! mem_valid && loc == secondary_memlocs[(int) mode])
+ loc = copy_rtx (loc);
+
+ /* The only time the call below will do anything is if the stack
+ offset is too large. In that case IND_LEVELS doesn't matter, so we
+ can just pass a zero. Adjust the type to be the address of the
+ corresponding object. If the address was valid, save the eliminated
+ address. If it wasn't valid, we need to make a reload each time, so
+ don't save it. */
+
+ if (! mem_valid)
+ {
+ type = (type == RELOAD_FOR_INPUT ? RELOAD_FOR_INPUT_ADDRESS
+ : type == RELOAD_FOR_OUTPUT ? RELOAD_FOR_OUTPUT_ADDRESS
+ : RELOAD_OTHER);
+
+ find_reloads_address (mode, NULL_PTR, XEXP (loc, 0), &XEXP (loc, 0),
+ opnum, type, 0);
+ }
+
+ secondary_memlocs_elim[(int) mode][opnum] = loc;
+ return loc;
+}
+
+/* Clear any secondary memory locations we've made. */
+
+void
+clear_secondary_mem ()
+{
+ bzero ((char *) secondary_memlocs, sizeof secondary_memlocs);
+}
+#endif /* SECONDARY_MEMORY_NEEDED */
+
+/* Record one reload that needs to be performed.
+ IN is an rtx saying where the data are to be found before this instruction.
+ OUT says where they must be stored after the instruction.
+ (IN is zero for data not read, and OUT is zero for data not written.)
+ INLOC and OUTLOC point to the places in the instructions where
+ IN and OUT were found.
+ If IN and OUT are both non-zero, it means the same register must be used
+ to reload both IN and OUT.
+
+ CLASS is a register class required for the reloaded data.
+ INMODE is the machine mode that the instruction requires
+ for the reg that replaces IN and OUTMODE is likewise for OUT.
+
+ If IN is zero, then OUT's location and mode should be passed as
+ INLOC and INMODE.
+
+ STRICT_LOW is 1 if there is a containing STRICT_LOW_PART rtx.
+
+ OPTIONAL nonzero means this reload does not need to be performed:
+ it can be discarded if that is more convenient.
+
+ OPNUM and TYPE say what the purpose of this reload is.
+
+ The return value is the reload-number for this reload.
+
+ If both IN and OUT are nonzero, in some rare cases we might
+ want to make two separate reloads. (Actually we never do this now.)
+ Therefore, the reload-number for OUT is stored in
+ output_reloadnum when we return; the return value applies to IN.
+ Usually (presently always), when IN and OUT are nonzero,
+ the two reload-numbers are equal, but the caller should be careful to
+ distinguish them. */
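+
+/* Editor's note: an illustrative call, not from the sources.  For an
+   in-out SImode operand 0 that needs GENERAL_REGS, find_reloads would
+   make a call along these lines:
+
+	push_reload (recog_operand[0], recog_operand[0],
+		     recog_operand_loc[0], recog_operand_loc[0],
+		     GENERAL_REGS, SImode, SImode,
+		     0, 0, 0, RELOAD_OTHER);
+
+   With IN == OUT this asks for one reload register that is loaded
+   before the insn and stored back after it.  */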
+
+static int
+push_reload (in, out, inloc, outloc, class,
+ inmode, outmode, strict_low, optional, opnum, type)
+ register rtx in, out;
+ rtx *inloc, *outloc;
+ enum reg_class class;
+ enum machine_mode inmode, outmode;
+ int strict_low;
+ int optional;
+ int opnum;
+ enum reload_type type;
+{
+ register int i;
+ int dont_share = 0;
+ rtx *in_subreg_loc = 0, *out_subreg_loc = 0;
+ int secondary_in_reload = -1, secondary_out_reload = -1;
+ enum insn_code secondary_in_icode, secondary_out_icode;
+
+ /* INMODE and/or OUTMODE could be VOIDmode if no mode
+ has been specified for the operand. In that case,
+ use the operand's mode as the mode to reload. */
+ if (inmode == VOIDmode && in != 0)
+ inmode = GET_MODE (in);
+ if (outmode == VOIDmode && out != 0)
+ outmode = GET_MODE (out);
+
+ /* If IN is a pseudo register everywhere-equivalent to a constant, and
+ it is not in a hard register, reload straight from the constant,
+ since we want to get rid of such pseudo registers.
+ Often this is done earlier, but not always in find_reloads_address. */
+ if (in != 0 && GET_CODE (in) == REG)
+ {
+ register int regno = REGNO (in);
+
+ if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0)
+ in = reg_equiv_constant[regno];
+ }
+
+ /* Likewise for OUT. Of course, OUT will never be equivalent to
+ an actual constant, but it might be equivalent to a memory location
+ (in the case of a parameter). */
+ if (out != 0 && GET_CODE (out) == REG)
+ {
+ register int regno = REGNO (out);
+
+ if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0)
+ out = reg_equiv_constant[regno];
+ }
+
+ /* If we have a read-write operand with an address side-effect,
+ change either IN or OUT so the side-effect happens only once. */
+ if (in != 0 && out != 0 && GET_CODE (in) == MEM && rtx_equal_p (in, out))
+ {
+ if (GET_CODE (XEXP (in, 0)) == POST_INC
+ || GET_CODE (XEXP (in, 0)) == POST_DEC)
+ in = gen_rtx (MEM, GET_MODE (in), XEXP (XEXP (in, 0), 0));
+ if (GET_CODE (XEXP (in, 0)) == PRE_INC
+ || GET_CODE (XEXP (in, 0)) == PRE_DEC)
+ out = gen_rtx (MEM, GET_MODE (out), XEXP (XEXP (out, 0), 0));
+ }
+
+ /* If we are reloading a (SUBREG constant ...), really reload just the
+ inside expression in its own mode. Similarly for (SUBREG (PLUS ...)).
+ If we have (SUBREG:M1 (MEM:M2 ...) ...) (or an inner REG that is still
+ a pseudo and hence will become a MEM) with M1 wider than M2 and the
+ register is a pseudo, also reload the inside expression.
+ For machines that extend byte loads, do this for any SUBREG of a pseudo
+ where both M1 and M2 are a word or smaller unless they are the same
+ size.
+ Similar issue for (SUBREG:M1 (REG:M2 ...) ...) for a hard register R where
+ either M1 is not valid for R or M2 is wider than a word but we only
+ need one word to store an M2-sized quantity in R.
+ (However, if OUT is nonzero, we need to reload the reg *and*
+ the subreg, so do nothing here, and let following statement handle it.)
+
+ Note that the case of (SUBREG (CONST_INT...)...) is handled elsewhere;
+ we can't handle it here because CONST_INT does not indicate a mode.
+
+ Similarly, we must reload the inside expression if we have a
+ STRICT_LOW_PART (presumably, in == out in that case).
+
+ Also reload the inner expression if it does not require a secondary
+ reload but the SUBREG does. */
+
+ if (in != 0 && GET_CODE (in) == SUBREG
+ && (CONSTANT_P (SUBREG_REG (in))
+ || GET_CODE (SUBREG_REG (in)) == PLUS
+ || strict_low
+ || (((GET_CODE (SUBREG_REG (in)) == REG
+ && REGNO (SUBREG_REG (in)) >= FIRST_PSEUDO_REGISTER)
+ || GET_CODE (SUBREG_REG (in)) == MEM)
+ && ((GET_MODE_SIZE (inmode)
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))))
+#ifdef LOAD_EXTEND_OP
+ || (GET_MODE_SIZE (inmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+ <= UNITS_PER_WORD)
+ && (GET_MODE_SIZE (inmode)
+ != GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))))
+#endif
+ ))
+ || (GET_CODE (SUBREG_REG (in)) == REG
+ && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER
+ /* The case where out is nonzero
+ is handled differently in the following statement. */
+ && (out == 0 || SUBREG_WORD (in) == 0)
+ && ((GET_MODE_SIZE (inmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+ > UNITS_PER_WORD)
+ && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+ / UNITS_PER_WORD)
+ != HARD_REGNO_NREGS (REGNO (SUBREG_REG (in)),
+ GET_MODE (SUBREG_REG (in)))))
+ || ! HARD_REGNO_MODE_OK ((REGNO (SUBREG_REG (in))
+ + SUBREG_WORD (in)),
+ inmode)))
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ || (SECONDARY_INPUT_RELOAD_CLASS (class, inmode, in) != NO_REGS
+ && (SECONDARY_INPUT_RELOAD_CLASS (class,
+ GET_MODE (SUBREG_REG (in)),
+ SUBREG_REG (in))
+ == NO_REGS))
+#endif
+ ))
+ {
+ in_subreg_loc = inloc;
+ inloc = &SUBREG_REG (in);
+ in = *inloc;
+#ifndef LOAD_EXTEND_OP
+ if (GET_CODE (in) == MEM)
+ /* This is supposed to happen only for paradoxical subregs made by
+ combine.c. (SUBREG (MEM)) isn't supposed to occur any other way. */
+ if (GET_MODE_SIZE (GET_MODE (in)) > GET_MODE_SIZE (inmode))
+ abort ();
+#endif
+ inmode = GET_MODE (in);
+ }
+
+ /* Similar issue for (SUBREG:M1 (REG:M2 ...) ...) for a hard register R where
+ either M1 is not valid for R or M2 is wider than a word but we only
+ need one word to store an M2-sized quantity in R.
+
+ However, we must reload the inner reg *as well as* the subreg in
+ that case. */
+
+ if (in != 0 && GET_CODE (in) == SUBREG
+ && GET_CODE (SUBREG_REG (in)) == REG
+ && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER
+ && (! HARD_REGNO_MODE_OK (REGNO (SUBREG_REG (in)), inmode)
+ || (GET_MODE_SIZE (inmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+ > UNITS_PER_WORD)
+ && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+ / UNITS_PER_WORD)
+ != HARD_REGNO_NREGS (REGNO (SUBREG_REG (in)),
+ GET_MODE (SUBREG_REG (in)))))))
+ {
+ push_reload (SUBREG_REG (in), NULL_RTX, &SUBREG_REG (in), NULL_PTR,
+ GENERAL_REGS, VOIDmode, VOIDmode, 0, 0, opnum, type);
+ }
+
+
+ /* Similarly for paradoxical and problematical SUBREGs on the output.
+ Note that there is no reason we need worry about the previous value
+ of SUBREG_REG (out); even if wider than out,
+ storing in a subreg is entitled to clobber it all
+ (except in the case of STRICT_LOW_PART,
+ and in that case the constraint should label it input-output.) */
+ if (out != 0 && GET_CODE (out) == SUBREG
+ && (CONSTANT_P (SUBREG_REG (out))
+ || strict_low
+ || (((GET_CODE (SUBREG_REG (out)) == REG
+ && REGNO (SUBREG_REG (out)) >= FIRST_PSEUDO_REGISTER)
+ || GET_CODE (SUBREG_REG (out)) == MEM)
+ && ((GET_MODE_SIZE (outmode)
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))))
+#ifdef LOAD_EXTEND_OP
+ || (GET_MODE_SIZE (outmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))
+ <= UNITS_PER_WORD)
+ && (GET_MODE_SIZE (outmode)
+ != GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))))
+#endif
+ ))
+ || (GET_CODE (SUBREG_REG (out)) == REG
+ && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER
+ && ((GET_MODE_SIZE (outmode) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))
+ > UNITS_PER_WORD)
+ && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))
+ / UNITS_PER_WORD)
+ != HARD_REGNO_NREGS (REGNO (SUBREG_REG (out)),
+ GET_MODE (SUBREG_REG (out)))))
+ || ! HARD_REGNO_MODE_OK ((REGNO (SUBREG_REG (out))
+ + SUBREG_WORD (out)),
+ outmode)))
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+ || (SECONDARY_OUTPUT_RELOAD_CLASS (class, outmode, out) != NO_REGS
+ && (SECONDARY_OUTPUT_RELOAD_CLASS (class,
+ GET_MODE (SUBREG_REG (out)),
+ SUBREG_REG (out))
+ == NO_REGS))
+#endif
+ ))
+ {
+ out_subreg_loc = outloc;
+ outloc = &SUBREG_REG (out);
+ out = *outloc;
+#ifndef LOAD_EXTEND_OP
+ if (GET_CODE (out) == MEM
+ && GET_MODE_SIZE (GET_MODE (out)) > GET_MODE_SIZE (outmode))
+ abort ();
+#endif
+ outmode = GET_MODE (out);
+ }
+
+ /* If IN appears in OUT, we can't share any input-only reload for IN. */
+ if (in != 0 && out != 0 && GET_CODE (out) == MEM
+ && (GET_CODE (in) == REG || GET_CODE (in) == MEM)
+ && reg_overlap_mentioned_for_reload_p (in, XEXP (out, 0)))
+ dont_share = 1;
+
+ /* If IN is a SUBREG of a hard register, make a new REG. This
+ simplifies some of the cases below. */
+
+ if (in != 0 && GET_CODE (in) == SUBREG && GET_CODE (SUBREG_REG (in)) == REG
+ && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER)
+ in = gen_rtx (REG, GET_MODE (in),
+ REGNO (SUBREG_REG (in)) + SUBREG_WORD (in));
+
+ /* Similarly for OUT. */
+ if (out != 0 && GET_CODE (out) == SUBREG
+ && GET_CODE (SUBREG_REG (out)) == REG
+ && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER)
+ out = gen_rtx (REG, GET_MODE (out),
+ REGNO (SUBREG_REG (out)) + SUBREG_WORD (out));
+
+ /* Narrow down the class of register wanted if that is
+ desirable on this machine for efficiency. */
+ if (in != 0)
+ class = PREFERRED_RELOAD_CLASS (in, class);
+
+ /* Output reloads may need analogous treatment, different in detail. */
+#ifdef PREFERRED_OUTPUT_RELOAD_CLASS
+ if (out != 0)
+ class = PREFERRED_OUTPUT_RELOAD_CLASS (out, class);
+#endif
+
+ /* Make sure we use a class that can handle the actual pseudo
+ inside any subreg. For example, on the 386, QImode regs
+ can appear within SImode subregs. Although GENERAL_REGS
+ can handle SImode, QImode needs a smaller class. */
+#ifdef LIMIT_RELOAD_CLASS
+ if (in_subreg_loc)
+ class = LIMIT_RELOAD_CLASS (inmode, class);
+ else if (in != 0 && GET_CODE (in) == SUBREG)
+ class = LIMIT_RELOAD_CLASS (GET_MODE (SUBREG_REG (in)), class);
+
+ if (out_subreg_loc)
+ class = LIMIT_RELOAD_CLASS (outmode, class);
+ if (out != 0 && GET_CODE (out) == SUBREG)
+ class = LIMIT_RELOAD_CLASS (GET_MODE (SUBREG_REG (out)), class);
+#endif
+
+ /* Verify that this class is at least possible for the mode that
+ is specified. */
+ if (this_insn_is_asm)
+ {
+ enum machine_mode mode;
+ if (GET_MODE_SIZE (inmode) > GET_MODE_SIZE (outmode))
+ mode = inmode;
+ else
+ mode = outmode;
+ if (mode == VOIDmode)
+ {
+ error_for_asm (this_insn, "cannot reload integer constant operand in `asm'");
+ mode = word_mode;
+ if (in != 0)
+ inmode = word_mode;
+ if (out != 0)
+ outmode = word_mode;
+ }
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (HARD_REGNO_MODE_OK (i, mode)
+ && TEST_HARD_REG_BIT (reg_class_contents[(int) class], i))
+ {
+ int nregs = HARD_REGNO_NREGS (i, mode);
+
+ int j;
+ for (j = 1; j < nregs; j++)
+ if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class], i + j))
+ break;
+ if (j == nregs)
+ break;
+ }
+ if (i == FIRST_PSEUDO_REGISTER)
+ {
+ error_for_asm (this_insn, "impossible register constraint in `asm'");
+ class = ALL_REGS;
+ }
+ }
+
+ if (class == NO_REGS)
+ abort ();
+
+ /* We can use an existing reload if the class is right
+ and at least one of IN and OUT is a match
+ and the other is at worst neutral.
+ (A zero compared against anything is neutral.)
+
+ If SMALL_REGISTER_CLASSES, don't use existing reloads unless they are
+ for the same thing since that can cause us to need more reload registers
+ than we otherwise would. */
+
+ for (i = 0; i < n_reloads; i++)
+ if ((reg_class_subset_p (class, reload_reg_class[i])
+ || reg_class_subset_p (reload_reg_class[i], class))
+ /* If the existing reload has a register, it must fit our class. */
+ && (reload_reg_rtx[i] == 0
+ || TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ true_regnum (reload_reg_rtx[i])))
+ && ((in != 0 && MATCHES (reload_in[i], in) && ! dont_share
+ && (out == 0 || reload_out[i] == 0 || MATCHES (reload_out[i], out)))
+ ||
+ (out != 0 && MATCHES (reload_out[i], out)
+ && (in == 0 || reload_in[i] == 0 || MATCHES (reload_in[i], in))))
+ && (reg_class_size[(int) class] == 1
+#ifdef SMALL_REGISTER_CLASSES
+ || 1
+#endif
+ )
+ && MERGABLE_RELOADS (type, reload_when_needed[i],
+ opnum, reload_opnum[i]))
+ break;
+
+ /* Reloading a plain reg for input can match a reload to postincrement
+ that reg, since the postincrement's value is the right value.
+ Likewise, it can match a preincrement reload, since we regard
+ the preincrementation as happening before any ref in this insn
+ to that register. */
+ if (i == n_reloads)
+ for (i = 0; i < n_reloads; i++)
+ if ((reg_class_subset_p (class, reload_reg_class[i])
+ || reg_class_subset_p (reload_reg_class[i], class))
+ /* If the existing reload has a register, it must fit our class. */
+ && (reload_reg_rtx[i] == 0
+ || TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ true_regnum (reload_reg_rtx[i])))
+ && out == 0 && reload_out[i] == 0 && reload_in[i] != 0
+ && ((GET_CODE (in) == REG
+ && (GET_CODE (reload_in[i]) == POST_INC
+ || GET_CODE (reload_in[i]) == POST_DEC
+ || GET_CODE (reload_in[i]) == PRE_INC
+ || GET_CODE (reload_in[i]) == PRE_DEC)
+ && MATCHES (XEXP (reload_in[i], 0), in))
+ ||
+ (GET_CODE (reload_in[i]) == REG
+ && (GET_CODE (in) == POST_INC
+ || GET_CODE (in) == POST_DEC
+ || GET_CODE (in) == PRE_INC
+ || GET_CODE (in) == PRE_DEC)
+ && MATCHES (XEXP (in, 0), reload_in[i])))
+ && (reg_class_size[(int) class] == 1
+#ifdef SMALL_REGISTER_CLASSES
+ || 1
+#endif
+ )
+ && MERGABLE_RELOADS (type, reload_when_needed[i],
+ opnum, reload_opnum[i]))
+ {
+ /* Make sure reload_in ultimately has the increment,
+ not the plain register. */
+ if (GET_CODE (in) == REG)
+ in = reload_in[i];
+ break;
+ }
+
+ if (i == n_reloads)
+ {
+ /* See if we need a secondary reload register to move between CLASS
+ and IN or CLASS and OUT. Get the icode and push any required reloads
+ needed for each of them if so. */
+
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ if (in != 0)
+ secondary_in_reload
+ = push_secondary_reload (1, in, opnum, optional, class, inmode, type,
+ &secondary_in_icode);
+#endif
+
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+ if (out != 0 && GET_CODE (out) != SCRATCH)
+ secondary_out_reload
+ = push_secondary_reload (0, out, opnum, optional, class, outmode,
+ type, &secondary_out_icode);
+#endif
+
+ /* We found no existing reload suitable for re-use.
+ So add an additional reload. */
+
+ i = n_reloads;
+ reload_in[i] = in;
+ reload_out[i] = out;
+ reload_reg_class[i] = class;
+ reload_inmode[i] = inmode;
+ reload_outmode[i] = outmode;
+ reload_reg_rtx[i] = 0;
+ reload_optional[i] = optional;
+ reload_inc[i] = 0;
+ reload_nocombine[i] = 0;
+ reload_in_reg[i] = inloc ? *inloc : 0;
+ reload_opnum[i] = opnum;
+ reload_when_needed[i] = type;
+ reload_secondary_in_reload[i] = secondary_in_reload;
+ reload_secondary_out_reload[i] = secondary_out_reload;
+ reload_secondary_in_icode[i] = secondary_in_icode;
+ reload_secondary_out_icode[i] = secondary_out_icode;
+ reload_secondary_p[i] = 0;
+
+ n_reloads++;
+
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* If a memory location is needed for the copy, make one. */
+ if (in != 0 && GET_CODE (in) == REG
+ && REGNO (in) < FIRST_PSEUDO_REGISTER
+ && SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (REGNO (in)),
+ class, inmode))
+ get_secondary_mem (in, inmode, opnum, type);
+
+ if (out != 0 && GET_CODE (out) == REG
+ && REGNO (out) < FIRST_PSEUDO_REGISTER
+ && SECONDARY_MEMORY_NEEDED (class, REGNO_REG_CLASS (REGNO (out)),
+ outmode))
+ get_secondary_mem (out, outmode, opnum, type);
+#endif
+ }
+ else
+ {
+ /* We are reusing an existing reload,
+ but we may have additional information for it.
+ For example, we may now have both IN and OUT
+ while the old one may have just one of them. */
+
+ if (inmode != VOIDmode)
+ reload_inmode[i] = inmode;
+ if (outmode != VOIDmode)
+ reload_outmode[i] = outmode;
+ if (in != 0)
+ reload_in[i] = in;
+ if (out != 0)
+ reload_out[i] = out;
+ if (reg_class_subset_p (class, reload_reg_class[i]))
+ reload_reg_class[i] = class;
+ reload_optional[i] &= optional;
+ if (MERGE_TO_OTHER (type, reload_when_needed[i],
+ opnum, reload_opnum[i]))
+ reload_when_needed[i] = RELOAD_OTHER;
+ reload_opnum[i] = MIN (reload_opnum[i], opnum);
+ }
+
+ /* If the ostensible rtx being reloaded differs from the rtx found
+ in the location to substitute, this reload is not safe to combine
+ because we cannot reliably tell whether it appears in the insn. */
+
+ if (in != 0 && in != *inloc)
+ reload_nocombine[i] = 1;
+
+#if 0
+ /* This was replaced by changes in find_reloads_address_1 and the new
+ function inc_for_reload, which go with a new meaning of reload_inc. */
+
+ /* If this is an IN/OUT reload in an insn that sets the CC,
+ it must be for an autoincrement. It doesn't work to store
+ the incremented value after the insn because that would clobber the CC.
+ So we must fetch the value reloaded from, increment it,
+ store it back, then decrement it again. */
+ if (out != 0 && sets_cc0_p (PATTERN (this_insn)))
+ {
+ out = 0;
+ reload_out[i] = 0;
+ reload_inc[i] = find_inc_amount (PATTERN (this_insn), in);
+ /* If we did not find a nonzero amount-to-increment-by,
+ that contradicts the belief that IN is being incremented
+ in an address in this insn. */
+ if (reload_inc[i] == 0)
+ abort ();
+ }
+#endif
+
+ /* If we will replace IN and OUT with the reload-reg,
+ record where they are located so that substitution need
+ not do a tree walk. */
+
+ if (replace_reloads)
+ {
+ if (inloc != 0)
+ {
+ register struct replacement *r = &replacements[n_replacements++];
+ r->what = i;
+ r->subreg_loc = in_subreg_loc;
+ r->where = inloc;
+ r->mode = inmode;
+ }
+ if (outloc != 0 && outloc != inloc)
+ {
+ register struct replacement *r = &replacements[n_replacements++];
+ r->what = i;
+ r->where = outloc;
+ r->subreg_loc = out_subreg_loc;
+ r->mode = outmode;
+ }
+ }
+
+ /* If this reload is just being introduced and it has both
+ an incoming quantity and an outgoing quantity that are
+ supposed to be made to match, see if either one of the two
+ can serve as the place to reload into.
+
+ If one of them is acceptable, set reload_reg_rtx[i]
+ to that one. */
+
+ if (in != 0 && out != 0 && in != out && reload_reg_rtx[i] == 0)
+ {
+ reload_reg_rtx[i] = find_dummy_reload (in, out, inloc, outloc,
+ inmode, outmode,
+ reload_reg_class[i], i);
+
+ /* If the outgoing register already contains the same value
+ as the incoming one, we can dispense with loading it.
+ The easiest way to tell the caller that is to give a phony
+ value for the incoming operand (same as outgoing one). */
+ if (reload_reg_rtx[i] == out
+ && (GET_CODE (in) == REG || CONSTANT_P (in))
+ && 0 != find_equiv_reg (in, this_insn, 0, REGNO (out),
+ static_reload_reg_p, i, inmode))
+ reload_in[i] = out;
+ }
+
+ /* If this is an input reload and the operand contains a register that
+ dies in this insn and is used nowhere else, see if it is the right class
+ to be used for this reload. Use it if so. (This occurs most commonly
+ in the case of paradoxical SUBREGs and in-out reloads). We cannot do
+ this if it is also an output reload that mentions the register unless
+ the output is a SUBREG that clobbers an entire register.
+
+ Note that the operand might be one of the spill regs, if it is a
+ pseudo reg and we are in a block where spilling has not taken place.
+ But if there is no spilling in this block, that is OK.
+ An explicitly used hard reg cannot be a spill reg. */
+
+ if (reload_reg_rtx[i] == 0 && in != 0)
+ {
+ rtx note;
+ int regno;
+
+ for (note = REG_NOTES (this_insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && GET_CODE (XEXP (note, 0)) == REG
+ && (regno = REGNO (XEXP (note, 0))) < FIRST_PSEUDO_REGISTER
+ && reg_mentioned_p (XEXP (note, 0), in)
+ && ! refers_to_regno_for_reload_p (regno,
+ (regno
+ + HARD_REGNO_NREGS (regno,
+ inmode)),
+ PATTERN (this_insn), inloc)
+ /* If this is also an output reload, IN cannot be used as
+ the reload register if it is set in this insn unless IN
+ is also OUT. */
+ && (out == 0 || in == out
+ || ! hard_reg_set_here_p (regno,
+ (regno
+ + HARD_REGNO_NREGS (regno,
+ inmode)),
+ PATTERN (this_insn)))
+ /* ??? Why is this code so different from the previous?
+ Is there any simple coherent way to describe the two together?
+ What's going on here? */
+ && (in != out
+ || (GET_CODE (in) == SUBREG
+ && (((GET_MODE_SIZE (GET_MODE (in)) + (UNITS_PER_WORD - 1))
+ / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
+ /* Make sure the operand fits in the reg that dies. */
+ && GET_MODE_SIZE (inmode) <= GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
+ && HARD_REGNO_MODE_OK (regno, inmode)
+ && GET_MODE_SIZE (outmode) <= GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
+ && HARD_REGNO_MODE_OK (regno, outmode)
+ && TEST_HARD_REG_BIT (reg_class_contents[(int) class], regno)
+ && !fixed_regs[regno])
+ {
+ reload_reg_rtx[i] = gen_rtx (REG, inmode, regno);
+ break;
+ }
+ }
+
+ if (out)
+ output_reloadnum = i;
+
+ return i;
+}
+
+/* Record an additional place we must replace a value
+ for which we have already recorded a reload.
+ RELOADNUM is the value returned by push_reload
+ when the reload was recorded.
+ This is used in insn patterns that use match_dup. */
+
+static void
+push_replacement (loc, reloadnum, mode)
+ rtx *loc;
+ int reloadnum;
+ enum machine_mode mode;
+{
+ if (replace_reloads)
+ {
+ register struct replacement *r = &replacements[n_replacements++];
+ r->what = reloadnum;
+ r->where = loc;
+ r->subreg_loc = 0;
+ r->mode = mode;
+ }
+}
+
+/* Transfer all replacements that used to be in reload FROM to be in
+ reload TO. */
+
+void
+transfer_replacements (to, from)
+ int to, from;
+{
+ int i;
+
+ for (i = 0; i < n_replacements; i++)
+ if (replacements[i].what == from)
+ replacements[i].what = to;
+}
+
+/* If there is only one output reload, and it is not for an earlyclobber
+ operand, try to combine it with a (logically unrelated) input reload
+ to reduce the number of reload registers needed.
+
+ This is safe if the input reload does not appear in
+ the value being output-reloaded, because this implies
+ it is not needed any more once the original insn completes.
+
+ If that doesn't work, see if we can use any of the registers that
+ die in this insn as a reload register. We can if it is of the right
+ class and does not appear in the value being output-reloaded. */
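+
+/* Editor's note: e.g. if an insn loads pseudo 100 as an input and stores
+   a result into pseudo 101 as its only output, and pseudo 100 does not
+   appear in that result, one reload register can carry 100 into the insn
+   and then carry the result out to 101.  */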
+
+static void
+combine_reloads ()
+{
+ int i;
+ int output_reload = -1;
+ rtx note;
+
+ /* Find the output reload; return unless there is exactly one
+ and that one is mandatory. */
+
+ for (i = 0; i < n_reloads; i++)
+ if (reload_out[i] != 0)
+ {
+ if (output_reload >= 0)
+ return;
+ output_reload = i;
+ }
+
+ if (output_reload < 0 || reload_optional[output_reload])
+ return;
+
+ /* An input-output reload isn't combinable. */
+
+ if (reload_in[output_reload] != 0)
+ return;
+
+ /* If this reload is for an earlyclobber operand, we can't do anything. */
+ if (earlyclobber_operand_p (reload_out[output_reload]))
+ return;
+
+ /* Check each input reload; can we combine it? */
+
+ for (i = 0; i < n_reloads; i++)
+ if (reload_in[i] && ! reload_optional[i] && ! reload_nocombine[i]
+ /* Life span of this reload must not extend past main insn. */
+ && reload_when_needed[i] != RELOAD_FOR_OUTPUT_ADDRESS
+ && reload_when_needed[i] != RELOAD_OTHER
+ && (CLASS_MAX_NREGS (reload_reg_class[i], reload_inmode[i])
+ == CLASS_MAX_NREGS (reload_reg_class[output_reload],
+ reload_outmode[output_reload]))
+ && reload_inc[i] == 0
+ && reload_reg_rtx[i] == 0
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* Don't combine two reloads with different secondary
+ memory locations. */
+ && (secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[i]] == 0
+ || secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[output_reload]] == 0
+ || rtx_equal_p (secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[i]],
+ secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[output_reload]]))
+#endif
+#ifdef SMALL_REGISTER_CLASSES
+ && reload_reg_class[i] == reload_reg_class[output_reload]
+#else
+ && (reg_class_subset_p (reload_reg_class[i],
+ reload_reg_class[output_reload])
+ || reg_class_subset_p (reload_reg_class[output_reload],
+ reload_reg_class[i]))
+#endif
+ && (MATCHES (reload_in[i], reload_out[output_reload])
+ /* Args reversed because the first arg seems to be
+ the one that we imagine being modified
+ while the second is the one that might be affected. */
+ || (! reg_overlap_mentioned_for_reload_p (reload_out[output_reload],
+ reload_in[i])
+ /* However, if the input is a register that appears inside
+ the output, then we also can't share.
+ Imagine (set (mem (reg 69)) (plus (reg 69) ...)).
+ If the same reload reg is used for both reg 69 and the
+ result to be stored in memory, then that result
+ will clobber the address of the memory ref. */
+ && ! (GET_CODE (reload_in[i]) == REG
+ && reg_overlap_mentioned_for_reload_p (reload_in[i],
+ reload_out[output_reload]))))
+ && (reg_class_size[(int) reload_reg_class[i]] == 1
+#ifdef SMALL_REGISTER_CLASSES
+ || 1
+#endif
+ )
+ /* We will allow making things slightly worse by combining an
+ input and an output, but no worse than that. */
+ && (reload_when_needed[i] == RELOAD_FOR_INPUT
+ || reload_when_needed[i] == RELOAD_FOR_OUTPUT))
+ {
+ int j;
+
+ /* We have found a reload to combine with! */
+ reload_out[i] = reload_out[output_reload];
+ reload_outmode[i] = reload_outmode[output_reload];
+ /* Mark the old output reload as inoperative. */
+ reload_out[output_reload] = 0;
+ /* The combined reload is needed for the entire insn. */
+ reload_when_needed[i] = RELOAD_OTHER;
+ /* If the output reload had a secondary reload, copy it. */
+ if (reload_secondary_out_reload[output_reload] != -1)
+ {
+ reload_secondary_out_reload[i]
+ = reload_secondary_out_reload[output_reload];
+ reload_secondary_out_icode[i]
+ = reload_secondary_out_icode[output_reload];
+ }
+
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* Copy any secondary MEM. */
+ if (secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[output_reload]] != 0)
+ secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[i]]
+ = secondary_memlocs_elim[(int) reload_outmode[output_reload]][reload_opnum[output_reload]];
+#endif
+ /* If required, minimize the register class. */
+ if (reg_class_subset_p (reload_reg_class[output_reload],
+ reload_reg_class[i]))
+ reload_reg_class[i] = reload_reg_class[output_reload];
+
+ /* Transfer all replacements from the old reload to the combined. */
+ for (j = 0; j < n_replacements; j++)
+ if (replacements[j].what == output_reload)
+ replacements[j].what = i;
+
+ return;
+ }
+
+ /* If this insn has only one operand that is modified or written (assumed
+ to be the first), it must be the one corresponding to this reload. It
+ is safe to use anything that dies in this insn for that output provided
+ that it does not occur in the output (we already know it isn't an
+ earlyclobber). If this is an asm insn, give up. */
+
+ if (INSN_CODE (this_insn) == -1)
+ return;
+
+ for (i = 1; i < insn_n_operands[INSN_CODE (this_insn)]; i++)
+ if (insn_operand_constraint[INSN_CODE (this_insn)][i][0] == '='
+ || insn_operand_constraint[INSN_CODE (this_insn)][i][0] == '+')
+ return;
+
+ /* See if some hard register that dies in this insn and is not used in
+ the output is the right class. Only works if the register we pick
+ up can fully hold our output reload. */
+ for (note = REG_NOTES (this_insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && GET_CODE (XEXP (note, 0)) == REG
+ && ! reg_overlap_mentioned_for_reload_p (XEXP (note, 0),
+ reload_out[output_reload])
+ && REGNO (XEXP (note, 0)) < FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_MODE_OK (REGNO (XEXP (note, 0)), reload_outmode[output_reload])
+ && TEST_HARD_REG_BIT (reg_class_contents[(int) reload_reg_class[output_reload]],
+ REGNO (XEXP (note, 0)))
+ && (HARD_REGNO_NREGS (REGNO (XEXP (note, 0)), reload_outmode[output_reload])
+ <= HARD_REGNO_NREGS (REGNO (XEXP (note, 0)), GET_MODE (XEXP (note, 0))))
+ && ! fixed_regs[REGNO (XEXP (note, 0))])
+ {
+ reload_reg_rtx[output_reload] = gen_rtx (REG,
+ reload_outmode[output_reload],
+ REGNO (XEXP (note, 0)));
+ return;
+ }
+}
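+
+/* A sketch of the combining above, with made-up register numbers: in
+   (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 4))), if pseudo 100
+   got no hard reg and the constraints gave operand 0 an output reload and
+   operand 1 a separate input reload, the two reloads satisfy MATCHES, so
+   one RELOAD_OTHER reload register carries the value both in and out
+   instead of tying up two spill registers. */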
+
+/* Try to find a reload register for an in-out reload (expressions IN and OUT).
+ See if one of IN and OUT is a register that may be used;
+ this is desirable since a spill-register won't be needed.
+ If so, return the register rtx that proves acceptable.
+
+ INLOC and OUTLOC are locations where IN and OUT appear in the insn.
+ CLASS is the register class required for the reload.
+
+ If FOR_REAL is >= 0, it is the number of the reload,
+ and in some cases when it can be discovered that OUT doesn't need
+ to be computed, clear out reload_out[FOR_REAL].
+
+ If FOR_REAL is -1, this should not be done, because this call
+ is just to see if a register can be found, not to find and install it. */
+
+static rtx
+find_dummy_reload (real_in, real_out, inloc, outloc,
+ inmode, outmode, class, for_real)
+ rtx real_in, real_out;
+ rtx *inloc, *outloc;
+ enum machine_mode inmode, outmode;
+ enum reg_class class;
+ int for_real;
+{
+ rtx in = real_in;
+ rtx out = real_out;
+ int in_offset = 0;
+ int out_offset = 0;
+ rtx value = 0;
+
+ /* If operands exceed a word, we can't use either of them
+ unless they have the same size. */
+ if (GET_MODE_SIZE (outmode) != GET_MODE_SIZE (inmode)
+ && (GET_MODE_SIZE (outmode) > UNITS_PER_WORD
+ || GET_MODE_SIZE (inmode) > UNITS_PER_WORD))
+ return 0;
+
+ /* Find the inside of any subregs. */
+ while (GET_CODE (out) == SUBREG)
+ {
+ out_offset = SUBREG_WORD (out);
+ out = SUBREG_REG (out);
+ }
+ while (GET_CODE (in) == SUBREG)
+ {
+ in_offset = SUBREG_WORD (in);
+ in = SUBREG_REG (in);
+ }
+
+ /* Narrow down the reg class, the same way push_reload will;
+ otherwise we might find a dummy now, but push_reload won't. */
+ class = PREFERRED_RELOAD_CLASS (in, class);
+
+ /* See if OUT will do. */
+ if (GET_CODE (out) == REG
+ && REGNO (out) < FIRST_PSEUDO_REGISTER)
+ {
+ register int regno = REGNO (out) + out_offset;
+ int nwords = HARD_REGNO_NREGS (regno, outmode);
+ rtx saved_rtx;
+
+ /* When we consider whether the insn uses OUT,
+ ignore references within IN. They don't prevent us
+ from copying IN into OUT, because those refs would
+ move into the insn that reloads IN.
+
+ However, we only ignore IN in its role as this reload.
+ If the insn uses IN elsewhere and it contains OUT,
+ that counts. We can't be sure it's the "same" operand
+ so it might not go through this reload. */
+ saved_rtx = *inloc;
+ *inloc = const0_rtx;
+
+ if (regno < FIRST_PSEUDO_REGISTER
+ /* A fixed reg that can overlap other regs better not be used
+ for reloading in any way. */
+#ifdef OVERLAPPING_REGNO_P
+ && ! (fixed_regs[regno] && OVERLAPPING_REGNO_P (regno))
+#endif
+ && ! refers_to_regno_for_reload_p (regno, regno + nwords,
+ PATTERN (this_insn), outloc))
+ {
+ int i;
+ for (i = 0; i < nwords; i++)
+ if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ regno + i))
+ break;
+
+ if (i == nwords)
+ {
+ if (GET_CODE (real_out) == REG)
+ value = real_out;
+ else
+ value = gen_rtx (REG, outmode, regno);
+ }
+ }
+
+ *inloc = saved_rtx;
+ }
+
+ /* Consider using IN if OUT was not acceptable
+ or if OUT dies in this insn (like the quotient in a divmod insn).
+ We can't use IN unless it dies in this insn,
+ which means we must know accurately which hard regs are live.
+ Also, the result can't go in IN if IN is used within OUT. */
+ if (hard_regs_live_known
+ && GET_CODE (in) == REG
+ && REGNO (in) < FIRST_PSEUDO_REGISTER
+ && (value == 0
+ || find_reg_note (this_insn, REG_UNUSED, real_out))
+ && find_reg_note (this_insn, REG_DEAD, real_in)
+ && !fixed_regs[REGNO (in)]
+ && HARD_REGNO_MODE_OK (REGNO (in),
+ /* The only case where out and real_out might
+ have different modes is where real_out
+ is a subreg, and in that case, out
+ has a real mode. */
+ (GET_MODE (out) != VOIDmode
+ ? GET_MODE (out) : outmode)))
+ {
+ register int regno = REGNO (in) + in_offset;
+ int nwords = HARD_REGNO_NREGS (regno, inmode);
+
+ if (! refers_to_regno_for_reload_p (regno, regno + nwords, out, NULL_PTR)
+ && ! hard_reg_set_here_p (regno, regno + nwords,
+ PATTERN (this_insn)))
+ {
+ int i;
+ for (i = 0; i < nwords; i++)
+ if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ regno + i))
+ break;
+
+ if (i == nwords)
+ {
+ /* If we were going to use OUT as the reload reg
+ and changed our mind, it means OUT is a dummy that
+ dies here. So don't bother copying value to it. */
+ if (for_real >= 0 && value == real_out)
+ reload_out[for_real] = 0;
+ if (GET_CODE (real_in) == REG)
+ value = real_in;
+ else
+ value = gen_rtx (REG, inmode, regno);
+ }
+ }
+ }
+
+ return value;
+}
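+
+/* For instance (made-up numbers): in (set (reg:SI 2) (plus:SI (reg:SI 2)
+   (const_int 1))), where hard reg 2 is both the dying input and the
+   output, find_dummy_reload can return (reg:SI 2) itself, and no spill
+   register is needed at all. */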
+
+/* This page contains subroutines used mainly for determining
+ whether the IN or an OUT of a reload can serve as the
+ reload register. */
+
+/* Return 1 if X is an operand of an insn that is being earlyclobbered. */
+
+static int
+earlyclobber_operand_p (x)
+ rtx x;
+{
+ int i;
+
+ for (i = 0; i < n_earlyclobbers; i++)
+ if (reload_earlyclobbers[i] == x)
+ return 1;
+
+ return 0;
+}
+
+/* Return 1 if expression X alters a hard reg in the range
+ from BEG_REGNO (inclusive) to END_REGNO (exclusive),
+ either explicitly or in the guise of a pseudo-reg allocated to REGNO.
+ X should be the body of an instruction. */
+
+static int
+hard_reg_set_here_p (beg_regno, end_regno, x)
+ register int beg_regno, end_regno;
+ rtx x;
+{
+ if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
+ {
+ register rtx op0 = SET_DEST (x);
+ while (GET_CODE (op0) == SUBREG)
+ op0 = SUBREG_REG (op0);
+ if (GET_CODE (op0) == REG)
+ {
+ register int r = REGNO (op0);
+ /* See if this reg overlaps range under consideration. */
+ if (r < end_regno
+ && r + HARD_REGNO_NREGS (r, GET_MODE (op0)) > beg_regno)
+ return 1;
+ }
+ }
+ else if (GET_CODE (x) == PARALLEL)
+ {
+ register int i = XVECLEN (x, 0) - 1;
+ for (; i >= 0; i--)
+ if (hard_reg_set_here_p (beg_regno, end_regno, XVECEXP (x, 0, i)))
+ return 1;
+ }
+
+ return 0;
+}
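+
+/* Note that multi-word values widen the overlap test above: a SET whose
+   destination is a double-word hard reg counts as setting the following
+   register too, since HARD_REGNO_NREGS reports how many consecutive hard
+   regs the mode occupies. */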
+
+/* Return 1 if ADDR is a valid memory address for mode MODE,
+ and check that each pseudo reg has the proper kind of
+ hard reg. */
+
+int
+strict_memory_address_p (mode, addr)
+ enum machine_mode mode;
+ register rtx addr;
+{
+ GO_IF_LEGITIMATE_ADDRESS (mode, addr, win);
+ return 0;
+
+ win:
+ return 1;
+}
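+
+/* GO_IF_LEGITIMATE_ADDRESS is the target's address predicate; it jumps
+   to the label WIN when ADDR is valid and falls through otherwise.
+   Since reload.c defines REG_OK_STRICT, the strict form is in effect
+   here: any base or index reg must already be an acceptable hard
+   register. */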
+
+/* Like rtx_equal_p except that it allows a REG and a SUBREG to match
+ if they are the same hard reg, and has special hacks for
+ autoincrement and autodecrement.
+ This is specifically intended for find_reloads to use
+ in determining whether two operands match.
+ X is the operand whose number is the lower of the two.
+
+ The value is 2 if Y contains a pre-increment that matches
+ a non-incrementing address in X. */
+
+/* ??? To be completely correct, we should arrange to pass
+ for X the output operand and for Y the input operand.
+ For now, we assume that the output operand has the lower number
+ because that is natural in (SET output (... input ...)). */
+
+int
+operands_match_p (x, y)
+ register rtx x, y;
+{
+ register int i;
+ register RTX_CODE code = GET_CODE (x);
+ register char *fmt;
+ int success_2;
+
+ if (x == y)
+ return 1;
+ if ((code == REG || (code == SUBREG && GET_CODE (SUBREG_REG (x)) == REG))
+ && (GET_CODE (y) == REG || (GET_CODE (y) == SUBREG
+ && GET_CODE (SUBREG_REG (y)) == REG)))
+ {
+ register int j;
+
+ if (code == SUBREG)
+ {
+ i = REGNO (SUBREG_REG (x));
+ if (i >= FIRST_PSEUDO_REGISTER)
+ goto slow;
+ i += SUBREG_WORD (x);
+ }
+ else
+ i = REGNO (x);
+
+ if (GET_CODE (y) == SUBREG)
+ {
+ j = REGNO (SUBREG_REG (y));
+ if (j >= FIRST_PSEUDO_REGISTER)
+ goto slow;
+ j += SUBREG_WORD (y);
+ }
+ else
+ j = REGNO (y);
+
+ /* On a WORDS_BIG_ENDIAN machine, point to the last register of a
+ multiple hard register group, so that for example (reg:DI 0) and
+ (reg:SI 1) will be considered the same register. */
+ if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD
+ && i < FIRST_PSEUDO_REGISTER)
+ i += (GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD) - 1;
+ if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (GET_MODE (y)) > UNITS_PER_WORD
+ && j < FIRST_PSEUDO_REGISTER)
+ j += (GET_MODE_SIZE (GET_MODE (y)) / UNITS_PER_WORD) - 1;
+
+ return i == j;
+ }
+ /* If two operands must match, because they are really a single
+ operand of an assembler insn, then two postincrements are invalid
+ because the assembler insn would increment only once.
+ On the other hand, a postincrement matches ordinary indexing
+ if the postincrement is the output operand. */
+ if (code == POST_DEC || code == POST_INC)
+ return operands_match_p (XEXP (x, 0), y);
+ /* Two preincrements are invalid
+ because the assembler insn would increment only once.
+ On the other hand, a preincrement matches ordinary indexing
+ if the preincrement is the input operand.
+ In this case, return 2, since some callers need to do special
+ things when this happens. */
+ if (GET_CODE (y) == PRE_DEC || GET_CODE (y) == PRE_INC)
+ return operands_match_p (x, XEXP (y, 0)) ? 2 : 0;
+
+ slow:
+
+ /* Now we have disposed of all the cases
+ in which different rtx codes can match. */
+ if (code != GET_CODE (y))
+ return 0;
+ if (code == LABEL_REF)
+ return XEXP (x, 0) == XEXP (y, 0);
+ if (code == SYMBOL_REF)
+ return XSTR (x, 0) == XSTR (y, 0);
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ /* Compare the elements. If any pair of corresponding elements
+ fails to match, return 0 for the whole thing. */
+
+ success_2 = 0;
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ int val;
+ switch (fmt[i])
+ {
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'e':
+ val = operands_match_p (XEXP (x, i), XEXP (y, i));
+ if (val == 0)
+ return 0;
+ /* If any subexpression returns 2,
+ we should return 2 if we are successful. */
+ if (val == 2)
+ success_2 = 1;
+ break;
+
+ case '0':
+ break;
+
+ /* It is believed that rtx's at this level will never
+ contain anything but integers and other rtx's,
+ except for within LABEL_REFs and SYMBOL_REFs. */
+ default:
+ abort ();
+ }
+ }
+ return 1 + success_2;
+}
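+
+/* Two sketches of the special cases above: on a WORDS_BIG_ENDIAN machine
+   with 32-bit words, (reg:DI 0) and (reg:SI 1) both normalize to register
+   number 1 and therefore match; and matching (mem:SI (reg:SI 3)) against
+   (mem:SI (pre_inc (reg:SI 3))) returns 2 rather than 1, telling the
+   caller that special handling is needed. */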
+
+/* Return the number of times character C occurs in string S. */
+
+int
+n_occurrences (c, s)
+ char c;
+ char *s;
+{
+ int n = 0;
+ while (*s)
+ n += (*s++ == c);
+ return n;
+}
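+
+/* find_reloads uses this on constraint strings: for a constraint such as
+   "r,m", n_occurrences (',', ...) returns 1, giving two alternatives. */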
+
+/* Describe the range of registers or memory referenced by X.
+ If X is a register, set REG_FLAG and put the first register
+ number into START and the last plus one into END.
+ If X is a memory reference, put a base address into BASE
+ and a range of integer offsets into START and END.
+ If X is pushing on the stack, we can assume it causes no trouble,
+ so we set the SAFE field. */
+
+static struct decomposition
+decompose (x)
+ rtx x;
+{
+ struct decomposition val;
+ int all_const = 0;
+
+ val.reg_flag = 0;
+ val.safe = 0;
+ if (GET_CODE (x) == MEM)
+ {
+ rtx base, offset = 0;
+ rtx addr = XEXP (x, 0);
+
+ if (GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
+ || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
+ {
+ val.base = XEXP (addr, 0);
+ val.start = - GET_MODE_SIZE (GET_MODE (x));
+ val.end = GET_MODE_SIZE (GET_MODE (x));
+ val.safe = REGNO (val.base) == STACK_POINTER_REGNUM;
+ return val;
+ }
+
+ if (GET_CODE (addr) == CONST)
+ {
+ addr = XEXP (addr, 0);
+ all_const = 1;
+ }
+ if (GET_CODE (addr) == PLUS)
+ {
+ if (CONSTANT_P (XEXP (addr, 0)))
+ {
+ base = XEXP (addr, 1);
+ offset = XEXP (addr, 0);
+ }
+ else if (CONSTANT_P (XEXP (addr, 1)))
+ {
+ base = XEXP (addr, 0);
+ offset = XEXP (addr, 1);
+ }
+ }
+
+ if (offset == 0)
+ {
+ base = addr;
+ offset = const0_rtx;
+ }
+ if (GET_CODE (offset) == CONST)
+ offset = XEXP (offset, 0);
+ if (GET_CODE (offset) == PLUS)
+ {
+ if (GET_CODE (XEXP (offset, 0)) == CONST_INT)
+ {
+ base = gen_rtx (PLUS, GET_MODE (base), base, XEXP (offset, 1));
+ offset = XEXP (offset, 0);
+ }
+ else if (GET_CODE (XEXP (offset, 1)) == CONST_INT)
+ {
+ base = gen_rtx (PLUS, GET_MODE (base), base, XEXP (offset, 0));
+ offset = XEXP (offset, 1);
+ }
+ else
+ {
+ base = gen_rtx (PLUS, GET_MODE (base), base, offset);
+ offset = const0_rtx;
+ }
+ }
+ else if (GET_CODE (offset) != CONST_INT)
+ {
+ base = gen_rtx (PLUS, GET_MODE (base), base, offset);
+ offset = const0_rtx;
+ }
+
+ if (all_const && GET_CODE (base) == PLUS)
+ base = gen_rtx (CONST, GET_MODE (base), base);
+
+ if (GET_CODE (offset) != CONST_INT)
+ abort ();
+
+ val.start = INTVAL (offset);
+ val.end = val.start + GET_MODE_SIZE (GET_MODE (x));
+ val.base = base;
+ return val;
+ }
+ else if (GET_CODE (x) == REG)
+ {
+ val.reg_flag = 1;
+ val.start = true_regnum (x);
+ if (val.start < 0)
+ {
+ /* A pseudo with no hard reg. */
+ val.start = REGNO (x);
+ val.end = val.start + 1;
+ }
+ else
+ /* A hard reg. */
+ val.end = val.start + HARD_REGNO_NREGS (val.start, GET_MODE (x));
+ }
+ else if (GET_CODE (x) == SUBREG)
+ {
+ if (GET_CODE (SUBREG_REG (x)) != REG)
+ /* This could be more precise, but it's good enough. */
+ return decompose (SUBREG_REG (x));
+ val.reg_flag = 1;
+ val.start = true_regnum (x);
+ if (val.start < 0)
+ return decompose (SUBREG_REG (x));
+ else
+ /* A hard reg. */
+ val.end = val.start + HARD_REGNO_NREGS (val.start, GET_MODE (x));
+ }
+ else if (CONSTANT_P (x)
+ /* This hasn't been assigned yet, so it can't conflict yet. */
+ || GET_CODE (x) == SCRATCH)
+ val.safe = 1;
+ else
+ abort ();
+ return val;
+}
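+
+/* As a sketch: (mem:SI (plus:SI (reg fp) (const_int 8))) decomposes into
+   base = the frame pointer rtx with start = 8 and end = 12, while a
+   pseudo such as (reg:SI 100) that got no hard reg yields reg_flag = 1
+   and the range [100, 101). */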
+
+/* Return 1 if altering Y will not modify the value of X.
+ Y is also described by YDATA, which should be decompose (Y). */
+
+static int
+immune_p (x, y, ydata)
+ rtx x, y;
+ struct decomposition ydata;
+{
+ struct decomposition xdata;
+
+ if (ydata.reg_flag)
+ return !refers_to_regno_for_reload_p (ydata.start, ydata.end, x, NULL_PTR);
+ if (ydata.safe)
+ return 1;
+
+ if (GET_CODE (y) != MEM)
+ abort ();
+ /* If Y is memory and X is not, Y can't affect X. */
+ if (GET_CODE (x) != MEM)
+ return 1;
+
+ xdata = decompose (x);
+
+ if (! rtx_equal_p (xdata.base, ydata.base))
+ {
+ /* If bases are distinct symbolic constants, there is no overlap. */
+ if (CONSTANT_P (xdata.base) && CONSTANT_P (ydata.base))
+ return 1;
+ /* Constants and stack slots never overlap. */
+ if (CONSTANT_P (xdata.base)
+ && (ydata.base == frame_pointer_rtx
+ || ydata.base == hard_frame_pointer_rtx
+ || ydata.base == stack_pointer_rtx))
+ return 1;
+ if (CONSTANT_P (ydata.base)
+ && (xdata.base == frame_pointer_rtx
+ || xdata.base == hard_frame_pointer_rtx
+ || xdata.base == stack_pointer_rtx))
+ return 1;
+ /* If either base is variable, we don't know anything. */
+ return 0;
+ }
+
+ return (xdata.start >= ydata.end || ydata.start >= xdata.end);
+}
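+
+/* For example, the frame slots (mem:SI (plus (reg fp) (const_int -4)))
+   and (mem:SI (plus (reg fp) (const_int -8))) share a base and cover the
+   disjoint byte ranges [-4,0) and [-8,-4), so each is immune to stores
+   into the other. */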
+
+/* Like immune_p, but first calls decompose on CLOBBER. */
+
+int
+safe_from_earlyclobber (op, clobber)
+ rtx op, clobber;
+{
+ struct decomposition early_data;
+
+ early_data = decompose (clobber);
+ return immune_p (op, clobber, early_data);
+}
+
+/* Main entry point of this file: search the body of INSN
+ for values that need reloading and record them with push_reload.
+ REPLACE nonzero means record also where the values occur
+ so that subst_reloads can be used.
+
+ IND_LEVELS says how many levels of indirection are supported by this
+ machine; a value of zero means that a memory reference is not a valid
+ memory address.
+
+ LIVE_KNOWN says we have valid information about which hard
+ regs are live at each point in the program; this is true when
+ we are called from global_alloc but false when stupid register
+ allocation has been done.
+
+ RELOAD_REG_P, if nonzero, is a vector indexed by hard reg number;
+ an element is nonnegative if that reg has been commandeered for reloading into.
+ It is copied into STATIC_RELOAD_REG_P and referenced from there
+ by various subroutines. */
+
+void
+find_reloads (insn, replace, ind_levels, live_known, reload_reg_p)
+ rtx insn;
+ int replace, ind_levels;
+ int live_known;
+ short *reload_reg_p;
+{
+#ifdef REGISTER_CONSTRAINTS
+
+ register int insn_code_number;
+ register int i, j;
+ int noperands;
+ /* These are the constraints for the insn. We don't change them. */
+ char *constraints1[MAX_RECOG_OPERANDS];
+ /* These start out as the constraints for the insn
+ and they are chewed up as we consider alternatives. */
+ char *constraints[MAX_RECOG_OPERANDS];
+ /* These are the preferred classes for an operand, or NO_REGS if it isn't
+ a register. */
+ enum reg_class preferred_class[MAX_RECOG_OPERANDS];
+ char pref_or_nothing[MAX_RECOG_OPERANDS];
+ /* Nonzero for a MEM operand whose entire address needs a reload. */
+ int address_reloaded[MAX_RECOG_OPERANDS];
+ /* Value of enum reload_type to use for operand. */
+ enum reload_type operand_type[MAX_RECOG_OPERANDS];
+ /* Value of enum reload_type to use within address of operand. */
+ enum reload_type address_type[MAX_RECOG_OPERANDS];
+ /* Save the usage of each operand. */
+ enum reload_usage { RELOAD_READ, RELOAD_READ_WRITE, RELOAD_WRITE } modified[MAX_RECOG_OPERANDS];
+ int no_input_reloads = 0, no_output_reloads = 0;
+ int n_alternatives;
+ int this_alternative[MAX_RECOG_OPERANDS];
+ char this_alternative_win[MAX_RECOG_OPERANDS];
+ char this_alternative_offmemok[MAX_RECOG_OPERANDS];
+ char this_alternative_earlyclobber[MAX_RECOG_OPERANDS];
+ int this_alternative_matches[MAX_RECOG_OPERANDS];
+ int swapped;
+ int goal_alternative[MAX_RECOG_OPERANDS];
+ int this_alternative_number;
+ int goal_alternative_number;
+ int operand_reloadnum[MAX_RECOG_OPERANDS];
+ int goal_alternative_matches[MAX_RECOG_OPERANDS];
+ int goal_alternative_matched[MAX_RECOG_OPERANDS];
+ char goal_alternative_win[MAX_RECOG_OPERANDS];
+ char goal_alternative_offmemok[MAX_RECOG_OPERANDS];
+ char goal_alternative_earlyclobber[MAX_RECOG_OPERANDS];
+ int goal_alternative_swapped;
+ int best;
+ int commutative;
+ char operands_match[MAX_RECOG_OPERANDS][MAX_RECOG_OPERANDS];
+ rtx substed_operand[MAX_RECOG_OPERANDS];
+ rtx body = PATTERN (insn);
+ rtx set = single_set (insn);
+ int goal_earlyclobber, this_earlyclobber;
+ enum machine_mode operand_mode[MAX_RECOG_OPERANDS];
+
+ this_insn = insn;
+ this_insn_is_asm = 0; /* Tentative. */
+ n_reloads = 0;
+ n_replacements = 0;
+ n_memlocs = 0;
+ n_earlyclobbers = 0;
+ replace_reloads = replace;
+ hard_regs_live_known = live_known;
+ static_reload_reg_p = reload_reg_p;
+
+ /* JUMP_INSNs and CALL_INSNs are not allowed to have any output reloads;
+ neither are insns that SET cc0. Insns that use cc0 are not allowed
+ to have any input reloads. */
+ if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CALL_INSN)
+ no_output_reloads = 1;
+
+#ifdef HAVE_cc0
+ if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
+ no_input_reloads = 1;
+ if (reg_set_p (cc0_rtx, PATTERN (insn)))
+ no_output_reloads = 1;
+#endif
+
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* The eliminated forms of any secondary memory locations are per-insn, so
+ clear them out here. */
+
+ bzero ((char *) secondary_memlocs_elim, sizeof secondary_memlocs_elim);
+#endif
+
+ /* Find what kind of insn this is. NOPERANDS gets number of operands.
+ Make OPERANDS point to a vector of operand values.
+ Make OPERAND_LOCS point to a vector of pointers to
+ where the operands were found.
+ Fill CONSTRAINTS and CONSTRAINTS1 with pointers to the
+ constraint-strings for this insn.
+ Return if the insn needs no reload processing. */
+
+ switch (GET_CODE (body))
+ {
+ case USE:
+ case CLOBBER:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return;
+
+ case SET:
+ /* Dispose quickly of (set (reg..) (reg..)) if both have hard regs and it
+ is cheap to move between them. If it is not, there may not be an insn
+ to do the copy, so we may need a reload. */
+ if (GET_CODE (SET_DEST (body)) == REG
+ && REGNO (SET_DEST (body)) < FIRST_PSEUDO_REGISTER
+ && GET_CODE (SET_SRC (body)) == REG
+ && REGNO (SET_SRC (body)) < FIRST_PSEUDO_REGISTER
+ && REGISTER_MOVE_COST (REGNO_REG_CLASS (REGNO (SET_SRC (body))),
+ REGNO_REG_CLASS (REGNO (SET_DEST (body)))) == 2)
+ return;
+ case PARALLEL:
+ case ASM_OPERANDS:
+ reload_n_operands = noperands = asm_noperands (body);
+ if (noperands >= 0)
+ {
+ /* This insn is an `asm' with operands. */
+
+ insn_code_number = -1;
+ this_insn_is_asm = 1;
+
+ /* expand_asm_operands makes sure there aren't too many operands. */
+ if (noperands > MAX_RECOG_OPERANDS)
+ abort ();
+
+ /* Now get the operand values and constraints out of the insn. */
+
+ decode_asm_operands (body, recog_operand, recog_operand_loc,
+ constraints, operand_mode);
+ if (noperands > 0)
+ {
+ bcopy ((char *) constraints, (char *) constraints1,
+ noperands * sizeof (char *));
+ n_alternatives = n_occurrences (',', constraints[0]) + 1;
+ for (i = 1; i < noperands; i++)
+ if (n_alternatives != n_occurrences (',', constraints[i]) + 1)
+ {
+ error_for_asm (insn, "operand constraints differ in number of alternatives");
+ /* Avoid further trouble with this insn. */
+ PATTERN (insn) = gen_rtx (USE, VOIDmode, const0_rtx);
+ n_reloads = 0;
+ return;
+ }
+ }
+ break;
+ }
+
+ default:
+ /* Ordinary insn: recognize it, get the operands via insn_extract
+ and get the constraints. */
+
+ insn_code_number = recog_memoized (insn);
+ if (insn_code_number < 0)
+ fatal_insn_not_found (insn);
+
+ reload_n_operands = noperands = insn_n_operands[insn_code_number];
+ n_alternatives = insn_n_alternatives[insn_code_number];
+ /* Just return "no reloads" if insn has no operands with constraints. */
+ if (n_alternatives == 0)
+ return;
+ insn_extract (insn);
+ for (i = 0; i < noperands; i++)
+ {
+ constraints[i] = constraints1[i]
+ = insn_operand_constraint[insn_code_number][i];
+ operand_mode[i] = insn_operand_mode[insn_code_number][i];
+ }
+ }
+
+ if (noperands == 0)
+ return;
+
+ commutative = -1;
+
+ /* If we will need to know, later, whether some pair of operands
+ are the same, we must compare them now and save the result.
+ Reloading the base and index registers will clobber them
+ and afterward they will fail to match. */
+
+ for (i = 0; i < noperands; i++)
+ {
+ register char *p;
+ register int c;
+
+ substed_operand[i] = recog_operand[i];
+ p = constraints[i];
+
+ modified[i] = RELOAD_READ;
+
+ /* Scan this operand's constraint to see if it is an output operand,
+ an in-out operand, is commutative, or should match another. */
+
+ while (c = *p++)
+ {
+ if (c == '=')
+ modified[i] = RELOAD_WRITE;
+ else if (c == '+')
+ modified[i] = RELOAD_READ_WRITE;
+ else if (c == '%')
+ {
+ /* The last operand should not be marked commutative. */
+ if (i == noperands - 1)
+ {
+ if (this_insn_is_asm)
+ warning_for_asm (this_insn,
+ "`%%' constraint used with last operand");
+ else
+ abort ();
+ }
+ else
+ commutative = i;
+ }
+ else if (c >= '0' && c <= '9')
+ {
+ c -= '0';
+ operands_match[c][i]
+ = operands_match_p (recog_operand[c], recog_operand[i]);
+
+ /* An operand may not match itself. */
+ if (c == i)
+ {
+ if (this_insn_is_asm)
+ warning_for_asm (this_insn,
+ "operand %d has constraint %d", i, c);
+ else
+ abort ();
+ }
+
+ /* If C can be commuted with C+1, and C might need to match I,
+ then C+1 might also need to match I. */
+ if (commutative >= 0)
+ {
+ if (c == commutative || c == commutative + 1)
+ {
+ int other = c + (c == commutative ? 1 : -1);
+ operands_match[other][i]
+ = operands_match_p (recog_operand[other], recog_operand[i]);
+ }
+ if (i == commutative || i == commutative + 1)
+ {
+ int other = i + (i == commutative ? 1 : -1);
+ operands_match[c][other]
+ = operands_match_p (recog_operand[c], recog_operand[other]);
+ }
+ /* Note that C is supposed to be less than I.
+ No need to consider altering both C and I because in
+ that case we would alter one into the other. */
+ }
+ }
+ }
+ }
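+
+ /* To make the scan above concrete: for an insn whose constraints are
+ "=r" and "0", operand 0 is marked RELOAD_WRITE, and the '0' on
+ operand 1 records in operands_match[0][1] whether the two operands
+ are already identical rtx. */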
+
+ /* Examine each operand that is a memory reference or memory address
+ and reload parts of the addresses into index registers.
+ Also here any references to pseudo regs that didn't get hard regs
+ but are equivalent to constants get replaced in the insn itself
+ with those constants. Nobody will ever see them again.
+
+ Finally, set up the preferred classes of each operand. */
+
+ for (i = 0; i < noperands; i++)
+ {
+ register RTX_CODE code = GET_CODE (recog_operand[i]);
+
+ address_reloaded[i] = 0;
+ operand_type[i] = (modified[i] == RELOAD_READ ? RELOAD_FOR_INPUT
+ : modified[i] == RELOAD_WRITE ? RELOAD_FOR_OUTPUT
+ : RELOAD_OTHER);
+ address_type[i]
+ = (modified[i] == RELOAD_READ ? RELOAD_FOR_INPUT_ADDRESS
+ : modified[i] == RELOAD_WRITE ? RELOAD_FOR_OUTPUT_ADDRESS
+ : RELOAD_OTHER);
+
+ if (*constraints[i] == 0)
+ /* Ignore things like match_operator operands. */
+ ;
+ else if (constraints[i][0] == 'p')
+ {
+ find_reloads_address (VOIDmode, NULL_PTR,
+ recog_operand[i], recog_operand_loc[i],
+ i, operand_type[i], ind_levels);
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i];
+ }
+ else if (code == MEM)
+ {
+ if (find_reloads_address (GET_MODE (recog_operand[i]),
+ recog_operand_loc[i],
+ XEXP (recog_operand[i], 0),
+ &XEXP (recog_operand[i], 0),
+ i, address_type[i], ind_levels))
+ address_reloaded[i] = 1;
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i];
+ }
+ else if (code == SUBREG)
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i]
+ = find_reloads_toplev (recog_operand[i], i, address_type[i],
+ ind_levels,
+ set != 0
+ && &SET_DEST (set) == recog_operand_loc[i]);
+ else if (code == PLUS)
+ /* We can get a PLUS as an "operand" as a result of
+ register elimination. See eliminate_regs and gen_input_reload. */
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i]
+ = find_reloads_toplev (recog_operand[i], i, address_type[i],
+ ind_levels, 0);
+ else if (code == REG)
+ {
+ /* This is equivalent to calling find_reloads_toplev.
+ The code is duplicated for speed.
+ When we find a pseudo always equivalent to a constant,
+ we replace it by the constant. We must be sure, however,
+ that we don't try to replace it in the insn in which it
+ is being set. */
+ register int regno = REGNO (recog_operand[i]);
+ if (reg_equiv_constant[regno] != 0
+ && (set == 0 || &SET_DEST (set) != recog_operand_loc[i]))
+ substed_operand[i] = recog_operand[i]
+ = reg_equiv_constant[regno];
+#if 0 /* This might screw code in reload1.c to delete prior output-reload
+ that feeds this insn. */
+ if (reg_equiv_mem[regno] != 0)
+ substed_operand[i] = recog_operand[i]
+ = reg_equiv_mem[regno];
+#endif
+ if (reg_equiv_address[regno] != 0)
+ {
+ /* If reg_equiv_address is not a constant address, copy it,
+ since it may be shared. */
+ rtx address = reg_equiv_address[regno];
+
+ if (rtx_varies_p (address))
+ address = copy_rtx (address);
+
+ /* If this is an output operand, we must output a CLOBBER
+ after INSN so find_equiv_reg knows REGNO is being written.
+ Mark this insn specially, so we can put our output reloads
+ after it. */
+
+ if (modified[i] != RELOAD_READ)
+ PUT_MODE (emit_insn_after (gen_rtx (CLOBBER, VOIDmode,
+ recog_operand[i]),
+ insn),
+ DImode);
+
+ *recog_operand_loc[i] = recog_operand[i]
+ = gen_rtx (MEM, GET_MODE (recog_operand[i]), address);
+ RTX_UNCHANGING_P (recog_operand[i])
+ = RTX_UNCHANGING_P (regno_reg_rtx[regno]);
+ find_reloads_address (GET_MODE (recog_operand[i]),
+ recog_operand_loc[i],
+ XEXP (recog_operand[i], 0),
+ &XEXP (recog_operand[i], 0),
+ i, address_type[i], ind_levels);
+ substed_operand[i] = recog_operand[i] = *recog_operand_loc[i];
+ }
+ }
+ /* If the operand is still a register (we didn't replace it with an
+ equivalent), get the preferred class to reload it into. */
+ code = GET_CODE (recog_operand[i]);
+ preferred_class[i]
+ = ((code == REG && REGNO (recog_operand[i]) >= FIRST_PSEUDO_REGISTER)
+ ? reg_preferred_class (REGNO (recog_operand[i])) : NO_REGS);
+ pref_or_nothing[i]
+ = (code == REG && REGNO (recog_operand[i]) >= FIRST_PSEUDO_REGISTER
+ && reg_alternate_class (REGNO (recog_operand[i])) == NO_REGS);
+ }
+
+ /* If this is simply a copy from operand 1 to operand 0, merge the
+ preferred classes for the operands. */
+ if (set != 0 && noperands >= 2 && recog_operand[0] == SET_DEST (set)
+ && recog_operand[1] == SET_SRC (set))
+ {
+ preferred_class[0] = preferred_class[1]
+ = reg_class_subunion[(int) preferred_class[0]][(int) preferred_class[1]];
+ pref_or_nothing[0] |= pref_or_nothing[1];
+ pref_or_nothing[1] |= pref_or_nothing[0];
+ }
+
+ /* Now see what we need for pseudo-regs that didn't get hard regs
+ or got the wrong kind of hard reg. For this, we must consider
+ all the operands together against the register constraints. */
+
+ best = MAX_RECOG_OPERANDS + 300;
+
+ swapped = 0;
+ goal_alternative_swapped = 0;
+ try_swapped:
+
+ /* The constraints are made of several alternatives.
+ Each operand's constraint looks like foo,bar,... with commas
+ separating the alternatives. The first alternatives for all
+ operands go together, the second alternatives go together, etc.
+
+ First loop over alternatives. */
+
+ for (this_alternative_number = 0;
+ this_alternative_number < n_alternatives;
+ this_alternative_number++)
+ {
+ /* Loop over operands for one constraint alternative. */
+ /* LOSERS counts those that don't fit this alternative
+ and would require loading. */
+ int losers = 0;
+ /* BAD is set to 1 if some operand can't fit this alternative
+ even after reloading. */
+ int bad = 0;
+ /* REJECT is a count of how undesirable this alternative says it is
+ if any reloading is required. If the alternative matches exactly
+ then REJECT is ignored, but otherwise it gets this much
+ counted against it in addition to the reloading needed. Each
+ ? counts three times here since we want the disparagement caused by
+ a bad register class to only count 1/3 as much. */
+ int reject = 0;
+
+ this_earlyclobber = 0;
+
+ for (i = 0; i < noperands; i++)
+ {
+ register char *p = constraints[i];
+ register int win = 0;
+ /* 0 => this operand can be reloaded somehow for this alternative */
+ int badop = 1;
+ /* 0 => this operand can be reloaded if the alternative allows regs. */
+ int winreg = 0;
+ int c;
+ register rtx operand = recog_operand[i];
+ int offset = 0;
+ /* Nonzero means this is a MEM that must be reloaded into a reg
+ regardless of what the constraint says. */
+ int force_reload = 0;
+ int offmemok = 0;
+ /* Nonzero if a constant forced into memory would be OK for this
+ operand. */
+ int constmemok = 0;
+ int earlyclobber = 0;
+
+ /* If the operand is a SUBREG, extract
+ the REG or MEM (or maybe even a constant) within.
+ (Constants can occur as a result of reg_equiv_constant.) */
+
+ while (GET_CODE (operand) == SUBREG)
+ {
+ offset += SUBREG_WORD (operand);
+ operand = SUBREG_REG (operand);
+ /* Force reload if this is a constant or PLUS or if there may
+ be a problem accessing OPERAND in the outer mode. */
+ if (CONSTANT_P (operand)
+ || GET_CODE (operand) == PLUS
+ /* We must force a reload of paradoxical SUBREGs
+ of a MEM because the alignment of the inner value
+ may not be enough to do the outer reference.
+
+ On machines that extend byte operations, if we have a
+ SUBREG whose inner and outer modes differ in size but are
+ both no wider than a word, combine.c has made assumptions
+ about how the machine behaves in such register accesses.
+ If the data is, in fact, in memory, we must always load it
+ using the size assumed to be in the register and let the
+ insn do the different-sized accesses. */
+ || ((GET_CODE (operand) == MEM
+ || (GET_CODE (operand)== REG
+ && REGNO (operand) >= FIRST_PSEUDO_REGISTER))
+ && (((GET_MODE_BITSIZE (GET_MODE (operand))
+ < BIGGEST_ALIGNMENT)
+ && (GET_MODE_SIZE (operand_mode[i])
+ > GET_MODE_SIZE (GET_MODE (operand))))
+#ifdef LOAD_EXTEND_OP
+ || (GET_MODE_SIZE (operand_mode[i]) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (operand))
+ <= UNITS_PER_WORD)
+ && (GET_MODE_SIZE (operand_mode[i])
+ != GET_MODE_SIZE (GET_MODE (operand))))
+#endif
+ ))
+ /* Subreg of a hard reg which can't handle the subreg's mode
+ or which would handle that mode in the wrong number of
+ registers for subregging to work. */
+ || (GET_CODE (operand) == REG
+ && REGNO (operand) < FIRST_PSEUDO_REGISTER
+ && ((GET_MODE_SIZE (operand_mode[i]) <= UNITS_PER_WORD
+ && (GET_MODE_SIZE (GET_MODE (operand))
+ > UNITS_PER_WORD)
+ && ((GET_MODE_SIZE (GET_MODE (operand))
+ / UNITS_PER_WORD)
+ != HARD_REGNO_NREGS (REGNO (operand),
+ GET_MODE (operand))))
+ || ! HARD_REGNO_MODE_OK (REGNO (operand) + offset,
+ operand_mode[i]))))
+ force_reload = 1;
+ }
+
+ this_alternative[i] = (int) NO_REGS;
+ this_alternative_win[i] = 0;
+ this_alternative_offmemok[i] = 0;
+ this_alternative_earlyclobber[i] = 0;
+ this_alternative_matches[i] = -1;
+
+ /* An empty constraint or empty alternative
+ allows anything which matched the pattern. */
+ if (*p == 0 || *p == ',')
+ win = 1, badop = 0;
+
+ /* Scan this alternative's specs for this operand;
+ set WIN if the operand fits any letter in this alternative.
+ Otherwise, clear BADOP if this operand could
+ fit some letter after reloads,
+ or set WINREG if this operand could fit after reloads
+ provided the constraint allows some registers. */
+
+ while (*p && (c = *p++) != ',')
+ switch (c)
+ {
+ case '=':
+ case '+':
+ case '*':
+ break;
+
+ case '%':
+ /* The last operand should not be marked commutative. */
+ if (i != noperands - 1)
+ commutative = i;
+ break;
+
+ case '?':
+ reject += 3;
+ break;
+
+ case '!':
+ reject = 300;
+ break;
+
+ case '#':
+ /* Ignore rest of this alternative as far as
+ reloading is concerned. */
+ while (*p && *p != ',') p++;
+ break;
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ c -= '0';
+ this_alternative_matches[i] = c;
+ /* We are supposed to match a previous operand.
+ If we do, we win if that one did.
+ If we do not, count both of the operands as losers.
+ (This is too conservative, since most of the time
+ only a single reload insn will be needed to make
+ the two operands win. As a result, this alternative
+ may be rejected when it is actually desirable.) */
+ if ((swapped && (c != commutative || i != commutative + 1))
+ /* If we are matching as if two operands were swapped,
+ also pretend that operands_match had been computed
+ with the operands swapped.
+ But if I is the second of those and C is the first,
+ don't exchange them, because operands_match is valid
+ only on one side of its diagonal. */
+ ? (operands_match
+ [(c == commutative || c == commutative + 1)
+ ? 2*commutative + 1 - c : c]
+ [(i == commutative || i == commutative + 1)
+ ? 2*commutative + 1 - i : i])
+ : operands_match[c][i])
+ win = this_alternative_win[c];
+ else
+ {
+ /* Operands don't match. */
+ rtx value;
+ /* Retroactively mark the operand we had to match
+ as a loser, if it wasn't already. */
+ if (this_alternative_win[c])
+ losers++;
+ this_alternative_win[c] = 0;
+ if (this_alternative[c] == (int) NO_REGS)
+ bad = 1;
+ /* But count the pair only once in the total badness of
+ this alternative, if the pair can be a dummy reload. */
+ value
+ = find_dummy_reload (recog_operand[i], recog_operand[c],
+ recog_operand_loc[i], recog_operand_loc[c],
+ operand_mode[i], operand_mode[c],
+ this_alternative[c], -1);
+
+ if (value != 0)
+ losers--;
+ }
+ /* This can be fixed with reloads if the operand
+ we are supposed to match can be fixed with reloads. */
+ badop = 0;
+ this_alternative[i] = this_alternative[c];
+
+ /* If we have to reload this operand and some previous
+ operand also had to match the same thing as this
+ operand, we don't know how to do that. So reject this
+ alternative. */
+ if (! win || force_reload)
+ for (j = 0; j < i; j++)
+ if (this_alternative_matches[j]
+ == this_alternative_matches[i])
+ badop = 1;
+
+ break;
+
+ case 'p':
+ /* All necessary reloads for an address_operand
+ were handled in find_reloads_address. */
+ this_alternative[i] = (int) ALL_REGS;
+ win = 1;
+ break;
+
+ case 'm':
+ if (force_reload)
+ break;
+ if (GET_CODE (operand) == MEM
+ || (GET_CODE (operand) == REG
+ && REGNO (operand) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[REGNO (operand)] < 0))
+ win = 1;
+ if (CONSTANT_P (operand))
+ badop = 0;
+ constmemok = 1;
+ break;
+
+ case '<':
+ if (GET_CODE (operand) == MEM
+ && ! address_reloaded[i]
+ && (GET_CODE (XEXP (operand, 0)) == PRE_DEC
+ || GET_CODE (XEXP (operand, 0)) == POST_DEC))
+ win = 1;
+ break;
+
+ case '>':
+ if (GET_CODE (operand) == MEM
+ && ! address_reloaded[i]
+ && (GET_CODE (XEXP (operand, 0)) == PRE_INC
+ || GET_CODE (XEXP (operand, 0)) == POST_INC))
+ win = 1;
+ break;
+
+ /* Memory operand whose address is not offsettable. */
+ case 'V':
+ if (force_reload)
+ break;
+ if (GET_CODE (operand) == MEM
+ && ! (ind_levels ? offsettable_memref_p (operand)
+ : offsettable_nonstrict_memref_p (operand))
+ /* Certain mem addresses will become offsettable
+ after they themselves are reloaded. This is important;
+ we don't want our own handling of unoffsettables
+ to override the handling of reg_equiv_address. */
+ && !(GET_CODE (XEXP (operand, 0)) == REG
+ && (ind_levels == 0
+ || reg_equiv_address[REGNO (XEXP (operand, 0))] != 0)))
+ win = 1;
+ break;
+
+ /* Memory operand whose address is offsettable. */
+ case 'o':
+ if (force_reload)
+ break;
+ if ((GET_CODE (operand) == MEM
+ /* If IND_LEVELS, find_reloads_address won't reload a
+ pseudo that didn't get a hard reg, so we have to
+ reject that case. */
+ && (ind_levels ? offsettable_memref_p (operand)
+ : offsettable_nonstrict_memref_p (operand)))
+ /* Certain mem addresses will become offsettable
+ after they themselves are reloaded. This is important;
+ we don't want our own handling of unoffsettables
+ to override the handling of reg_equiv_address. */
+ || (GET_CODE (operand) == MEM
+ && GET_CODE (XEXP (operand, 0)) == REG
+ && (ind_levels == 0
+ || reg_equiv_address[REGNO (XEXP (operand, 0))] != 0))
+ || (GET_CODE (operand) == REG
+ && REGNO (operand) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[REGNO (operand)] < 0
+ /* If reg_equiv_address is nonzero, we will be
+ loading it into a register; hence it will be
+ offsettable, but we cannot say that reg_equiv_mem
+ is offsettable without checking. */
+ && ((reg_equiv_mem[REGNO (operand)] != 0
+ && offsettable_memref_p (reg_equiv_mem[REGNO (operand)]))
+ || (reg_equiv_address[REGNO (operand)] != 0))))
+ win = 1;
+ if (CONSTANT_P (operand) || GET_CODE (operand) == MEM)
+ badop = 0;
+ constmemok = 1;
+ offmemok = 1;
+ break;
+
+ case '&':
+ /* Output operand that is stored before the need for the
+ input operands (and their index registers) is over. */
+ earlyclobber = 1, this_earlyclobber = 1;
+ break;
+
+ case 'E':
+ /* Match any floating double constant, but only if
+ we can examine the bits of it reliably. */
+ if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
+ || HOST_BITS_PER_WIDE_INT != BITS_PER_WORD)
+ && GET_MODE (operand) != VOIDmode && ! flag_pretend_float)
+ break;
+ if (GET_CODE (operand) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'F':
+ if (GET_CODE (operand) == CONST_DOUBLE)
+ win = 1;
+ break;
+
+ case 'G':
+ case 'H':
+ if (GET_CODE (operand) == CONST_DOUBLE
+ && CONST_DOUBLE_OK_FOR_LETTER_P (operand, c))
+ win = 1;
+ break;
+
+ case 's':
+ if (GET_CODE (operand) == CONST_INT
+ || (GET_CODE (operand) == CONST_DOUBLE
+ && GET_MODE (operand) == VOIDmode))
+ break;
+ case 'i':
+ if (CONSTANT_P (operand)
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (operand))
+#endif
+ )
+ win = 1;
+ break;
+
+ case 'n':
+ if (GET_CODE (operand) == CONST_INT
+ || (GET_CODE (operand) == CONST_DOUBLE
+ && GET_MODE (operand) == VOIDmode))
+ win = 1;
+ break;
+
+ case 'I':
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ case 'N':
+ case 'O':
+ case 'P':
+ if (GET_CODE (operand) == CONST_INT
+ && CONST_OK_FOR_LETTER_P (INTVAL (operand), c))
+ win = 1;
+ break;
+
+ case 'X':
+ win = 1;
+ break;
+
+ case 'g':
+ if (! force_reload
+ /* A PLUS is never a valid operand, but reload can make
+ it from a register when eliminating registers. */
+ && GET_CODE (operand) != PLUS
+ /* A SCRATCH is not a valid operand. */
+ && GET_CODE (operand) != SCRATCH
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! CONSTANT_P (operand)
+ || ! flag_pic
+ || LEGITIMATE_PIC_OPERAND_P (operand))
+#endif
+ && (GENERAL_REGS == ALL_REGS
+ || GET_CODE (operand) != REG
+ || (REGNO (operand) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[REGNO (operand)] < 0)))
+ win = 1;
+ /* Drop through into 'r' case. */
+
+ case 'r':
+ this_alternative[i]
+ = (int) reg_class_subunion[this_alternative[i]][(int) GENERAL_REGS];
+ goto reg;
+
+#ifdef EXTRA_CONSTRAINT
+ case 'Q':
+ case 'R':
+ case 'S':
+ case 'T':
+ case 'U':
+ if (EXTRA_CONSTRAINT (operand, c))
+ win = 1;
+ break;
+#endif
+
+ default:
+ this_alternative[i]
+ = (int) reg_class_subunion[this_alternative[i]][(int) REG_CLASS_FROM_LETTER (c)];
+
+ reg:
+ if (GET_MODE (operand) == BLKmode)
+ break;
+ winreg = 1;
+ if (GET_CODE (operand) == REG
+ && reg_fits_class_p (operand, this_alternative[i],
+ offset, GET_MODE (recog_operand[i])))
+ win = 1;
+ break;
+ }
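+
+ /* For instance, a pseudo that received no hard reg wins the 'm' case
+ above, since it will live in a stack slot, while a hard-register
+ operand wins 'r' only if reg_fits_class_p says it already lies in
+ the class this alternative wants. */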
+
+ constraints[i] = p;
+
+ /* If this operand could be handled with a reg,
+ and some reg is allowed, then this operand can be handled. */
+ if (winreg && this_alternative[i] != (int) NO_REGS)
+ badop = 0;
+
+ /* Record which operands fit this alternative. */
+ this_alternative_earlyclobber[i] = earlyclobber;
+ if (win && ! force_reload)
+ this_alternative_win[i] = 1;
+ else
+ {
+ int const_to_mem = 0;
+
+ this_alternative_offmemok[i] = offmemok;
+ losers++;
+ if (badop)
+ bad = 1;
+ /* Alternative loses if it has no regs for a reg operand. */
+ if (GET_CODE (operand) == REG
+ && this_alternative[i] == (int) NO_REGS
+ && this_alternative_matches[i] < 0)
+ bad = 1;
+
+ /* Alternative loses if it requires a type of reload not
+ permitted for this insn. We can always reload SCRATCH
+ and objects with a REG_UNUSED note. */
+ if (GET_CODE (operand) != SCRATCH
+ && modified[i] != RELOAD_READ && no_output_reloads
+ && ! find_reg_note (insn, REG_UNUSED, operand))
+ bad = 1;
+ else if (modified[i] != RELOAD_WRITE && no_input_reloads)
+ bad = 1;
+
+ /* If this is a constant that is reloaded into the desired
+ class by copying it to memory first, count that as another
+ reload. This is consistent with other code and is
+ required to avoid choosing another alternative when
+ the constant is moved into memory by this function on
+ an early reload pass. Note that the test here is
+ precisely the same as in the code below that calls
+ force_const_mem. */
+ if (CONSTANT_P (operand)
+ /* force_const_mem does not accept HIGH. */
+ && GET_CODE (operand) != HIGH
+ && (PREFERRED_RELOAD_CLASS (operand,
+ (enum reg_class) this_alternative[i])
+ == NO_REGS)
+ && operand_mode[i] != VOIDmode)
+ {
+ const_to_mem = 1;
+ if (this_alternative[i] != (int) NO_REGS)
+ losers++;
+ }
+
+ /* If we can't reload this value at all, reject this
+ alternative. Note that we could also lose due to
+ LIMIT_RELOAD_CLASS, but we don't check that
+ here. */
+
+ if (! CONSTANT_P (operand)
+ && (enum reg_class) this_alternative[i] != NO_REGS
+ && (PREFERRED_RELOAD_CLASS (operand,
+ (enum reg_class) this_alternative[i])
+ == NO_REGS))
+ bad = 1;
+
+ /* We prefer to reload pseudos over reloading other things,
+ since such reloads may be able to be eliminated later.
+ If we are reloading a SCRATCH, we won't be generating any
+ insns, just using a register, so it is also preferred.
+ So bump REJECT in other cases. Don't do this in the
+ case where we are forcing a constant into memory and
+ it will then win, since we don't want a different
+ alternative to match in that case. */
+ if (! (GET_CODE (operand) == REG
+ && REGNO (operand) >= FIRST_PSEUDO_REGISTER)
+ && GET_CODE (operand) != SCRATCH
+ && ! (const_to_mem && constmemok))
+ reject++;
+ }
+
+ /* If this operand is a pseudo register that didn't get a hard
+ reg and this alternative accepts some register, see if the
+ class that we want is a subset of the preferred class for this
+ register. If not, but it intersects that class, use the
+ preferred class instead. If it does not intersect the preferred
+ class, show that usage of this alternative should be discouraged;
+ it will be discouraged more still if the register is `preferred
+ or nothing'. We do this because it increases the chance of
+ reusing our spill register in a later insn and avoiding a pair
+ of memory stores and loads.
+
+ Don't bother with this if this alternative will accept this
+ operand.
+
+ Don't do this for a multiword operand, since it is only a
+ small win and has the risk of requiring more spill registers,
+ which could cause a large loss.
+
+ Don't do this if the preferred class has only one register
+ because we might otherwise exhaust the class. */
+
+ if (! win && this_alternative[i] != (int) NO_REGS
+ && GET_MODE_SIZE (operand_mode[i]) <= UNITS_PER_WORD
+ && reg_class_size[(int) preferred_class[i]] > 1)
+ {
+ if (! reg_class_subset_p (this_alternative[i],
+ preferred_class[i]))
+ {
+ /* Since we don't have a way of forming the intersection,
+ we just do something special if the preferred class
+ is a subset of the class we have; that's the most
+ common case anyway. */
+ if (reg_class_subset_p (preferred_class[i],
+ this_alternative[i]))
+ this_alternative[i] = (int) preferred_class[i];
+ else
+ reject += (1 + pref_or_nothing[i]);
+ }
+ }
+ }
+
+ /* Now see if any output operands that are marked "earlyclobber"
+ in this alternative conflict with any input operands
+ or any memory addresses. */
+
+ for (i = 0; i < noperands; i++)
+ if (this_alternative_earlyclobber[i]
+ && this_alternative_win[i])
+ {
+ struct decomposition early_data;
+
+ early_data = decompose (recog_operand[i]);
+
+ if (modified[i] == RELOAD_READ)
+ {
+ if (this_insn_is_asm)
+ warning_for_asm (this_insn,
+ "`&' constraint used with input operand");
+ else
+ abort ();
+ continue;
+ }
+
+ if (this_alternative[i] == NO_REGS)
+ {
+ this_alternative_earlyclobber[i] = 0;
+ if (this_insn_is_asm)
+ error_for_asm (this_insn,
+ "`&' constraint used with no register class");
+ else
+ abort ();
+ }
+
+ for (j = 0; j < noperands; j++)
+ /* Is this an input operand or a memory ref? */
+ if ((GET_CODE (recog_operand[j]) == MEM
+ || modified[j] != RELOAD_WRITE)
+ && j != i
+ /* Ignore things like match_operator operands. */
+ && *constraints1[j] != 0
+ /* Don't count an input operand that is constrained to match
+ the early clobber operand. */
+ && ! (this_alternative_matches[j] == i
+ && rtx_equal_p (recog_operand[i], recog_operand[j]))
+ /* Is it altered by storing the earlyclobber operand? */
+ && !immune_p (recog_operand[j], recog_operand[i], early_data))
+ {
+ /* If the output is in a single-reg class,
+ it's costly to reload it, so reload the input instead. */
+ if (reg_class_size[this_alternative[i]] == 1
+ && (GET_CODE (recog_operand[j]) == REG
+ || GET_CODE (recog_operand[j]) == SUBREG))
+ {
+ losers++;
+ this_alternative_win[j] = 0;
+ }
+ else
+ break;
+ }
+ /* If an earlyclobber operand conflicts with something,
+ it must be reloaded, so request this and count the cost. */
+ if (j != noperands)
+ {
+ losers++;
+ this_alternative_win[i] = 0;
+ for (j = 0; j < noperands; j++)
+ if (this_alternative_matches[j] == i
+ && this_alternative_win[j])
+ {
+ this_alternative_win[j] = 0;
+ losers++;
+ }
+ }
+ }
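+
+ /* Concretely: when an earlyclobbered "=&r" output overlaps an input,
+ one of the two must be reloaded. The code above reloads the
+ conflicting output, except when the output's class holds only a
+ single register, in which case reloading the output would be the
+ costlier choice and the input is reloaded instead. */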
+
+ /* If one alternative accepts all the operands, no reload required,
+ choose that alternative; don't consider the remaining ones. */
+ if (losers == 0)
+ {
+ /* Unswap these so that they are never swapped at `finish'. */
+ if (commutative >= 0)
+ {
+ recog_operand[commutative] = substed_operand[commutative];
+ recog_operand[commutative + 1]
+ = substed_operand[commutative + 1];
+ }
+ for (i = 0; i < noperands; i++)
+ {
+ goal_alternative_win[i] = 1;
+ goal_alternative[i] = this_alternative[i];
+ goal_alternative_offmemok[i] = this_alternative_offmemok[i];
+ goal_alternative_matches[i] = this_alternative_matches[i];
+ goal_alternative_earlyclobber[i]
+ = this_alternative_earlyclobber[i];
+ }
+ goal_alternative_number = this_alternative_number;
+ goal_alternative_swapped = swapped;
+ goal_earlyclobber = this_earlyclobber;
+ goto finish;
+ }
+
+ /* REJECT, set by the ! and ? constraint characters and when a register
+ would be reloaded into a non-preferred class, discourages the use of
+ this alternative for a reload goal. REJECT is incremented by three
+ for each ? and one for each non-preferred class. */
+ losers = losers * 3 + reject;
+
+ /* If this alternative can be made to work by reloading,
+ and it needs less reloading than the others checked so far,
+ record it as the chosen goal for reloading. */
+ if (! bad && best > losers)
+ {
+ for (i = 0; i < noperands; i++)
+ {
+ goal_alternative[i] = this_alternative[i];
+ goal_alternative_win[i] = this_alternative_win[i];
+ goal_alternative_offmemok[i] = this_alternative_offmemok[i];
+ goal_alternative_matches[i] = this_alternative_matches[i];
+ goal_alternative_earlyclobber[i]
+ = this_alternative_earlyclobber[i];
+ }
+ goal_alternative_swapped = swapped;
+ best = losers;
+ goal_alternative_number = this_alternative_number;
+ goal_earlyclobber = this_earlyclobber;
+ }
+ }
+
+ /* If insn is commutative (it's safe to exchange a certain pair of operands)
+ then we need to try each alternative twice,
+ the second time matching those two operands
+ as if we had exchanged them.
+ To do this, really exchange them in operands.
+
+ If we have just tried the alternatives the second time,
+ return operands to normal and drop through. */
+
+ if (commutative >= 0)
+ {
+ swapped = !swapped;
+ if (swapped)
+ {
+ register enum reg_class tclass;
+ register int t;
+
+ recog_operand[commutative] = substed_operand[commutative + 1];
+ recog_operand[commutative + 1] = substed_operand[commutative];
+
+ tclass = preferred_class[commutative];
+ preferred_class[commutative] = preferred_class[commutative + 1];
+ preferred_class[commutative + 1] = tclass;
+
+ t = pref_or_nothing[commutative];
+ pref_or_nothing[commutative] = pref_or_nothing[commutative + 1];
+ pref_or_nothing[commutative + 1] = t;
+
+ bcopy ((char *) constraints1, (char *) constraints,
+ noperands * sizeof (char *));
+ goto try_swapped;
+ }
+ else
+ {
+ recog_operand[commutative] = substed_operand[commutative];
+ recog_operand[commutative + 1] = substed_operand[commutative + 1];
+ }
+ }
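+
+ /* The effect: when some operand carries the '%' commutative marker,
+ each alternative is scored twice, once as written and once with the
+ two commutative operands exchanged, and goal_alternative_swapped
+ remembers which orientation produced the cheaper match. */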
+
+ /* The operands don't meet the constraints.
+ goal_alternative describes the alternative
+ that we could reach by reloading the fewest operands.
+ Reload so as to fit it. */
+
+ if (best == MAX_RECOG_OPERANDS + 300)
+ {
+ /* No alternative works with reloads?? */
+ if (insn_code_number >= 0)
+ abort ();
+ error_for_asm (insn, "inconsistent operand constraints in an `asm'");
+ /* Avoid further trouble with this insn. */
+ PATTERN (insn) = gen_rtx (USE, VOIDmode, const0_rtx);
+ n_reloads = 0;
+ return;
+ }
+
+ /* Jump to `finish' from above if all operands are valid already.
+ In that case, goal_alternative_win is all 1. */
+ finish:
+
+ /* Right now, for any pair of operands I and J that are required to match,
+ with I < J,
+ goal_alternative_matches[J] is I.
+ Set up goal_alternative_matched as the inverse function:
+ goal_alternative_matched[I] = J. */
+
+ for (i = 0; i < noperands; i++)
+ goal_alternative_matched[i] = -1;
+
+ for (i = 0; i < noperands; i++)
+ if (! goal_alternative_win[i]
+ && goal_alternative_matches[i] >= 0)
+ goal_alternative_matched[goal_alternative_matches[i]] = i;
+
+ /* If the best alternative is with operands 1 and 2 swapped,
+ consider them swapped before reporting the reloads. Update the
+ operand numbers of any reloads already pushed. */
+
+ if (goal_alternative_swapped)
+ {
+ register rtx tem;
+
+ tem = substed_operand[commutative];
+ substed_operand[commutative] = substed_operand[commutative + 1];
+ substed_operand[commutative + 1] = tem;
+ tem = recog_operand[commutative];
+ recog_operand[commutative] = recog_operand[commutative + 1];
+ recog_operand[commutative + 1] = tem;
+
+ for (i = 0; i < n_reloads; i++)
+ {
+ if (reload_opnum[i] == commutative)
+ reload_opnum[i] = commutative + 1;
+ else if (reload_opnum[i] == commutative + 1)
+ reload_opnum[i] = commutative;
+ }
+ }
+
+ /* Perform whatever substitutions on the operands we are supposed
+ to make due to commutativity or replacement of registers
+ with equivalent constants or memory slots. */
+
+ for (i = 0; i < noperands; i++)
+ {
+ *recog_operand_loc[i] = substed_operand[i];
+ /* While we are looping on operands, initialize this. */
+ operand_reloadnum[i] = -1;
+
+ /* If this is an earlyclobber operand, we need to widen the scope.
+ The reload must remain valid from the start of the insn being
+ reloaded until after the operand is stored into its destination.
+ We approximate this with RELOAD_OTHER even though we know that we
+ do not conflict with RELOAD_FOR_INPUT_ADDRESS reloads.
+
+ One special case that is worth checking is when we have an
+ output that is earlyclobber but isn't used past the insn (typically
+ a SCRATCH). In this case, we need only keep the reload live
+ through the insn itself, not through any of our input or output
+ reloads.
+
+ In any case, anything needed to address this operand can remain
+ however it was previously categorized. */
+
+ if (goal_alternative_earlyclobber[i])
+ operand_type[i]
+ = (find_reg_note (insn, REG_UNUSED, recog_operand[i])
+ ? RELOAD_FOR_INSN : RELOAD_OTHER);
+ }
+
+ /* Any constants that aren't allowed and can't be reloaded
+ into registers are here changed into memory references. */
+ for (i = 0; i < noperands; i++)
+ if (! goal_alternative_win[i]
+ && CONSTANT_P (recog_operand[i])
+ /* force_const_mem does not accept HIGH. */
+ && GET_CODE (recog_operand[i]) != HIGH
+ && (PREFERRED_RELOAD_CLASS (recog_operand[i],
+ (enum reg_class) goal_alternative[i])
+ == NO_REGS)
+ && operand_mode[i] != VOIDmode)
+ {
+ *recog_operand_loc[i] = recog_operand[i]
+ = find_reloads_toplev (force_const_mem (operand_mode[i],
+ recog_operand[i]),
+ i, address_type[i], ind_levels, 0);
+ if (alternative_allows_memconst (constraints1[i],
+ goal_alternative_number))
+ goal_alternative_win[i] = 1;
+ }
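+
+ /* E.g. a CONST_DOUBLE that PREFERRED_RELOAD_CLASS rejects for the
+ chosen alternative's class is copied into the constant pool by
+ force_const_mem; the resulting MEM's address is reloaded like any
+ other, and if the alternative also accepts memory the operand now
+ wins outright. */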
+
+ /* Record the values of the earlyclobber operands for the caller. */
+ if (goal_earlyclobber)
+ for (i = 0; i < noperands; i++)
+ if (goal_alternative_earlyclobber[i])
+ reload_earlyclobbers[n_earlyclobbers++] = recog_operand[i];
+
+ /* Now record reloads for all the operands that need them. */
+ for (i = 0; i < noperands; i++)
+ if (! goal_alternative_win[i])
+ {
+ /* Operands that match previous ones have already been handled. */
+ if (goal_alternative_matches[i] >= 0)
+ ;
+ /* Handle an operand with a nonoffsettable address
+ appearing where an offsettable address will do
+ by reloading the address into a base register.
+
+ ??? We can also do this when the operand is a register and
+ reg_equiv_mem is not offsettable, but this is a bit tricky,
+ so we don't bother with it. It may not be worth doing. */
+ else if (goal_alternative_matched[i] == -1
+ && goal_alternative_offmemok[i]
+ && GET_CODE (recog_operand[i]) == MEM)
+ {
+ operand_reloadnum[i]
+ = push_reload (XEXP (recog_operand[i], 0), NULL_RTX,
+ &XEXP (recog_operand[i], 0), NULL_PTR,
+ BASE_REG_CLASS, GET_MODE (XEXP (recog_operand[i], 0)),
+ VOIDmode, 0, 0, i, RELOAD_FOR_INPUT);
+ reload_inc[operand_reloadnum[i]]
+ = GET_MODE_SIZE (GET_MODE (recog_operand[i]));
+
+ /* If this operand is an output, we will have made any
+ reloads for its address as RELOAD_FOR_OUTPUT_ADDRESS, but
+ now we are treating part of the operand as an input, so
+ we must change these to RELOAD_FOR_INPUT_ADDRESS. */
+
+ if (modified[i] == RELOAD_WRITE)
+ for (j = 0; j < n_reloads; j++)
+ if (reload_opnum[j] == i
+ && reload_when_needed[j] == RELOAD_FOR_OUTPUT_ADDRESS)
+ reload_when_needed[j] = RELOAD_FOR_INPUT_ADDRESS;
+ }
+ else if (goal_alternative_matched[i] == -1)
+ operand_reloadnum[i] =
+ push_reload (modified[i] != RELOAD_WRITE ? recog_operand[i] : 0,
+ modified[i] != RELOAD_READ ? recog_operand[i] : 0,
+ (modified[i] != RELOAD_WRITE ?
+ recog_operand_loc[i] : 0),
+ modified[i] != RELOAD_READ ? recog_operand_loc[i] : 0,
+ (enum reg_class) goal_alternative[i],
+ (modified[i] == RELOAD_WRITE
+ ? VOIDmode : operand_mode[i]),
+ (modified[i] == RELOAD_READ
+ ? VOIDmode : operand_mode[i]),
+ (insn_code_number < 0 ? 0
+ : insn_operand_strict_low[insn_code_number][i]),
+ 0, i, operand_type[i]);
+ /* In a matching pair of operands, one must be input only
+ and the other must be output only.
+ Pass the input operand as IN and the other as OUT. */
+ else if (modified[i] == RELOAD_READ
+ && modified[goal_alternative_matched[i]] == RELOAD_WRITE)
+ {
+ operand_reloadnum[i]
+ = push_reload (recog_operand[i],
+ recog_operand[goal_alternative_matched[i]],
+ recog_operand_loc[i],
+ recog_operand_loc[goal_alternative_matched[i]],
+ (enum reg_class) goal_alternative[i],
+ operand_mode[i],
+ operand_mode[goal_alternative_matched[i]],
+ 0, 0, i, RELOAD_OTHER);
+ operand_reloadnum[goal_alternative_matched[i]] = output_reloadnum;
+ }
+ else if (modified[i] == RELOAD_WRITE
+ && modified[goal_alternative_matched[i]] == RELOAD_READ)
+ {
+ operand_reloadnum[goal_alternative_matched[i]]
+ = push_reload (recog_operand[goal_alternative_matched[i]],
+ recog_operand[i],
+ recog_operand_loc[goal_alternative_matched[i]],
+ recog_operand_loc[i],
+ (enum reg_class) goal_alternative[i],
+ operand_mode[goal_alternative_matched[i]],
+ operand_mode[i],
+ 0, 0, i, RELOAD_OTHER);
+ operand_reloadnum[i] = output_reloadnum;
+ }
+ else if (insn_code_number >= 0)
+ abort ();
+ else
+ {
+ error_for_asm (insn, "inconsistent operand constraints in an `asm'");
+ /* Avoid further trouble with this insn. */
+ PATTERN (insn) = gen_rtx (USE, VOIDmode, const0_rtx);
+ n_reloads = 0;
+ return;
+ }
+ }
+ else if (goal_alternative_matched[i] < 0
+ && goal_alternative_matches[i] < 0
+ && optimize)
+ {
+ /* For each non-matching operand that's a MEM or a pseudo-register
+ that didn't get a hard register, make an optional reload.
+ This may get done even if the insn needs no reloads otherwise. */
+
+ rtx operand = recog_operand[i];
+
+ while (GET_CODE (operand) == SUBREG)
+ operand = XEXP (operand, 0);
+ if ((GET_CODE (operand) == MEM
+ || (GET_CODE (operand) == REG
+ && REGNO (operand) >= FIRST_PSEUDO_REGISTER))
+ && (enum reg_class) goal_alternative[i] != NO_REGS
+ && ! no_input_reloads
+ /* Optional output reloads don't do anything and we mustn't
+ make in-out reloads on insns where output reloads are not
+ permitted. */
+ && (modified[i] == RELOAD_READ
+ || (modified[i] == RELOAD_READ_WRITE && ! no_output_reloads)))
+ operand_reloadnum[i]
+ = push_reload (modified[i] != RELOAD_WRITE ? recog_operand[i] : 0,
+ modified[i] != RELOAD_READ ? recog_operand[i] : 0,
+ (modified[i] != RELOAD_WRITE
+ ? recog_operand_loc[i] : 0),
+ (modified[i] != RELOAD_READ
+ ? recog_operand_loc[i] : 0),
+ (enum reg_class) goal_alternative[i],
+ (modified[i] == RELOAD_WRITE
+ ? VOIDmode : operand_mode[i]),
+ (modified[i] == RELOAD_READ
+ ? VOIDmode : operand_mode[i]),
+ (insn_code_number < 0 ? 0
+ : insn_operand_strict_low[insn_code_number][i]),
+ 1, i, operand_type[i]);
+ }
+ else if (goal_alternative_matches[i] >= 0
+ && goal_alternative_win[goal_alternative_matches[i]]
+ && modified[i] == RELOAD_READ
+ && modified[goal_alternative_matches[i]] == RELOAD_WRITE
+ && ! no_input_reloads && ! no_output_reloads
+ && optimize)
+ {
+ /* Similarly, make an optional reload for a pair of matching
+ objects that are in MEM or are pseudos that didn't get hard regs. */
+
+ rtx operand = recog_operand[i];
+
+ while (GET_CODE (operand) == SUBREG)
+ operand = XEXP (operand, 0);
+ if ((GET_CODE (operand) == MEM
+ || (GET_CODE (operand) == REG
+ && REGNO (operand) >= FIRST_PSEUDO_REGISTER))
+ && ((enum reg_class) goal_alternative[goal_alternative_matches[i]]
+ != NO_REGS))
+ operand_reloadnum[i] = operand_reloadnum[goal_alternative_matches[i]]
+ = push_reload (recog_operand[goal_alternative_matches[i]],
+ recog_operand[i],
+ recog_operand_loc[goal_alternative_matches[i]],
+ recog_operand_loc[i],
+ (enum reg_class) goal_alternative[goal_alternative_matches[i]],
+ operand_mode[goal_alternative_matches[i]],
+ operand_mode[i],
+ 0, 1, goal_alternative_matches[i], RELOAD_OTHER);
+ }
+
+ /* If this insn pattern contains any MATCH_DUP's, make sure that
+ they will be substituted if the operands they match are substituted.
+ Also apply any substitutions we have already made to the operands.
+
+ Don't do this if we aren't making replacements because we might be
+ propagating things allocated by frame pointer elimination into places
+ it doesn't expect. */
+
+ if (insn_code_number >= 0 && replace)
+ for (i = insn_n_dups[insn_code_number] - 1; i >= 0; i--)
+ {
+ int opno = recog_dup_num[i];
+ *recog_dup_loc[i] = *recog_operand_loc[opno];
+ if (operand_reloadnum[opno] >= 0)
+ push_replacement (recog_dup_loc[i], operand_reloadnum[opno],
+ insn_operand_mode[insn_code_number][opno]);
+ }
+
+#if 0
+ /* This loses because reloading of prior insns can invalidate the equivalence
+ (or at least find_equiv_reg isn't smart enough to find it any more),
+ causing this insn to need more reload regs than it needed before.
+ It may be too late to make the reload regs available.
+ Now this optimization is done safely in choose_reload_regs. */
+
+ /* For each reload of a reg into some other class of reg,
+ search for an existing equivalent reg (same value now) in the right class.
+ We can use it as long as we don't need to change its contents. */
+ for (i = 0; i < n_reloads; i++)
+ if (reload_reg_rtx[i] == 0
+ && reload_in[i] != 0
+ && GET_CODE (reload_in[i]) == REG
+ && reload_out[i] == 0)
+ {
+ reload_reg_rtx[i]
+ = find_equiv_reg (reload_in[i], insn, reload_reg_class[i], -1,
+ static_reload_reg_p, 0, reload_inmode[i]);
+ /* Prevent generation of insn to load the value
+ because the one we found already has the value. */
+ if (reload_reg_rtx[i])
+ reload_in[i] = reload_reg_rtx[i];
+ }
+#endif
+
+ /* Perhaps an output reload can be combined with another
+ to reduce needs by one. */
+ if (!goal_earlyclobber)
+ combine_reloads ();
+
+ /* If we have a pair of reloads for parts of an address, and they are reloading
+ the same object, the operands themselves were not reloaded, and they
+ are for two operands that are supposed to match, merge the reloads and
+ change the type of the surviving reload to RELOAD_FOR_OPERAND_ADDRESS. */
+
+ for (i = 0; i < n_reloads; i++)
+ {
+ int k;
+
+ for (j = i + 1; j < n_reloads; j++)
+ if ((reload_when_needed[i] == RELOAD_FOR_INPUT_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_OUTPUT_ADDRESS)
+ && (reload_when_needed[j] == RELOAD_FOR_INPUT_ADDRESS
+ || reload_when_needed[j] == RELOAD_FOR_OUTPUT_ADDRESS)
+ && rtx_equal_p (reload_in[i], reload_in[j])
+ && (operand_reloadnum[reload_opnum[i]] < 0
+ || reload_optional[operand_reloadnum[reload_opnum[i]]])
+ && (operand_reloadnum[reload_opnum[j]] < 0
+ || reload_optional[operand_reloadnum[reload_opnum[j]]])
+ && (goal_alternative_matches[reload_opnum[i]] == reload_opnum[j]
+ || (goal_alternative_matches[reload_opnum[j]]
+ == reload_opnum[i])))
+ {
+ for (k = 0; k < n_replacements; k++)
+ if (replacements[k].what == j)
+ replacements[k].what = i;
+
+ reload_when_needed[i] = RELOAD_FOR_OPERAND_ADDRESS;
+ reload_in[j] = 0;
+ }
+ }
+
+ /* Scan all the reloads and update their type.
+ If a reload is for the address of an operand and we didn't reload
+ that operand, change the type. Similarly, change the operand number
+ of a reload when two operands match. If a reload is optional, treat it
+ as though the operand isn't reloaded.
+
+ ??? This latter case is somewhat odd because if we do the optional
+ reload, it means the object is hanging around. Thus we need only
+ do the address reload if the optional reload was NOT done.
+
+ Change secondary reloads to be the address type of their operand, not
+ the normal type.
+
+ If an operand's reload is now RELOAD_OTHER, change any
+ RELOAD_FOR_INPUT_ADDRESS reloads of that operand to
+ RELOAD_FOR_OTHER_ADDRESS. */
+
+ for (i = 0; i < n_reloads; i++)
+ {
+ if (reload_secondary_p[i]
+ && reload_when_needed[i] == operand_type[reload_opnum[i]])
+ reload_when_needed[i] = address_type[reload_opnum[i]];
+
+ if ((reload_when_needed[i] == RELOAD_FOR_INPUT_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_OUTPUT_ADDRESS)
+ && (operand_reloadnum[reload_opnum[i]] < 0
+ || reload_optional[operand_reloadnum[reload_opnum[i]]]))
+ {
+ /* If we have a secondary reload to go along with this reload,
+ change its type to RELOAD_FOR_OPADDR_ADDR. */
+
+ if (reload_when_needed[i] == RELOAD_FOR_INPUT_ADDRESS
+ && reload_secondary_in_reload[i] != -1)
+ {
+ int secondary_in_reload = reload_secondary_in_reload[i];
+
+ reload_when_needed[secondary_in_reload] =
+ RELOAD_FOR_OPADDR_ADDR;
+
+ /* If there's a tertiary reload we have to change it also. */
+ if (secondary_in_reload > 0
+ && reload_secondary_in_reload[secondary_in_reload] != -1)
+ reload_when_needed[reload_secondary_in_reload[secondary_in_reload]]
+ = RELOAD_FOR_OPADDR_ADDR;
+ }
+
+ if (reload_when_needed[i] == RELOAD_FOR_OUTPUT_ADDRESS
+ && reload_secondary_out_reload[i] != -1)
+ {
+ int secondary_out_reload = reload_secondary_out_reload[i];
+
+ reload_when_needed[secondary_out_reload] =
+ RELOAD_FOR_OPADDR_ADDR;
+
+ /* If there's a tertiary reload we have to change it also. */
+ if (secondary_out_reload
+ && reload_secondary_out_reload[secondary_out_reload] != -1)
+ reload_when_needed[reload_secondary_out_reload[secondary_out_reload]]
+ = RELOAD_FOR_OPADDR_ADDR;
+ }
+ reload_when_needed[i] = RELOAD_FOR_OPERAND_ADDRESS;
+ }
+
+ if (reload_when_needed[i] == RELOAD_FOR_INPUT_ADDRESS
+ && operand_reloadnum[reload_opnum[i]] >= 0
+ && (reload_when_needed[operand_reloadnum[reload_opnum[i]]]
+ == RELOAD_OTHER))
+ reload_when_needed[i] = RELOAD_FOR_OTHER_ADDRESS;
+
+ if (goal_alternative_matches[reload_opnum[i]] >= 0)
+ reload_opnum[i] = goal_alternative_matches[reload_opnum[i]];
+ }
+
+ /* See if we have any reloads that are now allowed to be merged
+ because we've changed when the reload is needed to
+ RELOAD_FOR_OPERAND_ADDRESS or RELOAD_FOR_OTHER_ADDRESS. Only
+ check for the most common cases. */
+
+ for (i = 0; i < n_reloads; i++)
+ if (reload_in[i] != 0 && reload_out[i] == 0
+ && (reload_when_needed[i] == RELOAD_FOR_OPERAND_ADDRESS
+ || reload_when_needed[i] == RELOAD_FOR_OTHER_ADDRESS))
+ for (j = 0; j < n_reloads; j++)
+ if (i != j && reload_in[j] != 0 && reload_out[j] == 0
+ && reload_when_needed[j] == reload_when_needed[i]
+ && MATCHES (reload_in[i], reload_in[j])
+ && reload_reg_class[i] == reload_reg_class[j]
+ && !reload_nocombine[i] && !reload_nocombine[j]
+ && reload_reg_rtx[i] == reload_reg_rtx[j])
+ {
+ reload_opnum[i] = MIN (reload_opnum[i], reload_opnum[j]);
+ transfer_replacements (i, j);
+ reload_in[j] = 0;
+ }
+
+#else /* no REGISTER_CONSTRAINTS */
+ int noperands;
+ int insn_code_number;
+ int goal_earlyclobber = 0; /* Always 0, to make combine_reloads happen. */
+ register int i;
+ rtx body = PATTERN (insn);
+
+ n_reloads = 0;
+ n_replacements = 0;
+ n_earlyclobbers = 0;
+ replace_reloads = replace;
+ this_insn = insn;
+
+ /* Find what kind of insn this is. NOPERANDS gets number of operands.
+ Store the operand values in RECOG_OPERAND and the locations
+ of the words in the insn that point to them in RECOG_OPERAND_LOC.
+ Return if the insn needs no reload processing. */
+
+ switch (GET_CODE (body))
+ {
+ case USE:
+ case CLOBBER:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return;
+
+ case PARALLEL:
+ case SET:
+ noperands = asm_noperands (body);
+ if (noperands >= 0)
+ {
+ /* This insn is an `asm' with operands.
+ First, find out how many operands, and allocate space. */
+
+ insn_code_number = -1;
+ /* ??? This is a bug! ???
+ Give up and delete this insn if it has too many operands. */
+ if (noperands > MAX_RECOG_OPERANDS)
+ abort ();
+
+ /* Now get the operand values out of the insn. */
+
+ decode_asm_operands (body, recog_operand, recog_operand_loc,
+ NULL_PTR, NULL_PTR);
+ break;
+ }
+
+ default:
+ /* Ordinary insn: recognize it, allocate space for operands and
+ constraints, and get them out via insn_extract. */
+
+ insn_code_number = recog_memoized (insn);
+ noperands = insn_n_operands[insn_code_number];
+ insn_extract (insn);
+ }
+
+ if (noperands == 0)
+ return;
+
+ for (i = 0; i < noperands; i++)
+ {
+ register RTX_CODE code = GET_CODE (recog_operand[i]);
+ int is_set_dest = GET_CODE (body) == SET && (i == 0);
+
+ if (insn_code_number >= 0)
+ if (insn_operand_address_p[insn_code_number][i])
+ find_reloads_address (VOIDmode, NULL_PTR,
+ recog_operand[i], recog_operand_loc[i],
+ i, RELOAD_FOR_INPUT, ind_levels);
+
+ /* In these cases, we can't tell if the operand is an input
+ or an output, so be conservative. In practice it won't be a
+ problem. */
+
+ if (code == MEM)
+ find_reloads_address (GET_MODE (recog_operand[i]),
+ recog_operand_loc[i],
+ XEXP (recog_operand[i], 0),
+ &XEXP (recog_operand[i], 0),
+ i, RELOAD_OTHER, ind_levels);
+ if (code == SUBREG)
+ recog_operand[i] = *recog_operand_loc[i]
+ = find_reloads_toplev (recog_operand[i], i, RELOAD_OTHER,
+ ind_levels, is_set_dest);
+ if (code == REG)
+ {
+ register int regno = REGNO (recog_operand[i]);
+ if (reg_equiv_constant[regno] != 0 && !is_set_dest)
+ recog_operand[i] = *recog_operand_loc[i]
+ = reg_equiv_constant[regno];
+#if 0 /* This might break the code in reload1.c that deletes a prior
+ output-reload feeding this insn. */
+ if (reg_equiv_mem[regno] != 0)
+ recog_operand[i] = *recog_operand_loc[i]
+ = reg_equiv_mem[regno];
+#endif
+ }
+ }
+
+ /* Perhaps an output reload can be combined with another
+ to reduce needs by one. */
+ if (!goal_earlyclobber)
+ combine_reloads ();
+#endif /* no REGISTER_CONSTRAINTS */
+}
+
+/* Return 1 if alternative number ALTNUM in constraint-string CONSTRAINT
+ accepts a memory operand with constant address. */
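+/* For example, with CONSTRAINT "r,o,r" and ALTNUM 1, the scan below
+ skips the first alternative, sees 'o' in the second, and returns 1;
+ with ALTNUM 0 or 2 it sees only 'r' and returns 0. */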
+
+static int
+alternative_allows_memconst (constraint, altnum)
+ char *constraint;
+ int altnum;
+{
+ register int c;
+ /* Skip alternatives before the one requested. */
+ while (altnum > 0)
+ {
+ while (*constraint++ != ',');
+ altnum--;
+ }
+ /* Scan the requested alternative for 'm' or 'o'.
+ If one of them is present, this alternative accepts memory constants. */
+ while ((c = *constraint++) && c != ',' && c != '#')
+ if (c == 'm' || c == 'o')
+ return 1;
+ return 0;
+}
+
+/* Scan X for memory references and scan the addresses for reloading.
+ Also checks for references to "constant" regs that we want to eliminate
+ and replaces them with the values they stand for.
+ We may alter X destructively if it contains a reference to such.
+ If X is just a constant reg, we return the equivalent value
+ instead of X.
+
+ IND_LEVELS says how many levels of indirect addressing this machine
+ supports.
+
+ OPNUM and TYPE identify the purpose of the reload.
+
+ IS_SET_DEST is true if X is the destination of a SET, in which case
+ it must not be replaced by a constant. */
+
+static rtx
+find_reloads_toplev (x, opnum, type, ind_levels, is_set_dest)
+ rtx x;
+ int opnum;
+ enum reload_type type;
+ int ind_levels;
+ int is_set_dest;
+{
+ register RTX_CODE code = GET_CODE (x);
+
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ if (code == REG)
+ {
+ /* This code is duplicated for speed in find_reloads. */
+ register int regno = REGNO (x);
+ if (reg_equiv_constant[regno] != 0 && !is_set_dest)
+ x = reg_equiv_constant[regno];
+#if 0
+/* This creates (subreg (mem...)) which would cause an unnecessary
+ reload of the mem. */
+ else if (reg_equiv_mem[regno] != 0)
+ x = reg_equiv_mem[regno];
+#endif
+ else if (reg_equiv_address[regno] != 0)
+ {
+ /* If reg_equiv_address varies, it may be shared, so copy it. */
+ rtx addr = reg_equiv_address[regno];
+
+ if (rtx_varies_p (addr))
+ addr = copy_rtx (addr);
+
+ x = gen_rtx (MEM, GET_MODE (x), addr);
+ RTX_UNCHANGING_P (x) = RTX_UNCHANGING_P (regno_reg_rtx[regno]);
+ find_reloads_address (GET_MODE (x), NULL_PTR,
+ XEXP (x, 0),
+ &XEXP (x, 0), opnum, type, ind_levels);
+ }
+ return x;
+ }
+ if (code == MEM)
+ {
+ rtx tem = x;
+ find_reloads_address (GET_MODE (x), &tem, XEXP (x, 0), &XEXP (x, 0),
+ opnum, type, ind_levels);
+ return tem;
+ }
+
+ if (code == SUBREG && GET_CODE (SUBREG_REG (x)) == REG)
+ {
+ /* Check for SUBREG containing a REG that's equivalent to a constant.
+ If the constant has a known value, truncate it right now.
+ Similarly if we are extracting a single-word of a multi-word
+ constant. If the constant is symbolic, allow it to be substituted
+ normally. push_reload will strip the subreg later. If the
+ constant is VOIDmode, abort because we will lose the mode of
+ the register (this should never happen because one of the cases
+ above should handle it). */
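+ /* For instance, if (reg:SI 70) is a hypothetical pseudo that got no
+ hard register and is equivalent to (const_int 258), then
+ (subreg:QI (reg:SI 70) 0) is truncated right here to (const_int 2)
+ by gen_lowpart_common. */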
+
+ register int regno = REGNO (SUBREG_REG (x));
+ rtx tem;
+
+ if (subreg_lowpart_p (x)
+ && regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0
+ && (tem = gen_lowpart_common (GET_MODE (x),
+ reg_equiv_constant[regno])) != 0)
+ return tem;
+
+ if (GET_MODE_BITSIZE (GET_MODE (x)) == BITS_PER_WORD
+ && regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0
+ && (tem = operand_subword (reg_equiv_constant[regno],
+ SUBREG_WORD (x), 0,
+ GET_MODE (SUBREG_REG (x)))) != 0)
+ return tem;
+
+ if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0
+ && GET_MODE (reg_equiv_constant[regno]) == VOIDmode)
+ abort ();
+
+ /* If the subreg contains a reg that will be converted to a mem,
+ convert the subreg to a narrower memref now.
+ Otherwise, we would get (subreg (mem ...) ...),
+ which would force reload of the mem.
+
+ We also need to do this if there is an equivalent MEM that is
+ not offsettable. In that case, alter_subreg would produce an
+ invalid address on big-endian machines.
+
+ For machines that extend byte loads, we must not reload using
+ a wider mode if we have a paradoxical SUBREG. find_reloads will
+ force a reload in that case. So we should not do anything here. */
+
+ else if (regno >= FIRST_PSEUDO_REGISTER
+#ifdef LOAD_EXTEND_OP
+ && (GET_MODE_SIZE (GET_MODE (x))
+ <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+#endif
+ && (reg_equiv_address[regno] != 0
+ || (reg_equiv_mem[regno] != 0
+ && (! strict_memory_address_p (GET_MODE (x),
+ XEXP (reg_equiv_mem[regno], 0))
+ || ! offsettable_memref_p (reg_equiv_mem[regno])))))
+ {
+ int offset = SUBREG_WORD (x) * UNITS_PER_WORD;
+ rtx addr = (reg_equiv_address[regno] ? reg_equiv_address[regno]
+ : XEXP (reg_equiv_mem[regno], 0));
+#if BYTES_BIG_ENDIAN
+ int size;
+ size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
+ offset += MIN (size, UNITS_PER_WORD);
+ size = GET_MODE_SIZE (GET_MODE (x));
+ offset -= MIN (size, UNITS_PER_WORD);
+#endif
+ addr = plus_constant (addr, offset);
+ x = gen_rtx (MEM, GET_MODE (x), addr);
+ RTX_UNCHANGING_P (x) = RTX_UNCHANGING_P (regno_reg_rtx[regno]);
+ find_reloads_address (GET_MODE (x), NULL_PTR,
+ XEXP (x, 0),
+ &XEXP (x, 0), opnum, type, ind_levels);
+ }
+
+ }
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = find_reloads_toplev (XEXP (x, i), opnum, type,
+ ind_levels, is_set_dest);
+ }
+ return x;
+}
+
+/* Return a mem ref for the memory equivalent of reg REGNO, using the
+ machine mode of AD. This mem ref is not shared with anything. */
+
+static rtx
+make_memloc (ad, regno)
+ rtx ad;
+ int regno;
+{
+ register int i;
+ rtx tem = reg_equiv_address[regno];
+
+#if 0 /* We cannot safely reuse a memloc made here;
+ if the pseudo appears twice, and its mem needs a reload,
+ it gets two separate reloads assigned, but it only
+ gets substituted with the second of them;
+ then it can get used before that reload reg gets loaded up. */
+ for (i = 0; i < n_memlocs; i++)
+ if (rtx_equal_p (tem, XEXP (memlocs[i], 0)))
+ return memlocs[i];
+#endif
+
+ /* If TEM might contain a pseudo, we must copy it to avoid
+ modifying it when we do the substitution for the reload. */
+ if (rtx_varies_p (tem))
+ tem = copy_rtx (tem);
+
+ tem = gen_rtx (MEM, GET_MODE (ad), tem);
+ RTX_UNCHANGING_P (tem) = RTX_UNCHANGING_P (regno_reg_rtx[regno]);
+ memlocs[n_memlocs++] = tem;
+ return tem;
+}
+
+/* Record all reloads needed for handling memory address AD
+ which appears in *LOC in a memory reference to mode MODE
+ which itself is found in location *MEMREFLOC.
+ Note that we take shortcuts assuming that no multi-reg machine mode
+ occurs as part of an address.
+
+ OPNUM and TYPE specify the purpose of this reload.
+
+ IND_LEVELS says how many levels of indirect addressing this machine
+ supports.
+
+ Value is nonzero if this address is reloaded or replaced as a whole.
+ This is interesting to the caller if the address is an autoincrement.
+
+ Note that there is no verification that the address will be valid after
+ this routine does its work. Instead, we rely on the fact that the address
+ was valid when reload started. So we need only undo things that reload
+ could have broken. These are wrong register types, pseudos not allocated
+ to a hard register, and frame pointer elimination. */
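+/* For example, if frame pointer elimination turned a pseudo's stack
+ slot into (mem:SI (plus (reg sp) (const_int 10000))) on a machine
+ whose displacement field cannot hold 10000, the code below computes
+ the sum in a base register (or, when double_reg_address_ok, reloads
+ just the displacement into an index register) and addresses via that. */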
+
+static int
+find_reloads_address (mode, memrefloc, ad, loc, opnum, type, ind_levels)
+ enum machine_mode mode;
+ rtx *memrefloc;
+ rtx ad;
+ rtx *loc;
+ int opnum;
+ enum reload_type type;
+ int ind_levels;
+{
+ register int regno;
+ rtx tem;
+
+ /* If the address is a register, see if it is a legitimate address and
+ reload if not. We first handle the cases where we need not reload
+ or where we must reload in a non-standard way. */
+
+ if (GET_CODE (ad) == REG)
+ {
+ regno = REGNO (ad);
+
+ if (reg_equiv_constant[regno] != 0
+ && strict_memory_address_p (mode, reg_equiv_constant[regno]))
+ {
+ *loc = ad = reg_equiv_constant[regno];
+ return 1;
+ }
+
+ else if (reg_equiv_address[regno] != 0)
+ {
+ tem = make_memloc (ad, regno);
+ find_reloads_address (GET_MODE (tem), NULL_PTR, XEXP (tem, 0),
+ &XEXP (tem, 0), opnum, type, ind_levels);
+ push_reload (tem, NULL_RTX, loc, NULL_PTR, BASE_REG_CLASS,
+ GET_MODE (ad), VOIDmode, 0, 0,
+ opnum, type);
+ return 1;
+ }
+
+ /* We can avoid a reload if the register's equivalent memory expression
+ is valid as an indirect memory address.
+ But not all addresses are valid in a mem used as an indirect address:
+ only reg or reg+constant. */
+
+ else if (reg_equiv_mem[regno] != 0 && ind_levels > 0
+ && strict_memory_address_p (mode, reg_equiv_mem[regno])
+ && (GET_CODE (XEXP (reg_equiv_mem[regno], 0)) == REG
+ || (GET_CODE (XEXP (reg_equiv_mem[regno], 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (reg_equiv_mem[regno], 0), 0)) == REG
+ && CONSTANT_P (XEXP (XEXP (reg_equiv_mem[regno], 0), 1)))))
+ return 0;
+
+ /* The only remaining case where we can avoid a reload is if this is a
+ hard register that is valid as a base register and which is not the
+ subject of a CLOBBER in this insn. */
+
+ else if (regno < FIRST_PSEUDO_REGISTER && REGNO_OK_FOR_BASE_P (regno)
+ && ! regno_clobbered_p (regno, this_insn))
+ return 0;
+
+ /* If we do not have one of the cases above, we must do the reload. */
+ push_reload (ad, NULL_RTX, loc, NULL_PTR, BASE_REG_CLASS,
+ GET_MODE (ad), VOIDmode, 0, 0, opnum, type);
+ return 1;
+ }
+
+ if (strict_memory_address_p (mode, ad))
+ {
+ /* The address appears valid, so reloads are not needed.
+ But the address may contain an eliminable register.
+ This can happen because a machine with indirect addressing
+ may consider a pseudo register by itself a valid address even when
+ it has failed to get a hard reg.
+ So do a tree-walk to find and eliminate all such regs. */
+
+ /* But first quickly dispose of a common case. */
+ if (GET_CODE (ad) == PLUS
+ && GET_CODE (XEXP (ad, 1)) == CONST_INT
+ && GET_CODE (XEXP (ad, 0)) == REG
+ && reg_equiv_constant[REGNO (XEXP (ad, 0))] == 0)
+ return 0;
+
+ subst_reg_equivs_changed = 0;
+ *loc = subst_reg_equivs (ad);
+
+ if (! subst_reg_equivs_changed)
+ return 0;
+
+ /* Check result for validity after substitution. */
+ if (strict_memory_address_p (mode, ad))
+ return 0;
+ }
+
+ /* The address is not valid. We have to figure out why. One possibility
+ is that it is itself a MEM. This can happen when the frame pointer is
+ being eliminated, a pseudo is not allocated to a hard register, and the
+ offset between the frame and stack pointers is not its initial value.
+ In that case the pseudo will have been replaced by a MEM referring to
+ the stack pointer. */
+ if (GET_CODE (ad) == MEM)
+ {
+ /* First ensure that the address in this MEM is valid. Then, unless
+ indirect addresses are valid, reload the MEM into a register. */
+ tem = ad;
+ find_reloads_address (GET_MODE (ad), &tem, XEXP (ad, 0), &XEXP (ad, 0),
+ opnum, type, ind_levels == 0 ? 0 : ind_levels - 1);
+
+ /* If tem was changed, then we must create a new memory reference to
+ hold it and store it back into memrefloc. */
+ if (tem != ad && memrefloc)
+ {
+ *memrefloc = copy_rtx (*memrefloc);
+ copy_replacements (tem, XEXP (*memrefloc, 0));
+ loc = &XEXP (*memrefloc, 0);
+ }
+
+ /* Check cases similar to those for indirect addresses above, except
+ that we can allow pseudos and a MEM, since they should have been
+ taken care of above. */
+
+ if (ind_levels == 0
+ || (GET_CODE (XEXP (tem, 0)) == SYMBOL_REF && ! indirect_symref_ok)
+ || GET_CODE (XEXP (tem, 0)) == MEM
+ || ! (GET_CODE (XEXP (tem, 0)) == REG
+ || (GET_CODE (XEXP (tem, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (tem, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)))
+ {
+ /* Must use TEM here, not AD, since it is the one that will
+ have any subexpressions reloaded, if needed. */
+ push_reload (tem, NULL_RTX, loc, NULL_PTR,
+ BASE_REG_CLASS, GET_MODE (tem), VOIDmode, 0,
+ 0, opnum, type);
+ return 1;
+ }
+ else
+ return 0;
+ }
+
+ /* If we have address of a stack slot but it's not valid
+ (displacement is too large), compute the sum in a register. */
+ else if (GET_CODE (ad) == PLUS
+ && (XEXP (ad, 0) == frame_pointer_rtx
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || XEXP (ad, 0) == hard_frame_pointer_rtx
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || XEXP (ad, 0) == arg_pointer_rtx
+#endif
+ || XEXP (ad, 0) == stack_pointer_rtx)
+ && GET_CODE (XEXP (ad, 1)) == CONST_INT)
+ {
+ /* Unshare the MEM rtx so we can safely alter it. */
+ if (memrefloc)
+ {
+ *memrefloc = copy_rtx (*memrefloc);
+ loc = &XEXP (*memrefloc, 0);
+ }
+ if (double_reg_address_ok)
+ {
+ /* Unshare the sum as well. */
+ *loc = ad = copy_rtx (ad);
+ /* Reload the displacement into an index reg.
+ We assume the frame pointer or arg pointer is a base reg. */
+ find_reloads_address_part (XEXP (ad, 1), &XEXP (ad, 1),
+ INDEX_REG_CLASS, GET_MODE (ad), opnum,
+ type, ind_levels);
+ }
+ else
+ {
+ /* If the sum of two regs is not necessarily valid,
+ reload the sum into a base reg.
+ That will at least work. */
+ find_reloads_address_part (ad, loc, BASE_REG_CLASS, Pmode,
+ opnum, type, ind_levels);
+ }
+ return 1;
+ }
+
+ /* If we have an indexed stack slot, there are three possible reasons why
+ it might be invalid: The index might need to be reloaded, the address
+ might have been made by frame pointer elimination and hence have a
+ constant out of range, or both reasons might apply.
+
+ We can easily check for an index needing reload, but even if that is the
+ case, we might also have an invalid constant. To avoid making the
+ conservative assumption and requiring two reloads, we see if this address
+ is valid when not interpreted strictly. If it is, the only problem is
+ that the index needs a reload and find_reloads_address_1 will take care
+ of it.
+
+ There is still a case when we might generate an extra reload,
+ however. In certain cases eliminate_regs will return a MEM for a REG
+ (see the code there for details). In those cases, memory_address_p
+ applied to our address will return 0 so we will think that our offset
+ must be too large. But it might indeed be valid and the only problem
+ is that a MEM is present where a REG should be. This case should be
+ very rare and there doesn't seem to be any way to avoid it.
+
+ If we decide to do something here, it must be that
+ `double_reg_address_ok' is true and that this address rtl was made by
+ eliminate_regs. We generate a reload of the fp/sp/ap + constant and
+ rework the sum so that the reload register will be added to the index.
+ This is safe because we know the address isn't shared.
+
+ We check for fp/ap/sp as both the first and second operand of the
+ innermost PLUS. */
+
+ else if (GET_CODE (ad) == PLUS && GET_CODE (XEXP (ad, 1)) == CONST_INT
+ && GET_CODE (XEXP (ad, 0)) == PLUS
+ && (XEXP (XEXP (ad, 0), 0) == frame_pointer_rtx
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ || XEXP (XEXP (ad, 0), 0) == hard_frame_pointer_rtx
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || XEXP (XEXP (ad, 0), 0) == arg_pointer_rtx
+#endif
+ || XEXP (XEXP (ad, 0), 0) == stack_pointer_rtx)
+ && ! memory_address_p (mode, ad))
+ {
+ *loc = ad = gen_rtx (PLUS, GET_MODE (ad),
+ plus_constant (XEXP (XEXP (ad, 0), 0),
+ INTVAL (XEXP (ad, 1))),
+ XEXP (XEXP (ad, 0), 1));
+ find_reloads_address_part (XEXP (ad, 0), &XEXP (ad, 0), BASE_REG_CLASS,
+ GET_MODE (ad), opnum, type, ind_levels);
+ find_reloads_address_1 (XEXP (ad, 1), 1, &XEXP (ad, 1), opnum, type, 0);
+
+ return 1;
+ }
+
+ else if (GET_CODE (ad) == PLUS && GET_CODE (XEXP (ad, 1)) == CONST_INT
+ && GET_CODE (XEXP (ad, 0)) == PLUS
+ && (XEXP (XEXP (ad, 0), 1) == frame_pointer_rtx
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ || XEXP (XEXP (ad, 0), 1) == hard_frame_pointer_rtx
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || XEXP (XEXP (ad, 0), 1) == arg_pointer_rtx
+#endif
+ || XEXP (XEXP (ad, 0), 1) == stack_pointer_rtx)
+ && ! memory_address_p (mode, ad))
+ {
+ *loc = ad = gen_rtx (PLUS, GET_MODE (ad),
+ plus_constant (XEXP (XEXP (ad, 0), 1),
+ INTVAL (XEXP (ad, 1))),
+ XEXP (XEXP (ad, 0), 0));
+ find_reloads_address_part (XEXP (ad, 0), &XEXP (ad, 0), BASE_REG_CLASS,
+ GET_MODE (ad), opnum, type, ind_levels);
+ find_reloads_address_1 (XEXP (ad, 1), 1, &XEXP (ad, 1), opnum, type, 0);
+
+ return 1;
+ }
+
+ /* See if address becomes valid when an eliminable register
+ in a sum is replaced. */
+
+ tem = ad;
+ if (GET_CODE (ad) == PLUS)
+ tem = subst_indexed_address (ad);
+ if (tem != ad && strict_memory_address_p (mode, tem))
+ {
+ /* Ok, we win that way. Replace any additional eliminable
+ registers. */
+
+ subst_reg_equivs_changed = 0;
+ tem = subst_reg_equivs (tem);
+
+ /* Make sure that didn't make the address invalid again. */
+
+ if (! subst_reg_equivs_changed || strict_memory_address_p (mode, tem))
+ {
+ *loc = tem;
+ return 0;
+ }
+ }
+
+ /* If constants aren't valid addresses, reload the constant address
+ into a register. */
+ if (CONSTANT_P (ad) && ! strict_memory_address_p (mode, ad))
+ {
+ /* If AD is an address in the constant pool, the MEM rtx may be shared.
+ Unshare it so we can safely alter it. */
+ if (memrefloc && GET_CODE (ad) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (ad))
+ {
+ *memrefloc = copy_rtx (*memrefloc);
+ loc = &XEXP (*memrefloc, 0);
+ }
+
+ find_reloads_address_part (ad, loc, BASE_REG_CLASS, Pmode, opnum, type,
+ ind_levels);
+ return 1;
+ }
+
+ return find_reloads_address_1 (ad, 0, loc, opnum, type, ind_levels);
+}
+
+/* Find all pseudo regs appearing in AD
+ that are eliminable in favor of equivalent values
+ and do not have hard regs; replace them by their equivalents. */
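+/* E.g. if (reg 70) is a hypothetical pseudo with no hard register that
+ is equivalent to (const_int 64), the address (plus (reg 65) (reg 70))
+ is rewritten here as (plus (reg 65) (const_int 64)). */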
+
+static rtx
+subst_reg_equivs (ad)
+ rtx ad;
+{
+ register RTX_CODE code = GET_CODE (ad);
+ register int i;
+ register char *fmt;
+
+ switch (code)
+ {
+ case HIGH:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case PC:
+ case CC0:
+ return ad;
+
+ case REG:
+ {
+ register int regno = REGNO (ad);
+
+ if (reg_equiv_constant[regno] != 0)
+ {
+ subst_reg_equivs_changed = 1;
+ return reg_equiv_constant[regno];
+ }
+ }
+ return ad;
+
+ case PLUS:
+ /* Quickly dispose of a common case. */
+ if (XEXP (ad, 0) == frame_pointer_rtx
+ && GET_CODE (XEXP (ad, 1)) == CONST_INT)
+ return ad;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ XEXP (ad, i) = subst_reg_equivs (XEXP (ad, i));
+ return ad;
+}
+
+/* Compute the sum of X and Y, making canonicalizations assumed in an
+ address, namely: sum constant integers, surround the sum of two
+ constants with a CONST, put the constant as the second operand, and
+ group the constant on the outermost sum.
+
+ This routine assumes both inputs are already in canonical form. */
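+/* For example (with a hypothetical pseudo 65), the sum of
+ (plus (reg 65) (const_int 4)) and (const_int 8) is
+ (plus (reg 65) (const_int 12)), and the sum of (symbol_ref "x") and
+ (const_int 4) comes out as (const (plus (symbol_ref "x") (const_int 4))). */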
+
+rtx
+form_sum (x, y)
+ rtx x, y;
+{
+ rtx tem;
+ enum machine_mode mode = GET_MODE (x);
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (y);
+
+ if (mode == VOIDmode)
+ mode = Pmode;
+
+ if (GET_CODE (x) == CONST_INT)
+ return plus_constant (y, INTVAL (x));
+ else if (GET_CODE (y) == CONST_INT)
+ return plus_constant (x, INTVAL (y));
+ else if (CONSTANT_P (x))
+ tem = x, x = y, y = tem;
+
+ if (GET_CODE (x) == PLUS && CONSTANT_P (XEXP (x, 1)))
+ return form_sum (XEXP (x, 0), form_sum (XEXP (x, 1), y));
+
+ /* Note that if the operands of Y are specified in the opposite
+ order in the recursive calls below, infinite recursion will occur. */
+ if (GET_CODE (y) == PLUS && CONSTANT_P (XEXP (y, 1)))
+ return form_sum (form_sum (x, XEXP (y, 0)), XEXP (y, 1));
+
+ /* If both constant, encapsulate sum. Otherwise, just form sum. A
+ constant will have been placed second. */
+ if (CONSTANT_P (x) && CONSTANT_P (y))
+ {
+ if (GET_CODE (x) == CONST)
+ x = XEXP (x, 0);
+ if (GET_CODE (y) == CONST)
+ y = XEXP (y, 0);
+
+ return gen_rtx (CONST, VOIDmode, gen_rtx (PLUS, mode, x, y));
+ }
+
+ return gen_rtx (PLUS, mode, x, y);
+}
+
+/* If ADDR is a sum containing a pseudo register that should be
+ replaced with a constant (from reg_equiv_constant),
+ return the result of doing so, and also apply the associative
+ law so that the result is more likely to be a valid address.
+ (But it is not guaranteed to be one.)
+
+ Note that at most one register is replaced, even if more are
+ replaceable. Also, we try to put the result into a canonical form
+ so it is more likely to be a valid address.
+
+ In all other cases, return ADDR. */
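+/* For instance, if pseudo (reg 66) is equivalent to (const_int 8),
+ the address (plus (reg 65) (plus (reg 66) (const_int 4))) is
+ reassociated here, via form_sum, into (plus (reg 65) (const_int 12)). */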
+
+static rtx
+subst_indexed_address (addr)
+ rtx addr;
+{
+ rtx op0 = 0, op1 = 0, op2 = 0;
+ rtx tem;
+ int regno;
+
+ if (GET_CODE (addr) == PLUS)
+ {
+ /* Try to find a register to replace. */
+ op0 = XEXP (addr, 0), op1 = XEXP (addr, 1), op2 = 0;
+ if (GET_CODE (op0) == REG
+ && (regno = REGNO (op0)) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0)
+ op0 = reg_equiv_constant[regno];
+ else if (GET_CODE (op1) == REG
+ && (regno = REGNO (op1)) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[regno] < 0
+ && reg_equiv_constant[regno] != 0)
+ op1 = reg_equiv_constant[regno];
+ else if (GET_CODE (op0) == PLUS
+ && (tem = subst_indexed_address (op0)) != op0)
+ op0 = tem;
+ else if (GET_CODE (op1) == PLUS
+ && (tem = subst_indexed_address (op1)) != op1)
+ op1 = tem;
+ else
+ return addr;
+
+ /* Pick out up to three things to add. */
+ if (GET_CODE (op1) == PLUS)
+ op2 = XEXP (op1, 1), op1 = XEXP (op1, 0);
+ else if (GET_CODE (op0) == PLUS)
+ op2 = op1, op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
+
+ /* Compute the sum. */
+ if (op2 != 0)
+ op1 = form_sum (op1, op2);
+ if (op1 != 0)
+ op0 = form_sum (op0, op1);
+
+ return op0;
+ }
+ return addr;
+}
+
+/* Record the pseudo registers we must reload into hard registers
+ in a subexpression of a would-be memory address, X.
+ (This function is not called if the address we find is strictly valid.)
+ CONTEXT = 1 means we are considering regs as index regs,
+ = 0 means we are considering them as base regs.
+
+ OPNUM and TYPE specify the purpose of any reloads made.
+
+ IND_LEVELS says how many levels of indirect addressing are
+ supported at this point in the address.
+
+ We return nonzero if X, as a whole, is reloaded or replaced. */
+
+/* Note that we take shortcuts assuming that no multi-reg machine mode
+ occurs as part of an address.
+ Also, this is not fully machine-customizable; it works for machines
+ such as VAXes and 68000's and 32000's, but other possible machines
+ could have addressing modes that this does not handle right. */
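+/* For example, given (plus (mult (reg 66) (const_int 4)) (reg 65)),
+ the PLUS case below treats (reg 66) as an index register (CONTEXT = 1)
+ and (reg 65) as a base register (CONTEXT = 0), reloading whichever of
+ them is not already a suitable hard register. */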
+
+static int
+find_reloads_address_1 (x, context, loc, opnum, type, ind_levels)
+ rtx x;
+ int context;
+ rtx *loc;
+ int opnum;
+ enum reload_type type;
+ int ind_levels;
+{
+ register RTX_CODE code = GET_CODE (x);
+
+ switch (code)
+ {
+ case PLUS:
+ {
+ register rtx orig_op0 = XEXP (x, 0);
+ register rtx orig_op1 = XEXP (x, 1);
+ register RTX_CODE code0 = GET_CODE (orig_op0);
+ register RTX_CODE code1 = GET_CODE (orig_op1);
+ register rtx op0 = orig_op0;
+ register rtx op1 = orig_op1;
+
+ if (GET_CODE (op0) == SUBREG)
+ {
+ op0 = SUBREG_REG (op0);
+ code0 = GET_CODE (op0);
+ }
+
+ if (GET_CODE (op1) == SUBREG)
+ {
+ op1 = SUBREG_REG (op1);
+ code1 = GET_CODE (op1);
+ }
+
+ if (code0 == MULT || code0 == SIGN_EXTEND || code1 == MEM)
+ {
+ find_reloads_address_1 (orig_op0, 1, &XEXP (x, 0), opnum, type,
+ ind_levels);
+ find_reloads_address_1 (orig_op1, 0, &XEXP (x, 1), opnum, type,
+ ind_levels);
+ }
+
+ else if (code1 == MULT || code1 == SIGN_EXTEND || code0 == MEM)
+ {
+ find_reloads_address_1 (orig_op0, 0, &XEXP (x, 0), opnum, type,
+ ind_levels);
+ find_reloads_address_1 (orig_op1, 1, &XEXP (x, 1), opnum, type,
+ ind_levels);
+ }
+
+ else if (code0 == CONST_INT || code0 == CONST
+ || code0 == SYMBOL_REF || code0 == LABEL_REF)
+ find_reloads_address_1 (orig_op1, 0, &XEXP (x, 1), opnum, type,
+ ind_levels);
+
+ else if (code1 == CONST_INT || code1 == CONST
+ || code1 == SYMBOL_REF || code1 == LABEL_REF)
+ find_reloads_address_1 (orig_op0, 0, &XEXP (x, 0), opnum, type,
+ ind_levels);
+
+ else if (code0 == REG && code1 == REG)
+ {
+ if (REG_OK_FOR_INDEX_P (op0)
+ && REG_OK_FOR_BASE_P (op1))
+ return 0;
+ else if (REG_OK_FOR_INDEX_P (op1)
+ && REG_OK_FOR_BASE_P (op0))
+ return 0;
+ else if (REG_OK_FOR_BASE_P (op1))
+ find_reloads_address_1 (orig_op0, 1, &XEXP (x, 0), opnum, type,
+ ind_levels);
+ else if (REG_OK_FOR_BASE_P (op0))
+ find_reloads_address_1 (orig_op1, 1, &XEXP (x, 1), opnum, type,
+ ind_levels);
+ else if (REG_OK_FOR_INDEX_P (op1))
+ find_reloads_address_1 (orig_op0, 0, &XEXP (x, 0), opnum, type,
+ ind_levels);
+ else if (REG_OK_FOR_INDEX_P (op0))
+ find_reloads_address_1 (orig_op1, 0, &XEXP (x, 1), opnum, type,
+ ind_levels);
+ else
+ {
+ find_reloads_address_1 (orig_op0, 1, &XEXP (x, 0), opnum, type,
+ ind_levels);
+ find_reloads_address_1 (orig_op1, 0, &XEXP (x, 1), opnum, type,
+ ind_levels);
+ }
+ }
+
+ else if (code0 == REG)
+ {
+ find_reloads_address_1 (orig_op0, 1, &XEXP (x, 0), opnum, type,
+ ind_levels);
+ find_reloads_address_1 (orig_op1, 0, &XEXP (x, 1), opnum, type,
+ ind_levels);
+ }
+
+ else if (code1 == REG)
+ {
+ find_reloads_address_1 (orig_op1, 1, &XEXP (x, 1), opnum, type,
+ ind_levels);
+ find_reloads_address_1 (orig_op0, 0, &XEXP (x, 0), opnum, type,
+ ind_levels);
+ }
+ }
+
+ return 0;
+
+ case POST_INC:
+ case POST_DEC:
+ case PRE_INC:
+ case PRE_DEC:
+ if (GET_CODE (XEXP (x, 0)) == REG)
+ {
+ register int regno = REGNO (XEXP (x, 0));
+ int value = 0;
+ rtx x_orig = x;
+
+ /* A register that is incremented cannot be constant! */
+ if (regno >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_constant[regno] != 0)
+ abort ();
+
+ /* Handle a register that is equivalent to a memory location
+ which cannot be addressed directly. */
+ if (reg_equiv_address[regno] != 0)
+ {
+ rtx tem = make_memloc (XEXP (x, 0), regno);
+ /* First reload the memory location's address. */
+ find_reloads_address (GET_MODE (tem), 0, XEXP (tem, 0),
+ &XEXP (tem, 0), opnum, type, ind_levels);
+ /* Put this inside a new increment-expression. */
+ x = gen_rtx (GET_CODE (x), GET_MODE (x), tem);
+ /* Proceed to reload that, as if it contained a register. */
+ }
+
+ /* If we have a hard register that is ok as an index,
+ don't make a reload. If an autoincrement of a nice register
+ isn't "valid", it must be that no autoincrement is "valid".
+ If that is true and something made an autoincrement anyway,
+ this must be a special context where one is allowed.
+ (For example, a "push" instruction.)
+ We can't improve this address, so leave it alone. */
+
+ /* Otherwise, reload the autoincrement into a suitable hard reg
+ and record how much to increment by. */
+
+ if (reg_renumber[regno] >= 0)
+ regno = reg_renumber[regno];
+ if ((regno >= FIRST_PSEUDO_REGISTER
+ || !(context ? REGNO_OK_FOR_INDEX_P (regno)
+ : REGNO_OK_FOR_BASE_P (regno))))
+ {
+ register rtx link;
+
+ int reloadnum
+ = push_reload (x, NULL_RTX, loc, NULL_PTR,
+ context ? INDEX_REG_CLASS : BASE_REG_CLASS,
+ GET_MODE (x), GET_MODE (x), VOIDmode, 0,
+ opnum, type);
+ reload_inc[reloadnum]
+ = find_inc_amount (PATTERN (this_insn), XEXP (x_orig, 0));
+
+ value = 1;
+
+#ifdef AUTO_INC_DEC
+ /* Update the REG_INC notes. */
+
+ for (link = REG_NOTES (this_insn);
+ link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_INC
+ && REGNO (XEXP (link, 0)) == REGNO (XEXP (x_orig, 0)))
+ push_replacement (&XEXP (link, 0), reloadnum, VOIDmode);
+#endif
+ }
+ return value;
+ }
+
+ else if (GET_CODE (XEXP (x, 0)) == MEM)
+ {
+ /* This is probably the result of a substitution, by eliminate_regs,
+ of an equivalent address for a pseudo that was not allocated to a
+ hard register. Verify that the specified address is valid and
+ reload it into a register. */
+ rtx tem = XEXP (x, 0);
+ register rtx link;
+ int reloadnum;
+
+ /* Since we know we are going to reload this item, don't decrement
+ for the indirection level.
+
+ Note that this is actually conservative: it would be slightly
+ more efficient to use the value of SPILL_INDIRECT_LEVELS from
+ reload1.c here. */
+ find_reloads_address (GET_MODE (x), &XEXP (x, 0),
+ XEXP (XEXP (x, 0), 0), &XEXP (XEXP (x, 0), 0),
+ opnum, type, ind_levels);
+
+ reloadnum = push_reload (x, NULL_RTX, loc, NULL_PTR,
+ context ? INDEX_REG_CLASS : BASE_REG_CLASS,
+ GET_MODE (x), VOIDmode, 0, 0, opnum, type);
+ reload_inc[reloadnum]
+ = find_inc_amount (PATTERN (this_insn), XEXP (x, 0));
+
+ link = FIND_REG_INC_NOTE (this_insn, tem);
+ if (link != 0)
+ push_replacement (&XEXP (link, 0), reloadnum, VOIDmode);
+
+ return 1;
+ }
+ return 0;
+
+ case MEM:
+ /* This is probably the result of a substitution, by eliminate_regs, of
+ an equivalent address for a pseudo that was not allocated to a hard
+ register. Verify that the specified address is valid and reload it
+ into a register.
+
+ Since we know we are going to reload this item, don't decrement for
+ the indirection level.
+
+ Note that this is actually conservative: it would be slightly more
+ efficient to use the value of SPILL_INDIRECT_LEVELS from
+ reload1.c here. */
+
+ find_reloads_address (GET_MODE (x), loc, XEXP (x, 0), &XEXP (x, 0),
+ opnum, type, ind_levels);
+ push_reload (*loc, NULL_RTX, loc, NULL_PTR,
+ context ? INDEX_REG_CLASS : BASE_REG_CLASS,
+ GET_MODE (x), VOIDmode, 0, 0, opnum, type);
+ return 1;
+
+ case REG:
+ {
+ register int regno = REGNO (x);
+
+ if (reg_equiv_constant[regno] != 0)
+ {
+ find_reloads_address_part (reg_equiv_constant[regno], loc,
+ (context ? INDEX_REG_CLASS
+ : BASE_REG_CLASS),
+ GET_MODE (x), opnum, type, ind_levels);
+ return 1;
+ }
+
+#if 0 /* This might screw code in reload1.c to delete prior output-reload
+ that feeds this insn. */
+ if (reg_equiv_mem[regno] != 0)
+ {
+ push_reload (reg_equiv_mem[regno], NULL_RTX, loc, NULL_PTR,
+ context ? INDEX_REG_CLASS : BASE_REG_CLASS,
+ GET_MODE (x), VOIDmode, 0, 0, opnum, type);
+ return 1;
+ }
+#endif
+
+ if (reg_equiv_address[regno] != 0)
+ {
+ x = make_memloc (x, regno);
+ find_reloads_address (GET_MODE (x), 0, XEXP (x, 0), &XEXP (x, 0),
+ opnum, type, ind_levels);
+ }
+
+ if (reg_renumber[regno] >= 0)
+ regno = reg_renumber[regno];
+
+ if ((regno >= FIRST_PSEUDO_REGISTER
+ || !(context ? REGNO_OK_FOR_INDEX_P (regno)
+ : REGNO_OK_FOR_BASE_P (regno))))
+ {
+ push_reload (x, NULL_RTX, loc, NULL_PTR,
+ context ? INDEX_REG_CLASS : BASE_REG_CLASS,
+ GET_MODE (x), VOIDmode, 0, 0, opnum, type);
+ return 1;
+ }
+
+ /* If a register appearing in an address is the subject of a CLOBBER
+ in this insn, reload it into some other register to be safe.
+ The CLOBBER is supposed to make the register unavailable
+ from before this insn to after it. */
+ if (regno_clobbered_p (regno, this_insn))
+ {
+ push_reload (x, NULL_RTX, loc, NULL_PTR,
+ context ? INDEX_REG_CLASS : BASE_REG_CLASS,
+ GET_MODE (x), VOIDmode, 0, 0, opnum, type);
+ return 1;
+ }
+ }
+ return 0;
+
+ case SUBREG:
+ /* If this is a SUBREG of a hard register and the resulting register is
+ of the wrong class, reload the whole SUBREG. This avoids needless
+ copies if SUBREG_REG is multi-word. */
+ if (GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
+ {
+ int regno = REGNO (SUBREG_REG (x)) + SUBREG_WORD (x);
+
+ if (! (context ? REGNO_OK_FOR_INDEX_P (regno)
+ : REGNO_OK_FOR_BASE_P (regno)))
+ {
+ push_reload (x, NULL_RTX, loc, NULL_PTR,
+ context ? INDEX_REG_CLASS : BASE_REG_CLASS,
+ GET_MODE (x), VOIDmode, 0, 0, opnum, type);
+ return 1;
+ }
+ }
+ break;
+ }
+
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ find_reloads_address_1 (XEXP (x, i), context, &XEXP (x, i),
+ opnum, type, ind_levels);
+ }
+ }
+
+ return 0;
+}
+
+/* X, which is found at *LOC, is a part of an address that needs to be
+ reloaded into a register of class CLASS. If X is a constant, or if
+ X is a PLUS that contains a constant, check that the constant is a
+ legitimate operand and that we are supposed to be able to load
+ it into the register.
+
+ If not, force the constant into memory and reload the MEM instead.
+
+ MODE is the mode to use, in case X is an integer constant.
+
+ OPNUM and TYPE describe the purpose of any reloads made.
+
+ IND_LEVELS says how many levels of indirect addressing this machine
+ supports. */
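+/* For example, if (const_double:DF ... 1.0) is not a legitimate
+ constant, or PREFERRED_RELOAD_CLASS refuses to load it into CLASS,
+ it is spilled to the constant pool below and the resulting
+ (mem:DF (symbol_ref ...)) is reloaded into a register instead. */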
+
+static void
+find_reloads_address_part (x, loc, class, mode, opnum, type, ind_levels)
+ rtx x;
+ rtx *loc;
+ enum reg_class class;
+ enum machine_mode mode;
+ int opnum;
+ enum reload_type type;
+ int ind_levels;
+{
+ if (CONSTANT_P (x)
+ && (! LEGITIMATE_CONSTANT_P (x)
+ || PREFERRED_RELOAD_CLASS (x, class) == NO_REGS))
+ {
+ rtx tem = x = force_const_mem (mode, x);
+ find_reloads_address (mode, &tem, XEXP (tem, 0), &XEXP (tem, 0),
+ opnum, type, ind_levels);
+ }
+
+ else if (GET_CODE (x) == PLUS
+ && CONSTANT_P (XEXP (x, 1))
+ && (! LEGITIMATE_CONSTANT_P (XEXP (x, 1))
+ || PREFERRED_RELOAD_CLASS (XEXP (x, 1), class) == NO_REGS))
+ {
+ rtx tem = force_const_mem (GET_MODE (x), XEXP (x, 1));
+
+ x = gen_rtx (PLUS, GET_MODE (x), XEXP (x, 0), tem);
+ find_reloads_address (mode, &tem, XEXP (tem, 0), &XEXP (tem, 0),
+ opnum, type, ind_levels);
+ }
+
+ push_reload (x, NULL_RTX, loc, NULL_PTR, class,
+ mode, VOIDmode, 0, 0, opnum, type);
+}
+
+/* Substitute into the current INSN the registers into which we have reloaded
+ the things that need reloading. The array `replacements'
+ contains the locations of all pointers that must be changed
+ and says what to replace them with. */
+
+void
+subst_reloads ()
+{
+ register int i;
+
+ for (i = 0; i < n_replacements; i++)
+ {
+ register struct replacement *r = &replacements[i];
+ register rtx reloadreg = reload_reg_rtx[r->what];
+ if (reloadreg)
+ {
+ /* Encapsulate RELOADREG so its machine mode matches what
+ used to be there. Note that gen_lowpart_common will
+ do the wrong thing if RELOADREG is multi-word. RELOADREG
+ will always be a REG here. */
+ if (GET_MODE (reloadreg) != r->mode && r->mode != VOIDmode)
+ reloadreg = gen_rtx (REG, r->mode, REGNO (reloadreg));
+
+ /* If we are putting this into a SUBREG and RELOADREG is a
+ SUBREG, we would be making nested SUBREGs, so we have to fix
+ this up. Note that r->where == &SUBREG_REG (*r->subreg_loc). */
+
+ if (r->subreg_loc != 0 && GET_CODE (reloadreg) == SUBREG)
+ {
+ if (GET_MODE (*r->subreg_loc)
+ == GET_MODE (SUBREG_REG (reloadreg)))
+ *r->subreg_loc = SUBREG_REG (reloadreg);
+ else
+ {
+ *r->where = SUBREG_REG (reloadreg);
+ SUBREG_WORD (*r->subreg_loc) += SUBREG_WORD (reloadreg);
+ }
+ }
+ else
+ *r->where = reloadreg;
+ }
+ /* If reload got no reg and isn't optional, something's wrong. */
+ else if (! reload_optional[r->what])
+ abort ();
+ }
+}
+
+/* Make a copy of any replacements being done into X and move those copies
+ to locations in Y, a copy of X. We only look at the highest level of
+ the RTL. */
+
+void
+copy_replacements (x, y)
+ rtx x;
+ rtx y;
+{
+ int i, j;
+ enum rtx_code code = GET_CODE (x);
+ char *fmt = GET_RTX_FORMAT (code);
+ struct replacement *r;
+
+ /* We can't support X being a SUBREG because we might then need to know its
+ location if something inside it was replaced. */
+ if (code == SUBREG)
+ abort ();
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ for (j = 0; j < n_replacements; j++)
+ {
+ if (replacements[j].subreg_loc == &XEXP (x, i))
+ {
+ r = &replacements[n_replacements++];
+ r->where = replacements[j].where;
+ r->subreg_loc = &XEXP (y, i);
+ r->what = replacements[j].what;
+ r->mode = replacements[j].mode;
+ }
+ else if (replacements[j].where == &XEXP (x, i))
+ {
+ r = &replacements[n_replacements++];
+ r->where = &XEXP (y, i);
+ r->subreg_loc = 0;
+ r->what = replacements[j].what;
+ r->mode = replacements[j].mode;
+ }
+ }
+}
+
+/* If LOC was scheduled to be replaced by something, return the replacement.
+ Otherwise, return *LOC. */
+
+rtx
+find_replacement (loc)
+ rtx *loc;
+{
+ struct replacement *r;
+
+ for (r = &replacements[0]; r < &replacements[n_replacements]; r++)
+ {
+ rtx reloadreg = reload_reg_rtx[r->what];
+
+ if (reloadreg && r->where == loc)
+ {
+ if (r->mode != VOIDmode && GET_MODE (reloadreg) != r->mode)
+ reloadreg = gen_rtx (REG, r->mode, REGNO (reloadreg));
+
+ return reloadreg;
+ }
+ else if (reloadreg && r->subreg_loc == loc)
+ {
+ /* RELOADREG must be either a REG or a SUBREG.
+
+ ??? Is it actually still ever a SUBREG? If so, why? */
+
+ if (GET_CODE (reloadreg) == REG)
+ return gen_rtx (REG, GET_MODE (*loc),
+ REGNO (reloadreg) + SUBREG_WORD (*loc));
+ else if (GET_MODE (reloadreg) == GET_MODE (*loc))
+ return reloadreg;
+ else
+ return gen_rtx (SUBREG, GET_MODE (*loc), SUBREG_REG (reloadreg),
+ SUBREG_WORD (reloadreg) + SUBREG_WORD (*loc));
+ }
+ }
+
+ return *loc;
+}
+
+/* Return nonzero if register in range [REGNO, ENDREGNO)
+ appears either explicitly or implicitly in X
+ other than being stored into (except for earlyclobber operands).
+
+ References contained within the substructure at LOC do not count.
+ LOC may be zero, meaning don't ignore anything.
+
+ This is similar to refers_to_regno_p in rtlanal.c except that we
+ look at equivalences for pseudos that didn't get hard registers. */
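+/* For example, if hypothetical pseudo (reg 70) got no hard register
+ but is equivalent to (mem:SI (plus (reg 6) (const_int -4))), an X
+ mentioning (reg 70) is reported here as referring to hard register 6,
+ which plain refers_to_regno_p would miss. */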
+
+int
+refers_to_regno_for_reload_p (regno, endregno, x, loc)
+ int regno, endregno;
+ rtx x;
+ rtx *loc;
+{
+ register int i;
+ register RTX_CODE code;
+ register char *fmt;
+
+ if (x == 0)
+ return 0;
+
+ repeat:
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+ i = REGNO (x);
+
+ /* If this is a pseudo, a hard register must not have been allocated.
+ X must therefore either be a constant or be in memory. */
+ if (i >= FIRST_PSEUDO_REGISTER)
+ {
+ if (reg_equiv_memory_loc[i])
+ return refers_to_regno_for_reload_p (regno, endregno,
+ reg_equiv_memory_loc[i],
+ NULL_PTR);
+
+ if (reg_equiv_constant[i])
+ return 0;
+
+ abort ();
+ }
+
+ return (endregno > i
+ && regno < i + (i < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (i, GET_MODE (x))
+ : 1));
+
+ case SUBREG:
+ /* If this is a SUBREG of a hard reg, we can see exactly which
+ registers are being modified. Otherwise, handle normally. */
+ if (GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
+ {
+ int inner_regno = REGNO (SUBREG_REG (x)) + SUBREG_WORD (x);
+ int inner_endregno
+ = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (inner_regno, GET_MODE (x)) : 1);
+
+ return endregno > inner_regno && regno < inner_endregno;
+ }
+ break;
+
+ case CLOBBER:
+ case SET:
+ if (&SET_DEST (x) != loc
+ /* Note setting a SUBREG counts as referring to the REG it is in for
+ a pseudo but not for hard registers since we can
+ treat each word individually. */
+ && ((GET_CODE (SET_DEST (x)) == SUBREG
+ && loc != &SUBREG_REG (SET_DEST (x))
+ && GET_CODE (SUBREG_REG (SET_DEST (x))) == REG
+ && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
+ && refers_to_regno_for_reload_p (regno, endregno,
+ SUBREG_REG (SET_DEST (x)),
+ loc))
+ /* If the output is an earlyclobber operand, this is
+ a conflict. */
+ || ((GET_CODE (SET_DEST (x)) != REG
+ || earlyclobber_operand_p (SET_DEST (x)))
+ && refers_to_regno_for_reload_p (regno, endregno,
+ SET_DEST (x), loc))))
+ return 1;
+
+ if (code == CLOBBER || loc == &SET_SRC (x))
+ return 0;
+ x = SET_SRC (x);
+ goto repeat;
+ }
+
+ /* X does not match, so try its subexpressions. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e' && loc != &XEXP (x, i))
+ {
+ if (i == 0)
+ {
+ x = XEXP (x, 0);
+ goto repeat;
+ }
+ else
+ if (refers_to_regno_for_reload_p (regno, endregno,
+ XEXP (x, i), loc))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (loc != &XVECEXP (x, i, j)
+ && refers_to_regno_for_reload_p (regno, endregno,
+ XVECEXP (x, i, j), loc))
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
+ we check if any register number in X conflicts with the relevant register
+ numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
+ contains a MEM (we don't bother checking for memory addresses that can't
+ conflict, because we expect this to be a rare case).
+
+ This function is similar to reg_overlap_mentioned_p in rtlanal.c except
+ that we look at equivalences for pseudos that didn't get hard registers. */
+
+int
+reg_overlap_mentioned_for_reload_p (x, in)
+ rtx x, in;
+{
+ int regno, endregno;
+
+ if (GET_CODE (x) == SUBREG)
+ {
+ regno = REGNO (SUBREG_REG (x));
+ if (regno < FIRST_PSEUDO_REGISTER)
+ regno += SUBREG_WORD (x);
+ }
+ else if (GET_CODE (x) == REG)
+ {
+ regno = REGNO (x);
+
+ /* If this is a pseudo, it must not have been assigned a hard register.
+ Therefore, it must either be in memory or be a constant. */
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ if (reg_equiv_memory_loc[regno])
+ return refers_to_mem_for_reload_p (in);
+ else if (reg_equiv_constant[regno])
+ return 0;
+ abort ();
+ }
+ }
+ else if (CONSTANT_P (x))
+ return 0;
+ else if (GET_CODE (x) == MEM)
+ return refers_to_mem_for_reload_p (in);
+ else if (GET_CODE (x) == SCRATCH || GET_CODE (x) == PC
+ || GET_CODE (x) == CC0)
+ return reg_mentioned_p (x, in);
+ else
+ abort ();
+
+ endregno = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+
+ return refers_to_regno_for_reload_p (regno, endregno, in, NULL_PTR);
+}
+
+/* Return nonzero if anything in X contains a MEM. Also count pseudo
+ registers that are equivalent to a memory location. */
+
+int
+refers_to_mem_for_reload_p (x)
+ rtx x;
+{
+ char *fmt;
+ int i;
+
+ if (GET_CODE (x) == MEM)
+ return 1;
+
+ if (GET_CODE (x) == REG)
+ return (REGNO (x) >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_memory_loc[REGNO (x)]);
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ if (fmt[i] == 'e'
+ && (GET_CODE (XEXP (x, i)) == MEM
+ || refers_to_mem_for_reload_p (XEXP (x, i))))
+ return 1;
+
+ return 0;
+}
+
+/* Check the insns before INSN to see if there is a suitable register
+ containing the same value as GOAL.
+ If OTHER is -1, look for a register in class CLASS.
+ Otherwise, just see if register number OTHER shares GOAL's value.
+
+ Return an rtx for the register found, or zero if none is found.
+
+ If RELOAD_REG_P is (short *)1,
+ we reject any hard reg that appears in reload_reg_rtx
+ because such a hard reg is also needed coming into this insn.
+
+ If RELOAD_REG_P is any other nonzero value,
+ it is a vector indexed by hard reg number
+ and we reject any hard reg whose element in the vector is nonnegative
+ as well as any that appears in reload_reg_rtx.
+
+ If GOAL is zero, then GOALREG is a register number; we look
+ for an equivalent for that register.
+
+ MODE is the machine mode of the value we want an equivalence for.
+ If GOAL is nonzero and not VOIDmode, then it must have mode MODE.
+
+ This function is used by jump.c as well as in the reload pass.
+
+ If GOAL is the sum of the stack pointer and a constant, we treat it
+ as if it were a constant except that sp is required to be unchanging. */
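+/* For example, if a recent insn was (set (reg:SI 3) (mem:SI ...)) and
+ GOAL is that same MEM, with neither the MEM's contents nor (reg:SI 3)
+ modified since, then (reg:SI 3) already holds GOAL's value and can be
+ returned here instead of emitting another load. */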
+
+rtx
+find_equiv_reg (goal, insn, class, other, reload_reg_p, goalreg, mode)
+ register rtx goal;
+ rtx insn;
+ enum reg_class class;
+ register int other;
+ short *reload_reg_p;
+ int goalreg;
+ enum machine_mode mode;
+{
+ register rtx p = insn;
+ rtx goaltry, valtry, value, where;
+ register rtx pat;
+ register int regno = -1;
+ int valueno;
+ int goal_mem = 0;
+ int goal_const = 0;
+ int goal_mem_addr_varies = 0;
+ int need_stable_sp = 0;
+ int nregs;
+ int valuenregs;
+
+ if (goal == 0)
+ regno = goalreg;
+ else if (GET_CODE (goal) == REG)
+ regno = REGNO (goal);
+ else if (GET_CODE (goal) == MEM)
+ {
+ enum rtx_code code = GET_CODE (XEXP (goal, 0));
+ if (MEM_VOLATILE_P (goal))
+ return 0;
+ if (flag_float_store && GET_MODE_CLASS (GET_MODE (goal)) == MODE_FLOAT)
+ return 0;
+ /* An address with side effects must be reexecuted. */
+ switch (code)
+ {
+ case POST_INC:
+ case PRE_INC:
+ case POST_DEC:
+ case PRE_DEC:
+ return 0;
+ }
+ goal_mem = 1;
+ }
+ else if (CONSTANT_P (goal))
+ goal_const = 1;
+ else if (GET_CODE (goal) == PLUS
+ && XEXP (goal, 0) == stack_pointer_rtx
+ && CONSTANT_P (XEXP (goal, 1)))
+ goal_const = need_stable_sp = 1;
+ else
+ return 0;
+
+ /* On some machines, certain regs must always be rejected
+ because they don't behave the way ordinary registers do. */
+
+#ifdef OVERLAPPING_REGNO_P
+ if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
+ && OVERLAPPING_REGNO_P (regno))
+ return 0;
+#endif
+
+ /* Scan insns back from INSN, looking for one that copies
+ a value into or out of GOAL.
+ Stop and give up if we reach a label. */
+
+ while (1)
+ {
+ p = PREV_INSN (p);
+ if (p == 0 || GET_CODE (p) == CODE_LABEL)
+ return 0;
+ if (GET_CODE (p) == INSN
+ /* If we don't want spill regs ... */
+ && (! (reload_reg_p != 0
+ && reload_reg_p != (short *) (HOST_WIDE_INT) 1)
+ /* ... then ignore insns introduced by reload; they aren't useful
+ and can cause results in reload_as_needed to be different
+ from what they were when calculating the need for spills.
+ If we notice an input-reload insn here, we will reject it below,
+ but it might hide a usable equivalent. That makes bad code.
+ It may even abort: perhaps no reg was spilled for this insn
+ because it was assumed we would find that equivalent. */
+ || INSN_UID (p) < reload_first_uid))
+ {
+ rtx tem;
+ pat = single_set (p);
+ /* First check for something that sets some reg equal to GOAL. */
+ if (pat != 0
+ && ((regno >= 0
+ && true_regnum (SET_SRC (pat)) == regno
+ && (valueno = true_regnum (valtry = SET_DEST (pat))) >= 0)
+ ||
+ (regno >= 0
+ && true_regnum (SET_DEST (pat)) == regno
+ && (valueno = true_regnum (valtry = SET_SRC (pat))) >= 0)
+ ||
+ (goal_const && rtx_equal_p (SET_SRC (pat), goal)
+ && (valueno = true_regnum (valtry = SET_DEST (pat))) >= 0)
+ || (goal_mem
+ && (valueno = true_regnum (valtry = SET_DEST (pat))) >= 0
+ && rtx_renumbered_equal_p (goal, SET_SRC (pat)))
+ || (goal_mem
+ && (valueno = true_regnum (valtry = SET_SRC (pat))) >= 0
+ && rtx_renumbered_equal_p (goal, SET_DEST (pat)))
+ /* If we are looking for a constant,
+ and something equivalent to that constant was copied
+ into a reg, we can use that reg. */
+ || (goal_const && (tem = find_reg_note (p, REG_EQUIV,
+ NULL_RTX))
+ && rtx_equal_p (XEXP (tem, 0), goal)
+ && (valueno = true_regnum (valtry = SET_DEST (pat))) >= 0)
+ || (goal_const && (tem = find_reg_note (p, REG_EQUIV,
+ NULL_RTX))
+ && GET_CODE (SET_DEST (pat)) == REG
+ && GET_CODE (XEXP (tem, 0)) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (XEXP (tem, 0))) == MODE_FLOAT
+ && GET_CODE (goal) == CONST_INT
+ && 0 != (goaltry = operand_subword (XEXP (tem, 0), 0, 0,
+ VOIDmode))
+ && rtx_equal_p (goal, goaltry)
+ && (valtry = operand_subword (SET_DEST (pat), 0, 0,
+ VOIDmode))
+ && (valueno = true_regnum (valtry)) >= 0)
+ || (goal_const && (tem = find_reg_note (p, REG_EQUIV,
+ NULL_RTX))
+ && GET_CODE (SET_DEST (pat)) == REG
+ && GET_CODE (XEXP (tem, 0)) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (XEXP (tem, 0))) == MODE_FLOAT
+ && GET_CODE (goal) == CONST_INT
+ && 0 != (goaltry = operand_subword (XEXP (tem, 0), 1, 0,
+ VOIDmode))
+ && rtx_equal_p (goal, goaltry)
+ && (valtry
+ = operand_subword (SET_DEST (pat), 1, 0, VOIDmode))
+ && (valueno = true_regnum (valtry)) >= 0)))
+ if (other >= 0
+ ? valueno == other
+ : ((unsigned) valueno < FIRST_PSEUDO_REGISTER
+ && TEST_HARD_REG_BIT (reg_class_contents[(int) class],
+ valueno)))
+ {
+ value = valtry;
+ where = p;
+ break;
+ }
+ }
+ }
+
+ /* We found a previous insn copying GOAL into a suitable other reg VALUE
+ (or copying VALUE into GOAL, if GOAL is also a register).
+ Now verify that VALUE is really valid. */
+
+ /* VALUENO is the register number of VALUE; a hard register. */
+
+ /* Don't try to re-use something that is killed in this insn. We want
+ to be able to trust REG_UNUSED notes. */
+ if (find_reg_note (where, REG_UNUSED, value))
+ return 0;
+
+ /* If we propose to get the value from the stack pointer or if GOAL is
+ a MEM based on the stack pointer, we need a stable SP. */
+ if (valueno == STACK_POINTER_REGNUM
+ || (goal_mem && reg_overlap_mentioned_for_reload_p (stack_pointer_rtx,
+ goal)))
+ need_stable_sp = 1;
+
+ /* Reject VALUE if the copy-insn moved the wrong sort of datum. */
+ if (GET_MODE (value) != mode)
+ return 0;
+
+ /* Reject VALUE if it was loaded from GOAL
+ and is also a register that appears in the address of GOAL. */
+
+ if (goal_mem && value == SET_DEST (PATTERN (where))
+ && refers_to_regno_for_reload_p (valueno,
+ (valueno
+ + HARD_REGNO_NREGS (valueno, mode)),
+ goal, NULL_PTR))
+ return 0;
+
+ /* Reject registers that overlap GOAL. */
+
+ if (!goal_mem && !goal_const
+ && regno + HARD_REGNO_NREGS (regno, mode) > valueno
+ && regno < valueno + HARD_REGNO_NREGS (valueno, mode))
+ return 0;
+
+ /* Reject VALUE if it is one of the regs reserved for reloads.
+ Reload1 knows how to reuse them anyway, and it would get
+ confused if we allocated one without its knowledge.
+ (Now that insns introduced by reload are ignored above,
+ this case shouldn't happen, but I'm not positive.) */
+
+ if (reload_reg_p != 0 && reload_reg_p != (short *) (HOST_WIDE_INT) 1
+ && reload_reg_p[valueno] >= 0)
+ return 0;
+
+ /* On some machines, certain regs must always be rejected
+ because they don't behave the way ordinary registers do. */
+
+#ifdef OVERLAPPING_REGNO_P
+ if (OVERLAPPING_REGNO_P (valueno))
+ return 0;
+#endif
+
+ nregs = HARD_REGNO_NREGS (regno, mode);
+ valuenregs = HARD_REGNO_NREGS (valueno, mode);
+
+ /* Reject VALUE if it is a register being used for an input reload
+ even if it is not one of those reserved. */
+
+ if (reload_reg_p != 0)
+ {
+ int i;
+ for (i = 0; i < n_reloads; i++)
+ if (reload_reg_rtx[i] != 0 && reload_in[i])
+ {
+ int regno1 = REGNO (reload_reg_rtx[i]);
+ int nregs1 = HARD_REGNO_NREGS (regno1,
+ GET_MODE (reload_reg_rtx[i]));
+ if (regno1 < valueno + valuenregs
+ && regno1 + nregs1 > valueno)
+ return 0;
+ }
+ }
+
+ if (goal_mem)
+ /* We must treat frame pointer as varying here,
+ since it can vary--in a nonlocal goto as generated by expand_goto. */
+ goal_mem_addr_varies = !CONSTANT_ADDRESS_P (XEXP (goal, 0));
+
+ /* Now verify that the values of GOAL and VALUE remain unaltered
+ until INSN is reached. */
+
+ p = insn;
+ while (1)
+ {
+ p = PREV_INSN (p);
+ if (p == where)
+ return value;
+
+ /* Don't trust the conversion past a function call
+ if either of the two is in a call-clobbered register, or memory. */
+ if (GET_CODE (p) == CALL_INSN
+ && ((regno >= 0 && regno < FIRST_PSEUDO_REGISTER
+ && call_used_regs[regno])
+ ||
+ (valueno >= 0 && valueno < FIRST_PSEUDO_REGISTER
+ && call_used_regs[valueno])
+ ||
+ goal_mem
+ || need_stable_sp))
+ return 0;
+
+#ifdef INSN_CLOBBERS_REGNO_P
+ if ((valueno >= 0 && valueno < FIRST_PSEUDO_REGISTER
+ && INSN_CLOBBERS_REGNO_P (p, valueno))
+ || (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
+ && INSN_CLOBBERS_REGNO_P (p, regno)))
+ return 0;
+#endif
+
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ {
+ /* If this insn P stores in either GOAL or VALUE, return 0.
+ If GOAL is a memory ref and this insn writes memory, return 0.
+ If GOAL is a memory ref and its address is not constant,
+ and this insn P changes a register used in GOAL, return 0. */
+
+ pat = PATTERN (p);
+ if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
+ {
+ register rtx dest = SET_DEST (pat);
+ while (GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+ if (GET_CODE (dest) == REG)
+ {
+ register int xregno = REGNO (dest);
+ int xnregs;
+ if (REGNO (dest) < FIRST_PSEUDO_REGISTER)
+ xnregs = HARD_REGNO_NREGS (xregno, GET_MODE (dest));
+ else
+ xnregs = 1;
+ if (xregno < regno + nregs && xregno + xnregs > regno)
+ return 0;
+ if (xregno < valueno + valuenregs
+ && xregno + xnregs > valueno)
+ return 0;
+ if (goal_mem_addr_varies
+ && reg_overlap_mentioned_for_reload_p (dest, goal))
+ return 0;
+ }
+ else if (goal_mem && GET_CODE (dest) == MEM
+ && ! push_operand (dest, GET_MODE (dest)))
+ return 0;
+ else if (need_stable_sp && push_operand (dest, GET_MODE (dest)))
+ return 0;
+ }
+ else if (GET_CODE (pat) == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
+ {
+ register rtx v1 = XVECEXP (pat, 0, i);
+ if (GET_CODE (v1) == SET || GET_CODE (v1) == CLOBBER)
+ {
+ register rtx dest = SET_DEST (v1);
+ while (GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+ if (GET_CODE (dest) == REG)
+ {
+ register int xregno = REGNO (dest);
+ int xnregs;
+ if (REGNO (dest) < FIRST_PSEUDO_REGISTER)
+ xnregs = HARD_REGNO_NREGS (xregno, GET_MODE (dest));
+ else
+ xnregs = 1;
+ if (xregno < regno + nregs
+ && xregno + xnregs > regno)
+ return 0;
+ if (xregno < valueno + valuenregs
+ && xregno + xnregs > valueno)
+ return 0;
+ if (goal_mem_addr_varies
+ && reg_overlap_mentioned_for_reload_p (dest,
+ goal))
+ return 0;
+ }
+ else if (goal_mem && GET_CODE (dest) == MEM
+ && ! push_operand (dest, GET_MODE (dest)))
+ return 0;
+ else if (need_stable_sp
+ && push_operand (dest, GET_MODE (dest)))
+ return 0;
+ }
+ }
+ }
+
+#ifdef AUTO_INC_DEC
+ /* If this insn auto-increments or auto-decrements
+ either regno or valueno, return 0 now.
+ If GOAL is a memory ref and its address is not constant,
+ and this insn P increments a register used in GOAL, return 0. */
+ {
+ register rtx link;
+
+ for (link = REG_NOTES (p); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_INC
+ && GET_CODE (XEXP (link, 0)) == REG)
+ {
+ register int incno = REGNO (XEXP (link, 0));
+ if (incno < regno + nregs && incno >= regno)
+ return 0;
+ if (incno < valueno + valuenregs && incno >= valueno)
+ return 0;
+ if (goal_mem_addr_varies
+ && reg_overlap_mentioned_for_reload_p (XEXP (link, 0),
+ goal))
+ return 0;
+ }
+ }
+#endif
+ }
+ }
+}
+
+/* Find a place where INCED appears in an increment or decrement operator
+ within X, and return the amount INCED is incremented or decremented by.
+ The value is always positive. */
+
+static int
+find_inc_amount (x, inced)
+ rtx x, inced;
+{
+ register enum rtx_code code = GET_CODE (x);
+ register char *fmt;
+ register int i;
+
+ if (code == MEM)
+ {
+ register rtx addr = XEXP (x, 0);
+ if ((GET_CODE (addr) == PRE_DEC
+ || GET_CODE (addr) == POST_DEC
+ || GET_CODE (addr) == PRE_INC
+ || GET_CODE (addr) == POST_INC)
+ && XEXP (addr, 0) == inced)
+ return GET_MODE_SIZE (GET_MODE (x));
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ register int tem = find_inc_amount (XEXP (x, i), inced);
+ if (tem != 0)
+ return tem;
+ }
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ {
+ register int tem = find_inc_amount (XVECEXP (x, i, j), inced);
+ if (tem != 0)
+ return tem;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Return 1 if register REGNO is the subject of a clobber in insn INSN. */
+
+int
+regno_clobbered_p (regno, insn)
+ int regno;
+ rtx insn;
+{
+ if (GET_CODE (PATTERN (insn)) == CLOBBER
+ && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
+ return REGNO (XEXP (PATTERN (insn), 0)) == regno;
+
+ if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ int i = XVECLEN (PATTERN (insn), 0) - 1;
+
+ for (; i >= 0; i--)
+ {
+ rtx elt = XVECEXP (PATTERN (insn), 0, i);
+ if (GET_CODE (elt) == CLOBBER && GET_CODE (XEXP (elt, 0)) == REG
+ && REGNO (XEXP (elt, 0)) == regno)
+ return 1;
+ }
+ }
+
+ return 0;
+}
diff --git a/gnu/usr.bin/cc/cc_int/reload1.c b/gnu/usr.bin/cc/cc_int/reload1.c
new file mode 100644
index 0000000..e46d764
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/reload1.c
@@ -0,0 +1,7122 @@
+/* Reload pseudo regs into hard regs for insns that require hard regs.
+ Copyright (C) 1987, 88, 89, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include <stdio.h>
+#include "config.h"
+#include "rtl.h"
+#include "obstack.h"
+#include "insn-config.h"
+#include "insn-flags.h"
+#include "insn-codes.h"
+#include "flags.h"
+#include "expr.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "reload.h"
+#include "recog.h"
+#include "basic-block.h"
+#include "output.h"
+
+/* This file contains the reload pass of the compiler, which is
+ run after register allocation has been done. It checks that
+ each insn is valid (operands required to be in registers really
+ are in registers of the proper class) and fixes up invalid ones
+ by copying values temporarily into registers for the insns
+ that need them.
+
+ The results of register allocation are described by the vector
+ reg_renumber; the insns still contain pseudo regs, but reg_renumber
+ can be used to find which hard reg, if any, a pseudo reg is in.
+
+ The technique we always use is to free up a few hard regs that are
+ called ``reload regs'', and for each place where a pseudo reg
+ must be in a hard reg, copy it temporarily into one of the reload regs.
+
+ All the pseudos that were formerly allocated to the hard regs that
+ are now in use as reload regs must be ``spilled''. This means
+ that they go to other hard regs, or to stack slots if no other
+ available hard regs can be found. Spilling can invalidate more
+ insns, creating additional need for reloads, so we must keep checking
+ until the process stabilizes.
+
+ For machines with different classes of registers, we must keep track
+ of the register class needed for each reload, and make sure that
+ we allocate enough reload registers of each class.
+
+ The file reload.c contains the code that checks one insn for
+ validity and reports the reloads that it needs. This file
+ is in charge of scanning the entire rtl code, accumulating the
+ reload needs, spilling, assigning reload registers to use for
+ fixing up each insn, and generating the new insns to copy values
+ into the reload registers. */
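+
+/* As a rough sketch (illustrative pseudocode only; register elimination
+ and caller-save handling are omitted), the process described above is
+ a fixed-point iteration:
+
+ something_changed = 1;
+ while (something_changed)
+ {
+ something_changed = 0;
+ for each insn: run find_reloads and record the maximum
+ reload needs of each register class;
+ spill more hard regs until those needs are met, setting
+ something_changed whenever a new reg is spilled;
+ }
+ reload_as_needed (first, ...); emits the actual reload insns
+
+ The real version of this loop is in reload (), below. */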
+
+
+#ifndef REGISTER_MOVE_COST
+#define REGISTER_MOVE_COST(x, y) 2
+#endif
+
+#ifndef MEMORY_MOVE_COST
+#define MEMORY_MOVE_COST(x) 4
+#endif
+
+/* During reload_as_needed, element N contains a REG rtx for the hard reg
+ into which reg N has been reloaded (perhaps for a previous insn). */
+static rtx *reg_last_reload_reg;
+
+/* Elt N nonzero if reg_last_reload_reg[N] has been set in this insn
+ for an output reload that stores into reg N. */
+static char *reg_has_output_reload;
+
+/* Indicates which hard regs are reload-registers for an output reload
+ in the current insn. */
+static HARD_REG_SET reg_is_output_reload;
+
+/* Element N is the constant value to which pseudo reg N is equivalent,
+ or zero if pseudo reg N is not equivalent to a constant.
+ find_reloads looks at this in order to replace pseudo reg N
+ with the constant it stands for. */
+rtx *reg_equiv_constant;
+
+/* Element N is a memory location to which pseudo reg N is equivalent,
+ prior to any register elimination (such as frame pointer to stack
+ pointer). Depending on whether or not it is a valid address, this value
+ is transferred to either reg_equiv_address or reg_equiv_mem. */
+rtx *reg_equiv_memory_loc;
+
+/* Element N is the address of the stack slot to which pseudo reg N is
+ equivalent.
+ This is used when the address is not valid as a memory address
+ (because its displacement is too big for the machine). */
+rtx *reg_equiv_address;
+
+/* Element N is the memory slot to which pseudo reg N is equivalent,
+ or zero if pseudo reg N is not equivalent to a memory slot. */
+rtx *reg_equiv_mem;
+
+/* Widest width in which each pseudo reg is referred to (via subreg). */
+static int *reg_max_ref_width;
+
+/* Element N is the insn that initialized reg N from its equivalent
+ constant or memory slot. */
+static rtx *reg_equiv_init;
+
+/* During reload_as_needed, element N contains the last pseudo regno
+ reloaded into the Nth reload register. This vector is in parallel
+ with spill_regs. If that pseudo reg occupied more than one register,
+ reg_reloaded_contents points to that pseudo for each spill register in
+ use; all of these must remain set for an inheritance to occur. */
+static int reg_reloaded_contents[FIRST_PSEUDO_REGISTER];
+
+/* During reload_as_needed, element N contains the insn for which
+ the Nth reload register was last used. This vector is in parallel
+ with spill_regs, and its contents are significant only when
+ reg_reloaded_contents is significant. */
+static rtx reg_reloaded_insn[FIRST_PSEUDO_REGISTER];
+
+/* Number of spill-regs so far; number of valid elements of spill_regs. */
+static int n_spills;
+
+/* In parallel with spill_regs, contains REG rtx's for those regs.
+ Holds the last rtx used for any given reg, or 0 if it has never
+ been used for spilling yet. This rtx is reused, provided it has
+ the proper mode. */
+static rtx spill_reg_rtx[FIRST_PSEUDO_REGISTER];
+
+/* In parallel with spill_regs, contains nonzero for a spill reg
+ that was stored after the last time it was used.
+ The precise value is the insn generated to do the store. */
+static rtx spill_reg_store[FIRST_PSEUDO_REGISTER];
+
+/* This table is the inverse mapping of spill_regs:
+ indexed by hard reg number,
+ it contains the position of that reg in spill_regs,
+ or -1 for something that is not in spill_regs. */
+static short spill_reg_order[FIRST_PSEUDO_REGISTER];
+
+/* This reg set indicates registers that may not be used for retrying global
+ allocation. The registers that may not be used include all spill registers
+ and the frame pointer (if we are using one). */
+HARD_REG_SET forbidden_regs;
+
+/* This reg set indicates registers that are not good for spill registers.
+ They will not be used to complete groups of spill registers. This includes
+ all fixed registers, registers that may be eliminated, and, if
+ SMALL_REGISTER_CLASSES is not defined, registers explicitly used in the rtl.
+
+ (spill_reg_order prevents these registers from being used to start a
+ group.) */
+static HARD_REG_SET bad_spill_regs;
+
+/* Describes order of use of registers for reloading
+ of spilled pseudo-registers. `n_spills' is the number of
+ elements that are actually valid; new ones are added at the end. */
+static short spill_regs[FIRST_PSEUDO_REGISTER];
+
+/* Describes order of preference for putting regs into spill_regs.
+ Contains the numbers of all the hard regs, in order most preferred first.
+ This order is different for each function.
+ It is set up by order_regs_for_reload.
+ Empty elements at the end contain -1. */
+static short potential_reload_regs[FIRST_PSEUDO_REGISTER];
+
+/* 1 for a hard register that appears explicitly in the rtl
+ (for example, function value registers, special registers
+ used by insns, structure value pointer registers). */
+static char regs_explicitly_used[FIRST_PSEUDO_REGISTER];
+
+/* Indicates if a register was counted against the need for
+ groups. 0 means it can count against max_nongroup instead. */
+static HARD_REG_SET counted_for_groups;
+
+/* Indicates if a register was counted against the need for
+ non-groups. 0 means it can become part of a new group.
+ During choose_reload_regs, 1 here means don't use this reg
+ as part of a group, even if it seems to be otherwise ok. */
+static HARD_REG_SET counted_for_nongroups;
+
+/* Indexed by pseudo reg number N,
+ says may not delete stores into the real (memory) home of pseudo N.
+ This is set if we already substituted a memory equivalent in some uses,
+ which happens when we have to eliminate the fp from it. */
+static char *cannot_omit_stores;
+
+/* Nonzero if indirect addressing is supported on the machine; this means
+ that spilling (REG n) does not require reloading it into a register in
+ order to do (MEM (REG n)) or (MEM (PLUS (REG n) (CONST_INT c))). The
+ value indicates the level of indirect addressing supported, e.g., two
+ means that (MEM (MEM (REG n))) is also valid if (REG n) does not get
+ a hard register. */
+
+static char spill_indirect_levels;
+
+/* Nonzero if indirect addressing is supported when the innermost MEM is
+ of the form (MEM (SYMBOL_REF sym)). It is assumed that the level to
+ which these are valid is the same as spill_indirect_levels, above. */
+
+char indirect_symref_ok;
+
+/* Nonzero if an address (plus (reg frame_pointer) (reg ...)) is valid. */
+
+char double_reg_address_ok;
+
+/* Record the stack slot for each spilled hard register. */
+
+static rtx spill_stack_slot[FIRST_PSEUDO_REGISTER];
+
+/* Width allocated so far for that stack slot. */
+
+static int spill_stack_slot_width[FIRST_PSEUDO_REGISTER];
+
+/* Indexed by register class and basic block number, nonzero if there is
+ any need for a spill register of that class in that basic block.
+ The pointer is 0 if we did stupid allocation and don't know
+ the structure of basic blocks. */
+
+char *basic_block_needs[N_REG_CLASSES];
+
+/* First uid used by insns created by reload in this function.
+ Used in find_equiv_reg. */
+int reload_first_uid;
+
+/* Flag set by local-alloc or global-alloc if anything is live in
+ a call-clobbered reg across calls. */
+
+int caller_save_needed;
+
+/* Set to 1 while reload_as_needed is operating.
+ Required by some machines to handle any generated moves differently. */
+
+int reload_in_progress = 0;
+
+/* These arrays record the insn_code of insns that may be needed to
+ perform input and output reloads of special objects. They provide a
+ place to pass a scratch register. */
+
+enum insn_code reload_in_optab[NUM_MACHINE_MODES];
+enum insn_code reload_out_optab[NUM_MACHINE_MODES];
+
+/* This obstack is used for allocation of rtl during register elimination.
+ The allocated storage can be freed once find_reloads has processed the
+ insn. */
+
+struct obstack reload_obstack;
+char *reload_firstobj;
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* List of labels that must never be deleted. */
+extern rtx forced_labels;
+
+/* This structure is used to record information about register eliminations.
+ Each array entry describes one possible way of eliminating a register
+ in favor of another. If there is more than one way of eliminating a
+ particular register, the most preferred should be specified first. */
+
+static struct elim_table
+{
+ int from; /* Register number to be eliminated. */
+ int to; /* Register number used as replacement. */
+ int initial_offset; /* Initial difference between values. */
+ int can_eliminate; /* Non-zero if this elimination can be done. */
+ int can_eliminate_previous; /* Value of CAN_ELIMINATE in previous scan over
+ insns made by reload. */
+ int offset; /* Current offset between the two regs. */
+ int max_offset; /* Maximum offset between the two regs. */
+ int previous_offset; /* Offset at end of previous insn. */
+ int ref_outside_mem; /* "to" has been referenced outside a MEM. */
+ rtx from_rtx; /* REG rtx for the register to be eliminated.
+ We cannot simply compare the number since
+ we might then spuriously replace a hard
+ register corresponding to a pseudo
+ assigned to the reg to be eliminated. */
+ rtx to_rtx; /* REG rtx for the replacement. */
+} reg_eliminate[] =
+
+/* If a set of eliminable registers was specified, define the table from it.
+ Otherwise, default to the normal case of the frame pointer being
+ replaced by the stack pointer. */
+
+#ifdef ELIMINABLE_REGS
+ ELIMINABLE_REGS;
+#else
+ {{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}};
+#endif
+
+#define NUM_ELIMINABLE_REGS (sizeof reg_eliminate / sizeof reg_eliminate[0])
+
+/* Record the number of pending eliminations that have an offset not equal
+ to their initial offset. If non-zero, we use a new copy of each
+ replacement result in any insns encountered. */
+static int num_not_at_initial_offset;
+
+/* Count the number of registers that we may be able to eliminate. */
+static int num_eliminable;
+
+/* For each label, we record the offset of each elimination. If we reach
+ a label by more than one path and an offset differs, we cannot do the
+ elimination. This information is indexed by the number of the label.
+ The first table is an array of flags that records whether we have yet
+ encountered a label and the second table is an array of arrays, one
+ entry in the latter array for each elimination. */
+
+static char *offsets_known_at;
+static int (*offsets_at)[NUM_ELIMINABLE_REGS];
+
+/* Number of labels in the current function. */
+
+static int num_labels;
+
+struct hard_reg_n_uses { int regno; int uses; };
+
+static int possible_group_p PROTO((int, int *));
+static void count_possible_groups PROTO((int *, enum machine_mode *,
+ int *));
+static int modes_equiv_for_class_p PROTO((enum machine_mode,
+ enum machine_mode,
+ enum reg_class));
+static void spill_failure PROTO((rtx));
+static int new_spill_reg PROTO((int, int, int *, int *, int,
+ FILE *));
+static void delete_dead_insn PROTO((rtx));
+static void alter_reg PROTO((int, int));
+static void mark_scratch_live PROTO((rtx));
+static void set_label_offsets PROTO((rtx, rtx, int));
+static int eliminate_regs_in_insn PROTO((rtx, int));
+static void mark_not_eliminable PROTO((rtx, rtx));
+static int spill_hard_reg PROTO((int, int, FILE *, int));
+static void scan_paradoxical_subregs PROTO((rtx));
+static int hard_reg_use_compare PROTO((struct hard_reg_n_uses *,
+ struct hard_reg_n_uses *));
+static void order_regs_for_reload PROTO((void));
+static int compare_spill_regs PROTO((short *, short *));
+static void reload_as_needed PROTO((rtx, int));
+static void forget_old_reloads_1 PROTO((rtx, rtx));
+static int reload_reg_class_lower PROTO((short *, short *));
+static void mark_reload_reg_in_use PROTO((int, int, enum reload_type,
+ enum machine_mode));
+static void clear_reload_reg_in_use PROTO((int, int, enum reload_type,
+ enum machine_mode));
+static int reload_reg_free_p PROTO((int, int, enum reload_type));
+static int reload_reg_free_before_p PROTO((int, int, enum reload_type));
+static int reload_reg_reaches_end_p PROTO((int, int, enum reload_type));
+static int reloads_conflict PROTO((int, int));
+static int allocate_reload_reg PROTO((int, rtx, int, int));
+static void choose_reload_regs PROTO((rtx, rtx));
+static void merge_assigned_reloads PROTO((rtx));
+static void emit_reload_insns PROTO((rtx));
+static void delete_output_reload PROTO((rtx, int, rtx));
+static void inc_for_reload PROTO((rtx, rtx, int));
+static int constraint_accepts_reg_p PROTO((char *, rtx));
+static int count_occurrences PROTO((rtx, rtx));
+
+/* Initialize the reload pass once per compilation. */
+
+void
+init_reload ()
+{
+ register int i;
+
+ /* Often (MEM (REG n)) is still valid even if (REG n) is put on the stack.
+ Set spill_indirect_levels to the number of levels such addressing is
+ permitted, zero if it is not permitted at all. */
+
+ register rtx tem
+ = gen_rtx (MEM, Pmode,
+ gen_rtx (PLUS, Pmode,
+ gen_rtx (REG, Pmode, LAST_VIRTUAL_REGISTER + 1),
+ GEN_INT (4)));
+ spill_indirect_levels = 0;
+
+ while (memory_address_p (QImode, tem))
+ {
+ spill_indirect_levels++;
+ tem = gen_rtx (MEM, Pmode, tem);
+ }
+
+ /* See if indirect addressing is valid for (MEM (SYMBOL_REF ...)). */
+
+ tem = gen_rtx (MEM, Pmode, gen_rtx (SYMBOL_REF, Pmode, "foo"));
+ indirect_symref_ok = memory_address_p (QImode, tem);
+
+ /* See if reg+reg is a valid (and offsettable) address. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ tem = gen_rtx (PLUS, Pmode,
+ gen_rtx (REG, Pmode, HARD_FRAME_POINTER_REGNUM),
+ gen_rtx (REG, Pmode, i));
+ /* This way, we make sure that reg+reg is an offsettable address. */
+ tem = plus_constant (tem, 4);
+
+ if (memory_address_p (QImode, tem))
+ {
+ double_reg_address_ok = 1;
+ break;
+ }
+ }
+
+ /* Initialize obstack for our rtl allocation. */
+ gcc_obstack_init (&reload_obstack);
+ reload_firstobj = (char *) obstack_alloc (&reload_obstack, 0);
+}
+
+/* Main entry point for the reload pass.
+
+ FIRST is the first insn of the function being compiled.
+
+ GLOBAL nonzero means we were called from global_alloc
+ and should attempt to reallocate any pseudoregs that we
+ displace from hard regs we will use for reloads.
+ If GLOBAL is zero, we do not have enough information to do that,
+ so any pseudo reg that is spilled must go to the stack.
+
+ DUMPFILE is the global-reg debugging dump file stream, or 0.
+ If it is nonzero, messages are written to it to describe
+ which registers are seized as reload regs, which pseudo regs
+ are spilled from them, and where the pseudo regs are reallocated to.
+
+ Return value is nonzero if reload failed
+ and we must not do any more for this function. */
+
+int
+reload (first, global, dumpfile)
+ rtx first;
+ int global;
+ FILE *dumpfile;
+{
+ register int class;
+ register int i, j, k;
+ register rtx insn;
+ register struct elim_table *ep;
+
+ int something_changed;
+ int something_needs_reloads;
+ int something_needs_elimination;
+ int new_basic_block_needs;
+ enum reg_class caller_save_spill_class = NO_REGS;
+ int caller_save_group_size = 1;
+
+ /* Nonzero means we couldn't get enough spill regs. */
+ int failure = 0;
+
+ /* The basic block number currently being processed for INSN. */
+ int this_block;
+
+ /* Make sure even insns with volatile mem refs are recognizable. */
+ init_recog ();
+
+ /* Enable find_equiv_reg to distinguish insns made by reload. */
+ reload_first_uid = get_max_uid ();
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ basic_block_needs[i] = 0;
+
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* Initialize the secondary memory table. */
+ clear_secondary_mem ();
+#endif
+
+ /* Remember which hard regs appear explicitly
+ before we merge into `regs_ever_live' the ones in which
+ pseudo regs have been allocated. */
+ bcopy (regs_ever_live, regs_explicitly_used, sizeof regs_ever_live);
+
+ /* We don't have a stack slot for any spill reg yet. */
+ bzero ((char *) spill_stack_slot, sizeof spill_stack_slot);
+ bzero ((char *) spill_stack_slot_width, sizeof spill_stack_slot_width);
+
+ /* Initialize the save area information for caller-save, in case some
+ are needed. */
+ init_save_areas ();
+
+ /* Compute which hard registers are now in use
+ as homes for pseudo registers.
+ This is done here rather than (e.g.) in global_alloc
+ because this point is reached even if not optimizing. */
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ mark_home_live (i);
+
+ for (i = 0; i < scratch_list_length; i++)
+ if (scratch_list[i])
+ mark_scratch_live (scratch_list[i]);
+
+ /* Make sure that the last insn in the chain
+ is not something that needs reloading. */
+ emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+ /* Find all the pseudo registers that didn't get hard regs
+ but do have known equivalent constants or memory slots.
+ These include parameters (known equivalent to parameter slots)
+ and cse'd or loop-moved constant memory addresses.
+
+ Record constant equivalents in reg_equiv_constant
+ so they will be substituted by find_reloads.
+ Record memory equivalents in reg_equiv_memory_loc so they can
+ be substituted eventually by altering the REG-rtx's. */
+
+ reg_equiv_constant = (rtx *) alloca (max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_constant, max_regno * sizeof (rtx));
+ reg_equiv_memory_loc = (rtx *) alloca (max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_memory_loc, max_regno * sizeof (rtx));
+ reg_equiv_mem = (rtx *) alloca (max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_mem, max_regno * sizeof (rtx));
+ reg_equiv_init = (rtx *) alloca (max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_init, max_regno * sizeof (rtx));
+ reg_equiv_address = (rtx *) alloca (max_regno * sizeof (rtx));
+ bzero ((char *) reg_equiv_address, max_regno * sizeof (rtx));
+ reg_max_ref_width = (int *) alloca (max_regno * sizeof (int));
+ bzero ((char *) reg_max_ref_width, max_regno * sizeof (int));
+ cannot_omit_stores = (char *) alloca (max_regno);
+ bzero (cannot_omit_stores, max_regno);
+
+#ifdef SMALL_REGISTER_CLASSES
+ CLEAR_HARD_REG_SET (forbidden_regs);
+#endif
+
+ /* Look for REG_EQUIV notes; record what each pseudo is equivalent to.
+ Also find all paradoxical subregs and find largest such for each pseudo.
+ On machines with small register classes, record hard registers that
+ are used for user variables. These can never be used for spills. */
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ rtx set = single_set (insn);
+
+ if (set != 0 && GET_CODE (SET_DEST (set)) == REG)
+ {
+ rtx note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
+ if (note
+#ifdef LEGITIMATE_PIC_OPERAND_P
+ && (! CONSTANT_P (XEXP (note, 0)) || ! flag_pic
+ || LEGITIMATE_PIC_OPERAND_P (XEXP (note, 0)))
+#endif
+ )
+ {
+ rtx x = XEXP (note, 0);
+ i = REGNO (SET_DEST (set));
+ if (i > LAST_VIRTUAL_REGISTER)
+ {
+ if (GET_CODE (x) == MEM)
+ reg_equiv_memory_loc[i] = x;
+ else if (CONSTANT_P (x))
+ {
+ if (LEGITIMATE_CONSTANT_P (x))
+ reg_equiv_constant[i] = x;
+ else
+ reg_equiv_memory_loc[i]
+ = force_const_mem (GET_MODE (SET_DEST (set)), x);
+ }
+ else
+ continue;
+
+ /* If this register is being made equivalent to a MEM
+ and the MEM is not SET_SRC, the equivalencing insn
+ is one with the MEM as a SET_DEST and it occurs later.
+ So don't mark this insn now. */
+ if (GET_CODE (x) != MEM
+ || rtx_equal_p (SET_SRC (set), x))
+ reg_equiv_init[i] = insn;
+ }
+ }
+ }
+
+ /* If this insn is setting a MEM from a register equivalent to it,
+ this is the equivalencing insn. */
+ else if (set && GET_CODE (SET_DEST (set)) == MEM
+ && GET_CODE (SET_SRC (set)) == REG
+ && reg_equiv_memory_loc[REGNO (SET_SRC (set))]
+ && rtx_equal_p (SET_DEST (set),
+ reg_equiv_memory_loc[REGNO (SET_SRC (set))]))
+ reg_equiv_init[REGNO (SET_SRC (set))] = insn;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ scan_paradoxical_subregs (PATTERN (insn));
+ }
+
+ /* Does this function require a frame pointer? */
+
+ frame_pointer_needed = (! flag_omit_frame_pointer
+#ifdef EXIT_IGNORE_STACK
+ /* ?? If EXIT_IGNORE_STACK is set, we will not save
+ and restore sp for alloca. So we can't eliminate
+ the frame pointer in that case. At some point,
+ we should improve this by emitting the
+ sp-adjusting insns for this case. */
+ || (current_function_calls_alloca
+ && EXIT_IGNORE_STACK)
+#endif
+ || FRAME_POINTER_REQUIRED);
+
+ num_eliminable = 0;
+
+ /* Initialize the table of registers to eliminate. The way we do this
+ depends on how the eliminable registers were defined. */
+#ifdef ELIMINABLE_REGS
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ {
+ ep->can_eliminate = ep->can_eliminate_previous
+ = (CAN_ELIMINATE (ep->from, ep->to)
+ && ! (ep->to == STACK_POINTER_REGNUM && frame_pointer_needed));
+ }
+#else
+ reg_eliminate[0].can_eliminate = reg_eliminate[0].can_eliminate_previous
+ = ! frame_pointer_needed;
+#endif
+
+ /* Count the number of eliminable registers and build the FROM and TO
+ REG rtx's. Note that code in gen_rtx will cause, e.g.,
+ gen_rtx (REG, Pmode, STACK_POINTER_REGNUM) to equal stack_pointer_rtx.
+ We depend on this. */
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ {
+ num_eliminable += ep->can_eliminate;
+ ep->from_rtx = gen_rtx (REG, Pmode, ep->from);
+ ep->to_rtx = gen_rtx (REG, Pmode, ep->to);
+ }
+
+ num_labels = max_label_num () - get_first_label_num ();
+
+ /* Allocate the tables used to store offset information at labels. */
+ offsets_known_at = (char *) alloca (num_labels);
+ offsets_at
+ = (int (*)[NUM_ELIMINABLE_REGS])
+ alloca (num_labels * NUM_ELIMINABLE_REGS * sizeof (int));
+
+ offsets_known_at -= get_first_label_num ();
+ offsets_at -= get_first_label_num ();
+
+ /* Alter each pseudo-reg rtx to contain its hard reg number.
+ Assign stack slots to the pseudos that lack hard regs or equivalents.
+ Do not touch virtual registers. */
+
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++)
+ alter_reg (i, -1);
+
+ /* Round size of stack frame to BIGGEST_ALIGNMENT. This must be done here
+ because the stack size may be a part of the offset computation for
+ register elimination. */
+ assign_stack_local (BLKmode, 0, 0);
+
+ /* If we have some registers we think can be eliminated, scan all insns to
+ see if there is an insn that sets one of these registers to something
+ other than itself plus a constant. If so, the register cannot be
+ eliminated. Doing this scan here eliminates an extra pass through the
+ main reload loop in the most common case where register elimination
+ cannot be done. */
+ for (insn = first; insn && num_eliminable; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ note_stores (PATTERN (insn), mark_not_eliminable);
+
+#ifndef REGISTER_CONSTRAINTS
+ /* If all the pseudo regs have hard regs,
+ except for those that are never referenced,
+ we know that no reloads are needed. */
+ /* But that is not true if there are register constraints, since
+ in that case some pseudos might be in the wrong kind of hard reg. */
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_renumber[i] == -1 && reg_n_refs[i] != 0)
+ break;
+
+ if (i == max_regno && num_eliminable == 0 && ! caller_save_needed)
+ return;
+#endif
+
+ /* Compute the order of preference for hard registers to spill.
+ Store them by decreasing preference in potential_reload_regs. */
+
+ order_regs_for_reload ();
+
+ /* So far, no hard regs have been spilled. */
+ n_spills = 0;
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ spill_reg_order[i] = -1;
+
+ /* On most machines, we can't use any register explicitly used in the
+ rtl as a spill register. But on some, we have to. Those will have
+ taken care to keep the life of hard regs as short as possible. */
+
+#ifndef SMALL_REGISTER_CLASSES
+ COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
+#endif
+
+ /* Spill any hard regs that we know we can't eliminate. */
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ if (! ep->can_eliminate)
+ spill_hard_reg (ep->from, global, dumpfile, 1);
+
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ if (frame_pointer_needed)
+ spill_hard_reg (HARD_FRAME_POINTER_REGNUM, global, dumpfile, 1);
+#endif
+
+ if (global)
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ basic_block_needs[i] = (char *) alloca (n_basic_blocks);
+ bzero (basic_block_needs[i], n_basic_blocks);
+ }
+
+ /* From now on, we need to emit any moves without making new pseudos. */
+ reload_in_progress = 1;
+
+ /* This loop scans the entire function each go-round
+ and repeats until one repetition spills no additional hard regs. */
+
+ /* This flag is set when a pseudo reg is spilled,
+ to require another pass. Note that getting an additional reload
+ reg does not necessarily imply any pseudo reg was spilled;
+ sometimes we find a reload reg that no pseudo reg was allocated in. */
+ something_changed = 1;
+ /* This flag is set if there are any insns that require reloading. */
+ something_needs_reloads = 0;
+ /* This flag is set if there are any insns that require register
+ eliminations. */
+ something_needs_elimination = 0;
+ while (something_changed)
+ {
+ rtx after_call = 0;
+
+ /* For each class, number of reload regs needed in that class.
+ This is the maximum over all insns of the needs in that class
+ of the individual insn. */
+ int max_needs[N_REG_CLASSES];
+ /* For each class, size of group of consecutive regs
+ that is needed for the reloads of this class. */
+ int group_size[N_REG_CLASSES];
+ /* For each class, max number of consecutive groups needed.
+ (Each group contains group_size[CLASS] consecutive registers.) */
+ int max_groups[N_REG_CLASSES];
+ /* For each class, max number needed of regs that don't belong
+ to any of the groups. */
+ int max_nongroups[N_REG_CLASSES];
+ /* For each class, the machine mode which requires consecutive
+ groups of regs of that class.
+ If two different modes ever require groups of one class,
+ they must be the same size and equally restrictive for that class,
+ otherwise we can't handle the complexity. */
+ enum machine_mode group_mode[N_REG_CLASSES];
+ /* Record the insn where each maximum need is first found. */
+ rtx max_needs_insn[N_REG_CLASSES];
+ rtx max_groups_insn[N_REG_CLASSES];
+ rtx max_nongroups_insn[N_REG_CLASSES];
+ rtx x;
+ int starting_frame_size = get_frame_size ();
+ int previous_frame_pointer_needed = frame_pointer_needed;
+ static char *reg_class_names[] = REG_CLASS_NAMES;
+
+ something_changed = 0;
+ bzero ((char *) max_needs, sizeof max_needs);
+ bzero ((char *) max_groups, sizeof max_groups);
+ bzero ((char *) max_nongroups, sizeof max_nongroups);
+ bzero ((char *) max_needs_insn, sizeof max_needs_insn);
+ bzero ((char *) max_groups_insn, sizeof max_groups_insn);
+ bzero ((char *) max_nongroups_insn, sizeof max_nongroups_insn);
+ bzero ((char *) group_size, sizeof group_size);
+ for (i = 0; i < N_REG_CLASSES; i++)
+ group_mode[i] = VOIDmode;
+
+ /* Keep track of which basic blocks need the reloads. */
+ this_block = 0;
+
+ /* Remember whether any element of basic_block_needs
+ changes from 0 to 1 in this pass. */
+ new_basic_block_needs = 0;
+
+ /* Reset all offsets on eliminable registers to their initial values. */
+#ifdef ELIMINABLE_REGS
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ {
+ INITIAL_ELIMINATION_OFFSET (ep->from, ep->to, ep->initial_offset);
+ ep->previous_offset = ep->offset
+ = ep->max_offset = ep->initial_offset;
+ }
+#else
+#ifdef INITIAL_FRAME_POINTER_OFFSET
+ INITIAL_FRAME_POINTER_OFFSET (reg_eliminate[0].initial_offset);
+#else
+ if (!FRAME_POINTER_REQUIRED)
+ abort ();
+ reg_eliminate[0].initial_offset = 0;
+#endif
+ reg_eliminate[0].previous_offset = reg_eliminate[0].max_offset
+ = reg_eliminate[0].offset = reg_eliminate[0].initial_offset;
+#endif
+
+ num_not_at_initial_offset = 0;
+
+ bzero ((char *) &offsets_known_at[get_first_label_num ()], num_labels);
+
+ /* Set a known offset for each forced label to be at the initial offset
+ of each elimination. We do this because we assume that all
+ computed jumps occur from a location where each elimination is
+ at its initial offset. */
+
+ for (x = forced_labels; x; x = XEXP (x, 1))
+ if (XEXP (x, 0))
+ set_label_offsets (XEXP (x, 0), NULL_RTX, 1);
+
+ /* For each pseudo register that has an equivalent location defined,
+ try to eliminate any eliminable registers (such as the frame pointer)
+ assuming initial offsets for the replacement register, which
+ is the normal case.
+
+ If the resulting location is directly addressable, substitute
+ the MEM we just got directly for the old REG.
+
+ If it is not addressable but is a constant or the sum of a hard reg
+ and constant, it is probably not addressable because the constant is
+ out of range, in that case record the address; we will generate
+ hairy code to compute the address in a register each time it is
+ needed. Similarly if it is a hard register, but one that is not
+ valid as an address register.
+
+ If the location is not addressable, but does not have one of the
+ above forms, assign a stack slot. We have to do this to avoid the
+ potential of producing lots of reloads if, e.g., a location involves
+ a pseudo that didn't get a hard register and has an equivalent memory
+ location that also involves a pseudo that didn't get a hard register.
+
+ Perhaps at some point we will improve reload_when_needed handling
+ so this problem goes away. But that's very hairy. */
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_renumber[i] < 0 && reg_equiv_memory_loc[i])
+ {
+ rtx x = eliminate_regs (reg_equiv_memory_loc[i], 0, NULL_RTX);
+
+ if (strict_memory_address_p (GET_MODE (regno_reg_rtx[i]),
+ XEXP (x, 0)))
+ reg_equiv_mem[i] = x, reg_equiv_address[i] = 0;
+ else if (CONSTANT_P (XEXP (x, 0))
+ || (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER)
+ || (GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ && (REGNO (XEXP (XEXP (x, 0), 0))
+ < FIRST_PSEUDO_REGISTER)
+ && CONSTANT_P (XEXP (XEXP (x, 0), 1))))
+ reg_equiv_address[i] = XEXP (x, 0), reg_equiv_mem[i] = 0;
+ else
+ {
+ /* Make a new stack slot. Then indicate that something
+ changed so we go back and recompute offsets for
+ eliminable registers because the allocation of memory
+ below might change some offset. reg_equiv_{mem,address}
+ will be set up for this pseudo on the next pass around
+ the loop. */
+ reg_equiv_memory_loc[i] = 0;
+ reg_equiv_init[i] = 0;
+ alter_reg (i, -1);
+ something_changed = 1;
+ }
+ }
+
+ /* If we allocated another pseudo to the stack, redo elimination
+ bookkeeping. */
+ if (something_changed)
+ continue;
+
+ /* If caller-saves needs a group, initialize the group to include
+ the size and mode required for caller-saves. */
+
+ if (caller_save_group_size > 1)
+ {
+ group_mode[(int) caller_save_spill_class] = Pmode;
+ group_size[(int) caller_save_spill_class] = caller_save_group_size;
+ }
+
+ /* Compute the most additional registers needed by any instruction.
+ Collect information separately for each class of regs. */
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (global && this_block + 1 < n_basic_blocks
+ && insn == basic_block_head[this_block+1])
+ ++this_block;
+
+ /* If this is a label, a JUMP_INSN, or has REG_NOTES (which
+ might include REG_LABEL), we need to see what effects this
+ has on the known offsets at labels. */
+
+ if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN
+ || (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && REG_NOTES (insn) != 0))
+ set_label_offsets (insn, insn, 0);
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ /* Nonzero means don't use a reload reg that overlaps
+ the place where a function value can be returned. */
+ rtx avoid_return_reg = 0;
+
+ rtx old_body = PATTERN (insn);
+ int old_code = INSN_CODE (insn);
+ rtx old_notes = REG_NOTES (insn);
+ int did_elimination = 0;
+
+ /* To compute the number of reload registers of each class
+ needed for an insn, we must simulate what choose_reload_regs
+ can do. We do this by splitting an insn into an "input" and
+ an "output" part. RELOAD_OTHER reloads are used in both.
+ The input part uses those reloads, RELOAD_FOR_INPUT reloads,
+ which must be live over the entire input section of reloads,
+ and the maximum of all the RELOAD_FOR_INPUT_ADDRESS and
+ RELOAD_FOR_OPERAND_ADDRESS reloads, which conflict with the
+ inputs.
+
+ The registers needed for output are RELOAD_OTHER and
+ RELOAD_FOR_OUTPUT, which are live for the entire output
+ portion, and the maximum of all the RELOAD_FOR_OUTPUT_ADDRESS
+ reloads for each operand.
+
+ The total number of registers needed is the maximum of the
+ inputs and outputs. */
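+
+ /* An illustrative example (hypothetical numbers): if an insn needs
+ two RELOAD_FOR_INPUT regs of some class, one RELOAD_FOR_INSN reg
+ and three RELOAD_FOR_OUTPUT regs of the same class, the input
+ section needs 2 + 1 = 3 regs, the output section needs 3 + 1 = 4,
+ so the insn as a whole needs MAX (3, 4) = 4 regs of that class. */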
+
+ struct needs
+ {
+ /* [0] is normal, [1] is nongroup. */
+ int regs[2][N_REG_CLASSES];
+ int groups[N_REG_CLASSES];
+ };
+
+ /* Each `struct needs' corresponds to one RELOAD_... type. */
+ struct {
+ struct needs other;
+ struct needs input;
+ struct needs output;
+ struct needs insn;
+ struct needs other_addr;
+ struct needs op_addr;
+ struct needs op_addr_reload;
+ struct needs in_addr[MAX_RECOG_OPERANDS];
+ struct needs out_addr[MAX_RECOG_OPERANDS];
+ } insn_needs;
+
+ /* If needed, eliminate any eliminable registers. */
+ if (num_eliminable)
+ did_elimination = eliminate_regs_in_insn (insn, 0);
+
+#ifdef SMALL_REGISTER_CLASSES
+ /* Set avoid_return_reg if this is an insn
+ that might use the value of a function call. */
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ if (GET_CODE (PATTERN (insn)) == SET)
+ after_call = SET_DEST (PATTERN (insn));
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
+ after_call = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
+ else
+ after_call = 0;
+ }
+ else if (after_call != 0
+ && !(GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == stack_pointer_rtx))
+ {
+ if (reg_referenced_p (after_call, PATTERN (insn)))
+ avoid_return_reg = after_call;
+ after_call = 0;
+ }
+#endif /* SMALL_REGISTER_CLASSES */
+
+ /* Analyze the instruction. */
+ find_reloads (insn, 0, spill_indirect_levels, global,
+ spill_reg_order);
+
+ /* Remember for later shortcuts which insns had any reloads or
+ register eliminations.
+
+ One might think that it would be worthwhile to mark insns
+ that need register replacements but not reloads, but this is
+ not safe because find_reloads may do some manipulation of
+ the insn (such as swapping commutative operands), which would
+ be lost when we restore the old pattern after register
+ replacement. So the actions of find_reloads must be redone in
+ subsequent passes or in reload_as_needed.
+
+ However, it is safe to mark insns that need reloads
+ but not register replacement. */
+
+ PUT_MODE (insn, (did_elimination ? QImode
+ : n_reloads ? HImode
+ : GET_MODE (insn) == DImode ? DImode
+ : VOIDmode));
+
+ /* Discard any register replacements done. */
+ if (did_elimination)
+ {
+ obstack_free (&reload_obstack, reload_firstobj);
+ PATTERN (insn) = old_body;
+ INSN_CODE (insn) = old_code;
+ REG_NOTES (insn) = old_notes;
+ something_needs_elimination = 1;
+ }
+
+ /* If this insn has no reloads, we need not do anything except
+ in the case of a CALL_INSN when we have caller-saves and
+ caller-save needs reloads. */
+
+ if (n_reloads == 0
+ && ! (GET_CODE (insn) == CALL_INSN
+ && caller_save_spill_class != NO_REGS))
+ continue;
+
+ something_needs_reloads = 1;
+ bzero ((char *) &insn_needs, sizeof insn_needs);
+
+ /* Count each reload once in every class
+ containing the reload's own class. */
+
+ for (i = 0; i < n_reloads; i++)
+ {
+ register enum reg_class *p;
+ enum reg_class class = reload_reg_class[i];
+ int size;
+ enum machine_mode mode;
+ int nongroup_need;
+ struct needs *this_needs;
+
+ /* Don't count the dummy reloads, for which one of the
+ regs mentioned in the insn can be used for reloading.
+ Don't count optional reloads.
+ Don't count reloads that got combined with others. */
+ if (reload_reg_rtx[i] != 0
+ || reload_optional[i] != 0
+ || (reload_out[i] == 0 && reload_in[i] == 0
+ && ! reload_secondary_p[i]))
+ continue;
+
+ /* Show that a reload register of this class is needed
+ in this basic block. We do not use insn_needs and
+ insn_groups because they are overly conservative for
+ this purpose. */
+ if (global && ! basic_block_needs[(int) class][this_block])
+ {
+ basic_block_needs[(int) class][this_block] = 1;
+ new_basic_block_needs = 1;
+ }
+
+
+ mode = reload_inmode[i];
+ if (GET_MODE_SIZE (reload_outmode[i]) > GET_MODE_SIZE (mode))
+ mode = reload_outmode[i];
+ size = CLASS_MAX_NREGS (class, mode);
+
+ /* If this class doesn't want a group, determine if we have
+ a nongroup need or a regular need. We have a nongroup
+ need if this reload conflicts with a group reload whose
+ class intersects with this reload's class. */
+
+ nongroup_need = 0;
+ if (size == 1)
+ for (j = 0; j < n_reloads; j++)
+ if ((CLASS_MAX_NREGS (reload_reg_class[j],
+ (GET_MODE_SIZE (reload_outmode[j])
+ > GET_MODE_SIZE (reload_inmode[j]))
+ ? reload_outmode[j]
+ : reload_inmode[j])
+ > 1)
+ && (!reload_optional[j])
+ && (reload_in[j] != 0 || reload_out[j] != 0
+ || reload_secondary_p[j])
+ && reloads_conflict (i, j)
+ && reg_classes_intersect_p (class,
+ reload_reg_class[j]))
+ {
+ nongroup_need = 1;
+ break;
+ }
+
+ /* Decide which time-of-use to count this reload for. */
+ switch (reload_when_needed[i])
+ {
+ case RELOAD_OTHER:
+ this_needs = &insn_needs.other;
+ break;
+ case RELOAD_FOR_INPUT:
+ this_needs = &insn_needs.input;
+ break;
+ case RELOAD_FOR_OUTPUT:
+ this_needs = &insn_needs.output;
+ break;
+ case RELOAD_FOR_INSN:
+ this_needs = &insn_needs.insn;
+ break;
+ case RELOAD_FOR_OTHER_ADDRESS:
+ this_needs = &insn_needs.other_addr;
+ break;
+ case RELOAD_FOR_INPUT_ADDRESS:
+ this_needs = &insn_needs.in_addr[reload_opnum[i]];
+ break;
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ this_needs = &insn_needs.out_addr[reload_opnum[i]];
+ break;
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ this_needs = &insn_needs.op_addr;
+ break;
+ case RELOAD_FOR_OPADDR_ADDR:
+ this_needs = &insn_needs.op_addr_reload;
+ break;
+ }
+
+ if (size > 1)
+ {
+ enum machine_mode other_mode, allocate_mode;
+
+ /* Count number of groups needed separately from
+ number of individual regs needed. */
+ this_needs->groups[(int) class]++;
+ p = reg_class_superclasses[(int) class];
+ while (*p != LIM_REG_CLASSES)
+ this_needs->groups[(int) *p++]++;
+
+ /* Record size and mode of a group of this class. */
+ /* If more than one size group is needed,
+ make all groups the largest needed size. */
+ if (group_size[(int) class] < size)
+ {
+ other_mode = group_mode[(int) class];
+ allocate_mode = mode;
+
+ group_size[(int) class] = size;
+ group_mode[(int) class] = mode;
+ }
+ else
+ {
+ other_mode = mode;
+ allocate_mode = group_mode[(int) class];
+ }
+
+ /* Crash if two dissimilar machine modes both need
+ groups of consecutive regs of the same class. */
+
+ if (other_mode != VOIDmode && other_mode != allocate_mode
+ && ! modes_equiv_for_class_p (allocate_mode,
+ other_mode, class))
+ abort ();
+ }
+ else if (size == 1)
+ {
+ this_needs->regs[nongroup_need][(int) class] += 1;
+ p = reg_class_superclasses[(int) class];
+ while (*p != LIM_REG_CLASSES)
+ this_needs->regs[nongroup_need][(int) *p++] += 1;
+ }
+ else
+ abort ();
+ }
+
+ /* All reloads have been counted for this insn;
+ now merge the various times of use.
+ This sets insn_needs, etc., to the maximum total number
+ of registers needed at any point in this insn. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ int in_max, out_max;
+
+ /* Compute normal and nongroup needs. */
+ for (j = 0; j <= 1; j++)
+ {
+ for (in_max = 0, out_max = 0, k = 0;
+ k < reload_n_operands; k++)
+ {
+ in_max
+ = MAX (in_max, insn_needs.in_addr[k].regs[j][i]);
+ out_max
+ = MAX (out_max, insn_needs.out_addr[k].regs[j][i]);
+ }
+
+ /* RELOAD_FOR_INSN reloads conflict with inputs, outputs,
+ and operand addresses but not things used to reload
+ them. Similarly, RELOAD_FOR_OPERAND_ADDRESS reloads
+ don't conflict with things needed to reload inputs or
+ outputs. */
+
+ in_max = MAX (MAX (insn_needs.op_addr.regs[j][i],
+ insn_needs.op_addr_reload.regs[j][i]),
+ in_max);
+
+ out_max = MAX (out_max, insn_needs.insn.regs[j][i]);
+
+ insn_needs.input.regs[j][i]
+ = MAX (insn_needs.input.regs[j][i]
+ + insn_needs.op_addr.regs[j][i]
+ + insn_needs.insn.regs[j][i],
+ in_max + insn_needs.input.regs[j][i]);
+
+ insn_needs.output.regs[j][i] += out_max;
+ insn_needs.other.regs[j][i]
+ += MAX (MAX (insn_needs.input.regs[j][i],
+ insn_needs.output.regs[j][i]),
+ insn_needs.other_addr.regs[j][i]);
+
+ }
+
+ /* Now compute group needs. */
+ for (in_max = 0, out_max = 0, j = 0;
+ j < reload_n_operands; j++)
+ {
+ in_max = MAX (in_max, insn_needs.in_addr[j].groups[i]);
+ out_max
+ = MAX (out_max, insn_needs.out_addr[j].groups[i]);
+ }
+
+ in_max = MAX (MAX (insn_needs.op_addr.groups[i],
+ insn_needs.op_addr_reload.groups[i]),
+ in_max);
+ out_max = MAX (out_max, insn_needs.insn.groups[i]);
+
+ insn_needs.input.groups[i]
+ = MAX (insn_needs.input.groups[i]
+ + insn_needs.op_addr.groups[i]
+ + insn_needs.insn.groups[i],
+ in_max + insn_needs.input.groups[i]);
+
+ insn_needs.output.groups[i] += out_max;
+ insn_needs.other.groups[i]
+ += MAX (MAX (insn_needs.input.groups[i],
+ insn_needs.output.groups[i]),
+ insn_needs.other_addr.groups[i]);
+ }
+
+ /* If this is a CALL_INSN and caller-saves will need
+ a spill register, act as if the spill register is
+ needed for this insn. However, the spill register
+ can be used by any reload of this insn, so we only
+ need do something if no need for that class has
+ been recorded.
+
+ The assumption that every CALL_INSN will trigger a
+	       caller-save is highly conservative; however, the number
+ of cases where caller-saves will need a spill register but
+ a block containing a CALL_INSN won't need a spill register
+ of that class should be quite rare.
+
+ If a group is needed, the size and mode of the group will
+ have been set up at the beginning of this loop. */
+
+ if (GET_CODE (insn) == CALL_INSN
+ && caller_save_spill_class != NO_REGS)
+ {
+ /* See if this register would conflict with any reload
+ that needs a group. */
+ int nongroup_need = 0;
+ int *caller_save_needs;
+
+ for (j = 0; j < n_reloads; j++)
+ if ((CLASS_MAX_NREGS (reload_reg_class[j],
+ (GET_MODE_SIZE (reload_outmode[j])
+ > GET_MODE_SIZE (reload_inmode[j]))
+ ? reload_outmode[j]
+ : reload_inmode[j])
+ > 1)
+ && reg_classes_intersect_p (caller_save_spill_class,
+ reload_reg_class[j]))
+ {
+ nongroup_need = 1;
+ break;
+ }
+
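+	      /* Charge the caller-save need against the group counts
+	         when the save area needs a multi-register group, and
+	         against the ordinary or nongroup counts otherwise. */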
+ caller_save_needs
+ = (caller_save_group_size > 1
+ ? insn_needs.other.groups
+ : insn_needs.other.regs[nongroup_need]);
+
+ if (caller_save_needs[(int) caller_save_spill_class] == 0)
+ {
+ register enum reg_class *p
+ = reg_class_superclasses[(int) caller_save_spill_class];
+
+ caller_save_needs[(int) caller_save_spill_class]++;
+
+ while (*p != LIM_REG_CLASSES)
+ caller_save_needs[(int) *p++] += 1;
+ }
+
+ /* Show that this basic block will need a register of
+ this class. */
+
+ if (global
+ && ! (basic_block_needs[(int) caller_save_spill_class]
+ [this_block]))
+ {
+ basic_block_needs[(int) caller_save_spill_class]
+ [this_block] = 1;
+ new_basic_block_needs = 1;
+ }
+ }
+
+#ifdef SMALL_REGISTER_CLASSES
+ /* If this insn stores the value of a function call,
+ and that value is in a register that has been spilled,
+ and if the insn needs a reload in a class
+ that might use that register as the reload register,
+	     then add an extra need in that class.
+ This makes sure we have a register available that does
+ not overlap the return value. */
+
+ if (avoid_return_reg)
+ {
+ int regno = REGNO (avoid_return_reg);
+ int nregs
+ = HARD_REGNO_NREGS (regno, GET_MODE (avoid_return_reg));
+ int r;
+ int basic_needs[N_REG_CLASSES], basic_groups[N_REG_CLASSES];
+
+ /* First compute the "basic needs", which counts a
+ need only in the smallest class in which it
+ is required. */
+
+ bcopy (insn_needs.other.regs[0], basic_needs,
+ sizeof basic_needs);
+ bcopy (insn_needs.other.groups, basic_groups,
+ sizeof basic_groups);
+
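+	      /* Subtract each class's count from its superclasses so
+	         that a need is recorded only in the smallest class that
+	         requires it. */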
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ enum reg_class *p;
+
+ if (basic_needs[i] >= 0)
+ for (p = reg_class_superclasses[i];
+ *p != LIM_REG_CLASSES; p++)
+ basic_needs[(int) *p] -= basic_needs[i];
+
+ if (basic_groups[i] >= 0)
+ for (p = reg_class_superclasses[i];
+ *p != LIM_REG_CLASSES; p++)
+ basic_groups[(int) *p] -= basic_groups[i];
+ }
+
+ /* Now count extra regs if there might be a conflict with
+ the return value register.
+
+ ??? This is not quite correct because we don't properly
+ handle the case of groups, but if we end up doing
+ something wrong, it either will end up not mattering or
+ we will abort elsewhere. */
+
+ for (r = regno; r < regno + nregs; r++)
+ if (spill_reg_order[r] >= 0)
+ for (i = 0; i < N_REG_CLASSES; i++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[i], r))
+ {
+ if (basic_needs[i] > 0 || basic_groups[i] > 0)
+ {
+ enum reg_class *p;
+
+ insn_needs.other.regs[0][i]++;
+ p = reg_class_superclasses[i];
+ while (*p != LIM_REG_CLASSES)
+ insn_needs.other.regs[0][(int) *p++]++;
+ }
+ }
+ }
+#endif /* SMALL_REGISTER_CLASSES */
+
+ /* For each class, collect maximum need of any insn. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ if (max_needs[i] < insn_needs.other.regs[0][i])
+ {
+ max_needs[i] = insn_needs.other.regs[0][i];
+ max_needs_insn[i] = insn;
+ }
+ if (max_groups[i] < insn_needs.other.groups[i])
+ {
+ max_groups[i] = insn_needs.other.groups[i];
+ max_groups_insn[i] = insn;
+ }
+ if (max_nongroups[i] < insn_needs.other.regs[1][i])
+ {
+ max_nongroups[i] = insn_needs.other.regs[1][i];
+ max_nongroups_insn[i] = insn;
+ }
+ }
+ }
+ /* Note that there is a continue statement above. */
+ }
+
+ /* If we allocated any new memory locations, make another pass
+ since it might have changed elimination offsets. */
+ if (starting_frame_size != get_frame_size ())
+ something_changed = 1;
+
+ if (dumpfile)
+ for (i = 0; i < N_REG_CLASSES; i++)
+ {
+ if (max_needs[i] > 0)
+ fprintf (dumpfile,
+ ";; Need %d reg%s of class %s (for insn %d).\n",
+ max_needs[i], max_needs[i] == 1 ? "" : "s",
+ reg_class_names[i], INSN_UID (max_needs_insn[i]));
+ if (max_nongroups[i] > 0)
+ fprintf (dumpfile,
+ ";; Need %d nongroup reg%s of class %s (for insn %d).\n",
+ max_nongroups[i], max_nongroups[i] == 1 ? "" : "s",
+ reg_class_names[i], INSN_UID (max_nongroups_insn[i]));
+ if (max_groups[i] > 0)
+ fprintf (dumpfile,
+ ";; Need %d group%s (%smode) of class %s (for insn %d).\n",
+ max_groups[i], max_groups[i] == 1 ? "" : "s",
+ mode_name[(int) group_mode[i]],
+ reg_class_names[i], INSN_UID (max_groups_insn[i]));
+ }
+
+ /* If we have caller-saves, set up the save areas and see if caller-save
+ will need a spill register. */
+
+ if (caller_save_needed
+ && ! setup_save_areas (&something_changed)
+ && caller_save_spill_class == NO_REGS)
+ {
+ /* The class we will need depends on whether the machine
+ supports the sum of two registers for an address; see
+ find_address_reloads for details. */
+
+ caller_save_spill_class
+ = double_reg_address_ok ? INDEX_REG_CLASS : BASE_REG_CLASS;
+ caller_save_group_size
+ = CLASS_MAX_NREGS (caller_save_spill_class, Pmode);
+ something_changed = 1;
+ }
+
+ /* See if anything that happened changes which eliminations are valid.
+ For example, on the Sparc, whether or not the frame pointer can
+ be eliminated can depend on what registers have been used. We need
+ not check some conditions again (such as flag_omit_frame_pointer)
+ since they can't have changed. */
+
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ if ((ep->from == HARD_FRAME_POINTER_REGNUM && FRAME_POINTER_REQUIRED)
+#ifdef ELIMINABLE_REGS
+ || ! CAN_ELIMINATE (ep->from, ep->to)
+#endif
+ )
+ ep->can_eliminate = 0;
+
+ /* Look for the case where we have discovered that we can't replace
+ register A with register B and that means that we will now be
+ trying to replace register A with register C. This means we can
+ no longer replace register C with register B and we need to disable
+ such an elimination, if it exists. This occurs often with A == ap,
+ B == sp, and C == fp. */
+
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ {
+ struct elim_table *op;
+ register int new_to = -1;
+
+ if (! ep->can_eliminate && ep->can_eliminate_previous)
+ {
+ /* Find the current elimination for ep->from, if there is a
+ new one. */
+ for (op = reg_eliminate;
+ op < &reg_eliminate[NUM_ELIMINABLE_REGS]; op++)
+ if (op->from == ep->from && op->can_eliminate)
+ {
+ new_to = op->to;
+ break;
+ }
+
+ /* See if there is an elimination of NEW_TO -> EP->TO. If so,
+ disable it. */
+ for (op = reg_eliminate;
+ op < &reg_eliminate[NUM_ELIMINABLE_REGS]; op++)
+ if (op->from == new_to && op->to == ep->to)
+ op->can_eliminate = 0;
+ }
+ }
+
+ /* See if any registers that we thought we could eliminate the previous
+ time are no longer eliminable. If so, something has changed and we
+ must spill the register. Also, recompute the number of eliminable
+ registers and see if the frame pointer is needed; it is if there is
+ no elimination of the frame pointer that we can perform. */
+
+ frame_pointer_needed = 1;
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ {
+ if (ep->can_eliminate && ep->from == FRAME_POINTER_REGNUM
+ && ep->to != HARD_FRAME_POINTER_REGNUM)
+ frame_pointer_needed = 0;
+
+ if (! ep->can_eliminate && ep->can_eliminate_previous)
+ {
+ ep->can_eliminate_previous = 0;
+ spill_hard_reg (ep->from, global, dumpfile, 1);
+ something_changed = 1;
+ num_eliminable--;
+ }
+ }
+
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ /* If we didn't need a frame pointer last time, but we do now, spill
+ the hard frame pointer. */
+ if (frame_pointer_needed && ! previous_frame_pointer_needed)
+ {
+ spill_hard_reg (HARD_FRAME_POINTER_REGNUM, global, dumpfile, 1);
+ something_changed = 1;
+ }
+#endif
+
+ /* If all needs are met, we win. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ if (max_needs[i] > 0 || max_groups[i] > 0 || max_nongroups[i] > 0)
+ break;
+ if (i == N_REG_CLASSES && !new_basic_block_needs && ! something_changed)
+ break;
+
+ /* Not all needs are met; must spill some hard regs. */
+
+ /* Put all registers spilled so far back in potential_reload_regs, but
+ put them at the front, since we've already spilled most of the
+      pseudos in them (we might have left some pseudos unspilled if they
+      were in a block that didn't need any spill registers of a conflicting
+      class). We used to try to mark off the need for those registers,
+ but doing so properly is very complex and reallocating them is the
+ simpler approach. First, "pack" potential_reload_regs by pushing
+ any nonnegative entries towards the end. That will leave room
+ for the registers we already spilled.
+
+ Also, undo the marking of the spill registers from the last time
+      around in FORBIDDEN_REGS since we will probably be allocating
+ them again below.
+
+ ??? It is theoretically possible that we might end up not using one
+ of our previously-spilled registers in this allocation, even though
+ they are at the head of the list. It's not clear what to do about
+ this, but it was no better before, when we marked off the needs met
+ by the previously-spilled registers. With the current code, globals
+ can be allocated into these registers, but locals cannot. */
+
+ if (n_spills)
+ {
+ for (i = j = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
+ if (potential_reload_regs[i] != -1)
+ potential_reload_regs[j--] = potential_reload_regs[i];
+
+ for (i = 0; i < n_spills; i++)
+ {
+ potential_reload_regs[i] = spill_regs[i];
+ spill_reg_order[spill_regs[i]] = -1;
+ CLEAR_HARD_REG_BIT (forbidden_regs, spill_regs[i]);
+ }
+
+ n_spills = 0;
+ }
+
+  /* Now find more reload regs to satisfy the remaining need.
+ Do it by ascending class number, since otherwise a reg
+ might be spilled for a big class and might fail to count
+ for a smaller class even though it belongs to that class.
+
+ Count spilled regs in `spills', and add entries to
+ `spill_regs' and `spill_reg_order'.
+
+ ??? Note there is a problem here.
+ When there is a need for a group in a high-numbered class,
+ and also need for non-group regs that come from a lower class,
+ the non-group regs are chosen first. If there aren't many regs,
+ they might leave no room for a group.
+
+ This was happening on the 386. To fix it, we added the code
+ that calls possible_group_p, so that the lower class won't
+ break up the last possible group.
+
+ Really fixing the problem would require changes above
+ in counting the regs already spilled, and in choose_reload_regs.
+ It might be hard to avoid introducing bugs there. */
+
+ CLEAR_HARD_REG_SET (counted_for_groups);
+ CLEAR_HARD_REG_SET (counted_for_nongroups);
+
+ for (class = 0; class < N_REG_CLASSES; class++)
+ {
+ /* First get the groups of registers.
+ If we got single registers first, we might fragment
+ possible groups. */
+ while (max_groups[class] > 0)
+ {
+ /* If any single spilled regs happen to form groups,
+ count them now. Maybe we don't really need
+ to spill another group. */
+ count_possible_groups (group_size, group_mode, max_groups);
+
+ if (max_groups[class] <= 0)
+ break;
+
+ /* Groups of size 2 (the only groups used on most machines)
+ are treated specially. */
+ if (group_size[class] == 2)
+ {
+ /* First, look for a register that will complete a group. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ int other;
+
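+		      /* Candidate J completes a group if a neighboring
+			 register (J-1 or J+1) is already a spill reg of
+			 this class, is not reserved for a nongroup need
+			 or another group, and the pair is valid for the
+			 group mode. */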
+ j = potential_reload_regs[i];
+ if (j >= 0 && ! TEST_HARD_REG_BIT (bad_spill_regs, j)
+ &&
+ ((j > 0 && (other = j - 1, spill_reg_order[other] >= 0)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], j)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], other)
+ && HARD_REGNO_MODE_OK (other, group_mode[class])
+ && ! TEST_HARD_REG_BIT (counted_for_nongroups,
+ other)
+ /* We don't want one part of another group.
+ We could get "two groups" that overlap! */
+ && ! TEST_HARD_REG_BIT (counted_for_groups, other))
+ ||
+ (j < FIRST_PSEUDO_REGISTER - 1
+ && (other = j + 1, spill_reg_order[other] >= 0)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], j)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], other)
+ && HARD_REGNO_MODE_OK (j, group_mode[class])
+ && ! TEST_HARD_REG_BIT (counted_for_nongroups,
+ other)
+ && ! TEST_HARD_REG_BIT (counted_for_groups,
+ other))))
+ {
+ register enum reg_class *p;
+
+ /* We have found one that will complete a group,
+ so count off one group as provided. */
+ max_groups[class]--;
+ p = reg_class_superclasses[class];
+ while (*p != LIM_REG_CLASSES)
+ max_groups[(int) *p++]--;
+
+ /* Indicate both these regs are part of a group. */
+ SET_HARD_REG_BIT (counted_for_groups, j);
+ SET_HARD_REG_BIT (counted_for_groups, other);
+ break;
+ }
+ }
+ /* We can't complete a group, so start one. */
+#ifdef SMALL_REGISTER_CLASSES
+ /* Look for a pair neither of which is explicitly used. */
+ if (i == FIRST_PSEUDO_REGISTER)
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ int k;
+ j = potential_reload_regs[i];
+ /* Verify that J+1 is a potential reload reg. */
+ for (k = 0; k < FIRST_PSEUDO_REGISTER; k++)
+ if (potential_reload_regs[k] == j + 1)
+ break;
+ if (j >= 0 && j + 1 < FIRST_PSEUDO_REGISTER
+ && k < FIRST_PSEUDO_REGISTER
+ && spill_reg_order[j] < 0 && spill_reg_order[j + 1] < 0
+ && TEST_HARD_REG_BIT (reg_class_contents[class], j)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], j + 1)
+ && HARD_REGNO_MODE_OK (j, group_mode[class])
+ && ! TEST_HARD_REG_BIT (counted_for_nongroups,
+ j + 1)
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, j + 1)
+ /* Reject J at this stage
+ if J+1 was explicitly used. */
+ && ! regs_explicitly_used[j + 1])
+ break;
+ }
+#endif
+ /* Now try any group at all
+ whose registers are not in bad_spill_regs. */
+ if (i == FIRST_PSEUDO_REGISTER)
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ int k;
+ j = potential_reload_regs[i];
+ /* Verify that J+1 is a potential reload reg. */
+ for (k = 0; k < FIRST_PSEUDO_REGISTER; k++)
+ if (potential_reload_regs[k] == j + 1)
+ break;
+ if (j >= 0 && j + 1 < FIRST_PSEUDO_REGISTER
+ && k < FIRST_PSEUDO_REGISTER
+ && spill_reg_order[j] < 0 && spill_reg_order[j + 1] < 0
+ && TEST_HARD_REG_BIT (reg_class_contents[class], j)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], j + 1)
+ && HARD_REGNO_MODE_OK (j, group_mode[class])
+ && ! TEST_HARD_REG_BIT (counted_for_nongroups,
+ j + 1)
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, j + 1))
+ break;
+ }
+
+ /* I should be the index in potential_reload_regs
+ of the new reload reg we have found. */
+
+ if (i >= FIRST_PSEUDO_REGISTER)
+ {
+ /* There are no groups left to spill. */
+ spill_failure (max_groups_insn[class]);
+ failure = 1;
+ goto failed;
+ }
+ else
+ something_changed
+ |= new_spill_reg (i, class, max_needs, NULL_PTR,
+ global, dumpfile);
+ }
+ else
+ {
+ /* For groups of more than 2 registers,
+ look for a sufficient sequence of unspilled registers,
+ and spill them all at once. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ int k;
+
+ j = potential_reload_regs[i];
+ if (j >= 0
+ && j + group_size[class] <= FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_MODE_OK (j, group_mode[class]))
+ {
+ /* Check each reg in the sequence. */
+ for (k = 0; k < group_size[class]; k++)
+ if (! (spill_reg_order[j + k] < 0
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, j + k)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], j + k)))
+ break;
+ /* We got a full sequence, so spill them all. */
+ if (k == group_size[class])
+ {
+ register enum reg_class *p;
+ for (k = 0; k < group_size[class]; k++)
+ {
+ int idx;
+ SET_HARD_REG_BIT (counted_for_groups, j + k);
+ for (idx = 0; idx < FIRST_PSEUDO_REGISTER; idx++)
+ if (potential_reload_regs[idx] == j + k)
+ break;
+ something_changed
+ |= new_spill_reg (idx, class,
+ max_needs, NULL_PTR,
+ global, dumpfile);
+ }
+
+ /* We have found one that will complete a group,
+ so count off one group as provided. */
+ max_groups[class]--;
+ p = reg_class_superclasses[class];
+ while (*p != LIM_REG_CLASSES)
+ max_groups[(int) *p++]--;
+
+ break;
+ }
+ }
+ }
+ /* We couldn't find any registers for this reload.
+ Avoid going into an infinite loop. */
+ if (i >= FIRST_PSEUDO_REGISTER)
+ {
+ /* There are no groups left. */
+ spill_failure (max_groups_insn[class]);
+ failure = 1;
+ goto failed;
+ }
+ }
+ }
+
+      /* Now similarly satisfy all needs for single registers. */
+
+ while (max_needs[class] > 0 || max_nongroups[class] > 0)
+ {
+#ifdef SMALL_REGISTER_CLASSES
+ /* This should be right for all machines, but only the 386
+ is known to need it, so this conditional plays safe.
+ ??? For 2.5, try making this unconditional. */
+ /* If we spilled enough regs, but they weren't counted
+ against the non-group need, see if we can count them now.
+ If so, we can avoid some actual spilling. */
+ if (max_needs[class] <= 0 && max_nongroups[class] > 0)
+ for (i = 0; i < n_spills; i++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[class],
+ spill_regs[i])
+ && !TEST_HARD_REG_BIT (counted_for_groups,
+ spill_regs[i])
+ && !TEST_HARD_REG_BIT (counted_for_nongroups,
+ spill_regs[i])
+ && max_nongroups[class] > 0)
+ {
+ register enum reg_class *p;
+
+ SET_HARD_REG_BIT (counted_for_nongroups, spill_regs[i]);
+ max_nongroups[class]--;
+ p = reg_class_superclasses[class];
+ while (*p != LIM_REG_CLASSES)
+ max_nongroups[(int) *p++]--;
+ }
+ if (max_needs[class] <= 0 && max_nongroups[class] <= 0)
+ break;
+#endif
+
+ /* Consider the potential reload regs that aren't
+ yet in use as reload regs, in order of preference.
+ Find the most preferred one that's in this class. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (potential_reload_regs[i] >= 0
+ && TEST_HARD_REG_BIT (reg_class_contents[class],
+ potential_reload_regs[i])
+ /* If this reg will not be available for groups,
+ pick one that does not foreclose possible groups.
+ This is a kludge, and not very general,
+ but it should be sufficient to make the 386 work,
+ and the problem should not occur on machines with
+ more registers. */
+ && (max_nongroups[class] == 0
+ || possible_group_p (potential_reload_regs[i], max_groups)))
+ break;
+
+ /* If we couldn't get a register, try to get one even if we
+ might foreclose possible groups. This may cause problems
+ later, but that's better than aborting now, since it is
+ possible that we will, in fact, be able to form the needed
+ group even with this allocation. */
+
+ if (i >= FIRST_PSEUDO_REGISTER
+ && (asm_noperands (max_needs[class] > 0
+ ? max_needs_insn[class]
+ : max_nongroups_insn[class])
+ < 0))
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (potential_reload_regs[i] >= 0
+ && TEST_HARD_REG_BIT (reg_class_contents[class],
+ potential_reload_regs[i]))
+ break;
+
+ /* I should be the index in potential_reload_regs
+ of the new reload reg we have found. */
+
+ if (i >= FIRST_PSEUDO_REGISTER)
+ {
+ /* There are no possible registers left to spill. */
+ spill_failure (max_needs[class] > 0 ? max_needs_insn[class]
+ : max_nongroups_insn[class]);
+ failure = 1;
+ goto failed;
+ }
+ else
+ something_changed
+ |= new_spill_reg (i, class, max_needs, max_nongroups,
+ global, dumpfile);
+ }
+ }
+ }
+
+ /* If global-alloc was run, notify it of any register eliminations we have
+ done. */
+ if (global)
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ if (ep->can_eliminate)
+ mark_elimination (ep->from, ep->to);
+
+ /* Insert code to save and restore call-clobbered hard regs
+     around calls. Tell what mode to use so that we will process
+ those insns in reload_as_needed if we have to. */
+
+ if (caller_save_needed)
+ save_call_clobbered_regs (num_eliminable ? QImode
+ : caller_save_spill_class != NO_REGS ? HImode
+ : VOIDmode);
+
+ /* If a pseudo has no hard reg, delete the insns that made the equivalence.
+ If that insn didn't set the register (i.e., it copied the register to
+ memory), just delete that insn instead of the equivalencing insn plus
+ anything now dead. If we call delete_dead_insn on that insn, we may
+     delete the insn that actually sets the register if the register dies
+     there, and that would be incorrect. */
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_renumber[i] < 0 && reg_equiv_init[i] != 0
+ && GET_CODE (reg_equiv_init[i]) != NOTE)
+ {
+ if (reg_set_p (regno_reg_rtx[i], PATTERN (reg_equiv_init[i])))
+ delete_dead_insn (reg_equiv_init[i]);
+ else
+ {
+ PUT_CODE (reg_equiv_init[i], NOTE);
+ NOTE_SOURCE_FILE (reg_equiv_init[i]) = 0;
+ NOTE_LINE_NUMBER (reg_equiv_init[i]) = NOTE_INSN_DELETED;
+ }
+ }
+
+ /* Use the reload registers where necessary
+ by generating move instructions to move the must-be-register
+ values into or out of the reload registers. */
+
+ if (something_needs_reloads || something_needs_elimination
+ || (caller_save_needed && num_eliminable)
+ || caller_save_spill_class != NO_REGS)
+ reload_as_needed (first, global);
+
+ /* If we were able to eliminate the frame pointer, show that it is no
+     longer live at the start of any basic block. If it is live by
+ virtue of being in a pseudo, that pseudo will be marked live
+ and hence the frame pointer will be known to be live via that
+ pseudo. */
+
+ if (! frame_pointer_needed)
+ for (i = 0; i < n_basic_blocks; i++)
+ basic_block_live_at_start[i][HARD_FRAME_POINTER_REGNUM / REGSET_ELT_BITS]
+ &= ~ ((REGSET_ELT_TYPE) 1 << (HARD_FRAME_POINTER_REGNUM
+ % REGSET_ELT_BITS));
+
+ /* Come here (with failure set nonzero) if we can't get enough spill regs
+ and we decide not to abort about it. */
+ failed:
+
+ reload_in_progress = 0;
+
+ /* Now eliminate all pseudo regs by modifying them into
+ their equivalent memory references.
+ The REG-rtx's for the pseudos are modified in place,
+ so all insns that used to refer to them now refer to memory.
+
+ For a reg that has a reg_equiv_address, all those insns
+ were changed by reloading so that no insns refer to it any longer;
+ but the DECL_RTL of a variable decl may refer to it,
+ and if so this causes the debugging info to mention the variable. */
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ {
+ rtx addr = 0;
+ int in_struct = 0;
+ if (reg_equiv_mem[i])
+ {
+ addr = XEXP (reg_equiv_mem[i], 0);
+ in_struct = MEM_IN_STRUCT_P (reg_equiv_mem[i]);
+ }
+ if (reg_equiv_address[i])
+ addr = reg_equiv_address[i];
+ if (addr)
+ {
+ if (reg_renumber[i] < 0)
+ {
+ rtx reg = regno_reg_rtx[i];
+ XEXP (reg, 0) = addr;
+ REG_USERVAR_P (reg) = 0;
+ MEM_IN_STRUCT_P (reg) = in_struct;
+ PUT_CODE (reg, MEM);
+ }
+ else if (reg_equiv_mem[i])
+ XEXP (reg_equiv_mem[i], 0) = addr;
+ }
+ }
+
+#ifdef PRESERVE_DEATH_INFO_REGNO_P
+ /* Make a pass over all the insns and remove death notes for things that
+ are no longer registers or no longer die in the insn (e.g., an input
+ and output pseudo being tied). */
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ rtx note, next;
+
+ for (note = REG_NOTES (insn); note; note = next)
+ {
+ next = XEXP (note, 1);
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && (GET_CODE (XEXP (note, 0)) != REG
+ || reg_set_p (XEXP (note, 0), PATTERN (insn))))
+ remove_note (insn, note);
+ }
+ }
+#endif
+
+ /* Indicate that we no longer have known memory locations or constants. */
+ reg_equiv_constant = 0;
+ reg_equiv_memory_loc = 0;
+
+ if (scratch_list)
+ free (scratch_list);
+ scratch_list = 0;
+ if (scratch_block)
+ free (scratch_block);
+ scratch_block = 0;
+
+ return failure;
+}
+
+/* Nonzero if, after spilling reg REGNO for non-groups,
+ it will still be possible to find a group if we still need one. */
+
+static int
+possible_group_p (regno, max_groups)
+ int regno;
+ int *max_groups;
+{
+ int i;
+ int class = (int) NO_REGS;
+
+ for (i = 0; i < (int) N_REG_CLASSES; i++)
+ if (max_groups[i] > 0)
+ {
+ class = i;
+ break;
+ }
+
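+  /* If no class still needs a group, spilling REGNO cannot block one. */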
+ if (class == (int) NO_REGS)
+ return 1;
+
+ /* Consider each pair of consecutive registers. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER - 1; i++)
+ {
+ /* Ignore pairs that include reg REGNO. */
+ if (i == regno || i + 1 == regno)
+ continue;
+
+ /* Ignore pairs that are outside the class that needs the group.
+ ??? Here we fail to handle the case where two different classes
+ independently need groups. But this never happens with our
+ current machine descriptions. */
+ if (! (TEST_HARD_REG_BIT (reg_class_contents[class], i)
+ && TEST_HARD_REG_BIT (reg_class_contents[class], i + 1)))
+ continue;
+
+ /* A pair of consecutive regs we can still spill does the trick. */
+ if (spill_reg_order[i] < 0 && spill_reg_order[i + 1] < 0
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, i)
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, i + 1))
+ return 1;
+
+ /* A pair of one already spilled and one we can spill does it
+ provided the one already spilled is not otherwise reserved. */
+ if (spill_reg_order[i] < 0
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, i)
+ && spill_reg_order[i + 1] >= 0
+ && ! TEST_HARD_REG_BIT (counted_for_groups, i + 1)
+ && ! TEST_HARD_REG_BIT (counted_for_nongroups, i + 1))
+ return 1;
+ if (spill_reg_order[i + 1] < 0
+ && ! TEST_HARD_REG_BIT (bad_spill_regs, i + 1)
+ && spill_reg_order[i] >= 0
+ && ! TEST_HARD_REG_BIT (counted_for_groups, i)
+ && ! TEST_HARD_REG_BIT (counted_for_nongroups, i))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Count any groups that can be formed from the registers recently spilled.
+ This is done class by class, in order of ascending class number. */
+
+static void
+count_possible_groups (group_size, group_mode, max_groups)
+ int *group_size;
+ enum machine_mode *group_mode;
+ int *max_groups;
+{
+ int i;
+ /* Now find all consecutive groups of spilled registers
+ and mark each group off against the need for such groups.
+ But don't count them against ordinary need, yet. */
+
+ for (i = 0; i < N_REG_CLASSES; i++)
+ if (group_size[i] > 1)
+ {
+ HARD_REG_SET new;
+ int j;
+
+ CLEAR_HARD_REG_SET (new);
+
+ /* Make a mask of all the regs that are spill regs in class I. */
+ for (j = 0; j < n_spills; j++)
+ if (TEST_HARD_REG_BIT (reg_class_contents[i], spill_regs[j])
+ && ! TEST_HARD_REG_BIT (counted_for_groups, spill_regs[j])
+ && ! TEST_HARD_REG_BIT (counted_for_nongroups,
+ spill_regs[j]))
+ SET_HARD_REG_BIT (new, spill_regs[j]);
+
+ /* Find each consecutive group of them. */
+ for (j = 0; j < FIRST_PSEUDO_REGISTER && max_groups[i] > 0; j++)
+ if (TEST_HARD_REG_BIT (new, j)
+ && j + group_size[i] <= FIRST_PSEUDO_REGISTER
+ /* Next line in case group-mode for this class
+ demands an even-odd pair. */
+ && HARD_REGNO_MODE_OK (j, group_mode[i]))
+ {
+ int k;
+ for (k = 1; k < group_size[i]; k++)
+ if (! TEST_HARD_REG_BIT (new, j + k))
+ break;
+ if (k == group_size[i])
+ {
+ /* We found a group. Mark it off against this class's
+ need for groups, and against each superclass too. */
+ register enum reg_class *p;
+ max_groups[i]--;
+ p = reg_class_superclasses[i];
+ while (*p != LIM_REG_CLASSES)
+ max_groups[(int) *p++]--;
+ /* Don't count these registers again. */
+ for (k = 0; k < group_size[i]; k++)
+ SET_HARD_REG_BIT (counted_for_groups, j + k);
+ }
+ /* Skip to the last reg in this group. When j is incremented
+ above, it will then point to the first reg of the next
+ possible group. */
+ j += k - 1;
+ }
+ }
+
+}
+
+/* ALLOCATE_MODE is a register mode that needs to be reloaded. OTHER_MODE is
+ another mode that needs to be reloaded for the same register class CLASS.
+ If any reg in CLASS allows ALLOCATE_MODE but not OTHER_MODE, fail.
+ ALLOCATE_MODE will never be smaller than OTHER_MODE.
+
+ This code used to also fail if any reg in CLASS allows OTHER_MODE but not
+ ALLOCATE_MODE. This test is unnecessary, because we will never try to put
+ something of mode ALLOCATE_MODE into an OTHER_MODE register. Testing this
+ causes unnecessary failures on machines requiring alignment of register
+ groups when the two modes are different sizes, because the larger mode has
+ more strict alignment rules than the smaller mode. */
+
+static int
+modes_equiv_for_class_p (allocate_mode, other_mode, class)
+ enum machine_mode allocate_mode, other_mode;
+ enum reg_class class;
+{
+ register int regno;
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ {
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], regno)
+ && HARD_REGNO_MODE_OK (regno, allocate_mode)
+ && ! HARD_REGNO_MODE_OK (regno, other_mode))
+ return 0;
+ }
+ return 1;
+}
+
+/* Handle the failure to find a register to spill.
+ INSN should be one of the insns which needed this particular spill reg. */
+
+static void
+spill_failure (insn)
+ rtx insn;
+{
+ if (asm_noperands (PATTERN (insn)) >= 0)
+ error_for_asm (insn, "`asm' needs too many reloads");
+ else
+ abort ();
+}
+
+/* Add a new register to the tables of available spill-registers
+ (as well as spilling all pseudos allocated to the register).
+ I is the index of this register in potential_reload_regs.
+ CLASS is the regclass whose need is being satisfied.
+ MAX_NEEDS and MAX_NONGROUPS are the vectors of needs,
+ so that this register can count off against them.
+ MAX_NONGROUPS is 0 if this register is part of a group.
+ GLOBAL and DUMPFILE are the same as the args that `reload' got. */
+
+static int
+new_spill_reg (i, class, max_needs, max_nongroups, global, dumpfile)
+ int i;
+ int class;
+ int *max_needs;
+ int *max_nongroups;
+ int global;
+ FILE *dumpfile;
+{
+ register enum reg_class *p;
+ int val;
+ int regno = potential_reload_regs[i];
+
+ if (i >= FIRST_PSEUDO_REGISTER)
+ abort (); /* Caller failed to find any register. */
+
+ if (fixed_regs[regno] || TEST_HARD_REG_BIT (forbidden_regs, regno))
+ fatal ("fixed or forbidden register was spilled.\n\
+This may be due to a compiler bug or to impossible asm\n\
+statements or clauses.");
+
+ /* Make reg REGNO an additional reload reg. */
+
+ potential_reload_regs[i] = -1;
+ spill_regs[n_spills] = regno;
+ spill_reg_order[regno] = n_spills;
+ if (dumpfile)
+ fprintf (dumpfile, "Spilling reg %d.\n", spill_regs[n_spills]);
+
+ /* Clear off the needs we just satisfied. */
+
+ max_needs[class]--;
+ p = reg_class_superclasses[class];
+ while (*p != LIM_REG_CLASSES)
+ max_needs[(int) *p++]--;
+
+ if (max_nongroups && max_nongroups[class] > 0)
+ {
+ SET_HARD_REG_BIT (counted_for_nongroups, regno);
+ max_nongroups[class]--;
+ p = reg_class_superclasses[class];
+ while (*p != LIM_REG_CLASSES)
+ max_nongroups[(int) *p++]--;
+ }
+
+ /* Spill every pseudo reg that was allocated to this reg
+ or to something that overlaps this reg. */
+
+ val = spill_hard_reg (spill_regs[n_spills], global, dumpfile, 0);
+
+ /* If there are some registers still to eliminate and this register
+ wasn't ever used before, additional stack space may have to be
+ allocated to store this register. Thus, we may have changed the offset
+ between the stack and frame pointers, so mark that something has changed.
+ (If new pseudos were spilled, thus requiring more space, VAL would have
+ been set non-zero by the call to spill_hard_reg above since additional
+ reloads may be needed in that case.
+
+ One might think that we need only set VAL to 1 if this is a call-used
+ register. However, the set of registers that must be saved by the
+ prologue is not identical to the call-used set. For example, the
+ register used by the call insn for the return PC is a call-used register,
+ but must be saved by the prologue. */
+ if (num_eliminable && ! regs_ever_live[spill_regs[n_spills]])
+ val = 1;
+
+ regs_ever_live[spill_regs[n_spills]] = 1;
+ n_spills++;
+
+ return val;
+}
+
+/* Delete an unneeded INSN and any previous insns whose sole purpose is loading
+ data that is dead in INSN. */
+
+static void
+delete_dead_insn (insn)
+ rtx insn;
+{
+ rtx prev = prev_real_insn (insn);
+ rtx prev_dest;
+
+ /* If the previous insn sets a register that dies in our insn, delete it
+ too. */
+ if (prev && GET_CODE (PATTERN (prev)) == SET
+ && (prev_dest = SET_DEST (PATTERN (prev)), GET_CODE (prev_dest) == REG)
+ && reg_mentioned_p (prev_dest, PATTERN (insn))
+ && find_regno_note (insn, REG_DEAD, REGNO (prev_dest)))
+ delete_dead_insn (prev);
+
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+}
+
+/* Modify the home of pseudo-reg I.
+ The new home is present in reg_renumber[I].
+
+ FROM_REG may be the hard reg that the pseudo-reg is being spilled from;
+ or it may be -1, meaning there is none or it is not relevant.
+ This is used so that all pseudos spilled from a given hard reg
+ can share one stack slot. */
+
+static void
+alter_reg (i, from_reg)
+ register int i;
+ int from_reg;
+{
+ /* When outputting an inline function, this can happen
+ for a reg that isn't actually used. */
+ if (regno_reg_rtx[i] == 0)
+ return;
+
+ /* If the reg got changed to a MEM at rtl-generation time,
+ ignore it. */
+ if (GET_CODE (regno_reg_rtx[i]) != REG)
+ return;
+
+ /* Modify the reg-rtx to contain the new hard reg
+ number or else to contain its pseudo reg number. */
+ REGNO (regno_reg_rtx[i])
+ = reg_renumber[i] >= 0 ? reg_renumber[i] : i;
+
+ /* If we have a pseudo that is needed but has no hard reg or equivalent,
+ allocate a stack slot for it. */
+
+ if (reg_renumber[i] < 0
+ && reg_n_refs[i] > 0
+ && reg_equiv_constant[i] == 0
+ && reg_equiv_memory_loc[i] == 0)
+ {
+ register rtx x;
+ int inherent_size = PSEUDO_REGNO_BYTES (i);
+ int total_size = MAX (inherent_size, reg_max_ref_width[i]);
+ int adjust = 0;
+
+ /* Each pseudo reg has an inherent size which comes from its own mode,
+ and a total size which provides room for paradoxical subregs
+ which refer to the pseudo reg in wider modes.
+
+ We can use a slot already allocated if it provides both
+ enough inherent space and enough total space.
+ Otherwise, we allocate a new slot, making sure that it has no less
+	 inherent space, and no less total space, than the previous slot. */
+ if (from_reg == -1)
+ {
+ /* No known place to spill from => no slot to reuse. */
+ x = assign_stack_local (GET_MODE (regno_reg_rtx[i]), total_size, -1);
+#if BYTES_BIG_ENDIAN
+ /* Cancel the big-endian correction done in assign_stack_local.
+ Get the address of the beginning of the slot.
+ This is so we can do a big-endian correction unconditionally
+ below. */
+ adjust = inherent_size - total_size;
+#endif
+ }
+ /* Reuse a stack slot if possible. */
+ else if (spill_stack_slot[from_reg] != 0
+ && spill_stack_slot_width[from_reg] >= total_size
+ && (GET_MODE_SIZE (GET_MODE (spill_stack_slot[from_reg]))
+ >= inherent_size))
+ x = spill_stack_slot[from_reg];
+ /* Allocate a bigger slot. */
+ else
+ {
+ /* Compute maximum size needed, both for inherent size
+ and for total size. */
+ enum machine_mode mode = GET_MODE (regno_reg_rtx[i]);
+ if (spill_stack_slot[from_reg])
+ {
+ if (GET_MODE_SIZE (GET_MODE (spill_stack_slot[from_reg]))
+ > inherent_size)
+ mode = GET_MODE (spill_stack_slot[from_reg]);
+ if (spill_stack_slot_width[from_reg] > total_size)
+ total_size = spill_stack_slot_width[from_reg];
+ }
+ /* Make a slot with that size. */
+ x = assign_stack_local (mode, total_size, -1);
+#if BYTES_BIG_ENDIAN
+ /* Cancel the big-endian correction done in assign_stack_local.
+ Get the address of the beginning of the slot.
+ This is so we can do a big-endian correction unconditionally
+ below. */
+ adjust = GET_MODE_SIZE (mode) - total_size;
+#endif
+ spill_stack_slot[from_reg] = x;
+ spill_stack_slot_width[from_reg] = total_size;
+ }
+
+#if BYTES_BIG_ENDIAN
+ /* On a big endian machine, the "address" of the slot
+ is the address of the low part that fits its inherent mode. */
+ if (inherent_size < total_size)
+ adjust += (total_size - inherent_size);
+#endif /* BYTES_BIG_ENDIAN */
+
+ /* If we have any adjustment to make, or if the stack slot is the
+ wrong mode, make a new stack slot. */
+ if (adjust != 0 || GET_MODE (x) != GET_MODE (regno_reg_rtx[i]))
+ {
+ x = gen_rtx (MEM, GET_MODE (regno_reg_rtx[i]),
+ plus_constant (XEXP (x, 0), adjust));
+ RTX_UNCHANGING_P (x) = RTX_UNCHANGING_P (regno_reg_rtx[i]);
+ }
+
+ /* Save the stack slot for later. */
+ reg_equiv_memory_loc[i] = x;
+ }
+}
+
+/* Mark the slots in regs_ever_live for the hard regs
+ used by pseudo-reg number REGNO. */
+
+void
+mark_home_live (regno)
+ int regno;
+{
+ register int i, lim;
+ i = reg_renumber[regno];
+ if (i < 0)
+ return;
+ lim = i + HARD_REGNO_NREGS (i, PSEUDO_REGNO_MODE (regno));
+ while (i < lim)
+ regs_ever_live[i++] = 1;
+}
+
+/* Mark the registers used in SCRATCH as being live. */
+
+static void
+mark_scratch_live (scratch)
+ rtx scratch;
+{
+ register int i;
+ int regno = REGNO (scratch);
+ int lim = regno + HARD_REGNO_NREGS (regno, GET_MODE (scratch));
+
+ for (i = regno; i < lim; i++)
+ regs_ever_live[i] = 1;
+}
+
+/* This function handles the tracking of elimination offsets around branches.
+
+ X is a piece of RTL being scanned.
+
+ INSN is the insn that it came from, if any.
+
+ INITIAL_P is non-zero if we are to set the offset to be the initial
+ offset and zero if we are setting the offset of the label to be the
+ current offset. */
+
+static void
+set_label_offsets (x, insn, initial_p)
+ rtx x;
+ rtx insn;
+ int initial_p;
+{
+ enum rtx_code code = GET_CODE (x);
+ rtx tem;
+ int i;
+ struct elim_table *p;
+
+ switch (code)
+ {
+ case LABEL_REF:
+ if (LABEL_REF_NONLOCAL_P (x))
+ return;
+
+ x = XEXP (x, 0);
+
+ /* ... fall through ... */
+
+ case CODE_LABEL:
+ /* If we know nothing about this label, set the desired offsets. Note
+ that this sets the offset at a label to be the offset before a label
+ if we don't know anything about the label. This is not correct for
+ the label after a BARRIER, but is the best guess we can make. If
+ we guessed wrong, we will suppress an elimination that might have
+ been possible had we been able to guess correctly. */
+
+ if (! offsets_known_at[CODE_LABEL_NUMBER (x)])
+ {
+ for (i = 0; i < NUM_ELIMINABLE_REGS; i++)
+ offsets_at[CODE_LABEL_NUMBER (x)][i]
+ = (initial_p ? reg_eliminate[i].initial_offset
+ : reg_eliminate[i].offset);
+ offsets_known_at[CODE_LABEL_NUMBER (x)] = 1;
+ }
+
+ /* Otherwise, if this is the definition of a label and it is
+ preceded by a BARRIER, set our offsets to the known offset of
+ that label. */
+
+ else if (x == insn
+ && (tem = prev_nonnote_insn (insn)) != 0
+ && GET_CODE (tem) == BARRIER)
+ {
+ num_not_at_initial_offset = 0;
+ for (i = 0; i < NUM_ELIMINABLE_REGS; i++)
+ {
+ reg_eliminate[i].offset = reg_eliminate[i].previous_offset
+ = offsets_at[CODE_LABEL_NUMBER (x)][i];
+ if (reg_eliminate[i].can_eliminate
+ && (reg_eliminate[i].offset
+ != reg_eliminate[i].initial_offset))
+ num_not_at_initial_offset++;
+ }
+ }
+
+ else
+ /* If neither of the above cases is true, compare each offset
+ with those previously recorded and suppress any eliminations
+ where the offsets disagree. */
+
+ for (i = 0; i < NUM_ELIMINABLE_REGS; i++)
+ if (offsets_at[CODE_LABEL_NUMBER (x)][i]
+ != (initial_p ? reg_eliminate[i].initial_offset
+ : reg_eliminate[i].offset))
+ reg_eliminate[i].can_eliminate = 0;
+
+ return;
+
+ case JUMP_INSN:
+ set_label_offsets (PATTERN (insn), insn, initial_p);
+
+ /* ... fall through ... */
+
+ case INSN:
+ case CALL_INSN:
+ /* Any labels mentioned in REG_LABEL notes can be branched to indirectly
+ and hence must have all eliminations at their initial offsets. */
+ for (tem = REG_NOTES (x); tem; tem = XEXP (tem, 1))
+ if (REG_NOTE_KIND (tem) == REG_LABEL)
+ set_label_offsets (XEXP (tem, 0), insn, 1);
+ return;
+
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ /* Each of the labels in the address vector must be at their initial
+	 offsets. We want the first field for ADDR_VEC and the second
+ field for ADDR_DIFF_VEC. */
+
+ for (i = 0; i < XVECLEN (x, code == ADDR_DIFF_VEC); i++)
+ set_label_offsets (XVECEXP (x, code == ADDR_DIFF_VEC, i),
+ insn, initial_p);
+ return;
+
+ case SET:
+ /* We only care about setting PC. If the source is not RETURN,
+ IF_THEN_ELSE, or a label, disable any eliminations not at
+ their initial offsets. Similarly if any arm of the IF_THEN_ELSE
+ isn't one of those possibilities. For branches to a label,
+ call ourselves recursively.
+
+ Note that this can disable elimination unnecessarily when we have
+ a non-local goto since it will look like a non-constant jump to
+ someplace in the current function. This isn't a significant
+	 problem since such jumps will normally occur when all elimination
+ pairs are back to their initial offsets. */
+
+ if (SET_DEST (x) != pc_rtx)
+ return;
+
+ switch (GET_CODE (SET_SRC (x)))
+ {
+ case PC:
+ case RETURN:
+ return;
+
+ case LABEL_REF:
+ set_label_offsets (XEXP (SET_SRC (x), 0), insn, initial_p);
+ return;
+
+ case IF_THEN_ELSE:
+ tem = XEXP (SET_SRC (x), 1);
+ if (GET_CODE (tem) == LABEL_REF)
+ set_label_offsets (XEXP (tem, 0), insn, initial_p);
+ else if (GET_CODE (tem) != PC && GET_CODE (tem) != RETURN)
+ break;
+
+ tem = XEXP (SET_SRC (x), 2);
+ if (GET_CODE (tem) == LABEL_REF)
+ set_label_offsets (XEXP (tem, 0), insn, initial_p);
+ else if (GET_CODE (tem) != PC && GET_CODE (tem) != RETURN)
+ break;
+ return;
+ }
+
+ /* If we reach here, all eliminations must be at their initial
+ offset because we are doing a jump to a variable address. */
+ for (p = reg_eliminate; p < &reg_eliminate[NUM_ELIMINABLE_REGS]; p++)
+ if (p->offset != p->initial_offset)
+ p->can_eliminate = 0;
+ }
+}
+
+/* Used for communication between the next two functions to properly share
+ the vector for an ASM_OPERANDS. */
+
+static struct rtvec_def *old_asm_operands_vec, *new_asm_operands_vec;
+
+/* Scan X and replace any eliminable registers (such as fp) with a
+ replacement (such as sp), plus an offset.
+
+ MEM_MODE is the mode of an enclosing MEM. We need this to know how
+ much to adjust a register for, e.g., PRE_DEC. Also, if we are inside a
+ MEM, we are allowed to replace a sum of a register and the constant zero
+ with the register, which we cannot do outside a MEM. In addition, we need
+ to record the fact that a register is referenced outside a MEM.
+
+ If INSN is an insn, it is the insn containing X. If we replace a REG
+ in a SET_DEST with an equivalent MEM and INSN is non-zero, write a
+   CLOBBER of the pseudo after INSN so find_equiv_regs will know
+   that the REG is being modified.
+
+ Alternatively, INSN may be a note (an EXPR_LIST or INSN_LIST).
+ That's used when we eliminate in expressions stored in notes.
+ This means, do not set ref_outside_mem even if the reference
+ is outside of MEMs.
+
+ If we see a modification to a register we know about, take the
+ appropriate action (see case SET, below).
+
+   REG_EQUIV_MEM and REG_EQUIV_ADDRESS contain addresses that have had
+ replacements done assuming all offsets are at their initial values. If
+ they are not, or if REG_EQUIV_ADDRESS is nonzero for a pseudo we
+ encounter, return the actual location so that find_reloads will do
+ the proper thing. */
+
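+/* A sketch of the effect (assuming the frame pointer eliminates to the
+   stack pointer at offset 16): a bare (reg fp) is rewritten here as
+   (plus (reg sp) (const_int 16)), and (mem (reg fp)) as
+   (mem (plus (reg sp) (const_int 16))). */
+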
+rtx
+eliminate_regs (x, mem_mode, insn)
+ rtx x;
+ enum machine_mode mem_mode;
+ rtx insn;
+{
+ enum rtx_code code = GET_CODE (x);
+ struct elim_table *ep;
+ int regno;
+ rtx new;
+ int i, j;
+ char *fmt;
+ int copied = 0;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case RETURN:
+ return x;
+
+ case REG:
+ regno = REGNO (x);
+
+ /* First handle the case where we encounter a bare register that
+ is eliminable. Replace it with a PLUS. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
+ ep++)
+ if (ep->from_rtx == x && ep->can_eliminate)
+ {
+ if (! mem_mode
+ /* Refs inside notes don't count for this purpose. */
+ && ! (insn != 0 && (GET_CODE (insn) == EXPR_LIST
+ || GET_CODE (insn) == INSN_LIST)))
+ ep->ref_outside_mem = 1;
+ return plus_constant (ep->to_rtx, ep->previous_offset);
+ }
+
+ }
+ else if (reg_equiv_memory_loc && reg_equiv_memory_loc[regno]
+ && (reg_equiv_address[regno] || num_not_at_initial_offset))
+ {
+ /* In this case, find_reloads would attempt to either use an
+ incorrect address (if something is not at its initial offset)
+	     or substitute a replaced address into an insn (which loses
+ if the offset is changed by some later action). So we simply
+ return the replaced stack slot (assuming it is changed by
+ elimination) and ignore the fact that this is actually a
+ reference to the pseudo. Ensure we make a copy of the
+ address in case it is shared. */
+ new = eliminate_regs (reg_equiv_memory_loc[regno],
+ mem_mode, insn);
+ if (new != reg_equiv_memory_loc[regno])
+ {
+ cannot_omit_stores[regno] = 1;
+ return copy_rtx (new);
+ }
+ }
+ return x;
+
+ case PLUS:
+ /* If this is the sum of an eliminable register and a constant, rework
+ the sum. */
+ if (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
+ && CONSTANT_P (XEXP (x, 1)))
+ {
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
+ ep++)
+ if (ep->from_rtx == XEXP (x, 0) && ep->can_eliminate)
+ {
+ if (! mem_mode
+ /* Refs inside notes don't count for this purpose. */
+ && ! (insn != 0 && (GET_CODE (insn) == EXPR_LIST
+ || GET_CODE (insn) == INSN_LIST)))
+ ep->ref_outside_mem = 1;
+
+ /* The only time we want to replace a PLUS with a REG (this
+ occurs when the constant operand of the PLUS is the negative
+ of the offset) is when we are inside a MEM. We won't want
+ to do so at other times because that would change the
+ structure of the insn in a way that reload can't handle.
+ We special-case the commonest situation in
+ eliminate_regs_in_insn, so just replace a PLUS with a
+ PLUS here, unless inside a MEM. */
+ if (mem_mode != 0 && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == - ep->previous_offset)
+ return ep->to_rtx;
+ else
+ return gen_rtx (PLUS, Pmode, ep->to_rtx,
+ plus_constant (XEXP (x, 1),
+ ep->previous_offset));
+ }
+
+ /* If the register is not eliminable, we are done since the other
+ operand is a constant. */
+ return x;
+ }
+
+ /* If this is part of an address, we want to bring any constant to the
+ outermost PLUS. We will do this by doing register replacement in
+ our operands and seeing if a constant shows up in one of them.
+
+ We assume here this is part of an address (or a "load address" insn)
+ since an eliminable register is not likely to appear in any other
+ context.
+
+ If we have (plus (eliminable) (reg)), we want to produce
+     (plus (plus (replacement) (reg)) (const)). If this was part of a
+ normal add insn, (plus (replacement) (reg)) will be pushed as a
+ reload. This is the desired action. */
+
+ {
+ rtx new0 = eliminate_regs (XEXP (x, 0), mem_mode, insn);
+ rtx new1 = eliminate_regs (XEXP (x, 1), mem_mode, insn);
+
+ if (new0 != XEXP (x, 0) || new1 != XEXP (x, 1))
+ {
+ /* If one side is a PLUS and the other side is a pseudo that
+ didn't get a hard register but has a reg_equiv_constant,
+ we must replace the constant here since it may no longer
+ be in the position of any operand. */
+ if (GET_CODE (new0) == PLUS && GET_CODE (new1) == REG
+ && REGNO (new1) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[REGNO (new1)] < 0
+ && reg_equiv_constant != 0
+ && reg_equiv_constant[REGNO (new1)] != 0)
+ new1 = reg_equiv_constant[REGNO (new1)];
+ else if (GET_CODE (new1) == PLUS && GET_CODE (new0) == REG
+ && REGNO (new0) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[REGNO (new0)] < 0
+ && reg_equiv_constant[REGNO (new0)] != 0)
+ new0 = reg_equiv_constant[REGNO (new0)];
+
+ new = form_sum (new0, new1);
+
+ /* As above, if we are not inside a MEM we do not want to
+ turn a PLUS into something else. We might try to do so here
+ for an addition of 0 if we aren't optimizing. */
+ if (! mem_mode && GET_CODE (new) != PLUS)
+ return gen_rtx (PLUS, GET_MODE (x), new, const0_rtx);
+ else
+ return new;
+ }
+ }
+ return x;
+
+ case MULT:
+ /* If this is the product of an eliminable register and a
+ constant, apply the distribute law and move the constant out
+ so that we have (plus (mult ..) ..). This is needed in order
+	 to keep load-address insns valid. This case is pathological.
+ We ignore the possibility of overflow here. */
+ if (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
+ ep++)
+ if (ep->from_rtx == XEXP (x, 0) && ep->can_eliminate)
+ {
+ if (! mem_mode
+ /* Refs inside notes don't count for this purpose. */
+ && ! (insn != 0 && (GET_CODE (insn) == EXPR_LIST
+ || GET_CODE (insn) == INSN_LIST)))
+ ep->ref_outside_mem = 1;
+
+ return
+ plus_constant (gen_rtx (MULT, Pmode, ep->to_rtx, XEXP (x, 1)),
+ ep->previous_offset * INTVAL (XEXP (x, 1)));
+ }
+
+ /* ... fall through ... */
+
+ case CALL:
+ case COMPARE:
+ case MINUS:
+ case DIV: case UDIV:
+ case MOD: case UMOD:
+ case AND: case IOR: case XOR:
+ case ROTATERT: case ROTATE:
+ case ASHIFTRT: case LSHIFTRT: case ASHIFT:
+ case NE: case EQ:
+ case GE: case GT: case GEU: case GTU:
+ case LE: case LT: case LEU: case LTU:
+ {
+ rtx new0 = eliminate_regs (XEXP (x, 0), mem_mode, insn);
+ rtx new1
+ = XEXP (x, 1) ? eliminate_regs (XEXP (x, 1), mem_mode, insn) : 0;
+
+ if (new0 != XEXP (x, 0) || new1 != XEXP (x, 1))
+ return gen_rtx (code, GET_MODE (x), new0, new1);
+ }
+ return x;
+
+ case EXPR_LIST:
+ /* If we have something in XEXP (x, 0), the usual case, eliminate it. */
+ if (XEXP (x, 0))
+ {
+ new = eliminate_regs (XEXP (x, 0), mem_mode, insn);
+ if (new != XEXP (x, 0))
+ x = gen_rtx (EXPR_LIST, REG_NOTE_KIND (x), new, XEXP (x, 1));
+ }
+
+ /* ... fall through ... */
+
+ case INSN_LIST:
+ /* Now do eliminations in the rest of the chain. If this was
+ an EXPR_LIST, this might result in allocating more memory than is
+ strictly needed, but it simplifies the code. */
+ if (XEXP (x, 1))
+ {
+ new = eliminate_regs (XEXP (x, 1), mem_mode, insn);
+ if (new != XEXP (x, 1))
+ return gen_rtx (GET_CODE (x), GET_MODE (x), XEXP (x, 0), new);
+ }
+ return x;
+
+ case PRE_INC:
+ case POST_INC:
+ case PRE_DEC:
+ case POST_DEC:
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ if (ep->to_rtx == XEXP (x, 0))
+ {
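+	    /* An auto-increment or decrement of the replacement register
+	       changes the elimination offset by the size of the access,
+	       so track it here. */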
+ int size = GET_MODE_SIZE (mem_mode);
+
+ /* If more bytes than MEM_MODE are pushed, account for them. */
+#ifdef PUSH_ROUNDING
+ if (ep->to_rtx == stack_pointer_rtx)
+ size = PUSH_ROUNDING (size);
+#endif
+ if (code == PRE_DEC || code == POST_DEC)
+ ep->offset += size;
+ else
+ ep->offset -= size;
+ }
+
+ /* Fall through to generic unary operation case. */
+ case USE:
+ case STRICT_LOW_PART:
+ case NEG: case NOT:
+ case SIGN_EXTEND: case ZERO_EXTEND:
+ case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE:
+ case FLOAT: case FIX:
+ case UNSIGNED_FIX: case UNSIGNED_FLOAT:
+ case ABS:
+ case SQRT:
+ case FFS:
+ new = eliminate_regs (XEXP (x, 0), mem_mode, insn);
+ if (new != XEXP (x, 0))
+ return gen_rtx (code, GET_MODE (x), new);
+ return x;
+
+ case SUBREG:
+ /* Similar to above processing, but preserve SUBREG_WORD.
+ Convert (subreg (mem)) to (mem) if not paradoxical.
+ Also, if we have a non-paradoxical (subreg (pseudo)) and the
+ pseudo didn't get a hard reg, we must replace this with the
+ eliminated version of the memory location because push_reloads
+ may do the replacement in certain circumstances. */
+ if (GET_CODE (SUBREG_REG (x)) == REG
+ && (GET_MODE_SIZE (GET_MODE (x))
+ <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ && reg_equiv_memory_loc != 0
+ && reg_equiv_memory_loc[REGNO (SUBREG_REG (x))] != 0)
+ {
+ new = eliminate_regs (reg_equiv_memory_loc[REGNO (SUBREG_REG (x))],
+ mem_mode, insn);
+
+ /* If we didn't change anything, we must retain the pseudo. */
+ if (new == reg_equiv_memory_loc[REGNO (SUBREG_REG (x))])
+ new = XEXP (x, 0);
+ else
+ /* Otherwise, ensure NEW isn't shared in case we have to reload
+ it. */
+ new = copy_rtx (new);
+ }
+ else
+ new = eliminate_regs (SUBREG_REG (x), mem_mode, insn);
+
+ if (new != XEXP (x, 0))
+ {
+ if (GET_CODE (new) == MEM
+ && (GET_MODE_SIZE (GET_MODE (x))
+ <= GET_MODE_SIZE (GET_MODE (new)))
+#ifdef LOAD_EXTEND_OP
+ /* On these machines we will be reloading what is
+ inside the SUBREG if it originally was a pseudo and
+ the inner and outer modes are both a word or
+ smaller. So leave the SUBREG then. */
+ && ! (GET_CODE (SUBREG_REG (x)) == REG
+ && GET_MODE_SIZE (GET_MODE (x)) <= UNITS_PER_WORD
+ && GET_MODE_SIZE (GET_MODE (new)) <= UNITS_PER_WORD)
+#endif
+ )
+ {
+ int offset = SUBREG_WORD (x) * UNITS_PER_WORD;
+ enum machine_mode mode = GET_MODE (x);
+
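+	      /* On big-endian targets the narrower value sits at the
+		 high end of the word, so bias the byte offset
+		 accordingly. */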
+#if BYTES_BIG_ENDIAN
+ offset += (MIN (UNITS_PER_WORD,
+ GET_MODE_SIZE (GET_MODE (new)))
+ - MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode)));
+#endif
+
+ PUT_MODE (new, mode);
+ XEXP (new, 0) = plus_constant (XEXP (new, 0), offset);
+ return new;
+ }
+ else
+ return gen_rtx (SUBREG, GET_MODE (x), new, SUBREG_WORD (x));
+ }
+
+ return x;
+
+ case CLOBBER:
+ /* If clobbering a register that is the replacement register for an
+ elimination we still think can be performed, note that it cannot
+ be performed. Otherwise, we need not be concerned about it. */
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ if (ep->to_rtx == XEXP (x, 0))
+ ep->can_eliminate = 0;
+
+ new = eliminate_regs (XEXP (x, 0), mem_mode, insn);
+ if (new != XEXP (x, 0))
+ return gen_rtx (code, GET_MODE (x), new);
+ return x;
+
+ case ASM_OPERANDS:
+ {
+ rtx *temp_vec;
+ /* Properly handle sharing input and constraint vectors. */
+ if (ASM_OPERANDS_INPUT_VEC (x) != old_asm_operands_vec)
+ {
+ /* When we come to a new vector not seen before,
+ scan all its elements; keep the old vector if none
+ of them changes; otherwise, make a copy. */
+ old_asm_operands_vec = ASM_OPERANDS_INPUT_VEC (x);
+ temp_vec = (rtx *) alloca (XVECLEN (x, 3) * sizeof (rtx));
+ for (i = 0; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
+ temp_vec[i] = eliminate_regs (ASM_OPERANDS_INPUT (x, i),
+ mem_mode, insn);
+
+ for (i = 0; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
+ if (temp_vec[i] != ASM_OPERANDS_INPUT (x, i))
+ break;
+
+ if (i == ASM_OPERANDS_INPUT_LENGTH (x))
+ new_asm_operands_vec = old_asm_operands_vec;
+ else
+ new_asm_operands_vec
+ = gen_rtvec_v (ASM_OPERANDS_INPUT_LENGTH (x), temp_vec);
+ }
+
+ /* If we had to copy the vector, copy the entire ASM_OPERANDS. */
+ if (new_asm_operands_vec == old_asm_operands_vec)
+ return x;
+
+ new = gen_rtx (ASM_OPERANDS, VOIDmode, ASM_OPERANDS_TEMPLATE (x),
+ ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
+ ASM_OPERANDS_OUTPUT_IDX (x), new_asm_operands_vec,
+ ASM_OPERANDS_INPUT_CONSTRAINT_VEC (x),
+ ASM_OPERANDS_SOURCE_FILE (x),
+ ASM_OPERANDS_SOURCE_LINE (x));
+ new->volatil = x->volatil;
+ return new;
+ }
+
+ case SET:
+ /* Check for setting a register that we know about. */
+ if (GET_CODE (SET_DEST (x)) == REG)
+ {
+ /* See if this is setting the replacement register for an
+ elimination.
+
+ If DEST is the hard frame pointer, we do nothing because we
+ assume that all assignments to the frame pointer are for
+ non-local gotos and are being done at a time when they are valid
+ and do not disturb anything else. Some machines want to
+ eliminate a fake argument pointer (or even a fake frame pointer)
+ with either the real frame or the stack pointer. Assignments to
+ the hard frame pointer must not prevent this elimination. */
+
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
+ ep++)
+ if (ep->to_rtx == SET_DEST (x)
+ && SET_DEST (x) != hard_frame_pointer_rtx)
+ {
+ /* If it is being incremented, adjust the offset. Otherwise,
+ this elimination can't be done. */
+ rtx src = SET_SRC (x);
+
+ if (GET_CODE (src) == PLUS
+ && XEXP (src, 0) == SET_DEST (x)
+ && GET_CODE (XEXP (src, 1)) == CONST_INT)
+ ep->offset -= INTVAL (XEXP (src, 1));
+ else
+ ep->can_eliminate = 0;
+ }
+
+	  /* Now check to see if we are assigning to a register that can be
+ eliminated. If so, it must be as part of a PARALLEL, since we
+ will not have been called if this is a single SET. So indicate
+ that we can no longer eliminate this reg. */
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
+ ep++)
+ if (ep->from_rtx == SET_DEST (x) && ep->can_eliminate)
+ ep->can_eliminate = 0;
+ }
+
+ /* Now avoid the loop below in this common case. */
+ {
+ rtx new0 = eliminate_regs (SET_DEST (x), 0, insn);
+ rtx new1 = eliminate_regs (SET_SRC (x), 0, insn);
+
+ /* If SET_DEST changed from a REG to a MEM and INSN is an insn,
+ write a CLOBBER insn. */
+ if (GET_CODE (SET_DEST (x)) == REG && GET_CODE (new0) == MEM
+ && insn != 0 && GET_CODE (insn) != EXPR_LIST
+ && GET_CODE (insn) != INSN_LIST)
+ emit_insn_after (gen_rtx (CLOBBER, VOIDmode, SET_DEST (x)), insn);
+
+ if (new0 != SET_DEST (x) || new1 != SET_SRC (x))
+ return gen_rtx (SET, VOIDmode, new0, new1);
+ }
+
+ return x;
+
+ case MEM:
+ /* Our only special processing is to pass the mode of the MEM to our
+ recursive call and copy the flags. While we are here, handle this
+ case more efficiently. */
+ new = eliminate_regs (XEXP (x, 0), GET_MODE (x), insn);
+ if (new != XEXP (x, 0))
+ {
+ new = gen_rtx (MEM, GET_MODE (x), new);
+ new->volatil = x->volatil;
+ new->unchanging = x->unchanging;
+ new->in_struct = x->in_struct;
+ return new;
+ }
+ else
+ return x;
+ }
+
+ /* Process each of our operands recursively. If any have changed, make a
+ copy of the rtx. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++)
+ {
+ if (*fmt == 'e')
+ {
+ new = eliminate_regs (XEXP (x, i), mem_mode, insn);
+ if (new != XEXP (x, i) && ! copied)
+ {
+ rtx new_x = rtx_alloc (code);
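+	      /* Copy the rtx header and every operand slot; `fld' is a
+		 variable-length trailing array, so the size is computed
+		 from the number of operands of this rtx code.  */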
+ bcopy ((char *) x, (char *) new_x,
+ (sizeof (*new_x) - sizeof (new_x->fld)
+ + sizeof (new_x->fld[0]) * GET_RTX_LENGTH (code)));
+ x = new_x;
+ copied = 1;
+ }
+ XEXP (x, i) = new;
+ }
+ else if (*fmt == 'E')
+ {
+ int copied_vec = 0;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ new = eliminate_regs (XVECEXP (x, i, j), mem_mode, insn);
+ if (new != XVECEXP (x, i, j) && ! copied_vec)
+ {
+ rtvec new_v = gen_rtvec_v (XVECLEN (x, i),
+ &XVECEXP (x, i, 0));
+ if (! copied)
+ {
+ rtx new_x = rtx_alloc (code);
+ bcopy ((char *) x, (char *) new_x,
+ (sizeof (*new_x) - sizeof (new_x->fld)
+ + (sizeof (new_x->fld[0])
+ * GET_RTX_LENGTH (code))));
+ x = new_x;
+ copied = 1;
+ }
+ XVEC (x, i) = new_v;
+ copied_vec = 1;
+ }
+ XVECEXP (x, i, j) = new;
+ }
+ }
+ }
+
+ return x;
+}
+
+/* Scan INSN and eliminate all eliminable registers in it.
+
+ If REPLACE is nonzero, do the replacement destructively. Also
+   delete the insn as dead if it is setting an eliminable register.
+
+ If REPLACE is zero, do all our allocations in reload_obstack.
+
+ If no eliminations were done and this insn doesn't require any elimination
+ processing (these are not identical conditions: it might be updating sp,
+ but not referencing fp; this needs to be seen during reload_as_needed so
+ that the offset between fp and sp can be taken into consideration), zero
+ is returned. Otherwise, 1 is returned. */
+
+static int
+eliminate_regs_in_insn (insn, replace)
+ rtx insn;
+ int replace;
+{
+ rtx old_body = PATTERN (insn);
+ rtx old_set = single_set (insn);
+ rtx new_body;
+ int val = 0;
+ struct elim_table *ep;
+
+ if (! replace)
+ push_obstacks (&reload_obstack, &reload_obstack);
+
+ if (old_set != 0 && GET_CODE (SET_DEST (old_set)) == REG
+ && REGNO (SET_DEST (old_set)) < FIRST_PSEUDO_REGISTER)
+ {
+ /* Check for setting an eliminable register. */
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ if (ep->from_rtx == SET_DEST (old_set) && ep->can_eliminate)
+ {
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ /* If this is setting the frame pointer register to the
+ hardware frame pointer register and this is an elimination
+ that will be done (tested above), this insn is really
+ adjusting the frame pointer downward to compensate for
+ the adjustment done before a nonlocal goto. */
+ if (ep->from == FRAME_POINTER_REGNUM
+ && ep->to == HARD_FRAME_POINTER_REGNUM)
+ {
+ rtx src = SET_SRC (old_set);
+ int offset, ok = 0;
+
+ if (src == ep->to_rtx)
+ offset = 0, ok = 1;
+ else if (GET_CODE (src) == PLUS
+ && GET_CODE (XEXP (src, 0)) == CONST_INT)
+ offset = INTVAL (XEXP (src, 0)), ok = 1;
+
+ if (ok)
+ {
+ if (replace)
+ {
+ rtx src
+ = plus_constant (ep->to_rtx, offset - ep->offset);
+
+ /* First see if this insn remains valid when we
+ make the change. If not, keep the INSN_CODE
+			     the same and let reload fix it up.  */
+ validate_change (insn, &SET_SRC (old_set), src, 1);
+ validate_change (insn, &SET_DEST (old_set),
+ ep->to_rtx, 1);
+ if (! apply_change_group ())
+ {
+ SET_SRC (old_set) = src;
+ SET_DEST (old_set) = ep->to_rtx;
+ }
+ }
+
+ val = 1;
+ goto done;
+ }
+ }
+#endif
+
+ /* In this case this insn isn't serving a useful purpose. We
+ will delete it in reload_as_needed once we know that this
+ elimination is, in fact, being done.
+
+	       If REPLACE isn't set, we can't delete this insn, but needn't
+ process it since it won't be used unless something changes. */
+ if (replace)
+ delete_dead_insn (insn);
+ val = 1;
+ goto done;
+ }
+
+ /* Check for (set (reg) (plus (reg from) (offset))) where the offset
+ in the insn is the negative of the offset in FROM. Substitute
+ (set (reg) (reg to)) for the insn and change its code.
+
+	 We have to do this here, rather than in eliminate_regs, so that we can
+ change the insn code. */
+
+ if (GET_CODE (SET_SRC (old_set)) == PLUS
+ && GET_CODE (XEXP (SET_SRC (old_set), 0)) == REG
+ && GET_CODE (XEXP (SET_SRC (old_set), 1)) == CONST_INT)
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS];
+ ep++)
+ if (ep->from_rtx == XEXP (SET_SRC (old_set), 0)
+ && ep->can_eliminate)
+ {
+ /* We must stop at the first elimination that will be used.
+ If this one would replace the PLUS with a REG, do it
+ now. Otherwise, quit the loop and let eliminate_regs
+ do its normal replacement. */
+ if (ep->offset == - INTVAL (XEXP (SET_SRC (old_set), 1)))
+ {
+ /* We assume here that we don't need a PARALLEL of
+ any CLOBBERs for this assignment. There's not
+ much we can do if we do need it. */
+ PATTERN (insn) = gen_rtx (SET, VOIDmode,
+ SET_DEST (old_set), ep->to_rtx);
+ INSN_CODE (insn) = -1;
+ val = 1;
+ goto done;
+ }
+
+ break;
+ }
+ }
+
+ old_asm_operands_vec = 0;
+
+ /* Replace the body of this insn with a substituted form. If we changed
+ something, return non-zero.
+
+ If we are replacing a body that was a (set X (plus Y Z)), try to
+ re-recognize the insn. We do this in case we had a simple addition
+ but now can do this as a load-address. This saves an insn in this
+ common case. */
+
+ new_body = eliminate_regs (old_body, 0, replace ? insn : NULL_RTX);
+ if (new_body != old_body)
+ {
+ /* If we aren't replacing things permanently and we changed something,
+ make another copy to ensure that all the RTL is new. Otherwise
+	 things can go wrong if find_reloads swaps commutative operands
+ and one is inside RTL that has been copied while the other is not. */
+
+ /* Don't copy an asm_operands because (1) there's no need and (2)
+ copy_rtx can't do it properly when there are multiple outputs. */
+ if (! replace && asm_noperands (old_body) < 0)
+ new_body = copy_rtx (new_body);
+
+ /* If we had a move insn but now we don't, rerecognize it. This will
+ cause spurious re-recognition if the old move had a PARALLEL since
+ the new one still will, but we can't call single_set without
+ having put NEW_BODY into the insn and the re-recognition won't
+ hurt in this rare case. */
+ if (old_set != 0
+ && ((GET_CODE (SET_SRC (old_set)) == REG
+ && (GET_CODE (new_body) != SET
+ || GET_CODE (SET_SRC (new_body)) != REG))
+ /* If this was a load from or store to memory, compare
+ the MEM in recog_operand to the one in the insn. If they
+ are not equal, then rerecognize the insn. */
+ || (old_set != 0
+ && ((GET_CODE (SET_SRC (old_set)) == MEM
+ && SET_SRC (old_set) != recog_operand[1])
+ || (GET_CODE (SET_DEST (old_set)) == MEM
+ && SET_DEST (old_set) != recog_operand[0])))
+ /* If this was an add insn before, rerecognize. */
+ || GET_CODE (SET_SRC (old_set)) == PLUS))
+ {
+ if (! validate_change (insn, &PATTERN (insn), new_body, 0))
+ /* If recognition fails, store the new body anyway.
+ It's normal to have recognition failures here
+ due to bizarre memory addresses; reloading will fix them. */
+ PATTERN (insn) = new_body;
+ }
+ else
+ PATTERN (insn) = new_body;
+
+ val = 1;
+ }
+
+ /* Loop through all elimination pairs. See if any have changed and
+ recalculate the number not at initial offset.
+
+ Compute the maximum offset (minimum offset if the stack does not
+ grow downward) for each elimination pair.
+
+     We also detect a case where register elimination cannot be done,
+ namely, if a register would be both changed and referenced outside a MEM
+ in the resulting insn since such an insn is often undefined and, even if
+ not, we cannot know what meaning will be given to it. Note that it is
+ valid to have a register used in an address in an insn that changes it
+ (presumably with a pre- or post-increment or decrement).
+
+ If anything changes, return nonzero. */
+
+ num_not_at_initial_offset = 0;
+ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
+ {
+ if (ep->previous_offset != ep->offset && ep->ref_outside_mem)
+ ep->can_eliminate = 0;
+
+ ep->ref_outside_mem = 0;
+
+ if (ep->previous_offset != ep->offset)
+ val = 1;
+
+ ep->previous_offset = ep->offset;
+ if (ep->can_eliminate && ep->offset != ep->initial_offset)
+ num_not_at_initial_offset++;
+
+#ifdef STACK_GROWS_DOWNWARD
+ ep->max_offset = MAX (ep->max_offset, ep->offset);
+#else
+ ep->max_offset = MIN (ep->max_offset, ep->offset);
+#endif
+ }
+
+ done:
+  /* If we changed something, perform elimination in REG_NOTES.  This is
+ needed even when REPLACE is zero because a REG_DEAD note might refer
+ to a register that we eliminate and could cause a different number
+ of spill registers to be needed in the final reload pass than in
+ the pre-passes. */
+ if (val && REG_NOTES (insn) != 0)
+ REG_NOTES (insn) = eliminate_regs (REG_NOTES (insn), 0, REG_NOTES (insn));
+
+ if (! replace)
+ pop_obstacks ();
+
+ return val;
+}
+
+/* Given X, a SET or CLOBBER of DEST, if DEST is the target of a register
+ replacement we currently believe is valid, mark it as not eliminable if X
+ modifies DEST in any way other than by adding a constant integer to it.
+
+   If DEST is the hard frame pointer, we do nothing because we assume that
+ all assignments to the hard frame pointer are nonlocal gotos and are being
+ done at a time when they are valid and do not disturb anything else.
+ Some machines want to eliminate a fake argument pointer with either the
+ frame or stack pointer. Assignments to the hard frame pointer must not
+ prevent this elimination.
+
+ Called via note_stores from reload before starting its passes to scan
+ the insns of the function. */
+
+static void
+mark_not_eliminable (dest, x)
+ rtx dest;
+ rtx x;
+{
+ register int i;
+
+ /* A SUBREG of a hard register here is just changing its mode. We should
+ not see a SUBREG of an eliminable hard register, but check just in
+ case. */
+ if (GET_CODE (dest) == SUBREG)
+ dest = SUBREG_REG (dest);
+
+ if (dest == hard_frame_pointer_rtx)
+ return;
+
+ for (i = 0; i < NUM_ELIMINABLE_REGS; i++)
+ if (reg_eliminate[i].can_eliminate && dest == reg_eliminate[i].to_rtx
+ && (GET_CODE (x) != SET
+ || GET_CODE (SET_SRC (x)) != PLUS
+ || XEXP (SET_SRC (x), 0) != dest
+ || GET_CODE (XEXP (SET_SRC (x), 1)) != CONST_INT))
+ {
+ reg_eliminate[i].can_eliminate_previous
+ = reg_eliminate[i].can_eliminate = 0;
+ num_eliminable--;
+ }
+}
+
+/* Kick all pseudos out of hard register REGNO.
+ If GLOBAL is nonzero, try to find someplace else to put them.
+ If DUMPFILE is nonzero, log actions taken on that file.
+
+ If CANT_ELIMINATE is nonzero, it means that we are doing this spill
+   because we found we can't eliminate some register.  In that case, no pseudos
+ are allowed to be in the register, even if they are only in a block that
+ doesn't require spill registers, unlike the case when we are spilling this
+ hard reg to produce another spill register.
+
+ Return nonzero if any pseudos needed to be kicked out. */
+
+static int
+spill_hard_reg (regno, global, dumpfile, cant_eliminate)
+ register int regno;
+ int global;
+ FILE *dumpfile;
+ int cant_eliminate;
+{
+ enum reg_class class = REGNO_REG_CLASS (regno);
+ int something_changed = 0;
+ register int i;
+
+ SET_HARD_REG_BIT (forbidden_regs, regno);
+
+ if (cant_eliminate)
+ regs_ever_live[regno] = 1;
+
+ /* Spill every pseudo reg that was allocated to this reg
+ or to something that overlaps this reg. */
+
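+  /* Pseudo I overlaps hard reg REGNO when its first hard reg is at or
+     below REGNO and its last one (first + nregs - 1) is at or above
+     it.  */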
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ if (reg_renumber[i] >= 0
+ && reg_renumber[i] <= regno
+ && (reg_renumber[i]
+ + HARD_REGNO_NREGS (reg_renumber[i],
+ PSEUDO_REGNO_MODE (i))
+ > regno))
+ {
+ /* If this register belongs solely to a basic block which needed no
+ spilling of any class that this register is contained in,
+ leave it be, unless we are spilling this register because
+ it was a hard register that can't be eliminated. */
+
+ if (! cant_eliminate
+ && basic_block_needs[0]
+ && reg_basic_block[i] >= 0
+ && basic_block_needs[(int) class][reg_basic_block[i]] == 0)
+ {
+ enum reg_class *p;
+
+ for (p = reg_class_superclasses[(int) class];
+ *p != LIM_REG_CLASSES; p++)
+ if (basic_block_needs[(int) *p][reg_basic_block[i]] > 0)
+ break;
+
+ if (*p == LIM_REG_CLASSES)
+ continue;
+ }
+
+ /* Mark it as no longer having a hard register home. */
+ reg_renumber[i] = -1;
+ /* We will need to scan everything again. */
+ something_changed = 1;
+ if (global)
+ retry_global_alloc (i, forbidden_regs);
+
+ alter_reg (i, regno);
+ if (dumpfile)
+ {
+ if (reg_renumber[i] == -1)
+ fprintf (dumpfile, " Register %d now on stack.\n\n", i);
+ else
+ fprintf (dumpfile, " Register %d now in %d.\n\n",
+ i, reg_renumber[i]);
+ }
+ }
+ for (i = 0; i < scratch_list_length; i++)
+ {
+ if (scratch_list[i] && REGNO (scratch_list[i]) == regno)
+ {
+ if (! cant_eliminate && basic_block_needs[0]
+ && ! basic_block_needs[(int) class][scratch_block[i]])
+ {
+ enum reg_class *p;
+
+ for (p = reg_class_superclasses[(int) class];
+ *p != LIM_REG_CLASSES; p++)
+ if (basic_block_needs[(int) *p][scratch_block[i]] > 0)
+ break;
+
+ if (*p == LIM_REG_CLASSES)
+ continue;
+ }
+ PUT_CODE (scratch_list[i], SCRATCH);
+ scratch_list[i] = 0;
+ something_changed = 1;
+ continue;
+ }
+ }
+
+ return something_changed;
+}
+
+/* Find all paradoxical subregs within X and update reg_max_ref_width.
+ Also mark any hard registers used to store user variables as
+ forbidden from being used for spill registers. */
+
+static void
+scan_paradoxical_subregs (x)
+ register rtx x;
+{
+ register int i;
+ register char *fmt;
+ register enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+#ifdef SMALL_REGISTER_CLASSES
+ if (REGNO (x) < FIRST_PSEUDO_REGISTER && REG_USERVAR_P (x))
+ SET_HARD_REG_BIT (forbidden_regs, REGNO (x));
+#endif
+ return;
+
+ case CONST_INT:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ case CC0:
+ case PC:
+ case USE:
+ case CLOBBER:
+ return;
+
+ case SUBREG:
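+      /* A paradoxical SUBREG is one whose mode is wider than that of
+	 the register inside it; record the widest such reference.  */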
+ if (GET_CODE (SUBREG_REG (x)) == REG
+ && GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
+ reg_max_ref_width[REGNO (SUBREG_REG (x))]
+ = GET_MODE_SIZE (GET_MODE (x));
+ return;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ scan_paradoxical_subregs (XEXP (x, i));
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ scan_paradoxical_subregs (XVECEXP (x, i, j));
+ }
+ }
+}
+
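+/* Comparison function for qsort: order hard registers by increasing
+   use count, breaking ties by register number so that the result does
+   not depend on the qsort implementation.  */
+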
+static int
+hard_reg_use_compare (p1, p2)
+ struct hard_reg_n_uses *p1, *p2;
+{
+ int tem = p1->uses - p2->uses;
+ if (tem != 0) return tem;
+ /* If regs are equally good, sort by regno,
+ so that the results of qsort leave nothing to chance. */
+ return p1->regno - p2->regno;
+}
+
+/* Choose the order to consider regs for use as reload registers
+ based on how much trouble would be caused by spilling one.
+ Store them in order of decreasing preference in potential_reload_regs. */
+
+static void
+order_regs_for_reload ()
+{
+ register int i;
+ register int o = 0;
+ int large = 0;
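+  /* LARGE accumulates the total number of pseudo-register references;
+     use counts above this total mark registers that must sort after
+     every normally-used register.  */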
+
+ struct hard_reg_n_uses hard_reg_n_uses[FIRST_PSEUDO_REGISTER];
+
+ CLEAR_HARD_REG_SET (bad_spill_regs);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ potential_reload_regs[i] = -1;
+
+ /* Count number of uses of each hard reg by pseudo regs allocated to it
+ and then order them by decreasing use. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ hard_reg_n_uses[i].uses = 0;
+ hard_reg_n_uses[i].regno = i;
+ }
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ {
+ int regno = reg_renumber[i];
+ if (regno >= 0)
+ {
+ int lim = regno + HARD_REGNO_NREGS (regno, PSEUDO_REGNO_MODE (i));
+ while (regno < lim)
+ hard_reg_n_uses[regno++].uses += reg_n_refs[i];
+ }
+ large += reg_n_refs[i];
+ }
+
+ /* Now fixed registers (which cannot safely be used for reloading)
+ get a very high use count so they will be considered least desirable.
+ Registers used explicitly in the rtl code are almost as bad. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (fixed_regs[i])
+ {
+ hard_reg_n_uses[i].uses += 2 * large + 2;
+ SET_HARD_REG_BIT (bad_spill_regs, i);
+ }
+ else if (regs_explicitly_used[i])
+ {
+ hard_reg_n_uses[i].uses += large + 1;
+#ifndef SMALL_REGISTER_CLASSES
+ /* ??? We are doing this here because of the potential that
+ bad code may be generated if a register explicitly used in
+ an insn was used as a spill register for that insn. But
+	     not using these as spill registers may lose on some machines.
+ We'll have to see how this works out. */
+ SET_HARD_REG_BIT (bad_spill_regs, i);
+#endif
+ }
+ }
+ hard_reg_n_uses[HARD_FRAME_POINTER_REGNUM].uses += 2 * large + 2;
+ SET_HARD_REG_BIT (bad_spill_regs, HARD_FRAME_POINTER_REGNUM);
+
+#ifdef ELIMINABLE_REGS
+ /* If registers other than the frame pointer are eliminable, mark them as
+ poor choices. */
+ for (i = 0; i < NUM_ELIMINABLE_REGS; i++)
+ {
+ hard_reg_n_uses[reg_eliminate[i].from].uses += 2 * large + 2;
+ SET_HARD_REG_BIT (bad_spill_regs, reg_eliminate[i].from);
+ }
+#endif
+
+ /* Prefer registers not so far used, for use in temporary loading.
+ Among them, if REG_ALLOC_ORDER is defined, use that order.
+ Otherwise, prefer registers not preserved by calls. */
+
+#ifdef REG_ALLOC_ORDER
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ int regno = reg_alloc_order[i];
+
+ if (hard_reg_n_uses[regno].uses == 0)
+ potential_reload_regs[o++] = regno;
+ }
+#else
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (hard_reg_n_uses[i].uses == 0 && call_used_regs[i])
+ potential_reload_regs[o++] = i;
+ }
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (hard_reg_n_uses[i].uses == 0 && ! call_used_regs[i])
+ potential_reload_regs[o++] = i;
+ }
+#endif
+
+ qsort (hard_reg_n_uses, FIRST_PSEUDO_REGISTER,
+ sizeof hard_reg_n_uses[0], hard_reg_use_compare);
+
+ /* Now add the regs that are already used,
+ preferring those used less often. The fixed and otherwise forbidden
+ registers will be at the end of this list. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (hard_reg_n_uses[i].uses != 0)
+ potential_reload_regs[o++] = hard_reg_n_uses[i].regno;
+}
+
+/* Used in reload_as_needed to sort the spilled regs. */
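+/* Sorting them into ascending order of hard register number makes it
+   easier for allocate_reload_regs to find the consecutive registers
+   that group reloads need.  */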
+static int
+compare_spill_regs (r1, r2)
+ short *r1, *r2;
+{
+ return *r1 < *r2 ? -1: 1;
+}
+
+/* Reload pseudo-registers into hard regs around each insn as needed.
+ Additional register load insns are output before the insn that needs it
+ and perhaps store insns after insns that modify the reloaded pseudo reg.
+
+ reg_last_reload_reg and reg_reloaded_contents keep track of
+ which registers are already available in reload registers.
+ We update these for the reloads that we perform,
+ as the insns are scanned. */
+
+static void
+reload_as_needed (first, live_known)
+ rtx first;
+ int live_known;
+{
+ register rtx insn;
+ register int i;
+ int this_block = 0;
+ rtx x;
+ rtx after_call = 0;
+
+ bzero ((char *) spill_reg_rtx, sizeof spill_reg_rtx);
+ bzero ((char *) spill_reg_store, sizeof spill_reg_store);
+ reg_last_reload_reg = (rtx *) alloca (max_regno * sizeof (rtx));
+ bzero ((char *) reg_last_reload_reg, max_regno * sizeof (rtx));
+ reg_has_output_reload = (char *) alloca (max_regno);
+ for (i = 0; i < n_spills; i++)
+ {
+ reg_reloaded_contents[i] = -1;
+ reg_reloaded_insn[i] = 0;
+ }
+
+ /* Reset all offsets on eliminable registers to their initial values. */
+#ifdef ELIMINABLE_REGS
+ for (i = 0; i < NUM_ELIMINABLE_REGS; i++)
+ {
+ INITIAL_ELIMINATION_OFFSET (reg_eliminate[i].from, reg_eliminate[i].to,
+ reg_eliminate[i].initial_offset);
+ reg_eliminate[i].previous_offset
+ = reg_eliminate[i].offset = reg_eliminate[i].initial_offset;
+ }
+#else
+ INITIAL_FRAME_POINTER_OFFSET (reg_eliminate[0].initial_offset);
+ reg_eliminate[0].previous_offset
+ = reg_eliminate[0].offset = reg_eliminate[0].initial_offset;
+#endif
+
+ num_not_at_initial_offset = 0;
+
+ /* Order the spilled regs, so that allocate_reload_regs can guarantee to
+ pack registers with group needs. */
+ if (n_spills > 1)
+ {
+ qsort (spill_regs, n_spills, sizeof (short), compare_spill_regs);
+ for (i = 0; i < n_spills; i++)
+ spill_reg_order[spill_regs[i]] = i;
+ }
+
+ for (insn = first; insn;)
+ {
+ register rtx next = NEXT_INSN (insn);
+
+ /* Notice when we move to a new basic block. */
+ if (live_known && this_block + 1 < n_basic_blocks
+ && insn == basic_block_head[this_block+1])
+ ++this_block;
+
+ /* If we pass a label, copy the offsets from the label information
+ into the current offsets of each elimination. */
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ num_not_at_initial_offset = 0;
+ for (i = 0; i < NUM_ELIMINABLE_REGS; i++)
+ {
+ reg_eliminate[i].offset = reg_eliminate[i].previous_offset
+ = offsets_at[CODE_LABEL_NUMBER (insn)][i];
+ if (reg_eliminate[i].can_eliminate
+ && (reg_eliminate[i].offset
+ != reg_eliminate[i].initial_offset))
+ num_not_at_initial_offset++;
+ }
+ }
+
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ rtx avoid_return_reg = 0;
+
+#ifdef SMALL_REGISTER_CLASSES
+ /* Set avoid_return_reg if this is an insn
+ that might use the value of a function call. */
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ if (GET_CODE (PATTERN (insn)) == SET)
+ after_call = SET_DEST (PATTERN (insn));
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
+ after_call = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
+ else
+ after_call = 0;
+ }
+ else if (after_call != 0
+ && !(GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == stack_pointer_rtx))
+ {
+ if (reg_referenced_p (after_call, PATTERN (insn)))
+ avoid_return_reg = after_call;
+ after_call = 0;
+ }
+#endif /* SMALL_REGISTER_CLASSES */
+
+	  /* If this is a USE or CLOBBER of a MEM, ensure that any
+ references to eliminable registers have been removed. */
+
+ if ((GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ && GET_CODE (XEXP (PATTERN (insn), 0)) == MEM)
+ XEXP (XEXP (PATTERN (insn), 0), 0)
+ = eliminate_regs (XEXP (XEXP (PATTERN (insn), 0), 0),
+ GET_MODE (XEXP (PATTERN (insn), 0)), NULL_RTX);
+
+ /* If we need to do register elimination processing, do so.
+ This might delete the insn, in which case we are done. */
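+	  /* The first reload pass reuses the insn's mode field as a flag:
+	     QImode marks insns needing elimination processing, and
+	     VOIDmode (tested below) marks insns needing no reloads at
+	     all.  */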
+ if (num_eliminable && GET_MODE (insn) == QImode)
+ {
+ eliminate_regs_in_insn (insn, 1);
+ if (GET_CODE (insn) == NOTE)
+ {
+ insn = next;
+ continue;
+ }
+ }
+
+ if (GET_MODE (insn) == VOIDmode)
+ n_reloads = 0;
+ /* First find the pseudo regs that must be reloaded for this insn.
+ This info is returned in the tables reload_... (see reload.h).
+ Also modify the body of INSN by substituting RELOAD
+ rtx's for those pseudo regs. */
+ else
+ {
+ bzero (reg_has_output_reload, max_regno);
+ CLEAR_HARD_REG_SET (reg_is_output_reload);
+
+ find_reloads (insn, 1, spill_indirect_levels, live_known,
+ spill_reg_order);
+ }
+
+ if (n_reloads > 0)
+ {
+ rtx prev = PREV_INSN (insn), next = NEXT_INSN (insn);
+ rtx p;
+ int class;
+
+ /* If this block has not had spilling done for a
+		 particular class and we have any non-optionals that need a
+ spill reg in that class, abort. */
+
+ for (class = 0; class < N_REG_CLASSES; class++)
+ if (basic_block_needs[class] != 0
+ && basic_block_needs[class][this_block] == 0)
+ for (i = 0; i < n_reloads; i++)
+ if (class == (int) reload_reg_class[i]
+ && reload_reg_rtx[i] == 0
+ && ! reload_optional[i]
+ && (reload_in[i] != 0 || reload_out[i] != 0
+ || reload_secondary_p[i] != 0))
+ abort ();
+
+ /* Now compute which reload regs to reload them into. Perhaps
+ reusing reload regs from previous insns, or else output
+ load insns to reload them. Maybe output store insns too.
+ Record the choices of reload reg in reload_reg_rtx. */
+ choose_reload_regs (insn, avoid_return_reg);
+
+#ifdef SMALL_REGISTER_CLASSES
+ /* Merge any reloads that we didn't combine for fear of
+ increasing the number of spill registers needed but now
+ discover can be safely merged. */
+ merge_assigned_reloads (insn);
+#endif
+
+ /* Generate the insns to reload operands into or out of
+ their reload regs. */
+ emit_reload_insns (insn);
+
+ /* Substitute the chosen reload regs from reload_reg_rtx
+ into the insn's body (or perhaps into the bodies of other
+ load and store insn that we just made for reloading
+ and that we moved the structure into). */
+ subst_reloads ();
+
+ /* If this was an ASM, make sure that all the reload insns
+ we have generated are valid. If not, give an error
+ and delete them. */
+
+ if (asm_noperands (PATTERN (insn)) >= 0)
+ for (p = NEXT_INSN (prev); p != next; p = NEXT_INSN (p))
+ if (p != insn && GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && (recog_memoized (p) < 0
+ || (insn_extract (p),
+ ! constrain_operands (INSN_CODE (p), 1))))
+ {
+ error_for_asm (insn,
+ "`asm' operand requires impossible reload");
+ PUT_CODE (p, NOTE);
+ NOTE_SOURCE_FILE (p) = 0;
+ NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
+ }
+ }
+ /* Any previously reloaded spilled pseudo reg, stored in this insn,
+ is no longer validly lying around to save a future reload.
+ Note that this does not detect pseudos that were reloaded
+	     for this insn in order to be stored into
+ (obeying register constraints). That is correct; such reload
+ registers ARE still valid. */
+ note_stores (PATTERN (insn), forget_old_reloads_1);
+
+ /* There may have been CLOBBER insns placed after INSN. So scan
+ between INSN and NEXT and use them to forget old reloads. */
+ for (x = NEXT_INSN (insn); x != next; x = NEXT_INSN (x))
+ if (GET_CODE (x) == INSN && GET_CODE (PATTERN (x)) == CLOBBER)
+ note_stores (PATTERN (x), forget_old_reloads_1);
+
+#ifdef AUTO_INC_DEC
+ /* Likewise for regs altered by auto-increment in this insn.
+ But note that the reg-notes are not changed by reloading:
+ they still contain the pseudo-regs, not the spill regs. */
+ for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
+ if (REG_NOTE_KIND (x) == REG_INC)
+ {
+ /* See if this pseudo reg was reloaded in this insn.
+ If so, its last-reload info is still valid
+ because it is based on this insn's reload. */
+ for (i = 0; i < n_reloads; i++)
+ if (reload_out[i] == XEXP (x, 0))
+ break;
+
+ if (i == n_reloads)
+ forget_old_reloads_1 (XEXP (x, 0), NULL_RTX);
+ }
+#endif
+ }
+ /* A reload reg's contents are unknown after a label. */
+ if (GET_CODE (insn) == CODE_LABEL)
+ for (i = 0; i < n_spills; i++)
+ {
+ reg_reloaded_contents[i] = -1;
+ reg_reloaded_insn[i] = 0;
+ }
+
+ /* Don't assume a reload reg is still good after a call insn
+ if it is a call-used reg. */
+ else if (GET_CODE (insn) == CALL_INSN)
+ for (i = 0; i < n_spills; i++)
+ if (call_used_regs[spill_regs[i]])
+ {
+ reg_reloaded_contents[i] = -1;
+ reg_reloaded_insn[i] = 0;
+ }
+
+ /* In case registers overlap, allow certain insns to invalidate
+ particular hard registers. */
+
+#ifdef INSN_CLOBBERS_REGNO_P
+      for (i = 0; i < n_spills; i++)
+ if (INSN_CLOBBERS_REGNO_P (insn, spill_regs[i]))
+ {
+ reg_reloaded_contents[i] = -1;
+ reg_reloaded_insn[i] = 0;
+ }
+#endif
+
+ insn = next;
+
+#ifdef USE_C_ALLOCA
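+      /* With the C emulation of alloca, calling alloca (0) reclaims
+	 storage left over from frames that have exited, keeping memory
+	 use bounded while we loop over insns.  */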
+ alloca (0);
+#endif
+ }
+}
+
+/* Discard all record of any value reloaded from X,
+ or reloaded in X from someplace else;
+ unless X is an output reload reg of the current insn.
+
+ X may be a hard reg (the reload reg)
+ or it may be a pseudo reg that was reloaded from. */
+
+static void
+forget_old_reloads_1 (x, ignored)
+ rtx x;
+ rtx ignored;
+{
+ register int regno;
+ int nr;
+ int offset = 0;
+
+ /* note_stores does give us subregs of hard regs. */
+ while (GET_CODE (x) == SUBREG)
+ {
+ offset += SUBREG_WORD (x);
+ x = SUBREG_REG (x);
+ }
+
+ if (GET_CODE (x) != REG)
+ return;
+
+ regno = REGNO (x) + offset;
+
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ nr = 1;
+ else
+ {
+ int i;
+ nr = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ /* Storing into a spilled-reg invalidates its contents.
+ This can happen if a block-local pseudo is allocated to that reg
+ and it wasn't spilled because this block's total need is 0.
+ Then some insn might have an optional reload and use this reg. */
+ for (i = 0; i < nr; i++)
+ if (spill_reg_order[regno + i] >= 0
+ /* But don't do this if the reg actually serves as an output
+ reload reg in the current instruction. */
+ && (n_reloads == 0
+ || ! TEST_HARD_REG_BIT (reg_is_output_reload, regno + i)))
+ {
+ reg_reloaded_contents[spill_reg_order[regno + i]] = -1;
+ reg_reloaded_insn[spill_reg_order[regno + i]] = 0;
+ }
+ }
+
+ /* Since value of X has changed,
+ forget any value previously copied from it. */
+
+ while (nr-- > 0)
+ /* But don't forget a copy if this is the output reload
+ that establishes the copy's validity. */
+ if (n_reloads == 0 || reg_has_output_reload[regno + nr] == 0)
+ reg_last_reload_reg[regno + nr] = 0;
+}
+
+/* For each reload, the mode of the reload register. */
+static enum machine_mode reload_mode[MAX_RELOADS];
+
+/* For each reload, the largest number of registers it will require. */
+static int reload_nregs[MAX_RELOADS];
+
+/* Comparison function for qsort to decide which of two reloads
+ should be handled first. *P1 and *P2 are the reload numbers. */
+
+static int
+reload_reg_class_lower (p1, p2)
+ short *p1, *p2;
+{
+ register int r1 = *p1, r2 = *p2;
+ register int t;
+
+ /* Consider required reloads before optional ones. */
+ t = reload_optional[r1] - reload_optional[r2];
+ if (t != 0)
+ return t;
+
+ /* Count all solitary classes before non-solitary ones. */
+ t = ((reg_class_size[(int) reload_reg_class[r2]] == 1)
+ - (reg_class_size[(int) reload_reg_class[r1]] == 1));
+ if (t != 0)
+ return t;
+
+ /* Aside from solitaires, consider all multi-reg groups first. */
+ t = reload_nregs[r2] - reload_nregs[r1];
+ if (t != 0)
+ return t;
+
+ /* Consider reloads in order of increasing reg-class number. */
+ t = (int) reload_reg_class[r1] - (int) reload_reg_class[r2];
+ if (t != 0)
+ return t;
+
+ /* If reloads are equally urgent, sort by reload number,
+ so that the results of qsort leave nothing to chance. */
+ return r1 - r2;
+}
+
+/* The following HARD_REG_SETs indicate when each hard register is
+ used for a reload of various parts of the current insn. */
+
+/* If reg is in use as a reload reg for a RELOAD_OTHER reload. */
+static HARD_REG_SET reload_reg_used;
+/* If reg is in use for a RELOAD_FOR_INPUT_ADDRESS reload for operand I. */
+static HARD_REG_SET reload_reg_used_in_input_addr[MAX_RECOG_OPERANDS];
+/* If reg is in use for a RELOAD_FOR_OUTPUT_ADDRESS reload for operand I. */
+static HARD_REG_SET reload_reg_used_in_output_addr[MAX_RECOG_OPERANDS];
+/* If reg is in use for a RELOAD_FOR_INPUT reload for operand I. */
+static HARD_REG_SET reload_reg_used_in_input[MAX_RECOG_OPERANDS];
+/* If reg is in use for a RELOAD_FOR_OUTPUT reload for operand I. */
+static HARD_REG_SET reload_reg_used_in_output[MAX_RECOG_OPERANDS];
+/* If reg is in use for a RELOAD_FOR_OPERAND_ADDRESS reload. */
+static HARD_REG_SET reload_reg_used_in_op_addr;
+/* If reg is in use for a RELOAD_FOR_OPADDR_ADDR reload. */
+static HARD_REG_SET reload_reg_used_in_op_addr_reload;
+/* If reg is in use for a RELOAD_FOR_INSN reload. */
+static HARD_REG_SET reload_reg_used_in_insn;
+/* If reg is in use for a RELOAD_FOR_OTHER_ADDRESS reload. */
+static HARD_REG_SET reload_reg_used_in_other_addr;
+
+/* If reg is in use as a reload reg for any sort of reload. */
+static HARD_REG_SET reload_reg_used_at_all;
+
+/* If reg is in use as an inherited reload.  We just mark the first register
+ in the group. */
+static HARD_REG_SET reload_reg_used_for_inherit;
+
+/* Mark reg REGNO as in use for a reload of the sort spec'd by OPNUM and
+ TYPE. MODE is used to indicate how many consecutive regs are
+ actually used. */
+
+static void
+mark_reload_reg_in_use (regno, opnum, type, mode)
+ int regno;
+ int opnum;
+ enum reload_type type;
+ enum machine_mode mode;
+{
+ int nregs = HARD_REGNO_NREGS (regno, mode);
+ int i;
+
+ for (i = regno; i < nregs + regno; i++)
+ {
+ switch (type)
+ {
+ case RELOAD_OTHER:
+ SET_HARD_REG_BIT (reload_reg_used, i);
+ break;
+
+ case RELOAD_FOR_INPUT_ADDRESS:
+ SET_HARD_REG_BIT (reload_reg_used_in_input_addr[opnum], i);
+ break;
+
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ SET_HARD_REG_BIT (reload_reg_used_in_output_addr[opnum], i);
+ break;
+
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ SET_HARD_REG_BIT (reload_reg_used_in_op_addr, i);
+ break;
+
+ case RELOAD_FOR_OPADDR_ADDR:
+ SET_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, i);
+ break;
+
+ case RELOAD_FOR_OTHER_ADDRESS:
+ SET_HARD_REG_BIT (reload_reg_used_in_other_addr, i);
+ break;
+
+ case RELOAD_FOR_INPUT:
+ SET_HARD_REG_BIT (reload_reg_used_in_input[opnum], i);
+ break;
+
+ case RELOAD_FOR_OUTPUT:
+ SET_HARD_REG_BIT (reload_reg_used_in_output[opnum], i);
+ break;
+
+ case RELOAD_FOR_INSN:
+ SET_HARD_REG_BIT (reload_reg_used_in_insn, i);
+ break;
+ }
+
+ SET_HARD_REG_BIT (reload_reg_used_at_all, i);
+ }
+}
+
+/* Similarly, but show REGNO is no longer in use for a reload. */
+
+static void
+clear_reload_reg_in_use (regno, opnum, type, mode)
+ int regno;
+ int opnum;
+ enum reload_type type;
+ enum machine_mode mode;
+{
+ int nregs = HARD_REGNO_NREGS (regno, mode);
+ int i;
+
+ for (i = regno; i < nregs + regno; i++)
+ {
+ switch (type)
+ {
+ case RELOAD_OTHER:
+ CLEAR_HARD_REG_BIT (reload_reg_used, i);
+ break;
+
+ case RELOAD_FOR_INPUT_ADDRESS:
+ CLEAR_HARD_REG_BIT (reload_reg_used_in_input_addr[opnum], i);
+ break;
+
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ CLEAR_HARD_REG_BIT (reload_reg_used_in_output_addr[opnum], i);
+ break;
+
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ CLEAR_HARD_REG_BIT (reload_reg_used_in_op_addr, i);
+ break;
+
+ case RELOAD_FOR_OPADDR_ADDR:
+ CLEAR_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, i);
+ break;
+
+ case RELOAD_FOR_OTHER_ADDRESS:
+ CLEAR_HARD_REG_BIT (reload_reg_used_in_other_addr, i);
+ break;
+
+ case RELOAD_FOR_INPUT:
+ CLEAR_HARD_REG_BIT (reload_reg_used_in_input[opnum], i);
+ break;
+
+ case RELOAD_FOR_OUTPUT:
+ CLEAR_HARD_REG_BIT (reload_reg_used_in_output[opnum], i);
+ break;
+
+ case RELOAD_FOR_INSN:
+ CLEAR_HARD_REG_BIT (reload_reg_used_in_insn, i);
+ break;
+ }
+ }
+}
+
+/* 1 if reg REGNO is free as a reload reg for a reload of the sort
+ specified by OPNUM and TYPE. */
+
+static int
+reload_reg_free_p (regno, opnum, type)
+ int regno;
+ int opnum;
+ enum reload_type type;
+{
+ int i;
+
+ /* In use for a RELOAD_OTHER means it's not available for anything except
+ RELOAD_FOR_OTHER_ADDRESS. Recall that RELOAD_FOR_OTHER_ADDRESS is known
+ to be used only for inputs. */
+
+ if (type != RELOAD_FOR_OTHER_ADDRESS
+ && TEST_HARD_REG_BIT (reload_reg_used, regno))
+ return 0;
+
+ switch (type)
+ {
+ case RELOAD_OTHER:
+ /* In use for anything except RELOAD_FOR_OTHER_ADDRESS means
+ we can't use it for RELOAD_OTHER. */
+ if (TEST_HARD_REG_BIT (reload_reg_used, regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno))
+ return 0;
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_INPUT:
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno))
+ return 0;
+
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno))
+ return 0;
+
+ /* If it is used for some other input, can't use it. */
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ /* If it is used in a later operand's address, can't use it. */
+ for (i = opnum + 1; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_INPUT_ADDRESS:
+ /* Can't use a register if it is used for an input address for this
+ operand or used as an input in an earlier one. */
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[opnum], regno))
+ return 0;
+
+ for (i = 0; i < opnum; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ /* Can't use a register if it is used for an output address for this
+ operand or used as an output in this or a later operand. */
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[opnum], regno))
+ return 0;
+
+ for (i = opnum; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ return (! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno)
+ && ! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno));
+
+ case RELOAD_FOR_OPADDR_ADDR:
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ return (!TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno));
+
+ case RELOAD_FOR_OUTPUT:
+ /* This cannot share a register with RELOAD_FOR_INSN reloads, other
+ outputs, or an operand address for this or an earlier output. */
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno))
+ return 0;
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ for (i = 0; i <= opnum; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_INSN:
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ return (! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno)
+ && ! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno));
+
+ case RELOAD_FOR_OTHER_ADDRESS:
+ return ! TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno);
+ }
+ abort ();
+}
+
+/* Return 1 if the value in reload reg REGNO, as used by a reload
+ needed for the part of the insn specified by OPNUM and TYPE,
+ is not in use for a reload in any prior part of the insn.
+
+ We can assume that the reload reg was already tested for availability
+ at the time it is needed, and we should not check this again,
+ in case the reg has already been marked in use. */
+
+static int
+reload_reg_free_before_p (regno, opnum, type)
+ int regno;
+ int opnum;
+ enum reload_type type;
+{
+ int i;
+
+ switch (type)
+ {
+ case RELOAD_FOR_OTHER_ADDRESS:
+ /* These always come first. */
+ return 1;
+
+ case RELOAD_OTHER:
+ return ! TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno);
+
+ /* If this use is for part of the insn,
+       check that the reg is not in use for any prior part.  It is tempting
+       to try to do this by falling through from objects that occur
+ later in the insn to ones that occur earlier, but that will not
+ correctly take into account the fact that here we MUST ignore
+ things that would prevent the register from being allocated in
+ the first place, since we know that it was allocated. */
+
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ /* Earlier reloads are for earlier outputs or their addresses,
+ any RELOAD_FOR_INSN reloads, any inputs or their addresses, or any
+ RELOAD_FOR_OTHER_ADDRESS reloads (we know it can't conflict with
+	 RELOAD_OTHER).  */
+ for (i = 0; i < opnum; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno))
+ return 0;
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ return (! TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno)
+ && ! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno)
+ && ! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno));
+
+ case RELOAD_FOR_OUTPUT:
+ /* This can't be used in the output address for this operand and
+ anything that can't be used for it, except that we've already
+ tested for RELOAD_FOR_INSN objects. */
+
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[opnum], regno))
+ return 0;
+
+ for (i = 0; i < opnum; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno))
+ return 0;
+
+ return ! TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno);
+
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ case RELOAD_FOR_OPADDR_ADDR:
+ case RELOAD_FOR_INSN:
+ /* These can't conflict with inputs, or each other, so all we have to
+ test is input addresses and the addresses of OTHER items. */
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno))
+ return 0;
+
+ return ! TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno);
+
+ case RELOAD_FOR_INPUT:
+ /* The only things earlier are the address for this and
+ earlier inputs, other inputs (which we know we don't conflict
+ with), and addresses of RELOAD_OTHER objects. */
+
+ for (i = 0; i <= opnum; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno))
+ return 0;
+
+ return ! TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno);
+
+ case RELOAD_FOR_INPUT_ADDRESS:
+ /* Similarly, all we have to check is for use in earlier inputs'
+ addresses. */
+ for (i = 0; i < opnum; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno))
+ return 0;
+
+ return ! TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno);
+ }
+ abort ();
+}
+
+/* Return 1 if the value in reload reg REGNO, as used by a reload
+ needed for the part of the insn specified by OPNUM and TYPE,
+ is still available in REGNO at the end of the insn.
+
+ We can assume that the reload reg was already tested for availability
+ at the time it is needed, and we should not check this again,
+ in case the reg has already been marked in use. */
+
+static int
+reload_reg_reaches_end_p (regno, opnum, type)
+ int regno;
+ int opnum;
+ enum reload_type type;
+{
+ int i;
+
+ switch (type)
+ {
+ case RELOAD_OTHER:
+ /* Since a RELOAD_OTHER reload claims the reg for the entire insn,
+ its value must reach the end. */
+ return 1;
+
+ /* If this use is for part of the insn,
+ its value reaches if no subsequent part uses the same register.
+ Just like the above function, don't try to do this with lots
+ of fallthroughs. */
+
+ case RELOAD_FOR_OTHER_ADDRESS:
+ /* Here we check for everything else, since these don't conflict
+ with anything else and everything comes later. */
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ return (! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)
+ && ! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno)
+ && ! TEST_HARD_REG_BIT (reload_reg_used, regno));
+
+ case RELOAD_FOR_INPUT_ADDRESS:
+ /* Similar, except that we check only for this and subsequent inputs
+ and the address of only subsequent inputs and we do not need
+ to check for RELOAD_OTHER objects since they are known not to
+ conflict. */
+
+ for (i = opnum; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ for (i = opnum + 1; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno))
+ return 0;
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno))
+ return 0;
+
+ return (! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)
+ && ! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno));
+
+ case RELOAD_FOR_INPUT:
+ /* Similar to input address, except we start at the next operand for
+ both input and input address and we do not check for
+ RELOAD_FOR_OPERAND_ADDRESS and RELOAD_FOR_INSN since these
+ would conflict. */
+
+ for (i = opnum + 1; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno))
+ return 0;
+
+ /* ... fall through ... */
+
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ /* Check outputs and their addresses. */
+
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ return 1;
+
+ case RELOAD_FOR_OPADDR_ADDR:
+ for (i = 0; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno)
+ || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno))
+ return 0;
+
+ return (! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)
+ && !TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno));
+
+ case RELOAD_FOR_INSN:
+ /* These conflict with other outputs with RELOAD_OTHER. So
+ we need only check for output addresses. */
+
+ opnum = -1;
+
+ /* ... fall through ... */
+
+ case RELOAD_FOR_OUTPUT:
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ /* We already know these can't conflict with a later output. So the
+ only thing to check are later output addresses. */
+ for (i = opnum + 1; i < reload_n_operands; i++)
+ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno))
+ return 0;
+
+ return 1;
+ }
+
+ abort ();
+}
+
+/* Return 1 if the reloads denoted by R1 and R2 cannot share a register.
+ Return 0 otherwise.
+
+ This function uses the same algorithm as reload_reg_free_p above. */
+
+static int
+reloads_conflict (r1, r2)
+ int r1, r2;
+{
+ enum reload_type r1_type = reload_when_needed[r1];
+ enum reload_type r2_type = reload_when_needed[r2];
+ int r1_opnum = reload_opnum[r1];
+ int r2_opnum = reload_opnum[r2];
+
+ /* RELOAD_OTHER conflicts with everything except RELOAD_FOR_OTHER_ADDRESS. */
+
+ if (r2_type == RELOAD_OTHER && r1_type != RELOAD_FOR_OTHER_ADDRESS)
+ return 1;
+
+ /* Otherwise, check conflicts differently for each type. */
+
+ switch (r1_type)
+ {
+ case RELOAD_FOR_INPUT:
+ return (r2_type == RELOAD_FOR_INSN
+ || r2_type == RELOAD_FOR_OPERAND_ADDRESS
+ || r2_type == RELOAD_FOR_OPADDR_ADDR
+ || r2_type == RELOAD_FOR_INPUT
+ || (r2_type == RELOAD_FOR_INPUT_ADDRESS && r2_opnum > r1_opnum));
+
+ case RELOAD_FOR_INPUT_ADDRESS:
+ return ((r2_type == RELOAD_FOR_INPUT_ADDRESS && r1_opnum == r2_opnum)
+ || (r2_type == RELOAD_FOR_INPUT && r2_opnum < r1_opnum));
+
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ return ((r2_type == RELOAD_FOR_OUTPUT_ADDRESS && r2_opnum == r1_opnum)
+ || (r2_type == RELOAD_FOR_OUTPUT && r2_opnum >= r1_opnum));
+
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ return (r2_type == RELOAD_FOR_INPUT || r2_type == RELOAD_FOR_INSN
+ || r2_type == RELOAD_FOR_OPERAND_ADDRESS);
+
+ case RELOAD_FOR_OPADDR_ADDR:
+ return (r2_type == RELOAD_FOR_INPUT
+ || r2_type == RELOAD_FOR_OPADDR_ADDR);
+
+ case RELOAD_FOR_OUTPUT:
+ return (r2_type == RELOAD_FOR_INSN || r2_type == RELOAD_FOR_OUTPUT
+ || (r2_type == RELOAD_FOR_OUTPUT_ADDRESS
+ && r2_opnum >= r1_opnum));
+
+ case RELOAD_FOR_INSN:
+ return (r2_type == RELOAD_FOR_INPUT || r2_type == RELOAD_FOR_OUTPUT
+ || r2_type == RELOAD_FOR_INSN
+ || r2_type == RELOAD_FOR_OPERAND_ADDRESS);
+
+ case RELOAD_FOR_OTHER_ADDRESS:
+ return r2_type == RELOAD_FOR_OTHER_ADDRESS;
+
+ case RELOAD_OTHER:
+ return r2_type != RELOAD_FOR_OTHER_ADDRESS;
+
+ default:
+ abort ();
+ }
+}
+
+/* Vector of reload-numbers showing the order in which the reloads should
+ be processed. */
+short reload_order[MAX_RELOADS];
+
+/* Indexed by reload number, 1 if incoming value
+ inherited from previous insns. */
+char reload_inherited[MAX_RELOADS];
+
+/* For an inherited reload, this is the insn the reload was inherited from,
+ if we know it. Otherwise, this is 0. */
+rtx reload_inheritance_insn[MAX_RELOADS];
+
+/* If non-zero, this is a place to get the value of the reload,
+ rather than using reload_in. */
+rtx reload_override_in[MAX_RELOADS];
+
+/* For each reload, the index in spill_regs of the spill register used,
+ or -1 if we did not need one of the spill registers for this reload. */
+int reload_spill_index[MAX_RELOADS];
+
+/* Index of last register assigned as a spill register. We allocate in
+   a round-robin fashion.  */
+
+static int last_spill_reg = 0;
+
+/* Find a spill register to use as a reload register for reload R.
+ LAST_RELOAD is non-zero if this is the last reload for the insn being
+ processed.
+
+ Set reload_reg_rtx[R] to the register allocated.
+
+ If NOERROR is nonzero, we return 1 if successful,
+ or 0 if we couldn't find a spill reg and we didn't change anything. */
+
+static int
+allocate_reload_reg (r, insn, last_reload, noerror)
+ int r;
+ rtx insn;
+ int last_reload;
+ int noerror;
+{
+ int i;
+ int pass;
+ int count;
+ rtx new;
+ int regno;
+
+ /* If we put this reload ahead, thinking it is a group,
+ then insist on finding a group. Otherwise we can grab a
+ reg that some other reload needs.
+ (That can happen when we have a 68000 DATA_OR_FP_REG
+ which is a group of data regs or one fp reg.)
+ We need not be so restrictive if there are no more reloads
+ for this insn.
+
+ ??? Really it would be nicer to have smarter handling
+ for that kind of reg class, where a problem like this is normal.
+ Perhaps those classes should be avoided for reloading
+ by use of more alternatives. */
+
+ int force_group = reload_nregs[r] > 1 && ! last_reload;
+
+ /* If we want a single register and haven't yet found one,
+ take any reg in the right class and not in use.
+ If we want a consecutive group, here is where we look for it.
+
+ We use two passes so we can first look for reload regs to
+ reuse, which are already in use for other reloads in this insn,
+ and only then use additional registers.
+ I think that maximizing reuse is needed to make sure we don't
+ run out of reload regs. Suppose we have three reloads, and
+ reloads A and B can share regs. These need two regs.
+ Suppose A and B are given different regs.
+ That leaves none for C. */
+ for (pass = 0; pass < 2; pass++)
+ {
+ /* I is the index in spill_regs.
+ We advance it round-robin between insns to use all spill regs
+ equally, so that inherited reloads have a chance
+ of leapfrogging each other. Don't do this, however, when we have
+ group needs and failure would be fatal; if we only have a relatively
+ small number of spill registers, and more than one of them has
+ group needs, then by starting in the middle, we may end up
+ allocating the first one in such a way that we are not left with
+ sufficient groups to handle the rest. */
+
+ if (noerror || ! force_group)
+ i = last_spill_reg;
+ else
+ i = -1;
+
+ for (count = 0; count < n_spills; count++)
+ {
+ int class = (int) reload_reg_class[r];
+
+ i = (i + 1) % n_spills;
+
+ if (reload_reg_free_p (spill_regs[i], reload_opnum[r],
+ reload_when_needed[r])
+ && TEST_HARD_REG_BIT (reg_class_contents[class], spill_regs[i])
+ && HARD_REGNO_MODE_OK (spill_regs[i], reload_mode[r])
+ /* Look first for regs to share, then for unshared. But
+ don't share regs used for inherited reloads; they are
+ the ones we want to preserve. */
+ && (pass
+ || (TEST_HARD_REG_BIT (reload_reg_used_at_all,
+ spill_regs[i])
+ && ! TEST_HARD_REG_BIT (reload_reg_used_for_inherit,
+ spill_regs[i]))))
+ {
+ int nr = HARD_REGNO_NREGS (spill_regs[i], reload_mode[r]);
+ /* Avoid the problem where spilling a GENERAL_OR_FP_REG
+ (on 68000) got us two FP regs. If NR is 1,
+ we would reject both of them. */
+ if (force_group)
+ nr = CLASS_MAX_NREGS (reload_reg_class[r], reload_mode[r]);
+ /* If we need only one reg, we have already won. */
+ if (nr == 1)
+ {
+ /* But reject a single reg if we demand a group. */
+ if (force_group)
+ continue;
+ break;
+ }
+ /* Otherwise check that as many consecutive regs as we need
+ are available here.
+ Also, don't use for a group registers that are
+ needed for nongroups. */
+ if (! TEST_HARD_REG_BIT (counted_for_nongroups, spill_regs[i]))
+ while (nr > 1)
+ {
+ regno = spill_regs[i] + nr - 1;
+ if (!(TEST_HARD_REG_BIT (reg_class_contents[class], regno)
+ && spill_reg_order[regno] >= 0
+ && reload_reg_free_p (regno, reload_opnum[r],
+ reload_when_needed[r])
+ && ! TEST_HARD_REG_BIT (counted_for_nongroups,
+ regno)))
+ break;
+ nr--;
+ }
+ if (nr == 1)
+ break;
+ }
+ }
+
+ /* If we found something on pass 1, omit pass 2. */
+ if (count < n_spills)
+ break;
+ }
+
+ /* We should have found a spill register by now. */
+ if (count == n_spills)
+ {
+ if (noerror)
+ return 0;
+ goto failure;
+ }
+
+ /* I is the index in SPILL_REG_RTX of the reload register we are to
+ allocate. Get an rtx for it and find its register number. */
+
+ new = spill_reg_rtx[i];
+
+ if (new == 0 || GET_MODE (new) != reload_mode[r])
+ spill_reg_rtx[i] = new
+ = gen_rtx (REG, reload_mode[r], spill_regs[i]);
+
+ regno = true_regnum (new);
+
+ /* Detect when the reload reg can't hold the reload mode.
+     This used to be one `if', but the Sequent compiler can't handle that.  */
+ if (HARD_REGNO_MODE_OK (regno, reload_mode[r]))
+ {
+ enum machine_mode test_mode = VOIDmode;
+ if (reload_in[r])
+ test_mode = GET_MODE (reload_in[r]);
+ /* If reload_in[r] has VOIDmode, it means we will load it
+ in whatever mode the reload reg has: to wit, reload_mode[r].
+ We have already tested that for validity. */
+ /* Aside from that, we need to test that the expressions
+ to reload from or into have modes which are valid for this
+ reload register. Otherwise the reload insns would be invalid. */
+ if (! (reload_in[r] != 0 && test_mode != VOIDmode
+ && ! HARD_REGNO_MODE_OK (regno, test_mode)))
+ if (! (reload_out[r] != 0
+ && ! HARD_REGNO_MODE_OK (regno, GET_MODE (reload_out[r]))))
+ {
+ /* The reg is OK. */
+ last_spill_reg = i;
+
+ /* Mark as in use for this insn the reload regs we use
+ for this. */
+ mark_reload_reg_in_use (spill_regs[i], reload_opnum[r],
+ reload_when_needed[r], reload_mode[r]);
+
+ reload_reg_rtx[r] = new;
+ reload_spill_index[r] = i;
+ return 1;
+ }
+ }
+
+ /* The reg is not OK. */
+ if (noerror)
+ return 0;
+
+ failure:
+ if (asm_noperands (PATTERN (insn)) < 0)
+ /* It's the compiler's fault. */
+ abort ();
+
+ /* It's the user's fault; the operand's mode and constraint
+ don't match. Disable this reload so we don't crash in final. */
+ error_for_asm (insn,
+ "`asm' operand constraint incompatible with operand size");
+ reload_in[r] = 0;
+ reload_out[r] = 0;
+ reload_reg_rtx[r] = 0;
+ reload_optional[r] = 1;
+ reload_secondary_p[r] = 1;
+
+ return 1;
+}
+
+/* Assign hard reg targets for the pseudo-registers we must reload
+ into hard regs for this insn.
+ Also output the instructions to copy them in and out of the hard regs.
+
+ For machines with register classes, we are responsible for
+ finding a reload reg in the proper class. */
+
+static void
+choose_reload_regs (insn, avoid_return_reg)
+ rtx insn;
+ rtx avoid_return_reg;
+{
+ register int i, j;
+ int max_group_size = 1;
+ enum reg_class group_class = NO_REGS;
+ int inheritance;
+
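+  /* Saved copies of the global reload-allocation state.  Judging from
+     the INHERITANCE variable above, a failed assignment pass can
+     presumably be undone from these and retried without inheritance.  */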
+ rtx save_reload_reg_rtx[MAX_RELOADS];
+ char save_reload_inherited[MAX_RELOADS];
+ rtx save_reload_inheritance_insn[MAX_RELOADS];
+ rtx save_reload_override_in[MAX_RELOADS];
+ int save_reload_spill_index[MAX_RELOADS];
+ HARD_REG_SET save_reload_reg_used;
+ HARD_REG_SET save_reload_reg_used_in_input_addr[MAX_RECOG_OPERANDS];
+ HARD_REG_SET save_reload_reg_used_in_output_addr[MAX_RECOG_OPERANDS];
+ HARD_REG_SET save_reload_reg_used_in_input[MAX_RECOG_OPERANDS];
+ HARD_REG_SET save_reload_reg_used_in_output[MAX_RECOG_OPERANDS];
+ HARD_REG_SET save_reload_reg_used_in_op_addr;
+ HARD_REG_SET save_reload_reg_used_in_op_addr_reload;
+ HARD_REG_SET save_reload_reg_used_in_insn;
+ HARD_REG_SET save_reload_reg_used_in_other_addr;
+ HARD_REG_SET save_reload_reg_used_at_all;
+
+ bzero (reload_inherited, MAX_RELOADS);
+ bzero ((char *) reload_inheritance_insn, MAX_RELOADS * sizeof (rtx));
+ bzero ((char *) reload_override_in, MAX_RELOADS * sizeof (rtx));
+
+ CLEAR_HARD_REG_SET (reload_reg_used);
+ CLEAR_HARD_REG_SET (reload_reg_used_at_all);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_op_addr);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_op_addr_reload);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_insn);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_other_addr);
+
+ for (i = 0; i < reload_n_operands; i++)
+ {
+ CLEAR_HARD_REG_SET (reload_reg_used_in_output[i]);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_input[i]);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_input_addr[i]);
+ CLEAR_HARD_REG_SET (reload_reg_used_in_output_addr[i]);
+ }
+
+#ifdef SMALL_REGISTER_CLASSES
+ /* Don't bother with avoiding the return reg
+ if we have no mandatory reload that could use it. */
+ if (avoid_return_reg)
+ {
+ int do_avoid = 0;
+ int regno = REGNO (avoid_return_reg);
+ int nregs
+ = HARD_REGNO_NREGS (regno, GET_MODE (avoid_return_reg));
+ int r;
+
+ for (r = regno; r < regno + nregs; r++)
+ if (spill_reg_order[r] >= 0)
+ for (j = 0; j < n_reloads; j++)
+ if (!reload_optional[j] && reload_reg_rtx[j] == 0
+ && (reload_in[j] != 0 || reload_out[j] != 0
+ || reload_secondary_p[j])
+	      && TEST_HARD_REG_BIT (reg_class_contents[(int) reload_reg_class[j]], r))
+ do_avoid = 1;
+ if (!do_avoid)
+ avoid_return_reg = 0;
+ }
+#endif /* SMALL_REGISTER_CLASSES */
+
+#if 0 /* Not needed, now that we can always retry without inheritance. */
+ /* See if we have more mandatory reloads than spill regs.
+ If so, then we cannot risk optimizations that could prevent
+ reloads from sharing one spill register.
+
+ Since we will try finding a better register than reload_reg_rtx
+ unless it is equal to reload_in or reload_out, count such reloads. */
+
+ {
+ int tem = 0;
+#ifdef SMALL_REGISTER_CLASSES
+    tem = (avoid_return_reg != 0);
+#endif
+ for (j = 0; j < n_reloads; j++)
+ if (! reload_optional[j]
+ && (reload_in[j] != 0 || reload_out[j] != 0 || reload_secondary_p[j])
+ && (reload_reg_rtx[j] == 0
+ || (! rtx_equal_p (reload_reg_rtx[j], reload_in[j])
+ && ! rtx_equal_p (reload_reg_rtx[j], reload_out[j]))))
+ tem++;
+ if (tem > n_spills)
+ must_reuse = 1;
+ }
+#endif
+
+#ifdef SMALL_REGISTER_CLASSES
+ /* Don't use the subroutine call return reg for a reload
+ if we are supposed to avoid it. */
+ if (avoid_return_reg)
+ {
+ int regno = REGNO (avoid_return_reg);
+ int nregs
+ = HARD_REGNO_NREGS (regno, GET_MODE (avoid_return_reg));
+ int r;
+
+ for (r = regno; r < regno + nregs; r++)
+ if (spill_reg_order[r] >= 0)
+ SET_HARD_REG_BIT (reload_reg_used, r);
+ }
+#endif /* SMALL_REGISTER_CLASSES */
+
+ /* In order to be certain of getting the registers we need,
+ we must sort the reloads into order of increasing register class.
+ Then our grabbing of reload registers will parallel the process
+ that provided the reload registers.
+
+ Also note whether any of the reloads wants a consecutive group of regs.
+ If so, record the maximum size of the group desired and what
+ register class contains all the groups needed by this insn. */
+
+ for (j = 0; j < n_reloads; j++)
+ {
+ reload_order[j] = j;
+ reload_spill_index[j] = -1;
+
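+      /* Reload in the wider of the input and output modes, so that a
+	 single reload register can serve both directions of an in-out
+	 reload.  */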
+ reload_mode[j]
+ = (reload_inmode[j] == VOIDmode
+ || (GET_MODE_SIZE (reload_outmode[j])
+ > GET_MODE_SIZE (reload_inmode[j])))
+ ? reload_outmode[j] : reload_inmode[j];
+
+ reload_nregs[j] = CLASS_MAX_NREGS (reload_reg_class[j], reload_mode[j]);
+
+ if (reload_nregs[j] > 1)
+ {
+ max_group_size = MAX (reload_nregs[j], max_group_size);
+ group_class = reg_class_superunion[(int)reload_reg_class[j]][(int)group_class];
+ }
+
+ /* If we have already decided to use a certain register,
+ don't use it in another way. */
+ if (reload_reg_rtx[j])
+ mark_reload_reg_in_use (REGNO (reload_reg_rtx[j]), reload_opnum[j],
+ reload_when_needed[j], reload_mode[j]);
+ }
+
+ if (n_reloads > 1)
+ qsort (reload_order, n_reloads, sizeof (short), reload_reg_class_lower);
+
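+  /* Save the current register assignments so that the failure path below
+     can undo an attempt made with inheritance and retry without it.  */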
+ bcopy ((char *) reload_reg_rtx, (char *) save_reload_reg_rtx,
+ sizeof reload_reg_rtx);
+ bcopy (reload_inherited, save_reload_inherited, sizeof reload_inherited);
+ bcopy ((char *) reload_inheritance_insn,
+ (char *) save_reload_inheritance_insn,
+ sizeof reload_inheritance_insn);
+ bcopy ((char *) reload_override_in, (char *) save_reload_override_in,
+ sizeof reload_override_in);
+ bcopy ((char *) reload_spill_index, (char *) save_reload_spill_index,
+ sizeof reload_spill_index);
+ COPY_HARD_REG_SET (save_reload_reg_used, reload_reg_used);
+ COPY_HARD_REG_SET (save_reload_reg_used_at_all, reload_reg_used_at_all);
+ COPY_HARD_REG_SET (save_reload_reg_used_in_op_addr,
+ reload_reg_used_in_op_addr);
+
+ COPY_HARD_REG_SET (save_reload_reg_used_in_op_addr_reload,
+ reload_reg_used_in_op_addr_reload);
+
+ COPY_HARD_REG_SET (save_reload_reg_used_in_insn,
+ reload_reg_used_in_insn);
+ COPY_HARD_REG_SET (save_reload_reg_used_in_other_addr,
+ reload_reg_used_in_other_addr);
+
+ for (i = 0; i < reload_n_operands; i++)
+ {
+ COPY_HARD_REG_SET (save_reload_reg_used_in_output[i],
+ reload_reg_used_in_output[i]);
+ COPY_HARD_REG_SET (save_reload_reg_used_in_input[i],
+ reload_reg_used_in_input[i]);
+ COPY_HARD_REG_SET (save_reload_reg_used_in_input_addr[i],
+ reload_reg_used_in_input_addr[i]);
+ COPY_HARD_REG_SET (save_reload_reg_used_in_output_addr[i],
+ reload_reg_used_in_output_addr[i]);
+ }
+
+ /* If -O, try first with inheritance, then turning it off.
+ If not -O, don't do inheritance.
+ Using inheritance when not optimizing leads to paradoxes
+ with fp on the 68k: fp numbers (not NaNs) fail to be equal to themselves
+ because one side of the comparison might be inherited. */
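+  /* (Presumably the inherited side can still sit in an extended-precision
+     68881 register while the other side is fetched from a narrower memory
+     slot, so the excess precision makes the comparison fail.)  */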
+
+ for (inheritance = optimize > 0; inheritance >= 0; inheritance--)
+ {
+ /* Process the reloads in order of preference just found.
+ Beyond this point, subregs can be found in reload_reg_rtx.
+
+ This used to look for an existing reloaded home for all
+ of the reloads, and only then perform any new reloads.
+ But that could lose if the reloads were done out of reg-class order
+ because a later reload with a looser constraint might have an old
+ home in a register needed by an earlier reload with a tighter constraint.
+
+ To solve this, we make two passes over the reloads, in the order
+ described above. In the first pass we try to inherit a reload
+ from a previous insn. If there is a later reload that needs a
+ class that is a proper subset of the class being processed, we must
+ also allocate a spill register during the first pass.
+
+ Then make a second pass over the reloads to allocate any reloads
+ that haven't been given registers yet. */
+
+ CLEAR_HARD_REG_SET (reload_reg_used_for_inherit);
+
+ for (j = 0; j < n_reloads; j++)
+ {
+ register int r = reload_order[j];
+
+ /* Ignore reloads that got marked inoperative. */
+ if (reload_out[r] == 0 && reload_in[r] == 0 && ! reload_secondary_p[r])
+ continue;
+
+	  /* If find_reloads chose to use reload_in or reload_out as a reload
+	     register, we don't need to choose one.  Otherwise, try even if it
+	     found one, since we might save an insn if we find the value lying
+	     around.  */
+ if (reload_in[r] != 0 && reload_reg_rtx[r] != 0
+ && (rtx_equal_p (reload_in[r], reload_reg_rtx[r])
+ || rtx_equal_p (reload_out[r], reload_reg_rtx[r])))
+ continue;
+
+#if 0 /* No longer needed for correct operation.
+ It might give better code, or might not; worth an experiment? */
+ /* If this is an optional reload, we can't inherit from earlier insns
+ until we are sure that any non-optional reloads have been allocated.
+ The following code takes advantage of the fact that optional reloads
+ are at the end of reload_order. */
+ if (reload_optional[r] != 0)
+ for (i = 0; i < j; i++)
+ if ((reload_out[reload_order[i]] != 0
+ || reload_in[reload_order[i]] != 0
+ || reload_secondary_p[reload_order[i]])
+ && ! reload_optional[reload_order[i]]
+ && reload_reg_rtx[reload_order[i]] == 0)
+ allocate_reload_reg (reload_order[i], insn, 0, inheritance);
+#endif
+
+ /* First see if this pseudo is already available as reloaded
+ for a previous insn. We cannot try to inherit for reloads
+ that are smaller than the maximum number of registers needed
+ for groups unless the register we would allocate cannot be used
+ for the groups.
+
+ We could check here to see if this is a secondary reload for
+ an object that is already in a register of the desired class.
+ This would avoid the need for the secondary reload register.
+ But this is complex because we can't easily determine what
+ objects might want to be loaded via this reload. So let a register
+ be allocated here. In `emit_reload_insns' we suppress one of the
+ loads in the case described above. */
+
+ if (inheritance)
+ {
+ register int regno = -1;
+ enum machine_mode mode;
+
+ if (reload_in[r] == 0)
+ ;
+ else if (GET_CODE (reload_in[r]) == REG)
+ {
+ regno = REGNO (reload_in[r]);
+ mode = GET_MODE (reload_in[r]);
+ }
+ else if (GET_CODE (reload_in_reg[r]) == REG)
+ {
+ regno = REGNO (reload_in_reg[r]);
+ mode = GET_MODE (reload_in_reg[r]);
+ }
+#if 0
+ /* This won't work, since REGNO can be a pseudo reg number.
+ Also, it takes much more hair to keep track of all the things
+ that can invalidate an inherited reload of part of a pseudoreg. */
+ else if (GET_CODE (reload_in[r]) == SUBREG
+ && GET_CODE (SUBREG_REG (reload_in[r])) == REG)
+ regno = REGNO (SUBREG_REG (reload_in[r])) + SUBREG_WORD (reload_in[r]);
+#endif
+
+ if (regno >= 0 && reg_last_reload_reg[regno] != 0)
+ {
+ i = spill_reg_order[REGNO (reg_last_reload_reg[regno])];
+
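+	      /* Inherit only if the old reload reg still holds this pseudo's
+		 value, is wide enough, suits this reload's class and mode,
+		 and is free at the point where this reload is needed.  */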
+ if (reg_reloaded_contents[i] == regno
+ && (GET_MODE_SIZE (GET_MODE (reg_last_reload_reg[regno]))
+ >= GET_MODE_SIZE (mode))
+ && HARD_REGNO_MODE_OK (spill_regs[i], reload_mode[r])
+ && TEST_HARD_REG_BIT (reg_class_contents[(int) reload_reg_class[r]],
+ spill_regs[i])
+ && (reload_nregs[r] == max_group_size
+ || ! TEST_HARD_REG_BIT (reg_class_contents[(int) group_class],
+ spill_regs[i]))
+ && reload_reg_free_p (spill_regs[i], reload_opnum[r],
+ reload_when_needed[r])
+ && reload_reg_free_before_p (spill_regs[i],
+ reload_opnum[r],
+ reload_when_needed[r]))
+ {
+ /* If a group is needed, verify that all the subsequent
+ registers still have their values intact. */
+ int nr
+ = HARD_REGNO_NREGS (spill_regs[i], reload_mode[r]);
+ int k;
+
+ for (k = 1; k < nr; k++)
+ if (reg_reloaded_contents[spill_reg_order[spill_regs[i] + k]]
+ != regno)
+ break;
+
+ if (k == nr)
+ {
+ int i1;
+
+ /* We found a register that contains the
+ value we need. If this register is the
+ same as an `earlyclobber' operand of the
+ current insn, just mark it as a place to
+ reload from since we can't use it as the
+ reload register itself. */
+
+ for (i1 = 0; i1 < n_earlyclobbers; i1++)
+ if (reg_overlap_mentioned_for_reload_p
+ (reg_last_reload_reg[regno],
+ reload_earlyclobbers[i1]))
+ break;
+
+ if (i1 != n_earlyclobbers
+ /* Don't really use the inherited spill reg
+ if we need it wider than we've got it. */
+ || (GET_MODE_SIZE (reload_mode[r])
+ > GET_MODE_SIZE (mode)))
+ reload_override_in[r] = reg_last_reload_reg[regno];
+ else
+ {
+ int k;
+ /* We can use this as a reload reg. */
+ /* Mark the register as in use for this part of
+ the insn. */
+ mark_reload_reg_in_use (spill_regs[i],
+ reload_opnum[r],
+ reload_when_needed[r],
+ reload_mode[r]);
+ reload_reg_rtx[r] = reg_last_reload_reg[regno];
+ reload_inherited[r] = 1;
+ reload_inheritance_insn[r]
+ = reg_reloaded_insn[i];
+ reload_spill_index[r] = i;
+ for (k = 0; k < nr; k++)
+ SET_HARD_REG_BIT (reload_reg_used_for_inherit,
+ spill_regs[i + k]);
+ }
+ }
+ }
+ }
+ }
+
+ /* Here's another way to see if the value is already lying around. */
+ if (inheritance
+ && reload_in[r] != 0
+ && ! reload_inherited[r]
+ && reload_out[r] == 0
+ && (CONSTANT_P (reload_in[r])
+ || GET_CODE (reload_in[r]) == PLUS
+ || GET_CODE (reload_in[r]) == REG
+ || GET_CODE (reload_in[r]) == MEM)
+ && (reload_nregs[r] == max_group_size
+ || ! reg_classes_intersect_p (reload_reg_class[r], group_class)))
+ {
+ register rtx equiv
+ = find_equiv_reg (reload_in[r], insn, reload_reg_class[r],
+ -1, NULL_PTR, 0, reload_mode[r]);
+ int regno;
+
+ if (equiv != 0)
+ {
+ if (GET_CODE (equiv) == REG)
+ regno = REGNO (equiv);
+ else if (GET_CODE (equiv) == SUBREG)
+ {
+ /* This must be a SUBREG of a hard register.
+ Make a new REG since this might be used in an
+ address and not all machines support SUBREGs
+ there. */
+ regno = REGNO (SUBREG_REG (equiv)) + SUBREG_WORD (equiv);
+ equiv = gen_rtx (REG, reload_mode[r], regno);
+ }
+ else
+ abort ();
+ }
+
+ /* If we found a spill reg, reject it unless it is free
+ and of the desired class. */
+ if (equiv != 0
+ && ((spill_reg_order[regno] >= 0
+ && ! reload_reg_free_before_p (regno, reload_opnum[r],
+ reload_when_needed[r]))
+ || ! TEST_HARD_REG_BIT (reg_class_contents[(int) reload_reg_class[r]],
+ regno)))
+ equiv = 0;
+
+ if (equiv != 0 && TEST_HARD_REG_BIT (reload_reg_used_at_all, regno))
+ equiv = 0;
+
+ if (equiv != 0 && ! HARD_REGNO_MODE_OK (regno, reload_mode[r]))
+ equiv = 0;
+
+ /* We found a register that contains the value we need.
+ If this register is the same as an `earlyclobber' operand
+ of the current insn, just mark it as a place to reload from
+ since we can't use it as the reload register itself. */
+
+ if (equiv != 0)
+ for (i = 0; i < n_earlyclobbers; i++)
+ if (reg_overlap_mentioned_for_reload_p (equiv,
+ reload_earlyclobbers[i]))
+ {
+ reload_override_in[r] = equiv;
+ equiv = 0;
+ break;
+ }
+
+ /* JRV: If the equiv register we have found is explicitly
+ clobbered in the current insn, mark but don't use, as above. */
+
+ if (equiv != 0 && regno_clobbered_p (regno, insn))
+ {
+ reload_override_in[r] = equiv;
+ equiv = 0;
+ }
+
+ /* If we found an equivalent reg, say no code need be generated
+ to load it, and use it as our reload reg. */
+ if (equiv != 0 && regno != HARD_FRAME_POINTER_REGNUM)
+ {
+ reload_reg_rtx[r] = equiv;
+ reload_inherited[r] = 1;
+ /* If it is a spill reg,
+ mark the spill reg as in use for this insn. */
+ i = spill_reg_order[regno];
+ if (i >= 0)
+ {
+ int nr = HARD_REGNO_NREGS (regno, reload_mode[r]);
+ int k;
+ mark_reload_reg_in_use (regno, reload_opnum[r],
+ reload_when_needed[r],
+ reload_mode[r]);
+ for (k = 0; k < nr; k++)
+ SET_HARD_REG_BIT (reload_reg_used_for_inherit, regno + k);
+ }
+ }
+ }
+
+ /* If we found a register to use already, or if this is an optional
+ reload, we are done. */
+ if (reload_reg_rtx[r] != 0 || reload_optional[r] != 0)
+ continue;
+
+#if 0 /* No longer needed for correct operation. Might or might not
+ give better code on the average. Want to experiment? */
+
+ /* See if there is a later reload that has a class different from our
+ class that intersects our class or that requires less register
+ than our reload. If so, we must allocate a register to this
+ reload now, since that reload might inherit a previous reload
+ and take the only available register in our class. Don't do this
+ for optional reloads since they will force all previous reloads
+ to be allocated. Also don't do this for reloads that have been
+ turned off. */
+
+ for (i = j + 1; i < n_reloads; i++)
+ {
+ int s = reload_order[i];
+
+ if ((reload_in[s] == 0 && reload_out[s] == 0
+ && ! reload_secondary_p[s])
+ || reload_optional[s])
+ continue;
+
+ if ((reload_reg_class[s] != reload_reg_class[r]
+ && reg_classes_intersect_p (reload_reg_class[r],
+ reload_reg_class[s]))
+ || reload_nregs[s] < reload_nregs[r])
+ break;
+ }
+
+ if (i == n_reloads)
+ continue;
+
+ allocate_reload_reg (r, insn, j == n_reloads - 1, inheritance);
+#endif
+ }
+
+ /* Now allocate reload registers for anything non-optional that
+ didn't get one yet. */
+ for (j = 0; j < n_reloads; j++)
+ {
+ register int r = reload_order[j];
+
+ /* Ignore reloads that got marked inoperative. */
+ if (reload_out[r] == 0 && reload_in[r] == 0 && ! reload_secondary_p[r])
+ continue;
+
+ /* Skip reloads that already have a register allocated or are
+ optional. */
+ if (reload_reg_rtx[r] != 0 || reload_optional[r])
+ continue;
+
+ if (! allocate_reload_reg (r, insn, j == n_reloads - 1, inheritance))
+ break;
+ }
+
+ /* If that loop got all the way, we have won. */
+ if (j == n_reloads)
+ break;
+
+ fail:
+ /* Loop around and try without any inheritance. */
+ /* First undo everything done by the failed attempt
+ to allocate with inheritance. */
+ bcopy ((char *) save_reload_reg_rtx, (char *) reload_reg_rtx,
+ sizeof reload_reg_rtx);
+ bcopy ((char *) save_reload_inherited, (char *) reload_inherited,
+ sizeof reload_inherited);
+ bcopy ((char *) save_reload_inheritance_insn,
+ (char *) reload_inheritance_insn,
+ sizeof reload_inheritance_insn);
+ bcopy ((char *) save_reload_override_in, (char *) reload_override_in,
+ sizeof reload_override_in);
+ bcopy ((char *) save_reload_spill_index, (char *) reload_spill_index,
+ sizeof reload_spill_index);
+ COPY_HARD_REG_SET (reload_reg_used, save_reload_reg_used);
+ COPY_HARD_REG_SET (reload_reg_used_at_all, save_reload_reg_used_at_all);
+ COPY_HARD_REG_SET (reload_reg_used_in_op_addr,
+ save_reload_reg_used_in_op_addr);
+ COPY_HARD_REG_SET (reload_reg_used_in_op_addr_reload,
+ save_reload_reg_used_in_op_addr_reload);
+ COPY_HARD_REG_SET (reload_reg_used_in_insn,
+ save_reload_reg_used_in_insn);
+ COPY_HARD_REG_SET (reload_reg_used_in_other_addr,
+ save_reload_reg_used_in_other_addr);
+
+ for (i = 0; i < reload_n_operands; i++)
+ {
+ COPY_HARD_REG_SET (reload_reg_used_in_input[i],
+ save_reload_reg_used_in_input[i]);
+ COPY_HARD_REG_SET (reload_reg_used_in_output[i],
+ save_reload_reg_used_in_output[i]);
+ COPY_HARD_REG_SET (reload_reg_used_in_input_addr[i],
+ save_reload_reg_used_in_input_addr[i]);
+ COPY_HARD_REG_SET (reload_reg_used_in_output_addr[i],
+ save_reload_reg_used_in_output_addr[i]);
+ }
+ }
+
+ /* If we thought we could inherit a reload, because it seemed that
+ nothing else wanted the same reload register earlier in the insn,
+ verify that assumption, now that all reloads have been assigned. */
+
+ for (j = 0; j < n_reloads; j++)
+ {
+ register int r = reload_order[j];
+
+ if (reload_inherited[r] && reload_reg_rtx[r] != 0
+ && ! reload_reg_free_before_p (true_regnum (reload_reg_rtx[r]),
+ reload_opnum[r],
+ reload_when_needed[r]))
+ reload_inherited[r] = 0;
+
+ /* If we found a better place to reload from,
+ validate it in the same fashion, if it is a reload reg. */
+ if (reload_override_in[r]
+ && (GET_CODE (reload_override_in[r]) == REG
+ || GET_CODE (reload_override_in[r]) == SUBREG))
+ {
+ int regno = true_regnum (reload_override_in[r]);
+ if (spill_reg_order[regno] >= 0
+ && ! reload_reg_free_before_p (regno, reload_opnum[r],
+ reload_when_needed[r]))
+ reload_override_in[r] = 0;
+ }
+ }
+
+ /* Now that reload_override_in is known valid,
+ actually override reload_in. */
+ for (j = 0; j < n_reloads; j++)
+ if (reload_override_in[j])
+ reload_in[j] = reload_override_in[j];
+
+ /* If this reload won't be done because it has been cancelled or is
+ optional and not inherited, clear reload_reg_rtx so other
+ routines (such as subst_reloads) don't get confused. */
+ for (j = 0; j < n_reloads; j++)
+ if (reload_reg_rtx[j] != 0
+ && ((reload_optional[j] && ! reload_inherited[j])
+ || (reload_in[j] == 0 && reload_out[j] == 0
+ && ! reload_secondary_p[j])))
+ {
+ int regno = true_regnum (reload_reg_rtx[j]);
+
+ if (spill_reg_order[regno] >= 0)
+ clear_reload_reg_in_use (regno, reload_opnum[j],
+ reload_when_needed[j], reload_mode[j]);
+ reload_reg_rtx[j] = 0;
+ }
+
+ /* Record which pseudos and which spill regs have output reloads. */
+ for (j = 0; j < n_reloads; j++)
+ {
+ register int r = reload_order[j];
+
+ i = reload_spill_index[r];
+
+ /* I is nonneg if this reload used one of the spill regs.
+ If reload_reg_rtx[r] is 0, this is an optional reload
+ that we opted to ignore. */
+ if (reload_out[r] != 0 && GET_CODE (reload_out[r]) == REG
+ && reload_reg_rtx[r] != 0)
+ {
+ register int nregno = REGNO (reload_out[r]);
+ int nr = 1;
+
+ if (nregno < FIRST_PSEUDO_REGISTER)
+ nr = HARD_REGNO_NREGS (nregno, reload_mode[r]);
+
+ while (--nr >= 0)
+ reg_has_output_reload[nregno + nr] = 1;
+
+ if (i >= 0)
+ {
+ nr = HARD_REGNO_NREGS (spill_regs[i], reload_mode[r]);
+ while (--nr >= 0)
+ SET_HARD_REG_BIT (reg_is_output_reload, spill_regs[i] + nr);
+ }
+
+ if (reload_when_needed[r] != RELOAD_OTHER
+ && reload_when_needed[r] != RELOAD_FOR_OUTPUT
+ && reload_when_needed[r] != RELOAD_FOR_INSN)
+ abort ();
+ }
+ }
+}
+
+/* If SMALL_REGISTER_CLASSES is defined, we may not have merged two
+   reloads of the same item for fear that we might not have enough reload
+   registers.  However, normally they will get the same reload register
+   and hence actually need not be loaded twice.
+
+   Here we check for the most common case of this phenomenon: when we have
+   a number of reloads for the same object, each of which was allocated
+   the same reload_reg_rtx, that reload_reg_rtx is not used for any other
+   reload, and is not modified in the insn itself.  If we find such a case,
+   merge all the reloads and set the resulting reload to RELOAD_OTHER.
+ This will not increase the number of spill registers needed and will
+ prevent redundant code. */
+
+#ifdef SMALL_REGISTER_CLASSES
+
+static void
+merge_assigned_reloads (insn)
+ rtx insn;
+{
+ int i, j;
+
+ /* Scan all the reloads looking for ones that only load values and
+ are not already RELOAD_OTHER and ones whose reload_reg_rtx are
+ assigned and not modified by INSN. */
+
+ for (i = 0; i < n_reloads; i++)
+ {
+ if (reload_in[i] == 0 || reload_when_needed[i] == RELOAD_OTHER
+ || reload_out[i] != 0 || reload_reg_rtx[i] == 0
+ || reg_set_p (reload_reg_rtx[i], insn))
+ continue;
+
+ /* Look at all other reloads. Ensure that the only use of this
+ reload_reg_rtx is in a reload that just loads the same value
+ as we do. Note that any secondary reloads must be of the identical
+ class since the values, modes, and result registers are the
+ same, so we need not do anything with any secondary reloads. */
+
+ for (j = 0; j < n_reloads; j++)
+ {
+ if (i == j || reload_reg_rtx[j] == 0
+ || ! reg_overlap_mentioned_p (reload_reg_rtx[j],
+ reload_reg_rtx[i]))
+ continue;
+
+	  /* If the reload regs aren't exactly the same (e.g., different modes)
+	     or if the values are different, we can't merge anything with this
+	     reload register.  */
+
+ if (! rtx_equal_p (reload_reg_rtx[i], reload_reg_rtx[j])
+ || reload_out[j] != 0 || reload_in[j] == 0
+ || ! rtx_equal_p (reload_in[i], reload_in[j]))
+ break;
+ }
+
+ /* If all is OK, merge the reloads. Only set this to RELOAD_OTHER if
+ we, in fact, found any matching reloads. */
+
+ if (j == n_reloads)
+ {
+ for (j = 0; j < n_reloads; j++)
+ if (i != j && reload_reg_rtx[j] != 0
+ && rtx_equal_p (reload_reg_rtx[i], reload_reg_rtx[j]))
+ {
+ reload_when_needed[i] = RELOAD_OTHER;
+ reload_in[j] = 0;
+ transfer_replacements (i, j);
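+	      /* Reload I now does the loading, so any pending operand
+		 replacements recorded for reload J are transferred to it.  */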
+ }
+
+ /* If this is now RELOAD_OTHER, look for any reloads that load
+ parts of this operand and set them to RELOAD_FOR_OTHER_ADDRESS
+ if they were for inputs, RELOAD_OTHER for outputs. Note that
+ this test is equivalent to looking for reloads for this operand
+ number. */
+
+ if (reload_when_needed[i] == RELOAD_OTHER)
+ for (j = 0; j < n_reloads; j++)
+ if (reload_in[j] != 0
+	      && reload_when_needed[j] != RELOAD_OTHER
+ && reg_overlap_mentioned_for_reload_p (reload_in[j],
+ reload_in[i]))
+ reload_when_needed[j]
+	      = reload_when_needed[j] == RELOAD_FOR_INPUT_ADDRESS
+ ? RELOAD_FOR_OTHER_ADDRESS : RELOAD_OTHER;
+ }
+ }
+}
+#endif /* SMALL_REGISTER_CLASSES */
+
+/* Output insns to reload values in and out of the chosen reload regs. */
+
+static void
+emit_reload_insns (insn)
+ rtx insn;
+{
+ register int j;
+ rtx input_reload_insns[MAX_RECOG_OPERANDS];
+ rtx other_input_address_reload_insns = 0;
+ rtx other_input_reload_insns = 0;
+ rtx input_address_reload_insns[MAX_RECOG_OPERANDS];
+ rtx output_reload_insns[MAX_RECOG_OPERANDS];
+ rtx output_address_reload_insns[MAX_RECOG_OPERANDS];
+ rtx operand_reload_insns = 0;
+ rtx other_operand_reload_insns = 0;
+ rtx following_insn = NEXT_INSN (insn);
+ rtx before_insn = insn;
+ int special;
+ /* Values to be put in spill_reg_store are put here first. */
+ rtx new_spill_reg_store[FIRST_PSEUDO_REGISTER];
+
+ for (j = 0; j < reload_n_operands; j++)
+ input_reload_insns[j] = input_address_reload_insns[j]
+ = output_reload_insns[j] = output_address_reload_insns[j] = 0;
+
+ /* Now output the instructions to copy the data into and out of the
+ reload registers. Do these in the order that the reloads were reported,
+ since reloads of base and index registers precede reloads of operands
+ and the operands may need the base and index registers reloaded. */
+
+ for (j = 0; j < n_reloads; j++)
+ {
+ register rtx old;
+ rtx oldequiv_reg = 0;
+ rtx store_insn = 0;
+
+ old = reload_in[j];
+ if (old != 0 && ! reload_inherited[j]
+ && ! rtx_equal_p (reload_reg_rtx[j], old)
+ && reload_reg_rtx[j] != 0)
+ {
+ register rtx reloadreg = reload_reg_rtx[j];
+ rtx oldequiv = 0;
+ enum machine_mode mode;
+ rtx *where;
+
+ /* Determine the mode to reload in.
+ This is very tricky because we have three to choose from.
+ There is the mode the insn operand wants (reload_inmode[J]).
+ There is the mode of the reload register RELOADREG.
+ There is the intrinsic mode of the operand, which we could find
+ by stripping some SUBREGs.
+ It turns out that RELOADREG's mode is irrelevant:
+ we can change that arbitrarily.
+
+ Consider (SUBREG:SI foo:QI) as an operand that must be SImode;
+ then the reload reg may not support QImode moves, so use SImode.
+ If foo is in memory due to spilling a pseudo reg, this is safe,
+ because the QImode value is in the least significant part of a
+ slot big enough for a SImode. If foo is some other sort of
+ memory reference, then it is impossible to reload this case,
+ so previous passes had better make sure this never happens.
+
+ Then consider a one-word union which has SImode and one of its
+ members is a float, being fetched as (SUBREG:SF union:SI).
+ We must fetch that as SFmode because we could be loading into
+ a float-only register. In this case OLD's mode is correct.
+
+ Consider an immediate integer: it has VOIDmode. Here we need
+ to get a mode from something else.
+
+ In some cases, there is a fourth mode, the operand's
+ containing mode. If the insn specifies a containing mode for
+ this operand, it overrides all others.
+
+ I am not sure whether the algorithm here is always right,
+ but it does the right things in those cases. */
+
+ mode = GET_MODE (old);
+ if (mode == VOIDmode)
+ mode = reload_inmode[j];
+
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ /* If we need a secondary register for this operation, see if
+ the value is already in a register in that class. Don't
+ do this if the secondary register will be used as a scratch
+ register. */
+
+ if (reload_secondary_in_reload[j] >= 0
+ && reload_secondary_in_icode[j] == CODE_FOR_nothing
+ && optimize)
+ oldequiv
+ = find_equiv_reg (old, insn,
+ reload_reg_class[reload_secondary_in_reload[j]],
+ -1, NULL_PTR, 0, mode);
+#endif
+
+ /* If reloading from memory, see if there is a register
+ that already holds the same value. If so, reload from there.
+ We can pass 0 as the reload_reg_p argument because
+ any other reload has either already been emitted,
+ in which case find_equiv_reg will see the reload-insn,
+ or has yet to be emitted, in which case it doesn't matter
+ because we will use this equiv reg right away. */
+
+ if (oldequiv == 0 && optimize
+ && (GET_CODE (old) == MEM
+ || (GET_CODE (old) == REG
+ && REGNO (old) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber[REGNO (old)] < 0)))
+ oldequiv = find_equiv_reg (old, insn, ALL_REGS,
+ -1, NULL_PTR, 0, mode);
+
+ if (oldequiv)
+ {
+ int regno = true_regnum (oldequiv);
+
+ /* If OLDEQUIV is a spill register, don't use it for this
+ if any other reload needs it at an earlier stage of this insn
+ or at this stage. */
+ if (spill_reg_order[regno] >= 0
+ && (! reload_reg_free_p (regno, reload_opnum[j],
+ reload_when_needed[j])
+ || ! reload_reg_free_before_p (regno, reload_opnum[j],
+ reload_when_needed[j])))
+ oldequiv = 0;
+
+ /* If OLDEQUIV is not a spill register,
+ don't use it if any other reload wants it. */
+ if (spill_reg_order[regno] < 0)
+ {
+ int k;
+ for (k = 0; k < n_reloads; k++)
+ if (reload_reg_rtx[k] != 0 && k != j
+ && reg_overlap_mentioned_for_reload_p (reload_reg_rtx[k],
+ oldequiv))
+ {
+ oldequiv = 0;
+ break;
+ }
+ }
+
+ /* If it is no cheaper to copy from OLDEQUIV into the
+ reload register than it would be to move from memory,
+ don't use it. Likewise, if we need a secondary register
+ or memory. */
+
+ if (oldequiv != 0
+ && ((REGNO_REG_CLASS (regno) != reload_reg_class[j]
+ && (REGISTER_MOVE_COST (REGNO_REG_CLASS (regno),
+ reload_reg_class[j])
+ >= MEMORY_MOVE_COST (mode)))
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ || (SECONDARY_INPUT_RELOAD_CLASS (reload_reg_class[j],
+ mode, oldequiv)
+ != NO_REGS)
+#endif
+#ifdef SECONDARY_MEMORY_NEEDED
+ || SECONDARY_MEMORY_NEEDED (reload_reg_class[j],
+ REGNO_REG_CLASS (regno),
+ mode)
+#endif
+ ))
+ oldequiv = 0;
+ }
+
+ if (oldequiv == 0)
+ oldequiv = old;
+ else if (GET_CODE (oldequiv) == REG)
+ oldequiv_reg = oldequiv;
+ else if (GET_CODE (oldequiv) == SUBREG)
+ oldequiv_reg = SUBREG_REG (oldequiv);
+
+ /* If we are reloading from a register that was recently stored in
+ with an output-reload, see if we can prove there was
+ actually no need to store the old value in it. */
+
+ if (optimize && GET_CODE (oldequiv) == REG
+ && REGNO (oldequiv) < FIRST_PSEUDO_REGISTER
+ && spill_reg_order[REGNO (oldequiv)] >= 0
+ && spill_reg_store[spill_reg_order[REGNO (oldequiv)]] != 0
+ && find_reg_note (insn, REG_DEAD, reload_in[j])
+ /* This is unsafe if operand occurs more than once in current
+ insn. Perhaps some occurrences weren't reloaded. */
+ && count_occurrences (PATTERN (insn), reload_in[j]) == 1)
+ delete_output_reload
+ (insn, j, spill_reg_store[spill_reg_order[REGNO (oldequiv)]]);
+
+ /* Encapsulate both RELOADREG and OLDEQUIV into that mode,
+ then load RELOADREG from OLDEQUIV. Note that we cannot use
+ gen_lowpart_common since it can do the wrong thing when
+ RELOADREG has a multi-word mode. Note that RELOADREG
+ must always be a REG here. */
+
+ if (GET_MODE (reloadreg) != mode)
+ reloadreg = gen_rtx (REG, mode, REGNO (reloadreg));
+ while (GET_CODE (oldequiv) == SUBREG && GET_MODE (oldequiv) != mode)
+ oldequiv = SUBREG_REG (oldequiv);
+ if (GET_MODE (oldequiv) != VOIDmode
+ && mode != GET_MODE (oldequiv))
+ oldequiv = gen_rtx (SUBREG, mode, oldequiv, 0);
+
+ /* Switch to the right place to emit the reload insns. */
+ switch (reload_when_needed[j])
+ {
+ case RELOAD_OTHER:
+ where = &other_input_reload_insns;
+ break;
+ case RELOAD_FOR_INPUT:
+ where = &input_reload_insns[reload_opnum[j]];
+ break;
+ case RELOAD_FOR_INPUT_ADDRESS:
+ where = &input_address_reload_insns[reload_opnum[j]];
+ break;
+ case RELOAD_FOR_OUTPUT_ADDRESS:
+ where = &output_address_reload_insns[reload_opnum[j]];
+ break;
+ case RELOAD_FOR_OPERAND_ADDRESS:
+ where = &operand_reload_insns;
+ break;
+ case RELOAD_FOR_OPADDR_ADDR:
+ where = &other_operand_reload_insns;
+ break;
+ case RELOAD_FOR_OTHER_ADDRESS:
+ where = &other_input_address_reload_insns;
+ break;
+ default:
+ abort ();
+ }
+
+ push_to_sequence (*where);
+ special = 0;
+
+ /* Auto-increment addresses must be reloaded in a special way. */
+ if (GET_CODE (oldequiv) == POST_INC
+ || GET_CODE (oldequiv) == POST_DEC
+ || GET_CODE (oldequiv) == PRE_INC
+ || GET_CODE (oldequiv) == PRE_DEC)
+ {
+	      /* We are not going to bother supporting the case where an
+		 incremented register can't be copied directly from
+		 OLDEQUIV since this seems highly unlikely.  */
+ if (reload_secondary_in_reload[j] >= 0)
+ abort ();
+ /* Prevent normal processing of this reload. */
+ special = 1;
+ /* Output a special code sequence for this case. */
+ inc_for_reload (reloadreg, oldequiv, reload_inc[j]);
+ }
+
+ /* If we are reloading a pseudo-register that was set by the previous
+ insn, see if we can get rid of that pseudo-register entirely
+ by redirecting the previous insn into our reload register. */
+
+ else if (optimize && GET_CODE (old) == REG
+ && REGNO (old) >= FIRST_PSEUDO_REGISTER
+ && dead_or_set_p (insn, old)
+ /* This is unsafe if some other reload
+ uses the same reg first. */
+ && reload_reg_free_before_p (REGNO (reloadreg),
+ reload_opnum[j],
+ reload_when_needed[j]))
+ {
+ rtx temp = PREV_INSN (insn);
+ while (temp && GET_CODE (temp) == NOTE)
+ temp = PREV_INSN (temp);
+ if (temp
+ && GET_CODE (temp) == INSN
+ && GET_CODE (PATTERN (temp)) == SET
+ && SET_DEST (PATTERN (temp)) == old
+ /* Make sure we can access insn_operand_constraint. */
+ && asm_noperands (PATTERN (temp)) < 0
+ /* This is unsafe if prev insn rejects our reload reg. */
+ && constraint_accepts_reg_p (insn_operand_constraint[recog_memoized (temp)][0],
+ reloadreg)
+ /* This is unsafe if operand occurs more than once in current
+ insn. Perhaps some occurrences aren't reloaded. */
+ && count_occurrences (PATTERN (insn), old) == 1
+ /* Don't risk splitting a matching pair of operands. */
+ && ! reg_mentioned_p (old, SET_SRC (PATTERN (temp))))
+ {
+ /* Store into the reload register instead of the pseudo. */
+ SET_DEST (PATTERN (temp)) = reloadreg;
+ /* If these are the only uses of the pseudo reg,
+ pretend for GDB it lives in the reload reg we used. */
+ if (reg_n_deaths[REGNO (old)] == 1
+ && reg_n_sets[REGNO (old)] == 1)
+ {
+ reg_renumber[REGNO (old)] = REGNO (reload_reg_rtx[j]);
+ alter_reg (REGNO (old), -1);
+ }
+ special = 1;
+ }
+ }
+
+ /* We can't do that, so output an insn to load RELOADREG. */
+
+ if (! special)
+ {
+#ifdef SECONDARY_INPUT_RELOAD_CLASS
+ rtx second_reload_reg = 0;
+ enum insn_code icode;
+
+ /* If we have a secondary reload, pick up the secondary register
+ and icode, if any. If OLDEQUIV and OLD are different or
+ if this is an in-out reload, recompute whether or not we
+ still need a secondary register and what the icode should
+ be. If we still need a secondary register and the class or
+ icode is different, go back to reloading from OLD if using
+ OLDEQUIV means that we got the wrong type of register. We
+ cannot have different class or icode due to an in-out reload
+ because we don't make such reloads when both the input and
+ output need secondary reload registers. */
+
+ if (reload_secondary_in_reload[j] >= 0)
+ {
+ int secondary_reload = reload_secondary_in_reload[j];
+ rtx real_oldequiv = oldequiv;
+ rtx real_old = old;
+
+ /* If OLDEQUIV is a pseudo with a MEM, get the real MEM
+ and similarly for OLD.
+ See comments in get_secondary_reload in reload.c. */
+ if (GET_CODE (oldequiv) == REG
+ && REGNO (oldequiv) >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_mem[REGNO (oldequiv)] != 0)
+ real_oldequiv = reg_equiv_mem[REGNO (oldequiv)];
+
+ if (GET_CODE (old) == REG
+ && REGNO (old) >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_mem[REGNO (old)] != 0)
+ real_old = reg_equiv_mem[REGNO (old)];
+
+ second_reload_reg = reload_reg_rtx[secondary_reload];
+ icode = reload_secondary_in_icode[j];
+
+ if ((old != oldequiv && ! rtx_equal_p (old, oldequiv))
+ || (reload_in[j] != 0 && reload_out[j] != 0))
+ {
+ enum reg_class new_class
+ = SECONDARY_INPUT_RELOAD_CLASS (reload_reg_class[j],
+ mode, real_oldequiv);
+
+ if (new_class == NO_REGS)
+ second_reload_reg = 0;
+ else
+ {
+ enum insn_code new_icode;
+ enum machine_mode new_mode;
+
+ if (! TEST_HARD_REG_BIT (reg_class_contents[(int) new_class],
+ REGNO (second_reload_reg)))
+ oldequiv = old, real_oldequiv = real_old;
+ else
+ {
+ new_icode = reload_in_optab[(int) mode];
+ if (new_icode != CODE_FOR_nothing
+ && ((insn_operand_predicate[(int) new_icode][0]
+ && ! ((*insn_operand_predicate[(int) new_icode][0])
+ (reloadreg, mode)))
+ || (insn_operand_predicate[(int) new_icode][1]
+ && ! ((*insn_operand_predicate[(int) new_icode][1])
+ (real_oldequiv, mode)))))
+ new_icode = CODE_FOR_nothing;
+
+ if (new_icode == CODE_FOR_nothing)
+ new_mode = mode;
+ else
+ new_mode = insn_operand_mode[(int) new_icode][2];
+
+ if (GET_MODE (second_reload_reg) != new_mode)
+ {
+ if (!HARD_REGNO_MODE_OK (REGNO (second_reload_reg),
+ new_mode))
+ oldequiv = old, real_oldequiv = real_old;
+ else
+ second_reload_reg
+ = gen_rtx (REG, new_mode,
+ REGNO (second_reload_reg));
+ }
+ }
+ }
+ }
+
+ /* If we still need a secondary reload register, check
+ to see if it is being used as a scratch or intermediate
+ register and generate code appropriately. If we need
+ a scratch register, use REAL_OLDEQUIV since the form of
+ the insn may depend on the actual address if it is
+ a MEM. */
+
+ if (second_reload_reg)
+ {
+ if (icode != CODE_FOR_nothing)
+ {
+ emit_insn (GEN_FCN (icode) (reloadreg, real_oldequiv,
+ second_reload_reg));
+ special = 1;
+ }
+ else
+ {
+ /* See if we need a scratch register to load the
+ intermediate register (a tertiary reload). */
+ enum insn_code tertiary_icode
+ = reload_secondary_in_icode[secondary_reload];
+
+ if (tertiary_icode != CODE_FOR_nothing)
+ {
+ rtx third_reload_reg
+ = reload_reg_rtx[reload_secondary_in_reload[secondary_reload]];
+
+ emit_insn ((GEN_FCN (tertiary_icode)
+ (second_reload_reg, real_oldequiv,
+ third_reload_reg)));
+ }
+ else
+ gen_input_reload (second_reload_reg, oldequiv,
+ reload_opnum[j],
+ reload_when_needed[j]);
+
+ oldequiv = second_reload_reg;
+ }
+ }
+ }
+#endif
+
+ if (! special && ! rtx_equal_p (reloadreg, oldequiv))
+ gen_input_reload (reloadreg, oldequiv, reload_opnum[j],
+ reload_when_needed[j]);
+
+#if defined(SECONDARY_INPUT_RELOAD_CLASS) && defined(PRESERVE_DEATH_INFO_REGNO_P)
+ /* We may have to make a REG_DEAD note for the secondary reload
+ register in the insns we just made. Find the last insn that
+ mentioned the register. */
+ if (! special && second_reload_reg
+ && PRESERVE_DEATH_INFO_REGNO_P (REGNO (second_reload_reg)))
+ {
+ rtx prev;
+
+ for (prev = get_last_insn (); prev;
+ prev = PREV_INSN (prev))
+	      if (GET_RTX_CLASS (GET_CODE (prev)) == 'i'
+ && reg_overlap_mentioned_for_reload_p (second_reload_reg,
+ PATTERN (prev)))
+ {
+ REG_NOTES (prev) = gen_rtx (EXPR_LIST, REG_DEAD,
+ second_reload_reg,
+ REG_NOTES (prev));
+ break;
+ }
+ }
+#endif
+ }
+
+ /* End this sequence. */
+ *where = get_insns ();
+ end_sequence ();
+ }
+
+ /* Add a note saying the input reload reg
+ dies in this insn, if anyone cares. */
+#ifdef PRESERVE_DEATH_INFO_REGNO_P
+ if (old != 0
+ && reload_reg_rtx[j] != old
+ && reload_reg_rtx[j] != 0
+ && reload_out[j] == 0
+ && ! reload_inherited[j]
+ && PRESERVE_DEATH_INFO_REGNO_P (REGNO (reload_reg_rtx[j])))
+ {
+ register rtx reloadreg = reload_reg_rtx[j];
+
+#if 0
+ /* We can't abort here because we need to support this for sched.c.
+ It's not terrible to miss a REG_DEAD note, but we should try
+ to figure out how to do this correctly. */
+ /* The code below is incorrect for address-only reloads. */
+ if (reload_when_needed[j] != RELOAD_OTHER
+ && reload_when_needed[j] != RELOAD_FOR_INPUT)
+ abort ();
+#endif
+
+ /* Add a death note to this insn, for an input reload. */
+
+ if ((reload_when_needed[j] == RELOAD_OTHER
+ || reload_when_needed[j] == RELOAD_FOR_INPUT)
+ && ! dead_or_set_p (insn, reloadreg))
+ REG_NOTES (insn)
+ = gen_rtx (EXPR_LIST, REG_DEAD,
+ reloadreg, REG_NOTES (insn));
+ }
+
+ /* When we inherit a reload, the last marked death of the reload reg
+ may no longer really be a death. */
+ if (reload_reg_rtx[j] != 0
+ && PRESERVE_DEATH_INFO_REGNO_P (REGNO (reload_reg_rtx[j]))
+ && reload_inherited[j])
+ {
+ /* Handle inheriting an output reload.
+ Remove the death note from the output reload insn. */
+ if (reload_spill_index[j] >= 0
+ && GET_CODE (reload_in[j]) == REG
+ && spill_reg_store[reload_spill_index[j]] != 0
+ && find_regno_note (spill_reg_store[reload_spill_index[j]],
+ REG_DEAD, REGNO (reload_reg_rtx[j])))
+ remove_death (REGNO (reload_reg_rtx[j]),
+ spill_reg_store[reload_spill_index[j]]);
+ /* Likewise for input reloads that were inherited. */
+ else if (reload_spill_index[j] >= 0
+ && GET_CODE (reload_in[j]) == REG
+ && spill_reg_store[reload_spill_index[j]] == 0
+ && reload_inheritance_insn[j] != 0
+ && find_regno_note (reload_inheritance_insn[j], REG_DEAD,
+ REGNO (reload_reg_rtx[j])))
+ remove_death (REGNO (reload_reg_rtx[j]),
+ reload_inheritance_insn[j]);
+ else
+ {
+ rtx prev;
+
+ /* We got this register from find_equiv_reg.
+ Search back for its last death note and get rid of it.
+ But don't search back too far.
+ Don't go past a place where this reg is set,
+ since a death note before that remains valid. */
+ for (prev = PREV_INSN (insn);
+ prev && GET_CODE (prev) != CODE_LABEL;
+ prev = PREV_INSN (prev))
+ if (GET_RTX_CLASS (GET_CODE (prev)) == 'i'
+ && dead_or_set_p (prev, reload_reg_rtx[j]))
+ {
+ if (find_regno_note (prev, REG_DEAD,
+ REGNO (reload_reg_rtx[j])))
+ remove_death (REGNO (reload_reg_rtx[j]), prev);
+ break;
+ }
+ }
+ }
+
+ /* We might have used find_equiv_reg above to choose an alternate
+ place from which to reload. If so, and it died, we need to remove
+ that death and move it to one of the insns we just made. */
+
+ if (oldequiv_reg != 0
+ && PRESERVE_DEATH_INFO_REGNO_P (true_regnum (oldequiv_reg)))
+ {
+ rtx prev, prev1;
+
+ for (prev = PREV_INSN (insn); prev && GET_CODE (prev) != CODE_LABEL;
+ prev = PREV_INSN (prev))
+ if (GET_RTX_CLASS (GET_CODE (prev)) == 'i'
+ && dead_or_set_p (prev, oldequiv_reg))
+ {
+ if (find_regno_note (prev, REG_DEAD, REGNO (oldequiv_reg)))
+ {
+ for (prev1 = this_reload_insn;
+ prev1; prev1 = PREV_INSN (prev1))
+	      if (GET_RTX_CLASS (GET_CODE (prev1)) == 'i'
+ && reg_overlap_mentioned_for_reload_p (oldequiv_reg,
+ PATTERN (prev1)))
+ {
+ REG_NOTES (prev1) = gen_rtx (EXPR_LIST, REG_DEAD,
+ oldequiv_reg,
+ REG_NOTES (prev1));
+ break;
+ }
+ remove_death (REGNO (oldequiv_reg), prev);
+ }
+ break;
+ }
+ }
+#endif
+
+ /* If we are reloading a register that was recently stored in with an
+ output-reload, see if we can prove there was
+ actually no need to store the old value in it. */
+
+ if (optimize && reload_inherited[j] && reload_spill_index[j] >= 0
+ && reload_in[j] != 0
+ && GET_CODE (reload_in[j]) == REG
+#if 0
+ /* There doesn't seem to be any reason to restrict this to pseudos
+ and doing so loses in the case where we are copying from a
+ register of the wrong class. */
+ && REGNO (reload_in[j]) >= FIRST_PSEUDO_REGISTER
+#endif
+ && spill_reg_store[reload_spill_index[j]] != 0
+ /* This is unsafe if some other reload uses the same reg first. */
+ && reload_reg_free_before_p (spill_regs[reload_spill_index[j]],
+ reload_opnum[j], reload_when_needed[j])
+ && dead_or_set_p (insn, reload_in[j])
+ /* This is unsafe if operand occurs more than once in current
+ insn. Perhaps some occurrences weren't reloaded. */
+ && count_occurrences (PATTERN (insn), reload_in[j]) == 1)
+ delete_output_reload (insn, j,
+ spill_reg_store[reload_spill_index[j]]);
+
+ /* Input-reloading is done. Now do output-reloading,
+ storing the value from the reload-register after the main insn
+ if reload_out[j] is nonzero.
+
+ ??? At some point we need to support handling output reloads of
+ JUMP_INSNs or insns that set cc0. */
+ old = reload_out[j];
+ if (old != 0
+ && reload_reg_rtx[j] != old
+ && reload_reg_rtx[j] != 0)
+ {
+ register rtx reloadreg = reload_reg_rtx[j];
+ register rtx second_reloadreg = 0;
+ rtx note, p;
+ enum machine_mode mode;
+ int special = 0;
+
+ /* An output operand that dies right away does need a reload,
+ but need not be copied from it. Show the new location in the
+ REG_UNUSED note. */
+ if ((GET_CODE (old) == REG || GET_CODE (old) == SCRATCH)
+ && (note = find_reg_note (insn, REG_UNUSED, old)) != 0)
+ {
+ XEXP (note, 0) = reload_reg_rtx[j];
+ continue;
+ }
+ else if (GET_CODE (old) == SCRATCH)
+ /* If we aren't optimizing, there won't be a REG_UNUSED note,
+ but we don't want to make an output reload. */
+ continue;
+
+#if 0
+ /* Strip off of OLD any size-increasing SUBREGs such as
+ (SUBREG:SI foo:QI 0). */
+
+ while (GET_CODE (old) == SUBREG && SUBREG_WORD (old) == 0
+ && (GET_MODE_SIZE (GET_MODE (old))
+ > GET_MODE_SIZE (GET_MODE (SUBREG_REG (old)))))
+ old = SUBREG_REG (old);
+#endif
+
+	  /* If INSN is a JUMP_INSN, we can't support output reloads yet.  */
+ if (GET_CODE (insn) == JUMP_INSN)
+ abort ();
+
+ push_to_sequence (output_reload_insns[reload_opnum[j]]);
+
+ /* Determine the mode to reload in.
+ See comments above (for input reloading). */
+
+ mode = GET_MODE (old);
+ if (mode == VOIDmode)
+ {
+ /* VOIDmode should never happen for an output. */
+ if (asm_noperands (PATTERN (insn)) < 0)
+ /* It's the compiler's fault. */
+ abort ();
+ error_for_asm (insn, "output operand is constant in `asm'");
+ /* Prevent crash--use something we know is valid. */
+ mode = word_mode;
+ old = gen_rtx (REG, mode, REGNO (reloadreg));
+ }
+
+ if (GET_MODE (reloadreg) != mode)
+ reloadreg = gen_rtx (REG, mode, REGNO (reloadreg));
+
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+
+ /* If we need two reload regs, set RELOADREG to the intermediate
+ one, since it will be stored into OUT. We might need a secondary
+ register only for an input reload, so check again here. */
+
+ if (reload_secondary_out_reload[j] >= 0)
+ {
+ rtx real_old = old;
+
+ if (GET_CODE (old) == REG && REGNO (old) >= FIRST_PSEUDO_REGISTER
+ && reg_equiv_mem[REGNO (old)] != 0)
+ real_old = reg_equiv_mem[REGNO (old)];
+
+	  if (SECONDARY_OUTPUT_RELOAD_CLASS (reload_reg_class[j],
+					     mode, real_old)
+	      != NO_REGS)
+ {
+ second_reloadreg = reloadreg;
+ reloadreg = reload_reg_rtx[reload_secondary_out_reload[j]];
+
+ /* See if RELOADREG is to be used as a scratch register
+ or as an intermediate register. */
+ if (reload_secondary_out_icode[j] != CODE_FOR_nothing)
+ {
+ emit_insn ((GEN_FCN (reload_secondary_out_icode[j])
+ (real_old, second_reloadreg, reloadreg)));
+ special = 1;
+ }
+ else
+ {
+ /* See if we need both a scratch and intermediate reload
+ register. */
+ int secondary_reload = reload_secondary_out_reload[j];
+ enum insn_code tertiary_icode
+ = reload_secondary_out_icode[secondary_reload];
+ rtx pat;
+
+ if (GET_MODE (reloadreg) != mode)
+ reloadreg = gen_rtx (REG, mode, REGNO (reloadreg));
+
+ if (tertiary_icode != CODE_FOR_nothing)
+ {
+ rtx third_reloadreg
+ = reload_reg_rtx[reload_secondary_out_reload[secondary_reload]];
+ pat = (GEN_FCN (tertiary_icode)
+ (reloadreg, second_reloadreg, third_reloadreg));
+ }
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* If we need a memory location to do the move, do it that way. */
+ else if (GET_CODE (reloadreg) == REG
+ && REGNO (reloadreg) < FIRST_PSEUDO_REGISTER
+ && SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (REGNO (reloadreg)),
+ REGNO_REG_CLASS (REGNO (second_reloadreg)),
+ GET_MODE (second_reloadreg)))
+ {
+ /* Get the memory to use and rewrite both registers
+ to its mode. */
+ rtx loc
+ = get_secondary_mem (reloadreg,
+ GET_MODE (second_reloadreg),
+ reload_opnum[j],
+ reload_when_needed[j]);
+ rtx tmp_reloadreg;
+
+ if (GET_MODE (loc) != GET_MODE (second_reloadreg))
+ second_reloadreg = gen_rtx (REG, GET_MODE (loc),
+ REGNO (second_reloadreg));
+
+ if (GET_MODE (loc) != GET_MODE (reloadreg))
+ tmp_reloadreg = gen_rtx (REG, GET_MODE (loc),
+ REGNO (reloadreg));
+ else
+ tmp_reloadreg = reloadreg;
+
+ emit_move_insn (loc, second_reloadreg);
+ pat = gen_move_insn (tmp_reloadreg, loc);
+ }
+#endif
+ else
+ pat = gen_move_insn (reloadreg, second_reloadreg);
+
+ emit_insn (pat);
+ }
+ }
+ }
+#endif
+
+ /* Output the last reload insn. */
+ if (! special)
+ {
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* If we need a memory location to do the move, do it that way. */
+ if (GET_CODE (old) == REG && REGNO (old) < FIRST_PSEUDO_REGISTER
+ && SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (REGNO (old)),
+ REGNO_REG_CLASS (REGNO (reloadreg)),
+ GET_MODE (reloadreg)))
+ {
+ /* Get the memory to use and rewrite both registers to
+ its mode. */
+ rtx loc = get_secondary_mem (old, GET_MODE (reloadreg),
+ reload_opnum[j],
+ reload_when_needed[j]);
+
+ if (GET_MODE (loc) != GET_MODE (reloadreg))
+ reloadreg = gen_rtx (REG, GET_MODE (loc),
+ REGNO (reloadreg));
+
+ if (GET_MODE (loc) != GET_MODE (old))
+ old = gen_rtx (REG, GET_MODE (loc), REGNO (old));
+
+ emit_insn (gen_move_insn (loc, reloadreg));
+ emit_insn (gen_move_insn (old, loc));
+ }
+ else
+#endif
+ emit_insn (gen_move_insn (old, reloadreg));
+ }
+
+#ifdef PRESERVE_DEATH_INFO_REGNO_P
+ /* If final will look at death notes for this reg,
+ put one on the last output-reload insn to use it. Similarly
+ for any secondary register. */
+ if (PRESERVE_DEATH_INFO_REGNO_P (REGNO (reloadreg)))
+ for (p = get_last_insn (); p; p = PREV_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && reg_overlap_mentioned_for_reload_p (reloadreg,
+ PATTERN (p)))
+ REG_NOTES (p) = gen_rtx (EXPR_LIST, REG_DEAD,
+ reloadreg, REG_NOTES (p));
+
+#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
+	  if (! special && second_reloadreg != 0
+	      && PRESERVE_DEATH_INFO_REGNO_P (REGNO (second_reloadreg)))
+ for (p = get_last_insn (); p; p = PREV_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
+ && reg_overlap_mentioned_for_reload_p (second_reloadreg,
+ PATTERN (p)))
+ REG_NOTES (p) = gen_rtx (EXPR_LIST, REG_DEAD,
+ second_reloadreg, REG_NOTES (p));
+#endif
+#endif
+ /* Look at all insns we emitted, just to be safe. */
+ for (p = get_insns (); p; p = NEXT_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ {
+ /* If this output reload doesn't come from a spill reg,
+ clear any memory of reloaded copies of the pseudo reg.
+ If this output reload comes from a spill reg,
+ reg_has_output_reload will make this do nothing. */
+ note_stores (PATTERN (p), forget_old_reloads_1);
+
+ if (reg_mentioned_p (reload_reg_rtx[j], PATTERN (p)))
+ store_insn = p;
+ }
+
+ output_reload_insns[reload_opnum[j]] = get_insns ();
+ end_sequence ();
+
+ }
+
+ if (reload_spill_index[j] >= 0)
+ new_spill_reg_store[reload_spill_index[j]] = store_insn;
+ }
+
+ /* Now write all the insns we made for reloads in the order expected by
+ the allocation functions. Prior to the insn being reloaded, we write
+ the following reloads:
+
+ RELOAD_FOR_OTHER_ADDRESS reloads for input addresses.
+
+ RELOAD_OTHER reloads.
+
+ For each operand, any RELOAD_FOR_INPUT_ADDRESS reloads followed by
+ the RELOAD_FOR_INPUT reload for the operand.
+
+     RELOAD_FOR_OPADDR_ADDR reloads.
+
+ RELOAD_FOR_OPERAND_ADDRESS reloads.
+
+ After the insn being reloaded, we write the following:
+
+ For each operand, any RELOAD_FOR_OUTPUT_ADDRESS reload followed by
+ the RELOAD_FOR_OUTPUT reload for that operand. */
+
+ emit_insns_before (other_input_address_reload_insns, before_insn);
+ emit_insns_before (other_input_reload_insns, before_insn);
+
+ for (j = 0; j < reload_n_operands; j++)
+ {
+ emit_insns_before (input_address_reload_insns[j], before_insn);
+ emit_insns_before (input_reload_insns[j], before_insn);
+ }
+
+ emit_insns_before (other_operand_reload_insns, before_insn);
+ emit_insns_before (operand_reload_insns, before_insn);
+
+ for (j = 0; j < reload_n_operands; j++)
+ {
+ emit_insns_before (output_address_reload_insns[j], following_insn);
+ emit_insns_before (output_reload_insns[j], following_insn);
+ }
+
+ /* Move death notes from INSN
+ to output-operand-address and output reload insns. */
+#ifdef PRESERVE_DEATH_INFO_REGNO_P
+ {
+ rtx insn1;
+ /* Loop over those insns, last ones first. */
+ for (insn1 = PREV_INSN (following_insn); insn1 != insn;
+ insn1 = PREV_INSN (insn1))
+ if (GET_CODE (insn1) == INSN && GET_CODE (PATTERN (insn1)) == SET)
+ {
+ rtx source = SET_SRC (PATTERN (insn1));
+ rtx dest = SET_DEST (PATTERN (insn1));
+
+ /* The note we will examine next. */
+ rtx reg_notes = REG_NOTES (insn);
+ /* The place that pointed to this note. */
+ rtx *prev_reg_note = &REG_NOTES (insn);
+
+ /* If the note is for something used in the source of this
+ reload insn, or in the output address, move the note. */
+ while (reg_notes)
+ {
+ rtx next_reg_notes = XEXP (reg_notes, 1);
+ if (REG_NOTE_KIND (reg_notes) == REG_DEAD
+ && GET_CODE (XEXP (reg_notes, 0)) == REG
+ && ((GET_CODE (dest) != REG
+ && reg_overlap_mentioned_for_reload_p (XEXP (reg_notes, 0),
+ dest))
+ || reg_overlap_mentioned_for_reload_p (XEXP (reg_notes, 0),
+ source)))
+ {
+ *prev_reg_note = next_reg_notes;
+ XEXP (reg_notes, 1) = REG_NOTES (insn1);
+ REG_NOTES (insn1) = reg_notes;
+ }
+ else
+ prev_reg_note = &XEXP (reg_notes, 1);
+
+ reg_notes = next_reg_notes;
+ }
+ }
+ }
+#endif
+
+ /* For all the spill regs newly reloaded in this instruction,
+ record what they were reloaded from, so subsequent instructions
+ can inherit the reloads.
+
+ Update spill_reg_store for the reloads of this insn.
+ Copy the elements that were updated in the loop above. */
+
+ for (j = 0; j < n_reloads; j++)
+ {
+ register int r = reload_order[j];
+ register int i = reload_spill_index[r];
+
+ /* I is nonneg if this reload used one of the spill regs.
+ If reload_reg_rtx[r] is 0, this is an optional reload
+ that we opted to ignore.
+
+ Also ignore reloads that don't reach the end of the insn,
+ since we will eventually see the one that does. */
+
+ if (i >= 0 && reload_reg_rtx[r] != 0
+ && reload_reg_reaches_end_p (spill_regs[i], reload_opnum[r],
+ reload_when_needed[r]))
+ {
+ /* First, clear out memory of what used to be in this spill reg.
+ If consecutive registers are used, clear them all. */
+ int nr
+ = HARD_REGNO_NREGS (spill_regs[i], GET_MODE (reload_reg_rtx[r]));
+ int k;
+
+ for (k = 0; k < nr; k++)
+ {
+ reg_reloaded_contents[spill_reg_order[spill_regs[i] + k]] = -1;
+ reg_reloaded_insn[spill_reg_order[spill_regs[i] + k]] = 0;
+ }
+
+ /* Maybe the spill reg contains a copy of reload_out. */
+ if (reload_out[r] != 0 && GET_CODE (reload_out[r]) == REG)
+ {
+ register int nregno = REGNO (reload_out[r]);
+ int nnr = (nregno >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (nregno,
+ GET_MODE (reload_reg_rtx[r])));
+
+ spill_reg_store[i] = new_spill_reg_store[i];
+ reg_last_reload_reg[nregno] = reload_reg_rtx[r];
+
+ /* If NREGNO is a hard register, it may occupy more than
+ one register. If it does, say what is in the
+ rest of the registers assuming that both registers
+ agree on how many words the object takes. If not,
+ invalidate the subsequent registers. */
+
+ if (nregno < FIRST_PSEUDO_REGISTER)
+ for (k = 1; k < nnr; k++)
+ reg_last_reload_reg[nregno + k]
+ = (nr == nnr ? gen_rtx (REG,
+ reg_raw_mode[REGNO (reload_reg_rtx[r]) + k],
+ REGNO (reload_reg_rtx[r]) + k)
+ : 0);
+
+ /* Now do the inverse operation. */
+ for (k = 0; k < nr; k++)
+ {
+ reg_reloaded_contents[spill_reg_order[spill_regs[i] + k]]
+ = (nregno >= FIRST_PSEUDO_REGISTER || nr != nnr ? nregno
+ : nregno + k);
+ reg_reloaded_insn[spill_reg_order[spill_regs[i] + k]] = insn;
+ }
+ }
+
+ /* Maybe the spill reg contains a copy of reload_in. Only do
+ something if there will not be an output reload for
+ the register being reloaded. */
+	  else if (reload_out[r] == 0
+		   && reload_in[r] != 0
+		   && ((GET_CODE (reload_in[r]) == REG
+			&& ! reg_has_output_reload[REGNO (reload_in[r])])
+		       || (GET_CODE (reload_in_reg[r]) == REG
+			   && ! reg_has_output_reload[REGNO (reload_in_reg[r])])))
+ {
+ register int nregno;
+ int nnr;
+
+ if (GET_CODE (reload_in[r]) == REG)
+ nregno = REGNO (reload_in[r]);
+ else
+ nregno = REGNO (reload_in_reg[r]);
+
+ nnr = (nregno >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (nregno,
+ GET_MODE (reload_reg_rtx[r])));
+
+ reg_last_reload_reg[nregno] = reload_reg_rtx[r];
+
+ if (nregno < FIRST_PSEUDO_REGISTER)
+ for (k = 1; k < nnr; k++)
+ reg_last_reload_reg[nregno + k]
+ = (nr == nnr ? gen_rtx (REG,
+ reg_raw_mode[REGNO (reload_reg_rtx[r]) + k],
+ REGNO (reload_reg_rtx[r]) + k)
+ : 0);
+
+ /* Unless we inherited this reload, show we haven't
+ recently done a store. */
+ if (! reload_inherited[r])
+ spill_reg_store[i] = 0;
+
+ for (k = 0; k < nr; k++)
+ {
+ reg_reloaded_contents[spill_reg_order[spill_regs[i] + k]]
+ = (nregno >= FIRST_PSEUDO_REGISTER || nr != nnr ? nregno
+ : nregno + k);
+ reg_reloaded_insn[spill_reg_order[spill_regs[i] + k]]
+ = insn;
+ }
+ }
+ }
+
+ /* The following if-statement was #if 0'd in 1.34 (or before...).
+ It's reenabled in 1.35 because supposedly nothing else
+ deals with this problem. */
+
+ /* If a register gets output-reloaded from a non-spill register,
+ that invalidates any previous reloaded copy of it.
+ But forget_old_reloads_1 won't get to see it, because
+ it thinks only about the original insn. So invalidate it here. */
+ if (i < 0 && reload_out[r] != 0 && GET_CODE (reload_out[r]) == REG)
+ {
+ register int nregno = REGNO (reload_out[r]);
+ int num_regs = HARD_REGNO_NREGS (nregno, GET_MODE (reload_out[r]));
+
+ while (num_regs-- > 0)
+ reg_last_reload_reg[nregno + num_regs] = 0;
+ }
+ }
+}
+
+/* Emit code to perform an input reload of IN to RELOADREG. IN is from
+ operand OPNUM with reload type TYPE.
+
+ Returns first insn emitted. */
+
+rtx
+gen_input_reload (reloadreg, in, opnum, type)
+ rtx reloadreg;
+ rtx in;
+ int opnum;
+ enum reload_type type;
+{
+ rtx last = get_last_insn ();
+
+ /* How to do this reload can get quite tricky. Normally, we are being
+ asked to reload a simple operand, such as a MEM, a constant, or a pseudo
+ register that didn't get a hard register. In that case we can just
+ call emit_move_insn.
+
+ We can also be asked to reload a PLUS that adds a register or a MEM to
+ another register, constant or MEM. This can occur during frame pointer
+ elimination and while reloading addresses. This case is handled by
+ trying to emit a single insn to perform the add. If it is not valid,
+ we use a two insn sequence.
+
+ Finally, we could be called to handle an 'o' constraint by putting
+ an address into a register. In that case, we first try to do this
+ with a named pattern of "reload_load_address". If no such pattern
+ exists, we just emit a SET insn and hope for the best (it will normally
+ be valid on machines that use 'o').
+
+ This entire process is made complex by two facts: reload will never
+ process the insns we generate here, so we must ensure that they will
+ fit their constraints; and parts of IN might be being reloaded
+ separately and replaced with spill registers. Because of this, we are,
+ in some sense, just guessing the right approach here. The one listed
+ above seems to work.
+
+ ??? At some point, this whole thing needs to be rethought. */
+
+ if (GET_CODE (in) == PLUS
+ && (GET_CODE (XEXP (in, 0)) == REG
+ || GET_CODE (XEXP (in, 0)) == MEM)
+ && (GET_CODE (XEXP (in, 1)) == REG
+ || CONSTANT_P (XEXP (in, 1))
+ || GET_CODE (XEXP (in, 1)) == MEM))
+ {
+ /* We need to compute the sum of a register or a MEM and another
+ register, constant, or MEM, and put it into the reload
+ register. The best possible way of doing this is if the machine
+ has a three-operand ADD insn that accepts the required operands.
+
+ The simplest approach is to try to generate such an insn and see if it
+ is recognized and matches its constraints. If so, it can be used.
+
+ It might be better not to actually emit the insn unless it is valid,
+ but we need to pass the insn as an operand to `recog' and
+ `insn_extract' and it is simpler to emit and then delete the insn if
+ not valid than to dummy things up. */
+
+ rtx op0, op1, tem, insn;
+ int code;
+
+ op0 = find_replacement (&XEXP (in, 0));
+ op1 = find_replacement (&XEXP (in, 1));
+
+ /* Since constraint checking is strict, commutativity won't be
+ checked, so we need to do that here to avoid spurious failure
+ if the add instruction is two-address and the second operand
+ of the add is the same as the reload reg, which is frequently
+ the case. If the insn would be A = B + A, rearrange it so
+ it will be A = A + B as constrain_operands expects. */
+
+ if (GET_CODE (XEXP (in, 1)) == REG
+ && REGNO (reloadreg) == REGNO (XEXP (in, 1)))
+ tem = op0, op0 = op1, op1 = tem;
+
+ if (op0 != XEXP (in, 0) || op1 != XEXP (in, 1))
+ in = gen_rtx (PLUS, GET_MODE (in), op0, op1);
+
+ insn = emit_insn (gen_rtx (SET, VOIDmode, reloadreg, in));
+ code = recog_memoized (insn);
+
+ if (code >= 0)
+ {
+ insn_extract (insn);
+ /* We want constrain operands to treat this insn strictly in
+ its validity determination, i.e., the way it would after reload
+ has completed. */
+ if (constrain_operands (code, 1))
+ return insn;
+ }
+
+ delete_insns_since (last);
+
+ /* If that failed, we must use a conservative two-insn sequence.
+ Use move to copy the constant, MEM, or pseudo register to the reload
+ register since "move" will be able to handle an arbitrary operand,
+ unlike add which can't, in general. Then add the registers.
+
+ If there is another way to do this for a specific machine, a
+ DEFINE_PEEPHOLE should be specified that recognizes the sequence
+ we emit below. */
+
+ if (CONSTANT_P (op1) || GET_CODE (op1) == MEM
+ || (GET_CODE (op1) == REG
+ && REGNO (op1) >= FIRST_PSEUDO_REGISTER))
+ tem = op0, op0 = op1, op1 = tem;
+
+ emit_insn (gen_move_insn (reloadreg, op0));
+
+ /* If OP0 and OP1 are the same, we can use RELOADREG for OP1.
+ This fixes a problem on the 32K where the stack pointer cannot
+ be used as an operand of an add insn. */
+
+ if (rtx_equal_p (op0, op1))
+ op1 = reloadreg;
+
+ insn = emit_insn (gen_add2_insn (reloadreg, op1));
+
+ /* If that failed, copy the address register to the reload register.
+ Then add the constant to the reload register. */
+
+ code = recog_memoized (insn);
+
+ if (code >= 0)
+ {
+ insn_extract (insn);
+ /* We want constrain operands to treat this insn strictly in
+ its validity determination, i.e., the way it would after reload
+ has completed. */
+ if (constrain_operands (code, 1))
+ return insn;
+ }
+
+ delete_insns_since (last);
+
+ emit_insn (gen_move_insn (reloadreg, op1));
+ emit_insn (gen_add2_insn (reloadreg, op0));
+ }
+
+#ifdef SECONDARY_MEMORY_NEEDED
+ /* If we need a memory location to do the move, do it that way. */
+ else if (GET_CODE (in) == REG && REGNO (in) < FIRST_PSEUDO_REGISTER
+ && SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (REGNO (in)),
+ REGNO_REG_CLASS (REGNO (reloadreg)),
+ GET_MODE (reloadreg)))
+ {
+ /* Get the memory to use and rewrite both registers to its mode. */
+ rtx loc = get_secondary_mem (in, GET_MODE (reloadreg), opnum, type);
+
+ if (GET_MODE (loc) != GET_MODE (reloadreg))
+ reloadreg = gen_rtx (REG, GET_MODE (loc), REGNO (reloadreg));
+
+ if (GET_MODE (loc) != GET_MODE (in))
+ in = gen_rtx (REG, GET_MODE (loc), REGNO (in));
+
+ emit_insn (gen_move_insn (loc, in));
+ emit_insn (gen_move_insn (reloadreg, loc));
+ }
+#endif
+
+ /* If IN is a simple operand, use gen_move_insn. */
+ else if (GET_RTX_CLASS (GET_CODE (in)) == 'o' || GET_CODE (in) == SUBREG)
+ emit_insn (gen_move_insn (reloadreg, in));
+
+#ifdef HAVE_reload_load_address
+ else if (HAVE_reload_load_address)
+ emit_insn (gen_reload_load_address (reloadreg, in));
+#endif
+
+ /* Otherwise, just write (set RELOADREG IN) and hope for the best. */
+ else
+ emit_insn (gen_rtx (SET, VOIDmode, reloadreg, in));
+
+ /* Return the first insn emitted.
+ We can not just return get_last_insn, because there may have
+ been multiple instructions emitted. Also note that gen_move_insn may
+ emit more than one insn itself, so we can not assume that there is one
+ insn emitted per emit_insn call. */
+
+ return last ? NEXT_INSN (last) : get_insns ();
+}
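+
+/* For illustration: during frame pointer elimination we may be asked to
+ reload something like
+
+ (set (reg:SI 3) (plus:SI (reg:SI fp) (const_int 8)))
+
+ If the machine recognizes a three-operand add for these operands, the
+ single SET emitted above survives; otherwise the fallback copies the
+ constant into the reload register and then adds the register,
+ schematically
+
+ (set (reg:SI 3) (const_int 8))
+ (set (reg:SI 3) (plus:SI (reg:SI 3) (reg:SI fp)))
+
+ (register numbers and modes here are purely illustrative). */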
+
+/* Delete a previously made output-reload
+ whose result we now believe is not needed.
+ First we double-check.
+
+ INSN is the insn now being processed.
+ OUTPUT_RELOAD_INSN is the insn of the output reload.
+ J is the reload-number for this insn. */
+
+static void
+delete_output_reload (insn, j, output_reload_insn)
+ rtx insn;
+ int j;
+ rtx output_reload_insn;
+{
+ register rtx i1;
+
+ /* Get the raw pseudo-register referred to. */
+
+ rtx reg = reload_in[j];
+ while (GET_CODE (reg) == SUBREG)
+ reg = SUBREG_REG (reg);
+
+ /* If the pseudo-reg we are reloading is no longer referenced
+ anywhere between the store into it and here,
+ and no jumps or labels intervene, then the value can get
+ here through the reload reg alone.
+ Otherwise, give up--return. */
+ for (i1 = NEXT_INSN (output_reload_insn);
+ i1 != insn; i1 = NEXT_INSN (i1))
+ {
+ if (GET_CODE (i1) == CODE_LABEL || GET_CODE (i1) == JUMP_INSN)
+ return;
+ if ((GET_CODE (i1) == INSN || GET_CODE (i1) == CALL_INSN)
+ && reg_mentioned_p (reg, PATTERN (i1)))
+ return;
+ }
+
+ if (cannot_omit_stores[REGNO (reg)])
+ return;
+
+ /* If this insn will store in the pseudo again,
+ the previous store can be removed. */
+ if (reload_out[j] == reload_in[j])
+ delete_insn (output_reload_insn);
+
+ /* See if the pseudo reg has been completely replaced
+ with reload regs. If so, delete the store insn
+ and forget we had a stack slot for the pseudo. */
+ else if (reg_n_deaths[REGNO (reg)] == 1
+ && reg_basic_block[REGNO (reg)] >= 0
+ && find_regno_note (insn, REG_DEAD, REGNO (reg)))
+ {
+ rtx i2;
+
+ /* We know that it was used only between here
+ and the beginning of the current basic block.
+ (We also know that the last use before INSN was
+ the output reload we are thinking of deleting, but never mind that.)
+ Search that range; see if any ref remains. */
+ for (i2 = PREV_INSN (insn); i2; i2 = PREV_INSN (i2))
+ {
+ rtx set = single_set (i2);
+
+ /* Uses which just store in the pseudo don't count,
+ since if they are the only uses, they are dead. */
+ if (set != 0 && SET_DEST (set) == reg)
+ continue;
+ if (GET_CODE (i2) == CODE_LABEL
+ || GET_CODE (i2) == JUMP_INSN)
+ break;
+ if ((GET_CODE (i2) == INSN || GET_CODE (i2) == CALL_INSN)
+ && reg_mentioned_p (reg, PATTERN (i2)))
+ /* Some other ref remains;
+ we can't do anything. */
+ return;
+ }
+
+ /* Delete the now-dead stores into this pseudo. */
+ for (i2 = PREV_INSN (insn); i2; i2 = PREV_INSN (i2))
+ {
+ rtx set = single_set (i2);
+
+ if (set != 0 && SET_DEST (set) == reg)
+ delete_insn (i2);
+ if (GET_CODE (i2) == CODE_LABEL
+ || GET_CODE (i2) == JUMP_INSN)
+ break;
+ }
+
+ /* For the debugging info,
+ say the pseudo lives in this reload reg. */
+ reg_renumber[REGNO (reg)] = REGNO (reload_reg_rtx[j]);
+ alter_reg (REGNO (reg), -1);
+ }
+}
+
+/* Output reload-insns to reload VALUE into RELOADREG.
+ VALUE is an autoincrement or autodecrement RTX whose operand
+ is a register or memory location;
+ so reloading involves incrementing that location.
+
+ INC_AMOUNT is the number to increment or decrement by (always positive).
+ This cannot be deduced from VALUE. */
+
+static void
+inc_for_reload (reloadreg, value, inc_amount)
+ rtx reloadreg;
+ rtx value;
+ int inc_amount;
+{
+ /* REG or MEM to be copied and incremented. */
+ rtx incloc = XEXP (value, 0);
+ /* Nonzero if increment after copying. */
+ int post = (GET_CODE (value) == POST_DEC || GET_CODE (value) == POST_INC);
+ rtx last;
+ rtx inc;
+ rtx add_insn;
+ int code;
+
+ /* No hard register is equivalent to this register after
+ inc/dec operation. If REG_LAST_RELOAD_REG were non-zero,
+ we could inc/dec that register as well (maybe even using it for
+ the source), but I'm not sure it's worth worrying about. */
+ if (GET_CODE (incloc) == REG)
+ reg_last_reload_reg[REGNO (incloc)] = 0;
+
+ if (GET_CODE (value) == PRE_DEC || GET_CODE (value) == POST_DEC)
+ inc_amount = - inc_amount;
+
+ inc = GEN_INT (inc_amount);
+
+ /* If this is post-increment, first copy the location to the reload reg. */
+ if (post)
+ emit_insn (gen_move_insn (reloadreg, incloc));
+
+ /* See if we can directly increment INCLOC. Use a method similar to that
+ in gen_input_reload. */
+
+ last = get_last_insn ();
+ add_insn = emit_insn (gen_rtx (SET, VOIDmode, incloc,
+ gen_rtx (PLUS, GET_MODE (incloc),
+ incloc, inc)));
+
+ code = recog_memoized (add_insn);
+ if (code >= 0)
+ {
+ insn_extract (add_insn);
+ if (constrain_operands (code, 1))
+ {
+ /* If this is a pre-increment and we have incremented the value
+ where it lives, copy the incremented value to RELOADREG to
+ be used as an address. */
+
+ if (! post)
+ emit_insn (gen_move_insn (reloadreg, incloc));
+
+ return;
+ }
+ }
+
+ delete_insns_since (last);
+
+ /* If we couldn't do the increment directly, we must increment in
+ RELOADREG. The way we do this depends on whether this is pre- or
+ post-increment. For pre-increment, copy INCLOC to the reload register,
+ increment it there, then save it back. */
+
+ if (! post)
+ {
+ emit_insn (gen_move_insn (reloadreg, incloc));
+ emit_insn (gen_add2_insn (reloadreg, inc));
+ emit_insn (gen_move_insn (incloc, reloadreg));
+ }
+ else
+ {
+ /* Postincrement.
+ Because this might be a jump insn or a compare, and because RELOADREG
+ may not be available after the insn in an input reload, we must do
+ the incrementation before the insn being reloaded for.
+
+ We have already copied INCLOC to RELOADREG. Increment the copy in
+ RELOADREG, save that back, then decrement RELOADREG so it has
+ the original value. */
+
+ emit_insn (gen_add2_insn (reloadreg, inc));
+ emit_insn (gen_move_insn (incloc, reloadreg));
+ emit_insn (gen_add2_insn (reloadreg, GEN_INT (-inc_amount)));
+ }
+
+ return;
+}
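+
+/* For illustration: reloading (post_inc:SI (reg:SI 4)) with INC_AMOUNT 4,
+ when INCLOC cannot be incremented directly, schematically emits
+
+ (set (reg:SI reload) (reg:SI 4)) ; copy original value
+ (set (reg:SI reload) (plus:SI (reg:SI reload) (const_int 4)))
+ (set (reg:SI 4) (reg:SI reload)) ; store incremented value
+ (set (reg:SI reload) (plus:SI (reg:SI reload) (const_int -4)))
+
+ so RELOADREG is left holding the pre-increment value that the insn
+ being reloaded expects (the register numbers are illustrative). */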
+
+/* Return 1 if we are certain that the constraint-string STRING allows
+ the hard register REG. Return 0 if we can't be sure of this. */
+
+static int
+constraint_accepts_reg_p (string, reg)
+ char *string;
+ rtx reg;
+{
+ int value = 0;
+ int regno = true_regnum (reg);
+ int c;
+
+ /* Initialize for first alternative. */
+ value = 0;
+ /* Check that each alternative contains `g' or `r'. */
+ while (1)
+ switch (c = *string++)
+ {
+ case 0:
+ /* End of the string: win only if the last alternative accepted REG. */
+ return value;
+ case ',':
+ /* If an alternative lacks `g' or `r', we lose. */
+ if (value == 0)
+ return 0;
+ /* Initialize for next alternative. */
+ value = 0;
+ break;
+ case 'g':
+ case 'r':
+ /* Any general reg wins for this alternative. */
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int) GENERAL_REGS], regno))
+ value = 1;
+ break;
+ default:
+ /* Any reg in specified class wins for this alternative. */
+ {
+ enum reg_class class = REG_CLASS_FROM_LETTER (c);
+
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], regno))
+ value = 1;
+ }
+ }
+}
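+
+/* For example, given a general register and the constraint string "r,m",
+ the first alternative accepts the register but the second does not, so
+ the function returns 0; given "r,g" every alternative accepts it and
+ the function returns 1. */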
+
+/* Return the number of places FIND appears within X, but don't count
+ an occurrence if some SET_DEST is FIND. */
+
+static int
+count_occurrences (x, find)
+ register rtx x, find;
+{
+ register int i, j;
+ register enum rtx_code code;
+ register char *format_ptr;
+ int count;
+
+ if (x == find)
+ return 1;
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ return 0;
+
+ case SET:
+ if (SET_DEST (x) == find)
+ return count_occurrences (SET_SRC (x), find);
+ break;
+ }
+
+ format_ptr = GET_RTX_FORMAT (code);
+ count = 0;
+
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ count += count_occurrences (XEXP (x, i), find);
+ break;
+
+ case 'E':
+ if (XVEC (x, i) != NULL)
+ {
+ for (j = 0; j < XVECLEN (x, i); j++)
+ count += count_occurrences (XVECEXP (x, i, j), find);
+ }
+ break;
+ }
+ }
+ return count;
+}
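+
+/* For example, counting occurrences of the (shared) rtx for pseudo 5 in
+
+ (set (reg:SI 5) (plus:SI (reg:SI 5) (const_int 1)))
+
+ yields 1: the use inside the PLUS counts, the SET_DEST does not. */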
diff --git a/gnu/usr.bin/cc/cc_int/reorg.c b/gnu/usr.bin/cc/cc_int/reorg.c
new file mode 100644
index 0000000..4082ad8
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/reorg.c
@@ -0,0 +1,4281 @@
+/* Perform instruction reorganizations for delay slot filling.
+ Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu).
+ Hacked by Michael Tiemann (tiemann@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Instruction reorganization pass.
+
+ This pass runs after register allocation and final jump
+ optimization. It should be the last pass to run before peephole.
+ It serves primarily to fill delay slots of insns, typically branch
+ and call insns. Other insns typically involve more complicated
+ interactions of data dependencies and resource constraints, and
+ are better handled by scheduling before register allocation (by the
+ function `schedule_insns').
+
+ The Branch Penalty is the number of extra cycles that are needed to
+ execute a branch insn. On an ideal machine, branches take a single
+ cycle, and the Branch Penalty is 0. Several RISC machines approach
+ branch delays differently:
+
+ The MIPS and AMD 29000 have a single branch delay slot. Most insns
+ (except other branches) can be used to fill this slot. When the
+ slot is filled, two insns execute in two cycles, reducing the
+ branch penalty to zero.
+
+ The Motorola 88000 conditionally exposes its branch delay slot,
+ so code is shorter when it is turned off, but will run faster
+ when useful insns are scheduled there.
+
+ The IBM ROMP has two forms of branch and call insns, both with and
+ without a delay slot. Much like the 88k, insns not using the delay
+ slot can be shorter (2 bytes vs. 4 bytes), but will run slower.
+
+ The SPARC always has a branch delay slot, but its effects can be
+ annulled when the branch is not taken. This means that failing to
+ find other sources of insns, we can hoist an insn from the branch
+ target that would only be safe to execute knowing that the branch
+ is taken.
+
+ The HP-PA always has a branch delay slot. For unconditional branches
+ its effects can be annulled when the branch is taken. The effects
+ of the delay slot in a conditional branch can be nullified for forward
+ taken branches, or for untaken backward branches. This means
+ we can hoist insns from the fall-through path for forward branches or
+ steal insns from the target of backward branches.
+
+ Three techniques for filling delay slots have been implemented so far:
+
+ (1) `fill_simple_delay_slots' is the simplest, most efficient way
+ to fill delay slots. This pass first looks for insns which come
+ from before the branch and which are safe to execute after the
+ branch. Then it searches after the insn requiring delay slots or,
+ in the case of a branch, for insns that are after the point at
+ which the branch merges into the fallthrough code, if such a point
+ exists. When such insns are found, the branch penalty decreases
+ and no code expansion takes place.
+
+ (2) `fill_eager_delay_slots' is more complicated: it is used for
+ scheduling conditional jumps, or for scheduling jumps which cannot
+ be filled using (1). A machine need not have annulled jumps to use
+ this strategy, but it helps (by keeping more options open).
+ `fill_eager_delay_slots' tries to guess the direction the branch
+ will go; if it guesses right 100% of the time, it can reduce the
+ branch penalty as much as `fill_simple_delay_slots' does. If it
+ guesses wrong 100% of the time, it might as well schedule nops (or
+ on the m88k, unexpose the branch slot). When
+ `fill_eager_delay_slots' takes insns from the fall-through path of
+ the jump, usually there is no code expansion; when it takes insns
+ from the branch target, there is code expansion if it is not the
+ only way to reach that target.
+
+ (3) `relax_delay_slots' uses a set of rules to simplify code that
+ has been reorganized by (1) and (2). It finds cases where
+ conditional test can be eliminated, jumps can be threaded, extra
+ insns can be eliminated, etc. It is the job of (1) and (2) to do a
+ good job of scheduling locally; `relax_delay_slots' takes care of
+ making the various individual schedules work well together. It is
+ especially tuned to handle the control flow interactions of branch
+ insns. It does nothing for insns with delay slots that do not
+ branch.
+
+ On machines that use CC0, we are very conservative. We will not make
+ a copy of an insn involving CC0 since we want to maintain a 1-1
+ correspondence between the insn that sets and uses CC0. The insns are
+ allowed to be separated by placing an insn that sets CC0 (but not an insn
+ that uses CC0; we could do this, but it doesn't seem worthwhile) in a
+ delay slot. In that case, we point each insn at the other with REG_CC_USER
+ and REG_CC_SETTER notes. Note that these restrictions affect very few
+ machines because most RISC machines with delay slots will not use CC0
+ (the RT is the only known exception at this point).
+
+ Not yet implemented:
+
+ The Acorn Risc Machine can conditionally execute most insns, so
+ it is profitable to move single insns into a position to execute
+ based on the condition code of the previous insn.
+
+ The HP-PA can conditionally nullify insns, providing a similar
+ effect to the ARM, differing mostly in which insn is "in charge". */
+
+#include <stdio.h>
+#include "config.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "hard-reg-set.h"
+#include "basic-block.h"
+#include "regs.h"
+#include "insn-flags.h"
+#include "recog.h"
+#include "flags.h"
+#include "output.h"
+#include "obstack.h"
+#include "insn-attr.h"
+
+#ifdef DELAY_SLOTS
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+#ifndef ANNUL_IFTRUE_SLOTS
+#define eligible_for_annul_true(INSN, SLOTS, TRIAL, FLAGS) 0
+#endif
+#ifndef ANNUL_IFFALSE_SLOTS
+#define eligible_for_annul_false(INSN, SLOTS, TRIAL, FLAGS) 0
+#endif
+
+/* Insns which have delay slots that have not yet been filled. */
+
+static struct obstack unfilled_slots_obstack;
+static rtx *unfilled_firstobj;
+
+/* Define macros to refer to the first and last slot containing unfilled
+ insns. These are used because the list may move and its address
+ should be recomputed at each use. */
+
+#define unfilled_slots_base \
+ ((rtx *) obstack_base (&unfilled_slots_obstack))
+
+#define unfilled_slots_next \
+ ((rtx *) obstack_next_free (&unfilled_slots_obstack))
+
+/* This structure is used to indicate which hardware resources are set or
+ needed by insns so far. */
+
+struct resources
+{
+ char memory; /* Insn sets or needs a memory location. */
+ char volatil; /* Insn sets or needs a volatile memory loc. */
+ char cc; /* Insn sets or needs the condition codes. */
+ HARD_REG_SET regs; /* Which registers are set or needed. */
+};
+
+/* Macro to clear all resources. */
+#define CLEAR_RESOURCE(RES) \
+ do { (RES)->memory = (RES)->volatil = (RES)->cc = 0; \
+ CLEAR_HARD_REG_SET ((RES)->regs); } while (0)
+
+/* Indicates what resources are required at the beginning of the epilogue. */
+static struct resources start_of_epilogue_needs;
+
+/* Indicates what resources are required at function end. */
+static struct resources end_of_function_needs;
+
+/* Points to the label before the end of the function. */
+static rtx end_of_function_label;
+
+/* This structure is used to record liveness information at the targets or
+ fallthrough insns of branches. We will most likely need the information
+ at targets again, so save it in a hash table rather than recomputing it
+ each time. */
+
+struct target_info
+{
+ int uid; /* INSN_UID of target. */
+ struct target_info *next; /* Next info for same hash bucket. */
+ HARD_REG_SET live_regs; /* Registers live at target. */
+ int block; /* Basic block number containing target. */
+ int bb_tick; /* Generation count of basic block info. */
+};
+
+#define TARGET_HASH_PRIME 257
+
+/* Define the hash table itself. */
+static struct target_info **target_hash_table;
+
+/* For each basic block, we maintain a generation number of its basic
+ block info, which is updated each time we move an insn from the
+ target of a jump. This is the generation number indexed by block
+ number. */
+
+static int *bb_ticks;
+
+/* Mapping between INSN_UID's and position in the code since INSN_UID's do
+ not always monotonically increase. */
+static int *uid_to_ruid;
+
+/* Highest valid index in `uid_to_ruid'. */
+static int max_uid;
+
+static void mark_referenced_resources PROTO((rtx, struct resources *, int));
+static void mark_set_resources PROTO((rtx, struct resources *, int, int));
+static int stop_search_p PROTO((rtx, int));
+static int resource_conflicts_p PROTO((struct resources *,
+ struct resources *));
+static int insn_references_resource_p PROTO((rtx, struct resources *, int));
+static int insn_sets_resources_p PROTO((rtx, struct resources *, int));
+static rtx find_end_label PROTO((void));
+static rtx emit_delay_sequence PROTO((rtx, rtx, int, int));
+static rtx add_to_delay_list PROTO((rtx, rtx));
+static void delete_from_delay_slot PROTO((rtx));
+static void delete_scheduled_jump PROTO((rtx));
+static void note_delay_statistics PROTO((int, int));
+static rtx optimize_skip PROTO((rtx));
+static int get_jump_flags PROTO((rtx, rtx));
+static int rare_destination PROTO((rtx));
+static int mostly_true_jump PROTO((rtx, rtx));
+static rtx get_branch_condition PROTO((rtx, rtx));
+static int condition_dominates_p PROTO((rtx, rtx));
+static rtx steal_delay_list_from_target PROTO((rtx, rtx, rtx, rtx,
+ struct resources *,
+ struct resources *,
+ struct resources *,
+ int, int *, int *, rtx *));
+static rtx steal_delay_list_from_fallthrough PROTO((rtx, rtx, rtx, rtx,
+ struct resources *,
+ struct resources *,
+ struct resources *,
+ int, int *, int *));
+static void try_merge_delay_insns PROTO((rtx, rtx));
+static int redundant_insn_p PROTO((rtx, rtx, rtx));
+static int own_thread_p PROTO((rtx, rtx, int));
+static int find_basic_block PROTO((rtx));
+static void update_block PROTO((rtx, rtx));
+static int reorg_redirect_jump PROTO((rtx, rtx));
+static void update_reg_dead_notes PROTO((rtx, rtx));
+static void update_live_status PROTO((rtx, rtx));
+static rtx next_insn_no_annul PROTO((rtx));
+static void mark_target_live_regs PROTO((rtx, struct resources *));
+static void fill_simple_delay_slots PROTO((rtx, int));
+static rtx fill_slots_from_thread PROTO((rtx, rtx, rtx, rtx, int, int,
+ int, int, int, int *));
+static void fill_eager_delay_slots PROTO((rtx));
+static void relax_delay_slots PROTO((rtx));
+static void make_return_insns PROTO((rtx));
+static int redirect_with_delay_slots_safe_p PROTO ((rtx, rtx, rtx));
+static int redirect_with_delay_list_safe_p PROTO ((rtx, rtx, rtx));
+
+/* Given X, some rtl, and RES, a pointer to a `struct resources', mark
+ which resources are referenced by the insn. If INCLUDE_DELAYED_EFFECTS
+ is TRUE, resources used by the called routine will be included for
+ CALL_INSNs. */
+
+static void
+mark_referenced_resources (x, res, include_delayed_effects)
+ register rtx x;
+ register struct resources *res;
+ register int include_delayed_effects;
+{
+ register enum rtx_code code = GET_CODE (x);
+ register int i, j;
+ register char *format_ptr;
+
+ /* Handle leaf items for which we set resource flags. Also, special-case
+ CALL, SET and CLOBBER operators. */
+ switch (code)
+ {
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case PC:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return;
+
+ case SUBREG:
+ if (GET_CODE (SUBREG_REG (x)) != REG)
+ mark_referenced_resources (SUBREG_REG (x), res, 0);
+ else
+ {
+ int regno = REGNO (SUBREG_REG (x)) + SUBREG_WORD (x);
+ int last_regno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
+ for (i = regno; i < last_regno; i++)
+ SET_HARD_REG_BIT (res->regs, i);
+ }
+ return;
+
+ case REG:
+ for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
+ SET_HARD_REG_BIT (res->regs, REGNO (x) + i);
+ return;
+
+ case MEM:
+ /* If this memory shouldn't change, it really isn't referencing
+ memory. */
+ if (! RTX_UNCHANGING_P (x))
+ res->memory = 1;
+ res->volatil = MEM_VOLATILE_P (x);
+
+ /* Mark registers used to access memory. */
+ mark_referenced_resources (XEXP (x, 0), res, 0);
+ return;
+
+ case CC0:
+ res->cc = 1;
+ return;
+
+ case UNSPEC_VOLATILE:
+ case ASM_INPUT:
+ /* Traditional asm's are always volatile. */
+ res->volatil = 1;
+ return;
+
+ case ASM_OPERANDS:
+ res->volatil = MEM_VOLATILE_P (x);
+
+ /* For all ASM_OPERANDS, we must traverse the vector of input operands.
+ We can not just fall through here since then we would be confused
+ by the ASM_INPUT rtx inside ASM_OPERANDS, which, unlike its normal
+ usage, does not indicate a traditional asm. */
+
+ for (i = 0; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
+ mark_referenced_resources (ASM_OPERANDS_INPUT (x, i), res, 0);
+ return;
+
+ case CALL:
+ /* The first operand will be a (MEM (xxx)) but doesn't really reference
+ memory. The second operand may be referenced, though. */
+ mark_referenced_resources (XEXP (XEXP (x, 0), 0), res, 0);
+ mark_referenced_resources (XEXP (x, 1), res, 0);
+ return;
+
+ case SET:
+ /* Usually, the first operand of SET is set, not referenced. But
+ registers used to access memory are referenced. SET_DEST is
+ also referenced if it is a ZERO_EXTRACT or SIGN_EXTRACT. */
+
+ mark_referenced_resources (SET_SRC (x), res, 0);
+
+ x = SET_DEST (x);
+ if (GET_CODE (x) == SIGN_EXTRACT || GET_CODE (x) == ZERO_EXTRACT)
+ mark_referenced_resources (x, res, 0);
+ else if (GET_CODE (x) == SUBREG)
+ x = SUBREG_REG (x);
+ if (GET_CODE (x) == MEM)
+ mark_referenced_resources (XEXP (x, 0), res, 0);
+ return;
+
+ case CLOBBER:
+ return;
+
+ case CALL_INSN:
+ if (include_delayed_effects)
+ {
+ /* A CALL references memory, the frame pointer if it exists, the
+ stack pointer, any global registers and any registers given in
+ USE insns immediately in front of the CALL.
+
+ However, we may have moved some of the parameter loading insns
+ into the delay slot of this CALL. If so, the USE's for them
+ don't count and should be skipped. */
+ rtx insn = PREV_INSN (x);
+ rtx sequence = 0;
+ int seq_size = 0;
+ int i;
+
+ /* If we are part of a delay slot sequence, point at the SEQUENCE. */
+ if (NEXT_INSN (insn) != x)
+ {
+ sequence = PATTERN (NEXT_INSN (insn));
+ seq_size = XVECLEN (sequence, 0);
+ if (GET_CODE (sequence) != SEQUENCE)
+ abort ();
+ }
+
+ res->memory = 1;
+ SET_HARD_REG_BIT (res->regs, STACK_POINTER_REGNUM);
+ if (frame_pointer_needed)
+ {
+ SET_HARD_REG_BIT (res->regs, FRAME_POINTER_REGNUM);
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ SET_HARD_REG_BIT (res->regs, HARD_FRAME_POINTER_REGNUM);
+#endif
+ }
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i])
+ SET_HARD_REG_BIT (res->regs, i);
+
+ {
+ rtx link;
+
+ for (link = CALL_INSN_FUNCTION_USAGE (x);
+ link;
+ link = XEXP (link, 1))
+ if (GET_CODE (XEXP (link, 0)) == USE)
+ {
+ for (i = 1; i < seq_size; i++)
+ {
+ rtx slot_pat = PATTERN (XVECEXP (sequence, 0, i));
+ if (GET_CODE (slot_pat) == SET
+ && rtx_equal_p (SET_DEST (slot_pat),
+ SET_DEST (XEXP (link, 0))))
+ break;
+ }
+ if (i >= seq_size)
+ mark_referenced_resources (SET_DEST (XEXP (link, 0)),
+ res, 0);
+ }
+ }
+ }
+
+ /* ... fall through to other INSN processing ... */
+
+ case INSN:
+ case JUMP_INSN:
+
+#ifdef INSN_REFERENCES_ARE_DELAYED
+ if (! include_delayed_effects
+ && INSN_REFERENCES_ARE_DELAYED (x))
+ return;
+#endif
+
+ /* No special processing, just speed up. */
+ mark_referenced_resources (PATTERN (x), res, include_delayed_effects);
+ return;
+ }
+
+ /* Process each sub-expression and flag what it needs. */
+ format_ptr = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ switch (*format_ptr++)
+ {
+ case 'e':
+ mark_referenced_resources (XEXP (x, i), res, include_delayed_effects);
+ break;
+
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_referenced_resources (XVECEXP (x, i, j), res,
+ include_delayed_effects);
+ break;
+ }
+}
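+
+/* For example, (set (reg:SI 1) (mem:SI (reg:SI 2))) is marked as
+ referencing memory and register 2, but not register 1, since a SET's
+ destination is written rather than read unless it is a MEM or a
+ bit-field extraction. */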
+
+/* Given X, a part of an insn, and a pointer to a `struct resources', RES,
+ indicate which resources are modified by the insn. If
+ INCLUDE_DELAYED_EFFECTS is nonzero, also mark resources potentially
+ set by the called routine.
+
+ If IN_DEST is nonzero, it means we are inside a SET. Otherwise,
+ objects are being referenced instead of set.
+
+ We never mark the insn as modifying the condition code unless it explicitly
+ SETs CC0 even though this is not totally correct. The reason for this is
+ that we require a SET of CC0 to immediately precede the reference to CC0.
+ So if some other insn sets CC0 as a side-effect, we know it cannot affect
+ our computation and thus may be placed in a delay slot. */
+
+static void
+mark_set_resources (x, res, in_dest, include_delayed_effects)
+ register rtx x;
+ register struct resources *res;
+ int in_dest;
+ int include_delayed_effects;
+{
+ register enum rtx_code code;
+ register int i, j;
+ register char *format_ptr;
+
+ restart:
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case NOTE:
+ case BARRIER:
+ case CODE_LABEL:
+ case USE:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST:
+ case PC:
+ /* These don't set any resources. */
+ return;
+
+ case CC0:
+ if (in_dest)
+ res->cc = 1;
+ return;
+
+ case CALL_INSN:
+ /* Called routine modifies the condition code, memory, any registers
+ that aren't saved across calls, global registers and anything
+ explicitly CLOBBERed immediately after the CALL_INSN. */
+
+ if (include_delayed_effects)
+ {
+ rtx next = NEXT_INSN (x);
+ rtx prev = PREV_INSN (x);
+ rtx link;
+
+ res->cc = res->memory = 1;
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i] || global_regs[i])
+ SET_HARD_REG_BIT (res->regs, i);
+
+ /* If X is part of a delay slot sequence, then NEXT should be
+ the first insn after the sequence. */
+ if (NEXT_INSN (prev) != x)
+ next = NEXT_INSN (NEXT_INSN (prev));
+
+ for (link = CALL_INSN_FUNCTION_USAGE (x);
+ link; link = XEXP (link, 1))
+ if (GET_CODE (XEXP (link, 0)) == CLOBBER)
+ mark_set_resources (SET_DEST (XEXP (link, 0)), res, 1, 0);
+
+ /* Check for a NOTE_INSN_SETJMP. If it exists, then we must
+ assume that this call can clobber any register. */
+ if (next && GET_CODE (next) == NOTE
+ && NOTE_LINE_NUMBER (next) == NOTE_INSN_SETJMP)
+ SET_HARD_REG_SET (res->regs);
+ }
+
+ /* ... and also what its RTL says it modifies, if anything. */
+
+ case JUMP_INSN:
+ case INSN:
+
+ /* An insn consisting of just a CLOBBER (or USE) is just for flow
+ and doesn't actually do anything, so we ignore it. */
+
+#ifdef INSN_SETS_ARE_DELAYED
+ if (! include_delayed_effects
+ && INSN_SETS_ARE_DELAYED (x))
+ return;
+#endif
+
+ x = PATTERN (x);
+ if (GET_CODE (x) != USE && GET_CODE (x) != CLOBBER)
+ goto restart;
+ return;
+
+ case SET:
+ /* If the source of a SET is a CALL, this is actually done by
+ the called routine. So only include it if we are to include the
+ effects of the called routine. */
+
+ mark_set_resources (SET_DEST (x), res,
+ (include_delayed_effects
+ || GET_CODE (SET_SRC (x)) != CALL),
+ 0);
+
+ mark_set_resources (SET_SRC (x), res, 0, 0);
+ return;
+
+ case CLOBBER:
+ mark_set_resources (XEXP (x, 0), res, 1, 0);
+ return;
+
+ case SEQUENCE:
+ for (i = 0; i < XVECLEN (x, 0); i++)
+ if (! (INSN_ANNULLED_BRANCH_P (XVECEXP (x, 0, 0))
+ && INSN_FROM_TARGET_P (XVECEXP (x, 0, i))))
+ mark_set_resources (XVECEXP (x, 0, i), res, 0,
+ include_delayed_effects);
+ return;
+
+ case POST_INC:
+ case PRE_INC:
+ case POST_DEC:
+ case PRE_DEC:
+ mark_set_resources (XEXP (x, 0), res, 1, 0);
+ return;
+
+ case ZERO_EXTRACT:
+ mark_set_resources (XEXP (x, 0), res, in_dest, 0);
+ mark_set_resources (XEXP (x, 1), res, 0, 0);
+ mark_set_resources (XEXP (x, 2), res, 0, 0);
+ return;
+
+ case MEM:
+ if (in_dest)
+ {
+ res->memory = 1;
+ res->volatil = MEM_VOLATILE_P (x);
+ }
+
+ mark_set_resources (XEXP (x, 0), res, 0, 0);
+ return;
+
+ case REG:
+ if (in_dest)
+ for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
+ SET_HARD_REG_BIT (res->regs, REGNO (x) + i);
+ return;
+ }
+
+ /* Process each sub-expression and flag what it needs. */
+ format_ptr = GET_RTX_FORMAT (code);
+ for (i = 0; i < GET_RTX_LENGTH (code); i++)
+ switch (*format_ptr++)
+ {
+ case 'e':
+ mark_set_resources (XEXP (x, i), res, in_dest, include_delayed_effects);
+ break;
+
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ mark_set_resources (XVECEXP (x, i, j), res, in_dest,
+ include_delayed_effects);
+ break;
+ }
+}
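+
+/* For example, (set (mem:SI (reg:SI 2)) (reg:SI 1)) is marked as setting
+ memory but no registers: register 2 only supplies the address and
+ register 1 is only read. A plain (set (reg:SI 1) (reg:SI 2)) marks
+ register 1 as set. */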
+
+/* Return TRUE if this insn should stop the search for insns to fill delay
+ slots. LABELS_P indicates that labels should terminate the search.
+ In all cases, jumps terminate the search. */
+
+static int
+stop_search_p (insn, labels_p)
+ rtx insn;
+ int labels_p;
+{
+ if (insn == 0)
+ return 1;
+
+ switch (GET_CODE (insn))
+ {
+ case NOTE:
+ case CALL_INSN:
+ return 0;
+
+ case CODE_LABEL:
+ return labels_p;
+
+ case JUMP_INSN:
+ case BARRIER:
+ return 1;
+
+ case INSN:
+ /* OK unless it contains a delay slot or is an `asm' insn of some type.
+ We don't know anything about these. */
+ return (GET_CODE (PATTERN (insn)) == SEQUENCE
+ || GET_CODE (PATTERN (insn)) == ASM_INPUT
+ || asm_noperands (PATTERN (insn)) >= 0);
+
+ default:
+ abort ();
+ }
+}
+
+/* Return TRUE if any resources are marked in both RES1 and RES2 or if either
+ resource set contains a volatile memory reference. Otherwise, return FALSE. */
+
+static int
+resource_conflicts_p (res1, res2)
+ struct resources *res1, *res2;
+{
+ if ((res1->cc && res2->cc) || (res1->memory && res2->memory)
+ || res1->volatil || res2->volatil)
+ return 1;
+
+#ifdef HARD_REG_SET
+ return (res1->regs & res2->regs) != HARD_CONST (0);
+#else
+ {
+ int i;
+
+ for (i = 0; i < HARD_REG_SET_LONGS; i++)
+ if ((res1->regs[i] & res2->regs[i]) != 0)
+ return 1;
+ return 0;
+ }
+#endif
+}
+
+/* Return TRUE if any resource marked in RES, a `struct resources', is
+ referenced by INSN. If INCLUDE_DELAYED_EFFECTS is set, resources used
+ by the called routine count as well.
+
+ We compute this by computing all the resources referenced by INSN and
+ seeing if this conflicts with RES. It might be faster to directly check
+ ourselves, and this is the way it used to work, but it means duplicating
+ a large block of complex code. */
+
+static int
+insn_references_resource_p (insn, res, include_delayed_effects)
+ register rtx insn;
+ register struct resources *res;
+ int include_delayed_effects;
+{
+ struct resources insn_res;
+
+ CLEAR_RESOURCE (&insn_res);
+ mark_referenced_resources (insn, &insn_res, include_delayed_effects);
+ return resource_conflicts_p (&insn_res, res);
+}
+
+/* Return TRUE if INSN modifies resources that are marked in RES.
+ INCLUDE_DELAYED_EFFECTS is set if the effects of the called routine should
+ be included. CC0 is only modified if it is explicitly set; see comments
+ in front of mark_set_resources for details. */
+
+static int
+insn_sets_resource_p (insn, res, include_delayed_effects)
+ register rtx insn;
+ register struct resources *res;
+ int include_delayed_effects;
+{
+ struct resources insn_sets;
+
+ CLEAR_RESOURCE (&insn_sets);
+ mark_set_resources (insn, &insn_sets, 0, include_delayed_effects);
+ return resource_conflicts_p (&insn_sets, res);
+}
+
+/* Find a label at the end of the function or before a RETURN. If there is
+ none, make one. */
+
+static rtx
+find_end_label ()
+{
+ rtx insn;
+
+ /* If we found one previously, return it. */
+ if (end_of_function_label)
+ return end_of_function_label;
+
+ /* Otherwise, see if there is a label at the end of the function. If there
+ is, it must be that RETURN insns aren't needed, so that is our return
+ label and we don't have to do anything else. */
+
+ insn = get_last_insn ();
+ while (GET_CODE (insn) == NOTE
+ || (GET_CODE (insn) == INSN
+ && (GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)))
+ insn = PREV_INSN (insn);
+
+ /* When a target threads its epilogue we might already have a
+ suitable return insn. If so, put a label before it for the
+ end_of_function_label. */
+ if (GET_CODE (insn) == BARRIER
+ && GET_CODE (PREV_INSN (insn)) == JUMP_INSN
+ && GET_CODE (PATTERN (PREV_INSN (insn))) == RETURN)
+ {
+ rtx temp = PREV_INSN (PREV_INSN (insn));
+ end_of_function_label = gen_label_rtx ();
+ LABEL_NUSES (end_of_function_label) = 0;
+
+ /* Put the label before any USE insns that may precede the RETURN insn. */
+ while (GET_CODE (temp) == USE)
+ temp = PREV_INSN (temp);
+
+ emit_label_after (end_of_function_label, temp);
+ }
+
+ else if (GET_CODE (insn) == CODE_LABEL)
+ end_of_function_label = insn;
+ else
+ {
+ /* Otherwise, make a new label and emit a RETURN and BARRIER,
+ if needed. */
+ end_of_function_label = gen_label_rtx ();
+ LABEL_NUSES (end_of_function_label) = 0;
+ emit_label (end_of_function_label);
+#ifdef HAVE_return
+ if (HAVE_return)
+ {
+ /* The return we make may have delay slots too. */
+ rtx insn = gen_return ();
+ insn = emit_jump_insn (insn);
+ emit_barrier ();
+ if (num_delay_slots (insn) > 0)
+ obstack_ptr_grow (&unfilled_slots_obstack, insn);
+ }
+#endif
+ }
+
+ /* Show one additional use for this label so it won't go away until
+ we are done. */
+ ++LABEL_NUSES (end_of_function_label);
+
+ return end_of_function_label;
+}
+
+/* Put INSN and LIST together in a SEQUENCE rtx of LENGTH, and replace
+ the pattern of INSN with the SEQUENCE.
+
+ Chain the insns so that NEXT_INSN of each insn in the sequence points to
+ the next and NEXT_INSN of the last insn in the sequence points to
+ the first insn after the sequence. Similarly for PREV_INSN. This makes
+ it easier to scan all insns.
+
+ Returns the SEQUENCE that replaces INSN. */
+
+static rtx
+emit_delay_sequence (insn, list, length, avail)
+ rtx insn;
+ rtx list;
+ int length;
+ int avail;
+{
+ register int i = 1;
+ register rtx li;
+ int had_barrier = 0;
+
+ /* Allocate the rtvec to hold the insns and the SEQUENCE. */
+ rtvec seqv = rtvec_alloc (length + 1);
+ rtx seq = gen_rtx (SEQUENCE, VOIDmode, seqv);
+ rtx seq_insn = make_insn_raw (seq);
+ rtx first = get_insns ();
+ rtx last = get_last_insn ();
+
+ /* Make a copy of the insn having delay slots. */
+ rtx delay_insn = copy_rtx (insn);
+
+ /* If INSN is followed by a BARRIER, delete the BARRIER since it will only
+ confuse further processing. Update LAST in case it was the last insn.
+ We will put the BARRIER back in later. */
+ if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == BARRIER)
+ {
+ delete_insn (NEXT_INSN (insn));
+ last = get_last_insn ();
+ had_barrier = 1;
+ }
+
+ /* Splice our SEQUENCE into the insn stream where INSN used to be. */
+ NEXT_INSN (seq_insn) = NEXT_INSN (insn);
+ PREV_INSN (seq_insn) = PREV_INSN (insn);
+
+ if (insn == last)
+ set_new_first_and_last_insn (first, seq_insn);
+ else
+ PREV_INSN (NEXT_INSN (seq_insn)) = seq_insn;
+
+ if (insn == first)
+ set_new_first_and_last_insn (seq_insn, last);
+ else
+ NEXT_INSN (PREV_INSN (seq_insn)) = seq_insn;
+
+ /* Build our SEQUENCE and rebuild the insn chain. */
+ XVECEXP (seq, 0, 0) = delay_insn;
+ INSN_DELETED_P (delay_insn) = 0;
+ PREV_INSN (delay_insn) = PREV_INSN (seq_insn);
+
+ for (li = list; li; li = XEXP (li, 1), i++)
+ {
+ rtx tem = XEXP (li, 0);
+ rtx note;
+
+ /* Show that this copy of the insn isn't deleted. */
+ INSN_DELETED_P (tem) = 0;
+
+ XVECEXP (seq, 0, i) = tem;
+ PREV_INSN (tem) = XVECEXP (seq, 0, i - 1);
+ NEXT_INSN (XVECEXP (seq, 0, i - 1)) = tem;
+
+ /* Remove any REG_DEAD notes because we can't rely on them now
+ that the insn has been moved. */
+ for (note = REG_NOTES (tem); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD)
+ XEXP (note, 0) = const0_rtx;
+ }
+
+ NEXT_INSN (XVECEXP (seq, 0, length)) = NEXT_INSN (seq_insn);
+
+ /* If the previous insn is a SEQUENCE, update the NEXT_INSN pointer on the
+ last insn in that SEQUENCE to point to us. Similarly for the first
+ insn in the following insn if it is a SEQUENCE. */
+
+ if (PREV_INSN (seq_insn) && GET_CODE (PREV_INSN (seq_insn)) == INSN
+ && GET_CODE (PATTERN (PREV_INSN (seq_insn))) == SEQUENCE)
+ NEXT_INSN (XVECEXP (PATTERN (PREV_INSN (seq_insn)), 0,
+ XVECLEN (PATTERN (PREV_INSN (seq_insn)), 0) - 1))
+ = seq_insn;
+
+ if (NEXT_INSN (seq_insn) && GET_CODE (NEXT_INSN (seq_insn)) == INSN
+ && GET_CODE (PATTERN (NEXT_INSN (seq_insn))) == SEQUENCE)
+ PREV_INSN (XVECEXP (PATTERN (NEXT_INSN (seq_insn)), 0, 0)) = seq_insn;
+
+ /* If there used to be a BARRIER, put it back. */
+ if (had_barrier)
+ emit_barrier_after (seq_insn);
+
+ if (i != length + 1)
+ abort ();
+
+ return seq_insn;
+}
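+
+/* Schematically, a branch with two filled delay slots becomes one insn
+ whose pattern is
+
+ (sequence [(jump_insn ...) (insn slot1) (insn slot2)])
+
+ with NEXT_INSN/PREV_INSN threaded through the inner insns, so ordinary
+ forward and backward scans still visit every insn. */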
+
+/* Add INSN to DELAY_LIST and return the head of the new list. The list must
+ be in the order in which the insns are to be executed. */
+
+static rtx
+add_to_delay_list (insn, delay_list)
+ rtx insn;
+ rtx delay_list;
+{
+ /* If we have an empty list, just make a new list element. If
+ INSN has its block number recorded, clear it since we may
+ be moving the insn to a new block. */
+
+ if (delay_list == 0)
+ {
+ struct target_info *tinfo;
+
+ for (tinfo = target_hash_table[INSN_UID (insn) % TARGET_HASH_PRIME];
+ tinfo; tinfo = tinfo->next)
+ if (tinfo->uid == INSN_UID (insn))
+ break;
+
+ if (tinfo)
+ tinfo->block = -1;
+
+ return gen_rtx (INSN_LIST, VOIDmode, insn, NULL_RTX);
+ }
+
+ /* Otherwise this must be an INSN_LIST. Add INSN to the end of the
+ list. */
+ XEXP (delay_list, 1) = add_to_delay_list (insn, XEXP (delay_list, 1));
+
+ return delay_list;
+}
+
+/* Delete INSN from the delay slot of the insn that it is in. This may
+ produce an insn without anything in its delay slots. */
+
+static void
+delete_from_delay_slot (insn)
+ rtx insn;
+{
+ rtx trial, seq_insn, seq, prev;
+ rtx delay_list = 0;
+ int i;
+
+ /* We first must find the insn containing the SEQUENCE with INSN in its
+ delay slot. Do this by finding an insn, TRIAL, where
+ PREV_INSN (NEXT_INSN (TRIAL)) != TRIAL. */
+
+ for (trial = insn;
+ PREV_INSN (NEXT_INSN (trial)) == trial;
+ trial = NEXT_INSN (trial))
+ ;
+
+ seq_insn = PREV_INSN (NEXT_INSN (trial));
+ seq = PATTERN (seq_insn);
+
+ /* Create a delay list consisting of all the insns other than the one
+ we are deleting (unless we were the only one). */
+ if (XVECLEN (seq, 0) > 2)
+ for (i = 1; i < XVECLEN (seq, 0); i++)
+ if (XVECEXP (seq, 0, i) != insn)
+ delay_list = add_to_delay_list (XVECEXP (seq, 0, i), delay_list);
+
+ /* Delete the old SEQUENCE, re-emit the insn that used to have the delay
+ list, and rebuild the delay list if non-empty. */
+ prev = PREV_INSN (seq_insn);
+ trial = XVECEXP (seq, 0, 0);
+ delete_insn (seq_insn);
+ add_insn_after (trial, prev);
+
+ if (GET_CODE (trial) == JUMP_INSN
+ && (simplejump_p (trial) || GET_CODE (PATTERN (trial)) == RETURN))
+ emit_barrier_after (trial);
+
+ /* If there are any delay insns, re-emit them. Otherwise clear the
+ annul flag. */
+ if (delay_list)
+ trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0) - 2, 0);
+ else
+ INSN_ANNULLED_BRANCH_P (trial) = 0;
+
+ INSN_FROM_TARGET_P (insn) = 0;
+
+ /* Show we need to fill this insn again. */
+ obstack_ptr_grow (&unfilled_slots_obstack, trial);
+}
+
+/* Delete INSN, a JUMP_INSN. If it is a conditional jump, we must track down
+ the insn that sets CC0 for it and delete it too. */
+
+static void
+delete_scheduled_jump (insn)
+ rtx insn;
+{
+ /* Delete the insn that sets cc0 for us. On machines without cc0, we could
+ delete the insn that sets the condition code, but it is hard to find it.
+ Since this case is rare anyway, don't bother trying; there would likely
+ be other insns that became dead anyway, which we wouldn't know to
+ delete. */
+
+#ifdef HAVE_cc0
+ if (reg_mentioned_p (cc0_rtx, insn))
+ {
+ rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
+
+ /* If a reg-note was found, it points to the insn that sets CC0. This
+ insn is in the delay list of some other insn. So delete it from
+ the delay list it was in. */
+ if (note)
+ {
+ if (! FIND_REG_INC_NOTE (XEXP (note, 0), NULL_RTX)
+ && sets_cc0_p (PATTERN (XEXP (note, 0))) == 1)
+ delete_from_delay_slot (XEXP (note, 0));
+ }
+ else
+ {
+ /* The insn setting CC0 is our previous insn, but it may be in
+ a delay slot. It will be the last insn in the delay slot, if
+ it is. */
+ rtx trial = previous_insn (insn);
+ if (GET_CODE (trial) == NOTE)
+ trial = prev_nonnote_insn (trial);
+ if (sets_cc0_p (PATTERN (trial)) != 1
+ || FIND_REG_INC_NOTE (trial, 0))
+ return;
+ if (PREV_INSN (NEXT_INSN (trial)) == trial)
+ delete_insn (trial);
+ else
+ delete_from_delay_slot (trial);
+ }
+ }
+#endif
+
+ delete_insn (insn);
+}
+
+/* Counters for delay-slot filling. */
+
+#define NUM_REORG_FUNCTIONS 2
+#define MAX_DELAY_HISTOGRAM 3
+#define MAX_REORG_PASSES 2
+
+static int num_insns_needing_delays[NUM_REORG_FUNCTIONS][MAX_REORG_PASSES];
+
+static int num_filled_delays[NUM_REORG_FUNCTIONS][MAX_DELAY_HISTOGRAM+1][MAX_REORG_PASSES];
+
+static int reorg_pass_number;
+
+static void
+note_delay_statistics (slots_filled, index)
+ int slots_filled, index;
+{
+ num_insns_needing_delays[index][reorg_pass_number]++;
+ if (slots_filled > MAX_DELAY_HISTOGRAM)
+ slots_filled = MAX_DELAY_HISTOGRAM;
+ num_filled_delays[index][slots_filled][reorg_pass_number]++;
+}
+
+#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
+
+/* Optimize the following cases:
+
+ 1. When a conditional branch skips over only one instruction,
+ use an annulling branch and put that insn in the delay slot.
+ Use either a branch that annuls when the condition is true or
+ invert the test with a branch that annuls when the condition is
+ false. This saves insns, since otherwise we must copy an insn
+ from the L1 target.
+
+ (orig) (skip) (otherwise)
+ Bcc.n L1 Bcc',a L1 Bcc,a L1'
+ insn insn insn2
+ L1: L1: L1:
+ insn2 insn2 insn2
+ insn3 insn3 L1':
+ insn3
+
+ 2. When a conditional branch skips over only one instruction,
+ and after that, it unconditionally branches somewhere else,
+ perform a similar optimization. This saves executing the
+ second branch in the case where the inverted condition is true.
+
+ Bcc.n L1 Bcc',a L2
+ insn insn
+ L1: L1:
+ Bra L2 Bra L2
+
+ INSN is a JUMP_INSN.
+
+ This should be expanded to skip over N insns, where N is the number
+ of delay slots required. */
+
+static rtx
+optimize_skip (insn)
+ register rtx insn;
+{
+ register rtx trial = next_nonnote_insn (insn);
+ rtx next_trial = next_active_insn (trial);
+ rtx delay_list = 0;
+ rtx target_label;
+ int flags;
+
+ flags = get_jump_flags (insn, JUMP_LABEL (insn));
+
+ if (trial == 0
+ || GET_CODE (trial) != INSN
+ || GET_CODE (PATTERN (trial)) == SEQUENCE
+ || recog_memoized (trial) < 0
+ || (! eligible_for_annul_false (insn, 0, trial, flags)
+ && ! eligible_for_annul_true (insn, 0, trial, flags)))
+ return 0;
+
+ /* There are two cases where we are just executing one insn (we assume
+ here that a branch requires only one insn; this should be generalized
+ at some point): Where the branch goes around a single insn or where
+ we have one insn followed by a branch to the same label we branch to.
+ In both of these cases, inverting the jump and annulling the delay
+ slot give the same effect in fewer insns. */
+ if ((next_trial == next_active_insn (JUMP_LABEL (insn)))
+ || (next_trial != 0
+ && GET_CODE (next_trial) == JUMP_INSN
+ && JUMP_LABEL (insn) == JUMP_LABEL (next_trial)
+ && (simplejump_p (next_trial)
+ || GET_CODE (PATTERN (next_trial)) == RETURN)))
+ {
+ if (eligible_for_annul_false (insn, 0, trial, flags))
+ {
+ if (invert_jump (insn, JUMP_LABEL (insn)))
+ INSN_FROM_TARGET_P (trial) = 1;
+ else if (! eligible_for_annul_true (insn, 0, trial, flags))
+ return 0;
+ }
+
+ delay_list = add_to_delay_list (trial, NULL_RTX);
+ next_trial = next_active_insn (trial);
+ update_block (trial, trial);
+ delete_insn (trial);
+
+ /* Also, if we are targeting an unconditional
+ branch, thread our jump to the target of that branch. Don't
+ change this into a RETURN here, because it may not accept what
+ we have in the delay slot. We'll fix this up later. */
+ if (next_trial && GET_CODE (next_trial) == JUMP_INSN
+ && (simplejump_p (next_trial)
+ || GET_CODE (PATTERN (next_trial)) == RETURN))
+ {
+ target_label = JUMP_LABEL (next_trial);
+ if (target_label == 0)
+ target_label = find_end_label ();
+
+ /* Recompute the flags based on TARGET_LABEL since threading
+ the jump to TARGET_LABEL may change the direction of the
+ jump (which may change the circumstances in which the
+ delay slot is nullified). */
+ flags = get_jump_flags (insn, target_label);
+ if (eligible_for_annul_true (insn, 0, trial, flags))
+ reorg_redirect_jump (insn, target_label);
+ }
+
+ INSN_ANNULLED_BRANCH_P (insn) = 1;
+ }
+
+ return delay_list;
+}
+#endif
+
+
+/* Encode and return branch direction and prediction information for
+ INSN assuming it will jump to LABEL.
+
+ Unconditional branches return no direction information and
+ are predicted as very likely taken. */
+static int
+get_jump_flags (insn, label)
+ rtx insn, label;
+{
+ int flags;
+
+ /* get_jump_flags can be passed any insn with delay slots; these may
+ be INSNs, CALL_INSNs, or JUMP_INSNs. Only JUMP_INSNs have branch
+ direction information, and only if they are conditional jumps.
+
+ If LABEL is zero, then there is no way to determine the branch
+ direction. */
+ if (GET_CODE (insn) == JUMP_INSN
+ && (condjump_p (insn) || condjump_in_parallel_p (insn))
+ && INSN_UID (insn) <= max_uid
+ && label != 0
+ && INSN_UID (label) <= max_uid)
+ flags
+ = (uid_to_ruid[INSN_UID (label)] > uid_to_ruid[INSN_UID (insn)])
+ ? ATTR_FLAG_forward : ATTR_FLAG_backward;
+ /* No valid direction information. */
+ else
+ flags = 0;
+
+ /* If INSN is a conditional branch, call mostly_true_jump to
+ determine the branch prediction.
+
+ Unconditional branches are predicted as very likely taken. */
+ if (GET_CODE (insn) == JUMP_INSN
+ && (condjump_p (insn) || condjump_in_parallel_p (insn)))
+ {
+ int prediction;
+
+ prediction = mostly_true_jump (insn, get_branch_condition (insn, label));
+ switch (prediction)
+ {
+ case 2:
+ flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
+ break;
+ case 1:
+ flags |= ATTR_FLAG_likely;
+ break;
+ case 0:
+ flags |= ATTR_FLAG_unlikely;
+ break;
+ case -1:
+ flags |= (ATTR_FLAG_very_unlikely | ATTR_FLAG_unlikely);
+ break;
+
+ default:
+ abort();
+ }
+ }
+ else
+ flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
+
+ return flags;
+}
+
+/* Return 1 if INSN is a destination that will be branched to rarely (the
+ return point of a function); return 2 if INSN will be branched to very
+ rarely (a call to a function that doesn't return). Otherwise,
+ return 0. */
+
+static int
+rare_destination (insn)
+ rtx insn;
+{
+ int jump_count = 0;
+ rtx next;
+
+ for (; insn; insn = next)
+ {
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ insn = XVECEXP (PATTERN (insn), 0, 0);
+
+ next = NEXT_INSN (insn);
+
+ switch (GET_CODE (insn))
+ {
+ case CODE_LABEL:
+ return 0;
+ case BARRIER:
+ /* A BARRIER can either be after a JUMP_INSN or a CALL_INSN. We
+ don't scan past JUMP_INSNs, so any barrier we find here must
+ have been after a CALL_INSN and hence mean the call doesn't
+ return. */
+ return 2;
+ case JUMP_INSN:
+ if (GET_CODE (PATTERN (insn)) == RETURN)
+ return 1;
+ else if (simplejump_p (insn)
+ && jump_count++ < 10)
+ next = JUMP_LABEL (insn);
+ else
+ return 0;
+ }
+ }
+
+ /* If we got here it means we hit the end of the function. So this
+ is an unlikely destination. */
+
+ return 1;
+}
+
+/* Return truth value of the statement that this branch
+ is mostly taken. If we think that the branch is extremely likely
+ to be taken, we return 2. If the branch is slightly more likely to be
+ taken, return 1. If the branch is slightly less likely to be taken,
+ return 0 and if the branch is highly unlikely to be taken, return -1.
+
+ CONDITION, if non-zero, is the condition that JUMP_INSN is testing. */
+
+static int
+mostly_true_jump (jump_insn, condition)
+ rtx jump_insn, condition;
+{
+ rtx target_label = JUMP_LABEL (jump_insn);
+ rtx insn;
+ int rare_dest = rare_destination (target_label);
+ int rare_fallthrough = rare_destination (NEXT_INSN (jump_insn));
+
+ /* If this is a branch outside a loop, it is highly unlikely. */
+ if (GET_CODE (PATTERN (jump_insn)) == SET
+ && GET_CODE (SET_SRC (PATTERN (jump_insn))) == IF_THEN_ELSE
+ && ((GET_CODE (XEXP (SET_SRC (PATTERN (jump_insn)), 1)) == LABEL_REF
+ && LABEL_OUTSIDE_LOOP_P (XEXP (SET_SRC (PATTERN (jump_insn)), 1)))
+ || (GET_CODE (XEXP (SET_SRC (PATTERN (jump_insn)), 2)) == LABEL_REF
+ && LABEL_OUTSIDE_LOOP_P (XEXP (SET_SRC (PATTERN (jump_insn)), 2)))))
+ return -1;
+
+ if (target_label)
+ {
+ /* If this is the test of a loop, it is very likely true. We scan
+ backwards from the target label. If we find a NOTE_INSN_LOOP_BEG
+ before the next real insn, we assume the branch is to the top of
+ the loop. */
+ for (insn = PREV_INSN (target_label);
+ insn && GET_CODE (insn) == NOTE;
+ insn = PREV_INSN (insn))
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
+ return 2;
+
+ /* If this is a jump to the test of a loop, it is likely true. We scan
+ forwards from the target label. If we find a NOTE_INSN_LOOP_VTOP
+ before the next real insn, we assume the branch is to the loop branch
+ test. */
+ for (insn = NEXT_INSN (target_label);
+ insn && GET_CODE (insn) == NOTE;
+ insn = NEXT_INSN (insn))
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_VTOP)
+ return 1;
+ }
+
+ /* Look at the relative rarities of the fallthrough and destination. If
+ they differ, we can predict the branch that way. */
+
+ switch (rare_fallthrough - rare_dest)
+ {
+ case -2:
+ return -1;
+ case -1:
+ return 0;
+ case 0:
+ break;
+ case 1:
+ return 1;
+ case 2:
+ return 2;
+ }
+
+ /* If we couldn't figure out what this jump was, assume it won't be
+ taken. This should be rare. */
+ if (condition == 0)
+ return 0;
+
+ /* EQ tests are usually false and NE tests are usually true. Also,
+ most quantities are positive, so we can make the appropriate guesses
+ about signed comparisons against zero. */
+ switch (GET_CODE (condition))
+ {
+ case CONST_INT:
+ /* Unconditional branch. */
+ return 1;
+ case EQ:
+ return 0;
+ case NE:
+ return 1;
+ case LE:
+ case LT:
+ if (XEXP (condition, 1) == const0_rtx)
+ return 0;
+ break;
+ case GE:
+ case GT:
+ if (XEXP (condition, 1) == const0_rtx)
+ return 1;
+ break;
+ }
+
+ /* Predict that backward branches are usually taken and forward branches
+ usually are not. If we don't know whether this is forward or backward,
+ assume the branch will be taken, since most are. */
+ return (target_label == 0 || INSN_UID (jump_insn) > max_uid
+ || INSN_UID (target_label) > max_uid
+ || (uid_to_ruid[INSN_UID (jump_insn)]
+ > uid_to_ruid[INSN_UID (target_label)]));
+}
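+
+/* Illustration (hypothetical source, not from this file): in a loop such as
+
+ while (n-- > 0)
+ process ();
+
+ the conditional branch back to the loop top is taken once per iteration
+ and falls through only on exit, so the backward-branch heuristic above
+ is usually right to predict it as taken. */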
+
+/* Return the condition under which INSN will branch to TARGET. If TARGET
+ is zero, return the condition under which INSN will return. If INSN is
+ an unconditional branch, return const_true_rtx. If INSN isn't a simple
+ type of jump, or it doesn't go to TARGET, return 0. */
+
+static rtx
+get_branch_condition (insn, target)
+ rtx insn;
+ rtx target;
+{
+ rtx pat = PATTERN (insn);
+ rtx src;
+
+ if (condjump_in_parallel_p (insn))
+ pat = XVECEXP (pat, 0, 0);
+
+ if (GET_CODE (pat) == RETURN)
+ return target == 0 ? const_true_rtx : 0;
+
+ else if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
+ return 0;
+
+ src = SET_SRC (pat);
+ if (GET_CODE (src) == LABEL_REF && XEXP (src, 0) == target)
+ return const_true_rtx;
+
+ else if (GET_CODE (src) == IF_THEN_ELSE
+ && ((target == 0 && GET_CODE (XEXP (src, 1)) == RETURN)
+ || (GET_CODE (XEXP (src, 1)) == LABEL_REF
+ && XEXP (XEXP (src, 1), 0) == target))
+ && XEXP (src, 2) == pc_rtx)
+ return XEXP (src, 0);
+
+ else if (GET_CODE (src) == IF_THEN_ELSE
+ && ((target == 0 && GET_CODE (XEXP (src, 2)) == RETURN)
+ || (GET_CODE (XEXP (src, 2)) == LABEL_REF
+ && XEXP (XEXP (src, 2), 0) == target))
+ && XEXP (src, 1) == pc_rtx)
+ return gen_rtx (reverse_condition (GET_CODE (XEXP (src, 0))),
+ GET_MODE (XEXP (src, 0)),
+ XEXP (XEXP (src, 0), 0), XEXP (XEXP (src, 0), 1));
+
+ return 0;
+}
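+
+/* For illustration (a sketch of the two shapes matched above, with
+ hypothetical operands): a conditional branch to TARGET normally looks like
+
+ (set (pc) (if_then_else (lt (reg) (const_int 0))
+ (label_ref TARGET)
+ (pc)))
+
+ for which the LT comparison is returned directly, while the inverted form
+
+ (set (pc) (if_then_else (lt (reg) (const_int 0))
+ (pc)
+ (label_ref TARGET)))
+
+ causes the reversed comparison (GE) to be built and returned. */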
+
+/* Return non-zero if CONDITION is more strict than the condition of
+ INSN, i.e., if INSN will always branch if CONDITION is true. */
+
+static int
+condition_dominates_p (condition, insn)
+ rtx condition;
+ rtx insn;
+{
+ rtx other_condition = get_branch_condition (insn, JUMP_LABEL (insn));
+ enum rtx_code code = GET_CODE (condition);
+ enum rtx_code other_code;
+
+ if (rtx_equal_p (condition, other_condition)
+ || other_condition == const_true_rtx)
+ return 1;
+
+ else if (condition == const_true_rtx || other_condition == 0)
+ return 0;
+
+ other_code = GET_CODE (other_condition);
+ if (GET_RTX_LENGTH (code) != 2 || GET_RTX_LENGTH (other_code) != 2
+ || ! rtx_equal_p (XEXP (condition, 0), XEXP (other_condition, 0))
+ || ! rtx_equal_p (XEXP (condition, 1), XEXP (other_condition, 1)))
+ return 0;
+
+ return comparison_dominates_p (code, other_code);
+}
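+
+/* Example (hypothetical operands): if CONDITION is (eq x y) and INSN
+ branches on (le x y), the EQ test dominates: whenever x == y holds,
+ x <= y holds as well. Once the operands are known to match,
+ comparison_dominates_p makes exactly this determination. */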
+
+/* Return non-zero if redirecting JUMP to NEWLABEL does not invalidate
+ any insns already in the delay slot of JUMP. */
+
+static int
+redirect_with_delay_slots_safe_p (jump, newlabel, seq)
+ rtx jump, newlabel, seq;
+{
+ int flags, slots, i;
+ rtx pat = PATTERN (seq);
+
+ /* Make sure all the delay slots of this jump would still
+ be valid after threading the jump. If they are still
+ valid, then return non-zero. */
+
+ flags = get_jump_flags (jump, newlabel);
+ for (i = 1; i < XVECLEN (pat, 0); i++)
+ if (! (
+#ifdef ANNUL_IFFALSE_SLOTS
+ (INSN_ANNULLED_BRANCH_P (jump)
+ && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
+ ? eligible_for_annul_false (jump, i - 1,
+ XVECEXP (pat, 0, i), flags) :
+#endif
+#ifdef ANNUL_IFTRUE_SLOTS
+ (INSN_ANNULLED_BRANCH_P (jump)
+ && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
+ ? eligible_for_annul_true (jump, i - 1,
+ XVECEXP (pat, 0, i), flags) :
+#endif
+ eligible_for_delay (jump, i - 1, XVECEXP (pat, 0, i), flags)))
+ break;
+
+ return (i == XVECLEN (pat, 0));
+}
+
+/* Return non-zero if redirecting JUMP to NEWLABEL does not invalidate
+ any insns we wish to place in the delay slot of JUMP. */
+
+static int
+redirect_with_delay_list_safe_p (jump, newlabel, delay_list)
+ rtx jump, newlabel, delay_list;
+{
+ int flags, i;
+ rtx li;
+
+ /* Make sure all the insns in DELAY_LIST would still be
+ valid after threading the jump. If they are still
+ valid, then return non-zero. */
+
+ flags = get_jump_flags (jump, newlabel);
+ for (li = delay_list, i = 0; li; li = XEXP (li, 1), i++)
+ if (! (
+#ifdef ANNUL_IFFALSE_SLOTS
+ (INSN_ANNULLED_BRANCH_P (jump)
+ && INSN_FROM_TARGET_P (XEXP (li, 0)))
+ ? eligible_for_annul_false (jump, i, XEXP (li, 0), flags) :
+#endif
+#ifdef ANNUL_IFTRUE_SLOTS
+ (INSN_ANNULLED_BRANCH_P (jump)
+ && ! INSN_FROM_TARGET_P (XEXP (li, 0)))
+ ? eligible_for_annul_true (jump, i, XEXP (li, 0), flags) :
+#endif
+ eligible_for_delay (jump, i, XEXP (li, 0), flags)))
+ break;
+
+ return (li == NULL);
+}
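+
+/* For reference (a sketch, not a statement about any one target): an
+ annulling branch cancels its delay insns on one of the two paths, so
+ after redirection each insn kept in or destined for a slot must still
+ pass the target-specific eligible_for_annul_false/true or
+ eligible_for_delay test with the recomputed flags; the loops in the two
+ functions above simply re-run those tests. */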
+
+
+/* INSN branches to an insn whose pattern SEQ is a SEQUENCE. Given that
+ the condition tested by INSN is CONDITION and the resources shown in
+ OTHER_NEEDED are needed after INSN, see whether INSN can take all the insns
+ from SEQ's delay list, in addition to whatever insns it may execute
+ (in DELAY_LIST). SETS and NEEDED denote resources already set and
+ needed while searching for delay slot insns. Return the concatenated
+ delay list if possible; otherwise, return 0.
+
+ SLOTS_TO_FILL is the total number of slots required by INSN, and
+ PSLOTS_FILLED points to the number filled so far (also the number of
+ insns in DELAY_LIST). It is updated with the number that have been
+ filled from the SEQUENCE, if any.
+
+ PANNUL_P points to a non-zero value if we already know that we need
+ to annul INSN. If this routine determines that annulling is needed,
+ it may set that value non-zero.
+
+ PNEW_THREAD points to a location that is to receive the place at which
+ execution should continue. */
+
+static rtx
+steal_delay_list_from_target (insn, condition, seq, delay_list,
+ sets, needed, other_needed,
+ slots_to_fill, pslots_filled, pannul_p,
+ pnew_thread)
+ rtx insn, condition;
+ rtx seq;
+ rtx delay_list;
+ struct resources *sets, *needed, *other_needed;
+ int slots_to_fill;
+ int *pslots_filled;
+ int *pannul_p;
+ rtx *pnew_thread;
+{
+ rtx temp;
+ int slots_remaining = slots_to_fill - *pslots_filled;
+ int total_slots_filled = *pslots_filled;
+ rtx new_delay_list = 0;
+ int must_annul = *pannul_p;
+ int i;
+
+ /* We can't do anything if there are more delay slots in SEQ than we
+ can handle, or if we don't know that it will be a taken branch.
+
+ We know that it will be a taken branch if it is either an unconditional
+ branch or a conditional branch with a stricter branch condition. */
+
+ if (XVECLEN (seq, 0) - 1 > slots_remaining
+ || ! condition_dominates_p (condition, XVECEXP (seq, 0, 0)))
+ return delay_list;
+
+ for (i = 1; i < XVECLEN (seq, 0); i++)
+ {
+ rtx trial = XVECEXP (seq, 0, i);
+ int flags;
+
+ if (insn_references_resource_p (trial, sets, 0)
+ || insn_sets_resource_p (trial, needed, 0)
+ || insn_sets_resource_p (trial, sets, 0)
+#ifdef HAVE_cc0
+ /* If TRIAL sets CC0, we can't copy it, so we can't steal this
+ delay list. */
+ || find_reg_note (trial, REG_CC_USER, NULL_RTX)
+#endif
+ /* If TRIAL is from the fallthrough code of an annulled branch insn
+ in SEQ, we cannot use it. */
+ || (INSN_ANNULLED_BRANCH_P (XVECEXP (seq, 0, 0))
+ && ! INSN_FROM_TARGET_P (trial)))
+ return delay_list;
+
+ /* If this insn was already done (usually in a previous delay slot),
+ pretend we put it in our delay slot. */
+ if (redundant_insn_p (trial, insn, new_delay_list))
+ continue;
+
+ /* We will end up re-vectoring this branch, so compute flags
+ based on jumping to the new label. */
+ flags = get_jump_flags (insn, JUMP_LABEL (XVECEXP (seq, 0, 0)));
+
+ if (! must_annul
+ && ((condition == const_true_rtx
+ || (! insn_sets_resource_p (trial, other_needed, 0)
+ && ! may_trap_p (PATTERN (trial)))))
+ ? eligible_for_delay (insn, total_slots_filled, trial, flags)
+ : (must_annul = 1,
+ eligible_for_annul_false (insn, total_slots_filled, trial, flags)))
+ {
+ temp = copy_rtx (trial);
+ INSN_FROM_TARGET_P (temp) = 1;
+ new_delay_list = add_to_delay_list (temp, new_delay_list);
+ total_slots_filled++;
+
+ if (--slots_remaining == 0)
+ break;
+ }
+ else
+ return delay_list;
+ }
+
+ /* Show the place to which we will be branching. */
+ *pnew_thread = next_active_insn (JUMP_LABEL (XVECEXP (seq, 0, 0)));
+
+ /* Add any new insns to the delay list and update the count of the
+ number of slots filled. */
+ *pslots_filled = total_slots_filled;
+ *pannul_p = must_annul;
+
+ if (delay_list == 0)
+ return new_delay_list;
+
+ for (temp = new_delay_list; temp; temp = XEXP (temp, 1))
+ delay_list = add_to_delay_list (XEXP (temp, 0), delay_list);
+
+ return delay_list;
+}
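+
+/* For reference (a sketch of the representation, not code from this pass):
+ a branch whose delay slots are filled is an INSN whose pattern is a
+ SEQUENCE; element 0 is the branch itself and elements 1..n are the slot
+ insns, e.g.
+
+ (insn (sequence [(jump_insn ...) (insn ...) (insn ...)]))
+
+ which is why the loops above index XVECEXP (seq, 0, i) starting at 1. */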
+
+/* Similar to steal_delay_list_from_target except that SEQ is on the
+ fallthrough path of INSN. Here we only do something if the delay insn
+ of SEQ is an unconditional branch. In that case we steal its delay slot
+ for INSN since unconditional branches are much easier to fill. */
+
+static rtx
+steal_delay_list_from_fallthrough (insn, condition, seq,
+ delay_list, sets, needed, other_needed,
+ slots_to_fill, pslots_filled, pannul_p)
+ rtx insn, condition;
+ rtx seq;
+ rtx delay_list;
+ struct resources *sets, *needed, *other_needed;
+ int slots_to_fill;
+ int *pslots_filled;
+ int *pannul_p;
+{
+ int i;
+ int flags;
+
+ flags = get_jump_flags (insn, JUMP_LABEL (insn));
+
+ /* We can't do anything if SEQ's delay insn isn't an
+ unconditional branch. */
+
+ if (! simplejump_p (XVECEXP (seq, 0, 0))
+ && GET_CODE (PATTERN (XVECEXP (seq, 0, 0))) != RETURN)
+ return delay_list;
+
+ for (i = 1; i < XVECLEN (seq, 0); i++)
+ {
+ rtx trial = XVECEXP (seq, 0, i);
+
+ /* If TRIAL sets CC0, stealing it will move it too far from the use
+ of CC0. */
+ if (insn_references_resource_p (trial, sets, 0)
+ || insn_sets_resource_p (trial, needed, 0)
+ || insn_sets_resource_p (trial, sets, 0)
+#ifdef HAVE_cc0
+ || sets_cc0_p (PATTERN (trial))
+#endif
+ )
+ break;
+
+ /* If this insn was already done, we don't need it. */
+ if (redundant_insn_p (trial, insn, delay_list))
+ {
+ delete_from_delay_slot (trial);
+ continue;
+ }
+
+ if (! *pannul_p
+ && ((condition == const_true_rtx
+ || (! insn_sets_resource_p (trial, other_needed, 0)
+ && ! may_trap_p (PATTERN (trial)))))
+ ? eligible_for_delay (insn, *pslots_filled, trial, flags)
+ : (*pannul_p = 1,
+ eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
+ {
+ delete_from_delay_slot (trial);
+ delay_list = add_to_delay_list (trial, delay_list);
+
+ if (++(*pslots_filled) == slots_to_fill)
+ break;
+ }
+ else
+ break;
+ }
+
+ return delay_list;
+}
+
+/* Try merging insns starting at THREAD which match exactly the insns in
+ INSN's delay list.
+
+ If all insns were matched and the insn was previously annulling, the
+ annul bit will be cleared.
+
+ For each insn that is merged, if the branch is or will be non-annulling,
+ we delete the merged insn. */
+
+static void
+try_merge_delay_insns (insn, thread)
+ rtx insn, thread;
+{
+ rtx trial, next_trial;
+ rtx delay_insn = XVECEXP (PATTERN (insn), 0, 0);
+ int annul_p = INSN_ANNULLED_BRANCH_P (delay_insn);
+ int slot_number = 1;
+ int num_slots = XVECLEN (PATTERN (insn), 0);
+ rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
+ struct resources set, needed;
+ rtx merged_insns = 0;
+ int i;
+ int flags;
+
+ flags = get_jump_flags (delay_insn, JUMP_LABEL (delay_insn));
+
+ CLEAR_RESOURCE (&needed);
+ CLEAR_RESOURCE (&set);
+
+ /* If this is not an annulling branch, take into account anything needed in
+ NEXT_TO_MATCH. This prevents two increments from being incorrectly
+ folded into one. If we are annulling, such folding would be the correct
+ thing to do. (The alternative, looking at things set in NEXT_TO_MATCH,
+ would essentially disable this optimization. This method is somewhat of
+ a kludge, but I don't see a better way.) */
+ if (! annul_p)
+ mark_referenced_resources (next_to_match, &needed, 1);
+
+ for (trial = thread; !stop_search_p (trial, 1); trial = next_trial)
+ {
+ rtx pat = PATTERN (trial);
+
+ next_trial = next_nonnote_insn (trial);
+
+ /* TRIAL must be a CALL_INSN or INSN. Skip USE and CLOBBER. */
+ if (GET_CODE (trial) == INSN
+ && (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER))
+ continue;
+
+ if (GET_CODE (next_to_match) == GET_CODE (trial)
+#ifdef HAVE_cc0
+ /* We can't share an insn that sets cc0. */
+ && ! sets_cc0_p (pat)
+#endif
+ && ! insn_references_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &needed, 1)
+ && (trial = try_split (pat, trial, 0)) != 0
+ /* Update next_trial, in case try_split succeeded. */
+ && (next_trial = next_nonnote_insn (trial))
+ && rtx_equal_p (PATTERN (next_to_match), PATTERN (trial))
+ /* Have to test this condition if annul condition is different
+ from (and less restrictive than) non-annulling one. */
+ && eligible_for_delay (delay_insn, slot_number - 1, trial, flags))
+ {
+ if (! annul_p)
+ {
+ update_block (trial, thread);
+ delete_insn (trial);
+ INSN_FROM_TARGET_P (next_to_match) = 0;
+ }
+ else
+ merged_insns = gen_rtx (INSN_LIST, VOIDmode, trial, merged_insns);
+
+ if (++slot_number == num_slots)
+ break;
+
+ next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
+ if (! annul_p)
+ mark_referenced_resources (next_to_match, &needed, 1);
+ }
+
+ mark_set_resources (trial, &set, 0, 1);
+ mark_referenced_resources (trial, &needed, 1);
+ }
+
+ /* See if we stopped on a filled insn. If we did, try to see if its
+ delay slots match. */
+ if (slot_number != num_slots
+ && trial && GET_CODE (trial) == INSN
+ && GET_CODE (PATTERN (trial)) == SEQUENCE
+ && ! INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0)))
+ {
+ rtx pat = PATTERN (trial);
+ rtx filled_insn = XVECEXP (pat, 0, 0);
+
+ /* Account for resources set/needed by the filled insn. */
+ mark_set_resources (filled_insn, &set, 0, 1);
+ mark_referenced_resources (filled_insn, &needed, 1);
+
+ for (i = 1; i < XVECLEN (pat, 0); i++)
+ {
+ rtx dtrial = XVECEXP (pat, 0, i);
+
+ if (! insn_references_resource_p (dtrial, &set, 1)
+ && ! insn_sets_resource_p (dtrial, &set, 1)
+ && ! insn_sets_resource_p (dtrial, &needed, 1)
+#ifdef HAVE_cc0
+ && ! sets_cc0_p (PATTERN (dtrial))
+#endif
+ && rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial))
+ && eligible_for_delay (delay_insn, slot_number - 1, dtrial, flags))
+ {
+ if (! annul_p)
+ {
+ update_block (dtrial, thread);
+ delete_from_delay_slot (dtrial);
+ INSN_FROM_TARGET_P (next_to_match) = 0;
+ }
+ else
+ merged_insns = gen_rtx (INSN_LIST, SImode, dtrial,
+ merged_insns);
+
+ if (++slot_number == num_slots)
+ break;
+
+ next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
+ }
+ }
+ }
+
+ /* If all insns in the delay slots have been matched and we were previously
+ annulling the branch, we need not do so any more. In that case delete all
+ the merged insns. Also clear the INSN_FROM_TARGET_P bit of each insn in
+ the delay list so that we know that it isn't only being used at the
+ target. */
+ if (slot_number == num_slots && annul_p)
+ {
+ for (; merged_insns; merged_insns = XEXP (merged_insns, 1))
+ {
+ if (GET_MODE (merged_insns) == SImode)
+ {
+ update_block (XEXP (merged_insns, 0), thread);
+ delete_from_delay_slot (XEXP (merged_insns, 0));
+ }
+ else
+ {
+ update_block (XEXP (merged_insns, 0), thread);
+ delete_insn (XEXP (merged_insns, 0));
+ }
+ }
+
+ INSN_ANNULLED_BRANCH_P (delay_insn) = 0;
+
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)) = 0;
+ }
+}
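+
+/* Illustration (hypothetical): suppose INSN is an annulling branch whose
+ delay slot holds "add r1,r1,1" taken from its target, and the thread
+ being scanned begins with an identical "add r1,r1,1". The loop above
+ records the match; once every slot has matched, the annul bit is
+ cleared, the matched duplicates are deleted, and the slot insns are
+ thereafter executed on both paths. */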
+
+/* See if INSN is redundant with an insn in front of TARGET. Often this
+ is called when INSN is a candidate for a delay slot of TARGET.
+ DELAY_LIST are insns that will be placed in delay slots of TARGET in front
+ of INSN. Often INSN will be redundant with an insn in a delay slot of
+ some previous insn. This happens when we have a series of branches to the
+ same label; in that case the first insn at the target might want to go
+ into each of the delay slots.
+
+ If we are not careful, this routine can take up a significant fraction
+ of the total compilation time (4%), but only wins rarely. Hence we
+ speed this routine up by making two passes. The first pass goes back
+ until it hits a label and sees if it finds an insn with an identical
+ pattern. Only in this (relatively rare) event does it check for
+ data conflicts.
+
+ We do not split insns we encounter. This could cause us not to find a
+ redundant insn, but the cost of splitting seems greater than the possible
+ gain in rare cases. */
+
+static int
+redundant_insn_p (insn, target, delay_list)
+ rtx insn;
+ rtx target;
+ rtx delay_list;
+{
+ rtx target_main = target;
+ rtx ipat = PATTERN (insn);
+ rtx trial, pat;
+ struct resources needed, set;
+ int i;
+
+ /* Scan backwards looking for a match. */
+ for (trial = PREV_INSN (target); trial; trial = PREV_INSN (trial))
+ {
+ if (GET_CODE (trial) == CODE_LABEL)
+ return 0;
+
+ if (GET_RTX_CLASS (GET_CODE (trial)) != 'i')
+ continue;
+
+ pat = PATTERN (trial);
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
+
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ /* Stop for a CALL and its delay slots because it is difficult to
+ track its resource needs correctly. */
+ if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN)
+ return 0;
+
+ /* Stop for an INSN or JUMP_INSN with delayed effects and its delay
+ slots because it is difficult to track its resource needs
+ correctly. */
+
+#ifdef INSN_SETS_ARE_DELAYED
+ if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ return 0;
+#endif
+
+#ifdef INSN_REFERENCES_ARE_DELAYED
+ if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ return 0;
+#endif
+
+ /* See if any of the insns in the delay slot match, updating
+ resource requirements as we go. */
+ for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
+ if (GET_CODE (XVECEXP (pat, 0, i)) == GET_CODE (insn)
+ && rtx_equal_p (PATTERN (XVECEXP (pat, 0, i)), ipat))
+ break;
+
+ /* If we found a match, exit this loop early. */
+ if (i > 0)
+ break;
+ }
+
+ else if (GET_CODE (trial) == GET_CODE (insn) && rtx_equal_p (pat, ipat))
+ break;
+ }
+
+ /* If we didn't find an insn that matches, return 0. */
+ if (trial == 0)
+ return 0;
+
+ /* See what resources this insn sets and needs. If they overlap, or
+ if this insn references CC0, it can't be redundant. */
+
+ CLEAR_RESOURCE (&needed);
+ CLEAR_RESOURCE (&set);
+ mark_set_resources (insn, &set, 0, 1);
+ mark_referenced_resources (insn, &needed, 1);
+
+ /* If TARGET is a SEQUENCE, get the main insn. */
+ if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE)
+ target_main = XVECEXP (PATTERN (target), 0, 0);
+
+ if (resource_conflicts_p (&needed, &set)
+#ifdef HAVE_cc0
+ || reg_mentioned_p (cc0_rtx, ipat)
+#endif
+ /* The insn requiring the delay may not set anything needed or set by
+ INSN. */
+ || insn_sets_resource_p (target_main, &needed, 1)
+ || insn_sets_resource_p (target_main, &set, 1))
+ return 0;
+
+ /* Insns we pass may not set either NEEDED or SET, so merge them for
+ simpler tests. */
+ needed.memory |= set.memory;
+ IOR_HARD_REG_SET (needed.regs, set.regs);
+
+ /* This insn isn't redundant if it conflicts with an insn that either is
+ or will be in a delay slot of TARGET. */
+
+ while (delay_list)
+ {
+ if (insn_sets_resource_p (XEXP (delay_list, 0), &needed, 1))
+ return 0;
+ delay_list = XEXP (delay_list, 1);
+ }
+
+ if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE)
+ for (i = 1; i < XVECLEN (PATTERN (target), 0); i++)
+ if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i), &needed, 1))
+ return 0;
+
+ /* Scan backwards until we reach a label or an insn that uses something
+ INSN sets or sets something INSN uses or sets. */
+
+ for (trial = PREV_INSN (target);
+ trial && GET_CODE (trial) != CODE_LABEL;
+ trial = PREV_INSN (trial))
+ {
+ if (GET_CODE (trial) != INSN && GET_CODE (trial) != CALL_INSN
+ && GET_CODE (trial) != JUMP_INSN)
+ continue;
+
+ pat = PATTERN (trial);
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
+
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ /* If this is a CALL_INSN and its delay slots, it is hard to track
+ the resource needs properly, so give up. */
+ if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN)
+ return 0;
+
+ /* If this is an INSN or JUMP_INSN with delayed effects, it
+ is hard to track the resource needs properly, so give up. */
+
+#ifdef INSN_SETS_ARE_DELAYED
+ if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ return 0;
+#endif
+
+#ifdef INSN_REFERENCES_ARE_DELAYED
+ if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
+ return 0;
+#endif
+
+ /* See if any of the insns in the delay slot match, updating
+ resource requirements as we go. */
+ for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
+ {
+ rtx candidate = XVECEXP (pat, 0, i);
+
+ /* If an insn will be annulled if the branch is false, it isn't
+ considered as a possible duplicate insn. */
+ if (rtx_equal_p (PATTERN (candidate), ipat)
+ && ! (INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0))
+ && INSN_FROM_TARGET_P (candidate)))
+ {
+ /* Show that this insn will be used in the sequel. */
+ INSN_FROM_TARGET_P (candidate) = 0;
+ return 1;
+ }
+
+ /* Unless this is an annulled insn from the target of a branch,
+ we must stop if it sets anything needed or set by INSN. */
+ if ((! INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0))
+ || ! INSN_FROM_TARGET_P (candidate))
+ && insn_sets_resource_p (candidate, &needed, 1))
+ return 0;
+ }
+
+ /* If the insn requiring the delay slot conflicts with INSN, we
+ must stop. */
+ if (insn_sets_resource_p (XVECEXP (pat, 0, 0), &needed, 1))
+ return 0;
+ }
+ else
+ {
+ /* See if TRIAL is the same as INSN. */
+ pat = PATTERN (trial);
+ if (rtx_equal_p (pat, ipat))
+ return 1;
+
+ /* Can't go any further if TRIAL conflicts with INSN. */
+ if (insn_sets_resource_p (trial, &needed, 1))
+ return 0;
+ }
+ }
+
+ return 0;
+}
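+
+/* Illustration (hypothetical): if several branches jump to the same label
+ and the first insn at that label is "add r2,r2,4", each branch may want
+ a copy of that insn in its delay slot. Once one copy has been placed,
+ a later candidate with the same pattern is redundant, provided the
+ backward scans above find no intervening insn that disturbs its
+ sources or destination. */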
+
+/* Return 1 if THREAD can only be executed in one way. If LABEL is non-zero,
+ it is the target of the branch insn being scanned. If ALLOW_FALLTHROUGH
+ is non-zero, we are allowed to fall into this thread; otherwise, we are
+ not.
+
+ If LABEL is used more than once or we pass a label other than LABEL before
+ finding an active insn, we do not own this thread. */
+
+static int
+own_thread_p (thread, label, allow_fallthrough)
+ rtx thread;
+ rtx label;
+ int allow_fallthrough;
+{
+ rtx active_insn;
+ rtx insn;
+
+ /* We don't own the function end. */
+ if (thread == 0)
+ return 0;
+
+ /* Get the first active insn, or THREAD, if it is an active insn. */
+ active_insn = next_active_insn (PREV_INSN (thread));
+
+ for (insn = thread; insn != active_insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == CODE_LABEL
+ && (insn != label || LABEL_NUSES (insn) != 1))
+ return 0;
+
+ if (allow_fallthrough)
+ return 1;
+
+ /* Ensure that we reach a BARRIER before any insn or label. */
+ for (insn = prev_nonnote_insn (thread);
+ insn == 0 || GET_CODE (insn) != BARRIER;
+ insn = prev_nonnote_insn (insn))
+ if (insn == 0
+ || GET_CODE (insn) == CODE_LABEL
+ || (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER))
+ return 0;
+
+ return 1;
+}
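+
+/* Example (sketch): a thread that starts right after a BARRIER, at a label
+ used only by our branch, is owned -- control cannot reach it any other
+ way, so its insns may be deleted outright after being copied into delay
+ slots. A thread that can be fallen into, or whose label has other
+ users, may only be branched into at a later point. */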
+
+/* Find the number of the basic block that starts closest to INSN. Return -1
+ if we couldn't find such a basic block. */
+
+static int
+find_basic_block (insn)
+ rtx insn;
+{
+ int i;
+
+ /* Scan backwards to the previous BARRIER. Then see if we can find a
+ label that starts a basic block. Return the basic block number. */
+
+ for (insn = prev_nonnote_insn (insn);
+ insn && GET_CODE (insn) != BARRIER;
+ insn = prev_nonnote_insn (insn))
+ ;
+
+ /* The start of the function is basic block zero. */
+ if (insn == 0)
+ return 0;
+
+ /* See if any of the upcoming CODE_LABELs start a basic block. If we reach
+ anything other than a CODE_LABEL or note, we can't find this code. */
+ for (insn = next_nonnote_insn (insn);
+ insn && GET_CODE (insn) == CODE_LABEL;
+ insn = next_nonnote_insn (insn))
+ {
+ for (i = 0; i < n_basic_blocks; i++)
+ if (insn == basic_block_head[i])
+ return i;
+ }
+
+ return -1;
+}
+
+/* Called when INSN is being moved from a location near the target of a jump.
+ We leave a marker of the form (use (INSN)) immediately in front
+ of WHERE for mark_target_live_regs. These markers will be deleted when
+ reorg finishes.
+
+ We used to try to update the live status of registers if WHERE is at
+ the start of a basic block, but that can't work since we may remove a
+ BARRIER in relax_delay_slots. */
+
+static void
+update_block (insn, where)
+ rtx insn;
+ rtx where;
+{
+ int b;
+
+ /* Ignore if this was in a delay slot and it came from the target of
+ a branch. */
+ if (INSN_FROM_TARGET_P (insn))
+ return;
+
+ emit_insn_before (gen_rtx (USE, VOIDmode, insn), where);
+
+ /* INSN might be making a value live in a block where it didn't use to
+ be. So recompute liveness information for this block. */
+
+ b = find_basic_block (insn);
+ if (b != -1)
+ bb_ticks[b]++;
+}
+
+/* Similar to REDIRECT_JUMP except that we update the BB_TICKS entry for
+ the basic block containing the jump. */
+
+static int
+reorg_redirect_jump (jump, nlabel)
+ rtx jump;
+ rtx nlabel;
+{
+ int b = find_basic_block (jump);
+
+ if (b != -1)
+ bb_ticks[b]++;
+
+ return redirect_jump (jump, nlabel);
+}
+
+/* Called when INSN is being moved forward into a delay slot of DELAYED_INSN.
+ We check every instruction between INSN and DELAYED_INSN for REG_DEAD notes
+ that reference values used in INSN. If we find one, then we move the
+ REG_DEAD note to INSN.
+
+ This is needed to handle the case where a later insn (after INSN) has a
+ REG_DEAD note for a register used by INSN, and this later insn subsequently
+ gets moved before a CODE_LABEL because it is a redundant insn. In this
+ case, mark_target_live_regs may be confused into thinking the register
+ is dead because it sees a REG_DEAD note immediately before a CODE_LABEL. */
+
+static void
+update_reg_dead_notes (insn, delayed_insn)
+ rtx insn, delayed_insn;
+{
+ rtx p, link, next;
+
+ for (p = next_nonnote_insn (insn); p != delayed_insn;
+ p = next_nonnote_insn (p))
+ for (link = REG_NOTES (p); link; link = next)
+ {
+ next = XEXP (link, 1);
+
+ if (REG_NOTE_KIND (link) != REG_DEAD
+ || GET_CODE (XEXP (link, 0)) != REG)
+ continue;
+
+ if (reg_referenced_p (XEXP (link, 0), PATTERN (insn)))
+ {
+ /* Move the REG_DEAD note from P to INSN. */
+ remove_note (p, link);
+ XEXP (link, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = link;
+ }
+ }
+}
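+
+/* Illustration (hypothetical): INSN uses r3 and is being moved into the
+ delay slot of DELAYED_INSN, while an insn between them carries a
+ REG_DEAD note for r3. If that intervening insn is later moved above a
+ CODE_LABEL as a redundant insn, its note would wrongly make r3 appear
+ dead at the label; moving the REG_DEAD note onto INSN here avoids
+ that. */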
+
+/* Marks registers possibly live at the current place being scanned by
+ mark_target_live_regs. Used only by the next two functions. */
+
+static HARD_REG_SET current_live_regs;
+
+/* Marks registers for which we have seen a REG_DEAD note but no assignment.
+ Also only used by the next two functions. */
+
+static HARD_REG_SET pending_dead_regs;
+
+/* Utility function called from mark_target_live_regs via note_stores.
+ It deadens any CLOBBERed registers and livens any SET registers. */
+
+static void
+update_live_status (dest, x)
+ rtx dest;
+ rtx x;
+{
+ int first_regno, last_regno;
+ int i;
+
+ if (GET_CODE (dest) != REG
+ && (GET_CODE (dest) != SUBREG || GET_CODE (SUBREG_REG (dest)) != REG))
+ return;
+
+ if (GET_CODE (dest) == SUBREG)
+ first_regno = REGNO (SUBREG_REG (dest)) + SUBREG_WORD (dest);
+ else
+ first_regno = REGNO (dest);
+
+ last_regno = first_regno + HARD_REGNO_NREGS (first_regno, GET_MODE (dest));
+
+ if (GET_CODE (x) == CLOBBER)
+ for (i = first_regno; i < last_regno; i++)
+ CLEAR_HARD_REG_BIT (current_live_regs, i);
+ else
+ for (i = first_regno; i < last_regno; i++)
+ {
+ SET_HARD_REG_BIT (current_live_regs, i);
+ CLEAR_HARD_REG_BIT (pending_dead_regs, i);
+ }
+}
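+
+/* Example (hypothetical, assuming a 32-bit word target): for
+ (clobber (subreg:SI (reg:DI 10) 1)), FIRST_REGNO is 11 and
+ HARD_REGNO_NREGS yields 1, so hard register 11 alone is cleared from
+ current_live_regs; a SET of the same subreg would instead mark it live
+ and remove it from pending_dead_regs. */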
+
+/* Similar to next_insn, but ignores insns in the delay slots of
+ an annulled branch. */
+
+static rtx
+next_insn_no_annul (insn)
+ rtx insn;
+{
+ if (insn)
+ {
+ /* If INSN is an annulled branch, skip any insns from the target
+ of the branch. */
+ if (INSN_ANNULLED_BRANCH_P (insn)
+ && NEXT_INSN (PREV_INSN (insn)) != insn)
+ while (INSN_FROM_TARGET_P (NEXT_INSN (insn)))
+ insn = NEXT_INSN (insn);
+
+ insn = NEXT_INSN (insn);
+ if (insn && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ insn = XVECEXP (PATTERN (insn), 0, 0);
+ }
+
+ return insn;
+}
+
+/* Set the resources that are live at TARGET.
+
+ If TARGET is zero, we refer to the end of the current function and can
+ return our precomputed value.
+
+ Otherwise, we try to find out what is live by consulting the basic block
+ information. This is tricky, because we must consider the actions of
+ reload and jump optimization, which occur after the basic block information
+ has been computed.
+
+ Accordingly, we proceed as follows:
+
+ We find the previous BARRIER and look at all immediately following labels
+ (with no intervening active insns) to see if any of them start a basic
+ block. If we hit the start of the function first, we use block 0.
+
+ Once we have found a basic block and a corresponding first insn, we can
+ accurately compute the live status from basic_block_live_regs and
+ reg_renumber. (By starting at a label following a BARRIER, we are immune
+ to actions taken by reload and jump.) Then we scan all insns between
+ that point and our target. For each CLOBBER (or for call-clobbered regs
+ when we pass a CALL_INSN), mark the appropriate registers as dead. For
+ a SET, mark them as live.
+
+ We have to be careful when using REG_DEAD notes because they are not
+ updated by such things as find_equiv_reg. So keep track of registers
+ marked as dead that haven't been assigned to, and mark them dead at the
+ next CODE_LABEL since reload and jump won't propagate values across labels.
+
+ If we cannot find the start of a basic block (should be a very rare
+ case, if it can happen at all), mark everything as potentially live.
+
+ Next, scan forward from TARGET looking for things set or clobbered
+ before they are used. These are not live.
+
+ Because we can be called many times on the same target, save our results
+ in a hash table indexed by INSN_UID. */
+
+static void
+mark_target_live_regs (target, res)
+ rtx target;
+ struct resources *res;
+{
+ int b = -1;
+ int i;
+ struct target_info *tinfo;
+ rtx insn, next;
+ rtx jump_insn = 0;
+ rtx jump_target;
+ HARD_REG_SET scratch;
+ struct resources set, needed;
+ int jump_count = 0;
+
+ /* Handle end of function. */
+ if (target == 0)
+ {
+ *res = end_of_function_needs;
+ return;
+ }
+
+ /* We have to assume memory is needed, but the CC isn't. */
+ res->memory = 1;
+ res->volatil = 0;
+ res->cc = 0;
+
+ /* See if we have computed this value already. */
+ for (tinfo = target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME];
+ tinfo; tinfo = tinfo->next)
+ if (tinfo->uid == INSN_UID (target))
+ break;
+
+ /* Start by getting the basic block number. If we have saved information,
+ we can get it from there unless the insn at the start of the basic block
+ has been deleted. */
+ if (tinfo && tinfo->block != -1
+ && ! INSN_DELETED_P (basic_block_head[tinfo->block]))
+ b = tinfo->block;
+
+ if (b == -1)
+ b = find_basic_block (target);
+
+ if (tinfo)
+ {
+ /* If the information is up-to-date, use it. Otherwise, we will
+ update it below. */
+ if (b == tinfo->block && b != -1 && tinfo->bb_tick == bb_ticks[b])
+ {
+ COPY_HARD_REG_SET (res->regs, tinfo->live_regs);
+ return;
+ }
+ }
+ else
+ {
+ /* Allocate a place to put our results and chain it into the
+ hash table. */
+ tinfo = (struct target_info *) oballoc (sizeof (struct target_info));
+ tinfo->uid = INSN_UID (target);
+ tinfo->block = b;
+ tinfo->next = target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME];
+ target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME] = tinfo;
+ }
+
+ CLEAR_HARD_REG_SET (pending_dead_regs);
+
+ /* If we found a basic block, get the live registers from it and update
+ them with anything set or killed between its start and the insn before
+ TARGET. Otherwise, we must assume everything is live. */
+ if (b != -1)
+ {
+ regset regs_live = basic_block_live_at_start[b];
+ int offset, j;
+ REGSET_ELT_TYPE bit;
+ int regno;
+ rtx start_insn, stop_insn;
+
+ /* Compute hard regs live at start of block -- this is the real hard regs
+ marked live, plus live pseudo regs that have been renumbered to
+ hard regs. */
+
+#ifdef HARD_REG_SET
+ current_live_regs = *regs_live;
+#else
+ COPY_HARD_REG_SET (current_live_regs, regs_live);
+#endif
+
+ for (offset = 0, i = 0; offset < regset_size; offset++)
+ {
+ if (regs_live[offset] == 0)
+ i += REGSET_ELT_BITS;
+ else
+ for (bit = 1; bit && i < max_regno; bit <<= 1, i++)
+ if ((regs_live[offset] & bit)
+ && (regno = reg_renumber[i]) >= 0)
+ for (j = regno;
+ j < regno + HARD_REGNO_NREGS (regno,
+ PSEUDO_REGNO_MODE (i));
+ j++)
+ SET_HARD_REG_BIT (current_live_regs, j);
+ }
+
+ /* Get starting and ending insn, handling the case where each might
+ be a SEQUENCE. */
+ start_insn = (b == 0 ? get_insns () : basic_block_head[b]);
+ stop_insn = target;
+
+ if (GET_CODE (start_insn) == INSN
+ && GET_CODE (PATTERN (start_insn)) == SEQUENCE)
+ start_insn = XVECEXP (PATTERN (start_insn), 0, 0);
+
+ if (GET_CODE (stop_insn) == INSN
+ && GET_CODE (PATTERN (stop_insn)) == SEQUENCE)
+ stop_insn = next_insn (PREV_INSN (stop_insn));
+
+ for (insn = start_insn; insn != stop_insn;
+ insn = next_insn_no_annul (insn))
+ {
+ rtx link;
+ rtx real_insn = insn;
+
+ /* If this insn is from the target of a branch, it isn't going to
+ be used in the sequel. If it is used in both cases, this
+ test will not be true. */
+ if (INSN_FROM_TARGET_P (insn))
+ continue;
+
+ /* If this insn is a USE made by update_block, we care about the
+ underlying insn. */
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE
+ && GET_RTX_CLASS (GET_CODE (XEXP (PATTERN (insn), 0))) == 'i')
+ real_insn = XEXP (PATTERN (insn), 0);
+
+ if (GET_CODE (real_insn) == CALL_INSN)
+ {
+ /* CALL clobbers all call-used regs that aren't fixed except
+ sp, ap, and fp. Do this before setting the result of the
+ call live. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i]
+ && i != STACK_POINTER_REGNUM && i != FRAME_POINTER_REGNUM
+ && i != ARG_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && i != HARD_FRAME_POINTER_REGNUM
+#endif
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && ! (i == ARG_POINTER_REGNUM && fixed_regs[i])
+#endif
+#ifdef PIC_OFFSET_TABLE_REGNUM
+ && ! (i == PIC_OFFSET_TABLE_REGNUM && flag_pic)
+#endif
+ )
+ CLEAR_HARD_REG_BIT (current_live_regs, i);
+
+ /* A CALL_INSN sets any global register live, since it may
+ have been modified by the call. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i])
+ SET_HARD_REG_BIT (current_live_regs, i);
+ }
+
+ /* Mark anything killed in an insn to be deadened at the next
+ label. Ignore USE insns; the only REG_DEAD notes will be for
+ parameters. But they might be early. A CALL_INSN will usually
+ clobber registers used for parameters. It isn't worth bothering
+ with the unlikely case when it won't. */
+ if ((GET_CODE (real_insn) == INSN
+ && GET_CODE (PATTERN (real_insn)) != USE
+ && GET_CODE (PATTERN (real_insn)) != CLOBBER)
+ || GET_CODE (real_insn) == JUMP_INSN
+ || GET_CODE (real_insn) == CALL_INSN)
+ {
+ for (link = REG_NOTES (real_insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_DEAD
+ && GET_CODE (XEXP (link, 0)) == REG
+ && REGNO (XEXP (link, 0)) < FIRST_PSEUDO_REGISTER)
+ {
+ int first_regno = REGNO (XEXP (link, 0));
+ int last_regno
+ = (first_regno
+ + HARD_REGNO_NREGS (first_regno,
+ GET_MODE (XEXP (link, 0))));
+
+ for (i = first_regno; i < last_regno; i++)
+ SET_HARD_REG_BIT (pending_dead_regs, i);
+ }
+
+ note_stores (PATTERN (real_insn), update_live_status);
+
+ /* If any registers were unused after this insn, kill them.
+ These notes will always be accurate. */
+ for (link = REG_NOTES (real_insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_UNUSED
+ && GET_CODE (XEXP (link, 0)) == REG
+ && REGNO (XEXP (link, 0)) < FIRST_PSEUDO_REGISTER)
+ {
+ int first_regno = REGNO (XEXP (link, 0));
+ int last_regno
+ = (first_regno
+ + HARD_REGNO_NREGS (first_regno,
+ GET_MODE (XEXP (link, 0))));
+
+ for (i = first_regno; i < last_regno; i++)
+ CLEAR_HARD_REG_BIT (current_live_regs, i);
+ }
+ }
+
+ else if (GET_CODE (real_insn) == CODE_LABEL)
+ {
+ /* A label clobbers the pending dead registers since neither
+ reload nor jump will propagate a value across a label. */
+ AND_COMPL_HARD_REG_SET (current_live_regs, pending_dead_regs);
+ CLEAR_HARD_REG_SET (pending_dead_regs);
+ }
+
+ /* The beginning of the epilogue corresponds to the end of the
+ RTL chain when there are no epilogue insns. Certain resources
+ are implicitly required at that point. */
+ else if (GET_CODE (real_insn) == NOTE
+ && NOTE_LINE_NUMBER (real_insn) == NOTE_INSN_EPILOGUE_BEG)
+ IOR_HARD_REG_SET (current_live_regs, start_of_epilogue_needs.regs);
+ }
+
+ COPY_HARD_REG_SET (res->regs, current_live_regs);
+ tinfo->block = b;
+ tinfo->bb_tick = bb_ticks[b];
+ }
+ else
+ /* We didn't find the start of a basic block. Assume everything
+ in use. This should happen only extremely rarely. */
+ SET_HARD_REG_SET (res->regs);
+
+ /* Now step forward from TARGET looking for registers that are set before
+ they are used. These are dead. If we pass a label, any pending dead
+ registers that weren't yet used can be made dead. Stop when we pass a
+ conditional JUMP_INSN; follow the first few unconditional branches. */
+
+ CLEAR_RESOURCE (&set);
+ CLEAR_RESOURCE (&needed);
+
+ for (insn = target; insn; insn = next)
+ {
+ rtx this_jump_insn = insn;
+
+ next = NEXT_INSN (insn);
+ switch (GET_CODE (insn))
+ {
+ case CODE_LABEL:
+ AND_COMPL_HARD_REG_SET (pending_dead_regs, needed.regs);
+ AND_COMPL_HARD_REG_SET (res->regs, pending_dead_regs);
+ CLEAR_HARD_REG_SET (pending_dead_regs);
+ continue;
+
+ case BARRIER:
+ case NOTE:
+ continue;
+
+ case INSN:
+ if (GET_CODE (PATTERN (insn)) == USE)
+ {
+ /* If INSN is a USE made by update_block, we care about the
+ underlying insn. Any registers set by the underlying insn
+ are live since the insn is being done somewhere else. */
+ if (GET_RTX_CLASS (GET_CODE (XEXP (PATTERN (insn), 0))) == 'i')
+ mark_set_resources (XEXP (PATTERN (insn), 0), res, 0, 1);
+
+ /* All other USE insns are to be ignored. */
+ continue;
+ }
+ else if (GET_CODE (PATTERN (insn)) == CLOBBER)
+ continue;
+ else if (GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ /* An unconditional jump can be used to fill the delay slot
+ of a call, so search for a JUMP_INSN in any position. */
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ {
+ this_jump_insn = XVECEXP (PATTERN (insn), 0, i);
+ if (GET_CODE (this_jump_insn) == JUMP_INSN)
+ break;
+ }
+ }
+ }
+
+ if (GET_CODE (this_jump_insn) == JUMP_INSN)
+ {
+ if (jump_count++ < 10
+ && (simplejump_p (this_jump_insn)
+ || GET_CODE (PATTERN (this_jump_insn)) == RETURN))
+ {
+ next = next_active_insn (JUMP_LABEL (this_jump_insn));
+ if (jump_insn == 0)
+ {
+ jump_insn = insn;
+ jump_target = JUMP_LABEL (this_jump_insn);
+ }
+ }
+ else
+ break;
+ }
+
+ mark_referenced_resources (insn, &needed, 1);
+ mark_set_resources (insn, &set, 0, 1);
+
+ COPY_HARD_REG_SET (scratch, set.regs);
+ AND_COMPL_HARD_REG_SET (scratch, needed.regs);
+ AND_COMPL_HARD_REG_SET (res->regs, scratch);
+ }
+
+ /* If we hit an unconditional branch, we have another way of finding out
+ what is live: we can see what is live at the branch target and include
+ anything used but not set before the branch. The only things that are
+ live are those that are live using the above test and the test below.
+
+ Don't try this if we expired our jump count above, since that would
+ mean there may be an infinite loop in the function being compiled. */
+
+ if (jump_insn && jump_count < 10)
+ {
+ struct resources new_resources;
+ rtx stop_insn = next_active_insn (jump_insn);
+
+ mark_target_live_regs (next_active_insn (jump_target), &new_resources);
+ CLEAR_RESOURCE (&set);
+ CLEAR_RESOURCE (&needed);
+
+ /* Include JUMP_INSN in the needed registers. */
+ for (insn = target; insn != stop_insn; insn = next_active_insn (insn))
+ {
+ mark_referenced_resources (insn, &needed, 1);
+
+ COPY_HARD_REG_SET (scratch, needed.regs);
+ AND_COMPL_HARD_REG_SET (scratch, set.regs);
+ IOR_HARD_REG_SET (new_resources.regs, scratch);
+
+ mark_set_resources (insn, &set, 0, 1);
+ }
+
+ AND_HARD_REG_SET (res->regs, new_resources.regs);
+ }
+
+ COPY_HARD_REG_SET (tinfo->live_regs, res->regs);
+}
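+
+/* Worked sketch (hypothetical): if TARGET's block starts with r4 live, an
+ intervening CLOBBER deadens r4 and a later SET livens it again; the
+ forward scan from TARGET then removes any register that is set before
+ it is used. The surviving set is cached in the hash table above, so
+ repeated queries for the same TARGET stay cheap until bb_ticks
+ invalidates the entry. */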
+
+/* Scan a function looking for insns that need a delay slot and find insns to
+ put into the delay slot.
+
+ NON_JUMPS_P is non-zero if we are to only try to fill non-jump insns (such
+ as calls). We do these first since we don't want jump insns (that are
+ easier to fill) to get the only insns that could be used for non-jump insns.
+ When it is zero, only try to fill JUMP_INSNs.
+
+ When slots are filled in this manner, the insns (including the
+ delay_insn) are put together in a SEQUENCE rtx. In this fashion,
+ it is possible to tell whether a delay slot has really been filled
+ or not. `final' knows how to deal with this, by communicating
+ through FINAL_SEQUENCE. */
+
+static void
+fill_simple_delay_slots (first, non_jumps_p)
+ rtx first;
+ int non_jumps_p;
+{
+ register rtx insn, pat, trial, next_trial;
+ register int i, j;
+ int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
+ struct resources needed, set;
+ register int slots_to_fill, slots_filled;
+ rtx delay_list;
+
+ for (i = 0; i < num_unfilled_slots; i++)
+ {
+ int flags;
+ /* Get the next insn to fill. If it has already had any slots assigned,
+ we can't do anything with it. Maybe we'll improve this later. */
+
+ insn = unfilled_slots_base[i];
+ if (insn == 0
+ || INSN_DELETED_P (insn)
+ || (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ || (GET_CODE (insn) == JUMP_INSN && non_jumps_p)
+ || (GET_CODE (insn) != JUMP_INSN && ! non_jumps_p))
+ continue;
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ flags = get_jump_flags (insn, JUMP_LABEL (insn));
+ else
+ flags = get_jump_flags (insn, NULL_RTX);
+ slots_to_fill = num_delay_slots (insn);
+ if (slots_to_fill == 0)
+ abort ();
+
+ /* This insn needs, or can use, some delay slots. SLOTS_TO_FILL
+ says how many. After initialization, first try optimizing
+
+ call _foo call _foo
+ nop add %o7,.-L1,%o7
+ b,a L1
+ nop
+
+ If this case applies, the delay slot of the call is filled with
+ the unconditional jump. This is done first to avoid having the
+ delay slot of the call filled in the backward scan. Also, since
+ the unconditional jump is likely to also have a delay slot, that
+ insn must exist when it is subsequently scanned.
+
+ This is tried on each insn with delay slots as some machines
+ have insns which perform calls, but are not represented as
+ CALL_INSNs. */
+
+ slots_filled = 0;
+ delay_list = 0;
+
+ if ((trial = next_active_insn (insn))
+ && GET_CODE (trial) == JUMP_INSN
+ && simplejump_p (trial)
+ && eligible_for_delay (insn, slots_filled, trial, flags)
+ && no_labels_between_p (insn, trial))
+ {
+ slots_filled++;
+ delay_list = add_to_delay_list (trial, delay_list);
+ /* Remove the unconditional jump from consideration for delay slot
+ filling and unthread it. */
+ if (unfilled_slots_base[i + 1] == trial)
+ unfilled_slots_base[i + 1] = 0;
+ {
+ rtx next = NEXT_INSN (trial);
+ rtx prev = PREV_INSN (trial);
+ if (prev)
+ NEXT_INSN (prev) = next;
+ if (next)
+ PREV_INSN (next) = prev;
+ }
+ }
+
+ /* Now, scan backwards from the insn to search for a potential
+ delay-slot candidate. Stop searching when a label or jump is hit.
+
+ For each candidate, if it is to go into the delay slot (moved
+ forward in execution sequence), it must not need or set any resources
+ that were set by later insns and must not set any resources that
+ are needed for those insns.
+
+ The delay slot insn itself sets resources unless it is a call
+ (in which case the called routine, not the insn itself, is doing
+ the setting). */
+
+ if (slots_filled < slots_to_fill)
+ {
+ CLEAR_RESOURCE (&needed);
+ CLEAR_RESOURCE (&set);
+ mark_set_resources (insn, &set, 0, 0);
+ mark_referenced_resources (insn, &needed, 0);
+
+ for (trial = prev_nonnote_insn (insn); ! stop_search_p (trial, 1);
+ trial = next_trial)
+ {
+ next_trial = prev_nonnote_insn (trial);
+
+ /* This must be an INSN or CALL_INSN. */
+ pat = PATTERN (trial);
+
+ /* USE and CLOBBER at this level are just for flow; ignore them. */
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
+
+ /* Check for resource conflict first, to avoid unnecessary
+ splitting. */
+ if (! insn_references_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &needed, 1)
+#ifdef HAVE_cc0
+ /* Can't separate set of cc0 from its use. */
+ && ! (reg_mentioned_p (cc0_rtx, pat)
+ && ! sets_cc0_p (pat))
+#endif
+ )
+ {
+ trial = try_split (pat, trial, 1);
+ next_trial = prev_nonnote_insn (trial);
+ if (eligible_for_delay (insn, slots_filled, trial, flags))
+ {
+ /* In this case, we are searching backward, so if we
+ find insns to put on the delay list, we want
+ to put them at the head, rather than the
+ tail, of the list. */
+
+ update_reg_dead_notes (trial, insn);
+ delay_list = gen_rtx (INSN_LIST, VOIDmode,
+ trial, delay_list);
+ update_block (trial, trial);
+ delete_insn (trial);
+ if (slots_to_fill == ++slots_filled)
+ break;
+ continue;
+ }
+ }
+
+ mark_set_resources (trial, &set, 0, 1);
+ mark_referenced_resources (trial, &needed, 1);
+ }
+ }
+
+ /* If all needed slots haven't been filled, we come here. */
+
+ /* Try to optimize case of jumping around a single insn. */
+#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
+ if (slots_filled != slots_to_fill
+ && delay_list == 0
+ && GET_CODE (insn) == JUMP_INSN
+ && (condjump_p (insn) || condjump_in_parallel_p (insn)))
+ {
+ delay_list = optimize_skip (insn);
+ if (delay_list)
+ slots_filled += 1;
+ }
+#endif
+
+ /* Try to get insns from beyond the insn needing the delay slot.
+ These insns can neither set nor reference resources set in insns being
+ skipped, cannot set resources in the insn being skipped, and, if this
+ is a CALL_INSN (or a CALL_INSN is passed), cannot trap (because the
+ call might not return).
+
+ If this is a conditional jump, see if it merges back to us early
+ enough for us to pick up insns from the merge point. Don't do
+ this if there is another branch to our label unless we pass all of
+ them.
+
+ Another similar merge is if we jump to the same place that a
+ later unconditional jump branches to. In that case, we don't
+ care about the number of uses of our label. */
+
+ if (slots_filled != slots_to_fill
+ && (GET_CODE (insn) != JUMP_INSN
+ || ((condjump_p (insn) || condjump_in_parallel_p (insn))
+ && ! simplejump_p (insn)
+ && JUMP_LABEL (insn) != 0)))
+ {
+ rtx target = 0;
+ int maybe_never = 0;
+ int passed_label = 0;
+ int target_uses;
+ struct resources needed_at_jump;
+
+ CLEAR_RESOURCE (&needed);
+ CLEAR_RESOURCE (&set);
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ mark_set_resources (insn, &set, 0, 1);
+ mark_referenced_resources (insn, &needed, 1);
+ maybe_never = 1;
+ }
+ else
+ {
+ mark_set_resources (insn, &set, 0, 1);
+ mark_referenced_resources (insn, &needed, 1);
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ /* Get our target and show how many more uses we want to
+ see before we hit the label. */
+ target = JUMP_LABEL (insn);
+ target_uses = LABEL_NUSES (target) - 1;
+ }
+
+ }
+
+ for (trial = next_nonnote_insn (insn); trial; trial = next_trial)
+ {
+ rtx pat, trial_delay;
+
+ next_trial = next_nonnote_insn (trial);
+
+ if (GET_CODE (trial) == CODE_LABEL)
+ {
+ passed_label = 1;
+
+ /* If this is our target, see if we have seen all its uses.
+ If so, indicate we have passed our target and ignore it.
+ All other labels cause us to stop our search. */
+ if (trial == target && target_uses == 0)
+ {
+ target = 0;
+ continue;
+ }
+ else
+ break;
+ }
+ else if (GET_CODE (trial) == BARRIER)
+ break;
+
+ /* We must have an INSN, JUMP_INSN, or CALL_INSN. */
+ pat = PATTERN (trial);
+
+ /* Stand-alone USE and CLOBBER are just for flow. */
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
+
+ /* If this already has filled delay slots, get the insn needing
+ the delay slots. */
+ if (GET_CODE (pat) == SEQUENCE)
+ trial_delay = XVECEXP (pat, 0, 0);
+ else
+ trial_delay = trial;
+
+ /* If this is a jump insn to our target, indicate that we have
+ seen another jump to it. If we aren't handling a conditional
+ jump, stop our search. Otherwise, compute the needs at its
+ target and add them to NEEDED. */
+ if (GET_CODE (trial_delay) == JUMP_INSN)
+ {
+ if (target == 0)
+ break;
+ else if (JUMP_LABEL (trial_delay) == target)
+ target_uses--;
+ else
+ {
+ mark_target_live_regs
+ (next_active_insn (JUMP_LABEL (trial_delay)),
+ &needed_at_jump);
+ needed.memory |= needed_at_jump.memory;
+ IOR_HARD_REG_SET (needed.regs, needed_at_jump.regs);
+ }
+ }
+
+ /* See if we have a resource problem before we try to
+ split. */
+ if (target == 0
+ && GET_CODE (pat) != SEQUENCE
+ && ! insn_references_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &needed, 1)
+#ifdef HAVE_cc0
+ && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
+#endif
+ && ! (maybe_never && may_trap_p (pat))
+ && (trial = try_split (pat, trial, 0))
+ && eligible_for_delay (insn, slots_filled, trial, flags))
+ {
+ next_trial = next_nonnote_insn (trial);
+ delay_list = add_to_delay_list (trial, delay_list);
+
+#ifdef HAVE_cc0
+ if (reg_mentioned_p (cc0_rtx, pat))
+ link_cc0_insns (trial);
+#endif
+
+ if (passed_label)
+ update_block (trial, trial);
+ delete_insn (trial);
+ if (slots_to_fill == ++slots_filled)
+ break;
+ continue;
+ }
+
+ mark_set_resources (trial, &set, 0, 1);
+ mark_referenced_resources (trial, &needed, 1);
+
+ /* Ensure we don't put insns between the setting of cc and the
+ comparison by moving a setting of cc into an earlier delay
+ slot since these insns could clobber the condition code. */
+ set.cc = 1;
+
+ /* If this is a call or jump, we might not get here. */
+ if (GET_CODE (trial) == CALL_INSN
+ || GET_CODE (trial) == JUMP_INSN)
+ maybe_never = 1;
+ }
+
+ /* If there are slots left to fill and our search was stopped by an
+ unconditional branch, try the insn at the branch target. We can
+ redirect the branch if it works. */
+ if (slots_to_fill != slots_filled
+ && trial
+ && GET_CODE (trial) == JUMP_INSN
+ && simplejump_p (trial)
+ && (target == 0 || JUMP_LABEL (trial) == target)
+ && (next_trial = next_active_insn (JUMP_LABEL (trial))) != 0
+ && ! (GET_CODE (next_trial) == INSN
+ && GET_CODE (PATTERN (next_trial)) == SEQUENCE)
+ && ! insn_references_resource_p (next_trial, &set, 1)
+ && ! insn_sets_resource_p (next_trial, &set, 1)
+ && ! insn_sets_resource_p (next_trial, &needed, 1)
+#ifdef HAVE_cc0
+ && ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial))
+#endif
+ && ! (maybe_never && may_trap_p (PATTERN (next_trial)))
+ && (next_trial = try_split (PATTERN (next_trial), next_trial, 0))
+ && eligible_for_delay (insn, slots_filled, next_trial, flags))
+ {
+ rtx new_label = next_active_insn (next_trial);
+
+ if (new_label != 0)
+ new_label = get_label_before (new_label);
+ else
+ new_label = find_end_label ();
+
+ delay_list
+ = add_to_delay_list (copy_rtx (next_trial), delay_list);
+ slots_filled++;
+ reorg_redirect_jump (trial, new_label);
+
+ /* If we merged because we both jumped to the same place,
+ redirect the original insn also. */
+ if (target)
+ reorg_redirect_jump (insn, new_label);
+ }
+ }
+
+ if (delay_list)
+ unfilled_slots_base[i]
+ = emit_delay_sequence (insn, delay_list,
+ slots_filled, slots_to_fill);
+
+ if (slots_to_fill == slots_filled)
+ unfilled_slots_base[i] = 0;
+
+ note_delay_statistics (slots_filled, 0);
+ }
+
+#ifdef DELAY_SLOTS_FOR_EPILOGUE
+ /* See if the epilogue needs any delay slots. Try to fill them if so.
+ The only thing we can do is scan backwards from the end of the
+ function. If we did this in a previous pass, it is incorrect to do it
+ again. */
+ if (current_function_epilogue_delay_list)
+ return;
+
+ slots_to_fill = DELAY_SLOTS_FOR_EPILOGUE;
+ if (slots_to_fill == 0)
+ return;
+
+ slots_filled = 0;
+ needed = end_of_function_needs;
+ CLEAR_RESOURCE (&set);
+
+ for (trial = get_last_insn (); ! stop_search_p (trial, 1);
+ trial = PREV_INSN (trial))
+ {
+ if (GET_CODE (trial) == NOTE)
+ continue;
+ pat = PATTERN (trial);
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
+
+ if (! insn_references_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &needed, 1)
+#ifdef HAVE_cc0
+ /* Don't want to mess with cc0 here. */
+ && ! reg_mentioned_p (cc0_rtx, pat)
+#endif
+ )
+ {
+ trial = try_split (pat, trial, 1);
+ if (ELIGIBLE_FOR_EPILOGUE_DELAY (trial, slots_filled))
+ {
+ /* Here as well we are searching backward, so put the
+ insns we find on the head of the list. */
+
+ current_function_epilogue_delay_list
+ = gen_rtx (INSN_LIST, VOIDmode, trial,
+ current_function_epilogue_delay_list);
+ mark_referenced_resources (trial, &end_of_function_needs, 1);
+ update_block (trial, trial);
+ delete_insn (trial);
+
+ /* Clear deleted bit so final.c will output the insn. */
+ INSN_DELETED_P (trial) = 0;
+
+ if (slots_to_fill == ++slots_filled)
+ break;
+ continue;
+ }
+ }
+
+ mark_set_resources (trial, &set, 0, 1);
+ mark_referenced_resources (trial, &needed, 1);
+ }
+
+ note_delay_statistics (slots_filled, 0);
+#endif
+}
+
+/* Try to find insns to place in delay slots.
+
+ INSN is the jump needing SLOTS_TO_FILL delay slots. It tests CONDITION
+ or is an unconditional branch if CONDITION is const_true_rtx.
+ *PSLOTS_FILLED is updated with the number of slots that we have filled.
+
+ THREAD is a flow of control: either the insns to be executed if the
+ branch is true or those to be executed if it is false; THREAD_IF_TRUE
+ says which.
+
+ OPPOSITE_THREAD is the thread in the opposite direction. It is used
+ to see if any potential delay slot insns set things needed there.
+
+ LIKELY is non-zero if it is extremely likely that the branch will be
+ taken and THREAD_IF_TRUE is set. This is used for the branch at the
+ end of a loop back up to the top.
+
+ OWN_THREAD and OWN_OPPOSITE_THREAD are true if we are the only user of the
+ thread. I.e., it is the fallthrough code of our jump or the target of the
+ jump when we are the only jump going there.
+
+ If OWN_THREAD is false, it must be the "true" thread of a jump. In that
+ case, we can only take insns from the head of the thread for our delay
+ slot. We then adjust the jump to point after the insns we have taken. */
+
+static rtx
+fill_slots_from_thread (insn, condition, thread, opposite_thread, likely,
+ thread_if_true, own_thread, own_opposite_thread,
+ slots_to_fill, pslots_filled)
+ rtx insn;
+ rtx condition;
+ rtx thread, opposite_thread;
+ int likely;
+ int thread_if_true;
+ int own_thread, own_opposite_thread;
+ int slots_to_fill, *pslots_filled;
+{
+ rtx new_thread;
+ rtx delay_list = 0;
+ struct resources opposite_needed, set, needed;
+ rtx trial;
+ int lose = 0;
+ int must_annul = 0;
+ int flags;
+
+ /* Validate our arguments. */
+ if ((condition == const_true_rtx && ! thread_if_true)
+ || (! own_thread && ! thread_if_true))
+ abort ();
+
+ flags = get_jump_flags (insn, JUMP_LABEL (insn));
+
+ /* If our thread is the end of the subroutine, we can't get any delay
+ insns from that. */
+ if (thread == 0)
+ return 0;
+
+ /* If this is an unconditional branch, nothing is needed at the
+ opposite thread. Otherwise, compute what is needed there. */
+ if (condition == const_true_rtx)
+ CLEAR_RESOURCE (&opposite_needed);
+ else
+ mark_target_live_regs (opposite_thread, &opposite_needed);
+
+ /* If the insn at THREAD can be split, do it here to avoid having to
+ update THREAD and NEW_THREAD if it is done in the loop below. Also
+ initialize NEW_THREAD. */
+
+ new_thread = thread = try_split (PATTERN (thread), thread, 0);
+
+ /* Scan insns at THREAD. We are looking for an insn that can be removed
+ from THREAD (it neither sets nor references resources that were set
+ ahead of it and it doesn't set anything needed by the insns ahead of
+ it) and that either can be placed in an annulling insn or isn't
+ needed at OPPOSITE_THREAD. */
+
+ CLEAR_RESOURCE (&needed);
+ CLEAR_RESOURCE (&set);
+
+ /* If we do not own this thread, we must stop as soon as we find
+ something that we can't put in a delay slot, since all we can do
+ is branch into THREAD at a later point. Therefore, labels stop
+ the search if this is not the `true' thread. */
+
+ for (trial = thread;
+ ! stop_search_p (trial, ! thread_if_true) && (! lose || own_thread);
+ trial = next_nonnote_insn (trial))
+ {
+ rtx pat, old_trial;
+
+ /* If we have passed a label, we no longer own this thread. */
+ if (GET_CODE (trial) == CODE_LABEL)
+ {
+ own_thread = 0;
+ continue;
+ }
+
+ pat = PATTERN (trial);
+ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
+ continue;
+
+ /* If TRIAL conflicts with the insns ahead of it, we lose. Also,
+ don't separate or copy insns that set and use CC0. */
+ if (! insn_references_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &set, 1)
+ && ! insn_sets_resource_p (trial, &needed, 1)
+#ifdef HAVE_cc0
+ && ! (reg_mentioned_p (cc0_rtx, pat)
+ && (! own_thread || ! sets_cc0_p (pat)))
+#endif
+ )
+ {
+ /* If TRIAL is redundant with some insn before INSN, we don't
+ actually need to add it to the delay list; we can merely pretend
+ we did. */
+ if (redundant_insn_p (trial, insn, delay_list))
+ {
+ if (own_thread)
+ {
+ update_block (trial, thread);
+ delete_insn (trial);
+ }
+ else
+ new_thread = next_active_insn (trial);
+
+ continue;
+ }
+
+ /* There are two ways we can win: If TRIAL doesn't set anything
+ needed at the opposite thread and can't trap, or if it can
+ go into an annulled delay slot. */
+ if (condition == const_true_rtx
+ || (! insn_sets_resource_p (trial, &opposite_needed, 1)
+ && ! may_trap_p (pat)))
+ {
+ old_trial = trial;
+ trial = try_split (pat, trial, 0);
+ if (new_thread == old_trial)
+ new_thread = trial;
+ pat = PATTERN (trial);
+ if (eligible_for_delay (insn, *pslots_filled, trial, flags))
+ goto winner;
+ }
+ else if (0
+#ifdef ANNUL_IFTRUE_SLOTS
+ || ! thread_if_true
+#endif
+#ifdef ANNUL_IFFALSE_SLOTS
+ || thread_if_true
+#endif
+ )
+ {
+ old_trial = trial;
+ trial = try_split (pat, trial, 0);
+ if (new_thread == old_trial)
+ new_thread = trial;
+ pat = PATTERN (trial);
+ if ((thread_if_true
+ ? eligible_for_annul_false (insn, *pslots_filled, trial, flags)
+ : eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
+ {
+ rtx temp;
+
+ must_annul = 1;
+ winner:
+
+#ifdef HAVE_cc0
+ if (reg_mentioned_p (cc0_rtx, pat))
+ link_cc0_insns (trial);
+#endif
+
+ /* If we own this thread, delete the insn. If this is the
+ destination of a branch, show that a basic block status
+ may have been updated. In any case, mark the new
+ starting point of this thread. */
+ if (own_thread)
+ {
+ update_block (trial, thread);
+ delete_insn (trial);
+ }
+ else
+ new_thread = next_active_insn (trial);
+
+ temp = own_thread ? trial : copy_rtx (trial);
+ if (thread_if_true)
+ INSN_FROM_TARGET_P (temp) = 1;
+
+ delay_list = add_to_delay_list (temp, delay_list);
+
+ if (slots_to_fill == ++(*pslots_filled))
+ {
+ /* Even though we have filled all the slots, we
+ may be branching to a location that has a
+ redundant insn. Skip any if so. */
+ while (new_thread && ! own_thread
+ && ! insn_sets_resource_p (new_thread, &set, 1)
+ && ! insn_sets_resource_p (new_thread, &needed, 1)
+ && ! insn_references_resource_p (new_thread,
+ &set, 1)
+ && redundant_insn_p (new_thread, insn,
+ delay_list))
+ new_thread = next_active_insn (new_thread);
+ break;
+ }
+
+ continue;
+ }
+ }
+ }
+
+ /* This insn can't go into a delay slot. */
+ lose = 1;
+ mark_set_resources (trial, &set, 0, 1);
+ mark_referenced_resources (trial, &needed, 1);
+
+      /* Ensure we don't move a setting of cc into an earlier delay slot;
+	 that would put insns between the cc set and the comparison, and
+	 those insns could clobber the condition code.  */
+ set.cc = 1;
+
+ /* If this insn is a register-register copy and the next insn has
+ a use of our destination, change it to use our source. That way,
+ it will become a candidate for our delay slot the next time
+ through this loop. This case occurs commonly in loops that
+ scan a list.
+
+ We could check for more complex cases than those tested below,
+ but it doesn't seem worth it. It might also be a good idea to try
+ to swap the two insns. That might do better.
+
+ We can't do this if the next insn modifies our destination, because
+ that would make the replacement into the insn invalid. We also can't
+ do this if it modifies our source, because it might be an earlyclobber
+ operand. This latter test also prevents updating the contents of
+ a PRE_INC. */
+
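+      /* A hypothetical instance of the above, common when scanning a list:
+		r1 = r2		<- TRIAL, a stuck register-register copy
+		r3 = *r1	<- rewritten below to r3 = *r2
+	 once it no longer uses r1, the second insn does not conflict with
+	 the copy and may itself be taken for a delay slot on a later
+	 iteration.  */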
+ if (GET_CODE (trial) == INSN && GET_CODE (pat) == SET
+ && GET_CODE (SET_SRC (pat)) == REG
+ && GET_CODE (SET_DEST (pat)) == REG)
+ {
+ rtx next = next_nonnote_insn (trial);
+
+ if (next && GET_CODE (next) == INSN
+ && GET_CODE (PATTERN (next)) != USE
+ && ! reg_set_p (SET_DEST (pat), next)
+ && ! reg_set_p (SET_SRC (pat), next)
+ && reg_referenced_p (SET_DEST (pat), PATTERN (next)))
+ validate_replace_rtx (SET_DEST (pat), SET_SRC (pat), next);
+ }
+ }
+
+ /* If we stopped on a branch insn that has delay slots, see if we can
+ steal some of the insns in those slots. */
+ if (trial && GET_CODE (trial) == INSN
+ && GET_CODE (PATTERN (trial)) == SEQUENCE
+ && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN)
+ {
+ /* If this is the `true' thread, we will want to follow the jump,
+ so we can only do this if we have taken everything up to here. */
+ if (thread_if_true && trial == new_thread)
+ delay_list
+ = steal_delay_list_from_target (insn, condition, PATTERN (trial),
+ delay_list, &set, &needed,
+ &opposite_needed, slots_to_fill,
+ pslots_filled, &must_annul,
+ &new_thread);
+ else if (! thread_if_true)
+ delay_list
+ = steal_delay_list_from_fallthrough (insn, condition,
+ PATTERN (trial),
+ delay_list, &set, &needed,
+ &opposite_needed, slots_to_fill,
+ pslots_filled, &must_annul);
+ }
+
+ /* If we haven't found anything for this delay slot and it is very
+ likely that the branch will be taken, see if the insn at our target
+ increments or decrements a register with an increment that does not
+ depend on the destination register. If so, try to place the opposite
+ arithmetic insn after the jump insn and put the arithmetic insn in the
+ delay slot. If we can't do this, return. */
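+
+  /* A hypothetical illustration: for a loop ending in
+	L:	r1 = r1 + 4
+		...
+		if (cond) goto L	<- INSN, likely taken
+     we may put `r1 = r1 + 4' into the delay slot, where it executes on both
+     paths, and emit the compensating `r1 = r1 - 4' after INSN so that the
+     fall-through path is unchanged.  */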
+ if (delay_list == 0 && likely && new_thread && GET_CODE (new_thread) == INSN)
+ {
+ rtx pat = PATTERN (new_thread);
+ rtx dest;
+ rtx src;
+
+ trial = new_thread;
+ pat = PATTERN (trial);
+
+ if (GET_CODE (trial) != INSN || GET_CODE (pat) != SET
+ || ! eligible_for_delay (insn, 0, trial, flags))
+ return 0;
+
+ dest = SET_DEST (pat), src = SET_SRC (pat);
+ if ((GET_CODE (src) == PLUS || GET_CODE (src) == MINUS)
+ && rtx_equal_p (XEXP (src, 0), dest)
+ && ! reg_overlap_mentioned_p (dest, XEXP (src, 1)))
+ {
+ rtx other = XEXP (src, 1);
+ rtx new_arith;
+ rtx ninsn;
+
+ /* If this is a constant adjustment, use the same code with
+ the negated constant. Otherwise, reverse the sense of the
+ arithmetic. */
+ if (GET_CODE (other) == CONST_INT)
+ new_arith = gen_rtx (GET_CODE (src), GET_MODE (src), dest,
+ negate_rtx (GET_MODE (src), other));
+ else
+ new_arith = gen_rtx (GET_CODE (src) == PLUS ? MINUS : PLUS,
+ GET_MODE (src), dest, other);
+
+ ninsn = emit_insn_after (gen_rtx (SET, VOIDmode, dest, new_arith),
+ insn);
+
+ if (recog_memoized (ninsn) < 0
+ || (insn_extract (ninsn),
+ ! constrain_operands (INSN_CODE (ninsn), 1)))
+ {
+ delete_insn (ninsn);
+ return 0;
+ }
+
+ if (own_thread)
+ {
+ update_block (trial, thread);
+ delete_insn (trial);
+ }
+ else
+ new_thread = next_active_insn (trial);
+
+ ninsn = own_thread ? trial : copy_rtx (trial);
+ if (thread_if_true)
+ INSN_FROM_TARGET_P (ninsn) = 1;
+
+ delay_list = add_to_delay_list (ninsn, NULL_RTX);
+ (*pslots_filled)++;
+ }
+ }
+
+ if (delay_list && must_annul)
+ INSN_ANNULLED_BRANCH_P (insn) = 1;
+
+ /* If we are to branch into the middle of this thread, find an appropriate
+ label or make a new one if none, and redirect INSN to it. If we hit the
+ end of the function, use the end-of-function label. */
+ if (new_thread != thread)
+ {
+ rtx label;
+
+ if (! thread_if_true)
+ abort ();
+
+ if (new_thread && GET_CODE (new_thread) == JUMP_INSN
+ && (simplejump_p (new_thread)
+ || GET_CODE (PATTERN (new_thread)) == RETURN)
+ && redirect_with_delay_list_safe_p (insn,
+ JUMP_LABEL (new_thread),
+ delay_list))
+ new_thread = follow_jumps (JUMP_LABEL (new_thread));
+
+ if (new_thread == 0)
+ label = find_end_label ();
+ else if (GET_CODE (new_thread) == CODE_LABEL)
+ label = new_thread;
+ else
+ label = get_label_before (new_thread);
+
+ reorg_redirect_jump (insn, label);
+ }
+
+ return delay_list;
+}
+
+/* Make another attempt to find insns to place in delay slots.
+
+ We previously looked for insns located in front of the delay insn
+ and, for non-jump delay insns, located behind the delay insn.
+
+   Here we try only to schedule jump insns, moving insns from either the
+   target or the following insns into the delay slot.  If annulling is
+   supported, we are likely to be able to do this.  Otherwise, we can do
+   this only if safe.  */
+
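+/* For instance, on a hypothetical machine with annullable slots, an insn
+   from the branch target can be placed in the slot of a likely-taken branch
+   with the branch marked to annul on fall-through, so the slot insn has no
+   effect when the branch is not taken.  */
+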
+static void
+fill_eager_delay_slots (first)
+ rtx first;
+{
+ register rtx insn;
+ register int i;
+ int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
+
+ for (i = 0; i < num_unfilled_slots; i++)
+ {
+ rtx condition;
+ rtx target_label, insn_at_target, fallthrough_insn;
+ rtx delay_list = 0;
+ int own_target;
+ int own_fallthrough;
+ int prediction, slots_to_fill, slots_filled;
+
+ insn = unfilled_slots_base[i];
+ if (insn == 0
+ || INSN_DELETED_P (insn)
+ || GET_CODE (insn) != JUMP_INSN
+ || ! (condjump_p (insn) || condjump_in_parallel_p (insn)))
+ continue;
+
+ slots_to_fill = num_delay_slots (insn);
+ if (slots_to_fill == 0)
+ abort ();
+
+ slots_filled = 0;
+ target_label = JUMP_LABEL (insn);
+ condition = get_branch_condition (insn, target_label);
+
+ if (condition == 0)
+ continue;
+
+      /* Get the next active fallthrough and target insns and see if we own
+ them. Then see whether the branch is likely true. We don't need
+ to do a lot of this for unconditional branches. */
+
+ insn_at_target = next_active_insn (target_label);
+ own_target = own_thread_p (target_label, target_label, 0);
+
+ if (condition == const_true_rtx)
+ {
+ own_fallthrough = 0;
+ fallthrough_insn = 0;
+ prediction = 2;
+ }
+ else
+ {
+ fallthrough_insn = next_active_insn (insn);
+ own_fallthrough = own_thread_p (NEXT_INSN (insn), NULL_RTX, 1);
+ prediction = mostly_true_jump (insn, condition);
+ }
+
+ /* If this insn is expected to branch, first try to get insns from our
+	 target, then our fallthrough insns.  If it is not expected to branch,
+ try the other order. */
+
+ if (prediction > 0)
+ {
+ delay_list
+ = fill_slots_from_thread (insn, condition, insn_at_target,
+ fallthrough_insn, prediction == 2, 1,
+ own_target, own_fallthrough,
+ slots_to_fill, &slots_filled);
+
+ if (delay_list == 0 && own_fallthrough)
+ {
+ /* Even though we didn't find anything for delay slots,
+ we might have found a redundant insn which we deleted
+ from the thread that was filled. So we have to recompute
+ the next insn at the target. */
+ target_label = JUMP_LABEL (insn);
+ insn_at_target = next_active_insn (target_label);
+
+ delay_list
+ = fill_slots_from_thread (insn, condition, fallthrough_insn,
+ insn_at_target, 0, 0,
+ own_fallthrough, own_target,
+ slots_to_fill, &slots_filled);
+ }
+ }
+ else
+ {
+ if (own_fallthrough)
+ delay_list
+ = fill_slots_from_thread (insn, condition, fallthrough_insn,
+ insn_at_target, 0, 0,
+ own_fallthrough, own_target,
+ slots_to_fill, &slots_filled);
+
+ if (delay_list == 0)
+ delay_list
+ = fill_slots_from_thread (insn, condition, insn_at_target,
+ next_active_insn (insn), 0, 1,
+ own_target, own_fallthrough,
+ slots_to_fill, &slots_filled);
+ }
+
+ if (delay_list)
+ unfilled_slots_base[i]
+ = emit_delay_sequence (insn, delay_list,
+ slots_filled, slots_to_fill);
+
+ if (slots_to_fill == slots_filled)
+ unfilled_slots_base[i] = 0;
+
+ note_delay_statistics (slots_filled, 1);
+ }
+}
+
+/* Once we have tried two ways to fill a delay slot, make a pass over the
+ code to try to improve the results and to do such things as more jump
+ threading. */
+
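+/* E.g. (hypothetical labels): a jump to L1 whose target insn is itself
+   `jmp L2' is redirected straight to L2, and a jump straight to the
+   following insn is deleted outright.  */
+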
+static void
+relax_delay_slots (first)
+ rtx first;
+{
+ register rtx insn, next, pat;
+ register rtx trial, delay_insn, target_label;
+
+ /* Look at every JUMP_INSN and see if we can improve it. */
+ for (insn = first; insn; insn = next)
+ {
+ rtx other;
+
+ next = next_active_insn (insn);
+
+ /* If this is a jump insn, see if it now jumps to a jump, jumps to
+ the next insn, or jumps to a label that is not the last of a
+ group of consecutive labels. */
+ if (GET_CODE (insn) == JUMP_INSN
+ && (condjump_p (insn) || condjump_in_parallel_p (insn))
+ && (target_label = JUMP_LABEL (insn)) != 0)
+ {
+ target_label = follow_jumps (target_label);
+ target_label = prev_label (next_active_insn (target_label));
+
+ if (target_label == 0)
+ target_label = find_end_label ();
+
+ if (next_active_insn (target_label) == next
+ && ! condjump_in_parallel_p (insn))
+ {
+ delete_jump (insn);
+ continue;
+ }
+
+ if (target_label != JUMP_LABEL (insn))
+ reorg_redirect_jump (insn, target_label);
+
+	  /* See if this jump branches around an unconditional jump.
+ If so, invert this jump and point it to the target of the
+ second jump. */
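+	  /* A hypothetical instance:
+		beq  L1		<- INSN
+		jmp  L2		<- NEXT
+	     L1:
+	     becomes
+		bne  L2
+	     L1:  */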
+ if (next && GET_CODE (next) == JUMP_INSN
+ && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
+ && next_active_insn (target_label) == next_active_insn (next)
+ && no_labels_between_p (insn, next))
+ {
+ rtx label = JUMP_LABEL (next);
+
+ /* Be careful how we do this to avoid deleting code or
+ labels that are momentarily dead. See similar optimization
+ in jump.c.
+
+ We also need to ensure we properly handle the case when
+ invert_jump fails. */
+
+ ++LABEL_NUSES (target_label);
+ if (label)
+ ++LABEL_NUSES (label);
+
+ if (invert_jump (insn, label))
+ {
+ delete_insn (next);
+ next = insn;
+ }
+
+ if (label)
+ --LABEL_NUSES (label);
+
+ if (--LABEL_NUSES (target_label) == 0)
+ delete_insn (target_label);
+
+ continue;
+ }
+ }
+
+ /* If this is an unconditional jump and the previous insn is a
+ conditional jump, try reversing the condition of the previous
+ insn and swapping our targets. The next pass might be able to
+ fill the slots.
+
+ Don't do this if we expect the conditional branch to be true, because
+ we would then be making the more common case longer. */
+
+ if (GET_CODE (insn) == JUMP_INSN
+ && (simplejump_p (insn) || GET_CODE (PATTERN (insn)) == RETURN)
+ && (other = prev_active_insn (insn)) != 0
+ && (condjump_p (other) || condjump_in_parallel_p (other))
+ && no_labels_between_p (other, insn)
+ && 0 < mostly_true_jump (other,
+ get_branch_condition (other,
+ JUMP_LABEL (other))))
+ {
+ rtx other_target = JUMP_LABEL (other);
+ target_label = JUMP_LABEL (insn);
+
+ /* Increment the count of OTHER_TARGET, so it doesn't get deleted
+ as we move the label. */
+ if (other_target)
+ ++LABEL_NUSES (other_target);
+
+ if (invert_jump (other, target_label))
+ reorg_redirect_jump (insn, other_target);
+
+ if (other_target)
+ --LABEL_NUSES (other_target);
+ }
+
+ /* Now look only at cases where we have filled a delay slot. */
+ if (GET_CODE (insn) != INSN
+ || GET_CODE (PATTERN (insn)) != SEQUENCE)
+ continue;
+
+ pat = PATTERN (insn);
+ delay_insn = XVECEXP (pat, 0, 0);
+
+ /* See if the first insn in the delay slot is redundant with some
+ previous insn. Remove it from the delay slot if so; then set up
+ to reprocess this insn. */
+ if (redundant_insn_p (XVECEXP (pat, 0, 1), delay_insn, 0))
+ {
+ delete_from_delay_slot (XVECEXP (pat, 0, 1));
+ next = prev_active_insn (next);
+ continue;
+ }
+
+ /* Now look only at the cases where we have a filled JUMP_INSN. */
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) != JUMP_INSN
+ || ! (condjump_p (XVECEXP (PATTERN (insn), 0, 0))
+ || condjump_in_parallel_p (XVECEXP (PATTERN (insn), 0, 0))))
+ continue;
+
+ target_label = JUMP_LABEL (delay_insn);
+
+ if (target_label)
+ {
+ /* If this jump goes to another unconditional jump, thread it, but
+ don't convert a jump into a RETURN here. */
+ trial = follow_jumps (target_label);
+ trial = prev_label (next_active_insn (trial));
+ if (trial == 0 && target_label != 0)
+ trial = find_end_label ();
+
+ if (trial != target_label
+ && redirect_with_delay_slots_safe_p (delay_insn, trial, insn))
+ {
+ reorg_redirect_jump (delay_insn, trial);
+ target_label = trial;
+ }
+
+ /* If the first insn at TARGET_LABEL is redundant with a previous
+	     insn, redirect the jump to the following insn and process again.  */
+ trial = next_active_insn (target_label);
+ if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE
+ && redundant_insn_p (trial, insn, 0))
+ {
+ trial = next_active_insn (trial);
+ if (trial == 0)
+ target_label = find_end_label ();
+ else
+ target_label = get_label_before (trial);
+ reorg_redirect_jump (delay_insn, target_label);
+ next = insn;
+ continue;
+ }
+
+ /* Similarly, if it is an unconditional jump with one insn in its
+ delay list and that insn is redundant, thread the jump. */
+ if (trial && GET_CODE (PATTERN (trial)) == SEQUENCE
+ && XVECLEN (PATTERN (trial), 0) == 2
+ && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN
+ && (simplejump_p (XVECEXP (PATTERN (trial), 0, 0))
+ || GET_CODE (PATTERN (XVECEXP (PATTERN (trial), 0, 0))) == RETURN)
+ && redundant_insn_p (XVECEXP (PATTERN (trial), 0, 1), insn, 0))
+ {
+ target_label = JUMP_LABEL (XVECEXP (PATTERN (trial), 0, 0));
+ if (target_label == 0)
+ target_label = find_end_label ();
+
+ if (redirect_with_delay_slots_safe_p (delay_insn, target_label,
+ insn))
+ {
+ reorg_redirect_jump (delay_insn, target_label);
+ next = insn;
+ continue;
+ }
+ }
+ }
+
+ if (! INSN_ANNULLED_BRANCH_P (delay_insn)
+ && prev_active_insn (target_label) == insn
+ && ! condjump_in_parallel_p (delay_insn)
+#ifdef HAVE_cc0
+ /* If the last insn in the delay slot sets CC0 for some insn,
+ various code assumes that it is in a delay slot. We could
+ put it back where it belonged and delete the register notes,
+ but it doesn't seem worthwhile in this uncommon case. */
+ && ! find_reg_note (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1),
+ REG_CC_USER, NULL_RTX)
+#endif
+ )
+ {
+ int i;
+
+ /* All this insn does is execute its delay list and jump to the
+ following insn. So delete the jump and just execute the delay
+ list insns.
+
+ We do this by deleting the INSN containing the SEQUENCE, then
+ re-emitting the insns separately, and then deleting the jump.
+ This allows the count of the jump target to be properly
+ decremented. */
+
+ /* Clear the from target bit, since these insns are no longer
+ in delay slots. */
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
+
+ trial = PREV_INSN (insn);
+ delete_insn (insn);
+ emit_insn_after (pat, trial);
+ delete_scheduled_jump (delay_insn);
+ continue;
+ }
+
+ /* See if this is an unconditional jump around a single insn which is
+ identical to the one in its delay slot. In this case, we can just
+ delete the branch and the insn in its delay slot. */
+ if (next && GET_CODE (next) == INSN
+ && prev_label (next_active_insn (next)) == target_label
+ && simplejump_p (insn)
+ && XVECLEN (pat, 0) == 2
+ && rtx_equal_p (PATTERN (next), PATTERN (XVECEXP (pat, 0, 1))))
+ {
+ delete_insn (insn);
+ continue;
+ }
+
+ /* See if this jump (with its delay slots) branches around another
+ jump (without delay slots). If so, invert this jump and point
+ it to the target of the second jump. We cannot do this for
+ annulled jumps, though. Again, don't convert a jump to a RETURN
+ here. */
+ if (! INSN_ANNULLED_BRANCH_P (delay_insn)
+ && next && GET_CODE (next) == JUMP_INSN
+ && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
+ && next_active_insn (target_label) == next_active_insn (next)
+ && no_labels_between_p (insn, next))
+ {
+ rtx label = JUMP_LABEL (next);
+ rtx old_label = JUMP_LABEL (delay_insn);
+
+ if (label == 0)
+ label = find_end_label ();
+
+ if (redirect_with_delay_slots_safe_p (delay_insn, label, insn))
+ {
+ /* Be careful how we do this to avoid deleting code or labels
+ that are momentarily dead. See similar optimization in
+ jump.c */
+ if (old_label)
+ ++LABEL_NUSES (old_label);
+
+ if (invert_jump (delay_insn, label))
+ {
+ delete_insn (next);
+ next = insn;
+ }
+
+ if (old_label && --LABEL_NUSES (old_label) == 0)
+ delete_insn (old_label);
+ continue;
+ }
+ }
+
+ /* If we own the thread opposite the way this insn branches, see if we
+ can merge its delay slots with following insns. */
+ if (INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
+ && own_thread_p (NEXT_INSN (insn), 0, 1))
+ try_merge_delay_insns (insn, next);
+ else if (! INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
+ && own_thread_p (target_label, target_label, 0))
+ try_merge_delay_insns (insn, next_active_insn (target_label));
+
+ /* If we get here, we haven't deleted INSN. But we may have deleted
+ NEXT, so recompute it. */
+ next = next_active_insn (insn);
+ }
+}
+
+#ifdef HAVE_return
+
+/* Look for filled jumps to the end of function label. We can try to convert
+ them into RETURN insns if the insns in the delay slot are valid for the
+ RETURN as well. */
+
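+/* E.g. (hypothetically), a filled jump
+	(sequence [(jump_insn going to end_of_function_label) (slot insn)])
+   has its jump rewritten as a RETURN, provided each insn in the slots is
+   also valid in a RETURN's delay slot.  */
+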
+static void
+make_return_insns (first)
+ rtx first;
+{
+ rtx insn, jump_insn, pat;
+ rtx real_return_label = end_of_function_label;
+ int slots, i;
+
+ /* See if there is a RETURN insn in the function other than the one we
+ made for END_OF_FUNCTION_LABEL. If so, set up anything we can't change
+ into a RETURN to jump to it. */
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == RETURN)
+ {
+ real_return_label = get_label_before (insn);
+ break;
+ }
+
+ /* Show an extra usage of REAL_RETURN_LABEL so it won't go away if it
+ was equal to END_OF_FUNCTION_LABEL. */
+ LABEL_NUSES (real_return_label)++;
+
+ /* Clear the list of insns to fill so we can use it. */
+ obstack_free (&unfilled_slots_obstack, unfilled_firstobj);
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ int flags;
+
+ /* Only look at filled JUMP_INSNs that go to the end of function
+ label. */
+ if (GET_CODE (insn) != INSN
+ || GET_CODE (PATTERN (insn)) != SEQUENCE
+ || GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) != JUMP_INSN
+ || JUMP_LABEL (XVECEXP (PATTERN (insn), 0, 0)) != end_of_function_label)
+ continue;
+
+ pat = PATTERN (insn);
+ jump_insn = XVECEXP (pat, 0, 0);
+
+ /* If we can't make the jump into a RETURN, try to redirect it to the best
+ RETURN and go on to the next insn. */
+ if (! reorg_redirect_jump (jump_insn, NULL_RTX))
+ {
+ /* Make sure redirecting the jump will not invalidate the delay
+ slot insns. */
+ if (redirect_with_delay_slots_safe_p (jump_insn,
+ real_return_label,
+ insn))
+ reorg_redirect_jump (jump_insn, real_return_label);
+ continue;
+ }
+
+      /* See if this RETURN can accept the insns currently in its delay slot.
+	 It can if it has at least as many slots and the contents of each
+	 are valid.  */
+
+ flags = get_jump_flags (jump_insn, JUMP_LABEL (jump_insn));
+ slots = num_delay_slots (jump_insn);
+ if (slots >= XVECLEN (pat, 0) - 1)
+ {
+ for (i = 1; i < XVECLEN (pat, 0); i++)
+ if (! (
+#ifdef ANNUL_IFFALSE_SLOTS
+ (INSN_ANNULLED_BRANCH_P (jump_insn)
+ && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
+ ? eligible_for_annul_false (jump_insn, i - 1,
+ XVECEXP (pat, 0, i), flags) :
+#endif
+#ifdef ANNUL_IFTRUE_SLOTS
+ (INSN_ANNULLED_BRANCH_P (jump_insn)
+ && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
+ ? eligible_for_annul_true (jump_insn, i - 1,
+ XVECEXP (pat, 0, i), flags) :
+#endif
+		 eligible_for_delay (jump_insn, i - 1, XVECEXP (pat, 0, i), flags)))
+ break;
+ }
+ else
+ i = 0;
+
+ if (i == XVECLEN (pat, 0))
+ continue;
+
+ /* We have to do something with this insn. If it is an unconditional
+ RETURN, delete the SEQUENCE and output the individual insns,
+ followed by the RETURN. Then set things up so we try to find
+ insns for its delay slots, if it needs some. */
+ if (GET_CODE (PATTERN (jump_insn)) == RETURN)
+ {
+ rtx prev = PREV_INSN (insn);
+
+ delete_insn (insn);
+ for (i = 1; i < XVECLEN (pat, 0); i++)
+ prev = emit_insn_after (PATTERN (XVECEXP (pat, 0, i)), prev);
+
+ insn = emit_jump_insn_after (PATTERN (jump_insn), prev);
+ emit_barrier_after (insn);
+
+ if (slots)
+ obstack_ptr_grow (&unfilled_slots_obstack, insn);
+ }
+ else
+ /* It is probably more efficient to keep this with its current
+ delay slot as a branch to a RETURN. */
+ reorg_redirect_jump (jump_insn, real_return_label);
+ }
+
+ /* Now delete REAL_RETURN_LABEL if we never used it. Then try to fill any
+ new delay slots we have created. */
+ if (--LABEL_NUSES (real_return_label) == 0)
+ delete_insn (real_return_label);
+
+ fill_simple_delay_slots (first, 1);
+ fill_simple_delay_slots (first, 0);
+}
+#endif
+
+/* Try to find insns to place in delay slots. */
+
+void
+dbr_schedule (first, file)
+ rtx first;
+ FILE *file;
+{
+ rtx insn, next, epilogue_insn = 0;
+ int i;
+#if 0
+ int old_flag_no_peephole = flag_no_peephole;
+
+ /* Execute `final' once in prescan mode to delete any insns that won't be
+ used. Don't let final try to do any peephole optimization--it will
+ ruin dataflow information for this pass. */
+
+ flag_no_peephole = 1;
+ final (first, 0, NO_DEBUG, 1, 1);
+ flag_no_peephole = old_flag_no_peephole;
+#endif
+
+ /* If the current function has no insns other than the prologue and
+ epilogue, then do not try to fill any delay slots. */
+ if (n_basic_blocks == 0)
+ return;
+
+ /* Find the highest INSN_UID and allocate and initialize our map from
+ INSN_UID's to position in code. */
+ for (max_uid = 0, insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (INSN_UID (insn) > max_uid)
+ max_uid = INSN_UID (insn);
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG)
+ epilogue_insn = insn;
+ }
+
+  uid_to_ruid = (int *) alloca ((max_uid + 1) * sizeof (int));
+ for (i = 0, insn = first; insn; i++, insn = NEXT_INSN (insn))
+ uid_to_ruid[INSN_UID (insn)] = i;
+
+ /* Initialize the list of insns that need filling. */
+ if (unfilled_firstobj == 0)
+ {
+ gcc_obstack_init (&unfilled_slots_obstack);
+ unfilled_firstobj = (rtx *) obstack_alloc (&unfilled_slots_obstack, 0);
+ }
+
+ for (insn = next_active_insn (first); insn; insn = next_active_insn (insn))
+ {
+ rtx target;
+
+ INSN_ANNULLED_BRANCH_P (insn) = 0;
+ INSN_FROM_TARGET_P (insn) = 0;
+
+ /* Skip vector tables. We can't get attributes for them. */
+ if (GET_CODE (insn) == JUMP_INSN
+ && (GET_CODE (PATTERN (insn)) == ADDR_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
+ continue;
+
+ if (num_delay_slots (insn) > 0)
+ obstack_ptr_grow (&unfilled_slots_obstack, insn);
+
+ /* Ensure all jumps go to the last of a set of consecutive labels. */
+ if (GET_CODE (insn) == JUMP_INSN
+ && (condjump_p (insn) || condjump_in_parallel_p (insn))
+ && JUMP_LABEL (insn) != 0
+ && ((target = prev_label (next_active_insn (JUMP_LABEL (insn))))
+ != JUMP_LABEL (insn)))
+ redirect_jump (insn, target);
+ }
+
+ /* Indicate what resources are required to be valid at the end of the current
+ function. The condition code never is and memory always is. If the
+ frame pointer is needed, it is and so is the stack pointer unless
+ EXIT_IGNORE_STACK is non-zero. If the frame pointer is not needed, the
+ stack pointer is. Registers used to return the function value are
+ needed. Registers holding global variables are needed. */
+
+ end_of_function_needs.cc = 0;
+ end_of_function_needs.memory = 1;
+ CLEAR_HARD_REG_SET (end_of_function_needs.regs);
+
+ if (frame_pointer_needed)
+ {
+ SET_HARD_REG_BIT (end_of_function_needs.regs, FRAME_POINTER_REGNUM);
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ SET_HARD_REG_BIT (end_of_function_needs.regs, HARD_FRAME_POINTER_REGNUM);
+#endif
+#ifdef EXIT_IGNORE_STACK
+ if (! EXIT_IGNORE_STACK)
+#endif
+ SET_HARD_REG_BIT (end_of_function_needs.regs, STACK_POINTER_REGNUM);
+ }
+ else
+ SET_HARD_REG_BIT (end_of_function_needs.regs, STACK_POINTER_REGNUM);
+
+ if (current_function_return_rtx != 0
+ && GET_CODE (current_function_return_rtx) == REG)
+ mark_referenced_resources (current_function_return_rtx,
+ &end_of_function_needs, 1);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i])
+ SET_HARD_REG_BIT (end_of_function_needs.regs, i);
+
+ /* The registers required to be live at the end of the function are
+ represented in the flow information as being dead just prior to
+ reaching the end of the function. For example, the return of a value
+ might be represented by a USE of the return register immediately
+ followed by an unconditional jump to the return label where the
+ return label is the end of the RTL chain. The end of the RTL chain
+ is then taken to mean that the return register is live.
+
+ This sequence is no longer maintained when epilogue instructions are
+ added to the RTL chain. To reconstruct the original meaning, the
+ start of the epilogue (NOTE_INSN_EPILOGUE_BEG) is regarded as the
+ point where these registers become live (start_of_epilogue_needs).
+ If epilogue instructions are present, the registers set by those
+ instructions won't have been processed by flow. Thus, those
+ registers are additionally required at the end of the RTL chain
+ (end_of_function_needs). */
+
+ start_of_epilogue_needs = end_of_function_needs;
+
+ while (epilogue_insn = next_nonnote_insn (epilogue_insn))
+ mark_set_resources (epilogue_insn, &end_of_function_needs, 0, 1);
+
+ /* Show we haven't computed an end-of-function label yet. */
+ end_of_function_label = 0;
+
+ /* Allocate and initialize the tables used by mark_target_live_regs. */
+ target_hash_table
+ = (struct target_info **) alloca ((TARGET_HASH_PRIME
+ * sizeof (struct target_info *)));
+ bzero (target_hash_table, TARGET_HASH_PRIME * sizeof (struct target_info *));
+
+ bb_ticks = (int *) alloca (n_basic_blocks * sizeof (int));
+ bzero (bb_ticks, n_basic_blocks * sizeof (int));
+
+ /* Initialize the statistics for this function. */
+ bzero (num_insns_needing_delays, sizeof num_insns_needing_delays);
+ bzero (num_filled_delays, sizeof num_filled_delays);
+
+ /* Now do the delay slot filling. Try everything twice in case earlier
+ changes make more slots fillable. */
+
+ for (reorg_pass_number = 0;
+ reorg_pass_number < MAX_REORG_PASSES;
+ reorg_pass_number++)
+ {
+ fill_simple_delay_slots (first, 1);
+ fill_simple_delay_slots (first, 0);
+ fill_eager_delay_slots (first);
+ relax_delay_slots (first);
+ }
+
+ /* Delete any USE insns made by update_block; subsequent passes don't need
+ them or know how to deal with them. */
+ for (insn = first; insn; insn = next)
+ {
+ next = NEXT_INSN (insn);
+
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE
+ && GET_RTX_CLASS (GET_CODE (XEXP (PATTERN (insn), 0))) == 'i')
+ next = delete_insn (insn);
+ }
+
+ /* If we made an end of function label, indicate that it is now
+ safe to delete it by undoing our prior adjustment to LABEL_NUSES.
+ If it is now unused, delete it. */
+ if (end_of_function_label && --LABEL_NUSES (end_of_function_label) == 0)
+ delete_insn (end_of_function_label);
+
+#ifdef HAVE_return
+ if (HAVE_return && end_of_function_label != 0)
+ make_return_insns (first);
+#endif
+
+ obstack_free (&unfilled_slots_obstack, unfilled_firstobj);
+
+ /* It is not clear why the line below is needed, but it does seem to be. */
+ unfilled_firstobj = (rtx *) obstack_alloc (&unfilled_slots_obstack, 0);
+
+ /* Reposition the prologue and epilogue notes in case we moved the
+ prologue/epilogue insns. */
+ reposition_prologue_and_epilogue_notes (first);
+
+ if (file)
+ {
+ register int i, j, need_comma;
+
+ for (reorg_pass_number = 0;
+ reorg_pass_number < MAX_REORG_PASSES;
+ reorg_pass_number++)
+ {
+ fprintf (file, ";; Reorg pass #%d:\n", reorg_pass_number + 1);
+ for (i = 0; i < NUM_REORG_FUNCTIONS; i++)
+ {
+ need_comma = 0;
+ fprintf (file, ";; Reorg function #%d\n", i);
+
+ fprintf (file, ";; %d insns needing delay slots\n;; ",
+ num_insns_needing_delays[i][reorg_pass_number]);
+
+ for (j = 0; j < MAX_DELAY_HISTOGRAM; j++)
+ if (num_filled_delays[i][j][reorg_pass_number])
+ {
+ if (need_comma)
+ fprintf (file, ", ");
+ need_comma = 1;
+ fprintf (file, "%d got %d delays",
+ num_filled_delays[i][j][reorg_pass_number], j);
+ }
+ fprintf (file, "\n");
+ }
+ }
+ }
+}
+#endif /* DELAY_SLOTS */
diff --git a/gnu/usr.bin/cc/cc_int/rtl.c b/gnu/usr.bin/cc/cc_int/rtl.c
new file mode 100644
index 0000000..6f29f7f
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/rtl.c
@@ -0,0 +1,850 @@
+/* Allocate and read RTL for GNU C Compiler.
+ Copyright (C) 1987, 1988, 1991, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include <ctype.h>
+#include <stdio.h>
+#include "rtl.h"
+#include "real.h"
+
+#include "obstack.h"
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* Obstack used for allocating RTL objects.
+ Between functions, this is the permanent_obstack.
+ While parsing and expanding a function, this is maybepermanent_obstack
+ so we can save it if it is an inline function.
+ During optimization and output, this is function_obstack. */
+
+extern struct obstack *rtl_obstack;
+
+#if HOST_BITS_PER_WIDE_INT != HOST_BITS_PER_INT
+extern long atol();
+#endif
+
+/* Indexed by rtx code, gives number of operands for an rtx with that code.
+ Does NOT include rtx header data (code and links).
+ This array is initialized in init_rtl. */
+
+int rtx_length[NUM_RTX_CODE + 1];
+
+/* Indexed by rtx code, gives the name of that kind of rtx, as a C string. */
+
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) NAME ,
+
+char *rtx_name[] = {
+#include "rtl.def" /* rtl expressions are documented here */
+};
+
+#undef DEF_RTL_EXPR
+
+/* Indexed by machine mode, gives the name of that machine mode.
+ This name does not include the letters "mode". */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) NAME,
+
+char *mode_name[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def"
+
+#ifdef EXTRA_CC_MODES
+ EXTRA_CC_NAMES
+#endif
+
+};
+
+#undef DEF_MACHMODE
+
+/* Indexed by machine mode, gives the class of the mode.
+   GET_MODE_CLASS uses this.  */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) CLASS,
+
+enum mode_class mode_class[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def"
+};
+
+#undef DEF_MACHMODE
+
+/* Indexed by machine mode, gives the length of the mode, in bytes.
+ GET_MODE_SIZE uses this. */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) SIZE,
+
+int mode_size[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def"
+};
+
+#undef DEF_MACHMODE
+
+/* Indexed by machine mode, gives the length of the mode's subunit.
+ GET_MODE_UNIT_SIZE uses this. */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) UNIT,
+
+int mode_unit_size[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def" /* machine modes are documented here */
+};
+
+#undef DEF_MACHMODE
+
+/* Indexed by machine mode, gives next wider natural mode
+ (QI -> HI -> SI -> DI, etc.) Widening multiply instructions
+ use this. */
+
+#define DEF_MACHMODE(SYM, NAME, CLASS, SIZE, UNIT, WIDER) \
+ (enum machine_mode) WIDER,
+
+enum machine_mode mode_wider_mode[(int) MAX_MACHINE_MODE] = {
+#include "machmode.def" /* machine modes are documented here */
+};
+
+#undef DEF_MACHMODE
+
+/* Indexed by mode class, gives the narrowest mode for each class. */
+
+enum machine_mode class_narrowest_mode[(int) MAX_MODE_CLASS];
+
+/* Indexed by rtx code, gives a sequence of operand-types for
+ rtx's of that code. The sequence is a C string in which
+ each character describes one operand. */
+
+char *rtx_format[] = {
+ /* "*" undefined.
+ can cause a warning message
+ "0" field is unused (or used in a phase-dependent manner)
+ prints nothing
+ "i" an integer
+ prints the integer
+ "n" like "i", but prints entries from `note_insn_name'
+ "w" an integer of width HOST_BITS_PER_WIDE_INT
+ prints the integer
+ "s" a pointer to a string
+ prints the string
+ "S" like "s", but optional:
+ the containing rtx may end before this operand
+ "e" a pointer to an rtl expression
+ prints the expression
+ "E" a pointer to a vector that points to a number of rtl expressions
+ prints a list of the rtl expressions
+ "V" like "E", but optional:
+ the containing rtx may end before this operand
+ "u" a pointer to another insn
+ prints the uid of the insn. */
+
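+/* For instance, a PLUS has format "ee" (two subexpressions) and a
+   CONST_INT has format "w" (a single wide integer).  */
+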
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) FORMAT ,
+#include "rtl.def" /* rtl expressions are defined here */
+#undef DEF_RTL_EXPR
+};
+
+/* Indexed by rtx code, gives a character representing the "class" of
+ that rtx code. See rtl.def for documentation on the defined classes. */
+
+char rtx_class[] = {
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) CLASS,
+#include "rtl.def" /* rtl expressions are defined here */
+#undef DEF_RTL_EXPR
+};
+
+/* Names for kinds of NOTEs and REG_NOTEs. */
+
+char *note_insn_name[] = { 0 , "NOTE_INSN_DELETED",
+ "NOTE_INSN_BLOCK_BEG", "NOTE_INSN_BLOCK_END",
+ "NOTE_INSN_LOOP_BEG", "NOTE_INSN_LOOP_END",
+ "NOTE_INSN_FUNCTION_END", "NOTE_INSN_SETJMP",
+ "NOTE_INSN_LOOP_CONT", "NOTE_INSN_LOOP_VTOP",
+ "NOTE_INSN_PROLOGUE_END", "NOTE_INSN_EPILOGUE_BEG",
+ "NOTE_INSN_DELETED_LABEL", "NOTE_INSN_FUNCTION_BEG"};
+
+char *reg_note_name[] = { "", "REG_DEAD", "REG_INC", "REG_EQUIV", "REG_WAS_0",
+ "REG_EQUAL", "REG_RETVAL", "REG_LIBCALL",
+ "REG_NONNEG", "REG_NO_CONFLICT", "REG_UNUSED",
+ "REG_CC_SETTER", "REG_CC_USER", "REG_LABEL",
+ "REG_DEP_ANTI", "REG_DEP_OUTPUT" };
+
+/* Allocate an rtx vector of N elements.
+ Store the length, and initialize all elements to zero. */
+
+rtvec
+rtvec_alloc (n)
+ int n;
+{
+ rtvec rt;
+ int i;
+
+ rt = (rtvec) obstack_alloc (rtl_obstack,
+ sizeof (struct rtvec_def)
+ + (( n - 1) * sizeof (rtunion)));
+
+ /* clear out the vector */
+ PUT_NUM_ELEM(rt, n);
+ for (i=0; i < n; i++)
+ rt->elem[i].rtvec = NULL; /* @@ not portable due to rtunion */
+
+ return rt;
+}
+
+/* Allocate an rtx of code CODE. The CODE is stored in the rtx;
+ all the rest is initialized to zero. */
+
+rtx
+rtx_alloc (code)
+ RTX_CODE code;
+{
+ rtx rt;
+ register struct obstack *ob = rtl_obstack;
+ register int nelts = GET_RTX_LENGTH (code);
+ register int length = sizeof (struct rtx_def)
+ + (nelts - 1) * sizeof (rtunion);
+
+ /* This function is called more than any other in GCC,
+ so we manipulate the obstack directly.
+
+ Even though rtx objects are word aligned, we may be sharing an obstack
+ with tree nodes, which may have to be double-word aligned. So align
+ our length to the alignment mask in the obstack. */
+
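+  /* E.g., with an alignment mask of 7 (8-byte alignment), a length of 20
+     becomes (20 + 7) & ~7 == 24.  */
+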
+ length = (length + ob->alignment_mask) & ~ ob->alignment_mask;
+
+ if (ob->chunk_limit - ob->next_free < length)
+ _obstack_newchunk (ob, length);
+ rt = (rtx)ob->object_base;
+ ob->next_free += length;
+ ob->object_base = ob->next_free;
+
+  /* We want to clear everything up to the FLD array.  Normally this is
+     one int, but we don't want to assume that, and it isn't very portable
+     anyway; clearing it int by int, as below, is.  */
+
+ length = (sizeof (struct rtx_def) - sizeof (rtunion) - 1) / sizeof (int);
+ for (; length >= 0; length--)
+ ((int *) rt)[length] = 0;
+
+ PUT_CODE (rt, code);
+
+ return rt;
+}
+
+/* Free the rtx X and all RTL allocated since X. */
+
+void
+rtx_free (x)
+ rtx x;
+{
+ obstack_free (rtl_obstack, x);
+}
+
+/* Create a new copy of an rtx.
+ Recursively copies the operands of the rtx,
+ except for those few rtx codes that are sharable. */
+
+rtx
+copy_rtx (orig)
+ register rtx orig;
+{
+ register rtx copy;
+ register int i, j;
+ register RTX_CODE code;
+ register char *format_ptr;
+
+ code = GET_CODE (orig);
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case SCRATCH:
+      /* SCRATCH must be shared because each one represents a distinct value. */
+ return orig;
+
+ case CONST:
+ /* CONST can be shared if it contains a SYMBOL_REF. If it contains
+ a LABEL_REF, it isn't sharable. */
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (orig, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (orig, 0), 1)) == CONST_INT)
+ return orig;
+ break;
+
+ /* A MEM with a constant address is not sharable. The problem is that
+ the constant address may need to be reloaded. If the mem is shared,
+ then reloading one copy of this mem will cause all copies to appear
+ to have been reloaded. */
+ }
+
+ copy = rtx_alloc (code);
+ PUT_MODE (copy, GET_MODE (orig));
+ copy->in_struct = orig->in_struct;
+ copy->volatil = orig->volatil;
+ copy->unchanging = orig->unchanging;
+ copy->integrated = orig->integrated;
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (copy));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ XEXP (copy, i) = XEXP (orig, i);
+ if (XEXP (orig, i) != NULL)
+ XEXP (copy, i) = copy_rtx (XEXP (orig, i));
+ break;
+
+ case '0':
+ case 'u':
+ XEXP (copy, i) = XEXP (orig, i);
+ break;
+
+ case 'E':
+ case 'V':
+ XVEC (copy, i) = XVEC (orig, i);
+ if (XVEC (orig, i) != NULL)
+ {
+ XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
+ for (j = 0; j < XVECLEN (copy, i); j++)
+ XVECEXP (copy, i, j) = copy_rtx (XVECEXP (orig, i, j));
+ }
+ break;
+
+ case 'w':
+ XWINT (copy, i) = XWINT (orig, i);
+ break;
+
+ case 'i':
+ XINT (copy, i) = XINT (orig, i);
+ break;
+
+ case 's':
+ case 'S':
+ XSTR (copy, i) = XSTR (orig, i);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ return copy;
+}
+
+/* Similar to `copy_rtx' except that if MAY_SHARE is present, it is
+ placed in the result directly, rather than being copied. */
+
+rtx
+copy_most_rtx (orig, may_share)
+ register rtx orig;
+ register rtx may_share;
+{
+ register rtx copy;
+ register int i, j;
+ register RTX_CODE code;
+ register char *format_ptr;
+
+ if (orig == may_share)
+ return orig;
+
+ code = GET_CODE (orig);
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ return orig;
+ }
+
+ copy = rtx_alloc (code);
+ PUT_MODE (copy, GET_MODE (orig));
+ copy->in_struct = orig->in_struct;
+ copy->volatil = orig->volatil;
+ copy->unchanging = orig->unchanging;
+ copy->integrated = orig->integrated;
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (copy));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ XEXP (copy, i) = XEXP (orig, i);
+ if (XEXP (orig, i) != NULL && XEXP (orig, i) != may_share)
+ XEXP (copy, i) = copy_most_rtx (XEXP (orig, i), may_share);
+ break;
+
+ case '0':
+ case 'u':
+ XEXP (copy, i) = XEXP (orig, i);
+ break;
+
+ case 'E':
+ case 'V':
+ XVEC (copy, i) = XVEC (orig, i);
+ if (XVEC (orig, i) != NULL)
+ {
+ XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
+ for (j = 0; j < XVECLEN (copy, i); j++)
+ XVECEXP (copy, i, j)
+ = copy_most_rtx (XVECEXP (orig, i, j), may_share);
+ }
+ break;
+
+ case 'w':
+ XWINT (copy, i) = XWINT (orig, i);
+ break;
+
+ case 'n':
+ case 'i':
+ XINT (copy, i) = XINT (orig, i);
+ break;
+
+ case 's':
+ case 'S':
+ XSTR (copy, i) = XSTR (orig, i);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ return copy;
+}
+
+/* Subroutines of read_rtx. */
+
+/* Dump code after printing a message. Used when read_rtx finds
+ invalid data. */
+
+static void
+dump_and_abort (expected_c, actual_c, infile)
+ int expected_c, actual_c;
+ FILE *infile;
+{
+ int c, i;
+
+ if (expected_c >= 0)
+ fprintf (stderr,
+ "Expected character %c. Found character %c.",
+ expected_c, actual_c);
+ fprintf (stderr, " At file position: %ld\n", ftell (infile));
+ fprintf (stderr, "Following characters are:\n\t");
+ for (i = 0; i < 200; i++)
+ {
+ c = getc (infile);
+ if (EOF == c) break;
+ putc (c, stderr);
+ }
+ fprintf (stderr, "Aborting.\n");
+ abort ();
+}
+
+/* Read chars from INFILE until a non-whitespace char
+ and return that. Comments, both Lisp style and C style,
+ are treated as whitespace.
+ Tools such as genflags use this function. */
+
+int
+read_skip_spaces (infile)
+ FILE *infile;
+{
+ register int c;
+ while (c = getc (infile))
+ {
+ if (c == ' ' || c == '\n' || c == '\t' || c == '\f')
+ ;
+ else if (c == ';')
+ {
+ while ((c = getc (infile)) && c != '\n') ;
+ }
+ else if (c == '/')
+ {
+ register int prevc;
+ c = getc (infile);
+ if (c != '*')
+ dump_and_abort ('*', c, infile);
+
+ prevc = 0;
+ while (c = getc (infile))
+ {
+ if (prevc == '*' && c == '/')
+ break;
+ prevc = c;
+ }
+ }
+ else break;
+ }
+ return c;
+}
+
+/* Read an rtx code name into the buffer STR[].
+ It is terminated by any of the punctuation chars of rtx printed syntax. */
+
+static void
+read_name (str, infile)
+ char *str;
+ FILE *infile;
+{
+ register char *p;
+ register int c;
+
+ c = read_skip_spaces(infile);
+
+ p = str;
+ while (1)
+ {
+ if (c == ' ' || c == '\n' || c == '\t' || c == '\f')
+ break;
+ if (c == ':' || c == ')' || c == ']' || c == '"' || c == '/'
+ || c == '(' || c == '[')
+ {
+ ungetc (c, infile);
+ break;
+ }
+ *p++ = c;
+ c = getc (infile);
+ }
+ if (p == str)
+ {
+ fprintf (stderr, "missing name or number");
+ dump_and_abort (-1, -1, infile);
+ }
+
+ *p = 0;
+}
+
+/* Read an rtx in printed representation from INFILE
+ and return an actual rtx in core constructed accordingly.
+ read_rtx is not used in the compiler proper, but rather in
+ the utilities gen*.c that construct C code from machine descriptions. */
+
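+/* For example, the printed form
+	(set (reg:SI 3) (plus:SI (reg:SI 3) (const_int 4)))
+   reads back as a SET rtx whose operands are filled in according to the
+   format string of each code, "ee" in the case of SET.  */
+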
+rtx
+read_rtx (infile)
+ FILE *infile;
+{
+ register int i, j, list_counter;
+ RTX_CODE tmp_code;
+ register char *format_ptr;
+ /* tmp_char is a buffer used for reading decimal integers
+ and names of rtx types and machine modes.
+ Therefore, 256 must be enough. */
+ char tmp_char[256];
+ rtx return_rtx;
+ register int c;
+ int tmp_int;
+ HOST_WIDE_INT tmp_wide;
+
+ /* Linked list structure for making RTXs: */
+ struct rtx_list
+ {
+ struct rtx_list *next;
+ rtx value; /* Value of this node... */
+ };
+
+ c = read_skip_spaces (infile); /* Should be open paren. */
+ if (c != '(')
+ dump_and_abort ('(', c, infile);
+
+ read_name (tmp_char, infile);
+
+ tmp_code = UNKNOWN;
+
+ for (i=0; i < NUM_RTX_CODE; i++) /* @@ might speed this search up */
+ {
+ if (!(strcmp (tmp_char, GET_RTX_NAME (i))))
+ {
+ tmp_code = (RTX_CODE) i; /* get value for name */
+ break;
+ }
+ }
+ if (tmp_code == UNKNOWN)
+ {
+ fprintf (stderr,
+ "Unknown rtx read in rtl.read_rtx(). Code name was %s .",
+ tmp_char);
+ }
+ /* (NIL) stands for an expression that isn't there. */
+ if (tmp_code == NIL)
+ {
+ /* Discard the closeparen. */
+ while ((c = getc (infile)) && c != ')');
+ return 0;
+ }
+
+ return_rtx = rtx_alloc (tmp_code); /* if we end up with an insn expression
+ then we free this space below. */
+ format_ptr = GET_RTX_FORMAT (GET_CODE (return_rtx));
+
+ /* If what follows is `: mode ', read it and
+ store the mode in the rtx. */
+
+ i = read_skip_spaces (infile);
+ if (i == ':')
+ {
+ register int k;
+ read_name (tmp_char, infile);
+ for (k = 0; k < NUM_MACHINE_MODES; k++)
+ if (!strcmp (GET_MODE_NAME (k), tmp_char))
+ break;
+
+ PUT_MODE (return_rtx, (enum machine_mode) k );
+ }
+ else
+ ungetc (i, infile);
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (return_rtx)); i++)
+ switch (*format_ptr++)
+ {
+ /* 0 means a field for internal use only.
+ Don't expect it to be present in the input. */
+ case '0':
+ break;
+
+ case 'e':
+ case 'u':
+ XEXP (return_rtx, i) = read_rtx (infile);
+ break;
+
+ case 'V':
+ /* 'V' is an optional vector: if a closeparen follows,
+ just store NULL for this element. */
+ c = read_skip_spaces (infile);
+ ungetc (c, infile);
+ if (c == ')')
+ {
+ XVEC (return_rtx, i) = 0;
+ break;
+ }
+ /* Now process the vector. */
+
+ case 'E':
+ {
+ register struct rtx_list *next_rtx, *rtx_list_link;
+ struct rtx_list *list_rtx;
+
+ c = read_skip_spaces (infile);
+ if (c != '[')
+ dump_and_abort ('[', c, infile);
+
+ /* add expressions to a list, while keeping a count */
+ next_rtx = NULL;
+ list_counter = 0;
+ while ((c = read_skip_spaces (infile)) && c != ']')
+ {
+ ungetc (c, infile);
+ list_counter++;
+ rtx_list_link = (struct rtx_list *)
+ alloca (sizeof (struct rtx_list));
+ rtx_list_link->value = read_rtx (infile);
+ if (next_rtx == 0)
+ list_rtx = rtx_list_link;
+ else
+ next_rtx->next = rtx_list_link;
+ next_rtx = rtx_list_link;
+ rtx_list_link->next = 0;
+ }
+ /* get vector length and allocate it */
+ XVEC (return_rtx, i) = (list_counter
+ ? rtvec_alloc (list_counter) : NULL_RTVEC);
+ if (list_counter > 0)
+ {
+ next_rtx = list_rtx;
+ for (j = 0; j < list_counter; j++,
+ next_rtx = next_rtx->next)
+ XVECEXP (return_rtx, i, j) = next_rtx->value;
+ }
+	      /* The close bracket has been read.  */
+ }
+ break;
+
+ case 'S':
+ /* 'S' is an optional string: if a closeparen follows,
+ just store NULL for this element. */
+ c = read_skip_spaces (infile);
+ ungetc (c, infile);
+ if (c == ')')
+ {
+ XSTR (return_rtx, i) = 0;
+ break;
+ }
+
+ case 's':
+ {
+ int saw_paren = 0;
+ register char *stringbuf;
+
+ c = read_skip_spaces (infile);
+ if (c == '(')
+ {
+ saw_paren = 1;
+ c = read_skip_spaces (infile);
+ }
+ if (c != '"')
+ dump_and_abort ('"', c, infile);
+
+ while (1)
+ {
+ c = getc (infile); /* Read the string */
+ if (c == '\\')
+ {
+ c = getc (infile); /* Read the string */
+ /* \; makes stuff for a C string constant containing
+ newline and tab. */
+ if (c == ';')
+ {
+ obstack_grow (rtl_obstack, "\\n\\t", 4);
+ continue;
+ }
+ }
+ else if (c == '"')
+ break;
+
+ obstack_1grow (rtl_obstack, c);
+ }
+
+ obstack_1grow (rtl_obstack, 0);
+ stringbuf = (char *) obstack_finish (rtl_obstack);
+
+ if (saw_paren)
+ {
+ c = read_skip_spaces (infile);
+ if (c != ')')
+ dump_and_abort (')', c, infile);
+ }
+ XSTR (return_rtx, i) = stringbuf;
+ }
+ break;
+
+ case 'w':
+ read_name (tmp_char, infile);
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ tmp_wide = atoi (tmp_char);
+#else
+ tmp_wide = atol (tmp_char);
+#endif
+ XWINT (return_rtx, i) = tmp_wide;
+ break;
+
+ case 'i':
+ case 'n':
+ read_name (tmp_char, infile);
+ tmp_int = atoi (tmp_char);
+ XINT (return_rtx, i) = tmp_int;
+ break;
+
+ default:
+ fprintf (stderr,
+ "switch format wrong in rtl.read_rtx(). format was: %c.\n",
+ format_ptr[-1]);
+ fprintf (stderr, "\tfile position: %ld\n", ftell (infile));
+ abort ();
+ }
+
+ c = read_skip_spaces (infile);
+ if (c != ')')
+ dump_and_abort (')', c, infile);
+
+ return return_rtx;
+}
+
+/* This is called once per compilation, before any rtx's are constructed.
+ It initializes the vector `rtx_length', the extra CC modes, if any,
+ and computes certain commonly-used modes. */
+
+void
+init_rtl ()
+{
+ int min_class_size[(int) MAX_MODE_CLASS];
+ enum machine_mode mode;
+ int i;
+
+ for (i = 0; i < NUM_RTX_CODE; i++)
+ rtx_length[i] = strlen (rtx_format[i]);
+
+ /* Make CONST_DOUBLE bigger, if real values are bigger than
+ it normally expects to have room for.
+ Note that REAL_VALUE_TYPE is not defined by default,
+     since tree.h is not included.  But the default definition as `double'
+     would do no harm. */
+#ifdef REAL_VALUE_TYPE
+ i = sizeof (REAL_VALUE_TYPE) / sizeof (rtunion) + 2;
+ if (rtx_length[(int) CONST_DOUBLE] < i)
+ {
+ char *s = (char *) xmalloc (i + 1);
+ rtx_length[(int) CONST_DOUBLE] = i;
+ rtx_format[(int) CONST_DOUBLE] = s;
+ *s++ = 'e';
+ *s++ = '0';
+ /* Set the GET_RTX_FORMAT of CONST_DOUBLE to a string
+ of as many `w's as we now have elements. Subtract two from
+ the size to account for the 'e' and the '0'. */
+ for (i = 2; i < rtx_length[(int) CONST_DOUBLE]; i++)
+ *s++ = 'w';
+ *s++ = 0;
+ }
+#endif
+
+#ifdef EXTRA_CC_MODES
+ for (i = (int) CCmode + 1; i < (int) MAX_MACHINE_MODE; i++)
+ {
+ mode_class[i] = MODE_CC;
+ mode_size[i] = mode_size[(int) CCmode];
+ mode_unit_size[i] = mode_unit_size[(int) CCmode];
+ mode_wider_mode[i - 1] = (enum machine_mode) i;
+ mode_wider_mode[i] = VOIDmode;
+ }
+#endif
+
+ /* Find the narrowest mode for each class. */
+
+ for (i = 0; i < (int) MAX_MODE_CLASS; i++)
+ min_class_size[i] = 1000;
+
+ for (mode = VOIDmode; (int) mode < (int) MAX_MACHINE_MODE;
+ mode = (enum machine_mode) ((int) mode + 1))
+ {
+ if (GET_MODE_SIZE (mode) < min_class_size[(int) GET_MODE_CLASS (mode)])
+ {
+ class_narrowest_mode[(int) GET_MODE_CLASS (mode)] = mode;
+ min_class_size[(int) GET_MODE_CLASS (mode)] = GET_MODE_SIZE (mode);
+ }
+ }
+}
+
+#ifdef memset
+gcc_memset (dest, value, len)
+ char *dest;
+ int value;
+ int len;
+{
+ while (len-- > 0)
+ *dest++ = value;
+}
+#endif /* memset */
diff --git a/gnu/usr.bin/cc/cc_int/rtlanal.c b/gnu/usr.bin/cc/cc_int/rtlanal.c
new file mode 100644
index 0000000..188fb93
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/rtlanal.c
@@ -0,0 +1,1835 @@
+/* Analyze RTL for C-Compiler
+ Copyright (C) 1987, 88, 91, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include "rtl.h"
+
+void note_stores ();
+int reg_set_p ();
+
+/* Bit flags that specify the machine subtype we are compiling for.
+ Bits are tested using macros TARGET_... defined in the tm.h file
+ and set by `-m...' switches. Must be defined in rtlanal.c. */
+
+int target_flags;
+
+/* Return 1 if the value of X is unstable
+ (would be different at a different point in the program).
+ The frame pointer, arg pointer, etc. are considered stable
+ (within one function) and so is anything marked `unchanging'. */
+
+int
+rtx_unstable_p (x)
+ rtx x;
+{
+ register RTX_CODE code = GET_CODE (x);
+ register int i;
+ register char *fmt;
+
+ if (code == MEM)
+ return ! RTX_UNCHANGING_P (x);
+
+ if (code == QUEUED)
+ return 1;
+
+ if (code == CONST || code == CONST_INT)
+ return 0;
+
+ if (code == REG)
+ return ! (REGNO (x) == FRAME_POINTER_REGNUM
+ || REGNO (x) == HARD_FRAME_POINTER_REGNUM
+ || REGNO (x) == ARG_POINTER_REGNUM
+ || RTX_UNCHANGING_P (x));
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ if (rtx_unstable_p (XEXP (x, i)))
+ return 1;
+ return 0;
+}
+
+/* Return 1 if X has a value that can vary even between two
+ executions of the program. 0 means X can be compared reliably
+ against certain constants or near-constants.
+ The frame pointer and the arg pointer are considered constant. */
+
+int
+rtx_varies_p (x)
+ rtx x;
+{
+ register RTX_CODE code = GET_CODE (x);
+ register int i;
+ register char *fmt;
+
+ switch (code)
+ {
+ case MEM:
+ case QUEUED:
+ return 1;
+
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 0;
+
+ case REG:
+ /* Note that we have to test for the actual rtx used for the frame
+ and arg pointers and not just the register number in case we have
+ eliminated the frame and/or arg pointer and are using it
+ for pseudos. */
+ return ! (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
+ || x == arg_pointer_rtx);
+
+ case LO_SUM:
+ /* The operand 0 of a LO_SUM is considered constant
+     (in fact it is related specifically to operand 1). */
+ return rtx_varies_p (XEXP (x, 1));
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ if (rtx_varies_p (XEXP (x, i)))
+ return 1;
+ return 0;
+}
+
+/* Return 0 if the use of X as an address in a MEM can cause a trap. */
+
+int
+rtx_addr_can_trap_p (x)
+ register rtx x;
+{
+ register enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ /* SYMBOL_REF is problematic due to the possible presence of
+ a #pragma weak, but to say that loads from symbols can trap is
+ *very* costly. It's not at all clear what's best here. For
+ now, we ignore the impact of #pragma weak. */
+ return 0;
+
+ case REG:
+ /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
+ return ! (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
+ || x == stack_pointer_rtx || x == arg_pointer_rtx);
+
+ case CONST:
+ return rtx_addr_can_trap_p (XEXP (x, 0));
+
+ case PLUS:
+ /* An address is assumed not to trap if it is an address that can't
+ trap plus a constant integer. */
+ return (rtx_addr_can_trap_p (XEXP (x, 0))
+ || GET_CODE (XEXP (x, 1)) != CONST_INT);
+
+ case LO_SUM:
+ return rtx_addr_can_trap_p (XEXP (x, 1));
+ }
+
+  /* If it isn't one of the cases above, it can cause a trap.  */
+ return 1;
+}
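+
+/* Worked example (editorial, not in the original source): the address
+   (plus (reg fp) (const_int 8)) cannot trap, because the PLUS case asks
+   only whether the base can trap (the frame pointer cannot) or whether
+   the offset is non-constant (a CONST_INT is not).  An address held in
+   an arbitrary pseudo register, by contrast, is handled by the REG case
+   and reported as able to trap, since nothing is known about its
+   value.  */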
+
+/* Return 1 if X refers to a memory location whose address
+ cannot be compared reliably with constant addresses,
+ or if X refers to a BLKmode memory object. */
+
+int
+rtx_addr_varies_p (x)
+ rtx x;
+{
+ register enum rtx_code code;
+ register int i;
+ register char *fmt;
+
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+ if (code == MEM)
+ return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0));
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ if (rtx_addr_varies_p (XEXP (x, i)))
+ return 1;
+ return 0;
+}
+
+/* Return the value of the integer term in X, if one is apparent;
+ otherwise return 0.
+ Only obvious integer terms are detected.
+   This is used in cse.c with the `related_value' field.  */
+
+HOST_WIDE_INT
+get_integer_term (x)
+ rtx x;
+{
+ if (GET_CODE (x) == CONST)
+ x = XEXP (x, 0);
+
+ if (GET_CODE (x) == MINUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return - INTVAL (XEXP (x, 1));
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return INTVAL (XEXP (x, 1));
+ return 0;
+}
+
+/* If X is a constant, return the value sans apparent integer term;
+ otherwise return 0.
+ Only obvious integer terms are detected. */
+
+rtx
+get_related_value (x)
+ rtx x;
+{
+ if (GET_CODE (x) != CONST)
+ return 0;
+ x = XEXP (x, 0);
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return XEXP (x, 0);
+ else if (GET_CODE (x) == MINUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return XEXP (x, 0);
+ return 0;
+}
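+
+/* Worked example (editorial, not in the original source): for
+
+	(const (plus (symbol_ref "s") (const_int 4)))
+
+   get_integer_term returns 4 and get_related_value returns the inner
+   (symbol_ref "s"), so cse.c can treat `s+4' and `s+8' as sharing a
+   related value that differs only by an integer term.  */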
+
+/* Nonzero if register REG appears somewhere within IN.
+ Also works if REG is not a register; in this case it checks
+ for a subexpression of IN that is Lisp "equal" to REG. */
+
+int
+reg_mentioned_p (reg, in)
+ register rtx reg, in;
+{
+ register char *fmt;
+ register int i;
+ register enum rtx_code code;
+
+ if (in == 0)
+ return 0;
+
+ if (reg == in)
+ return 1;
+
+ if (GET_CODE (in) == LABEL_REF)
+ return reg == XEXP (in, 0);
+
+ code = GET_CODE (in);
+
+ switch (code)
+ {
+ /* Compare registers by number. */
+ case REG:
+ return GET_CODE (reg) == REG && REGNO (in) == REGNO (reg);
+
+ /* These codes have no constituent expressions
+ and are unique. */
+ case SCRATCH:
+ case CC0:
+ case PC:
+ return 0;
+
+ case CONST_INT:
+ return GET_CODE (reg) == CONST_INT && INTVAL (in) == INTVAL (reg);
+
+ case CONST_DOUBLE:
+ /* These are kept unique for a given value. */
+ return 0;
+ }
+
+ if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
+ return 1;
+
+ fmt = GET_RTX_FORMAT (code);
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (in, i) - 1; j >= 0; j--)
+ if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e'
+ && reg_mentioned_p (reg, XEXP (in, i)))
+ return 1;
+ }
+ return 0;
+}
+
+/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
+ no CODE_LABEL insn. */
+
+int
+no_labels_between_p (beg, end)
+ rtx beg, end;
+{
+ register rtx p;
+ for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
+ if (GET_CODE (p) == CODE_LABEL)
+ return 0;
+ return 1;
+}
+
+/* Nonzero if register REG is used in an insn between
+ FROM_INSN and TO_INSN (exclusive of those two). */
+
+int
+reg_used_between_p (reg, from_insn, to_insn)
+ rtx reg, from_insn, to_insn;
+{
+ register rtx insn;
+
+ if (from_insn == to_insn)
+ return 0;
+
+ for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (reg_overlap_mentioned_p (reg, PATTERN (insn))
+ || (GET_CODE (insn) == CALL_INSN
+ && (find_reg_fusage (insn, USE, reg)
+ || find_reg_fusage (insn, CLOBBER, reg)))))
+ return 1;
+ return 0;
+}
+
+/* Nonzero if the old value of X, a register, is referenced in BODY. If X
+ is entirely replaced by a new value and the only use is as a SET_DEST,
+ we do not consider it a reference. */
+
+int
+reg_referenced_p (x, body)
+ rtx x;
+ rtx body;
+{
+ int i;
+
+ switch (GET_CODE (body))
+ {
+ case SET:
+ if (reg_overlap_mentioned_p (x, SET_SRC (body)))
+ return 1;
+
+ /* If the destination is anything other than CC0, PC, a REG or a SUBREG
+ of a REG that occupies all of the REG, the insn references X if
+ it is mentioned in the destination. */
+ if (GET_CODE (SET_DEST (body)) != CC0
+ && GET_CODE (SET_DEST (body)) != PC
+ && GET_CODE (SET_DEST (body)) != REG
+ && ! (GET_CODE (SET_DEST (body)) == SUBREG
+ && GET_CODE (SUBREG_REG (SET_DEST (body))) == REG
+ && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
+ && reg_overlap_mentioned_p (x, SET_DEST (body)))
+ return 1;
+ break;
+
+ case ASM_OPERANDS:
+ for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
+ if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
+ return 1;
+ break;
+
+ case CALL:
+ case USE:
+ return reg_overlap_mentioned_p (x, body);
+
+ case TRAP_IF:
+ return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
+
+ case UNSPEC:
+ case UNSPEC_VOLATILE:
+ case PARALLEL:
+ for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
+ if (reg_referenced_p (x, XVECEXP (body, 0, i)))
+ return 1;
+ break;
+ }
+
+ return 0;
+}
+
+/* Nonzero if register REG is referenced in an insn between
+ FROM_INSN and TO_INSN (exclusive of those two). Sets of REG do
+ not count. */
+
+int
+reg_referenced_between_p (reg, from_insn, to_insn)
+ rtx reg, from_insn, to_insn;
+{
+ register rtx insn;
+
+ if (from_insn == to_insn)
+ return 0;
+
+ for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (reg_referenced_p (reg, PATTERN (insn))
+ || (GET_CODE (insn) == CALL_INSN
+ && find_reg_fusage (insn, USE, reg))))
+ return 1;
+ return 0;
+}
+
+/* Nonzero if register REG is set or clobbered in an insn between
+ FROM_INSN and TO_INSN (exclusive of those two). */
+
+int
+reg_set_between_p (reg, from_insn, to_insn)
+ rtx reg, from_insn, to_insn;
+{
+ register rtx insn;
+
+ if (from_insn == to_insn)
+ return 0;
+
+ for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_set_p (reg, insn))
+ return 1;
+ return 0;
+}
+
+/* Internals of reg_set_between_p. */
+
+static rtx reg_set_reg;
+static int reg_set_flag;
+
+void
+reg_set_p_1 (x, pat)
+     rtx x, pat;
+{
+ /* We don't want to return 1 if X is a MEM that contains a register
+ within REG_SET_REG. */
+
+ if ((GET_CODE (x) != MEM)
+ && reg_overlap_mentioned_p (reg_set_reg, x))
+ reg_set_flag = 1;
+}
+
+int
+reg_set_p (reg, insn)
+ rtx reg, insn;
+{
+ rtx body = insn;
+
+ /* We can be passed an insn or part of one. If we are passed an insn,
+ check if a side-effect of the insn clobbers REG. */
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ if (FIND_REG_INC_NOTE (insn, reg)
+ || (GET_CODE (insn) == CALL_INSN
+ /* We'd like to test call_used_regs here, but rtlanal.c can't
+ reference that variable due to its use in genattrtab. So
+ we'll just be more conservative.
+
+ ??? Unless we could ensure that the CALL_INSN_FUNCTION_USAGE
+ information holds all clobbered registers. */
+ && ((GET_CODE (reg) == REG
+ && REGNO (reg) < FIRST_PSEUDO_REGISTER)
+ || GET_CODE (reg) == MEM
+ || find_reg_fusage (insn, CLOBBER, reg))))
+ return 1;
+
+ body = PATTERN (insn);
+ }
+
+ reg_set_reg = reg;
+ reg_set_flag = 0;
+ note_stores (body, reg_set_p_1);
+ return reg_set_flag;
+}
+
+/* Similar to reg_set_between_p, but check all registers in X. Return 0
+ only if none of them are modified between START and END. Return 1 if
+   X contains a MEM; this routine does not perform any memory alias
+   analysis.  */
+
+int
+modified_between_p (x, start, end)
+ rtx x;
+ rtx start, end;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt;
+ int i, j;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 0;
+
+ case PC:
+ case CC0:
+ return 1;
+
+ case MEM:
+ /* If the memory is not constant, assume it is modified. If it is
+ constant, we still have to check the address. */
+ if (! RTX_UNCHANGING_P (x))
+ return 1;
+ break;
+
+ case REG:
+ return reg_set_between_p (x, start, end);
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
+ return 1;
+
+ if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (modified_between_p (XVECEXP (x, i, j), start, end))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Similar to reg_set_p, but check all registers in X. Return 0 only if none
+ of them are modified in INSN. Return 1 if X contains a MEM; this routine
+   does not perform any memory alias analysis.  */
+
+int
+modified_in_p (x, insn)
+ rtx x;
+ rtx insn;
+{
+ enum rtx_code code = GET_CODE (x);
+ char *fmt;
+ int i, j;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 0;
+
+ case PC:
+ case CC0:
+ return 1;
+
+ case MEM:
+ /* If the memory is not constant, assume it is modified. If it is
+ constant, we still have to check the address. */
+ if (! RTX_UNCHANGING_P (x))
+ return 1;
+ break;
+
+ case REG:
+ return reg_set_p (x, insn);
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
+ return 1;
+
+ if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (modified_in_p (XVECEXP (x, i, j), insn))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Given an INSN, return a SET expression if this insn has only a single SET.
+   It may also have CLOBBERs, USEs, or SETs whose output
+ will not be used, which we ignore. */
+
+rtx
+single_set (insn)
+ rtx insn;
+{
+ rtx set;
+ int i;
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+ return 0;
+
+ if (GET_CODE (PATTERN (insn)) == SET)
+ return PATTERN (insn);
+
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ for (i = 0, set = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET
+ && (! find_reg_note (insn, REG_UNUSED,
+ SET_DEST (XVECEXP (PATTERN (insn), 0, i)))
+ || side_effects_p (XVECEXP (PATTERN (insn), 0, i))))
+ {
+ if (set)
+ return 0;
+ else
+ set = XVECEXP (PATTERN (insn), 0, i);
+ }
+ return set;
+ }
+
+ return 0;
+}
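+
+/* An illustrative note (editorial, not in the original source): for an
+   insn whose pattern is
+
+	(parallel [(set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
+		   (clobber (reg:SI 0))])
+
+   single_set returns the inner SET, since a CLOBBER is not a SET.  If
+   the PARALLEL held two SETs whose results are both used, it would
+   return 0.  The register numbers here are hypothetical.  */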
+
+/* Return the last thing that X was assigned from before *PINSN. Verify that
+   the object is not modified up to VALID_TO.  If it was modified, if we
+   hit a partial assignment to X, or if we hit a CODE_LABEL first, then
+   return X.  If we found an assignment, update *PINSN to point to it.  */
+
+rtx
+find_last_value (x, pinsn, valid_to)
+ rtx x;
+ rtx *pinsn;
+ rtx valid_to;
+{
+ rtx p;
+
+ for (p = PREV_INSN (*pinsn); p && GET_CODE (p) != CODE_LABEL;
+ p = PREV_INSN (p))
+ if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
+ {
+ rtx set = single_set (p);
+ rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);
+
+ if (set && rtx_equal_p (x, SET_DEST (set)))
+ {
+ rtx src = SET_SRC (set);
+
+ if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
+ src = XEXP (note, 0);
+
+ if (! modified_between_p (src, PREV_INSN (p), valid_to)
+ /* Reject hard registers because we don't usually want
+ to use them; we'd rather use a pseudo. */
+ && ! (GET_CODE (src) == REG
+ && REGNO (src) < FIRST_PSEUDO_REGISTER))
+ {
+ *pinsn = p;
+ return src;
+ }
+ }
+
+ /* If set in non-simple way, we don't have a value. */
+ if (reg_set_p (x, p))
+ break;
+ }
+
+ return x;
+}
+
+/* Return nonzero if register in range [REGNO, ENDREGNO)
+ appears either explicitly or implicitly in X
+ other than being stored into.
+
+ References contained within the substructure at LOC do not count.
+ LOC may be zero, meaning don't ignore anything. */
+
+int
+refers_to_regno_p (regno, endregno, x, loc)
+ int regno, endregno;
+ rtx x;
+ rtx *loc;
+{
+ register int i;
+ register RTX_CODE code;
+ register char *fmt;
+
+ repeat:
+  /* The contents of a REG_NONNEG note are always zero, so we must come here
+ upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
+ if (x == 0)
+ return 0;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+ i = REGNO (x);
+
+      /* If we are modifying the stack, frame, or argument pointer, it will
+ clobber a virtual register. In fact, we could be more precise,
+ but it isn't worth it. */
+ if ((i == STACK_POINTER_REGNUM
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ || i == ARG_POINTER_REGNUM
+#endif
+ || i == FRAME_POINTER_REGNUM)
+ && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
+ return 1;
+
+ return (endregno > i
+ && regno < i + (i < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (i, GET_MODE (x))
+ : 1));
+
+ case SUBREG:
+ /* If this is a SUBREG of a hard reg, we can see exactly which
+ registers are being modified. Otherwise, handle normally. */
+ if (GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
+ {
+ int inner_regno = REGNO (SUBREG_REG (x)) + SUBREG_WORD (x);
+ int inner_endregno
+ = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
+			     ? HARD_REGNO_NREGS (inner_regno, GET_MODE (x)) : 1);
+
+ return endregno > inner_regno && regno < inner_endregno;
+ }
+ break;
+
+ case CLOBBER:
+ case SET:
+ if (&SET_DEST (x) != loc
+ /* Note setting a SUBREG counts as referring to the REG it is in for
+ a pseudo but not for hard registers since we can
+ treat each word individually. */
+ && ((GET_CODE (SET_DEST (x)) == SUBREG
+ && loc != &SUBREG_REG (SET_DEST (x))
+ && GET_CODE (SUBREG_REG (SET_DEST (x))) == REG
+ && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
+ && refers_to_regno_p (regno, endregno,
+ SUBREG_REG (SET_DEST (x)), loc))
+ || (GET_CODE (SET_DEST (x)) != REG
+ && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
+ return 1;
+
+ if (code == CLOBBER || loc == &SET_SRC (x))
+ return 0;
+ x = SET_SRC (x);
+ goto repeat;
+ }
+
+ /* X does not match, so try its subexpressions. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e' && loc != &XEXP (x, i))
+ {
+ if (i == 0)
+ {
+ x = XEXP (x, 0);
+ goto repeat;
+ }
+ else
+ if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >=0; j--)
+ if (loc != &XVECEXP (x, i, j)
+ && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
+ return 1;
+ }
+ }
+ return 0;
+}
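+
+/* Worked example (editorial, not in the original source): on a machine
+   where HARD_REGNO_NREGS (4, DImode) is 2, the rtx (reg:DI 4) occupies
+   hard registers 4 and 5, so refers_to_regno_p (5, 6, x, NULL_PTR)
+   returns 1 for x = (reg:DI 4) even though 5 is not its REGNO.  */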
+
+/* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
+ we check if any register number in X conflicts with the relevant register
+ numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
+ contains a MEM (we don't bother checking for memory addresses that can't
+   conflict because we expect this to be a rare case).  */
+
+int
+reg_overlap_mentioned_p (x, in)
+ rtx x, in;
+{
+ int regno, endregno;
+
+ if (GET_CODE (x) == SUBREG)
+ {
+ regno = REGNO (SUBREG_REG (x));
+ if (regno < FIRST_PSEUDO_REGISTER)
+ regno += SUBREG_WORD (x);
+ }
+ else if (GET_CODE (x) == REG)
+ regno = REGNO (x);
+ else if (CONSTANT_P (x))
+ return 0;
+ else if (GET_CODE (x) == MEM)
+ {
+ char *fmt;
+ int i;
+
+ if (GET_CODE (in) == MEM)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (in));
+
+ for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
+ if (fmt[i] == 'e' && reg_overlap_mentioned_p (x, XEXP (in, i)))
+ return 1;
+
+ return 0;
+ }
+ else if (GET_CODE (x) == SCRATCH || GET_CODE (x) == PC
+ || GET_CODE (x) == CC0)
+ return reg_mentioned_p (x, in);
+ else
+ abort ();
+
+ endregno = regno + (regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
+
+ return refers_to_regno_p (regno, endregno, in, NULL_PTR);
+}
+
+/* Used for communications between the next few functions. */
+
+static int reg_set_last_unknown;
+static rtx reg_set_last_value;
+static int reg_set_last_first_regno, reg_set_last_last_regno;
+
+/* Called via note_stores from reg_set_last. */
+
+static void
+reg_set_last_1 (x, pat)
+ rtx x;
+ rtx pat;
+{
+ int first, last;
+
+ /* If X is not a register, or is not one in the range we care
+ about, ignore. */
+ if (GET_CODE (x) != REG)
+ return;
+
+ first = REGNO (x);
+ last = first + (first < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (first, GET_MODE (x)) : 1);
+
+ if (first >= reg_set_last_last_regno
+ || last <= reg_set_last_first_regno)
+ return;
+
+ /* If this is a CLOBBER or is some complex LHS, or doesn't modify
+ exactly the registers we care about, show we don't know the value. */
+ if (GET_CODE (pat) == CLOBBER || SET_DEST (pat) != x
+ || first != reg_set_last_first_regno
+ || last != reg_set_last_last_regno)
+ reg_set_last_unknown = 1;
+ else
+ reg_set_last_value = SET_SRC (pat);
+}
+
+/* Return the last value to which REG was set prior to INSN. If we can't
+ find it easily, return 0.
+
+ We only return a REG, SUBREG, or constant because it is too hard to
+ check if a MEM remains unchanged. */
+
+rtx
+reg_set_last (x, insn)
+ rtx x;
+ rtx insn;
+{
+ rtx orig_insn = insn;
+
+ reg_set_last_first_regno = REGNO (x);
+
+ reg_set_last_last_regno
+ = reg_set_last_first_regno
+ + (reg_set_last_first_regno < FIRST_PSEUDO_REGISTER
+ ? HARD_REGNO_NREGS (reg_set_last_first_regno, GET_MODE (x)) : 1);
+
+ reg_set_last_unknown = 0;
+ reg_set_last_value = 0;
+
+  /* Scan backwards until reg_set_last_1 changes one of the above flags.
+     Stop when we reach a label, or, if X is a hard reg, when we reach
+     a CALL_INSN.
+
+ If we find a set of X, ensure that its SET_SRC remains unchanged. */
+
+ /* We compare with <= here, because reg_set_last_last_regno
+ is actually the number of the first reg *not* in X. */
+ for (;
+ insn && GET_CODE (insn) != CODE_LABEL
+ && ! (GET_CODE (insn) == CALL_INSN
+ && reg_set_last_last_regno <= FIRST_PSEUDO_REGISTER);
+ insn = PREV_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ note_stores (PATTERN (insn), reg_set_last_1);
+ if (reg_set_last_unknown)
+ return 0;
+ else if (reg_set_last_value)
+ {
+ if (CONSTANT_P (reg_set_last_value)
+ || ((GET_CODE (reg_set_last_value) == REG
+ || GET_CODE (reg_set_last_value) == SUBREG)
+ && ! reg_set_between_p (reg_set_last_value,
+ NEXT_INSN (insn), orig_insn)))
+ return reg_set_last_value;
+ else
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/* This is 1 until after reload pass. */
+int rtx_equal_function_value_matters;
+
+/* Return 1 if X and Y are identical-looking rtx's.
+ This is the Lisp function EQUAL for rtx arguments. */
+
+int
+rtx_equal_p (x, y)
+ rtx x, y;
+{
+ register int i;
+ register int j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == y)
+ return 1;
+ if (x == 0 || y == 0)
+ return 0;
+
+ code = GET_CODE (x);
+ /* Rtx's of different codes cannot be equal. */
+ if (code != GET_CODE (y))
+ return 0;
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
+ (REG:SI x) and (REG:HI x) are NOT equivalent. */
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively. */
+
+ if (code == REG)
+ /* Until rtl generation is complete, don't consider a reference to the
+ return register of the current function the same as the return from a
+ called function. This eases the job of function integration. Once the
+ distinction is no longer needed, they can be considered equivalent. */
+ return (REGNO (x) == REGNO (y)
+ && (! rtx_equal_function_value_matters
+ || REG_FUNCTION_VALUE_P (x) == REG_FUNCTION_VALUE_P (y)));
+ else if (code == LABEL_REF)
+ return XEXP (x, 0) == XEXP (y, 0);
+ else if (code == SYMBOL_REF)
+ return XSTR (x, 0) == XSTR (y, 0);
+ else if (code == SCRATCH || code == CONST_DOUBLE)
+ return 0;
+
+  /* Compare the elements.  If any pair of corresponding elements
+     fails to match, return 0 for the whole thing.  */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case 'n':
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'V':
+ case 'E':
+ /* Two vectors must have the same length. */
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+
+ /* And the corresponding elements must match. */
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (rtx_equal_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
+ return 0;
+ break;
+
+ case 'e':
+ if (rtx_equal_p (XEXP (x, i), XEXP (y, i)) == 0)
+ return 0;
+ break;
+
+ case 'S':
+ case 's':
+ if (strcmp (XSTR (x, i), XSTR (y, i)))
+ return 0;
+ break;
+
+ case 'u':
+ /* These are just backpointers, so they don't matter. */
+ break;
+
+ case '0':
+ break;
+
+ /* It is believed that rtx's at this level will never
+ contain anything but integers and other rtx's,
+	     except within LABEL_REFs and SYMBOL_REFs.  */
+ default:
+ abort ();
+ }
+ }
+ return 1;
+}
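+
+/* Editorial sketch, not part of the original source: modes participate
+   in the comparison, so two REGs with the same number but different
+   modes are not rtx_equal_p.  Register number 100 is hypothetical, and
+   gen_rtx is used for illustration only (this file avoids it so that
+   it can be linked into genattrtab).  */
+#if 0
+static int
+example_equal ()
+{
+  rtx a = gen_rtx (REG, SImode, 100);
+  rtx b = gen_rtx (REG, SImode, 100);
+  rtx c = gen_rtx (REG, HImode, 100);
+
+  /* a and b match: same code, mode and register number.  a and c do
+     not: the modes differ, as the mode test above requires.  */
+  return rtx_equal_p (a, b) && ! rtx_equal_p (a, c);	/* 1 */
+}
+#endif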
+
+/* Call FUN on each register or MEM that is stored into or clobbered by X.
+ (X would be the pattern of an insn).
+ FUN receives two arguments:
+ the REG, MEM, CC0 or PC being stored in or clobbered,
+ the SET or CLOBBER rtx that does the store.
+
+ If the item being stored in or clobbered is a SUBREG of a hard register,
+ the SUBREG will be passed. */
+
+void
+note_stores (x, fun)
+ register rtx x;
+ void (*fun) ();
+{
+ if ((GET_CODE (x) == SET || GET_CODE (x) == CLOBBER))
+ {
+ register rtx dest = SET_DEST (x);
+ while ((GET_CODE (dest) == SUBREG
+ && (GET_CODE (SUBREG_REG (dest)) != REG
+ || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+ (*fun) (dest, x);
+ }
+ else if (GET_CODE (x) == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ register rtx y = XVECEXP (x, 0, i);
+ if (GET_CODE (y) == SET || GET_CODE (y) == CLOBBER)
+ {
+ register rtx dest = SET_DEST (y);
+ while ((GET_CODE (dest) == SUBREG
+ && (GET_CODE (SUBREG_REG (dest)) != REG
+ || (REGNO (SUBREG_REG (dest))
+ >= FIRST_PSEUDO_REGISTER)))
+ || GET_CODE (dest) == ZERO_EXTRACT
+ || GET_CODE (dest) == SIGN_EXTRACT
+ || GET_CODE (dest) == STRICT_LOW_PART)
+ dest = XEXP (dest, 0);
+ (*fun) (dest, y);
+ }
+ }
+ }
+}
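+
+/* A minimal usage sketch (editorial, not part of the original source):
+   counting the stores in one insn pattern via a callback, the same
+   mechanism reg_set_p above uses.  The callback receives the place
+   stored into and the SET or CLOBBER that stores it.  */
+#if 0
+static int example_n_stores;
+
+static void
+example_count_store (dest, setter)
+     rtx dest, setter;
+{
+  example_n_stores++;
+}
+
+static int
+example_num_stores (insn)
+     rtx insn;
+{
+  example_n_stores = 0;
+  note_stores (PATTERN (insn), example_count_store);
+  return example_n_stores;
+}
+#endif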
+
+/* Return nonzero if X's old contents don't survive after INSN.
+ This will be true if X is (cc0) or if X is a register and
+ X dies in INSN or because INSN entirely sets X.
+
+ "Entirely set" means set directly and not through a SUBREG,
+ ZERO_EXTRACT or SIGN_EXTRACT, so no trace of the old contents remains.
+ Likewise, REG_INC does not count.
+
+ REG may be a hard or pseudo reg. Renumbering is not taken into account,
+ but for this use that makes no difference, since regs don't overlap
+ during their lifetimes. Therefore, this function may be used
+ at any time after deaths have been computed (in flow.c).
+
+ If REG is a hard reg that occupies multiple machine registers, this
+ function will only return 1 if each of those registers will be replaced
+ by INSN. */
+
+int
+dead_or_set_p (insn, x)
+ rtx insn;
+ rtx x;
+{
+ register int regno, last_regno;
+ register int i;
+
+ /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
+ if (GET_CODE (x) == CC0)
+ return 1;
+
+ if (GET_CODE (x) != REG)
+ abort ();
+
+ regno = REGNO (x);
+ last_regno = (regno >= FIRST_PSEUDO_REGISTER ? regno
+ : regno + HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1);
+
+ for (i = regno; i <= last_regno; i++)
+ if (! dead_or_set_regno_p (insn, i))
+ return 0;
+
+ return 1;
+}
+
+/* Utility function for dead_or_set_p to check an individual register. Also
+ called from flow.c. */
+
+int
+dead_or_set_regno_p (insn, test_regno)
+ rtx insn;
+ int test_regno;
+{
+ int regno, endregno;
+ rtx link;
+
+ /* See if there is a death note for something that includes TEST_REGNO. */
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ {
+ if (REG_NOTE_KIND (link) != REG_DEAD || GET_CODE (XEXP (link, 0)) != REG)
+ continue;
+
+ regno = REGNO (XEXP (link, 0));
+ endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1
+ : regno + HARD_REGNO_NREGS (regno,
+ GET_MODE (XEXP (link, 0))));
+
+ if (test_regno >= regno && test_regno < endregno)
+ return 1;
+ }
+
+ if (GET_CODE (insn) == CALL_INSN
+ && find_regno_fusage (insn, CLOBBER, test_regno))
+ return 1;
+
+ if (GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx dest = SET_DEST (PATTERN (insn));
+
+ /* A value is totally replaced if it is the destination or the
+ destination is a SUBREG of REGNO that does not change the number of
+ words in it. */
+ if (GET_CODE (dest) == SUBREG
+ && (((GET_MODE_SIZE (GET_MODE (dest))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
+ dest = SUBREG_REG (dest);
+
+ if (GET_CODE (dest) != REG)
+ return 0;
+
+ regno = REGNO (dest);
+ endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1
+ : regno + HARD_REGNO_NREGS (regno, GET_MODE (dest)));
+
+ return (test_regno >= regno && test_regno < endregno);
+ }
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ register int i;
+
+ for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
+ {
+ rtx body = XVECEXP (PATTERN (insn), 0, i);
+
+ if (GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
+ {
+ rtx dest = SET_DEST (body);
+
+ if (GET_CODE (dest) == SUBREG
+ && (((GET_MODE_SIZE (GET_MODE (dest))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+ == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
+ dest = SUBREG_REG (dest);
+
+ if (GET_CODE (dest) != REG)
+ continue;
+
+ regno = REGNO (dest);
+ endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1
+ : regno + HARD_REGNO_NREGS (regno, GET_MODE (dest)));
+
+ if (test_regno >= regno && test_regno < endregno)
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Return the reg-note of kind KIND in insn INSN, if there is one.
+ If DATUM is nonzero, look for one whose datum is DATUM. */
+
+rtx
+find_reg_note (insn, kind, datum)
+ rtx insn;
+ enum reg_note kind;
+ rtx datum;
+{
+ register rtx link;
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == kind
+ && (datum == 0 || datum == XEXP (link, 0)))
+ return link;
+ return 0;
+}
+
+/* Return the reg-note of kind KIND in insn INSN which applies to register
+ number REGNO, if any. Return 0 if there is no such reg-note. Note that
+ the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
+ it might be the case that the note overlaps REGNO. */
+
+rtx
+find_regno_note (insn, kind, regno)
+ rtx insn;
+ enum reg_note kind;
+ int regno;
+{
+ register rtx link;
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == kind
+ /* Verify that it is a register, so that scratch and MEM won't cause a
+ problem here. */
+ && GET_CODE (XEXP (link, 0)) == REG
+ && REGNO (XEXP (link, 0)) <= regno
+ && ((REGNO (XEXP (link, 0))
+ + (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
+ GET_MODE (XEXP (link, 0)))))
+ > regno))
+ return link;
+ return 0;
+}
+
+/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
+ in the CALL_INSN_FUNCTION_USAGE information of INSN. */
+
+int
+find_reg_fusage (insn, code, datum)
+ rtx insn;
+ enum rtx_code code;
+ rtx datum;
+{
+ /* If it's not a CALL_INSN, it can't possibly have a
+ CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
+ if (GET_CODE (insn) != CALL_INSN)
+ return 0;
+
+ if (! datum)
+    abort ();
+
+ if (GET_CODE (datum) != REG)
+ {
+ register rtx link;
+
+ for (link = CALL_INSN_FUNCTION_USAGE (insn);
+ link;
+ link = XEXP (link, 1))
+ if (GET_CODE (XEXP (link, 0)) == code
+ && rtx_equal_p (datum, SET_DEST (XEXP (link, 0))))
+ return 1;
+ }
+ else
+ {
+ register int regno = REGNO (datum);
+
+ /* CALL_INSN_FUNCTION_USAGE information cannot contain references
+ to pseudo registers, so don't bother checking. */
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int end_regno = regno + HARD_REGNO_NREGS (regno, GET_MODE (datum));
+ int i;
+
+ for (i = regno; i < end_regno; i++)
+ if (find_regno_fusage (insn, code, i))
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
+ in the CALL_INSN_FUNCTION_USAGE information of INSN. */
+
+int
+find_regno_fusage (insn, code, regno)
+ rtx insn;
+ enum rtx_code code;
+ int regno;
+{
+ register rtx link;
+
+ /* CALL_INSN_FUNCTION_USAGE information cannot contain references
+ to pseudo registers, so don't bother checking. */
+
+ if (regno >= FIRST_PSEUDO_REGISTER
+      || GET_CODE (insn) != CALL_INSN)
+ return 0;
+
+ for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
+ {
+ register int regnote;
+ register rtx op;
+
+ if (GET_CODE (op = XEXP (link, 0)) == code
+ && GET_CODE (SET_DEST (op)) == REG
+ && (regnote = REGNO (SET_DEST (op))) <= regno
+ && regnote
+ + HARD_REGNO_NREGS (regnote, GET_MODE (SET_DEST (op)))
+ > regno)
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Remove register note NOTE from the REG_NOTES of INSN. */
+
+void
+remove_note (insn, note)
+     register rtx insn;
+     register rtx note;
+{
+ register rtx link;
+
+ if (REG_NOTES (insn) == note)
+ {
+ REG_NOTES (insn) = XEXP (note, 1);
+ return;
+ }
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (XEXP (link, 1) == note)
+ {
+ XEXP (link, 1) = XEXP (note, 1);
+ return;
+ }
+
+ abort ();
+}
+
+/* Nonzero if X contains any volatile instructions. These are instructions
+   which may cause unpredictable changes to machine state, and thus no
+ instructions should be moved or combined across them. This includes
+ only volatile asms and UNSPEC_VOLATILE instructions. */
+
+int
+volatile_insn_p (x)
+ rtx x;
+{
+ register RTX_CODE code;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case CC0:
+ case PC:
+ case REG:
+ case SCRATCH:
+ case CLOBBER:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ case CALL:
+ case MEM:
+ return 0;
+
+ case UNSPEC_VOLATILE:
+ /* case TRAP_IF: This isn't clear yet. */
+ return 1;
+
+ case ASM_OPERANDS:
+ if (MEM_VOLATILE_P (x))
+ return 1;
+ }
+
+ /* Recursively scan the operands of this expression. */
+
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (volatile_insn_p (XEXP (x, i)))
+ return 1;
+ }
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (volatile_insn_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/* Nonzero if X contains any volatile memory references,
+   UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions.  */
+
+int
+volatile_refs_p (x)
+ rtx x;
+{
+ register RTX_CODE code;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case CC0:
+ case PC:
+ case REG:
+ case SCRATCH:
+ case CLOBBER:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return 0;
+
+ case CALL:
+ case UNSPEC_VOLATILE:
+ /* case TRAP_IF: This isn't clear yet. */
+ return 1;
+
+ case MEM:
+ case ASM_OPERANDS:
+ if (MEM_VOLATILE_P (x))
+ return 1;
+ }
+
+ /* Recursively scan the operands of this expression. */
+
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (volatile_refs_p (XEXP (x, i)))
+ return 1;
+ }
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (volatile_refs_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/* Similar to above, except that it also rejects register pre- and post-
+ incrementing. */
+
+int
+side_effects_p (x)
+ rtx x;
+{
+ register RTX_CODE code;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case CC0:
+ case PC:
+ case REG:
+ case SCRATCH:
+ case ASM_INPUT:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return 0;
+
+ case CLOBBER:
+ /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
+ when some combination can't be done. If we see one, don't think
+ that we can simplify the expression. */
+ return (GET_MODE (x) != VOIDmode);
+
+ case PRE_INC:
+ case PRE_DEC:
+ case POST_INC:
+ case POST_DEC:
+ case CALL:
+ case UNSPEC_VOLATILE:
+ /* case TRAP_IF: This isn't clear yet. */
+ return 1;
+
+ case MEM:
+ case ASM_OPERANDS:
+ if (MEM_VOLATILE_P (x))
+ return 1;
+ }
+
+ /* Recursively scan the operands of this expression. */
+
+ {
+ register char *fmt = GET_RTX_FORMAT (code);
+ register int i;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (side_effects_p (XEXP (x, i)))
+ return 1;
+ }
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (side_effects_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/* Return nonzero if evaluating rtx X might cause a trap. */
+
+int
+may_trap_p (x)
+ rtx x;
+{
+ int i;
+ enum rtx_code code;
+ char *fmt;
+
+ if (x == 0)
+ return 0;
+ code = GET_CODE (x);
+ switch (code)
+ {
+ /* Handle these cases quickly. */
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST:
+ case PC:
+ case CC0:
+ case REG:
+ case SCRATCH:
+ return 0;
+
+ /* Conditional trap can trap! */
+ case UNSPEC_VOLATILE:
+ case TRAP_IF:
+ return 1;
+
+ /* Memory ref can trap unless it's a static var or a stack slot. */
+ case MEM:
+ return rtx_addr_can_trap_p (XEXP (x, 0));
+
+ /* Division by a non-constant might trap. */
+ case DIV:
+ case MOD:
+ case UDIV:
+ case UMOD:
+ if (! CONSTANT_P (XEXP (x, 1)))
+ return 1;
+ /* This was const0_rtx, but by not using that,
+ we can link this file into other programs. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 0)
+ return 1;
+ default:
+ /* Any floating arithmetic may trap. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ return 1;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (may_trap_p (XEXP (x, i)))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (may_trap_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ }
+ return 0;
+}
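+
+/* Illustrative note (editorial, not in the original source): the rtx
+   (div:SI (reg:SI 100) (reg:SI 101)) may trap, because the divisor is
+   not constant; (div:SI (reg:SI 100) (const_int 4)) may not, since the
+   constant is nonzero and SImode is not a floating mode.  A MEM is only
+   as safe as its address, per rtx_addr_can_trap_p above.  The register
+   numbers are hypothetical.  */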
+
+/* Return nonzero if X contains a comparison that is not either EQ or NE,
+ i.e., an inequality. */
+
+int
+inequality_comparisons_p (x)
+ rtx x;
+{
+ register char *fmt;
+ register int len, i;
+ register enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case REG:
+ case SCRATCH:
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ return 0;
+
+ case LT:
+ case LTU:
+ case GT:
+ case GTU:
+ case LE:
+ case LEU:
+ case GE:
+ case GEU:
+ return 1;
+ }
+
+ len = GET_RTX_LENGTH (code);
+ fmt = GET_RTX_FORMAT (code);
+
+ for (i = 0; i < len; i++)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (inequality_comparisons_p (XEXP (x, i)))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (inequality_comparisons_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Replace any occurrence of FROM in X with TO.
+
+ Note that copying is not done so X must not be shared unless all copies
+ are to be modified. */
+
+rtx
+replace_rtx (x, from, to)
+ rtx x, from, to;
+{
+ register int i, j;
+ register char *fmt;
+
+ if (x == from)
+ return to;
+
+ /* Allow this function to make replacements in EXPR_LISTs. */
+ if (x == 0)
+ return 0;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
+ }
+
+ return x;
+}
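+
+/* Usage note (editorial, not part of the original source): because no
+   copying is done, a caller that must preserve the original pattern
+   copies it first, e.g.
+
+	pat = replace_rtx (copy_rtx (pat), old_reg, new_reg);
+
+   where `pat', `old_reg' and `new_reg' are hypothetical.  */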
+
+/* Throughout the rtx X, replace many registers according to REG_MAP.
+ Return the replacement for X (which may be X with altered contents).
+ REG_MAP[R] is the replacement for register R, or 0 for don't replace.
+ NREGS is the length of REG_MAP; regs >= NREGS are not mapped.
+
+ We only support REG_MAP entries of REG or SUBREG. Also, hard registers
+ should not be mapped to pseudos or vice versa since validate_change
+ is not called.
+
+ If REPLACE_DEST is 1, replacements are also done in destinations;
+ otherwise, only sources are replaced. */
+
+rtx
+replace_regs (x, reg_map, nregs, replace_dest)
+ rtx x;
+ rtx *reg_map;
+ int nregs;
+ int replace_dest;
+{
+ register enum rtx_code code;
+ register int i;
+ register char *fmt;
+
+ if (x == 0)
+ return x;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case SCRATCH:
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return x;
+
+ case REG:
+ /* Verify that the register has an entry before trying to access it. */
+ if (REGNO (x) < nregs && reg_map[REGNO (x)] != 0)
+ {
+ /* SUBREGs can't be shared. Always return a copy to ensure that if
+ this replacement occurs more than once then each instance will
+ get distinct rtx. */
+ if (GET_CODE (reg_map[REGNO (x)]) == SUBREG)
+ return copy_rtx (reg_map[REGNO (x)]);
+ return reg_map[REGNO (x)];
+ }
+ return x;
+
+ case SUBREG:
+ /* Prevent making nested SUBREGs. */
+ if (GET_CODE (SUBREG_REG (x)) == REG && REGNO (SUBREG_REG (x)) < nregs
+ && reg_map[REGNO (SUBREG_REG (x))] != 0
+ && GET_CODE (reg_map[REGNO (SUBREG_REG (x))]) == SUBREG)
+ {
+ rtx map_val = reg_map[REGNO (SUBREG_REG (x))];
+ rtx map_inner = SUBREG_REG (map_val);
+
+ if (GET_MODE (x) == GET_MODE (map_inner))
+ return map_inner;
+ else
+ {
+ /* We cannot call gen_rtx here since we may be linked with
+ genattrtab.c. */
+ /* Let's try clobbering the incoming SUBREG and see
+ if this is really safe. */
+ SUBREG_REG (x) = map_inner;
+ SUBREG_WORD (x) += SUBREG_WORD (map_val);
+ return x;
+#if 0
+ rtx new = rtx_alloc (SUBREG);
+ PUT_MODE (new, GET_MODE (x));
+ SUBREG_REG (new) = map_inner;
+ SUBREG_WORD (new) = SUBREG_WORD (x) + SUBREG_WORD (map_val);
+#endif
+ }
+ }
+ break;
+
+ case SET:
+ if (replace_dest)
+ SET_DEST (x) = replace_regs (SET_DEST (x), reg_map, nregs, 0);
+
+ else if (GET_CODE (SET_DEST (x)) == MEM
+ || GET_CODE (SET_DEST (x)) == STRICT_LOW_PART)
+ /* Even if we are not to replace destinations, replace register if it
+ is CONTAINED in destination (destination is memory or
+ STRICT_LOW_PART). */
+ XEXP (SET_DEST (x), 0) = replace_regs (XEXP (SET_DEST (x), 0),
+ reg_map, nregs, 0);
+ else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
+ /* Similarly, for ZERO_EXTRACT we replace all operands. */
+ break;
+
+ SET_SRC (x) = replace_regs (SET_SRC (x), reg_map, nregs, 0);
+ return x;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = replace_regs (XEXP (x, i), reg_map, nregs, replace_dest);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j) = replace_regs (XVECEXP (x, i, j), reg_map,
+ nregs, replace_dest);
+ }
+ }
+ return x;
+}
diff --git a/gnu/usr.bin/cc/cc_int/sched.c b/gnu/usr.bin/cc/cc_int/sched.c
new file mode 100644
index 0000000..9870967
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/sched.c
@@ -0,0 +1,4884 @@
+/* Instruction scheduling pass.
+ Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+ Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Instruction scheduling pass.
+
+ This pass implements list scheduling within basic blocks. It is
+ run after flow analysis, but before register allocation. The
+ scheduler works as follows:
+
+ We compute insn priorities based on data dependencies. Flow
+ analysis only creates a fraction of the data-dependencies we must
+ observe: namely, only those dependencies which the combiner can be
+ expected to use. For this pass, we must therefore create the
+ remaining dependencies we need to observe: register dependencies,
+ memory dependencies, dependencies to keep function calls in order,
+ and the dependence between a conditional branch and the setting of
+ condition codes are all dealt with here.
+
+ The scheduler first traverses the data flow graph, starting with
+ the last instruction, and proceeding to the first, assigning
+ values to insn_priority as it goes. This sorts the instructions
+ topologically by data dependence.
+
+ Once priorities have been established, we order the insns using
+ list scheduling. This works as follows: starting with a list of
+ all the ready insns, and sorted according to priority number, we
+ schedule the insn from the end of the list by placing its
+ predecessors in the list according to their priority order. We
+ consider this insn scheduled by setting the pointer to the "end" of
+ the list to point to the previous insn. When an insn has no
+ predecessors, we either queue it until sufficient time has elapsed
+ or add it to the ready list. As the instructions are scheduled or
+ when stalls are introduced, the queue advances and dumps insns into
+ the ready list. When all insns down to the lowest priority have
+ been scheduled, the critical path of the basic block has been made
+ as short as possible. The remaining insns are then scheduled in
+ remaining slots.
+
+ Function unit conflicts are resolved during reverse list scheduling
+ by tracking the time when each insn is committed to the schedule
+ and from that, the time the function units it uses must be free.
+ As insns on the ready list are considered for scheduling, those
+ that would result in a blockage of the already committed insns are
+ queued until no blockage will result. Among the remaining insns on
+ the ready list to be considered, the first one with the largest
+ potential for causing a subsequent blockage is chosen.
+
+ The following list shows the order in which we want to break ties
+ among insns in the ready list:
+
+ 1. choose insn with lowest conflict cost, ties broken by
+ 2. choose insn with the longest path to end of bb, ties broken by
+ 3. choose insn that kills the most registers, ties broken by
+ 4. choose insn that conflicts with the most ready insns, or finally
+ 5. choose insn with lowest UID.
+
+ Memory references complicate matters. Only if we can be certain
+ that memory references are not part of the data dependency graph
+ (via true, anti, or output dependence), can we move operations past
+ memory references. To first approximation, reads can be done
+ independently, while writes introduce dependencies. Better
+ approximations will yield fewer dependencies.
+
+ Dependencies set up by memory references are treated in exactly the
+ same way as other dependencies, by using LOG_LINKS.
+
+ Having optimized the critical path, we may have also unduly
+ extended the lifetimes of some registers. If an operation requires
+ that constants be loaded into registers, it is certainly desirable
+ to load those constants as early as necessary, but no earlier.
+ I.e., it will not do to load up a bunch of registers at the
+ beginning of a basic block only to use them at the end, if they
+ could be loaded later, since this may result in excessive register
+ utilization.
+
+ Note that since branches are never in basic blocks, but only end
+ basic blocks, this pass will not do any branch scheduling. But
+ that is ok, since we can use GNU's delayed branch scheduling
+ pass to take care of this case.
+
+ Also note that no further optimizations based on algebraic identities
+ are performed, so this pass would be a good one to perform instruction
+ splitting, such as breaking up a multiply instruction into shifts
+ and adds where that is profitable.
+
+ Given the memory aliasing analysis that this pass should perform,
+ it should be possible to remove redundant stores to memory, and to
+ load values from registers instead of hitting memory.
+
+ This pass must update information that subsequent passes expect to be
+ correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
+ reg_n_calls_crossed, and reg_live_length. Also, basic_block_head,
+ basic_block_end.
+
+ The information in the line number notes is carefully retained by this
+ pass. All other NOTE insns are grouped in their same relative order at
+ the beginning of basic blocks that have been scheduled. */
+
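+/* Editorial sketch (not part of the original source) of the list
+   scheduling loop described above; the real implementation is in
+   schedule_block, below.
+
+	clock = 0;
+	while (insns remain in the ready list or the queue)
+	  {
+	    clock++;
+	    move insns whose stall has expired from the queue to `ready';
+	    sort `ready' by the tie-breaking rules listed above;
+	    for each insn in `ready' that would not block a committed insn
+	      {
+		commit it to the schedule at time `clock';
+		for each of its LOG_LINKS predecessors whose dependencies
+		  are now all satisfied, add the predecessor to `ready'
+		  or to the queue, according to its cost;
+	      }
+	  }
+ */
+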
+#include <stdio.h>
+#include "config.h"
+#include "rtl.h"
+#include "basic-block.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "flags.h"
+#include "insn-config.h"
+#include "insn-attr.h"
+
+#ifdef INSN_SCHEDULING
+/* Arrays set up by scheduling for the same respective purposes as
+ similar-named arrays set up by flow analysis. We work with these
+ arrays during the scheduling pass so we can compare values against
+ unscheduled code.
+
+ Values of these arrays are copied at the end of this pass into the
+ arrays set up by flow analysis. */
+static short *sched_reg_n_deaths;
+static int *sched_reg_n_calls_crossed;
+static int *sched_reg_live_length;
+
+/* Element N is the next insn that sets (hard or pseudo) register
+ N within the current basic block; or zero, if there is no
+ such insn. Needed for new registers which may be introduced
+ by splitting insns. */
+static rtx *reg_last_uses;
+static rtx *reg_last_sets;
+static regset reg_pending_sets;
+static int reg_pending_sets_all;
+
+/* Vector indexed by INSN_UID giving the original ordering of the insns. */
+static int *insn_luid;
+#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])
+
+/* Vector indexed by INSN_UID giving each instruction a priority. */
+static int *insn_priority;
+#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])
+
+static short *insn_costs;
+#define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]
+
+/* Vector indexed by INSN_UID giving an encoding of the function units
+ used. */
+static short *insn_units;
+#define INSN_UNIT(INSN) insn_units[INSN_UID (INSN)]
+
+/* Vector indexed by INSN_UID giving an encoding of the blockage range
+ function. The unit and the range are encoded. */
+static unsigned int *insn_blockage;
+#define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
+#define UNIT_BITS 5
+#define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
+#define ENCODE_BLOCKAGE(U,R) \
+ ((((U) << UNIT_BITS) << BLOCKAGE_BITS \
+ | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS \
+ | MAX_BLOCKAGE_COST (R))
+#define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
+#define BLOCKAGE_RANGE(B) \
+ (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
+   | ((B) & BLOCKAGE_MASK))
+
+/* Encodings of the `<name>_unit_blockage_range' function. */
+#define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
+#define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
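+
+/* Worked example (editorial, not part of the original source): assume
+   BLOCKAGE_BITS is 5.  For unit number U = 1 and a range R with
+   minimum cost 2 and maximum cost 3, i.e.
+   R = (2 << (HOST_BITS_PER_INT / 2)) | 3,
+
+	ENCODE_BLOCKAGE (1, R) = ((((1 << 5) << 5) | 2) << 5) | 3 = 32835
+
+   and then UNIT_BLOCKED (32835) = 32835 >> 10 = 32 = 1 << UNIT_BITS,
+   while BLOCKAGE_RANGE (32835) reconstructs R: min 2 in the high half
+   word, max 3 in the low.  */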
+
+#define DONE_PRIORITY -1
+#define MAX_PRIORITY 0x7fffffff
+#define TAIL_PRIORITY 0x7ffffffe
+#define LAUNCH_PRIORITY 0x7f000001
+#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
+#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)
+
+/* Vector indexed by INSN_UID giving number of insns referring to this insn. */
+static int *insn_ref_count;
+#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])
+
+/* Vector indexed by INSN_UID giving line-number note in effect for each
+ insn. For line-number notes, this indicates whether the note may be
+ reused. */
+static rtx *line_note;
+#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])
+
+/* Vector indexed by basic block number giving the starting line-number
+ for each basic block. */
+static rtx *line_note_head;
+
+/* List of important notes we must keep around. This is a pointer to the
+ last element in the list. */
+static rtx note_list;
+
+/* Regsets telling whether a given register is live or dead before the last
+ scheduled insn. Must scan the instructions once before scheduling to
+ determine what registers are live or dead at the end of the block. */
+static regset bb_dead_regs;
+static regset bb_live_regs;
+
+/* Regset telling whether a given register is live after the insn currently
+ being scheduled. Before processing an insn, this is equal to bb_live_regs
+ above. This is used so that we can find registers that are newly born/dead
+ after processing an insn. */
+static regset old_live_regs;
+
+/* The chain of REG_DEAD notes. REG_DEAD notes are removed from all insns
+ during the initial scan and reused later. If there are not exactly as
+ many REG_DEAD notes in the post scheduled code as there were in the
+ prescheduled code then we trigger an abort because this indicates a bug. */
+static rtx dead_notes;
+
+/* Queues, etc. */
+
+/* An instruction is ready to be scheduled when all insns following it
+ have already been scheduled. It is important to ensure that all
+ insns which use its result will not be executed until its result
+ has been computed. An insn is maintained in one of four structures:
+
+ (P) the "Pending" set of insns which cannot be scheduled until
+ their dependencies have been satisfied.
+ (Q) the "Queued" set of insns that can be scheduled when sufficient
+ time has passed.
+ (R) the "Ready" list of unscheduled, uncommitted insns.
+ (S) the "Scheduled" list of insns.
+
+ Initially, all insns are either "Pending" or "Ready" depending on
+ whether their dependencies are satisfied.
+
+ Insns move from the "Ready" list to the "Scheduled" list as they
+ are committed to the schedule. As this occurs, the insns in the
+ "Pending" list have their dependencies satisfied and move to either
+ the "Ready" list or the "Queued" set depending on whether
+ sufficient time has passed to make them ready. As time passes,
+ insns move from the "Queued" set to the "Ready" list. Insns may
+ move from the "Ready" list to the "Queued" set if they are blocked
+ due to a function unit conflict.
+
+   The "Pending" list (P) consists of the insns in the LOG_LINKS of the
+   unscheduled insns, i.e., those that are ready, queued, and pending.
+ The "Queued" set (Q) is implemented by the variable `insn_queue'.
+ The "Ready" list (R) is implemented by the variables `ready' and
+ `n_ready'.
+ The "Scheduled" list (S) is the new insn chain built by this pass.
+
+ The transition (R->S) is implemented in the scheduling loop in
+ `schedule_block' when the best insn to schedule is chosen.
+ The transition (R->Q) is implemented in `schedule_select' when an
+   insn is found to have a function unit conflict with the already
+ committed insns.
+ The transitions (P->R and P->Q) are implemented in `schedule_insn' as
+ insns move from the ready list to the scheduled list.
+ The transition (Q->R) is implemented at the top of the scheduling
+ loop in `schedule_block' as time passes or stalls are introduced. */
+
+/* Implement a circular buffer to delay instructions until sufficient
+ time has passed. INSN_QUEUE_SIZE is a power of two larger than
+ MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c. This is the
+   longest time an insn may be queued.  */
+static rtx insn_queue[INSN_QUEUE_SIZE];
+static int q_ptr = 0;
+static int q_size = 0;
+#define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
+#define NEXT_Q_AFTER(X,C) (((X)+C) & (INSN_QUEUE_SIZE-1))
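+
+/* Worked example (editorial, not part of the original source): an insn
+   that must stall 3 cycles is placed in
+   insn_queue[NEXT_Q_AFTER (q_ptr, 3)], i.e. slot (q_ptr + 3) masked by
+   INSN_QUEUE_SIZE - 1.  Each clock tick advances q_ptr with NEXT_Q and
+   drains that slot into the ready list; the power-of-two size makes the
+   wrap-around a single mask.  */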
+
+/* Vector indexed by INSN_UID giving the minimum clock tick at which
+ the insn becomes ready. This is used to note timing constraints for
+ insns in the pending list. */
+static int *insn_tick;
+#define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])
+
+/* Data structure for keeping track of register information
+ during that register's life. */
+
+struct sometimes
+{
+ short offset; short bit;
+ short live_length; short calls_crossed;
+};
+
+/* Forward declarations. */
+static rtx canon_rtx PROTO((rtx));
+static int rtx_equal_for_memref_p PROTO((rtx, rtx));
+static rtx find_symbolic_term PROTO((rtx));
+static int memrefs_conflict_p PROTO((int, rtx, int, rtx,
+ HOST_WIDE_INT));
+static void add_dependence PROTO((rtx, rtx, enum reg_note));
+static void remove_dependence PROTO((rtx, rtx));
+static rtx find_insn_list PROTO((rtx, rtx));
+static int insn_unit PROTO((rtx));
+static unsigned int blockage_range PROTO((int, rtx));
+static void clear_units PROTO((void));
+static void prepare_unit PROTO((int));
+static int actual_hazard_this_instance PROTO((int, int, rtx, int, int));
+static void schedule_unit PROTO((int, rtx, int));
+static int actual_hazard PROTO((int, rtx, int, int));
+static int potential_hazard PROTO((int, rtx, int));
+static int insn_cost PROTO((rtx, rtx, rtx));
+static int priority PROTO((rtx));
+static void free_pending_lists PROTO((void));
+static void add_insn_mem_dependence PROTO((rtx *, rtx *, rtx, rtx));
+static void flush_pending_lists PROTO((rtx));
+static void sched_analyze_1 PROTO((rtx, rtx));
+static void sched_analyze_2 PROTO((rtx, rtx));
+static void sched_analyze_insn PROTO((rtx, rtx));
+static int sched_analyze PROTO((rtx, rtx));
+static void sched_note_set PROTO((int, rtx, int));
+static int rank_for_schedule PROTO((rtx *, rtx *));
+static void swap_sort PROTO((rtx *, int));
+static void queue_insn PROTO((rtx, int));
+static int birthing_insn PROTO((rtx));
+static void adjust_priority PROTO((rtx));
+static int schedule_insn PROTO((rtx, rtx *, int, int));
+static int schedule_select PROTO((rtx *, int, int, FILE *));
+static void create_reg_dead_note PROTO((rtx, rtx));
+static void attach_deaths PROTO((rtx, rtx, int));
+static void attach_deaths_insn PROTO((rtx));
+static rtx unlink_notes PROTO((rtx, rtx));
+static int new_sometimes_live PROTO((struct sometimes *, int, int,
+ int));
+static void finish_sometimes_live PROTO((struct sometimes *, int));
+static void schedule_block PROTO((int, FILE *));
+static rtx regno_use_in PROTO((int, rtx));
+static void split_hard_reg_notes PROTO((rtx, rtx, rtx, rtx));
+static void new_insn_dead_notes PROTO((rtx, rtx, rtx, rtx));
+static void update_n_sets PROTO((rtx, int));
+static void update_flow_info PROTO((rtx, rtx, rtx, rtx));
+
+/* Main entry point of this file. */
+void schedule_insns PROTO((FILE *));
+
+#endif /* INSN_SCHEDULING */
+
+#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
+
+/* Vector indexed by N giving the initial (unchanging) value known
+ for pseudo-register N. */
+static rtx *reg_known_value;
+
+/* Vector recording for each reg_known_value whether it is due to a
+ REG_EQUIV note. Future passes (viz., reload) may replace the
+ pseudo with the equivalent expression and so we account for the
+ dependences that would be introduced if that happens. */
+/* ??? This is a problem only on the Convex. The REG_EQUIV notes created in
+ assign_parms mention the arg pointer, and there are explicit insns in the
+ RTL that modify the arg pointer. Thus we must ensure that such insns don't
+ get scheduled across each other because that would invalidate the REG_EQUIV
+ notes. One could argue that the REG_EQUIV notes are wrong, but solving
+ the problem in the scheduler will likely give better code, so we do it
+ here. */
+static char *reg_known_equiv_p;
+
+/* Indicates number of valid entries in reg_known_value. */
+static int reg_known_value_size;
+
+static rtx
+canon_rtx (x)
+ rtx x;
+{
+ if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
+      && REGNO (x) < reg_known_value_size)
+ return reg_known_value[REGNO (x)];
+ else if (GET_CODE (x) == PLUS)
+ {
+ rtx x0 = canon_rtx (XEXP (x, 0));
+ rtx x1 = canon_rtx (XEXP (x, 1));
+
+ if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
+ {
+ /* We can tolerate LO_SUMs being offset here; these
+	     rtxs are used for nothing other than comparisons.  */
+ if (GET_CODE (x0) == CONST_INT)
+ return plus_constant_for_output (x1, INTVAL (x0));
+ else if (GET_CODE (x1) == CONST_INT)
+ return plus_constant_for_output (x0, INTVAL (x1));
+ return gen_rtx (PLUS, GET_MODE (x), x0, x1);
+ }
+ }
+ return x;
+}
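+
+/* For example, suppose reg_known_value records that pseudo 100 holds
+   (symbol_ref "x") (the register number is just an example).  Then
+   canonicalizing (plus (reg 100) (const_int 8)) replaces the REG by the
+   SYMBOL_REF and folds the constant through plus_constant_for_output,
+   so a reference built from the pseudo and one built from the symbol
+   directly compare equal in rtx_equal_for_memref_p below.  */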
+
+/* Set up all info needed to perform alias analysis on memory references. */
+
+void
+init_alias_analysis ()
+{
+ int maxreg = max_reg_num ();
+ rtx insn;
+ rtx note;
+ rtx set;
+
+ reg_known_value_size = maxreg;
+
+ reg_known_value
+ = (rtx *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx))
+ - FIRST_PSEUDO_REGISTER;
+ bzero ((char *) (reg_known_value + FIRST_PSEUDO_REGISTER),
+ (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx));
+
+ reg_known_equiv_p
+    = (char *) oballoc ((maxreg - FIRST_PSEUDO_REGISTER) * sizeof (char))
+ - FIRST_PSEUDO_REGISTER;
+ bzero (reg_known_equiv_p + FIRST_PSEUDO_REGISTER,
+ (maxreg - FIRST_PSEUDO_REGISTER) * sizeof (char));
+
+ /* Fill in the entries with known constant values. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if ((set = single_set (insn)) != 0
+ && GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
+	&& (((note = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0
+ && reg_n_sets[REGNO (SET_DEST (set))] == 1)
+ || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0)
+ && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
+ {
+ int regno = REGNO (SET_DEST (set));
+ reg_known_value[regno] = XEXP (note, 0);
+ reg_known_equiv_p[regno] = REG_NOTE_KIND (note) == REG_EQUIV;
+ }
+
+ /* Fill in the remaining entries. */
+ while (--maxreg >= FIRST_PSEUDO_REGISTER)
+ if (reg_known_value[maxreg] == 0)
+ reg_known_value[maxreg] = regno_reg_rtx[maxreg];
+}
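+
+/* Note the biased allocation above: only maxreg - FIRST_PSEUDO_REGISTER
+   entries exist, but the base pointers are offset downward by
+   FIRST_PSEUDO_REGISTER so that reg_known_value[regno] indexes directly
+   by register number.  E.g. with FIRST_PSEUDO_REGISTER == 64, the first
+   allocated element is addressed as reg_known_value[64]; slots below 64
+   must never be touched.  */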
+
+/* Return 1 if X and Y are identical-looking rtx's.
+
+ We use the data in reg_known_value above to see if two registers with
+ different numbers are, in fact, equivalent. */
+
+static int
+rtx_equal_for_memref_p (x, y)
+ rtx x, y;
+{
+ register int i;
+ register int j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == 0 && y == 0)
+ return 1;
+ if (x == 0 || y == 0)
+ return 0;
+ x = canon_rtx (x);
+ y = canon_rtx (y);
+
+ if (x == y)
+ return 1;
+
+ code = GET_CODE (x);
+ /* Rtx's of different codes cannot be equal. */
+ if (code != GET_CODE (y))
+ return 0;
+
+ /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
+ (REG:SI x) and (REG:HI x) are NOT equivalent. */
+
+ if (GET_MODE (x) != GET_MODE (y))
+ return 0;
+
+ /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively. */
+
+ if (code == REG)
+ return REGNO (x) == REGNO (y);
+ if (code == LABEL_REF)
+ return XEXP (x, 0) == XEXP (y, 0);
+ if (code == SYMBOL_REF)
+ return XSTR (x, 0) == XSTR (y, 0);
+
+  /* For commutative operations, the RTXs match if the operands match in
+     any order.  Also handle the simple binary and unary cases without a
+     loop.  */
+ if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
+ return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
+ && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
+ || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
+ && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
+ else if (GET_RTX_CLASS (code) == '<' || GET_RTX_CLASS (code) == '2')
+ return (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
+ && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)));
+ else if (GET_RTX_CLASS (code) == '1')
+ return rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0));
+
+  /* Compare the elements.  If any pair of corresponding elements
+     fails to match, return 0 for the whole thing.  */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ switch (fmt[i])
+ {
+ case 'w':
+ if (XWINT (x, i) != XWINT (y, i))
+ return 0;
+ break;
+
+ case 'n':
+ case 'i':
+ if (XINT (x, i) != XINT (y, i))
+ return 0;
+ break;
+
+ case 'V':
+ case 'E':
+ /* Two vectors must have the same length. */
+ if (XVECLEN (x, i) != XVECLEN (y, i))
+ return 0;
+
+ /* And the corresponding elements must match. */
+ for (j = 0; j < XVECLEN (x, i); j++)
+ if (rtx_equal_for_memref_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
+ return 0;
+ break;
+
+ case 'e':
+ if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
+ return 0;
+ break;
+
+ case 'S':
+ case 's':
+ if (strcmp (XSTR (x, i), XSTR (y, i)))
+ return 0;
+ break;
+
+ case 'u':
+ /* These are just backpointers, so they don't matter. */
+ break;
+
+ case '0':
+ break;
+
+ /* It is believed that rtx's at this level will never
+ contain anything but integers and other rtx's,
+ except for within LABEL_REFs and SYMBOL_REFs. */
+ default:
+ abort ();
+ }
+ }
+ return 1;
+}
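+
+/* So, for instance, (plus (reg 65) (reg 66)) and (plus (reg 66) (reg 65))
+   compare equal through the commutative case above, while (mult:SI x y)
+   never matches (mult:HI x y) because the modes are compared first.  */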
+
+/* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
+ X and return it, or return 0 if none found. */
+
+static rtx
+find_symbolic_term (x)
+ rtx x;
+{
+ register int i;
+ register enum rtx_code code;
+ register char *fmt;
+
+ code = GET_CODE (x);
+ if (code == SYMBOL_REF || code == LABEL_REF)
+ return x;
+ if (GET_RTX_CLASS (code) == 'o')
+ return 0;
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ rtx t;
+
+ if (fmt[i] == 'e')
+ {
+ t = find_symbolic_term (XEXP (x, i));
+ if (t != 0)
+ return t;
+ }
+ else if (fmt[i] == 'E')
+ break;
+ }
+ return 0;
+}
+
+/* Return nonzero if X and Y (memory addresses) could reference the
+ same location in memory. C is an offset accumulator. When
+ C is nonzero, we are testing aliases between X and Y + C.
+ XSIZE is the size in bytes of the X reference,
+ similarly YSIZE is the size in bytes for Y.
+
+ If XSIZE or YSIZE is zero, we do not know the amount of memory being
+ referenced (the reference was BLKmode), so make the most pessimistic
+ assumptions.
+
+ We recognize the following cases of non-conflicting memory:
+
+ (1) addresses involving the frame pointer cannot conflict
+ with addresses involving static variables.
+ (2) static variables with different addresses cannot conflict.
+
+   It would be nice to notice that varying addresses cannot conflict with
+   fp if no local variables had their addresses taken, but that's too hard
+   now.  */
+
+/* ??? In Fortran, references to an array parameter can never conflict
+   with another array parameter.  */
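+
+/* A worked example of the offset test used throughout this function:
+   for two 4-byte references at (plus fp (const_int 8)) and
+   (plus fp (const_int 12)), the recursion strips the common term fp,
+   reduces the operands to CONST_INTs, and accumulates c = 4; since
+   xsize (4) is not greater than c, there is no overlap and the result
+   is 0.  Widen the first reference to 8 bytes and xsize > c holds, so a
+   conflict is reported.  */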
+
+static int
+memrefs_conflict_p (xsize, x, ysize, y, c)
+ rtx x, y;
+ int xsize, ysize;
+ HOST_WIDE_INT c;
+{
+ if (GET_CODE (x) == HIGH)
+ x = XEXP (x, 0);
+ else if (GET_CODE (x) == LO_SUM)
+ x = XEXP (x, 1);
+ else
+ x = canon_rtx (x);
+ if (GET_CODE (y) == HIGH)
+ y = XEXP (y, 0);
+ else if (GET_CODE (y) == LO_SUM)
+ y = XEXP (y, 1);
+ else
+ y = canon_rtx (y);
+
+ if (rtx_equal_for_memref_p (x, y))
+ return (xsize == 0 || ysize == 0 ||
+ (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
+
+ if (y == frame_pointer_rtx || y == hard_frame_pointer_rtx
+ || y == stack_pointer_rtx)
+ {
+ rtx t = y;
+ int tsize = ysize;
+ y = x; ysize = xsize;
+ x = t; xsize = tsize;
+ }
+
+ if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
+ || x == stack_pointer_rtx)
+ {
+ rtx y1;
+
+ if (CONSTANT_P (y))
+ return 0;
+
+ if (GET_CODE (y) == PLUS
+ && canon_rtx (XEXP (y, 0)) == x
+ && (y1 = canon_rtx (XEXP (y, 1)))
+ && GET_CODE (y1) == CONST_INT)
+ {
+ c += INTVAL (y1);
+ return (xsize == 0 || ysize == 0
+ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
+ }
+
+ if (GET_CODE (y) == PLUS
+ && (y1 = canon_rtx (XEXP (y, 0)))
+ && CONSTANT_P (y1))
+ return 0;
+
+ return 1;
+ }
+
+ if (GET_CODE (x) == PLUS)
+ {
+ /* The fact that X is canonicalized means that this
+ PLUS rtx is canonicalized. */
+ rtx x0 = XEXP (x, 0);
+ rtx x1 = XEXP (x, 1);
+
+ if (GET_CODE (y) == PLUS)
+ {
+ /* The fact that Y is canonicalized means that this
+ PLUS rtx is canonicalized. */
+ rtx y0 = XEXP (y, 0);
+ rtx y1 = XEXP (y, 1);
+
+ if (rtx_equal_for_memref_p (x1, y1))
+ return memrefs_conflict_p (xsize, x0, ysize, y0, c);
+ if (rtx_equal_for_memref_p (x0, y0))
+ return memrefs_conflict_p (xsize, x1, ysize, y1, c);
+	  if (GET_CODE (x1) == CONST_INT)
+	    {
+	      if (GET_CODE (y1) == CONST_INT)
+		return memrefs_conflict_p (xsize, x0, ysize, y0,
+					   c - INTVAL (x1) + INTVAL (y1));
+	      else
+		return memrefs_conflict_p (xsize, x0, ysize, y,
+					   c - INTVAL (x1));
+	    }
+	  else if (GET_CODE (y1) == CONST_INT)
+	    return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
+
+ /* Handle case where we cannot understand iteration operators,
+ but we notice that the base addresses are distinct objects. */
+ x = find_symbolic_term (x);
+ if (x == 0)
+ return 1;
+ y = find_symbolic_term (y);
+ if (y == 0)
+ return 1;
+ return rtx_equal_for_memref_p (x, y);
+ }
+ else if (GET_CODE (x1) == CONST_INT)
+ return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
+ }
+ else if (GET_CODE (y) == PLUS)
+ {
+ /* The fact that Y is canonicalized means that this
+ PLUS rtx is canonicalized. */
+ rtx y0 = XEXP (y, 0);
+ rtx y1 = XEXP (y, 1);
+
+ if (GET_CODE (y1) == CONST_INT)
+ return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
+ else
+ return 1;
+ }
+
+ if (GET_CODE (x) == GET_CODE (y))
+ switch (GET_CODE (x))
+ {
+ case MULT:
+ {
+ /* Handle cases where we expect the second operands to be the
+ same, and check only whether the first operand would conflict
+ or not. */
+ rtx x0, y0;
+ rtx x1 = canon_rtx (XEXP (x, 1));
+ rtx y1 = canon_rtx (XEXP (y, 1));
+ if (! rtx_equal_for_memref_p (x1, y1))
+ return 1;
+ x0 = canon_rtx (XEXP (x, 0));
+ y0 = canon_rtx (XEXP (y, 0));
+ if (rtx_equal_for_memref_p (x0, y0))
+ return (xsize == 0 || ysize == 0
+ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
+
+ /* Can't properly adjust our sizes. */
+ if (GET_CODE (x1) != CONST_INT)
+ return 1;
+ xsize /= INTVAL (x1);
+ ysize /= INTVAL (x1);
+ c /= INTVAL (x1);
+ return memrefs_conflict_p (xsize, x0, ysize, y0, c);
+ }
+ }
+
+ if (CONSTANT_P (x))
+ {
+ if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
+ {
+ c += (INTVAL (y) - INTVAL (x));
+ return (xsize == 0 || ysize == 0
+ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
+ }
+
+ if (GET_CODE (x) == CONST)
+ {
+ if (GET_CODE (y) == CONST)
+ return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
+ ysize, canon_rtx (XEXP (y, 0)), c);
+ else
+ return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
+ ysize, y, c);
+ }
+ if (GET_CODE (y) == CONST)
+ return memrefs_conflict_p (xsize, x, ysize,
+ canon_rtx (XEXP (y, 0)), c);
+
+ if (CONSTANT_P (y))
+ return (rtx_equal_for_memref_p (x, y)
+ && (xsize == 0 || ysize == 0
+ || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)));
+
+ return 1;
+ }
+ return 1;
+}
+
+/* Functions to compute memory dependencies.
+
+ Since we process the insns in execution order, we can build tables
+ to keep track of what registers are fixed (and not aliased), what registers
+ are varying in known ways, and what registers are varying in unknown
+ ways.
+
+ If both memory references are volatile, then there must always be a
+   dependence between the two references, since their order cannot be
+   changed.  A volatile and a non-volatile reference can be interchanged,
+   though.
+
+ A MEM_IN_STRUCT reference at a non-QImode varying address can never
+ conflict with a non-MEM_IN_STRUCT reference at a fixed address. We must
+ allow QImode aliasing because the ANSI C standard allows character
+ pointers to alias anything. We are assuming that characters are
+ always QImode here. */
+
+/* Read dependence: X is read after read in MEM takes place. There can
+ only be a dependence here if both reads are volatile. */
+
+int
+read_dependence (mem, x)
+ rtx mem;
+ rtx x;
+{
+ return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
+}
+
+/* True dependence: X is read after store in MEM takes place. */
+
+int
+true_dependence (mem, x)
+ rtx mem;
+ rtx x;
+{
+ /* If X is an unchanging read, then it can't possibly conflict with any
+ non-unchanging store. It may conflict with an unchanging write though,
+ because there may be a single store to this address to initialize it.
+ Just fall through to the code below to resolve the case where we have
+ both an unchanging read and an unchanging write. This won't handle all
+ cases optimally, but the possible performance loss should be
+ negligible. */
+ if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem))
+ return 0;
+
+ return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
+ || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
+ SIZE_FOR_MODE (x), XEXP (x, 0), 0)
+ && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
+ && GET_MODE (mem) != QImode
+ && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
+ && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
+ && GET_MODE (x) != QImode
+ && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
+}
+
+/* Anti dependence: X is written after read in MEM takes place. */
+
+int
+anti_dependence (mem, x)
+ rtx mem;
+ rtx x;
+{
+ /* If MEM is an unchanging read, then it can't possibly conflict with
+ the store to X, because there is at most one store to MEM, and it must
+     have occurred somewhere before MEM.  */
+ if (RTX_UNCHANGING_P (mem))
+ return 0;
+
+ return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
+ || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
+ SIZE_FOR_MODE (x), XEXP (x, 0), 0)
+ && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
+ && GET_MODE (mem) != QImode
+ && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
+ && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
+ && GET_MODE (x) != QImode
+ && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
+}
+
+/* Output dependence: X is written after store in MEM takes place. */
+
+int
+output_dependence (mem, x)
+ rtx mem;
+ rtx x;
+{
+ return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
+ || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
+ SIZE_FOR_MODE (x), XEXP (x, 0), 0)
+ && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
+ && GET_MODE (mem) != QImode
+ && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
+ && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
+ && GET_MODE (x) != QImode
+ && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
+}
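+
+/* Taken together: for a store to (mem:SI (reg fp)) followed by a load
+   from the same address, true_dependence is nonzero (read after write);
+   for a load followed by such a store, anti_dependence is nonzero; for
+   two such stores, output_dependence is nonzero; and two loads conflict
+   only when both are volatile (read_dependence above).  */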
+
+/* Helper functions for instruction scheduling. */
+
+/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
+ LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type
+ of dependence that this link represents. */
+
+static void
+add_dependence (insn, elem, dep_type)
+ rtx insn;
+ rtx elem;
+ enum reg_note dep_type;
+{
+ rtx link, next;
+
+ /* Don't depend an insn on itself. */
+ if (insn == elem)
+ return;
+
+ /* If elem is part of a sequence that must be scheduled together, then
+ make the dependence point to the last insn of the sequence.
+ When HAVE_cc0, it is possible for NOTEs to exist between users and
+ setters of the condition codes, so we must skip past notes here.
+ Otherwise, NOTEs are impossible here. */
+
+ next = NEXT_INSN (elem);
+
+#ifdef HAVE_cc0
+ while (next && GET_CODE (next) == NOTE)
+ next = NEXT_INSN (next);
+#endif
+
+ if (next && SCHED_GROUP_P (next))
+ {
+ /* Notes will never intervene here though, so don't bother checking
+ for them. */
+ /* We must reject CODE_LABELs, so that we don't get confused by one
+ that has LABEL_PRESERVE_P set, which is represented by the same
+ bit in the rtl as SCHED_GROUP_P. A CODE_LABEL can never be
+ SCHED_GROUP_P. */
+ while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
+ && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
+ next = NEXT_INSN (next);
+
+ /* Again, don't depend an insn on itself. */
+ if (insn == next)
+ return;
+
+ /* Make the dependence to NEXT, the last insn of the group, instead
+ of the original ELEM. */
+ elem = next;
+ }
+
+ /* Check that we don't already have this dependence. */
+ for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
+ if (XEXP (link, 0) == elem)
+ {
+ /* If this is a more restrictive type of dependence than the existing
+ one, then change the existing dependence to this type. */
+ if ((int) dep_type < (int) REG_NOTE_KIND (link))
+ PUT_REG_NOTE_KIND (link, dep_type);
+ return;
+ }
+ /* Might want to check one level of transitivity to save conses. */
+
+ link = rtx_alloc (INSN_LIST);
+ /* Insn dependency, not data dependency. */
+ PUT_REG_NOTE_KIND (link, dep_type);
+ XEXP (link, 0) = elem;
+ XEXP (link, 1) = LOG_LINKS (insn);
+ LOG_LINKS (insn) = link;
+}
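+
+/* For example, if INSN already has an anti-dependence link on ELEM and a
+   true dependence (REG_NOTE_KIND 0) is then added for the same ELEM, the
+   existing link is upgraded in place rather than duplicated, since
+   numerically smaller kinds are the more restrictive ones.  */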
+
+/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
+ of INSN. Abort if not found. */
+
+static void
+remove_dependence (insn, elem)
+ rtx insn;
+ rtx elem;
+{
+ rtx prev, link;
+ int found = 0;
+
+ for (prev = 0, link = LOG_LINKS (insn); link;
+ prev = link, link = XEXP (link, 1))
+ {
+ if (XEXP (link, 0) == elem)
+ {
+ if (prev)
+ XEXP (prev, 1) = XEXP (link, 1);
+ else
+ LOG_LINKS (insn) = XEXP (link, 1);
+ found = 1;
+ }
+ }
+
+ if (! found)
+ abort ();
+ return;
+}
+
+#ifndef INSN_SCHEDULING
+void
+schedule_insns (dump_file)
+ FILE *dump_file;
+{
+}
+#else
+#ifndef __GNUC__
+#define __inline
+#endif
+
+/* Computation of memory dependencies. */
+
+/* The *_insns and *_mems are paired lists.  Each pending memory operation
+   will have a pointer to the MEM rtx on one list and a pointer to the
+   containing insn at the same position on the other list.  */
+
+/* We can't use add_dependence like the old code did, because a single insn
+ may have multiple memory accesses, and hence needs to be on the list
+ once for each memory access. Add_dependence won't let you add an insn
+ to a list more than once. */
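+
+/* So an insn I that reads two locations A and B appears twice on
+   pending_read_insns, once for each MEM, and A and B occupy the
+   corresponding positions on pending_read_mems; walking the two lists
+   in step pairs each MEM with its insn.  */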
+
+/* An INSN_LIST containing all insns with pending read operations. */
+static rtx pending_read_insns;
+
+/* An EXPR_LIST containing all MEM rtx's which are pending reads. */
+static rtx pending_read_mems;
+
+/* An INSN_LIST containing all insns with pending write operations. */
+static rtx pending_write_insns;
+
+/* An EXPR_LIST containing all MEM rtx's which are pending writes. */
+static rtx pending_write_mems;
+
+/* Indicates the combined length of the two pending lists.  We must prevent
+   these lists from ever growing too large, since the number of dependencies
+   produced, and hence the execution time, grows at least quadratically in
+   the length of these pending lists.  */
+
+static int pending_lists_length;
+
+/* An INSN_LIST containing all INSN_LISTs allocated but currently unused. */
+
+static rtx unused_insn_list;
+
+/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused. */
+
+static rtx unused_expr_list;
+
+/* The last insn upon which all memory references must depend.
+ This is an insn which flushed the pending lists, creating a dependency
+ between it and all previously pending memory references. This creates
+ a barrier (or a checkpoint) which no memory reference is allowed to cross.
+
+   This includes all non-constant CALL_INSNs.  When we do interprocedural
+ alias analysis, this restriction can be relaxed.
+ This may also be an INSN that writes memory if the pending lists grow
+ too large. */
+
+static rtx last_pending_memory_flush;
+
+/* The last function call we have seen. All hard regs, and, of course,
+ the last function call, must depend on this. */
+
+static rtx last_function_call;
+
+/* The LOG_LINKS field of this is a list of insns which use a pseudo register
+   that does not already cross a call.  We create dependencies between each
+   of those insns and the next call insn, to ensure that they won't cross a
+   call after scheduling is done.  */
+
+static rtx sched_before_next_call;
+
+/* Pointer to the last instruction scheduled. Used by rank_for_schedule,
+ so that insns independent of the last scheduled insn will be preferred
+ over dependent instructions. */
+
+static rtx last_scheduled_insn;
+
+/* Process an insn's memory dependencies. There are four kinds of
+ dependencies:
+
+ (0) read dependence: read follows read
+ (1) true dependence: read follows write
+ (2) anti dependence: write follows read
+ (3) output dependence: write follows write
+
+ We are careful to build only dependencies which actually exist, and
+ use transitivity to avoid building too many links. */
+
+/* Return the INSN_LIST containing INSN in LIST, or NULL
+ if LIST does not contain INSN. */
+
+__inline static rtx
+find_insn_list (insn, list)
+ rtx insn;
+ rtx list;
+{
+ while (list)
+ {
+ if (XEXP (list, 0) == insn)
+ return list;
+ list = XEXP (list, 1);
+ }
+ return 0;
+}
+
+/* Compute the function units used by INSN. This caches the value
+ returned by function_units_used. A function unit is encoded as the
+   unit number if the value is non-negative and the complement of a
+ mask if the value is negative. A function unit index is the
+ non-negative encoding. */
+
+__inline static int
+insn_unit (insn)
+ rtx insn;
+{
+ register int unit = INSN_UNIT (insn);
+
+ if (unit == 0)
+ {
+ recog_memoized (insn);
+
+ /* A USE insn, or something else we don't need to understand.
+ We can't pass these directly to function_units_used because it will
+ trigger a fatal error for unrecognizable insns. */
+ if (INSN_CODE (insn) < 0)
+ unit = -1;
+ else
+ {
+ unit = function_units_used (insn);
+ /* Increment non-negative values so we can cache zero. */
+ if (unit >= 0) unit++;
+ }
+ /* We only cache 16 bits of the result, so if the value is out of
+ range, don't cache it. */
+ if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
+ || unit >= 0
+ || (~unit & ((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
+ INSN_UNIT (insn) = unit;
+ }
+ return (unit > 0 ? unit - 1 : unit);
+}
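+
+/* Under this encoding, an insn needing only unit 2 yields 2, while an
+   insn needing units 1 and 3 yields ~((1 << 1) | (1 << 3)) == -11;
+   prepare_unit and schedule_unit below recover the mask by complementing
+   the value and walking its bits.  */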
+
+/* Compute the blockage range for executing INSN on UNIT. This caches
+ the value returned by the blockage_range_function for the unit.
+ These values are encoded in an int where the upper half gives the
+ minimum value and the lower half gives the maximum value. */
+
+__inline static unsigned int
+blockage_range (unit, insn)
+ int unit;
+ rtx insn;
+{
+ unsigned int blockage = INSN_BLOCKAGE (insn);
+ unsigned int range;
+
+ if (UNIT_BLOCKED (blockage) != unit + 1)
+ {
+ range = function_units[unit].blockage_range_function (insn);
+ /* We only cache the blockage range for one unit and then only if
+ the values fit. */
+ if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
+ INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
+ }
+ else
+ range = BLOCKAGE_RANGE (blockage);
+
+ return range;
+}
+
+/* A vector indexed by function unit instance giving the last insn to use
+ the unit. The value of the function unit instance index for unit U
+ instance I is (U + I * FUNCTION_UNITS_SIZE). */
+static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
+
+/* A vector indexed by function unit instance giving the minimum time when
+ the unit will unblock based on the maximum blockage cost. */
+static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
+
+/* A vector indexed by function unit number giving the number of insns
+ that remain to use the unit. */
+static int unit_n_insns[FUNCTION_UNITS_SIZE];
+
+/* Reset the function unit state to the null state. */
+
+static void
+clear_units ()
+{
+ bzero ((char *) unit_last_insn, sizeof (unit_last_insn));
+ bzero ((char *) unit_tick, sizeof (unit_tick));
+ bzero ((char *) unit_n_insns, sizeof (unit_n_insns));
+}
+
+/* Record an insn as one that will use the units encoded by UNIT. */
+
+__inline static void
+prepare_unit (unit)
+ int unit;
+{
+ int i;
+
+ if (unit >= 0)
+ unit_n_insns[unit]++;
+ else
+ for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
+ if ((unit & 1) != 0)
+ prepare_unit (i);
+}
+
+/* Return the actual hazard cost of executing INSN on the unit UNIT,
+ instance INSTANCE at time CLOCK if the previous actual hazard cost
+ was COST. */
+
+__inline static int
+actual_hazard_this_instance (unit, instance, insn, clock, cost)
+ int unit, instance, clock, cost;
+ rtx insn;
+{
+ int tick = unit_tick[instance];
+
+ if (tick - clock > cost)
+ {
+ /* The scheduler is operating in reverse, so INSN is the executing
+ insn and the unit's last insn is the candidate insn. We want a
+ more exact measure of the blockage if we execute INSN at CLOCK
+ given when we committed the execution of the unit's last insn.
+
+ The blockage value is given by either the unit's max blockage
+ constant, blockage range function, or blockage function. Use
+ the most exact form for the given unit. */
+
+ if (function_units[unit].blockage_range_function)
+ {
+ if (function_units[unit].blockage_function)
+ tick += (function_units[unit].blockage_function
+ (insn, unit_last_insn[instance])
+ - function_units[unit].max_blockage);
+ else
+ tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
+ - function_units[unit].max_blockage);
+ }
+ if (tick - clock > cost)
+ cost = tick - clock;
+ }
+ return cost;
+}
+
+/* Record INSN as having begun execution on the units encoded by UNIT at
+ time CLOCK. */
+
+__inline static void
+schedule_unit (unit, insn, clock)
+ int unit, clock;
+ rtx insn;
+{
+ int i;
+
+ if (unit >= 0)
+ {
+ int instance = unit;
+#if MAX_MULTIPLICITY > 1
+ /* Find the first free instance of the function unit and use that
+ one. We assume that one is free. */
+ for (i = function_units[unit].multiplicity - 1; i > 0; i--)
+ {
+ if (! actual_hazard_this_instance (unit, instance, insn, clock, 0))
+ break;
+ instance += FUNCTION_UNITS_SIZE;
+ }
+#endif
+ unit_last_insn[instance] = insn;
+ unit_tick[instance] = (clock + function_units[unit].max_blockage);
+ }
+ else
+ for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
+ if ((unit & 1) != 0)
+ schedule_unit (i, insn, clock);
+}
+
+/* Return the actual hazard cost of executing INSN on the units encoded by
+ UNIT at time CLOCK if the previous actual hazard cost was COST. */
+
+__inline static int
+actual_hazard (unit, insn, clock, cost)
+ int unit, clock, cost;
+ rtx insn;
+{
+ int i;
+
+ if (unit >= 0)
+ {
+ /* Find the instance of the function unit with the minimum hazard. */
+ int instance = unit;
+ int best_cost = actual_hazard_this_instance (unit, instance, insn,
+ clock, cost);
+ int this_cost;
+
+#if MAX_MULTIPLICITY > 1
+ if (best_cost > cost)
+ {
+ for (i = function_units[unit].multiplicity - 1; i > 0; i--)
+ {
+ instance += FUNCTION_UNITS_SIZE;
+ this_cost = actual_hazard_this_instance (unit, instance, insn,
+ clock, cost);
+ if (this_cost < best_cost)
+ {
+ best_cost = this_cost;
+ if (this_cost <= cost)
+ break;
+ }
+ }
+ }
+#endif
+ cost = MAX (cost, best_cost);
+ }
+ else
+ for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
+ if ((unit & 1) != 0)
+ cost = actual_hazard (i, insn, clock, cost);
+
+ return cost;
+}
+
+/* Return the potential hazard cost of executing an instruction on the
+ units encoded by UNIT if the previous potential hazard cost was COST.
+ An insn with a large blockage time is chosen in preference to one
+ with a smaller time; an insn that uses a unit that is more likely
+ to be used is chosen in preference to one with a unit that is less
+ used. We are trying to minimize a subsequent actual hazard. */
+
+__inline static int
+potential_hazard (unit, insn, cost)
+ int unit, cost;
+ rtx insn;
+{
+ int i, ncost;
+ unsigned int minb, maxb;
+
+ if (unit >= 0)
+ {
+ minb = maxb = function_units[unit].max_blockage;
+ if (maxb > 1)
+ {
+ if (function_units[unit].blockage_range_function)
+ {
+ maxb = minb = blockage_range (unit, insn);
+ maxb = MAX_BLOCKAGE_COST (maxb);
+ minb = MIN_BLOCKAGE_COST (minb);
+ }
+
+ if (maxb > 1)
+ {
+ /* Make the number of instructions left dominate. Make the
+ minimum delay dominate the maximum delay. If all these
+ are the same, use the unit number to add an arbitrary
+ ordering. Other terms can be added. */
+ ncost = minb * 0x40 + maxb;
+ ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
+ if (ncost > cost)
+ cost = ncost;
+ }
+ }
+ }
+ else
+ for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
+ if ((unit & 1) != 0)
+ cost = potential_hazard (i, insn, cost);
+
+ return cost;
+}
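+
+/* Numerically, say a unit has blockage range [2, 5], unit number 1, and
+   four insns left to use it (unit_n_insns == 4); it then scores
+   (2 * 0x40 + 5) * (3 * 0x1000 + 1).  The 0x1000 factor makes the count
+   of remaining insns dominate, and the 0x40 factor makes the minimum
+   delay dominate the maximum, as described above.  */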
+
+/* Compute cost of executing INSN given the dependence LINK on the insn USED.
+ This is the number of virtual cycles taken between instruction issue and
+ instruction results. */
+
+__inline static int
+insn_cost (insn, link, used)
+ rtx insn, link, used;
+{
+ register int cost = INSN_COST (insn);
+
+ if (cost == 0)
+ {
+ recog_memoized (insn);
+
+ /* A USE insn, or something else we don't need to understand.
+ We can't pass these directly to result_ready_cost because it will
+ trigger a fatal error for unrecognizable insns. */
+ if (INSN_CODE (insn) < 0)
+ {
+ INSN_COST (insn) = 1;
+ return 1;
+ }
+ else
+ {
+ cost = result_ready_cost (insn);
+
+ if (cost < 1)
+ cost = 1;
+
+ INSN_COST (insn) = cost;
+ }
+ }
+
+ /* A USE insn should never require the value used to be computed. This
+ allows the computation of a function's result and parameter values to
+ overlap the return and call. */
+ recog_memoized (used);
+ if (INSN_CODE (used) < 0)
+ LINK_COST_FREE (link) = 1;
+
+ /* If some dependencies vary the cost, compute the adjustment. Most
+ commonly, the adjustment is complete: either the cost is ignored
+ (in the case of an output- or anti-dependence), or the cost is
+ unchanged. These values are cached in the link as LINK_COST_FREE
+ and LINK_COST_ZERO. */
+
+ if (LINK_COST_FREE (link))
+ cost = 1;
+#ifdef ADJUST_COST
+ else if (! LINK_COST_ZERO (link))
+ {
+ int ncost = cost;
+
+ ADJUST_COST (used, link, insn, ncost);
+ if (ncost <= 1)
+ LINK_COST_FREE (link) = ncost = 1;
+ if (cost == ncost)
+ LINK_COST_ZERO (link) = 1;
+ cost = ncost;
+ }
+#endif
+ return cost;
+}
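+
+/* For instance, on a machine whose ADJUST_COST lowers the cost of an
+   anti-dependence link to 0, the first call here clamps the cost to 1,
+   sets LINK_COST_FREE on the link, and returns 1; later calls for the
+   same link take the cached LINK_COST_FREE path and skip ADJUST_COST
+   entirely.  */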
+
+/* Compute the priority number for INSN. */
+
+static int
+priority (insn)
+ rtx insn;
+{
+ if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ int prev_priority;
+ int max_priority;
+ int this_priority = INSN_PRIORITY (insn);
+ rtx prev;
+
+ if (this_priority > 0)
+ return this_priority;
+
+ max_priority = 1;
+
+ /* Nonzero if these insns must be scheduled together. */
+ if (SCHED_GROUP_P (insn))
+ {
+ prev = insn;
+ while (SCHED_GROUP_P (prev))
+ {
+ prev = PREV_INSN (prev);
+ INSN_REF_COUNT (prev) += 1;
+ }
+ }
+
+ for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
+ {
+ rtx x = XEXP (prev, 0);
+
+ /* A dependence pointing to a note or deleted insn is always
+ obsolete, because sched_analyze_insn will have created any
+ necessary new dependences which replace it. Notes and deleted
+ insns can be created when instructions are deleted by insn
+ splitting, or by register allocation. */
+ if (GET_CODE (x) == NOTE || INSN_DELETED_P (x))
+ {
+ remove_dependence (insn, x);
+ continue;
+ }
+
+ /* Clear the link cost adjustment bits. */
+ LINK_COST_FREE (prev) = 0;
+#ifdef ADJUST_COST
+ LINK_COST_ZERO (prev) = 0;
+#endif
+
+ /* This priority calculation was chosen because it results in the
+ least instruction movement, and does not hurt the performance
+ of the resulting code compared to the old algorithm.
+ This makes the sched algorithm more stable, which results
+ in better code, because there is less register pressure,
+ cross jumping is more likely to work, and debugging is easier.
+
+ When all instructions have a latency of 1, there is no need to
+ move any instructions. Subtracting one here ensures that in such
+ cases all instructions will end up with a priority of one, and
+ hence no scheduling will be done.
+
+ The original code did not subtract the one, and added the
+ insn_cost of the current instruction to its priority (e.g.
+ move the insn_cost call down to the end). */
+
+ prev_priority = priority (x) + insn_cost (x, prev, insn) - 1;
+
+ if (prev_priority > max_priority)
+ max_priority = prev_priority;
+ INSN_REF_COUNT (x) += 1;
+ }
+
+ prepare_unit (insn_unit (insn));
+ INSN_PRIORITY (insn) = max_priority;
+ return INSN_PRIORITY (insn);
+ }
+ return 0;
+}
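+
+/* As an example of the calculation above: if insn C depends on B with a
+   2-cycle latency and B depends on A with a 3-cycle latency, then
+   priority (A) = 1, priority (B) = 1 + 3 - 1 = 3, and
+   priority (C) = 3 + 2 - 1 = 4.  When every latency is 1, every insn
+   gets priority 1 and no reordering results, as noted above.  */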
+
+/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
+ them to the unused_*_list variables, so that they can be reused. */
+
+static void
+free_pending_lists ()
+{
+ register rtx link, prev_link;
+
+ if (pending_read_insns)
+ {
+ prev_link = pending_read_insns;
+ link = XEXP (prev_link, 1);
+
+ while (link)
+ {
+ prev_link = link;
+ link = XEXP (link, 1);
+ }
+
+ XEXP (prev_link, 1) = unused_insn_list;
+ unused_insn_list = pending_read_insns;
+ pending_read_insns = 0;
+ }
+
+ if (pending_write_insns)
+ {
+ prev_link = pending_write_insns;
+ link = XEXP (prev_link, 1);
+
+ while (link)
+ {
+ prev_link = link;
+ link = XEXP (link, 1);
+ }
+
+ XEXP (prev_link, 1) = unused_insn_list;
+ unused_insn_list = pending_write_insns;
+ pending_write_insns = 0;
+ }
+
+ if (pending_read_mems)
+ {
+ prev_link = pending_read_mems;
+ link = XEXP (prev_link, 1);
+
+ while (link)
+ {
+ prev_link = link;
+ link = XEXP (link, 1);
+ }
+
+ XEXP (prev_link, 1) = unused_expr_list;
+ unused_expr_list = pending_read_mems;
+ pending_read_mems = 0;
+ }
+
+ if (pending_write_mems)
+ {
+ prev_link = pending_write_mems;
+ link = XEXP (prev_link, 1);
+
+ while (link)
+ {
+ prev_link = link;
+ link = XEXP (link, 1);
+ }
+
+ XEXP (prev_link, 1) = unused_expr_list;
+ unused_expr_list = pending_write_mems;
+ pending_write_mems = 0;
+ }
+}
+
+/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
+ The MEM is a memory reference contained within INSN, which we are saving
+ so that we can do memory aliasing on it. */
+
+static void
+add_insn_mem_dependence (insn_list, mem_list, insn, mem)
+ rtx *insn_list, *mem_list, insn, mem;
+{
+ register rtx link;
+
+ if (unused_insn_list)
+ {
+ link = unused_insn_list;
+ unused_insn_list = XEXP (link, 1);
+ }
+ else
+ link = rtx_alloc (INSN_LIST);
+ XEXP (link, 0) = insn;
+ XEXP (link, 1) = *insn_list;
+ *insn_list = link;
+
+ if (unused_expr_list)
+ {
+ link = unused_expr_list;
+ unused_expr_list = XEXP (link, 1);
+ }
+ else
+ link = rtx_alloc (EXPR_LIST);
+ XEXP (link, 0) = mem;
+ XEXP (link, 1) = *mem_list;
+ *mem_list = link;
+
+ pending_lists_length++;
+}
+
+/* Make a dependency between every memory reference on the pending lists
+ and INSN, thus flushing the pending lists. */
+
+static void
+flush_pending_lists (insn)
+ rtx insn;
+{
+ rtx link;
+
+ while (pending_read_insns)
+ {
+ add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);
+
+ link = pending_read_insns;
+ pending_read_insns = XEXP (pending_read_insns, 1);
+ XEXP (link, 1) = unused_insn_list;
+ unused_insn_list = link;
+
+ link = pending_read_mems;
+ pending_read_mems = XEXP (pending_read_mems, 1);
+ XEXP (link, 1) = unused_expr_list;
+ unused_expr_list = link;
+ }
+ while (pending_write_insns)
+ {
+ add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);
+
+ link = pending_write_insns;
+ pending_write_insns = XEXP (pending_write_insns, 1);
+ XEXP (link, 1) = unused_insn_list;
+ unused_insn_list = link;
+
+ link = pending_write_mems;
+ pending_write_mems = XEXP (pending_write_mems, 1);
+ XEXP (link, 1) = unused_expr_list;
+ unused_expr_list = link;
+ }
+ pending_lists_length = 0;
+
+ if (last_pending_memory_flush)
+ add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
+
+ last_pending_memory_flush = insn;
+}
+
+/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
+ by the write to the destination of X, and reads of everything mentioned. */
+
+static void
+sched_analyze_1 (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register int regno;
+ register rtx dest = SET_DEST (x);
+
+ if (dest == 0)
+ return;
+
+ while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
+ {
+ if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
+ {
+ /* The second and third arguments are values read by this insn. */
+ sched_analyze_2 (XEXP (dest, 1), insn);
+ sched_analyze_2 (XEXP (dest, 2), insn);
+ }
+ dest = SUBREG_REG (dest);
+ }
+
+ if (GET_CODE (dest) == REG)
+ {
+ register int i;
+
+ regno = REGNO (dest);
+
+ /* A hard reg in a wide mode may really be multiple registers.
+ If so, mark all of them just like the first. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
+ while (--i >= 0)
+ {
+ rtx u;
+
+ for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ reg_last_uses[regno + i] = 0;
+ if (reg_last_sets[regno + i])
+ add_dependence (insn, reg_last_sets[regno + i],
+ REG_DEP_OUTPUT);
+ reg_pending_sets[(regno + i) / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << ((regno + i) % REGSET_ELT_BITS);
+		  if ((call_used_regs[regno + i] || global_regs[regno + i])
+ && last_function_call)
+ /* Function calls clobber all call_used regs. */
+ add_dependence (insn, last_function_call, REG_DEP_ANTI);
+ }
+ }
+ else
+ {
+ rtx u;
+
+ for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ reg_last_uses[regno] = 0;
+ if (reg_last_sets[regno])
+ add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
+ reg_pending_sets[regno / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
+
+ /* Pseudos that are REG_EQUIV to something may be replaced
+ by that during reloading. We need only add dependencies for
+ the address in the REG_EQUIV note. */
+ if (! reload_completed
+ && reg_known_equiv_p[regno]
+ && GET_CODE (reg_known_value[regno]) == MEM)
+ sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
+
+ /* Don't let it cross a call after scheduling if it doesn't
+ already cross one. */
+ if (reg_n_calls_crossed[regno] == 0 && last_function_call)
+ add_dependence (insn, last_function_call, REG_DEP_ANTI);
+ }
+ }
+ else if (GET_CODE (dest) == MEM)
+ {
+ /* Writing memory. */
+
+ if (pending_lists_length > 32)
+ {
+ /* Flush all pending reads and writes to prevent the pending lists
+ from getting any larger. Insn scheduling runs too slowly when
+ these lists get long. The number 32 was chosen because it
+ seems like a reasonable number. When compiling GCC with itself,
+ this flush occurs 8 times for sparc, and 10 times for m88k using
+ the number 32. */
+ flush_pending_lists (insn);
+ }
+ else
+ {
+ rtx pending, pending_mem;
+
+ pending = pending_read_insns;
+ pending_mem = pending_read_mems;
+ while (pending)
+ {
+ /* If a dependency already exists, don't create a new one. */
+ if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
+ if (anti_dependence (XEXP (pending_mem, 0), dest))
+ add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
+
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
+
+ pending = pending_write_insns;
+ pending_mem = pending_write_mems;
+ while (pending)
+ {
+ /* If a dependency already exists, don't create a new one. */
+ if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
+ if (output_dependence (XEXP (pending_mem, 0), dest))
+ add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
+
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
+
+ if (last_pending_memory_flush)
+ add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
+
+ add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
+ insn, dest);
+ }
+ sched_analyze_2 (XEXP (dest, 0), insn);
+ }
+
+ /* Analyze reads. */
+ if (GET_CODE (x) == SET)
+ sched_analyze_2 (SET_SRC (x), insn);
+}
+
+/* Analyze the uses of memory and registers in rtx X in INSN. */
+
+static void
+sched_analyze_2 (x, insn)
+ rtx x;
+ rtx insn;
+{
+ register int i;
+ register int j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CONST:
+ case LABEL_REF:
+ /* Ignore constants. Note that we must handle CONST_DOUBLE here
+ because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
+ this does not mean that this insn is using cc0. */
+ return;
+
+#ifdef HAVE_cc0
+ case CC0:
+ {
+ rtx link, prev;
+
+ /* There may be a note before this insn now, but all notes will
+ be removed before we actually try to schedule the insns, so
+ it won't cause a problem later. We must avoid it here though. */
+
+ /* User of CC0 depends on immediately preceding insn. */
+ SCHED_GROUP_P (insn) = 1;
+
+ /* Make a copy of all dependencies on the immediately previous insn,
+ and add to this insn. This is so that all the dependencies will
+ apply to the group. Remove an explicit dependence on this insn
+ as SCHED_GROUP_P now represents it. */
+
+ prev = PREV_INSN (insn);
+ while (GET_CODE (prev) == NOTE)
+ prev = PREV_INSN (prev);
+
+ if (find_insn_list (prev, LOG_LINKS (insn)))
+ remove_dependence (insn, prev);
+
+ for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
+ add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
+
+ return;
+ }
+#endif
+
+ case REG:
+ {
+ int regno = REGNO (x);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int i;
+
+ i = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (--i >= 0)
+ {
+ reg_last_uses[regno + i]
+ = gen_rtx (INSN_LIST, VOIDmode,
+ insn, reg_last_uses[regno + i]);
+ if (reg_last_sets[regno + i])
+ add_dependence (insn, reg_last_sets[regno + i], 0);
+ if ((call_used_regs[regno + i] || global_regs[regno + i])
+ && last_function_call)
+ /* Function calls clobber all call_used regs. */
+ add_dependence (insn, last_function_call, REG_DEP_ANTI);
+ }
+ }
+ else
+ {
+ reg_last_uses[regno]
+ = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
+ if (reg_last_sets[regno])
+ add_dependence (insn, reg_last_sets[regno], 0);
+
+ /* Pseudos that are REG_EQUIV to something may be replaced
+ by that during reloading. We need only add dependencies for
+ the address in the REG_EQUIV note. */
+ if (! reload_completed
+ && reg_known_equiv_p[regno]
+ && GET_CODE (reg_known_value[regno]) == MEM)
+ sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
+
+ /* If the register does not already cross any calls, then add this
+ insn to the sched_before_next_call list so that it will still
+ not cross calls after scheduling. */
+ if (reg_n_calls_crossed[regno] == 0)
+ add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
+ }
+ return;
+ }
+
+ case MEM:
+ {
+ /* Reading memory. */
+
+ rtx pending, pending_mem;
+
+ pending = pending_read_insns;
+ pending_mem = pending_read_mems;
+ while (pending)
+ {
+ /* If a dependency already exists, don't create a new one. */
+ if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
+ if (read_dependence (XEXP (pending_mem, 0), x))
+ add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
+
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
+
+ pending = pending_write_insns;
+ pending_mem = pending_write_mems;
+ while (pending)
+ {
+ /* If a dependency already exists, don't create a new one. */
+ if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
+ if (true_dependence (XEXP (pending_mem, 0), x))
+ add_dependence (insn, XEXP (pending, 0), 0);
+
+ pending = XEXP (pending, 1);
+ pending_mem = XEXP (pending_mem, 1);
+ }
+ if (last_pending_memory_flush)
+ add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
+
+ /* Always add these dependencies to pending_reads, since
+ this insn may be followed by a write. */
+ add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
+ insn, x);
+
+ /* Take advantage of tail recursion here. */
+ sched_analyze_2 (XEXP (x, 0), insn);
+ return;
+ }
+
+ case ASM_OPERANDS:
+ case ASM_INPUT:
+ case UNSPEC_VOLATILE:
+ case TRAP_IF:
+ {
+ rtx u;
+
+ /* Traditional and volatile asm instructions must be considered to use
+ and clobber all hard registers, all pseudo-registers and all of
+ memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
+
+ Consider for instance a volatile asm that changes the fpu rounding
+ mode. An insn should not be moved across this even if it only uses
+ pseudo-regs because it might give an incorrectly rounded result. */
+ if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
+ {
+ int max_reg = max_reg_num ();
+ for (i = 0; i < max_reg; i++)
+ {
+ for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ reg_last_uses[i] = 0;
+ if (reg_last_sets[i])
+ add_dependence (insn, reg_last_sets[i], 0);
+ }
+ reg_pending_sets_all = 1;
+
+ flush_pending_lists (insn);
+ }
+
+      /* For all ASM_OPERANDS, we must traverse the vector of input operands.
+	 We cannot just fall through here, since then we would be confused
+	 by the ASM_INPUT rtxs inside the ASM_OPERANDS, which, unlike their
+	 normal usage, do not indicate traditional asms.  */
+
+ if (code == ASM_OPERANDS)
+ {
+ for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
+ sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
+ return;
+ }
+ break;
+ }
+
+ case PRE_DEC:
+ case POST_DEC:
+ case PRE_INC:
+ case POST_INC:
+ /* These both read and modify the result. We must handle them as writes
+ to get proper dependencies for following instructions. We must handle
+ them as reads to get proper dependencies from this to previous
+ instructions. Thus we need to pass them to both sched_analyze_1
+ and sched_analyze_2. We must call sched_analyze_2 first in order
+ to get the proper antecedent for the read. */
+ sched_analyze_2 (XEXP (x, 0), insn);
+ sched_analyze_1 (x, insn);
+ return;
+ }
+
+ /* Other cases: walk the insn. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ sched_analyze_2 (XEXP (x, i), insn);
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ sched_analyze_2 (XVECEXP (x, i, j), insn);
+ }
+}
+
+/* Analyze an INSN with pattern X to find all dependencies. */
+
+static void
+sched_analyze_insn (x, insn)
+ rtx x, insn;
+{
+ register RTX_CODE code = GET_CODE (x);
+ rtx link;
+ int maxreg = max_reg_num ();
+ int i;
+
+ if (code == SET || code == CLOBBER)
+ sched_analyze_1 (x, insn);
+ else if (code == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ code = GET_CODE (XVECEXP (x, 0, i));
+ if (code == SET || code == CLOBBER)
+ sched_analyze_1 (XVECEXP (x, 0, i), insn);
+ else
+ sched_analyze_2 (XVECEXP (x, 0, i), insn);
+ }
+ }
+ else
+ sched_analyze_2 (x, insn);
+
+ /* Mark registers CLOBBERED or used by called function. */
+ if (GET_CODE (insn) == CALL_INSN)
+ for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
+ {
+ if (GET_CODE (XEXP (link, 0)) == CLOBBER)
+ sched_analyze_1 (XEXP (link, 0), insn);
+ else
+ sched_analyze_2 (XEXP (link, 0), insn);
+ }
+
+ /* After reload, it is possible for an instruction to have a REG_DEAD note
+ for a register that actually dies a few instructions earlier. For
+ example, this can happen with SECONDARY_MEMORY_NEEDED reloads.
+ In this case, we must consider the insn to use the register mentioned
+ in the REG_DEAD note. Otherwise, we may accidentally move this insn
+ after another insn that sets the register, thus getting obviously invalid
+ rtl. This confuses reorg which believes that REG_DEAD notes are still
+ meaningful.
+
+ ??? We would get better code if we fixed reload to put the REG_DEAD
+ notes in the right places, but that may not be worth the effort. */
+
+ if (reload_completed)
+ {
+ rtx note;
+
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD)
+ sched_analyze_2 (XEXP (note, 0), insn);
+ }
+
+ for (i = 0; i < regset_size; i++)
+ {
+ REGSET_ELT_TYPE sets = reg_pending_sets[i];
+ if (sets)
+ {
+ register int bit;
+ for (bit = 0; bit < REGSET_ELT_BITS; bit++)
+ if (sets & ((REGSET_ELT_TYPE) 1 << bit))
+ reg_last_sets[i * REGSET_ELT_BITS + bit] = insn;
+ reg_pending_sets[i] = 0;
+ }
+ }
+ if (reg_pending_sets_all)
+ {
+ for (i = 0; i < maxreg; i++)
+ reg_last_sets[i] = insn;
+ reg_pending_sets_all = 0;
+ }
+
+ /* Handle function calls and function returns created by the epilogue
+ threading code. */
+ if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
+ {
+ rtx dep_insn;
+ rtx prev_dep_insn;
+
+      /* When scheduling instructions, we make sure calls don't lose their
+	 accompanying USE insns by chaining them to one another in order.
+
+ Also, we must do the same thing for returns created by the epilogue
+ threading code. Note this code works only in this special case,
+ because other passes make no guarantee that they will never emit
+ an instruction between a USE and a RETURN. There is such a guarantee
+ for USE instructions immediately before a call. */
+
+ prev_dep_insn = insn;
+ dep_insn = PREV_INSN (insn);
+ while (GET_CODE (dep_insn) == INSN
+ && GET_CODE (PATTERN (dep_insn)) == USE)
+ {
+ SCHED_GROUP_P (prev_dep_insn) = 1;
+
+ /* Make a copy of all dependencies on dep_insn, and add to insn.
+ This is so that all of the dependencies will apply to the
+ group. */
+
+ for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
+ add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
+
+ prev_dep_insn = dep_insn;
+ dep_insn = PREV_INSN (dep_insn);
+ }
+ }
+}
+
+/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
+ for every dependency. */
+
+static int
+sched_analyze (head, tail)
+ rtx head, tail;
+{
+ register rtx insn;
+ register int n_insns = 0;
+ register rtx u;
+ register int luid = 0;
+
+ for (insn = head; ; insn = NEXT_INSN (insn))
+ {
+ INSN_LUID (insn) = luid++;
+
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ {
+ sched_analyze_insn (PATTERN (insn), insn);
+ n_insns += 1;
+ }
+ else if (GET_CODE (insn) == CALL_INSN)
+ {
+ rtx x;
+ register int i;
+
+ /* Any instruction using a hard register which may get clobbered
+ by a call needs to be marked as dependent on this call.
+ This prevents a use of a hard return reg from being moved
+ past a void call (i.e. it does not explicitly set the hard
+ return reg). */
+
+ /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
+ all registers, not just hard registers, may be clobbered by this
+ call. */
+
+ /* Insn, being a CALL_INSN, magically depends on
+ `last_function_call' already. */
+
+ if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
+ && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
+ {
+ int max_reg = max_reg_num ();
+ for (i = 0; i < max_reg; i++)
+ {
+ for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ reg_last_uses[i] = 0;
+ if (reg_last_sets[i])
+ add_dependence (insn, reg_last_sets[i], 0);
+ }
+ reg_pending_sets_all = 1;
+
+ /* Add a fake REG_NOTE which we will later convert
+ back into a NOTE_INSN_SETJMP note. */
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_DEAD, constm1_rtx,
+ REG_NOTES (insn));
+ }
+ else
+ {
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i] || global_regs[i])
+ {
+ for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ reg_last_uses[i] = 0;
+ if (reg_last_sets[i])
+ add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
+ reg_pending_sets[i / REGSET_ELT_BITS]
+ |= (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
+ }
+ }
+
+ /* For each insn which shouldn't cross a call, add a dependence
+ between that insn and this call insn. */
+ x = LOG_LINKS (sched_before_next_call);
+ while (x)
+ {
+ add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
+ x = XEXP (x, 1);
+ }
+ LOG_LINKS (sched_before_next_call) = 0;
+
+ sched_analyze_insn (PATTERN (insn), insn);
+
+ /* We don't need to flush memory for a function call which does
+ not involve memory. */
+ if (! CONST_CALL_P (insn))
+ {
+ /* In the absence of interprocedural alias analysis,
+ we must flush all pending reads and writes, and
+ start new dependencies starting from here. */
+ flush_pending_lists (insn);
+ }
+
+ /* Depend this function call (actually, the user of this
+ function call) on all hard register clobberage. */
+ last_function_call = insn;
+ n_insns += 1;
+ }
+
+ if (insn == tail)
+ return n_insns;
+ }
+}
+
+/* Called when we see a set of a register. If death is true, then we are
+ scanning backwards. Mark that register as unborn. If nobody says
+ otherwise, that is how things will remain. If death is false, then we
+ are scanning forwards. Mark that register as being born. */
+
+static void
+sched_note_set (b, x, death)
+ int b;
+ rtx x;
+ int death;
+{
+ register int regno;
+ register rtx reg = SET_DEST (x);
+ int subreg_p = 0;
+
+ if (reg == 0)
+ return;
+
+ while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
+ || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
+ {
+ /* Must treat modification of just one hardware register of a multi-reg
+ value or just a byte field of a register exactly the same way that
+ mark_set_1 in flow.c does, i.e. anything except a paradoxical subreg
+ does not kill the entire register. */
+ if (GET_CODE (reg) != SUBREG
+ || REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg))
+ subreg_p = 1;
+
+ reg = SUBREG_REG (reg);
+ }
+
+ if (GET_CODE (reg) != REG)
+ return;
+
+ /* Global registers are always live, so the code below does not apply
+ to them. */
+
+ regno = REGNO (reg);
+ if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
+ {
+ register int offset = regno / REGSET_ELT_BITS;
+ register REGSET_ELT_TYPE bit
+ = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
+
+ if (death)
+ {
+ /* If we only set part of the register, then this set does not
+ kill it. */
+ if (subreg_p)
+ return;
+
+ /* Try killing this register. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (--j >= 0)
+ {
+ offset = (regno + j) / REGSET_ELT_BITS;
+ bit = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
+
+ bb_live_regs[offset] &= ~bit;
+ bb_dead_regs[offset] |= bit;
+ }
+ }
+ else
+ {
+ bb_live_regs[offset] &= ~bit;
+ bb_dead_regs[offset] |= bit;
+ }
+ }
+ else
+ {
+ /* Make the register live again. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ while (--j >= 0)
+ {
+ offset = (regno + j) / REGSET_ELT_BITS;
+ bit = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
+
+ bb_live_regs[offset] |= bit;
+ bb_dead_regs[offset] &= ~bit;
+ }
+ }
+ else
+ {
+ bb_live_regs[offset] |= bit;
+ bb_dead_regs[offset] &= ~bit;
+ }
+ }
+ }
+}
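+
+/* The regset arithmetic above: with REGSET_ELT_BITS == 32, register 70
+   lives at bit 70 % 32 == 6 of element 70 / 32 == 2, so killing it
+   clears that bit in bb_live_regs[2] and sets the same bit in
+   bb_dead_regs[2].  */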
+
+/* Macros and functions for keeping the priority queue sorted, and
+ dealing with queueing and unqueueing of instructions. */
+
+#define SCHED_SORT(READY, NEW_READY, OLD_READY) \
+ do { if ((NEW_READY) - (OLD_READY) == 1) \
+ swap_sort (READY, NEW_READY); \
+ else if ((NEW_READY) - (OLD_READY) > 1) \
+ qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
+ while (0)
+
+/* Returns a positive value if y is preferred; returns a negative value if
+ x is preferred. Should never return 0, since that will make the sort
+ unstable. */
+
+static int
+rank_for_schedule (x, y)
+ rtx *x, *y;
+{
+ rtx tmp = *y;
+ rtx tmp2 = *x;
+ rtx link;
+ int tmp_class, tmp2_class;
+ int value;
+
+ /* Choose the instruction with the highest priority, if different. */
+  if ((value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2)) != 0)
+ return value;
+
+ if (last_scheduled_insn)
+ {
+ /* Classify the instructions into three classes:
+	 1) Data dependent on last scheduled insn.
+ 2) Anti/Output dependent on last scheduled insn.
+ 3) Independent of last scheduled insn, or has latency of one.
+ Choose the insn from the highest numbered class if different. */
+ link = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
+ if (link == 0 || insn_cost (tmp, link, last_scheduled_insn) == 1)
+ tmp_class = 3;
+ else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
+ tmp_class = 1;
+ else
+ tmp_class = 2;
+
+ link = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
+ if (link == 0 || insn_cost (tmp2, link, last_scheduled_insn) == 1)
+ tmp2_class = 3;
+ else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
+ tmp2_class = 1;
+ else
+ tmp2_class = 2;
+
+      if ((value = tmp_class - tmp2_class) != 0)
+ return value;
+ }
+
+ /* If insns are equally good, sort by INSN_LUID (original insn order),
+ so that we make the sort stable. This minimizes instruction movement,
+ thus minimizing sched's effect on debugging and cross-jumping. */
+ return INSN_LUID (tmp) - INSN_LUID (tmp2);
+}
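+
+/* So, of two ready insns with equal priority, one data dependent on the
+   last scheduled insn (class 1) and one independent of it (class 3),
+   the independent insn is preferred; ties within a class fall back to
+   INSN_LUID, which keeps the sort stable.  */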
+
+/* Resort the array A in which only element at index N may be out of order. */
+
+__inline static void
+swap_sort (a, n)
+ rtx *a;
+ int n;
+{
+ rtx insn = a[n-1];
+ int i = n-2;
+
+ while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
+ {
+ a[i+1] = a[i];
+ i -= 1;
+ }
+ a[i+1] = insn;
+}
+
+static int max_priority;
+
+/* Add INSN to the insn queue so that it fires at least N_CYCLES
+ before the currently executing insn. */
+
+__inline static void
+queue_insn (insn, n_cycles)
+ rtx insn;
+ int n_cycles;
+{
+ int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
+ NEXT_INSN (insn) = insn_queue[next_q];
+ insn_queue[next_q] = insn;
+ q_size += 1;
+}
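+
+/* Queued insns are linked through their NEXT_INSN fields; e.g. an insn
+   queued with n_cycles == 3 goes into the bucket NEXT_Q_AFTER (q_ptr, 3)
+   and returns to the ready list when the clock reaches that bucket.  */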
+
+/* Return nonzero if PAT is the pattern of an insn which makes a
+ register live. */
+
+__inline static int
+birthing_insn_p (pat)
+ rtx pat;
+{
+ int j;
+
+ if (reload_completed == 1)
+ return 0;
+
+ if (GET_CODE (pat) == SET
+ && GET_CODE (SET_DEST (pat)) == REG)
+ {
+ rtx dest = SET_DEST (pat);
+ int i = REGNO (dest);
+ int offset = i / REGSET_ELT_BITS;
+ REGSET_ELT_TYPE bit = (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
+
+ /* It would be more accurate to use refers_to_regno_p or
+ reg_mentioned_p to determine when the dest is not live before this
+ insn. */
+
+ if (bb_live_regs[offset] & bit)
+ return (reg_n_sets[i] == 1);
+
+ return 0;
+ }
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ for (j = 0; j < XVECLEN (pat, 0); j++)
+ if (birthing_insn_p (XVECEXP (pat, 0, j)))
+ return 1;
+ }
+ return 0;
+}
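+
+/* For example, before reload a lone (set (reg 100) ...) whose pseudo is
+   live at this point and has reg_n_sets[100] == 1 is a birthing insn.
+   adjust_priority below then raises it to max_priority, so the birth
+   lands as late as possible in the final order, which shortens the
+   register's lifetime.  */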
+
+/* PREV is an insn that is ready to execute. Adjust its priority if that
+ will help shorten register lifetimes. */
+
+__inline static void
+adjust_priority (prev)
+ rtx prev;
+{
+ /* Trying to shorten register lives after reload has completed
+ is useless and wrong. It gives inaccurate schedules. */
+ if (reload_completed == 0)
+ {
+ rtx note;
+ int n_deaths = 0;
+
+ /* ??? This code has no effect, because REG_DEAD notes are removed
+ before we ever get here. */
+ for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_DEAD)
+ n_deaths += 1;
+
+ /* Defer scheduling insns which kill registers, since that
+ shortens register lives. Prefer scheduling insns which
+ make registers live for the same reason. */
+ switch (n_deaths)
+ {
+ default:
+ INSN_PRIORITY (prev) >>= 3;
+ break;
+ case 3:
+ INSN_PRIORITY (prev) >>= 2;
+ break;
+ case 2:
+ case 1:
+ INSN_PRIORITY (prev) >>= 1;
+ break;
+ case 0:
+ if (birthing_insn_p (PATTERN (prev)))
+ {
+ int max = max_priority;
+
+ if (max > INSN_PRIORITY (prev))
+ INSN_PRIORITY (prev) = max;
+ }
+ break;
+ }
+ }
+}
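+
+/* Concretely: one or two REG_DEAD notes halve the priority (>>= 1),
+   three quarter it (>>= 2), four or more divide it by eight (>>= 3),
+   and an insn that kills nothing but is a birthing insn is raised to
+   max_priority.  As the ??? comment above notes, the REG_DEAD counting
+   is currently dead code, so in practice only the n_deaths == 0
+   birthing case ever fires.  */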
+
+/* INSN is the "currently executing insn". Launch each insn which was
+ waiting on INSN (in the backwards dataflow sense). READY is a
+ vector of insns which are ready to fire. N_READY is the number of
+ elements in READY. CLOCK is the current virtual cycle. */
+
+static int
+schedule_insn (insn, ready, n_ready, clock)
+ rtx insn;
+ rtx *ready;
+ int n_ready;
+ int clock;
+{
+ rtx link;
+ int new_ready = n_ready;
+
+ if (MAX_BLOCKAGE > 1)
+ schedule_unit (insn_unit (insn), insn, clock);
+
+ if (LOG_LINKS (insn) == 0)
+ return n_ready;
+
+ /* This is used by the function adjust_priority above. */
+ if (n_ready > 0)
+ max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
+ else
+ max_priority = INSN_PRIORITY (insn);
+
+ for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
+ {
+ rtx prev = XEXP (link, 0);
+ int cost = insn_cost (prev, link, insn);
+
+ if ((INSN_REF_COUNT (prev) -= 1) != 0)
+ {
+ /* We satisfied one requirement to fire PREV. Record the earliest
+ time when PREV can fire. No need to do this if the cost is 1,
+ because PREV can fire no sooner than the next cycle. */
+ if (cost > 1)
+ INSN_TICK (prev) = MAX (INSN_TICK (prev), clock + cost);
+ }
+ else
+ {
+ /* We satisfied the last requirement to fire PREV. Ensure that all
+ timing requirements are satisfied. */
+ if (INSN_TICK (prev) - clock > cost)
+ cost = INSN_TICK (prev) - clock;
+
+ /* Adjust the priority of PREV and either put it on the ready
+ list or queue it. */
+ adjust_priority (prev);
+ if (cost <= 1)
+ ready[new_ready++] = prev;
+ else
+ queue_insn (prev, cost);
+ }
+ }
+
+ return new_ready;
+}
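+
+/* For example, suppose PREV feeds INSN through a latency-3 link and this
+   was PREV's last unsatisfied dependence at CLOCK 10: COST is 3, so PREV
+   is queued via queue_insn (prev, 3) and surfaces at CLOCK 13.  Had the
+   cost been 1, PREV would have gone straight onto the ready list; had
+   PREV still had other dependences outstanding, only INSN_TICK would
+   have been advanced.  */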
+
+/* Given N_READY insns in the ready list READY at time CLOCK, queue
+ those that are blocked due to function unit hazards and rearrange
+ the remaining ones to minimize subsequent function unit hazards. */
+
+static int
+schedule_select (ready, n_ready, clock, file)
+ rtx *ready;
+ int n_ready, clock;
+ FILE *file;
+{
+ int pri = INSN_PRIORITY (ready[0]);
+ int i, j, k, q, cost, best_cost, best_insn = 0, new_ready = n_ready;
+ rtx insn;
+
+ /* Work down the ready list in groups of instructions with the same
+ priority value. Queue insns in the group that are blocked and
+ select among those that remain for the one with the largest
+ potential hazard. */
+ for (i = 0; i < n_ready; i = j)
+ {
+ int opri = pri;
+ for (j = i + 1; j < n_ready; j++)
+ if ((pri = INSN_PRIORITY (ready[j])) != opri)
+ break;
+
+ /* Queue insns in the group that are blocked. */
+ for (k = i, q = 0; k < j; k++)
+ {
+ insn = ready[k];
+ if ((cost = actual_hazard (insn_unit (insn), insn, clock, 0)) != 0)
+ {
+ q++;
+ ready[k] = 0;
+ queue_insn (insn, cost);
+ if (file)
+ fprintf (file, "\n;; blocking insn %d for %d cycles",
+ INSN_UID (insn), cost);
+ }
+ }
+ new_ready -= q;
+
+ /* Check the next group if all insns were queued. */
+ if (j - i - q == 0)
+ continue;
+
+ /* If more than one remains, select the first one with the largest
+ potential hazard. */
+ else if (j - i - q > 1)
+ {
+ best_cost = -1;
+ for (k = i; k < j; k++)
+ {
+ if ((insn = ready[k]) == 0)
+ continue;
+ if ((cost = potential_hazard (insn_unit (insn), insn, 0))
+ > best_cost)
+ {
+ best_cost = cost;
+ best_insn = k;
+ }
+ }
+ }
+ /* We have found a suitable insn to schedule. */
+ break;
+ }
+
+  /* Move the best insn to the front of the ready list.  */
+ if (best_insn != 0)
+ {
+ if (file)
+ {
+ fprintf (file, ", now");
+ for (i = 0; i < n_ready; i++)
+ if (ready[i])
+ fprintf (file, " %d", INSN_UID (ready[i]));
+ fprintf (file, "\n;; insn %d has a greater potential hazard",
+ INSN_UID (ready[best_insn]));
+ }
+ for (i = best_insn; i > 0; i--)
+ {
+ insn = ready[i-1];
+ ready[i-1] = ready[i];
+ ready[i] = insn;
+ }
+ }
+
+ /* Compact the ready list. */
+ if (new_ready < n_ready)
+ for (i = j = 0; i < n_ready; i++)
+ if (ready[i])
+ ready[j++] = ready[i];
+
+ return new_ready;
+}
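+
+/* E.g. with ready-list priorities {7, 7, 7, 4, ...}, the first group is
+   the three priority-7 insns: any whose function unit is busy is queued
+   for the length of its blockage, and of the survivors the one with the
+   largest potential hazard is swapped to the front before the list is
+   compacted.  */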
+
+/* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
+ dead_notes list. */
+
+static void
+create_reg_dead_note (reg, insn)
+ rtx reg, insn;
+{
+ rtx link;
+
+ /* The number of registers killed after scheduling must be the same as the
+ number of registers killed before scheduling. The number of REG_DEAD
+ notes may not be conserved, i.e. two SImode hard register REG_DEAD notes
+ might become one DImode hard register REG_DEAD note, but the number of
+ registers killed will be conserved.
+
+ We carefully remove REG_DEAD notes from the dead_notes list, so that
+ there will be none left at the end. If we run out early, then there
+ is a bug somewhere in flow, combine and/or sched. */
+
+ if (dead_notes == 0)
+ {
+#if 1
+ abort ();
+#else
+ link = rtx_alloc (EXPR_LIST);
+ PUT_REG_NOTE_KIND (link, REG_DEAD);
+#endif
+ }
+ else
+ {
+ /* Number of regs killed by REG. */
+ int regs_killed = (REGNO (reg) >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)));
+ /* Number of regs killed by REG_DEAD notes taken off the list. */
+ int reg_note_regs;
+
+ link = dead_notes;
+ reg_note_regs = (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
+ GET_MODE (XEXP (link, 0))));
+ while (reg_note_regs < regs_killed)
+ {
+ link = XEXP (link, 1);
+ reg_note_regs += (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
+ : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
+ GET_MODE (XEXP (link, 0))));
+ }
+ dead_notes = XEXP (link, 1);
+
+      /* If we took too many reg kills off, put the extra ones back.  */
+ while (reg_note_regs > regs_killed)
+ {
+ rtx temp_reg, temp_link;
+
+ temp_reg = gen_rtx (REG, word_mode, 0);
+ temp_link = rtx_alloc (EXPR_LIST);
+ PUT_REG_NOTE_KIND (temp_link, REG_DEAD);
+ XEXP (temp_link, 0) = temp_reg;
+ XEXP (temp_link, 1) = dead_notes;
+ dead_notes = temp_link;
+ reg_note_regs--;
+ }
+ }
+
+ XEXP (link, 0) = reg;
+ XEXP (link, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = link;
+}
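+
+/* The word counting above matters for multi-word hard registers: killing
+   a DImode hard register (HARD_REGNO_NREGS == 2) may consume two SImode
+   REG_DEAD notes from dead_notes, while taking a DImode note off the
+   list to kill an SImode register leaves one excess word, which is
+   returned to dead_notes as a fresh word_mode note.  */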
+
+/* Subroutine of attach_deaths_insn--handles the recursive search
+   through X.  If SET_P is true, then X is being modified by INSN.  */
+
+static void
+attach_deaths (x, insn, set_p)
+ rtx x;
+ rtx insn;
+ int set_p;
+{
+ register int i;
+ register int j;
+ register enum rtx_code code;
+ register char *fmt;
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ /* Get rid of the easy cases first. */
+ return;
+
+ case REG:
+ {
+ /* If the register dies in this insn, queue that note, and mark
+ this register as needing to die. */
+ /* This code is very similar to mark_used_1 (if set_p is false)
+ and mark_set_1 (if set_p is true) in flow.c. */
+
+ register int regno = REGNO (x);
+ register int offset = regno / REGSET_ELT_BITS;
+ register REGSET_ELT_TYPE bit
+ = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
+ REGSET_ELT_TYPE all_needed = (old_live_regs[offset] & bit);
+ REGSET_ELT_TYPE some_needed = (old_live_regs[offset] & bit);
+
+ if (set_p)
+ return;
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n;
+
+ n = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (--n > 0)
+ {
+ some_needed |= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1
+ << ((regno + n) % REGSET_ELT_BITS)));
+ all_needed &= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1
+ << ((regno + n) % REGSET_ELT_BITS)));
+ }
+ }
+
+ /* If it wasn't live before we started, then add a REG_DEAD note.
+ We must check the previous lifetime info not the current info,
+ because we may have to execute this code several times, e.g.
+ once for a clobber (which doesn't add a note) and later
+ for a use (which does add a note).
+
+ Always make the register live. We must do this even if it was
+ live before, because this may be an insn which sets and uses
+ the same register, in which case the register has already been
+ killed, so we must make it live again.
+
+ Global registers are always live, and should never have a REG_DEAD
+ note added for them, so none of the code below applies to them. */
+
+ if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
+ {
+ /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
+ STACK_POINTER_REGNUM, since these are always considered to be
+ live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
+ if (regno != FRAME_POINTER_REGNUM
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && ! (regno == HARD_FRAME_POINTER_REGNUM)
+#endif
+#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
+#endif
+ && regno != STACK_POINTER_REGNUM)
+ {
+ if (! all_needed && ! dead_or_set_p (insn, x))
+ {
+ /* If none of the words in X is needed, make a REG_DEAD
+ note. Otherwise, we must make partial REG_DEAD
+ notes. */
+ if (! some_needed)
+ create_reg_dead_note (x, insn);
+ else
+ {
+ int i;
+
+ /* Don't make a REG_DEAD note for a part of a
+ register that is set in the insn. */
+ for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
+ i >= 0; i--)
+ if ((old_live_regs[(regno + i) / REGSET_ELT_BITS]
+ & ((REGSET_ELT_TYPE) 1
+		     << ((regno + i) % REGSET_ELT_BITS))) == 0
+ && ! dead_or_set_regno_p (insn, regno + i))
+ create_reg_dead_note (gen_rtx (REG,
+ reg_raw_mode[regno + i],
+ regno + i),
+ insn);
+ }
+ }
+ }
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (--j >= 0)
+ {
+ offset = (regno + j) / REGSET_ELT_BITS;
+ bit
+ = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
+
+ bb_dead_regs[offset] &= ~bit;
+ bb_live_regs[offset] |= bit;
+ }
+ }
+ else
+ {
+ bb_dead_regs[offset] &= ~bit;
+ bb_live_regs[offset] |= bit;
+ }
+ }
+ return;
+ }
+
+ case MEM:
+ /* Handle tail-recursive case. */
+ attach_deaths (XEXP (x, 0), insn, 0);
+ return;
+
+ case SUBREG:
+ case STRICT_LOW_PART:
+ /* These two cases preserve the value of SET_P, so handle them
+ separately. */
+ attach_deaths (XEXP (x, 0), insn, set_p);
+ return;
+
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ /* This case preserves the value of SET_P for the first operand, but
+ clears it for the other two. */
+ attach_deaths (XEXP (x, 0), insn, set_p);
+ attach_deaths (XEXP (x, 1), insn, 0);
+ attach_deaths (XEXP (x, 2), insn, 0);
+ return;
+
+ default:
+ /* Other cases: walk the insn. */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ attach_deaths (XEXP (x, i), insn, 0);
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x, i); j++)
+ attach_deaths (XVECEXP (x, i, j), insn, 0);
+ }
+ }
+}
+
+/* After INSN has executed, add register death notes for each register
+ that is dead after INSN. */
+
+static void
+attach_deaths_insn (insn)
+ rtx insn;
+{
+ rtx x = PATTERN (insn);
+ register RTX_CODE code = GET_CODE (x);
+ rtx link;
+
+ if (code == SET)
+ {
+ attach_deaths (SET_SRC (x), insn, 0);
+
+ /* A register might die here even if it is the destination, e.g.
+ it is the target of a volatile read and is otherwise unused.
+ Hence we must always call attach_deaths for the SET_DEST. */
+ attach_deaths (SET_DEST (x), insn, 1);
+ }
+ else if (code == PARALLEL)
+ {
+ register int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ code = GET_CODE (XVECEXP (x, 0, i));
+ if (code == SET)
+ {
+ attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
+
+ attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
+ }
+ /* Flow does not add REG_DEAD notes to registers that die in
+ clobbers, so we can't either. */
+ else if (code != CLOBBER)
+ attach_deaths (XVECEXP (x, 0, i), insn, 0);
+ }
+ }
+ /* If this is a CLOBBER, only add REG_DEAD notes to registers inside a
+ MEM being clobbered, just like flow. */
+ else if (code == CLOBBER && GET_CODE (XEXP (x, 0)) == MEM)
+ attach_deaths (XEXP (XEXP (x, 0), 0), insn, 0);
+ /* Otherwise don't add a death note to things being clobbered. */
+ else if (code != CLOBBER)
+ attach_deaths (x, insn, 0);
+
+ /* Make death notes for things used in the called function. */
+ if (GET_CODE (insn) == CALL_INSN)
+ for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
+ attach_deaths (XEXP (XEXP (link, 0), 0), insn,
+ GET_CODE (XEXP (link, 0)) == CLOBBER);
+}
+
+/* Delete notes beginning with INSN and maybe put them in the chain
+ of notes ended by NOTE_LIST.
+ Returns the insn following the notes. */
+
+static rtx
+unlink_notes (insn, tail)
+ rtx insn, tail;
+{
+ rtx prev = PREV_INSN (insn);
+
+ while (insn != tail && GET_CODE (insn) == NOTE)
+ {
+ rtx next = NEXT_INSN (insn);
+ /* Delete the note from its current position. */
+ if (prev)
+ NEXT_INSN (prev) = next;
+ if (next)
+ PREV_INSN (next) = prev;
+
+ if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
+ /* Record line-number notes so they can be reused. */
+ LINE_NOTE (insn) = insn;
+
+ /* Don't save away NOTE_INSN_SETJMPs, because they must remain
+ immediately after the call they follow. We use a fake
+ (REG_DEAD (const_int -1)) note to remember them. */
+ else if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_SETJMP)
+ {
+ /* Insert the note at the end of the notes list. */
+ PREV_INSN (insn) = note_list;
+ if (note_list)
+ NEXT_INSN (note_list) = insn;
+ note_list = insn;
+ }
+
+ insn = next;
+ }
+ return insn;
+}
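+
+/* E.g. a NOTE_INSN_LOOP_BEG between HEAD and TAIL is spliced out of the
+   insn chain and pushed onto note_list for later reinsertion; a
+   line-number note (when debug info is wanted) is instead remembered
+   through LINE_NOTE; and a NOTE_INSN_SETJMP is simply dropped here, to
+   be re-emitted from its fake (REG_DEAD (const_int -1)) note once its
+   CALL_INSN has been scheduled.  */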
+
+/* Constructor for `sometimes' data structure. */
+
+static int
+new_sometimes_live (regs_sometimes_live, offset, bit, sometimes_max)
+ struct sometimes *regs_sometimes_live;
+ int offset, bit;
+ int sometimes_max;
+{
+ register struct sometimes *p;
+ register int regno = offset * REGSET_ELT_BITS + bit;
+
+ /* There should never be a register greater than max_regno here. If there
+ is, it means that a define_split has created a new pseudo reg. This
+ is not allowed, since there will not be flow info available for any
+ new register, so catch the error here. */
+ if (regno >= max_regno)
+ abort ();
+
+ p = &regs_sometimes_live[sometimes_max];
+ p->offset = offset;
+ p->bit = bit;
+ p->live_length = 0;
+ p->calls_crossed = 0;
+ sometimes_max++;
+ return sometimes_max;
+}
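+
+/* The (offset, bit) pair encodes a register number: e.g. if
+   REGSET_ELT_BITS is 32, register 70 is tracked as offset 2, bit 6.
+   Each entry accumulates live_length and calls_crossed until the
+   register goes dead, at which point the totals are folded into
+   sched_reg_live_length and sched_reg_n_calls_crossed.  */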
+
+/* Called when the block is finished: fold the lifetime data accumulated
+   for all regs we are still tracking into the sched_reg_* arrays.  */
+
+static void
+finish_sometimes_live (regs_sometimes_live, sometimes_max)
+ struct sometimes *regs_sometimes_live;
+ int sometimes_max;
+{
+ int i;
+
+ for (i = 0; i < sometimes_max; i++)
+ {
+ register struct sometimes *p = &regs_sometimes_live[i];
+ int regno;
+
+ regno = p->offset * REGSET_ELT_BITS + p->bit;
+
+ sched_reg_live_length[regno] += p->live_length;
+ sched_reg_n_calls_crossed[regno] += p->calls_crossed;
+ }
+}
+
+/* Use modified list scheduling to rearrange insns in basic block
+ B. FILE, if nonzero, is where we dump interesting output about
+ this pass. */
+
+static void
+schedule_block (b, file)
+ int b;
+ FILE *file;
+{
+ rtx insn, last;
+ rtx *ready, link;
+ int i, j, n_ready = 0, new_ready, n_insns = 0;
+ int sched_n_insns = 0;
+ int clock;
+#define NEED_NOTHING 0
+#define NEED_HEAD 1
+#define NEED_TAIL 2
+ int new_needs;
+
+ /* HEAD and TAIL delimit the region being scheduled. */
+ rtx head = basic_block_head[b];
+ rtx tail = basic_block_end[b];
+ /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
+ being scheduled. When the insns have been ordered,
+ these insns delimit where the new insns are to be
+ spliced back into the insn chain. */
+ rtx next_tail;
+ rtx prev_head;
+
+ /* Keep life information accurate. */
+ register struct sometimes *regs_sometimes_live;
+ int sometimes_max;
+
+ if (file)
+ fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
+ b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
+
+ i = max_reg_num ();
+ reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
+ bzero ((char *) reg_last_uses, i * sizeof (rtx));
+ reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
+ bzero ((char *) reg_last_sets, i * sizeof (rtx));
+ reg_pending_sets = (regset) alloca (regset_bytes);
+ bzero ((char *) reg_pending_sets, regset_bytes);
+ reg_pending_sets_all = 0;
+ clear_units ();
+
+ /* Remove certain insns at the beginning from scheduling,
+ by advancing HEAD. */
+
+ /* At the start of a function, before reload has run, don't delay getting
+ parameters from hard registers into pseudo registers. */
+ if (reload_completed == 0 && b == 0)
+ {
+ while (head != tail
+ && GET_CODE (head) == NOTE
+ && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
+ head = NEXT_INSN (head);
+ while (head != tail
+ && GET_CODE (head) == INSN
+ && GET_CODE (PATTERN (head)) == SET)
+ {
+ rtx src = SET_SRC (PATTERN (head));
+ while (GET_CODE (src) == SUBREG
+ || GET_CODE (src) == SIGN_EXTEND
+ || GET_CODE (src) == ZERO_EXTEND
+ || GET_CODE (src) == SIGN_EXTRACT
+ || GET_CODE (src) == ZERO_EXTRACT)
+ src = XEXP (src, 0);
+ if (GET_CODE (src) != REG
+ || REGNO (src) >= FIRST_PSEUDO_REGISTER)
+ break;
+ /* Keep this insn from ever being scheduled. */
+ INSN_REF_COUNT (head) = 1;
+ head = NEXT_INSN (head);
+ }
+ }
+
+ /* Don't include any notes or labels at the beginning of the
+ basic block, or notes at the ends of basic blocks. */
+ while (head != tail)
+ {
+ if (GET_CODE (head) == NOTE)
+ head = NEXT_INSN (head);
+ else if (GET_CODE (tail) == NOTE)
+ tail = PREV_INSN (tail);
+ else if (GET_CODE (head) == CODE_LABEL)
+ head = NEXT_INSN (head);
+ else break;
+ }
+ /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
+ to schedule this block. */
+ if (head == tail
+ && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
+ return;
+
+#if 0
+ /* This short-cut doesn't work. It does not count call insns crossed by
+ registers in reg_sometimes_live. It does not mark these registers as
+ dead if they die in this block. It does not mark these registers live
+ (or create new reg_sometimes_live entries if necessary) if they are born
+ in this block.
+
+ The easy solution is to just always schedule a block. This block only
+ has one insn, so this won't slow down this pass by much. */
+
+ if (head == tail)
+ return;
+#endif
+
+ /* Now HEAD through TAIL are the insns actually to be rearranged;
+ Let PREV_HEAD and NEXT_TAIL enclose them. */
+ prev_head = PREV_INSN (head);
+ next_tail = NEXT_INSN (tail);
+
+ /* Initialize basic block data structures. */
+ dead_notes = 0;
+ pending_read_insns = 0;
+ pending_read_mems = 0;
+ pending_write_insns = 0;
+ pending_write_mems = 0;
+ pending_lists_length = 0;
+ last_pending_memory_flush = 0;
+ last_function_call = 0;
+ last_scheduled_insn = 0;
+
+ LOG_LINKS (sched_before_next_call) = 0;
+
+ n_insns += sched_analyze (head, tail);
+ if (n_insns == 0)
+ {
+ free_pending_lists ();
+ return;
+ }
+
+ /* Allocate vector to hold insns to be rearranged (except those
+ insns which are controlled by an insn with SCHED_GROUP_P set).
+ All these insns are included between ORIG_HEAD and ORIG_TAIL,
+ as those variables ultimately are set up. */
+ ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
+
+ /* TAIL is now the last of the insns to be rearranged.
+ Put those insns into the READY vector. */
+ insn = tail;
+
+ /* For all branches, calls, uses, and cc0 setters, force them to remain
+ in order at the end of the block by adding dependencies and giving
+ the last a high priority. There may be notes present, and prev_head
+ may also be a note.
+
+ Branches must obviously remain at the end. Calls should remain at the
+ end since moving them results in worse register allocation. Uses remain
+     at the end to ensure proper register allocation.  cc0 setters remain
+ at the end because they can't be moved away from their cc0 user. */
+ last = 0;
+ while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
+ || (GET_CODE (insn) == INSN
+ && (GET_CODE (PATTERN (insn)) == USE
+#ifdef HAVE_cc0
+ || sets_cc0_p (PATTERN (insn))
+#endif
+ ))
+ || GET_CODE (insn) == NOTE)
+ {
+ if (GET_CODE (insn) != NOTE)
+ {
+ priority (insn);
+ if (last == 0)
+ {
+ ready[n_ready++] = insn;
+ INSN_PRIORITY (insn) = TAIL_PRIORITY - i;
+ INSN_REF_COUNT (insn) = 0;
+ }
+ else if (! find_insn_list (insn, LOG_LINKS (last)))
+ {
+ add_dependence (last, insn, REG_DEP_ANTI);
+ INSN_REF_COUNT (insn)++;
+ }
+ last = insn;
+
+ /* Skip over insns that are part of a group. */
+ while (SCHED_GROUP_P (insn))
+ {
+ insn = prev_nonnote_insn (insn);
+ priority (insn);
+ }
+ }
+
+ insn = PREV_INSN (insn);
+ /* Don't overrun the bounds of the basic block. */
+ if (insn == prev_head)
+ break;
+ }
+
+ /* Assign priorities to instructions. Also check whether they
+ are in priority order already. If so then I will be nonnegative.
+ We use this shortcut only before reloading. */
+#if 0
+ i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
+#endif
+
+ for (; insn != prev_head; insn = PREV_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ priority (insn);
+ if (INSN_REF_COUNT (insn) == 0)
+ {
+ if (last == 0)
+ ready[n_ready++] = insn;
+ else
+ {
+ /* Make this dependent on the last of the instructions
+ that must remain in order at the end of the block. */
+ add_dependence (last, insn, REG_DEP_ANTI);
+ INSN_REF_COUNT (insn) = 1;
+ }
+ }
+ if (SCHED_GROUP_P (insn))
+ {
+ while (SCHED_GROUP_P (insn))
+ {
+ insn = PREV_INSN (insn);
+ while (GET_CODE (insn) == NOTE)
+ insn = PREV_INSN (insn);
+ priority (insn);
+ }
+ continue;
+ }
+#if 0
+ if (i < 0)
+ continue;
+ if (INSN_PRIORITY (insn) < i)
+ i = INSN_PRIORITY (insn);
+ else if (INSN_PRIORITY (insn) > i)
+ i = DONE_PRIORITY;
+#endif
+ }
+ }
+
+#if 0
+ /* This short-cut doesn't work. It does not count call insns crossed by
+ registers in reg_sometimes_live. It does not mark these registers as
+ dead if they die in this block. It does not mark these registers live
+ (or create new reg_sometimes_live entries if necessary) if they are born
+ in this block.
+
+ The easy solution is to just always schedule a block. These blocks tend
+ to be very short, so this doesn't slow down this pass by much. */
+
+ /* If existing order is good, don't bother to reorder. */
+ if (i != DONE_PRIORITY)
+ {
+ if (file)
+ fprintf (file, ";; already scheduled\n");
+
+ if (reload_completed == 0)
+ {
+ for (i = 0; i < sometimes_max; i++)
+ regs_sometimes_live[i].live_length += n_insns;
+
+ finish_sometimes_live (regs_sometimes_live, sometimes_max);
+ }
+ free_pending_lists ();
+ return;
+ }
+#endif
+
+ /* Scan all the insns to be scheduled, removing NOTE insns
+ and register death notes.
+ Line number NOTE insns end up in NOTE_LIST.
+ Register death notes end up in DEAD_NOTES.
+
+ Recreate the register life information for the end of this basic
+ block. */
+
+ if (reload_completed == 0)
+ {
+ bcopy ((char *) basic_block_live_at_start[b], (char *) bb_live_regs,
+ regset_bytes);
+ bzero ((char *) bb_dead_regs, regset_bytes);
+
+ if (b == 0)
+ {
+ /* This is the first block in the function. There may be insns
+ before head that we can't schedule. We still need to examine
+ them though for accurate register lifetime analysis. */
+
+ /* We don't want to remove any REG_DEAD notes as the code below
+ does. */
+
+ for (insn = basic_block_head[b]; insn != head;
+ insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ /* See if the register gets born here. */
+ /* We must check for registers being born before we check for
+ registers dying. It is possible for a register to be born
+ and die in the same insn, e.g. reading from a volatile
+ memory location into an otherwise unused register. Such
+ a register must be marked as dead after this insn. */
+ if (GET_CODE (PATTERN (insn)) == SET
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ sched_note_set (b, PATTERN (insn), 0);
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ int j;
+ for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
+ || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
+ sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
+
+ /* ??? This code is obsolete and should be deleted. It
+ is harmless though, so we will leave it in for now. */
+ for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
+ sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
+ }
+
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ {
+ if ((REG_NOTE_KIND (link) == REG_DEAD
+ || REG_NOTE_KIND (link) == REG_UNUSED)
+ /* Verify that the REG_NOTE has a legal value. */
+ && GET_CODE (XEXP (link, 0)) == REG)
+ {
+ register int regno = REGNO (XEXP (link, 0));
+ register int offset = regno / REGSET_ELT_BITS;
+ register REGSET_ELT_TYPE bit
+ = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int j = HARD_REGNO_NREGS (regno,
+ GET_MODE (XEXP (link, 0)));
+ while (--j >= 0)
+ {
+ offset = (regno + j) / REGSET_ELT_BITS;
+ bit = ((REGSET_ELT_TYPE) 1
+ << ((regno + j) % REGSET_ELT_BITS));
+
+ bb_live_regs[offset] &= ~bit;
+ bb_dead_regs[offset] |= bit;
+ }
+ }
+ else
+ {
+ bb_live_regs[offset] &= ~bit;
+ bb_dead_regs[offset] |= bit;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* If debugging information is being produced, keep track of the line
+ number notes for each insn. */
+ if (write_symbols != NO_DEBUG)
+ {
+ /* We must use the true line number for the first insn in the block
+ that was computed and saved at the start of this pass. We can't
+ use the current line number, because scheduling of the previous
+ block may have changed the current line number. */
+ rtx line = line_note_head[b];
+
+ for (insn = basic_block_head[b];
+ insn != next_tail;
+ insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
+ line = insn;
+ else
+ LINE_NOTE (insn) = line;
+ }
+
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ {
+ rtx prev, next, link;
+
+ /* Farm out notes. This is needed to keep the debugger from
+ getting completely deranged. */
+ if (GET_CODE (insn) == NOTE)
+ {
+ prev = insn;
+ insn = unlink_notes (insn, next_tail);
+ if (prev == tail)
+ abort ();
+ if (prev == head)
+ abort ();
+ if (insn == next_tail)
+ abort ();
+ }
+
+ if (reload_completed == 0
+ && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ /* See if the register gets born here. */
+ /* We must check for registers being born before we check for
+ registers dying. It is possible for a register to be born and
+ die in the same insn, e.g. reading from a volatile memory
+ location into an otherwise unused register. Such a register
+ must be marked as dead after this insn. */
+ if (GET_CODE (PATTERN (insn)) == SET
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ sched_note_set (b, PATTERN (insn), 0);
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ int j;
+ for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
+ || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
+ sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
+
+ /* ??? This code is obsolete and should be deleted. It
+ is harmless though, so we will leave it in for now. */
+ for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
+ sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
+ }
+
+ /* Need to know what registers this insn kills. */
+ for (prev = 0, link = REG_NOTES (insn); link; link = next)
+ {
+ next = XEXP (link, 1);
+ if ((REG_NOTE_KIND (link) == REG_DEAD
+ || REG_NOTE_KIND (link) == REG_UNUSED)
+ /* Verify that the REG_NOTE has a legal value. */
+ && GET_CODE (XEXP (link, 0)) == REG)
+ {
+ register int regno = REGNO (XEXP (link, 0));
+ register int offset = regno / REGSET_ELT_BITS;
+ register REGSET_ELT_TYPE bit
+ = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
+
+ /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
+ alone. */
+ if (REG_NOTE_KIND (link) == REG_DEAD)
+ {
+ if (prev)
+ XEXP (prev, 1) = next;
+ else
+ REG_NOTES (insn) = next;
+ XEXP (link, 1) = dead_notes;
+ dead_notes = link;
+ }
+ else
+ prev = link;
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int j = HARD_REGNO_NREGS (regno,
+ GET_MODE (XEXP (link, 0)));
+ while (--j >= 0)
+ {
+ offset = (regno + j) / REGSET_ELT_BITS;
+ bit = ((REGSET_ELT_TYPE) 1
+ << ((regno + j) % REGSET_ELT_BITS));
+
+ bb_live_regs[offset] &= ~bit;
+ bb_dead_regs[offset] |= bit;
+ }
+ }
+ else
+ {
+ bb_live_regs[offset] &= ~bit;
+ bb_dead_regs[offset] |= bit;
+ }
+ }
+ else
+ prev = link;
+ }
+ }
+ }
+
+ if (reload_completed == 0)
+ {
+ /* Keep track of register lives. */
+ old_live_regs = (regset) alloca (regset_bytes);
+ regs_sometimes_live
+ = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
+ sometimes_max = 0;
+
+ /* Start with registers live at end. */
+ for (j = 0; j < regset_size; j++)
+ {
+ REGSET_ELT_TYPE live = bb_live_regs[j];
+ old_live_regs[j] = live;
+ if (live)
+ {
+ register int bit;
+ for (bit = 0; bit < REGSET_ELT_BITS; bit++)
+ if (live & ((REGSET_ELT_TYPE) 1 << bit))
+ sometimes_max = new_sometimes_live (regs_sometimes_live, j,
+ bit, sometimes_max);
+ }
+ }
+ }
+
+ SCHED_SORT (ready, n_ready, 1);
+
+ if (file)
+ {
+ fprintf (file, ";; ready list initially:\n;; ");
+ for (i = 0; i < n_ready; i++)
+ fprintf (file, "%d ", INSN_UID (ready[i]));
+ fprintf (file, "\n\n");
+
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ if (INSN_PRIORITY (insn) > 0)
+ fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
+ INSN_UID (insn), INSN_PRIORITY (insn),
+ INSN_REF_COUNT (insn));
+ }
+
+ /* Now HEAD and TAIL are going to become disconnected
+ entirely from the insn chain. */
+ tail = 0;
+
+ /* Q_SIZE will always be zero here. */
+ q_ptr = 0; clock = 0;
+ bzero ((char *) insn_queue, sizeof (insn_queue));
+
+ /* Now, perform list scheduling. */
+
+ /* Where we start inserting insns is after TAIL. */
+ last = next_tail;
+
+ new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
+ ? NEED_HEAD : NEED_NOTHING);
+ if (PREV_INSN (next_tail) == basic_block_end[b])
+ new_needs |= NEED_TAIL;
+
+ new_ready = n_ready;
+ while (sched_n_insns < n_insns)
+ {
+ q_ptr = NEXT_Q (q_ptr); clock++;
+
+ /* Add all pending insns that can be scheduled without stalls to the
+ ready list. */
+ for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
+ {
+ if (file)
+ fprintf (file, ";; launching %d before %d with no stalls at T-%d\n",
+ INSN_UID (insn), INSN_UID (last), clock);
+ ready[new_ready++] = insn;
+ q_size -= 1;
+ }
+ insn_queue[q_ptr] = 0;
+
+ /* If there are no ready insns, stall until one is ready and add all
+ of the pending insns at that point to the ready list. */
+ if (new_ready == 0)
+ {
+ register int stalls;
+
+ for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
+ if (insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)])
+ {
+ for (; insn; insn = NEXT_INSN (insn))
+ {
+ if (file)
+ fprintf (file, ";; launching %d before %d with %d stalls at T-%d\n",
+ INSN_UID (insn), INSN_UID (last), stalls, clock);
+ ready[new_ready++] = insn;
+ q_size -= 1;
+ }
+ insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
+ break;
+ }
+
+ q_ptr = NEXT_Q_AFTER (q_ptr, stalls); clock += stalls;
+ }
+
+ /* There should be some instructions waiting to fire. */
+ if (new_ready == 0)
+ abort ();
+
+ if (file)
+ {
+ fprintf (file, ";; ready list at T-%d:", clock);
+ for (i = 0; i < new_ready; i++)
+ fprintf (file, " %d (%x)",
+ INSN_UID (ready[i]), INSN_PRIORITY (ready[i]));
+ }
+
+ /* Sort the ready list and choose the best insn to schedule. Select
+ which insn should issue in this cycle and queue those that are
+ blocked by function unit hazards.
+
+ N_READY holds the number of items that were scheduled the last time,
+ minus the one instruction scheduled on the last loop iteration; it
+ is not modified for any other reason in this loop. */
+
+ SCHED_SORT (ready, new_ready, n_ready);
+ if (MAX_BLOCKAGE > 1)
+ {
+ new_ready = schedule_select (ready, new_ready, clock, file);
+ if (new_ready == 0)
+ {
+ if (file)
+ fprintf (file, "\n");
+ /* We must set n_ready here, to ensure that sorting always
+ occurs when we come back to the SCHED_SORT line above. */
+ n_ready = 0;
+ continue;
+ }
+ }
+ n_ready = new_ready;
+ last_scheduled_insn = insn = ready[0];
+
+ /* The first insn scheduled becomes the new tail. */
+ if (tail == 0)
+ tail = insn;
+
+ if (file)
+ {
+ fprintf (file, ", now");
+ for (i = 0; i < n_ready; i++)
+ fprintf (file, " %d", INSN_UID (ready[i]));
+ fprintf (file, "\n");
+ }
+
+ if (DONE_PRIORITY_P (insn))
+ abort ();
+
+ if (reload_completed == 0)
+ {
+ /* Process this insn, and each insn linked to this one which must
+ be immediately output after this insn. */
+ do
+ {
+ /* First we kill registers set by this insn, and then we
+		 make registers used by this insn live.  This is the opposite
+		 of the order used above, because we are traversing the instructions
+ backwards. */
+
+ /* Strictly speaking, we should scan REG_UNUSED notes and make
+ every register mentioned there live, however, we will just
+ kill them again immediately below, so there doesn't seem to
+ be any reason why we bother to do this. */
+
+ /* See if this is the last notice we must take of a register. */
+ if (GET_CODE (PATTERN (insn)) == SET
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ sched_note_set (b, PATTERN (insn), 1);
+ else if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ {
+ int j;
+ for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
+ if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
+ || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
+ sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
+ }
+
+ /* This code keeps life analysis information up to date. */
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ register struct sometimes *p;
+
+ /* A call kills all call used and global registers, except
+ for those mentioned in the call pattern which will be
+ made live again later. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i] || global_regs[i])
+ {
+ register int offset = i / REGSET_ELT_BITS;
+ register REGSET_ELT_TYPE bit
+ = (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
+
+ bb_live_regs[offset] &= ~bit;
+ bb_dead_regs[offset] |= bit;
+ }
+
+ /* Regs live at the time of a call instruction must not
+ go in a register clobbered by calls. Record this for
+ all regs now live. Note that insns which are born or
+ die in a call do not cross a call, so this must be done
+ after the killings (above) and before the births
+ (below). */
+ p = regs_sometimes_live;
+ for (i = 0; i < sometimes_max; i++, p++)
+ if (bb_live_regs[p->offset]
+ & ((REGSET_ELT_TYPE) 1 << p->bit))
+ p->calls_crossed += 1;
+ }
+
+ /* Make every register used live, and add REG_DEAD notes for
+ registers which were not live before we started. */
+ attach_deaths_insn (insn);
+
+ /* Find registers now made live by that instruction. */
+ for (i = 0; i < regset_size; i++)
+ {
+ REGSET_ELT_TYPE diff = bb_live_regs[i] & ~old_live_regs[i];
+ if (diff)
+ {
+ register int bit;
+ old_live_regs[i] |= diff;
+ for (bit = 0; bit < REGSET_ELT_BITS; bit++)
+ if (diff & ((REGSET_ELT_TYPE) 1 << bit))
+ sometimes_max
+ = new_sometimes_live (regs_sometimes_live, i, bit,
+ sometimes_max);
+ }
+ }
+
+ /* Count lengths of all regs we are worrying about now,
+ and handle registers no longer live. */
+
+ for (i = 0; i < sometimes_max; i++)
+ {
+ register struct sometimes *p = &regs_sometimes_live[i];
+ int regno = p->offset*REGSET_ELT_BITS + p->bit;
+
+ p->live_length += 1;
+
+ if ((bb_live_regs[p->offset]
+ & ((REGSET_ELT_TYPE) 1 << p->bit)) == 0)
+ {
+ /* This is the end of one of this register's lifetime
+ segments. Save the lifetime info collected so far,
+ and clear its bit in the old_live_regs entry. */
+ sched_reg_live_length[regno] += p->live_length;
+ sched_reg_n_calls_crossed[regno] += p->calls_crossed;
+ old_live_regs[p->offset]
+ &= ~((REGSET_ELT_TYPE) 1 << p->bit);
+
+ /* Delete the reg_sometimes_live entry for this reg by
+ copying the last entry over top of it. */
+ *p = regs_sometimes_live[--sometimes_max];
+ /* ...and decrement i so that this newly copied entry
+ will be processed. */
+ i--;
+ }
+ }
+
+ link = insn;
+ insn = PREV_INSN (insn);
+ }
+ while (SCHED_GROUP_P (link));
+
+ /* Set INSN back to the insn we are scheduling now. */
+ insn = ready[0];
+ }
+
+ /* Schedule INSN. Remove it from the ready list. */
+ ready += 1;
+ n_ready -= 1;
+
+ sched_n_insns += 1;
+ NEXT_INSN (insn) = last;
+ PREV_INSN (last) = insn;
+ last = insn;
+
+ /* Check to see if we need to re-emit a NOTE_INSN_SETJMP here. */
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ rtx note = find_reg_note (insn, REG_DEAD, constm1_rtx);
+
+ if (note)
+ {
+ emit_note_after (NOTE_INSN_SETJMP, insn);
+ remove_note (insn, note);
+ }
+ }
+
+ /* Everything that precedes INSN now either becomes "ready", if
+ it can execute immediately before INSN, or "pending", if
+ there must be a delay. Give INSN high enough priority that
+ at least one (maybe more) reg-killing insns can be launched
+ ahead of all others. Mark INSN as scheduled by changing its
+ priority to -1. */
+ INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
+ new_ready = schedule_insn (insn, ready, n_ready, clock);
+ INSN_PRIORITY (insn) = DONE_PRIORITY;
+
+ /* Schedule all prior insns that must not be moved. */
+ if (SCHED_GROUP_P (insn))
+ {
+ /* Disable these insns from being launched. */
+ link = insn;
+ while (SCHED_GROUP_P (link))
+ {
+ /* Disable these insns from being launched by anybody. */
+ link = PREV_INSN (link);
+ INSN_REF_COUNT (link) = 0;
+ }
+
+ /* None of these insns can move forward into delay slots. */
+ while (SCHED_GROUP_P (insn))
+ {
+ insn = PREV_INSN (insn);
+ new_ready = schedule_insn (insn, ready, new_ready, clock);
+ INSN_PRIORITY (insn) = DONE_PRIORITY;
+
+ sched_n_insns += 1;
+ NEXT_INSN (insn) = last;
+ PREV_INSN (last) = insn;
+ last = insn;
+ }
+ }
+ }
+ if (q_size != 0)
+ abort ();
+
+ if (reload_completed == 0)
+ finish_sometimes_live (regs_sometimes_live, sometimes_max);
+
+ /* HEAD is now the first insn in the chain of insns that
+     have been scheduled by the loop above.
+ TAIL is the last of those insns. */
+ head = insn;
+
+ /* NOTE_LIST is the end of a chain of notes previously found
+ among the insns. Insert them at the beginning of the insns. */
+ if (note_list != 0)
+ {
+ rtx note_head = note_list;
+ while (PREV_INSN (note_head))
+ note_head = PREV_INSN (note_head);
+
+ PREV_INSN (head) = note_list;
+ NEXT_INSN (note_list) = head;
+ head = note_head;
+ }
+
+ /* There should be no REG_DEAD notes leftover at the end.
+ In practice, this can occur as the result of bugs in flow, combine.c,
+ and/or sched.c. The values of the REG_DEAD notes remaining are
+ meaningless, because dead_notes is just used as a free list. */
+#if 1
+ if (dead_notes != 0)
+ abort ();
+#endif
+
+ if (new_needs & NEED_HEAD)
+ basic_block_head[b] = head;
+ PREV_INSN (head) = prev_head;
+ NEXT_INSN (prev_head) = head;
+
+ if (new_needs & NEED_TAIL)
+ basic_block_end[b] = tail;
+ NEXT_INSN (tail) = next_tail;
+ PREV_INSN (next_tail) = tail;
+
+ /* Restore the line-number notes of each insn. */
+ if (write_symbols != NO_DEBUG)
+ {
+ rtx line, note, prev, new;
+ int notes = 0;
+
+ head = basic_block_head[b];
+ next_tail = NEXT_INSN (basic_block_end[b]);
+
+ /* Determine the current line-number. We want to know the current
+ line number of the first insn of the block here, in case it is
+ different from the true line number that was saved earlier. If
+ different, then we need a line number note before the first insn
+ of this block. If it happens to be the same, then we don't want to
+ emit another line number note here. */
+ for (line = head; line; line = PREV_INSN (line))
+ if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
+ break;
+
+ /* Walk the insns keeping track of the current line-number and inserting
+ the line-number notes as needed. */
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
+ line = insn;
+ /* This used to emit line number notes before every non-deleted note.
+ However, this confuses a debugger, because line notes not separated
+ by real instructions all end up at the same address. I can find no
+ use for line number notes before other notes, so none are emitted. */
+ else if (GET_CODE (insn) != NOTE
+ && (note = LINE_NOTE (insn)) != 0
+ && note != line
+ && (line == 0
+ || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
+ || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
+ {
+ line = note;
+ prev = PREV_INSN (insn);
+ if (LINE_NOTE (note))
+ {
+ /* Re-use the original line-number note. */
+ LINE_NOTE (note) = 0;
+ PREV_INSN (note) = prev;
+ NEXT_INSN (prev) = note;
+ PREV_INSN (insn) = note;
+ NEXT_INSN (note) = insn;
+ }
+ else
+ {
+ notes++;
+ new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
+ NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
+ }
+ }
+ if (file && notes)
+ fprintf (file, ";; added %d line-number notes\n", notes);
+ }
+
+ if (file)
+ {
+ fprintf (file, ";; total time = %d\n;; new basic block head = %d\n;; new basic block end = %d\n\n",
+ clock, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
+ }
+
+ /* Yow! We're done! */
+ free_pending_lists ();
+
+ return;
+}
+
+/* Subroutine of split_hard_reg_notes. Searches X for any reference to
+ REGNO, returning the rtx of the reference found if any. Otherwise,
+ returns 0. */
+
+static rtx
+regno_use_in (regno, x)
+ int regno;
+ rtx x;
+{
+ register char *fmt;
+ int i, j;
+ rtx tem;
+
+ if (GET_CODE (x) == REG && REGNO (x) == regno)
+ return x;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (tem = regno_use_in (regno, XEXP (x, i)))
+ return tem;
+ }
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (tem = regno_use_in (regno , XVECEXP (x, i, j)))
+ return tem;
+ }
+
+ return 0;
+}
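+
+/* E.g. regno_use_in (3, (set (reg:SI 5) (plus:SI (reg:SI 3) (const_int 1))))
+   walks the expression tree and returns the embedded (reg:SI 3), so the
+   caller can attach a REG_DEAD note with the right mode.  */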
+
+/* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
+ needed for the hard register mentioned in the note. This can happen
+ if the reference to the hard register in the original insn was split into
+ several smaller hard register references in the split insns. */
+
+static void
+split_hard_reg_notes (note, first, last, orig_insn)
+ rtx note, first, last, orig_insn;
+{
+ rtx reg, temp, link;
+ int n_regs, i, new_reg;
+ rtx insn;
+
+ /* Assume that this is a REG_DEAD note. */
+ if (REG_NOTE_KIND (note) != REG_DEAD)
+ abort ();
+
+ reg = XEXP (note, 0);
+
+ n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
+
+ for (i = 0; i < n_regs; i++)
+ {
+ new_reg = REGNO (reg) + i;
+
+ /* Check for references to new_reg in the split insns. */
+ for (insn = last; ; insn = PREV_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (temp = regno_use_in (new_reg, PATTERN (insn))))
+ {
+ /* Create a new reg dead note here. */
+ link = rtx_alloc (EXPR_LIST);
+ PUT_REG_NOTE_KIND (link, REG_DEAD);
+ XEXP (link, 0) = temp;
+ XEXP (link, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = link;
+
+	      /* If this note killed multiple registers, advance I past
+		 the extra words.  */
+ i += HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) - 1;
+
+ break;
+ }
+ /* It isn't mentioned anywhere, so no new reg note is needed for
+ this register. */
+ if (insn == first)
+ break;
+ }
+ }
+}
+
+/* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
+ insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
+
+static void
+new_insn_dead_notes (pat, insn, last, orig_insn)
+ rtx pat, insn, last, orig_insn;
+{
+ rtx dest, tem, set;
+
+ /* PAT is either a CLOBBER or a SET here. */
+ dest = XEXP (pat, 0);
+
+ while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == SIGN_EXTRACT)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (dest) == REG)
+ {
+ for (tem = last; tem != insn; tem = PREV_INSN (tem))
+ {
+ if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
+ && reg_overlap_mentioned_p (dest, PATTERN (tem))
+ && (set = single_set (tem)))
+ {
+ rtx tem_dest = SET_DEST (set);
+
+ while (GET_CODE (tem_dest) == ZERO_EXTRACT
+ || GET_CODE (tem_dest) == SUBREG
+ || GET_CODE (tem_dest) == STRICT_LOW_PART
+ || GET_CODE (tem_dest) == SIGN_EXTRACT)
+ tem_dest = XEXP (tem_dest, 0);
+
+ if (! rtx_equal_p (tem_dest, dest))
+ {
+		  /* Use the same scheme as combine.c: don't put both REG_DEAD
+		     and REG_UNUSED notes on the same insn.  */
+ if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
+ && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
+ {
+ rtx note = rtx_alloc (EXPR_LIST);
+ PUT_REG_NOTE_KIND (note, REG_DEAD);
+ XEXP (note, 0) = dest;
+ XEXP (note, 1) = REG_NOTES (tem);
+ REG_NOTES (tem) = note;
+ }
+ /* The reg only dies in one insn, the last one that uses
+ it. */
+ break;
+ }
+ else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
+ /* We found an instruction that both uses the register,
+ and sets it, so no new REG_NOTE is needed for this set. */
+ break;
+ }
+ }
+ /* If this is a set, it must die somewhere, unless it is the dest of
+ the original insn, and hence is live after the original insn. Abort
+ if it isn't supposed to be live after the original insn.
+
+ If this is a clobber, then just add a REG_UNUSED note. */
+ if (tem == insn)
+ {
+ int live_after_orig_insn = 0;
+ rtx pattern = PATTERN (orig_insn);
+ int i;
+
+ if (GET_CODE (pat) == CLOBBER)
+ {
+ rtx note = rtx_alloc (EXPR_LIST);
+ PUT_REG_NOTE_KIND (note, REG_UNUSED);
+ XEXP (note, 0) = dest;
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ return;
+ }
+
+ /* The original insn could have multiple sets, so search the
+ insn for all sets. */
+ if (GET_CODE (pattern) == SET)
+ {
+ if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
+ live_after_orig_insn = 1;
+ }
+ else if (GET_CODE (pattern) == PARALLEL)
+ {
+ for (i = 0; i < XVECLEN (pattern, 0); i++)
+ if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
+ && reg_overlap_mentioned_p (dest,
+ SET_DEST (XVECEXP (pattern,
+ 0, i))))
+ live_after_orig_insn = 1;
+ }
+
+ if (! live_after_orig_insn)
+ abort ();
+ }
+ }
+}
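+
+/* Typical case: a split produces (set (reg 100) X) followed by an insn
+   that uses (reg 100) to build the original destination.  Pseudo 100 was
+   born and died inside ORIG_INSN and so carried no REG_DEAD note; the
+   scan above finds its last use among the new insns and attaches one
+   there.  */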
+
+/* Subroutine of update_flow_info. Update the value of reg_n_sets for all
+ registers modified by X. INC is -1 if the containing insn is being deleted,
+ and is 1 if the containing insn is a newly generated insn. */
+
+static void
+update_n_sets (x, inc)
+ rtx x;
+ int inc;
+{
+ rtx dest = SET_DEST (x);
+
+ while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
+ dest = SUBREG_REG (dest);
+
+ if (GET_CODE (dest) == REG)
+ {
+ int regno = REGNO (dest);
+
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ register int i;
+ int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
+
+ for (i = regno; i < endregno; i++)
+ reg_n_sets[i] += inc;
+ }
+ else
+ reg_n_sets[regno] += inc;
+ }
+}
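+
+/* E.g. splitting one insn into two calls update_n_sets (old SET, -1)
+   once and update_n_sets (new SET, 1) for each replacement, so a pseudo
+   that was set once but is now set twice ends up with reg_n_sets == 2,
+   which keeps local-alloc from wrongly promoting its REG_EQUAL note to
+   REG_EQUIV.  */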
+
+/* Updates all flow-analysis related quantities (including REG_NOTES) for
+ the insns from FIRST to LAST inclusive that were created by splitting
+ ORIG_INSN. NOTES are the original REG_NOTES. */
+
+static void
+update_flow_info (notes, first, last, orig_insn)
+ rtx notes;
+ rtx first, last;
+ rtx orig_insn;
+{
+ rtx insn, note;
+ rtx next;
+ rtx orig_dest, temp;
+ rtx set;
+
+ /* Get and save the destination set by the original insn. */
+
+ orig_dest = single_set (orig_insn);
+ if (orig_dest)
+ orig_dest = SET_DEST (orig_dest);
+
+ /* Move REG_NOTES from the original insn to where they now belong. */
+
+ for (note = notes; note; note = next)
+ {
+ next = XEXP (note, 1);
+ switch (REG_NOTE_KIND (note))
+ {
+ case REG_DEAD:
+ case REG_UNUSED:
+ /* Move these notes from the original insn to the last new insn where
+ the register is now set. */
+
+ for (insn = last; ; insn = PREV_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
+ {
+ /* If this note refers to a multiple word hard register, it
+ may have been split into several smaller hard register
+ references, so handle it specially. */
+ temp = XEXP (note, 0);
+ if (REG_NOTE_KIND (note) == REG_DEAD
+ && GET_CODE (temp) == REG
+ && REGNO (temp) < FIRST_PSEUDO_REGISTER
+ && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
+ split_hard_reg_notes (note, first, last, orig_insn);
+ else
+ {
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ }
+
+ /* Sometimes need to convert REG_UNUSED notes to REG_DEAD
+ notes. */
+ /* ??? This won't handle multiple word registers correctly,
+ but should be good enough for now. */
+ if (REG_NOTE_KIND (note) == REG_UNUSED
+ && ! dead_or_set_p (insn, XEXP (note, 0)))
+ PUT_REG_NOTE_KIND (note, REG_DEAD);
+
+ /* The reg only dies in one insn, the last one that uses
+ it. */
+ break;
+ }
+	      /* It must die somewhere; fail if we couldn't find where it died.
+
+ If this is a REG_UNUSED note, then it must be a temporary
+ register that was not needed by this instantiation of the
+ pattern, so we can safely ignore it. */
+ if (insn == first)
+ {
+ if (REG_NOTE_KIND (note) != REG_UNUSED)
+ abort ();
+
+ break;
+ }
+ }
+ break;
+
+ case REG_WAS_0:
+ /* This note applies to the dest of the original insn. Find the
+ first new insn that now has the same dest, and move the note
+ there. */
+
+ if (! orig_dest)
+ abort ();
+
+ for (insn = first; ; insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (temp = single_set (insn))
+ && rtx_equal_p (SET_DEST (temp), orig_dest))
+ {
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ /* The reg is only zero before one insn, the first that
+		 sets it.  */
+ break;
+ }
+ /* It must be set somewhere, fail if we couldn't find where it
+ was set. */
+ if (insn == last)
+ abort ();
+ }
+ break;
+
+ case REG_EQUAL:
+ case REG_EQUIV:
+ /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
+ set is meaningless. Just drop the note. */
+ if (! orig_dest)
+ break;
+
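+	  /* Otherwise fall through: like REG_NO_CONFLICT, the note belongs
+	     on the last new insn that sets the original destination.  */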
+ case REG_NO_CONFLICT:
+ /* These notes apply to the dest of the original insn. Find the last
+ new insn that now has the same dest, and move the note there. */
+
+ if (! orig_dest)
+ abort ();
+
+ for (insn = last; ; insn = PREV_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && (temp = single_set (insn))
+ && rtx_equal_p (SET_DEST (temp), orig_dest))
+ {
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ /* Only put this note on one of the new insns. */
+ break;
+ }
+
+ /* The original dest must still be set someplace. Abort if we
+ couldn't find it. */
+ if (insn == first)
+ abort ();
+ }
+ break;
+
+ case REG_LIBCALL:
+ /* Move a REG_LIBCALL note to the first insn created, and update
+ the corresponding REG_RETVAL note. */
+ XEXP (note, 1) = REG_NOTES (first);
+ REG_NOTES (first) = note;
+
+ insn = XEXP (note, 0);
+ note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
+ if (note)
+ XEXP (note, 0) = first;
+ break;
+
+ case REG_RETVAL:
+ /* Move a REG_RETVAL note to the last insn created, and update
+ the corresponding REG_LIBCALL note. */
+ XEXP (note, 1) = REG_NOTES (last);
+ REG_NOTES (last) = note;
+
+ insn = XEXP (note, 0);
+ note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
+ if (note)
+ XEXP (note, 0) = last;
+ break;
+
+ case REG_NONNEG:
+ /* This should be moved to whichever instruction is a JUMP_INSN. */
+
+ for (insn = last; ; insn = PREV_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ /* Only put this note on one of the new insns. */
+ break;
+ }
+ /* Fail if we couldn't find a JUMP_INSN. */
+ if (insn == first)
+ abort ();
+ }
+ break;
+
+ case REG_INC:
+ /* This should be moved to whichever instruction now has the
+ increment operation. */
+ abort ();
+
+ case REG_LABEL:
+ /* Should be moved to the new insn(s) which use the label. */
+ for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL,
+ XEXP (note, 0), REG_NOTES (insn));
+ break;
+
+ case REG_CC_SETTER:
+ case REG_CC_USER:
+ /* These two notes will never appear until after reorg, so we don't
+ have to handle them here. */
+ default:
+ abort ();
+ }
+ }
+
+ /* Each new insn created, except the last, has a new set. If the destination
+ is a register, then this reg is now live across several insns, whereas
+ previously the dest reg was born and died within the same insn. To
+ reflect this, we now need a REG_DEAD note on the insn where this
+ dest reg dies.
+
+ Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
+
+ for (insn = first; insn != last; insn = NEXT_INSN (insn))
+ {
+ rtx pat;
+ int i;
+
+ pat = PATTERN (insn);
+ if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
+ new_insn_dead_notes (pat, insn, last, orig_insn);
+ else if (GET_CODE (pat) == PARALLEL)
+ {
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ if (GET_CODE (XVECEXP (pat, 0, i)) == SET
+ || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
+ new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
+ }
+ }
+
+ /* If any insn, except the last, uses the register set by the last insn,
+ then we need a new REG_DEAD note on that insn. In this case, there
+ would not have been a REG_DEAD note for this register in the original
+ insn because it was used and set within one insn.
+
+ There is no new REG_DEAD note needed if the last insn uses the register
+ that it is setting. */
+
+ set = single_set (last);
+ if (set)
+ {
+ rtx dest = SET_DEST (set);
+
+ while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
+ || GET_CODE (dest) == STRICT_LOW_PART
+ || GET_CODE (dest) == SIGN_EXTRACT)
+ dest = XEXP (dest, 0);
+
+ if (GET_CODE (dest) == REG
+ && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
+ {
+ for (insn = PREV_INSN (last); ; insn = PREV_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_mentioned_p (dest, PATTERN (insn))
+ && (set = single_set (insn)))
+ {
+ rtx insn_dest = SET_DEST (set);
+
+ while (GET_CODE (insn_dest) == ZERO_EXTRACT
+ || GET_CODE (insn_dest) == SUBREG
+ || GET_CODE (insn_dest) == STRICT_LOW_PART
+ || GET_CODE (insn_dest) == SIGN_EXTRACT)
+ insn_dest = XEXP (insn_dest, 0);
+
+ if (insn_dest != dest)
+ {
+ note = rtx_alloc (EXPR_LIST);
+ PUT_REG_NOTE_KIND (note, REG_DEAD);
+ XEXP (note, 0) = dest;
+ XEXP (note, 1) = REG_NOTES (insn);
+ REG_NOTES (insn) = note;
+ /* The reg only dies in one insn, the last one
+ that uses it. */
+ break;
+ }
+ }
+ if (insn == first)
+ break;
+ }
+ }
+ }
+
+ /* If the original dest is modifying a multiple register target, and the
+ original instruction was split such that the original dest is now set
+ by two or more SUBREG sets, then the split insns no longer kill the
+ destination of the original insn.
+
+ In this case, if there exists an instruction in the same basic block,
+ before the split insn, which uses the original dest, and this use is
+ killed by the original insn, then we must remove the REG_DEAD note on
+ this insn, because it is now superfluous.
+
+ This does not apply when a hard register gets split, because the code
+ knows how to handle overlapping hard registers properly. */
+ if (orig_dest && GET_CODE (orig_dest) == REG)
+ {
+ int found_orig_dest = 0;
+ int found_split_dest = 0;
+
+ for (insn = first; ; insn = NEXT_INSN (insn))
+ {
+ set = single_set (insn);
+ if (set)
+ {
+ if (GET_CODE (SET_DEST (set)) == REG
+ && REGNO (SET_DEST (set)) == REGNO (orig_dest))
+ {
+ found_orig_dest = 1;
+ break;
+ }
+ else if (GET_CODE (SET_DEST (set)) == SUBREG
+ && SUBREG_REG (SET_DEST (set)) == orig_dest)
+ {
+ found_split_dest = 1;
+ break;
+ }
+ }
+
+ if (insn == last)
+ break;
+ }
+
+ if (found_split_dest)
+ {
+ /* Search backwards from FIRST, looking for the first insn that uses
+ the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
+ If we find an insn, and it has a REG_DEAD note, then delete the
+ note. */
+
+ for (insn = first; insn; insn = PREV_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL
+ || GET_CODE (insn) == JUMP_INSN)
+ break;
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_mentioned_p (orig_dest, insn))
+ {
+ note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
+ if (note)
+ remove_note (insn, note);
+ }
+ }
+ }
+ else if (! found_orig_dest)
+ {
+ /* This should never happen. */
+ abort ();
+ }
+ }
+
+  /* Update reg_n_sets.  This is necessary to prevent local alloc from
+     converting REG_EQUAL notes to REG_EQUIV when splitting has changed
+     a reg from being set once to being set multiple times.  */
+
+ {
+ rtx x = PATTERN (orig_insn);
+ RTX_CODE code = GET_CODE (x);
+
+ if (code == SET || code == CLOBBER)
+ update_n_sets (x, -1);
+ else if (code == PARALLEL)
+ {
+ int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ code = GET_CODE (XVECEXP (x, 0, i));
+ if (code == SET || code == CLOBBER)
+ update_n_sets (XVECEXP (x, 0, i), -1);
+ }
+ }
+
+ for (insn = first; ; insn = NEXT_INSN (insn))
+ {
+ x = PATTERN (insn);
+ code = GET_CODE (x);
+
+ if (code == SET || code == CLOBBER)
+ update_n_sets (x, 1);
+ else if (code == PARALLEL)
+ {
+ int i;
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ code = GET_CODE (XVECEXP (x, 0, i));
+ if (code == SET || code == CLOBBER)
+ update_n_sets (XVECEXP (x, 0, i), 1);
+ }
+ }
+
+ if (insn == last)
+ break;
+ }
+ }
+}
+
+/* The one entry point in this file. DUMP_FILE is the dump file for
+ this pass. */
+
+void
+schedule_insns (dump_file)
+ FILE *dump_file;
+{
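+  /* Size the per-insn arrays for the worst case after the splitting done
+     below, where each insn may become up to MAX_INSNS_PER_SPLIT insns.  */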
+ int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
+ int b;
+ rtx insn;
+
+ /* Taking care of this degenerate case makes the rest of
+ this code simpler. */
+ if (n_basic_blocks == 0)
+ return;
+
+ /* Create an insn here so that we can hang dependencies off of it later. */
+ sched_before_next_call
+ = gen_rtx (INSN, VOIDmode, 0, NULL_RTX, NULL_RTX,
+ NULL_RTX, 0, NULL_RTX, 0);
+
+ /* Initialize the unused_*_lists. We can't use the ones left over from
+ the previous function, because gcc has freed that memory. We can use
+     the ones left over from the first sched pass in the second pass, however,
+ so only clear them on the first sched pass. The first pass is before
+ reload if flag_schedule_insns is set, otherwise it is afterwards. */
+
+ if (reload_completed == 0 || ! flag_schedule_insns)
+ {
+ unused_insn_list = 0;
+ unused_expr_list = 0;
+ }
+
+ /* We create no insns here, only reorder them, so we
+ remember how far we can cut back the stack on exit. */
+
+ /* Allocate data for this pass. See comments, above,
+ for what these vectors do. */
+ insn_luid = (int *) alloca (max_uid * sizeof (int));
+ insn_priority = (int *) alloca (max_uid * sizeof (int));
+ insn_tick = (int *) alloca (max_uid * sizeof (int));
+ insn_costs = (short *) alloca (max_uid * sizeof (short));
+ insn_units = (short *) alloca (max_uid * sizeof (short));
+ insn_blockage = (unsigned int *) alloca (max_uid * sizeof (unsigned int));
+ insn_ref_count = (int *) alloca (max_uid * sizeof (int));
+
+ if (reload_completed == 0)
+ {
+ sched_reg_n_deaths = (short *) alloca (max_regno * sizeof (short));
+ sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
+ sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
+ bb_dead_regs = (regset) alloca (regset_bytes);
+ bb_live_regs = (regset) alloca (regset_bytes);
+ bzero ((char *) sched_reg_n_calls_crossed, max_regno * sizeof (int));
+ bzero ((char *) sched_reg_live_length, max_regno * sizeof (int));
+ bcopy ((char *) reg_n_deaths, (char *) sched_reg_n_deaths,
+ max_regno * sizeof (short));
+ init_alias_analysis ();
+ }
+ else
+ {
+ sched_reg_n_deaths = 0;
+ sched_reg_n_calls_crossed = 0;
+ sched_reg_live_length = 0;
+ bb_dead_regs = 0;
+ bb_live_regs = 0;
+ if (! flag_schedule_insns)
+ init_alias_analysis ();
+ }
+
+ if (write_symbols != NO_DEBUG)
+ {
+ rtx line;
+
+ line_note = (rtx *) alloca (max_uid * sizeof (rtx));
+ bzero ((char *) line_note, max_uid * sizeof (rtx));
+ line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
+ bzero ((char *) line_note_head, n_basic_blocks * sizeof (rtx));
+
+ /* Determine the line-number at the start of each basic block.
+ This must be computed and saved now, because after a basic block's
+ predecessor has been scheduled, it is impossible to accurately
+ determine the correct line number for the first insn of the block. */
+
+ for (b = 0; b < n_basic_blocks; b++)
+ for (line = basic_block_head[b]; line; line = PREV_INSN (line))
+ if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
+ {
+ line_note_head[b] = line;
+ break;
+ }
+ }
+
+ bzero ((char *) insn_luid, max_uid * sizeof (int));
+ bzero ((char *) insn_priority, max_uid * sizeof (int));
+ bzero ((char *) insn_tick, max_uid * sizeof (int));
+ bzero ((char *) insn_costs, max_uid * sizeof (short));
+ bzero ((char *) insn_units, max_uid * sizeof (short));
+ bzero ((char *) insn_blockage, max_uid * sizeof (unsigned int));
+ bzero ((char *) insn_ref_count, max_uid * sizeof (int));
+
+ /* Schedule each basic block, block by block. */
+
+ /* ??? Add a NOTE after the last insn of the last basic block. It is not
+ known why this is done. */
+
+ insn = basic_block_end[n_basic_blocks-1];
+ if (NEXT_INSN (insn) == 0
+ || (GET_CODE (insn) != NOTE
+ && GET_CODE (insn) != CODE_LABEL
+ /* Don't emit a NOTE if it would end up between an unconditional
+ jump and a BARRIER. */
+ && ! (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (NEXT_INSN (insn)) == BARRIER)))
+ emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks-1]);
+
+ for (b = 0; b < n_basic_blocks; b++)
+ {
+ rtx insn, next;
+
+ note_list = 0;
+
+ for (insn = basic_block_head[b]; ; insn = next)
+ {
+ rtx prev;
+ rtx set;
+
+	  /* Can't use `next_real_insn' because that might go across
+	     CODE_LABELs and thus skip out of this basic block.  */
+ next = NEXT_INSN (insn);
+ if (GET_CODE (insn) != INSN)
+ {
+ if (insn == basic_block_end[b])
+ break;
+
+ continue;
+ }
+
+ /* Don't split no-op move insns. These should silently disappear
+ later in final. Splitting such insns would break the code
+ that handles REG_NO_CONFLICT blocks. */
+ set = single_set (insn);
+ if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
+ {
+ if (insn == basic_block_end[b])
+ break;
+
+ /* Nops get in the way while scheduling, so delete them now if
+ register allocation has already been done. It is too risky
+ to try to do this before register allocation, and there are
+		 unlikely to be very many nops then anyway.  */
+ if (reload_completed)
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ NOTE_SOURCE_FILE (insn) = 0;
+ }
+
+ continue;
+ }
+
+ /* Split insns here to get max fine-grain parallelism. */
+ prev = PREV_INSN (insn);
+ if (reload_completed == 0)
+ {
+ rtx last, first = PREV_INSN (insn);
+ rtx notes = REG_NOTES (insn);
+
+ last = try_split (PATTERN (insn), insn, 1);
+ if (last != insn)
+ {
+ /* try_split returns the NOTE that INSN became. */
+ first = NEXT_INSN (first);
+ update_flow_info (notes, first, last, insn);
+
+ PUT_CODE (insn, NOTE);
+ NOTE_SOURCE_FILE (insn) = 0;
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ if (insn == basic_block_head[b])
+ basic_block_head[b] = first;
+ if (insn == basic_block_end[b])
+ {
+ basic_block_end[b] = last;
+ break;
+ }
+ }
+ }
+
+ if (insn == basic_block_end[b])
+ break;
+ }
+
+ schedule_block (b, dump_file);
+
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ }
+
+ /* Reposition the prologue and epilogue notes in case we moved the
+ prologue/epilogue insns. */
+ if (reload_completed)
+ reposition_prologue_and_epilogue_notes (get_insns ());
+
+ if (write_symbols != NO_DEBUG)
+ {
+ rtx line = 0;
+ rtx insn = get_insns ();
+ int active_insn = 0;
+ int notes = 0;
+
+      /* Walk the insns, deleting redundant line-number notes.  Many of
+	 these were already present before scheduling; the remainder tend
+	 to occur at basic block boundaries.  */
+ for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
+ if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
+ {
+ /* If there are no active insns following, INSN is redundant. */
+ if (active_insn == 0)
+ {
+ notes++;
+ NOTE_SOURCE_FILE (insn) = 0;
+ NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
+ }
+ /* If the line number is unchanged, LINE is redundant. */
+ else if (line
+ && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
+ && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
+ {
+ notes++;
+ NOTE_SOURCE_FILE (line) = 0;
+ NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
+ line = insn;
+ }
+ else
+ line = insn;
+ active_insn = 0;
+ }
+ else if (! ((GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
+ || (GET_CODE (insn) == INSN
+ && (GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER))))
+ active_insn++;
+
+ if (dump_file && notes)
+ fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
+ }
+
+ if (reload_completed == 0)
+ {
+ int regno;
+ for (regno = 0; regno < max_regno; regno++)
+ if (sched_reg_live_length[regno])
+ {
+ if (dump_file)
+ {
+ if (reg_live_length[regno] > sched_reg_live_length[regno])
+ fprintf (dump_file,
+ ";; register %d life shortened from %d to %d\n",
+ regno, reg_live_length[regno],
+ sched_reg_live_length[regno]);
+ /* Negative values are special; don't overwrite the current
+ reg_live_length value if it is negative. */
+ else if (reg_live_length[regno] < sched_reg_live_length[regno]
+ && reg_live_length[regno] >= 0)
+ fprintf (dump_file,
+ ";; register %d life extended from %d to %d\n",
+ regno, reg_live_length[regno],
+ sched_reg_live_length[regno]);
+
+ if (! reg_n_calls_crossed[regno]
+ && sched_reg_n_calls_crossed[regno])
+ fprintf (dump_file,
+ ";; register %d now crosses calls\n", regno);
+ else if (reg_n_calls_crossed[regno]
+ && ! sched_reg_n_calls_crossed[regno]
+ && reg_basic_block[regno] != REG_BLOCK_GLOBAL)
+ fprintf (dump_file,
+ ";; register %d no longer crosses calls\n", regno);
+
+ }
+ /* Negative values are special; don't overwrite the current
+ reg_live_length value if it is negative. */
+ if (reg_live_length[regno] >= 0)
+ reg_live_length[regno] = sched_reg_live_length[regno];
+
+ /* We can't change the value of reg_n_calls_crossed to zero for
+ pseudos which are live in more than one block.
+
+ This is because combine might have made an optimization which
+ invalidated basic_block_live_at_start and reg_n_calls_crossed,
+ but it does not update them. If we update reg_n_calls_crossed
+ here, the two variables are now inconsistent, and this might
+ confuse the caller-save code into saving a register that doesn't
+ need to be saved. This is only a problem when we zero calls
+ crossed for a pseudo live in multiple basic blocks.
+
+ Alternatively, we could try to correctly update basic block live
+ at start here in sched, but that seems complicated. */
+ if (sched_reg_n_calls_crossed[regno]
+ || reg_basic_block[regno] != REG_BLOCK_GLOBAL)
+ reg_n_calls_crossed[regno] = sched_reg_n_calls_crossed[regno];
+ }
+ }
+}
+#endif /* INSN_SCHEDULING */
diff --git a/gnu/usr.bin/cc/cc_int/sdbout.c b/gnu/usr.bin/cc/cc_int/sdbout.c
new file mode 100644
index 0000000..6a03108
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/sdbout.c
@@ -0,0 +1,1530 @@
+/* Output sdb-format symbol table information from GNU compiler.
+ Copyright (C) 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* mike@tredysvr.Tredydev.Unisys.COM says:
+I modified the struct.c example and have a nm of a .o resulting from the
+AT&T C compiler. From the example below I would conclude the following:
+
+1. All .defs from structures are emitted as scanned. The example below
+ clearly shows the symbol table entries for BoxRec2 are after the first
+ function.
+
+2. All functions and their locals (including statics) are emitted as scanned.
+
+3. All nested unnamed union and structure .defs must be emitted before
+ the structure in which they are nested. The AT&T assembler is a
+ one pass beast as far as symbolics are concerned.
+
+4. All structure .defs are emitted before the typedefs that refer to them.
+
+5. All top level static and external variable definitions are moved to the
+ end of file with all top level statics occurring first before externs.
+
+6. All undefined references are at the end of the file.
+*/
+
+#include "config.h"
+
+#ifdef SDB_DEBUGGING_INFO
+
+#include "tree.h"
+#include "rtl.h"
+#include <stdio.h>
+#include "regs.h"
+#include "flags.h"
+#include "insn-config.h"
+#include "reload.h"
+
+/* Mips systems use the SDB functions to dump out symbols, but
+ do not supply usable syms.h include files. */
+#if defined(USG) && !defined(MIPS) && !defined (hpux)
+#include <syms.h>
+/* Use T_INT if we don't have T_VOID. */
+#ifndef T_VOID
+#define T_VOID T_INT
+#endif
+#else /* not USG, or MIPS */
+#include "gsyms.h"
+#endif /* not USG, or MIPS */
+
+/* This used to be #include <storclass.h> instead of syms.h.  */
+
+/* 1 if PARM is passed to this function in memory. */
+
+#define PARM_PASSED_IN_MEMORY(PARM) \
+ (GET_CODE (DECL_INCOMING_RTL (PARM)) == MEM)
+
+/* A C expression for the integer offset value of an automatic variable
+ (C_AUTO) having address X (an RTX). */
+#ifndef DEBUGGER_AUTO_OFFSET
+#define DEBUGGER_AUTO_OFFSET(X) \
+ (GET_CODE (X) == PLUS ? INTVAL (XEXP (X, 1)) : 0)
+#endif
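+
+/* For example, with the default definition above, an automatic variable
+   at address (plus (reg fp) (const_int -8)) gets the offset -8.  */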
+
+/* A C expression for the integer offset value of an argument (C_ARG)
+ having address X (an RTX). The nominal offset is OFFSET. */
+#ifndef DEBUGGER_ARG_OFFSET
+#define DEBUGGER_ARG_OFFSET(OFFSET, X) (OFFSET)
+#endif
+
+/* Line number of beginning of current function, minus one.
+ Negative means not in a function or not using sdb. */
+
+int sdb_begin_function_line = -1;
+
+/* Counter to generate unique "names" for nameless struct members. */
+
+static int unnamed_struct_number = 0;
+
+extern FILE *asm_out_file;
+
+extern tree current_function_decl;
+
+void sdbout_init ();
+void sdbout_symbol ();
+void sdbout_types ();
+
+static void sdbout_typedefs ();
+static void sdbout_syms ();
+static void sdbout_one_type ();
+static void sdbout_queue_anonymous_type ();
+static void sdbout_dequeue_anonymous_types ();
+static int plain_type_1 ();
+
+/* Define the default sizes for various types. */
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef SHORT_TYPE_SIZE
+#define SHORT_TYPE_SIZE (BITS_PER_UNIT * MIN ((UNITS_PER_WORD + 1) / 2, 2))
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_LONG_TYPE_SIZE
+#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef FLOAT_TYPE_SIZE
+#define FLOAT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef DOUBLE_TYPE_SIZE
+#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+/* Random macros describing parts of SDB data. */
+
+/* Put something here if lines get too long */
+#define CONTIN
+
+/* Default value of delimiter is ";". */
+#ifndef SDB_DELIM
+#define SDB_DELIM ";"
+#endif
+
+/* Maximum number of dimensions the assembler will allow. */
+#ifndef SDB_MAX_DIM
+#define SDB_MAX_DIM 4
+#endif
+
+#ifndef PUT_SDB_SCL
+#define PUT_SDB_SCL(a) fprintf(asm_out_file, "\t.scl\t%d%s", (a), SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_INT_VAL
+#define PUT_SDB_INT_VAL(a) fprintf (asm_out_file, "\t.val\t%d%s", (a), SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_VAL
+#define PUT_SDB_VAL(a) \
+( fputs ("\t.val\t", asm_out_file), \
+ output_addr_const (asm_out_file, (a)), \
+ fprintf (asm_out_file, SDB_DELIM))
+#endif
+
+#ifndef PUT_SDB_DEF
+#define PUT_SDB_DEF(a) \
+do { fprintf (asm_out_file, "\t.def\t"); \
+ ASM_OUTPUT_LABELREF (asm_out_file, a); \
+ fprintf (asm_out_file, SDB_DELIM); } while (0)
+#endif
+
+#ifndef PUT_SDB_PLAIN_DEF
+#define PUT_SDB_PLAIN_DEF(a) fprintf(asm_out_file,"\t.def\t.%s%s",a, SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_ENDEF
+#define PUT_SDB_ENDEF fputs("\t.endef\n", asm_out_file)
+#endif
+
+#ifndef PUT_SDB_TYPE
+#define PUT_SDB_TYPE(a) fprintf(asm_out_file, "\t.type\t0%o%s", a, SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_SIZE
+#define PUT_SDB_SIZE(a) fprintf(asm_out_file, "\t.size\t%d%s", a, SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_START_DIM
+#define PUT_SDB_START_DIM fprintf(asm_out_file, "\t.dim\t")
+#endif
+
+#ifndef PUT_SDB_NEXT_DIM
+#define PUT_SDB_NEXT_DIM(a) fprintf(asm_out_file, "%d,", a)
+#endif
+
+#ifndef PUT_SDB_LAST_DIM
+#define PUT_SDB_LAST_DIM(a) fprintf(asm_out_file, "%d%s", a, SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_TAG
+#define PUT_SDB_TAG(a) \
+do { fprintf (asm_out_file, "\t.tag\t"); \
+ ASM_OUTPUT_LABELREF (asm_out_file, a); \
+ fprintf (asm_out_file, SDB_DELIM); } while (0)
+#endif
+
+#ifndef PUT_SDB_BLOCK_START
+#define PUT_SDB_BLOCK_START(LINE) \
+ fprintf (asm_out_file, \
+ "\t.def\t.bb%s\t.val\t.%s\t.scl\t100%s\t.line\t%d%s\t.endef\n", \
+ SDB_DELIM, SDB_DELIM, SDB_DELIM, (LINE), SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_BLOCK_END
+#define PUT_SDB_BLOCK_END(LINE) \
+ fprintf (asm_out_file, \
+ "\t.def\t.eb%s\t.val\t.%s\t.scl\t100%s\t.line\t%d%s\t.endef\n", \
+ SDB_DELIM, SDB_DELIM, SDB_DELIM, (LINE), SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_FUNCTION_START
+#define PUT_SDB_FUNCTION_START(LINE) \
+ fprintf (asm_out_file, \
+ "\t.def\t.bf%s\t.val\t.%s\t.scl\t101%s\t.line\t%d%s\t.endef\n", \
+ SDB_DELIM, SDB_DELIM, SDB_DELIM, (LINE), SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_FUNCTION_END
+#define PUT_SDB_FUNCTION_END(LINE) \
+ fprintf (asm_out_file, \
+ "\t.def\t.ef%s\t.val\t.%s\t.scl\t101%s\t.line\t%d%s\t.endef\n", \
+ SDB_DELIM, SDB_DELIM, SDB_DELIM, (LINE), SDB_DELIM)
+#endif
+
+#ifndef PUT_SDB_EPILOGUE_END
+#define PUT_SDB_EPILOGUE_END(NAME) \
+do { fprintf (asm_out_file, "\t.def\t"); \
+ ASM_OUTPUT_LABELREF (asm_out_file, NAME); \
+ fprintf (asm_out_file, \
+ "%s\t.val\t.%s\t.scl\t-1%s\t.endef\n", \
+ SDB_DELIM, SDB_DELIM, SDB_DELIM); } while (0)
+#endif
+
+#ifndef SDB_GENERATE_FAKE
+#define SDB_GENERATE_FAKE(BUFFER, NUMBER) \
+ sprintf ((BUFFER), ".%dfake", (NUMBER));
+#endif
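+
+/* With the default definitions above, a public `int x' would come out
+   roughly as `.def _x; .val _x; .scl 2; .type 04; .endef'; the exact
+   storage-class and type codes come from the host's syms.h.  */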
+
+/* Return the sdb tag identifier string for TYPE
+ if TYPE has already been defined; otherwise return a null pointer. */
+
+#define KNOWN_TYPE_TAG(type) TYPE_SYMTAB_POINTER (type)
+
+/* Set the sdb tag identifier string for TYPE to NAME. */
+
+#define SET_KNOWN_TYPE_TAG(TYPE, NAME) \
+ TYPE_SYMTAB_POINTER (TYPE) = (NAME)
+
+/* Return the name (a string) of the struct, union or enum tag
+ described by the TREE_LIST node LINK. This is 0 for an anonymous one. */
+
+#define TAG_NAME(link) \
+ (((link) && TREE_PURPOSE ((link)) \
+ && IDENTIFIER_POINTER (TREE_PURPOSE ((link)))) \
+ ? IDENTIFIER_POINTER (TREE_PURPOSE ((link))) : (char *) 0)
+
+/* Ensure we don't output a negative line number. */
+#define MAKE_LINE_SAFE(line) \
+ if (line <= sdb_begin_function_line) line = sdb_begin_function_line + 1
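+/* (Block and function line numbers are emitted relative to
+   sdb_begin_function_line, so this keeps the difference at least 1.)  */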
+
+/* Set up for SDB output at the start of compilation. */
+
+void
+sdbout_init (asm_file, input_file_name, syms)
+ FILE *asm_file;
+ char *input_file_name;
+ tree syms;
+{
+#ifdef RMS_QUICK_HACK_1
+ tree t;
+ for (t = syms; t; t = TREE_CHAIN (t))
+ if (DECL_NAME (t) && IDENTIFIER_POINTER (DECL_NAME (t)) != 0
+ && !strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__vtbl_ptr_type"))
+ sdbout_symbol (t, 0);
+#endif
+
+#if 0 /* Nothing need be output for the predefined types. */
+ /* Get all permanent types that have typedef names,
+ and output them all, except for those already output. */
+
+ sdbout_typedefs (syms);
+#endif
+}
+
+#if 0
+
+/* Return the tag identifier for TYPE.  */
+
+char *
+tag_of_ru_type (type,link)
+ tree type,link;
+{
+ if (TYPE_SYMTAB_ADDRESS (type))
+ return TYPE_SYMTAB_ADDRESS (type);
+ if (link && TREE_PURPOSE (link)
+ && IDENTIFIER_POINTER (TREE_PURPOSE (link)))
+ TYPE_SYMTAB_ADDRESS (type) = IDENTIFIER_POINTER (TREE_PURPOSE (link));
+ else
+ return (char *) TYPE_SYMTAB_ADDRESS (type);
+}
+#endif
+
+/* Return a unique string to name an anonymous type. */
+
+static char *
+gen_fake_label ()
+{
+ char label[10];
+ char *labelstr;
+ SDB_GENERATE_FAKE (label, unnamed_struct_number);
+ unnamed_struct_number++;
+ labelstr = (char *) permalloc (strlen (label) + 1);
+ strcpy (labelstr, label);
+ return labelstr;
+}
+
+/* Return the number which describes TYPE for SDB.
+ For pointers, etc., this function is recursive.
+ Each record, union or enumeral type must already have had a
+ tag number output. */
+
+/* The number is given by d6d5d4d3d2d1bbbb
+   where bbbb is the 4-bit basic type and each di indicates one of
+   notype, ptr, fn, array.
+   Thus, char *foo () has bbbb=T_CHAR, d1=D_FCN, d2=D_PTR.
+   N_BTMASK = 017 (binary 1111)  basic type field.
+   N_TSHIFT = 2                  derived type shift.
+   N_BTSHFT = 4                  basic type shift.  */
+
+/* Produce the number that describes a pointer, function or array type.
+ PREV is the number describing the target, value or element type.
+ DT_type describes how to transform that type. */
+#define PUSH_DERIVED_LEVEL(DT_type,PREV) \
+ ((((PREV) & ~(int)N_BTMASK) << (int)N_TSHIFT) \
+ | ((int)DT_type << (int)N_BTSHFT) \
+ | ((PREV) & (int)N_BTMASK))
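+
+/* For example, for `char *', plain_type_1 first yields T_CHAR; then
+   PUSH_DERIVED_LEVEL (DT_PTR, T_CHAR) shifts the old derived bits left
+   by N_TSHIFT, puts DT_PTR in the first derived slot, and keeps the
+   basic type in the low four bits; with the usual COFF values
+   (T_CHAR=2, DT_PTR=1) the result is 0x12.  */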
+
+/* Number of elements used in sdb_dims. */
+static int sdb_n_dims = 0;
+
+/* Table of array dimensions of current type. */
+static int sdb_dims[SDB_MAX_DIM];
+
+/* Size of outermost array currently being processed. */
+static int sdb_type_size = -1;
+
+static int
+plain_type (type)
+ tree type;
+{
+ int val = plain_type_1 (type);
+
+ /* If we have already saved up some array dimensions, print them now. */
+ if (sdb_n_dims > 0)
+ {
+ int i;
+ PUT_SDB_START_DIM;
+ for (i = sdb_n_dims - 1; i > 0; i--)
+ PUT_SDB_NEXT_DIM (sdb_dims[i]);
+ PUT_SDB_LAST_DIM (sdb_dims[0]);
+ sdb_n_dims = 0;
+
+ sdb_type_size = int_size_in_bytes (type);
+ /* Don't kill sdb if type is not laid out or has variable size. */
+ if (sdb_type_size < 0)
+ sdb_type_size = 0;
+ }
+ /* If we have computed the size of an array containing this type,
+ print it now. */
+ if (sdb_type_size >= 0)
+ {
+ PUT_SDB_SIZE (sdb_type_size);
+ sdb_type_size = -1;
+ }
+ return val;
+}
+
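+/* Return nonzero if NAME, an IDENTIFIER_NODE, contains a `<', i.e. is
+   the name of a template instantiation.  */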
+static int
+template_name_p (name)
+ tree name;
+{
+ register char *ptr = IDENTIFIER_POINTER (name);
+ while (*ptr && *ptr != '<')
+ ptr++;
+
+ return *ptr != '\0';
+}
+
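+/* Record a tag name for TYPE in its KNOWN_TYPE_TAG slot, inventing a
+   fake label if the type is anonymous, so later references can use it.  */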
+static void
+sdbout_record_type_name (type)
+ tree type;
+{
+ char *name = 0;
+ int no_name;
+
+ if (KNOWN_TYPE_TAG (type))
+ return;
+
+ if (TYPE_NAME (type) != 0)
+ {
+ tree t = 0;
+ /* Find the IDENTIFIER_NODE for the type name. */
+ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
+ {
+ t = TYPE_NAME (type);
+ }
+#if 1 /* As a temporary hack, use typedef names for C++ only. */
+ else if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && TYPE_LANG_SPECIFIC (type))
+ {
+ t = DECL_NAME (TYPE_NAME (type));
+ /* The DECL_NAME for templates includes "<>", which breaks
+ most assemblers. Use its assembler name instead, which
+ has been mangled into being safe. */
+ if (t && template_name_p (t))
+ t = DECL_ASSEMBLER_NAME (TYPE_NAME (type));
+ }
+#endif
+
+ /* Now get the name as a string, or invent one. */
+ if (t != NULL_TREE)
+ name = IDENTIFIER_POINTER (t);
+ }
+
+ no_name = (name == 0 || *name == 0);
+ if (no_name)
+ name = gen_fake_label ();
+
+ SET_KNOWN_TYPE_TAG (type, name);
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+ if (no_name)
+ sdbout_queue_anonymous_type (type);
+#endif
+}
+
+static int
+plain_type_1 (type)
+ tree type;
+{
+ if (type == 0)
+ type = void_type_node;
+ if (type == error_mark_node)
+ type = integer_type_node;
+ type = TYPE_MAIN_VARIANT (type);
+
+ switch (TREE_CODE (type))
+ {
+ case VOID_TYPE:
+ return T_VOID;
+ case INTEGER_TYPE:
+ {
+ int size = int_size_in_bytes (type) * BITS_PER_UNIT;
+
+ /* Carefully distinguish all the standard types of C,
+ without messing up if the language is not C.
+ Note that we check only for the names that contain spaces;
+ other names might occur by coincidence in other languages. */
+ if (TYPE_NAME (type) != 0
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (type)) != 0
+ && TREE_CODE (DECL_NAME (TYPE_NAME (type))) == IDENTIFIER_NODE)
+ {
+ char *name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));
+
+ if (!strcmp (name, "unsigned char"))
+ return T_UCHAR;
+ if (!strcmp (name, "signed char"))
+ return T_CHAR;
+ if (!strcmp (name, "unsigned int"))
+ return T_UINT;
+ if (!strcmp (name, "short int"))
+ return T_SHORT;
+ if (!strcmp (name, "short unsigned int"))
+ return T_USHORT;
+ if (!strcmp (name, "long int"))
+ return T_LONG;
+ if (!strcmp (name, "long unsigned int"))
+ return T_ULONG;
+ }
+
+ if (size == CHAR_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? T_UCHAR : T_CHAR);
+ if (size == SHORT_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? T_USHORT : T_SHORT);
+ if (size == INT_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? T_UINT : T_INT);
+ if (size == LONG_TYPE_SIZE)
+ return (TREE_UNSIGNED (type) ? T_ULONG : T_LONG);
+ return 0;
+ }
+
+ case REAL_TYPE:
+ {
+ int size = int_size_in_bytes (type) * BITS_PER_UNIT;
+ if (size == FLOAT_TYPE_SIZE)
+ return T_FLOAT;
+ if (size == DOUBLE_TYPE_SIZE)
+ return T_DOUBLE;
+ return 0;
+ }
+
+ case ARRAY_TYPE:
+ {
+ int m;
+ m = plain_type_1 (TREE_TYPE (type));
+ if (sdb_n_dims < SDB_MAX_DIM)
+ sdb_dims[sdb_n_dims++]
+ = (TYPE_DOMAIN (type)
+ ? TREE_INT_CST_LOW (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + 1
+ : 0);
+ return PUSH_DERIVED_LEVEL (DT_ARY, m);
+ }
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ case ENUMERAL_TYPE:
+ {
+ char *tag;
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+ sdbout_record_type_name (type);
+#endif
+#ifndef SDB_ALLOW_UNKNOWN_REFERENCES
+ if ((TREE_ASM_WRITTEN (type) && KNOWN_TYPE_TAG (type) != 0)
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+ || TYPE_MODE (type) != VOIDmode
+#endif
+ )
+#endif
+ {
+ /* Output the referenced structure tag name
+ only if the .def has already been finished.
+ At least on 386, the Unix assembler
+ cannot handle forward references to tags. */
+	  /* But the 88100 requires them, sigh...  */
+ /* And the MIPS requires unknown refs as well... */
+ tag = KNOWN_TYPE_TAG (type);
+ PUT_SDB_TAG (tag);
+ /* These 3 lines used to follow the close brace.
+ However, a size of 0 without a tag implies a tag of 0,
+ so if we don't know a tag, we can't mention the size. */
+ sdb_type_size = int_size_in_bytes (type);
+ if (sdb_type_size < 0)
+ sdb_type_size = 0;
+ }
+ return ((TREE_CODE (type) == RECORD_TYPE) ? T_STRUCT
+ : (TREE_CODE (type) == UNION_TYPE) ? T_UNION
+ : (TREE_CODE (type) == QUAL_UNION_TYPE) ? T_UNION
+ : T_ENUM);
+ }
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ {
+ int m = plain_type_1 (TREE_TYPE (type));
+ return PUSH_DERIVED_LEVEL (DT_PTR, m);
+ }
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ {
+ int m = plain_type_1 (TREE_TYPE (type));
+ return PUSH_DERIVED_LEVEL (DT_FCN, m);
+ }
+ default:
+ return 0;
+ }
+}
+
+/* Output the symbols defined in block number DO_BLOCK.
+ Set NEXT_BLOCK_NUMBER to 0 before calling.
+
+ This function works by walking the tree structure of blocks,
+ counting blocks until it finds the desired block. */
+
+static int do_block = 0;
+
+static int next_block_number;
+
+static void
+sdbout_block (block)
+ register tree block;
+{
+ while (block)
+ {
+      /* Ignore blocks that were never expanded or otherwise not marked
+	 as real.  */
+ if (TREE_USED (block))
+ {
+ /* When we reach the specified block, output its symbols. */
+ if (next_block_number == do_block)
+ {
+ sdbout_syms (BLOCK_VARS (block));
+ }
+
+ /* If we are past the specified block, stop the scan. */
+ if (next_block_number > do_block)
+ return;
+
+ next_block_number++;
+
+ /* Scan the blocks within this block. */
+ sdbout_block (BLOCK_SUBBLOCKS (block));
+ }
+
+ block = BLOCK_CHAIN (block);
+ }
+}
+
+/* Call sdbout_symbol on each decl in the chain SYMS. */
+
+static void
+sdbout_syms (syms)
+ tree syms;
+{
+ while (syms)
+ {
+ if (TREE_CODE (syms) != LABEL_DECL)
+ sdbout_symbol (syms, 1);
+ syms = TREE_CHAIN (syms);
+ }
+}
+
+/* Output SDB information for a symbol described by DECL.
+ LOCAL is nonzero if the symbol is not file-scope. */
+
+void
+sdbout_symbol (decl, local)
+ tree decl;
+ int local;
+{
+ tree type = TREE_TYPE (decl);
+ tree context = NULL_TREE;
+ rtx value;
+ int regno = -1;
+ char *name;
+
+ sdbout_one_type (type);
+
+#if 0 /* This loses when functions are marked to be ignored,
+ which happens in the C++ front end. */
+ if (DECL_IGNORED_P (decl))
+ return;
+#endif
+
+ switch (TREE_CODE (decl))
+ {
+ case CONST_DECL:
+ /* Enum values are defined by defining the enum type. */
+ return;
+
+ case FUNCTION_DECL:
+ /* Don't mention a nested function under its parent. */
+ context = decl_function_context (decl);
+ if (context == current_function_decl)
+ return;
+ if (DECL_EXTERNAL (decl))
+ return;
+ if (GET_CODE (DECL_RTL (decl)) != MEM
+ || GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF)
+ return;
+ PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+ PUT_SDB_VAL (XEXP (DECL_RTL (decl), 0));
+ PUT_SDB_SCL (TREE_PUBLIC (decl) ? C_EXT : C_STAT);
+ break;
+
+ case TYPE_DECL:
+ /* Done with tagged types. */
+ if (DECL_NAME (decl) == 0)
+ return;
+ if (DECL_IGNORED_P (decl))
+ return;
+
+ /* Output typedef name. */
+ if (template_name_p (DECL_NAME (decl)))
+ PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+ else
+ PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_NAME (decl)));
+ PUT_SDB_SCL (C_TPDEF);
+ break;
+
+ case PARM_DECL:
+ /* Parm decls go in their own separate chains
+ and are output by sdbout_reg_parms and sdbout_parms. */
+ abort ();
+
+ case VAR_DECL:
+ /* Don't mention a variable that is external.
+ Let the file that defines it describe it. */
+ if (DECL_EXTERNAL (decl))
+ return;
+
+ /* Ignore __FUNCTION__, etc. */
+ if (DECL_IGNORED_P (decl))
+ return;
+
+      /* If there was an error in the declaration, don't dump core:
+	 the RTL associated with the variable may not exist.  */
+ if (DECL_RTL (decl) == 0)
+ return;
+
+ DECL_RTL (decl) = eliminate_regs (DECL_RTL (decl), 0, NULL_RTX);
+#ifdef LEAF_REG_REMAP
+ if (leaf_function)
+ leaf_renumber_regs_insn (DECL_RTL (decl));
+#endif
+ value = DECL_RTL (decl);
+
+ /* Don't mention a variable at all
+ if it was completely optimized into nothingness.
+
+ If DECL was from an inline function, then its rtl
+ is not identically the rtl that was used in this
+ particular compilation. */
+ if (GET_CODE (value) == REG)
+ {
+ regno = REGNO (DECL_RTL (decl));
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ return;
+ }
+ else if (GET_CODE (value) == SUBREG)
+ {
+ int offset = 0;
+ while (GET_CODE (value) == SUBREG)
+ {
+ offset += SUBREG_WORD (value);
+ value = SUBREG_REG (value);
+ }
+ if (GET_CODE (value) == REG)
+ {
+ regno = REGNO (value);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ return;
+ regno += offset;
+ }
+ alter_subreg (DECL_RTL (decl));
+ value = DECL_RTL (decl);
+ }
+ /* Don't output anything if an auto variable
+ gets RTL that is static.
+ GAS version 2.2 can't handle such output. */
+ else if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0))
+ && ! TREE_STATIC (decl))
+ return;
+
+ /* Emit any structure, union, or enum type that has not been output.
+ This occurs for tag-less structs (et al) used to declare variables
+ within functions. */
+ if (TREE_CODE (type) == ENUMERAL_TYPE
+ || TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ {
+ if (TYPE_SIZE (type) != 0 /* not a forward reference */
+ && KNOWN_TYPE_TAG (type) == 0) /* not yet declared */
+ sdbout_one_type (type);
+ }
+
+ /* Defer SDB information for top-level initialized variables! */
+ if (! local
+ && GET_CODE (value) == MEM
+ && DECL_INITIAL (decl))
+ return;
+
+ /* C++ in 2.3 makes nameless symbols. That will be fixed later.
+ For now, avoid crashing. */
+ if (DECL_NAME (decl) == NULL_TREE)
+ return;
+
+      /* Record the name of the decl, starting a symtab entry.  */
+ if (DECL_LANG_SPECIFIC (decl))
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ else
+ name = IDENTIFIER_POINTER (DECL_NAME (decl));
+
+ if (GET_CODE (value) == MEM
+ && GET_CODE (XEXP (value, 0)) == SYMBOL_REF)
+ {
+ PUT_SDB_DEF (name);
+ if (TREE_PUBLIC (decl))
+ {
+ PUT_SDB_VAL (XEXP (value, 0));
+ PUT_SDB_SCL (C_EXT);
+ }
+ else
+ {
+ PUT_SDB_VAL (XEXP (value, 0));
+ PUT_SDB_SCL (C_STAT);
+ }
+ }
+ else if (regno >= 0)
+ {
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (regno));
+ PUT_SDB_SCL (C_REG);
+ }
+ else if (GET_CODE (value) == MEM
+ && (GET_CODE (XEXP (value, 0)) == MEM
+ || (GET_CODE (XEXP (value, 0)) == REG
+ && REGNO (XEXP (value, 0)) != HARD_FRAME_POINTER_REGNUM
+ && REGNO (XEXP (value, 0)) != STACK_POINTER_REGNUM)))
+	  /* If the value is indirect through memory or through a register
+	     that isn't the frame pointer, then the object is variable-sized
+	     and is addressed through that register or stack slot.  COFF has
+	     no way to represent this, so all we can do is output the
+	     variable as a pointer.  */
+ {
+ PUT_SDB_DEF (name);
+ if (GET_CODE (XEXP (value, 0)) == REG)
+ {
+ PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (REGNO (XEXP (value, 0))));
+ PUT_SDB_SCL (C_REG);
+ }
+ else
+ {
+ /* DECL_RTL looks like (MEM (MEM (PLUS (REG...)
+ (CONST_INT...)))).
+ We want the value of that CONST_INT. */
+ /* Encore compiler hates a newline in a macro arg, it seems. */
+ PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET
+ (XEXP (XEXP (value, 0), 0)));
+ PUT_SDB_SCL (C_AUTO);
+ }
+
+ type = build_pointer_type (TREE_TYPE (decl));
+ }
+ else if (GET_CODE (value) == MEM
+ && ((GET_CODE (XEXP (value, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (value, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (value, 0), 1)) == CONST_INT)
+ /* This is for variables which are at offset zero from
+ the frame pointer. This happens on the Alpha.
+ Non-frame pointer registers are excluded above. */
+ || (GET_CODE (XEXP (value, 0)) == REG)))
+ {
+ /* DECL_RTL looks like (MEM (PLUS (REG...) (CONST_INT...)))
+ or (MEM (REG...)). We want the value of that CONST_INT
+ or zero. */
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET (XEXP (value, 0)));
+ PUT_SDB_SCL (C_AUTO);
+ }
+ else if (GET_CODE (value) == MEM && GET_CODE (XEXP (value, 0)) == CONST)
+ {
+ /* Handle an obscure case which can arise when optimizing and
+ when there are few available registers. (This is *always*
+ the case for i386/i486 targets). The DECL_RTL looks like
+ (MEM (CONST ...)) even though this variable is a local `auto'
+ or a local `register' variable. In effect, what has happened
+ is that the reload pass has seen that all assignments and
+	       references for such a local variable can be replaced by
+ equivalent assignments and references to some static storage
+ variable, thereby avoiding the need for a register. In such
+ cases we're forced to lie to debuggers and tell them that
+ this variable was itself `static'. */
+ PUT_SDB_DEF (name);
+ PUT_SDB_VAL (XEXP (XEXP (value, 0), 0));
+ PUT_SDB_SCL (C_STAT);
+ }
+ else
+ {
+ /* It is something we don't know how to represent for SDB. */
+ return;
+ }
+ break;
+ }
+ PUT_SDB_TYPE (plain_type (type));
+ PUT_SDB_ENDEF;
+}
+
+/* Output SDB information for a top-level initialized variable
+ that has been delayed. */
+
+void
+sdbout_toplevel_data (decl)
+ tree decl;
+{
+ tree type = TREE_TYPE (decl);
+
+ if (DECL_IGNORED_P (decl))
+ return;
+
+ if (! (TREE_CODE (decl) == VAR_DECL
+ && GET_CODE (DECL_RTL (decl)) == MEM
+ && DECL_INITIAL (decl)))
+ abort ();
+
+ PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+ PUT_SDB_VAL (XEXP (DECL_RTL (decl), 0));
+ if (TREE_PUBLIC (decl))
+ {
+ PUT_SDB_SCL (C_EXT);
+ }
+ else
+ {
+ PUT_SDB_SCL (C_STAT);
+ }
+ PUT_SDB_TYPE (plain_type (type));
+ PUT_SDB_ENDEF;
+}
+
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+
+/* Machinery to record and output anonymous types. */
+
+static tree anonymous_types;
+
+static void
+sdbout_queue_anonymous_type (type)
+ tree type;
+{
+ anonymous_types = saveable_tree_cons (NULL_TREE, type, anonymous_types);
+}
+
+static void
+sdbout_dequeue_anonymous_types ()
+{
+ register tree types, link;
+
+ while (anonymous_types)
+ {
+ types = nreverse (anonymous_types);
+ anonymous_types = NULL_TREE;
+
+ for (link = types; link; link = TREE_CHAIN (link))
+ {
+ register tree type = TREE_VALUE (link);
+
+ if (type && ! TREE_ASM_WRITTEN (type))
+ sdbout_one_type (type);
+ }
+ }
+}
+
+#endif
+
+/* Given a chain of ..._TYPE nodes, all of which have names,
+ output definitions of those names, as typedefs. */
+
+void
+sdbout_types (types)
+ register tree types;
+{
+ register tree link;
+
+ for (link = types; link; link = TREE_CHAIN (link))
+ sdbout_one_type (link);
+
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+ sdbout_dequeue_anonymous_types ();
+#endif
+}
+
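+/* Output the SDB type number for TYPE, treating an erroneous type
+   as int.  */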
+static void
+sdbout_type (type)
+ tree type;
+{
+ if (type == error_mark_node)
+ type = integer_type_node;
+ PUT_SDB_TYPE (plain_type (type));
+}
+
+/* Output types of the fields of type TYPE, if they are structs.
+
+ Formerly did not chase through pointer types, since that could be circular.
+ They must come before TYPE, since forward refs are not allowed.
+ Now james@bigtex.cactus.org says to try them. */
+
+static void
+sdbout_field_types (type)
+ tree type;
+{
+ tree tail;
+ for (tail = TYPE_FIELDS (type); tail; tail = TREE_CHAIN (tail))
+ if (TREE_CODE (TREE_TYPE (tail)) == POINTER_TYPE)
+ sdbout_one_type (TREE_TYPE (TREE_TYPE (tail)));
+ else
+ sdbout_one_type (TREE_TYPE (tail));
+}
+
+/* Use this to put out the top level defined record and union types
+ for later reference. If this is a struct with a name, then put that
+ name out. Other unnamed structs will have .xxfake labels generated so
+ that they may be referred to later.
+ The label will be stored in the KNOWN_TYPE_TAG slot of a type.
+ It may NOT be called recursively. */
+
+static void
+sdbout_one_type (type)
+ tree type;
+{
+ text_section ();
+
+ switch (TREE_CODE (type))
+ {
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ case ENUMERAL_TYPE:
+ type = TYPE_MAIN_VARIANT (type);
+ /* Don't output a type twice. */
+ if (TREE_ASM_WRITTEN (type))
+ /* James said test TREE_ASM_BEING_WRITTEN here. */
+ return;
+
+ /* Output nothing if type is not yet defined. */
+ if (TYPE_SIZE (type) == 0)
+ return;
+
+ TREE_ASM_WRITTEN (type) = 1;
+#if 1
+ /* This is reputed to cause trouble with the following case,
+ but perhaps checking TYPE_SIZE above will fix it. */
+
+ /* Here is a test case:
+
+ struct foo {
+ struct badstr *bbb;
+ } forwardref;
+
+ typedef struct intermediate {
+ int aaaa;
+ } intermediate_ref;
+
+ typedef struct badstr {
+ int ccccc;
+ } badtype; */
+
+#if 0
+ TREE_ASM_BEING_WRITTEN (type) = 1;
+#endif
+ /* This change, which ought to make better output,
+ used to make the COFF assembler unhappy.
+ Changes involving KNOWN_TYPE_TAG may fix the problem. */
+ /* Before really doing anything, output types we want to refer to. */
+ /* Note that in version 1 the following two lines
+ are not used if forward references are in use. */
+ if (TREE_CODE (type) != ENUMERAL_TYPE)
+ sdbout_field_types (type);
+#if 0
+ TREE_ASM_WRITTEN (type) = 1;
+#endif
+#endif
+
+ /* Output a structure type. */
+ {
+ int size = int_size_in_bytes (type);
+ int member_scl;
+ tree tem;
+ int i, n_baseclasses = 0;
+
+ /* Record the type tag, but not in its permanent place just yet. */
+ sdbout_record_type_name (type);
+
+ PUT_SDB_DEF (KNOWN_TYPE_TAG (type));
+
+ switch (TREE_CODE (type))
+ {
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ PUT_SDB_SCL (C_UNTAG);
+ PUT_SDB_TYPE (T_UNION);
+ member_scl = C_MOU;
+ break;
+
+ case RECORD_TYPE:
+ PUT_SDB_SCL (C_STRTAG);
+ PUT_SDB_TYPE (T_STRUCT);
+ member_scl = C_MOS;
+ break;
+
+ case ENUMERAL_TYPE:
+ PUT_SDB_SCL (C_ENTAG);
+ PUT_SDB_TYPE (T_ENUM);
+ member_scl = C_MOE;
+ break;
+ }
+
+ PUT_SDB_SIZE (size);
+ PUT_SDB_ENDEF;
+
+ /* Print out the base class information with fields
+ named after the types they hold. */
+ if (TYPE_BINFO (type)
+ && TYPE_BINFO_BASETYPES (type))
+ n_baseclasses = TREE_VEC_LENGTH (TYPE_BINFO_BASETYPES (type));
+ for (i = 0; i < n_baseclasses; i++)
+ {
+ tree child = TREE_VEC_ELT (BINFO_BASETYPES (TYPE_BINFO (type)), i);
+ tree child_type = BINFO_TYPE (child);
+ tree child_type_name;
+ if (TYPE_NAME (child_type) == 0)
+ continue;
+ if (TREE_CODE (TYPE_NAME (child_type)) == IDENTIFIER_NODE)
+ child_type_name = TYPE_NAME (child_type);
+ else if (TREE_CODE (TYPE_NAME (child_type)) == TYPE_DECL)
+ {
+ child_type_name = DECL_NAME (TYPE_NAME (child_type));
+ if (child_type_name && template_name_p (child_type_name))
+ child_type_name
+ = DECL_ASSEMBLER_NAME (TYPE_NAME (child_type));
+ }
+ else
+ continue;
+
+ CONTIN;
+ PUT_SDB_DEF (IDENTIFIER_POINTER (child_type_name));
+ PUT_SDB_INT_VAL (TREE_INT_CST_LOW (BINFO_OFFSET (child)));
+ PUT_SDB_SCL (member_scl);
+ sdbout_type (BINFO_TYPE (child));
+ PUT_SDB_ENDEF;
+ }
+
+ /* output the individual fields */
+
+ if (TREE_CODE (type) == ENUMERAL_TYPE)
+ for (tem = TYPE_FIELDS (type); tem; tem = TREE_CHAIN (tem))
+ {
+ PUT_SDB_DEF (IDENTIFIER_POINTER (TREE_PURPOSE (tem)));
+ PUT_SDB_INT_VAL (TREE_INT_CST_LOW (TREE_VALUE (tem)));
+ PUT_SDB_SCL (C_MOE);
+ PUT_SDB_TYPE (T_MOE);
+ PUT_SDB_ENDEF;
+ }
+
+ else /* record or union type */
+ for (tem = TYPE_FIELDS (type); tem; tem = TREE_CHAIN (tem))
+ /* Output the name, type, position (in bits), size (in bits)
+ of each field. */
+
+ /* Omit here the nameless fields that are used to skip bits.
+ Also omit fields with variable size or position.
+ Also omit non FIELD_DECL nodes that GNU C++ may put here. */
+ if (TREE_CODE (tem) == FIELD_DECL
+ && DECL_NAME (tem) != 0
+ && TREE_CODE (DECL_SIZE (tem)) == INTEGER_CST
+ && TREE_CODE (DECL_FIELD_BITPOS (tem)) == INTEGER_CST)
+ {
+ char *name;
+
+ CONTIN;
+ if (DECL_LANG_SPECIFIC (tem))
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (tem));
+ else
+ name = IDENTIFIER_POINTER (DECL_NAME (tem));
+ PUT_SDB_DEF (name);
+ if (DECL_BIT_FIELD_TYPE (tem))
+ {
+ PUT_SDB_INT_VAL (TREE_INT_CST_LOW (DECL_FIELD_BITPOS (tem)));
+ PUT_SDB_SCL (C_FIELD);
+ sdbout_type (DECL_BIT_FIELD_TYPE (tem));
+ PUT_SDB_SIZE (TREE_INT_CST_LOW (DECL_SIZE (tem)));
+ }
+ else
+ {
+ PUT_SDB_INT_VAL (TREE_INT_CST_LOW (DECL_FIELD_BITPOS (tem))
+ / BITS_PER_UNIT);
+ PUT_SDB_SCL (member_scl);
+ sdbout_type (TREE_TYPE (tem));
+ }
+ PUT_SDB_ENDEF;
+ }
+	/* output end of a structure, union, or enumeral definition */
+
+ PUT_SDB_PLAIN_DEF ("eos");
+ PUT_SDB_INT_VAL (size);
+ PUT_SDB_SCL (C_EOS);
+ PUT_SDB_TAG (KNOWN_TYPE_TAG (type));
+ PUT_SDB_SIZE (size);
+ PUT_SDB_ENDEF;
+ break;
+ }
+ }
+}
+
+/* The following two functions output definitions of function parameters.
+ Each parameter gets a definition locating it in the parameter list.
+ Each parameter that is a register variable gets a second definition
+ locating it in the register.
+
+   Printing of argument lists in gdb uses the definitions that
+   locate them in the parameter list.  But references to the variable in
+   expressions preferentially use the definition as a register.  */
+
+/* Output definitions, referring to storage in the parmlist,
+ of all the parms in PARMS, which is a chain of PARM_DECL nodes. */
+
+static void
+sdbout_parms (parms)
+ tree parms;
+{
+ for (; parms; parms = TREE_CHAIN (parms))
+ if (DECL_NAME (parms))
+ {
+ int current_sym_value = 0;
+ char *name = IDENTIFIER_POINTER (DECL_NAME (parms));
+
+ if (name == 0 || *name == 0)
+ name = gen_fake_label ();
+
+ /* Perform any necessary register eliminations on the parameter's rtl,
+ so that the debugging output will be accurate. */
+ DECL_INCOMING_RTL (parms) =
+ eliminate_regs (DECL_INCOMING_RTL (parms), 0, NULL_RTX);
+ DECL_RTL (parms) = eliminate_regs (DECL_RTL (parms), 0, NULL_RTX);
+
+ if (PARM_PASSED_IN_MEMORY (parms))
+ {
+ rtx addr = XEXP (DECL_INCOMING_RTL (parms), 0);
+ tree type;
+
+ /* ??? Here we assume that the parm address is indexed
+ off the frame pointer or arg pointer.
+ If that is not true, we produce meaningless results,
+ but do not crash. */
+ if (GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ current_sym_value = INTVAL (XEXP (addr, 1));
+ else
+ current_sym_value = 0;
+
+ if (GET_CODE (DECL_RTL (parms)) == REG
+ && REGNO (DECL_RTL (parms)) >= 0
+ && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER)
+ type = DECL_ARG_TYPE (parms);
+ else
+ {
+ int original_sym_value = current_sym_value;
+
+ /* This is the case where the parm is passed as an int or
+ double and it is converted to a char, short or float
+ and stored back in the parmlist. In this case, describe
+ the parm with the variable's declared type, and adjust
+ the address if the least significant bytes (which we are
+ using) are not the first ones. */
+#if BYTES_BIG_ENDIAN
+ if (TREE_TYPE (parms) != DECL_ARG_TYPE (parms))
+ current_sym_value +=
+ (GET_MODE_SIZE (TYPE_MODE (DECL_ARG_TYPE (parms)))
+ - GET_MODE_SIZE (GET_MODE (DECL_RTL (parms))));
+#endif
+ if (GET_CODE (DECL_RTL (parms)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (parms), 0)) == PLUS
+ && (GET_CODE (XEXP (XEXP (DECL_RTL (parms), 0), 1))
+ == CONST_INT)
+ && (INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1))
+ == current_sym_value))
+ type = TREE_TYPE (parms);
+ else
+ {
+ current_sym_value = original_sym_value;
+ type = DECL_ARG_TYPE (parms);
+ }
+ }
+
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DEBUGGER_ARG_OFFSET (current_sym_value, addr));
+ PUT_SDB_SCL (C_ARG);
+ PUT_SDB_TYPE (plain_type (type));
+ PUT_SDB_ENDEF;
+ }
+ else if (GET_CODE (DECL_RTL (parms)) == REG)
+ {
+ rtx best_rtl;
+ /* Parm passed in registers and lives in registers or nowhere. */
+
+ /* If parm lives in a register, use that register;
+ pretend the parm was passed there. It would be more consistent
+ to describe the register where the parm was passed,
+ but in practice that register usually holds something else. */
+ if (REGNO (DECL_RTL (parms)) >= 0
+ && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER)
+ best_rtl = DECL_RTL (parms);
+ /* If the parm lives nowhere,
+ use the register where it was passed. */
+ else
+ best_rtl = DECL_INCOMING_RTL (parms);
+
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (REGNO (best_rtl)));
+ PUT_SDB_SCL (C_REGPARM);
+	    PUT_SDB_TYPE (plain_type (TREE_TYPE (parms)));
+ PUT_SDB_ENDEF;
+ }
+ else if (GET_CODE (DECL_RTL (parms)) == MEM
+ && XEXP (DECL_RTL (parms), 0) != const0_rtx)
+ {
+ /* Parm was passed in registers but lives on the stack. */
+
+ /* DECL_RTL looks like (MEM (PLUS (REG...) (CONST_INT...))),
+ in which case we want the value of that CONST_INT,
+ or (MEM (REG ...)) or (MEM (MEM ...)),
+ in which case we use a value of zero. */
+ if (GET_CODE (XEXP (DECL_RTL (parms), 0)) == REG
+ || GET_CODE (XEXP (DECL_RTL (parms), 0)) == MEM)
+ current_sym_value = 0;
+ else
+ current_sym_value = INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1));
+
+ /* Again, this assumes the offset is based on the arg pointer. */
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DEBUGGER_ARG_OFFSET (current_sym_value,
+ XEXP (DECL_RTL (parms), 0)));
+ PUT_SDB_SCL (C_ARG);
+	    PUT_SDB_TYPE (plain_type (TREE_TYPE (parms)));
+ PUT_SDB_ENDEF;
+ }
+ }
+}
+
+/* Output definitions for the places where parms live during the function,
+   when those places differ from where the parms were passed, for parms
+   that were passed in memory.
+
+ It is not useful to do this for parms passed in registers
+ that live during the function in different registers, because it is
+ impossible to look in the passed register for the passed value,
+ so we use the within-the-function register to begin with.
+
+ PARMS is a chain of PARM_DECL nodes. */
+
+static void
+sdbout_reg_parms (parms)
+ tree parms;
+{
+ for (; parms; parms = TREE_CHAIN (parms))
+ if (DECL_NAME (parms))
+ {
+ char *name = IDENTIFIER_POINTER (DECL_NAME (parms));
+
+ /* Report parms that live in registers during the function
+ but were passed in memory. */
+ if (GET_CODE (DECL_RTL (parms)) == REG
+ && REGNO (DECL_RTL (parms)) >= 0
+ && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER
+ && PARM_PASSED_IN_MEMORY (parms))
+ {
+ if (name == 0 || *name == 0)
+ name = gen_fake_label ();
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (REGNO (DECL_RTL (parms))));
+ PUT_SDB_SCL (C_REG);
+	    PUT_SDB_TYPE (plain_type (TREE_TYPE (parms)));
+ PUT_SDB_ENDEF;
+ }
+ /* Report parms that live in memory but not where they were passed. */
+ else if (GET_CODE (DECL_RTL (parms)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (parms), 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (DECL_RTL (parms), 0), 1)) == CONST_INT
+ && PARM_PASSED_IN_MEMORY (parms)
+ && ! rtx_equal_p (DECL_RTL (parms), DECL_INCOMING_RTL (parms)))
+ {
+#if 0 /* ??? It is not clear yet what should replace this. */
+ int offset = DECL_OFFSET (parms) / BITS_PER_UNIT;
+ /* A parm declared char is really passed as an int,
+ so it occupies the least significant bytes.
+ On a big-endian machine those are not the low-numbered ones. */
+#if BYTES_BIG_ENDIAN
+ if (offset != -1 && TREE_TYPE (parms) != DECL_ARG_TYPE (parms))
+ offset += (GET_MODE_SIZE (TYPE_MODE (DECL_ARG_TYPE (parms)))
+ - GET_MODE_SIZE (GET_MODE (DECL_RTL (parms))));
+#endif
+ if (INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1)) != offset) {...}
+#endif
+ {
+ if (name == 0 || *name == 0)
+ name = gen_fake_label ();
+ PUT_SDB_DEF (name);
+ PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET
+ (XEXP (DECL_RTL (parms), 0)));
+ PUT_SDB_SCL (C_AUTO);
+ PUT_SDB_TYPE (plain_type (TREE_TYPE (parms)));
+ PUT_SDB_ENDEF;
+ }
+ }
+ }
+}
+
+/* Describe the beginning of an internal block within a function.
+ Also output descriptions of variables defined in this block.
+
+ N is the number of the block, by order of beginning, counting from 1,
+ and not counting the outermost (function top-level) block.
+ The blocks match the BLOCKs in DECL_INITIAL (current_function_decl),
+ if the count starts at 0 for the outermost one. */
+
+void
+sdbout_begin_block (file, line, n)
+ FILE *file;
+ int line;
+ int n;
+{
+ tree decl = current_function_decl;
+ MAKE_LINE_SAFE (line);
+
+ /* The SCO compiler does not emit a separate block for the function level
+ scope, so we avoid it here also. However, mips ECOFF compilers do emit
+ a separate block, so we retain it when MIPS_DEBUGGING_INFO is defined. */
+#ifndef MIPS_DEBUGGING_INFO
+ if (n != 1)
+#endif
+ PUT_SDB_BLOCK_START (line - sdb_begin_function_line);
+
+ if (n == 1)
+ {
+ /* Include the outermost BLOCK's variables in block 1. */
+ next_block_number = 0;
+ do_block = 0;
+ sdbout_block (DECL_INITIAL (decl));
+ }
+ /* If -g1, suppress all the internal symbols of functions
+ except for arguments. */
+ if (debug_info_level != DINFO_LEVEL_TERSE)
+ {
+ next_block_number = 0;
+ do_block = n;
+ sdbout_block (DECL_INITIAL (decl));
+ }
+
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+ sdbout_dequeue_anonymous_types ();
+#endif
+}
+
+/* Describe the end line-number of an internal block within a function. */
+
+void
+sdbout_end_block (file, line, n)
+ FILE *file;
+ int line;
+ int n;
+{
+ MAKE_LINE_SAFE (line);
+
+ /* The SCO compiler does not emit a separate block for the function level
+ scope, so we avoid it here also. However, mips ECOFF compilers do emit
+ a separate block, so we retain it when MIPS_DEBUGGING_INFO is defined. */
+#ifndef MIPS_DEBUGGING_INFO
+ if (n != 1)
+#endif
+ PUT_SDB_BLOCK_END (line - sdb_begin_function_line);
+}
+
+/* Output sdb info for the current function name.
+ Called from assemble_start_function. */
+
+void
+sdbout_mark_begin_function ()
+{
+ sdbout_symbol (current_function_decl, 0);
+}
+
+/* Called at beginning of function body (after prologue).
+ Record the function's starting line number, so we can output
+ relative line numbers for the other lines.
+ Describe beginning of outermost block.
+ Also describe the parameter list. */
+
+void
+sdbout_begin_function (line)
+ int line;
+{
+ sdb_begin_function_line = line - 1;
+ PUT_SDB_FUNCTION_START (line);
+ sdbout_parms (DECL_ARGUMENTS (current_function_decl));
+ sdbout_reg_parms (DECL_ARGUMENTS (current_function_decl));
+}
+
+/* Called at end of function (before epilogue).
+ Describe end of outermost block. */
+
+void
+sdbout_end_function (line)
+ int line;
+{
+#ifdef SDB_ALLOW_FORWARD_REFERENCES
+ sdbout_dequeue_anonymous_types ();
+#endif
+
+ MAKE_LINE_SAFE (line);
+ PUT_SDB_FUNCTION_END (line - sdb_begin_function_line);
+
+ /* Indicate we are between functions, for line-number output. */
+ sdb_begin_function_line = -1;
+}
+
+/* Output sdb info for the absolute end of a function.
+ Called after the epilogue is output. */
+
+void
+sdbout_end_epilogue ()
+{
+ char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl));
+ PUT_SDB_EPILOGUE_END (name);
+}
+
+/* Output sdb info for the given label. Called only if LABEL_NAME (insn)
+ is present. */
+
+void
+sdbout_label (insn)
+ register rtx insn;
+{
+ PUT_SDB_DEF (LABEL_NAME (insn));
+ PUT_SDB_VAL (insn);
+ PUT_SDB_SCL (C_LABEL);
+ PUT_SDB_TYPE (T_NULL);
+ PUT_SDB_ENDEF;
+}
+
+#endif /* SDB_DEBUGGING_INFO */
diff --git a/gnu/usr.bin/cc/cc_int/stmt.c b/gnu/usr.bin/cc/cc_int/stmt.c
new file mode 100644
index 0000000..4069829
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/stmt.c
@@ -0,0 +1,5431 @@
+/* Expands front end tree to back end RTL for GNU C-Compiler
+ Copyright (C) 1987, 88, 89, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file handles the generation of rtl code from tree structure
+ above the level of expressions, using subroutines in exp*.c and emit-rtl.c.
+ It also creates the rtl expressions for parameters and auto variables
+ and has full responsibility for allocating stack slots.
+
+ The functions whose names start with `expand_' are called by the
+ parser to generate RTL instructions for various kinds of constructs.
+
+ Some control and binding constructs require calling several such
+ functions at different times. For example, a simple if-then
+ is expanded by calling `expand_start_cond' (with the condition-expression
+ as argument) before parsing the then-clause and calling `expand_end_cond'
+ after parsing the then-clause. */
+
+#include "config.h"
+
+#include <stdio.h>
+#include <ctype.h>
+
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "function.h"
+#include "insn-flags.h"
+#include "insn-config.h"
+#include "insn-codes.h"
+#include "expr.h"
+#include "hard-reg-set.h"
+#include "obstack.h"
+#include "loop.h"
+#include "recog.h"
+#include "machmode.h"
+
+#include "bytecode.h"
+#include "bc-typecd.h"
+#include "bc-opcode.h"
+#include "bc-optab.h"
+#include "bc-emit.h"
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+struct obstack stmt_obstack;
+
+/* Filename and line number of last line-number note,
+ whether we actually emitted it or not. */
+char *emit_filename;
+int emit_lineno;
+
+/* Nonzero if within a ({...}) grouping, in which case we must
+ always compute a value for each expr-stmt in case it is the last one. */
+
+int expr_stmts_for_value;
+
+/* Each time we expand an expression-statement,
+ record the expr's type and its RTL value here. */
+
+static tree last_expr_type;
+static rtx last_expr_value;
+
+/* Each time we expand the end of a binding contour (in `expand_end_bindings')
+ and we emit a new NOTE_INSN_BLOCK_END note, we save a pointer to it here.
+ This is used by the `remember_end_note' function to record the endpoint
+ of each generated block in its associated BLOCK node. */
+
+static rtx last_block_end_note;
+
+/* Number of binding contours started so far in this function. */
+
+int block_start_count;
+
+/* Nonzero if function being compiled needs to
+ return the address of where it has put a structure value. */
+
+extern int current_function_returns_pcc_struct;
+
+/* Label that will go on parm cleanup code, if any.
+ Jumping to this label runs cleanup code for parameters, if
+ such code must be run. Following this code is the logical return label. */
+
+extern rtx cleanup_label;
+
+/* Label that will go on function epilogue.
+ Jumping to this label serves as a "return" instruction
+ on machines which require execution of the epilogue on all returns. */
+
+extern rtx return_label;
+
+/* List (chain of EXPR_LISTs) of pseudo-regs of SAVE_EXPRs.
+   So we can mark them all live at the end of the function, if not optimizing.  */
+extern rtx save_expr_regs;
+
+/* Offset to end of allocated area of stack frame.
+ If stack grows down, this is the address of the last stack slot allocated.
+ If stack grows up, this is the address for the next slot. */
+extern int frame_offset;
+
+/* Label to jump back to for tail recursion, or 0 if we have
+ not yet needed one for this function. */
+extern rtx tail_recursion_label;
+
+/* Place after which to insert the tail_recursion_label if we need one. */
+extern rtx tail_recursion_reentry;
+
+/* Location at which to save the argument pointer if it will need to be
+ referenced. There are two cases where this is done: if nonlocal gotos
+   exist, or if vars stored at an offset from the argument pointer will be
+ needed by inner routines. */
+
+extern rtx arg_pointer_save_area;
+
+/* Chain of all RTL_EXPRs that have insns in them. */
+extern tree rtl_expr_chain;
+
+#if 0 /* Turned off because 0 seems to work just as well. */
+/* Cleanup lists are required for binding levels regardless of whether
+ that binding level has cleanups or not. This node serves as the
+ cleanup list whenever an empty list is required. */
+static tree empty_cleanup_list;
+#endif
+
+extern void (*interim_eh_hook) PROTO((tree));
+
+/* Functions and data structures for expanding case statements. */
+
+/* Case label structure, used to hold info on labels within case
+ statements. We handle "range" labels; for a single-value label
+ as in C, the high and low limits are the same.
+
+ A chain of case nodes is initially maintained via the RIGHT fields
+ in the nodes. Nodes with higher case values are later in the list.
+
+ Switch statements can be output in one of two forms. A branch table
+ is used if there are more than a few labels and the labels are dense
+ within the range between the smallest and largest case value. If a
+ branch table is used, no further manipulations are done with the case
+ node chain.
+
+ The alternative to the use of a branch table is to generate a series
+ of compare and jump insns. When that is done, we use the LEFT, RIGHT,
+ and PARENT fields to hold a binary tree. Initially the tree is
+ totally unbalanced, with everything on the right. We balance the tree
+ with nodes on the left having lower case values than the parent
+ and nodes on the right having higher values. We then output the tree
+ in order. */
+
+struct case_node
+{
+ struct case_node *left; /* Left son in binary tree */
+ struct case_node *right; /* Right son in binary tree; also node chain */
+ struct case_node *parent; /* Parent of node in binary tree */
+ tree low; /* Lowest index value for this label */
+ tree high; /* Highest index value for this label */
+ tree code_label; /* Label to jump to when node matches */
+};
+
+typedef struct case_node case_node;
+typedef struct case_node *case_node_ptr;
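+
+/* For example, with the GNU range extension, `case 1 ... 4:' yields a
+   single case_node with low = 1 and high = 4, while a plain `case 7:'
+   yields a node with low = high = 7.  */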
+
+/* These are used by estimate_case_costs and balance_case_nodes. */
+
+/* This must be a signed type, and non-ANSI compilers lack signed char. */
+static short *cost_table;
+static int use_cost_table;
+
+/* Stack of control and binding constructs we are currently inside.
+
+ These constructs begin when you call `expand_start_WHATEVER'
+ and end when you call `expand_end_WHATEVER'. This stack records
+ info about how the construct began that tells the end-function
+ what to do. It also may provide information about the construct
+ to alter the behavior of other constructs within the body.
+ For example, they may affect the behavior of C `break' and `continue'.
+
+ Each construct gets one `struct nesting' object.
+ All of these objects are chained through the `all' field.
+ `nesting_stack' points to the first object (innermost construct).
+ The position of an entry on `nesting_stack' is in its `depth' field.
+
+ Each type of construct has its own individual stack.
+ For example, loops have `loop_stack'. Each object points to the
+ next object of the same type through the `next' field.
+
+ Some constructs are visible to `break' exit-statements and others
+ are not. Which constructs are visible depends on the language.
+ Therefore, the data structure allows each construct to be visible
+ or not, according to the args given when the construct is started.
+ The construct is visible if the `exit_label' field is non-null.
+ In that case, the value should be a CODE_LABEL rtx. */
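+
+/* A concrete picture (sketch): while expanding the `break' in
+
+	while (c) { if (p) break; }
+
+   nesting_stack runs if -> loop through the `all' fields, whereas
+   cond_stack holds only the if and loop_stack only the loop.  An
+   exit-statement leaves the innermost visible construct; here, the
+   loop.  */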
+
+struct nesting
+{
+ struct nesting *all;
+ struct nesting *next;
+ int depth;
+ rtx exit_label;
+ union
+ {
+ /* For conds (if-then and if-then-else statements). */
+ struct
+ {
+ /* Label for the end of the if construct.
+ There is none if EXITFLAG was not set
+ and no `else' has been seen yet. */
+ rtx endif_label;
+ /* Label for the end of this alternative.
+ This may be the end of the if or the next else/elseif. */
+ rtx next_label;
+ } cond;
+ /* For loops. */
+ struct
+ {
+ /* Label at the top of the loop; place to loop back to. */
+ rtx start_label;
+ /* Label at the end of the whole construct. */
+ rtx end_label;
+ /* Label before a jump that branches to the end of the whole
+ construct. This is where destructors go if any. */
+ rtx alt_end_label;
+ /* Label for `continue' statement to jump to;
+ this is in front of the stepper of the loop. */
+ rtx continue_label;
+ } loop;
+ /* For variable binding contours. */
+ struct
+ {
+ /* Sequence number of this binding contour within the function,
+ in order of entry. */
+ int block_start_count;
+ /* Nonzero => value to restore stack to on exit. Complemented by
+ bc_stack_level (see below) when generating bytecodes. */
+ rtx stack_level;
+ /* The NOTE that starts this contour.
+ Used by expand_goto to check whether the destination
+ is within each contour or not. */
+ rtx first_insn;
+ /* Innermost containing binding contour that has a stack level. */
+ struct nesting *innermost_stack_block;
+ /* List of cleanups to be run on exit from this contour.
+ This is a list of expressions to be evaluated.
+ The TREE_PURPOSE of each link is the ..._DECL node
+ which the cleanup pertains to. */
+ tree cleanups;
+ /* List of cleanup-lists of blocks containing this block,
+ as they were at the locus where this block appears.
+ There is an element for each containing block,
+ ordered innermost containing block first.
+ The tail of this list can be 0 (was empty_cleanup_list),
+ if all remaining elements would be empty lists.
+ The element's TREE_VALUE is the cleanup-list of that block,
+ which may be null. */
+ tree outer_cleanups;
+ /* Chain of labels defined inside this binding contour.
+ For contours that have stack levels or cleanups. */
+ struct label_chain *label_chain;
+ /* Number of function calls seen, as of start of this block. */
+ int function_call_count;
+ /* Bytecode specific: stack level to restore stack to on exit. */
+ int bc_stack_level;
+ } block;
+ /* For switch (C) or case (Pascal) statements,
+ and also for dummies (see `expand_start_case_dummy'). */
+ struct
+ {
+ /* The insn after which the case dispatch should finally
+ be emitted. Zero for a dummy. */
+ rtx start;
+ /* For bytecodes, the case table is in-lined right in the code.
+ A label is needed for skipping over this block. It is only
+ used when generating bytecodes. */
+ rtx skip_label;
+ /* A list of case labels, kept in ascending order by value
+ as the list is built.
+ During expand_end_case, this list may be rearranged into a
+ nearly balanced binary tree. */
+ struct case_node *case_list;
+ /* Label to jump to if no case matches. */
+ tree default_label;
+ /* The expression to be dispatched on. */
+ tree index_expr;
+ /* Type that INDEX_EXPR should be converted to. */
+ tree nominal_type;
+ /* Number of range exprs in case statement. */
+ int num_ranges;
+ /* Name of this kind of statement, for warnings. */
+ char *printname;
+ /* Nonzero if a case label has been seen in this case stmt. */
+ char seenlabel;
+ } case_stmt;
+ } data;
+};
+
+/* Chain of all pending binding contours. */
+struct nesting *block_stack;
+
+/* If any new stacks are added here, add them to POPSTACKS too. */
+
+/* Chain of all pending binding contours that restore stack levels
+ or have cleanups. */
+struct nesting *stack_block_stack;
+
+/* Chain of all pending conditional statements. */
+struct nesting *cond_stack;
+
+/* Chain of all pending loops. */
+struct nesting *loop_stack;
+
+/* Chain of all pending case or switch statements. */
+struct nesting *case_stack;
+
+/* Separate chain including all of the above,
+ chained through the `all' field. */
+struct nesting *nesting_stack;
+
+/* Number of entries on nesting_stack now. */
+int nesting_depth;
+
+/* Allocate and return a new `struct nesting'. */
+
+#define ALLOC_NESTING() \
+ (struct nesting *) obstack_alloc (&stmt_obstack, sizeof (struct nesting))
+
+/* Pop the nesting stack element by element until we pop off
+ the element which is at the top of STACK.
+ Update all the other stacks, popping off elements from them
+ as we pop them from nesting_stack. */
+
+#define POPSTACK(STACK) \
+do { struct nesting *target = STACK; \
+ struct nesting *this; \
+ do { this = nesting_stack; \
+ if (loop_stack == this) \
+ loop_stack = loop_stack->next; \
+ if (cond_stack == this) \
+ cond_stack = cond_stack->next; \
+ if (block_stack == this) \
+ block_stack = block_stack->next; \
+ if (stack_block_stack == this) \
+ stack_block_stack = stack_block_stack->next; \
+ if (case_stack == this) \
+ case_stack = case_stack->next; \
+ nesting_depth = nesting_stack->depth - 1; \
+ nesting_stack = this->all; \
+ obstack_free (&stmt_obstack, this); } \
+ while (this != target); } while (0)
+
+/* In some cases it is impossible to generate code for a forward goto
+ until the label definition is seen. This happens when it may be necessary
+ for the goto to reset the stack pointer: we don't yet know how to do that.
+ So expand_goto puts an entry on this fixup list.
+ Each time a binding contour that resets the stack is exited,
+ we check each fixup.
+ If the target label has now been defined, we can insert the proper code. */
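+
+/* For instance, with the GNU variable-size array extension (a sketch):
+
+	{ int n = f ();  char buf[n];  if (e) goto out;  ... }
+	out: ;
+
+   The goto is expanded while `out' is still undefined, and exiting a
+   block with variable-size data must restore the stack pointer; so a
+   fixup is recorded here and resolved when that contour is exited.  */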
+
+struct goto_fixup
+{
+ /* Points to following fixup. */
+ struct goto_fixup *next;
+ /* Points to the insn before the jump insn.
+ If more code must be inserted, it goes after this insn. */
+ rtx before_jump;
+ /* The LABEL_DECL that this jump is jumping to, or 0
+ for break, continue or return. */
+ tree target;
+ /* The BLOCK for the place where this goto was found. */
+ tree context;
+ /* The CODE_LABEL rtx that this is jumping to. */
+ rtx target_rtl;
+ /* Number of binding contours started in current function
+ before the label reference. */
+ int block_start_count;
+ /* The outermost stack level that should be restored for this jump.
+ Each time a binding contour that resets the stack is exited,
+ if the target label is *not* yet defined, this slot is updated. */
+ rtx stack_level;
+ /* List of lists of cleanup expressions to be run by this goto.
+ There is one element for each block that this goto is within.
+ The tail of this list can be 0 (was empty_cleanup_list),
+ if all remaining elements would be empty.
+ The TREE_VALUE contains the cleanup list of that block as of the
+ time this goto was seen.
+ The TREE_ADDRESSABLE flag is 1 for a block that has been exited. */
+ tree cleanup_list_list;
+
+ /* Bytecode specific members follow */
+
+ /* The label that this jump is jumping to, or 0 for break, continue
+ or return. */
+ struct bc_label *bc_target;
+
+ /* The label we use for the fixup patch */
+ struct bc_label *label;
+
+ /* True (non-0) if fixup has been handled */
+ int bc_handled:1;
+
+ /* Like stack_level above, except refers to the interpreter stack */
+ int bc_stack_level;
+};
+
+static struct goto_fixup *goto_fixup_chain;
+
+/* Within any binding contour that must restore a stack level,
+ all labels are recorded with a chain of these structures. */
+
+struct label_chain
+{
+  /* Points to the following label in this chain.  */
+ struct label_chain *next;
+ tree label;
+};
+static void expand_goto_internal PROTO((tree, rtx, rtx));
+static void bc_expand_goto_internal PROTO((enum bytecode_opcode,
+ struct bc_label *, tree));
+static int expand_fixup PROTO((tree, rtx, rtx));
+static void bc_expand_fixup PROTO((enum bytecode_opcode,
+ struct bc_label *, int));
+static void fixup_gotos PROTO((struct nesting *, rtx, tree,
+ rtx, int));
+static void bc_fixup_gotos PROTO((struct nesting *, int, tree,
+ rtx, int));
+static int warn_if_unused_value PROTO((tree));
+static void bc_expand_start_cond PROTO((tree, int));
+static void bc_expand_end_cond PROTO((void));
+static void bc_expand_start_else PROTO((void));
+static void bc_expand_end_loop PROTO((void));
+static void bc_expand_end_bindings PROTO((tree, int, int));
+static void bc_expand_decl PROTO((tree, tree));
+static void bc_expand_variable_local_init PROTO((tree));
+static void bc_expand_decl_init PROTO((tree));
+static void expand_null_return_1 PROTO((rtx, int));
+static int tail_recursion_args PROTO((tree, tree));
+static void expand_cleanups PROTO((tree, tree));
+static void bc_expand_start_case PROTO((struct nesting *, tree,
+ tree, char *));
+static int bc_pushcase PROTO((tree, tree));
+static void bc_check_for_full_enumeration_handling PROTO((tree));
+static void bc_expand_end_case PROTO((tree));
+static void do_jump_if_equal PROTO((rtx, rtx, rtx, int));
+static int estimate_case_costs PROTO((case_node_ptr));
+static void group_case_nodes PROTO((case_node_ptr));
+static void balance_case_nodes PROTO((case_node_ptr *,
+ case_node_ptr));
+static int node_has_low_bound PROTO((case_node_ptr, tree));
+static int node_has_high_bound PROTO((case_node_ptr, tree));
+static int node_is_bounded PROTO((case_node_ptr, tree));
+static void emit_jump_if_reachable PROTO((rtx));
+static void emit_case_nodes PROTO((rtx, case_node_ptr, rtx, tree));
+
+int bc_expand_exit_loop_if_false ();
+void bc_expand_start_cond ();
+void bc_expand_end_cond ();
+void bc_expand_start_else ();
+void bc_expand_end_bindings ();
+void bc_expand_start_case ();
+void bc_check_for_full_enumeration_handling ();
+void bc_expand_end_case ();
+void bc_expand_decl ();
+
+extern rtx bc_allocate_local ();
+extern rtx bc_allocate_variable_array ();
+
+void
+init_stmt ()
+{
+ gcc_obstack_init (&stmt_obstack);
+#if 0
+ empty_cleanup_list = build_tree_list (NULL_TREE, NULL_TREE);
+#endif
+}
+
+void
+init_stmt_for_function ()
+{
+ /* We are not currently within any block, conditional, loop or case. */
+ block_stack = 0;
+ stack_block_stack = 0;
+ loop_stack = 0;
+ case_stack = 0;
+ cond_stack = 0;
+ nesting_stack = 0;
+ nesting_depth = 0;
+
+ block_start_count = 0;
+
+ /* No gotos have been expanded yet. */
+ goto_fixup_chain = 0;
+
+ /* We are not processing a ({...}) grouping. */
+ expr_stmts_for_value = 0;
+ last_expr_type = 0;
+}
+
+void
+save_stmt_status (p)
+ struct function *p;
+{
+ p->block_stack = block_stack;
+ p->stack_block_stack = stack_block_stack;
+ p->cond_stack = cond_stack;
+ p->loop_stack = loop_stack;
+ p->case_stack = case_stack;
+ p->nesting_stack = nesting_stack;
+ p->nesting_depth = nesting_depth;
+ p->block_start_count = block_start_count;
+ p->last_expr_type = last_expr_type;
+ p->last_expr_value = last_expr_value;
+ p->expr_stmts_for_value = expr_stmts_for_value;
+ p->emit_filename = emit_filename;
+ p->emit_lineno = emit_lineno;
+ p->goto_fixup_chain = goto_fixup_chain;
+}
+
+void
+restore_stmt_status (p)
+ struct function *p;
+{
+ block_stack = p->block_stack;
+ stack_block_stack = p->stack_block_stack;
+ cond_stack = p->cond_stack;
+ loop_stack = p->loop_stack;
+ case_stack = p->case_stack;
+ nesting_stack = p->nesting_stack;
+ nesting_depth = p->nesting_depth;
+ block_start_count = p->block_start_count;
+ last_expr_type = p->last_expr_type;
+ last_expr_value = p->last_expr_value;
+ expr_stmts_for_value = p->expr_stmts_for_value;
+ emit_filename = p->emit_filename;
+ emit_lineno = p->emit_lineno;
+ goto_fixup_chain = p->goto_fixup_chain;
+}
+
+/* Emit a no-op instruction. */
+
+void
+emit_nop ()
+{
+ rtx last_insn;
+
+ if (!output_bytecode)
+ {
+ last_insn = get_last_insn ();
+ if (!optimize
+ && (GET_CODE (last_insn) == CODE_LABEL
+ || prev_real_insn (last_insn) == 0))
+ emit_insn (gen_nop ());
+ }
+}
+
+/* Return the rtx-label that corresponds to a LABEL_DECL,
+ creating it if necessary. */
+
+rtx
+label_rtx (label)
+ tree label;
+{
+ if (TREE_CODE (label) != LABEL_DECL)
+ abort ();
+
+ if (DECL_RTL (label))
+ return DECL_RTL (label);
+
+ return DECL_RTL (label) = gen_label_rtx ();
+}
+
+/* Add an unconditional jump to LABEL as the next sequential instruction. */
+
+void
+emit_jump (label)
+ rtx label;
+{
+ do_pending_stack_adjust ();
+ emit_jump_insn (gen_jump (label));
+ emit_barrier ();
+}
+
+/* Emit code to jump to the address
+ specified by the pointer expression EXP. */
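+
+/* (E.g. GNU C's labels-as-values: given `void *p = &&lab;', the
+   statement `goto *p;' arrives here with EXP being `p'.)  */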
+
+void
+expand_computed_goto (exp)
+ tree exp;
+{
+ if (output_bytecode)
+ {
+ bc_expand_expr (exp);
+ bc_emit_instruction (jumpP);
+ }
+ else
+ {
+ rtx x = expand_expr (exp, NULL_RTX, VOIDmode, 0);
+ emit_queue ();
+ emit_indirect_jump (x);
+ }
+}
+
+/* Handle goto statements and the labels that they can go to. */
+
+/* Specify the location in the RTL code of a label LABEL,
+ which is a LABEL_DECL tree node.
+
+ This is used for the kind of label that the user can jump to with a
+ goto statement, and for alternatives of a switch or case statement.
+ RTL labels generated for loops and conditionals don't go through here;
+ they are generated directly at the RTL level, by other functions below.
+
+ Note that this has nothing to do with defining label *names*.
+ Languages vary in how they do that and what that even means. */
+
+void
+expand_label (label)
+ tree label;
+{
+ struct label_chain *p;
+
+ if (output_bytecode)
+ {
+ if (! DECL_RTL (label))
+ DECL_RTL (label) = bc_gen_rtx ((char *) 0, 0, bc_get_bytecode_label ());
+ if (! bc_emit_bytecode_labeldef (BYTECODE_BC_LABEL (DECL_RTL (label))))
+ error ("multiply defined label");
+ return;
+ }
+
+ do_pending_stack_adjust ();
+ emit_label (label_rtx (label));
+ if (DECL_NAME (label))
+ LABEL_NAME (DECL_RTL (label)) = IDENTIFIER_POINTER (DECL_NAME (label));
+
+ if (stack_block_stack != 0)
+ {
+ p = (struct label_chain *) oballoc (sizeof (struct label_chain));
+ p->next = stack_block_stack->data.block.label_chain;
+ stack_block_stack->data.block.label_chain = p;
+ p->label = label;
+ }
+}
+
+/* Declare that LABEL (a LABEL_DECL) may be used for nonlocal gotos
+ from nested functions. */
+
+void
+declare_nonlocal_label (label)
+ tree label;
+{
+ nonlocal_labels = tree_cons (NULL_TREE, label, nonlocal_labels);
+ LABEL_PRESERVE_P (label_rtx (label)) = 1;
+ if (nonlocal_goto_handler_slot == 0)
+ {
+ nonlocal_goto_handler_slot
+ = assign_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0);
+ emit_stack_save (SAVE_NONLOCAL,
+ &nonlocal_goto_stack_level,
+ PREV_INSN (tail_recursion_reentry));
+ }
+}
+
+/* Generate RTL code for a `goto' statement with target label LABEL.
+ LABEL should be a LABEL_DECL tree node that was or will later be
+ defined with `expand_label'. */
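+
+/* A sketch of the nonlocal case handled below, using GNU C nested
+   functions; the label must be declared with `__label__' so that the
+   nested function inherits it:
+
+	void f ()
+	{
+	  __label__ failed;
+	  void g (int x) { if (x < 0) goto failed; }
+	  g (-1);
+	  return;
+	failed:
+	  abort ();
+	}
+
+   Expanding the `goto failed' inside g takes the nonlocal path.  */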
+
+void
+expand_goto (label)
+ tree label;
+{
+ tree context;
+
+ if (output_bytecode)
+ {
+ expand_goto_internal (label, label_rtx (label), NULL_RTX);
+ return;
+ }
+
+ /* Check for a nonlocal goto to a containing function. */
+ context = decl_function_context (label);
+ if (context != 0 && context != current_function_decl)
+ {
+ struct function *p = find_function_data (context);
+ rtx label_ref = gen_rtx (LABEL_REF, Pmode, label_rtx (label));
+ rtx temp;
+
+ p->has_nonlocal_label = 1;
+ current_function_has_nonlocal_goto = 1;
+ LABEL_REF_NONLOCAL_P (label_ref) = 1;
+
+ /* Copy the rtl for the slots so that they won't be shared in
+ case the virtual stack vars register gets instantiated differently
+ in the parent than in the child. */
+
+#if HAVE_nonlocal_goto
+ if (HAVE_nonlocal_goto)
+ emit_insn (gen_nonlocal_goto (lookup_static_chain (label),
+ copy_rtx (p->nonlocal_goto_handler_slot),
+ copy_rtx (p->nonlocal_goto_stack_level),
+ label_ref));
+ else
+#endif
+ {
+ rtx addr;
+
+ /* Restore frame pointer for containing function.
+ This sets the actual hard register used for the frame pointer
+ to the location of the function's incoming static chain info.
+ The non-local goto handler will then adjust it to contain the
+ proper value and reload the argument pointer, if needed. */
+ emit_move_insn (hard_frame_pointer_rtx, lookup_static_chain (label));
+
+ /* We have now loaded the frame pointer hardware register with
+	     the address that corresponds to the start of the virtual
+	     stack vars.  So replace virtual_stack_vars_rtx in all
+	     addresses we use with hard_frame_pointer_rtx.  */
+
+ /* Get addr of containing function's current nonlocal goto handler,
+ which will do any cleanups and then jump to the label. */
+ addr = copy_rtx (p->nonlocal_goto_handler_slot);
+ temp = copy_to_reg (replace_rtx (addr, virtual_stack_vars_rtx,
+ hard_frame_pointer_rtx));
+
+ /* Restore the stack pointer. Note this uses fp just restored. */
+ addr = p->nonlocal_goto_stack_level;
+ if (addr)
+ addr = replace_rtx (copy_rtx (addr),
+ virtual_stack_vars_rtx,
+ hard_frame_pointer_rtx);
+
+ emit_stack_restore (SAVE_NONLOCAL, addr, NULL_RTX);
+
+ /* Put in the static chain register the nonlocal label address. */
+ emit_move_insn (static_chain_rtx, label_ref);
+ /* USE of hard_frame_pointer_rtx added for consistency; not clear if
+ really needed. */
+ emit_insn (gen_rtx (USE, VOIDmode, hard_frame_pointer_rtx));
+ emit_insn (gen_rtx (USE, VOIDmode, stack_pointer_rtx));
+ emit_insn (gen_rtx (USE, VOIDmode, static_chain_rtx));
+ emit_indirect_jump (temp);
+ }
+ }
+ else
+ expand_goto_internal (label, label_rtx (label), NULL_RTX);
+}
+
+/* Generate RTL code for a `goto' statement with target label BODY,
+   which, if nonzero, is the LABEL_DECL tree node for the label.
+   LABEL should be the CODE_LABEL rtx to jump to.
+ LAST_INSN, if non-0, is the rtx we should consider as the last
+ insn emitted (for the purposes of cleaning up a return). */
+
+static void
+expand_goto_internal (body, label, last_insn)
+ tree body;
+ rtx label;
+ rtx last_insn;
+{
+ struct nesting *block;
+ rtx stack_level = 0;
+
+ /* NOTICE! If a bytecode instruction other than `jump' is needed,
+ then the caller has to call bc_expand_goto_internal()
+ directly. This is rather an exceptional case, and there aren't
+ that many places where this is necessary. */
+ if (output_bytecode)
+ {
+      bc_expand_goto_internal (jump, BYTECODE_BC_LABEL (label), body);
+ return;
+ }
+
+ if (GET_CODE (label) != CODE_LABEL)
+ abort ();
+
+ /* If label has already been defined, we can tell now
+ whether and how we must alter the stack level. */
+
+ if (PREV_INSN (label) != 0)
+ {
+ /* Find the innermost pending block that contains the label.
+ (Check containment by comparing insn-uids.)
+ Then restore the outermost stack level within that block,
+ and do cleanups of all blocks contained in it. */
+ for (block = block_stack; block; block = block->next)
+ {
+ if (INSN_UID (block->data.block.first_insn) < INSN_UID (label))
+ break;
+ if (block->data.block.stack_level != 0)
+ stack_level = block->data.block.stack_level;
+ /* Execute the cleanups for blocks we are exiting. */
+ if (block->data.block.cleanups != 0)
+ {
+ expand_cleanups (block->data.block.cleanups, NULL_TREE);
+ do_pending_stack_adjust ();
+ }
+ }
+
+ if (stack_level)
+ {
+ /* Ensure stack adjust isn't done by emit_jump, as this would clobber
+ the stack pointer. This one should be deleted as dead by flow. */
+ clear_pending_stack_adjust ();
+ do_pending_stack_adjust ();
+ emit_stack_restore (SAVE_BLOCK, stack_level, NULL_RTX);
+ }
+
+ if (body != 0 && DECL_TOO_LATE (body))
+ error ("jump to `%s' invalidly jumps into binding contour",
+ IDENTIFIER_POINTER (DECL_NAME (body)));
+ }
+ /* Label not yet defined: may need to put this goto
+ on the fixup list. */
+ else if (! expand_fixup (body, label, last_insn))
+ {
+ /* No fixup needed. Record that the label is the target
+ of at least one goto that has no fixup. */
+ if (body != 0)
+ TREE_ADDRESSABLE (body) = 1;
+ }
+
+ emit_jump (label);
+}
+
+/* Generate a jump with OPCODE to the given bytecode LABEL.  BODY,
+   if nonzero, is the LABEL_DECL tree node for that label.  */
+
+static void
+bc_expand_goto_internal (opcode, label, body)
+ enum bytecode_opcode opcode;
+ struct bc_label *label;
+ tree body;
+{
+ struct nesting *block;
+ int stack_level = -1;
+
+ /* If the label is defined, adjust the stack as necessary.
+ If it's not defined, we have to push the reference on the
+ fixup list. */
+
+ if (label->defined)
+ {
+
+ /* Find the innermost pending block that contains the label.
+ (Check containment by comparing bytecode uids.) Then restore the
+ outermost stack level within that block. */
+
+ for (block = block_stack; block; block = block->next)
+ {
+ if (BYTECODE_BC_LABEL (block->data.block.first_insn)->uid < label->uid)
+ break;
+ if (block->data.block.bc_stack_level)
+ stack_level = block->data.block.bc_stack_level;
+
+ /* Execute the cleanups for blocks we are exiting. */
+ if (block->data.block.cleanups != 0)
+ {
+ expand_cleanups (block->data.block.cleanups, NULL_TREE);
+ do_pending_stack_adjust ();
+ }
+ }
+
+ /* Restore the stack level. If we need to adjust the stack, we
+ must do so after the jump, since the jump may depend on
+ what's on the stack. Thus, any stack-modifying conditional
+ jumps (these are the only ones that rely on what's on the
+ stack) go into the fixup list. */
+
+ if (stack_level >= 0
+ && stack_depth != stack_level
+ && opcode != jump)
+
+ bc_expand_fixup (opcode, label, stack_level);
+ else
+ {
+ if (stack_level >= 0)
+ bc_adjust_stack (stack_depth - stack_level);
+
+ if (body && DECL_BIT_FIELD (body))
+ error ("jump to `%s' invalidly jumps into binding contour",
+ IDENTIFIER_POINTER (DECL_NAME (body)));
+
+ /* Emit immediate jump */
+ bc_emit_bytecode (opcode);
+ bc_emit_bytecode_labelref (label);
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+ }
+ }
+ else
+ /* Put goto in the fixup list */
+ bc_expand_fixup (opcode, label, stack_level);
+}
+
+/* Generate if necessary a fixup for a goto
+ whose target label in tree structure (if any) is TREE_LABEL
+ and whose target in rtl is RTL_LABEL.
+
+ If LAST_INSN is nonzero, we pretend that the jump appears
+ after insn LAST_INSN instead of at the current point in the insn stream.
+
+ The fixup will be used later to insert insns just before the goto.
+ Those insns will restore the stack level as appropriate for the
+ target label, and will (in the case of C++) also invoke any object
+ destructors which have to be invoked when we exit the scopes which
+ are exited by the goto.
+
+ Value is nonzero if a fixup is made. */
+
+static int
+expand_fixup (tree_label, rtl_label, last_insn)
+ tree tree_label;
+ rtx rtl_label;
+ rtx last_insn;
+{
+ struct nesting *block, *end_block;
+
+ /* See if we can recognize which block the label will be output in.
+ This is possible in some very common cases.
+ If we succeed, set END_BLOCK to that block.
+ Otherwise, set it to 0. */
+
+ if (cond_stack
+ && (rtl_label == cond_stack->data.cond.endif_label
+ || rtl_label == cond_stack->data.cond.next_label))
+ end_block = cond_stack;
+ /* If we are in a loop, recognize certain labels which
+ are likely targets. This reduces the number of fixups
+ we need to create. */
+ else if (loop_stack
+ && (rtl_label == loop_stack->data.loop.start_label
+ || rtl_label == loop_stack->data.loop.end_label
+ || rtl_label == loop_stack->data.loop.continue_label))
+ end_block = loop_stack;
+ else
+ end_block = 0;
+
+ /* Now set END_BLOCK to the binding level to which we will return. */
+
+ if (end_block)
+ {
+ struct nesting *next_block = end_block->all;
+ block = block_stack;
+
+ /* First see if the END_BLOCK is inside the innermost binding level.
+ If so, then no cleanups or stack levels are relevant. */
+ while (next_block && next_block != block)
+ next_block = next_block->all;
+
+ if (next_block)
+ return 0;
+
+ /* Otherwise, set END_BLOCK to the innermost binding level
+ which is outside the relevant control-structure nesting. */
+ next_block = block_stack->next;
+ for (block = block_stack; block != end_block; block = block->all)
+ if (block == next_block)
+ next_block = next_block->next;
+ end_block = next_block;
+ }
+
+ /* Does any containing block have a stack level or cleanups?
+ If not, no fixup is needed, and that is the normal case
+ (the only case, for standard C). */
+ for (block = block_stack; block != end_block; block = block->next)
+ if (block->data.block.stack_level != 0
+ || block->data.block.cleanups != 0)
+ break;
+
+ if (block != end_block)
+ {
+ /* Ok, a fixup is needed. Add a fixup to the list of such. */
+ struct goto_fixup *fixup
+ = (struct goto_fixup *) oballoc (sizeof (struct goto_fixup));
+ /* In case an old stack level is restored, make sure that comes
+ after any pending stack adjust. */
+ /* ?? If the fixup isn't to come at the present position,
+ doing the stack adjust here isn't useful. Doing it with our
+ settings at that location isn't useful either. Let's hope
+ someone does it! */
+ if (last_insn == 0)
+ do_pending_stack_adjust ();
+ fixup->target = tree_label;
+ fixup->target_rtl = rtl_label;
+
+ /* Create a BLOCK node and a corresponding matched set of
+ NOTE_INSN_BEGIN_BLOCK and NOTE_INSN_END_BLOCK notes at
+ this point. The notes will encapsulate any and all fixup
+ code which we might later insert at this point in the insn
+ stream. Also, the BLOCK node will be the parent (i.e. the
+ `SUPERBLOCK') of any other BLOCK nodes which we might create
+ later on when we are expanding the fixup code. */
+
+ {
+ register rtx original_before_jump
+ = last_insn ? last_insn : get_last_insn ();
+
+ start_sequence ();
+ pushlevel (0);
+ fixup->before_jump = emit_note (NULL_PTR, NOTE_INSN_BLOCK_BEG);
+ last_block_end_note = emit_note (NULL_PTR, NOTE_INSN_BLOCK_END);
+ fixup->context = poplevel (1, 0, 0); /* Create the BLOCK node now! */
+ end_sequence ();
+ emit_insns_after (fixup->before_jump, original_before_jump);
+ }
+
+ fixup->block_start_count = block_start_count;
+ fixup->stack_level = 0;
+ fixup->cleanup_list_list
+ = (((block->data.block.outer_cleanups
+#if 0
+ && block->data.block.outer_cleanups != empty_cleanup_list
+#endif
+ )
+ || block->data.block.cleanups)
+ ? tree_cons (NULL_TREE, block->data.block.cleanups,
+ block->data.block.outer_cleanups)
+ : 0);
+ fixup->next = goto_fixup_chain;
+ goto_fixup_chain = fixup;
+ }
+
+ return block != 0;
+}
+
+
+/* Generate bytecode jump with OPCODE to a fixup routine that links to LABEL.
+ Make the fixup restore the stack level to STACK_LEVEL. */
+
+static void
+bc_expand_fixup (opcode, label, stack_level)
+ enum bytecode_opcode opcode;
+ struct bc_label *label;
+ int stack_level;
+{
+ struct goto_fixup *fixup
+ = (struct goto_fixup *) oballoc (sizeof (struct goto_fixup));
+
+ fixup->label = bc_get_bytecode_label ();
+ fixup->bc_target = label;
+ fixup->bc_stack_level = stack_level;
+ fixup->bc_handled = FALSE;
+
+ fixup->next = goto_fixup_chain;
+ goto_fixup_chain = fixup;
+
+ /* Insert a jump to the fixup code */
+ bc_emit_bytecode (opcode);
+ bc_emit_bytecode_labelref (fixup->label);
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+}
+
+/* Expand any needed fixups in the outermost binding level of the
+ function. FIRST_INSN is the first insn in the function. */
+
+void
+expand_fixups (first_insn)
+ rtx first_insn;
+{
+ fixup_gotos (NULL_PTR, NULL_RTX, NULL_TREE, first_insn, 0);
+}
+
+/* When exiting a binding contour, process all pending gotos requiring fixups.
+ THISBLOCK is the structure that describes the block being exited.
+ STACK_LEVEL is the rtx for the stack level to restore exiting this contour.
+ CLEANUP_LIST is a list of expressions to evaluate on exiting this contour.
+ FIRST_INSN is the insn that began this contour.
+
+ Gotos that jump out of this contour must restore the
+ stack level and do the cleanups before actually jumping.
+
+   DONT_JUMP_IN nonzero means report an error if there is a jump into this
+ contour from before the beginning of the contour.
+ This is also done if STACK_LEVEL is nonzero. */
+
+static void
+fixup_gotos (thisblock, stack_level, cleanup_list, first_insn, dont_jump_in)
+ struct nesting *thisblock;
+ rtx stack_level;
+ tree cleanup_list;
+ rtx first_insn;
+ int dont_jump_in;
+{
+ register struct goto_fixup *f, *prev;
+
+ if (output_bytecode)
+ {
+ /* ??? The second arg is the bc stack level, which is not the same
+ as STACK_LEVEL. I have no idea what should go here, so I'll
+ just pass 0. */
+ bc_fixup_gotos (thisblock, 0, cleanup_list, first_insn, dont_jump_in);
+ return;
+ }
+
+ /* F is the fixup we are considering; PREV is the previous one. */
+ /* We run this loop in two passes so that cleanups of exited blocks
+ are run first, and blocks that are exited are marked so
+ afterwards. */
+
+ for (prev = 0, f = goto_fixup_chain; f; prev = f, f = f->next)
+ {
+ /* Test for a fixup that is inactive because it is already handled. */
+ if (f->before_jump == 0)
+ {
+ /* Delete inactive fixup from the chain, if that is easy to do. */
+ if (prev != 0)
+ prev->next = f->next;
+ }
+ /* Has this fixup's target label been defined?
+ If so, we can finalize it. */
+ else if (PREV_INSN (f->target_rtl) != 0)
+ {
+ register rtx cleanup_insns;
+
+ /* Get the first non-label after the label
+ this goto jumps to. If that's before this scope begins,
+ we don't have a jump into the scope. */
+ rtx after_label = f->target_rtl;
+ while (after_label != 0 && GET_CODE (after_label) == CODE_LABEL)
+ after_label = NEXT_INSN (after_label);
+
+ /* If this fixup jumped into this contour from before the beginning
+ of this contour, report an error. */
+ /* ??? Bug: this does not detect jumping in through intermediate
+ blocks that have stack levels or cleanups.
+ It detects only a problem with the innermost block
+ around the label. */
+ if (f->target != 0
+ && (dont_jump_in || stack_level || cleanup_list)
+ /* If AFTER_LABEL is 0, it means the jump goes to the end
+ of the rtl, which means it jumps into this scope. */
+ && (after_label == 0
+ || INSN_UID (first_insn) < INSN_UID (after_label))
+ && INSN_UID (first_insn) > INSN_UID (f->before_jump)
+ && ! DECL_REGISTER (f->target))
+ {
+ error_with_decl (f->target,
+ "label `%s' used before containing binding contour");
+ /* Prevent multiple errors for one label. */
+ DECL_REGISTER (f->target) = 1;
+ }
+
+ /* We will expand the cleanups into a sequence of their own and
+ then later on we will attach this new sequence to the insn
+ stream just ahead of the actual jump insn. */
+
+ start_sequence ();
+
+ /* Temporarily restore the lexical context where we will
+ logically be inserting the fixup code. We do this for the
+ sake of getting the debugging information right. */
+
+ pushlevel (0);
+ set_block (f->context);
+
+ /* Expand the cleanups for blocks this jump exits. */
+ if (f->cleanup_list_list)
+ {
+ tree lists;
+ for (lists = f->cleanup_list_list; lists; lists = TREE_CHAIN (lists))
+ /* Marked elements correspond to blocks that have been closed.
+ Do their cleanups. */
+ if (TREE_ADDRESSABLE (lists)
+ && TREE_VALUE (lists) != 0)
+ {
+ expand_cleanups (TREE_VALUE (lists), 0);
+ /* Pop any pushes done in the cleanups,
+ in case function is about to return. */
+ do_pending_stack_adjust ();
+ }
+ }
+
+ /* Restore stack level for the biggest contour that this
+ jump jumps out of. */
+ if (f->stack_level)
+ emit_stack_restore (SAVE_BLOCK, f->stack_level, f->before_jump);
+
+ /* Finish up the sequence containing the insns which implement the
+ necessary cleanups, and then attach that whole sequence to the
+ insn stream just ahead of the actual jump insn. Attaching it
+ at that point insures that any cleanups which are in fact
+ implicit C++ object destructions (which must be executed upon
+ leaving the block) appear (to the debugger) to be taking place
+ in an area of the generated code where the object(s) being
+ destructed are still "in scope". */
+
+ cleanup_insns = get_insns ();
+ poplevel (1, 0, 0);
+
+ end_sequence ();
+ emit_insns_after (cleanup_insns, f->before_jump);
+
+
+ f->before_jump = 0;
+ }
+ }
+
+ /* Mark the cleanups of exited blocks so that they are executed
+ by the code above. */
+ for (prev = 0, f = goto_fixup_chain; f; prev = f, f = f->next)
+ if (f->before_jump != 0
+ && PREV_INSN (f->target_rtl) == 0
+ /* Label has still not appeared. If we are exiting a block with
+ a stack level to restore, that started before the fixup,
+ mark this stack level as needing restoration
+ when the fixup is later finalized.
+ Also mark the cleanup_list_list element for F
+ that corresponds to this block, so that ultimately
+ this block's cleanups will be executed by the code above. */
+ && thisblock != 0
+ /* Note: if THISBLOCK == 0 and we have a label that hasn't appeared,
+ it means the label is undefined. That's erroneous, but possible. */
+ && (thisblock->data.block.block_start_count
+ <= f->block_start_count))
+ {
+ tree lists = f->cleanup_list_list;
+ for (; lists; lists = TREE_CHAIN (lists))
+ /* If the following elt. corresponds to our containing block
+ then the elt. must be for this block. */
+ if (TREE_CHAIN (lists) == thisblock->data.block.outer_cleanups)
+ TREE_ADDRESSABLE (lists) = 1;
+
+ if (stack_level)
+ f->stack_level = stack_level;
+ }
+}
+
+
+/* When exiting a binding contour, process all pending gotos requiring fixups.
+ Note: STACK_DEPTH is not altered.
+
+ The arguments are currently not used in the bytecode compiler, but we may
+ need them one day for languages other than C.
+
+ THISBLOCK is the structure that describes the block being exited.
+ STACK_LEVEL is the rtx for the stack level to restore exiting this contour.
+ CLEANUP_LIST is a list of expressions to evaluate on exiting this contour.
+ FIRST_INSN is the insn that began this contour.
+
+ Gotos that jump out of this contour must restore the
+ stack level and do the cleanups before actually jumping.
+
+   DONT_JUMP_IN nonzero means report an error if there is a jump into this
+ contour from before the beginning of the contour.
+ This is also done if STACK_LEVEL is nonzero. */
+
+static void
+bc_fixup_gotos (thisblock, stack_level, cleanup_list, first_insn, dont_jump_in)
+ struct nesting *thisblock;
+ int stack_level;
+ tree cleanup_list;
+ rtx first_insn;
+ int dont_jump_in;
+{
+ register struct goto_fixup *f, *prev;
+ int saved_stack_depth;
+
+ /* F is the fixup we are considering; PREV is the previous one. */
+
+ for (prev = 0, f = goto_fixup_chain; f; prev = f, f = f->next)
+ {
+ /* Test for a fixup that is inactive because it is already handled. */
+ if (f->before_jump == 0)
+ {
+ /* Delete inactive fixup from the chain, if that is easy to do. */
+ if (prev)
+ prev->next = f->next;
+ }
+
+ /* Emit code to restore the stack and continue */
+ bc_emit_bytecode_labeldef (f->label);
+
+ /* Save stack_depth across call, since bc_adjust_stack () will alter
+ the perceived stack depth via the instructions generated. */
+
+ if (f->bc_stack_level >= 0)
+ {
+ saved_stack_depth = stack_depth;
+ bc_adjust_stack (stack_depth - f->bc_stack_level);
+ stack_depth = saved_stack_depth;
+ }
+
+ bc_emit_bytecode (jump);
+ bc_emit_bytecode_labelref (f->bc_target);
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+ }
+
+ goto_fixup_chain = NULL;
+}
+
+/* Generate RTL for an asm statement (explicit assembler code).
+ BODY is a STRING_CST node containing the assembler code text,
+ or an ADDR_EXPR containing a STRING_CST. */
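+
+/* (E.g. a plain `asm ("nop");' statement arrives here with BODY being
+   the STRING_CST for "nop".)  */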
+
+void
+expand_asm (body)
+ tree body;
+{
+ if (output_bytecode)
+ {
+ error ("`asm' is illegal when generating bytecode");
+ return;
+ }
+
+ if (TREE_CODE (body) == ADDR_EXPR)
+ body = TREE_OPERAND (body, 0);
+
+ emit_insn (gen_rtx (ASM_INPUT, VOIDmode,
+ TREE_STRING_POINTER (body)));
+ last_expr_type = 0;
+}
+
+/* Generate RTL for an asm statement with arguments.
+ STRING is the instruction template.
+ OUTPUTS is a list of output arguments (lvalues); INPUTS a list of inputs.
+ Each output or input has an expression in the TREE_VALUE and
+ a constraint-string in the TREE_PURPOSE.
+ CLOBBERS is a list of STRING_CST nodes each naming a hard register
+ that is clobbered by this insn.
+
+ Not all kinds of lvalue that may appear in OUTPUTS can be stored directly.
+ Some elements of OUTPUTS may be replaced with trees representing temporary
+ values. The caller should copy those temporary values to the originally
+ specified lvalues.
+
+ VOL nonzero means the insn is volatile; don't optimize it. */
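+
+/* A sketch of the mapping from GNU C, with an i386-flavored example:
+
+	asm volatile ("addl %2,%0"
+		      : "=r" (sum)		<- OUTPUTS
+		      : "0" (sum), "g" (inc)	<- INPUTS
+		      : "cc");			<- CLOBBERS
+
+   Each list pairs a constraint string (TREE_PURPOSE) with an
+   expression (TREE_VALUE); `volatile' arrives here as VOL.  */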
+
+void
+expand_asm_operands (string, outputs, inputs, clobbers, vol, filename, line)
+ tree string, outputs, inputs, clobbers;
+ int vol;
+ char *filename;
+ int line;
+{
+ rtvec argvec, constraints;
+ rtx body;
+ int ninputs = list_length (inputs);
+ int noutputs = list_length (outputs);
+ int nclobbers;
+ tree tail;
+ register int i;
+ /* Vector of RTX's of evaluated output operands. */
+ rtx *output_rtx = (rtx *) alloca (noutputs * sizeof (rtx));
+ /* The insn we have emitted. */
+ rtx insn;
+
+ if (output_bytecode)
+ {
+ error ("`asm' is illegal when generating bytecode");
+ return;
+ }
+
+ /* Count the number of meaningful clobbered registers, ignoring what
+ we would ignore later. */
+ nclobbers = 0;
+ for (tail = clobbers; tail; tail = TREE_CHAIN (tail))
+ {
+ char *regname = TREE_STRING_POINTER (TREE_VALUE (tail));
+ i = decode_reg_name (regname);
+ if (i >= 0 || i == -4)
+ ++nclobbers;
+ }
+
+ last_expr_type = 0;
+
+ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++)
+ {
+ tree val = TREE_VALUE (tail);
+ tree val1;
+ int j;
+ int found_equal;
+
+ /* If there's an erroneous arg, emit no insn. */
+ if (TREE_TYPE (val) == error_mark_node)
+ return;
+
+ /* Make sure constraint has `=' and does not have `+'. */
+
+ found_equal = 0;
+ for (j = 0; j < TREE_STRING_LENGTH (TREE_PURPOSE (tail)); j++)
+ {
+ if (TREE_STRING_POINTER (TREE_PURPOSE (tail))[j] == '+')
+ {
+ error ("output operand constraint contains `+'");
+ return;
+ }
+ if (TREE_STRING_POINTER (TREE_PURPOSE (tail))[j] == '=')
+ found_equal = 1;
+ }
+ if (! found_equal)
+ {
+ error ("output operand constraint lacks `='");
+ return;
+ }
+
+ /* If an output operand is not a variable or indirect ref,
+ or a part of one,
+ create a SAVE_EXPR which is a pseudo-reg
+ to act as an intermediate temporary.
+ Make the asm insn write into that, then copy it to
+ the real output operand. */
+
+ while (TREE_CODE (val) == COMPONENT_REF
+ || TREE_CODE (val) == ARRAY_REF)
+ val = TREE_OPERAND (val, 0);
+
+ if (TREE_CODE (val) != VAR_DECL
+ && TREE_CODE (val) != PARM_DECL
+ && TREE_CODE (val) != INDIRECT_REF)
+ {
+ TREE_VALUE (tail) = save_expr (TREE_VALUE (tail));
+	  /* If it's a constant, print the error now so we don't crash later.  */
+ if (TREE_CODE (TREE_VALUE (tail)) != SAVE_EXPR)
+ {
+ error ("invalid output in `asm'");
+ return;
+ }
+ }
+
+ output_rtx[i] = expand_expr (TREE_VALUE (tail), NULL_RTX, VOIDmode, 0);
+ }
+
+ if (ninputs + noutputs > MAX_RECOG_OPERANDS)
+ {
+ error ("more than %d operands in `asm'", MAX_RECOG_OPERANDS);
+ return;
+ }
+
+ /* Make vectors for the expression-rtx and constraint strings. */
+
+ argvec = rtvec_alloc (ninputs);
+ constraints = rtvec_alloc (ninputs);
+
+ body = gen_rtx (ASM_OPERANDS, VOIDmode,
+ TREE_STRING_POINTER (string), "", 0, argvec, constraints,
+ filename, line);
+ MEM_VOLATILE_P (body) = vol;
+
+ /* Eval the inputs and put them into ARGVEC.
+ Put their constraints into ASM_INPUTs and store in CONSTRAINTS. */
+
+ i = 0;
+ for (tail = inputs; tail; tail = TREE_CHAIN (tail))
+ {
+ int j;
+
+ /* If there's an erroneous arg, emit no insn,
+ because the ASM_INPUT would get VOIDmode
+ and that could cause a crash in reload. */
+ if (TREE_TYPE (TREE_VALUE (tail)) == error_mark_node)
+ return;
+ if (TREE_PURPOSE (tail) == NULL_TREE)
+ {
+ error ("hard register `%s' listed as input operand to `asm'",
+ TREE_STRING_POINTER (TREE_VALUE (tail)) );
+ return;
+ }
+
+ /* Make sure constraint has neither `=' nor `+'. */
+
+ for (j = 0; j < TREE_STRING_LENGTH (TREE_PURPOSE (tail)); j++)
+ if (TREE_STRING_POINTER (TREE_PURPOSE (tail))[j] == '='
+ || TREE_STRING_POINTER (TREE_PURPOSE (tail))[j] == '+')
+ {
+ error ("input operand constraint contains `%c'",
+ TREE_STRING_POINTER (TREE_PURPOSE (tail))[j]);
+ return;
+ }
+
+ XVECEXP (body, 3, i) /* argvec */
+ = expand_expr (TREE_VALUE (tail), NULL_RTX, VOIDmode, 0);
+ XVECEXP (body, 4, i) /* constraints */
+ = gen_rtx (ASM_INPUT, TYPE_MODE (TREE_TYPE (TREE_VALUE (tail))),
+ TREE_STRING_POINTER (TREE_PURPOSE (tail)));
+ i++;
+ }
+
+ /* Protect all the operands from the queue,
+ now that they have all been evaluated. */
+
+ for (i = 0; i < ninputs; i++)
+ XVECEXP (body, 3, i) = protect_from_queue (XVECEXP (body, 3, i), 0);
+
+ for (i = 0; i < noutputs; i++)
+ output_rtx[i] = protect_from_queue (output_rtx[i], 1);
+
+ /* Now, for each output, construct an rtx
+ (set OUTPUT (asm_operands INSN OUTPUTNUMBER OUTPUTCONSTRAINT
+ ARGVEC CONSTRAINTS))
+ If there is more than one, put them inside a PARALLEL. */
+
+ if (noutputs == 1 && nclobbers == 0)
+ {
+ XSTR (body, 1) = TREE_STRING_POINTER (TREE_PURPOSE (outputs));
+ insn = emit_insn (gen_rtx (SET, VOIDmode, output_rtx[0], body));
+ }
+ else if (noutputs == 0 && nclobbers == 0)
+ {
+ /* No output operands: put in a raw ASM_OPERANDS rtx. */
+ insn = emit_insn (body);
+ }
+ else
+ {
+ rtx obody = body;
+ int num = noutputs;
+ if (num == 0) num = 1;
+ body = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (num + nclobbers));
+
+ /* For each output operand, store a SET. */
+
+ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++)
+ {
+ XVECEXP (body, 0, i)
+ = gen_rtx (SET, VOIDmode,
+ output_rtx[i],
+ gen_rtx (ASM_OPERANDS, VOIDmode,
+ TREE_STRING_POINTER (string),
+ TREE_STRING_POINTER (TREE_PURPOSE (tail)),
+ i, argvec, constraints,
+ filename, line));
+ MEM_VOLATILE_P (SET_SRC (XVECEXP (body, 0, i))) = vol;
+ }
+
+ /* If there are no outputs (but there are some clobbers)
+ store the bare ASM_OPERANDS into the PARALLEL. */
+
+ if (i == 0)
+ XVECEXP (body, 0, i++) = obody;
+
+ /* Store (clobber REG) for each clobbered register specified. */
+
+ for (tail = clobbers; tail; tail = TREE_CHAIN (tail))
+ {
+ char *regname = TREE_STRING_POINTER (TREE_VALUE (tail));
+ int j = decode_reg_name (regname);
+
+ if (j < 0)
+ {
+ if (j == -3) /* `cc', which is not a register */
+ continue;
+
+ if (j == -4) /* `memory', don't cache memory across asm */
+ {
+ XVECEXP (body, 0, i++)
+ = gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (MEM, BLKmode,
+ gen_rtx (SCRATCH, VOIDmode, 0)));
+ continue;
+ }
+
+ error ("unknown register name `%s' in `asm'", regname);
+ return;
+ }
+
+ /* Use QImode since that's guaranteed to clobber just one reg. */
+ XVECEXP (body, 0, i++)
+ = gen_rtx (CLOBBER, VOIDmode, gen_rtx (REG, QImode, j));
+ }
+
+ insn = emit_insn (body);
+ }
+
+ free_temp_slots ();
+}
+
+/* Generate RTL to evaluate the expression EXP
+   and remember it in case this is the VALUE in a ({... VALUE; }) construct.  */
+
+void
+expand_expr_stmt (exp)
+ tree exp;
+{
+ if (output_bytecode)
+ {
+ int org_stack_depth = stack_depth;
+
+ bc_expand_expr (exp);
+
+ /* Restore stack depth */
+ if (stack_depth < org_stack_depth)
+ abort ();
+
+ bc_emit_instruction (drop);
+
+ last_expr_type = TREE_TYPE (exp);
+ return;
+ }
+
+ /* If -W, warn about statements with no side effects,
+ except for an explicit cast to void (e.g. for assert()), and
+ except inside a ({...}) where they may be useful. */
+ if (expr_stmts_for_value == 0 && exp != error_mark_node)
+ {
+ if (! TREE_SIDE_EFFECTS (exp) && (extra_warnings || warn_unused)
+ && !(TREE_CODE (exp) == CONVERT_EXPR
+ && TREE_TYPE (exp) == void_type_node))
+ warning_with_file_and_line (emit_filename, emit_lineno,
+ "statement with no effect");
+ else if (warn_unused)
+ warn_if_unused_value (exp);
+ }
+ last_expr_type = TREE_TYPE (exp);
+ if (! flag_syntax_only)
+ last_expr_value = expand_expr (exp,
+ (expr_stmts_for_value
+ ? NULL_RTX : const0_rtx),
+ VOIDmode, 0);
+
+ /* If all we do is reference a volatile value in memory,
+ copy it to a register to be sure it is actually touched. */
+ if (last_expr_value != 0 && GET_CODE (last_expr_value) == MEM
+ && TREE_THIS_VOLATILE (exp))
+ {
+ if (TYPE_MODE (TREE_TYPE (exp)) == VOIDmode)
+ ;
+ else if (TYPE_MODE (TREE_TYPE (exp)) != BLKmode)
+ copy_to_reg (last_expr_value);
+ else
+ {
+ rtx lab = gen_label_rtx ();
+
+ /* Compare the value with itself to reference it. */
+ emit_cmp_insn (last_expr_value, last_expr_value, EQ,
+ expand_expr (TYPE_SIZE (last_expr_type),
+ NULL_RTX, VOIDmode, 0),
+ BLKmode, 0,
+ TYPE_ALIGN (last_expr_type) / BITS_PER_UNIT);
+ emit_jump_insn ((*bcc_gen_fctn[(int) EQ]) (lab));
+ emit_label (lab);
+ }
+ }
+
+ /* If this expression is part of a ({...}) and is in memory, we may have
+ to preserve temporaries. */
+ preserve_temp_slots (last_expr_value);
+
+ /* Free any temporaries used to evaluate this expression. Any temporary
+ used as a result of this expression will already have been preserved
+ above. */
+ free_temp_slots ();
+
+ emit_queue ();
+}
+
+/* Warn if EXP contains any computations whose results are not used.
+ Return 1 if a warning is printed; 0 otherwise. */
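+
+/* (Examples, per the cases below: `f () + 1;' draws the warning, while
+   `(void) f ();', `x = f ();' and `(f (), 0)' do not.)  */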
+
+static int
+warn_if_unused_value (exp)
+ tree exp;
+{
+ if (TREE_USED (exp))
+ return 0;
+
+ switch (TREE_CODE (exp))
+ {
+ case PREINCREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ case MODIFY_EXPR:
+ case INIT_EXPR:
+ case TARGET_EXPR:
+ case CALL_EXPR:
+ case METHOD_CALL_EXPR:
+ case RTL_EXPR:
+ case WITH_CLEANUP_EXPR:
+ case EXIT_EXPR:
+ /* We don't warn about COND_EXPR because it may be a useful
+ construct if either arm contains a side effect. */
+ case COND_EXPR:
+ return 0;
+
+ case BIND_EXPR:
+ /* For a binding, warn if no side effect within it. */
+ return warn_if_unused_value (TREE_OPERAND (exp, 1));
+
+ case TRUTH_ORIF_EXPR:
+ case TRUTH_ANDIF_EXPR:
+ /* In && or ||, warn if 2nd operand has no side effect. */
+ return warn_if_unused_value (TREE_OPERAND (exp, 1));
+
+ case COMPOUND_EXPR:
+ if (TREE_NO_UNUSED_WARNING (exp))
+ return 0;
+ if (warn_if_unused_value (TREE_OPERAND (exp, 0)))
+ return 1;
+ /* Let people do `(foo (), 0)' without a warning. */
+ if (TREE_CONSTANT (TREE_OPERAND (exp, 1)))
+ return 0;
+ return warn_if_unused_value (TREE_OPERAND (exp, 1));
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ /* Don't warn about values cast to void. */
+ if (TREE_TYPE (exp) == void_type_node)
+ return 0;
+ /* Don't warn about conversions not explicit in the user's program. */
+ if (TREE_NO_UNUSED_WARNING (exp))
+ return 0;
+ /* Assignment to a cast usually results in a cast of a modify.
+ Don't complain about that. There can be an arbitrary number of
+ casts before the modify, so we must loop until we find the first
+ non-cast expression and then test to see if that is a modify. */
+ {
+ tree tem = TREE_OPERAND (exp, 0);
+
+ while (TREE_CODE (tem) == CONVERT_EXPR || TREE_CODE (tem) == NOP_EXPR)
+ tem = TREE_OPERAND (tem, 0);
+
+ if (TREE_CODE (tem) == MODIFY_EXPR)
+ return 0;
+ }
+ /* ... fall through ... */
+
+ default:
+ /* Referencing a volatile value is a side effect, so don't warn. */
+ if ((TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
+ || TREE_CODE_CLASS (TREE_CODE (exp)) == 'r')
+ && TREE_THIS_VOLATILE (exp))
+ return 0;
+ warning_with_file_and_line (emit_filename, emit_lineno,
+ "value computed is not used");
+ return 1;
+ }
+}
+
+/* Clear out the memory of the last expression evaluated. */
+
+void
+clear_last_expr ()
+{
+ last_expr_type = 0;
+}
+
+/* Begin a statement which will return a value.
+ Return the RTL_EXPR for this statement expr.
+ The caller must save that value and pass it to expand_end_stmt_expr. */
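+
+/* (E.g. for the GNU statement expression `({ int y = f (); y + 1; })',
+   the front end calls this before expanding the inner statements and
+   expand_end_stmt_expr afterward to pick up the value.)  */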
+
+tree
+expand_start_stmt_expr ()
+{
+ int momentary;
+ tree t;
+
+ /* When generating bytecode just note down the stack depth */
+ if (output_bytecode)
+ return (build_int_2 (stack_depth, 0));
+
+ /* Make the RTL_EXPR node temporary, not momentary,
+ so that rtl_expr_chain doesn't become garbage. */
+ momentary = suspend_momentary ();
+ t = make_node (RTL_EXPR);
+ resume_momentary (momentary);
+ start_sequence_for_rtl_expr (t);
+ NO_DEFER_POP;
+ expr_stmts_for_value++;
+ return t;
+}
+
+/* Restore the previous state at the end of a statement that returns a value.
+ Returns a tree node representing the statement's value and the
+ insns to compute the value.
+
+ The nodes of that expression have been freed by now, so we cannot use them.
+ But we don't want to do that anyway; the expression has already been
+ evaluated and now we just want to use the value. So generate a RTL_EXPR
+ with the proper type and RTL value.
+
+ If the last substatement was not an expression,
+ return something with type `void'. */
+
+tree
+expand_end_stmt_expr (t)
+ tree t;
+{
+ if (output_bytecode)
+ {
+ int i;
+ tree t;
+
+
+ /* At this point, all expressions have been evaluated in order.
+ However, all expression values have been popped when evaluated,
+ which means we have to recover the last expression value. This is
+ the last value removed by means of a `drop' instruction. Instead
+ of adding code to inhibit dropping the last expression value, it
+ is here recovered by undoing the `drop'. Since `drop' is
+	 equivalent to `adjstackSI [1]', it can be undone with `adjstackSI
+ [-1]'. */
+
+ bc_adjust_stack (-1);
+
+ if (!last_expr_type)
+ last_expr_type = void_type_node;
+
+ t = make_node (RTL_EXPR);
+ TREE_TYPE (t) = last_expr_type;
+ RTL_EXPR_RTL (t) = NULL;
+ RTL_EXPR_SEQUENCE (t) = NULL;
+
+ /* Don't consider deleting this expr or containing exprs at tree level. */
+ TREE_THIS_VOLATILE (t) = 1;
+
+ last_expr_type = 0;
+ return t;
+ }
+
+ OK_DEFER_POP;
+
+ if (last_expr_type == 0)
+ {
+ last_expr_type = void_type_node;
+ last_expr_value = const0_rtx;
+ }
+ else if (last_expr_value == 0)
+ /* There are some cases where this can happen, such as when the
+ statement is void type. */
+ last_expr_value = const0_rtx;
+ else if (GET_CODE (last_expr_value) != REG && ! CONSTANT_P (last_expr_value))
+ /* Remove any possible QUEUED. */
+ last_expr_value = protect_from_queue (last_expr_value, 0);
+
+ emit_queue ();
+
+ TREE_TYPE (t) = last_expr_type;
+ RTL_EXPR_RTL (t) = last_expr_value;
+ RTL_EXPR_SEQUENCE (t) = get_insns ();
+
+ rtl_expr_chain = tree_cons (NULL_TREE, t, rtl_expr_chain);
+
+ end_sequence ();
+
+ /* Don't consider deleting this expr or containing exprs at tree level. */
+ TREE_SIDE_EFFECTS (t) = 1;
+ /* Propagate volatility of the actual RTL expr. */
+ TREE_THIS_VOLATILE (t) = volatile_refs_p (last_expr_value);
+
+ last_expr_type = 0;
+ expr_stmts_for_value--;
+
+ return t;
+}
+
+/* Generate RTL for the start of an if-then. COND is the expression
+ whose truth should be tested.
+
+ If EXITFLAG is nonzero, this conditional is visible to
+ `exit_something'. */
+
+void
+expand_start_cond (cond, exitflag)
+ tree cond;
+ int exitflag;
+{
+ struct nesting *thiscond = ALLOC_NESTING ();
+
+ /* Make an entry on cond_stack for the cond we are entering. */
+
+ thiscond->next = cond_stack;
+ thiscond->all = nesting_stack;
+ thiscond->depth = ++nesting_depth;
+ thiscond->data.cond.next_label = gen_label_rtx ();
+ /* Before we encounter an `else', we don't need a separate exit label
+ unless there are supposed to be exit statements
+ to exit this conditional. */
+ thiscond->exit_label = exitflag ? gen_label_rtx () : 0;
+ thiscond->data.cond.endif_label = thiscond->exit_label;
+ cond_stack = thiscond;
+ nesting_stack = thiscond;
+
+ if (output_bytecode)
+ bc_expand_start_cond (cond, exitflag);
+ else
+ do_jump (cond, thiscond->data.cond.next_label, NULL_RTX);
+}
+
+/* Generate RTL between the then-clause and the elseif-clause
+ of an if-then-elseif-.... */
+
+void
+expand_start_elseif (cond)
+ tree cond;
+{
+ if (cond_stack->data.cond.endif_label == 0)
+ cond_stack->data.cond.endif_label = gen_label_rtx ();
+ emit_jump (cond_stack->data.cond.endif_label);
+ emit_label (cond_stack->data.cond.next_label);
+ cond_stack->data.cond.next_label = gen_label_rtx ();
+ do_jump (cond, cond_stack->data.cond.next_label, NULL_RTX);
+}
+
+/* Generate RTL between the then-clause and the else-clause
+ of an if-then-else. */
+
+void
+expand_start_else ()
+{
+ if (cond_stack->data.cond.endif_label == 0)
+ cond_stack->data.cond.endif_label = gen_label_rtx ();
+
+ if (output_bytecode)
+ {
+ bc_expand_start_else ();
+ return;
+ }
+
+ emit_jump (cond_stack->data.cond.endif_label);
+ emit_label (cond_stack->data.cond.next_label);
+ cond_stack->data.cond.next_label = 0; /* No more _else or _elseif calls. */
+}
+
+/* Generate RTL for the end of an if-then.
+ Pop the record for it off of cond_stack. */
+
+void
+expand_end_cond ()
+{
+ struct nesting *thiscond = cond_stack;
+
+ if (output_bytecode)
+ bc_expand_end_cond ();
+ else
+ {
+ do_pending_stack_adjust ();
+ if (thiscond->data.cond.next_label)
+ emit_label (thiscond->data.cond.next_label);
+ if (thiscond->data.cond.endif_label)
+ emit_label (thiscond->data.cond.endif_label);
+ }
+
+ POPSTACK (cond_stack);
+ last_expr_type = 0;
+}
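+
+/* A rough sketch of how a front end sequences the four functions above
+ for a C statement such as `if (c) a; else b;':
+
+	expand_start_cond (c, 0);	 jump past `a' if `c' is false
+	  ... expand `a' ...
+	expand_start_else ();		 jump to endif_label; emit next_label
+	  ... expand `b' ...
+	expand_end_cond ();		 emit endif_label; pop cond_stack
+
+ An `else if' chain inserts expand_start_elseif for each subsequent
+ condition. Illustrative only; the real call sites are in the
+ language front ends. */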
+
+
+/* Generate code for the start of an if-then. COND is the expression
+ whose truth is to be tested; if EXITFLAG is nonzero this conditional
+ is to be visible to exit_something. It is assumed that the caller
+ has pushed the previous context on the cond stack. */
+
+static void
+bc_expand_start_cond (cond, exitflag)
+ tree cond;
+ int exitflag;
+{
+ struct nesting *thiscond = cond_stack;
+
+ thiscond->data.case_stmt.nominal_type = cond;
+ if (! exitflag)
+ thiscond->exit_label = gen_label_rtx ();
+ bc_expand_expr (cond);
+ bc_emit_bytecode (xjumpifnot);
+ bc_emit_bytecode_labelref (BYTECODE_BC_LABEL (thiscond->exit_label));
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+}
+
+/* Generate the label for the end of an if with
+ no else- clause. */
+
+static void
+bc_expand_end_cond ()
+{
+ struct nesting *thiscond = cond_stack;
+
+ bc_emit_bytecode_labeldef (BYTECODE_BC_LABEL (thiscond->exit_label));
+}
+
+/* Generate code for the start of the else- clause of
+ an if-then-else. */
+
+static void
+bc_expand_start_else ()
+{
+ struct nesting *thiscond = cond_stack;
+
+ thiscond->data.cond.endif_label = thiscond->exit_label;
+ thiscond->exit_label = gen_label_rtx ();
+ bc_emit_bytecode (jump);
+ bc_emit_bytecode_labelref (BYTECODE_BC_LABEL (thiscond->exit_label));
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+
+ bc_emit_bytecode_labeldef (BYTECODE_BC_LABEL (thiscond->data.cond.endif_label));
+}
+
+/* Generate RTL for the start of a loop. EXIT_FLAG is nonzero if this
+ loop should be exited by `exit_something'. This is a loop for which
+ `expand_continue' will jump to the top of the loop.
+
+ Make an entry on loop_stack to record the labels associated with
+ this loop. */
+
+struct nesting *
+expand_start_loop (exit_flag)
+ int exit_flag;
+{
+ register struct nesting *thisloop = ALLOC_NESTING ();
+
+ /* Make an entry on loop_stack for the loop we are entering. */
+
+ thisloop->next = loop_stack;
+ thisloop->all = nesting_stack;
+ thisloop->depth = ++nesting_depth;
+ thisloop->data.loop.start_label = gen_label_rtx ();
+ thisloop->data.loop.end_label = gen_label_rtx ();
+ thisloop->data.loop.alt_end_label = 0;
+ thisloop->data.loop.continue_label = thisloop->data.loop.start_label;
+ thisloop->exit_label = exit_flag ? thisloop->data.loop.end_label : 0;
+ loop_stack = thisloop;
+ nesting_stack = thisloop;
+
+ if (output_bytecode)
+ {
+ bc_emit_bytecode_labeldef (BYTECODE_BC_LABEL (thisloop->data.loop.start_label));
+ return thisloop;
+ }
+
+ do_pending_stack_adjust ();
+ emit_queue ();
+ emit_note (NULL_PTR, NOTE_INSN_LOOP_BEG);
+ emit_label (thisloop->data.loop.start_label);
+
+ return thisloop;
+}
+
+/* Like expand_start_loop but for a loop where the continuation point
+ (for expand_continue_loop) will be specified explicitly. */
+
+struct nesting *
+expand_start_loop_continue_elsewhere (exit_flag)
+ int exit_flag;
+{
+ struct nesting *thisloop = expand_start_loop (exit_flag);
+ loop_stack->data.loop.continue_label = gen_label_rtx ();
+ return thisloop;
+}
+
+/* Specify the continuation point for a loop started with
+ expand_start_loop_continue_elsewhere.
+ Use this at the point in the code to which a continue statement
+ should jump. */
+
+void
+expand_loop_continue_here ()
+{
+ if (output_bytecode)
+ {
+ bc_emit_bytecode_labeldef (BYTECODE_BC_LABEL (loop_stack->data.loop.continue_label));
+ return;
+ }
+ do_pending_stack_adjust ();
+ emit_note (NULL_PTR, NOTE_INSN_LOOP_CONT);
+ emit_label (loop_stack->data.loop.continue_label);
+}
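+
+/* The loop protocol, sketched: a C `while (c) body;' might expand as
+
+	expand_start_loop (1);
+	expand_exit_loop_if_false (0, c);
+	  ... expand `body' ...
+	expand_end_loop ();
+
+ while a `for' loop whose continue-point is its increment step uses
+ expand_start_loop_continue_elsewhere and marks the increment with
+ expand_loop_continue_here. Illustrative only. */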
+
+/* End a loop. */
+
+static void
+bc_expand_end_loop ()
+{
+ struct nesting *thisloop = loop_stack;
+
+ bc_emit_bytecode (jump);
+ bc_emit_bytecode_labelref (BYTECODE_BC_LABEL (thisloop->data.loop.start_label));
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+
+ bc_emit_bytecode_labeldef (BYTECODE_BC_LABEL (thisloop->exit_label));
+ POPSTACK (loop_stack);
+ last_expr_type = 0;
+}
+
+
+/* Finish a loop. Generate a jump back to the top and the loop-exit label.
+ Pop the block off of loop_stack. */
+
+void
+expand_end_loop ()
+{
+ register rtx insn;
+ register rtx start_label;
+ rtx last_test_insn = 0;
+ int num_insns = 0;
+
+ if (output_bytecode)
+ {
+ bc_expand_end_loop ();
+ return;
+ }
+
+ insn = get_last_insn ();
+ start_label = loop_stack->data.loop.start_label;
+
+ /* Mark the continue-point at the top of the loop if none elsewhere. */
+ if (start_label == loop_stack->data.loop.continue_label)
+ emit_note_before (NOTE_INSN_LOOP_CONT, start_label);
+
+ do_pending_stack_adjust ();
+
+ /* If optimizing, perhaps reorder the loop. If the loop
+ starts with a conditional exit, roll that to the end
+ where it will optimize together with the jump back.
+
+ We look for the last conditional branch to the exit that we encounter
+ before hitting 30 insns or a CALL_INSN. If we see an unconditional
+ branch to the exit first, use it.
+
+ We must also stop at NOTE_INSN_BLOCK_BEG and NOTE_INSN_BLOCK_END notes
+ because moving them is not valid. */
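+
+ /* Schematically, a loop laid out as
+
+	start:	if (!c) goto end;
+		body;
+		goto start;
+	end:
+
+ is reordered below into
+
+	goto start;
+	newstart: body;
+	start:	if (!c) goto end;
+		goto newstart;
+	end:
+
+ which jump optimization can then collapse into a single conditional
+ jump back at the bottom of the loop. */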
+
+ if (optimize
+ &&
+ ! (GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == pc_rtx
+ && GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE))
+ {
+ /* Scan insns from the top of the loop looking for a qualified
+ conditional exit. */
+ for (insn = NEXT_INSN (loop_stack->data.loop.start_label); insn;
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == CODE_LABEL)
+ break;
+
+ if (GET_CODE (insn) == NOTE
+ && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END))
+ break;
+
+ if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == INSN)
+ num_insns++;
+
+ if (last_test_insn && num_insns > 30)
+ break;
+
+ if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == pc_rtx
+ && GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE
+ && ((GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 1)) == LABEL_REF
+ && ((XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0)
+ == loop_stack->data.loop.end_label)
+ || (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0)
+ == loop_stack->data.loop.alt_end_label)))
+ || (GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 2)) == LABEL_REF
+ && ((XEXP (XEXP (SET_SRC (PATTERN (insn)), 2), 0)
+ == loop_stack->data.loop.end_label)
+ || (XEXP (XEXP (SET_SRC (PATTERN (insn)), 2), 0)
+ == loop_stack->data.loop.alt_end_label)))))
+ last_test_insn = insn;
+
+ if (last_test_insn == 0 && GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == pc_rtx
+ && GET_CODE (SET_SRC (PATTERN (insn))) == LABEL_REF
+ && ((XEXP (SET_SRC (PATTERN (insn)), 0)
+ == loop_stack->data.loop.end_label)
+ || (XEXP (SET_SRC (PATTERN (insn)), 0)
+ == loop_stack->data.loop.alt_end_label)))
+ /* Include BARRIER. */
+ last_test_insn = NEXT_INSN (insn);
+ }
+
+ if (last_test_insn != 0 && last_test_insn != get_last_insn ())
+ {
+ /* We found one. Move everything from there up
+ to the end of the loop, and add a jump into the loop
+ to jump to there. */
+ register rtx newstart_label = gen_label_rtx ();
+ register rtx start_move = start_label;
+
+ /* If the start label is preceded by a NOTE_INSN_LOOP_CONT note,
+ then we want to move this note also. */
+ if (GET_CODE (PREV_INSN (start_move)) == NOTE
+ && (NOTE_LINE_NUMBER (PREV_INSN (start_move))
+ == NOTE_INSN_LOOP_CONT))
+ start_move = PREV_INSN (start_move);
+
+ emit_label_after (newstart_label, PREV_INSN (start_move));
+ reorder_insns (start_move, last_test_insn, get_last_insn ());
+ emit_jump_insn_after (gen_jump (start_label),
+ PREV_INSN (newstart_label));
+ emit_barrier_after (PREV_INSN (newstart_label));
+ start_label = newstart_label;
+ }
+ }
+
+ emit_jump (start_label);
+ emit_note (NULL_PTR, NOTE_INSN_LOOP_END);
+ emit_label (loop_stack->data.loop.end_label);
+
+ POPSTACK (loop_stack);
+
+ last_expr_type = 0;
+}
+
+/* Generate a jump to the current loop's continue-point.
+ This is usually the top of the loop, but may be specified
+ explicitly elsewhere. If not currently inside a loop,
+ return 0 and do nothing; caller will print an error message. */
+
+int
+expand_continue_loop (whichloop)
+ struct nesting *whichloop;
+{
+ last_expr_type = 0;
+ if (whichloop == 0)
+ whichloop = loop_stack;
+ if (whichloop == 0)
+ return 0;
+ expand_goto_internal (NULL_TREE, whichloop->data.loop.continue_label,
+ NULL_RTX);
+ return 1;
+}
+
+/* Generate a jump to exit the current loop. If not currently inside a loop,
+ return 0 and do nothing; caller will print an error message. */
+
+int
+expand_exit_loop (whichloop)
+ struct nesting *whichloop;
+{
+ last_expr_type = 0;
+ if (whichloop == 0)
+ whichloop = loop_stack;
+ if (whichloop == 0)
+ return 0;
+ expand_goto_internal (NULL_TREE, whichloop->data.loop.end_label, NULL_RTX);
+ return 1;
+}
+
+/* Generate a conditional jump to exit the current loop if COND
+ evaluates to zero. If not currently inside a loop,
+ return 0 and do nothing; caller will print an error message. */
+
+int
+expand_exit_loop_if_false (whichloop, cond)
+ struct nesting *whichloop;
+ tree cond;
+{
+ last_expr_type = 0;
+ if (whichloop == 0)
+ whichloop = loop_stack;
+ if (whichloop == 0)
+ return 0;
+ if (output_bytecode)
+ {
+ bc_expand_expr (cond);
+ bc_expand_goto_internal (xjumpifnot,
+ BYTECODE_BC_LABEL (whichloop->exit_label),
+ NULL_TREE);
+ }
+ else
+ {
+ /* In order to handle fixups, we actually create a conditional jump
+ around an unconditional branch to exit the loop. If fixups are
+ necessary, they go before the unconditional branch. */
+
+ rtx label = gen_label_rtx ();
+ rtx last_insn;
+
+ do_jump (cond, NULL_RTX, label);
+ last_insn = get_last_insn ();
+ if (GET_CODE (last_insn) == CODE_LABEL)
+ whichloop->data.loop.alt_end_label = last_insn;
+ expand_goto_internal (NULL_TREE, whichloop->data.loop.end_label,
+ NULL_RTX);
+ emit_label (label);
+ }
+
+ return 1;
+}
+
+/* Return non-zero if we should preserve sub-expressions as separate
+ pseudos. We never do so if we aren't optimizing. We always do so
+ if -fexpensive-optimizations is enabled.
+
+ Otherwise, we only do so if we are in the "early" part of a loop, i.e.
+ within roughly 3 * n_non_fixed_regs insns of the loop's start, on the
+ theory that the loop may still turn out to be a small one. */
+
+int
+preserve_subexpressions_p ()
+{
+ rtx insn;
+
+ if (flag_expensive_optimizations)
+ return 1;
+
+ if (optimize == 0 || loop_stack == 0)
+ return 0;
+
+ insn = get_last_insn_anywhere ();
+
+ return (insn
+ && (INSN_UID (insn) - INSN_UID (loop_stack->data.loop.start_label)
+ < n_non_fixed_regs * 3));
+
+}
+
+/* Generate a jump to exit the current loop, conditional, binding contour
+ or case statement. Not all such constructs are visible to this function,
+ only those started with EXIT_FLAG nonzero. Individual languages use
+ the EXIT_FLAG parameter to control which kinds of constructs you can
+ exit this way.
+
+ If not currently inside anything that can be exited,
+ return 0 and do nothing; caller will print an error message. */
+
+int
+expand_exit_something ()
+{
+ struct nesting *n;
+ last_expr_type = 0;
+ for (n = nesting_stack; n; n = n->all)
+ if (n->exit_label != 0)
+ {
+ expand_goto_internal (NULL_TREE, n->exit_label, NULL_RTX);
+ return 1;
+ }
+
+ return 0;
+}
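+
+/* This is how, for instance, the C front end expands `break': loops and
+ case statements are both started with their exit flags nonzero, so a
+ single call here reaches whichever construct is innermost, while
+ `continue' goes through expand_continue_loop above. */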
+
+/* Generate RTL to return from the current function, with no value.
+ (That is, we do not do anything about returning any value.) */
+
+void
+expand_null_return ()
+{
+ struct nesting *block = block_stack;
+ rtx last_insn = 0;
+
+ if (output_bytecode)
+ {
+ bc_emit_instruction (ret);
+ return;
+ }
+
+ /* Does any pending block have cleanups? */
+
+ while (block && block->data.block.cleanups == 0)
+ block = block->next;
+
+ /* If yes, use a goto to return, since that runs cleanups. */
+
+ expand_null_return_1 (last_insn, block != 0);
+}
+
+/* Generate RTL to return from the current function, with value VAL. */
+
+void
+expand_value_return (val)
+ rtx val;
+{
+ struct nesting *block = block_stack;
+ rtx last_insn = get_last_insn ();
+ rtx return_reg = DECL_RTL (DECL_RESULT (current_function_decl));
+
+ /* Copy the value to the return location
+ unless it's already there. */
+
+ if (return_reg != val)
+ {
+#ifdef PROMOTE_FUNCTION_RETURN
+ tree type = TREE_TYPE (DECL_RESULT (current_function_decl));
+ int unsignedp = TREE_UNSIGNED (type);
+ enum machine_mode mode
+ = promote_mode (type, DECL_MODE (DECL_RESULT (current_function_decl)),
+ &unsignedp, 1);
+
+ if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
+ convert_move (return_reg, val, unsignedp);
+ else
+#endif
+ emit_move_insn (return_reg, val);
+ }
+ if (GET_CODE (return_reg) == REG
+ && REGNO (return_reg) < FIRST_PSEUDO_REGISTER)
+ emit_insn (gen_rtx (USE, VOIDmode, return_reg));
+
+ /* Does any pending block have cleanups? */
+
+ while (block && block->data.block.cleanups == 0)
+ block = block->next;
+
+ /* If yes, use a goto to return, since that runs cleanups.
+ Use LAST_INSN to put cleanups *before* the move insn emitted above. */
+
+ expand_null_return_1 (last_insn, block != 0);
+}
+
+/* Output a return with no value. If LAST_INSN is nonzero,
+ pretend that the return takes place after LAST_INSN.
+ If USE_GOTO is nonzero then don't use a return instruction;
+ go to the return label instead. This causes any cleanups
+ of pending blocks to be executed normally. */
+
+static void
+expand_null_return_1 (last_insn, use_goto)
+ rtx last_insn;
+ int use_goto;
+{
+ rtx end_label = cleanup_label ? cleanup_label : return_label;
+
+ clear_pending_stack_adjust ();
+ do_pending_stack_adjust ();
+ last_expr_type = 0;
+
+ /* PCC-struct return always uses an epilogue. */
+ if (current_function_returns_pcc_struct || use_goto)
+ {
+ if (end_label == 0)
+ end_label = return_label = gen_label_rtx ();
+ expand_goto_internal (NULL_TREE, end_label, last_insn);
+ return;
+ }
+
+ /* Otherwise output a simple return-insn if one is available,
+ unless it won't do the job. */
+#ifdef HAVE_return
+ if (HAVE_return && use_goto == 0 && cleanup_label == 0)
+ {
+ emit_jump_insn (gen_return ());
+ emit_barrier ();
+ return;
+ }
+#endif
+
+ /* Otherwise jump to the epilogue. */
+ expand_goto_internal (NULL_TREE, end_label, last_insn);
+}
+
+/* Generate RTL to evaluate the expression RETVAL and return it
+ from the current function. */
+
+void
+expand_return (retval)
+ tree retval;
+{
+ /* If there are any cleanups to be performed, then they will
+ be inserted following LAST_INSN. It is desirable
+ that the last_insn, for such purposes, should be the
+ last insn before computing the return value. Otherwise, cleanups
+ which call functions can clobber the return value. */
+ /* ??? rms: I think that is erroneous, because in C++ it would
+ run destructors on variables that might be used in the subsequent
+ computation of the return value. */
+ rtx last_insn = 0;
+ register rtx val = 0;
+ register rtx op0;
+ tree retval_rhs;
+ int cleanups;
+ struct nesting *block;
+
+ /* Bytecode returns are quite simple, just leave the result on the
+ arithmetic stack. */
+ if (output_bytecode)
+ {
+ bc_expand_expr (retval);
+ bc_emit_instruction (ret);
+ return;
+ }
+
+ /* If function wants no value, give it none. */
+ if (TREE_CODE (TREE_TYPE (TREE_TYPE (current_function_decl))) == VOID_TYPE)
+ {
+ expand_expr (retval, NULL_RTX, VOIDmode, 0);
+ emit_queue ();
+ expand_null_return ();
+ return;
+ }
+
+ /* Are any cleanups needed? E.g. C++ destructors to be run? */
+ cleanups = any_pending_cleanups (1);
+
+ if (TREE_CODE (retval) == RESULT_DECL)
+ retval_rhs = retval;
+ else if ((TREE_CODE (retval) == MODIFY_EXPR || TREE_CODE (retval) == INIT_EXPR)
+ && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
+ retval_rhs = TREE_OPERAND (retval, 1);
+ else if (TREE_TYPE (retval) == void_type_node)
+ /* Recognize tail-recursive call to void function. */
+ retval_rhs = retval;
+ else
+ retval_rhs = NULL_TREE;
+
+ /* Only use `last_insn' if there are cleanups which must be run. */
+ if (cleanups || cleanup_label != 0)
+ last_insn = get_last_insn ();
+
+ /* Distribute the return down a conditional expr if either of its arms
+ may involve tail recursion (see test below); e.g. `return c ? f () : g ();'
+ is expanded as `if (c) return f (); else return g ();'. This increases
+ the number of tail recursions we see. Don't do this unconditionally,
+ since it can produce sub-optimal code in some cases, and assignments
+ are already distributed into conditional expressions when that would
+ help. */
+
+ if (optimize && retval_rhs != 0
+ && frame_offset == 0
+ && TREE_CODE (retval_rhs) == COND_EXPR
+ && (TREE_CODE (TREE_OPERAND (retval_rhs, 1)) == CALL_EXPR
+ || TREE_CODE (TREE_OPERAND (retval_rhs, 2)) == CALL_EXPR))
+ {
+ rtx label = gen_label_rtx ();
+ tree expr;
+
+ do_jump (TREE_OPERAND (retval_rhs, 0), label, NULL_RTX);
+ expr = build (MODIFY_EXPR, TREE_TYPE (current_function_decl),
+ DECL_RESULT (current_function_decl),
+ TREE_OPERAND (retval_rhs, 1));
+ TREE_SIDE_EFFECTS (expr) = 1;
+ expand_return (expr);
+ emit_label (label);
+
+ expr = build (MODIFY_EXPR, TREE_TYPE (current_function_decl),
+ DECL_RESULT (current_function_decl),
+ TREE_OPERAND (retval_rhs, 2));
+ TREE_SIDE_EFFECTS (expr) = 1;
+ expand_return (expr);
+ return;
+ }
+
+ /* For tail-recursive call to current function,
+ just jump back to the beginning.
+ It's unsafe if any auto variable in this function
+ has its address taken; for simplicity,
+ require stack frame to be empty. */
+ if (optimize && retval_rhs != 0
+ && frame_offset == 0
+ && TREE_CODE (retval_rhs) == CALL_EXPR
+ && TREE_CODE (TREE_OPERAND (retval_rhs, 0)) == ADDR_EXPR
+ && TREE_OPERAND (TREE_OPERAND (retval_rhs, 0), 0) == current_function_decl
+ /* Finish checking validity, and if valid emit code
+ to set the argument variables for the new call. */
+ && tail_recursion_args (TREE_OPERAND (retval_rhs, 1),
+ DECL_ARGUMENTS (current_function_decl)))
+ {
+ if (tail_recursion_label == 0)
+ {
+ tail_recursion_label = gen_label_rtx ();
+ emit_label_after (tail_recursion_label,
+ tail_recursion_reentry);
+ }
+ emit_queue ();
+ expand_goto_internal (NULL_TREE, tail_recursion_label, last_insn);
+ emit_barrier ();
+ return;
+ }
+#ifdef HAVE_return
+ /* This optimization is safe if there are local cleanups
+ because expand_null_return takes care of them.
+ ??? I think it should also be safe when there is a cleanup label,
+ because expand_null_return takes care of them, too.
+ Any reason why not? */
+ if (HAVE_return && cleanup_label == 0
+ && ! current_function_returns_pcc_struct
+ && BRANCH_COST <= 1)
+ {
+ /* If this is return x == y; then generate
+ if (x == y) return 1; else return 0;
+ if we can do it with explicit return insns and
+ branches are cheap. */
+ if (retval_rhs)
+ switch (TREE_CODE (retval_rhs))
+ {
+ case EQ_EXPR:
+ case NE_EXPR:
+ case GT_EXPR:
+ case GE_EXPR:
+ case LT_EXPR:
+ case LE_EXPR:
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ case TRUTH_AND_EXPR:
+ case TRUTH_OR_EXPR:
+ case TRUTH_NOT_EXPR:
+ case TRUTH_XOR_EXPR:
+ op0 = gen_label_rtx ();
+ jumpifnot (retval_rhs, op0);
+ expand_value_return (const1_rtx);
+ emit_label (op0);
+ expand_value_return (const0_rtx);
+ return;
+ }
+ }
+#endif /* HAVE_return */
+
+ if (cleanups
+ && retval_rhs != 0
+ && TREE_TYPE (retval_rhs) != void_type_node
+ && GET_CODE (DECL_RTL (DECL_RESULT (current_function_decl))) == REG)
+ {
+ /* Calculate the return value into a pseudo reg. */
+ val = expand_expr (retval_rhs, NULL_RTX, VOIDmode, 0);
+ emit_queue ();
+ /* All temporaries have now been used. */
+ free_temp_slots ();
+ /* Return the calculated value, doing cleanups first. */
+ expand_value_return (val);
+ }
+ else
+ {
+ /* No cleanups or no hard reg used;
+ calculate value into hard return reg. */
+ expand_expr (retval, const0_rtx, VOIDmode, 0);
+ emit_queue ();
+ free_temp_slots ();
+ expand_value_return (DECL_RTL (DECL_RESULT (current_function_decl)));
+ }
+}
+
+/* Return 1 if the end of the generated RTX is not a barrier.
+ This means code already compiled can drop through. */
+
+int
+drop_through_at_end_p ()
+{
+ rtx insn = get_last_insn ();
+ while (insn && GET_CODE (insn) == NOTE)
+ insn = PREV_INSN (insn);
+ return insn && GET_CODE (insn) != BARRIER;
+}
+
+/* Emit code to alter this function's formal parms for a tail-recursive call.
+ ACTUALS is a list of actual parameter expressions (chain of TREE_LISTs).
+ FORMALS is the chain of decls of formals.
+ Return 1 if this can be done;
+ otherwise return 0 and do not emit any code. */
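+
+/* As an illustration, for
+
+	int f (int a, int b) { return f (b, a + 1); }
+
+ the two actuals are first computed into ARGVEC; any actual whose RTL
+ still mentions the register of an earlier formal is copied to a fresh
+ pseudo, and only then are the values stored into the formals, so the
+ effect is as if `a' and `b' were updated simultaneously. */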
+
+static int
+tail_recursion_args (actuals, formals)
+ tree actuals, formals;
+{
+ register tree a = actuals, f = formals;
+ register int i;
+ register rtx *argvec;
+
+ /* Check that number and types of actuals are compatible
+ with the formals. This is not always true in valid C code.
+ Also check that no formal needs to be addressable
+ and that all formals are scalars. */
+
+ /* Also count the args. */
+
+ for (a = actuals, f = formals, i = 0; a && f; a = TREE_CHAIN (a), f = TREE_CHAIN (f), i++)
+ {
+ if (TREE_TYPE (TREE_VALUE (a)) != TREE_TYPE (f))
+ return 0;
+ if (GET_CODE (DECL_RTL (f)) != REG || DECL_MODE (f) == BLKmode)
+ return 0;
+ }
+ if (a != 0 || f != 0)
+ return 0;
+
+ /* Compute all the actuals. */
+
+ argvec = (rtx *) alloca (i * sizeof (rtx));
+
+ for (a = actuals, i = 0; a; a = TREE_CHAIN (a), i++)
+ argvec[i] = expand_expr (TREE_VALUE (a), NULL_RTX, VOIDmode, 0);
+
+ /* Find which actual values refer to current values of previous formals.
+ Copy each of them now, before any formal is changed. */
+
+ for (a = actuals, i = 0; a; a = TREE_CHAIN (a), i++)
+ {
+ int copy = 0;
+ register int j;
+ for (f = formals, j = 0; j < i; f = TREE_CHAIN (f), j++)
+ if (reg_mentioned_p (DECL_RTL (f), argvec[i]))
+ { copy = 1; break; }
+ if (copy)
+ argvec[i] = copy_to_reg (argvec[i]);
+ }
+
+ /* Store the values of the actuals into the formals. */
+
+ for (f = formals, a = actuals, i = 0; f;
+ f = TREE_CHAIN (f), a = TREE_CHAIN (a), i++)
+ {
+ if (GET_MODE (DECL_RTL (f)) == GET_MODE (argvec[i]))
+ emit_move_insn (DECL_RTL (f), argvec[i]);
+ else
+ convert_move (DECL_RTL (f), argvec[i],
+ TREE_UNSIGNED (TREE_TYPE (TREE_VALUE (a))));
+ }
+
+ free_temp_slots ();
+ return 1;
+}
+
+/* Generate the RTL code for entering a binding contour.
+ The variables are declared one by one, by calls to `expand_decl'.
+
+ EXIT_FLAG is nonzero if this construct should be visible to
+ `exit_something'. */
+
+void
+expand_start_bindings (exit_flag)
+ int exit_flag;
+{
+ struct nesting *thisblock = ALLOC_NESTING ();
+ rtx note = output_bytecode ? 0 : emit_note (NULL_PTR, NOTE_INSN_BLOCK_BEG);
+
+ /* Make an entry on block_stack for the block we are entering. */
+
+ thisblock->next = block_stack;
+ thisblock->all = nesting_stack;
+ thisblock->depth = ++nesting_depth;
+ thisblock->data.block.stack_level = 0;
+ thisblock->data.block.cleanups = 0;
+ thisblock->data.block.function_call_count = 0;
+#if 0
+ if (block_stack)
+ {
+ if (block_stack->data.block.cleanups == NULL_TREE
+ && (block_stack->data.block.outer_cleanups == NULL_TREE
+ || block_stack->data.block.outer_cleanups == empty_cleanup_list))
+ thisblock->data.block.outer_cleanups = empty_cleanup_list;
+ else
+ thisblock->data.block.outer_cleanups
+ = tree_cons (NULL_TREE, block_stack->data.block.cleanups,
+ block_stack->data.block.outer_cleanups);
+ }
+ else
+ thisblock->data.block.outer_cleanups = 0;
+#endif
+#if 1
+ if (block_stack
+ && !(block_stack->data.block.cleanups == NULL_TREE
+ && block_stack->data.block.outer_cleanups == NULL_TREE))
+ thisblock->data.block.outer_cleanups
+ = tree_cons (NULL_TREE, block_stack->data.block.cleanups,
+ block_stack->data.block.outer_cleanups);
+ else
+ thisblock->data.block.outer_cleanups = 0;
+#endif
+ thisblock->data.block.label_chain = 0;
+ thisblock->data.block.innermost_stack_block = stack_block_stack;
+ thisblock->data.block.first_insn = note;
+ thisblock->data.block.block_start_count = ++block_start_count;
+ thisblock->exit_label = exit_flag ? gen_label_rtx () : 0;
+ block_stack = thisblock;
+ nesting_stack = thisblock;
+
+ if (!output_bytecode)
+ {
+ /* Make a new level for allocating stack slots. */
+ push_temp_slots ();
+ }
+}
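+
+/* The usual pairing for a brace-level block in C is, roughly,
+
+	expand_start_bindings (0);
+	  expand_decl (d); expand_decl_init (d);	 for each local
+	  ... expand the statements ...
+	expand_end_bindings (decls, 1, 0);
+
+ with expand_decl_cleanup interposed by front ends (C++ destructors,
+ for example) that need cleanups run on exit. Illustrative only. */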
+
+/* Given a pointer to a BLOCK node, save a pointer to the most recently
+ generated NOTE_INSN_BLOCK_END in the BLOCK_END_NOTE field of the given
+ BLOCK node. */
+
+void
+remember_end_note (block)
+ register tree block;
+{
+ BLOCK_END_NOTE (block) = last_block_end_note;
+ last_block_end_note = NULL_RTX;
+}
+
+/* Generate RTL code to terminate a binding contour.
+ VARS is the chain of VAR_DECL nodes
+ for the variables bound in this contour.
+ MARK_ENDS is nonzero if we should put a note at the beginning
+ and end of this binding contour.
+
+ DONT_JUMP_IN is nonzero if it is not valid to jump into this contour.
+ (That is true automatically if the contour has a saved stack level.) */
+
+void
+expand_end_bindings (vars, mark_ends, dont_jump_in)
+ tree vars;
+ int mark_ends;
+ int dont_jump_in;
+{
+ register struct nesting *thisblock = block_stack;
+ register tree decl;
+
+ if (output_bytecode)
+ {
+ bc_expand_end_bindings (vars, mark_ends, dont_jump_in);
+ return;
+ }
+
+ if (warn_unused)
+ for (decl = vars; decl; decl = TREE_CHAIN (decl))
+ if (! TREE_USED (decl) && TREE_CODE (decl) == VAR_DECL
+ && ! DECL_IN_SYSTEM_HEADER (decl))
+ warning_with_decl (decl, "unused variable `%s'");
+
+ if (thisblock->exit_label)
+ {
+ do_pending_stack_adjust ();
+ emit_label (thisblock->exit_label);
+ }
+
+ /* If necessary, make a handler for nonlocal gotos taking
+ place in the function calls in this block. */
+ if (function_call_count != thisblock->data.block.function_call_count
+ && nonlocal_labels
+ /* Make handler for outermost block
+ if there were any nonlocal gotos to this function. */
+ && (thisblock->next == 0 ? current_function_has_nonlocal_label
+ /* Make handler for inner block if it has something
+ special to do when you jump out of it. */
+ : (thisblock->data.block.cleanups != 0
+ || thisblock->data.block.stack_level != 0)))
+ {
+ tree link;
+ rtx afterward = gen_label_rtx ();
+ rtx handler_label = gen_label_rtx ();
+ rtx save_receiver = gen_reg_rtx (Pmode);
+ rtx insns;
+
+ /* Don't let jump_optimize delete the handler. */
+ LABEL_PRESERVE_P (handler_label) = 1;
+
+ /* Record the handler address in the stack slot for that purpose,
+ during this block, saving and restoring the outer value. */
+ if (thisblock->next != 0)
+ {
+ emit_move_insn (nonlocal_goto_handler_slot, save_receiver);
+
+ start_sequence ();
+ emit_move_insn (save_receiver, nonlocal_goto_handler_slot);
+ insns = get_insns ();
+ end_sequence ();
+ emit_insns_before (insns, thisblock->data.block.first_insn);
+ }
+
+ start_sequence ();
+ emit_move_insn (nonlocal_goto_handler_slot,
+ gen_rtx (LABEL_REF, Pmode, handler_label));
+ insns = get_insns ();
+ end_sequence ();
+ emit_insns_before (insns, thisblock->data.block.first_insn);
+
+ /* Jump around the handler; it runs only when specially invoked. */
+ emit_jump (afterward);
+ emit_label (handler_label);
+
+#ifdef HAVE_nonlocal_goto
+ if (! HAVE_nonlocal_goto)
+#endif
+ /* First adjust our frame pointer to its actual value. It was
+ previously set to the start of the virtual area corresponding to
+ the stacked variables when we branched here and now needs to be
+ adjusted to the actual hardware fp value.
+
+ Assignments to virtual registers are converted by
+ instantiate_virtual_regs into the corresponding assignment
+ to the underlying register (fp in this case) that makes
+ the original assignment true.
+ So the following insn will actually be
+ decrementing fp by STARTING_FRAME_OFFSET. */
+ emit_move_insn (virtual_stack_vars_rtx, hard_frame_pointer_rtx);
+
+#if ARG_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ if (fixed_regs[ARG_POINTER_REGNUM])
+ {
+#ifdef ELIMINABLE_REGS
+ /* If the argument pointer can be eliminated in favor of the
+ frame pointer, we don't need to restore it. We assume here
+ that if such an elimination is present, it can always be used.
+ This is the case on all known machines; if we don't make this
+ assumption, we do unnecessary saving on many machines. */
+ static struct elims {int from, to;} elim_regs[] = ELIMINABLE_REGS;
+ int i;
+
+ for (i = 0; i < sizeof elim_regs / sizeof elim_regs[0]; i++)
+ if (elim_regs[i].from == ARG_POINTER_REGNUM
+ && elim_regs[i].to == HARD_FRAME_POINTER_REGNUM)
+ break;
+
+ if (i == sizeof elim_regs / sizeof elim_regs [0])
+#endif
+ {
+ /* Now restore our arg pointer from the address at which it
+ was saved in our stack frame.
+ If there hasn't been space allocated for it yet, make
+ some now. */
+ if (arg_pointer_save_area == 0)
+ arg_pointer_save_area
+ = assign_stack_local (Pmode, GET_MODE_SIZE (Pmode), 0);
+ emit_move_insn (virtual_incoming_args_rtx,
+ /* We need a pseudo here, or else
+ instantiate_virtual_regs_1 complains. */
+ copy_to_reg (arg_pointer_save_area));
+ }
+ }
+#endif
+
+ /* The handler expects the desired label address in the static chain
+ register. It tests the address and does an appropriate jump
+ to whatever label is desired. */
+ for (link = nonlocal_labels; link; link = TREE_CHAIN (link))
+ /* Skip any labels we shouldn't be able to jump to from here. */
+ if (! DECL_TOO_LATE (TREE_VALUE (link)))
+ {
+ rtx not_this = gen_label_rtx ();
+ rtx this = gen_label_rtx ();
+ do_jump_if_equal (static_chain_rtx,
+ gen_rtx (LABEL_REF, Pmode, DECL_RTL (TREE_VALUE (link))),
+ this, 0);
+ emit_jump (not_this);
+ emit_label (this);
+ expand_goto (TREE_VALUE (link));
+ emit_label (not_this);
+ }
+ /* If label is not recognized, abort. */
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "abort"), 0,
+ VOIDmode, 0);
+ emit_label (afterward);
+ }
+
+ /* Don't allow jumping into a block that has cleanups or a stack level. */
+ if (dont_jump_in
+ || thisblock->data.block.stack_level != 0
+ || thisblock->data.block.cleanups != 0)
+ {
+ struct label_chain *chain;
+
+ /* Any labels in this block are no longer valid to go to.
+ Mark them to cause an error message. */
+ for (chain = thisblock->data.block.label_chain; chain; chain = chain->next)
+ {
+ DECL_TOO_LATE (chain->label) = 1;
+ /* If any goto without a fixup came to this label,
+ that must be an error, because gotos without fixups
+ come from outside all saved stack-levels and all cleanups. */
+ if (TREE_ADDRESSABLE (chain->label))
+ error_with_decl (chain->label,
+ "label `%s' used before containing binding contour");
+ }
+ }
+
+ /* Restore stack level in effect before the block
+ (only if variable-size objects allocated). */
+ /* Perform any cleanups associated with the block. */
+
+ if (thisblock->data.block.stack_level != 0
+ || thisblock->data.block.cleanups != 0)
+ {
+ /* Only clean up here if this point can actually be reached. */
+ if (GET_CODE (get_last_insn ()) != BARRIER)
+ {
+ /* Don't let cleanups affect ({...}) constructs. */
+ int old_expr_stmts_for_value = expr_stmts_for_value;
+ rtx old_last_expr_value = last_expr_value;
+ tree old_last_expr_type = last_expr_type;
+ expr_stmts_for_value = 0;
+
+ /* Do the cleanups. */
+ expand_cleanups (thisblock->data.block.cleanups, NULL_TREE);
+ do_pending_stack_adjust ();
+
+ expr_stmts_for_value = old_expr_stmts_for_value;
+ last_expr_value = old_last_expr_value;
+ last_expr_type = old_last_expr_type;
+
+ /* Restore the stack level. */
+
+ if (thisblock->data.block.stack_level != 0)
+ {
+ emit_stack_restore (thisblock->next ? SAVE_BLOCK : SAVE_FUNCTION,
+ thisblock->data.block.stack_level, NULL_RTX);
+ if (nonlocal_goto_handler_slot != 0)
+ emit_stack_save (SAVE_NONLOCAL, &nonlocal_goto_stack_level,
+ NULL_RTX);
+ }
+ }
+
+ /* Any gotos out of this block must also do these things.
+ Also report any gotos with fixups that came to labels in this
+ level. */
+ fixup_gotos (thisblock,
+ thisblock->data.block.stack_level,
+ thisblock->data.block.cleanups,
+ thisblock->data.block.first_insn,
+ dont_jump_in);
+ }
+
+ /* Mark the beginning and end of the scope if requested.
+ We do this now, after running cleanups on the variables
+ just going out of scope, so they are in scope for their cleanups. */
+
+ if (mark_ends)
+ last_block_end_note = emit_note (NULL_PTR, NOTE_INSN_BLOCK_END);
+ else
+ /* Get rid of the beginning-mark if we don't make an end-mark. */
+ NOTE_LINE_NUMBER (thisblock->data.block.first_insn) = NOTE_INSN_DELETED;
+
+ /* If doing stupid register allocation, make sure lives of all
+ register variables declared here extend thru end of scope. */
+
+ if (obey_regdecls)
+ for (decl = vars; decl; decl = TREE_CHAIN (decl))
+ {
+ rtx rtl = DECL_RTL (decl);
+ if (TREE_CODE (decl) == VAR_DECL && rtl != 0)
+ use_variable (rtl);
+ }
+
+ /* Restore block_stack level for containing block. */
+
+ stack_block_stack = thisblock->data.block.innermost_stack_block;
+ POPSTACK (block_stack);
+
+ /* Pop the stack slot nesting and free any slots at this level. */
+ pop_temp_slots ();
+}
+
+
+/* End a binding contour.
+ VARS is the chain of VAR_DECL nodes for the variables bound
+ in this contour. MARK_ENDS is nonzero if we should put a note
+ at the beginning and end of this binding contour.
+ DONT_JUMP_IN is nonzero if it is not valid to jump into this
+ contour. */
+
+static void
+bc_expand_end_bindings (vars, mark_ends, dont_jump_in)
+ tree vars;
+ int mark_ends;
+ int dont_jump_in;
+{
+ struct nesting *thisbind = nesting_stack;
+ tree decl;
+
+ if (warn_unused)
+ for (decl = vars; decl; decl = TREE_CHAIN (decl))
+ if (! TREE_USED (decl) && TREE_CODE (decl) == VAR_DECL)
+ warning_with_decl (decl, "unused variable `%s'");
+
+ if (thisbind->exit_label)
+ bc_emit_bytecode_labeldef (BYTECODE_BC_LABEL (thisbind->exit_label));
+
+ /* Pop block/bindings off stack */
+ POPSTACK (block_stack);
+}
+
+/* Generate RTL for the automatic variable declaration DECL.
+ (Other kinds of declarations are simply ignored if seen here.)
+ CLEANUP is an expression to be executed at exit from this binding contour;
+ for example, in C++, it might call the destructor for this variable.
+
+ If CLEANUP contains any SAVE_EXPRs, then you must preevaluate them
+ either before or after calling `expand_decl' but before compiling
+ any subsequent expressions. This is because CLEANUP may be expanded
+ more than once, on different branches of execution.
+ For the same reason, CLEANUP may not contain a CALL_EXPR
+ except as its topmost node--else `preexpand_calls' would get confused.
+
+ If CLEANUP is nonzero and DECL is zero, we record a cleanup
+ that is not associated with any particular variable.
+
+ There is no special support here for C++ constructors.
+ They should be handled by the proper code in DECL_INITIAL. */
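+
+/* By way of example: an `int' local that is neither volatile nor
+ addressable gets a fresh pseudo register below; a fixed-size aggregate
+ gets a stack slot from assign_stack_temp; and a variable-sized object
+ gets space pushed with allocate_dynamic_stack_space after the block's
+ entry stack pointer has been saved. */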
+
+void
+expand_decl (decl)
+ register tree decl;
+{
+ struct nesting *thisblock = block_stack;
+ tree type;
+
+ if (output_bytecode)
+ {
+ bc_expand_decl (decl, 0);
+ return;
+ }
+
+ type = TREE_TYPE (decl);
+
+ /* Only automatic variables need any expansion done.
+ Static and external variables, and external functions,
+ will be handled by `assemble_variable' (called from finish_decl).
+ TYPE_DECL and CONST_DECL require nothing.
+ PARM_DECLs are handled in `assign_parms'. */
+
+ if (TREE_CODE (decl) != VAR_DECL)
+ return;
+ if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
+ return;
+
+ /* Create the RTL representation for the variable. */
+
+ if (type == error_mark_node)
+ DECL_RTL (decl) = gen_rtx (MEM, BLKmode, const0_rtx);
+ else if (DECL_SIZE (decl) == 0)
+ /* Variable with incomplete type. */
+ {
+ if (DECL_INITIAL (decl) == 0)
+ /* Error message was already done; now avoid a crash. */
+ DECL_RTL (decl) = assign_stack_temp (DECL_MODE (decl), 0, 1);
+ else
+ /* An initializer is going to decide the size of this array.
+ Until we know the size, represent its address with a reg. */
+ DECL_RTL (decl) = gen_rtx (MEM, BLKmode, gen_reg_rtx (Pmode));
+ }
+ else if (DECL_MODE (decl) != BLKmode
+ /* If -ffloat-store, don't put explicit float vars
+ into regs. */
+ && !(flag_float_store
+ && TREE_CODE (type) == REAL_TYPE)
+ && ! TREE_THIS_VOLATILE (decl)
+ && ! TREE_ADDRESSABLE (decl)
+ && (DECL_REGISTER (decl) || ! obey_regdecls))
+ {
+ /* Automatic variable that can go in a register. */
+ int unsignedp = TREE_UNSIGNED (type);
+ enum machine_mode reg_mode
+ = promote_mode (type, DECL_MODE (decl), &unsignedp, 0);
+
+ if (TREE_CODE (type) == COMPLEX_TYPE)
+ {
+ rtx realpart, imagpart;
+ enum machine_mode partmode = TYPE_MODE (TREE_TYPE (type));
+
+ /* For a complex type variable, make a CONCAT of two pseudos
+ so that the real and imaginary parts
+ can be allocated separately. */
+ realpart = gen_reg_rtx (partmode);
+ REG_USERVAR_P (realpart) = 1;
+ imagpart = gen_reg_rtx (partmode);
+ REG_USERVAR_P (imagpart) = 1;
+ DECL_RTL (decl) = gen_rtx (CONCAT, reg_mode, realpart, imagpart);
+ }
+ else
+ {
+ DECL_RTL (decl) = gen_reg_rtx (reg_mode);
+ if (TREE_CODE (type) == POINTER_TYPE)
+ mark_reg_pointer (DECL_RTL (decl));
+ REG_USERVAR_P (DECL_RTL (decl)) = 1;
+ }
+ }
+ else if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
+ {
+ /* Variable of fixed size that goes on the stack. */
+ rtx oldaddr = 0;
+ rtx addr;
+
+ /* If we previously made RTL for this decl, it must be an array
+ whose size was determined by the initializer.
+ The old address was a register; set that register now
+ to the proper address. */
+ if (DECL_RTL (decl) != 0)
+ {
+ if (GET_CODE (DECL_RTL (decl)) != MEM
+ || GET_CODE (XEXP (DECL_RTL (decl), 0)) != REG)
+ abort ();
+ oldaddr = XEXP (DECL_RTL (decl), 0);
+ }
+
+ DECL_RTL (decl)
+ = assign_stack_temp (DECL_MODE (decl),
+ ((TREE_INT_CST_LOW (DECL_SIZE (decl))
+ + BITS_PER_UNIT - 1)
+ / BITS_PER_UNIT),
+ 1);
+
+ /* Set alignment we actually gave this decl. */
+ DECL_ALIGN (decl) = (DECL_MODE (decl) == BLKmode ? BIGGEST_ALIGNMENT
+ : GET_MODE_BITSIZE (DECL_MODE (decl)));
+
+ if (oldaddr)
+ {
+ addr = force_operand (XEXP (DECL_RTL (decl), 0), oldaddr);
+ if (addr != oldaddr)
+ emit_move_insn (oldaddr, addr);
+ }
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_IN_STRUCT_P (DECL_RTL (decl)) = AGGREGATE_TYPE_P (TREE_TYPE (decl));
+#if 0
+ /* If this is in memory because of -ffloat-store,
+ set the volatile bit, to prevent optimizations from
+ undoing the effects. */
+ if (flag_float_store && TREE_CODE (type) == REAL_TYPE)
+ MEM_VOLATILE_P (DECL_RTL (decl)) = 1;
+#endif
+ }
+ else
+ /* Dynamic-size object: must push space on the stack. */
+ {
+ rtx address, size;
+
+ /* Record the stack pointer on entry to block, if have
+ not already done so. */
+ if (thisblock->data.block.stack_level == 0)
+ {
+ do_pending_stack_adjust ();
+ emit_stack_save (thisblock->next ? SAVE_BLOCK : SAVE_FUNCTION,
+ &thisblock->data.block.stack_level,
+ thisblock->data.block.first_insn);
+ stack_block_stack = thisblock;
+ }
+
+ /* Compute the variable's size, in bytes. */
+ size = expand_expr (size_binop (CEIL_DIV_EXPR,
+ DECL_SIZE (decl),
+ size_int (BITS_PER_UNIT)),
+ NULL_RTX, VOIDmode, 0);
+ free_temp_slots ();
+
+ /* Allocate space on the stack for the variable. */
+ address = allocate_dynamic_stack_space (size, NULL_RTX,
+ DECL_ALIGN (decl));
+
+ /* Reference the variable indirectly through that rtx. */
+ DECL_RTL (decl) = gen_rtx (MEM, DECL_MODE (decl), address);
+
+ /* If this is a memory ref that contains aggregate components,
+ mark it as such for cse and loop optimize. */
+ MEM_IN_STRUCT_P (DECL_RTL (decl)) = AGGREGATE_TYPE_P (TREE_TYPE (decl));
+
+ /* Indicate the alignment we actually gave this variable. */
+#ifdef STACK_BOUNDARY
+ DECL_ALIGN (decl) = STACK_BOUNDARY;
+#else
+ DECL_ALIGN (decl) = BIGGEST_ALIGNMENT;
+#endif
+ }
+
+ if (TREE_THIS_VOLATILE (decl))
+ MEM_VOLATILE_P (DECL_RTL (decl)) = 1;
+#if 0 /* A variable is not necessarily unchanging
+ just because it is const. RTX_UNCHANGING_P
+ means no change in the function,
+ not merely no change in the variable's scope.
+ It is correct to set RTX_UNCHANGING_P if the variable's scope
+ is the whole function. There's no convenient way to test that. */
+ if (TREE_READONLY (decl))
+ RTX_UNCHANGING_P (DECL_RTL (decl)) = 1;
+#endif
+
+ /* If doing stupid register allocation, make sure life of any
+ register variable starts here, at the start of its scope. */
+
+ if (obey_regdecls)
+ use_variable (DECL_RTL (decl));
+}
+
+
+/* Generate code for the automatic variable declaration DECL. For
+ most variables this just means we give it a stack offset. The
+ compiler sometimes emits cleanups without variables and we will
+ have to deal with those too. */
+
+static void
+bc_expand_decl (decl, cleanup)
+ tree decl;
+ tree cleanup;
+{
+ tree type;
+
+ if (!decl)
+ {
+ /* A cleanup with no variable. */
+ if (!cleanup)
+ abort ();
+
+ return;
+ }
+
+ /* Only auto variables need any work. */
+ if (TREE_CODE (decl) != VAR_DECL || TREE_STATIC (decl) || DECL_EXTERNAL (decl))
+ return;
+
+ type = TREE_TYPE (decl);
+
+ if (type == error_mark_node)
+ DECL_RTL (decl) = bc_gen_rtx ((char *) 0, 0, (struct bc_label *) 0);
+
+ else if (DECL_SIZE (decl) == 0)
+
+ /* Variable with incomplete type. The stack offset herein will be
+ fixed later in expand_decl_init (). */
+ DECL_RTL (decl) = bc_gen_rtx ((char *) 0, 0, (struct bc_label *) 0);
+
+ else if (TREE_CONSTANT (DECL_SIZE (decl)))
+ {
+ DECL_RTL (decl) = bc_allocate_local (TREE_INT_CST_LOW (DECL_SIZE (decl)) / BITS_PER_UNIT,
+ DECL_ALIGN (decl));
+ }
+ else
+ DECL_RTL (decl) = bc_allocate_variable_array (DECL_SIZE (decl));
+}
+
+/* Emit code to perform the initialization of a declaration DECL. */
+
+void
+expand_decl_init (decl)
+ tree decl;
+{
+ int was_used = TREE_USED (decl);
+
+ if (output_bytecode)
+ {
+ bc_expand_decl_init (decl);
+ return;
+ }
+
+ /* If this is a CONST_DECL, we don't have to generate any code, but
+ if DECL_INITIAL is a constant, call expand_expr to force TREE_CST_RTL
+ to be set while in the obstack containing the constant. If we don't
+ do this, we can lose if we have functions nested three deep and the middle
+ function makes a CONST_DECL whose DECL_INITIAL is a STRING_CST while
+ the innermost function is the first to expand that STRING_CST. */
+ if (TREE_CODE (decl) == CONST_DECL)
+ {
+ if (DECL_INITIAL (decl) && TREE_CONSTANT (DECL_INITIAL (decl)))
+ expand_expr (DECL_INITIAL (decl), NULL_RTX, VOIDmode,
+ EXPAND_INITIALIZER);
+ return;
+ }
+
+ if (TREE_STATIC (decl))
+ return;
+
+ /* Compute and store the initial value now. */
+
+ if (DECL_INITIAL (decl) == error_mark_node)
+ {
+ enum tree_code code = TREE_CODE (TREE_TYPE (decl));
+ if (code == INTEGER_TYPE || code == REAL_TYPE || code == ENUMERAL_TYPE
+ || code == POINTER_TYPE)
+ expand_assignment (decl, convert (TREE_TYPE (decl), integer_zero_node),
+ 0, 0);
+ emit_queue ();
+ }
+ else if (DECL_INITIAL (decl) && TREE_CODE (DECL_INITIAL (decl)) != TREE_LIST)
+ {
+ emit_line_note (DECL_SOURCE_FILE (decl), DECL_SOURCE_LINE (decl));
+ expand_assignment (decl, DECL_INITIAL (decl), 0, 0);
+ emit_queue ();
+ }
+
+ /* Don't let the initialization count as "using" the variable. */
+ TREE_USED (decl) = was_used;
+
+ /* Free any temporaries we made while initializing the decl. */
+ free_temp_slots ();
+}
+
+/* Expand initialization for variable-sized types. Allocate array
+ using newlocalSI and set local variable, which is a pointer to the
+ storage. */
+
+static void
+bc_expand_variable_local_init (decl)
+ tree decl;
+{
+ /* Evaluate size expression and coerce to SI */
+ bc_expand_expr (DECL_SIZE (decl));
+
+ /* Type sizes are always (?) of TREE_CODE INTEGER_CST, so
+ no coercion is necessary (?) */
+
+/* emit_typecode_conversion (preferred_typecode (TYPE_MODE (DECL_SIZE (decl)),
+ TREE_UNSIGNED (DECL_SIZE (decl))), SIcode); */
+
+ /* Emit code to allocate array */
+ bc_emit_instruction (newlocalSI);
+
+ /* Store array pointer in local variable. This is the only instance
+ where we actually want the address of the pointer to the
+ variable-size block, rather than the pointer itself. We avoid
+ using expand_address() since that would cause the pointer to be
+ pushed rather than its address. Hence the hard-coded reference;
+ notice also that the variable is always local (no global
+ variable-size type variables). */
+
+ bc_load_localaddr (DECL_RTL (decl));
+ bc_emit_instruction (storeP);
+}
+
+
+/* Emit code to initialize a declaration. */
+
+static void
+bc_expand_decl_init (decl)
+ tree decl;
+{
+ int org_stack_depth;
+
+ /* Static initializers are handled elsewhere. */
+
+ if (TREE_STATIC (decl))
+ return;
+
+ /* Remember the original stack depth. */
+ org_stack_depth = stack_depth;
+
+ /* If the type is variable-size, we first create its space (we ASSUME
+ it CAN'T be static). We do this regardless of whether there's an
+ initializer assignment or not. */
+
+ if (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
+ bc_expand_variable_local_init (decl);
+
+ /* Expand initializer assignment */
+ if (DECL_INITIAL (decl) == error_mark_node)
+ {
+ enum tree_code code = TREE_CODE (TREE_TYPE (decl));
+
+ if (code == INTEGER_TYPE || code == REAL_TYPE || code == ENUMERAL_TYPE
+ || code == POINTER_TYPE)
+
+ expand_assignment (decl, convert (TREE_TYPE (decl), integer_zero_node), 0, 0);
+ }
+ else if (DECL_INITIAL (decl))
+ expand_assignment (decl, DECL_INITIAL (decl), 0, 0);
+
+ /* Restore stack depth */
+ if (org_stack_depth > stack_depth)
+ abort ();
+
+ bc_adjust_stack (stack_depth - org_stack_depth);
+}
+
+
+/* CLEANUP is an expression to be executed at exit from this binding contour;
+ for example, in C++, it might call the destructor for this variable.
+
+ If CLEANUP contains any SAVE_EXPRs, then you must preevaluate them
+ either before or after calling `expand_decl' but before compiling
+ any subsequent expressions. This is because CLEANUP may be expanded
+ more than once, on different branches of execution.
+ For the same reason, CLEANUP may not contain a CALL_EXPR
+ except as its topmost node--else `preexpand_calls' would get confused.
+
+ If CLEANUP is nonzero and DECL is zero, we record a cleanup
+ that is not associated with any particular variable. */
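+
+/* In C++, for example, a block-scope `X x;' whose class has a
+ destructor is registered here with CLEANUP being a call to that
+ destructor, to be expanded on every exit path from the binding
+ contour. */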
+
+int
+expand_decl_cleanup (decl, cleanup)
+ tree decl, cleanup;
+{
+ struct nesting *thisblock = block_stack;
+
+ /* Error if we are not in any block. */
+ if (thisblock == 0)
+ return 0;
+
+ /* Record the cleanup if there is one. */
+
+ if (cleanup != 0)
+ {
+ thisblock->data.block.cleanups
+ = temp_tree_cons (decl, cleanup, thisblock->data.block.cleanups);
+ /* If this block has a cleanup, it belongs in stack_block_stack. */
+ stack_block_stack = thisblock;
+ (*interim_eh_hook) (NULL_TREE);
+ }
+ return 1;
+}
+
+/* DECL is an anonymous union. CLEANUP is a cleanup for DECL.
+ DECL_ELTS is the list of elements that belong to DECL's type.
+ In each, the TREE_VALUE is a VAR_DECL, and the TREE_PURPOSE a cleanup. */
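+
+/* E.g. for a C++ anonymous union `union { int i; float f; };' each
+ member below receives RTL aliasing the union's own storage: a MEM at
+ the same address, or a SUBREG of the same register, in the member's
+ machine mode. */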
+
+void
+expand_anon_union_decl (decl, cleanup, decl_elts)
+ tree decl, cleanup, decl_elts;
+{
+ struct nesting *thisblock = block_stack;
+ rtx x;
+
+ expand_decl (decl);
+ x = DECL_RTL (decl);
+
+ while (decl_elts)
+ {
+ tree decl_elt = TREE_VALUE (decl_elts);
+ tree cleanup_elt = TREE_PURPOSE (decl_elts);
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (decl_elt));
+
+ /* (SUBREG (MEM ...)) at RTL generation time is invalid, so we
+ instead create a new MEM rtx with the proper mode. */
+ if (GET_CODE (x) == MEM)
+ {
+ if (mode == GET_MODE (x))
+ DECL_RTL (decl_elt) = x;
+ else
+ {
+ DECL_RTL (decl_elt) = gen_rtx (MEM, mode, copy_rtx (XEXP (x, 0)));
+ MEM_IN_STRUCT_P (DECL_RTL (decl_elt)) = MEM_IN_STRUCT_P (x);
+ RTX_UNCHANGING_P (DECL_RTL (decl_elt)) = RTX_UNCHANGING_P (x);
+ }
+ }
+ else if (GET_CODE (x) == REG)
+ {
+ if (mode == GET_MODE (x))
+ DECL_RTL (decl_elt) = x;
+ else
+ DECL_RTL (decl_elt) = gen_rtx (SUBREG, mode, x, 0);
+ }
+ else
+ abort ();
+
+ /* Record the cleanup if there is one. */
+
+ if (cleanup != 0)
+ thisblock->data.block.cleanups
+ = temp_tree_cons (decl_elt, cleanup_elt,
+ thisblock->data.block.cleanups);
+
+ decl_elts = TREE_CHAIN (decl_elts);
+ }
+}
+
+/* Expand a list of cleanups LIST.
+ Elements may be expressions or may be nested lists.
+
+ If DONT_DO is nonnull, then any list-element
+ whose TREE_PURPOSE matches DONT_DO is omitted.
+ This is sometimes used to avoid a cleanup associated with
+ a value that is being returned out of the scope. */
+
+static void
+expand_cleanups (list, dont_do)
+ tree list;
+ tree dont_do;
+{
+ tree tail;
+ for (tail = list; tail; tail = TREE_CHAIN (tail))
+ if (dont_do == 0 || TREE_PURPOSE (tail) != dont_do)
+ {
+ if (TREE_CODE (TREE_VALUE (tail)) == TREE_LIST)
+ expand_cleanups (TREE_VALUE (tail), dont_do);
+ else
+ {
+ (*interim_eh_hook) (TREE_VALUE (tail));
+
+ /* Cleanups may be run multiple times. For example,
+ when exiting a binding contour, we expand the
+ cleanups associated with that contour. When a goto
+ within that binding contour has a target outside that
+ contour, it will expand all cleanups from its scope to
+ the target. Though the cleanups are expanded multiple
+ times, the control paths are non-overlapping so the
+ cleanups will not be executed twice. */
+ expand_expr (TREE_VALUE (tail), const0_rtx, VOIDmode, 0);
+ free_temp_slots ();
+ }
+ }
+}
+
+/* Move all cleanups from the current block_stack
+ to the containing block_stack, where they are assumed to
+ have been created. If anything can cause a temporary to
+ be created, but not expanded for more than one level of
+ block_stacks, then this code will have to change. */
+
+void
+move_cleanups_up ()
+{
+ struct nesting *block = block_stack;
+ struct nesting *outer = block->next;
+
+ outer->data.block.cleanups
+ = chainon (block->data.block.cleanups,
+ outer->data.block.cleanups);
+ block->data.block.cleanups = 0;
+}
+
+tree
+last_cleanup_this_contour ()
+{
+ if (block_stack == 0)
+ return 0;
+
+ return block_stack->data.block.cleanups;
+}
+
+/* Return 1 if there are any pending cleanups at this point.
+ If THIS_CONTOUR is nonzero, check the current contour as well.
+ Otherwise, look only at the contours that enclose this one. */
+
+int
+any_pending_cleanups (this_contour)
+ int this_contour;
+{
+ struct nesting *block;
+
+ if (block_stack == 0)
+ return 0;
+
+ if (this_contour && block_stack->data.block.cleanups != NULL)
+ return 1;
+ if (block_stack->data.block.cleanups == 0
+ && (block_stack->data.block.outer_cleanups == 0
+#if 0
+ || block_stack->data.block.outer_cleanups == empty_cleanup_list
+#endif
+ ))
+ return 0;
+
+ for (block = block_stack->next; block; block = block->next)
+ if (block->data.block.cleanups != 0)
+ return 1;
+
+ return 0;
+}
+
+/* Enter a case (Pascal) or switch (C) statement.
+ Push a block onto case_stack and nesting_stack
+ to accumulate the case-labels that are seen
+ and to record the labels generated for the statement.
+
+ EXIT_FLAG is nonzero if `exit_something' should exit this case stmt.
+ Otherwise, this construct is transparent for `exit_something'.
+
+ EXPR is the index-expression to be dispatched on.
+ TYPE is its nominal type. We could simply convert EXPR to this type,
+ but instead we take short cuts. */
+
+void
+expand_start_case (exit_flag, expr, type, printname)
+ int exit_flag;
+ tree expr;
+ tree type;
+ char *printname;
+{
+ register struct nesting *thiscase = ALLOC_NESTING ();
+
+ /* Make an entry on case_stack for the case we are entering. */
+
+ thiscase->next = case_stack;
+ thiscase->all = nesting_stack;
+ thiscase->depth = ++nesting_depth;
+ thiscase->exit_label = exit_flag ? gen_label_rtx () : 0;
+ thiscase->data.case_stmt.case_list = 0;
+ thiscase->data.case_stmt.index_expr = expr;
+ thiscase->data.case_stmt.nominal_type = type;
+ thiscase->data.case_stmt.default_label = 0;
+ thiscase->data.case_stmt.num_ranges = 0;
+ thiscase->data.case_stmt.printname = printname;
+ thiscase->data.case_stmt.seenlabel = 0;
+ case_stack = thiscase;
+ nesting_stack = thiscase;
+
+ if (output_bytecode)
+ {
+ bc_expand_start_case (thiscase, expr, type, printname);
+ return;
+ }
+
+ do_pending_stack_adjust ();
+
+ /* Make sure case_stmt.start points to something that won't
+ need any transformation before expand_end_case. */
+ if (GET_CODE (get_last_insn ()) != NOTE)
+ emit_note (NULL_PTR, NOTE_INSN_DELETED);
+
+ thiscase->data.case_stmt.start = get_last_insn ();
+}
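+
+/* Sketch of the case protocol: for a C `switch (i) { ... }' the front
+ end calls expand_start_case, then pushcase or pushcase_range once per
+ label as it is seen, and finally expand_end_case (defined later in
+ this file), which emits the actual dispatch code. */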
+
+
+/* Enter a case statement. It is assumed that the caller has pushed
+ the current context onto the case stack. */
+
+static void
+bc_expand_start_case (thiscase, expr, type, printname)
+ struct nesting *thiscase;
+ tree expr;
+ tree type;
+ char *printname;
+{
+ bc_expand_expr (expr);
+ bc_expand_conversion (TREE_TYPE (expr), type);
+
+ /* For case statements, the `skip' label is a jump target whose
+ definition is emitted only after the size of the jump table is known. */
+
+ thiscase->data.case_stmt.skip_label = gen_label_rtx ();
+ bc_emit_bytecode (jump);
+ bc_emit_bytecode_labelref (BYTECODE_BC_LABEL (thiscase->data.case_stmt.skip_label));
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+}
+
+
+/* Start a "dummy case statement" within which case labels are invalid
+ and are not connected to any larger real case statement.
+ This can be used if you don't want to let a case statement jump
+ into the middle of certain kinds of constructs. */
+
+void
+expand_start_case_dummy ()
+{
+ register struct nesting *thiscase = ALLOC_NESTING ();
+
+ /* Make an entry on case_stack for the dummy. */
+
+ thiscase->next = case_stack;
+ thiscase->all = nesting_stack;
+ thiscase->depth = ++nesting_depth;
+ thiscase->exit_label = 0;
+ thiscase->data.case_stmt.case_list = 0;
+ thiscase->data.case_stmt.start = 0;
+ thiscase->data.case_stmt.nominal_type = 0;
+ thiscase->data.case_stmt.default_label = 0;
+ thiscase->data.case_stmt.num_ranges = 0;
+ case_stack = thiscase;
+ nesting_stack = thiscase;
+}
+
+/* End a dummy case statement. */
+
+void
+expand_end_case_dummy ()
+{
+ POPSTACK (case_stack);
+}
+
+/* Return the data type of the index-expression
+ of the innermost case statement, or null if none. */
+
+tree
+case_index_expr_type ()
+{
+ if (case_stack)
+ return TREE_TYPE (case_stack->data.case_stmt.index_expr);
+ return 0;
+}
+
+/* Accumulate one case or default label inside a case or switch statement.
+ VALUE is the value of the case (a null pointer, for a default label).
+ The function CONVERTER, when applied to arguments T and V,
+ converts the value V to the type T.
+
+ If not currently inside a case or switch statement, return 1 and do
+ nothing. The caller will print a language-specific error message.
+ If VALUE is a duplicate or overlaps, return 2 and do nothing
+ except store the (first) duplicate node in *DUPLICATE.
+ If VALUE is out of range, return 3 and do nothing.
+ If we are jumping into the scope of a cleanup or var-sized array, return 5.
+ Return 0 on success.
+
+ Extended to handle range statements. */
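+
+/* Thus a plain `case 5:' arrives here with VALUE the constant 5, a
+ `default:' arrives with VALUE zero, and a GNU range `case 1 ... 3:'
+ goes through pushcase_range below instead. */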
+
+int
+pushcase (value, converter, label, duplicate)
+ register tree value;
+ tree (*converter) PROTO((tree, tree));
+ register tree label;
+ tree *duplicate;
+{
+ register struct case_node **l;
+ register struct case_node *n;
+ tree index_type;
+ tree nominal_type;
+
+ if (output_bytecode)
+ return bc_pushcase (value, label);
+
+ /* Fail if not inside a real case statement. */
+ if (! (case_stack && case_stack->data.case_stmt.start))
+ return 1;
+
+ if (stack_block_stack
+ && stack_block_stack->depth > case_stack->depth)
+ return 5;
+
+ index_type = TREE_TYPE (case_stack->data.case_stmt.index_expr);
+ nominal_type = case_stack->data.case_stmt.nominal_type;
+
+ /* If the index is erroneous, avoid more problems: pretend to succeed. */
+ if (index_type == error_mark_node)
+ return 0;
+
+ /* Convert VALUE to the type in which the comparisons are nominally done. */
+ if (value != 0)
+ value = (*converter) (nominal_type, value);
+
+ /* If this is the first label, warn if any insns have been emitted. */
+ if (case_stack->data.case_stmt.seenlabel == 0)
+ {
+ rtx insn;
+ for (insn = case_stack->data.case_stmt.start;
+ insn;
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ break;
+ if (GET_CODE (insn) != NOTE
+ && (GET_CODE (insn) != INSN || GET_CODE (PATTERN (insn)) != USE))
+ {
+ warning ("unreachable code at beginning of %s",
+ case_stack->data.case_stmt.printname);
+ break;
+ }
+ }
+ }
+ case_stack->data.case_stmt.seenlabel = 1;
+
+ /* Fail if this value is out of range for the actual type of the index
+ (which may be narrower than NOMINAL_TYPE). */
+ if (value != 0 && ! int_fits_type_p (value, index_type))
+ return 3;
+
+ /* Fail if this is a duplicate or overlaps another entry. */
+ if (value == 0)
+ {
+ if (case_stack->data.case_stmt.default_label != 0)
+ {
+ *duplicate = case_stack->data.case_stmt.default_label;
+ return 2;
+ }
+ case_stack->data.case_stmt.default_label = label;
+ }
+ else
+ {
+ /* Find the elt in the chain before which to insert the new value,
+ to keep the chain sorted in increasing order.
+ But report an error if this element is a duplicate. */
+ for (l = &case_stack->data.case_stmt.case_list;
+ /* Keep going past elements distinctly less than VALUE. */
+ *l != 0 && tree_int_cst_lt ((*l)->high, value);
+ l = &(*l)->right)
+ ;
+ if (*l)
+ {
+ /* Element we will insert before must be distinctly greater;
+ overlap means error. */
+ if (! tree_int_cst_lt (value, (*l)->low))
+ {
+ *duplicate = (*l)->code_label;
+ return 2;
+ }
+ }
+
+ /* Add this label to the chain, and succeed.
+ Copy VALUE so it is on temporary rather than momentary
+ obstack and will thus survive till the end of the case statement. */
+ n = (struct case_node *) oballoc (sizeof (struct case_node));
+ n->left = 0;
+ n->right = *l;
+ n->high = n->low = copy_node (value);
+ n->code_label = label;
+ *l = n;
+ }
+
+ expand_label (label);
+ return 0;
+}
+
+/* Like pushcase but this case applies to all values
+ between VALUE1 and VALUE2 (inclusive).
+ The return value is the same as that of pushcase
+ but there is one additional error code:
+ 4 means the specified range was empty. */
+
+int
+pushcase_range (value1, value2, converter, label, duplicate)
+ register tree value1, value2;
+ tree (*converter) PROTO((tree, tree));
+ register tree label;
+ tree *duplicate;
+{
+ register struct case_node **l;
+ register struct case_node *n;
+ tree index_type;
+ tree nominal_type;
+
+ /* Fail if not inside a real case statement. */
+ if (! (case_stack && case_stack->data.case_stmt.start))
+ return 1;
+
+ if (stack_block_stack
+ && stack_block_stack->depth > case_stack->depth)
+ return 5;
+
+ index_type = TREE_TYPE (case_stack->data.case_stmt.index_expr);
+ nominal_type = case_stack->data.case_stmt.nominal_type;
+
+ /* If the index is erroneous, avoid more problems: pretend to succeed. */
+ if (index_type == error_mark_node)
+ return 0;
+
+ /* If this is the first label, warn if any insns have been emitted. */
+ if (case_stack->data.case_stmt.seenlabel == 0)
+ {
+ rtx insn;
+ for (insn = case_stack->data.case_stmt.start;
+ insn;
+ insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ break;
+ if (GET_CODE (insn) != NOTE
+ && (GET_CODE (insn) != INSN || GET_CODE (PATTERN (insn)) != USE))
+ {
+ warning ("unreachable code at beginning of %s",
+ case_stack->data.case_stmt.printname);
+ break;
+ }
+ }
+ }
+ case_stack->data.case_stmt.seenlabel = 1;
+
+ /* Convert VALUEs to the type in which the comparisons are nominally done. */
+ if (value1 == 0) /* Negative infinity. */
+ value1 = TYPE_MIN_VALUE (index_type);
+ value1 = (*converter) (nominal_type, value1);
+
+ if (value2 == 0) /* Positive infinity. */
+ value2 = TYPE_MAX_VALUE (index_type);
+ value2 = (*converter) (nominal_type, value2);
+
+ /* Fail if these values are out of range. */
+ if (! int_fits_type_p (value1, index_type))
+ return 3;
+
+ if (! int_fits_type_p (value2, index_type))
+ return 3;
+
+ /* Fail if the range is empty. */
+ if (tree_int_cst_lt (value2, value1))
+ return 4;
+
+ /* If the bounds are equal, turn this into the one-value case. */
+ if (tree_int_cst_equal (value1, value2))
+ return pushcase (value1, converter, label, duplicate);
+
+ /* Find the elt in the chain before which to insert the new value,
+ to keep the chain sorted in increasing order.
+ But report an error if this element is a duplicate. */
+ for (l = &case_stack->data.case_stmt.case_list;
+ /* Keep going past elements distinctly less than this range. */
+ *l != 0 && tree_int_cst_lt ((*l)->high, value1);
+ l = &(*l)->right)
+ ;
+ if (*l)
+ {
+ /* Element we will insert before must be distinctly greater;
+ overlap means error. */
+ if (! tree_int_cst_lt (value2, (*l)->low))
+ {
+ *duplicate = (*l)->code_label;
+ return 2;
+ }
+ }
+
+ /* Add this label to the chain, and succeed.
+ Copy VALUE1, VALUE2 so they are on temporary rather than momentary
+ obstack and will thus survive till the end of the case statement. */
+
+ n = (struct case_node *) oballoc (sizeof (struct case_node));
+ n->left = 0;
+ n->right = *l;
+ n->low = copy_node (value1);
+ n->high = copy_node (value2);
+ n->code_label = label;
+ *l = n;
+
+ expand_label (label);
+
+ case_stack->data.case_stmt.num_ranges++;
+
+ return 0;
+}
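+
+/* Illustrative use of the two functions above (an editorial sketch, not
+   part of the original source; `value', `lab' and the messages are made
+   up, and `convert' stands in for the front end's converter):
+
+	tree duplicate;
+	switch (pushcase (value, convert, lab, &duplicate))
+	  {
+	  case 1: error ("case label not within a switch statement"); break;
+	  case 2: error ("duplicate case value"); break;
+	  case 3: error ("case value out of range"); break;
+	  case 5: error ("case label within scope of cleanup or variable array"); break;
+	  }
+
+   A default label is entered the same way with VALUE == 0, and
+   pushcase_range can additionally return 4 for an empty range.  */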
+
+
+/* Accumulate one case or default label; VALUE is the value of the
+ case, or null for a default label. If not currently inside a case,
+ return 1 and do nothing. If VALUE is a duplicate or overlaps, return
+ 2 and do nothing. If VALUE is out of range, return 3 and do nothing.
+ Return 0 on success. This function is a leftover from the earlier
+ bytecode compiler, which was based on gcc 1.37. It should be
+ merged into pushcase. */
+
+static int
+bc_pushcase (value, label)
+ tree value;
+ tree label;
+{
+ struct nesting *thiscase = case_stack;
+ struct case_node *case_label, *new_label;
+
+ if (! thiscase)
+ return 1;
+
+ /* Fail if duplicate, overlap, or out of type range. */
+ if (value)
+ {
+ value = convert (thiscase->data.case_stmt.nominal_type, value);
+ if (! int_fits_type_p (value, thiscase->data.case_stmt.nominal_type))
+ return 3;
+
+ for (case_label = thiscase->data.case_stmt.case_list;
+ case_label->left; case_label = case_label->left)
+ if (! tree_int_cst_lt (case_label->left->high, value))
+ break;
+
+ if ((case_label != thiscase->data.case_stmt.case_list
+ && ! tree_int_cst_lt (case_label->high, value))
+ || (case_label->left && ! tree_int_cst_lt (value, case_label->left->low)))
+ return 2;
+
+ new_label = (struct case_node *) oballoc (sizeof (struct case_node));
+ new_label->low = new_label->high = copy_node (value);
+ new_label->code_label = label;
+ new_label->left = case_label->left;
+
+ case_label->left = new_label;
+ thiscase->data.case_stmt.num_ranges++;
+ }
+ else
+ {
+ if (thiscase->data.case_stmt.default_label)
+ return 2;
+ thiscase->data.case_stmt.default_label = label;
+ }
+
+ expand_label (label);
+ return 0;
+}
+
+/* Called when the index of a switch statement is an enumerated type
+ and there is no default label.
+
+ Checks that all enumeration literals are covered by the case
+ expressions of a switch. Also, warn if there are any extra
+ switch cases that are *not* elements of the enumerated type.
+
+ If all enumeration literals were covered by the case expressions,
+ turn one of the expressions into the default expression since it should
+ not be possible to fall through such a switch. */
+
+void
+check_for_full_enumeration_handling (type)
+ tree type;
+{
+ register struct case_node *n;
+ register struct case_node **l;
+ register tree chain;
+ int all_values = 1;
+
+ if (output_bytecode)
+ {
+ bc_check_for_full_enumeration_handling (type);
+ return;
+ }
+
+ /* The time complexity of this loop is currently O(N * M), with
+ N being the number of members in the enumerated type, and
+ M being the number of case expressions in the switch. */
+
+ for (chain = TYPE_VALUES (type);
+ chain;
+ chain = TREE_CHAIN (chain))
+ {
+ /* Find a match between enumeral and case expression, if possible.
+ Quit looking when we've gone too far (since case expressions
+ are kept sorted in ascending order). Warn about enumerators not
+ handled in the switch statement case expression list. */
+
+ for (n = case_stack->data.case_stmt.case_list;
+ n && tree_int_cst_lt (n->high, TREE_VALUE (chain));
+ n = n->right)
+ ;
+
+ if (!n || tree_int_cst_lt (TREE_VALUE (chain), n->low))
+ {
+ if (warn_switch)
+ warning ("enumeration value `%s' not handled in switch",
+ IDENTIFIER_POINTER (TREE_PURPOSE (chain)));
+ all_values = 0;
+ }
+ }
+
+ /* Now we go the other way around; we warn if there are case
+ expressions that don't correspond to enumerators. This can
+ occur since C and C++ don't enforce type-checking of
+ assignments to enumeration variables. */
+
+ if (warn_switch)
+ for (n = case_stack->data.case_stmt.case_list; n; n = n->right)
+ {
+ for (chain = TYPE_VALUES (type);
+ chain && !tree_int_cst_equal (n->low, TREE_VALUE (chain));
+ chain = TREE_CHAIN (chain))
+ ;
+
+ if (!chain)
+ {
+ if (TYPE_NAME (type) == 0)
+ warning ("case value `%d' not in enumerated type",
+ TREE_INT_CST_LOW (n->low));
+ else
+ warning ("case value `%d' not in enumerated type `%s'",
+ TREE_INT_CST_LOW (n->low),
+ IDENTIFIER_POINTER ((TREE_CODE (TYPE_NAME (type))
+ == IDENTIFIER_NODE)
+ ? TYPE_NAME (type)
+ : DECL_NAME (TYPE_NAME (type))));
+ }
+ if (!tree_int_cst_equal (n->low, n->high))
+ {
+ for (chain = TYPE_VALUES (type);
+ chain && !tree_int_cst_equal (n->high, TREE_VALUE (chain));
+ chain = TREE_CHAIN (chain))
+ ;
+
+ if (!chain)
+ {
+ if (TYPE_NAME (type) == 0)
+ warning ("case value `%d' not in enumerated type",
+ TREE_INT_CST_LOW (n->high));
+ else
+ warning ("case value `%d' not in enumerated type `%s'",
+ TREE_INT_CST_LOW (n->high),
+ IDENTIFIER_POINTER ((TREE_CODE (TYPE_NAME (type))
+ == IDENTIFIER_NODE)
+ ? TYPE_NAME (type)
+ : DECL_NAME (TYPE_NAME (type))));
+ }
+ }
+ }
+
+#if 0
+ /* ??? This optimization is disabled because it causes valid programs to
+ fail. ANSI C does not guarantee that an expression with enum type
+ will have a value that is the same as one of the enumeration literals. */
+
+ /* If all values were found as case labels, make one of them the default
+ label. Thus, this switch will never fall through. We arbitrarily pick
+ the last one to make the default since this is likely the most
+ efficient choice. */
+
+ if (all_values)
+ {
+ for (l = &case_stack->data.case_stmt.case_list;
+ (*l)->right != 0;
+ l = &(*l)->right)
+ ;
+
+ case_stack->data.case_stmt.default_label = (*l)->code_label;
+ *l = 0;
+ }
+#endif /* 0 */
+}
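+
+/* For illustration (a made-up fragment): compiled with -Wswitch,
+
+	enum color { RED, GREEN, BLUE } c;
+	switch (c) { case RED: f (); break; case GREEN: g (); break; }
+
+   has no default label, so this function runs and warns
+   "enumeration value `BLUE' not handled in switch".  */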
+
+
+/* Check that all enumeration literals are covered by the case
+ expressions of a switch. Also warn if there are any cases
+ that are not elements of the enumerated type. */
+
+static void
+bc_check_for_full_enumeration_handling (type)
+ tree type;
+{
+ struct nesting *thiscase = case_stack;
+ struct case_node *c;
+ tree e;
+
+ /* Check for enums not handled. */
+ for (e = TYPE_VALUES (type); e; e = TREE_CHAIN (e))
+ {
+ for (c = thiscase->data.case_stmt.case_list->left;
+ c && tree_int_cst_lt (c->high, TREE_VALUE (e));
+ c = c->left)
+ ;
+ if (! (c && tree_int_cst_equal (c->low, TREE_VALUE (e))))
+ warning ("enumerated value `%s' not handled in switch",
+ IDENTIFIER_POINTER (TREE_PURPOSE (e)));
+ }
+
+ /* Check for cases not in the enumeration. */
+ for (c = thiscase->data.case_stmt.case_list->left; c; c = c->left)
+ {
+ for (e = TYPE_VALUES (type);
+ e && !tree_int_cst_equal (c->low, TREE_VALUE (e));
+ e = TREE_CHAIN (e))
+ ;
+ if (! e)
+ warning ("case value `%d' not in enumerated type `%s'",
+ TREE_INT_CST_LOW (c->low),
+ IDENTIFIER_POINTER (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE
+ ? TYPE_NAME (type)
+ : DECL_NAME (TYPE_NAME (type))));
+ }
+}
+
+/* Terminate a case (Pascal) or switch (C) statement
+ in which ORIG_INDEX is the expression to be tested.
+ Generate the code to test it and jump to the right place. */
+
+void
+expand_end_case (orig_index)
+ tree orig_index;
+{
+ tree minval, maxval, range, orig_minval;
+ rtx default_label = 0;
+ register struct case_node *n;
+ int count;
+ rtx index;
+ rtx table_label;
+ int ncases;
+ rtx *labelvec;
+ register int i;
+ rtx before_case;
+ register struct nesting *thiscase = case_stack;
+ tree index_expr, index_type;
+ int unsignedp;
+
+ if (output_bytecode)
+ {
+ bc_expand_end_case (orig_index);
+ return;
+ }
+
+ table_label = gen_label_rtx ();
+ index_expr = thiscase->data.case_stmt.index_expr;
+ index_type = TREE_TYPE (index_expr);
+ unsignedp = TREE_UNSIGNED (index_type);
+
+ do_pending_stack_adjust ();
+
+ /* An ERROR_MARK occurs for various reasons including invalid data type. */
+ if (index_type != error_mark_node)
+ {
+ /* If switch expression was an enumerated type, check that all
+ enumeration literals are covered by the cases.
+ No sense trying this if there's a default case, however. */
+
+ if (!thiscase->data.case_stmt.default_label
+ && TREE_CODE (TREE_TYPE (orig_index)) == ENUMERAL_TYPE
+ && TREE_CODE (index_expr) != INTEGER_CST)
+ check_for_full_enumeration_handling (TREE_TYPE (orig_index));
+
+ /* If this is the first label, warn if any insns have been emitted. */
+ if (thiscase->data.case_stmt.seenlabel == 0)
+ {
+ rtx insn;
+ for (insn = get_last_insn ();
+ insn != case_stack->data.case_stmt.start;
+ insn = PREV_INSN (insn))
+ if (GET_CODE (insn) != NOTE
+ && (GET_CODE (insn) != INSN || GET_CODE (PATTERN (insn)) != USE))
+ {
+ warning ("unreachable code at beginning of %s",
+ case_stack->data.case_stmt.printname);
+ break;
+ }
+ }
+
+ /* If we don't have a default-label, create one here,
+ after the body of the switch. */
+ if (thiscase->data.case_stmt.default_label == 0)
+ {
+ thiscase->data.case_stmt.default_label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+ expand_label (thiscase->data.case_stmt.default_label);
+ }
+ default_label = label_rtx (thiscase->data.case_stmt.default_label);
+
+ before_case = get_last_insn ();
+
+ /* Simplify the case-list before we count it. */
+ group_case_nodes (thiscase->data.case_stmt.case_list);
+
+ /* Get upper and lower bounds of case values.
+ Also convert all the case values to the index expr's data type. */
+
+ count = 0;
+ for (n = thiscase->data.case_stmt.case_list; n; n = n->right)
+ {
+ /* Check low and high label values are integers. */
+ if (TREE_CODE (n->low) != INTEGER_CST)
+ abort ();
+ if (TREE_CODE (n->high) != INTEGER_CST)
+ abort ();
+
+ n->low = convert (index_type, n->low);
+ n->high = convert (index_type, n->high);
+
+ /* Count the elements and track the largest and smallest
+ of them (treating them as signed even if they are not). */
+ if (count++ == 0)
+ {
+ minval = n->low;
+ maxval = n->high;
+ }
+ else
+ {
+ if (INT_CST_LT (n->low, minval))
+ minval = n->low;
+ if (INT_CST_LT (maxval, n->high))
+ maxval = n->high;
+ }
+ /* A range counts double, since it requires two compares. */
+ if (! tree_int_cst_equal (n->low, n->high))
+ count++;
+ }
+
+ orig_minval = minval;
+
+ /* Compute span of values. */
+ if (count != 0)
+ range = fold (build (MINUS_EXPR, index_type, maxval, minval));
+
+ if (count == 0)
+ {
+ expand_expr (index_expr, const0_rtx, VOIDmode, 0);
+ emit_queue ();
+ emit_jump (default_label);
+ }
+
+ /* If range of values is much bigger than number of values,
+ make a sequence of conditional branches instead of a dispatch.
+ If the switch-index is a constant, do it this way
+ because we can optimize it. */
+
+#ifndef CASE_VALUES_THRESHOLD
+#ifdef HAVE_casesi
+#define CASE_VALUES_THRESHOLD (HAVE_casesi ? 4 : 5)
+#else
+ /* If the machine does not have a case insn that compares the
+ bounds, this means extra overhead for dispatch tables,
+ which raises the threshold for using them. */
+#define CASE_VALUES_THRESHOLD 5
+#endif /* HAVE_casesi */
+#endif /* CASE_VALUES_THRESHOLD */
+
+ else if (TREE_INT_CST_HIGH (range) != 0
+ || count < CASE_VALUES_THRESHOLD
+ || ((unsigned HOST_WIDE_INT) (TREE_INT_CST_LOW (range))
+ > 10 * count)
+ || TREE_CODE (index_expr) == INTEGER_CST
+ /* These will reduce to a constant. */
+ || (TREE_CODE (index_expr) == CALL_EXPR
+ && TREE_CODE (TREE_OPERAND (index_expr, 0)) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (TREE_OPERAND (index_expr, 0), 0)) == FUNCTION_DECL
+ && DECL_FUNCTION_CODE (TREE_OPERAND (TREE_OPERAND (index_expr, 0), 0)) == BUILT_IN_CLASSIFY_TYPE)
+ || (TREE_CODE (index_expr) == COMPOUND_EXPR
+ && TREE_CODE (TREE_OPERAND (index_expr, 1)) == INTEGER_CST))
+ {
+ index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0);
+
+ /* If the index is a short or char for which we do not have
+ an insn to handle comparisons directly, convert it to
+ a full integer now, rather than letting each comparison
+ generate the conversion. */
+
+ if (GET_MODE_CLASS (GET_MODE (index)) == MODE_INT
+ && (cmp_optab->handlers[(int) GET_MODE(index)].insn_code
+ == CODE_FOR_nothing))
+ {
+ enum machine_mode wider_mode;
+ for (wider_mode = GET_MODE (index); wider_mode != VOIDmode;
+ wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ if (cmp_optab->handlers[(int) wider_mode].insn_code
+ != CODE_FOR_nothing)
+ {
+ index = convert_to_mode (wider_mode, index, unsignedp);
+ break;
+ }
+ }
+
+ emit_queue ();
+ do_pending_stack_adjust ();
+
+ index = protect_from_queue (index, 0);
+ if (GET_CODE (index) == MEM)
+ index = copy_to_reg (index);
+ if (GET_CODE (index) == CONST_INT
+ || TREE_CODE (index_expr) == INTEGER_CST)
+ {
+ /* Make a tree node with the proper constant value
+ if we don't already have one. */
+ if (TREE_CODE (index_expr) != INTEGER_CST)
+ {
+ index_expr
+ = build_int_2 (INTVAL (index),
+ unsignedp || INTVAL (index) >= 0 ? 0 : -1);
+ index_expr = convert (index_type, index_expr);
+ }
+
+ /* For constant index expressions we need only
+ issue an unconditional branch to the appropriate
+ target code. The job of removing any unreachable
+ code is left to the optimization phase if the
+ "-O" option is specified. */
+ for (n = thiscase->data.case_stmt.case_list; n; n = n->right)
+ if (! tree_int_cst_lt (index_expr, n->low)
+ && ! tree_int_cst_lt (n->high, index_expr))
+ break;
+
+ if (n)
+ emit_jump (label_rtx (n->code_label));
+ else
+ emit_jump (default_label);
+ }
+ else
+ {
+ /* If the index expression is not constant we generate
+ a binary decision tree to select the appropriate
+ target code. This is done as follows:
+
+ The list of cases is rearranged into a binary tree,
+ nearly optimal assuming equal probability for each case.
+
+ The tree is transformed into RTL, eliminating
+ redundant test conditions at the same time.
+
+ If program flow could reach the end of the
+ decision tree, an unconditional jump to the
+ default code is emitted. */
+
+ use_cost_table
+ = (TREE_CODE (TREE_TYPE (orig_index)) != ENUMERAL_TYPE
+ && estimate_case_costs (thiscase->data.case_stmt.case_list));
+ balance_case_nodes (&thiscase->data.case_stmt.case_list,
+ NULL_PTR);
+ emit_case_nodes (index, thiscase->data.case_stmt.case_list,
+ default_label, index_type);
+ emit_jump_if_reachable (default_label);
+ }
+ }
+ else
+ {
+ int win = 0;
+#ifdef HAVE_casesi
+ if (HAVE_casesi)
+ {
+ enum machine_mode index_mode = SImode;
+ int index_bits = GET_MODE_BITSIZE (index_mode);
+ rtx op1, op2;
+ enum machine_mode op_mode;
+
+ /* Convert the index to SImode. */
+ if (GET_MODE_BITSIZE (TYPE_MODE (index_type))
+ > GET_MODE_BITSIZE (index_mode))
+ {
+ enum machine_mode omode = TYPE_MODE (index_type);
+ rtx rangertx = expand_expr (range, NULL_RTX, VOIDmode, 0);
+
+ /* We must handle the endpoints in the original mode. */
+ index_expr = build (MINUS_EXPR, index_type,
+ index_expr, minval);
+ minval = integer_zero_node;
+ index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0);
+ emit_cmp_insn (rangertx, index, LTU, NULL_RTX, omode, 1, 0);
+ emit_jump_insn (gen_bltu (default_label));
+ /* Now we can safely truncate. */
+ index = convert_to_mode (index_mode, index, 0);
+ }
+ else
+ {
+ if (TYPE_MODE (index_type) != index_mode)
+ {
+ index_expr = convert (type_for_size (index_bits, 0),
+ index_expr);
+ index_type = TREE_TYPE (index_expr);
+ }
+
+ index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0);
+ }
+ emit_queue ();
+ index = protect_from_queue (index, 0);
+ do_pending_stack_adjust ();
+
+ op_mode = insn_operand_mode[(int)CODE_FOR_casesi][0];
+ if (! (*insn_operand_predicate[(int)CODE_FOR_casesi][0])
+ (index, op_mode))
+ index = copy_to_mode_reg (op_mode, index);
+
+ op1 = expand_expr (minval, NULL_RTX, VOIDmode, 0);
+
+ op_mode = insn_operand_mode[(int)CODE_FOR_casesi][1];
+ if (! (*insn_operand_predicate[(int)CODE_FOR_casesi][1])
+ (op1, op_mode))
+ op1 = copy_to_mode_reg (op_mode, op1);
+
+ op2 = expand_expr (range, NULL_RTX, VOIDmode, 0);
+
+ op_mode = insn_operand_mode[(int)CODE_FOR_casesi][2];
+ if (! (*insn_operand_predicate[(int)CODE_FOR_casesi][2])
+ (op2, op_mode))
+ op2 = copy_to_mode_reg (op_mode, op2);
+
+ emit_jump_insn (gen_casesi (index, op1, op2,
+ table_label, default_label));
+ win = 1;
+ }
+#endif
+#ifdef HAVE_tablejump
+ if (! win && HAVE_tablejump)
+ {
+ index_expr = convert (thiscase->data.case_stmt.nominal_type,
+ fold (build (MINUS_EXPR, index_type,
+ index_expr, minval)));
+ index_type = TREE_TYPE (index_expr);
+ index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0);
+ emit_queue ();
+ index = protect_from_queue (index, 0);
+ do_pending_stack_adjust ();
+
+ do_tablejump (index, TYPE_MODE (index_type),
+ expand_expr (range, NULL_RTX, VOIDmode, 0),
+ table_label, default_label);
+ win = 1;
+ }
+#endif
+ if (! win)
+ abort ();
+
+ /* Get table of labels to jump to, in order of case index. */
+
+ ncases = TREE_INT_CST_LOW (range) + 1;
+ labelvec = (rtx *) alloca (ncases * sizeof (rtx));
+ bzero ((char *) labelvec, ncases * sizeof (rtx));
+
+ for (n = thiscase->data.case_stmt.case_list; n; n = n->right)
+ {
+ register HOST_WIDE_INT i
+ = TREE_INT_CST_LOW (n->low) - TREE_INT_CST_LOW (orig_minval);
+
+ while (1)
+ {
+ labelvec[i]
+ = gen_rtx (LABEL_REF, Pmode, label_rtx (n->code_label));
+ if (i + TREE_INT_CST_LOW (orig_minval)
+ == TREE_INT_CST_LOW (n->high))
+ break;
+ i++;
+ }
+ }
+
+ /* Fill in the gaps with the default. */
+ for (i = 0; i < ncases; i++)
+ if (labelvec[i] == 0)
+ labelvec[i] = gen_rtx (LABEL_REF, Pmode, default_label);
+
+ /* Output the table. */
+ emit_label (table_label);
+
+ /* This would be a lot nicer if CASE_VECTOR_PC_RELATIVE
+ were an expression, instead of an #ifdef/#ifndef. */
+ if (
+#ifdef CASE_VECTOR_PC_RELATIVE
+ 1 ||
+#endif
+ flag_pic)
+ emit_jump_insn (gen_rtx (ADDR_DIFF_VEC, CASE_VECTOR_MODE,
+ gen_rtx (LABEL_REF, Pmode, table_label),
+ gen_rtvec_v (ncases, labelvec)));
+ else
+ emit_jump_insn (gen_rtx (ADDR_VEC, CASE_VECTOR_MODE,
+ gen_rtvec_v (ncases, labelvec)));
+
+ /* If the case insn drops through the table,
+ after the table we must jump to the default-label.
+ Otherwise record no drop-through after the table. */
+#ifdef CASE_DROPS_THROUGH
+ emit_jump (default_label);
+#else
+ emit_barrier ();
+#endif
+ }
+
+ before_case = squeeze_notes (NEXT_INSN (before_case), get_last_insn ());
+ reorder_insns (before_case, get_last_insn (),
+ thiscase->data.case_stmt.start);
+ }
+
+ if (thiscase->exit_label)
+ emit_label (thiscase->exit_label);
+
+ POPSTACK (case_stack);
+
+ free_temp_slots ();
+}
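+
+/* A worked instance of the dispatch heuristic above (editorial sketch,
+   assuming the index expression is not constant): for cases 1, 2, 3 and
+   100, count is 4 and the range is 99; since 99 > 10 * 4, a binary
+   decision tree is emitted.  For cases 1 through 8, count is 8 and the
+   range is 7, which passes every test, so a casesi insn or a tablejump
+   through an 8-entry table is used instead.  */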
+
+
+/* Terminate a case statement. EXPR is the original index
+ expression. */
+
+static void
+bc_expand_end_case (expr)
+ tree expr;
+{
+ struct nesting *thiscase = case_stack;
+ enum bytecode_opcode opcode;
+ struct bc_label *jump_label;
+ struct case_node *c;
+
+ bc_emit_bytecode (jump);
+ bc_emit_bytecode_labelref (BYTECODE_BC_LABEL (thiscase->exit_label));
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+
+ /* Now that the size of the jump table is known, emit the actual
+ indexed jump instruction. */
+ bc_emit_bytecode_labeldef (BYTECODE_BC_LABEL (thiscase->data.case_stmt.skip_label));
+
+ opcode = TYPE_MODE (thiscase->data.case_stmt.nominal_type) == SImode
+ ? TREE_UNSIGNED (thiscase->data.case_stmt.nominal_type) ? caseSU : caseSI
+ : TREE_UNSIGNED (thiscase->data.case_stmt.nominal_type) ? caseDU : caseDI;
+
+ bc_emit_bytecode (opcode);
+
+ /* Now emit the case instruction's literal arguments, in order.
+ In addition to the value on the stack, it uses:
+ 1. The address of the jump table.
+ 2. The size of the jump table.
+ 3. The default label. */
+
+ jump_label = bc_get_bytecode_label ();
+ bc_emit_bytecode_labelref (jump_label);
+ bc_emit_bytecode_const ((char *) &thiscase->data.case_stmt.num_ranges,
+ sizeof thiscase->data.case_stmt.num_ranges);
+
+ if (thiscase->data.case_stmt.default_label)
+ bc_emit_bytecode_labelref (BYTECODE_BC_LABEL (DECL_RTL (thiscase->data.case_stmt.default_label)));
+ else
+ bc_emit_bytecode_labelref (BYTECODE_BC_LABEL (thiscase->exit_label));
+
+ /* Output the jump table. */
+
+ bc_align_bytecode (3 /* PTR_ALIGN */);
+ bc_emit_bytecode_labeldef (jump_label);
+
+ if (TYPE_MODE (thiscase->data.case_stmt.nominal_type) == SImode)
+ for (c = thiscase->data.case_stmt.case_list->left; c; c = c->left)
+ {
+ opcode = TREE_INT_CST_LOW (c->low);
+ bc_emit_bytecode_const ((char *) &opcode, sizeof opcode);
+
+ opcode = TREE_INT_CST_LOW (c->high);
+ bc_emit_bytecode_const ((char *) &opcode, sizeof opcode);
+
+ bc_emit_bytecode_labelref (BYTECODE_BC_LABEL (DECL_RTL (c->code_label)));
+ }
+ else
+ if (TYPE_MODE (thiscase->data.case_stmt.nominal_type) == DImode)
+ for (c = thiscase->data.case_stmt.case_list->left; c; c = c->left)
+ {
+ bc_emit_bytecode_DI_const (c->low);
+ bc_emit_bytecode_DI_const (c->high);
+
+ bc_emit_bytecode_labelref (BYTECODE_BC_LABEL (DECL_RTL (c->code_label)));
+ }
+ else
+ /* Bad mode. */
+ abort ();
+
+
+ bc_emit_bytecode_labeldef (BYTECODE_BC_LABEL (thiscase->exit_label));
+
+ /* Possibly issue enumeration warnings. */
+
+ if (!thiscase->data.case_stmt.default_label
+ && TREE_CODE (TREE_TYPE (expr)) == ENUMERAL_TYPE
+ && TREE_CODE (expr) != INTEGER_CST
+ && warn_switch)
+ check_for_full_enumeration_handling (TREE_TYPE (expr));
+
+
+#ifdef DEBUG_PRINT_CODE
+ fputc ('\n', stderr);
+#endif
+
+ POPSTACK (case_stack);
+}
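+
+/* Sketch of the emitted stream (editorial): for an SImode switch with
+   cases 1 and 4, the caseSI opcode is followed by a reference to the
+   jump table, num_ranges (here 2), and the default label; the aligned
+   table itself then holds the (low, high, label) triples
+   (1, 1, L1) (4, 4, L4).  */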
+
+
+/* Return unique bytecode ID. */
+
+int
+bc_new_uid ()
+{
+ static int bc_uid = 0;
+
+ return (++bc_uid);
+}
+
+/* Generate code to jump to LABEL if OP1 and OP2 are equal. */
+
+static void
+do_jump_if_equal (op1, op2, label, unsignedp)
+ rtx op1, op2, label;
+ int unsignedp;
+{
+ if (GET_CODE (op1) == CONST_INT
+ && GET_CODE (op2) == CONST_INT)
+ {
+ if (INTVAL (op1) == INTVAL (op2))
+ emit_jump (label);
+ }
+ else
+ {
+ enum machine_mode mode = GET_MODE (op1);
+ if (mode == VOIDmode)
+ mode = GET_MODE (op2);
+ emit_cmp_insn (op1, op2, EQ, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn (gen_beq (label));
+ }
+}
+
+/* Not all case values are encountered equally. This function
+ uses a heuristic to weight case labels, in cases where that
+ looks like a reasonable thing to do.
+
+ Right now, all we try to guess is text, and we establish the
+ following weights:
+
+ chars above space: 16
+ digits: 16
+ default: 12
+ space, punct: 8
+ tab: 4
+ newline: 2
+ other "\" chars: 1
+ remaining chars: 0
+
+ If we find any cases in the switch that are not either -1 or in the range
+ of valid ASCII characters, or are control characters other than those
+ commonly used with "\", don't treat this switch as scanning text.
+
+ Return 1 if these nodes are suitable for cost estimation, otherwise
+ return 0. */
+
+static int
+estimate_case_costs (node)
+ case_node_ptr node;
+{
+ tree min_ascii = build_int_2 (-1, -1);
+ tree max_ascii = convert (TREE_TYPE (node->high), build_int_2 (127, 0));
+ case_node_ptr n;
+ int i;
+
+ /* If we haven't already made the cost table, make it now. Note that the
+ lower bound of the table is -1, not zero. */
+
+ if (cost_table == NULL)
+ {
+ cost_table = ((short *) xmalloc (129 * sizeof (short))) + 1;
+ bzero ((char *) (cost_table - 1), 129 * sizeof (short));
+
+ for (i = 0; i < 128; i++)
+ {
+ if (isalnum (i))
+ cost_table[i] = 16;
+ else if (ispunct (i))
+ cost_table[i] = 8;
+ else if (iscntrl (i))
+ cost_table[i] = -1;
+ }
+
+ cost_table[' '] = 8;
+ cost_table['\t'] = 4;
+ cost_table['\0'] = 4;
+ cost_table['\n'] = 2;
+ cost_table['\f'] = 1;
+ cost_table['\v'] = 1;
+ cost_table['\b'] = 1;
+ }
+
+ /* See if all the case expressions look like text. It is text if every
+ constant is >= -1 and the highest constant is <= 127. Do all comparisons
+ as signed arithmetic since we don't want to ever access cost_table with a
+ value less than -1. Also check that none of the constants in a range
+ are strange control characters. */
+
+ for (n = node; n; n = n->right)
+ {
+ if ((INT_CST_LT (n->low, min_ascii)) || INT_CST_LT (max_ascii, n->high))
+ return 0;
+
+ for (i = TREE_INT_CST_LOW (n->low); i <= TREE_INT_CST_LOW (n->high); i++)
+ if (cost_table[i] < 0)
+ return 0;
+ }
+
+ /* All interesting values are within the range of interesting
+ ASCII characters. */
+ return 1;
+}
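+
+/* Worked example (editorial): for case labels 'a', 'z', '0' and '\t',
+   every constant lies in [-1, 127] and none has a negative table entry,
+   so this returns 1; the letters and the digit weigh 16 and the tab 4,
+   which skews the pivot chosen by balance_case_nodes toward the
+   alphanumeric labels.  A label such as '\003' has cost -1 in the table
+   and makes this return 0.  */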
+
+/* Scan an ordered list of case nodes
+ combining those with consecutive values or ranges.
+
+ Eg. three separate entries 1: 2: 3: become one entry 1..3: */
+
+static void
+group_case_nodes (head)
+ case_node_ptr head;
+{
+ case_node_ptr node = head;
+
+ while (node)
+ {
+ rtx lb = next_real_insn (label_rtx (node->code_label));
+ case_node_ptr np = node;
+
+ /* Try to group the successors of NODE with NODE. */
+ while (((np = np->right) != 0)
+ /* Do they jump to the same place? */
+ && next_real_insn (label_rtx (np->code_label)) == lb
+ /* Are their ranges consecutive? */
+ && tree_int_cst_equal (np->low,
+ fold (build (PLUS_EXPR,
+ TREE_TYPE (node->high),
+ node->high,
+ integer_one_node)))
+ /* An overflow is not consecutive. */
+ && tree_int_cst_lt (node->high,
+ fold (build (PLUS_EXPR,
+ TREE_TYPE (node->high),
+ node->high,
+ integer_one_node))))
+ {
+ node->high = np->high;
+ }
+ /* NP is the first node after NODE which can't be grouped with it.
+ Delete the nodes in between, and move on to that node. */
+ node->right = np;
+ node = np;
+ }
+}
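+
+/* Worked example (editorial): if cases 4 and 5 branch to the same first
+   real insn while case 7 branches elsewhere, the chain 4: 5: 7: becomes
+   4..5: 7:.  The overflow check matters at the type's maximum value,
+   where high + 1 wraps around and must not be treated as consecutive.  */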
+
+/* Take an ordered list of case nodes
+ and transform them into a near optimal binary tree,
+ on the assumption that any target code selection value is as
+ likely as any other.
+
+ The transformation is performed by splitting the ordered
+ list into two equal sections plus a pivot. The parts are
+ then attached to the pivot as left and right branches. Each
+ branch is then transformed recursively. */
+
+static void
+balance_case_nodes (head, parent)
+ case_node_ptr *head;
+ case_node_ptr parent;
+{
+ register case_node_ptr np;
+
+ np = *head;
+ if (np)
+ {
+ int cost = 0;
+ int i = 0;
+ int ranges = 0;
+ register case_node_ptr *npp;
+ case_node_ptr left;
+
+ /* Count the number of entries on branch. Also count the ranges. */
+
+ while (np)
+ {
+ if (!tree_int_cst_equal (np->low, np->high))
+ {
+ ranges++;
+ if (use_cost_table)
+ cost += cost_table[TREE_INT_CST_LOW (np->high)];
+ }
+
+ if (use_cost_table)
+ cost += cost_table[TREE_INT_CST_LOW (np->low)];
+
+ i++;
+ np = np->right;
+ }
+
+ if (i > 2)
+ {
+ /* Split this list if it is long enough for that to help. */
+ npp = head;
+ left = *npp;
+ if (use_cost_table)
+ {
+ /* Find the place in the list that bisects the list's total cost;
+ here I gets half the total cost. */
+ int n_moved = 0;
+ i = (cost + 1) / 2;
+ while (1)
+ {
+ /* Skip nodes while their cost does not reach that amount. */
+ if (!tree_int_cst_equal ((*npp)->low, (*npp)->high))
+ i -= cost_table[TREE_INT_CST_LOW ((*npp)->high)];
+ i -= cost_table[TREE_INT_CST_LOW ((*npp)->low)];
+ if (i <= 0)
+ break;
+ npp = &(*npp)->right;
+ n_moved += 1;
+ }
+ if (n_moved == 0)
+ {
+ /* Leave this branch lopsided, but optimize left-hand
+ side and fill in `parent' fields for right-hand side. */
+ np = *head;
+ np->parent = parent;
+ balance_case_nodes (&np->left, np);
+ for (; np->right; np = np->right)
+ np->right->parent = np;
+ return;
+ }
+ }
+ /* If there are just three nodes, split at the middle one. */
+ else if (i == 3)
+ npp = &(*npp)->right;
+ else
+ {
+ /* Find the place in the list that bisects the list's total cost,
+ where ranges count as 2.
+ Here I gets half the total cost. */
+ i = (i + ranges + 1) / 2;
+ while (1)
+ {
+ /* Skip nodes while their cost does not reach that amount. */
+ if (!tree_int_cst_equal ((*npp)->low, (*npp)->high))
+ i--;
+ i--;
+ if (i <= 0)
+ break;
+ npp = &(*npp)->right;
+ }
+ }
+ *head = np = *npp;
+ *npp = 0;
+ np->parent = parent;
+ np->left = left;
+
+ /* Optimize each of the two split parts. */
+ balance_case_nodes (&np->left, np);
+ balance_case_nodes (&np->right, np);
+ }
+ else
+ {
+ /* Else leave this branch as one level,
+ but fill in `parent' fields. */
+ np = *head;
+ np->parent = parent;
+ for (; np->right; np = np->right)
+ np->right->parent = np;
+ }
+ }
+}
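+
+/* Worked example (editorial): without a cost table, an ordered chain
+   1: 3: 5: 7: 9: is split at its middle node, yielding 5 as the root
+   with 1 (whose right child is 3) on the left and 7 (whose right child
+   is 9) on the right, so a lookup walks a balanced tree rather than the
+   whole five-entry list.  */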
+
+/* Search the parent sections of the case node tree
+ to see if a test for the lower bound of NODE would be redundant.
+ INDEX_TYPE is the type of the index expression.
+
+ The instructions to generate the case decision tree are
+ output in the same order as nodes are processed so it is
+ known that if a parent node checks the range of the current
+ node minus one that the current node is bounded at its lower
+ span. Thus the test would be redundant. */
+
+static int
+node_has_low_bound (node, index_type)
+ case_node_ptr node;
+ tree index_type;
+{
+ tree low_minus_one;
+ case_node_ptr pnode;
+
+ /* If the lower bound of this node is the lowest value in the index type,
+ we need not test it. */
+
+ if (tree_int_cst_equal (node->low, TYPE_MIN_VALUE (index_type)))
+ return 1;
+
+ /* If this node has a left branch, the value at the left must be less
+ than that at this node, so it cannot be bounded at the bottom and
+ we need not bother testing any further. */
+
+ if (node->left)
+ return 0;
+
+ low_minus_one = fold (build (MINUS_EXPR, TREE_TYPE (node->low),
+ node->low, integer_one_node));
+
+ /* If the subtraction above overflowed, we can't verify anything.
+ Otherwise, look for a parent that tests our value - 1. */
+
+ if (! tree_int_cst_lt (low_minus_one, node->low))
+ return 0;
+
+ for (pnode = node->parent; pnode; pnode = pnode->parent)
+ if (tree_int_cst_equal (low_minus_one, pnode->high))
+ return 1;
+
+ return 0;
+}
+
+/* Search the parent sections of the case node tree
+ to see if a test for the upper bound of NODE would be redundant.
+ INDEX_TYPE is the type of the index expression.
+
+ The instructions to generate the case decision tree are
+ output in the same order as nodes are processed so it is
+ known that if a parent node checks the range of the current
+ node plus one that the current node is bounded at its upper
+ span. Thus the test would be redundant. */
+
+static int
+node_has_high_bound (node, index_type)
+ case_node_ptr node;
+ tree index_type;
+{
+ tree high_plus_one;
+ case_node_ptr pnode;
+
+ /* If the upper bound of this node is the highest value in the type
+ of the index expression, we need not test against it. */
+
+ if (tree_int_cst_equal (node->high, TYPE_MAX_VALUE (index_type)))
+ return 1;
+
+ /* If this node has a right branch, the value at the right must be greater
+ than that at this node, so it cannot be bounded at the top and
+ we need not bother testing any further. */
+
+ if (node->right)
+ return 0;
+
+ high_plus_one = fold (build (PLUS_EXPR, TREE_TYPE (node->high),
+ node->high, integer_one_node));
+
+ /* If the addition above overflowed, we can't verify anything.
+ Otherwise, look for a parent that tests our value + 1. */
+
+ if (! tree_int_cst_lt (node->high, high_plus_one))
+ return 0;
+
+ for (pnode = node->parent; pnode; pnode = pnode->parent)
+ if (tree_int_cst_equal (high_plus_one, pnode->low))
+ return 1;
+
+ return 0;
+}
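+
+/* Example (editorial): if the node for case 6 is the right child of a
+   parent that tested against 5, then 5 == 6 - 1, node_has_low_bound
+   returns 1, and no `index >= 6' check is emitted for that leaf.
+   node_has_high_bound is the mirror image, looking for a parent that
+   tested value + 1.  */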
+
+/* Search the parent sections of the
+ case node tree to see if both tests for the upper and lower
+ bounds of NODE would be redundant. */
+
+static int
+node_is_bounded (node, index_type)
+ case_node_ptr node;
+ tree index_type;
+{
+ return (node_has_low_bound (node, index_type)
+ && node_has_high_bound (node, index_type));
+}
+
+/* Emit an unconditional jump to LABEL unless it would be dead code. */
+
+static void
+emit_jump_if_reachable (label)
+ rtx label;
+{
+ if (GET_CODE (get_last_insn ()) != BARRIER)
+ emit_jump (label);
+}
+
+/* Emit step-by-step code to select a case for the value of INDEX.
+ The decision tree generated this way follows the form of the
+ case-node binary tree NODE, whose nodes represent test conditions.
+ INDEX_TYPE is the type of the index of the switch.
+
+ Care is taken to prune redundant tests from the decision tree
+ by detecting any boundary conditions already checked by
+ emitted rtx. (See node_has_high_bound, node_has_low_bound
+ and node_is_bounded, above.)
+
+ Where the test conditions can be shown to be redundant we emit
+ an unconditional jump to the target code. As a further
+ optimization, the subordinates of a tree node are examined to
+ check for bounded nodes. In this case conditional and/or
+ unconditional jumps as a result of the boundary check for the
+ current node are arranged to target the subordinates' associated
+ code for out-of-bound conditions on the current node.
+
+ We can assume that when control reaches the code generated here,
+ the index value has already been compared with the parents
+ of this node, and determined to be on the same side of each parent
+ as this node is. Thus, if this node tests for the value 51,
+ and a parent tested for 52, we don't need to consider
+ the possibility of a value greater than 51. If another parent
+ tests for the value 50, then this node need not test anything. */
+
+static void
+emit_case_nodes (index, node, default_label, index_type)
+ rtx index;
+ case_node_ptr node;
+ rtx default_label;
+ tree index_type;
+{
+ /* If INDEX has an unsigned type, we must make unsigned branches. */
+ int unsignedp = TREE_UNSIGNED (index_type);
+ typedef rtx rtx_function ();
+ rtx_function *gen_bgt_pat = unsignedp ? gen_bgtu : gen_bgt;
+ rtx_function *gen_bge_pat = unsignedp ? gen_bgeu : gen_bge;
+ rtx_function *gen_blt_pat = unsignedp ? gen_bltu : gen_blt;
+ rtx_function *gen_ble_pat = unsignedp ? gen_bleu : gen_ble;
+ enum machine_mode mode = GET_MODE (index);
+
+ /* See if our parents have already tested everything for us.
+ If they have, emit an unconditional jump for this node. */
+ if (node_is_bounded (node, index_type))
+ emit_jump (label_rtx (node->code_label));
+
+ else if (tree_int_cst_equal (node->low, node->high))
+ {
+ /* Node is single valued. First see if the index expression matches
+ this node and then check our children, if any. */
+
+ do_jump_if_equal (index, expand_expr (node->low, NULL_RTX, VOIDmode, 0),
+ label_rtx (node->code_label), unsignedp);
+
+ if (node->right != 0 && node->left != 0)
+ {
+ /* This node has children on both sides.
+ Dispatch to one side or the other
+ by comparing the index value with this node's value.
+ If one subtree is bounded, check that one first,
+ so we can avoid real branches in the tree. */
+
+ if (node_is_bounded (node->right, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ GT, NULL_RTX, mode, unsignedp, 0);
+
+ emit_jump_insn ((*gen_bgt_pat) (label_rtx (node->right->code_label)));
+ emit_case_nodes (index, node->left, default_label, index_type);
+ }
+
+ else if (node_is_bounded (node->left, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ LT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_blt_pat) (label_rtx (node->left->code_label)));
+ emit_case_nodes (index, node->right, default_label, index_type);
+ }
+
+ else
+ {
+ /* Neither node is bounded. First distinguish the two sides;
+ then emit the code for one side at a time. */
+
+ tree test_label
+ = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ /* See if the value is on the right. */
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ GT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_bgt_pat) (label_rtx (test_label)));
+
+ /* Value must be on the left.
+ Handle the left-hand subtree. */
+ emit_case_nodes (index, node->left, default_label, index_type);
+ /* If left-hand subtree does nothing,
+ go to default. */
+ emit_jump_if_reachable (default_label);
+
+ /* Code branches here for the right-hand subtree. */
+ expand_label (test_label);
+ emit_case_nodes (index, node->right, default_label, index_type);
+ }
+ }
+
+ else if (node->right != 0 && node->left == 0)
+ {
+ /* Here we have a right child but no left, so we issue a conditional
+ branch to default and process the right child.
+
+ Omit the conditional branch to default if it avoids only one
+ right child; it costs too much space to save so little time. */
+
+ if (node->right->right || node->right->left
+ || !tree_int_cst_equal (node->right->low, node->right->high))
+ {
+ if (!node_has_low_bound (node, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ LT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_blt_pat) (default_label));
+ }
+
+ emit_case_nodes (index, node->right, default_label, index_type);
+ }
+ else
+ /* We cannot process node->right normally
+ since we haven't ruled out the numbers less than
+ this node's value. So handle node->right explicitly. */
+ do_jump_if_equal (index,
+ expand_expr (node->right->low, NULL_RTX,
+ VOIDmode, 0),
+ label_rtx (node->right->code_label), unsignedp);
+ }
+
+ else if (node->right == 0 && node->left != 0)
+ {
+ /* Just one subtree, on the left. */
+
+#if 0 /* The following code and comment were formerly part
+ of the condition here, but they didn't work
+ and I don't understand what the idea was. -- rms. */
+ /* If our "most probable entry" is less probable
+ than the default label, emit a jump to
+ the default label using condition codes
+ already lying around. With no right branch,
+ a branch-greater-than will get us to the default
+ label correctly. */
+ if (use_cost_table
+ && cost_table[TREE_INT_CST_LOW (node->high)] < 12)
+ ;
+#endif /* 0 */
+ if (node->left->left || node->left->right
+ || !tree_int_cst_equal (node->left->low, node->left->high))
+ {
+ if (!node_has_high_bound (node, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ GT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_bgt_pat) (default_label));
+ }
+
+ emit_case_nodes (index, node->left, default_label, index_type);
+ }
+ else
+ /* We cannot process node->left normally
+ since we haven't ruled out the numbers greater than
+ this node's value. So handle node->left explicitly. */
+ do_jump_if_equal (index,
+ expand_expr (node->left->low, NULL_RTX,
+ VOIDmode, 0),
+ label_rtx (node->left->code_label), unsignedp);
+ }
+ }
+ else
+ {
+ /* Node is a range. These cases are very similar to those for a single
+ value, except that we do not start by testing whether this node
+ is the one to branch to. */
+
+ if (node->right != 0 && node->left != 0)
+ {
+ /* Node has subtrees on both sides.
+ If the right-hand subtree is bounded,
+ test for it first, since we can go straight there.
+ Otherwise, we need to make a branch in the control structure,
+ then handle the two subtrees. */
+ tree test_label = 0;
+
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ GT, NULL_RTX, mode, unsignedp, 0);
+
+ if (node_is_bounded (node->right, index_type))
+ /* Right hand node is fully bounded so we can eliminate any
+ testing and branch directly to the target code. */
+ emit_jump_insn ((*gen_bgt_pat) (label_rtx (node->right->code_label)));
+ else
+ {
+ /* Right hand node requires testing.
+ Branch to a label where we will handle it later. */
+
+ test_label = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+ emit_jump_insn ((*gen_bgt_pat) (label_rtx (test_label)));
+ }
+
+ /* Value belongs to this node or to the left-hand subtree. */
+
+ emit_cmp_insn (index, expand_expr (node->low, NULL_RTX, VOIDmode, 0),
+ GE, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_bge_pat) (label_rtx (node->code_label)));
+
+ /* Handle the left-hand subtree. */
+ emit_case_nodes (index, node->left, default_label, index_type);
+
+ /* If right node had to be handled later, do that now. */
+
+ if (test_label)
+ {
+ /* If the left-hand subtree fell through,
+ don't let it fall into the right-hand subtree. */
+ emit_jump_if_reachable (default_label);
+
+ expand_label (test_label);
+ emit_case_nodes (index, node->right, default_label, index_type);
+ }
+ }
+
+ else if (node->right != 0 && node->left == 0)
+ {
+ /* Deal with values to the left of this node,
+ if they are possible. */
+ if (!node_has_low_bound (node, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->low, NULL_RTX,
+ VOIDmode, 0),
+ LT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_blt_pat) (default_label));
+ }
+
+ /* Value belongs to this node or to the right-hand subtree. */
+
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ LE, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_ble_pat) (label_rtx (node->code_label)));
+
+ emit_case_nodes (index, node->right, default_label, index_type);
+ }
+
+ else if (node->right == 0 && node->left != 0)
+ {
+ /* Deal with values to the right of this node,
+ if they are possible. */
+ if (!node_has_high_bound (node, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ GT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_bgt_pat) (default_label));
+ }
+
+ /* Value belongs to this node or to the left-hand subtree. */
+
+ emit_cmp_insn (index, expand_expr (node->low, NULL_RTX, VOIDmode, 0),
+ GE, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_bge_pat) (label_rtx (node->code_label)));
+
+ emit_case_nodes (index, node->left, default_label, index_type);
+ }
+
+ else
+ {
+ /* Node has no children so we check low and high bounds to remove
+ redundant tests. Only one of the bounds can exist,
+ since otherwise this node is bounded--a case tested already. */
+
+ if (!node_has_high_bound (node, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->high, NULL_RTX,
+ VOIDmode, 0),
+ GT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_bgt_pat) (default_label));
+ }
+
+ if (!node_has_low_bound (node, index_type))
+ {
+ emit_cmp_insn (index, expand_expr (node->low, NULL_RTX,
+ VOIDmode, 0),
+ LT, NULL_RTX, mode, unsignedp, 0);
+ emit_jump_insn ((*gen_blt_pat) (default_label));
+ }
+
+ emit_jump (label_rtx (node->code_label));
+ }
+ }
+}
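+
+/* Illustration (editorial, pseudo-assembly): for a leaf range 10..20
+   whose bounds are both already established by parents, only
+   `jump L10_20' comes out; for an unbounded leaf the emitted RTL
+   corresponds to
+
+	cmp index, 20 ; bgt Ldefault
+	cmp index, 10 ; blt Ldefault
+	jump L10_20
+
+   exactly the two tests that node_has_high_bound and
+   node_has_low_bound can prune.  */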
+
+/* These routines are used by the loop unrolling code. They copy BLOCK trees
+ so that the debugging info will be correct for the unrolled loop. */
+
+/* Indexed by block number, contains a pointer to the N'th block node. */
+
+static tree *block_vector;
+
+void
+find_loop_tree_blocks ()
+{
+ tree block = DECL_INITIAL (current_function_decl);
+
+ /* The first block is for the function body, and does not have
+ corresponding block notes. Don't include it in the block vector. */
+ block = BLOCK_SUBBLOCKS (block);
+
+ block_vector = identify_blocks (block, get_insns ());
+}
+
+void
+unroll_block_trees ()
+{
+ tree block = DECL_INITIAL (current_function_decl);
+
+ reorder_blocks (block_vector, block, get_insns ());
+}
+
diff --git a/gnu/usr.bin/cc/cc_int/stor-layout.c b/gnu/usr.bin/cc/cc_int/stor-layout.c
new file mode 100644
index 0000000..d2c6f28
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/stor-layout.c
@@ -0,0 +1,1176 @@
+/* C-compiler utilities for types and variables storage layout
+ Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "config.h"
+#include <stdio.h>
+
+#include "tree.h"
+#include "function.h"
+
+#define CEIL(x,y) (((x) + (y) - 1) / (y))
+
+/* Data type for the expressions representing sizes of data types.
+ It is the first integer type laid out.
+ In C, this is int. */
+
+tree sizetype;
+
+/* An integer constant with value 0 whose type is sizetype. */
+
+tree size_zero_node;
+
+/* An integer constant with value 1 whose type is sizetype. */
+
+tree size_one_node;
+
+/* If nonzero, this is an upper limit on alignment of structure fields.
+ The value is measured in bits. */
+int maximum_field_alignment;
+
+#define GET_MODE_ALIGNMENT(MODE) \
+ MIN (BIGGEST_ALIGNMENT, \
+ MAX (1, (GET_MODE_UNIT_SIZE (MODE) * BITS_PER_UNIT)))
+
+static enum machine_mode smallest_mode_for_size PROTO((unsigned int,
+ enum mode_class));
+static tree layout_record PROTO((tree));
+static void layout_union PROTO((tree));
+
+/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */
+
+static tree pending_sizes;
+
+/* Nonzero means cannot safely call expand_expr now,
+ so put variable sizes onto `pending_sizes' instead. */
+
+int immediate_size_expand;
+
+tree
+get_pending_sizes ()
+{
+ tree chain = pending_sizes;
+ tree t;
+
+ /* Put each SAVE_EXPR into the current function. */
+ for (t = chain; t; t = TREE_CHAIN (t))
+ SAVE_EXPR_CONTEXT (TREE_VALUE (t)) = current_function_decl;
+ pending_sizes = 0;
+ return chain;
+}
+
+/* Given a size SIZE that isn't constant, return a SAVE_EXPR
+ to serve as the actual size-expression for a type or decl. */
+
+tree
+variable_size (size)
+ tree size;
+{
+ /* If the language-processor is to take responsibility for variable-sized
+ items (e.g., languages which have elaboration procedures like Ada),
+ just return SIZE unchanged. */
+ if (global_bindings_p () < 0)
+ return size;
+
+ size = save_expr (size);
+
+ if (global_bindings_p ())
+ {
+ if (TREE_CONSTANT (size))
+ error ("type size can't be explicitly evaluated");
+ else
+ error ("variable-size type declared outside of any function");
+
+ return size_int (1);
+ }
+
+ if (immediate_size_expand)
+ /* NULL_RTX is not defined; neither is the rtx type.
+ Also, we would like to pass const0_rtx here, but don't have it. */
+ expand_expr (size, expand_expr (integer_zero_node, NULL_PTR, VOIDmode, 0),
+ VOIDmode, 0);
+ else
+ pending_sizes = tree_cons (NULL_TREE, size, pending_sizes);
+
+ return size;
+}
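+
+/* Example (editorial): for a GNU C variable-length array such as
+   `char buf[n];' inside a function, the non-constant size expression
+   built from `n' reaches variable_size, is wrapped in a SAVE_EXPR so
+   it is computed only once, and is either expanded immediately or
+   queued on `pending_sizes' for get_pending_sizes to hand back.  */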
+
+#ifndef MAX_FIXED_MODE_SIZE
+#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
+#endif
+
+/* Return the machine mode to use for a nonscalar of SIZE bits.
+ The mode must be in class CLASS, and have exactly that many bits.
+ If LIMIT is nonzero, modes of wider than MAX_FIXED_MODE_SIZE will not
+ be used. */
+
+enum machine_mode
+mode_for_size (size, class, limit)
+ unsigned int size;
+ enum mode_class class;
+ int limit;
+{
+ register enum machine_mode mode;
+
+ if (limit && size > MAX_FIXED_MODE_SIZE)
+ return BLKmode;
+
+ /* Get the first mode which has this size, in the specified class. */
+ for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_BITSIZE (mode) == size)
+ return mode;
+
+ return BLKmode;
+}
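+
+/* Usage sketch (editorial; assumes a target with 8-bit QImode, 16-bit
+   HImode, 32-bit SImode, no 24-bit mode, and MAX_FIXED_MODE_SIZE 64):
+
+	mode_for_size (32, MODE_INT, 0)    returns SImode
+	mode_for_size (24, MODE_INT, 0)    returns BLKmode (no exact fit)
+	mode_for_size (128, MODE_INT, 1)   returns BLKmode (over the limit)
+
+   while smallest_mode_for_size (24, MODE_INT), below, returns SImode,
+   the narrowest mode with at least 24 bits.  */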
+
+/* Similar, but never return BLKmode; return the narrowest mode that
+ contains at least the requested number of bits. */
+
+static enum machine_mode
+smallest_mode_for_size (size, class)
+ unsigned int size;
+ enum mode_class class;
+{
+ register enum machine_mode mode;
+
+ /* Get the first mode which has at least this size, in the
+ specified class. */
+ for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_BITSIZE (mode) >= size)
+ return mode;
+
+ abort ();
+}
+
+/* Return the value of VALUE, rounded up to a multiple of DIVISOR. */
+
+tree
+round_up (value, divisor)
+ tree value;
+ int divisor;
+{
+ return size_binop (MULT_EXPR,
+ size_binop (CEIL_DIV_EXPR, value, size_int (divisor)),
+ size_int (divisor));
+}
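+
+/* Worked example (editorial): round_up (size_int (37), 8) builds
+   CEIL_DIV_EXPR (37, 8) * 8; size_binop folds the constants, giving
+   5 * 8 = 40.  */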
+
+/* Set the size, mode and alignment of a ..._DECL node.
+ TYPE_DECL does need this for C++.
+ Note that LABEL_DECL and CONST_DECL nodes do not need this,
+ and FUNCTION_DECL nodes have them set up in a special (and simple) way.
+ Don't call layout_decl for them.
+
+ KNOWN_ALIGN is the amount of alignment we can assume this
+ decl has with no special effort. It is relevant only for FIELD_DECLs
+ and depends on the previous fields.
+ All that matters about KNOWN_ALIGN is which powers of 2 divide it.
+ If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
+ the record will be aligned to suit. */
+
+void
+layout_decl (decl, known_align)
+ tree decl;
+ unsigned known_align;
+{
+ register tree type = TREE_TYPE (decl);
+ register enum tree_code code = TREE_CODE (decl);
+ int spec_size = DECL_FIELD_SIZE (decl);
+
+ if (code == CONST_DECL)
+ return;
+
+ if (code != VAR_DECL && code != PARM_DECL && code != RESULT_DECL
+ && code != FIELD_DECL && code != TYPE_DECL)
+ abort ();
+
+ if (type == error_mark_node)
+ {
+ type = void_type_node;
+ spec_size = 0;
+ }
+
+ /* Usually the size and mode come from the data type without change. */
+
+ DECL_MODE (decl) = TYPE_MODE (type);
+ TREE_UNSIGNED (decl) = TREE_UNSIGNED (type);
+ if (DECL_SIZE (decl) == 0)
+ DECL_SIZE (decl) = TYPE_SIZE (type);
+
+ if (code == FIELD_DECL && DECL_BIT_FIELD (decl))
+ {
+ /* This is a bit-field. We don't know how to handle
+ them except for integral types, and front ends should
+ never generate them otherwise. */
+
+ if (! INTEGRAL_TYPE_P (type))
+ abort ();
+
+ if (spec_size == 0 && DECL_NAME (decl) != 0)
+ abort ();
+
+ /* Size is specified number of bits. */
+ DECL_SIZE (decl) = size_int (spec_size);
+ }
+ /* Force alignment required for the data type.
+ But if the decl itself wants greater alignment, don't override that.
+ Likewise, if the decl is packed, don't override it. */
+ else if (DECL_ALIGN (decl) == 0
+ || (! DECL_PACKED (decl) && TYPE_ALIGN (type) > DECL_ALIGN (decl)))
+ DECL_ALIGN (decl) = TYPE_ALIGN (type);
+
+ /* See if we can use an ordinary integer mode for a bit-field. */
+ /* Conditions are: a fixed size that is correct for another mode
+ and occupancy of a complete byte or bytes on a proper boundary. */
+ if (code == FIELD_DECL)
+ {
+ DECL_BIT_FIELD_TYPE (decl) = DECL_BIT_FIELD (decl) ? type : 0;
+ if (maximum_field_alignment != 0)
+ DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), maximum_field_alignment);
+ }
+
+ if (DECL_BIT_FIELD (decl)
+ && TYPE_SIZE (type) != 0
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
+ {
+ register enum machine_mode xmode
+ = mode_for_size (TREE_INT_CST_LOW (DECL_SIZE (decl)), MODE_INT, 1);
+
+ if (xmode != BLKmode
+ && known_align % GET_MODE_ALIGNMENT (xmode) == 0)
+ {
+ DECL_ALIGN (decl) = MAX (GET_MODE_ALIGNMENT (xmode),
+ DECL_ALIGN (decl));
+ DECL_MODE (decl) = xmode;
+ DECL_SIZE (decl) = size_int (GET_MODE_BITSIZE (xmode));
+ /* This no longer needs to be accessed as a bit field. */
+ DECL_BIT_FIELD (decl) = 0;
+ }
+ }
+
+ /* Evaluate nonconstant size only once, either now or as soon as safe. */
+ if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
+ DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
+}
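+
+/* Example (editorial): on a typical 32-bit target a byte-aligned field
+   `int f : 8;' passes the test above with QImode, so its mode becomes
+   QImode, its size 8 bits, and DECL_BIT_FIELD is cleared -- the field
+   is thereafter accessed as an ordinary byte.  */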
+
+/* Lay out a RECORD_TYPE type (a C struct).
+ This means laying out the fields, determining their positions,
+ and computing the overall size and required alignment of the record.
+ Note that if you set the TYPE_ALIGN before calling this
+ then the struct is aligned to at least that boundary.
+
+ If the type has basetypes, you must call layout_basetypes
+ before calling this function.
+
+ The return value is a list of static members of the record.
+ They still need to be laid out. */
+
+static tree
+layout_record (rec)
+ tree rec;
+{
+ register tree field;
+#ifdef STRUCTURE_SIZE_BOUNDARY
+ unsigned record_align = MAX (STRUCTURE_SIZE_BOUNDARY, TYPE_ALIGN (rec));
+#else
+ unsigned record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (rec));
+#endif
+ /* These must be laid out *after* the record is. */
+ tree pending_statics = NULL_TREE;
+ /* Record size so far is CONST_SIZE + VAR_SIZE bits,
+ where CONST_SIZE is an integer
+ and VAR_SIZE is a tree expression.
+ If VAR_SIZE is null, the size is just CONST_SIZE.
+ Naturally we try to avoid using VAR_SIZE. */
+ register int const_size = 0;
+ register tree var_size = 0;
+ /* Once we start using VAR_SIZE, this is the maximum alignment
+ that we know VAR_SIZE has. */
+ register int var_align = BITS_PER_UNIT;
+
+
+ for (field = TYPE_FIELDS (rec); field; field = TREE_CHAIN (field))
+ {
+ register int known_align = var_size ? var_align : const_size;
+ register int desired_align;
+
+ /* If FIELD is static, then treat it like a separate variable,
+ not really like a structure field.
+ If it is a FUNCTION_DECL, it's a method.
+ In both cases, all we do is lay out the decl,
+ and we do it *after* the record is laid out. */
+
+ if (TREE_STATIC (field))
+ {
+ pending_statics = tree_cons (NULL_TREE, field, pending_statics);
+ continue;
+ }
+ /* Enumerators and enum types which are local to this class need not
+ be laid out. Likewise for initialized constant fields. */
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ /* Lay out the field so we know what alignment it needs.
+ For a packed field, use the alignment as specified,
+ disregarding what the type would want. */
+ if (DECL_PACKED (field))
+ desired_align = DECL_ALIGN (field);
+ layout_decl (field, known_align);
+ if (! DECL_PACKED (field))
+ desired_align = DECL_ALIGN (field);
+ /* Some targets (e.g. VMS) limit struct field alignment
+ to a lower boundary than alignment of variables. */
+#ifdef BIGGEST_FIELD_ALIGNMENT
+ desired_align = MIN (desired_align, BIGGEST_FIELD_ALIGNMENT);
+#endif
+
+ /* Record must have at least as much alignment as any field.
+ Otherwise, the alignment of the field within the record
+ is meaningless. */
+
+#ifndef PCC_BITFIELD_TYPE_MATTERS
+ record_align = MAX (record_align, desired_align);
+#else
+ if (PCC_BITFIELD_TYPE_MATTERS && TREE_TYPE (field) != error_mark_node
+ && DECL_BIT_FIELD_TYPE (field)
+ && ! integer_zerop (TYPE_SIZE (TREE_TYPE (field))))
+ {
+ /* For these machines, a zero-length field does not
+ affect the alignment of the structure as a whole.
+ It does, however, affect the alignment of the next field
+ within the structure. */
+ if (! integer_zerop (DECL_SIZE (field)))
+ record_align = MAX (record_align, desired_align);
+ else if (! DECL_PACKED (field))
+ desired_align = TYPE_ALIGN (TREE_TYPE (field));
+ /* A named bit field of declared type `int'
+ forces the entire structure to have `int' alignment. */
+ if (DECL_NAME (field) != 0)
+ {
+ int type_align = TYPE_ALIGN (TREE_TYPE (field));
+ if (maximum_field_alignment != 0)
+ type_align = MIN (type_align, maximum_field_alignment);
+
+ record_align = MAX (record_align, type_align);
+ }
+ }
+ else
+ record_align = MAX (record_align, desired_align);
+#endif
+
+ /* Does this field automatically have alignment it needs
+ by virtue of the fields that precede it and the record's
+ own alignment? */
+
+ if (const_size % desired_align != 0
+ || (var_align % desired_align != 0
+ && var_size != 0))
+ {
+ /* No, we need to skip space before this field.
+ Bump the cumulative size to multiple of field alignment. */
+
+ if (var_size == 0
+ || var_align % desired_align == 0)
+ const_size
+ = CEIL (const_size, desired_align) * desired_align;
+ else
+ {
+ if (const_size > 0)
+ var_size = size_binop (PLUS_EXPR, var_size,
+ size_int (const_size));
+ const_size = 0;
+ var_size = round_up (var_size, desired_align);
+ var_align = MIN (var_align, desired_align);
+ }
+ }
+
+#ifdef PCC_BITFIELD_TYPE_MATTERS
+ if (PCC_BITFIELD_TYPE_MATTERS
+ && TREE_CODE (field) == FIELD_DECL
+ && TREE_TYPE (field) != error_mark_node
+ && DECL_BIT_FIELD_TYPE (field)
+ && !DECL_PACKED (field)
+ /* If #pragma pack is in effect, turn off this feature. */
+ && maximum_field_alignment == 0
+ && !integer_zerop (DECL_SIZE (field)))
+ {
+ int type_align = TYPE_ALIGN (TREE_TYPE (field));
+ register tree dsize = DECL_SIZE (field);
+ int field_size = TREE_INT_CST_LOW (dsize);
+
+ /* A bit field may not span the unit of alignment of its type.
+ Advance to next boundary if necessary. */
+ /* ??? There is some uncertainty here as to what
+ should be done if type_align is less than the width of the type.
+ That can happen because the width exceeds BIGGEST_ALIGNMENT
+ or because it exceeds maximum_field_alignment. */
+ if (const_size / type_align
+ != (const_size + field_size - 1) / type_align)
+ const_size = CEIL (const_size, type_align) * type_align;
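+
+	  /* Worked example (illustrative): with type_align == 32, a
+	     5-bit field at const_size == 30 would occupy bits 30..34
+	     and cross a 32-bit boundary (30/32 != 34/32), so the test
+	     above fires and const_size is advanced to 32 before the
+	     field is placed. */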
+ }
+#endif
+
+/* No existing machine description uses this parameter.
+ So in this respect I have made it identical to PCC_BITFIELD_TYPE_MATTERS. */
+#ifdef BITFIELD_NBYTES_LIMITED
+ if (BITFIELD_NBYTES_LIMITED
+ && TREE_CODE (field) == FIELD_DECL
+ && TREE_TYPE (field) != error_mark_node
+ && DECL_BIT_FIELD_TYPE (field)
+ && !DECL_PACKED (field)
+ && !integer_zerop (DECL_SIZE (field)))
+ {
+ int type_align = TYPE_ALIGN (TREE_TYPE (field));
+ register tree dsize = DECL_SIZE (field);
+ int field_size = TREE_INT_CST_LOW (dsize);
+
+ if (maximum_field_alignment != 0)
+ type_align = MIN (type_align, maximum_field_alignment);
+
+ /* A bit field may not span the unit of alignment of its type.
+ Advance to next boundary if necessary. */
+ if (const_size / type_align
+ != (const_size + field_size - 1) / type_align)
+ const_size = CEIL (const_size, type_align) * type_align;
+ }
+#endif
+
+ /* Size so far becomes the position of this field. */
+
+ if (var_size && const_size)
+ DECL_FIELD_BITPOS (field)
+ = size_binop (PLUS_EXPR, var_size, size_int (const_size));
+ else if (var_size)
+ DECL_FIELD_BITPOS (field) = var_size;
+ else
+ {
+ DECL_FIELD_BITPOS (field) = size_int (const_size);
+
+ /* If this field ended up more aligned than we thought it
+ would be (we approximate this by seeing if its position
+ changed), lay out the field again; perhaps we can use an
+ integral mode for it now. */
+ if (known_align != const_size)
+ layout_decl (field, const_size);
+ }
+
+ /* Now add size of this field to the size of the record. */
+
+ {
+ register tree dsize = DECL_SIZE (field);
+
+ /* This can happen when we have an invalid nested struct definition,
+ such as struct j { struct j { int i; } }. The error message is
+ printed in finish_struct. */
+ if (dsize == 0)
+ /* Do nothing. */;
+ else if (TREE_CODE (dsize) == INTEGER_CST
+ && TREE_INT_CST_HIGH (dsize) == 0
+ && TREE_INT_CST_LOW (dsize) + const_size > const_size)
+ /* Use const_size if there's no overflow. */
+ const_size += TREE_INT_CST_LOW (dsize);
+ else
+ {
+ if (var_size == 0)
+ var_size = dsize;
+ else
+ var_size = size_binop (PLUS_EXPR, var_size, dsize);
+ }
+ }
+ }
+
+ /* Work out the total size and alignment of the record
+ as one expression and store in the record type.
+ Round it up to a multiple of the record's alignment. */
+
+ if (var_size == 0)
+ {
+ TYPE_SIZE (rec) = size_int (const_size);
+ }
+ else
+ {
+ if (const_size)
+ var_size
+ = size_binop (PLUS_EXPR, var_size, size_int (const_size));
+ TYPE_SIZE (rec) = var_size;
+ }
+
+ /* Determine the desired alignment. */
+#ifdef ROUND_TYPE_ALIGN
+ TYPE_ALIGN (rec) = ROUND_TYPE_ALIGN (rec, TYPE_ALIGN (rec), record_align);
+#else
+ TYPE_ALIGN (rec) = MAX (TYPE_ALIGN (rec), record_align);
+#endif
+
+#ifdef ROUND_TYPE_SIZE
+ TYPE_SIZE (rec) = ROUND_TYPE_SIZE (rec, TYPE_SIZE (rec), TYPE_ALIGN (rec));
+#else
+ /* Round the size up to be a multiple of the required alignment */
+ TYPE_SIZE (rec) = round_up (TYPE_SIZE (rec), TYPE_ALIGN (rec));
+#endif
+
+ return pending_statics;
+}
+
+/* Lay out a UNION_TYPE or QUAL_UNION_TYPE type.
+ Lay out all the fields, set their positions to zero,
+ and compute the size and alignment of the union (maximum of any field).
+ Note that if you set the TYPE_ALIGN before calling this
+ then the union is aligned to at least that boundary. */
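+
+/* Illustrative example: for `union u { char c; int i; };' on a
+ typical 32-bit target, both fields get bit position 0, const_size
+ ends up as MAX (8, 32) == 32, and the union is laid out as 32 bits
+ with `int' alignment. */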
+
+static void
+layout_union (rec)
+ tree rec;
+{
+ register tree field;
+#ifdef STRUCTURE_SIZE_BOUNDARY
+ unsigned union_align = STRUCTURE_SIZE_BOUNDARY;
+#else
+ unsigned union_align = BITS_PER_UNIT;
+#endif
+
+ /* The size of the union, based on the fields scanned so far,
+ is max (CONST_SIZE, VAR_SIZE).
+ VAR_SIZE may be null; then CONST_SIZE by itself is the size. */
+ register int const_size = 0;
+ register tree var_size = 0;
+
+ /* If this is a QUAL_UNION_TYPE, we want to process the fields in
+ the reverse order in building the COND_EXPR that denotes its
+ size. We reverse them again later. */
+ if (TREE_CODE (rec) == QUAL_UNION_TYPE)
+ TYPE_FIELDS (rec) = nreverse (TYPE_FIELDS (rec));
+
+ for (field = TYPE_FIELDS (rec); field; field = TREE_CHAIN (field))
+ {
+ /* Enums which are local to this class need not be laid out. */
+ if (TREE_CODE (field) == CONST_DECL || TREE_CODE (field) == TYPE_DECL)
+ continue;
+
+ layout_decl (field, 0);
+ DECL_FIELD_BITPOS (field) = size_int (0);
+
+ /* Union must be at least as aligned as any field requires. */
+
+ union_align = MAX (union_align, DECL_ALIGN (field));
+
+#ifdef PCC_BITFIELD_TYPE_MATTERS
+ /* On the m88000, a bit field of declared type `int'
+ forces the entire union to have `int' alignment. */
+ if (PCC_BITFIELD_TYPE_MATTERS && DECL_BIT_FIELD_TYPE (field))
+ union_align = MAX (union_align, TYPE_ALIGN (TREE_TYPE (field)));
+#endif
+
+ if (TREE_CODE (rec) == UNION_TYPE)
+ {
+ /* Set union_size to max (decl_size, union_size).
+ There are more and less general ways to do this.
+ Use only CONST_SIZE unless forced to use VAR_SIZE. */
+
+ if (TREE_CODE (DECL_SIZE (field)) == INTEGER_CST)
+ const_size
+ = MAX (const_size, TREE_INT_CST_LOW (DECL_SIZE (field)));
+ else if (var_size == 0)
+ var_size = DECL_SIZE (field);
+ else
+ var_size = size_binop (MAX_EXPR, var_size, DECL_SIZE (field));
+ }
+ else if (TREE_CODE (rec) == QUAL_UNION_TYPE)
+ var_size = fold (build (COND_EXPR, sizetype, DECL_QUALIFIER (field),
+ DECL_SIZE (field),
+ var_size ? var_size : integer_zero_node));
+ }
+
+ if (TREE_CODE (rec) == QUAL_UNION_TYPE)
+ TYPE_FIELDS (rec) = nreverse (TYPE_FIELDS (rec));
+
+ /* Determine the ultimate size of the union
+ (in bits, rounded up to a whole number of bytes). */
+ if (NULL == var_size)
+ TYPE_SIZE (rec) = size_int (CEIL (const_size, BITS_PER_UNIT)
+ * BITS_PER_UNIT);
+ else if (const_size == 0)
+ TYPE_SIZE (rec) = var_size;
+ else
+ TYPE_SIZE (rec) = size_binop (MAX_EXPR, var_size,
+ round_up (size_int (const_size),
+ BITS_PER_UNIT));
+
+ /* Determine the desired alignment. */
+#ifdef ROUND_TYPE_ALIGN
+ TYPE_ALIGN (rec) = ROUND_TYPE_ALIGN (rec, TYPE_ALIGN (rec), union_align);
+#else
+ TYPE_ALIGN (rec) = MAX (TYPE_ALIGN (rec), union_align);
+#endif
+
+#ifdef ROUND_TYPE_SIZE
+ TYPE_SIZE (rec) = ROUND_TYPE_SIZE (rec, TYPE_SIZE (rec), TYPE_ALIGN (rec));
+#else
+ /* Round the size up to be a multiple of the required alignment */
+ TYPE_SIZE (rec) = round_up (TYPE_SIZE (rec), TYPE_ALIGN (rec));
+#endif
+}
+
+/* Calculate the mode, size, and alignment for TYPE.
+ For an array type, calculate the element separation as well.
+ Record TYPE on the chain of permanent or temporary types
+ so that dbxout will find out about it.
+
+ TYPE_SIZE of a type is nonzero if the type has been laid out already.
+ layout_type does nothing on such a type.
+
+ If the type is incomplete, its TYPE_SIZE remains zero. */
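+
+/* Typical use (an illustrative sketch, not a prescription): a front
+ end builds a type node, fills in TYPE_FIELDS or TYPE_PRECISION as
+ appropriate, and calls layout_type once; since a nonzero TYPE_SIZE
+ marks the type as already laid out, redundant calls are harmless. */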
+
+void
+layout_type (type)
+ tree type;
+{
+ int old;
+ tree pending_statics;
+
+ if (type == 0)
+ abort ();
+
+ /* Do nothing if type has been laid out before. */
+ if (TYPE_SIZE (type))
+ return;
+
+ /* Make sure all nodes we allocate are not momentary;
+ they must last past the current statement. */
+ old = suspend_momentary ();
+
+ /* Put all our nodes into the same obstack as the type. Also,
+ make expressions saveable (this is a no-op for permanent types). */
+
+ push_obstacks (TYPE_OBSTACK (type), TYPE_OBSTACK (type));
+ saveable_allocation ();
+
+ switch (TREE_CODE (type))
+ {
+ case LANG_TYPE:
+ /* This kind of type is the responsibility
+ of the language-specific code. */
+ abort ();
+
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
+ && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
+ TREE_UNSIGNED (type) = 1;
+
+ TYPE_MODE (type) = smallest_mode_for_size (TYPE_PRECISION (type),
+ MODE_INT);
+ TYPE_SIZE (type) = size_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
+ break;
+
+ case REAL_TYPE:
+ TYPE_MODE (type) = mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0);
+ TYPE_SIZE (type) = size_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
+ break;
+
+ case COMPLEX_TYPE:
+ TREE_UNSIGNED (type) = TREE_UNSIGNED (TREE_TYPE (type));
+ TYPE_MODE (type)
+ = mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
+ (TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
+ ? MODE_COMPLEX_INT : MODE_COMPLEX_FLOAT),
+ 0);
+ TYPE_SIZE (type) = size_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
+ break;
+
+ case VOID_TYPE:
+ TYPE_SIZE (type) = size_zero_node;
+ TYPE_ALIGN (type) = 1;
+ TYPE_MODE (type) = VOIDmode;
+ break;
+
+ case OFFSET_TYPE:
+ TYPE_SIZE (type) = size_int (POINTER_SIZE);
+ TYPE_MODE (type) = mode_for_size (POINTER_SIZE,
+ GET_MODE_CLASS (Pmode), 0);
+ break;
+
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ TYPE_MODE (type) = mode_for_size (2 * POINTER_SIZE, MODE_INT, 0);
+ TYPE_SIZE (type) = size_int (2 * POINTER_SIZE);
+ break;
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ TYPE_MODE (type) = mode_for_size (POINTER_SIZE,
+ GET_MODE_CLASS (Pmode), 0);
+ TYPE_SIZE (type) = size_int (POINTER_SIZE);
+ TREE_UNSIGNED (type) = 1;
+ TYPE_PRECISION (type) = POINTER_SIZE;
+ break;
+
+ case ARRAY_TYPE:
+ {
+ register tree index = TYPE_DOMAIN (type);
+ register tree element = TREE_TYPE (type);
+
+ build_pointer_type (element);
+
+ /* We need to know both bounds in order to compute the size. */
+ if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
+ && TYPE_SIZE (element))
+ {
+ tree length
+ = size_binop (PLUS_EXPR, size_one_node,
+ size_binop (MINUS_EXPR, TYPE_MAX_VALUE (index),
+ TYPE_MIN_VALUE (index)));
+
+ TYPE_SIZE (type) = size_binop (MULT_EXPR, length,
+ TYPE_SIZE (element));
+ }
+
+ /* Now round the alignment and size,
+ using machine-dependent criteria if any. */
+
+#ifdef ROUND_TYPE_ALIGN
+ TYPE_ALIGN (type)
+ = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
+#else
+ TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
+#endif
+
+#ifdef ROUND_TYPE_SIZE
+ if (TYPE_SIZE (type) != 0)
+ TYPE_SIZE (type)
+ = ROUND_TYPE_SIZE (type, TYPE_SIZE (type), TYPE_ALIGN (type));
+#endif
+
+ TYPE_MODE (type) = BLKmode;
+ if (TYPE_SIZE (type) != 0
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ /* BLKmode elements force BLKmode aggregate;
+ else extract/store fields may lose. */
+ && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
+ || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
+ {
+ TYPE_MODE (type)
+ = mode_for_size (TREE_INT_CST_LOW (TYPE_SIZE (type)),
+ MODE_INT, 1);
+
+ if (STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
+ && TYPE_ALIGN (type) < TREE_INT_CST_LOW (TYPE_SIZE (type))
+ && TYPE_MODE (type) != BLKmode)
+ {
+ TYPE_NO_FORCE_BLK (type) = 1;
+ TYPE_MODE (type) = BLKmode;
+ }
+ }
+ break;
+ }
+
+ case RECORD_TYPE:
+ pending_statics = layout_record (type);
+ TYPE_MODE (type) = BLKmode;
+ if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
+ {
+ tree field;
+ /* A record which has any BLKmode members must itself be BLKmode;
+ it can't go in a register.
+ Unless the member is BLKmode only because it isn't aligned. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ int bitpos;
+
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (TYPE_MODE (TREE_TYPE (field)) == BLKmode
+ && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field)))
+ goto record_lose;
+
+ if (TREE_CODE (DECL_FIELD_BITPOS (field)) != INTEGER_CST)
+ goto record_lose;
+
+ bitpos = TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field));
+
+ /* Must be BLKmode if any field crosses a word boundary,
+ since extract_bit_field can't handle that in registers. */
+ if (bitpos / BITS_PER_WORD
+ != ((TREE_INT_CST_LOW (DECL_SIZE (field)) + bitpos - 1)
+ / BITS_PER_WORD)
+ /* But there is no problem if the field is entire words. */
+ && TREE_INT_CST_LOW (DECL_SIZE (field)) % BITS_PER_WORD != 0)
+ goto record_lose;
+ }
+
+ TYPE_MODE (type)
+ = mode_for_size (TREE_INT_CST_LOW (TYPE_SIZE (type)),
+ MODE_INT, 1);
+
+ /* If structure's known alignment is less than
+ what the scalar mode would need, and it matters,
+ then stick with BLKmode. */
+ if (STRICT_ALIGNMENT
+ && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
+ || (TYPE_ALIGN (type)
+ >= TREE_INT_CST_LOW (TYPE_SIZE (type)))))
+ {
+ if (TYPE_MODE (type) != BLKmode)
+ /* If this is the only reason this type is BLKmode,
+ then don't force containing types to be BLKmode. */
+ TYPE_NO_FORCE_BLK (type) = 1;
+ TYPE_MODE (type) = BLKmode;
+ }
+
+ record_lose: ;
+ }
+
+ /* Lay out any static members. This is done now
+ because their type may use the record's type. */
+ while (pending_statics)
+ {
+ layout_decl (TREE_VALUE (pending_statics), 0);
+ pending_statics = TREE_CHAIN (pending_statics);
+ }
+ break;
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ layout_union (type);
+ TYPE_MODE (type) = BLKmode;
+ if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ /* If structure's known alignment is less than
+ what the scalar mode would need, and it matters,
+ then stick with BLKmode. */
+ && (! STRICT_ALIGNMENT
+ || TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
+ || TYPE_ALIGN (type) >= TREE_INT_CST_LOW (TYPE_SIZE (type))))
+ {
+ tree field;
+ /* A union which has any BLKmode members must itself be BLKmode;
+ it can't go in a register.
+ Unless the member is BLKmode only because it isn't aligned. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (TYPE_MODE (TREE_TYPE (field)) == BLKmode
+ && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field)))
+ goto union_lose;
+ }
+
+ TYPE_MODE (type)
+ = mode_for_size (TREE_INT_CST_LOW (TYPE_SIZE (type)),
+ MODE_INT, 1);
+
+ union_lose: ;
+ }
+ break;
+
+ /* Pascal and Chill types */
+ case BOOLEAN_TYPE: /* store one byte/boolean for now. */
+ TYPE_MODE (type) = QImode;
+ TYPE_SIZE (type) = size_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
+ TYPE_PRECISION (type) = 1;
+ TYPE_ALIGN (type) = GET_MODE_ALIGNMENT (TYPE_MODE (type));
+ if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
+ && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
+ TREE_UNSIGNED (type) = 1;
+ break;
+
+ case CHAR_TYPE:
+ TYPE_MODE (type) = QImode;
+ TYPE_SIZE (type) = size_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
+ TYPE_PRECISION (type) = GET_MODE_BITSIZE (TYPE_MODE (type));
+ TYPE_ALIGN (type) = GET_MODE_ALIGNMENT (TYPE_MODE (type));
+ break;
+
+ case FILE_TYPE:
+ /* The size may vary in different languages, so the language front end
+ should fill in the size. */
+ TYPE_ALIGN (type) = BIGGEST_ALIGNMENT;
+ TYPE_MODE (type) = BLKmode;
+ break;
+
+ default:
+ abort ();
+ } /* end switch */
+
+ /* Normally, use the alignment corresponding to the mode chosen.
+ However, where strict alignment is not required, avoid
+ over-aligning structures, since most compilers do not do this
+ alignment. */
+
+ if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
+ && (STRICT_ALIGNMENT
+ || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
+ && TREE_CODE (type) != QUAL_UNION_TYPE
+ && TREE_CODE (type) != ARRAY_TYPE)))
+ TYPE_ALIGN (type) = GET_MODE_ALIGNMENT (TYPE_MODE (type));
+
+ /* Evaluate nonconstant size only once, either now or as soon as safe. */
+ if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
+
+ /* Also layout any other variants of the type. */
+ if (TYPE_NEXT_VARIANT (type)
+ || type != TYPE_MAIN_VARIANT (type))
+ {
+ tree variant;
+ /* Record layout info of this variant. */
+ tree size = TYPE_SIZE (type);
+ int align = TYPE_ALIGN (type);
+ enum machine_mode mode = TYPE_MODE (type);
+
+ /* Copy it into all variants. */
+ for (variant = TYPE_MAIN_VARIANT (type);
+ variant;
+ variant = TYPE_NEXT_VARIANT (variant))
+ {
+ TYPE_SIZE (variant) = size;
+ TYPE_ALIGN (variant) = align;
+ TYPE_MODE (variant) = mode;
+ }
+ }
+
+ pop_obstacks ();
+ resume_momentary (old);
+}
+
+/* Create and return a type for signed integers of PRECISION bits. */
+
+tree
+make_signed_type (precision)
+ int precision;
+{
+ register tree type = make_node (INTEGER_TYPE);
+
+ TYPE_PRECISION (type) = precision;
+
+ /* Create the extreme values based on the number of bits. */
+
+ TYPE_MIN_VALUE (type)
+ = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
+ ? 0 : (HOST_WIDE_INT) (-1) << (precision - 1)),
+ (((HOST_WIDE_INT) (-1)
+ << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? precision - HOST_BITS_PER_WIDE_INT - 1
+ : 0))));
+ TYPE_MAX_VALUE (type)
+ = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
+ ? -1 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
+ (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? (((HOST_WIDE_INT) 1
+ << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
+ : 0));
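+
+  /* For example (illustrative), with HOST_BITS_PER_WIDE_INT == 32 and
+     PRECISION == 16 this yields build_int_2 (-32768, -1) for the
+     minimum and build_int_2 (32767, 0) for the maximum. */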
+
+ /* Give this type's extreme values this type as their type. */
+
+ TREE_TYPE (TYPE_MIN_VALUE (type)) = type;
+ TREE_TYPE (TYPE_MAX_VALUE (type)) = type;
+
+ /* The first type made with this or `make_unsigned_type'
+ is the type for size values. */
+
+ if (sizetype == 0)
+ {
+ sizetype = type;
+ }
+
+ /* Lay out the type: set its alignment, size, etc. */
+
+ layout_type (type);
+
+ return type;
+}
+
+/* Create and return a type for unsigned integers of PRECISION bits. */
+
+tree
+make_unsigned_type (precision)
+ int precision;
+{
+ register tree type = make_node (INTEGER_TYPE);
+
+ TYPE_PRECISION (type) = precision;
+
+ /* The first type made with this or `make_signed_type'
+ is the type for size values. */
+
+ if (sizetype == 0)
+ {
+ sizetype = type;
+ }
+
+ fixup_unsigned_type (type);
+ return type;
+}
+
+/* Set the extreme values of TYPE based on its precision in bits,
+ then lay it out. Used when make_signed_type won't do
+ because the tree code is not INTEGER_TYPE.
+ E.g. for Pascal, when the -fsigned-char option is given. */
+
+void
+fixup_signed_type (type)
+ tree type;
+{
+ register int precision = TYPE_PRECISION (type);
+
+ TYPE_MIN_VALUE (type)
+ = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
+ ? 0 : (HOST_WIDE_INT) (-1) << (precision - 1)),
+ (((HOST_WIDE_INT) (-1)
+ << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? precision - HOST_BITS_PER_WIDE_INT - 1
+ : 0))));
+ TYPE_MAX_VALUE (type)
+ = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
+ ? -1 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
+ (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? (((HOST_WIDE_INT) 1
+ << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
+ : 0));
+
+ TREE_TYPE (TYPE_MIN_VALUE (type)) = type;
+ TREE_TYPE (TYPE_MAX_VALUE (type)) = type;
+
+ /* Lay out the type: set its alignment, size, etc. */
+
+ layout_type (type);
+}
+
+/* Set the extreme values of TYPE based on its precision in bits,
+ then lay it out. This is used both in `make_unsigned_type'
+ and for enumeral types. */
+
+void
+fixup_unsigned_type (type)
+ tree type;
+{
+ register int precision = TYPE_PRECISION (type);
+
+ TYPE_MIN_VALUE (type) = build_int_2 (0, 0);
+ TYPE_MAX_VALUE (type)
+ = build_int_2 (precision - HOST_BITS_PER_WIDE_INT >= 0
+ ? -1 : ((HOST_WIDE_INT) 1 << precision) - 1,
+ precision - HOST_BITS_PER_WIDE_INT > 0
+ ? ((unsigned HOST_WIDE_INT) ~0
+ >> (HOST_BITS_PER_WIDE_INT
+ - (precision - HOST_BITS_PER_WIDE_INT)))
+ : 0);
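+
+  /* For example (illustrative), with HOST_BITS_PER_WIDE_INT == 32 and
+     PRECISION == 16 the maximum becomes build_int_2 (65535, 0),
+     i.e. 2**16 - 1. */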
+ TREE_TYPE (TYPE_MIN_VALUE (type)) = type;
+ TREE_TYPE (TYPE_MAX_VALUE (type)) = type;
+
+ /* Lay out the type: set its alignment, size, etc. */
+
+ layout_type (type);
+}
+
+/* Find the best machine mode to use when referencing a bit field of length
+ BITSIZE bits starting at BITPOS.
+
+ The underlying object is known to be aligned to a boundary of ALIGN bits.
+ If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
+ larger than LARGEST_MODE (usually SImode).
+
+ If no mode meets all these conditions, we return VOIDmode. Otherwise, if
+ VOLATILEP is true or SLOW_BYTE_ACCESS is false, we return the smallest
+ mode meeting these conditions.
+
+ Otherwise (VOLATILEP is false and SLOW_BYTE_ACCESS is true), we return
+ the largest mode (but a mode no wider than UNITS_PER_WORD) that meets
+ all the conditions. */
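+
+/* Worked example (illustrative): asked for BITSIZE 5 at BITPOS 3 with
+ ALIGN 8, the narrowest-mode loop below settles on QImode, since bits
+ 3..7 fit in one 8-bit unit (3/8 == 7/8). The same field at BITPOS 6
+ spans two bytes (6/8 != 10/8), so HImode is the narrowest mode that
+ contains it. */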
+
+enum machine_mode
+get_best_mode (bitsize, bitpos, align, largest_mode, volatilep)
+ int bitsize, bitpos;
+ int align;
+ enum machine_mode largest_mode;
+ int volatilep;
+{
+ enum machine_mode mode;
+ int unit;
+
+ /* Find the narrowest integer mode that contains the bit field. */
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ unit = GET_MODE_BITSIZE (mode);
+ if (bitpos / unit == (bitpos + bitsize - 1) / unit)
+ break;
+ }
+
+ if (mode == VOIDmode
+ /* It is tempting to omit the following line
+ if STRICT_ALIGNMENT is true.
+ But that is incorrect, since if the bitfield uses part of 3 bytes
+ and we use a 4-byte mode, we could get a spurious segv
+ if the extra 4th byte is past the end of memory.
+ (Though at least one Unix compiler ignores this problem:
+ that on the Sequent 386 machine.) */
+ || MIN (unit, BIGGEST_ALIGNMENT) > align
+ || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
+ return VOIDmode;
+
+ if (SLOW_BYTE_ACCESS && ! volatilep)
+ {
+ enum machine_mode wide_mode = VOIDmode, tmode;
+
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
+ tmode = GET_MODE_WIDER_MODE (tmode))
+ {
+ unit = GET_MODE_BITSIZE (tmode);
+ if (bitpos / unit == (bitpos + bitsize - 1) / unit
+ && unit <= BITS_PER_WORD
+ && unit <= MIN (align, BIGGEST_ALIGNMENT)
+ && (largest_mode == VOIDmode
+ || unit <= GET_MODE_BITSIZE (largest_mode)))
+ wide_mode = tmode;
+ }
+
+ if (wide_mode != VOIDmode)
+ return wide_mode;
+ }
+
+ return mode;
+}
+
+/* Save all variables describing the current status into the structure *P.
+ This is used before starting a nested function. */
+
+void
+save_storage_status (p)
+ struct function *p;
+{
+#if 0 /* Need not save, since always 0 and nonzero (respectively) within a function. */
+ p->pending_sizes = pending_sizes;
+ p->immediate_size_expand = immediate_size_expand;
+#endif /* 0 */
+}
+
+/* Restore all variables describing the current status from the structure *P.
+ This is used after a nested function. */
+
+void
+restore_storage_status (p)
+ struct function *p;
+{
+#if 0
+ pending_sizes = p->pending_sizes;
+ immediate_size_expand = p->immediate_size_expand;
+#endif /* 0 */
+}
diff --git a/gnu/usr.bin/cc/cc_int/stupid.c b/gnu/usr.bin/cc/cc_int/stupid.c
new file mode 100644
index 0000000..7ceec9f
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/stupid.c
@@ -0,0 +1,518 @@
+/* Dummy data flow analysis for GNU compiler in nonoptimizing mode.
+ Copyright (C) 1987, 1991, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file performs stupid register allocation, which is used
+ when cc1 gets the -noreg switch (which is when cc does not get -O).
+
+ Stupid register allocation goes in place of the flow_analysis,
+ local_alloc and global_alloc passes. combine_instructions cannot
+ be done with stupid allocation because the data flow info that it needs
+ is not computed here.
+
+ In stupid allocation, the only user-defined variables that can
+ go in registers are those declared "register". They are assumed
+ to have a life span equal to their scope. Other user variables
+ are given stack slots in the rtl-generation pass and are not
+ represented as pseudo regs. A compiler-generated temporary
+ is assumed to live from its first mention to its last mention.
+
+ Since each pseudo-reg's life span is just an interval, it can be
+ represented as a pair of numbers, each of which identifies an insn by
+ its position in the function (number of insns before it). The first
+ thing done for stupid allocation is to compute such a number for each
+ insn. It is called the suid. Then the life-interval of each
+ pseudo reg is computed. Then the pseudo regs are ordered by priority
+ and assigned hard regs in priority order. */
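+
+/* A small illustration (hypothetical numbers, not compiler code): if
+ pseudo 100 lives over suids [3,9] and pseudo 101 over [5,12], their
+ intervals overlap, so they cannot share a hard register; a pseudo
+ living over [10,12] conflicts with 101 but could reuse whatever hard
+ reg 100 received. A CALL_INSN at suid 7 would further force pseudo
+ 100 into a call-preserved register, since its interval crosses the
+ call. */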
+
+#include <stdio.h>
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "flags.h"
+
+/* Vector mapping INSN_UIDs to suids.
+ The suids are like uids but increase monotonically always.
+ We use them to see whether a subroutine call came
+ between a variable's birth and its death. */
+
+static int *uid_suid;
+
+/* Get the suid of an insn. */
+
+#define INSN_SUID(INSN) (uid_suid[INSN_UID (INSN)])
+
+/* Record the suid of the last CALL_INSN
+ so we can tell whether a pseudo reg crosses any calls. */
+
+static int last_call_suid;
+
+/* Element N is suid of insn where life span of pseudo reg N ends.
+ Element is 0 if register N has not been seen yet on backward scan. */
+
+static int *reg_where_dead;
+
+/* Element N is suid of insn where life span of pseudo reg N begins. */
+
+static int *reg_where_born;
+
+/* Numbers of pseudo-regs to be allocated, highest priority first. */
+
+static int *reg_order;
+
+/* Indexed by reg number (hard or pseudo), nonzero if register is live
+ at the current point in the instruction stream. */
+
+static char *regs_live;
+
+/* Indexed by insn's suid, the set of hard regs live after that insn. */
+
+static HARD_REG_SET *after_insn_hard_regs;
+
+/* Record that hard reg REGNO is live after insn INSN. */
+
+#define MARK_LIVE_AFTER(INSN,REGNO) \
+ SET_HARD_REG_BIT (after_insn_hard_regs[INSN_SUID (INSN)], (REGNO))
+
+static int stupid_reg_compare PROTO((int *, int *));
+static int stupid_find_reg PROTO((int, enum reg_class, enum machine_mode,
+ int, int));
+static void stupid_mark_refs PROTO((rtx, rtx));
+
+/* Stupid life analysis is for the case where only variables declared
+ `register' go in registers. For this case, we mark all
+ pseudo-registers that belong to register variables as
+ dying in the last instruction of the function, and all other
+ pseudo registers as dying in the last place they are referenced.
+ Hard registers are marked as dying in the last reference before
+ the end or before each store into them. */
+
+void
+stupid_life_analysis (f, nregs, file)
+ rtx f;
+ int nregs;
+ FILE *file;
+{
+ register int i;
+ register rtx last, insn;
+ int max_uid, max_suid;
+
+ bzero (regs_ever_live, sizeof regs_ever_live);
+
+ regs_live = (char *) alloca (nregs);
+
+ /* First find the last real insn, and count the number of insns,
+ and assign insns their suids. */
+
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ if (INSN_UID (insn) > i)
+ i = INSN_UID (insn);
+
+ max_uid = i + 1;
+ uid_suid = (int *) alloca ((i + 1) * sizeof (int));
+
+ /* Compute the mapping from uids to suids.
+ Suids are numbers assigned to insns, like uids,
+ except that suids increase monotonically through the code. */
+
+ last = 0; /* In case of empty function body */
+ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ last = insn;
+
+ INSN_SUID (insn) = ++i;
+ }
+
+ last_call_suid = i + 1;
+ max_suid = i + 1;
+
+ max_regno = nregs;
+
+ /* Allocate tables to record info about regs. */
+
+ reg_where_dead = (int *) alloca (nregs * sizeof (int));
+ bzero ((char *) reg_where_dead, nregs * sizeof (int));
+
+ reg_where_born = (int *) alloca (nregs * sizeof (int));
+ bzero ((char *) reg_where_born, nregs * sizeof (int));
+
+ reg_order = (int *) alloca (nregs * sizeof (int));
+ bzero ((char *) reg_order, nregs * sizeof (int));
+
+ reg_renumber = (short *) oballoc (nregs * sizeof (short));
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ reg_renumber[i] = i;
+
+ for (i = FIRST_VIRTUAL_REGISTER; i < max_regno; i++)
+ reg_renumber[i] = -1;
+
+ after_insn_hard_regs
+ = (HARD_REG_SET *) alloca (max_suid * sizeof (HARD_REG_SET));
+
+ bzero ((char *) after_insn_hard_regs, max_suid * sizeof (HARD_REG_SET));
+
+ /* Allocate and zero out many data structures
+ that will record the data from lifetime analysis. */
+
+ allocate_for_life_analysis ();
+
+ for (i = 0; i < max_regno; i++)
+ reg_n_deaths[i] = 1;
+
+ bzero (regs_live, nregs);
+
+ /* Find where each pseudo register is born and dies,
+ by scanning all insns from the end to the start
+ and noting all mentions of the registers.
+
+ Also find where each hard register is live
+ and record that info in after_insn_hard_regs.
+ regs_live[I] is 1 if hard reg I is live
+ at the current point in the scan. */
+
+ for (insn = last; insn; insn = PREV_INSN (insn))
+ {
+ register HARD_REG_SET *p = after_insn_hard_regs + INSN_SUID (insn);
+
+ /* Copy the info in regs_live into the element of after_insn_hard_regs
+ for the current position in the rtl code. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_live[i])
+ SET_HARD_REG_BIT (*p, i);
+
+ /* Update which hard regs are currently live
+ and also the birth and death suids of pseudo regs
+ based on the pattern of this insn. */
+
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ stupid_mark_refs (PATTERN (insn), insn);
+
+ /* Mark all call-clobbered regs as live after each call insn
+ so that a pseudo whose life span includes this insn
+ will not go in one of them.
+ Then mark those regs as all dead for the continuing scan
+ of the insns before the call. */
+
+ if (GET_CODE (insn) == CALL_INSN)
+ {
+ last_call_suid = INSN_SUID (insn);
+ IOR_HARD_REG_SET (after_insn_hard_regs[last_call_suid],
+ call_used_reg_set);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (call_used_regs[i])
+ regs_live[i] = 0;
+
+ /* It is important that this be done after processing the insn's
+ pattern because we want the function result register to still
+ be live if it's also used to pass arguments. */
+ stupid_mark_refs (CALL_INSN_FUNCTION_USAGE (insn), insn);
+ }
+ }
+
+ /* Now decide the order in which to allocate the pseudo registers. */
+
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++)
+ reg_order[i] = i;
+
+ qsort (&reg_order[LAST_VIRTUAL_REGISTER + 1],
+ max_regno - LAST_VIRTUAL_REGISTER - 1, sizeof (int),
+ stupid_reg_compare);
+
+ /* Now, in that order, try to find hard registers for those pseudo regs. */
+
+ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++)
+ {
+ register int r = reg_order[i];
+
+ /* Some regnos disappear from the rtl. Ignore them to avoid a crash. */
+ if (regno_reg_rtx[r] == 0)
+ continue;
+
+ /* Now find the best hard-register class for this pseudo register */
+ if (N_REG_CLASSES > 1)
+ reg_renumber[r] = stupid_find_reg (reg_n_calls_crossed[r],
+ reg_preferred_class (r),
+ PSEUDO_REGNO_MODE (r),
+ reg_where_born[r],
+ reg_where_dead[r]);
+
+ /* If no reg available in that class, try alternate class. */
+ if (reg_renumber[r] == -1 && reg_alternate_class (r) != NO_REGS)
+ reg_renumber[r] = stupid_find_reg (reg_n_calls_crossed[r],
+ reg_alternate_class (r),
+ PSEUDO_REGNO_MODE (r),
+ reg_where_born[r],
+ reg_where_dead[r]);
+ }
+
+ if (file)
+ dump_flow_info (file);
+}
+
+/* Comparison function for qsort.
+ Returns a negative (positive) value if register *R1P is higher (lower)
+ priority than *R2P. */
+
+static int
+stupid_reg_compare (r1p, r2p)
+ int *r1p, *r2p;
+{
+ register int r1 = *r1p, r2 = *r2p;
+ register int len1 = reg_where_dead[r1] - reg_where_born[r1];
+ register int len2 = reg_where_dead[r2] - reg_where_born[r2];
+ int tem;
+
+ tem = len2 - len1;
+ if (tem != 0)
+ return tem;
+
+ tem = reg_n_refs[r1] - reg_n_refs[r2];
+ if (tem != 0)
+ return tem;
+
+ /* If regs are equally good, sort by regno,
+ so that the results of qsort leave nothing to chance. */
+ return r1 - r2;
+}
+
+/* Find a block of consecutive hard registers in reg_class CLASS
+ that can hold a value of machine-mode MODE
+ (though actually only the first register of the block is tested for MODE),
+ free from just after the insn whose suid is BORN_INSN
+ through the insn whose suid is DEAD_INSN,
+ and return the number of the first of them.
+ Return -1 if such a block cannot be found.
+
+ If CALL_PRESERVED is nonzero, insist on registers preserved
+ over subroutine calls, and return -1 if none can be found. */
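+
+/* Illustrative example (hypothetical numbers): for a MODE with
+ HARD_REGNO_NREGS == 2 and the interval BORN_INSN 10, DEAD_INSN 20,
+ the sets after_insn_hard_regs[10..19] are unioned into `used'; the
+ allocation order is then walked for a regno whose whole 2-register
+ block is clear, and the winning block is marked live over those same
+ suids so that later pseudos will conflict with it. */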
+
+static int
+stupid_find_reg (call_preserved, class, mode, born_insn, dead_insn)
+ int call_preserved;
+ enum reg_class class;
+ enum machine_mode mode;
+ int born_insn, dead_insn;
+{
+ register int i, ins;
+#ifdef HARD_REG_SET
+ register /* Declare them register if they are scalars. */
+#endif
+ HARD_REG_SET used, this_reg;
+#ifdef ELIMINABLE_REGS
+ static struct {int from, to; } eliminables[] = ELIMINABLE_REGS;
+#endif
+
+ COPY_HARD_REG_SET (used,
+ call_preserved ? call_used_reg_set : fixed_reg_set);
+
+#ifdef ELIMINABLE_REGS
+ for (i = 0; i < sizeof eliminables / sizeof eliminables[0]; i++)
+ SET_HARD_REG_BIT (used, eliminables[i].from);
+#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
+ SET_HARD_REG_BIT (used, HARD_FRAME_POINTER_REGNUM);
+#endif
+#else
+ SET_HARD_REG_BIT (used, FRAME_POINTER_REGNUM);
+#endif
+
+ for (ins = born_insn; ins < dead_insn; ins++)
+ IOR_HARD_REG_SET (used, after_insn_hard_regs[ins]);
+
+ IOR_COMPL_HARD_REG_SET (used, reg_class_contents[(int) class]);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+#ifdef REG_ALLOC_ORDER
+ int regno = reg_alloc_order[i];
+#else
+ int regno = i;
+#endif
+
+ /* If a register has screwy overlap problems,
+ don't use it at all if not optimizing.
+ Actually this is only for the 387 stack register,
+ and it's because subsequent code won't work. */
+#ifdef OVERLAPPING_REGNO_P
+ if (OVERLAPPING_REGNO_P (regno))
+ continue;
+#endif
+
+ if (! TEST_HARD_REG_BIT (used, regno)
+ && HARD_REGNO_MODE_OK (regno, mode))
+ {
+ register int j;
+ register int size1 = HARD_REGNO_NREGS (regno, mode);
+ for (j = 1; j < size1 && ! TEST_HARD_REG_BIT (used, regno + j); j++);
+ if (j == size1)
+ {
+ CLEAR_HARD_REG_SET (this_reg);
+ while (--j >= 0)
+ SET_HARD_REG_BIT (this_reg, regno + j);
+ for (ins = born_insn; ins < dead_insn; ins++)
+ {
+ IOR_HARD_REG_SET (after_insn_hard_regs[ins], this_reg);
+ }
+ return regno;
+ }
+#ifndef REG_ALLOC_ORDER
+ i += j; /* Skip starting points we know will lose */
+#endif
+ }
+ }
+
+ return -1;
+}
+
+/* Walk X, noting all assignments and references to registers
+ and recording what they imply about life spans.
+ INSN is the current insn, supplied so we can find its suid. */
+
+static void
+stupid_mark_refs (x, insn)
+ rtx x, insn;
+{
+ register RTX_CODE code;
+ register char *fmt;
+ register int regno, i;
+
+ if (x == 0)
+ return;
+
+ code = GET_CODE (x);
+
+ if (code == SET || code == CLOBBER)
+ {
+ if (SET_DEST (x) != 0 && GET_CODE (SET_DEST (x)) == REG)
+ {
+ /* Register is being assigned. */
+ regno = REGNO (SET_DEST (x));
+
+ /* For hard regs, update the where-live info. */
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ register int j
+ = HARD_REGNO_NREGS (regno, GET_MODE (SET_DEST (x)));
+
+ while (--j >= 0)
+ {
+ regs_ever_live[regno+j] = 1;
+ regs_live[regno+j] = 0;
+
+ /* The following line is for unused outputs;
+ they do get stored even though never used again. */
+ MARK_LIVE_AFTER (insn, regno);
+
+ /* When a hard reg is clobbered, mark it in use
+ just before this insn, so it is live all through. */
+ if (code == CLOBBER && INSN_SUID (insn) > 0)
+ SET_HARD_REG_BIT (after_insn_hard_regs[INSN_SUID (insn) - 1],
+ regno);
+ }
+ }
+ /* For pseudo regs, record where born, where dead, number of
+ times used, and whether live across a call. */
+ else
+ {
+ /* Update the life-interval bounds of this pseudo reg. */
+
+ /* When a pseudo-reg is CLOBBERed, it is born just before
+ the clobbering insn. When setting, just after. */
+ int where_born = INSN_SUID (insn) - (code == CLOBBER);
+
+ reg_where_born[regno] = where_born;
+
+ /* The reg must live at least one insn even
+ if it is never again used--because it has to go
+ in SOME hard reg. Mark it as dying after the current
+ insn so that it will conflict with any other outputs of
+ this insn. */
+ if (reg_where_dead[regno] < where_born + 2)
+ {
+ reg_where_dead[regno] = where_born + 2;
+ regs_live[regno] = 1;
+ }
+
+ /* Count the refs of this reg. */
+ reg_n_refs[regno]++;
+
+ if (last_call_suid < reg_where_dead[regno])
+ reg_n_calls_crossed[regno] += 1;
+ }
+ }
+
+ /* Record references from the value being set,
+ or from addresses in the place being set if that's not a reg.
+ If setting a SUBREG, we treat the entire reg as *used*. */
+ if (code == SET)
+ {
+ stupid_mark_refs (SET_SRC (x), insn);
+ if (GET_CODE (SET_DEST (x)) != REG)
+ stupid_mark_refs (SET_DEST (x), insn);
+ }
+ return;
+ }
+
+ /* Register value being used, not set. */
+
+ if (code == REG)
+ {
+ regno = REGNO (x);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ /* Hard reg: mark it live for continuing scan of previous insns. */
+ register int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
+ while (--j >= 0)
+ {
+ regs_ever_live[regno+j] = 1;
+ regs_live[regno+j] = 1;
+ }
+ }
+ else
+ {
+ /* Pseudo reg: record first use, last use and number of uses. */
+
+ reg_where_born[regno] = INSN_SUID (insn);
+ reg_n_refs[regno]++;
+ if (regs_live[regno] == 0)
+ {
+ regs_live[regno] = 1;
+ reg_where_dead[regno] = INSN_SUID (insn);
+ }
+ }
+ return;
+ }
+
+ /* Recursive scan of all other rtx's. */
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ stupid_mark_refs (XEXP (x, i), insn);
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ stupid_mark_refs (XVECEXP (x, i, j), insn);
+ }
+ }
+}
diff --git a/gnu/usr.bin/cc/cc_int/toplev.c b/gnu/usr.bin/cc/cc_int/toplev.c
new file mode 100644
index 0000000..d4b1043
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/toplev.c
@@ -0,0 +1,4061 @@
+/* Top level of GNU C compiler
+ Copyright (C) 1987, 88, 89, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This is the top level of cc1/c++.
+ It parses command args, opens files, invokes the various passes
+ in the proper order, and counts the time used by each.
+ Error messages and low-level interface to malloc also handled here. */
+
+#include "config.h"
+#ifdef __STDC__
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include <stdio.h>
+#include <signal.h>
+#include <setjmp.h>
+#include <sys/types.h>
+#include <ctype.h>
+#include <sys/stat.h>
+
+#ifdef USG
+#undef FLOAT
+#include <sys/param.h>
+/* This is for hpux. It is a real screw. They should change hpux. */
+#undef FLOAT
+#include <sys/times.h>
+#include <time.h> /* Correct for hpux at least. Is it good on other USG? */
+#undef FFS /* Some systems define this in param.h. */
+#else
+#ifndef VMS
+#include <sys/time.h>
+#include <sys/resource.h>
+#endif
+#endif
+
+#include "input.h"
+#include "tree.h"
+/* #include "c-tree.h" */
+#include "rtl.h"
+#include "flags.h"
+#include "insn-attr.h"
+#include "defaults.h"
+
+#ifdef XCOFF_DEBUGGING_INFO
+#include "xcoffout.h"
+#endif
+
+#include "bytecode.h"
+#include "bc-emit.h"
+
+#ifdef VMS
+/* The extra parameters substantially improve the I/O performance. */
+static FILE *
+VMS_fopen (fname, type)
+ char * fname;
+ char * type;
+{
+ if (strcmp (type, "w") == 0)
+ return fopen (fname, type, "mbc=16", "deq=64", "fop=tef", "shr=nil");
+ return fopen (fname, type, "mbc=16");
+}
+#define fopen VMS_fopen
+#endif
+
+#ifndef DEFAULT_GDB_EXTENSIONS
+#define DEFAULT_GDB_EXTENSIONS 1
+#endif
+
+extern int rtx_equal_function_value_matters;
+
+#if ! (defined (VMS) || defined (OS2))
+extern char **environ;
+#endif
+extern char *version_string, *language_string;
+
+/* Carry information from ASM_DECLARE_OBJECT_NAME
+ to ASM_FINISH_DECLARE_OBJECT. */
+
+extern int size_directive_output;
+extern tree last_assemble_variable_decl;
+
+extern void init_lex ();
+extern void init_decl_processing ();
+extern void init_obstacks ();
+extern void init_tree_codes ();
+extern void init_rtl ();
+extern void init_regs ();
+extern void init_optabs ();
+extern void init_stmt ();
+extern void init_reg_sets ();
+extern void dump_flow_info ();
+extern void dump_sched_info ();
+extern void dump_local_alloc ();
+
+void rest_of_decl_compilation ();
+void error_with_file_and_line PVPROTO((char *file, int line, char *s, ...));
+void error_with_decl PVPROTO((tree decl, char *s, ...));
+void error_for_asm PVPROTO((rtx insn, char *s, ...));
+void error PVPROTO((char *s, ...));
+void fatal PVPROTO((char *s, ...));
+void warning_with_file_and_line PVPROTO((char *file, int line, char *s, ...));
+void warning_with_decl PVPROTO((tree decl, char *s, ...));
+void warning_for_asm PVPROTO((rtx insn, char *s, ...));
+void warning PVPROTO((char *s, ...));
+void pedwarn PVPROTO((char *s, ...));
+void pedwarn_with_decl PVPROTO((tree decl, char *s, ...));
+void pedwarn_with_file_and_line PVPROTO((char *file, int line, char *s, ...));
+void sorry PVPROTO((char *s, ...));
+void really_sorry PVPROTO((char *s, ...));
+void fancy_abort ();
+#ifndef abort
+void abort ();
+#endif
+void set_target_switch ();
+static void print_switch_values ();
+static char *decl_name ();
+
+/* Name of program invoked, sans directories. */
+
+char *progname;
+
+/* Copy of arguments to main. */
+int save_argc;
+char **save_argv;
+
+/* Name of current original source file (what was input to cpp).
+ This comes from each #-command in the actual input. */
+
+char *input_filename;
+
+/* Name of top-level original source file (what was input to cpp).
+ This comes from the #-command at the beginning of the actual input.
+ If there isn't any there, then this is the cc1 input file name. */
+
+char *main_input_filename;
+
+/* Stream for reading from the input file. */
+
+FILE *finput;
+
+/* Current line number in real source file. */
+
+int lineno;
+
+/* Stack of currently pending input files. */
+
+struct file_stack *input_file_stack;
+
+/* Incremented on each change to input_file_stack. */
+int input_file_stack_tick;
+
+/* FUNCTION_DECL for function now being parsed or compiled. */
+
+extern tree current_function_decl;
+
+/* Name to use as base of names for dump output files. */
+
+char *dump_base_name;
+
+/* Bit flags that specify the machine subtype we are compiling for.
+ Bits are tested using macros TARGET_... defined in the tm.h file
+ and set by `-m...' switches. Must be defined in rtlanal.c. */
+
+extern int target_flags;
+
+/* Flags saying which kinds of debugging dump have been requested. */
+
+int rtl_dump = 0;
+int rtl_dump_and_exit = 0;
+int jump_opt_dump = 0;
+int cse_dump = 0;
+int loop_dump = 0;
+int cse2_dump = 0;
+int flow_dump = 0;
+int combine_dump = 0;
+int sched_dump = 0;
+int local_reg_dump = 0;
+int global_reg_dump = 0;
+int sched2_dump = 0;
+int jump2_opt_dump = 0;
+int dbr_sched_dump = 0;
+int flag_print_asm_name = 0;
+int stack_reg_dump = 0;
+
+/* Name for output file of assembly code, specified with -o. */
+
+char *asm_file_name;
+
+/* Value of the -G xx switch, and whether it was passed or not. */
+int g_switch_value;
+int g_switch_set;
+
+/* Type(s) of debugging information we are producing (if any).
+ See flags.h for the definitions of the different possible
+ types of debugging information. */
+enum debug_info_type write_symbols = NO_DEBUG;
+
+/* Level of debugging information we are producing. See flags.h
+ for the definitions of the different possible levels. */
+enum debug_info_level debug_info_level = DINFO_LEVEL_NONE;
+
+/* Nonzero means use GNU-only extensions in the generated symbolic
+ debugging information. */
+/* Currently, this only has an effect when write_symbols is set to
+ DBX_DEBUG, XCOFF_DEBUG, or DWARF_DEBUG. */
+int use_gnu_debug_info_extensions = 0;
+
+/* Nonzero means do optimizations. -O.
+ Particular numeric values stand for particular amounts of optimization;
+ thus, -O2 stores 2 here. However, the optimizations beyond the basic
+ ones are not controlled directly by this variable. Instead, they are
+ controlled by individual `flag_...' variables that are defaulted
+ based on this variable. */
+
+int optimize = 0;
+
+/* Number of error messages and warning messages so far. */
+
+int errorcount = 0;
+int warningcount = 0;
+int sorrycount = 0;
+
+/* Flag to output bytecode instead of native assembler */
+int output_bytecode = 0;
+
+/* Pointer to function to compute the name to use to print a declaration. */
+
+char *(*decl_printable_name) ();
+
+/* Pointer to function to compute rtl for a language-specific tree code. */
+
+struct rtx_def *(*lang_expand_expr) ();
+
+/* Pointer to function to finish handling an incomplete decl at the
+ end of compilation. */
+
+void (*incomplete_decl_finalize_hook) () = 0;
+
+/* Pointer to function for interim exception handling implementation.
+ This interface will change, and it is only here until a better interface
+ replaces it. */
+
+void (*interim_eh_hook) PROTO((tree));
+
+/* Nonzero if generating code to do profiling. */
+
+int profile_flag = 0;
+
+/* Nonzero if generating code to do profiling on a line-by-line basis. */
+
+int profile_block_flag;
+
+/* Nonzero for -pedantic switch: warn about anything
+ that standard spec forbids. */
+
+int pedantic = 0;
+
+/* Temporarily suppress certain warnings.
+ This is set while reading code from a system header file. */
+
+int in_system_header = 0;
+
+/* Nonzero means do stupid register allocation.
+ Currently, this is 1 if `optimize' is 0. */
+
+int obey_regdecls = 0;
+
+/* Don't print functions as they are compiled and don't print
+ times taken by the various passes. -quiet. */
+
+int quiet_flag = 0;
+
+/* -f flags. */
+
+/* Nonzero means `char' should be signed. */
+
+int flag_signed_char;
+
+/* Nonzero means give an enum type only as many bytes as it needs. */
+
+int flag_short_enums;
+
+/* Nonzero for -fcaller-saves: allocate values in regs that need to
+ be saved across function calls, if that produces overall better code.
+ Optional now, so people can test it. */
+
+#ifdef DEFAULT_CALLER_SAVES
+int flag_caller_saves = 1;
+#else
+int flag_caller_saves = 0;
+#endif
+
+/* Nonzero if structures and unions should be returned in memory.
+
+ This should only be defined if compatibility with another compiler or
+ with an ABI is needed, because it results in slower code. */
+
+#ifndef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 1
+#endif
+
+/* Nonzero for -fpcc-struct-return: return values the same way PCC does. */
+
+int flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
+
+/* Nonzero for -fforce-mem: load memory value into a register
+ before arithmetic on it. This makes better cse but slower compilation. */
+
+int flag_force_mem = 0;
+
+/* Nonzero for -fforce-addr: load memory address into a register before
+ reference to memory. This makes better cse but slower compilation. */
+
+int flag_force_addr = 0;
+
+/* Nonzero for -fdefer-pop: don't pop args after each function call;
+ instead save them up to pop many calls' args with one insns. */
+
+int flag_defer_pop = 0;
+
+/* Nonzero for -ffloat-store: don't allocate floats and doubles
+ in extended-precision registers. */
+
+int flag_float_store = 0;
+
+/* Nonzero for -fcse-follow-jumps:
+ have cse follow jumps to do a more extensive job. */
+
+int flag_cse_follow_jumps;
+
+/* Nonzero for -fcse-skip-blocks:
+ have cse follow a branch around a block. */
+int flag_cse_skip_blocks;
+
+/* Nonzero for -fexpensive-optimizations:
+ perform miscellaneous relatively-expensive optimizations. */
+int flag_expensive_optimizations;
+
+/* Nonzero for -fthread-jumps:
+ have jump optimize output of loop. */
+
+int flag_thread_jumps;
+
+/* Nonzero enables strength-reduction in loop.c. */
+
+int flag_strength_reduce = 0;
+
+/* Nonzero enables loop unrolling in unroll.c. Only loops for which the
+ number of iterations can be calculated at compile-time (UNROLL_COMPLETELY,
+ UNROLL_MODULO) or at run-time (preconditioned to be UNROLL_MODULO) are
+ unrolled. */
+
+int flag_unroll_loops;
+
+/* Nonzero enables loop unrolling in unroll.c. All loops are unrolled.
+ This is generally not a win. */
+
+int flag_unroll_all_loops;
+
+/* Nonzero for -fwritable-strings:
+ store string constants in data segment and don't uniquize them. */
+
+int flag_writable_strings = 0;
+
+/* Nonzero means don't put addresses of constant functions in registers.
+ Used for compiling the Unix kernel, where strange substitutions are
+ done on the assembly output. */
+
+int flag_no_function_cse = 0;
+
+/* Nonzero for -fomit-frame-pointer:
+ don't make a frame pointer in simple functions that don't require one. */
+
+int flag_omit_frame_pointer = 0;
+
+/* Nonzero to inhibit use of define_optimization peephole opts. */
+
+int flag_no_peephole = 0;
+
+/* Nonzero allows GCC to violate some IEEE or ANSI rules regarding math
+ operations in the interest of optimization. For example it allows
+ GCC to assume arguments to sqrt are nonnegative numbers, allowing
+ faster code for sqrt to be generated. */
+
+int flag_fast_math = 0;
+
+/* Nonzero means all references through pointers are volatile. */
+
+int flag_volatile;
+
+/* Nonzero means treat all global and extern variables as global. */
+
+int flag_volatile_global;
+
+/* Nonzero means just do syntax checking; don't output anything. */
+
+int flag_syntax_only = 0;
+
+/* Nonzero means to rerun cse after loop optimization. This increases
+ compilation time about 20% and picks up a few more common expressions. */
+
+static int flag_rerun_cse_after_loop;
+
+/* Nonzero for -finline-functions: ok to inline functions that look like
+ good inline candidates. */
+
+int flag_inline_functions;
+
+/* Nonzero for -fkeep-inline-functions: even if we make a function
+ go inline everywhere, keep its definition around for debugging
+ purposes. */
+
+int flag_keep_inline_functions;
+
+/* Nonzero means that functions declared `inline' will be treated
+ as `static'. Prevents generation of zillions of copies of unused
+ static inline functions; instead, `inlines' are written out
+ only when actually used. Used in conjunction with -g. Also
+ does the right thing with #pragma interface. */
+
+int flag_no_inline;
+
+/* Nonzero means we should be saving declaration info into a .X file. */
+
+int flag_gen_aux_info = 0;
+
+/* Specified name of aux-info file. */
+
+static char *aux_info_file_name;
+
+/* Nonzero means make the text shared if supported. */
+
+int flag_shared_data;
+
+/* Nonzero means schedule into delayed branch slots if supported. */
+
+int flag_delayed_branch;
+
+/* Nonzero means to run cleanups after CALL_EXPRs. */
+
+int flag_short_temps;
+
+/* Nonzero if we are compiling pure (sharable) code.
+ Value is 1 if we are doing reasonable (i.e. simple
+ offset into offset table) pic. Value is 2 if we can
+ only perform register offsets. */
+
+int flag_pic;
+
+/* Nonzero means place uninitialized global data in the bss section. */
+
+int flag_no_common;
+
+/* Nonzero means pretend it is OK to examine bits of target floats,
+ even if that isn't true. The resulting code will have incorrect constants,
+ but the same series of instructions that the native compiler would make. */
+
+int flag_pretend_float;
+
+/* Nonzero means change certain warnings into errors.
+ Usually these are warnings about failure to conform to some standard. */
+
+int flag_pedantic_errors = 0;
+
+/* flag_schedule_insns means schedule insns within basic blocks (before
+ local_alloc).
+ flag_schedule_insns_after_reload means schedule insns after
+ global_alloc. */
+
+int flag_schedule_insns = 0;
+int flag_schedule_insns_after_reload = 0;
+
+/* -finhibit-size-directive inhibits output of .size for ELF.
+ This is used only for compiling crtstuff.c,
+ and it may be extended to other effects
+ needed for crtstuff.c on other systems. */
+int flag_inhibit_size_directive = 0;
+
+/* -fverbose-asm causes extra commentary information to be produced in
+ the generated assembly code (to make it more readable). This option
+ is generally only of use to those who actually need to read the
+ generated assembly code (perhaps while debugging the compiler itself). */
+
+int flag_verbose_asm = 0;
+
+/* -fgnu-linker specifies use of the GNU linker for initializations.
+ (Or, more generally, a linker that handles initializations.)
+ -fno-gnu-linker says that collect2 will be used. */
+#ifdef USE_COLLECT2
+int flag_gnu_linker = 0;
+#else
+int flag_gnu_linker = 1;
+#endif
+
+/* Table of language-independent -f options.
+ STRING is the option name. VARIABLE is the address of the variable.
+ ON_VALUE is the value to store in VARIABLE
+ if `-fSTRING' is seen as an option.
+ (If `-fno-STRING' is seen as an option, the opposite value is stored.) */
+
+struct { char *string; int *variable; int on_value;} f_options[] =
+{
+ {"float-store", &flag_float_store, 1},
+ {"volatile", &flag_volatile, 1},
+ {"volatile-global", &flag_volatile_global, 1},
+ {"defer-pop", &flag_defer_pop, 1},
+ {"omit-frame-pointer", &flag_omit_frame_pointer, 1},
+ {"cse-follow-jumps", &flag_cse_follow_jumps, 1},
+ {"cse-skip-blocks", &flag_cse_skip_blocks, 1},
+ {"expensive-optimizations", &flag_expensive_optimizations, 1},
+ {"thread-jumps", &flag_thread_jumps, 1},
+ {"strength-reduce", &flag_strength_reduce, 1},
+ {"unroll-loops", &flag_unroll_loops, 1},
+ {"unroll-all-loops", &flag_unroll_all_loops, 1},
+ {"writable-strings", &flag_writable_strings, 1},
+ {"peephole", &flag_no_peephole, 0},
+ {"force-mem", &flag_force_mem, 1},
+ {"force-addr", &flag_force_addr, 1},
+ {"function-cse", &flag_no_function_cse, 0},
+ {"inline-functions", &flag_inline_functions, 1},
+ {"keep-inline-functions", &flag_keep_inline_functions, 1},
+ {"inline", &flag_no_inline, 0},
+ {"syntax-only", &flag_syntax_only, 1},
+ {"shared-data", &flag_shared_data, 1},
+ {"caller-saves", &flag_caller_saves, 1},
+ {"pcc-struct-return", &flag_pcc_struct_return, 1},
+ {"reg-struct-return", &flag_pcc_struct_return, 0},
+ {"delayed-branch", &flag_delayed_branch, 1},
+ {"rerun-cse-after-loop", &flag_rerun_cse_after_loop, 1},
+ {"pretend-float", &flag_pretend_float, 1},
+ {"schedule-insns", &flag_schedule_insns, 1},
+ {"schedule-insns2", &flag_schedule_insns_after_reload, 1},
+ {"pic", &flag_pic, 1},
+ {"PIC", &flag_pic, 2},
+ {"fast-math", &flag_fast_math, 1},
+ {"common", &flag_no_common, 0},
+ {"inhibit-size-directive", &flag_inhibit_size_directive, 1},
+ {"verbose-asm", &flag_verbose_asm, 1},
+ {"gnu-linker", &flag_gnu_linker, 1},
+ {"bytecode", &output_bytecode, 1}
+};
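+
+/* Illustrative sketch (an assumption, not code from this file): the
+ option decoder is expected to consume this table roughly as follows,
+ with P pointing just past the "-f" and a "no-" prefix selecting the
+ opposite of ON_VALUE:
+
+   for (j = 0; j < sizeof (f_options) / sizeof (f_options[0]); j++)
+     if (!strcmp (p, f_options[j].string))
+       *f_options[j].variable = f_options[j].on_value;
+     else if (!strncmp (p, "no-", 3)
+              && !strcmp (p + 3, f_options[j].string))
+       *f_options[j].variable = ! f_options[j].on_value;  */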
+
+/* Table of language-specific options. */
+
+char *lang_options[] =
+{
+ "-ansi",
+ "-fallow-single-precision",
+
+ "-fsigned-bitfields",
+ "-funsigned-bitfields",
+ "-fno-signed-bitfields",
+ "-fno-unsigned-bitfields",
+ "-fsigned-char",
+ "-funsigned-char",
+ "-fno-signed-char",
+ "-fno-unsigned-char",
+
+ "-ftraditional",
+ "-traditional",
+ "-fnotraditional",
+ "-fno-traditional",
+
+ "-fasm",
+ "-fno-asm",
+ "-fbuiltin",
+ "-fno-builtin",
+ "-fcond-mismatch",
+ "-fno-cond-mismatch",
+ "-fdollars-in-identifiers",
+ "-fno-dollars-in-identifiers",
+ "-fident",
+ "-fno-ident",
+ "-fshort-double",
+ "-fno-short-double",
+ "-fshort-enums",
+ "-fno-short-enums",
+
+ "-Wall",
+ "-Wbad-function-cast",
+ "-Wno-bad-function-cast",
+ "-Wcast-qual",
+ "-Wno-cast-qual",
+ "-Wchar-subscripts",
+ "-Wno-char-subscripts",
+ "-Wcomment",
+ "-Wno-comment",
+ "-Wcomments",
+ "-Wno-comments",
+ "-Wconversion",
+ "-Wno-conversion",
+ "-Wformat",
+ "-Wno-format",
+ "-Wimport",
+ "-Wno-import",
+ "-Wimplicit",
+ "-Wno-implicit",
+ "-Wmissing-braces",
+ "-Wno-missing-braces",
+ "-Wmissing-declarations",
+ "-Wno-missing-declarations",
+ "-Wmissing-prototypes",
+ "-Wno-missing-prototypes",
+ "-Wnested-externs",
+ "-Wno-nested-externs",
+ "-Wparentheses",
+ "-Wno-parentheses",
+ "-Wpointer-arith",
+ "-Wno-pointer-arith",
+ "-Wredundant-decls",
+ "-Wno-redundant-decls",
+ "-Wstrict-prototypes",
+ "-Wno-strict-prototypes",
+ "-Wtraditional",
+ "-Wno-traditional",
+ "-Wtrigraphs",
+ "-Wno-trigraphs",
+ "-Wwrite-strings",
+ "-Wno-write-strings",
+
+ /* These are for C++. */
+ "-+e0", /* gcc.c tacks the `-' on the front. */
+ "-+e1",
+ "-+e2",
+ "-fall-virtual",
+ "-fno-all-virtual",
+ "-falt-external-templates",
+ "-fno-alt-external-templates",
+ "-fansi-overloading",
+ "-fno-ansi-overloading",
+ "-fcadillac",
+ "-fno-cadillac",
+ "-fconserve-space",
+ "-fno-conserve-space",
+ "-fdefault-inline",
+ "-fno-default-inline",
+ "-fdossier",
+ "-fno-dossier",
+ "-felide-constructors",
+ "-fno-elide-constructors",
+ "-fenum-int-equiv",
+ "-fno-enum-int-equiv",
+ "-fexternal-templates",
+ "-fno-external-templates",
+ "-fgc",
+ "-fno-gc",
+ "-fhandle-exceptions",
+ "-fno-handle-exceptions",
+ "-fhandle-signatures",
+ "-fno-handle-signatures",
+ "-fhuge-objects",
+ "-fno-huge-objects",
+ "-fimplement-inlines",
+ "-fno-implement-inlines",
+ "-fimplicit-templates",
+ "-fno-implicit-templates",
+ "-flabels-ok",
+ "-fno-labels-ok",
+ "-fmemoize-lookups",
+ "-fno-memoize-lookups",
+ "-fnonnull-objects",
+ "-fno-nonnull-objects",
+ "-fsave-memoized",
+ "-fno-save-memoized",
+ "-fshort-temps",
+ "-fno-short-temps",
+ "-fstats",
+ "-fno-stats",
+ "-fstrict-prototype",
+ "-fno-strict-prototype",
+ "-fthis-is-variable",
+ "-fno-this-is-variable",
+ "-fvtable-thunks",
+ "-fno-vtable-thunks",
+ "-fxref",
+ "-fno-xref",
+
+ "-Wreturn-type",
+ "-Wno-return-type",
+ "-Woverloaded-virtual",
+ "-Wno-overloaded-virtual",
+ "-Wenum-clash",
+ "-Wno-enum-clash",
+ "-Wtemplate-debugging",
+ "-Wno-template-debugging",
+ "-Wctor-dtor-privacy",
+ "-Wno-ctor-dtor-privacy",
+ "-Wnon-virtual-dtor",
+ "-Wno-non-virtual-dtor",
+ "-Wextern-inline",
+ "-Wno-extern-inline",
+
+ /* These are for Objective C. */
+ "-lang-objc",
+ "-gen-decls",
+ "-fgnu-runtime",
+ "-fno-gnu-runtime",
+ "-fnext-runtime",
+ "-fno-next-runtime",
+ "-Wselector",
+ "-Wno-selector",
+ "-Wprotocol",
+ "-Wno-protocol",
+
+ /* This is for GNAT and is temporary. */
+ "-gnat",
+ 0
+};
+
+/* Options controlling warnings */
+
+/* Don't print warning messages. -w. */
+
+int inhibit_warnings = 0;
+
+/* Print various extra warnings. -W. */
+
+int extra_warnings = 0;
+
+/* Treat warnings as errors. -Werror. */
+
+int warnings_are_errors = 0;
+
+/* Nonzero to warn about unused local variables. */
+
+int warn_unused;
+
+/* Nonzero to warn about variables used before they are initialized. */
+
+int warn_uninitialized;
+
+/* Nonzero means warn about all declarations which shadow others. */
+
+int warn_shadow;
+
+/* Warn if a switch on an enum fails to have a case for every enum value. */
+
+int warn_switch;
+
+/* Nonzero means warn about function definitions that default the return type
+ or that use a null return and have a return-type other than void. */
+
+int warn_return_type;
+
+/* Nonzero means warn about pointer casts that increase the required
+ alignment of the target type (and might therefore lead to a crash
+ due to a misaligned access). */
+
+int warn_cast_align;
+
+/* Nonzero means warn about any identifiers that match in the first N
+ characters. The value N is in `id_clash_len'. */
+
+int warn_id_clash;
+unsigned id_clash_len;
+
+/* Nonzero means warn about any object definitions whose size is larger
+ than N bytes. Also warn about function definitions whose returned
+ values are larger than N bytes. The value N is in `larger_than_size'. */
+
+int warn_larger_than;
+unsigned larger_than_size;
+
+/* Nonzero means warn if inline function is too large. */
+
+int warn_inline;
+
+/* Warn if a function returns an aggregate,
+ since there are often incompatible calling conventions for doing this. */
+
+int warn_aggregate_return;
+
+/* Likewise for -W. */
+
+struct { char *string; int *variable; int on_value;} W_options[] =
+{
+ {"unused", &warn_unused, 1},
+ {"error", &warnings_are_errors, 1},
+ {"shadow", &warn_shadow, 1},
+ {"switch", &warn_switch, 1},
+ {"aggregate-return", &warn_aggregate_return, 1},
+ {"cast-align", &warn_cast_align, 1},
+ {"uninitialized", &warn_uninitialized, 1},
+ {"inline", &warn_inline, 1}
+};
+
+/* Output files for assembler code (real compiler output)
+ and debugging dumps. */
+
+FILE *asm_out_file;
+FILE *aux_info_file;
+FILE *rtl_dump_file;
+FILE *jump_opt_dump_file;
+FILE *cse_dump_file;
+FILE *loop_dump_file;
+FILE *cse2_dump_file;
+FILE *flow_dump_file;
+FILE *combine_dump_file;
+FILE *sched_dump_file;
+FILE *local_reg_dump_file;
+FILE *global_reg_dump_file;
+FILE *sched2_dump_file;
+FILE *jump2_opt_dump_file;
+FILE *dbr_sched_dump_file;
+FILE *stack_reg_dump_file;
+
+/* Time accumulators, to count the total time spent in various passes. */
+
+int parse_time;
+int varconst_time;
+int integration_time;
+int jump_time;
+int cse_time;
+int loop_time;
+int cse2_time;
+int flow_time;
+int combine_time;
+int sched_time;
+int local_alloc_time;
+int global_alloc_time;
+int sched2_time;
+int dbr_sched_time;
+int shorten_branch_time;
+int stack_reg_time;
+int final_time;
+int symout_time;
+int dump_time;
+
+/* Return time used so far, in microseconds. */
+
+int
+get_run_time ()
+{
+#ifdef USG
+ struct tms tms;
+#else
+#ifndef VMS
+ struct rusage rusage;
+#else /* VMS */
+ struct
+ {
+ int proc_user_time;
+ int proc_system_time;
+ int child_user_time;
+ int child_system_time;
+ } vms_times;
+#endif
+#endif
+
+ if (quiet_flag)
+ return 0;
+
+#ifdef USG
+ times (&tms);
+ return (tms.tms_utime + tms.tms_stime) * (1000000 / HZ);
+#else
+#ifndef VMS
+ getrusage (0, &rusage);
+ return (rusage.ru_utime.tv_sec * 1000000 + rusage.ru_utime.tv_usec
+ + rusage.ru_stime.tv_sec * 1000000 + rusage.ru_stime.tv_usec);
+#else /* VMS */
+ times (&vms_times);
+ return (vms_times.proc_user_time + vms_times.proc_system_time) * 10000;
+#endif
+#endif
+}
+
+#define TIMEVAR(VAR, BODY) \
+do { int otime = get_run_time (); BODY; VAR += get_run_time () - otime; } while (0)
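+
+/* For example, TIMEVAR (dump_time, print_rtl (rtl_dump_file, insns));
+ runs the dump and charges the elapsed run time to dump_time, as in
+ rest_of_compilation below. */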
+
+void
+print_time (str, total)
+ char *str;
+ int total;
+{
+ fprintf (stderr,
+ "time in %s: %d.%06d\n",
+ str, total / 1000000, total % 1000000);
+}
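+
+/* E.g. print_time ("parse", 1234567) writes "time in parse: 1.234567"
+ to stderr. */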
+
+/* Count an error or warning. Return 1 if the message should be printed. */
+
+int
+count_error (warningp)
+ int warningp;
+{
+ if (warningp && inhibit_warnings)
+ return 0;
+
+ if (warningp && !warnings_are_errors)
+ warningcount++;
+ else
+ {
+ static int warning_message = 0;
+
+ if (warningp && !warning_message)
+ {
+ fprintf (stderr, "%s: warnings being treated as errors\n", progname);
+ warning_message = 1;
+ }
+ errorcount++;
+ }
+
+ return 1;
+}
+
+/* Print a fatal error message. NAME is the text.
+ Also include a system error message based on `errno'. */
+
+void
+pfatal_with_name (name)
+ char *name;
+{
+ fprintf (stderr, "%s: ", progname);
+ perror (name);
+ exit (35);
+}
+
+void
+fatal_io_error (name)
+ char *name;
+{
+ fprintf (stderr, "%s: %s: I/O error\n", progname, name);
+ exit (35);
+}
+
+/* Called to give a better error message when we don't have an insn to match
+ what we are looking for or if the insn's constraints aren't satisfied,
+ rather than just calling abort(). */
+
+void
+fatal_insn_not_found (insn)
+ rtx insn;
+{
+ if (!output_bytecode)
+ {
+ if (INSN_CODE (insn) < 0)
+ error ("internal error--unrecognizable insn:");
+ else
+ error ("internal error--insn does not satisfy its constraints:");
+ debug_rtx (insn);
+ }
+ if (asm_out_file)
+ fflush (asm_out_file);
+ if (aux_info_file)
+ fflush (aux_info_file);
+ if (rtl_dump_file)
+ fflush (rtl_dump_file);
+ if (jump_opt_dump_file)
+ fflush (jump_opt_dump_file);
+ if (cse_dump_file)
+ fflush (cse_dump_file);
+ if (loop_dump_file)
+ fflush (loop_dump_file);
+ if (cse2_dump_file)
+ fflush (cse2_dump_file);
+ if (flow_dump_file)
+ fflush (flow_dump_file);
+ if (combine_dump_file)
+ fflush (combine_dump_file);
+ if (sched_dump_file)
+ fflush (sched_dump_file);
+ if (local_reg_dump_file)
+ fflush (local_reg_dump_file);
+ if (global_reg_dump_file)
+ fflush (global_reg_dump_file);
+ if (sched2_dump_file)
+ fflush (sched2_dump_file);
+ if (jump2_opt_dump_file)
+ fflush (jump2_opt_dump_file);
+ if (dbr_sched_dump_file)
+ fflush (dbr_sched_dump_file);
+ if (stack_reg_dump_file)
+ fflush (stack_reg_dump_file);
+ abort ();
+}
+
+/* This is the default decl_printable_name function. */
+
+static char *
+decl_name (decl, kind)
+ tree decl;
+ char **kind;
+{
+ return IDENTIFIER_POINTER (DECL_NAME (decl));
+}
+
+/* This is the default interim_eh_hook function. */
+
+void
+interim_eh (finalization)
+ tree finalization;
+{
+ /* Don't do anything by default. */
+}
+
+static int need_error_newline;
+
+/* Function of the last error message;
+ more generally, the function such that if the next error message
+ is in it, then we don't have to mention the function name. */
+static tree last_error_function = NULL;
+
+/* Used to detect when input_file_stack has changed since last described. */
+static int last_error_tick;
+
+/* Called when the start of a function definition is parsed,
+ this function prints on stderr the name of the function. */
+
+void
+announce_function (decl)
+ tree decl;
+{
+ if (! quiet_flag)
+ {
+ char *junk;
+ if (rtl_dump_and_exit)
+ fprintf (stderr, "%s ", IDENTIFIER_POINTER (DECL_NAME (decl)));
+ else
+ fprintf (stderr, " %s", (*decl_printable_name) (decl, &junk));
+ fflush (stderr);
+ need_error_newline = 1;
+ last_error_function = current_function_decl;
+ }
+}
+
+/* Prints out, if necessary, the name of the current function
+ which caused an error. Called from all error and warning functions. */
+
+void
+report_error_function (file)
+ char *file;
+{
+ struct file_stack *p;
+
+ if (need_error_newline)
+ {
+ fprintf (stderr, "\n");
+ need_error_newline = 0;
+ }
+
+ if (last_error_function != current_function_decl)
+ {
+ char *kind = "function";
+ if (current_function_decl != 0
+ && TREE_CODE (TREE_TYPE (current_function_decl)) == METHOD_TYPE)
+ kind = "method";
+
+ if (file)
+ fprintf (stderr, "%s: ", file);
+
+ if (current_function_decl == NULL)
+ fprintf (stderr, "At top level:\n");
+ else
+ {
+ char *name = (*decl_printable_name) (current_function_decl, &kind);
+ fprintf (stderr, "In %s `%s':\n", kind, name);
+ }
+
+ last_error_function = current_function_decl;
+ }
+ if (input_file_stack && input_file_stack->next != 0
+ && input_file_stack_tick != last_error_tick)
+ {
+ fprintf (stderr, "In file included");
+ for (p = input_file_stack->next; p; p = p->next)
+ {
+ fprintf (stderr, " from %s:%d", p->name, p->line);
+ if (p->next)
+ fprintf (stderr, ",\n ");
+ }
+ fprintf (stderr, ":\n");
+ last_error_tick = input_file_stack_tick;
+ }
+}
+
+/* Print a message. */
+
+static void
+vmessage (prefix, s, ap)
+ char *prefix;
+ char *s;
+ va_list ap;
+{
+ if (prefix)
+ fprintf (stderr, "%s: ", prefix);
+
+#ifdef HAVE_VPRINTF
+ vfprintf (stderr, s, ap);
+#else
+ {
+ /* No vfprintf available: assume the format uses at most three
+ word-sized arguments and hand them to fprintf directly. */
+ HOST_WIDE_INT v1 = va_arg(ap, HOST_WIDE_INT);
+ HOST_WIDE_INT v2 = va_arg(ap, HOST_WIDE_INT);
+ HOST_WIDE_INT v3 = va_arg(ap, HOST_WIDE_INT);
+ fprintf (stderr, s, v1, v2, v3);
+ }
+#endif
+}
+
+/* Print a message relevant to line LINE of file FILE. */
+
+static void
+v_message_with_file_and_line (file, line, prefix, s, ap)
+ char *file;
+ int line;
+ char *prefix;
+ char *s;
+ va_list ap;
+{
+ if (file)
+ fprintf (stderr, "%s:%d: ", file, line);
+ else
+ fprintf (stderr, "%s: ", progname);
+
+ vmessage (prefix, s, ap);
+ fputc ('\n', stderr);
+}
+
+/* Print a message relevant to the given DECL. */
+
+static void
+v_message_with_decl (decl, prefix, s, ap)
+ tree decl;
+ char *prefix;
+ char *s;
+ va_list ap;
+{
+ char *n, *p, *junk;
+
+ fprintf (stderr, "%s:%d: ",
+ DECL_SOURCE_FILE (decl), DECL_SOURCE_LINE (decl));
+
+ if (prefix)
+ fprintf (stderr, "%s: ", prefix);
+
+ /* Do magic to get around lack of varargs support for insertion
+ of arguments into existing list. We know that the decl is first;
+ we ass_u_me that it will be printed with "%s". */
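+
+ /* For example: with S == "`%s' defined but not used" and a decl
+ named `foo', the code below prints the text before the `%s',
+ then "foo", then the rest of the format with the args in AP. */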
+
+ for (p = s; *p; ++p)
+ {
+ if (*p == '%')
+ {
+ if (*(p + 1) == '%')
+ ++p;
+ else
+ break;
+ }
+ }
+
+ if (p > s) /* Print the left-hand substring. */
+ {
+ char fmt[sizeof "%.255s"];
+ long width = p - s;
+
+ if (width > 255L) width = 255L; /* arbitrary */
+ sprintf (fmt, "%%.%lds", width);
+ fprintf (stderr, fmt, s);
+ }
+
+ if (*p == '%') /* Print the name. */
+ {
+ char *n = (DECL_NAME (decl)
+ ? (*decl_printable_name) (decl, &junk)
+ : "((anonymous))");
+ fputs (n, stderr);
+ while (*p)
+ {
+ ++p;
+ if (isalpha (*(p - 1) & 0xFF))
+ break;
+ }
+ }
+
+ if (*p) /* Print the rest of the message. */
+ vmessage ((char *)NULL, p, ap);
+
+ fputc ('\n', stderr);
+}
+
+/* Figure file and line of the given INSN. */
+
+static void
+file_and_line_for_asm (insn, pfile, pline)
+ rtx insn;
+ char **pfile;
+ int *pline;
+{
+ rtx body = PATTERN (insn);
+ rtx asmop;
+
+ /* Find the (or one of the) ASM_OPERANDS in the insn. */
+ if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS)
+ asmop = SET_SRC (body);
+ else if (GET_CODE (body) == ASM_OPERANDS)
+ asmop = body;
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == SET)
+ asmop = SET_SRC (XVECEXP (body, 0, 0));
+ else if (GET_CODE (body) == PARALLEL
+ && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
+ asmop = XVECEXP (body, 0, 0);
+ else
+ asmop = NULL;
+
+ if (asmop)
+ {
+ *pfile = ASM_OPERANDS_SOURCE_FILE (asmop);
+ *pline = ASM_OPERANDS_SOURCE_LINE (asmop);
+ }
+ else
+ {
+ *pfile = input_filename;
+ *pline = lineno;
+ }
+}
+
+/* Report an error at line LINE of file FILE. */
+
+static void
+v_error_with_file_and_line (file, line, s, ap)
+ char *file;
+ int line;
+ char *s;
+ va_list ap;
+{
+ count_error (0);
+ report_error_function (file);
+ v_message_with_file_and_line (file, line, (char *)NULL, s, ap);
+}
+
+void
+error_with_file_and_line VPROTO((char *file, int line, char *s, ...))
+{
+#ifndef __STDC__
+ char *file;
+ int line;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ file = va_arg (ap, char *);
+ line = va_arg (ap, int);
+ s = va_arg (ap, char *);
+#endif
+
+ v_error_with_file_and_line (file, line, s, ap);
+ va_end (ap);
+}
+
+/* Report an error at the declaration DECL.
+ S is a format string which uses %s to substitute the declaration
+ name; subsequent substitutions are a la printf. */
+
+static void
+v_error_with_decl (decl, s, ap)
+ tree decl;
+ char *s;
+ va_list ap;
+{
+ count_error (0);
+ report_error_function (DECL_SOURCE_FILE (decl));
+ v_message_with_decl (decl, (char *)NULL, s, ap);
+}
+
+void
+error_with_decl VPROTO((tree decl, char *s, ...))
+{
+#ifndef __STDC__
+ tree decl;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ decl = va_arg (ap, tree);
+ s = va_arg (ap, char *);
+#endif
+
+ v_error_with_decl (decl, s, ap);
+ va_end (ap);
+}
+
+/* Report an error at the line number of the insn INSN.
+ This is used only when INSN is an `asm' with operands,
+ and each ASM_OPERANDS records its own source file and line. */
+
+static void
+v_error_for_asm (insn, s, ap)
+ rtx insn;
+ char *s;
+ va_list ap;
+{
+ char *file;
+ int line;
+
+ count_error (0);
+ file_and_line_for_asm (insn, &file, &line);
+ report_error_function (file);
+ v_message_with_file_and_line (file, line, (char *)NULL, s, ap);
+}
+
+void
+error_for_asm VPROTO((rtx insn, char *s, ...))
+{
+#ifndef __STDC__
+ rtx insn;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ insn = va_arg (ap, rtx);
+ s = va_arg (ap, char *);
+#endif
+
+ v_error_for_asm (insn, s, ap);
+ va_end (ap);
+}
+
+/* Report an error at the current line number. */
+
+static void
+verror (s, ap)
+ char *s;
+ va_list ap;
+{
+ v_error_with_file_and_line (input_filename, lineno, s, ap);
+}
+
+void
+error VPROTO((char *s, ...))
+{
+#ifndef __STDC__
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ s = va_arg (ap, char *);
+#endif
+
+ verror (s, ap);
+ va_end (ap);
+}
+
+/* Report a fatal error at the current line number. */
+
+static void
+vfatal (s, ap)
+ char *s;
+ va_list ap;
+{
+ verror (s, ap);
+ exit (34);
+}
+
+void
+fatal VPROTO((char *s, ...))
+{
+#ifndef __STDC__
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ s = va_arg (ap, char *);
+#endif
+
+ vfatal (s, ap);
+ va_end (ap);
+}
+
+/* Report a warning at line LINE of file FILE. */
+
+static void
+v_warning_with_file_and_line (file, line, s, ap)
+ char *file;
+ int line;
+ char *s;
+ va_list ap;
+{
+ if (count_error (1))
+ {
+ report_error_function (file);
+ v_message_with_file_and_line (file, line, "warning", s, ap);
+ }
+}
+
+void
+warning_with_file_and_line VPROTO((char *file, int line, char *s, ...))
+{
+#ifndef __STDC__
+ char *file;
+ int line;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ file = va_arg (ap, char *);
+ line = va_arg (ap, int);
+ s = va_arg (ap, char *);
+#endif
+
+ v_warning_with_file_and_line (file, line, s, ap);
+ va_end (ap);
+}
+
+/* Report a warning at the declaration DECL.
+ S is a format string which uses %s to substitute the declaration
+ name; subsequent substitutions are a la printf. */
+
+static void
+v_warning_with_decl (decl, s, ap)
+ tree decl;
+ char *s;
+ va_list ap;
+{
+ if (count_error (1))
+ {
+ report_error_function (DECL_SOURCE_FILE (decl));
+ v_message_with_decl (decl, "warning", s, ap);
+ }
+}
+
+void
+warning_with_decl VPROTO((tree decl, char *s, ...))
+{
+#ifndef __STDC__
+ tree decl;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ decl = va_arg (ap, tree);
+ s = va_arg (ap, char *);
+#endif
+
+ v_warning_with_decl (decl, s, ap);
+ va_end (ap);
+}
+
+/* Report a warning at the line number of the insn INSN.
+ This is used only when INSN is an `asm' with operands,
+ and each ASM_OPERANDS records its own source file and line. */
+
+static void
+v_warning_for_asm (insn, s, ap)
+ rtx insn;
+ char *s;
+ va_list ap;
+{
+ if (count_error (1))
+ {
+ char *file;
+ int line;
+
+ file_and_line_for_asm (insn, &file, &line);
+ report_error_function (file);
+ v_message_with_file_and_line (file, line, "warning", s, ap);
+ }
+}
+
+void
+warning_for_asm VPROTO((rtx insn, char *s, ...))
+{
+#ifndef __STDC__
+ rtx insn;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ insn = va_arg (ap, rtx);
+ s = va_arg (ap, char *);
+#endif
+
+ v_warning_for_asm (insn, s, ap);
+ va_end (ap);
+}
+
+/* Report a warning at the current line number. */
+
+static void
+vwarning (s, ap)
+ char *s;
+ va_list ap;
+{
+ v_warning_with_file_and_line (input_filename, lineno, s, ap);
+}
+
+void
+warning VPROTO((char *s, ...))
+{
+#ifndef __STDC__
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ s = va_arg (ap, char *);
+#endif
+
+ vwarning (s, ap);
+ va_end (ap);
+}
+
+/* These functions issue either warnings or errors depending on
+ -pedantic-errors. */
+
+static void
+vpedwarn (s, ap)
+ char *s;
+ va_list ap;
+{
+ if (flag_pedantic_errors)
+ verror (s, ap);
+ else
+ vwarning (s, ap);
+}
+
+void
+pedwarn VPROTO((char *s, ...))
+{
+#ifndef __STDC__
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ s = va_arg (ap, char *);
+#endif
+
+ vpedwarn (s, ap);
+ va_end (ap);
+}
+
+static void
+v_pedwarn_with_decl (decl, s, ap)
+ tree decl;
+ char *s;
+ va_list ap;
+{
+ /* We don't want -pedantic-errors to cause the compilation to fail from
+ "errors" in system header files. Sometimes fixincludes can't fix what's
+ broken (eg: unsigned char bitfields - fixing it may change the alignment
+ which will cause programs to mysteriously fail because the C library
+ or kernel uses the original layout). There's no point in issuing a
+ warning either, it's just unnecessary noise. */
+
+ if (! DECL_IN_SYSTEM_HEADER (decl))
+ {
+ if (flag_pedantic_errors)
+ v_error_with_decl (decl, s, ap);
+ else
+ v_warning_with_decl (decl, s, ap);
+ }
+}
+
+void
+pedwarn_with_decl VPROTO((tree decl, char *s, ...))
+{
+#ifndef __STDC__
+ tree decl;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ decl = va_arg (ap, tree);
+ s = va_arg (ap, char *);
+#endif
+
+ v_pedwarn_with_decl (decl, s, ap);
+ va_end (ap);
+}
+
+static void
+v_pedwarn_with_file_and_line (file, line, s, ap)
+ char *file;
+ int line;
+ char *s;
+ va_list ap;
+{
+ if (flag_pedantic_errors)
+ v_error_with_file_and_line (file, line, s, ap);
+ else
+ v_warning_with_file_and_line (file, line, s, ap);
+}
+
+void
+pedwarn_with_file_and_line VPROTO((char *file, int line, char *s, ...))
+{
+#ifndef __STDC__
+ char *file;
+ int line;
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ file = va_arg (ap, char *);
+ line = va_arg (ap, int);
+ s = va_arg (ap, char *);
+#endif
+
+ v_pedwarn_with_file_and_line (file, line, s, ap);
+ va_end (ap);
+}
+
+/* Apologize for not implementing some feature. */
+
+static void
+vsorry (s, ap)
+ char *s;
+ va_list ap;
+{
+ sorrycount++;
+ if (input_filename)
+ fprintf (stderr, "%s:%d: ", input_filename, lineno);
+ else
+ fprintf (stderr, "%s: ", progname);
+ vmessage ("sorry, not implemented", s, ap);
+ fputc ('\n', stderr);
+}
+
+void
+sorry VPROTO((char *s, ...))
+{
+#ifndef __STDC__
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ s = va_arg (ap, char *);
+#endif
+
+ vsorry (s, ap);
+ va_end (ap);
+}
+
+/* Apologize for not implementing some feature, then quit. */
+
+static void
+v_really_sorry (s, ap)
+ char *s;
+ va_list ap;
+{
+ sorrycount++;
+ if (input_filename)
+ fprintf (stderr, "%s:%d: ", input_filename, lineno);
+ else
+ fprintf (stderr, "%s: ", progname);
+ vmessage ("sorry, not implemented", s, ap);
+ fatal (" (fatal)\n");
+}
+
+void
+really_sorry VPROTO((char *s, ...))
+{
+#ifndef __STDC__
+ char *s;
+#endif
+ va_list ap;
+
+ VA_START (ap, s);
+
+#ifndef __STDC__
+ s = va_arg (ap, char *);
+#endif
+
+ v_really_sorry (s, ap);
+ va_end (ap);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing.
+
+ I don't think this is actually a good idea.
+ Other sorts of crashes will look a certain way.
+ It is a good thing if crashes from calling abort look the same way.
+ -- RMS */
+
+void
+fancy_abort ()
+{
+ fatal ("internal gcc abort");
+}
+
+/* This calls abort and is used to avoid problems when abort is a macro.
+ It is used when we need to pass the address of abort. */
+
+void
+do_abort ()
+{
+ abort ();
+}
+
+/* When `malloc.c' is compiled with `rcheck' defined,
+ it calls this function to report clobberage. */
+
+void
+botch (s)
+ char *s;
+{
+ abort ();
+}
+
+/* Same as `malloc' but report error if no memory available. */
+
+char *
+xmalloc (size)
+ unsigned size;
+{
+ register char *value = (char *) malloc (size);
+ if (value == 0)
+ fatal ("virtual memory exhausted");
+ return value;
+}
+
+/* Same as `realloc' but report error if no memory available. */
+
+char *
+xrealloc (ptr, size)
+ char *ptr;
+ int size;
+{
+ char *result = (char *) realloc (ptr, size);
+ if (!result)
+ fatal ("virtual memory exhausted");
+ return result;
+}
+
+/* Return the logarithm of X, base 2, considering X unsigned,
+ if X is a power of 2. Otherwise, returns -1.
+
+ This should be used via the `exact_log2' macro. */
+
+int
+exact_log2_wide (x)
+ register unsigned HOST_WIDE_INT x;
+{
+ register int log = 0;
+ /* Test for 0 or a power of 2. */
+ if (x == 0 || x != (x & -x))
+ return -1;
+ while ((x >>= 1) != 0)
+ log++;
+ return log;
+}
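+
+/* For example, exact_log2_wide (8) returns 3, while exact_log2_wide (12)
+ returns -1 because 12 is not a power of 2. */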
+
+/* Given X, an unsigned number, return the largest int Y such that 2**Y <= X.
+ If X is 0, return -1.
+
+ This should be used via the floor_log2 macro. */
+
+int
+floor_log2_wide (x)
+ register unsigned HOST_WIDE_INT x;
+{
+ register int log = -1;
+ while (x != 0)
+ log++, x >>= 1;
+ return log;
+}
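+
+/* For example, floor_log2_wide (12) returns 3, since 2**3 <= 12 < 2**4. */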
+
+int float_handled;
+jmp_buf float_handler;
+
+/* Specify where to longjmp to when a floating arithmetic error happens.
+ If HANDLER is 0, it means don't handle the errors any more. */
+
+void
+set_float_handler (handler)
+ jmp_buf handler;
+{
+ float_handled = (handler != 0);
+ if (handler)
+ bcopy ((char *) handler, (char *) float_handler, sizeof (float_handler));
+}
+
+/* Specify, in HANDLER, where to longjmp to when a floating arithmetic
+ error happens, pushing the previous specification into OLD_HANDLER.
+ Return an indication of whether there was a previous handler in effect. */
+
+int
+push_float_handler (handler, old_handler)
+ jmp_buf handler, old_handler;
+{
+ int was_handled = float_handled;
+
+ float_handled = 1;
+ if (was_handled)
+ bcopy ((char *) float_handler, (char *) old_handler,
+ sizeof (float_handler));
+
+ bcopy ((char *) handler, (char *) float_handler, sizeof (float_handler));
+ return was_handled;
+}
+
+/* Restore the previous specification of whether and where to longjmp to
+ when a floating arithmetic error happens. */
+
+void
+pop_float_handler (handled, handler)
+ int handled;
+ jmp_buf handler;
+{
+ float_handled = handled;
+ if (handled)
+ bcopy ((char *) handler, (char *) float_handler, sizeof (float_handler));
+}
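+
+/* Illustrative use of this interface (a sketch; the pairing with
+ setjmp in the caller is assumed, not shown in this file):
+
+   jmp_buf handler;
+
+   if (setjmp (handler))
+     ... the arithmetic below trapped; recover here ...
+   else
+     {
+       set_float_handler (handler);
+       ... do the floating arithmetic ...
+       set_float_handler (0);
+     }
+
+ float_signal below performs the longjmp when SIGFPE arrives. */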
+
+/* Signals actually come here. */
+
+static void
+float_signal (signo)
+ /* If this is missing, some compilers complain. */
+ int signo;
+{
+ if (float_handled == 0)
+ abort ();
+#if defined (USG) || defined (hpux)
+ signal (SIGFPE, float_signal); /* re-enable the signal catcher */
+#endif
+ float_handled = 0;
+ signal (SIGFPE, float_signal);
+ longjmp (float_handler, 1);
+}
+
+/* Handler for SIGPIPE. */
+
+static void
+pipe_closed (signo)
+ /* If this is missing, some compilers complain. */
+ int signo;
+{
+ fatal ("output pipe has been closed");
+}
+
+/* Strip off a legitimate source ending from the input string NAME of
+ length LEN. */
+
+void
+strip_off_ending (name, len)
+ char *name;
+ int len;
+{
+ if (len > 2 && ! strcmp (".c", name + len - 2))
+ name[len - 2] = 0;
+ else if (len > 2 && ! strcmp (".m", name + len - 2))
+ name[len - 2] = 0;
+ else if (len > 2 && ! strcmp (".i", name + len - 2))
+ name[len - 2] = 0;
+ else if (len > 3 && ! strcmp (".ii", name + len - 3))
+ name[len - 3] = 0;
+ else if (len > 3 && ! strcmp (".co", name + len - 3))
+ name[len - 3] = 0;
+ else if (len > 3 && ! strcmp (".cc", name + len - 3))
+ name[len - 3] = 0;
+ else if (len > 2 && ! strcmp (".C", name + len - 2))
+ name[len - 2] = 0;
+ else if (len > 4 && ! strcmp (".cxx", name + len - 4))
+ name[len - 4] = 0;
+ else if (len > 4 && ! strcmp (".cpp", name + len - 4))
+ name[len - 4] = 0;
+ else if (len > 2 && ! strcmp (".f", name + len - 2))
+ name[len - 2] = 0;
+ /* Ada will use extensions like .ada, .adb, and .ads, so just test
+ for ".ad". */
+ else if (len > 4 && ! strncmp (".ad", name + len - 4, 3))
+ name[len - 4] = 0;
+ else if (len > 4 && ! strcmp (".atr", name + len - 4))
+ name[len - 4] = 0;
+}
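+
+/* For example, given NAME == "foo.cxx" and LEN == 7, this truncates
+ NAME to "foo"; a name with an unrecognized ending is left unchanged. */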
+
+/* Output a quoted string. */
+void
+output_quoted_string (asm_file, string)
+ FILE *asm_file;
+ char *string;
+{
+ char c;
+
+ putc ('\"', asm_file);
+ while ((c = *string++) != 0)
+ {
+ if (c == '\"' || c == '\\')
+ putc ('\\', asm_file);
+ putc (c, asm_file);
+ }
+ putc ('\"', asm_file);
+}
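+
+/* E.g. the string ab"c is written as "ab\"c": surrounding double quotes
+ are added and embedded `"' and `\' characters are escaped. */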
+
+/* Output a file name in the form wanted by System V. */
+
+void
+output_file_directive (asm_file, input_name)
+ FILE *asm_file;
+ char *input_name;
+{
+ int len = strlen (input_name);
+ char *na = input_name + len;
+
+ /* NA gets INPUT_NAME sans directory names. */
+ while (na > input_name)
+ {
+ if (na[-1] == '/')
+ break;
+ na--;
+ }
+
+#ifdef ASM_OUTPUT_MAIN_SOURCE_FILENAME
+ ASM_OUTPUT_MAIN_SOURCE_FILENAME (asm_file, na);
+#else
+#ifdef ASM_OUTPUT_SOURCE_FILENAME
+ ASM_OUTPUT_SOURCE_FILENAME (asm_file, na);
+#else
+ fprintf (asm_file, "\t.file\t");
+ output_quoted_string (asm_file, na);
+ fputc ('\n', asm_file);
+#endif
+#endif
+}
+
+/* Routine to build language identifier for object file. */
+static void
+output_lang_identify (asm_out_file)
+ FILE *asm_out_file;
+{
+ int len = strlen (lang_identify ()) + sizeof ("__gnu_compiled_") + 1;
+ char *s = (char *) alloca (len);
+ sprintf (s, "__gnu_compiled_%s", lang_identify ());
+ ASM_OUTPUT_LABEL (asm_out_file, s);
+}
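+
+/* E.g. when lang_identify () returns "c", this emits the label
+ `__gnu_compiled_c'. */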
+
+/* Routine to open a dump file. */
+static FILE *
+open_dump_file (base_name, suffix)
+ char *base_name;
+ char *suffix;
+{
+ FILE *f;
+ char *dumpname = (char *) alloca (strlen (base_name) + strlen (suffix) + 1);
+
+ strcpy (dumpname, base_name);
+ strcat (dumpname, suffix);
+ f = fopen (dumpname, "w");
+ if (f == 0)
+ pfatal_with_name (dumpname);
+ return f;
+}
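+
+/* E.g. open_dump_file ("foo.c", ".rtl") creates "foo.c.rtl" for writing;
+ compile_file below opens all of its dump files this way. */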
+
+/* Compile an entire file of output from cpp, named NAME.
+ Write a file of assembly output and various debugging dumps. */
+
+static void
+compile_file (name)
+ char *name;
+{
+ tree globals;
+ int start_time;
+
+ int name_specified = name != 0;
+
+ if (dump_base_name == 0)
+ dump_base_name = name ? name : "gccdump";
+
+ parse_time = 0;
+ varconst_time = 0;
+ integration_time = 0;
+ jump_time = 0;
+ cse_time = 0;
+ loop_time = 0;
+ cse2_time = 0;
+ flow_time = 0;
+ combine_time = 0;
+ sched_time = 0;
+ local_alloc_time = 0;
+ global_alloc_time = 0;
+ sched2_time = 0;
+ dbr_sched_time = 0;
+ shorten_branch_time = 0;
+ stack_reg_time = 0;
+ final_time = 0;
+ symout_time = 0;
+ dump_time = 0;
+
+ /* Open input file. */
+
+ if (name == 0 || !strcmp (name, "-"))
+ {
+ finput = stdin;
+ name = "stdin";
+ }
+ else
+ finput = fopen (name, "r");
+ if (finput == 0)
+ pfatal_with_name (name);
+
+#ifdef IO_BUFFER_SIZE
+ setvbuf (finput, (char *) xmalloc (IO_BUFFER_SIZE), _IOFBF, IO_BUFFER_SIZE);
+#endif
+
+ /* Initialize data in various passes. */
+
+ init_obstacks ();
+ init_tree_codes ();
+ init_lex ();
+ /* Some of these really don't need to be called when generating bytecode,
+ but the options would have to be parsed first to know that. -bson */
+ init_rtl ();
+ init_emit_once (debug_info_level == DINFO_LEVEL_NORMAL
+ || debug_info_level == DINFO_LEVEL_VERBOSE);
+ init_regs ();
+ init_decl_processing ();
+ init_optabs ();
+ init_stmt ();
+ init_expmed ();
+ init_expr_once ();
+ init_loop ();
+ init_reload ();
+
+ if (flag_caller_saves)
+ init_caller_save ();
+
+ /* If auxiliary info generation is desired, open the output file.
+ This goes in the same directory as the source file--unlike
+ all the other output files. */
+ if (flag_gen_aux_info)
+ {
+ aux_info_file = fopen (aux_info_file_name, "w");
+ if (aux_info_file == 0)
+ pfatal_with_name (aux_info_file_name);
+ }
+
+ /* If rtl dump desired, open the output file. */
+ if (rtl_dump)
+ rtl_dump_file = open_dump_file (dump_base_name, ".rtl");
+
+ /* If jump_opt dump desired, open the output file. */
+ if (jump_opt_dump)
+ jump_opt_dump_file = open_dump_file (dump_base_name, ".jump");
+
+ /* If cse dump desired, open the output file. */
+ if (cse_dump)
+ cse_dump_file = open_dump_file (dump_base_name, ".cse");
+
+ /* If loop dump desired, open the output file. */
+ if (loop_dump)
+ loop_dump_file = open_dump_file (dump_base_name, ".loop");
+
+ /* If cse2 dump desired, open the output file. */
+ if (cse2_dump)
+ cse2_dump_file = open_dump_file (dump_base_name, ".cse2");
+
+ /* If flow dump desired, open the output file. */
+ if (flow_dump)
+ flow_dump_file = open_dump_file (dump_base_name, ".flow");
+
+ /* If combine dump desired, open the output file. */
+ if (combine_dump)
+ combine_dump_file = open_dump_file (dump_base_name, ".combine");
+
+ /* If scheduling dump desired, open the output file. */
+ if (sched_dump)
+ sched_dump_file = open_dump_file (dump_base_name, ".sched");
+
+ /* If local_reg dump desired, open the output file. */
+ if (local_reg_dump)
+ local_reg_dump_file = open_dump_file (dump_base_name, ".lreg");
+
+ /* If global_reg dump desired, open the output file. */
+ if (global_reg_dump)
+ global_reg_dump_file = open_dump_file (dump_base_name, ".greg");
+
+ /* If 2nd scheduling dump desired, open the output file. */
+ if (sched2_dump)
+ sched2_dump_file = open_dump_file (dump_base_name, ".sched2");
+
+ /* If jump2_opt dump desired, open the output file. */
+ if (jump2_opt_dump)
+ jump2_opt_dump_file = open_dump_file (dump_base_name, ".jump2");
+
+ /* If dbr_sched dump desired, open the output file. */
+ if (dbr_sched_dump)
+ dbr_sched_dump_file = open_dump_file (dump_base_name, ".dbr");
+
+#ifdef STACK_REGS
+
+ /* If stack_reg dump desired, open the output file. */
+ if (stack_reg_dump)
+ stack_reg_dump_file = open_dump_file (dump_base_name, ".stack");
+
+#endif
+
+ /* Open assembler code output file. */
+
+ if (! name_specified && asm_file_name == 0)
+ asm_out_file = stdout;
+ else
+ {
+ int len = strlen (dump_base_name);
+ register char *dumpname = (char *) xmalloc (len + 6);
+ strcpy (dumpname, dump_base_name);
+ strip_off_ending (dumpname, len);
+ strcat (dumpname, ".s");
+ if (asm_file_name == 0)
+ {
+ asm_file_name = (char *) xmalloc (strlen (dumpname) + 1);
+ strcpy (asm_file_name, dumpname);
+ }
+ if (!strcmp (asm_file_name, "-"))
+ asm_out_file = stdout;
+ else
+ asm_out_file = fopen (asm_file_name, "w");
+ if (asm_out_file == 0)
+ pfatal_with_name (asm_file_name);
+ }
+
+#ifdef IO_BUFFER_SIZE
+ setvbuf (asm_out_file, (char *) xmalloc (IO_BUFFER_SIZE),
+ _IOFBF, IO_BUFFER_SIZE);
+#endif
+
+ input_filename = name;
+
+ /* Perform language-specific initialization.
+ This may set main_input_filename. */
+ lang_init ();
+
+ /* If the input doesn't start with a #line, use the input name
+ as the official input file name. */
+ if (main_input_filename == 0)
+ main_input_filename = name;
+
+ /* Put an entry on the input file stack for the main input file. */
+ input_file_stack
+ = (struct file_stack *) xmalloc (sizeof (struct file_stack));
+ input_file_stack->next = 0;
+ input_file_stack->name = input_filename;
+
+ if (!output_bytecode)
+ {
+ ASM_FILE_START (asm_out_file);
+ }
+
+ /* Output something to inform GDB that this compilation was by GCC. Also
+ serves to tell GDB that the file consists of bytecodes. */
+ if (output_bytecode)
+ fprintf (asm_out_file, "bc_gcc2_compiled.:\n");
+ else
+ {
+#ifndef ASM_IDENTIFY_GCC
+ fprintf (asm_out_file, "gcc2_compiled.:\n");
+#else
+ ASM_IDENTIFY_GCC (asm_out_file);
+#endif
+ }
+
+ /* Output something to identify which front-end produced this file. */
+#ifdef ASM_IDENTIFY_LANGUAGE
+ ASM_IDENTIFY_LANGUAGE (asm_out_file);
+#endif
+
+ if (output_bytecode)
+ {
+ if (profile_flag || profile_block_flag)
+ error ("profiling not supported in bytecode compilation");
+ }
+ else
+ {
+ /* ??? Note: There used to be a conditional here
+ to call assemble_zeros without fail if DBX_DEBUGGING_INFO is defined.
+ This was to guarantee separation between gcc_compiled. and
+ the first function, for the sake of dbx on Suns.
+ However, having the extra zero here confused the Emacs
+ code for unexec, and might confuse other programs too.
+ Therefore, I took out that change.
+ In future versions we should find another way to solve
+ that dbx problem. -- rms, 23 May 93. */
+
+ /* Don't let the first function fall at the same address
+ as gcc_compiled., if profiling. */
+ if (profile_flag || profile_block_flag)
+ assemble_zeros (UNITS_PER_WORD);
+ }
+
+ /* If dbx symbol table desired, initialize writing it
+ and output the predefined types. */
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+ TIMEVAR (symout_time, dbxout_init (asm_out_file, main_input_filename,
+ getdecls ()));
+#endif
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ TIMEVAR (symout_time, sdbout_init (asm_out_file, main_input_filename,
+ getdecls ()));
+#endif
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ TIMEVAR (symout_time, dwarfout_init (asm_out_file, main_input_filename));
+#endif
+
+ /* Initialize yet another pass. */
+
+ if (!output_bytecode)
+ init_final (main_input_filename);
+
+ start_time = get_run_time ();
+
+ /* Call the parser, which parses the entire file
+ (calling rest_of_compilation for each function). */
+
+ if (yyparse () != 0)
+ {
+ if (errorcount == 0)
+ fprintf (stderr, "Errors detected in input file (your bison.simple is out of date)");
+
+ /* In case there were missing closebraces,
+ get us back to the global binding level. */
+ while (! global_bindings_p ())
+ poplevel (0, 0, 0);
+ }
+
+ /* Compilation is now finished except for writing
+ what's left of the symbol table output. */
+
+ parse_time += get_run_time () - start_time;
+
+ parse_time -= integration_time;
+ parse_time -= varconst_time;
+
+ globals = getdecls ();
+
+ /* Really define vars that have had only a tentative definition.
+ Really output inline functions that must actually be callable
+ and have not been output so far. */
+
+ {
+ int len = list_length (globals);
+ tree *vec = (tree *) alloca (sizeof (tree) * len);
+ int i;
+ tree decl;
+ int reconsider = 1;
+
+ /* Process the decls in reverse order--earliest first.
+ Put them into VEC from back to front, then take out from front. */
+
+ for (i = 0, decl = globals; i < len; i++, decl = TREE_CHAIN (decl))
+ vec[len - i - 1] = decl;
+
+ for (i = 0; i < len; i++)
+ {
+ decl = vec[i];
+
+ /* We're not deferring this any longer. */
+ DECL_DEFER_OUTPUT (decl) = 0;
+
+ if (TREE_CODE (decl) == VAR_DECL && DECL_SIZE (decl) == 0
+ && incomplete_decl_finalize_hook != 0)
+ (*incomplete_decl_finalize_hook) (decl);
+ }
+
+ /* Now emit any global variables or functions that we have been putting
+ off. We need to loop in case one of the things emitted here
+ references another one which comes earlier in the list. */
+ while (reconsider)
+ {
+ reconsider = 0;
+ for (i = 0; i < len; i++)
+ {
+ decl = vec[i];
+
+ if (TREE_ASM_WRITTEN (decl) || DECL_EXTERNAL (decl))
+ continue;
+
+ /* Don't write out static consts, unless we still need them.
+
+ We also keep static consts if not optimizing (for debugging).
+ ??? They might be better written into the debug information.
+ This is possible when using DWARF.
+
+ A language processor that wants static constants to be always
+ written out (even if it is not used) is responsible for
+ calling rest_of_decl_compilation itself. E.g. the C front-end
+ calls rest_of_decl_compilation from finish_decl.
+ One motivation for this is that it is conventional in some
+ environments to write things like:
+ static const char rcsid[] = "... version string ...";
+ intending to force the string to be in the executable.
+
+ A language processor that would prefer to have unneeded
+ static constants "optimized away" would just defer writing
+ them out until here. E.g. C++ does this, because static
+ constants are often defined in header files.
+
+ ??? A tempting alternative (for both C and C++) would be
+ to force a constant to be written if and only if it is
+ defined in a main file, as opposed to an include file. */
+
+ if (TREE_CODE (decl) == VAR_DECL && TREE_STATIC (decl)
+ && (! TREE_READONLY (decl)
+ || TREE_PUBLIC (decl)
+ || !optimize
+ || TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))))
+ {
+ reconsider = 1;
+ rest_of_decl_compilation (decl, NULL_PTR, 1, 1);
+ }
+
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && DECL_INITIAL (decl) != 0
+ && DECL_SAVED_INSNS (decl) != 0
+ && (flag_keep_inline_functions
+ || TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))))
+ {
+ reconsider = 1;
+ temporary_allocation ();
+ output_inline_function (decl);
+ permanent_allocation (1);
+ }
+ }
+ }
+
+ for (i = 0; i < len; i++)
+ {
+ decl = vec[i];
+
+ if (TREE_CODE (decl) == VAR_DECL && TREE_STATIC (decl)
+ && ! TREE_ASM_WRITTEN (decl))
+ /* Cancel the RTL for this decl so that, if debugging info
+ output for global variables is still to come,
+ this one will be omitted. */
+ DECL_RTL (decl) = NULL;
+
+ /* Warn about any function
+ declared static but not defined.
+ We don't warn about variables,
+ because many programs have static variables
+ that exist only to get some text into the object file. */
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && (warn_unused
+ || TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
+ && DECL_INITIAL (decl) == 0
+ && DECL_EXTERNAL (decl)
+ && ! TREE_PUBLIC (decl))
+ {
+ pedwarn_with_decl (decl,
+ "`%s' declared `static' but never defined");
+ /* This symbol is effectively an "extern" declaration now. */
+ TREE_PUBLIC (decl) = 1;
+ assemble_external (decl);
+ }
+
+ /* Warn about static fns or vars defined but not used,
+ but not about inline functions or static consts
+ since defining those in header files is normal practice. */
+ if (warn_unused
+ && ((TREE_CODE (decl) == FUNCTION_DECL && ! DECL_INLINE (decl))
+ || (TREE_CODE (decl) == VAR_DECL && ! TREE_READONLY (decl)))
+ && ! DECL_IN_SYSTEM_HEADER (decl)
+ && ! DECL_EXTERNAL (decl)
+ && ! TREE_PUBLIC (decl)
+ && ! TREE_USED (decl)
+ && ! DECL_REGISTER (decl)
+ /* The TREE_USED bit for file-scope decls
+ is kept in the identifier, to handle multiple
+ external decls in different scopes. */
+ && ! TREE_USED (DECL_NAME (decl)))
+ warning_with_decl (decl, "`%s' defined but not used");
+
+#ifdef SDB_DEBUGGING_INFO
+ /* The COFF linker can move initialized global vars to the end.
+ And that can screw up the symbol ordering.
+ By putting the symbols in that order to begin with,
+ we avoid a problem. mcsun!unido!fauern!tumuc!pes@uunet.uu.net. */
+ if (write_symbols == SDB_DEBUG && TREE_CODE (decl) == VAR_DECL
+ && TREE_PUBLIC (decl) && DECL_INITIAL (decl)
+ && ! DECL_EXTERNAL (decl)
+ && DECL_RTL (decl) != 0)
+ TIMEVAR (symout_time, sdbout_symbol (decl, 0));
+
+ /* Output COFF information for non-global
+ file-scope initialized variables. */
+ if (write_symbols == SDB_DEBUG
+ && TREE_CODE (decl) == VAR_DECL
+ && DECL_INITIAL (decl)
+ && ! DECL_EXTERNAL (decl)
+ && DECL_RTL (decl) != 0
+ && GET_CODE (DECL_RTL (decl)) == MEM)
+ TIMEVAR (symout_time, sdbout_toplevel_data (decl));
+#endif /* SDB_DEBUGGING_INFO */
+#ifdef DWARF_DEBUGGING_INFO
+ /* Output DWARF information for file-scope tentative data object
+ declarations, file-scope (extern) function declarations (which
+ had no corresponding body) and file-scope tagged type declarations
+ and definitions which have not yet been forced out. */
+
+ if (write_symbols == DWARF_DEBUG
+ && (TREE_CODE (decl) != FUNCTION_DECL || !DECL_INITIAL (decl)))
+ TIMEVAR (symout_time, dwarfout_file_scope_decl (decl, 1));
+#endif
+ }
+ }
+
+ /* Do dbx symbols */
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+ TIMEVAR (symout_time,
+ {
+ dbxout_finish (asm_out_file, main_input_filename);
+ });
+#endif
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ TIMEVAR (symout_time,
+ {
+ dwarfout_finish ();
+ });
+#endif
+
+ /* Output some stuff at the end of the file, if necessary. */
+
+ if (!output_bytecode)
+ {
+ end_final (main_input_filename);
+
+#ifdef ASM_FILE_END
+ ASM_FILE_END (asm_out_file);
+#endif
+ }
+
+ /* Language-specific end of compilation actions. */
+
+ lang_finish ();
+
+ if (output_bytecode)
+ bc_write_file (asm_out_file);
+
+ /* Close the dump files. */
+
+ if (flag_gen_aux_info)
+ {
+ fclose (aux_info_file);
+ if (errorcount)
+ unlink (aux_info_file_name);
+ }
+
+ if (rtl_dump)
+ fclose (rtl_dump_file);
+
+ if (jump_opt_dump)
+ fclose (jump_opt_dump_file);
+
+ if (cse_dump)
+ fclose (cse_dump_file);
+
+ if (loop_dump)
+ fclose (loop_dump_file);
+
+ if (cse2_dump)
+ fclose (cse2_dump_file);
+
+ if (flow_dump)
+ fclose (flow_dump_file);
+
+ if (combine_dump)
+ {
+ dump_combine_total_stats (combine_dump_file);
+ fclose (combine_dump_file);
+ }
+
+ if (sched_dump)
+ fclose (sched_dump_file);
+
+ if (local_reg_dump)
+ fclose (local_reg_dump_file);
+
+ if (global_reg_dump)
+ fclose (global_reg_dump_file);
+
+ if (sched2_dump)
+ fclose (sched2_dump_file);
+
+ if (jump2_opt_dump)
+ fclose (jump2_opt_dump_file);
+
+ if (dbr_sched_dump)
+ fclose (dbr_sched_dump_file);
+
+#ifdef STACK_REGS
+ if (stack_reg_dump)
+ fclose (stack_reg_dump_file);
+#endif
+
+ /* Close non-debugging input and output files. Take special care to note
+ whether fclose returns an error, since the pages might still be on the
+ buffer chain while the file is open. */
+
+ fclose (finput);
+ if (ferror (asm_out_file) != 0 || fclose (asm_out_file) != 0)
+ fatal_io_error (asm_file_name);
+
+ /* Print the times. */
+
+ if (! quiet_flag)
+ {
+ fprintf (stderr,"\n");
+ print_time ("parse", parse_time);
+
+ if (!output_bytecode)
+ {
+ print_time ("integration", integration_time);
+ print_time ("jump", jump_time);
+ print_time ("cse", cse_time);
+ print_time ("loop", loop_time);
+ print_time ("cse2", cse2_time);
+ print_time ("flow", flow_time);
+ print_time ("combine", combine_time);
+ print_time ("sched", sched_time);
+ print_time ("local-alloc", local_alloc_time);
+ print_time ("global-alloc", global_alloc_time);
+ print_time ("sched2", sched2_time);
+ print_time ("dbranch", dbr_sched_time);
+ print_time ("shorten-branch", shorten_branch_time);
+ print_time ("stack-reg", stack_reg_time);
+ print_time ("final", final_time);
+ print_time ("varconst", varconst_time);
+ print_time ("symout", symout_time);
+ print_time ("dump", dump_time);
+ }
+ }
+}
+
+/* This is called from various places for FUNCTION_DECL, VAR_DECL,
+ and TYPE_DECL nodes.
+
+ This does nothing for local (non-static) variables.
+ Otherwise, it sets up the RTL and outputs any assembler code
+ (label definition, storage allocation and initialization).
+
+ DECL is the declaration. If ASMSPEC is nonzero, it specifies
+ the assembler symbol name to be used. TOP_LEVEL is nonzero
+ if this declaration is not within a function. */
+
+void
+rest_of_decl_compilation (decl, asmspec, top_level, at_end)
+ tree decl;
+ char *asmspec;
+ int top_level;
+ int at_end;
+{
+ /* Declarations of variables, and of functions defined elsewhere. */
+
+/* The most obvious approach, to put an #ifndef around where
+ this macro is used, doesn't work since it's inside a macro call. */
+#ifndef ASM_FINISH_DECLARE_OBJECT
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP, END)
+#endif
+
+ /* Forward declarations for nested functions are not "external",
+ but we need to treat them as if they were. */
+ if (TREE_STATIC (decl) || DECL_EXTERNAL (decl)
+ || TREE_CODE (decl) == FUNCTION_DECL)
+ TIMEVAR (varconst_time,
+ {
+ make_decl_rtl (decl, asmspec, top_level);
+ /* Initialized extern variable exists to be replaced
+ with its value, or represents something that will be
+ output in another file. */
+ if (! (TREE_CODE (decl) == VAR_DECL
+ && DECL_EXTERNAL (decl) && TREE_READONLY (decl)
+ && DECL_INITIAL (decl) != 0
+ && DECL_INITIAL (decl) != error_mark_node))
+ /* Don't output anything
+ when a tentative file-scope definition is seen.
+ But at end of compilation, do output code for them. */
+ if (! (! at_end && top_level
+ && (DECL_INITIAL (decl) == 0
+ || DECL_INITIAL (decl) == error_mark_node)))
+ assemble_variable (decl, top_level, at_end, 0);
+ if (decl == last_assemble_variable_decl)
+ {
+ ASM_FINISH_DECLARE_OBJECT (asm_out_file, decl,
+ top_level, at_end);
+ }
+ });
+ else if (DECL_REGISTER (decl) && asmspec != 0)
+ {
+ if (decode_reg_name (asmspec) >= 0)
+ {
+ DECL_RTL (decl) = 0;
+ make_decl_rtl (decl, asmspec, top_level);
+ }
+ else
+ error ("invalid register name `%s' for register variable", asmspec);
+ }
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ else if ((write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+ && TREE_CODE (decl) == TYPE_DECL)
+ TIMEVAR (symout_time, dbxout_symbol (decl, 0));
+#endif
+#ifdef SDB_DEBUGGING_INFO
+ else if (write_symbols == SDB_DEBUG && top_level
+ && TREE_CODE (decl) == TYPE_DECL)
+ TIMEVAR (symout_time, sdbout_symbol (decl, 0));
+#endif
+}
+
+/* Called after finishing a record, union or enumeral type. */
+
+void
+rest_of_type_compilation (type, toplev)
+ tree type;
+ int toplev;
+{
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+ TIMEVAR (symout_time, dbxout_symbol (TYPE_STUB_DECL (type), !toplev));
+#endif
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ TIMEVAR (symout_time, sdbout_symbol (TYPE_STUB_DECL (type), !toplev));
+#endif
+}
+
+/* This is called from finish_function (within yyparse)
+ after each top-level definition is parsed.
+ It is supposed to compile that function or variable
+ and output the assembler code for it.
+ After we return, the tree storage is freed. */
+
+void
+rest_of_compilation (decl)
+ tree decl;
+{
+ register rtx insns;
+ int start_time = get_run_time ();
+ int tem;
+ /* Nonzero if we have saved the original DECL_INITIAL of the function,
+ to be restored after we finish compiling the function
+ (for use when compiling inline calls to this function). */
+ tree saved_block_tree = 0;
+ /* Likewise, for DECL_ARGUMENTS. */
+ tree saved_arguments = 0;
+ int failure = 0;
+
+ if (output_bytecode)
+ return;
+
+ /* If we are reconsidering an inline function
+ at the end of compilation, skip the stuff for making it inline. */
+
+ if (DECL_SAVED_INSNS (decl) == 0)
+ {
+ int specd = DECL_INLINE (decl);
+ char *lose;
+
+ /* If requested, consider whether to make this function inline. */
+ if (specd || flag_inline_functions)
+ TIMEVAR (integration_time,
+ {
+ lose = function_cannot_inline_p (decl);
+ /* If not optimizing, then make sure the DECL_INLINE
+ bit is off. */
+ if (lose || ! optimize)
+ {
+ if (warn_inline && specd)
+ warning_with_decl (decl, lose);
+ DECL_INLINE (decl) = 0;
+ /* Don't really compile an extern inline function.
+ If we can't make it inline, pretend
+ it was only declared. */
+ if (DECL_EXTERNAL (decl))
+ {
+ DECL_INITIAL (decl) = 0;
+ goto exit_rest_of_compilation;
+ }
+ }
+ else
+ DECL_INLINE (decl) = 1;
+ });
+
+ insns = get_insns ();
+
+ /* Dump the rtl code if we are dumping rtl. */
+
+ if (rtl_dump)
+ TIMEVAR (dump_time,
+ {
+ fprintf (rtl_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ if (DECL_SAVED_INSNS (decl))
+ fprintf (rtl_dump_file, ";; (integrable)\n\n");
+ print_rtl (rtl_dump_file, insns);
+ fflush (rtl_dump_file);
+ });
+
+ /* If function is inline, and we don't yet know whether to
+ compile it by itself, defer decision till end of compilation.
+ finish_compilation will call rest_of_compilation again
+ for those functions that need to be output. Also defer those
+ functions that were marked inline but weren't inlined; they
+ may never be used. */
+
+ if ((specd || DECL_INLINE (decl))
+ && ((! TREE_PUBLIC (decl) && ! TREE_ADDRESSABLE (decl)
+ && ! flag_keep_inline_functions)
+ || DECL_DEFER_OUTPUT (decl)
+ || DECL_EXTERNAL (decl)))
+ {
+#ifdef DWARF_DEBUGGING_INFO
+ /* Generate the DWARF info for the "abstract" instance
+ of a function which we may later generate inlined and/or
+ out-of-line instances of. */
+ if (write_symbols == DWARF_DEBUG)
+ {
+ set_decl_abstract_flags (decl, 1);
+ TIMEVAR (symout_time, dwarfout_file_scope_decl (decl, 0));
+ set_decl_abstract_flags (decl, 0);
+ }
+#endif
+ TIMEVAR (integration_time, save_for_inline_nocopy (decl));
+ goto exit_rest_of_compilation;
+ }
+
+ /* If we have to compile the function now, save its rtl and subdecls
+ so that its compilation will not affect what others get. */
+ if (DECL_INLINE (decl))
+ {
+#ifdef DWARF_DEBUGGING_INFO
+ /* Generate the DWARF info for the "abstract" instance of
+ a function which we will generate an out-of-line instance
+ of almost immediately (and which we may also later generate
+ various inlined instances of). */
+ if (write_symbols == DWARF_DEBUG)
+ {
+ set_decl_abstract_flags (decl, 1);
+ TIMEVAR (symout_time, dwarfout_file_scope_decl (decl, 0));
+ set_decl_abstract_flags (decl, 0);
+ }
+#endif
+ saved_block_tree = DECL_INITIAL (decl);
+ saved_arguments = DECL_ARGUMENTS (decl);
+ TIMEVAR (integration_time, save_for_inline_copying (decl));
+ }
+ }
+
+ if (DECL_DEFER_OUTPUT (decl))
+ goto exit_rest_of_compilation;
+
+ TREE_ASM_WRITTEN (decl) = 1;
+
+ /* Now that integrate will no longer see our rtl, we need not distinguish
+ between the return value of this function and the return value of called
+ functions. */
+ rtx_equal_function_value_matters = 0;
+
+ /* Don't return yet if -Wreturn-type; we need to do jump_optimize. */
+ if ((rtl_dump_and_exit || flag_syntax_only) && !warn_return_type)
+ {
+ goto exit_rest_of_compilation;
+ }
+
+ /* From now on, allocate rtl in current_obstack, not in saveable_obstack.
+ Note that that may have been done above, in save_for_inline_copying.
+ The call to resume_temporary_allocation near the end of this function
+ goes back to the usual state of affairs. */
+
+ rtl_in_current_obstack ();
+
+#ifdef FINALIZE_PIC
+ /* If we are doing position-independent code generation, now
+ is the time to output special prologues and epilogues.
+ We do not want to do this earlier, because it just clutters
+ up inline functions with meaningless insns. */
+ if (flag_pic)
+ FINALIZE_PIC;
+#endif
+
+ insns = get_insns ();
+
+ /* Copy any shared structure that should not be shared. */
+
+ unshare_all_rtl (insns);
+
+ /* Instantiate all virtual registers. */
+
+ instantiate_virtual_regs (current_function_decl, get_insns ());
+
+ /* See if we have allocated stack slots that are not directly addressable.
+ If so, scan all the insns and create explicit address computation
+ for all references to such slots. */
+/* fixup_stack_slots (); */
+
+ /* Do jump optimization the first time, if -opt.
+ Also do it if -W, but in that case it doesn't change the rtl code,
+ it only computes whether control can drop off the end of the function. */
+
+ if (optimize > 0 || extra_warnings || warn_return_type
+ /* If function is `noreturn', we should warn if it tries to return. */
+ || TREE_THIS_VOLATILE (decl))
+ {
+ TIMEVAR (jump_time, reg_scan (insns, max_reg_num (), 0));
+ TIMEVAR (jump_time, jump_optimize (insns, 0, 0, 1));
+ }
+
+ /* Now is when we stop if -fsyntax-only and -Wreturn-type. */
+ if (rtl_dump_and_exit || flag_syntax_only)
+ goto exit_rest_of_compilation;
+
+ /* Dump rtl code after jump, if we are doing that. */
+
+ if (jump_opt_dump)
+ TIMEVAR (dump_time,
+ {
+ fprintf (jump_opt_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ print_rtl (jump_opt_dump_file, insns);
+ fflush (jump_opt_dump_file);
+ });
+
+ /* Perform common subexpression elimination.
+ Nonzero value from `cse_main' means that jumps were simplified
+ and some code may now be unreachable, so do
+ jump optimization again. */
+
+ if (cse_dump)
+ TIMEVAR (dump_time,
+ {
+ fprintf (cse_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ });
+
+ if (optimize > 0)
+ {
+ TIMEVAR (cse_time, reg_scan (insns, max_reg_num (), 1));
+
+ if (flag_thread_jumps)
+ /* Hacks by tiemann & kenner. */
+ TIMEVAR (jump_time, thread_jumps (insns, max_reg_num (), 1));
+
+ TIMEVAR (cse_time, tem = cse_main (insns, max_reg_num (),
+ 0, cse_dump_file));
+ TIMEVAR (cse_time, delete_dead_from_cse (insns, max_reg_num ()));
+
+ if (tem)
+ TIMEVAR (jump_time, jump_optimize (insns, 0, 0, 0));
+ }
+
+ /* Dump rtl code after cse, if we are doing that. */
+
+ if (cse_dump)
+ TIMEVAR (dump_time,
+ {
+ print_rtl (cse_dump_file, insns);
+ fflush (cse_dump_file);
+ });
+
+ if (loop_dump)
+ TIMEVAR (dump_time,
+ {
+ fprintf (loop_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ });
+
+ /* Move constant computations out of loops. */
+
+ if (optimize > 0)
+ {
+ TIMEVAR (loop_time,
+ {
+ loop_optimize (insns, loop_dump_file);
+ });
+ }
+
+ /* Dump rtl code after loop opt, if we are doing that. */
+
+ if (loop_dump)
+ TIMEVAR (dump_time,
+ {
+ print_rtl (loop_dump_file, insns);
+ fflush (loop_dump_file);
+ });
+
+ if (cse2_dump)
+ TIMEVAR (dump_time,
+ {
+ fprintf (cse2_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ });
+
+ if (optimize > 0 && flag_rerun_cse_after_loop)
+ {
+ /* Running another jump optimization pass before the second
+ cse pass sometimes simplifies the RTL enough to allow
+ the second CSE pass to do a better job. Jump_optimize can change
+ max_reg_num so we must rerun reg_scan afterwards.
+ ??? Rework to not call reg_scan so often. */
+ TIMEVAR (jump_time, reg_scan (insns, max_reg_num (), 0));
+ TIMEVAR (jump_time, jump_optimize (insns, 0, 0, 1));
+
+ TIMEVAR (cse2_time, reg_scan (insns, max_reg_num (), 0));
+ TIMEVAR (cse2_time, tem = cse_main (insns, max_reg_num (),
+ 1, cse2_dump_file));
+ if (tem)
+ TIMEVAR (jump_time, jump_optimize (insns, 0, 0, 0));
+ }
+
+ if (optimize > 0 && flag_thread_jumps)
+ /* This pass of jump threading straightens out code
+ that was kinked by loop optimization. */
+ TIMEVAR (jump_time, thread_jumps (insns, max_reg_num (), 0));
+
+ /* Dump rtl code after the second cse pass, if we are doing that. */
+
+ if (cse2_dump)
+ TIMEVAR (dump_time,
+ {
+ print_rtl (cse2_dump_file, insns);
+ fflush (cse2_dump_file);
+ });
+
+ /* We are no longer anticipating cse in this function, at least. */
+
+ cse_not_expected = 1;
+
+ /* Now we choose between stupid (pcc-like) register allocation
+ (if we got the -noreg switch and not -opt)
+ and smart register allocation. */
+
+ if (optimize > 0) /* Stupid allocation probably won't work */
+ obey_regdecls = 0; /* if optimizations being done. */
+
+ regclass_init ();
+
+ /* Print function header into flow dump now
+ because doing the flow analysis makes some of the dump. */
+
+ if (flow_dump)
+ TIMEVAR (dump_time,
+ {
+ fprintf (flow_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ });
+
+ if (obey_regdecls)
+ {
+ TIMEVAR (flow_time,
+ {
+ regclass (insns, max_reg_num ());
+ stupid_life_analysis (insns, max_reg_num (),
+ flow_dump_file);
+ });
+ }
+ else
+ {
+ /* Do control and data flow analysis,
+ and write some of the results to dump file. */
+
+ TIMEVAR (flow_time, flow_analysis (insns, max_reg_num (),
+ flow_dump_file));
+ if (warn_uninitialized)
+ {
+ uninitialized_vars_warning (DECL_INITIAL (decl));
+ setjmp_args_warning ();
+ }
+ }
+
+ /* Dump rtl after flow analysis. */
+
+ if (flow_dump)
+ TIMEVAR (dump_time,
+ {
+ print_rtl (flow_dump_file, insns);
+ fflush (flow_dump_file);
+ });
+
+ /* If -opt, try combining insns through substitution. */
+
+ if (optimize > 0)
+ TIMEVAR (combine_time, combine_instructions (insns, max_reg_num ()));
+
+ /* Dump rtl code after insn combination. */
+
+ if (combine_dump)
+ TIMEVAR (dump_time,
+ {
+ fprintf (combine_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ dump_combine_stats (combine_dump_file);
+ print_rtl (combine_dump_file, insns);
+ fflush (combine_dump_file);
+ });
+
+ /* Print function header into sched dump now
+ because doing the sched analysis makes some of the dump. */
+
+ if (sched_dump)
+ TIMEVAR (dump_time,
+ {
+ fprintf (sched_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ });
+
+ if (optimize > 0 && flag_schedule_insns)
+ {
+ /* Do control and data sched analysis,
+ and write some of the results to dump file. */
+
+ TIMEVAR (sched_time, schedule_insns (sched_dump_file));
+ }
+
+ /* Dump rtl after instruction scheduling. */
+
+ if (sched_dump)
+ TIMEVAR (dump_time,
+ {
+ print_rtl (sched_dump_file, insns);
+ fflush (sched_dump_file);
+ });
+
+ /* Unless we did stupid register allocation,
+ allocate pseudo-regs that are used only within 1 basic block. */
+
+ if (!obey_regdecls)
+ TIMEVAR (local_alloc_time,
+ {
+ regclass (insns, max_reg_num ());
+ local_alloc ();
+ });
+
+ /* Dump rtl code after allocating regs within basic blocks. */
+
+ if (local_reg_dump)
+ TIMEVAR (dump_time,
+ {
+ fprintf (local_reg_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ dump_flow_info (local_reg_dump_file);
+ dump_local_alloc (local_reg_dump_file);
+ print_rtl (local_reg_dump_file, insns);
+ fflush (local_reg_dump_file);
+ });
+
+ if (global_reg_dump)
+ TIMEVAR (dump_time,
+ fprintf (global_reg_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl))));
+
+ /* Unless we did stupid register allocation,
+ allocate remaining pseudo-regs, then do the reload pass
+ fixing up any insns that are invalid. */
+
+ TIMEVAR (global_alloc_time,
+ {
+ if (!obey_regdecls)
+ failure = global_alloc (global_reg_dump_file);
+ else
+ failure = reload (insns, 0, global_reg_dump_file);
+ });
+
+ if (global_reg_dump)
+ TIMEVAR (dump_time,
+ {
+ dump_global_regs (global_reg_dump_file);
+ print_rtl (global_reg_dump_file, insns);
+ fflush (global_reg_dump_file);
+ });
+
+ if (failure)
+ goto exit_rest_of_compilation;
+
+ reload_completed = 1;
+
+ /* On some machines, the prologue and epilogue code, or parts thereof,
+ can be represented as RTL. Doing so lets us schedule insns between
+ it and the rest of the code and also allows delayed branch
+ scheduling to operate in the epilogue. */
+
+ thread_prologue_and_epilogue_insns (insns);
+
+ if (optimize > 0 && flag_schedule_insns_after_reload)
+ {
+ if (sched2_dump)
+ TIMEVAR (dump_time,
+ {
+ fprintf (sched2_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ });
+
+ /* Do control and data sched analysis again,
+ and write some more of the results to dump file. */
+
+ TIMEVAR (sched2_time, schedule_insns (sched2_dump_file));
+
+ /* Dump rtl after post-reorder instruction scheduling. */
+
+ if (sched2_dump)
+ TIMEVAR (dump_time,
+ {
+ print_rtl (sched2_dump_file, insns);
+ fflush (sched2_dump_file);
+ });
+ }
+
+#ifdef LEAF_REGISTERS
+ leaf_function = 0;
+ if (optimize > 0 && only_leaf_regs_used () && leaf_function_p ())
+ leaf_function = 1;
+#endif
+
+ /* One more attempt to remove jumps to .+1
+ left by dead-store-elimination.
+ Also do cross-jumping this time
+ and delete no-op move insns. */
+
+ if (optimize > 0)
+ {
+ TIMEVAR (jump_time, jump_optimize (insns, 1, 1, 0));
+ }
+
+ /* Dump rtl code after the second jump pass, if we are doing that. */
+
+ if (jump2_opt_dump)
+ TIMEVAR (dump_time,
+ {
+ fprintf (jump2_opt_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ print_rtl (jump2_opt_dump_file, insns);
+ fflush (jump2_opt_dump_file);
+ });
+
+ /* If a machine dependent reorganization is needed, call it. */
+#ifdef MACHINE_DEPENDENT_REORG
+ MACHINE_DEPENDENT_REORG (insns);
+#endif
+
+ /* If a scheduling pass for delayed branches is to be done,
+ call the scheduling code. */
+
+#ifdef DELAY_SLOTS
+ if (optimize > 0 && flag_delayed_branch)
+ {
+ TIMEVAR (dbr_sched_time, dbr_schedule (insns, dbr_sched_dump_file));
+ if (dbr_sched_dump)
+ {
+ TIMEVAR (dump_time,
+ {
+ fprintf (dbr_sched_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ print_rtl (dbr_sched_dump_file, insns);
+ fflush (dbr_sched_dump_file);
+ });
+ }
+ }
+#endif
+
+ if (optimize > 0)
+ /* Shorten branches. */
+ TIMEVAR (shorten_branch_time,
+ {
+ shorten_branches (get_insns ());
+ });
+
+#ifdef STACK_REGS
+ TIMEVAR (stack_reg_time, reg_to_stack (insns, stack_reg_dump_file));
+ if (stack_reg_dump)
+ {
+ TIMEVAR (dump_time,
+ {
+ fprintf (stack_reg_dump_file, "\n;; Function %s\n\n",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ print_rtl (stack_reg_dump_file, insns);
+ fflush (stack_reg_dump_file);
+ });
+ }
+#endif
+
+ /* Now turn the rtl into assembler code. */
+
+ TIMEVAR (final_time,
+ {
+ rtx x;
+ char *fnname;
+
+ /* Get the function's name, as described by its RTL.
+ This may be different from the DECL_NAME name used
+ in the source file. */
+
+ x = DECL_RTL (decl);
+ if (GET_CODE (x) != MEM)
+ abort ();
+ x = XEXP (x, 0);
+ if (GET_CODE (x) != SYMBOL_REF)
+ abort ();
+ fnname = XSTR (x, 0);
+
+ assemble_start_function (decl, fnname);
+ final_start_function (insns, asm_out_file, optimize);
+ final (insns, asm_out_file, optimize, 0);
+ final_end_function (insns, asm_out_file, optimize);
+ assemble_end_function (decl, fnname);
+ fflush (asm_out_file);
+ });
+
+ /* Write DBX symbols if requested. */
+
+ /* Note that for those inline functions where we don't initially
+ know for certain that we will be generating an out-of-line copy,
+ the first invocation of this routine (rest_of_compilation) will
+ skip over this code by doing a `goto exit_rest_of_compilation;'.
+ Later on, finish_compilation will call rest_of_compilation again
+ for those inline functions that need to have out-of-line copies
+ generated. During that call, we *will* be routed past here. */
+
+#ifdef DBX_DEBUGGING_INFO
+ if (write_symbols == DBX_DEBUG)
+ TIMEVAR (symout_time, dbxout_function (decl));
+#endif
+
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols == DWARF_DEBUG)
+ TIMEVAR (symout_time, dwarfout_file_scope_decl (decl, 0));
+#endif
+
+ exit_rest_of_compilation:
+
+ /* In case the function was not output,
+ don't leave any temporary anonymous types
+ queued up for sdb output. */
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ sdbout_types (NULL_TREE);
+#endif
+
+ /* Put back the tree of subblocks and list of arguments
+ from before we copied them.
+ Code generation and the output of debugging info may have modified
+ the copy, but the original is unchanged. */
+
+ if (saved_block_tree != 0)
+ DECL_INITIAL (decl) = saved_block_tree;
+ if (saved_arguments != 0)
+ DECL_ARGUMENTS (decl) = saved_arguments;
+
+ reload_completed = 0;
+
+ /* Clear out the real_constant_chain before some of the rtx's
+ it runs through become garbage. */
+
+ clear_const_double_mem ();
+
+ /* Cancel the effect of rtl_in_current_obstack. */
+
+ resume_temporary_allocation ();
+
+ /* The parsing time is all the time spent in yyparse
+ *except* what is spent in this function. */
+
+ parse_time -= get_run_time () - start_time;
+}
+
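+/* A rough sketch of the pass sequence coded above when optimizing:
+ jump, cse, loop, cse2, flow, combine, sched, local-alloc,
+ global-alloc/reload, sched2, jump2, machine-dependent reorg, delayed
+ branches, shorten-branches, stack-regs, final. Each optional dump
+ file written above snapshots the rtl just after its pass. */
+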
+/* Entry point of cc1/c++. Decode command args, then call compile_file.
+ Exit code is 35 if can't open files, 34 if fatal error,
+ 33 if had nonfatal errors, else success. */
+
+int
+main (argc, argv, envp)
+ int argc;
+ char **argv;
+ char **envp;
+{
+ register int i;
+ char *filename = 0;
+ int flag_print_mem = 0;
+ int version_flag = 0;
+ char *p;
+
+ /* save in case md file wants to emit args as a comment. */
+ save_argc = argc;
+ save_argv = argv;
+
+ p = argv[0] + strlen (argv[0]);
+ while (p != argv[0] && p[-1] != '/') --p;
+ progname = p;
+
+#ifdef RLIMIT_STACK
+ /* Get rid of any avoidable limit on stack size. */
+ {
+ struct rlimit rlim;
+
+ /* Set the stack limit huge so that alloca does not fail. */
+ getrlimit (RLIMIT_STACK, &rlim);
+ rlim.rlim_cur = rlim.rlim_max;
+ setrlimit (RLIMIT_STACK, &rlim);
+ }
+#endif /* RLIMIT_STACK */
+
+ signal (SIGFPE, float_signal);
+
+#ifdef SIGPIPE
+ signal (SIGPIPE, pipe_closed);
+#endif
+
+ decl_printable_name = decl_name;
+ lang_expand_expr = (struct rtx_def *(*)()) do_abort;
+ interim_eh_hook = interim_eh;
+
+ /* Initialize whether `char' is signed. */
+ flag_signed_char = DEFAULT_SIGNED_CHAR;
+#ifdef DEFAULT_SHORT_ENUMS
+ /* Initialize how much space enums occupy, by default. */
+ flag_short_enums = DEFAULT_SHORT_ENUMS;
+#endif
+
+ /* Scan to see what optimization level has been specified. That will
+ determine the default value of many flags. */
+ for (i = 1; i < argc; i++)
+ {
+ if (!strcmp (argv[i], "-O"))
+ {
+ optimize = 1;
+ }
+ else if (argv[i][0] == '-' && argv[i][1] == 'O')
+ {
+ /* Handle -O2, -O3, -O69, ... */
+ char *p = &argv[i][2];
+ int c;
+
+ while (c = *p++)
+ if (! (c >= '0' && c <= '9'))
+ break;
+ if (c == 0)
+ optimize = atoi (&argv[i][2]);
+ }
+ }
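+
+ /* For example, plain `-O' sets optimize to 1 above, while `-O2' reaches
+ the atoi call and sets it to 2. A non-numeric suffix such as `-Ofoo'
+ sets nothing here and is rejected by the main option loop below. */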
+
+ obey_regdecls = (optimize == 0);
+ if (optimize == 0)
+ {
+ flag_no_inline = 1;
+ warn_inline = 0;
+ }
+
+ if (optimize >= 1)
+ {
+ flag_defer_pop = 1;
+ flag_thread_jumps = 1;
+#ifdef DELAY_SLOTS
+ flag_delayed_branch = 1;
+#endif
+#ifdef CAN_DEBUG_WITHOUT_FP
+ flag_omit_frame_pointer = 1;
+#endif
+ }
+
+ if (optimize >= 2)
+ {
+ flag_cse_follow_jumps = 1;
+ flag_cse_skip_blocks = 1;
+ flag_expensive_optimizations = 1;
+ flag_strength_reduce = 1;
+ flag_rerun_cse_after_loop = 1;
+ flag_caller_saves = 1;
+#ifdef INSN_SCHEDULING
+ flag_schedule_insns = 1;
+ flag_schedule_insns_after_reload = 1;
+#endif
+ }
+
+ if (optimize >= 3)
+ {
+ flag_inline_functions = 1;
+ }
+
+#ifdef OPTIMIZATION_OPTIONS
+ /* Allow default optimizations to be specified on a per-machine basis. */
+ OPTIMIZATION_OPTIONS (optimize);
+#endif
+
+ /* Initialize register usage now so switches may override. */
+ init_reg_sets ();
+
+ target_flags = 0;
+ set_target_switch ("");
+
+ for (i = 1; i < argc; i++)
+ {
+ int j;
+ /* If this is a language-specific option,
+ decode it in a language-specific way. */
+ for (j = 0; lang_options[j] != 0; j++)
+ if (!strncmp (argv[i], lang_options[j],
+ strlen (lang_options[j])))
+ break;
+ if (lang_options[j] != 0)
+ /* If the option is valid for *some* language,
+ treat it as valid even if this language doesn't understand it. */
+ lang_decode_option (argv[i]);
+ else if (argv[i][0] == '-' && argv[i][1] != 0)
+ {
+ register char *str = argv[i] + 1;
+ if (str[0] == 'Y')
+ str++;
+
+ if (str[0] == 'm')
+ set_target_switch (&str[1]);
+ else if (!strcmp (str, "dumpbase"))
+ {
+ dump_base_name = argv[++i];
+ }
+ else if (str[0] == 'd')
+ {
+ register char *p = &str[1];
+ while (*p)
+ switch (*p++)
+ {
+ case 'a':
+ combine_dump = 1;
+ dbr_sched_dump = 1;
+ flow_dump = 1;
+ global_reg_dump = 1;
+ jump_opt_dump = 1;
+ jump2_opt_dump = 1;
+ local_reg_dump = 1;
+ loop_dump = 1;
+ rtl_dump = 1;
+ cse_dump = 1, cse2_dump = 1;
+ sched_dump = 1;
+ sched2_dump = 1;
+ stack_reg_dump = 1;
+ break;
+ case 'k':
+ stack_reg_dump = 1;
+ break;
+ case 'c':
+ combine_dump = 1;
+ break;
+ case 'd':
+ dbr_sched_dump = 1;
+ break;
+ case 'f':
+ flow_dump = 1;
+ break;
+ case 'g':
+ global_reg_dump = 1;
+ break;
+ case 'j':
+ jump_opt_dump = 1;
+ break;
+ case 'J':
+ jump2_opt_dump = 1;
+ break;
+ case 'l':
+ local_reg_dump = 1;
+ break;
+ case 'L':
+ loop_dump = 1;
+ break;
+ case 'm':
+ flag_print_mem = 1;
+ break;
+ case 'p':
+ flag_print_asm_name = 1;
+ break;
+ case 'r':
+ rtl_dump = 1;
+ break;
+ case 's':
+ cse_dump = 1;
+ break;
+ case 't':
+ cse2_dump = 1;
+ break;
+ case 'S':
+ sched_dump = 1;
+ break;
+ case 'R':
+ sched2_dump = 1;
+ break;
+ case 'y':
+ set_yydebug (1);
+ break;
+
+ case 'x':
+ rtl_dump_and_exit = 1;
+ break;
+ }
+ }
+ else if (str[0] == 'f')
+ {
+ register char *p = &str[1];
+ int found = 0;
+
+ /* Some kind of -f option.
+ P's value is the option sans `-f'.
+ Search for it in the table of options. */
+
+ for (j = 0;
+ !found && j < sizeof (f_options) / sizeof (f_options[0]);
+ j++)
+ {
+ if (!strcmp (p, f_options[j].string))
+ {
+ *f_options[j].variable = f_options[j].on_value;
+ /* A goto here would be cleaner,
+ but breaks the vax pcc. */
+ found = 1;
+ }
+ if (p[0] == 'n' && p[1] == 'o' && p[2] == '-'
+ && ! strcmp (p+3, f_options[j].string))
+ {
+ *f_options[j].variable = ! f_options[j].on_value;
+ found = 1;
+ }
+ }
+
+ if (found)
+ ;
+ else if (!strncmp (p, "fixed-", 6))
+ fix_register (&p[6], 1, 1);
+ else if (!strncmp (p, "call-used-", 10))
+ fix_register (&p[10], 0, 1);
+ else if (!strncmp (p, "call-saved-", 11))
+ fix_register (&p[11], 0, 0);
+ else
+ error ("Invalid option `%s'", argv[i]);
+ }
+ else if (str[0] == 'O')
+ {
+ register char *p = str+1;
+ while (*p && *p >= '0' && *p <= '9')
+ p++;
+ if (*p == '\0')
+ ;
+ else
+ error ("Invalid option `%s'", argv[i]);
+ }
+ else if (!strcmp (str, "pedantic"))
+ pedantic = 1;
+ else if (!strcmp (str, "pedantic-errors"))
+ flag_pedantic_errors = pedantic = 1;
+ else if (!strcmp (str, "quiet"))
+ quiet_flag = 1;
+ else if (!strcmp (str, "version"))
+ version_flag = 1;
+ else if (!strcmp (str, "w"))
+ inhibit_warnings = 1;
+ else if (!strcmp (str, "W"))
+ {
+ extra_warnings = 1;
+ /* We save the value of warn_uninitialized, since if they put
+ -Wuninitialized on the command line, we need to generate a
+ warning about not using it without also specifying -O. */
+ if (warn_uninitialized != 1)
+ warn_uninitialized = 2;
+ }
+ else if (str[0] == 'W')
+ {
+ register char *p = &str[1];
+ int found = 0;
+
+ /* Some kind of -W option.
+ P's value is the option sans `-W'.
+ Search for it in the table of options. */
+
+ for (j = 0;
+ !found && j < sizeof (W_options) / sizeof (W_options[0]);
+ j++)
+ {
+ if (!strcmp (p, W_options[j].string))
+ {
+ *W_options[j].variable = W_options[j].on_value;
+ /* A goto here would be cleaner,
+ but breaks the vax pcc. */
+ found = 1;
+ }
+ if (p[0] == 'n' && p[1] == 'o' && p[2] == '-'
+ && ! strcmp (p+3, W_options[j].string))
+ {
+ *W_options[j].variable = ! W_options[j].on_value;
+ found = 1;
+ }
+ }
+
+ if (found)
+ ;
+ else if (!strncmp (p, "id-clash-", 9))
+ {
+ char *endp = p + 9;
+
+ while (*endp)
+ {
+ if (*endp >= '0' && *endp <= '9')
+ endp++;
+ else
+ {
+ error ("Invalid option `%s'", argv[i]);
+ goto id_clash_lose;
+ }
+ }
+ warn_id_clash = 1;
+ id_clash_len = atoi (str + 10);
+ id_clash_lose: ;
+ }
+ else if (!strncmp (p, "larger-than-", 12))
+ {
+ char *endp = p + 12;
+
+ while (*endp)
+ {
+ if (*endp >= '0' && *endp <= '9')
+ endp++;
+ else
+ {
+ error ("Invalid option `%s'", argv[i]);
+ goto larger_than_lose;
+ }
+ }
+ warn_larger_than = 1;
+ larger_than_size = atoi (str + 13);
+ larger_than_lose: ;
+ }
+ else
+ error ("Invalid option `%s'", argv[i]);
+ }
+ else if (!strcmp (str, "p"))
+ {
+ if (!output_bytecode)
+ profile_flag = 1;
+ else
+ error ("profiling not supported in bytecode compilation");
+ }
+ else if (!strcmp (str, "a"))
+ {
+#if !defined (BLOCK_PROFILER) || !defined (FUNCTION_BLOCK_PROFILER)
+ warning ("`-a' option (basic block profile) not supported");
+#else
+ profile_block_flag = 1;
+#endif
+ }
+ else if (str[0] == 'g')
+ {
+ char *p = str + 1;
+ char *q;
+ unsigned len;
+ unsigned level;
+
+ while (*p && (*p < '0' || *p > '9'))
+ p++;
+ len = p - str;
+ q = p;
+ while (*q && (*q >= '0' && *q <= '9'))
+ q++;
+ if (*p)
+ level = atoi (p);
+ else
+ level = 2; /* default debugging info level */
+ if (*q || level > 3)
+ {
+ warning ("invalid debug level specification in option: `-%s'",
+ str);
+ warning ("no debugging information will be generated");
+ level = 0;
+ }
+
+ /* If more than one debugging type is supported,
+ you must define PREFERRED_DEBUGGING_TYPE
+ to choose a format in a system-dependent way. */
+ /* This is one long line cause VAXC can't handle a \-newline. */
+#if 1 < (defined (DBX_DEBUGGING_INFO) + defined (SDB_DEBUGGING_INFO) + defined (DWARF_DEBUGGING_INFO) + defined (XCOFF_DEBUGGING_INFO))
+#ifdef PREFERRED_DEBUGGING_TYPE
+ if (!strncmp (str, "ggdb", len))
+ write_symbols = PREFERRED_DEBUGGING_TYPE;
+#else /* no PREFERRED_DEBUGGING_TYPE */
+You Lose! You must define PREFERRED_DEBUGGING_TYPE!
+#endif /* no PREFERRED_DEBUGGING_TYPE */
+#endif /* More than one debugger format enabled. */
+#ifdef DBX_DEBUGGING_INFO
+ if (write_symbols != NO_DEBUG)
+ ;
+ else if (!strncmp (str, "ggdb", len))
+ write_symbols = DBX_DEBUG;
+ else if (!strncmp (str, "gstabs", len))
+ write_symbols = DBX_DEBUG;
+ else if (!strncmp (str, "gstabs+", len))
+ write_symbols = DBX_DEBUG;
+
+ /* Always enable extensions for -ggdb or -gstabs+,
+ always disable for -gstabs.
+ For plain -g, use system-specific default. */
+ if (write_symbols == DBX_DEBUG && !strncmp (str, "ggdb", len)
+ && len >= 2)
+ use_gnu_debug_info_extensions = 1;
+ else if (write_symbols == DBX_DEBUG && !strncmp (str, "gstabs+", len)
+ && len >= 7)
+ use_gnu_debug_info_extensions = 1;
+ else if (write_symbols == DBX_DEBUG
+ && !strncmp (str, "gstabs", len) && len >= 2)
+ use_gnu_debug_info_extensions = 0;
+ else
+ use_gnu_debug_info_extensions = DEFAULT_GDB_EXTENSIONS;
+#endif /* DBX_DEBUGGING_INFO */
+#ifdef DWARF_DEBUGGING_INFO
+ if (write_symbols != NO_DEBUG)
+ ;
+ else if (!strncmp (str, "g", len))
+ write_symbols = DWARF_DEBUG;
+ else if (!strncmp (str, "ggdb", len))
+ write_symbols = DWARF_DEBUG;
+ else if (!strncmp (str, "gdwarf", len))
+ write_symbols = DWARF_DEBUG;
+
+ /* Always enable extensions for -ggdb or -gdwarf+,
+ always disable for -gdwarf.
+ For plain -g, use system-specific default. */
+ if (write_symbols == DWARF_DEBUG && !strncmp (str, "ggdb", len)
+ && len >= 2)
+ use_gnu_debug_info_extensions = 1;
+ else if (write_symbols == DWARF_DEBUG && !strcmp (str, "gdwarf+"))
+ use_gnu_debug_info_extensions = 1;
+ else if (write_symbols == DWARF_DEBUG
+ && !strncmp (str, "gdwarf", len) && len >= 2)
+ use_gnu_debug_info_extensions = 0;
+ else
+ use_gnu_debug_info_extensions = DEFAULT_GDB_EXTENSIONS;
+#endif
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols != NO_DEBUG)
+ ;
+ else if (!strncmp (str, "g", len))
+ write_symbols = SDB_DEBUG;
+ else if (!strncmp (str, "gdb", len))
+ write_symbols = SDB_DEBUG;
+ else if (!strncmp (str, "gcoff", len))
+ write_symbols = SDB_DEBUG;
+#endif /* SDB_DEBUGGING_INFO */
+#ifdef XCOFF_DEBUGGING_INFO
+ if (write_symbols != NO_DEBUG)
+ ;
+ else if (!strncmp (str, "g", len))
+ write_symbols = XCOFF_DEBUG;
+ else if (!strncmp (str, "ggdb", len))
+ write_symbols = XCOFF_DEBUG;
+ else if (!strncmp (str, "gxcoff", len))
+ write_symbols = XCOFF_DEBUG;
+
+ /* Always enable extensions for -ggdb or -gxcoff+,
+ always disable for -gxcoff.
+ For plain -g, use system-specific default. */
+ if (write_symbols == XCOFF_DEBUG && !strncmp (str, "ggdb", len)
+ && len >= 2)
+ use_gnu_debug_info_extensions = 1;
+ else if (write_symbols == XCOFF_DEBUG && !strcmp (str, "gxcoff+"))
+ use_gnu_debug_info_extensions = 1;
+ else if (write_symbols == XCOFF_DEBUG
+ && !strncmp (str, "gxcoff", len) && len >= 2)
+ use_gnu_debug_info_extensions = 0;
+ else
+ use_gnu_debug_info_extensions = DEFAULT_GDB_EXTENSIONS;
+#endif
+ if (write_symbols == NO_DEBUG)
+ warning ("`-%s' option not supported on this version of GCC", str);
+ else if (level == 0)
+ write_symbols = NO_DEBUG;
+ else
+ debug_info_level = (enum debug_info_level) level;
+ }
+ else if (!strcmp (str, "o"))
+ {
+ asm_file_name = argv[++i];
+ }
+ else if (str[0] == 'G')
+ {
+ g_switch_set = TRUE;
+ g_switch_value = atoi ((str[1] != '\0') ? str+1 : argv[++i]);
+ }
+ else if (!strncmp (str, "aux-info", 8))
+ {
+ flag_gen_aux_info = 1;
+ aux_info_file_name = (str[8] != '\0' ? str+8 : argv[++i]);
+ }
+ else
+ error ("Invalid option `%s'", argv[i]);
+ }
+ else if (argv[i][0] == '+')
+ error ("Invalid option `%s'", argv[i]);
+ else
+ filename = argv[i];
+ }
+
+ /* Initialize for bytecode output. It is best done as soon as
+ possible after the "-f" options have been parsed. */
+ if (output_bytecode)
+ {
+#ifndef TARGET_SUPPORTS_BYTECODE
+ /* Just die with a fatal error if not supported */
+ fatal ("-fbytecode not supporter for this target");
+#else
+ bc_initialize ();
+#endif
+ }
+
+ if (optimize == 0)
+ {
+ /* Inlining does not work if not optimizing,
+ so force it not to be done. */
+ flag_no_inline = 1;
+ warn_inline = 0;
+
+ /* The c_decode_option and lang_decode_option functions set
+ this to `2' if -Wall is used, so we can avoid giving out
+ lots of errors for people who don't realize what -Wall does. */
+ if (warn_uninitialized == 1)
+ warning ("-Wuninitialized is not supported without -O");
+ }
+
+#if defined(DWARF_DEBUGGING_INFO)
+ if (write_symbols == DWARF_DEBUG
+ && strcmp (language_string, "GNU C++") == 0)
+ {
+ warning ("-g option not supported for C++ on SVR4 systems");
+ write_symbols = NO_DEBUG;
+ }
+#endif /* defined(DWARF_DEBUGGING_INFO) */
+
+#ifdef OVERRIDE_OPTIONS
+ /* Some machines may reject certain combinations of options. */
+ OVERRIDE_OPTIONS;
+#endif
+
+ /* Unrolling all loops implies that standard loop unrolling must also
+ be done. */
+ if (flag_unroll_all_loops)
+ flag_unroll_loops = 1;
+ /* Loop unrolling requires that strength_reduction be on also. Silently
+ turn on strength reduction here if it isn't already on. Also, the loop
+ unrolling code assumes that cse will be run after loop, so that must
+ be turned on also. */
+ if (flag_unroll_loops)
+ {
+ flag_strength_reduce = 1;
+ flag_rerun_cse_after_loop = 1;
+ }
+
+ /* Warn about options that are not supported on this machine. */
+#ifndef INSN_SCHEDULING
+ if (flag_schedule_insns || flag_schedule_insns_after_reload)
+ warning ("instruction scheduling not supported on this target machine");
+#endif
+#ifndef DELAY_SLOTS
+ if (flag_delayed_branch)
+ warning ("this target machine does not have delayed branches");
+#endif
+
+ /* If we are in verbose mode, write out the version and maybe all the
+ option flags in use. */
+ if (version_flag)
+ {
+ fprintf (stderr, "%s version %s", language_string, version_string);
+#ifdef TARGET_VERSION
+ TARGET_VERSION;
+#endif
+#ifdef __GNUC__
+#ifndef __VERSION__
+#define __VERSION__ "[unknown]"
+#endif
+ fprintf (stderr, " compiled by GNU C version %s.\n", __VERSION__);
+#else
+ fprintf (stderr, " compiled by CC.\n");
+#endif
+ if (! quiet_flag)
+ print_switch_values ();
+ }
+
+ compile_file (filename);
+
+#ifndef OS2
+#ifndef VMS
+ if (flag_print_mem)
+ {
+#ifdef __alpha
+ char *sbrk ();
+#endif
+ char *lim = (char *) sbrk (0);
+
+ fprintf (stderr, "Data size %d.\n",
+ lim - (char *) &environ);
+ fflush (stderr);
+
+#ifdef USG
+ system ("ps -l 1>&2");
+#else /* not USG */
+ system ("ps v");
+#endif /* not USG */
+ }
+#endif /* not VMS */
+#endif /* not OS2 */
+
+ if (errorcount)
+ exit (FATAL_EXIT_CODE);
+ if (sorrycount)
+ exit (FATAL_EXIT_CODE);
+ exit (SUCCESS_EXIT_CODE);
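+ /* Not reached: one of the exit calls above always runs. The return
+ value only quiets compilers that warn about falling off the end. */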
+ return 34;
+}
+
+/* Decode -m switches. */
+
+/* Here is a table, controlled by the tm.h file, listing each -m switch
+ and which bits in `target_switches' it should set or clear.
+ If VALUE is positive, it is bits to set.
+ If VALUE is negative, -VALUE is bits to clear.
+ (The sign bit is not used so there is no confusion.) */
+
+struct {char *name; int value;} target_switches []
+ = TARGET_SWITCHES;
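+
+/* An illustrative tm.h definition (switch names invented for the example):
+ #define TARGET_SWITCHES { {"fancy-math", 001}, {"no-fancy-math", -001}, \
+ {"", TARGET_DEFAULT} }
+ Then `-mfancy-math' sets the bit and `-mno-fancy-math' clears it, and the
+ entry with an empty name supplies the default bits; that is why main
+ calls set_target_switch ("") before decoding the command line. */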
+
+/* This table is similar, but allows the switch to have a value. */
+
+#ifdef TARGET_OPTIONS
+struct {char *prefix; char ** variable;} target_options []
+ = TARGET_OPTIONS;
+#endif
+
+/* Decode the switch -mNAME. */
+
+void
+set_target_switch (name)
+ char *name;
+{
+ register int j;
+ int valid = 0;
+
+ for (j = 0; j < sizeof target_switches / sizeof target_switches[0]; j++)
+ if (!strcmp (target_switches[j].name, name))
+ {
+ if (target_switches[j].value < 0)
+ target_flags &= ~-target_switches[j].value;
+ else
+ target_flags |= target_switches[j].value;
+ valid = 1;
+ }
+
+#ifdef TARGET_OPTIONS
+ if (!valid)
+ for (j = 0; j < sizeof target_options / sizeof target_options[0]; j++)
+ {
+ int len = strlen (target_options[j].prefix);
+ if (!strncmp (target_options[j].prefix, name, len))
+ {
+ *target_options[j].variable = name + len;
+ valid = 1;
+ }
+ }
+#endif
+
+ if (!valid)
+ error ("Invalid option `%s'", name);
+}
+
+/* Variable used for communication between the following two routines. */
+
+static int line_position;
+
+/* Print an option value and adjust the position in the line. */
+
+static void
+print_single_switch (type, name)
+ char *type, *name;
+{
+ fprintf (stderr, " %s%s", type, name);
+
+ line_position += strlen (type) + strlen (name) + 1;
+
+ if (line_position > 65)
+ {
+ fprintf (stderr, "\n\t");
+ line_position = 8;
+ }
+}
+
+/* Print default target switches for -version. */
+
+static void
+print_switch_values ()
+{
+ register int j;
+
+ fprintf (stderr, "enabled:");
+ line_position = 8;
+
+ for (j = 0; j < sizeof f_options / sizeof f_options[0]; j++)
+ if (*f_options[j].variable == f_options[j].on_value)
+ print_single_switch ("-f", f_options[j].string);
+
+ for (j = 0; j < sizeof W_options / sizeof W_options[0]; j++)
+ if (*W_options[j].variable == W_options[j].on_value)
+ print_single_switch ("-W", W_options[j].string);
+
+ for (j = 0; j < sizeof target_switches / sizeof target_switches[0]; j++)
+ if (target_switches[j].name[0] != '\0'
+ && target_switches[j].value > 0
+ && ((target_switches[j].value & target_flags)
+ == target_switches[j].value))
+ print_single_switch ("-m", target_switches[j].name);
+
+ fprintf (stderr, "\n");
+}
diff --git a/gnu/usr.bin/cc/cc_int/tree.c b/gnu/usr.bin/cc/cc_int/tree.c
new file mode 100644
index 0000000..e0aa0ae
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/tree.c
@@ -0,0 +1,3996 @@
+/* Language-independent node constructors for parse phase of GNU compiler.
+ Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file contains the low level primitives for operating on tree nodes,
+ including allocation, list operations, interning of identifiers,
+ construction of data type nodes and statement nodes,
+ and construction of type conversion nodes. It also contains
+ tables indexed by tree code that describe how to take apart
+ nodes of that code.
+
+ It is intended to be language-independent, but occasionally
+ calls language-dependent routines defined (for C) in typecheck.c.
+
+ The low-level allocation routines oballoc and permalloc
+ are used also for allocating many other kinds of objects
+ by all passes of the compiler. */
+
+#include <setjmp.h>
+#include "config.h"
+#include "flags.h"
+#include "tree.h"
+#include "function.h"
+#include "obstack.h"
+#ifdef __STDC__
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include <stdio.h>
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+
+/* Tree nodes of permanent duration are allocated in this obstack.
+ They are the identifier nodes, and everything outside of
+ the bodies and parameters of function definitions. */
+
+struct obstack permanent_obstack;
+
+/* The initial RTL, and all ..._TYPE nodes, in a function
+ are allocated in this obstack. Usually they are freed at the
+ end of the function, but if the function is inline they are saved.
+ For top-level functions, this is maybepermanent_obstack.
+ Separate obstacks are made for nested functions. */
+
+struct obstack *function_maybepermanent_obstack;
+
+/* This is the function_maybepermanent_obstack for top-level functions. */
+
+struct obstack maybepermanent_obstack;
+
+/* The contents of the current function definition are allocated
+ in this obstack, and all are freed at the end of the function.
+ For top-level functions, this is temporary_obstack.
+ Separate obstacks are made for nested functions. */
+
+struct obstack *function_obstack;
+
+/* This is used for reading initializers of global variables. */
+
+struct obstack temporary_obstack;
+
+/* The tree nodes of an expression are allocated
+ in this obstack, and all are freed at the end of the expression. */
+
+struct obstack momentary_obstack;
+
+/* The tree nodes of a declarator are allocated
+ in this obstack, and all are freed when the declarator
+ has been parsed. */
+
+static struct obstack temp_decl_obstack;
+
+/* This points at either permanent_obstack
+ or the current function_maybepermanent_obstack. */
+
+struct obstack *saveable_obstack;
+
+/* This is same as saveable_obstack during parse and expansion phase;
+ it points to the current function's obstack during optimization.
+ This is the obstack to be used for creating rtl objects. */
+
+struct obstack *rtl_obstack;
+
+/* This points at either permanent_obstack or the current function_obstack. */
+
+struct obstack *current_obstack;
+
+/* This points at either permanent_obstack or the current function_obstack
+ or momentary_obstack. */
+
+struct obstack *expression_obstack;
+
+/* Stack of obstack selections for push_obstacks and pop_obstacks. */
+
+struct obstack_stack
+{
+ struct obstack_stack *next;
+ struct obstack *current;
+ struct obstack *saveable;
+ struct obstack *expression;
+ struct obstack *rtl;
+};
+
+struct obstack_stack *obstack_stack;
+
+/* Obstack for allocating struct obstack_stack entries. */
+
+static struct obstack obstack_stack_obstack;
+
+/* Addresses of first objects in some obstacks.
+ This is for freeing their entire contents. */
+char *maybepermanent_firstobj;
+char *temporary_firstobj;
+char *momentary_firstobj;
+char *temp_decl_firstobj;
+
+/* This is used to preserve objects (mainly array initializers) that need to
+ live until the end of the current function, but no further. */
+char *momentary_function_firstobj;
+
+/* Nonzero means all ..._TYPE nodes should be allocated permanently. */
+
+int all_types_permanent;
+
+/* Stack of places to restore the momentary obstack back to. */
+
+struct momentary_level
+{
+ /* Pointer back to previous such level. */
+ struct momentary_level *prev;
+ /* First object allocated within this level. */
+ char *base;
+ /* Value of expression_obstack saved at entry to this level. */
+ struct obstack *obstack;
+};
+
+struct momentary_level *momentary_stack;
+
+/* Table indexed by tree code giving a string containing a character
+ classifying the tree code. Possibilities are
+ t, d, s, c, r, <, 1, 2 and e. See tree.def for details. */
+
+#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) TYPE,
+
+char *standard_tree_code_type[] = {
+#include "tree.def"
+};
+#undef DEFTREECODE
+
+/* Table indexed by tree code giving number of expression
+ operands beyond the fixed part of the node structure.
+ Not used for types or decls. */
+
+#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) LENGTH,
+
+int standard_tree_code_length[] = {
+#include "tree.def"
+};
+#undef DEFTREECODE
+
+/* Names of tree components.
+ Used for printing out the tree and error messages. */
+#define DEFTREECODE(SYM, NAME, TYPE, LEN) NAME,
+
+char *standard_tree_code_name[] = {
+#include "tree.def"
+};
+#undef DEFTREECODE
+
+/* Table indexed by tree code giving a string containing a character
+ classifying the tree code. Possibilities are
+ t, d, s, c, r, e, <, 1 and 2. See tree.def for details. */
+
+char **tree_code_type;
+
+/* Table indexed by tree code giving number of expression
+ operands beyond the fixed part of the node structure.
+ Not used for types or decls. */
+
+int *tree_code_length;
+
+/* Table indexed by tree code giving name of tree code, as a string. */
+
+char **tree_code_name;
+
+/* Statistics-gathering stuff. */
+typedef enum
+{
+ d_kind,
+ t_kind,
+ b_kind,
+ s_kind,
+ r_kind,
+ e_kind,
+ c_kind,
+ id_kind,
+ op_id_kind,
+ perm_list_kind,
+ temp_list_kind,
+ vec_kind,
+ x_kind,
+ lang_decl,
+ lang_type,
+ all_kinds
+} tree_node_kind;
+
+int tree_node_counts[(int)all_kinds];
+int tree_node_sizes[(int)all_kinds];
+int id_string_size = 0;
+
+char *tree_node_kind_names[] = {
+ "decls",
+ "types",
+ "blocks",
+ "stmts",
+ "refs",
+ "exprs",
+ "constants",
+ "identifiers",
+ "op_identifiers",
+ "perm_tree_lists",
+ "temp_tree_lists",
+ "vecs",
+ "random kinds",
+ "lang_decl kinds",
+ "lang_type kinds"
+};
+
+/* Hash table for uniquizing IDENTIFIER_NODEs by name. */
+
+#define MAX_HASH_TABLE 1009
+static tree hash_table[MAX_HASH_TABLE]; /* id hash buckets */
+
+/* 0 while creating built-in identifiers. */
+static int do_identifier_warnings;
+
+/* Unique id for next decl created. */
+static int next_decl_uid;
+/* Unique id for next type created. */
+static int next_type_uid = 1;
+
+/* Here is how primitive or already-canonicalized types' hash
+ codes are made. */
+#define TYPE_HASH(TYPE) ((HOST_WIDE_INT) (TYPE) & 0777777)
+
+extern char *mode_name[];
+
+void gcc_obstack_init ();
+static tree stabilize_reference_1 ();
+
+/* Init the principal obstacks. */
+
+void
+init_obstacks ()
+{
+ gcc_obstack_init (&obstack_stack_obstack);
+ gcc_obstack_init (&permanent_obstack);
+
+ gcc_obstack_init (&temporary_obstack);
+ temporary_firstobj = (char *) obstack_alloc (&temporary_obstack, 0);
+ gcc_obstack_init (&momentary_obstack);
+ momentary_firstobj = (char *) obstack_alloc (&momentary_obstack, 0);
+ momentary_function_firstobj = momentary_firstobj;
+ gcc_obstack_init (&maybepermanent_obstack);
+ maybepermanent_firstobj
+ = (char *) obstack_alloc (&maybepermanent_obstack, 0);
+ gcc_obstack_init (&temp_decl_obstack);
+ temp_decl_firstobj = (char *) obstack_alloc (&temp_decl_obstack, 0);
+
+ function_obstack = &temporary_obstack;
+ function_maybepermanent_obstack = &maybepermanent_obstack;
+ current_obstack = &permanent_obstack;
+ expression_obstack = &permanent_obstack;
+ rtl_obstack = saveable_obstack = &permanent_obstack;
+
+ /* Init the hash table of identifiers. */
+ bzero ((char *) hash_table, sizeof hash_table);
+}
+
+void
+gcc_obstack_init (obstack)
+ struct obstack *obstack;
+{
+ /* Let particular systems override the size of a chunk. */
+#ifndef OBSTACK_CHUNK_SIZE
+#define OBSTACK_CHUNK_SIZE 0
+#endif
+ /* Let them override the alloc and free routines too. */
+#ifndef OBSTACK_CHUNK_ALLOC
+#define OBSTACK_CHUNK_ALLOC xmalloc
+#endif
+#ifndef OBSTACK_CHUNK_FREE
+#define OBSTACK_CHUNK_FREE free
+#endif
+ _obstack_begin (obstack, OBSTACK_CHUNK_SIZE, 0,
+ (void *(*) ()) OBSTACK_CHUNK_ALLOC,
+ (void (*) ()) OBSTACK_CHUNK_FREE);
+}
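+
+/* Typical use, as in init_obstacks above:
+ gcc_obstack_init (&some_obstack);
+ base = (char *) obstack_alloc (&some_obstack, 0);
+ The zero-length allocation records the base of the obstack, so a later
+ obstack_free (&some_obstack, base) releases everything allocated since.
+ (A sketch with placeholder names, mirroring the pattern in this file.) */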
+
+/* Save all variables describing the current status into the structure *P.
+ This is used before starting a nested function. */
+
+void
+save_tree_status (p)
+ struct function *p;
+{
+ p->all_types_permanent = all_types_permanent;
+ p->momentary_stack = momentary_stack;
+ p->maybepermanent_firstobj = maybepermanent_firstobj;
+ p->momentary_firstobj = momentary_firstobj;
+ p->momentary_function_firstobj = momentary_function_firstobj;
+ p->function_obstack = function_obstack;
+ p->function_maybepermanent_obstack = function_maybepermanent_obstack;
+ p->current_obstack = current_obstack;
+ p->expression_obstack = expression_obstack;
+ p->saveable_obstack = saveable_obstack;
+ p->rtl_obstack = rtl_obstack;
+
+ /* Objects that need to be saved in this function can be in the nonsaved
+ obstack of the enclosing function since they can't possibly be needed
+ once it has returned. */
+ function_maybepermanent_obstack = function_obstack;
+
+ function_obstack = (struct obstack *) xmalloc (sizeof (struct obstack));
+ gcc_obstack_init (function_obstack);
+
+ current_obstack = &permanent_obstack;
+ expression_obstack = &permanent_obstack;
+ rtl_obstack = saveable_obstack = &permanent_obstack;
+
+ momentary_firstobj = (char *) obstack_finish (&momentary_obstack);
+ momentary_function_firstobj = momentary_firstobj;
+ maybepermanent_firstobj
+ = (char *) obstack_finish (function_maybepermanent_obstack);
+}
+
+/* Restore all variables describing the current status from the structure *P.
+ This is used after a nested function. */
+
+void
+restore_tree_status (p)
+ struct function *p;
+{
+ all_types_permanent = p->all_types_permanent;
+ momentary_stack = p->momentary_stack;
+
+ obstack_free (&momentary_obstack, momentary_function_firstobj);
+
+ /* Free saveable storage used by the function just compiled and not
+ saved.
+
+ CAUTION: This is in function_obstack of the containing function. So
+ we must be sure that we never allocate from that obstack during
+ the compilation of a nested function if we expect it to survive past the
+ nested function's end. */
+ obstack_free (function_maybepermanent_obstack, maybepermanent_firstobj);
+
+ obstack_free (function_obstack, 0);
+ free (function_obstack);
+
+ momentary_firstobj = p->momentary_firstobj;
+ momentary_function_firstobj = p->momentary_function_firstobj;
+ maybepermanent_firstobj = p->maybepermanent_firstobj;
+ function_obstack = p->function_obstack;
+ function_maybepermanent_obstack = p->function_maybepermanent_obstack;
+ current_obstack = p->current_obstack;
+ expression_obstack = p->expression_obstack;
+ saveable_obstack = p->saveable_obstack;
+ rtl_obstack = p->rtl_obstack;
+}
+
+/* Start allocating on the temporary (per function) obstack.
+ This is done in start_function before parsing the function body,
+ and before each initialization at top level, and to go back
+ to temporary allocation after doing permanent_allocation. */
+
+void
+temporary_allocation ()
+{
+ /* Note that function_obstack at top level points to temporary_obstack.
+ But within a nested function context, it is a separate obstack. */
+ current_obstack = function_obstack;
+ expression_obstack = function_obstack;
+ rtl_obstack = saveable_obstack = function_maybepermanent_obstack;
+ momentary_stack = 0;
+}
+
+/* Start allocating on the permanent obstack but don't
+ free the temporary data. After calling this, call
+ `permanent_allocation' to fully resume permanent allocation status. */
+
+void
+end_temporary_allocation ()
+{
+ current_obstack = &permanent_obstack;
+ expression_obstack = &permanent_obstack;
+ rtl_obstack = saveable_obstack = &permanent_obstack;
+}
+
+/* Resume allocating on the temporary obstack, undoing
+ effects of `end_temporary_allocation'. */
+
+void
+resume_temporary_allocation ()
+{
+ current_obstack = function_obstack;
+ expression_obstack = function_obstack;
+ rtl_obstack = saveable_obstack = function_maybepermanent_obstack;
+}
+
+/* While doing temporary allocation, switch to allocating in such a
+ way as to save all nodes if the function is inlined. Call
+ resume_temporary_allocation to go back to ordinary temporary
+ allocation. */
+
+void
+saveable_allocation ()
+{
+ /* Note that function_obstack at top level points to temporary_obstack.
+ But within a nested function context, it is a separate obstack. */
+ expression_obstack = current_obstack = saveable_obstack;
+}
+
+/* Switch to current obstack CURRENT and maybepermanent obstack SAVEABLE,
+ recording the previously current obstacks on a stack.
+ This does not free any storage in any obstack. */
+
+void
+push_obstacks (current, saveable)
+ struct obstack *current, *saveable;
+{
+ struct obstack_stack *p
+ = (struct obstack_stack *) obstack_alloc (&obstack_stack_obstack,
+ (sizeof (struct obstack_stack)));
+
+ p->current = current_obstack;
+ p->saveable = saveable_obstack;
+ p->expression = expression_obstack;
+ p->rtl = rtl_obstack;
+ p->next = obstack_stack;
+ obstack_stack = p;
+
+ current_obstack = current;
+ expression_obstack = current;
+ rtl_obstack = saveable_obstack = saveable;
+}
+
+/* Save the current set of obstacks, but don't change them. */
+
+void
+push_obstacks_nochange ()
+{
+ struct obstack_stack *p
+ = (struct obstack_stack *) obstack_alloc (&obstack_stack_obstack,
+ (sizeof (struct obstack_stack)));
+
+ p->current = current_obstack;
+ p->saveable = saveable_obstack;
+ p->expression = expression_obstack;
+ p->rtl = rtl_obstack;
+ p->next = obstack_stack;
+ obstack_stack = p;
+}
+
+/* Pop the obstack selection stack. */
+
+void
+pop_obstacks ()
+{
+ struct obstack_stack *p = obstack_stack;
+ obstack_stack = p->next;
+
+ current_obstack = p->current;
+ saveable_obstack = p->saveable;
+ expression_obstack = p->expression;
+ rtl_obstack = p->rtl;
+
+ obstack_free (&obstack_stack_obstack, p);
+}
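+
+/* A sketch of how these calls are meant to nest:
+ push_obstacks_nochange ();
+ end_temporary_allocation ();
+ ... allocate nodes that must outlive the current function ...
+ pop_obstacks ();
+ The pop restores whatever allocation regime was in effect beforehand,
+ so callers need not track which obstacks were current. */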
+
+/* Nonzero if temporary allocation is currently in effect.
+ Zero if currently doing permanent allocation. */
+
+int
+allocation_temporary_p ()
+{
+ return current_obstack != &permanent_obstack;
+}
+
+/* Go back to allocating on the permanent obstack
+ and free everything in the temporary obstack.
+
+ FUNCTION_END is true only if we have just finished compiling a function.
+ In that case, we also free preserved initial values on the momentary
+ obstack. */
+
+void
+permanent_allocation (function_end)
+ int function_end;
+{
+ /* Free up previous temporary obstack data */
+ obstack_free (&temporary_obstack, temporary_firstobj);
+ if (function_end)
+ obstack_free (&momentary_obstack, momentary_function_firstobj);
+ else
+ obstack_free (&momentary_obstack, momentary_firstobj);
+ obstack_free (&maybepermanent_obstack, maybepermanent_firstobj);
+ obstack_free (&temp_decl_obstack, temp_decl_firstobj);
+
+ current_obstack = &permanent_obstack;
+ expression_obstack = &permanent_obstack;
+ rtl_obstack = saveable_obstack = &permanent_obstack;
+}
+
+/* Save permanently everything on the maybepermanent_obstack. */
+
+void
+preserve_data ()
+{
+ maybepermanent_firstobj
+ = (char *) obstack_alloc (function_maybepermanent_obstack, 0);
+}
+
+void
+preserve_initializer ()
+{
+ struct momentary_level *tem;
+ char *old_momentary;
+
+ temporary_firstobj
+ = (char *) obstack_alloc (&temporary_obstack, 0);
+ maybepermanent_firstobj
+ = (char *) obstack_alloc (function_maybepermanent_obstack, 0);
+
+ old_momentary = momentary_firstobj;
+ momentary_firstobj
+ = (char *) obstack_alloc (&momentary_obstack, 0);
+ if (momentary_firstobj != old_momentary)
+ for (tem = momentary_stack; tem; tem = tem->prev)
+ tem->base = momentary_firstobj;
+}
+
+/* Start allocating new rtl in current_obstack.
+ Use resume_temporary_allocation
+ to go back to allocating rtl in saveable_obstack. */
+
+void
+rtl_in_current_obstack ()
+{
+ rtl_obstack = current_obstack;
+}
+
+/* Start allocating rtl from saveable_obstack. Intended to be used after
+ a call to push_obstacks_nochange. */
+
+void
+rtl_in_saveable_obstack ()
+{
+ rtl_obstack = saveable_obstack;
+}
+
+/* Allocate SIZE bytes in the current obstack
+ and return a pointer to them.
+ In practice the current obstack is always the temporary one. */
+
+char *
+oballoc (size)
+ int size;
+{
+ return (char *) obstack_alloc (current_obstack, size);
+}
+
+/* Free the object PTR in the current obstack
+ as well as everything allocated since PTR.
+ In practice the current obstack is always the temporary one. */
+
+void
+obfree (ptr)
+ char *ptr;
+{
+ obstack_free (current_obstack, ptr);
+}
+
+/* Allocate SIZE bytes in the permanent obstack
+ and return a pointer to them. */
+
+char *
+permalloc (size)
+ int size;
+{
+ return (char *) obstack_alloc (&permanent_obstack, size);
+}
+
+/* Allocate NELEM items of SIZE bytes in the permanent obstack
+ and return a pointer to them. The storage is cleared before
+ returning the value. */
+
+char *
+perm_calloc (nelem, size)
+ int nelem;
+ long size;
+{
+ char *rval = (char *) obstack_alloc (&permanent_obstack, nelem * size);
+ bzero (rval, nelem * size);
+ return rval;
+}
+
+/* Allocate SIZE bytes in the saveable obstack
+ and return a pointer to them. */
+
+char *
+savealloc (size)
+ int size;
+{
+ return (char *) obstack_alloc (saveable_obstack, size);
+}
+
+/* Print out which obstack an object is in. */
+
+void
+print_obstack_name (object, file, prefix)
+ char *object;
+ FILE *file;
+ char *prefix;
+{
+ struct obstack *obstack = NULL;
+ char *obstack_name = NULL;
+ struct function *p;
+
+ for (p = outer_function_chain; p; p = p->next)
+ {
+ if (_obstack_allocated_p (p->function_obstack, object))
+ {
+ obstack = p->function_obstack;
+ obstack_name = "containing function obstack";
+ }
+ if (_obstack_allocated_p (p->function_maybepermanent_obstack, object))
+ {
+ obstack = p->function_maybepermanent_obstack;
+ obstack_name = "containing function maybepermanent obstack";
+ }
+ }
+
+ if (_obstack_allocated_p (&obstack_stack_obstack, object))
+ {
+ obstack = &obstack_stack_obstack;
+ obstack_name = "obstack_stack_obstack";
+ }
+ else if (_obstack_allocated_p (function_obstack, object))
+ {
+ obstack = function_obstack;
+ obstack_name = "function obstack";
+ }
+ else if (_obstack_allocated_p (&permanent_obstack, object))
+ {
+ obstack = &permanent_obstack;
+ obstack_name = "permanent_obstack";
+ }
+ else if (_obstack_allocated_p (&momentary_obstack, object))
+ {
+ obstack = &momentary_obstack;
+ obstack_name = "momentary_obstack";
+ }
+ else if (_obstack_allocated_p (function_maybepermanent_obstack, object))
+ {
+ obstack = function_maybepermanent_obstack;
+ obstack_name = "function maybepermanent obstack";
+ }
+ else if (_obstack_allocated_p (&temp_decl_obstack, object))
+ {
+ obstack = &temp_decl_obstack;
+ obstack_name = "temp_decl_obstack";
+ }
+
+ /* Check to see if the object is in the free area of the obstack. */
+ if (obstack != NULL)
+ {
+ if (object >= obstack->next_free
+ && object < obstack->chunk_limit)
+ fprintf (file, "%s in free portion of obstack %s",
+ prefix, obstack_name);
+ else
+ fprintf (file, "%s allocated from %s", prefix, obstack_name);
+ }
+ else
+ fprintf (file, "%s not allocated from any obstack", prefix);
+}
+
+void
+debug_obstack (object)
+ char *object;
+{
+ print_obstack_name (object, stderr, "object");
+ fprintf (stderr, ".\n");
+}
+
+/* Return 1 if OBJ is in the permanent obstack.
+ This is slow, and should be used only for debugging.
+ Use TREE_PERMANENT for other purposes. */
+
+int
+object_permanent_p (obj)
+ tree obj;
+{
+ return _obstack_allocated_p (&permanent_obstack, obj);
+}
+
+/* Start a level of momentary allocation.
+ In C, each compound statement has its own level
+ and that level is freed at the end of each statement.
+ All expression nodes are allocated in the momentary allocation level. */
+
+void
+push_momentary ()
+{
+ struct momentary_level *tem
+ = (struct momentary_level *) obstack_alloc (&momentary_obstack,
+ sizeof (struct momentary_level));
+ tem->prev = momentary_stack;
+ tem->base = (char *) obstack_base (&momentary_obstack);
+ tem->obstack = expression_obstack;
+ momentary_stack = tem;
+ expression_obstack = &momentary_obstack;
+}
+
+/* Free all the storage in the current momentary-allocation level.
+ In C, this happens at the end of each statement. */
+
+void
+clear_momentary ()
+{
+ obstack_free (&momentary_obstack, momentary_stack->base);
+}
+
+/* Discard a level of momentary allocation.
+ In C, this happens at the end of each compound statement.
+ Restore the status of expression node allocation
+ that was in effect before this level was created. */
+
+void
+pop_momentary ()
+{
+ struct momentary_level *tem = momentary_stack;
+ momentary_stack = tem->prev;
+ expression_obstack = tem->obstack;
+ /* We can't free TEM from the momentary_obstack, because there might
+ be objects above it which have been saved. We can free back to the
+ stack of the level we are popping off though. */
+ obstack_free (&momentary_obstack, tem->base);
+}
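+
+/* Sketch of the statement-level protocol the comments above describe:
+ push_momentary () at the start of a compound statement,
+ clear_momentary () at the end of each enclosed statement,
+ pop_momentary () at the closing brace.
+ Expression nodes built in between live no longer than they are needed. */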
+
+/* Pop back to the previous level of momentary allocation,
+ but don't free any momentary data just yet. */
+
+void
+pop_momentary_nofree ()
+{
+ struct momentary_level *tem = momentary_stack;
+ momentary_stack = tem->prev;
+ expression_obstack = tem->obstack;
+}
+
+/* Call when starting to parse a declaration:
+ make expressions in the declaration last the length of the function.
+ Returns an argument that should be passed to resume_momentary later. */
+
+int
+suspend_momentary ()
+{
+ register int tem = expression_obstack == &momentary_obstack;
+ expression_obstack = saveable_obstack;
+ return tem;
+}
+
+/* Call when finished parsing a declaration:
+ restore the treatment of node-allocation that was
+ in effect before the suspension.
+ YES should be the value previously returned by suspend_momentary. */
+
+void
+resume_momentary (yes)
+ int yes;
+{
+ if (yes)
+ expression_obstack = &momentary_obstack;
+}
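+
+/* The intended pairing when parsing a declaration:
+ int hold = suspend_momentary ();
+ ... build nodes that must last the length of the function ...
+ resume_momentary (hold);
+ HOLD records whether momentary allocation was in effect, so nested
+ suspensions restore the previous state correctly. */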
+
+/* Init the tables indexed by tree code.
+ Note that languages can add to these tables to define their own codes. */
+
+void
+init_tree_codes ()
+{
+ tree_code_type = (char **) xmalloc (sizeof (standard_tree_code_type));
+ tree_code_length = (int *) xmalloc (sizeof (standard_tree_code_length));
+ tree_code_name = (char **) xmalloc (sizeof (standard_tree_code_name));
+ bcopy ((char *) standard_tree_code_type, (char *) tree_code_type,
+ sizeof (standard_tree_code_type));
+ bcopy ((char *) standard_tree_code_length, (char *) tree_code_length,
+ sizeof (standard_tree_code_length));
+ bcopy ((char *) standard_tree_code_name, (char *) tree_code_name,
+ sizeof (standard_tree_code_name));
+}
+
+/* Return a newly allocated node of code CODE.
+ Initialize the node's unique id and its TREE_PERMANENT flag.
+ For decl and type nodes, some other fields are initialized.
+ The rest of the node is initialized to zero.
+
+ Achoo! I got a code in the node. */
+
+tree
+make_node (code)
+ enum tree_code code;
+{
+ register tree t;
+ register int type = TREE_CODE_CLASS (code);
+ register int length;
+ register struct obstack *obstack = current_obstack;
+ register int i;
+ register tree_node_kind kind;
+
+ switch (type)
+ {
+ case 'd': /* A decl node */
+#ifdef GATHER_STATISTICS
+ kind = d_kind;
+#endif
+ length = sizeof (struct tree_decl);
+ /* All decls in an inline function need to be saved. */
+ if (obstack != &permanent_obstack)
+ obstack = saveable_obstack;
+
+ /* PARM_DECLs go on the context of the parent. If this is a nested
+ function, then we must allocate the PARM_DECL on the parent's
+ obstack, so that they will live to the end of the parent's
+ closing brace. This is necessary in case we try to inline the
+ function into its parent.
+
+ PARM_DECLs of top-level functions do not have this problem. However,
+ we allocate them where we put the FUNCTION_DECL for languages such as
+ Ada that need to consult some flags in the PARM_DECLs of the function
+ when calling it.
+
+ See comment in restore_tree_status for why we can't put this
+ in function_obstack. */
+ if (code == PARM_DECL && obstack != &permanent_obstack)
+ {
+ tree context = 0;
+ if (current_function_decl)
+ context = decl_function_context (current_function_decl);
+
+ if (context)
+ obstack
+ = find_function_data (context)->function_maybepermanent_obstack;
+ }
+ break;
+
+ case 't': /* a type node */
+#ifdef GATHER_STATISTICS
+ kind = t_kind;
+#endif
+ length = sizeof (struct tree_type);
+ /* All data types are put where we can preserve them if nec. */
+ if (obstack != &permanent_obstack)
+ obstack = all_types_permanent ? &permanent_obstack : saveable_obstack;
+ break;
+
+ case 'b': /* a lexical block */
+#ifdef GATHER_STATISTICS
+ kind = b_kind;
+#endif
+ length = sizeof (struct tree_block);
+ /* All BLOCK nodes are put where we can preserve them if nec. */
+ if (obstack != &permanent_obstack)
+ obstack = saveable_obstack;
+ break;
+
+ case 's': /* an expression with side effects */
+#ifdef GATHER_STATISTICS
+ kind = s_kind;
+ goto usual_kind;
+#endif
+ case 'r': /* a reference */
+#ifdef GATHER_STATISTICS
+ kind = r_kind;
+ goto usual_kind;
+#endif
+ case 'e': /* an expression */
+ case '<': /* a comparison expression */
+ case '1': /* a unary arithmetic expression */
+ case '2': /* a binary arithmetic expression */
+#ifdef GATHER_STATISTICS
+ kind = e_kind;
+ usual_kind:
+#endif
+ obstack = expression_obstack;
+ /* All BIND_EXPR nodes are put where we can preserve them if nec. */
+ if (code == BIND_EXPR && obstack != &permanent_obstack)
+ obstack = saveable_obstack;
+ length = sizeof (struct tree_exp)
+ + (tree_code_length[(int) code] - 1) * sizeof (char *);
+ break;
+
+ case 'c': /* a constant */
+#ifdef GATHER_STATISTICS
+ kind = c_kind;
+#endif
+ obstack = expression_obstack;
+
+ /* We can't use tree_code_length for INTEGER_CST, since the number of
+ words is machine-dependent due to varying length of HOST_WIDE_INT,
+ which might be wider than a pointer (e.g., long long). Similarly
+ for REAL_CST, since the number of words is machine-dependent due
+ to varying size and alignment of `double'. */
+
+ if (code == INTEGER_CST)
+ length = sizeof (struct tree_int_cst);
+ else if (code == REAL_CST)
+ length = sizeof (struct tree_real_cst);
+ else
+ length = sizeof (struct tree_common)
+ + tree_code_length[(int) code] * sizeof (char *);
+ break;
+
+ case 'x': /* something random, like an identifier. */
+#ifdef GATHER_STATISTICS
+ if (code == IDENTIFIER_NODE)
+ kind = id_kind;
+ else if (code == OP_IDENTIFIER)
+ kind = op_id_kind;
+ else if (code == TREE_VEC)
+ kind = vec_kind;
+ else
+ kind = x_kind;
+#endif
+ length = sizeof (struct tree_common)
+ + tree_code_length[(int) code] * sizeof (char *);
+ /* Identifier nodes are always permanent since they are
+ unique in a compiler run. */
+ if (code == IDENTIFIER_NODE) obstack = &permanent_obstack;
+ break;
+
+ default:
+ abort ();
+ }
+
+ t = (tree) obstack_alloc (obstack, length);
+
+#ifdef GATHER_STATISTICS
+ tree_node_counts[(int)kind]++;
+ tree_node_sizes[(int)kind] += length;
+#endif
+
+ /* Clear a word at a time. */
+ for (i = (length / sizeof (int)) - 1; i >= 0; i--)
+ ((int *) t)[i] = 0;
+ /* Clear any extra bytes. */
+ for (i = length / sizeof (int) * sizeof (int); i < length; i++)
+ ((char *) t)[i] = 0;
+
+ TREE_SET_CODE (t, code);
+ if (obstack == &permanent_obstack)
+ TREE_PERMANENT (t) = 1;
+
+ switch (type)
+ {
+ case 's':
+ TREE_SIDE_EFFECTS (t) = 1;
+ TREE_TYPE (t) = void_type_node;
+ break;
+
+ case 'd':
+ if (code != FUNCTION_DECL)
+ DECL_ALIGN (t) = 1;
+ DECL_IN_SYSTEM_HEADER (t)
+ = in_system_header && (obstack == &permanent_obstack);
+ DECL_SOURCE_LINE (t) = lineno;
+ DECL_SOURCE_FILE (t) = (input_filename) ? input_filename : "<built-in>";
+ DECL_UID (t) = next_decl_uid++;
+ break;
+
+ case 't':
+ TYPE_UID (t) = next_type_uid++;
+ TYPE_ALIGN (t) = 1;
+ TYPE_MAIN_VARIANT (t) = t;
+ TYPE_OBSTACK (t) = obstack;
+ TYPE_ATTRIBUTES (t) = NULL_TREE;
+#ifdef SET_DEFAULT_TYPE_ATTRIBUTES
+ SET_DEFAULT_TYPE_ATTRIBUTES (t);
+#endif
+ break;
+
+ case 'c':
+ TREE_CONSTANT (t) = 1;
+ break;
+ }
+
+ return t;
+}
+
+/* Return a new node with the same contents as NODE
+ except that its TREE_CHAIN is zero and it has a fresh uid. */
+
+tree
+copy_node (node)
+ tree node;
+{
+ register tree t;
+ register enum tree_code code = TREE_CODE (node);
+ register int length;
+ register int i;
+
+ switch (TREE_CODE_CLASS (code))
+ {
+ case 'd': /* A decl node */
+ length = sizeof (struct tree_decl);
+ break;
+
+ case 't': /* a type node */
+ length = sizeof (struct tree_type);
+ break;
+
+ case 'b': /* a lexical block node */
+ length = sizeof (struct tree_block);
+ break;
+
+ case 'r': /* a reference */
+ case 'e': /* an expression */
+ case 's': /* an expression with side effects */
+ case '<': /* a comparison expression */
+ case '1': /* a unary arithmetic expression */
+ case '2': /* a binary arithmetic expression */
+ length = sizeof (struct tree_exp)
+ + (tree_code_length[(int) code] - 1) * sizeof (char *);
+ break;
+
+ case 'c': /* a constant */
+ /* We can't use tree_code_length for INTEGER_CST, since the number of
+ words is machine-dependent due to varying length of HOST_WIDE_INT,
+ which might be wider than a pointer (e.g., long long). Similarly
+ for REAL_CST, since the number of words is machine-dependent due
+ to varying size and alignment of `double'. */
+ if (code == INTEGER_CST)
+ {
+ length = sizeof (struct tree_int_cst);
+ break;
+ }
+ else if (code == REAL_CST)
+ {
+ length = sizeof (struct tree_real_cst);
+ break;
+ }
+
+ case 'x': /* something random, like an identifier. */
+ length = sizeof (struct tree_common)
+ + tree_code_length[(int) code] * sizeof (char *);
+ if (code == TREE_VEC)
+ length += (TREE_VEC_LENGTH (node) - 1) * sizeof (char *);
+ }
+
+ t = (tree) obstack_alloc (current_obstack, length);
+
+ for (i = (length / sizeof (int)) - 1; i >= 0; i--)
+ ((int *) t)[i] = ((int *) node)[i];
+ /* Clear any extra bytes. */
+ for (i = length / sizeof (int) * sizeof (int); i < length; i++)
+ ((char *) t)[i] = ((char *) node)[i];
+
+ TREE_CHAIN (t) = 0;
+
+ if (TREE_CODE_CLASS (code) == 'd')
+ DECL_UID (t) = next_decl_uid++;
+ else if (TREE_CODE_CLASS (code) == 't')
+ {
+ TYPE_UID (t) = next_type_uid++;
+ TYPE_OBSTACK (t) = current_obstack;
+ }
+
+ TREE_PERMANENT (t) = (current_obstack == &permanent_obstack);
+
+ return t;
+}
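+
+#if 0
+/* Illustrative sketch (editor's example, not part of the import):
+ copy_node yields an unchained twin with a fresh uid. Assumes the
+ usual tree.h environment; `example_copy' is hypothetical. */
+static void
+example_copy (node)
+ tree node;
+{
+ tree twin = copy_node (node);
+
+ /* The copy shares the original's contents but starts a new chain. */
+ if (TREE_CHAIN (twin) != 0)
+ abort ();
+}
+#endif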
+
+/* Return a copy of a chain of nodes, chained through the TREE_CHAIN field.
+ For example, this can copy a list made of TREE_LIST nodes. */
+
+tree
+copy_list (list)
+ tree list;
+{
+ tree head;
+ register tree prev, next;
+
+ if (list == 0)
+ return 0;
+
+ head = prev = copy_node (list);
+ next = TREE_CHAIN (list);
+ while (next)
+ {
+ TREE_CHAIN (prev) = copy_node (next);
+ prev = TREE_CHAIN (prev);
+ next = TREE_CHAIN (next);
+ }
+ return head;
+}
+
+#define HASHBITS 30
+
+/* Return an IDENTIFIER_NODE whose name is TEXT (a null-terminated string).
+ If an identifier with that name has previously been referred to,
+ the same node is returned this time. */
+
+tree
+get_identifier (text)
+ register char *text;
+{
+ register int hi;
+ register int i;
+ register tree idp;
+ register int len, hash_len;
+
+ /* Compute length of text in len. */
+ for (len = 0; text[len]; len++);
+
+ /* Decide how much of that length to hash on */
+ hash_len = len;
+ if (warn_id_clash && len > id_clash_len)
+ hash_len = id_clash_len;
+
+ /* Compute hash code */
+ hi = hash_len * 613 + (unsigned)text[0];
+ for (i = 1; i < hash_len; i += 2)
+ hi = ((hi * 613) + (unsigned)(text[i]));
+
+ hi &= (1 << HASHBITS) - 1;
+ hi %= MAX_HASH_TABLE;
+
+ /* Search table for identifier */
+ for (idp = hash_table[hi]; idp; idp = TREE_CHAIN (idp))
+ if (IDENTIFIER_LENGTH (idp) == len
+ && IDENTIFIER_POINTER (idp)[0] == text[0]
+ && !bcmp (IDENTIFIER_POINTER (idp), text, len))
+ return idp; /* <-- return if found */
+
+ /* Not found; optionally warn about a similar identifier */
+ if (warn_id_clash && do_identifier_warnings && len >= id_clash_len)
+ for (idp = hash_table[hi]; idp; idp = TREE_CHAIN (idp))
+ if (!strncmp (IDENTIFIER_POINTER (idp), text, id_clash_len))
+ {
+ warning ("`%s' and `%s' identical in first %d characters",
+ IDENTIFIER_POINTER (idp), text, id_clash_len);
+ break;
+ }
+
+ if (tree_code_length[(int) IDENTIFIER_NODE] < 0)
+ abort (); /* set_identifier_size hasn't been called. */
+
+ /* Not found, create one, add to chain */
+ idp = make_node (IDENTIFIER_NODE);
+ IDENTIFIER_LENGTH (idp) = len;
+#ifdef GATHER_STATISTICS
+ id_string_size += len;
+#endif
+
+ IDENTIFIER_POINTER (idp) = obstack_copy0 (&permanent_obstack, text, len);
+
+ TREE_CHAIN (idp) = hash_table[hi];
+ hash_table[hi] = idp;
+ return idp; /* <-- return if created */
+}
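+
+#if 0
+/* Illustrative sketch (editor's example, not part of the import):
+ identifiers are interned, so two lookups of equal strings return
+ the very same node. Assumes the usual tree.h environment. */
+static void
+example_intern ()
+{
+ tree a = get_identifier ("size_t");
+ tree b = get_identifier ("size_t");
+
+ if (a != b)
+ abort (); /* Never taken: "size_t" is interned. */
+}
+#endif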
+
+/* Enable warnings on similar identifiers (if requested).
+ Done after the built-in identifiers are created. */
+
+void
+start_identifier_warnings ()
+{
+ do_identifier_warnings = 1;
+}
+
+/* Record the size of an identifier node for the language in use.
+ SIZE is the total size in bytes.
+ This is called by the language-specific files. This must be
+ called before allocating any identifiers. */
+
+void
+set_identifier_size (size)
+ int size;
+{
+ tree_code_length[(int) IDENTIFIER_NODE]
+ = (size - sizeof (struct tree_common)) / sizeof (tree);
+}
+
+/* Return a newly constructed INTEGER_CST node whose constant value
+ is specified by the two ints LOW and HI.
+ The TREE_TYPE is set to `int'.
+
+ This function should be used via the `build_int_2' macro. */
+
+tree
+build_int_2_wide (low, hi)
+ HOST_WIDE_INT low, hi;
+{
+ register tree t = make_node (INTEGER_CST);
+ TREE_INT_CST_LOW (t) = low;
+ TREE_INT_CST_HIGH (t) = hi;
+ TREE_TYPE (t) = integer_type_node;
+ return t;
+}
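+
+#if 0
+/* Illustrative sketch (editor's example, not part of the import):
+ callers normally go through the build_int_2 macro; HI carries the
+ sign extension of LOW, and the node comes back typed as `int',
+ which the caller may overwrite. */
+static void
+example_int_cst ()
+{
+ tree five = build_int_2 (5, 0);
+ tree minus_one = build_int_2 (-1, -1); /* All bits set in both words. */
+
+ if (!integer_onep (build_int_2 (1, 0)))
+ abort ();
+}
+#endif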
+
+/* Return a new REAL_CST node whose type is TYPE and value is D. */
+
+tree
+build_real (type, d)
+ tree type;
+ REAL_VALUE_TYPE d;
+{
+ tree v;
+ int overflow = 0;
+
+ /* Check for valid float value for this type on this target machine;
+ if not, can print error message and store a valid value in D. */
+#ifdef CHECK_FLOAT_VALUE
+ CHECK_FLOAT_VALUE (TYPE_MODE (type), d, overflow);
+#endif
+
+ v = make_node (REAL_CST);
+ TREE_TYPE (v) = type;
+ TREE_REAL_CST (v) = d;
+ TREE_OVERFLOW (v) = TREE_CONSTANT_OVERFLOW (v) = overflow;
+ return v;
+}
+
+/* Return a new REAL_CST node whose type is TYPE
+ and whose value is the integer value of the INTEGER_CST node I. */
+
+#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+
+REAL_VALUE_TYPE
+real_value_from_int_cst (i)
+ tree i;
+{
+ REAL_VALUE_TYPE d;
+ REAL_VALUE_TYPE e;
+ /* Some 386 compilers mishandle unsigned int to float conversions,
+ so introduce a temporary variable E to avoid those bugs. */
+
+#ifdef REAL_ARITHMETIC
+ if (! TREE_UNSIGNED (TREE_TYPE (i)))
+ REAL_VALUE_FROM_INT (d, TREE_INT_CST_LOW (i), TREE_INT_CST_HIGH (i));
+ else
+ REAL_VALUE_FROM_UNSIGNED_INT (d, TREE_INT_CST_LOW (i), TREE_INT_CST_HIGH (i));
+#else /* not REAL_ARITHMETIC */
+ if (TREE_INT_CST_HIGH (i) < 0 && ! TREE_UNSIGNED (TREE_TYPE (i)))
+ {
+ d = (double) (~ TREE_INT_CST_HIGH (i));
+ e = ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
+ * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
+ d *= e;
+ e = (double) (unsigned HOST_WIDE_INT) (~ TREE_INT_CST_LOW (i));
+ d += e;
+ d = (- d - 1.0);
+ }
+ else
+ {
+ d = (double) (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (i);
+ e = ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
+ * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
+ d *= e;
+ e = (double) (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (i);
+ d += e;
+ }
+#endif /* not REAL_ARITHMETIC */
+ return d;
+}
+
+/* This function can't be implemented if we can't do arithmetic
+ on the float representation. */
+
+tree
+build_real_from_int_cst (type, i)
+ tree type;
+ tree i;
+{
+ tree v;
+ int overflow = TREE_OVERFLOW (i);
+ REAL_VALUE_TYPE d;
+ jmp_buf float_error;
+
+ v = make_node (REAL_CST);
+ TREE_TYPE (v) = type;
+
+ if (setjmp (float_error))
+ {
+ d = dconst0;
+ overflow = 1;
+ goto got_it;
+ }
+
+ set_float_handler (float_error);
+
+ d = REAL_VALUE_TRUNCATE (TYPE_MODE (type), real_value_from_int_cst (i));
+
+ /* Check for valid float value for this type on this target machine. */
+
+ got_it:
+ set_float_handler (NULL_PTR);
+
+#ifdef CHECK_FLOAT_VALUE
+ CHECK_FLOAT_VALUE (TYPE_MODE (type), d, overflow);
+#endif
+
+ TREE_REAL_CST (v) = d;
+ TREE_OVERFLOW (v) = TREE_CONSTANT_OVERFLOW (v) = overflow;
+ return v;
+}
+
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+
+/* Return a newly constructed STRING_CST node whose value is
+ the LEN characters at STR.
+ The TREE_TYPE is not initialized. */
+
+tree
+build_string (len, str)
+ int len;
+ char *str;
+{
+ /* Put the string in saveable_obstack since it will be placed in the RTL
+ for an "asm" statement and will also be kept around a while if
+ deferring constant output in varasm.c. */
+
+ register tree s = make_node (STRING_CST);
+ TREE_STRING_LENGTH (s) = len;
+ TREE_STRING_POINTER (s) = obstack_copy0 (saveable_obstack, str, len);
+ return s;
+}
+
+/* Return a newly constructed COMPLEX_CST node whose value is
+ specified by the real and imaginary parts REAL and IMAG.
+ Both REAL and IMAG should be constant nodes.
+ The TREE_TYPE is not initialized. */
+
+tree
+build_complex (real, imag)
+ tree real, imag;
+{
+ register tree t = make_node (COMPLEX_CST);
+
+ TREE_REALPART (t) = real;
+ TREE_IMAGPART (t) = imag;
+ TREE_TYPE (t) = build_complex_type (TREE_TYPE (real));
+ TREE_OVERFLOW (t) = TREE_OVERFLOW (real) | TREE_OVERFLOW (imag);
+ TREE_CONSTANT_OVERFLOW (t)
+ = TREE_CONSTANT_OVERFLOW (real) | TREE_CONSTANT_OVERFLOW (imag);
+ return t;
+}
+
+/* Build a newly constructed TREE_VEC node of length LEN. */
+tree
+make_tree_vec (len)
+ int len;
+{
+ register tree t;
+ register int length = (len - 1) * sizeof (tree) + sizeof (struct tree_vec);
+ register struct obstack *obstack = current_obstack;
+ register int i;
+
+#ifdef GATHER_STATISTICS
+ tree_node_counts[(int)vec_kind]++;
+ tree_node_sizes[(int)vec_kind] += length;
+#endif
+
+ t = (tree) obstack_alloc (obstack, length);
+
+ for (i = (length / sizeof (int)) - 1; i >= 0; i--)
+ ((int *) t)[i] = 0;
+
+ TREE_SET_CODE (t, TREE_VEC);
+ TREE_VEC_LENGTH (t) = len;
+ if (obstack == &permanent_obstack)
+ TREE_PERMANENT (t) = 1;
+
+ return t;
+}
+
+/* Return 1 if EXPR is the integer constant zero. */
+
+int
+integer_zerop (expr)
+ tree expr;
+{
+ STRIP_NOPS (expr);
+
+ return (TREE_CODE (expr) == INTEGER_CST
+ && TREE_INT_CST_LOW (expr) == 0
+ && TREE_INT_CST_HIGH (expr) == 0);
+}
+
+/* Return 1 if EXPR is the integer constant one. */
+
+int
+integer_onep (expr)
+ tree expr;
+{
+ STRIP_NOPS (expr);
+
+ return (TREE_CODE (expr) == INTEGER_CST
+ && TREE_INT_CST_LOW (expr) == 1
+ && TREE_INT_CST_HIGH (expr) == 0);
+}
+
+/* Return 1 if EXPR is an integer constant all of whose bits are 1
+ in as much precision as it contains. */
+
+int
+integer_all_onesp (expr)
+ tree expr;
+{
+ register int prec;
+ register int uns;
+
+ STRIP_NOPS (expr);
+
+ if (TREE_CODE (expr) != INTEGER_CST)
+ return 0;
+
+ uns = TREE_UNSIGNED (TREE_TYPE (expr));
+ if (!uns)
+ return TREE_INT_CST_LOW (expr) == -1 && TREE_INT_CST_HIGH (expr) == -1;
+
+ prec = TYPE_PRECISION (TREE_TYPE (expr));
+ if (prec >= HOST_BITS_PER_WIDE_INT)
+ {
+ int high_value, shift_amount;
+
+ shift_amount = prec - HOST_BITS_PER_WIDE_INT;
+
+ if (shift_amount > HOST_BITS_PER_WIDE_INT)
+ /* Cannot handle precisions greater than twice the host int size. */
+ abort ();
+ else if (shift_amount == HOST_BITS_PER_WIDE_INT)
+ /* Shifting by the host word size is undefined according to the ANSI
+ standard, so we must handle this as a special case. */
+ high_value = -1;
+ else
+ high_value = ((HOST_WIDE_INT) 1 << shift_amount) - 1;
+
+ return TREE_INT_CST_LOW (expr) == -1
+ && TREE_INT_CST_HIGH (expr) == high_value;
+ }
+ else
+ return TREE_INT_CST_LOW (expr) == ((HOST_WIDE_INT) 1 << prec) - 1;
+}
+
+/* Return 1 if EXPR is an integer constant that is a power of 2 (i.e., has only
+ one bit on). */
+
+int
+integer_pow2p (expr)
+ tree expr;
+{
+ HOST_WIDE_INT high, low;
+
+ STRIP_NOPS (expr);
+
+ if (TREE_CODE (expr) != INTEGER_CST)
+ return 0;
+
+ high = TREE_INT_CST_HIGH (expr);
+ low = TREE_INT_CST_LOW (expr);
+
+ if (high == 0 && low == 0)
+ return 0;
+
+ return ((high == 0 && (low & (low - 1)) == 0)
+ || (low == 0 && (high & (high - 1)) == 0));
+}
+
+/* Return 1 if EXPR is the real constant zero. */
+
+int
+real_zerop (expr)
+ tree expr;
+{
+ STRIP_NOPS (expr);
+
+ return (TREE_CODE (expr) == REAL_CST
+ && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst0));
+}
+
+/* Return 1 if EXPR is the real constant one. */
+
+int
+real_onep (expr)
+ tree expr;
+{
+ STRIP_NOPS (expr);
+
+ return (TREE_CODE (expr) == REAL_CST
+ && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst1));
+}
+
+/* Return 1 if EXPR is the real constant two. */
+
+int
+real_twop (expr)
+ tree expr;
+{
+ STRIP_NOPS (expr);
+
+ return (TREE_CODE (expr) == REAL_CST
+ && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst2));
+}
+
+/* Nonzero if EXP is a constant or a cast of a constant. */
+
+int
+really_constant_p (exp)
+ tree exp;
+{
+ /* This is not quite the same as STRIP_NOPS. It does more. */
+ while (TREE_CODE (exp) == NOP_EXPR
+ || TREE_CODE (exp) == CONVERT_EXPR
+ || TREE_CODE (exp) == NON_LVALUE_EXPR)
+ exp = TREE_OPERAND (exp, 0);
+ return TREE_CONSTANT (exp);
+}
+
+/* Return first list element whose TREE_VALUE is ELEM.
+ Return 0 if ELEM is not in LIST. */
+
+tree
+value_member (elem, list)
+ tree elem, list;
+{
+ while (list)
+ {
+ if (elem == TREE_VALUE (list))
+ return list;
+ list = TREE_CHAIN (list);
+ }
+ return NULL_TREE;
+}
+
+/* Return first list element whose TREE_PURPOSE is ELEM.
+ Return 0 if ELEM is not in LIST. */
+
+tree
+purpose_member (elem, list)
+ tree elem, list;
+{
+ while (list)
+ {
+ if (elem == TREE_PURPOSE (list))
+ return list;
+ list = TREE_CHAIN (list);
+ }
+ return NULL_TREE;
+}
+
+/* Return first list element whose BINFO_TYPE is ELEM.
+ Return 0 if ELEM is not in LIST. */
+
+tree
+binfo_member (elem, list)
+ tree elem, list;
+{
+ while (list)
+ {
+ if (elem == BINFO_TYPE (list))
+ return list;
+ list = TREE_CHAIN (list);
+ }
+ return NULL_TREE;
+}
+
+/* Return nonzero if ELEM is part of the chain CHAIN. */
+
+int
+chain_member (elem, chain)
+ tree elem, chain;
+{
+ while (chain)
+ {
+ if (elem == chain)
+ return 1;
+ chain = TREE_CHAIN (chain);
+ }
+
+ return 0;
+}
+
+/* Return the length of a chain of nodes chained through TREE_CHAIN.
+ We expect a null pointer to mark the end of the chain.
+ This is the Lisp primitive `length'. */
+
+int
+list_length (t)
+ tree t;
+{
+ register tree tail;
+ register int len = 0;
+
+ for (tail = t; tail; tail = TREE_CHAIN (tail))
+ len++;
+
+ return len;
+}
+
+/* Concatenate two chains of nodes (chained through TREE_CHAIN)
+ by modifying the last node in chain 1 to point to chain 2.
+ This is the Lisp primitive `nconc'. */
+
+tree
+chainon (op1, op2)
+ tree op1, op2;
+{
+
+ if (op1)
+ {
+ register tree t1;
+ register tree t2;
+
+ for (t1 = op1; TREE_CHAIN (t1); t1 = TREE_CHAIN (t1))
+ ;
+ TREE_CHAIN (t1) = op2;
+ for (t2 = op2; t2; t2 = TREE_CHAIN (t2))
+ if (t2 == t1)
+ abort (); /* Circularity created. */
+ return op1;
+ }
+ else return op2;
+}
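+
+#if 0
+/* Illustrative sketch (editor's example, not part of the import):
+ chainon splices destructively, like Lisp nconc, so the last
+ TREE_CHAIN of OP1 ends up pointing into OP2. */
+static void
+example_chainon ()
+{
+ tree a = build_tree_list (NULL_TREE, integer_zero_node);
+ tree b = build_tree_list (NULL_TREE, integer_zero_node);
+
+ chainon (a, b);
+ if (TREE_CHAIN (a) != b || list_length (a) != 2)
+ abort ();
+}
+#endif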
+
+/* Return the last node in a chain of nodes (chained through TREE_CHAIN). */
+
+tree
+tree_last (chain)
+ register tree chain;
+{
+ register tree next;
+ if (chain)
+ while (next = TREE_CHAIN (chain))
+ chain = next;
+ return chain;
+}
+
+/* Reverse the order of elements in the chain T,
+ and return the new head of the chain (old last element). */
+
+tree
+nreverse (t)
+ tree t;
+{
+ register tree prev = 0, decl, next;
+ for (decl = t; decl; decl = next)
+ {
+ next = TREE_CHAIN (decl);
+ TREE_CHAIN (decl) = prev;
+ prev = decl;
+ }
+ return prev;
+}
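+
+#if 0
+/* Illustrative sketch (editor's example, not part of the import):
+ the usual list-building idiom is to cons onto the front and then
+ nreverse once at the end, which is linear instead of quadratic. */
+static tree
+example_build_list (n)
+ int n;
+{
+ tree list = NULL_TREE;
+ int i;
+
+ for (i = 0; i < n; i++)
+ list = tree_cons (NULL_TREE, integer_zero_node, list);
+
+ return nreverse (list); /* First element consed comes first. */
+}
+#endif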
+
+/* Given a chain CHAIN of tree nodes,
+ construct and return a list of those nodes. */
+
+tree
+listify (chain)
+ tree chain;
+{
+ tree result = NULL_TREE;
+ tree in_tail = chain;
+ tree out_tail = NULL_TREE;
+
+ while (in_tail)
+ {
+ tree next = tree_cons (NULL_TREE, in_tail, NULL_TREE);
+ if (out_tail)
+ TREE_CHAIN (out_tail) = next;
+ else
+ result = next;
+ out_tail = next;
+ in_tail = TREE_CHAIN (in_tail);
+ }
+
+ return result;
+}
+
+/* Return a newly created TREE_LIST node whose
+ purpose and value fields are PARM and VALUE. */
+
+tree
+build_tree_list (parm, value)
+ tree parm, value;
+{
+ register tree t = make_node (TREE_LIST);
+ TREE_PURPOSE (t) = parm;
+ TREE_VALUE (t) = value;
+ return t;
+}
+
+/* Similar, but build on the temp_decl_obstack. */
+
+tree
+build_decl_list (parm, value)
+ tree parm, value;
+{
+ register tree node;
+ register struct obstack *ambient_obstack = current_obstack;
+ current_obstack = &temp_decl_obstack;
+ node = build_tree_list (parm, value);
+ current_obstack = ambient_obstack;
+ return node;
+}
+
+/* Return a newly created TREE_LIST node whose
+ purpose and value fields are PARM and VALUE
+ and whose TREE_CHAIN is CHAIN. */
+
+tree
+tree_cons (purpose, value, chain)
+ tree purpose, value, chain;
+{
+#if 0
+ register tree node = make_node (TREE_LIST);
+#else
+ register int i;
+ register tree node = (tree) obstack_alloc (current_obstack, sizeof (struct tree_list));
+#ifdef GATHER_STATISTICS
+ tree_node_counts[(int)x_kind]++;
+ tree_node_sizes[(int)x_kind] += sizeof (struct tree_list);
+#endif
+
+ for (i = (sizeof (struct tree_common) / sizeof (int)) - 1; i >= 0; i--)
+ ((int *) node)[i] = 0;
+
+ TREE_SET_CODE (node, TREE_LIST);
+ if (current_obstack == &permanent_obstack)
+ TREE_PERMANENT (node) = 1;
+#endif
+
+ TREE_CHAIN (node) = chain;
+ TREE_PURPOSE (node) = purpose;
+ TREE_VALUE (node) = value;
+ return node;
+}
+
+/* Similar, but build on the temp_decl_obstack. */
+
+tree
+decl_tree_cons (purpose, value, chain)
+ tree purpose, value, chain;
+{
+ register tree node;
+ register struct obstack *ambient_obstack = current_obstack;
+ current_obstack = &temp_decl_obstack;
+ node = tree_cons (purpose, value, chain);
+ current_obstack = ambient_obstack;
+ return node;
+}
+
+/* Same as `tree_cons' but make a permanent object. */
+
+tree
+perm_tree_cons (purpose, value, chain)
+ tree purpose, value, chain;
+{
+ register tree node;
+ register struct obstack *ambient_obstack = current_obstack;
+ current_obstack = &permanent_obstack;
+
+ node = tree_cons (purpose, value, chain);
+ current_obstack = ambient_obstack;
+ return node;
+}
+
+/* Same as `tree_cons', but make this node temporary, regardless. */
+
+tree
+temp_tree_cons (purpose, value, chain)
+ tree purpose, value, chain;
+{
+ register tree node;
+ register struct obstack *ambient_obstack = current_obstack;
+ current_obstack = &temporary_obstack;
+
+ node = tree_cons (purpose, value, chain);
+ current_obstack = ambient_obstack;
+ return node;
+}
+
+/* Same as `tree_cons', but save this node if the function's RTL is saved. */
+
+tree
+saveable_tree_cons (purpose, value, chain)
+ tree purpose, value, chain;
+{
+ register tree node;
+ register struct obstack *ambient_obstack = current_obstack;
+ current_obstack = saveable_obstack;
+
+ node = tree_cons (purpose, value, chain);
+ current_obstack = ambient_obstack;
+ return node;
+}
+
+/* Return the size nominally occupied by an object of type TYPE
+ when it resides in memory. The value is measured in units of bytes,
+ and its data type is that normally used for type sizes
+ (which is the first type created by make_signed_type or
+ make_unsigned_type). */
+
+tree
+size_in_bytes (type)
+ tree type;
+{
+ tree t;
+
+ if (type == error_mark_node)
+ return integer_zero_node;
+ type = TYPE_MAIN_VARIANT (type);
+ if (TYPE_SIZE (type) == 0)
+ {
+ incomplete_type_error (NULL_TREE, type);
+ return integer_zero_node;
+ }
+ t = size_binop (CEIL_DIV_EXPR, TYPE_SIZE (type),
+ size_int (BITS_PER_UNIT));
+ if (TREE_CODE (t) == INTEGER_CST)
+ force_fit_type (t, 0);
+ return t;
+}
+
+/* Return the size of TYPE (in bytes) as an integer,
+ or return -1 if the size can vary. */
+
+int
+int_size_in_bytes (type)
+ tree type;
+{
+ unsigned int size;
+ if (type == error_mark_node)
+ return 0;
+ type = TYPE_MAIN_VARIANT (type);
+ if (TYPE_SIZE (type) == 0)
+ return -1;
+ if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ return -1;
+ if (TREE_INT_CST_HIGH (TYPE_SIZE (type)) != 0)
+ {
+ tree t = size_binop (CEIL_DIV_EXPR, TYPE_SIZE (type),
+ size_int (BITS_PER_UNIT));
+ return TREE_INT_CST_LOW (t);
+ }
+ size = TREE_INT_CST_LOW (TYPE_SIZE (type));
+ return (size + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
+}
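+
+#if 0
+/* Illustrative sketch (editor's example, not part of the import):
+ int_size_in_bytes gives a plain int and -1 for variable-sized
+ types, while size_in_bytes always gives a tree. */
+static void
+example_sizes ()
+{
+ if (int_size_in_bytes (integer_type_node) == -1)
+ abort (); /* Never taken: `int' has a constant size. */
+}
+#endif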
+
+/* Return, as a tree node, the number of elements for TYPE (which is an
+ ARRAY_TYPE) minus one. This counts only elements of the top array. */
+
+tree
+array_type_nelts (type)
+ tree type;
+{
+ tree index_type = TYPE_DOMAIN (type);
+
+ return (integer_zerop (TYPE_MIN_VALUE (index_type))
+ ? TYPE_MAX_VALUE (index_type)
+ : fold (build (MINUS_EXPR, TREE_TYPE (TYPE_MAX_VALUE (index_type)),
+ TYPE_MAX_VALUE (index_type),
+ TYPE_MIN_VALUE (index_type))));
+}
+
+/* Return nonzero if arg is static -- a reference to an object in
+ static storage. This is not the same as the C meaning of `static'. */
+
+int
+staticp (arg)
+ tree arg;
+{
+ switch (TREE_CODE (arg))
+ {
+ case FUNCTION_DECL:
+ /* Nested functions aren't static, since taking their address
+ involves a trampoline. */
+ if (decl_function_context (arg) != 0)
+ return 0;
+ /* ... fall through ... */
+ case VAR_DECL:
+ return TREE_STATIC (arg) || DECL_EXTERNAL (arg);
+
+ case CONSTRUCTOR:
+ return TREE_STATIC (arg);
+
+ case STRING_CST:
+ return 1;
+
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+ return staticp (TREE_OPERAND (arg, 0));
+
+ case INDIRECT_REF:
+ return TREE_CONSTANT (TREE_OPERAND (arg, 0));
+
+ case ARRAY_REF:
+ if (TREE_CODE (TYPE_SIZE (TREE_TYPE (arg))) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST)
+ return staticp (TREE_OPERAND (arg, 0));
+ }
+
+ return 0;
+}
+
+/* Wrap a SAVE_EXPR around EXPR, if appropriate.
+ Do this to any expression which may be used in more than one place,
+ but must be evaluated only once.
+
+ Normally, expand_expr would reevaluate the expression each time.
+ Calling save_expr produces something that is evaluated and recorded
+ the first time expand_expr is called on it. Subsequent calls to
+ expand_expr just reuse the recorded value.
+
+ The call to expand_expr that generates code that actually computes
+ the value is the first call *at compile time*. Subsequent calls
+ *at compile time* generate code to use the saved value.
+ This produces the correct result provided that *at run time* control
+ always flows through the insns made by the first expand_expr
+ before reaching the other places where the save_expr was evaluated.
+ You, the caller of save_expr, must make sure this is so.
+
+ Constants, and certain read-only nodes, are returned with no
+ SAVE_EXPR because that is safe. Expressions containing placeholders
+ are not touched; see tree.def for an explanation of what these
+ are used for. */
+
+tree
+save_expr (expr)
+ tree expr;
+{
+ register tree t = fold (expr);
+
+ /* We don't care about whether this can be used as an lvalue in this
+ context. */
+ while (TREE_CODE (t) == NON_LVALUE_EXPR)
+ t = TREE_OPERAND (t, 0);
+
+ /* If the tree evaluates to a constant, then we don't want to hide that
+ fact (i.e. this allows further folding, and direct checks for constants).
+ However, a read-only object that has side effects cannot be bypassed.
+ Since it is no problem to reevaluate literals, we just return the
+ literal node. */
+
+ if (TREE_CONSTANT (t) || (TREE_READONLY (t) && ! TREE_SIDE_EFFECTS (t))
+ || TREE_CODE (t) == SAVE_EXPR)
+ return t;
+
+ /* If T contains a PLACEHOLDER_EXPR, we must evaluate it each time, since
+ it means that the size or offset of some field of an object depends on
+ the value within another field.
+
+ Note that it must not be the case that T contains both a PLACEHOLDER_EXPR
+ and some variable since it would then need to be both evaluated once and
+ evaluated more than once. Front-ends must ensure this case cannot
+ happen by surrounding any such subexpressions in their own SAVE_EXPR
+ and forcing evaluation at the proper time. */
+ if (contains_placeholder_p (t))
+ return t;
+
+ t = build (SAVE_EXPR, TREE_TYPE (expr), t, current_function_decl, NULL_TREE);
+
+ /* This expression might be placed ahead of a jump to ensure that the
+ value was computed on both sides of the jump. So make sure it isn't
+ eliminated as dead. */
+ TREE_SIDE_EFFECTS (t) = 1;
+ return t;
+}
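+
+#if 0
+/* Illustrative sketch (editor's example, not part of the import):
+ to use EXP twice while evaluating it once, wrap it first and then
+ reference the single SAVE_EXPR node from both places, as in this
+ hypothetical square-building helper. */
+static tree
+example_square (exp)
+ tree exp;
+{
+ tree once = save_expr (exp);
+
+ /* Both operands are the same node, so code for EXP is emitted only
+ the first time expand_expr sees it. */
+ return build (MULT_EXPR, TREE_TYPE (once), once, once);
+}
+#endif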
+
+/* Return 1 if EXP contains a PLACEHOLDER_EXPR; i.e., if it represents a size
+ or offset that depends on a field within a record.
+
+ Note that we only allow such expressions within simple arithmetic
+ or a COND_EXPR. */
+
+int
+contains_placeholder_p (exp)
+ tree exp;
+{
+ register enum tree_code code = TREE_CODE (exp);
+ tree inner;
+
+ /* If we have a WITH_RECORD_EXPR, it "cancels" any PLACEHOLDER_EXPR
+ in it since it is supplying a value for it. */
+ if (code == WITH_RECORD_EXPR)
+ return 0;
+
+ switch (TREE_CODE_CLASS (code))
+ {
+ case 'r':
+ for (inner = TREE_OPERAND (exp, 0);
+ TREE_CODE_CLASS (TREE_CODE (inner)) == 'r';
+ inner = TREE_OPERAND (inner, 0))
+ ;
+ return TREE_CODE (inner) == PLACEHOLDER_EXPR;
+
+ case '1':
+ case '2': case '<':
+ case 'e':
+ switch (tree_code_length[(int) code])
+ {
+ case 1:
+ return contains_placeholder_p (TREE_OPERAND (exp, 0));
+ case 2:
+ return (code != RTL_EXPR
+ && code != CONSTRUCTOR
+ && ! (code == SAVE_EXPR && SAVE_EXPR_RTL (exp) != 0)
+ && code != WITH_RECORD_EXPR
+ && (contains_placeholder_p (TREE_OPERAND (exp, 0))
+ || contains_placeholder_p (TREE_OPERAND (exp, 1))));
+ case 3:
+ return (code == COND_EXPR
+ && (contains_placeholder_p (TREE_OPERAND (exp, 0))
+ || contains_placeholder_p (TREE_OPERAND (exp, 1))
+ || contains_placeholder_p (TREE_OPERAND (exp, 2))));
+ }
+ }
+
+ return 0;
+}
+
+/* Given a tree EXP, a FIELD_DECL F, and a replacement value R,
+ return a tree with all occurrences of references to F in a
+ PLACEHOLDER_EXPR replaced by R. Note that we assume here that EXP
+ contains only arithmetic expressions. */
+
+tree
+substitute_in_expr (exp, f, r)
+ tree exp;
+ tree f;
+ tree r;
+{
+ enum tree_code code = TREE_CODE (exp);
+ tree inner;
+
+ switch (TREE_CODE_CLASS (code))
+ {
+ case 'c':
+ case 'd':
+ return exp;
+
+ case 'x':
+ if (code == PLACEHOLDER_EXPR)
+ return exp;
+ break;
+
+ case '1':
+ case '2':
+ case '<':
+ case 'e':
+ switch (tree_code_length[(int) code])
+ {
+ case 1:
+ return fold (build1 (code, TREE_TYPE (exp),
+ substitute_in_expr (TREE_OPERAND (exp, 0),
+ f, r)));
+
+ case 2:
+ /* An RTL_EXPR cannot contain a PLACEHOLDER_EXPR; a CONSTRUCTOR
+ could, but we don't support it. */
+ if (code == RTL_EXPR)
+ return exp;
+ else if (code == CONSTRUCTOR)
+ abort ();
+
+ return fold (build (code, TREE_TYPE (exp),
+ substitute_in_expr (TREE_OPERAND (exp, 0), f, r),
+ substitute_in_expr (TREE_OPERAND (exp, 1),
+ f, r)));
+
+ case 3:
+ /* It cannot be that anything inside a SAVE_EXPR contains a
+ PLACEHOLDER_EXPR. */
+ if (code == SAVE_EXPR)
+ return exp;
+
+ if (code != COND_EXPR)
+ abort ();
+
+ return fold (build (code, TREE_TYPE (exp),
+ substitute_in_expr (TREE_OPERAND (exp, 0), f, r),
+ substitute_in_expr (TREE_OPERAND (exp, 1), f, r),
+ substitute_in_expr (TREE_OPERAND (exp, 2),
+ f, r)));
+ }
+
+ break;
+
+ case 'r':
+ switch (code)
+ {
+ case COMPONENT_REF:
+ /* If this expression is getting a value from a PLACEHOLDER_EXPR
+ and it is the right field, replace it with R. */
+ for (inner = TREE_OPERAND (exp, 0);
+ TREE_CODE_CLASS (TREE_CODE (inner)) == 'r';
+ inner = TREE_OPERAND (inner, 0))
+ ;
+ if (TREE_CODE (inner) == PLACEHOLDER_EXPR
+ && TREE_OPERAND (exp, 1) == f)
+ return r;
+
+ return fold (build (code, TREE_TYPE (exp),
+ substitute_in_expr (TREE_OPERAND (exp, 0), f, r),
+ TREE_OPERAND (exp, 1)));
+ case BIT_FIELD_REF:
+ return fold (build (code, TREE_TYPE (exp),
+ substitute_in_expr (TREE_OPERAND (exp, 0), f, r),
+ substitute_in_expr (TREE_OPERAND (exp, 1), f, r),
+ substitute_in_expr (TREE_OPERAND (exp, 2), f, r)));
+ case INDIRECT_REF:
+ case BUFFER_REF:
+ return fold (build1 (code, TREE_TYPE (exp),
+ substitute_in_expr (TREE_OPERAND (exp, 0),
+ f, r)));
+ case OFFSET_REF:
+ return fold (build (code, TREE_TYPE (exp),
+ substitute_in_expr (TREE_OPERAND (exp, 0), f, r),
+ substitute_in_expr (TREE_OPERAND (exp, 1), f, r)));
+ }
+ }
+
+ /* If it wasn't one of the cases we handle, give up. */
+
+ abort ();
+}
+
+/* Given a type T, a FIELD_DECL F, and a replacement value R,
+ return a new type with all size expressions that contain F
+ updated by replacing F with R. */
+
+tree
+substitute_in_type (t, f, r)
+ tree t, f, r;
+{
+ switch (TREE_CODE (t))
+ {
+ case POINTER_TYPE:
+ case VOID_TYPE:
+ return t;
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ case CHAR_TYPE:
+ if ((TREE_CODE (TYPE_MIN_VALUE (t)) != INTEGER_CST
+ && contains_placeholder_p (TYPE_MIN_VALUE (t)))
+ || (TREE_CODE (TYPE_MAX_VALUE (t)) != INTEGER_CST
+ && contains_placeholder_p (TYPE_MAX_VALUE (t))))
+ return build_range_type (t,
+ substitute_in_expr (TYPE_MIN_VALUE (t), f, r),
+ substitute_in_expr (TYPE_MAX_VALUE (t), f, r));
+ return t;
+
+ case REAL_TYPE:
+ if ((TYPE_MIN_VALUE (t) != 0
+ && TREE_CODE (TYPE_MIN_VALUE (t)) != REAL_CST
+ && contains_placeholder_p (TYPE_MIN_VALUE (t)))
+ || (TYPE_MAX_VALUE (t) != 0
+ && TREE_CODE (TYPE_MAX_VALUE (t)) != REAL_CST
+ && contains_placeholder_p (TYPE_MAX_VALUE (t))))
+ {
+ t = build_type_copy (t);
+
+ if (TYPE_MIN_VALUE (t))
+ TYPE_MIN_VALUE (t) = substitute_in_expr (TYPE_MIN_VALUE (t), f, r);
+ if (TYPE_MAX_VALUE (t))
+ TYPE_MAX_VALUE (t) = substitute_in_expr (TYPE_MAX_VALUE (t), f, r);
+ }
+ return t;
+
+ case COMPLEX_TYPE:
+ return build_complex_type (substitute_in_type (TREE_TYPE (t), f, r));
+
+ case OFFSET_TYPE:
+ case METHOD_TYPE:
+ case REFERENCE_TYPE:
+ case FILE_TYPE:
+ case SET_TYPE:
+ case FUNCTION_TYPE:
+ case LANG_TYPE:
+ /* Don't know how to do these yet. */
+ abort ();
+
+ case ARRAY_TYPE:
+ t = build_array_type (substitute_in_type (TREE_TYPE (t), f, r),
+ substitute_in_type (TYPE_DOMAIN (t), f, r));
+ TYPE_SIZE (t) = 0;
+ layout_type (t);
+ return t;
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ {
+ tree new = copy_node (t);
+ tree field;
+ tree last_field = 0;
+
+ /* Start out with no fields, make new fields, and chain them
+ in. */
+
+ TYPE_FIELDS (new) = 0;
+ TYPE_SIZE (new) = 0;
+
+ for (field = TYPE_FIELDS (t); field;
+ field = TREE_CHAIN (field))
+ {
+ tree new_field = copy_node (field);
+
+ TREE_TYPE (new_field)
+ = substitute_in_type (TREE_TYPE (new_field), f, r);
+
+ /* If this is an anonymous field and the type of this field is
+ a UNION_TYPE or RECORD_TYPE with no elements, ignore it. If
+ the type just has one element, treat that as the field.
+ But don't do this if we are processing a QUAL_UNION_TYPE. */
+ if (TREE_CODE (t) != QUAL_UNION_TYPE && DECL_NAME (new_field) == 0
+ && (TREE_CODE (TREE_TYPE (new_field)) == UNION_TYPE
+ || TREE_CODE (TREE_TYPE (new_field)) == RECORD_TYPE))
+ {
+ if (TYPE_FIELDS (TREE_TYPE (new_field)) == 0)
+ continue;
+
+ if (TREE_CHAIN (TYPE_FIELDS (TREE_TYPE (new_field))) == 0)
+ new_field = TYPE_FIELDS (TREE_TYPE (new_field));
+ }
+
+ DECL_CONTEXT (new_field) = new;
+ DECL_SIZE (new_field) = 0;
+
+ if (TREE_CODE (t) == QUAL_UNION_TYPE)
+ {
+ /* Do the substitution inside the qualifier and if we find
+ that this field will not be present, omit it. */
+ DECL_QUALIFIER (new_field)
+ = substitute_in_expr (DECL_QUALIFIER (field), f, r);
+ if (integer_zerop (DECL_QUALIFIER (new_field)))
+ continue;
+ }
+
+ if (last_field == 0)
+ TYPE_FIELDS (new) = new_field;
+ else
+ TREE_CHAIN (last_field) = new_field;
+
+ last_field = new_field;
+
+ /* If this is a qualified type and this field will always be
+ present, we are done. */
+ if (TREE_CODE (t) == QUAL_UNION_TYPE
+ && integer_onep (DECL_QUALIFIER (new_field)))
+ break;
+ }
+
+ /* If this used to be a qualified union type, but we now know what
+ field will be present, make this a normal union. */
+ if (TREE_CODE (new) == QUAL_UNION_TYPE
+ && (TYPE_FIELDS (new) == 0
+ || integer_onep (DECL_QUALIFIER (TYPE_FIELDS (new)))))
+ TREE_SET_CODE (new, UNION_TYPE);
+
+ layout_type (new);
+ return new;
+ }
+ }
+}
+
+/* Stabilize a reference so that we can use it any number of times
+ without causing its operands to be evaluated more than once.
+ Returns the stabilized reference. This works by means of save_expr,
+ so see the caveats in the comments about save_expr.
+
+ Also allows conversion expressions whose operands are references.
+ Any other kind of expression is returned unchanged. */
+
+tree
+stabilize_reference (ref)
+ tree ref;
+{
+ register tree result;
+ register enum tree_code code = TREE_CODE (ref);
+
+ switch (code)
+ {
+ case VAR_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
+ /* No action is needed in this case. */
+ return ref;
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FIX_FLOOR_EXPR:
+ case FIX_ROUND_EXPR:
+ case FIX_CEIL_EXPR:
+ result = build_nt (code, stabilize_reference (TREE_OPERAND (ref, 0)));
+ break;
+
+ case INDIRECT_REF:
+ result = build_nt (INDIRECT_REF,
+ stabilize_reference_1 (TREE_OPERAND (ref, 0)));
+ break;
+
+ case COMPONENT_REF:
+ result = build_nt (COMPONENT_REF,
+ stabilize_reference (TREE_OPERAND (ref, 0)),
+ TREE_OPERAND (ref, 1));
+ break;
+
+ case BIT_FIELD_REF:
+ result = build_nt (BIT_FIELD_REF,
+ stabilize_reference (TREE_OPERAND (ref, 0)),
+ stabilize_reference_1 (TREE_OPERAND (ref, 1)),
+ stabilize_reference_1 (TREE_OPERAND (ref, 2)));
+ break;
+
+ case ARRAY_REF:
+ result = build_nt (ARRAY_REF,
+ stabilize_reference (TREE_OPERAND (ref, 0)),
+ stabilize_reference_1 (TREE_OPERAND (ref, 1)));
+ break;
+
+ /* If arg isn't a kind of lvalue we recognize, make no change.
+ Caller should recognize the error for an invalid lvalue. */
+ default:
+ return ref;
+
+ case ERROR_MARK:
+ return error_mark_node;
+ }
+
+ TREE_TYPE (result) = TREE_TYPE (ref);
+ TREE_READONLY (result) = TREE_READONLY (ref);
+ TREE_SIDE_EFFECTS (result) = TREE_SIDE_EFFECTS (ref);
+ TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (ref);
+ TREE_RAISES (result) = TREE_RAISES (ref);
+
+ return result;
+}
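+
+#if 0
+/* Illustrative sketch (editor's example, not part of the import):
+ a front end compiling `ref += value' can stabilize REF once and
+ then use the result on both sides of the assignment without
+ re-evaluating the reference's operands. */
+static tree
+example_plus_assign (ref, value)
+ tree ref, value;
+{
+ tree lhs = stabilize_reference (ref);
+ tree sum = build (PLUS_EXPR, TREE_TYPE (lhs), lhs, value);
+
+ return build (MODIFY_EXPR, TREE_TYPE (lhs), lhs, sum);
+}
+#endif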
+
+/* Subroutine of stabilize_reference; this is called for subtrees of
+ references. Any expression with side-effects must be put in a SAVE_EXPR
+ to ensure that it is only evaluated once.
+
+ We don't put SAVE_EXPR nodes around everything, because assigning very
+ simple expressions to temporaries causes us to miss good opportunities
+ for optimizations. Among other things, the opportunity to fold in the
+ addition of a constant into an addressing mode often gets lost, e.g.
+ "y[i+1] += x;". In general, we take the approach that we should not make
+ an assignment unless we are forced into it - i.e., that any non-side effect
+ operator should be allowed, and that cse should take care of coalescing
+ multiple utterances of the same expression should that prove fruitful. */
+
+static tree
+stabilize_reference_1 (e)
+ tree e;
+{
+ register tree result;
+ register enum tree_code code = TREE_CODE (e);
+
+ /* We cannot ignore const expressions because it might be a reference
+ to a const array but whose index contains side-effects. But we can
+ ignore things that are actual constants or that have already been
+ handled by this function. */
+
+ if (TREE_CONSTANT (e) || code == SAVE_EXPR)
+ return e;
+
+ switch (TREE_CODE_CLASS (code))
+ {
+ case 'x':
+ case 't':
+ case 'd':
+ case 'b':
+ case '<':
+ case 's':
+ case 'e':
+ case 'r':
+ /* If the expression has side-effects, then encase it in a SAVE_EXPR
+ so that it will only be evaluated once. */
+ /* The reference (r) and comparison (<) classes could be handled as
+ below, but it is generally faster to only evaluate them once. */
+ if (TREE_SIDE_EFFECTS (e))
+ return save_expr (e);
+ return e;
+
+ case 'c':
+ /* Constants need no processing. In fact, we should never reach
+ here. */
+ return e;
+
+ case '2':
+ /* Division is slow and tends to be compiled with jumps,
+ especially the division by powers of 2 that is often
+ found inside of an array reference. So do it just once. */
+ if (code == TRUNC_DIV_EXPR || code == TRUNC_MOD_EXPR
+ || code == FLOOR_DIV_EXPR || code == FLOOR_MOD_EXPR
+ || code == CEIL_DIV_EXPR || code == CEIL_MOD_EXPR
+ || code == ROUND_DIV_EXPR || code == ROUND_MOD_EXPR)
+ return save_expr (e);
+ /* Recursively stabilize each operand. */
+ result = build_nt (code, stabilize_reference_1 (TREE_OPERAND (e, 0)),
+ stabilize_reference_1 (TREE_OPERAND (e, 1)));
+ break;
+
+ case '1':
+ /* Recursively stabilize each operand. */
+ result = build_nt (code, stabilize_reference_1 (TREE_OPERAND (e, 0)));
+ break;
+
+ default:
+ abort ();
+ }
+
+ TREE_TYPE (result) = TREE_TYPE (e);
+ TREE_READONLY (result) = TREE_READONLY (e);
+ TREE_SIDE_EFFECTS (result) = TREE_SIDE_EFFECTS (e);
+ TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (e);
+ TREE_RAISES (result) = TREE_RAISES (e);
+
+ return result;
+}
+
+/* Low-level constructors for expressions. */
+
+/* Build an expression of code CODE, data type TYPE,
+ and operands as specified by the arguments ARG1 and following arguments.
+ Expressions and reference nodes can be created this way.
+ Constants, decls, types and misc nodes cannot be. */
+
+tree
+build VPROTO((enum tree_code code, tree tt, ...))
+{
+#ifndef __STDC__
+ enum tree_code code;
+ tree tt;
+#endif
+ va_list p;
+ register tree t;
+ register int length;
+ register int i;
+
+ VA_START (p, tt);
+
+#ifndef __STDC__
+ code = va_arg (p, enum tree_code);
+ tt = va_arg (p, tree);
+#endif
+
+ t = make_node (code);
+ length = tree_code_length[(int) code];
+ TREE_TYPE (t) = tt;
+
+ if (length == 2)
+ {
+ /* This is equivalent to the loop below, but faster. */
+ register tree arg0 = va_arg (p, tree);
+ register tree arg1 = va_arg (p, tree);
+ TREE_OPERAND (t, 0) = arg0;
+ TREE_OPERAND (t, 1) = arg1;
+ if ((arg0 && TREE_SIDE_EFFECTS (arg0))
+ || (arg1 && TREE_SIDE_EFFECTS (arg1)))
+ TREE_SIDE_EFFECTS (t) = 1;
+ TREE_RAISES (t)
+ = (arg0 && TREE_RAISES (arg0)) || (arg1 && TREE_RAISES (arg1));
+ }
+ else if (length == 1)
+ {
+ register tree arg0 = va_arg (p, tree);
+
+ /* Call build1 for this! */
+ if (TREE_CODE_CLASS (code) != 's')
+ abort ();
+ TREE_OPERAND (t, 0) = arg0;
+ if (arg0 && TREE_SIDE_EFFECTS (arg0))
+ TREE_SIDE_EFFECTS (t) = 1;
+ TREE_RAISES (t) = (arg0 && TREE_RAISES (arg0));
+ }
+ else
+ {
+ for (i = 0; i < length; i++)
+ {
+ register tree operand = va_arg (p, tree);
+ TREE_OPERAND (t, i) = operand;
+ if (operand)
+ {
+ if (TREE_SIDE_EFFECTS (operand))
+ TREE_SIDE_EFFECTS (t) = 1;
+ if (TREE_RAISES (operand))
+ TREE_RAISES (t) = 1;
+ }
+ }
+ }
+ va_end (p);
+ return t;
+}
+
+/* Same as above, but only builds for unary operators.
+ Saves the lion's share of calls to `build'; cuts down on the use
+ of varargs, which is expensive for RISC machines. */
+tree
+build1 (code, type, node)
+ enum tree_code code;
+ tree type;
+ tree node;
+{
+ register struct obstack *obstack = current_obstack;
+ register int i, length;
+ register tree_node_kind kind;
+ register tree t;
+
+#ifdef GATHER_STATISTICS
+ if (TREE_CODE_CLASS (code) == 'r')
+ kind = r_kind;
+ else
+ kind = e_kind;
+#endif
+
+ obstack = expression_obstack;
+ length = sizeof (struct tree_exp);
+
+ t = (tree) obstack_alloc (obstack, length);
+
+#ifdef GATHER_STATISTICS
+ tree_node_counts[(int)kind]++;
+ tree_node_sizes[(int)kind] += length;
+#endif
+
+ for (i = (length / sizeof (int)) - 1; i >= 0; i--)
+ ((int *) t)[i] = 0;
+
+ TREE_TYPE (t) = type;
+ TREE_SET_CODE (t, code);
+
+ if (obstack == &permanent_obstack)
+ TREE_PERMANENT (t) = 1;
+
+ TREE_OPERAND (t, 0) = node;
+ if (node)
+ {
+ if (TREE_SIDE_EFFECTS (node))
+ TREE_SIDE_EFFECTS (t) = 1;
+ if (TREE_RAISES (node))
+ TREE_RAISES (t) = 1;
+ }
+
+ return t;
+}
+
+/* Similar except don't specify the TREE_TYPE
+ and leave the TREE_SIDE_EFFECTS as 0.
+ It is permissible for arguments to be null,
+ or even garbage if their values do not matter. */
+
+tree
+build_nt VPROTO((enum tree_code code, ...))
+{
+#ifndef __STDC__
+ enum tree_code code;
+#endif
+ va_list p;
+ register tree t;
+ register int length;
+ register int i;
+
+ VA_START (p, code);
+
+#ifndef __STDC__
+ code = va_arg (p, enum tree_code);
+#endif
+
+ t = make_node (code);
+ length = tree_code_length[(int) code];
+
+ for (i = 0; i < length; i++)
+ TREE_OPERAND (t, i) = va_arg (p, tree);
+
+ va_end (p);
+ return t;
+}
+
+/* Similar to `build_nt', except we build
+ on the temp_decl_obstack, regardless. */
+
+tree
+build_parse_node VPROTO((enum tree_code code, ...))
+{
+#ifndef __STDC__
+ enum tree_code code;
+#endif
+ register struct obstack *ambient_obstack = expression_obstack;
+ va_list p;
+ register tree t;
+ register int length;
+ register int i;
+
+ VA_START (p, code);
+
+#ifndef __STDC__
+ code = va_arg (p, enum tree_code);
+#endif
+
+ expression_obstack = &temp_decl_obstack;
+
+ t = make_node (code);
+ length = tree_code_length[(int) code];
+
+ for (i = 0; i < length; i++)
+ TREE_OPERAND (t, i) = va_arg (p, tree);
+
+ va_end (p);
+ expression_obstack = ambient_obstack;
+ return t;
+}
+
+#if 0
+/* Commented out because this wants to be done very
+ differently. See cp-lex.c. */
+tree
+build_op_identifier (op1, op2)
+ tree op1, op2;
+{
+ register tree t = make_node (OP_IDENTIFIER);
+ TREE_PURPOSE (t) = op1;
+ TREE_VALUE (t) = op2;
+ return t;
+}
+#endif
+
+/* Create a DECL_... node of code CODE, name NAME and data type TYPE.
+ We do NOT enter this node in any sort of symbol table.
+
+ layout_decl is used to set up the decl's storage layout.
+ Other slots are initialized to 0 or null pointers. */
+
+tree
+build_decl (code, name, type)
+ enum tree_code code;
+ tree name, type;
+{
+ register tree t;
+
+ t = make_node (code);
+
+/* if (type == error_mark_node)
+ type = integer_type_node; */
+/* That is not done, deliberately, so that having error_mark_node
+ as the type can suppress useless errors in the use of this variable. */
+
+ DECL_NAME (t) = name;
+ DECL_ASSEMBLER_NAME (t) = name;
+ TREE_TYPE (t) = type;
+
+ if (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL)
+ layout_decl (t, 0);
+ else if (code == FUNCTION_DECL)
+ DECL_MODE (t) = FUNCTION_MODE;
+
+ return t;
+}
+
+/* BLOCK nodes are used to represent the structure of binding contours
+ and declarations, once those contours have been exited and their contents
+ compiled. This information is used for outputting debugging info. */
+
+tree
+build_block (vars, tags, subblocks, supercontext, chain)
+ tree vars, tags, subblocks, supercontext, chain;
+{
+ register tree block = make_node (BLOCK);
+ BLOCK_VARS (block) = vars;
+ BLOCK_TYPE_TAGS (block) = tags;
+ BLOCK_SUBBLOCKS (block) = subblocks;
+ BLOCK_SUPERCONTEXT (block) = supercontext;
+ BLOCK_CHAIN (block) = chain;
+ return block;
+}
+
+/* Return a type like TTYPE except that its TYPE_ATTRIBUTES
+ is ATTRIBUTE.
+
+ Such modified types already made are recorded so that duplicates
+ are not made. */
+
+tree
+build_type_attribute_variant (ttype, attribute)
+ tree ttype, attribute;
+{
+ if ( ! attribute_list_equal (TYPE_ATTRIBUTES (ttype), attribute))
+ {
+ register int hashcode;
+ register struct obstack *ambient_obstack = current_obstack;
+ tree ntype;
+
+ if (ambient_obstack != &permanent_obstack)
+ current_obstack = TYPE_OBSTACK (ttype);
+
+ ntype = copy_node (ttype);
+ current_obstack = ambient_obstack;
+
+ TYPE_POINTER_TO (ntype) = 0;
+ TYPE_REFERENCE_TO (ntype) = 0;
+ TYPE_ATTRIBUTES (ntype) = attribute;
+
+ /* Create a new main variant of TYPE. */
+ TYPE_MAIN_VARIANT (ntype) = ntype;
+ TYPE_NEXT_VARIANT (ntype) = 0;
+ TYPE_READONLY (ntype) = TYPE_VOLATILE (ntype) = 0;
+
+ hashcode = TYPE_HASH (TREE_CODE (ntype))
+ + TYPE_HASH (TREE_TYPE (ntype))
+ + type_hash_list (attribute);
+
+ switch (TREE_CODE (ntype))
+ {
+ case FUNCTION_TYPE:
+ hashcode += TYPE_HASH (TYPE_ARG_TYPES (ntype));
+ break;
+ case ARRAY_TYPE:
+ hashcode += TYPE_HASH (TYPE_DOMAIN (ntype));
+ break;
+ case INTEGER_TYPE:
+ hashcode += TYPE_HASH (TYPE_MAX_VALUE (ntype));
+ break;
+ case REAL_TYPE:
+ hashcode += TYPE_HASH (TYPE_PRECISION (ntype));
+ break;
+ }
+
+ ntype = type_hash_canon (hashcode, ntype);
+ ttype = build_type_variant (ntype, TYPE_READONLY (ttype),
+ TYPE_VOLATILE (ttype));
+ }
+
+ return ttype;
+}
+
+/* Return a type like TYPE except that its TYPE_READONLY is CONSTP
+ and its TYPE_VOLATILE is VOLATILEP.
+
+ Such variant types already made are recorded so that duplicates
+ are not made.
+
+ A variant type should never be used as the type of an expression.
+ Always copy the variant information into the TREE_READONLY
+ and TREE_THIS_VOLATILE of the expression, and then give the expression
+ as its type the "main variant", the variant whose TYPE_READONLY
+ and TYPE_VOLATILE are zero. Use TYPE_MAIN_VARIANT to find the
+ main variant. */
+
+tree
+build_type_variant (type, constp, volatilep)
+ tree type;
+ int constp, volatilep;
+{
+ register tree t;
+
+ /* Treat any nonzero argument as 1. */
+ constp = !!constp;
+ volatilep = !!volatilep;
+
+ /* If not generating auxiliary info, search the chain of variants to see
+ if there is already one there just like the one we need to have. If so,
+ use that existing one.
+
+ We don't do this in the case where we are generating aux info because
+ in that case we want each typedef name to get its own distinct type
+ node, even if the type of this new typedef is the same as some other
+ (existing) type. */
+
+ if (!flag_gen_aux_info)
+ for (t = TYPE_MAIN_VARIANT(type); t; t = TYPE_NEXT_VARIANT (t))
+ if (constp == TYPE_READONLY (t) && volatilep == TYPE_VOLATILE (t))
+ return t;
+
+ /* We need a new one. */
+
+ t = build_type_copy (type);
+ TYPE_READONLY (t) = constp;
+ TYPE_VOLATILE (t) = volatilep;
+
+ return t;
+}
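+
+#if 0
+/* Illustrative sketch (editor's example, not part of the import):
+ assuming aux-info generation is off, asking twice for the same
+ qualified variant returns the identical node, and the main variant
+ stays reachable from it. */
+static void
+example_variant ()
+{
+ tree c1 = build_type_variant (integer_type_node, 1, 0);
+ tree c2 = build_type_variant (integer_type_node, 1, 0);
+
+ if (c1 != c2 || TYPE_MAIN_VARIANT (c1) != integer_type_node)
+ abort ();
+}
+#endif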
+
+/* Give TYPE a new main variant: NEW_MAIN.
+ This is the right thing to do only when something else
+ about TYPE is modified in place. */
+
+void
+change_main_variant (type, new_main)
+ tree type, new_main;
+{
+ tree t;
+ tree omain = TYPE_MAIN_VARIANT (type);
+
+ /* Remove TYPE from the TYPE_NEXT_VARIANT chain of its main variant. */
+ if (TYPE_NEXT_VARIANT (omain) == type)
+ TYPE_NEXT_VARIANT (omain) = TYPE_NEXT_VARIANT (type);
+ else
+ for (t = TYPE_NEXT_VARIANT (omain); t && TYPE_NEXT_VARIANT (t);
+ t = TYPE_NEXT_VARIANT (t))
+ if (TYPE_NEXT_VARIANT (t) == type)
+ {
+ TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (type);
+ break;
+ }
+
+ TYPE_MAIN_VARIANT (type) = new_main;
+ TYPE_NEXT_VARIANT (type) = TYPE_NEXT_VARIANT (new_main);
+ TYPE_NEXT_VARIANT (new_main) = type;
+}
+
+/* Create a new variant of TYPE, equivalent but distinct.
+ This is so the caller can modify it. */
+
+tree
+build_type_copy (type)
+ tree type;
+{
+ register tree t, m = TYPE_MAIN_VARIANT (type);
+ register struct obstack *ambient_obstack = current_obstack;
+
+ current_obstack = TYPE_OBSTACK (type);
+ t = copy_node (type);
+ current_obstack = ambient_obstack;
+
+ TYPE_POINTER_TO (t) = 0;
+ TYPE_REFERENCE_TO (t) = 0;
+
+ /* Add this type to the chain of variants of TYPE. */
+ TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (m);
+ TYPE_NEXT_VARIANT (m) = t;
+
+ return t;
+}
+
+/* Hashing of types so that we don't make duplicates.
+ The entry point is `type_hash_canon'. */
+
+/* Each hash table slot is a bucket containing a chain
+ of these structures. */
+
+struct type_hash
+{
+ struct type_hash *next; /* Next structure in the bucket. */
+ int hashcode; /* Hash code of this type. */
+ tree type; /* The type recorded here. */
+};
+
+/* Now here is the hash table. When recording a type, it is added
+ to the slot whose index is the hash code mod the table size.
+ Note that the hash table is used for several kinds of types
+ (function types, array types and array index range types, for now).
+ While all these live in the same table, they are completely independent,
+ and the hash code is computed differently for each of these. */
+
+#define TYPE_HASH_SIZE 59
+struct type_hash *type_hash_table[TYPE_HASH_SIZE];
+
+/* Compute a hash code for a list of types (chain of TREE_LIST nodes
+ with types in the TREE_VALUE slots), by adding the hash codes
+ of the individual types. */
+
+int
+type_hash_list (list)
+ tree list;
+{
+ register int hashcode;
+ register tree tail;
+ for (hashcode = 0, tail = list; tail; tail = TREE_CHAIN (tail))
+ hashcode += TYPE_HASH (TREE_VALUE (tail));
+ return hashcode;
+}
+
+/* Look in the type hash table for a type isomorphic to TYPE.
+ If one is found, return it. Otherwise return 0. */
+
+tree
+type_hash_lookup (hashcode, type)
+ int hashcode;
+ tree type;
+{
+ register struct type_hash *h;
+ for (h = type_hash_table[hashcode % TYPE_HASH_SIZE]; h; h = h->next)
+ if (h->hashcode == hashcode
+ && TREE_CODE (h->type) == TREE_CODE (type)
+ && TREE_TYPE (h->type) == TREE_TYPE (type)
+ && attribute_list_equal (TYPE_ATTRIBUTES (h->type),
+ TYPE_ATTRIBUTES (type))
+ && (TYPE_MAX_VALUE (h->type) == TYPE_MAX_VALUE (type)
+ || tree_int_cst_equal (TYPE_MAX_VALUE (h->type),
+ TYPE_MAX_VALUE (type)))
+ && (TYPE_MIN_VALUE (h->type) == TYPE_MIN_VALUE (type)
+ || tree_int_cst_equal (TYPE_MIN_VALUE (h->type),
+ TYPE_MIN_VALUE (type)))
+ && (TYPE_DOMAIN (h->type) == TYPE_DOMAIN (type)
+ || (TYPE_DOMAIN (h->type)
+ && TREE_CODE (TYPE_DOMAIN (h->type)) == TREE_LIST
+ && TYPE_DOMAIN (type)
+ && TREE_CODE (TYPE_DOMAIN (type)) == TREE_LIST
+ && type_list_equal (TYPE_DOMAIN (h->type), TYPE_DOMAIN (type)))))
+ return h->type;
+ return 0;
+}
+
+/* Add an entry to the type-hash-table
+ for a type TYPE whose hash code is HASHCODE. */
+
+void
+type_hash_add (hashcode, type)
+ int hashcode;
+ tree type;
+{
+ register struct type_hash *h;
+
+ h = (struct type_hash *) oballoc (sizeof (struct type_hash));
+ h->hashcode = hashcode;
+ h->type = type;
+ h->next = type_hash_table[hashcode % TYPE_HASH_SIZE];
+ type_hash_table[hashcode % TYPE_HASH_SIZE] = h;
+}
+
+/* Given TYPE, and HASHCODE its hash code, return the canonical
+ object for an identical type if one already exists.
+ Otherwise, return TYPE, and record it as the canonical object
+ if it is a permanent object.
+
+ To use this function, first create a type of the sort you want.
+ Then compute its hash code from the fields of the type that
+ make it different from other similar types.
+ Then call this function and use the value.
+ This function frees the type you pass in if it is a duplicate. */
+
+/* Set to 1 to debug without canonicalization. Never set by program. */
+int debug_no_type_hash = 0;
+
+tree
+type_hash_canon (hashcode, type)
+ int hashcode;
+ tree type;
+{
+ tree t1;
+
+ if (debug_no_type_hash)
+ return type;
+
+ t1 = type_hash_lookup (hashcode, type);
+ if (t1 != 0)
+ {
+ obstack_free (TYPE_OBSTACK (type), type);
+#ifdef GATHER_STATISTICS
+ tree_node_counts[(int)t_kind]--;
+ tree_node_sizes[(int)t_kind] -= sizeof (struct tree_type);
+#endif
+ return t1;
+ }
+
+ /* If this is a permanent type, record it for later reuse. */
+ if (TREE_PERMANENT (type))
+ type_hash_add (hashcode, type);
+
+ return type;
+}
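+
+#if 0
+/* Illustrative sketch (editor's example, not part of the import) of
+ the protocol described above: build a candidate, hash only the
+ fields that distinguish it (this sketch sets just the return
+ type), then canonicalize. build_index_type below does the same
+ for real. */
+static tree
+example_canon (return_type)
+ tree return_type;
+{
+ tree t = make_node (FUNCTION_TYPE);
+ int hashcode;
+
+ TREE_TYPE (t) = return_type;
+ hashcode = TYPE_HASH (TREE_CODE (t)) + TYPE_HASH (return_type);
+
+ /* Returns an older identical type, and frees T, if one exists. */
+ return type_hash_canon (hashcode, t);
+}
+#endif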
+
+/* Given two lists of attributes, return true if list l2 is
+ equivalent to l1. */
+
+int
+attribute_list_equal (l1, l2)
+ tree l1, l2;
+{
+ return attribute_list_contained (l1, l2)
+ && attribute_list_contained (l2, l1);
+}
+
+/* Given two lists of attributes, return true if list l2 is
+ completely contained within l1. */
+
+int
+attribute_list_contained (l1, l2)
+ tree l1, l2;
+{
+ register tree t1, t2;
+
+ /* First check the obvious, maybe the lists are identical. */
+ if (l1 == l2)
+ return 1;
+
+ /* Then check the obvious, maybe the lists are similar. */
+ for (t1 = l1, t2 = l2;
+ t1 && t2
+ && TREE_VALUE (t1) == TREE_VALUE (t2);
+ t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2));
+
+ /* Maybe the lists are equal. */
+ if (t1 == 0 && t2 == 0)
+ return 1;
+
+ for (; t2; t2 = TREE_CHAIN (t2))
+ if (!value_member (TREE_VALUE (t2), l1))
+ return 0;
+ return 1;
+}
+
+/* Given two lists of types
+ (chains of TREE_LIST nodes with types in the TREE_VALUE slots)
+ return 1 if the lists contain the same types in the same order.
+ Also, the TREE_PURPOSEs must match. */
+
+int
+type_list_equal (l1, l2)
+ tree l1, l2;
+{
+ register tree t1, t2;
+ for (t1 = l1, t2 = l2; t1 && t2; t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2))
+ {
+ if (TREE_VALUE (t1) != TREE_VALUE (t2))
+ return 0;
+ if (TREE_PURPOSE (t1) != TREE_PURPOSE (t2))
+ {
+ int cmp = simple_cst_equal (TREE_PURPOSE (t1), TREE_PURPOSE (t2));
+ if (cmp < 0)
+ abort ();
+ if (cmp == 0)
+ return 0;
+ }
+ }
+
+ return t1 == t2;
+}
+
+/* Nonzero if integer constants T1 and T2
+ represent the same constant value. */
+
+int
+tree_int_cst_equal (t1, t2)
+ tree t1, t2;
+{
+ if (t1 == t2)
+ return 1;
+ if (t1 == 0 || t2 == 0)
+ return 0;
+ if (TREE_CODE (t1) == INTEGER_CST
+ && TREE_CODE (t2) == INTEGER_CST
+ && TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2)
+ && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2))
+ return 1;
+ return 0;
+}
+
+/* Nonzero if integer constants T1 and T2 represent values that satisfy <.
+ The precise way of comparison depends on their data type. */
+
+int
+tree_int_cst_lt (t1, t2)
+ tree t1, t2;
+{
+ if (t1 == t2)
+ return 0;
+
+ if (!TREE_UNSIGNED (TREE_TYPE (t1)))
+ return INT_CST_LT (t1, t2);
+ return INT_CST_LT_UNSIGNED (t1, t2);
+}
+
+/* Return an indication of the sign of the integer constant T.
+ The return value is -1 if T < 0, 0 if T == 0, and 1 if T > 0.
+ Note that -1 will never be returned if T's type is unsigned. */
+
+int
+tree_int_cst_sgn (t)
+ tree t;
+{
+ if (TREE_INT_CST_LOW (t) == 0 && TREE_INT_CST_HIGH (t) == 0)
+ return 0;
+ else if (TREE_UNSIGNED (TREE_TYPE (t)))
+ return 1;
+ else if (TREE_INT_CST_HIGH (t) < 0)
+ return -1;
+ else
+ return 1;
+}
+
+/* Compare two constructor-element-type constants. */
+int
+simple_cst_list_equal (l1, l2)
+ tree l1, l2;
+{
+ while (l1 != NULL_TREE && l2 != NULL_TREE)
+ {
+ int cmp = simple_cst_equal (TREE_VALUE (l1), TREE_VALUE (l2));
+ if (cmp < 0)
+ abort ();
+ if (cmp == 0)
+ return 0;
+ l1 = TREE_CHAIN (l1);
+ l2 = TREE_CHAIN (l2);
+ }
+ return (l1 == l2);
+}
+
+/* Return truthvalue of whether T1 is the same tree structure as T2.
+ Return 1 if they are the same.
+ Return 0 if they are understandably different.
+ Return -1 if either contains tree structure not understood by
+ this function. */
+
+int
+simple_cst_equal (t1, t2)
+ tree t1, t2;
+{
+ register enum tree_code code1, code2;
+ int cmp;
+
+ if (t1 == t2)
+ return 1;
+ if (t1 == 0 || t2 == 0)
+ return 0;
+
+ code1 = TREE_CODE (t1);
+ code2 = TREE_CODE (t2);
+
+ if (code1 == NOP_EXPR || code1 == CONVERT_EXPR || code1 == NON_LVALUE_EXPR)
+ if (code2 == NOP_EXPR || code2 == CONVERT_EXPR || code2 == NON_LVALUE_EXPR)
+ return simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
+ else
+ return simple_cst_equal (TREE_OPERAND (t1, 0), t2);
+ else if (code2 == NOP_EXPR || code2 == CONVERT_EXPR
+ || code2 == NON_LVALUE_EXPR)
+ return simple_cst_equal (t1, TREE_OPERAND (t2, 0));
+
+ if (code1 != code2)
+ return 0;
+
+ switch (code1)
+ {
+ case INTEGER_CST:
+ return TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2)
+ && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2);
+
+ case REAL_CST:
+ return REAL_VALUES_EQUAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2));
+
+ case STRING_CST:
+ return TREE_STRING_LENGTH (t1) == TREE_STRING_LENGTH (t2)
+ && !bcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2),
+ TREE_STRING_LENGTH (t1));
+
+ case CONSTRUCTOR:
+ abort ();
+
+ case SAVE_EXPR:
+ return simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
+
+ case CALL_EXPR:
+ cmp = simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
+ if (cmp <= 0)
+ return cmp;
+ return simple_cst_list_equal (TREE_OPERAND (t1, 1), TREE_OPERAND (t2, 1));
+
+ case TARGET_EXPR:
+ /* Special case: if either target is an unallocated VAR_DECL,
+ it means that it's going to be unified with whatever the
+ TARGET_EXPR is really supposed to initialize, so treat it
+ as being equivalent to anything. */
+ if ((TREE_CODE (TREE_OPERAND (t1, 0)) == VAR_DECL
+ && DECL_NAME (TREE_OPERAND (t1, 0)) == NULL_TREE
+ && DECL_RTL (TREE_OPERAND (t1, 0)) == 0)
+ || (TREE_CODE (TREE_OPERAND (t2, 0)) == VAR_DECL
+ && DECL_NAME (TREE_OPERAND (t2, 0)) == NULL_TREE
+ && DECL_RTL (TREE_OPERAND (t2, 0)) == 0))
+ cmp = 1;
+ else
+ cmp = simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
+ if (cmp <= 0)
+ return cmp;
+ return simple_cst_equal (TREE_OPERAND (t1, 1), TREE_OPERAND (t2, 1));
+
+ case WITH_CLEANUP_EXPR:
+ cmp = simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
+ if (cmp <= 0)
+ return cmp;
+ return simple_cst_equal (TREE_OPERAND (t1, 2), TREE_OPERAND (t2, 2));
+
+ case COMPONENT_REF:
+ if (TREE_OPERAND (t1, 1) == TREE_OPERAND (t2, 1))
+ return simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
+ return 0;
+
+ case VAR_DECL:
+ case PARM_DECL:
+ case CONST_DECL:
+ case FUNCTION_DECL:
+ return 0;
+ }
+
+ /* This general rule works for most tree codes.
+ All exceptions should be handled above. */
+
+ switch (TREE_CODE_CLASS (code1))
+ {
+ int i;
+ case '1':
+ case '2':
+ case '<':
+ case 'e':
+ case 'r':
+ case 's':
+ cmp = 1;
+ for (i = 0; i < tree_code_length[(int) code1]; ++i)
+ {
+ cmp = simple_cst_equal (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i));
+ if (cmp <= 0)
+ return cmp;
+ }
+ return cmp;
+ }
+
+ return -1;
+}
+
+/* Constructors for pointer, array and function types.
+ (RECORD_TYPE, UNION_TYPE and ENUMERAL_TYPE nodes are
+ constructed by language-dependent code, not here.) */
+
+/* Construct, lay out and return the type of pointers to TO_TYPE.
+ If such a type has already been constructed, reuse it. */
+
+tree
+build_pointer_type (to_type)
+ tree to_type;
+{
+ register tree t = TYPE_POINTER_TO (to_type);
+
+ /* First, if we already have a type for pointers to TO_TYPE, use it. */
+
+ if (t)
+ return t;
+
+ /* We need a new one. Put this in the same obstack as TO_TYPE. */
+ push_obstacks (TYPE_OBSTACK (to_type), TYPE_OBSTACK (to_type));
+ t = make_node (POINTER_TYPE);
+ pop_obstacks ();
+
+ TREE_TYPE (t) = to_type;
+
+ /* Record this type as the pointer to TO_TYPE. */
+ TYPE_POINTER_TO (to_type) = t;
+
+ /* Lay out the type. This function has many callers that are concerned
+ with expression-construction, and this simplifies them all.
+ Also, it guarantees the TYPE_SIZE is in the same obstack as the type. */
+ layout_type (t);
+
+ return t;
+}
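+
+/* Illustrative usage (an editor's sketch, not part of the original
+ sources): front ends funnel derived types through these constructors
+ so that identical types are shared. E.g.
+
+ tree int_ptr_type = build_pointer_type (integer_type_node);
+
+ returns the POINTER_TYPE for `int *'; a second call with the same
+ TO_TYPE returns the very same node rather than a duplicate. */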
+
+/* Create a type of integers to be the TYPE_DOMAIN of an ARRAY_TYPE.
+ MAXVAL should be the maximum value in the domain
+ (one less than the length of the array). */
+
+tree
+build_index_type (maxval)
+ tree maxval;
+{
+ register tree itype = make_node (INTEGER_TYPE);
+ TYPE_PRECISION (itype) = TYPE_PRECISION (sizetype);
+ TYPE_MIN_VALUE (itype) = build_int_2 (0, 0);
+ TREE_TYPE (TYPE_MIN_VALUE (itype)) = sizetype;
+ TYPE_MAX_VALUE (itype) = convert (sizetype, maxval);
+ TYPE_MODE (itype) = TYPE_MODE (sizetype);
+ TYPE_SIZE (itype) = TYPE_SIZE (sizetype);
+ TYPE_ALIGN (itype) = TYPE_ALIGN (sizetype);
+ if (TREE_CODE (maxval) == INTEGER_CST)
+ {
+ int maxint = (int) TREE_INT_CST_LOW (maxval);
+ /* If the domain should be empty, make sure the maxval
+ remains -1 and is not spoiled by truncation. */
+ if (INT_CST_LT (maxval, integer_zero_node))
+ {
+ TYPE_MAX_VALUE (itype) = build_int_2 (-1, -1);
+ TREE_TYPE (TYPE_MAX_VALUE (itype)) = sizetype;
+ }
+ return type_hash_canon (maxint < 0 ? ~maxint : maxint, itype);
+ }
+ else
+ return itype;
+}
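+
+/* For example (illustrative only): the TYPE_DOMAIN of `int a[10]' would
+ be built as build_index_type (build_int_2 (9, 0)), i.e. the index
+ range 0..9, MAXVAL being one less than the array length. */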
+
+/* Create a range of some discrete type TYPE (an INTEGER_TYPE,
+ ENUMERAL_TYPE, BOOLEAN_TYPE, or CHAR_TYPE), with
+ low bound LOWVAL and high bound HIGHVAL.
+ If TYPE == NULL_TREE, sizetype is used. */
+
+tree
+build_range_type (type, lowval, highval)
+ tree type, lowval, highval;
+{
+ register tree itype = make_node (INTEGER_TYPE);
+ TREE_TYPE (itype) = type;
+ if (type == NULL_TREE)
+ type = sizetype;
+ TYPE_PRECISION (itype) = TYPE_PRECISION (type);
+ TYPE_MIN_VALUE (itype) = convert (type, lowval);
+ TYPE_MAX_VALUE (itype) = convert (type, highval);
+ TYPE_MODE (itype) = TYPE_MODE (type);
+ TYPE_SIZE (itype) = TYPE_SIZE (type);
+ TYPE_ALIGN (itype) = TYPE_ALIGN (type);
+ if ((TREE_CODE (lowval) == INTEGER_CST)
+ && (TREE_CODE (highval) == INTEGER_CST))
+ {
+ HOST_WIDE_INT highint = TREE_INT_CST_LOW (highval);
+ HOST_WIDE_INT lowint = TREE_INT_CST_LOW (lowval);
+ int maxint = (int) (highint - lowint);
+ return type_hash_canon (maxint < 0 ? ~maxint : maxint, itype);
+ }
+ else
+ return itype;
+}
+
+/* Just like build_index_type, but takes lowval and highval instead
+ of just highval (maxval). */
+
+tree
+build_index_2_type (lowval, highval)
+ tree lowval, highval;
+{
+ return build_range_type (NULL_TREE, lowval, highval);
+}
+
+/* Return nonzero iff ITYPE1 and ITYPE2 are equal (in the LISP sense).
+ Needed because when index types are not hashed, equal index types
+ built at different times appear distinct, even though structurally
+ they are not. */
+
+int
+index_type_equal (itype1, itype2)
+ tree itype1, itype2;
+{
+ if (TREE_CODE (itype1) != TREE_CODE (itype2))
+ return 0;
+ if (TREE_CODE (itype1) == INTEGER_TYPE)
+ {
+ if (TYPE_PRECISION (itype1) != TYPE_PRECISION (itype2)
+ || TYPE_MODE (itype1) != TYPE_MODE (itype2)
+ || ! simple_cst_equal (TYPE_SIZE (itype1), TYPE_SIZE (itype2))
+ || TYPE_ALIGN (itype1) != TYPE_ALIGN (itype2))
+ return 0;
+ if (simple_cst_equal (TYPE_MIN_VALUE (itype1), TYPE_MIN_VALUE (itype2))
+ && simple_cst_equal (TYPE_MAX_VALUE (itype1), TYPE_MAX_VALUE (itype2)))
+ return 1;
+ }
+ return 0;
+}
+
+/* Construct, lay out and return the type of arrays of elements with ELT_TYPE
+ and number of elements specified by the range of values of INDEX_TYPE.
+ If such a type has already been constructed, reuse it. */
+
+tree
+build_array_type (elt_type, index_type)
+ tree elt_type, index_type;
+{
+ register tree t;
+ int hashcode;
+
+ if (TREE_CODE (elt_type) == FUNCTION_TYPE)
+ {
+ error ("arrays of functions are not meaningful");
+ elt_type = integer_type_node;
+ }
+
+ /* Make sure TYPE_POINTER_TO (elt_type) is filled in. */
+ build_pointer_type (elt_type);
+
+ /* Allocate the array after the pointer type,
+ in case we free it in type_hash_canon. */
+ t = make_node (ARRAY_TYPE);
+ TREE_TYPE (t) = elt_type;
+ TYPE_DOMAIN (t) = index_type;
+
+ if (index_type == 0)
+ {
+ return t;
+ }
+
+ hashcode = TYPE_HASH (elt_type) + TYPE_HASH (index_type);
+ t = type_hash_canon (hashcode, t);
+
+#if 0 /* This led to crashes, because it could put a temporary node
+ on the TYPE_NEXT_VARIANT chain of a permanent one. */
+ /* The main variant of an array type should always
+ be an array whose element type is the main variant. */
+ if (elt_type != TYPE_MAIN_VARIANT (elt_type))
+ change_main_variant (t, build_array_type (TYPE_MAIN_VARIANT (elt_type),
+ index_type));
+#endif
+
+ if (TYPE_SIZE (t) == 0)
+ layout_type (t);
+ return t;
+}
+
+/* Construct, lay out and return
+ the type of functions returning type VALUE_TYPE
+ given arguments of types ARG_TYPES.
+ ARG_TYPES is a chain of TREE_LIST nodes whose TREE_VALUEs
+ are data type nodes for the arguments of the function.
+ If such a type has already been constructed, reuse it. */
+
+tree
+build_function_type (value_type, arg_types)
+ tree value_type, arg_types;
+{
+ register tree t;
+ int hashcode;
+
+ if (TREE_CODE (value_type) == FUNCTION_TYPE)
+ {
+ error ("function return type cannot be function");
+ value_type = integer_type_node;
+ }
+
+ /* Make a node of the sort we want. */
+ t = make_node (FUNCTION_TYPE);
+ TREE_TYPE (t) = value_type;
+ TYPE_ARG_TYPES (t) = arg_types;
+
+ /* If we already have such a type, use the old one and free this one. */
+ hashcode = TYPE_HASH (value_type) + type_hash_list (arg_types);
+ t = type_hash_canon (hashcode, t);
+
+ if (TYPE_SIZE (t) == 0)
+ layout_type (t);
+ return t;
+}
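+
+/* Illustrative sketch (not from the original sources): the type of
+ `int f (char)' could be built as
+
+ build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, char_type_node,
+ tree_cons (NULL_TREE, void_type_node, NULL_TREE)));
+
+ where the trailing void_type_node marks the argument list as fixed
+ rather than variadic. */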
+
+/* Build the node for the type of references-to-TO_TYPE. */
+
+tree
+build_reference_type (to_type)
+ tree to_type;
+{
+ register tree t = TYPE_REFERENCE_TO (to_type);
+ register struct obstack *ambient_obstack = current_obstack;
+ register struct obstack *ambient_saveable_obstack = saveable_obstack;
+
+ /* First, if we already have a type for references to TO_TYPE, use it. */
+
+ if (t)
+ return t;
+
+ /* We need a new one. If TO_TYPE is permanent, make this permanent too. */
+ if (TREE_PERMANENT (to_type))
+ {
+ current_obstack = &permanent_obstack;
+ saveable_obstack = &permanent_obstack;
+ }
+
+ t = make_node (REFERENCE_TYPE);
+ TREE_TYPE (t) = to_type;
+
+ /* Record this type as the reference to TO_TYPE. */
+ TYPE_REFERENCE_TO (to_type) = t;
+
+ layout_type (t);
+
+ current_obstack = ambient_obstack;
+ saveable_obstack = ambient_saveable_obstack;
+ return t;
+}
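+
+/* Illustrative usage (editor's sketch): the C++ front end obtains the
+ type `int &' with
+
+ build_reference_type (integer_type_node);
+
+ and, as with build_pointer_type, repeated calls with the same TO_TYPE
+ yield the shared REFERENCE_TYPE node. */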
+
+/* Construct, lay out and return the type of methods belonging to class
+ BASETYPE and whose arguments and values are described by TYPE.
+ If that type exists already, reuse it.
+ TYPE must be a FUNCTION_TYPE node. */
+
+tree
+build_method_type (basetype, type)
+ tree basetype, type;
+{
+ register tree t;
+ int hashcode;
+
+ /* Make a node of the sort we want. */
+ t = make_node (METHOD_TYPE);
+
+ if (TREE_CODE (type) != FUNCTION_TYPE)
+ abort ();
+
+ TYPE_METHOD_BASETYPE (t) = TYPE_MAIN_VARIANT (basetype);
+ TREE_TYPE (t) = TREE_TYPE (type);
+
+ /* The actual arglist for this function includes a "hidden" argument
+ which is "this". Put it into the list of argument types. */
+
+ TYPE_ARG_TYPES (t)
+ = tree_cons (NULL_TREE,
+ build_pointer_type (basetype), TYPE_ARG_TYPES (type));
+
+ /* If we already have such a type, use the old one and free this one. */
+ hashcode = TYPE_HASH (basetype) + TYPE_HASH (type);
+ t = type_hash_canon (hashcode, t);
+
+ if (TYPE_SIZE (t) == 0)
+ layout_type (t);
+
+ return t;
+}
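+
+/* Illustrative example (assumed declarations): if BASETYPE is class `C'
+ and TYPE is the FUNCTION_TYPE for `int (char)', the resulting
+ METHOD_TYPE carries the argument list `C *' followed by the original
+ argument types, the `C *' being the hidden `this' parameter prepended
+ above. */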
+
+/* Construct, lay out and return the type of offsets to a value
+ of type TYPE, within an object of type BASETYPE.
+ If a suitable offset type exists already, reuse it. */
+
+tree
+build_offset_type (basetype, type)
+ tree basetype, type;
+{
+ register tree t;
+ int hashcode;
+
+ /* Make a node of the sort we want. */
+ t = make_node (OFFSET_TYPE);
+
+ TYPE_OFFSET_BASETYPE (t) = TYPE_MAIN_VARIANT (basetype);
+ TREE_TYPE (t) = type;
+
+ /* If we already have such a type, use the old one and free this one. */
+ hashcode = TYPE_HASH (basetype) + TYPE_HASH (type);
+ t = type_hash_canon (hashcode, t);
+
+ if (TYPE_SIZE (t) == 0)
+ layout_type (t);
+
+ return t;
+}
+
+/* Create a complex type whose components are COMPONENT_TYPE. */
+
+tree
+build_complex_type (component_type)
+ tree component_type;
+{
+ register tree t;
+ int hashcode;
+
+ /* Make a node of the sort we want. */
+ t = make_node (COMPLEX_TYPE);
+
+ TREE_TYPE (t) = TYPE_MAIN_VARIANT (component_type);
+ TYPE_VOLATILE (t) = TYPE_VOLATILE (component_type);
+ TYPE_READONLY (t) = TYPE_READONLY (component_type);
+
+ /* If we already have such a type, use the old one and free this one. */
+ hashcode = TYPE_HASH (component_type);
+ t = type_hash_canon (hashcode, t);
+
+ if (TYPE_SIZE (t) == 0)
+ layout_type (t);
+
+ return t;
+}
+
+/* Return OP, stripped of any conversions to wider types as much as is safe.
+ Converting the value back to OP's type makes a value equivalent to OP.
+
+ If FOR_TYPE is nonzero, we return a value which, if converted to
+ type FOR_TYPE, would be equivalent to converting OP to type FOR_TYPE.
+
+ If FOR_TYPE is nonzero, unaligned bit-field references may be changed to the
+ narrowest type that can hold the value, even if they don't exactly fit.
+ Otherwise, bit-field references are changed to a narrower type
+ only if they can be fetched directly from memory in that type.
+
+ OP must have integer, real or enumeral type. Pointers are not allowed!
+
+ There are some cases where the obvious value we could return
+ would regenerate to OP if converted to OP's type,
+ but would not extend like OP to wider types.
+ If FOR_TYPE indicates such extension is contemplated, we eschew such values.
+ For example, if OP is (unsigned short)(signed char)-1,
+ we avoid returning (signed char)-1 if FOR_TYPE is int,
+ even though extending that to an unsigned short would regenerate OP,
+ since the result of extending (signed char)-1 to (int)
+ is different from (int) OP. */
+
+tree
+get_unwidened (op, for_type)
+ register tree op;
+ tree for_type;
+{
+ /* Set UNS initially if converting OP to FOR_TYPE is a zero-extension. */
+ /* TYPE_PRECISION is safe in place of type_precision since
+ pointer types are not allowed. */
+ register tree type = TREE_TYPE (op);
+ register unsigned final_prec
+ = TYPE_PRECISION (for_type != 0 ? for_type : type);
+ register int uns
+ = (for_type != 0 && for_type != type
+ && final_prec > TYPE_PRECISION (type)
+ && TREE_UNSIGNED (type));
+ register tree win = op;
+
+ while (TREE_CODE (op) == NOP_EXPR)
+ {
+ register int bitschange
+ = TYPE_PRECISION (TREE_TYPE (op))
+ - TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op, 0)));
+
+ /* Truncations are many-one so cannot be removed,
+ unless we are later going to truncate down even further. */
+ if (bitschange < 0
+ && final_prec > TYPE_PRECISION (TREE_TYPE (op)))
+ break;
+
+ /* See what's inside this conversion. If we decide to strip it,
+ we will set WIN. */
+ op = TREE_OPERAND (op, 0);
+
+ /* If we have not stripped any zero-extensions (uns is 0),
+ we can strip any kind of extension.
+ If we have previously stripped a zero-extension,
+ only zero-extensions can safely be stripped.
+ Any extension can be stripped if the bits it would produce
+ are all going to be discarded later by truncating to FOR_TYPE. */
+
+ if (bitschange > 0)
+ {
+ if (! uns || final_prec <= TYPE_PRECISION (TREE_TYPE (op)))
+ win = op;
+ /* TREE_UNSIGNED says whether this is a zero-extension.
+ Let's avoid computing it if it does not affect WIN
+ and if UNS will not be needed again. */
+ if ((uns || TREE_CODE (op) == NOP_EXPR)
+ && TREE_UNSIGNED (TREE_TYPE (op)))
+ {
+ uns = 1;
+ win = op;
+ }
+ }
+ }
+
+ if (TREE_CODE (op) == COMPONENT_REF
+ /* Since type_for_size always gives an integer type. */
+ && TREE_CODE (type) != REAL_TYPE)
+ {
+ unsigned innerprec = TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (op, 1)));
+ type = type_for_size (innerprec, TREE_UNSIGNED (TREE_OPERAND (op, 1)));
+
+ /* We can get this structure field in the narrowest type it fits in.
+ If FOR_TYPE is 0, do this only for a field that matches the
+ narrower type exactly and is aligned for it.
+ The resulting extension to its nominal type (a fullword type)
+ must satisfy the same conditions as for other extensions. */
+
+ if (innerprec < TYPE_PRECISION (TREE_TYPE (op))
+ && (for_type || ! DECL_BIT_FIELD (TREE_OPERAND (op, 1)))
+ && (! uns || final_prec <= innerprec
+ || TREE_UNSIGNED (TREE_OPERAND (op, 1)))
+ && type != 0)
+ {
+ win = build (COMPONENT_REF, type, TREE_OPERAND (op, 0),
+ TREE_OPERAND (op, 1));
+ TREE_SIDE_EFFECTS (win) = TREE_SIDE_EFFECTS (op);
+ TREE_THIS_VOLATILE (win) = TREE_THIS_VOLATILE (op);
+ TREE_RAISES (win) = TREE_RAISES (op);
+ }
+ }
+ return win;
+}
+
+/* Return OP or a simpler expression for a narrower value
+ which can be sign-extended or zero-extended to give back OP.
+ Store in *UNSIGNEDP_PTR either 1 if the value should be zero-extended
+ or 0 if the value should be sign-extended. */
+
+tree
+get_narrower (op, unsignedp_ptr)
+ register tree op;
+ int *unsignedp_ptr;
+{
+ register int uns = 0;
+ int first = 1;
+ register tree win = op;
+
+ while (TREE_CODE (op) == NOP_EXPR)
+ {
+ register int bitschange
+ = TYPE_PRECISION (TREE_TYPE (op))
+ - TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op, 0)));
+
+ /* Truncations are many-one so cannot be removed. */
+ if (bitschange < 0)
+ break;
+
+ /* See what's inside this conversion. If we decide to strip it,
+ we will set WIN. */
+ op = TREE_OPERAND (op, 0);
+
+ if (bitschange > 0)
+ {
+ /* An extension: the outermost one can be stripped,
+ but remember whether it is zero or sign extension. */
+ if (first)
+ uns = TREE_UNSIGNED (TREE_TYPE (op));
+ /* Otherwise, if a sign extension has been stripped,
+ only sign extensions can now be stripped;
+ if a zero extension has been stripped, only zero-extensions. */
+ else if (uns != TREE_UNSIGNED (TREE_TYPE (op)))
+ break;
+ first = 0;
+ }
+ else /* bitschange == 0 */
+ {
+ /* A change in nominal type can always be stripped, but we must
+ preserve the unsignedness. */
+ if (first)
+ uns = TREE_UNSIGNED (TREE_TYPE (op));
+ first = 0;
+ }
+
+ win = op;
+ }
+
+ if (TREE_CODE (op) == COMPONENT_REF
+ /* Since type_for_size always gives an integer type. */
+ && TREE_CODE (TREE_TYPE (op)) != REAL_TYPE)
+ {
+ unsigned innerprec = TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (op, 1)));
+ tree type = type_for_size (innerprec, TREE_UNSIGNED (op));
+
+ /* We can get this structure field in a narrower type that fits it,
+ but the resulting extension to its nominal type (a fullword type)
+ must satisfy the same conditions as for other extensions.
+
+ Do this only for fields that are aligned (not bit-fields),
+ because when bit-field insns will be used there is no
+ advantage in doing this. */
+
+ if (innerprec < TYPE_PRECISION (TREE_TYPE (op))
+ && ! DECL_BIT_FIELD (TREE_OPERAND (op, 1))
+ && (first || uns == TREE_UNSIGNED (TREE_OPERAND (op, 1)))
+ && type != 0)
+ {
+ if (first)
+ uns = TREE_UNSIGNED (TREE_OPERAND (op, 1));
+ win = build (COMPONENT_REF, type, TREE_OPERAND (op, 0),
+ TREE_OPERAND (op, 1));
+ TREE_SIDE_EFFECTS (win) = TREE_SIDE_EFFECTS (op);
+ TREE_THIS_VOLATILE (win) = TREE_THIS_VOLATILE (op);
+ TREE_RAISES (win) = TREE_RAISES (op);
+ }
+ }
+ *unsignedp_ptr = uns;
+ return win;
+}
+
+/* Return the precision of a type, for arithmetic purposes.
+ Supports all types on which arithmetic is possible
+ (including pointer types).
+ It's not clear yet what will be right for complex types. */
+
+int
+type_precision (type)
+ register tree type;
+{
+ return ((TREE_CODE (type) == INTEGER_TYPE
+ || TREE_CODE (type) == ENUMERAL_TYPE
+ || TREE_CODE (type) == REAL_TYPE)
+ ? TYPE_PRECISION (type) : POINTER_SIZE);
+}
+
+/* Nonzero if integer constant C has a value that is permissible
+ for type TYPE (an INTEGER_TYPE). */
+
+int
+int_fits_type_p (c, type)
+ tree c, type;
+{
+ if (TREE_UNSIGNED (type))
+ return (! (TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST
+ && INT_CST_LT_UNSIGNED (TYPE_MAX_VALUE (type), c))
+ && ! (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
+ && INT_CST_LT_UNSIGNED (c, TYPE_MIN_VALUE (type))));
+ else
+ return (! (TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST
+ && INT_CST_LT (TYPE_MAX_VALUE (type), c))
+ && ! (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
+ && INT_CST_LT (c, TYPE_MIN_VALUE (type))));
+}
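+
+/* Worked example (illustrative): for an 8-bit unsigned TYPE whose
+ TYPE_MAX_VALUE is 255, a constant C of 300 fails the first test above
+ and the function returns 0, while a C of 200 passes both tests and
+ the function returns 1. */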
+
+/* Return the innermost context enclosing DECL that is
+ a FUNCTION_DECL, or zero if none. */
+
+tree
+decl_function_context (decl)
+ tree decl;
+{
+ tree context;
+
+ if (TREE_CODE (decl) == ERROR_MARK)
+ return 0;
+
+ if (TREE_CODE (decl) == SAVE_EXPR)
+ context = SAVE_EXPR_CONTEXT (decl);
+ else
+ context = DECL_CONTEXT (decl);
+
+ while (context && TREE_CODE (context) != FUNCTION_DECL)
+ {
+ if (TREE_CODE (context) == RECORD_TYPE
+ || TREE_CODE (context) == UNION_TYPE)
+ context = TYPE_CONTEXT (context);
+ else if (TREE_CODE (context) == TYPE_DECL)
+ context = DECL_CONTEXT (context);
+ else if (TREE_CODE (context) == BLOCK)
+ context = BLOCK_SUPERCONTEXT (context);
+ else
+ /* Unhandled CONTEXT!? */
+ abort ();
+ }
+
+ return context;
+}
+
+/* Return the innermost context enclosing DECL that is
+ a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE, or zero if none.
+ TYPE_DECLs and FUNCTION_DECLs are transparent to this function. */
+
+tree
+decl_type_context (decl)
+ tree decl;
+{
+ tree context = DECL_CONTEXT (decl);
+
+ while (context)
+ {
+ if (TREE_CODE (context) == RECORD_TYPE
+ || TREE_CODE (context) == UNION_TYPE
+ || TREE_CODE (context) == QUAL_UNION_TYPE)
+ return context;
+ if (TREE_CODE (context) == TYPE_DECL
+ || TREE_CODE (context) == FUNCTION_DECL)
+ context = DECL_CONTEXT (context);
+ else if (TREE_CODE (context) == BLOCK)
+ context = BLOCK_SUPERCONTEXT (context);
+ else
+ /* Unhandled CONTEXT!? */
+ abort ();
+ }
+ return NULL_TREE;
+}
+
+void
+print_obstack_statistics (str, o)
+ char *str;
+ struct obstack *o;
+{
+ struct _obstack_chunk *chunk = o->chunk;
+ int n_chunks = 0;
+ int n_alloc = 0;
+
+ while (chunk)
+ {
+ n_chunks += 1;
+ n_alloc += chunk->limit - &chunk->contents[0];
+ chunk = chunk->prev;
+ }
+ fprintf (stderr, "obstack %s: %d bytes, %d chunks\n",
+ str, n_alloc, n_chunks);
+}
+
+void
+dump_tree_statistics ()
+{
+ int i;
+ int total_nodes, total_bytes;
+
+ fprintf (stderr, "\n??? tree nodes created\n\n");
+#ifdef GATHER_STATISTICS
+ fprintf (stderr, "Kind Nodes Bytes\n");
+ fprintf (stderr, "-------------------------------------\n");
+ total_nodes = total_bytes = 0;
+ for (i = 0; i < (int) all_kinds; i++)
+ {
+ fprintf (stderr, "%-20s %6d %9d\n", tree_node_kind_names[i],
+ tree_node_counts[i], tree_node_sizes[i]);
+ total_nodes += tree_node_counts[i];
+ total_bytes += tree_node_sizes[i];
+ }
+ fprintf (stderr, "%-20s %9d\n", "identifier names", id_string_size);
+ fprintf (stderr, "-------------------------------------\n");
+ fprintf (stderr, "%-20s %6d %9d\n", "Total", total_nodes, total_bytes);
+ fprintf (stderr, "-------------------------------------\n");
+#else
+ fprintf (stderr, "(No per-node statistics)\n");
+#endif
+ print_lang_statistics ();
+}
+
+#define FILE_FUNCTION_PREFIX_LEN 9
+
+#ifndef NO_DOLLAR_IN_LABEL
+#define FILE_FUNCTION_FORMAT "_GLOBAL_$D$%s"
+#else /* NO_DOLLAR_IN_LABEL */
+#ifndef NO_DOT_IN_LABEL
+#define FILE_FUNCTION_FORMAT "_GLOBAL_.D.%s"
+#else /* NO_DOT_IN_LABEL */
+#define FILE_FUNCTION_FORMAT "_GLOBAL__D_%s"
+#endif /* NO_DOT_IN_LABEL */
+#endif /* NO_DOLLAR_IN_LABEL */
+
+extern char * first_global_object_name;
+
+/* If KIND=='I', return a suitable global initializer (constructor) name.
+ If KIND=='D', return a suitable global clean-up (destructor) name. */
+
+tree
+get_file_function_name (kind)
+ int kind;
+{
+ char *buf;
+ register char *p;
+
+ if (first_global_object_name)
+ p = first_global_object_name;
+ else if (main_input_filename)
+ p = main_input_filename;
+ else
+ p = input_filename;
+
+ buf = (char *) alloca (sizeof (FILE_FUNCTION_FORMAT) + strlen (p));
+
+ /* Set up the name of the file-level functions we may need. */
+ /* Use a global object (which is already required to be unique over
+ the program) rather than the file name (which imposes extra
+ constraints). -- Raeburn@MIT.EDU, 10 Jan 1990. */
+ sprintf (buf, FILE_FUNCTION_FORMAT, p);
+
+ /* Don't need to pull weird characters out of global names. */
+ if (p != first_global_object_name)
+ {
+ for (p = buf+11; *p; p++)
+ if (! ((*p >= '0' && *p <= '9')
+#if 0 /* we always want labels, which are valid C++ identifiers (+ `$') */
+#ifndef ASM_IDENTIFY_GCC /* this is required if `.' is invalid -- k. raeburn */
+ || *p == '.'
+#endif
+#endif
+#ifndef NO_DOLLAR_IN_LABEL /* this for `$'; unlikely, but... -- kr */
+ || *p == '$'
+#endif
+#ifndef NO_DOT_IN_LABEL /* this for `.'; unlikely, but... */
+ || *p == '.'
+#endif
+ || (*p >= 'A' && *p <= 'Z')
+ || (*p >= 'a' && *p <= 'z')))
+ *p = '_';
+ }
+
+ buf[FILE_FUNCTION_PREFIX_LEN] = kind;
+
+ return get_identifier (buf);
+}
diff --git a/gnu/usr.bin/cc/cc_int/unroll.c b/gnu/usr.bin/cc/cc_int/unroll.c
new file mode 100644
index 0000000..9b968ac
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/unroll.c
@@ -0,0 +1,3345 @@
+/* Try to unroll loops, and split induction variables.
+ Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.
+ Contributed by James E. Wilson, Cygnus Support/UC Berkeley.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Try to unroll a loop, and split induction variables.
+
+ Loops for which the number of iterations can be calculated exactly are
+ handled specially. If the number of iterations times the insn_count is
+ less than MAX_UNROLLED_INSNS, then the loop is unrolled completely.
+ Otherwise, we try to unroll the loop a number of times that evenly
+ divides the number of iterations, so that only one exit test will be
+ needed. It is unrolled
+ a number of times approximately equal to MAX_UNROLLED_INSNS divided by
+ the insn count.
+
+ Otherwise, if the number of iterations can be calculated exactly at
+ run time, and the loop is always entered at the top, then we try to
+ precondition the loop. That is, at run time, calculate how many times
+ the loop will execute, and then execute the loop body a few times so
+ that the remaining iterations will be some multiple of 4 (or 2 if the
+ loop is large). Then fall through to a loop unrolled 4 (or 2) times,
+ with only one exit test needed at the end of the loop.
+
+ Otherwise, if the number of iterations can not be calculated exactly,
+ not even at run time, then we still unroll the loop a number of times
+ approximately equal to MAX_UNROLLED_INSNS divided by the insn count,
+ but there must be an exit test after each copy of the loop body.
+
+ For each induction variable which is dead outside the loop (replaceable)
+ or for which we can easily calculate the final value, if we can easily
+ calculate its value at each place where it is set as a function of the
+ current loop unroll count and the variable's value at loop entry, then
+ the induction variable is split into `N' different variables, one for
+ each copy of the loop body. One variable is live across the backward
+ branch, and the others are all calculated as a function of this variable.
+ This helps eliminate data dependencies, and leads to further opportunities
+ for cse. */
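+
+/* A sketch of the modulo transformation (illustrative only): a loop
+ whose iteration count is known to be a multiple of 4, such as
+
+ for (i = 0; i < n; i++)
+ a[i] = b[i];
+
+ becomes roughly
+
+ for (i = 0; i < n; i += 4)
+ {
+ a[i] = b[i];
+ a[i + 1] = b[i + 1];
+ a[i + 2] = b[i + 2];
+ a[i + 3] = b[i + 3];
+ }
+
+ so that only one exit test executes per four copies of the body. */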
+
+/* Possible improvements follow: */
+
+/* ??? Add an extra pass somewhere to determine whether unrolling will
+ give any benefit. E.g. after generating all unrolled insns, compute the
+ cost of all insns and compare against cost of insns in rolled loop.
+
+ - On traditional architectures, unrolling a non-constant bound loop
+ is a win if there is a giv whose only use is in memory addresses, the
+ memory addresses can be split, and hence giv increments can be
+ eliminated.
+ - It is also a win if the loop is executed many times, and preconditioning
+ can be performed for the loop.
+ Add code to check for these and similar cases. */
+
+/* ??? Improve control of which loops get unrolled. Could use profiling
+ info to only unroll the most commonly executed loops. Perhaps have
+ a user-specifiable option to control the amount of code expansion,
+ or the percent of loops to consider for unrolling. Etc. */
+
+/* ??? Look at the register copies inside the loop to see if they form a
+ simple permutation. If so, iterate the permutation until it gets back to
+ the start state. This is how many times we should unroll the loop, for
+ best results, because then all register copies can be eliminated.
+ For example, the lisp nreverse function should be unrolled 3 times:
+ while (this)
+ {
+ next = this->cdr;
+ this->cdr = prev;
+ prev = this;
+ this = next;
+ }
+
+ ??? The number of times to unroll the loop may also be based on data
+ references in the loop. For example, if we have a loop that references
+ x[i-1], x[i], and x[i+1], we should unroll it a multiple of 3 times. */
+
+/* ??? Add some simple linear equation solving capability so that we can
+ determine the number of loop iterations for more complex loops.
+ For example, consider this loop from gdb
+ #define SWAP_TARGET_AND_HOST(buffer,len)
+ {
+ char tmp;
+ char *p = (char *) buffer;
+ char *q = ((char *) buffer) + len - 1;
+ int iterations = (len + 1) >> 1;
+ int i;
+ for (p; p < q; p++, q--)
+ {
+ tmp = *q;
+ *q = *p;
+ *p = tmp;
+ }
+ }
+ Note that:
+ start value = p = &buffer + current_iteration
+ end value = q = &buffer + len - 1 - current_iteration
+ Given the loop exit test of "p < q", there must be "q - p" iterations;
+ set q - p equal to zero and solve for the number of iterations:
+ q - p = len - 1 - 2*current_iteration = 0
+ current_iteration = (len - 1) / 2
+ Hence, there are (len - 1) / 2 (rounded up to the nearest integer)
+ iterations of this loop. */
+
+/* ??? Currently, no labels are marked as loop invariant when doing loop
+ unrolling. This is because an insn inside the loop, that loads the address
+ of a label inside the loop into a register, could be moved outside the loop
+ by the invariant code motion pass if labels were invariant. If the loop
+ is subsequently unrolled, the code will be wrong because each unrolled
+ body of the loop will use the same address, whereas each actually needs a
+ different address. A case where this happens is when a loop containing
+ a switch statement is unrolled.
+
+ It would be better to let labels be considered invariant. When we
+ unroll loops here, check to see if any insns using a label local to the
+ loop were moved before the loop. If so, then correct the problem, by
+ moving the insn back into the loop, or perhaps replicate the insn before
+ the loop, one copy for each time the loop is unrolled. */
+
+/* The prime factors looked for when trying to unroll a loop by some
+ number which evenly divides the total number of iterations. Just checking
+ for these 4 prime factors will find at least one factor for 75% of
+ all numbers theoretically. Practically speaking, this will succeed
+ almost all of the time since loops are generally a multiple of 2
+ and/or 5. */
+
+#define NUM_FACTORS 4
+
+struct _factor { int factor, count; } factors[NUM_FACTORS]
+ = { {2, 0}, {3, 0}, {5, 0}, {7, 0}};
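+
+/* Worked example (illustrative): for loop_n_iterations == 12 the counts
+ above become {2: 2, 3: 1, 5: 0, 7: 0}. Taking the larger factors
+ first, the modulo unroll factor grows to 3, then 6, then 12, stopping
+ as soon as the unrolled insn count would reach MAX_UNROLLED_INSNS. */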
+
+/* Describes the different types of loop unrolling performed. */
+
+enum unroll_types { UNROLL_COMPLETELY, UNROLL_MODULO, UNROLL_NAIVE };
+
+#include "config.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "integrate.h"
+#include "regs.h"
+#include "flags.h"
+#include "expr.h"
+#include <stdio.h>
+#include "loop.h"
+
+/* This controls which loops are unrolled, and by how much we unroll
+ them. */
+
+#ifndef MAX_UNROLLED_INSNS
+#define MAX_UNROLLED_INSNS 100
+#endif
+
+/* Indexed by register number, if non-zero, then it contains a pointer
+ to a struct induction for a DEST_REG giv which has been combined with
+ one or more address givs. This is needed because whenever such a DEST_REG
+ giv is modified, we must modify the value of all split address givs
+ that were combined with this DEST_REG giv. */
+
+static struct induction **addr_combined_regs;
+
+/* Indexed by register number, if this is a splittable induction variable,
+ then this will hold the current value of the register, which depends on the
+ iteration number. */
+
+static rtx *splittable_regs;
+
+/* Indexed by register number, if this is a splittable induction variable,
+ then this will hold the number of instructions in the loop that modify
+ the induction variable. Used to ensure that only the last insn modifying
+ a split iv will update the original iv of the dest. */
+
+static int *splittable_regs_updates;
+
+/* Values describing the current loop's iteration variable. These are set up
+ by loop_iterations, and used by precondition_loop_p. */
+
+static rtx loop_iteration_var;
+static rtx loop_initial_value;
+static rtx loop_increment;
+static rtx loop_final_value;
+
+/* Forward declarations. */
+
+static void init_reg_map ();
+static int precondition_loop_p ();
+static void copy_loop_body ();
+static void iteration_info ();
+static rtx approx_final_value ();
+static int find_splittable_regs ();
+static int find_splittable_givs ();
+static rtx fold_rtx_mult_add ();
+static rtx remap_split_bivs ();
+
+/* Try to unroll one loop and split induction variables in the loop.
+
+ The loop is described by the arguments LOOP_END, INSN_COUNT, and
+ LOOP_START. END_INSERT_BEFORE indicates where insns should be added
+ which need to be executed when the loop falls through. STRENGTH_REDUCE_P
+ indicates whether information generated in the strength reduction pass
+ is available.
+
+ This function is intended to be called from within `strength_reduce'
+ in loop.c. */
+
+void
+unroll_loop (loop_end, insn_count, loop_start, end_insert_before,
+ strength_reduce_p)
+ rtx loop_end;
+ int insn_count;
+ rtx loop_start;
+ rtx end_insert_before;
+ int strength_reduce_p;
+{
+ int i, j, temp;
+ int unroll_number = 1;
+ rtx copy_start, copy_end;
+ rtx insn, copy, sequence, pattern, tem;
+ int max_labelno, max_insnno;
+ rtx insert_before;
+ struct inline_remap *map;
+ char *local_label;
+ int maxregnum;
+ int new_maxregnum;
+ rtx exit_label = 0;
+ rtx start_label;
+ struct iv_class *bl;
+ int splitting_not_safe = 0;
+ enum unroll_types unroll_type;
+ int loop_preconditioned = 0;
+ rtx safety_label;
+ /* This points to the last real insn in the loop, which should be either
+ a JUMP_INSN (for conditional jumps) or a BARRIER (for unconditional
+ jumps). */
+ rtx last_loop_insn;
+
+ /* Don't bother unrolling huge loops. Since the minimum factor is
+ two, loops greater than one half of MAX_UNROLLED_INSNS will never
+ be unrolled. */
+ if (insn_count > MAX_UNROLLED_INSNS / 2)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Unrolling failure: Loop too big.\n");
+ return;
+ }
+
+ /* When emitting debugger info, we can't unroll loops with unequal numbers
+ of block_beg and block_end notes, because that would unbalance the block
+ structure of the function. This can happen as a result of the
+ "if (foo) bar; else break;" optimization in jump.c. */
+
+ if (write_symbols != NO_DEBUG)
+ {
+ int block_begins = 0;
+ int block_ends = 0;
+
+ for (insn = loop_start; insn != loop_end; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ {
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG)
+ block_begins++;
+ else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END)
+ block_ends++;
+ }
+ }
+
+ if (block_begins != block_ends)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: Unbalanced block notes.\n");
+ return;
+ }
+ }
+
+ /* Determine type of unroll to perform. Depends on the number of iterations
+ and the size of the loop. */
+
+ /* If there is no strength reduce info, then set loop_n_iterations to zero.
+ This can happen if strength_reduce can't find any bivs in the loop.
+ A value of zero indicates that the number of iterations could not be
+ calculated. */
+
+ if (! strength_reduce_p)
+ loop_n_iterations = 0;
+
+ if (loop_dump_stream && loop_n_iterations > 0)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: %d iterations.\n", loop_n_iterations);
+
+ /* Find and save a pointer to the last nonnote insn in the loop. */
+
+ last_loop_insn = prev_nonnote_insn (loop_end);
+
+ /* Calculate how many times to unroll the loop. Indicate whether or
+ not the loop is being completely unrolled. */
+
+ if (loop_n_iterations == 1)
+ {
+ /* If number of iterations is exactly 1, then eliminate the compare and
+ branch at the end of the loop since they will never be taken.
+ Then return, since no other action is needed here. */
+
+ /* If the last instruction is not a BARRIER or a JUMP_INSN, then
+ don't do anything. */
+
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ {
+ /* Delete the jump insn. This will delete the barrier also. */
+ delete_insn (PREV_INSN (last_loop_insn));
+ }
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+#ifdef HAVE_cc0
+ /* The immediately preceding insn is a compare which must be
+ deleted. */
+ delete_insn (last_loop_insn);
+ delete_insn (PREV_INSN (last_loop_insn));
+#else
+ /* The immediately preceding insn may not be the compare, so don't
+ delete it. */
+ delete_insn (last_loop_insn);
+#endif
+ }
+ return;
+ }
+ else if (loop_n_iterations > 0
+ && loop_n_iterations * insn_count < MAX_UNROLLED_INSNS)
+ {
+ unroll_number = loop_n_iterations;
+ unroll_type = UNROLL_COMPLETELY;
+ }
+ else if (loop_n_iterations > 0)
+ {
+ /* Try to factor the number of iterations. Don't bother with the
+ general case; using only 2, 3, 5, and 7 will get 75% of all
+ numbers theoretically, and almost all in practice. */
+
+ for (i = 0; i < NUM_FACTORS; i++)
+ factors[i].count = 0;
+
+ temp = loop_n_iterations;
+ for (i = NUM_FACTORS - 1; i >= 0; i--)
+ while (temp % factors[i].factor == 0)
+ {
+ factors[i].count++;
+ temp = temp / factors[i].factor;
+ }
+
+ /* Start with the larger factors first so that we generally
+ get lots of unrolling. */
+
+ unroll_number = 1;
+ temp = insn_count;
+ for (i = NUM_FACTORS - 1; i >= 0; i--)
+ while (factors[i].count--)
+ {
+ if (temp * factors[i].factor < MAX_UNROLLED_INSNS)
+ {
+ unroll_number *= factors[i].factor;
+ temp *= factors[i].factor;
+ }
+ else
+ break;
+ }
+
+ /* If we couldn't find any factors, then unroll as in the normal
+ case. */
+ if (unroll_number == 1)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: No factors found.\n");
+ }
+ else
+ unroll_type = UNROLL_MODULO;
+ }
+
+ /* Default case, calculate number of times to unroll loop based on its
+ size. */
+ if (unroll_number == 1)
+ {
+ if (8 * insn_count < MAX_UNROLLED_INSNS)
+ unroll_number = 8;
+ else if (4 * insn_count < MAX_UNROLLED_INSNS)
+ unroll_number = 4;
+ else
+ unroll_number = 2;
+
+ unroll_type = UNROLL_NAIVE;
+ }
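+
+ /* Illustrative numbers: with MAX_UNROLLED_INSNS == 100, a body of 10
+ insns is unrolled 8 times (8 * 10 < 100), while a body of 30 insns
+ is unrolled only twice, since both 8 * 30 and 4 * 30 reach the
+ limit. */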
+
+ /* Now we know how many times to unroll the loop. */
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling loop %d times.\n", unroll_number);
+
+ if (unroll_type == UNROLL_COMPLETELY || unroll_type == UNROLL_MODULO)
+ {
+ /* Loops of these types should never start with a jump down to
+ the exit condition test. For now, check for this case just to
+ be sure. UNROLL_NAIVE loops can be of this form; that case is
+ handled below. */
+ insn = loop_start;
+ while (GET_CODE (insn) != CODE_LABEL && GET_CODE (insn) != JUMP_INSN)
+ insn = NEXT_INSN (insn);
+ if (GET_CODE (insn) == JUMP_INSN)
+ abort ();
+ }
+
+ if (unroll_type == UNROLL_COMPLETELY)
+ {
+ /* Completely unrolling the loop: Delete the compare and branch at
+ the end (the last two instructions). This deletion must be done at the
+ very end of loop unrolling, to avoid problems with calls to
+ back_branch_in_range_p, which is called by find_splittable_regs.
+ All increments of splittable bivs/givs are changed to load constant
+ instructions. */
+
+ copy_start = loop_start;
+
+ /* Set insert_before to the instruction immediately after the JUMP_INSN
+ (or BARRIER), so that any NOTEs between the JUMP_INSN and the end of
+ the loop will be correctly handled by copy_loop_body. */
+ insert_before = NEXT_INSN (last_loop_insn);
+
+ /* Set copy_end to the insn before the jump at the end of the loop. */
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+#ifdef HAVE_cc0
+ /* The instruction immediately before the JUMP_INSN is a compare
+ instruction which we do not want to copy. */
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+#else
+ /* The instruction immediately before the JUMP_INSN may not be the
+ compare, so we must copy it. */
+ copy_end = PREV_INSN (last_loop_insn);
+#endif
+ }
+ else
+ {
+ /* We currently can't unroll a loop if it doesn't end with a
+ JUMP_INSN. There would need to be a mechanism that recognizes
+ this case, and then inserts a jump after each loop body, which
+ jumps to after the last loop body. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: loop does not end with a JUMP_INSN.\n");
+ return;
+ }
+ }
+ else if (unroll_type == UNROLL_MODULO)
+ {
+ /* Partially unrolling the loop: The compare and branch at the end
+ (the last two instructions) must remain. Don't copy the compare
+ and branch instructions at the end of the loop. Insert the unrolled
+ code immediately before the compare/branch at the end so that the
+ code will fall through to them as before. */
+
+ copy_start = loop_start;
+
+ /* Set insert_before to the jump insn at the end of the loop.
+ Set copy_end to before the jump insn at the end of the loop. */
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ {
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+ }
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+#ifdef HAVE_cc0
+ /* The instruction immediately before the JUMP_INSN is a compare
+ instruction which we do not want to copy or delete. */
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+#else
+ /* The instruction immediately before the JUMP_INSN may not be the
+ compare, so we must copy it. */
+ insert_before = last_loop_insn;
+ copy_end = PREV_INSN (last_loop_insn);
+#endif
+ }
+ else
+ {
+ /* We currently can't unroll a loop if it doesn't end with a
+ JUMP_INSN. There would need to be a mechanism that recognizes
+ this case, and then inserts a jump after each loop body, which
+ jumps to after the last loop body. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: loop does not end with a JUMP_INSN.\n");
+ return;
+ }
+ }
+ else
+ {
+ /* Normal case: Must copy the compare and branch instructions at the
+ end of the loop. */
+
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ {
+ /* Loop ends with an unconditional jump and a barrier.
+ Handle this like above, don't copy jump and barrier.
+ This is not strictly necessary, but doing so prevents generating
+ unconditional jumps to an immediately following label.
+
+ This will be corrected below if the target of this jump is
+ not the start_label. */
+
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+ }
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+ /* Set insert_before to immediately after the JUMP_INSN, so that
+ NOTEs at the end of the loop will be correctly handled by
+ copy_loop_body. */
+ insert_before = NEXT_INSN (last_loop_insn);
+ copy_end = last_loop_insn;
+ }
+ else
+ {
+ /* We currently can't unroll a loop if it doesn't end with a
+ JUMP_INSN. There would need to be a mechanism that recognizes
+ this case, and then inserts a jump after each loop body, which
+ jumps to after the last loop body. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: loop does not end with a JUMP_INSN.\n");
+ return;
+ }
+
+ /* If we are copying exit test branches because they cannot be eliminated,
+ then we must convert the fall through case of the branch to a jump past
+ the end of the loop. Create a label to emit after the loop and save
+ it for later use. Do not use the label after the loop, if any, since
+ it might be used by insns outside the loop, or there might be insns
+ added before it later by final_[bg]iv_value which must be after
+ the real exit label. */
+ exit_label = gen_label_rtx ();
+
+ insn = loop_start;
+ while (GET_CODE (insn) != CODE_LABEL && GET_CODE (insn) != JUMP_INSN)
+ insn = NEXT_INSN (insn);
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ /* The loop starts with a jump down to the exit condition test.
+ Start copying the loop after the barrier following this
+ jump insn. */
+ copy_start = NEXT_INSN (insn);
+
+ /* Splitting induction variables doesn't work when the loop is
+ entered via a jump to the bottom, because then we end up doing
+ a comparison against a new register for a split variable, but
+ we did not execute the set insn for the new register because
+ it was skipped over. */
+ splitting_not_safe = 1;
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Splitting not safe, because loop not entered at top.\n");
+ }
+ else
+ copy_start = loop_start;
+ }
+
+ /* This should always be the first label in the loop. */
+ start_label = NEXT_INSN (copy_start);
+ /* There may be a line number note and/or a loop continue note here. */
+ while (GET_CODE (start_label) == NOTE)
+ start_label = NEXT_INSN (start_label);
+ if (GET_CODE (start_label) != CODE_LABEL)
+ {
+ /* This can happen as a result of jump threading. If the first insns in
+ the loop test the same condition as the loop's backward jump, or the
+ opposite condition, then the backward jump will be modified to point
+ to elsewhere, and the loop's start label is deleted.
+
+ This case currently can not be handled by the loop unrolling code. */
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: unknown insns between BEG note and loop label.\n");
+ return;
+ }
+ if (LABEL_NAME (start_label))
+ {
+ /* The jump optimization pass must have combined the original start label
+ with a named label for a goto. We can't unroll this case because
+ jumps which go to the named label must be handled differently than
+ jumps to the loop start, and it is impossible to differentiate them
+ in this case. */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Unrolling failure: loop start label is gone\n");
+ return;
+ }
+
+ if (unroll_type == UNROLL_NAIVE
+ && GET_CODE (last_loop_insn) == BARRIER
+ && start_label != JUMP_LABEL (PREV_INSN (last_loop_insn)))
+ {
+ /* In this case, we must copy the jump and barrier, because they will
+ not be converted to jumps to an immediately following label. */
+
+ insert_before = NEXT_INSN (last_loop_insn);
+ copy_end = last_loop_insn;
+ }
+
+ /* Allocate a translation table for the labels and insn numbers.
+ They will be filled in as we copy the insns in the loop. */
+
+ max_labelno = max_label_num ();
+ max_insnno = get_max_uid ();
+
+ map = (struct inline_remap *) alloca (sizeof (struct inline_remap));
+
+ map->integrating = 0;
+
+ /* Allocate the label map. */
+
+ if (max_labelno > 0)
+ {
+ map->label_map = (rtx *) alloca (max_labelno * sizeof (rtx));
+
+ local_label = (char *) alloca (max_labelno);
+ bzero (local_label, max_labelno);
+ }
+ else
+ map->label_map = 0;
+
+ /* Search the loop and mark all local labels, i.e. the ones which have to
+ be distinct labels when copied. For all labels which might be
+ non-local, set their label_map entries to point to themselves.
+ If they happen to be local their label_map entries will be overwritten
+ before the loop body is copied. The label_map entries for local labels
+ will be set to a different value each time the loop body is copied. */
+
+ for (insn = copy_start; insn != loop_end; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ local_label[CODE_LABEL_NUMBER (insn)] = 1;
+ else if (GET_CODE (insn) == JUMP_INSN)
+ {
+ if (JUMP_LABEL (insn))
+ map->label_map[CODE_LABEL_NUMBER (JUMP_LABEL (insn))]
+ = JUMP_LABEL (insn);
+ else if (GET_CODE (PATTERN (insn)) == ADDR_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
+ {
+ rtx pat = PATTERN (insn);
+ int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC;
+ int len = XVECLEN (pat, diff_vec_p);
+ rtx label;
+
+ for (i = 0; i < len; i++)
+ {
+ label = XEXP (XVECEXP (pat, diff_vec_p, i), 0);
+ map->label_map[CODE_LABEL_NUMBER (label)] = label;
+ }
+ }
+ }
+ }
+
+ /* Allocate space for the insn map. */
+
+ map->insn_map = (rtx *) alloca (max_insnno * sizeof (rtx));
+
+ /* Set this to zero, to indicate that we are doing loop unrolling,
+ not function inlining. */
+ map->inline_target = 0;
+
+ /* The register and constant maps depend on the number of registers
+ present, so the final maps can't be created until after
+ find_splittable_regs is called. However, they are needed for
+ preconditioning, so we create temporary maps when preconditioning
+ is performed. */
+
+ /* The preconditioning code may allocate two new pseudo registers. */
+ maxregnum = max_reg_num ();
+
+ /* Allocate and zero out the splittable_regs and addr_combined_regs
+ arrays. These must be zeroed here because they will be used if
+ loop preconditioning is performed, and must be zero for that case.
+
+ It is safe to do this here, since the extra registers created by the
+ preconditioning code and find_splittable_regs will never be used
+ to access the splittable_regs[] and addr_combined_regs[] arrays. */
+
+ splittable_regs = (rtx *) alloca (maxregnum * sizeof (rtx));
+ bzero ((char *) splittable_regs, maxregnum * sizeof (rtx));
+ splittable_regs_updates = (int *) alloca (maxregnum * sizeof (int));
+ bzero ((char *) splittable_regs_updates, maxregnum * sizeof (int));
+ addr_combined_regs
+ = (struct induction **) alloca (maxregnum * sizeof (struct induction *));
+ bzero ((char *) addr_combined_regs, maxregnum * sizeof (struct induction *));
+
+ /* If this loop requires exit tests when unrolled, check to see if we
+ can precondition the loop so as to make the exit tests unnecessary.
+ Just like variable splitting, this is not safe if the loop is entered
+ via a jump to the bottom. Also, we cannot do this if there is no
+ strength reduce info, because precondition_loop_p uses this info. */
+
+ /* Must copy the loop body for preconditioning before the following
+ find_splittable_regs call since that will emit insns which need to
+ be after the preconditioned loop copies, but immediately before the
+ unrolled loop copies. */
+
+ /* Also, it is not safe to split induction variables for the preconditioned
+ copies of the loop body. If we split induction variables, then the code
+ assumes that each induction variable can be represented as a function
+ of its initial value and the loop iteration number. This is not true
+ in this case, because the last preconditioned copy of the loop body
+ could be any iteration from the first up to the `unroll_number-1'th,
+ depending on the initial value of the iteration variable. Therefore
+ we can not split induction variables here, because we can not calculate
+ their value. Hence, this code must occur before find_splittable_regs
+ is called. */
+
+ if (unroll_type == UNROLL_NAIVE && ! splitting_not_safe && strength_reduce_p)
+ {
+ rtx initial_value, final_value, increment;
+
+ if (precondition_loop_p (&initial_value, &final_value, &increment,
+ loop_start, loop_end))
+ {
+ register rtx diff, temp;
+ enum machine_mode mode;
+ rtx *labels;
+ int abs_inc, neg_inc;
+
+ map->reg_map = (rtx *) alloca (maxregnum * sizeof (rtx));
+
+ map->const_equiv_map = (rtx *) alloca (maxregnum * sizeof (rtx));
+ map->const_age_map = (unsigned *) alloca (maxregnum
+ * sizeof (unsigned));
+ map->const_equiv_map_size = maxregnum;
+ global_const_equiv_map = map->const_equiv_map;
+ global_const_equiv_map_size = maxregnum;
+
+ init_reg_map (map, maxregnum);
+
+ /* Limit loop unrolling to 4, since this will make 7 copies of
+ the loop body. */
+ if (unroll_number > 4)
+ unroll_number = 4;
+
+ /* Save the absolute value of the increment, and also whether or
+ not it is negative. */
+ neg_inc = 0;
+ abs_inc = INTVAL (increment);
+ if (abs_inc < 0)
+ {
+ abs_inc = - abs_inc;
+ neg_inc = 1;
+ }
+
+ start_sequence ();
+
+ /* Decide what mode to do these calculations in. Choose the larger
+ of final_value's mode and initial_value's mode, or a full-word if
+ both are constants. */
+ mode = GET_MODE (final_value);
+ if (mode == VOIDmode)
+ {
+ mode = GET_MODE (initial_value);
+ if (mode == VOIDmode)
+ mode = word_mode;
+ }
+ else if (mode != GET_MODE (initial_value)
+ && (GET_MODE_SIZE (mode)
+ < GET_MODE_SIZE (GET_MODE (initial_value))))
+ mode = GET_MODE (initial_value);
+
+ /* Calculate the difference between the final and initial values.
+ Final value may be a (plus (reg x) (const_int 1)) rtx.
+ Let the following cse pass simplify this if initial value is
+ a constant.
+
+ We must copy the final and initial values here to avoid
+ improperly shared rtl. */
+
+ diff = expand_binop (mode, sub_optab, copy_rtx (final_value),
+ copy_rtx (initial_value), NULL_RTX, 0,
+ OPTAB_LIB_WIDEN);
+
+ /* Now calculate (diff % (unroll * abs (increment))) by using an
+ and instruction. */
+ diff = expand_binop (GET_MODE (diff), and_optab, diff,
+ GEN_INT (unroll_number * abs_inc - 1),
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+
+ /* Now emit a sequence of branches to jump to the proper precond
+ loop entry point. */
+
+ labels = (rtx *) alloca (sizeof (rtx) * unroll_number);
+ for (i = 0; i < unroll_number; i++)
+ labels[i] = gen_label_rtx ();
+
+ /* Assuming the unroll_number is 4, and the increment is 2, then
+ for a negative increment: for a positive increment:
+ diff = 0,1 precond 0 diff = 0,7 precond 0
+ diff = 2,3 precond 3 diff = 1,2 precond 1
+ diff = 4,5 precond 2 diff = 3,4 precond 2
+ diff = 6,7 precond 1 diff = 5,6 precond 3 */
+
+ /* We only need to emit (unroll_number - 1) branches here, the
+ last case just falls through to the following code. */
+
+ /* ??? This would give better code if we emitted a tree of branches
+ instead of the current linear list of branches. */
+
+ for (i = 0; i < unroll_number - 1; i++)
+ {
+ int cmp_const;
+
+ /* For negative increments, must invert the constant compared
+ against, except when comparing against zero. */
+ if (i == 0)
+ cmp_const = 0;
+ else if (neg_inc)
+ cmp_const = unroll_number - i;
+ else
+ cmp_const = i;
+
+ emit_cmp_insn (diff, GEN_INT (abs_inc * cmp_const),
+ EQ, NULL_RTX, mode, 0, 0);
+
+ if (i == 0)
+ emit_jump_insn (gen_beq (labels[i]));
+ else if (neg_inc)
+ emit_jump_insn (gen_bge (labels[i]));
+ else
+ emit_jump_insn (gen_ble (labels[i]));
+ JUMP_LABEL (get_last_insn ()) = labels[i];
+ LABEL_NUSES (labels[i])++;
+ }
+
+ /* If the increment is greater than one, then we need another branch,
+ to handle other cases equivalent to 0. */
+
+ /* ??? This should be merged into the code above somehow to help
+ simplify the code here, and reduce the number of branches emitted.
+ For the negative increment case, the branch here could easily
+ be merged with the `0' case branch above. For the positive
+ increment case, it is not clear how this can be simplified. */
+
+ if (abs_inc != 1)
+ {
+ int cmp_const;
+
+ if (neg_inc)
+ cmp_const = abs_inc - 1;
+ else
+ cmp_const = abs_inc * (unroll_number - 1) + 1;
+
+ emit_cmp_insn (diff, GEN_INT (cmp_const), EQ, NULL_RTX,
+ mode, 0, 0);
+
+ if (neg_inc)
+ emit_jump_insn (gen_ble (labels[0]));
+ else
+ emit_jump_insn (gen_bge (labels[0]));
+ JUMP_LABEL (get_last_insn ()) = labels[0];
+ LABEL_NUSES (labels[0])++;
+ }
+
+ sequence = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (sequence, loop_start);
+
+ /* Only the last copy of the loop body here needs the exit
+ test, so set copy_end to exclude the compare/branch here,
+ and then reset it inside the loop when we get to the last
+ copy. */
+
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+ else if (GET_CODE (last_loop_insn) == JUMP_INSN)
+ {
+#ifdef HAVE_cc0
+ /* The immediately preceding insn is a compare which we do not
+ want to copy. */
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+#else
+ /* The immediately preceding insn may not be a compare, so we
+ must copy it. */
+ copy_end = PREV_INSN (last_loop_insn);
+#endif
+ }
+ else
+ abort ();
+
+ for (i = 1; i < unroll_number; i++)
+ {
+ emit_label_after (labels[unroll_number - i],
+ PREV_INSN (loop_start));
+
+ bzero ((char *) map->insn_map, max_insnno * sizeof (rtx));
+ bzero ((char *) map->const_equiv_map, maxregnum * sizeof (rtx));
+ bzero ((char *) map->const_age_map,
+ maxregnum * sizeof (unsigned));
+ map->const_age = 0;
+
+ for (j = 0; j < max_labelno; j++)
+ if (local_label[j])
+ map->label_map[j] = gen_label_rtx ();
+
+ /* The last copy needs the compare/branch insns at the end,
+ so reset copy_end here if the loop ends with a conditional
+ branch. */
+
+ if (i == unroll_number - 1)
+ {
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ copy_end = PREV_INSN (PREV_INSN (last_loop_insn));
+ else
+ copy_end = last_loop_insn;
+ }
+
+ /* None of the copies are the `last_iteration', so just
+ pass zero for that parameter. */
+ copy_loop_body (copy_start, copy_end, map, exit_label, 0,
+ unroll_type, start_label, loop_end,
+ loop_start, copy_end);
+ }
+ emit_label_after (labels[0], PREV_INSN (loop_start));
+
+ if (GET_CODE (last_loop_insn) == BARRIER)
+ {
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+ }
+ else
+ {
+#ifdef HAVE_cc0
+ /* The immediately preceding insn is a compare which we do not
+ want to copy. */
+ insert_before = PREV_INSN (last_loop_insn);
+ copy_end = PREV_INSN (insert_before);
+#else
+ /* The immediately preceding insn may not be a compare, so we
+ must copy it. */
+ insert_before = last_loop_insn;
+ copy_end = PREV_INSN (last_loop_insn);
+#endif
+ }
+
+ /* Set unroll type to MODULO now. */
+ unroll_type = UNROLL_MODULO;
+ loop_preconditioned = 1;
+ }
+ }
+
+ /* If we reach here and the loop type is UNROLL_NAIVE, then don't unroll
+ the loop unless all loops are being unrolled. */
+ if (unroll_type == UNROLL_NAIVE && ! flag_unroll_all_loops)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Unrolling failure: Naive unrolling not being done.\n");
+ return;
+ }
+
+ /* At this point, we are guaranteed to unroll the loop. */
+
+ /* For each biv and giv, determine whether it can be safely split into
+ a different variable for each unrolled copy of the loop body.
+ We precalculate and save this info here, since computing it is
+ expensive.
+
+ Do this before deleting any instructions from the loop, so that
+ back_branch_in_range_p will work correctly. */
+
+ if (splitting_not_safe)
+ temp = 0;
+ else
+ temp = find_splittable_regs (unroll_type, loop_start, loop_end,
+ end_insert_before, unroll_number);
+
+ /* find_splittable_regs may have created some new registers, so must
+ reallocate the reg_map with the new larger size, and must realloc
+ the constant maps also. */
+
+ maxregnum = max_reg_num ();
+ map->reg_map = (rtx *) alloca (maxregnum * sizeof (rtx));
+
+ init_reg_map (map, maxregnum);
+
+ /* Space is needed in some of the map for new registers, so new_maxregnum
+ is an (over)estimate of how many registers will exist at the end. */
+ new_maxregnum = maxregnum + (temp * unroll_number * 2);
+
+ /* Must realloc space for the constant maps, because the number of registers
+ may have changed. */
+
+ map->const_equiv_map = (rtx *) alloca (new_maxregnum * sizeof (rtx));
+ map->const_age_map = (unsigned *) alloca (new_maxregnum * sizeof (unsigned));
+
+ map->const_equiv_map_size = new_maxregnum;
+ global_const_equiv_map = map->const_equiv_map;
+ global_const_equiv_map_size = new_maxregnum;
+
+ /* Search the list of bivs and givs to find ones which need to be remapped
+ when split, and set their reg_map entry appropriately. */
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ if (REGNO (bl->biv->src_reg) != bl->regno)
+ map->reg_map[bl->regno] = bl->biv->src_reg;
+#if 0
+ /* Currently, non-reduced/final-value givs are never split. */
+ for (v = bl->giv; v; v = v->next_iv)
+ if (REGNO (v->src_reg) != bl->regno)
+ map->reg_map[REGNO (v->dest_reg)] = v->src_reg;
+#endif
+ }
+
+ /* If the loop is being partially unrolled, and the iteration variables
+     are being split and renamed for the split, then we must fix up
+ the compare/jump instruction at the end of the loop to refer to the new
+ registers. This compare isn't copied, so the registers used in it
+ will never be replaced if it isn't done here. */
+
+ if (unroll_type == UNROLL_MODULO)
+ {
+ insn = NEXT_INSN (copy_end);
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ PATTERN (insn) = remap_split_bivs (PATTERN (insn));
+ }
+
+  /* Make unroll_number copies of each instruction between copy_start and
+     copy_end, and insert these new instructions before the end of the loop
+     (the original loop instructions are deleted below).  */
+
+ for (i = 0; i < unroll_number; i++)
+ {
+ bzero ((char *) map->insn_map, max_insnno * sizeof (rtx));
+ bzero ((char *) map->const_equiv_map, new_maxregnum * sizeof (rtx));
+ bzero ((char *) map->const_age_map, new_maxregnum * sizeof (unsigned));
+ map->const_age = 0;
+
+ for (j = 0; j < max_labelno; j++)
+ if (local_label[j])
+ map->label_map[j] = gen_label_rtx ();
+
+ /* If loop starts with a branch to the test, then fix it so that
+ it points to the test of the first unrolled copy of the loop. */
+ if (i == 0 && loop_start != copy_start)
+ {
+ insn = PREV_INSN (copy_start);
+ pattern = PATTERN (insn);
+
+ tem = map->label_map[CODE_LABEL_NUMBER
+ (XEXP (SET_SRC (pattern), 0))];
+ SET_SRC (pattern) = gen_rtx (LABEL_REF, VOIDmode, tem);
+
+ /* Set the jump label so that it can be used by later loop unrolling
+ passes. */
+ JUMP_LABEL (insn) = tem;
+ LABEL_NUSES (tem)++;
+ }
+
+ copy_loop_body (copy_start, copy_end, map, exit_label,
+ i == unroll_number - 1, unroll_type, start_label,
+ loop_end, insert_before, insert_before);
+ }
+
+ /* Before deleting any insns, emit a CODE_LABEL immediately after the last
+     insn to be deleted.  This prevents any runaway delete_insn call from
+     deleting more insns than it should, as it always stops at a CODE_LABEL.  */
+
+ /* Delete the compare and branch at the end of the loop if completely
+ unrolling the loop. Deleting the backward branch at the end also
+ deletes the code label at the start of the loop. This is done at
+ the very end to avoid problems with back_branch_in_range_p. */
+
+ if (unroll_type == UNROLL_COMPLETELY)
+ safety_label = emit_label_after (gen_label_rtx (), last_loop_insn);
+ else
+ safety_label = emit_label_after (gen_label_rtx (), copy_end);
+
+ /* Delete all of the original loop instructions. Don't delete the
+ LOOP_BEG note, or the first code label in the loop. */
+
+ insn = NEXT_INSN (copy_start);
+ while (insn != safety_label)
+ {
+ if (insn != start_label)
+ insn = delete_insn (insn);
+ else
+ insn = NEXT_INSN (insn);
+ }
+
+ /* Can now delete the 'safety' label emitted to protect us from runaway
+ delete_insn calls. */
+ if (INSN_DELETED_P (safety_label))
+ abort ();
+ delete_insn (safety_label);
+
+ /* If exit_label exists, emit it after the loop. Doing the emit here
+ forces it to have a higher INSN_UID than any insn in the unrolled loop.
+ This is needed so that mostly_true_jump in reorg.c will treat jumps
+ to this loop end label correctly, i.e. predict that they are usually
+ not taken. */
+ if (exit_label)
+ emit_label_after (exit_label, loop_end);
+}
+
+/* Return true if the loop can be safely, and profitably, preconditioned
+ so that the unrolled copies of the loop body don't need exit tests.
+
+ This only works if final_value, initial_value and increment can be
+ determined, and if increment is a constant power of 2.
+ If increment is not a power of 2, then the preconditioning modulo
+ operation would require a real modulo instead of a boolean AND, and this
+ is not considered `profitable'. */
+
+/* ??? If the loop is known to be executed very many times, or the machine
+ has a very cheap divide instruction, then preconditioning is a win even
+ when the increment is not a power of 2. Use RTX_COST to compute
+ whether divide is cheap. */
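+
+/* For example, an increment of 4 (a power of 2) lets the preconditioning
+   compute the number of peeled copies with a cheap mask, roughly of the
+   form `diff & (abs_inc * unroll_number - 1)', whereas an increment of 3
+   would require a real modulo operation.  */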
+
+static int
+precondition_loop_p (initial_value, final_value, increment, loop_start,
+ loop_end)
+ rtx *initial_value, *final_value, *increment;
+ rtx loop_start, loop_end;
+{
+
+ if (loop_n_iterations > 0)
+ {
+ *initial_value = const0_rtx;
+ *increment = const1_rtx;
+ *final_value = GEN_INT (loop_n_iterations);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Success, number of iterations known, %d.\n",
+ loop_n_iterations);
+ return 1;
+ }
+
+ if (loop_initial_value == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Could not find initial value.\n");
+ return 0;
+ }
+ else if (loop_increment == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Could not find increment value.\n");
+ return 0;
+ }
+ else if (GET_CODE (loop_increment) != CONST_INT)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Increment not a constant.\n");
+ return 0;
+ }
+ else if ((exact_log2 (INTVAL (loop_increment)) < 0)
+ && (exact_log2 (- INTVAL (loop_increment)) < 0))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Increment not a constant power of 2.\n");
+ return 0;
+ }
+
+ /* Unsigned_compare and compare_dir can be ignored here, since they do
+ not matter for preconditioning. */
+
+ if (loop_final_value == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: EQ comparison loop.\n");
+ return 0;
+ }
+
+ /* Must ensure that final_value is invariant, so call invariant_p to
+ check. Before doing so, must check regno against max_reg_before_loop
+ to make sure that the register is in the range covered by invariant_p.
+     If it isn't, then it is most likely a biv/giv, which by definition is
+     not invariant.  */
+ if ((GET_CODE (loop_final_value) == REG
+ && REGNO (loop_final_value) >= max_reg_before_loop)
+ || (GET_CODE (loop_final_value) == PLUS
+ && REGNO (XEXP (loop_final_value, 0)) >= max_reg_before_loop)
+ || ! invariant_p (loop_final_value))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Final value not invariant.\n");
+ return 0;
+ }
+
+ /* Fail for floating point values, since the caller of this function
+ does not have code to deal with them. */
+ if (GET_MODE_CLASS (GET_MODE (loop_final_value)) == MODE_FLOAT
+ || GET_MODE_CLASS (GET_MODE (loop_initial_value)) == MODE_FLOAT)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Floating point final or initial value.\n");
+ return 0;
+ }
+
+ /* Now set initial_value to be the iteration_var, since that may be a
+ simpler expression, and is guaranteed to be correct if all of the
+ above tests succeed.
+
+ We can not use the initial_value as calculated, because it will be
+ one too small for loops of the form "while (i-- > 0)". We can not
+ emit code before the loop_skip_over insns to fix this problem as this
+ will then give a number one too large for loops of the form
+ "while (--i > 0)".
+
+ Note that all loops that reach here are entered at the top, because
+ this function is not called if the loop starts with a jump. */
+
+ /* Fail if loop_iteration_var is not live before loop_start, since we need
+ to test its value in the preconditioning code. */
+
+ if (uid_luid[regno_first_uid[REGNO (loop_iteration_var)]]
+ > INSN_LUID (loop_start))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Preconditioning: Iteration var not live before loop start.\n");
+ return 0;
+ }
+
+ *initial_value = loop_iteration_var;
+ *increment = loop_increment;
+ *final_value = loop_final_value;
+
+ /* Success! */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Preconditioning: Successful.\n");
+ return 1;
+}
+
+
+/* All pseudo-registers must be mapped to themselves.  Two hard registers
+   must be mapped, VIRTUAL_STACK_VARS_REGNUM and
+   VIRTUAL_INCOMING_ARGS_REGNUM, to avoid function-inlining specific
+   conversions of these registers.  All other hard regs can not be mapped
+   because they may be used with different modes.  */
+
+static void
+init_reg_map (map, maxregnum)
+ struct inline_remap *map;
+ int maxregnum;
+{
+ int i;
+
+ for (i = maxregnum - 1; i > LAST_VIRTUAL_REGISTER; i--)
+ map->reg_map[i] = regno_reg_rtx[i];
+ /* Just clear the rest of the entries. */
+ for (i = LAST_VIRTUAL_REGISTER; i >= 0; i--)
+ map->reg_map[i] = 0;
+
+ map->reg_map[VIRTUAL_STACK_VARS_REGNUM]
+ = regno_reg_rtx[VIRTUAL_STACK_VARS_REGNUM];
+ map->reg_map[VIRTUAL_INCOMING_ARGS_REGNUM]
+ = regno_reg_rtx[VIRTUAL_INCOMING_ARGS_REGNUM];
+}
+
+/* Strength-reduction will often emit code for optimized biv/givs which
+ calculates their value in a temporary register, and then copies the result
+   to the iv.  This procedure reconstructs the pattern computing the iv,
+ verifying that all operands are of the proper form.
+
+ The return value is the amount that the giv is incremented by. */
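+
+/* For example, a simple increment appears as
+
+       (set (reg 100) (plus (reg 100) (const_int 4)))
+
+   and the return value is (const_int 4).  When strength reduction has
+   first computed the new value into a temporary, as in
+
+       (set (reg 101) (plus (reg 100) (const_int 4)))
+       (set (reg 100) (reg 101))
+
+   the PLUS is found by backing up one insn.  (The register numbers here
+   are illustrative only.)  */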
+
+static rtx
+calculate_giv_inc (pattern, src_insn, regno)
+ rtx pattern, src_insn;
+ int regno;
+{
+ rtx increment;
+ rtx increment_total = 0;
+ int tries = 0;
+
+ retry:
+ /* Verify that we have an increment insn here. First check for a plus
+ as the set source. */
+ if (GET_CODE (SET_SRC (pattern)) != PLUS)
+ {
+ /* SR sometimes computes the new giv value in a temp, then copies it
+ to the new_reg. */
+ src_insn = PREV_INSN (src_insn);
+ pattern = PATTERN (src_insn);
+ if (GET_CODE (SET_SRC (pattern)) != PLUS)
+ abort ();
+
+ /* The last insn emitted is not needed, so delete it to avoid confusing
+ the second cse pass. This insn sets the giv unnecessarily. */
+ delete_insn (get_last_insn ());
+ }
+
+ /* Verify that we have a constant as the second operand of the plus. */
+ increment = XEXP (SET_SRC (pattern), 1);
+ if (GET_CODE (increment) != CONST_INT)
+ {
+ /* SR sometimes puts the constant in a register, especially if it is
+ too big to be an add immed operand. */
+ src_insn = PREV_INSN (src_insn);
+ increment = SET_SRC (PATTERN (src_insn));
+
+ /* SR may have used LO_SUM to compute the constant if it is too large
+ for a load immed operand. In this case, the constant is in operand
+ one of the LO_SUM rtx. */
+ if (GET_CODE (increment) == LO_SUM)
+ increment = XEXP (increment, 1);
+
+ if (GET_CODE (increment) != CONST_INT)
+ abort ();
+
+      /* The insn loading the constant into a register is no longer needed,
+ so delete it. */
+ delete_insn (get_last_insn ());
+ }
+
+ if (increment_total)
+ increment_total = GEN_INT (INTVAL (increment_total) + INTVAL (increment));
+ else
+ increment_total = increment;
+
+ /* Check that the source register is the same as the register we expected
+ to see as the source. If not, something is seriously wrong. */
+ if (GET_CODE (XEXP (SET_SRC (pattern), 0)) != REG
+ || REGNO (XEXP (SET_SRC (pattern), 0)) != regno)
+ {
+      /* Some machines (e.g. the romp) may emit two add instructions for
+	 certain constants, so let's try looking for another add immediately
+ before this one if we have only seen one add insn so far. */
+
+ if (tries == 0)
+ {
+ tries++;
+
+ src_insn = PREV_INSN (src_insn);
+ pattern = PATTERN (src_insn);
+
+ delete_insn (get_last_insn ());
+
+ goto retry;
+ }
+
+ abort ();
+ }
+
+ return increment_total;
+}
+
+/* Copy REG_NOTES, except for insn references, because not all insn_map
+ entries are valid yet. We do need to copy registers now though, because
+ the reg_map entries can change during copying. */
+
+static rtx
+initial_reg_note_copy (notes, map)
+ rtx notes;
+ struct inline_remap *map;
+{
+ rtx copy;
+
+ if (notes == 0)
+ return 0;
+
+ copy = rtx_alloc (GET_CODE (notes));
+ PUT_MODE (copy, GET_MODE (notes));
+
+ if (GET_CODE (notes) == EXPR_LIST)
+ XEXP (copy, 0) = copy_rtx_and_substitute (XEXP (notes, 0), map);
+ else if (GET_CODE (notes) == INSN_LIST)
+ /* Don't substitute for these yet. */
+ XEXP (copy, 0) = XEXP (notes, 0);
+ else
+ abort ();
+
+ XEXP (copy, 1) = initial_reg_note_copy (XEXP (notes, 1), map);
+
+ return copy;
+}
+
+/* Fixup insn references in copied REG_NOTES. */
+
+static void
+final_reg_note_copy (notes, map)
+ rtx notes;
+ struct inline_remap *map;
+{
+ rtx note;
+
+ for (note = notes; note; note = XEXP (note, 1))
+ if (GET_CODE (note) == INSN_LIST)
+ XEXP (note, 0) = map->insn_map[INSN_UID (XEXP (note, 0))];
+}
+
+/* Copy each instruction in the loop, substituting from map as appropriate.
+ This is very similar to a loop in expand_inline_function. */
+
+static void
+copy_loop_body (copy_start, copy_end, map, exit_label, last_iteration,
+ unroll_type, start_label, loop_end, insert_before,
+ copy_notes_from)
+ rtx copy_start, copy_end;
+ struct inline_remap *map;
+ rtx exit_label;
+ int last_iteration;
+ enum unroll_types unroll_type;
+ rtx start_label, loop_end, insert_before, copy_notes_from;
+{
+ rtx insn, pattern;
+ rtx tem, copy;
+ int dest_reg_was_split, i;
+ rtx cc0_insn = 0;
+ rtx final_label = 0;
+ rtx giv_inc, giv_dest_reg, giv_src_reg;
+
+ /* If this isn't the last iteration, then map any references to the
+ start_label to final_label. Final label will then be emitted immediately
+ after the end of this loop body if it was ever used.
+
+ If this is the last iteration, then map references to the start_label
+ to itself. */
+ if (! last_iteration)
+ {
+ final_label = gen_label_rtx ();
+ map->label_map[CODE_LABEL_NUMBER (start_label)] = final_label;
+ }
+ else
+ map->label_map[CODE_LABEL_NUMBER (start_label)] = start_label;
+
+ start_sequence ();
+
+ insn = copy_start;
+ do
+ {
+ insn = NEXT_INSN (insn);
+
+ map->orig_asm_operands_vector = 0;
+
+ switch (GET_CODE (insn))
+ {
+ case INSN:
+ pattern = PATTERN (insn);
+ copy = 0;
+ giv_inc = 0;
+
+ /* Check to see if this is a giv that has been combined with
+ some split address givs. (Combined in the sense that
+ `combine_givs' in loop.c has put two givs in the same register.)
+ In this case, we must search all givs based on the same biv to
+ find the address givs. Then split the address givs.
+ Do this before splitting the giv, since that may map the
+ SET_DEST to a new register. */
+
+ if (GET_CODE (pattern) == SET
+ && GET_CODE (SET_DEST (pattern)) == REG
+ && addr_combined_regs[REGNO (SET_DEST (pattern))])
+ {
+ struct iv_class *bl;
+ struct induction *v, *tv;
+ int regno = REGNO (SET_DEST (pattern));
+
+ v = addr_combined_regs[REGNO (SET_DEST (pattern))];
+ bl = reg_biv_class[REGNO (v->src_reg)];
+
+ /* Although the giv_inc amount is not needed here, we must call
+ calculate_giv_inc here since it might try to delete the
+ last insn emitted. If we wait until later to call it,
+ we might accidentally delete insns generated immediately
+ below by emit_unrolled_add. */
+
+ giv_inc = calculate_giv_inc (pattern, insn, regno);
+
+ /* Now find all address giv's that were combined with this
+ giv 'v'. */
+ for (tv = bl->giv; tv; tv = tv->next_iv)
+ if (tv->giv_type == DEST_ADDR && tv->same == v)
+ {
+ int this_giv_inc = INTVAL (giv_inc);
+
+ /* Scale this_giv_inc if the multiplicative factors of
+ the two givs are different. */
+ if (tv->mult_val != v->mult_val)
+ this_giv_inc = (this_giv_inc / INTVAL (v->mult_val)
+ * INTVAL (tv->mult_val));
+
+ tv->dest_reg = plus_constant (tv->dest_reg, this_giv_inc);
+ *tv->location = tv->dest_reg;
+
+ if (last_iteration && unroll_type != UNROLL_COMPLETELY)
+ {
+ /* Must emit an insn to increment the split address
+ giv. Add in the const_adjust field in case there
+ was a constant eliminated from the address. */
+ rtx value, dest_reg;
+
+ /* tv->dest_reg will be either a bare register,
+ or else a register plus a constant. */
+ if (GET_CODE (tv->dest_reg) == REG)
+ dest_reg = tv->dest_reg;
+ else
+ dest_reg = XEXP (tv->dest_reg, 0);
+
+ /* Check for shared address givs, and avoid
+			 incrementing the shared pseudo reg more than
+ once. */
+ if (! (tv != v && tv->insn == v->insn
+ && tv->new_reg == v->new_reg))
+ {
+ /* tv->dest_reg may actually be a (PLUS (REG)
+ (CONST)) here, so we must call plus_constant
+ to add the const_adjust amount before calling
+ emit_unrolled_add below. */
+ value = plus_constant (tv->dest_reg,
+ tv->const_adjust);
+
+ /* The constant could be too large for an add
+ immediate, so can't directly emit an insn
+ here. */
+ emit_unrolled_add (dest_reg, XEXP (value, 0),
+ XEXP (value, 1));
+ }
+
+ /* Reset the giv to be just the register again, in case
+ it is used after the set we have just emitted.
+ We must subtract the const_adjust factor added in
+ above. */
+ tv->dest_reg = plus_constant (dest_reg,
+ - tv->const_adjust);
+ *tv->location = tv->dest_reg;
+ }
+ }
+ }
+
+ /* If this is a setting of a splittable variable, then determine
+ how to split the variable, create a new set based on this split,
+ and set up the reg_map so that later uses of the variable will
+ use the new split variable. */
+
+ dest_reg_was_split = 0;
+
+ if (GET_CODE (pattern) == SET
+ && GET_CODE (SET_DEST (pattern)) == REG
+ && splittable_regs[REGNO (SET_DEST (pattern))])
+ {
+ int regno = REGNO (SET_DEST (pattern));
+
+ dest_reg_was_split = 1;
+
+ /* Compute the increment value for the giv, if it wasn't
+ already computed above. */
+
+ if (giv_inc == 0)
+ giv_inc = calculate_giv_inc (pattern, insn, regno);
+ giv_dest_reg = SET_DEST (pattern);
+ giv_src_reg = SET_DEST (pattern);
+
+ if (unroll_type == UNROLL_COMPLETELY)
+ {
+ /* Completely unrolling the loop. Set the induction
+ variable to a known constant value. */
+
+ /* The value in splittable_regs may be an invariant
+ value, so we must use plus_constant here. */
+ splittable_regs[regno]
+ = plus_constant (splittable_regs[regno], INTVAL (giv_inc));
+
+ if (GET_CODE (splittable_regs[regno]) == PLUS)
+ {
+ giv_src_reg = XEXP (splittable_regs[regno], 0);
+ giv_inc = XEXP (splittable_regs[regno], 1);
+ }
+ else
+ {
+ /* The splittable_regs value must be a REG or a
+ CONST_INT, so put the entire value in the giv_src_reg
+ variable. */
+ giv_src_reg = splittable_regs[regno];
+ giv_inc = const0_rtx;
+ }
+ }
+ else
+ {
+ /* Partially unrolling loop. Create a new pseudo
+ register for the iteration variable, and set it to
+ be a constant plus the original register. Except
+ on the last iteration, when the result has to
+ go back into the original iteration var register. */
+
+ /* Handle bivs which must be mapped to a new register
+ when split. This happens for bivs which need their
+ final value set before loop entry. The new register
+ for the biv was stored in the biv's first struct
+ induction entry by find_splittable_regs. */
+
+ if (regno < max_reg_before_loop
+ && reg_iv_type[regno] == BASIC_INDUCT)
+ {
+ giv_src_reg = reg_biv_class[regno]->biv->src_reg;
+ giv_dest_reg = giv_src_reg;
+ }
+
+#if 0
+ /* If non-reduced/final-value givs were split, then
+ this would have to remap those givs also. See
+ find_splittable_regs. */
+#endif
+
+ splittable_regs[regno]
+ = GEN_INT (INTVAL (giv_inc)
+ + INTVAL (splittable_regs[regno]));
+ giv_inc = splittable_regs[regno];
+
+ /* Now split the induction variable by changing the dest
+ of this insn to a new register, and setting its
+ reg_map entry to point to this new register.
+
+ If this is the last iteration, and this is the last insn
+ that will update the iv, then reuse the original dest,
+ to ensure that the iv will have the proper value when
+ the loop exits or repeats.
+
+ Using splittable_regs_updates here like this is safe,
+ because it can only be greater than one if all
+ instructions modifying the iv are always executed in
+ order. */
+
+ if (! last_iteration
+ || (splittable_regs_updates[regno]-- != 1))
+ {
+ tem = gen_reg_rtx (GET_MODE (giv_src_reg));
+ giv_dest_reg = tem;
+ map->reg_map[regno] = tem;
+ }
+ else
+ map->reg_map[regno] = giv_src_reg;
+ }
+
+ /* The constant being added could be too large for an add
+ immediate, so can't directly emit an insn here. */
+ emit_unrolled_add (giv_dest_reg, giv_src_reg, giv_inc);
+ copy = get_last_insn ();
+ pattern = PATTERN (copy);
+ }
+ else
+ {
+ pattern = copy_rtx_and_substitute (pattern, map);
+ copy = emit_insn (pattern);
+ }
+ REG_NOTES (copy) = initial_reg_note_copy (REG_NOTES (insn), map);
+
+#ifdef HAVE_cc0
+ /* If this insn is setting CC0, it may need to look at
+ the insn that uses CC0 to see what type of insn it is.
+ In that case, the call to recog via validate_change will
+ fail. So don't substitute constants here. Instead,
+ do it when we emit the following insn.
+
+ For example, see the pyr.md file. That machine has signed and
+ unsigned compares. The compare patterns must check the
+	     following branch insn to see what kind of compare to
+ emit.
+
+ If the previous insn set CC0, substitute constants on it as
+ well. */
+ if (sets_cc0_p (copy) != 0)
+ cc0_insn = copy;
+ else
+ {
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+ try_constants (copy, map);
+ }
+#else
+ try_constants (copy, map);
+#endif
+
+ /* Make split induction variable constants `permanent' since we
+ know there are no backward branches across iteration variable
+ settings which would invalidate this. */
+ if (dest_reg_was_split)
+ {
+ int regno = REGNO (SET_DEST (pattern));
+
+ if (regno < map->const_equiv_map_size
+ && map->const_age_map[regno] == map->const_age)
+ map->const_age_map[regno] = -1;
+ }
+ break;
+
+ case JUMP_INSN:
+ pattern = copy_rtx_and_substitute (PATTERN (insn), map);
+ copy = emit_jump_insn (pattern);
+ REG_NOTES (copy) = initial_reg_note_copy (REG_NOTES (insn), map);
+
+ if (JUMP_LABEL (insn) == start_label && insn == copy_end
+ && ! last_iteration)
+ {
+ /* This is a branch to the beginning of the loop; this is the
+ last insn being copied; and this is not the last iteration.
+ In this case, we want to change the original fall through
+ case to be a branch past the end of the loop, and the
+ original jump label case to fall_through. */
+
+ if (! invert_exp (pattern, copy)
+ || ! redirect_exp (&pattern,
+ map->label_map[CODE_LABEL_NUMBER
+ (JUMP_LABEL (insn))],
+ exit_label, copy))
+ abort ();
+ }
+
+#ifdef HAVE_cc0
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+#endif
+ try_constants (copy, map);
+
+ /* Set the jump label of COPY correctly to avoid problems with
+ later passes of unroll_loop, if INSN had jump label set. */
+ if (JUMP_LABEL (insn))
+ {
+ rtx label = 0;
+
+ /* Can't use the label_map for every insn, since this may be
+ the backward branch, and hence the label was not mapped. */
+ if (GET_CODE (pattern) == SET)
+ {
+ tem = SET_SRC (pattern);
+ if (GET_CODE (tem) == LABEL_REF)
+ label = XEXP (tem, 0);
+ else if (GET_CODE (tem) == IF_THEN_ELSE)
+ {
+ if (XEXP (tem, 1) != pc_rtx)
+ label = XEXP (XEXP (tem, 1), 0);
+ else
+ label = XEXP (XEXP (tem, 2), 0);
+ }
+ }
+
+ if (label && GET_CODE (label) == CODE_LABEL)
+ JUMP_LABEL (copy) = label;
+ else
+ {
+ /* An unrecognizable jump insn, probably the entry jump
+ for a switch statement. This label must have been mapped,
+ so just use the label_map to get the new jump label. */
+ JUMP_LABEL (copy) = map->label_map[CODE_LABEL_NUMBER
+ (JUMP_LABEL (insn))];
+ }
+
+	      /* If this is a non-local jump, then we must increase the label
+ use count so that the label will not be deleted when the
+ original jump is deleted. */
+ LABEL_NUSES (JUMP_LABEL (copy))++;
+ }
+ else if (GET_CODE (PATTERN (copy)) == ADDR_VEC
+ || GET_CODE (PATTERN (copy)) == ADDR_DIFF_VEC)
+ {
+ rtx pat = PATTERN (copy);
+ int diff_vec_p = GET_CODE (pat) == ADDR_DIFF_VEC;
+ int len = XVECLEN (pat, diff_vec_p);
+ int i;
+
+ for (i = 0; i < len; i++)
+ LABEL_NUSES (XEXP (XVECEXP (pat, diff_vec_p, i), 0))++;
+ }
+
+	  /* If this used to be a conditional jump insn whose branch
+ direction is now known, we must do something special. */
+ if (condjump_p (insn) && !simplejump_p (insn) && map->last_pc_value)
+ {
+#ifdef HAVE_cc0
+ /* The previous insn set cc0 for us. So delete it. */
+ delete_insn (PREV_INSN (copy));
+#endif
+
+ /* If this is now a no-op, delete it. */
+ if (map->last_pc_value == pc_rtx)
+ {
+ /* Don't let delete_insn delete the label referenced here,
+		     because we might need it later for some other
+ instruction in the loop. */
+ if (JUMP_LABEL (copy))
+ LABEL_NUSES (JUMP_LABEL (copy))++;
+ delete_insn (copy);
+ if (JUMP_LABEL (copy))
+ LABEL_NUSES (JUMP_LABEL (copy))--;
+ copy = 0;
+ }
+ else
+	       /* Otherwise, this is an unconditional jump, so we must put a
+ BARRIER after it. We could do some dead code elimination
+ here, but jump.c will do it just as well. */
+ emit_barrier ();
+ }
+ break;
+
+ case CALL_INSN:
+ pattern = copy_rtx_and_substitute (PATTERN (insn), map);
+ copy = emit_call_insn (pattern);
+ REG_NOTES (copy) = initial_reg_note_copy (REG_NOTES (insn), map);
+
+ /* Because the USAGE information potentially contains objects other
+ than hard registers, we need to copy it. */
+ CALL_INSN_FUNCTION_USAGE (copy) =
+ copy_rtx_and_substitute (CALL_INSN_FUNCTION_USAGE (insn), map);
+
+#ifdef HAVE_cc0
+ if (cc0_insn)
+ try_constants (cc0_insn, map);
+ cc0_insn = 0;
+#endif
+ try_constants (copy, map);
+
+ /* Be lazy and assume CALL_INSNs clobber all hard registers. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ map->const_equiv_map[i] = 0;
+ break;
+
+ case CODE_LABEL:
+ /* If this is the loop start label, then we don't need to emit a
+ copy of this label since no one will use it. */
+
+ if (insn != start_label)
+ {
+ copy = emit_label (map->label_map[CODE_LABEL_NUMBER (insn)]);
+ map->const_age++;
+ }
+ break;
+
+ case BARRIER:
+ copy = emit_barrier ();
+ break;
+
+ case NOTE:
+ /* VTOP notes are valid only before the loop exit test. If placed
+ anywhere else, loop may generate bad code. */
+
+ if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED
+ && (NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_VTOP
+ || (last_iteration && unroll_type != UNROLL_COMPLETELY)))
+ copy = emit_note (NOTE_SOURCE_FILE (insn),
+ NOTE_LINE_NUMBER (insn));
+ else
+ copy = 0;
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+
+ map->insn_map[INSN_UID (insn)] = copy;
+ }
+ while (insn != copy_end);
+
+  /* Now finish copying the REG_NOTES.  */
+ insn = copy_start;
+ do
+ {
+ insn = NEXT_INSN (insn);
+ if ((GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN
+ || GET_CODE (insn) == CALL_INSN)
+ && map->insn_map[INSN_UID (insn)])
+ final_reg_note_copy (REG_NOTES (map->insn_map[INSN_UID (insn)]), map);
+ }
+ while (insn != copy_end);
+
+ /* There may be notes between copy_notes_from and loop_end. Emit a copy of
+ each of these notes here, since there may be some important ones, such as
+ NOTE_INSN_BLOCK_END notes, in this group. We don't do this on the last
+ iteration, because the original notes won't be deleted.
+
+     We can't use insert_before here, because when called from preconditioning,
+ insert_before points before the loop. We can't use copy_end, because
+ there may be insns already inserted after it (which we don't want to
+ copy) when not from preconditioning code. */
+
+ if (! last_iteration)
+ {
+ for (insn = copy_notes_from; insn != loop_end; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED)
+ emit_note (NOTE_SOURCE_FILE (insn), NOTE_LINE_NUMBER (insn));
+ }
+ }
+
+ if (final_label && LABEL_NUSES (final_label) > 0)
+ emit_label (final_label);
+
+ tem = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (tem, insert_before);
+}
+
+/* Emit an insn, using expand_binop to ensure that a valid insn is
+   emitted.  This will correctly handle the case where the increment value
+   won't fit in the immediate field of a PLUS insn.  */
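+
+/* For example, on a target whose add-immediate field is too narrow for a
+   constant such as 0x12345 (the target details here are hypothetical),
+   expand_binop will first load the constant into a scratch register and
+   then emit a register-register add.  */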
+
+void
+emit_unrolled_add (dest_reg, src_reg, increment)
+ rtx dest_reg, src_reg, increment;
+{
+ rtx result;
+
+ result = expand_binop (GET_MODE (dest_reg), add_optab, src_reg, increment,
+ dest_reg, 0, OPTAB_LIB_WIDEN);
+
+ if (dest_reg != result)
+ emit_move_insn (dest_reg, result);
+}
+
+/* Searches the insns between INSN and LOOP_END. Returns 1 if there
+ is a backward branch in that range that branches to somewhere between
+ LOOP_START and INSN. Returns 0 otherwise. */
+
+/* ??? This is a quadratic algorithm.  It could be rewritten to be linear.
+ In practice, this is not a problem, because this function is seldom called,
+ and uses a negligible amount of CPU time on average. */
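+
+/* Schematically, given
+
+       LOOP_START ... L: ... INSN ... (jump L) ... LOOP_END
+
+   this returns 1, since the jump following INSN targets the label L,
+   which lies between LOOP_START and INSN.  */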
+
+static int
+back_branch_in_range_p (insn, loop_start, loop_end)
+ rtx insn;
+ rtx loop_start, loop_end;
+{
+ rtx p, q, target_insn;
+
+ /* Stop before we get to the backward branch at the end of the loop. */
+ loop_end = prev_nonnote_insn (loop_end);
+ if (GET_CODE (loop_end) == BARRIER)
+ loop_end = PREV_INSN (loop_end);
+
+  /* Check in case insn has been deleted; search forward for the first
+     non-deleted insn following it.  */
+ while (INSN_DELETED_P (insn))
+ insn = NEXT_INSN (insn);
+
+ /* Check for the case where insn is the last insn in the loop. */
+ if (insn == loop_end)
+ return 0;
+
+ for (p = NEXT_INSN (insn); p != loop_end; p = NEXT_INSN (p))
+ {
+ if (GET_CODE (p) == JUMP_INSN)
+ {
+ target_insn = JUMP_LABEL (p);
+
+ /* Search from loop_start to insn, to see if one of them is
+ the target_insn. We can't use INSN_LUID comparisons here,
+ since insn may not have an LUID entry. */
+ for (q = loop_start; q != insn; q = NEXT_INSN (q))
+ if (q == target_insn)
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Try to generate the simplest rtx for the expression
+ (PLUS (MULT mult1 mult2) add1). This is used to calculate the initial
+   value of givs.  */
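+
+/* For example, fold_rtx_mult_add (GEN_INT (2), GEN_INT (3), GEN_INT (5),
+   SImode) simplifies all the way to (const_int 11), while a non-constant
+   mult1 yields an rtx of the form (plus (mult mult1 mult2) add1).  */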
+
+static rtx
+fold_rtx_mult_add (mult1, mult2, add1, mode)
+ rtx mult1, mult2, add1;
+ enum machine_mode mode;
+{
+ rtx temp, mult_res;
+ rtx result;
+
+ /* The modes must all be the same. This should always be true. For now,
+ check to make sure. */
+ if ((GET_MODE (mult1) != mode && GET_MODE (mult1) != VOIDmode)
+ || (GET_MODE (mult2) != mode && GET_MODE (mult2) != VOIDmode)
+ || (GET_MODE (add1) != mode && GET_MODE (add1) != VOIDmode))
+ abort ();
+
+  /* Ensure that if at least one of mult1/mult2 is constant, then mult2
+ will be a constant. */
+ if (GET_CODE (mult1) == CONST_INT)
+ {
+ temp = mult2;
+ mult2 = mult1;
+ mult1 = temp;
+ }
+
+ mult_res = simplify_binary_operation (MULT, mode, mult1, mult2);
+ if (! mult_res)
+ mult_res = gen_rtx (MULT, mode, mult1, mult2);
+
+ /* Again, put the constant second. */
+ if (GET_CODE (add1) == CONST_INT)
+ {
+ temp = add1;
+ add1 = mult_res;
+ mult_res = temp;
+ }
+
+ result = simplify_binary_operation (PLUS, mode, add1, mult_res);
+ if (! result)
+ result = gen_rtx (PLUS, mode, add1, mult_res);
+
+ return result;
+}
+
+/* Searches the list of induction structs for the biv BL, to try to calculate
+ the total increment value for one iteration of the loop as a constant.
+
+ Returns the increment value as an rtx, simplified as much as possible,
+ if it can be calculated. Otherwise, returns 0. */
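+
+/* For example, a biv that is incremented twice each iteration, by 4 and
+   then by 8, has a total increment of (const_int 12); if either increment
+   were only conditionally executed, 0 would be returned instead.  */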
+
+rtx
+biv_total_increment (bl, loop_start, loop_end)
+ struct iv_class *bl;
+ rtx loop_start, loop_end;
+{
+ struct induction *v;
+ rtx result;
+
+ /* For increment, must check every instruction that sets it. Each
+ instruction must be executed only once each time through the loop.
+     To verify this, we check that the insn is always executed, and that
+ there are no backward branches after the insn that branch to before it.
+ Also, the insn must have a mult_val of one (to make sure it really is
+ an increment). */
+
+ result = const0_rtx;
+ for (v = bl->biv; v; v = v->next_iv)
+ {
+ if (v->always_computable && v->mult_val == const1_rtx
+ && ! back_branch_in_range_p (v->insn, loop_start, loop_end))
+ result = fold_rtx_mult_add (result, const1_rtx, v->add_val, v->mode);
+ else
+ return 0;
+ }
+
+ return result;
+}
+
+/* Determine the initial value of the iteration variable, and the amount
+ that it is incremented each loop. Use the tables constructed by
+ the strength reduction pass to calculate these values.
+
+ Initial_value and/or increment are set to zero if their values could not
+ be calculated. */
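+
+/* For example, for a loop controlled by `i = 10; ... i--', the initial
+   value found is (const_int 10) and the increment (const_int -1),
+   provided strength reduction has recorded `i' as a biv.  */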
+
+static void
+iteration_info (iteration_var, initial_value, increment, loop_start, loop_end)
+ rtx iteration_var, *initial_value, *increment;
+ rtx loop_start, loop_end;
+{
+ struct iv_class *bl;
+ struct induction *v, *b;
+
+ /* Clear the result values, in case no answer can be found. */
+ *initial_value = 0;
+ *increment = 0;
+
+ /* The iteration variable can be either a giv or a biv. Check to see
+ which it is, and compute the variable's initial value, and increment
+ value if possible. */
+
+ /* If this is a new register, can't handle it since we don't have any
+ reg_iv_type entry for it. */
+ if (REGNO (iteration_var) >= max_reg_before_loop)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: No reg_iv_type entry for iteration var.\n");
+ return;
+ }
+ /* Reject iteration variables larger than the host long size, since they
+ could result in a number of iterations greater than the range of our
+ `unsigned long' variable loop_n_iterations. */
+ else if (GET_MODE_BITSIZE (GET_MODE (iteration_var)) > HOST_BITS_PER_LONG)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Iteration var rejected because mode larger than host long.\n");
+ return;
+ }
+ else if (GET_MODE_CLASS (GET_MODE (iteration_var)) != MODE_INT)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Iteration var not an integer.\n");
+ return;
+ }
+ else if (reg_iv_type[REGNO (iteration_var)] == BASIC_INDUCT)
+ {
+ /* Grab initial value, only useful if it is a constant. */
+ bl = reg_biv_class[REGNO (iteration_var)];
+ *initial_value = bl->initial_value;
+
+ *increment = biv_total_increment (bl, loop_start, loop_end);
+ }
+ else if (reg_iv_type[REGNO (iteration_var)] == GENERAL_INDUCT)
+ {
+#if 1
+ /* ??? The code below does not work because the incorrect number of
+ iterations is calculated when the biv is incremented after the giv
+ is set (which is the usual case). This can probably be accounted
+ for by biasing the initial_value by subtracting the amount of the
+ increment that occurs between the giv set and the giv test. However,
+ a giv as an iterator is very rare, so it does not seem worthwhile
+ to handle this. */
+ /* ??? An example failure is: i = 6; do {;} while (i++ < 9). */
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Giv iterators are not handled.\n");
+ return;
+#else
+ /* Initial value is mult_val times the biv's initial value plus
+ add_val. Only useful if it is a constant. */
+ v = reg_iv_info[REGNO (iteration_var)];
+ bl = reg_biv_class[REGNO (v->src_reg)];
+ *initial_value = fold_rtx_mult_add (v->mult_val, bl->initial_value,
+ v->add_val, v->mode);
+
+ /* Increment value is mult_val times the increment value of the biv. */
+
+ *increment = biv_total_increment (bl, loop_start, loop_end);
+ if (*increment)
+ *increment = fold_rtx_mult_add (v->mult_val, *increment, const0_rtx,
+ v->mode);
+#endif
+ }
+ else
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Not basic or general induction var.\n");
+ return;
+ }
+}
+
+/* Calculate the approximate final value of the iteration variable
+   which has a loop exit test with code COMPARISON_CODE and comparison value
+ of COMPARISON_VALUE. Also returns an indication of whether the comparison
+ was signed or unsigned, and the direction of the comparison. This info is
+ needed to calculate the number of loop iterations. */
+
+static rtx
+approx_final_value (comparison_code, comparison_value, unsigned_p, compare_dir)
+ enum rtx_code comparison_code;
+ rtx comparison_value;
+ int *unsigned_p;
+ int *compare_dir;
+{
+ /* Calculate the final value of the induction variable.
+ The exact final value depends on the branch operator, and increment sign.
+ This is only an approximate value. It will be wrong if the iteration
+ variable is not incremented by one each time through the loop, and
+     (approx final value - start value) % increment != 0.  */
+
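+  /* For example, for `i < n' (LT) the approximate final value is n itself,
+     while for `i <= n' (LE) it is n + 1.  */
+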
+ *unsigned_p = 0;
+ switch (comparison_code)
+ {
+ case LEU:
+ *unsigned_p = 1;
+ case LE:
+ *compare_dir = 1;
+ return plus_constant (comparison_value, 1);
+ case GEU:
+ *unsigned_p = 1;
+ case GE:
+ *compare_dir = -1;
+ return plus_constant (comparison_value, -1);
+ case EQ:
+ /* Can not calculate a final value for this case. */
+ *compare_dir = 0;
+ return 0;
+ case LTU:
+ *unsigned_p = 1;
+ case LT:
+ *compare_dir = 1;
+ return comparison_value;
+ case GTU:
+ *unsigned_p = 1;
+ case GT:
+ *compare_dir = -1;
+ return comparison_value;
+ case NE:
+ *compare_dir = 0;
+ return comparison_value;
+ default:
+ abort ();
+ }
+}
+
+/* For each biv and giv, determine whether it can be safely split into
+ a different variable for each unrolled copy of the loop body. If it
+ is safe to split, then indicate that by saving some useful info
+ in the splittable_regs array.
+
+ If the loop is being completely unrolled, then splittable_regs will hold
+ the current value of the induction variable while the loop is unrolled.
+ It must be set to the initial value of the induction variable here.
+ Otherwise, splittable_regs will hold the difference between the current
+ value of the induction variable and the value the induction variable had
+ at the top of the loop. It must be set to the value 0 here.
+
+ Returns the total number of instructions that set registers that are
+ splittable. */
+
+/* ?? If the loop is only unrolled twice, then most of the restrictions to
+ constant values are unnecessary, since we can easily calculate increment
+ values in this case even if nothing is constant. The increment value
+ should not involve a multiply however. */
+
+/* ?? Even if the biv/giv increment values aren't constant, it may still
+ be beneficial to split the variable if the loop is only unrolled a few
+ times, since multiplies by small integers (1,2,3,4) are very cheap. */
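+
+/* For example (illustrative), completely unrolling
+
+       for (i = 0; i < 4; i++)
+         a[i] = 0;
+
+   leaves the splittable_regs entry for `i' holding 0, 1, 2 and 3 in the
+   successive copies, so each copy's use of `i' folds to a constant.  With
+   partial unrolling, the entry instead holds each copy's offset from the
+   value of `i' at the top of the loop.  */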
+
+static int
+find_splittable_regs (unroll_type, loop_start, loop_end, end_insert_before,
+ unroll_number)
+ enum unroll_types unroll_type;
+ rtx loop_start, loop_end;
+ rtx end_insert_before;
+ int unroll_number;
+{
+ struct iv_class *bl;
+ struct induction *v;
+ rtx increment, tem;
+ rtx biv_final_value;
+ int biv_splittable;
+ int result = 0;
+
+ for (bl = loop_iv_list; bl; bl = bl->next)
+ {
+ /* Biv_total_increment must return a constant value,
+ otherwise we can not calculate the split values. */
+
+ increment = biv_total_increment (bl, loop_start, loop_end);
+ if (! increment || GET_CODE (increment) != CONST_INT)
+ continue;
+
+ /* The loop must be unrolled completely, or else have a known number
+ of iterations and only one exit, or else the biv must be dead
+ outside the loop, or else the final value must be known. Otherwise,
+ it is unsafe to split the biv since it may not have the proper
+ value on loop exit. */
+
+ /* loop_number_exit_labels is non-zero if the loop has an exit other than
+ a fall through at the end. */
+
+ biv_splittable = 1;
+ biv_final_value = 0;
+ if (unroll_type != UNROLL_COMPLETELY
+ && (loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]]
+ || unroll_type == UNROLL_NAIVE)
+ && (uid_luid[regno_last_uid[bl->regno]] >= INSN_LUID (loop_end)
+ || ! bl->init_insn
+ || INSN_UID (bl->init_insn) >= max_uid_for_loop
+ || (uid_luid[regno_first_uid[bl->regno]]
+ < INSN_LUID (bl->init_insn))
+ || reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
+ && ! (biv_final_value = final_biv_value (bl, loop_start, loop_end)))
+ biv_splittable = 0;
+
+ /* If any of the insns setting the BIV don't do so with a simple
+ PLUS, we don't know how to split it. */
+ for (v = bl->biv; biv_splittable && v; v = v->next_iv)
+ if ((tem = single_set (v->insn)) == 0
+ || GET_CODE (SET_DEST (tem)) != REG
+ || REGNO (SET_DEST (tem)) != bl->regno
+ || GET_CODE (SET_SRC (tem)) != PLUS)
+ biv_splittable = 0;
+
+ /* If final value is non-zero, then must emit an instruction which sets
+ the value of the biv to the proper value. This is done after
+ handling all of the givs, since some of them may need to use the
+ biv's value in their initialization code. */
+
+ /* This biv is splittable. If completely unrolling the loop, save
+ the biv's initial value. Otherwise, save the constant zero. */
+
+ if (biv_splittable == 1)
+ {
+ if (unroll_type == UNROLL_COMPLETELY)
+ {
+ /* If the initial value of the biv is itself (i.e. it is too
+ complicated for strength_reduce to compute), or is a hard
+ register, then we must create a new pseudo reg to hold the
+ initial value of the biv. */
+
+ if (GET_CODE (bl->initial_value) == REG
+ && (REGNO (bl->initial_value) == bl->regno
+ || REGNO (bl->initial_value) < FIRST_PSEUDO_REGISTER))
+ {
+ rtx tem = gen_reg_rtx (bl->biv->mode);
+
+ emit_insn_before (gen_move_insn (tem, bl->biv->src_reg),
+ loop_start);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Biv %d initial value remapped to %d.\n",
+ bl->regno, REGNO (tem));
+
+ splittable_regs[bl->regno] = tem;
+ }
+ else
+ splittable_regs[bl->regno] = bl->initial_value;
+ }
+ else
+ splittable_regs[bl->regno] = const0_rtx;
+
+ /* Save the number of instructions that modify the biv, so that
+ we can treat the last one specially. */
+
+ splittable_regs_updates[bl->regno] = bl->biv_count;
+ result += bl->biv_count;
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Biv %d safe to split.\n", bl->regno);
+ }
+
+ /* Check every giv that depends on this biv to see whether it is
+ splittable also. Even if the biv isn't splittable, givs which
+ depend on it may be splittable if the biv is live outside the
+ loop, and the givs aren't. */
+
+ result += find_splittable_givs (bl, unroll_type, loop_start, loop_end,
+ increment, unroll_number);
+
+ /* If final value is non-zero, then must emit an instruction which sets
+ the value of the biv to the proper value. This is done after
+ handling all of the givs, since some of them may need to use the
+ biv's value in their initialization code. */
+ if (biv_final_value)
+ {
+ /* If the loop has multiple exits, emit the insns before the
+ loop to ensure that it will always be executed no matter
+ how the loop exits. Otherwise emit the insn after the loop,
+ since this is slightly more efficient. */
+ if (! loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
+ emit_insn_before (gen_move_insn (bl->biv->src_reg,
+ biv_final_value),
+ end_insert_before);
+ else
+ {
+ /* Create a new register to hold the value of the biv, and then
+ set the biv to its final value before the loop start. The biv
+ is set to its final value before loop start to ensure that
+ this insn will always be executed, no matter how the loop
+ exits. */
+ rtx tem = gen_reg_rtx (bl->biv->mode);
+ emit_insn_before (gen_move_insn (tem, bl->biv->src_reg),
+ loop_start);
+ emit_insn_before (gen_move_insn (bl->biv->src_reg,
+ biv_final_value),
+ loop_start);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Biv %d mapped to %d for split.\n",
+ REGNO (bl->biv->src_reg), REGNO (tem));
+
+ /* Set up the mapping from the original biv register to the new
+ register. */
+ bl->biv->src_reg = tem;
+ }
+ }
+ }
+ return result;
+}
+
+/* For every giv based on the biv BL, check to determine whether it is
+ splittable. This is a subroutine to find_splittable_regs ().
+
+ Return the number of instructions that set splittable registers. */
+
+static int
+find_splittable_givs (bl, unroll_type, loop_start, loop_end, increment,
+ unroll_number)
+ struct iv_class *bl;
+ enum unroll_types unroll_type;
+ rtx loop_start, loop_end;
+ rtx increment;
+ int unroll_number;
+{
+ struct induction *v;
+ rtx final_value;
+ rtx tem;
+ int result = 0;
+
+ for (v = bl->giv; v; v = v->next_iv)
+ {
+ rtx giv_inc, value;
+
+ /* Only split the giv if it has already been reduced, or if the loop is
+ being completely unrolled. */
+ if (unroll_type != UNROLL_COMPLETELY && v->ignore)
+ continue;
+
+ /* The giv can be split if the insn that sets the giv is executed once
+ and only once on every iteration of the loop. */
+ /* An address giv can always be split. v->insn is just a use not a set,
+ and hence it does not matter whether it is always executed. All that
+ matters is that all the biv increments are always executed, and we
+ won't reach here if they aren't. */
+ if (v->giv_type != DEST_ADDR
+ && (! v->always_computable
+ || back_branch_in_range_p (v->insn, loop_start, loop_end)))
+ continue;
+
+ /* The giv increment value must be a constant. */
+ giv_inc = fold_rtx_mult_add (v->mult_val, increment, const0_rtx,
+ v->mode);
+ if (! giv_inc || GET_CODE (giv_inc) != CONST_INT)
+ continue;
+
+ /* The loop must be unrolled completely, or else have a known number of
+ iterations and only one exit, or else the giv must be dead outside
+ the loop, or else the final value of the giv must be known.
+ Otherwise, it is not safe to split the giv since it may not have the
+ proper value on loop exit. */
+
+      /* The used-outside-loop test will fail for DEST_ADDR givs.  They are
+	 never used outside the loop anyway, so it is always safe to split a
+	 DEST_ADDR giv.  */
+
+ final_value = 0;
+ if (unroll_type != UNROLL_COMPLETELY
+ && (loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]]
+ || unroll_type == UNROLL_NAIVE)
+ && v->giv_type != DEST_ADDR
+ && ((regno_first_uid[REGNO (v->dest_reg)] != INSN_UID (v->insn)
+ /* Check for the case where the pseudo is set by a shift/add
+ sequence, in which case the first insn setting the pseudo
+ is the first insn of the shift/add sequence. */
+ && (! (tem = find_reg_note (v->insn, REG_RETVAL, NULL_RTX))
+ || (regno_first_uid[REGNO (v->dest_reg)]
+ != INSN_UID (XEXP (tem, 0)))))
+ /* Line above always fails if INSN was moved by loop opt. */
+ || (uid_luid[regno_last_uid[REGNO (v->dest_reg)]]
+ >= INSN_LUID (loop_end)))
+ && ! (final_value = v->final_value))
+ continue;
+
+#if 0
+ /* Currently, non-reduced/final-value givs are never split. */
+ /* Should emit insns after the loop if possible, as the biv final value
+ code below does. */
+
+ /* If the final value is non-zero, and the giv has not been reduced,
+ then must emit an instruction to set the final value. */
+ if (final_value && !v->new_reg)
+ {
+ /* Create a new register to hold the value of the giv, and then set
+ the giv to its final value before the loop start. The giv is set
+ to its final value before loop start to ensure that this insn
+ will always be executed, no matter how we exit. */
+ tem = gen_reg_rtx (v->mode);
+ emit_insn_before (gen_move_insn (tem, v->dest_reg), loop_start);
+ emit_insn_before (gen_move_insn (v->dest_reg, final_value),
+ loop_start);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "Giv %d mapped to %d for split.\n",
+ REGNO (v->dest_reg), REGNO (tem));
+
+ v->src_reg = tem;
+ }
+#endif
+
+ /* This giv is splittable. If completely unrolling the loop, save the
+ giv's initial value. Otherwise, save the constant zero for it. */
+
+ if (unroll_type == UNROLL_COMPLETELY)
+ {
+ /* It is not safe to use bl->initial_value here, because it may not
+ be invariant. It is safe to use the initial value stored in
+ the splittable_regs array if it is set. In rare cases, it won't
+ be set, so then we do exactly the same thing as
+ find_splittable_regs does to get a safe value. */
+ rtx biv_initial_value;
+
+ if (splittable_regs[bl->regno])
+ biv_initial_value = splittable_regs[bl->regno];
+ else if (GET_CODE (bl->initial_value) != REG
+ || (REGNO (bl->initial_value) != bl->regno
+ && REGNO (bl->initial_value) >= FIRST_PSEUDO_REGISTER))
+ biv_initial_value = bl->initial_value;
+ else
+ {
+ rtx tem = gen_reg_rtx (bl->biv->mode);
+
+ emit_insn_before (gen_move_insn (tem, bl->biv->src_reg),
+ loop_start);
+ biv_initial_value = tem;
+ }
+ value = fold_rtx_mult_add (v->mult_val, biv_initial_value,
+ v->add_val, v->mode);
+ }
+ else
+ value = const0_rtx;
+
+ if (v->new_reg)
+ {
+ /* If a giv was combined with another giv, then we can only split
+ this giv if the giv it was combined with was reduced. This
+ is because the value of v->new_reg is meaningless in this
+ case. */
+ if (v->same && ! v->same->new_reg)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "giv combined with unreduced giv not split.\n");
+ continue;
+ }
+ /* If the giv is an address destination, it could be something other
+	     than a simple register; these have to be treated differently.  */
+ else if (v->giv_type == DEST_REG)
+ {
+ /* If value is not a constant, register, or register plus
+ constant, then compute its value into a register before
+ loop start. This prevents illegal rtx sharing, and should
+ generate better code. We can use bl->initial_value here
+ instead of splittable_regs[bl->regno] because this code
+ is going before the loop start. */
+ if (unroll_type == UNROLL_COMPLETELY
+ && GET_CODE (value) != CONST_INT
+ && GET_CODE (value) != REG
+ && (GET_CODE (value) != PLUS
+ || GET_CODE (XEXP (value, 0)) != REG
+ || GET_CODE (XEXP (value, 1)) != CONST_INT))
+ {
+ rtx tem = gen_reg_rtx (v->mode);
+ emit_iv_add_mult (bl->initial_value, v->mult_val,
+ v->add_val, tem, loop_start);
+ value = tem;
+ }
+
+ splittable_regs[REGNO (v->new_reg)] = value;
+ }
+ else
+ {
+ /* Splitting address givs is useful since it will often allow us
+ to eliminate some increment insns for the base giv as
+ unnecessary. */
+
+ /* If the addr giv is combined with a dest_reg giv, then all
+ references to that dest reg will be remapped, which is NOT
+ what we want for split addr regs. We always create a new
+ register for the split addr giv, just to be safe. */
+
+ /* ??? If there are multiple address givs which have been
+ combined with the same dest_reg giv, then we may only need
+ one new register for them. Pulling out constants below will
+ catch some of the common cases of this. Currently, I leave
+ the work of simplifying multiple address givs to the
+ following cse pass. */
+
+ /* As a special case, if we have multiple identical address givs
+	     within a single instruction, then we do use a single pseudo
+ reg for both. This is necessary in case one is a match_dup
+ of the other. */
+
+ v->const_adjust = 0;
+
+ if (v->same && v->same->insn == v->insn
+ && v->new_reg == v->same->new_reg)
+ {
+ v->dest_reg = v->same->dest_reg;
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Sharing address givs with reg %d\n",
+ REGNO (v->dest_reg));
+ }
+ else if (unroll_type != UNROLL_COMPLETELY)
+ {
+ /* If not completely unrolling the loop, then create a new
+ register to hold the split value of the DEST_ADDR giv.
+ Emit insn to initialize its value before loop start. */
+ tem = gen_reg_rtx (v->mode);
+
+ /* If the address giv has a constant in its new_reg value,
+ then this constant can be pulled out and put in value,
+ instead of being part of the initialization code. */
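+
+	      /* For example, if new_reg is (plus (reg 105) (const_int 40))
+		 (numbers illustrative), dest_reg becomes the new pseudo
+		 plus 40 and, provided the resulting addresses are valid,
+		 const_adjust is set to -40 below.  */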
+
+ if (GET_CODE (v->new_reg) == PLUS
+ && GET_CODE (XEXP (v->new_reg, 1)) == CONST_INT)
+ {
+ v->dest_reg
+ = plus_constant (tem, INTVAL (XEXP (v->new_reg,1)));
+
+ /* Only succeed if this will give valid addresses.
+ Try to validate both the first and the last
+		     address resulting from loop unrolling; if
+		     one fails, then we can't do const elim here.  */
+ if (memory_address_p (v->mem_mode, v->dest_reg)
+ && memory_address_p (v->mem_mode,
+ plus_constant (v->dest_reg,
+ INTVAL (giv_inc)
+ * (unroll_number - 1))))
+ {
+ /* Save the negative of the eliminated const, so
+ that we can calculate the dest_reg's increment
+ value later. */
+ v->const_adjust = - INTVAL (XEXP (v->new_reg, 1));
+
+ v->new_reg = XEXP (v->new_reg, 0);
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Eliminating constant from giv %d\n",
+ REGNO (tem));
+ }
+ else
+ v->dest_reg = tem;
+ }
+ else
+ v->dest_reg = tem;
+
+ /* If the address hasn't been checked for validity yet, do so
+ now, and fail completely if either the first or the last
+ unrolled copy of the address is not a valid address. */
+ if (v->dest_reg == tem
+ && (! memory_address_p (v->mem_mode, v->dest_reg)
+ || ! memory_address_p (v->mem_mode,
+ plus_constant (v->dest_reg,
+ INTVAL (giv_inc)
+ * (unroll_number -1)))))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Illegal address for giv at insn %d\n",
+ INSN_UID (v->insn));
+ continue;
+ }
+
+ /* To initialize the new register, just move the value of
+ new_reg into it. This is not guaranteed to give a valid
+ instruction on machines with complex addressing modes.
+ If we can't recognize it, then delete it and emit insns
+ to calculate the value from scratch. */
+ emit_insn_before (gen_rtx (SET, VOIDmode, tem,
+ copy_rtx (v->new_reg)),
+ loop_start);
+ if (recog_memoized (PREV_INSN (loop_start)) < 0)
+ {
+ rtx sequence, ret;
+
+ /* We can't use bl->initial_value to compute the initial
+ value, because the loop may have been preconditioned.
+ We must calculate it from NEW_REG. Try using
+ force_operand instead of emit_iv_add_mult. */
+ delete_insn (PREV_INSN (loop_start));
+
+ start_sequence ();
+ ret = force_operand (v->new_reg, tem);
+ if (ret != tem)
+ emit_move_insn (tem, ret);
+ sequence = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (sequence, loop_start);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Illegal init insn, rewritten.\n");
+ }
+ }
+ else
+ {
+ v->dest_reg = value;
+
+ /* Check the resulting address for validity, and fail
+ if the resulting address would be illegal. */
+ if (! memory_address_p (v->mem_mode, v->dest_reg)
+ || ! memory_address_p (v->mem_mode,
+ plus_constant (v->dest_reg,
+ INTVAL (giv_inc) *
+ (unroll_number -1))))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Illegal address for giv at insn %d\n",
+ INSN_UID (v->insn));
+ continue;
+ }
+ }
+
+ /* Store the value of dest_reg into the insn. This sharing
+ will not be a problem as this insn will always be copied
+ later. */
+
+ *v->location = v->dest_reg;
+
+ /* If this address giv is combined with a dest reg giv, then
+ save the base giv's induction pointer so that we will be
+ able to handle this address giv properly. The base giv
+ itself does not have to be splittable. */
+
+ if (v->same && v->same->giv_type == DEST_REG)
+ addr_combined_regs[REGNO (v->same->new_reg)] = v->same;
+
+ if (GET_CODE (v->new_reg) == REG)
+ {
+		  /* This giv may not have been combined with any others.
+		     Make sure this giv is marked as splittable here.  */
+
+ splittable_regs[REGNO (v->new_reg)] = value;
+
+ /* Make it appear to depend upon itself, so that the
+ giv will be properly split in the main loop above. */
+ if (! v->same)
+ {
+ v->same = v;
+ addr_combined_regs[REGNO (v->new_reg)] = v;
+ }
+ }
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream, "DEST_ADDR giv being split.\n");
+ }
+ }
+ else
+ {
+#if 0
+	  /* Currently, unreduced givs can't be split.  This is not too much
+	     of a problem since unreduced givs are not live across loop
+	     iterations anyway.  When unrolling a loop completely, though,
+ it makes sense to reduce&split givs when possible, as this will
+ result in simpler instructions, and will not require that a reg
+ be live across loop iterations. */
+
+ splittable_regs[REGNO (v->dest_reg)] = value;
+ fprintf (stderr, "Giv %d at insn %d not reduced\n",
+ REGNO (v->dest_reg), INSN_UID (v->insn));
+#else
+ continue;
+#endif
+ }
+
+ /* Givs are only updated once by definition. Mark it so if this is
+ a splittable register. Don't need to do anything for address givs
+ where this may not be a register. */
+
+ if (GET_CODE (v->new_reg) == REG)
+ splittable_regs_updates[REGNO (v->new_reg)] = 1;
+
+ result++;
+
+ if (loop_dump_stream)
+ {
+ int regnum;
+
+ if (GET_CODE (v->dest_reg) == CONST_INT)
+ regnum = -1;
+ else if (GET_CODE (v->dest_reg) != REG)
+ regnum = REGNO (XEXP (v->dest_reg, 0));
+ else
+ regnum = REGNO (v->dest_reg);
+ fprintf (loop_dump_stream, "Giv %d at insn %d safe to split.\n",
+ regnum, INSN_UID (v->insn));
+ }
+ }
+
+ return result;
+}
+
+/* Try to prove that the register is dead after the loop exits. Trace every
+ loop exit looking for an insn that will always be executed, which sets
+ the register to some value, and which appears before any use of the
+ register is found. If successful, return 1; otherwise return 0. */
+
+/* ?? Could be made more intelligent in the handling of jumps, so that
+ it can search past if statements and other similar structures. */
+
+static int
+reg_dead_after_loop (reg, loop_start, loop_end)
+ rtx reg, loop_start, loop_end;
+{
+ rtx insn, label;
+ enum rtx_code code;
+ int jump_count = 0;
+
+ /* HACK: Must also search the loop fall through exit, create a label_ref
+ here which points to the loop_end, and append the loop_number_exit_labels
+ list to it. */
+ label = gen_rtx (LABEL_REF, VOIDmode, loop_end);
+ LABEL_NEXTREF (label)
+ = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
+
+ for ( ; label; label = LABEL_NEXTREF (label))
+ {
+ /* Succeed if we find an insn which sets the register, or if we
+ reach the end of the function. Fail if we find an insn that uses
+ the register, or if we come to a conditional jump. */
+
+ insn = NEXT_INSN (XEXP (label, 0));
+ while (insn)
+ {
+ code = GET_CODE (insn);
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ rtx set;
+
+ if (reg_referenced_p (reg, PATTERN (insn)))
+ return 0;
+
+ set = single_set (insn);
+ if (set && rtx_equal_p (SET_DEST (set), reg))
+ break;
+ }
+
+ if (code == JUMP_INSN)
+ {
+ if (GET_CODE (PATTERN (insn)) == RETURN)
+ break;
+ else if (! simplejump_p (insn)
+ /* Prevent infinite loop following infinite loops. */
+ || jump_count++ > 20)
+ return 0;
+ else
+ insn = JUMP_LABEL (insn);
+ }
+
+ insn = NEXT_INSN (insn);
+ }
+ }
+
+ /* Success, the register is dead on all loop exits. */
+ return 1;
+}
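+
+/* Illustrative trace of the walk above: for a given exit, if the code
+ after the exit label sets the register (a single_set whose SET_DEST
+ is the register) before any insn that references it, the inner loop
+ breaks and that exit is accepted; if the register is referenced
+ first, or a conditional jump intervenes, the function returns 0. */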
+
+/* Try to calculate the final value of the biv, the value it will have at
+ the end of the loop. If we can do it, return that value. */
+
+rtx
+final_biv_value (bl, loop_start, loop_end)
+ struct iv_class *bl;
+ rtx loop_start, loop_end;
+{
+ rtx increment, tem;
+
+ /* ??? This only works for MODE_INT biv's. Reject all others for now. */
+
+ if (GET_MODE_CLASS (bl->biv->mode) != MODE_INT)
+ return 0;
+
+ /* The final value for reversed bivs must be calculated differently than
+ for ordinary bivs. In this case, there is already an insn after the
+ loop which sets this biv's final value (if necessary), and there are
+ no other loop exits, so we can return any value. */
+ if (bl->reversed)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final biv value for %d, reversed biv.\n", bl->regno);
+
+ return const0_rtx;
+ }
+
+ /* Try to calculate the final value as initial value + (number of iterations
+ * increment). For this to work, increment must be invariant, the only
+ exit from the loop must be the fall through at the bottom (otherwise
+ it may not have its final value when the loop exits), and the initial
+ value of the biv must be invariant. */
+
+ if (loop_n_iterations != 0
+ && ! loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]]
+ && invariant_p (bl->initial_value))
+ {
+ increment = biv_total_increment (bl, loop_start, loop_end);
+
+ if (increment && invariant_p (increment))
+ {
+ /* Can calculate the loop exit value, emit insns after loop
+ end to calculate this value into a temporary register in
+ case it is needed later. */
+
+ tem = gen_reg_rtx (bl->biv->mode);
+ /* Make sure loop_end is not the last insn. */
+ if (NEXT_INSN (loop_end) == 0)
+ emit_note_after (NOTE_INSN_DELETED, loop_end);
+ emit_iv_add_mult (increment, GEN_INT (loop_n_iterations),
+ bl->initial_value, tem, NEXT_INSN (loop_end));
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final biv value for %d, calculated.\n", bl->regno);
+
+ return tem;
+ }
+ }
+
+ /* Check to see if the biv is dead at all loop exits. */
+ if (reg_dead_after_loop (bl->biv->src_reg, loop_start, loop_end))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final biv value for %d, biv dead after loop exit.\n",
+ bl->regno);
+
+ return const0_rtx;
+ }
+
+ return 0;
+}
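+
+/* Worked example: for a biv with invariant initial value 0 and a total
+ increment of 4 per iteration, in a loop known to run 10 times with
+ only the fall-through exit, the code above emits insns after the loop
+ computing 4 * 10 + 0 = 40 into a new pseudo, and returns that
+ register. */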
+
+/* Try to calculate the final value of the giv, the value it will have at
+ the end of the loop. If we can do it, return that value. */
+
+rtx
+final_giv_value (v, loop_start, loop_end)
+ struct induction *v;
+ rtx loop_start, loop_end;
+{
+ struct iv_class *bl;
+ rtx insn;
+ rtx increment, tem;
+ rtx insert_before, seq;
+
+ bl = reg_biv_class[REGNO (v->src_reg)];
+
+ /* The final value for givs which depend on reversed bivs must be calculated
+ differently than for ordinary givs. In this case, there is already an
+ insn after the loop which sets this giv's final value (if necessary),
+ and there are no other loop exits, so we can return any value. */
+ if (bl->reversed)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final giv value for %d, depends on reversed biv\n",
+ REGNO (v->dest_reg));
+ return const0_rtx;
+ }
+
+ /* Try to calculate the final value as a function of the biv it depends
+ upon. The only exit from the loop must be the fall through at the bottom
+ (otherwise it may not have its final value when the loop exits). */
+
+ /* ??? Can calculate the final giv value by subtracting off the
+ extra biv increments times the giv's mult_val. The loop must have
+ only one exit for this to work, but the number of loop iterations does
+ not need to be known. */
+
+ if (loop_n_iterations != 0
+ && ! loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
+ {
+ /* ?? It is tempting to use the biv's value here since these insns will
+ be put after the loop, and hence the biv will have its final value
+ then. However, this fails if the biv is subsequently eliminated.
+ Perhaps determine whether biv's are eliminable before trying to
+ determine whether giv's are replaceable so that we can use the
+ biv value here if it is not eliminable. */
+
+ increment = biv_total_increment (bl, loop_start, loop_end);
+
+ if (increment && invariant_p (increment))
+ {
+ /* Can calculate the loop exit value of its biv as
+ (loop_n_iterations * increment) + initial_value */
+
+ /* The loop exit value of the giv is then
+ (final_biv_value - extra increments) * mult_val + add_val.
+ The extra increments are any increments to the biv which
+ occur in the loop after the giv's value is calculated.
+ We must search from the insn that sets the giv to the end
+ of the loop to calculate this value. */
+
+ insert_before = NEXT_INSN (loop_end);
+
+ /* Put the final biv value in tem. */
+ tem = gen_reg_rtx (bl->biv->mode);
+ emit_iv_add_mult (increment, GEN_INT (loop_n_iterations),
+ bl->initial_value, tem, insert_before);
+
+ /* Subtract off extra increments as we find them. */
+ for (insn = NEXT_INSN (v->insn); insn != loop_end;
+ insn = NEXT_INSN (insn))
+ {
+ struct induction *biv;
+
+ for (biv = bl->biv; biv; biv = biv->next_iv)
+ if (biv->insn == insn)
+ {
+ start_sequence ();
+ tem = expand_binop (GET_MODE (tem), sub_optab, tem,
+ biv->add_val, NULL_RTX, 0,
+ OPTAB_LIB_WIDEN);
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_before (seq, insert_before);
+ }
+ }
+
+ /* Now calculate the giv's final value. */
+ emit_iv_add_mult (tem, v->mult_val, v->add_val, tem,
+ insert_before);
+
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final giv value for %d, calc from biv's value.\n",
+ REGNO (v->dest_reg));
+
+ return tem;
+ }
+ }
+
+ /* Replaceable giv's should never reach here. */
+ if (v->replaceable)
+ abort ();
+
+ /* Check to see if the biv is dead at all loop exits. */
+ if (reg_dead_after_loop (v->dest_reg, loop_start, loop_end))
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Final giv value for %d, giv dead after loop exit.\n",
+ REGNO (v->dest_reg));
+
+ return const0_rtx;
+ }
+
+ return 0;
+}
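+
+/* Worked example: suppose the giv is biv * 3 + 5 (mult_val 3,
+ add_val 5), the biv's loop exit value is 40, and one biv increment
+ of 4 occurs after the insn that sets the giv. The code above
+ computes (40 - 4) * 3 + 5 = 113 into a new pseudo after the loop
+ and returns it. */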
+
+
+/* Calculate the number of loop iterations. Returns the exact number of loop
+ iterations if it can be calculated, otherwise returns zero. */
+
+unsigned HOST_WIDE_INT
+loop_iterations (loop_start, loop_end)
+ rtx loop_start, loop_end;
+{
+ rtx comparison, comparison_value;
+ rtx iteration_var, initial_value, increment, final_value;
+ enum rtx_code comparison_code;
+ HOST_WIDE_INT i;
+ int increment_dir;
+ int unsigned_compare, compare_dir, final_larger;
+ unsigned long tempu;
+ rtx last_loop_insn;
+
+ /* First find the iteration variable. If the last insn is a conditional
+ branch, and the insn before tests a register value, make that the
+ iteration variable. */
+
+ loop_initial_value = 0;
+ loop_increment = 0;
+ loop_final_value = 0;
+ loop_iteration_var = 0;
+
+ last_loop_insn = prev_nonnote_insn (loop_end);
+
+ comparison = get_condition_for_loop (last_loop_insn);
+ if (comparison == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: No final conditional branch found.\n");
+ return 0;
+ }
+
+ /* ??? get_condition may switch the positions of the induction variable
+ and the invariant register when it canonicalizes the comparison. */
+
+ comparison_code = GET_CODE (comparison);
+ iteration_var = XEXP (comparison, 0);
+ comparison_value = XEXP (comparison, 1);
+
+ if (GET_CODE (iteration_var) != REG)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Comparison not against register.\n");
+ return 0;
+ }
+
+ /* loop_iterations is now always called before any new registers are
+ created, so this should never occur. */
+
+ if (REGNO (iteration_var) >= max_reg_before_loop)
+ abort ();
+
+ iteration_info (iteration_var, &initial_value, &increment,
+ loop_start, loop_end);
+ if (initial_value == 0)
+ /* iteration_info already printed a message. */
+ return 0;
+
+ if (increment == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Increment value can't be calculated.\n");
+ return 0;
+ }
+ if (GET_CODE (increment) != CONST_INT)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Increment value not constant.\n");
+ return 0;
+ }
+ if (GET_CODE (initial_value) != CONST_INT)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Initial value not constant.\n");
+ return 0;
+ }
+
+ /* If the comparison value is an invariant register, then try to find
+ its value from the insns before the start of the loop. */
+
+ if (GET_CODE (comparison_value) == REG && invariant_p (comparison_value))
+ {
+ rtx insn, set;
+
+ for (insn = PREV_INSN (loop_start); insn ; insn = PREV_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ break;
+
+ else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && reg_set_p (comparison_value, insn))
+ {
+ /* We found the last insn before the loop that sets the register.
+ If it sets the entire register, and has a REG_EQUAL note,
+ then use the value of the REG_EQUAL note. */
+ if ((set = single_set (insn))
+ && (SET_DEST (set) == comparison_value))
+ {
+ rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
+
+ /* Only use the REG_EQUAL note if it is a constant.
+ Other things, divide in particular, will cause
+ problems later if we use them. */
+ if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST
+ && CONSTANT_P (XEXP (note, 0)))
+ comparison_value = XEXP (note, 0);
+ }
+ break;
+ }
+ }
+ }
+
+ final_value = approx_final_value (comparison_code, comparison_value,
+ &unsigned_compare, &compare_dir);
+
+ /* Save the calculated values describing this loop's bounds, in case
+ precondition_loop_p will need them later. These values cannot be
+ recalculated inside precondition_loop_p because strength reduction
+ optimizations may obscure the loop's structure. */
+
+ loop_iteration_var = iteration_var;
+ loop_initial_value = initial_value;
+ loop_increment = increment;
+ loop_final_value = final_value;
+
+ if (final_value == 0)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: EQ comparison loop.\n");
+ return 0;
+ }
+ else if (GET_CODE (final_value) != CONST_INT)
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Final value not constant.\n");
+ return 0;
+ }
+
+ /* ?? Final value and initial value do not have to be constants.
+ Only their difference has to be constant. When the iteration variable
+ is an array address, the final value and initial value might both
+ be addresses with the same base but different constant offsets.
+ Final value must be invariant for this to work.
+
+ To do this, need some way to find the values of registers which are
+ invariant. */
+
+ /* final_larger is 1 if the final value is larger, 0 if they are equal, and -1 otherwise. */
+ if (unsigned_compare)
+ final_larger
+ = ((unsigned HOST_WIDE_INT) INTVAL (final_value)
+ > (unsigned HOST_WIDE_INT) INTVAL (initial_value))
+ - ((unsigned HOST_WIDE_INT) INTVAL (final_value)
+ < (unsigned HOST_WIDE_INT) INTVAL (initial_value));
+ else
+ final_larger = (INTVAL (final_value) > INTVAL (initial_value))
+ - (INTVAL (final_value) < INTVAL (initial_value));
+
+ if (INTVAL (increment) > 0)
+ increment_dir = 1;
+ else if (INTVAL (increment) == 0)
+ increment_dir = 0;
+ else
+ increment_dir = -1;
+
+ /* There are 27 different cases: compare_dir = -1, 0, 1;
+ final_larger = -1, 0, 1; increment_dir = -1, 0, 1.
+ There are 4 normal cases, 4 reverse cases (where the iteration variable
+ will overflow before the loop exits), 4 infinite loop cases, and 15
+ immediate exit (0 or 1 iteration depending on loop type) cases.
+ Only try to optimize the normal cases. */
+
+ /* (compare_dir/final_larger/increment_dir)
+ Normal cases: (0/-1/-1), (0/1/1), (-1/-1/-1), (1/1/1)
+ Reverse cases: (0/-1/1), (0/1/-1), (-1/-1/1), (1/1/-1)
+ Infinite loops: (0/-1/0), (0/1/0), (-1/-1/0), (1/1/0)
+ Immediate exit: (0/0/X), (-1/0/X), (-1/1/X), (1/0/X), (1/-1/X) */
+
+ /* ?? If the meaning of reverse loops (where the iteration variable
+ will overflow before the loop exits) is undefined, then we could
+ eliminate all of these special checks, and just always assume
+ the loops are normal/immediate/infinite. Note that this means
+ the sign of increment_dir does not have to be known. Also,
+ since it does not really hurt if immediate exit loops or infinite loops
+ are optimized, that case could be ignored also, and hence all
+ loops can be optimized.
+
+ According to the ANSI spec, the reverse loop case result is undefined,
+ because the action on overflow is undefined.
+
+ See also the special test for NE loops below. */
+
+ if (final_larger == increment_dir && final_larger != 0
+ && (final_larger == compare_dir || compare_dir == 0))
+ /* Normal case. */
+ ;
+ else
+ {
+ if (loop_dump_stream)
+ fprintf (loop_dump_stream,
+ "Loop unrolling: Not normal loop.\n");
+ return 0;
+ }
+
+ /* Calculate the number of iterations; final_value is only an approximation,
+ so correct for that. Note that tempu and loop_n_iterations are
+ unsigned, because they can be as large as 2^n - 1. */
+
+ i = INTVAL (increment);
+ if (i > 0)
+ tempu = INTVAL (final_value) - INTVAL (initial_value);
+ else if (i < 0)
+ {
+ tempu = INTVAL (initial_value) - INTVAL (final_value);
+ i = -i;
+ }
+ else
+ abort ();
+
+ /* For NE tests, make sure that the iteration variable won't miss the
+ final value. If tempu mod i is not zero, then the iteration variable
+ will overflow before the loop exits, and we cannot calculate the
+ number of iterations. */
+ if (compare_dir == 0 && (tempu % i) != 0)
+ return 0;
+
+ return tempu / i + ((tempu % i) != 0);
+}
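+
+/* Worked example: for a loop equivalent to
+ `for (i = 0; i != 9; i += 3)', initial_value is 0, final_value 9,
+ increment 3 and compare_dir 0 (an NE test). Then tempu = 9, i = 3,
+ tempu % i == 0, and the function returns 9 / 3 = 3 iterations.
+ With final_value 10 instead, tempu % i != 0, the variable would step
+ past the final value, and 0 is returned. */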
+
+/* Replace uses of split bivs with their split pseudo register. This is
+ for the original instructions, which remain after loop unrolling
+ without being copied. */
+
+static rtx
+remap_split_bivs (x)
+ rtx x;
+{
+ register enum rtx_code code;
+ register int i;
+ register char *fmt;
+
+ if (x == 0)
+ return x;
+
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case SCRATCH:
+ case PC:
+ case CC0:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return x;
+
+ case REG:
+#if 0
+ /* If non-reduced/final-value givs were split, then this would
+ have to remap those givs also. */
+#endif
+ if (REGNO (x) < max_reg_before_loop
+ && reg_iv_type[REGNO (x)] == BASIC_INDUCT)
+ return reg_biv_class[REGNO (x)]->biv->src_reg;
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ XEXP (x, i) = remap_split_bivs (XEXP (x, i));
+ if (fmt[i] == 'E')
+ {
+ register int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ XVECEXP (x, i, j) = remap_split_bivs (XVECEXP (x, i, j));
+ }
+ }
+ return x;
+}
diff --git a/gnu/usr.bin/cc/cc_int/varasm.c b/gnu/usr.bin/cc/cc_int/varasm.c
new file mode 100644
index 0000000..cd49b0c
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/varasm.c
@@ -0,0 +1,3883 @@
+/* Output variables, constants and external declarations, for GNU compiler.
+ Copyright (C) 1987, 88, 89, 92, 93, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file handles generation of all the assembler code
+ *except* the instructions of a function.
+ This includes declarations of variables and their initial values.
+
+ We also output the assembler code for constants stored in memory
+ and are responsible for combining constants with the same value. */
+
+#include <stdio.h>
+#include <setjmp.h>
+/* #include <stab.h> */
+#include "config.h"
+#include "rtl.h"
+#include "tree.h"
+#include "flags.h"
+#include "function.h"
+#include "expr.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "defaults.h"
+#include "real.h"
+#include "bytecode.h"
+
+#include "obstack.h"
+
+#ifdef XCOFF_DEBUGGING_INFO
+#include "xcoffout.h"
+#endif
+
+#include <ctype.h>
+
+#ifndef ASM_STABS_OP
+#define ASM_STABS_OP ".stabs"
+#endif
+
+/* This macro gets just the user-specified name
+ out of the string in a SYMBOL_REF. On most machines,
+ we discard the * if any and that's all. */
+#ifndef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR,SYMBOL_NAME) \
+ (VAR) = ((SYMBOL_NAME) + ((SYMBOL_NAME)[0] == '*'))
+#endif
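+
+/* For example, with the default definition above,
+ STRIP_NAME_ENCODING (var, "*_foo") leaves var pointing at "_foo",
+ while STRIP_NAME_ENCODING (var, "bar") leaves var at "bar"
+ unchanged. */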
+
+/* File in which assembler code is being written. */
+
+extern FILE *asm_out_file;
+
+/* The (assembler) name of the first globally-visible object output. */
+char *first_global_object_name;
+
+extern struct obstack *current_obstack;
+extern struct obstack *saveable_obstack;
+extern struct obstack *rtl_obstack;
+extern struct obstack permanent_obstack;
+#define obstack_chunk_alloc xmalloc
+
+/* Number for making the label on the next
+ constant that is stored in memory. */
+
+int const_labelno;
+
+/* Number for making the label on the next
+ static variable internal to a function. */
+
+int var_labelno;
+
+/* Carry information from ASM_DECLARE_OBJECT_NAME
+ to ASM_FINISH_DECLARE_OBJECT. */
+
+int size_directive_output;
+
+/* The last decl for which assemble_variable was called,
+ if it did ASM_DECLARE_OBJECT_NAME.
+ If the last call to assemble_variable didn't do that,
+ this holds 0. */
+
+tree last_assemble_variable_decl;
+
+/* Nonzero if at least one function definition has been seen. */
+static int function_defined;
+
+static char *compare_constant_1 ();
+static void record_constant_1 ();
+static void output_constant_def_contents ();
+static int contains_pointers_p ();
+static void bc_output_ascii ();
+
+void output_constant_pool ();
+void assemble_name ();
+int output_addressed_constants ();
+void output_constant ();
+void output_constructor ();
+void output_byte_asm ();
+void text_section ();
+void readonly_data_section ();
+void data_section ();
+void named_section ();
+static void bc_assemble_integer ();
+
+#ifdef EXTRA_SECTIONS
+static enum in_section {no_section, in_text, in_data, in_named, EXTRA_SECTIONS} in_section
+ = no_section;
+#else
+static enum in_section {no_section, in_text, in_data, in_named} in_section
+ = no_section;
+#endif
+
+/* Return a non-zero value if DECL has a section attribute. */
+#define IN_NAMED_SECTION(DECL) \
+ ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
+ && DECL_SECTION_NAME (DECL) != NULL_TREE)
+
+/* Text of section name when in_section == in_named. */
+static char *in_named_name;
+
+/* Define functions like text_section for any extra sections. */
+#ifdef EXTRA_SECTION_FUNCTIONS
+EXTRA_SECTION_FUNCTIONS
+#endif
+
+/* Tell assembler to switch to text section. */
+
+void
+text_section ()
+{
+ if (in_section != in_text)
+ {
+ if (output_bytecode)
+ bc_text ();
+ else
+ fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
+
+ in_section = in_text;
+ }
+}
+
+/* Tell assembler to switch to data section. */
+
+void
+data_section ()
+{
+ if (in_section != in_data)
+ {
+ if (output_bytecode)
+ bc_data ();
+ else
+ {
+ if (flag_shared_data)
+ {
+#ifdef SHARED_SECTION_ASM_OP
+ fprintf (asm_out_file, "%s\n", SHARED_SECTION_ASM_OP);
+#else
+ fprintf (asm_out_file, "%s\n", DATA_SECTION_ASM_OP);
+#endif
+ }
+ else
+ fprintf (asm_out_file, "%s\n", DATA_SECTION_ASM_OP);
+ }
+
+ in_section = in_data;
+ }
+}
+
+/* Tell assembler to switch to read-only data section. This is normally
+ the text section. */
+
+void
+readonly_data_section ()
+{
+#ifdef READONLY_DATA_SECTION
+ READONLY_DATA_SECTION (); /* Note this can call data_section. */
+#else
+ text_section ();
+#endif
+}
+
+/* Determine if we're in the text section. */
+
+int
+in_text_section ()
+{
+ return in_section == in_text;
+}
+
+/* Tell assembler to change to named section. */
+
+void
+named_section (name)
+ char *name;
+{
+ if (in_section != in_named || strcmp (name, in_named_name))
+ {
+ in_named_name = name;
+ in_section = in_named;
+
+#ifdef ASM_OUTPUT_SECTION_NAME
+ ASM_OUTPUT_SECTION_NAME (asm_out_file, name);
+#else
+ /* Section attributes are not supported if this macro isn't provided -
+ some host formats don't support them at all. The front-end should
+ already have flagged this as an error. */
+ abort ();
+#endif
+ }
+}
+
+/* Create the rtl to represent a function, for a function definition.
+ DECL is a FUNCTION_DECL node which describes which function.
+ The rtl is stored into DECL. */
+
+void
+make_function_rtl (decl)
+ tree decl;
+{
+ char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+
+ if (output_bytecode)
+ {
+ if (DECL_RTL (decl) == 0)
+ DECL_RTL (decl) = bc_gen_rtx (name, 0, (struct bc_label *) 0);
+
+ /* Record that at least one function has been defined. */
+ function_defined = 1;
+ return;
+ }
+
+ /* Rename a nested function to avoid conflicts. */
+ if (decl_function_context (decl) != 0
+ && DECL_INITIAL (decl) != 0
+ && DECL_RTL (decl) == 0)
+ {
+ char *label;
+
+ name = IDENTIFIER_POINTER (DECL_NAME (decl));
+ ASM_FORMAT_PRIVATE_NAME (label, name, var_labelno);
+ name = obstack_copy0 (saveable_obstack, label, strlen (label));
+ var_labelno++;
+ }
+
+ if (DECL_RTL (decl) == 0)
+ {
+ DECL_RTL (decl)
+ = gen_rtx (MEM, DECL_MODE (decl),
+ gen_rtx (SYMBOL_REF, Pmode, name));
+
+ /* Optionally set flags or add text to the name to record information
+ such as that it is a function name. If the name is changed, the macro
+ ASM_OUTPUT_LABELREF will have to know how to strip this information. */
+#ifdef ENCODE_SECTION_INFO
+ ENCODE_SECTION_INFO (decl);
+#endif
+ }
+
+ /* Record that at least one function has been defined. */
+ function_defined = 1;
+}
+
+/* Create the DECL_RTL for a declaration for a static or external
+ variable or static or external function.
+ ASMSPEC, if not 0, is the string which the user specified
+ as the assembler symbol name.
+ TOP_LEVEL is nonzero if this is a file-scope variable.
+ This is never called for PARM_DECLs. */
+void
+bc_make_decl_rtl (decl, asmspec, top_level)
+ tree decl;
+ char *asmspec;
+ int top_level;
+{
+ register char *name = TREE_STRING_POINTER (DECL_ASSEMBLER_NAME (decl));
+
+ if (DECL_RTL (decl) == 0)
+ {
+ /* Print an error message for register variables. */
+ if (DECL_REGISTER (decl) && TREE_CODE (decl) == FUNCTION_DECL)
+ error ("function declared `register'");
+ else if (DECL_REGISTER (decl))
+ error ("global register variables not supported in the interpreter");
+
+ /* Handle ordinary static variables and functions. */
+ if (DECL_RTL (decl) == 0)
+ {
+ /* Can't use just the variable's own name for a variable
+ whose scope is less than the whole file.
+ Concatenate a distinguishing number. */
+ if (!top_level && !DECL_EXTERNAL (decl) && asmspec == 0)
+ {
+ char *label;
+
+ ASM_FORMAT_PRIVATE_NAME (label, name, var_labelno);
+ name = obstack_copy0 (saveable_obstack, label, strlen (label));
+ var_labelno++;
+ }
+
+ DECL_RTL (decl) = bc_gen_rtx (name, 0, (struct bc_label *) 0);
+ }
+ }
+}
+
+/* Given NAME, a putative register name, discard any customary prefixes. */
+
+static char *
+strip_reg_name (name)
+ char *name;
+{
+#ifdef REGISTER_PREFIX
+ if (!strncmp (name, REGISTER_PREFIX, strlen (REGISTER_PREFIX)))
+ name += strlen (REGISTER_PREFIX);
+#endif
+ if (name[0] == '%' || name[0] == '#')
+ name++;
+ return name;
+}
+
+/* Decode an `asm' spec for a declaration as a register name.
+ Return the register number, or -1 if nothing is specified,
+ or -2 if the ASMSPEC is not `cc' or `memory' and is not a register name,
+ or -3 if ASMSPEC is `cc' and is not a register name,
+ or -4 if ASMSPEC is `memory' and is not a register name.
+ Accept an exact spelling or a decimal number.
+ Prefixes such as % are optional. */
+
+int
+decode_reg_name (asmspec)
+ char *asmspec;
+{
+ if (asmspec != 0)
+ {
+ int i;
+
+ /* Get rid of confusing prefixes. */
+ asmspec = strip_reg_name (asmspec);
+
+ /* Allow a decimal number as a "register name". */
+ for (i = strlen (asmspec) - 1; i >= 0; i--)
+ if (! (asmspec[i] >= '0' && asmspec[i] <= '9'))
+ break;
+ if (asmspec[0] != 0 && i < 0)
+ {
+ i = atoi (asmspec);
+ if (i < FIRST_PSEUDO_REGISTER && i >= 0)
+ return i;
+ else
+ return -2;
+ }
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (reg_names[i][0]
+ && ! strcmp (asmspec, strip_reg_name (reg_names[i])))
+ return i;
+
+#ifdef ADDITIONAL_REGISTER_NAMES
+ {
+ static struct { char *name; int number; } table[]
+ = ADDITIONAL_REGISTER_NAMES;
+
+ for (i = 0; i < sizeof (table) / sizeof (table[0]); i++)
+ if (! strcmp (asmspec, table[i].name))
+ return table[i].number;
+ }
+#endif /* ADDITIONAL_REGISTER_NAMES */
+
+ if (!strcmp (asmspec, "memory"))
+ return -4;
+
+ if (!strcmp (asmspec, "cc"))
+ return -3;
+
+ return -2;
+ }
+
+ return -1;
+}
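+
+/* Examples of the cases above: a purely decimal spec such as "4"
+ yields 4 when that is a valid hard register number; a spelling that
+ matches an entry in reg_names (after any REGISTER_PREFIX, "%" or "#"
+ prefix is stripped) yields that register's number; "memory" yields
+ -4, "cc" yields -3, any other unrecognized string -2, and a null
+ ASMSPEC -1. */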
+
+/* Create the DECL_RTL for a declaration for a static or external variable
+ or static or external function.
+ ASMSPEC, if not 0, is the string which the user specified
+ as the assembler symbol name.
+ TOP_LEVEL is nonzero if this is a file-scope variable.
+
+ This is never called for PARM_DECL nodes. */
+
+void
+make_decl_rtl (decl, asmspec, top_level)
+ tree decl;
+ char *asmspec;
+ int top_level;
+{
+ register char *name = 0;
+ int reg_number;
+
+ if (output_bytecode)
+ {
+ bc_make_decl_rtl (decl, asmspec, top_level);
+ return;
+ }
+
+ reg_number = decode_reg_name (asmspec);
+
+ if (DECL_ASSEMBLER_NAME (decl) != NULL_TREE)
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+
+ if (reg_number == -2)
+ {
+ /* ASMSPEC is given, and not the name of a register. */
+ name = (char *) obstack_alloc (saveable_obstack,
+ strlen (asmspec) + 2);
+ name[0] = '*';
+ strcpy (&name[1], asmspec);
+ }
+
+ /* For a duplicate declaration, we can be called twice on the
+ same DECL node. Don't discard the RTL already made. */
+ if (DECL_RTL (decl) == 0)
+ {
+ DECL_RTL (decl) = 0;
+
+ /* First detect errors in declaring global registers. */
+ if (DECL_REGISTER (decl) && reg_number == -1)
+ error_with_decl (decl,
+ "register name not specified for `%s'");
+ else if (DECL_REGISTER (decl) && reg_number < 0)
+ error_with_decl (decl,
+ "invalid register name for `%s'");
+ else if ((reg_number >= 0 || reg_number == -3) && ! DECL_REGISTER (decl))
+ error_with_decl (decl,
+ "register name given for non-register variable `%s'");
+ else if (DECL_REGISTER (decl) && TREE_CODE (decl) == FUNCTION_DECL)
+ error ("function declared `register'");
+ else if (DECL_REGISTER (decl) && TYPE_MODE (TREE_TYPE (decl)) == BLKmode)
+ error_with_decl (decl, "data type of `%s' isn't suitable for a register");
+ else if (DECL_REGISTER (decl)
+ && ! HARD_REGNO_MODE_OK (reg_number, TYPE_MODE (TREE_TYPE (decl))))
+ error_with_decl (decl, "register number for `%s' isn't suitable for the data type");
+ /* Now handle properly declared static register variables. */
+ else if (DECL_REGISTER (decl))
+ {
+ int nregs;
+#if 0 /* yylex should print the warning for this */
+ if (pedantic)
+ pedwarn ("ANSI C forbids global register variables");
+#endif
+ if (DECL_INITIAL (decl) != 0 && top_level)
+ {
+ DECL_INITIAL (decl) = 0;
+ error ("global register variable has initial value");
+ }
+ if (fixed_regs[reg_number] == 0
+ && function_defined && top_level)
+ error ("global register variable follows a function definition");
+ if (TREE_THIS_VOLATILE (decl))
+ warning ("volatile register variables don't work as you might wish");
+
+ /* If the user specified one of the eliminable registers here,
+ e.g., FRAME_POINTER_REGNUM, we don't want to get this variable
+ confused with that register and be eliminated. Although this
+ usage is somewhat suspect, we nevertheless use the following
+ kludge to avoid setting DECL_RTL to frame_pointer_rtx. */
+
+ DECL_RTL (decl)
+ = gen_rtx (REG, DECL_MODE (decl), FIRST_PSEUDO_REGISTER);
+ REGNO (DECL_RTL (decl)) = reg_number;
+ REG_USERVAR_P (DECL_RTL (decl)) = 1;
+
+ if (top_level)
+ {
+ /* Make this register global, so not usable for anything
+ else. */
+ nregs = HARD_REGNO_NREGS (reg_number, DECL_MODE (decl));
+ while (nregs > 0)
+ globalize_reg (reg_number + --nregs);
+ }
+ }
+ /* Specifying a section attribute on an uninitialized variable does not
+ (and cannot) cause it to be put in the given section. The linker
+ can only put initialized objects in specific sections; everything
+ else goes in bss for the linker to sort out later (otherwise the
+ linker would give a duplicate definition error for each compilation
+ unit that did this). So warn the user. */
+ else if (TREE_CODE (decl) == VAR_DECL
+ && DECL_SECTION_NAME (decl) != NULL_TREE
+ && DECL_INITIAL (decl) == NULL_TREE)
+ {
+ warning_with_decl (decl,
+ "section attribute ignored for uninitialized variable `%s'");
+ /* Remove the section name so subsequent declarations won't see it.
+ We are ignoring it, remember. */
+ DECL_SECTION_NAME (decl) = NULL_TREE;
+ }
+
+ /* Now handle ordinary static variables and functions (in memory).
+ Also handle vars declared register invalidly. */
+ if (DECL_RTL (decl) == 0)
+ {
+ /* Can't use just the variable's own name for a variable
+ whose scope is less than the whole file.
+ Concatenate a distinguishing number. */
+ if (!top_level && !DECL_EXTERNAL (decl) && asmspec == 0)
+ {
+ char *label;
+
+ ASM_FORMAT_PRIVATE_NAME (label, name, var_labelno);
+ name = obstack_copy0 (saveable_obstack, label, strlen (label));
+ var_labelno++;
+ }
+
+ if (name == 0)
+ abort ();
+
+ DECL_RTL (decl) = gen_rtx (MEM, DECL_MODE (decl),
+ gen_rtx (SYMBOL_REF, Pmode, name));
+
+ /* If this variable is to be treated as volatile, show its
+ tree node has side effects. If it has side effects, either
+ because of this test or from TREE_THIS_VOLATILE also
+ being set, show the MEM is volatile. */
+ if (flag_volatile_global && TREE_CODE (decl) == VAR_DECL
+ && TREE_PUBLIC (decl))
+ TREE_SIDE_EFFECTS (decl) = 1;
+ if (TREE_SIDE_EFFECTS (decl))
+ MEM_VOLATILE_P (DECL_RTL (decl)) = 1;
+
+ if (TREE_READONLY (decl))
+ RTX_UNCHANGING_P (DECL_RTL (decl)) = 1;
+ MEM_IN_STRUCT_P (DECL_RTL (decl))
+ = AGGREGATE_TYPE_P (TREE_TYPE (decl));
+
+ /* Optionally set flags or add text to the name to record information
+ such as that it is a function name.
+ If the name is changed, the macro ASM_OUTPUT_LABELREF
+ will have to know how to strip this information. */
+#ifdef ENCODE_SECTION_INFO
+ ENCODE_SECTION_INFO (decl);
+#endif
+ }
+ }
+ /* If the old RTL had the wrong mode, fix the mode. */
+ else if (GET_MODE (DECL_RTL (decl)) != DECL_MODE (decl))
+ {
+ rtx rtl = DECL_RTL (decl);
+ PUT_MODE (rtl, DECL_MODE (decl));
+ }
+}
+
+/* Make the rtl for variable VAR be volatile.
+ Use this only for static variables. */
+
+void
+make_var_volatile (var)
+ tree var;
+{
+ if (GET_CODE (DECL_RTL (var)) != MEM)
+ abort ();
+
+ MEM_VOLATILE_P (DECL_RTL (var)) = 1;
+}
+
+/* Output alignment directive to align for constant expression EXP. */
+
+void
+assemble_constant_align (exp)
+ tree exp;
+{
+ int align;
+
+ /* Align the location counter as required by EXP's data type. */
+ align = TYPE_ALIGN (TREE_TYPE (exp));
+#ifdef CONSTANT_ALIGNMENT
+ align = CONSTANT_ALIGNMENT (exp, align);
+#endif
+
+ if (align > BITS_PER_UNIT)
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
+}
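+
+/* For instance, a constant whose type is aligned to 64 bits causes
+ ASM_OUTPUT_ALIGN to be called with floor_log2 (64 / 8) = 3; on
+ targets where the operand is a power-of-two exponent this emits
+ something like ".align 3". */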
+
+/* Output a string of literal assembler code
+ for an `asm' keyword used between functions. */
+
+void
+assemble_asm (string)
+ tree string;
+{
+ if (output_bytecode)
+ {
+ error ("asm statements not allowed in interpreter");
+ return;
+ }
+
+ app_enable ();
+
+ if (TREE_CODE (string) == ADDR_EXPR)
+ string = TREE_OPERAND (string, 0);
+
+ fprintf (asm_out_file, "\t%s\n", TREE_STRING_POINTER (string));
+}
+
+#if 0 /* This should no longer be needed, because
+ flag_gnu_linker should be 0 on these systems,
+ which should prevent any output
+ if ASM_OUTPUT_CONSTRUCTOR and ASM_OUTPUT_DESTRUCTOR are absent. */
+#if !(defined(DBX_DEBUGGING_INFO) && !defined(FASCIST_ASSEMBLER))
+#ifndef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(file, name)
+#endif
+#ifndef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(file, name)
+#endif
+#endif
+#endif /* 0 */
+
+/* Record an element in the table of global destructors.
+ How this is done depends on what sort of assembler and linker
+ are in use.
+
+ NAME should be the name of a global function to be called
+ at exit time. This name is output using assemble_name. */
+
+void
+assemble_destructor (name)
+ char *name;
+{
+#ifdef ASM_OUTPUT_DESTRUCTOR
+ ASM_OUTPUT_DESTRUCTOR (asm_out_file, name);
+#else
+ if (flag_gnu_linker)
+ {
+ /* Now tell GNU LD that this is part of the static destructor set. */
+ /* This code works for any machine provided you use GNU as/ld. */
+ fprintf (asm_out_file, "%s \"___DTOR_LIST__\",22,0,0,", ASM_STABS_OP);
+ assemble_name (asm_out_file, name);
+ fputc ('\n', asm_out_file);
+ }
+#endif
+}
+
+/* Likewise for global constructors. */
+
+void
+assemble_constructor (name)
+ char *name;
+{
+#ifdef ASM_OUTPUT_CONSTRUCTOR
+ ASM_OUTPUT_CONSTRUCTOR (asm_out_file, name);
+#else
+ if (flag_gnu_linker)
+ {
+ /* Now tell GNU LD that this is part of the static constructor set. */
+ /* This code works for any machine provided you use GNU as/ld. */
+ fprintf (asm_out_file, "%s \"___CTOR_LIST__\",22,0,0,", ASM_STABS_OP);
+ assemble_name (asm_out_file, name);
+ fputc ('\n', asm_out_file);
+ }
+#endif
+}
+
+/* Likewise for entries we want to record for garbage collection.
+ Garbage collection is still under development. */
+
+void
+assemble_gc_entry (name)
+ char *name;
+{
+#ifdef ASM_OUTPUT_GC_ENTRY
+ ASM_OUTPUT_GC_ENTRY (asm_out_file, name);
+#else
+ if (flag_gnu_linker)
+ {
+ /* Now tell GNU LD that this is part of the static constructor set. */
+ fprintf (asm_out_file, "%s \"___PTR_LIST__\",22,0,0,", ASM_STABS_OP);
+ assemble_name (asm_out_file, name);
+ fputc ('\n', asm_out_file);
+ }
+#endif
+}
+
+/* Output assembler code for the constant pool of a function, and code
+ associated with defining the name of the function. DECL describes the
+ function. NAME is the function's name. For the constant pool, we use
+ the current constant pool data. */
+
+void
+assemble_start_function (decl, fnname)
+ tree decl;
+ char *fnname;
+{
+ int align;
+
+ /* The following code does not need preprocessing in the assembler. */
+
+ app_disable ();
+
+ output_constant_pool (fnname, decl);
+
+ if (IN_NAMED_SECTION (decl))
+ named_section (TREE_STRING_POINTER (DECL_SECTION_NAME (decl)));
+ else
+ text_section ();
+
+ /* Tell assembler to move to target machine's alignment for functions. */
+ align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
+ if (align > 0)
+ {
+ if (output_bytecode)
+ BC_OUTPUT_ALIGN (asm_out_file, align);
+ else
+ ASM_OUTPUT_ALIGN (asm_out_file, align);
+ }
+
+#ifdef ASM_OUTPUT_FUNCTION_PREFIX
+ ASM_OUTPUT_FUNCTION_PREFIX (asm_out_file, fnname);
+#endif
+
+#ifdef SDB_DEBUGGING_INFO
+ /* Output SDB definition of the function. */
+ if (write_symbols == SDB_DEBUG)
+ sdbout_mark_begin_function ();
+#endif
+
+#ifdef DBX_DEBUGGING_INFO
+ /* Output DBX definition of the function. */
+ if (write_symbols == DBX_DEBUG)
+ dbxout_begin_function (decl);
+#endif
+
+ /* Make function name accessible from other files, if appropriate. */
+
+ if (TREE_PUBLIC (decl))
+ {
+ if (!first_global_object_name)
+ STRIP_NAME_ENCODING (first_global_object_name, fnname);
+ if (output_bytecode)
+ BC_GLOBALIZE_LABEL (asm_out_file, fnname);
+ else
+ ASM_GLOBALIZE_LABEL (asm_out_file, fnname);
+ }
+
+ /* Do any machine/system dependent processing of the function name. */
+#ifdef ASM_DECLARE_FUNCTION_NAME
+ ASM_DECLARE_FUNCTION_NAME (asm_out_file, fnname, current_function_decl);
+#else
+ /* The standard thing is just to output a label for the function. */
+ if (output_bytecode)
+ BC_OUTPUT_LABEL (asm_out_file, fnname);
+ else
+ ASM_OUTPUT_LABEL (asm_out_file, fnname);
+#endif /* ASM_DECLARE_FUNCTION_NAME */
+}
+
+/* Output assembler code associated with defining the size of the
+ function. DECL describes the function. NAME is the function's name. */
+
+void
+assemble_end_function (decl, fnname)
+ tree decl;
+ char *fnname;
+{
+#ifdef ASM_DECLARE_FUNCTION_SIZE
+ ASM_DECLARE_FUNCTION_SIZE (asm_out_file, fnname, decl);
+#endif
+}
+
+/* Assemble code to leave SIZE bytes of zeros. */
+
+void
+assemble_zeros (size)
+ int size;
+{
+ if (output_bytecode)
+ {
+ bc_emit_const_skip (size);
+ return;
+ }
+
+#ifdef ASM_NO_SKIP_IN_TEXT
+ /* The `space' pseudo in the text section outputs nop insns rather than 0s,
+ so we must output 0s explicitly in the text section. */
+ if (ASM_NO_SKIP_IN_TEXT && in_text_section ())
+ {
+ int i;
+
+ for (i = 0; i < size - 20; i += 20)
+ {
+#ifdef ASM_BYTE_OP
+ fprintf (asm_out_file,
+ "%s 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n", ASM_BYTE_OP);
+#else
+ fprintf (asm_out_file,
+ "\tbyte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n");
+#endif
+ }
+ if (i < size)
+ {
+#ifdef ASM_BYTE_OP
+ fprintf (asm_out_file, "%s 0", ASM_BYTE_OP);
+#else
+ fprintf (asm_out_file, "\tbyte 0");
+#endif
+ i++;
+ for (; i < size; i++)
+ fprintf (asm_out_file, ",0");
+ fprintf (asm_out_file, "\n");
+ }
+ }
+ else
+#endif
+ if (size > 0)
+ {
+ if (output_bytecode)
+ BC_OUTPUT_SKIP (asm_out_file, size);
+ else
+ ASM_OUTPUT_SKIP (asm_out_file, size);
+ }
+}
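+
+/* Example of the ASM_NO_SKIP_IN_TEXT path above: a request for 25
+ zero bytes in the text section emits one line of twenty "0"s
+ followed by a second line with the remaining five, rather than a
+ `space' directive that the assembler would fill with nops. */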
+
+/* Assemble an alignment pseudo op for an ALIGN-bit boundary. */
+
+void
+assemble_align (align)
+ int align;
+{
+ if (align > BITS_PER_UNIT)
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
+}
+
+/* Assemble a string constant with the specified C string as contents. */
+
+void
+assemble_string (p, size)
+ char *p;
+ int size;
+{
+ register int i;
+ int pos = 0;
+ int maximum = 2000;
+
+ if (output_bytecode)
+ {
+ bc_emit (p, size);
+ return;
+ }
+
+ /* If the string is very long, split it up. */
+
+ while (pos < size)
+ {
+ int thissize = size - pos;
+ if (thissize > maximum)
+ thissize = maximum;
+
+ if (output_bytecode)
+ bc_output_ascii (asm_out_file, p, thissize);
+ else
+ {
+ ASM_OUTPUT_ASCII (asm_out_file, p, thissize);
+ }
+
+ pos += thissize;
+ p += thissize;
+ }
+}
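+
+/* For example, a 5000-byte string constant is emitted as three
+ ASM_OUTPUT_ASCII calls of 2000, 2000 and 1000 bytes, since the loop
+ above caps each piece at `maximum' (2000) bytes. */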
+
+static void
+bc_output_ascii (file, p, size)
+ FILE *file;
+ char *p;
+ int size;
+{
+ BC_OUTPUT_ASCII (file, p, size);
+}
+
+/* Assemble everything that is needed for a variable or function declaration.
+ Not used for automatic variables, and not used for function definitions.
+ Should not be called for variables of incomplete structure type.
+
+ TOP_LEVEL is nonzero if this variable has file scope.
+ AT_END is nonzero if this is the special handling, at end of compilation,
+ to define things that have had only tentative definitions.
+ DONT_OUTPUT_DATA if nonzero means don't actually output the
+ initial value (that will be done by the caller). */
+
+void
+assemble_variable (decl, top_level, at_end, dont_output_data)
+ tree decl;
+ int top_level;
+ int at_end;
+ int dont_output_data;
+{
+ register char *name;
+ int align;
+ tree size_tree;
+ int reloc = 0;
+ enum in_section saved_in_section;
+
+ last_assemble_variable_decl = 0;
+
+ if (output_bytecode)
+ return;
+
+ if (GET_CODE (DECL_RTL (decl)) == REG)
+ {
+ /* Do output symbol info for global register variables, but do nothing
+ else for them. */
+
+ if (TREE_ASM_WRITTEN (decl))
+ return;
+ TREE_ASM_WRITTEN (decl) = 1;
+
+ if (!output_bytecode)
+ {
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ /* File-scope global variables are output here. */
+ if ((write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+ && top_level)
+ dbxout_symbol (decl, 0);
+#endif
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG && top_level
+ /* Leave initialized global vars for end of compilation;
+ see comment in compile_file. */
+ && (TREE_PUBLIC (decl) == 0 || DECL_INITIAL (decl) == 0))
+ sdbout_symbol (decl, 0);
+#endif
+ }
+
+ /* Don't output any DWARF debugging information for variables here.
+ In the case of local variables, the information for them is output
+ when we do our recursive traversal of the tree representation for
+ the entire containing function. In the case of file-scope variables,
+ we output information for all of them at the very end of compilation
+ while we are doing our final traversal of the chain of file-scope
+ declarations. */
+
+ return;
+ }
+
+ /* Normally no need to say anything here for external references,
+ since assemble_external is called by the language-specific code
+ when a declaration is first seen. */
+
+ if (DECL_EXTERNAL (decl))
+ return;
+
+ /* Output no assembler code for a function declaration.
+ Only definitions of functions output anything. */
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ return;
+
+ /* If type was incomplete when the variable was declared,
+ see if it is complete now. */
+
+ if (DECL_SIZE (decl) == 0)
+ layout_decl (decl, 0);
+
+ /* Still incomplete => don't allocate it; treat the tentative defn
+ (which is what it must have been) as an `extern' reference. */
+
+ if (!dont_output_data && DECL_SIZE (decl) == 0)
+ {
+ error_with_file_and_line (DECL_SOURCE_FILE (decl),
+ DECL_SOURCE_LINE (decl),
+ "storage size of `%s' isn't known",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ TREE_ASM_WRITTEN (decl) = 1;
+ return;
+ }
+
+ /* The first declaration of a variable that comes through this function
+ decides whether it is global (in C, has external linkage)
+ or local (in C, has internal linkage). So do nothing more
+ if this function has already run. */
+
+ if (TREE_ASM_WRITTEN (decl))
+ return;
+
+ TREE_ASM_WRITTEN (decl) = 1;
+
+ /* If storage size is erroneously variable, just continue.
+ Error message was already made. */
+
+ if (DECL_SIZE (decl))
+ {
+ if (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
+ goto finish;
+
+ app_disable ();
+
+ /* This is better than explicit arithmetic, since it avoids overflow. */
+ size_tree = size_binop (CEIL_DIV_EXPR,
+ DECL_SIZE (decl), size_int (BITS_PER_UNIT));
+
+ if (TREE_INT_CST_HIGH (size_tree) != 0)
+ {
+ error_with_decl (decl, "size of variable `%s' is too large");
+ goto finish;
+ }
+ }
+
+ name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
+
+ /* Handle uninitialized definitions. */
+
+ /* ANSI specifies that a tentative definition which is not merged with
+ a non-tentative definition behaves exactly like a definition with an
+ initializer equal to zero. (Section 3.7.2)
+ -fno-common gives strict ANSI behavior. Usually you don't want it.
+ This matters only for variables with external linkage. */
+ if ((! flag_no_common || ! TREE_PUBLIC (decl))
+ && DECL_COMMON (decl)
+ && ! dont_output_data
+ && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
+ {
+ int size = TREE_INT_CST_LOW (size_tree);
+ int rounded = size;
+
+ if (TREE_INT_CST_HIGH (size_tree) != 0)
+ error_with_decl (decl, "size of variable `%s' is too large");
+ /* Don't allocate zero bytes of common,
+ since that means "undefined external" in the linker. */
+ if (size == 0) rounded = 1;
+ /* Round size up to multiple of BIGGEST_ALIGNMENT bits
+ so that each uninitialized object starts on such a boundary. */
+ rounded += (BIGGEST_ALIGNMENT / BITS_PER_UNIT) - 1;
+ rounded = (rounded / (BIGGEST_ALIGNMENT / BITS_PER_UNIT)
+ * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
+
+#ifdef DBX_DEBUGGING_INFO
+ /* File-scope global variables are output here. */
+ if (write_symbols == DBX_DEBUG && top_level)
+ dbxout_symbol (decl, 0);
+#endif
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG && top_level
+ /* Leave initialized global vars for end of compilation;
+ see comment in compile_file. */
+ && (TREE_PUBLIC (decl) == 0 || DECL_INITIAL (decl) == 0))
+ sdbout_symbol (decl, 0);
+#endif
+
+ /* Don't output any DWARF debugging information for variables here.
+ In the case of local variables, the information for them is output
+ when we do our recursive traversal of the tree representation for
+ the entire containing function. In the case of file-scope variables,
+ we output information for all of them at the very end of compilation
+ while we are doing our final traversal of the chain of file-scope
+ declarations. */
+
+#if 0
+ if (flag_shared_data)
+ data_section ();
+#endif
+ if (TREE_PUBLIC (decl))
+ {
+#ifdef ASM_OUTPUT_SHARED_COMMON
+ if (flag_shared_data)
+ ASM_OUTPUT_SHARED_COMMON (asm_out_file, name, size, rounded);
+ else
+#endif
+ if (output_bytecode)
+ {
+ BC_OUTPUT_COMMON (asm_out_file, name, size, rounded);
+ }
+ else
+ {
+#ifdef ASM_OUTPUT_ALIGNED_COMMON
+ ASM_OUTPUT_ALIGNED_COMMON (asm_out_file, name, size,
+ DECL_ALIGN (decl));
+#else
+ ASM_OUTPUT_COMMON (asm_out_file, name, size, rounded);
+#endif
+ }
+ }
+ else
+ {
+#ifdef ASM_OUTPUT_SHARED_LOCAL
+ if (flag_shared_data)
+ ASM_OUTPUT_SHARED_LOCAL (asm_out_file, name, size, rounded);
+ else
+#endif
+ if (output_bytecode)
+ {
+ BC_OUTPUT_LOCAL (asm_out_file, name, size, rounded);
+ }
+ else
+ {
+#ifdef ASM_OUTPUT_ALIGNED_LOCAL
+ ASM_OUTPUT_ALIGNED_LOCAL (asm_out_file, name, size,
+ DECL_ALIGN (decl));
+#else
+ ASM_OUTPUT_LOCAL (asm_out_file, name, size, rounded);
+#endif
+ }
+ }
+ goto finish;
+ }
+
+ /* Handle initialized definitions. */
+
+ /* First make the assembler name(s) global if appropriate. */
+ if (TREE_PUBLIC (decl) && DECL_NAME (decl))
+ {
+ if (!first_global_object_name)
+ STRIP_NAME_ENCODING(first_global_object_name, name);
+ ASM_GLOBALIZE_LABEL (asm_out_file, name);
+ }
+#if 0
+ for (d = equivalents; d; d = TREE_CHAIN (d))
+ {
+ tree e = TREE_VALUE (d);
+ if (TREE_PUBLIC (e) && DECL_NAME (e))
+ ASM_GLOBALIZE_LABEL (asm_out_file,
+ XSTR (XEXP (DECL_RTL (e), 0), 0));
+ }
+#endif
+
+ /* Output any data that we will need to use the address of. */
+ if (DECL_INITIAL (decl) == error_mark_node)
+ reloc = contains_pointers_p (TREE_TYPE (decl));
+ else if (DECL_INITIAL (decl))
+ reloc = output_addressed_constants (DECL_INITIAL (decl));
+
+ /* Switch to the proper section for this data. */
+ if (IN_NAMED_SECTION (decl))
+ named_section (TREE_STRING_POINTER (DECL_SECTION_NAME (decl)));
+ else
+ {
+ /* C++ can have const variables that get initialized from constructors,
+ and thus can not be in a readonly section. We prevent this by
+ verifying that the initial value is constant for objects put in a
+ readonly section.
+
+ error_mark_node is used by the C front end to indicate that the
+ initializer has not been seen yet. In this case, we assume that
+ the initializer must be constant. */
+#ifdef SELECT_SECTION
+ SELECT_SECTION (decl, reloc);
+#else
+ if (TREE_READONLY (decl)
+ && ! TREE_THIS_VOLATILE (decl)
+ && DECL_INITIAL (decl)
+ && (DECL_INITIAL (decl) == error_mark_node
+ || TREE_CONSTANT (DECL_INITIAL (decl)))
+ && ! (flag_pic && reloc))
+ readonly_data_section ();
+ else
+ data_section ();
+#endif
+ }
+
+ /* dbxout.c needs to know this. */
+ if (in_text_section ())
+ DECL_IN_TEXT_SECTION (decl) = 1;
+
+ /* Record current section so we can restore it if dbxout.c clobbers it. */
+ saved_in_section = in_section;
+
+ /* Output the dbx info now that we have chosen the section. */
+
+#ifdef DBX_DEBUGGING_INFO
+ /* File-scope global variables are output here. */
+ if (write_symbols == DBX_DEBUG && top_level)
+ dbxout_symbol (decl, 0);
+#endif
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG && top_level
+ /* Leave initialized global vars for end of compilation;
+ see comment in compile_file. */
+ && (TREE_PUBLIC (decl) == 0 || DECL_INITIAL (decl) == 0))
+ sdbout_symbol (decl, 0);
+#endif
+
+ /* Don't output any DWARF debugging information for variables here.
+ In the case of local variables, the information for them is output
+ when we do our recursive traversal of the tree representation for
+ the entire containing function. In the case of file-scope variables,
+ we output information for all of them at the very end of compilation
+ while we are doing our final traversal of the chain of file-scope
+ declarations. */
+
+ /* If the debugging output changed sections, reselect the section
+ that's supposed to be selected. */
+ if (in_section != saved_in_section)
+ {
+ /* Switch to the proper section for this data. */
+#ifdef SELECT_SECTION
+ SELECT_SECTION (decl, reloc);
+#else
+ if (TREE_READONLY (decl)
+ && ! TREE_THIS_VOLATILE (decl)
+ && DECL_INITIAL (decl)
+ && (DECL_INITIAL (decl) == error_mark_node
+ || TREE_CONSTANT (DECL_INITIAL (decl)))
+ && ! (flag_pic && reloc))
+ readonly_data_section ();
+ else
+ data_section ();
+#endif
+ }
+
+ /* Compute and output the alignment of this data. */
+
+ align = DECL_ALIGN (decl);
+ /* In the case of initializing an array whose length isn't specified,
+ where we have not yet been able to do the layout,
+ figure out the proper alignment now. */
+ if (dont_output_data && DECL_SIZE (decl) == 0
+ && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
+ align = MAX (align, TYPE_ALIGN (TREE_TYPE (TREE_TYPE (decl))));
+
+ /* Some object file formats have a maximum alignment which they support.
+ In particular, a.out format supports a maximum alignment of 4. */
+#ifndef MAX_OFILE_ALIGNMENT
+#define MAX_OFILE_ALIGNMENT BIGGEST_ALIGNMENT
+#endif
+ if (align > MAX_OFILE_ALIGNMENT)
+ {
+ warning_with_decl (decl,
+ "alignment of `%s' is greater than maximum object file alignment");
+ align = MAX_OFILE_ALIGNMENT;
+ }
+#ifdef DATA_ALIGNMENT
+ /* On some machines, it is good to increase alignment sometimes. */
+ align = DATA_ALIGNMENT (TREE_TYPE (decl), align);
+#endif
+#ifdef CONSTANT_ALIGNMENT
+ if (DECL_INITIAL (decl))
+ align = CONSTANT_ALIGNMENT (DECL_INITIAL (decl), align);
+#endif
+
+ /* Reset the alignment in case we have made it tighter, so we can benefit
+ from it in get_pointer_alignment. */
+ DECL_ALIGN (decl) = align;
+
+ if (align > BITS_PER_UNIT)
+ {
+ if (output_bytecode)
+ BC_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
+ else
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
+ }
+
+ /* Do any machine/system dependent processing of the object. */
+#ifdef ASM_DECLARE_OBJECT_NAME
+ last_assemble_variable_decl = decl;
+ ASM_DECLARE_OBJECT_NAME (asm_out_file, name, decl);
+#else
+ /* The standard thing is just to output a label for the object. */
+ if (output_bytecode)
+ BC_OUTPUT_LABEL (asm_out_file, name);
+ else
+ ASM_OUTPUT_LABEL (asm_out_file, name);
+#endif /* ASM_DECLARE_OBJECT_NAME */
+
+ if (!dont_output_data)
+ {
+ if (DECL_INITIAL (decl))
+ /* Output the actual data. */
+ output_constant (DECL_INITIAL (decl),
+ int_size_in_bytes (TREE_TYPE (decl)));
+ else
+ /* Leave space for it. */
+ assemble_zeros (int_size_in_bytes (TREE_TYPE (decl)));
+ }
+
+ finish:
+#ifdef XCOFF_DEBUGGING_INFO
+ /* Unfortunately, the IBM assembler cannot handle stabx before the actual
+ declaration. When something like ".stabx "aa:S-2",aa,133,0" is emitted
+ and `aa' hasn't been output yet, the assembler generates a stab entry with
+ a value of zero, in addition to creating an unnecessary external entry
+ for `aa'. Hence, we must postpone dbxout_symbol to here at the end. */
+
+ /* File-scope global variables are output here. */
+ if (write_symbols == XCOFF_DEBUG && top_level)
+ {
+ saved_in_section = in_section;
+
+ dbxout_symbol (decl, 0);
+
+ if (in_section != saved_in_section)
+ {
+ /* Switch to the proper section for this data. */
+#ifdef SELECT_SECTION
+ SELECT_SECTION (decl, reloc);
+#else
+ if (TREE_READONLY (decl)
+ && ! TREE_THIS_VOLATILE (decl)
+ && DECL_INITIAL (decl)
+ && (DECL_INITIAL (decl) == error_mark_node
+ || TREE_CONSTANT (DECL_INITIAL (decl)))
+ && ! (flag_pic && reloc))
+ readonly_data_section ();
+ else
+ data_section ();
+#endif
+ }
+ }
+#else
+ /* There must be a statement after a label. */
+ ;
+#endif
+}
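+
+/* Worked example of the common-block rounding above: with
+ BIGGEST_ALIGNMENT of 32 bits (4-byte units), a 10-byte uninitialized
+ object gets rounded = 12, and a zero-sized object is first bumped to
+ 1 byte and then rounded to 4, so the linker never sees a zero-length
+ common (which would mean "undefined external"). The rounded size is
+ what the non-aligned ASM_OUTPUT_COMMON/ASM_OUTPUT_LOCAL macros
+ receive. */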
+
+/* Return 1 if type TYPE contains any pointers. */
+
+static int
+contains_pointers_p (type)
+ tree type;
+{
+ switch (TREE_CODE (type))
+ {
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* I'm not sure whether OFFSET_TYPE needs this treatment,
+ so I'll play safe and return 1. */
+ case OFFSET_TYPE:
+ return 1;
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ {
+ tree fields;
+ /* For a type that has fields, see if the fields have pointers. */
+ for (fields = TYPE_FIELDS (type); fields; fields = TREE_CHAIN (fields))
+ if (TREE_CODE (fields) == FIELD_DECL
+ && contains_pointers_p (TREE_TYPE (fields)))
+ return 1;
+ return 0;
+ }
+
+ case ARRAY_TYPE:
+ /* An array type contains pointers if its element type does. */
+ return contains_pointers_p (TREE_TYPE (type));
+
+ default:
+ return 0;
+ }
+}
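+
+/* For example, `struct s { int n; char *p; }' yields 1 via the
+ RECORD_TYPE case, an array of such structs yields 1 through the
+ ARRAY_TYPE recursion, and `int [10]' yields 0. */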
+
+/* Output text storage for constructor CONSTR. Returns rtx of
+ storage. */
+
+rtx
+bc_output_constructor (constr)
+ tree constr;
+{
+ int i;
+
+ /* Must always be a literal; non-literal constructors are handled
+ differently. */
+
+ if (!TREE_CONSTANT (constr))
+ abort ();
+
+ /* Always const */
+ text_section ();
+
+ /* Align */
+ for (i = 0; TYPE_ALIGN (constr) >= BITS_PER_UNIT << (i + 1); i++);
+ if (i > 0)
+ BC_OUTPUT_ALIGN (asm_out_file, i);
+
+ /* Output data */
+ output_constant (constr, int_size_in_bytes (TREE_TYPE (constr)));
+}
+
+
+/* Create storage for constructor CONSTR. */
+
+void
+bc_output_data_constructor (constr)
+ tree constr;
+{
+ int i;
+
+ /* Put in data section */
+ data_section ();
+
+ /* Align */
+ for (i = 0; TYPE_ALIGN (constr) >= BITS_PER_UNIT << (i + 1); i++);
+ if (i > 0)
+ BC_OUTPUT_ALIGN (asm_out_file, i);
+
+ /* The constructor is filled in at runtime. */
+ BC_OUTPUT_SKIP (asm_out_file, int_size_in_bytes (TREE_TYPE (constr)));
+}
+
+
+/* Output something to declare an external symbol to the assembler.
+ (Most assemblers don't need this, so we normally output nothing.)
+ Do nothing if DECL is not external. */
+
+void
+assemble_external (decl)
+ tree decl;
+{
+ if (output_bytecode)
+ return;
+
+#ifdef ASM_OUTPUT_EXTERNAL
+ if (TREE_CODE_CLASS (TREE_CODE (decl)) == 'd'
+ && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl))
+ {
+ rtx rtl = DECL_RTL (decl);
+
+ if (GET_CODE (rtl) == MEM && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
+ && ! SYMBOL_REF_USED (XEXP (rtl, 0)))
+ {
+ /* Some systems do require some output. */
+ SYMBOL_REF_USED (XEXP (rtl, 0)) = 1;
+ ASM_OUTPUT_EXTERNAL (asm_out_file, decl, XSTR (XEXP (rtl, 0), 0));
+ }
+ }
+#endif
+}
+
+/* Similar, for calling a library function FUN. */
+
+void
+assemble_external_libcall (fun)
+ rtx fun;
+{
+#ifdef ASM_OUTPUT_EXTERNAL_LIBCALL
+ if (!output_bytecode)
+ {
+ /* Declare library function name external when first used, if necessary. */
+ if (! SYMBOL_REF_USED (fun))
+ {
+ SYMBOL_REF_USED (fun) = 1;
+ ASM_OUTPUT_EXTERNAL_LIBCALL (asm_out_file, fun);
+ }
+ }
+#endif
+}
+
+/* Declare the label NAME global. */
+
+void
+assemble_global (name)
+ char *name;
+{
+ ASM_GLOBALIZE_LABEL (asm_out_file, name);
+}
+
+/* Assemble a label named NAME. */
+
+void
+assemble_label (name)
+ char *name;
+{
+ if (output_bytecode)
+ BC_OUTPUT_LABEL (asm_out_file, name);
+ else
+ ASM_OUTPUT_LABEL (asm_out_file, name);
+}
+
+/* Output to FILE a reference to the assembler name of a C-level name NAME.
+ If NAME starts with a *, the rest of NAME is output verbatim.
+ Otherwise NAME is transformed in an implementation-defined way
+ (usually by the addition of an underscore).
+ Many macros in the tm file are defined to call this function. */
+
+void
+assemble_name (file, name)
+ FILE *file;
+ char *name;
+{
+ char *real_name;
+
+ STRIP_NAME_ENCODING (real_name, name);
+ TREE_SYMBOL_REFERENCED (get_identifier (real_name)) = 1;
+
+ if (name[0] == '*')
+ {
+ if (output_bytecode)
+ bc_emit_labelref (name);
+ else
+ fputs (&name[1], file);
+ }
+ else
+ {
+ if (output_bytecode)
+ BC_OUTPUT_LABELREF (file, name);
+ else
+ ASM_OUTPUT_LABELREF (file, name);
+ }
+}
+
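+/* Concretely: in the non-bytecode path, on a target whose
+ ASM_OUTPUT_LABELREF prepends an underscore (the common convention),
+ the C-level name "foo" comes out as "_foo", while the pre-encoded
+ name "*bar" bypasses the transformation and comes out verbatim as
+ "bar". */
+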
+/* Allocate SIZE bytes writable static space with a gensym name
+ and return an RTX to refer to its address. */
+
+rtx
+assemble_static_space (size)
+ int size;
+{
+ char name[12];
+ char *namestring;
+ rtx x;
+ /* Round size up to multiple of BIGGEST_ALIGNMENT bits
+ so that each uninitialized object starts on such a boundary. */
+ int rounded = ((size + (BIGGEST_ALIGNMENT / BITS_PER_UNIT) - 1)
+ / (BIGGEST_ALIGNMENT / BITS_PER_UNIT)
+ * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));
+
+#if 0
+ if (flag_shared_data)
+ data_section ();
+#endif
+
+ ASM_GENERATE_INTERNAL_LABEL (name, "LF", const_labelno);
+ ++const_labelno;
+
+ namestring = (char *) obstack_alloc (saveable_obstack,
+ strlen (name) + 2);
+ strcpy (namestring, name);
+
+ if (output_bytecode)
+ x = bc_gen_rtx (namestring, 0, (struct bc_label *) 0);
+ else
+ x = gen_rtx (SYMBOL_REF, Pmode, namestring);
+
+ if (output_bytecode)
+ {
+ BC_OUTPUT_LOCAL (asm_out_file, name, size, rounded);
+ }
+ else
+ {
+#ifdef ASM_OUTPUT_ALIGNED_LOCAL
+ ASM_OUTPUT_ALIGNED_LOCAL (asm_out_file, name, size, BIGGEST_ALIGNMENT);
+#else
+ ASM_OUTPUT_LOCAL (asm_out_file, name, size, rounded);
+#endif
+ }
+ return x;
+}
+
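+/* A note on the rounding above: ((SIZE + A - 1) / A) * A is the
+ usual round-up-to-a-multiple idiom. With BIGGEST_ALIGNMENT of 32
+ bits (A == 4 bytes) and SIZE == 10:
+
+ rounded = ((10 + 4 - 1) / 4) * 4 = (13 / 4) * 4 = 12
+
+ so each uninitialized object begins on a 4-byte boundary. */
+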
+/* Assemble the static constant template for function entry trampolines.
+ This is done at most once per compilation.
+ Returns an RTX for the address of the template. */
+
+rtx
+assemble_trampoline_template ()
+{
+ char label[256];
+ char *name;
+ int align;
+
+ /* Shouldn't get here */
+ if (output_bytecode)
+ abort ();
+
+ /* By default, put trampoline templates in read-only data section. */
+
+#ifdef TRAMPOLINE_SECTION
+ TRAMPOLINE_SECTION ();
+#else
+ readonly_data_section ();
+#endif
+
+ /* Write the assembler code to define one. */
+ align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
+ if (align > 0)
+ ASM_OUTPUT_ALIGN (asm_out_file, align);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LTRAMP", 0);
+ TRAMPOLINE_TEMPLATE (asm_out_file);
+
+ /* Record the rtl to refer to it. */
+ ASM_GENERATE_INTERNAL_LABEL (label, "LTRAMP", 0);
+ name
+ = (char *) obstack_copy0 (&permanent_obstack, label, strlen (label));
+ return gen_rtx (SYMBOL_REF, Pmode, name);
+}
+
+/* Assemble the integer constant X into an object of SIZE bytes.
+ X must be either a CONST_INT or CONST_DOUBLE.
+
+ Return 1 if we were able to output the constant, otherwise 0. If FORCE is
+ non-zero, abort if we can't output the constant. */
+
+int
+assemble_integer (x, size, force)
+ rtx x;
+ int size;
+ int force;
+{
+ /* First try to use the standard 1, 2, 4, 8, and 16 byte
+ ASM_OUTPUT... macros. */
+
+ switch (size)
+ {
+#ifdef ASM_OUTPUT_CHAR
+ case 1:
+ ASM_OUTPUT_CHAR (asm_out_file, x);
+ return 1;
+#endif
+
+#ifdef ASM_OUTPUT_SHORT
+ case 2:
+ ASM_OUTPUT_SHORT (asm_out_file, x);
+ return 1;
+#endif
+
+#ifdef ASM_OUTPUT_INT
+ case 4:
+ ASM_OUTPUT_INT (asm_out_file, x);
+ return 1;
+#endif
+
+#ifdef ASM_OUTPUT_DOUBLE_INT
+ case 8:
+ ASM_OUTPUT_DOUBLE_INT (asm_out_file, x);
+ return 1;
+#endif
+
+#ifdef ASM_OUTPUT_QUADRUPLE_INT
+ case 16:
+ ASM_OUTPUT_QUADRUPLE_INT (asm_out_file, x);
+ return 1;
+#endif
+ }
+
+ /* If we couldn't do it that way, there are two other possibilities: First,
+ if the machine can output an explicit byte and this is a 1 byte constant,
+ we can use ASM_OUTPUT_BYTE. */
+
+#ifdef ASM_OUTPUT_BYTE
+ if (size == 1 && GET_CODE (x) == CONST_INT)
+ {
+ ASM_OUTPUT_BYTE (asm_out_file, INTVAL (x));
+ return 1;
+ }
+#endif
+
+ /* Finally, if SIZE is larger than a single word, try to output the constant
+ one word at a time. */
+
+ if (size > UNITS_PER_WORD)
+ {
+ int i;
+ enum machine_mode mode
+ = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ rtx word;
+
+ for (i = 0; i < size / UNITS_PER_WORD; i++)
+ {
+ word = operand_subword (x, i, 0, mode);
+
+ if (word == 0)
+ break;
+
+ if (! assemble_integer (word, UNITS_PER_WORD, 0))
+ break;
+ }
+
+ if (i == size / UNITS_PER_WORD)
+ return 1;
+ /* If we output at least one word and then could not finish,
+ there is no valid way to continue. */
+ if (i > 0)
+ abort ();
+ }
+
+ if (force)
+ abort ();
+
+ return 0;
+}
+
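+/* Worked example for the word-splitting fallback above: SIZE == 8 on
+ a target with UNITS_PER_WORD == 4 and no ASM_OUTPUT_DOUBLE_INT. The
+ loop runs for i == 0 and 1, fetching each word with operand_subword
+ and recursing with SIZE == 4, which the switch handles via
+ ASM_OUTPUT_INT. The abort after a partial output is deliberate:
+ once one word has been written, the assembler stream cannot be
+ backed up. */
+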
+/* Assemble the floating-point constant D into an object of the size implied by MODE. */
+
+void
+assemble_real (d, mode)
+ REAL_VALUE_TYPE d;
+ enum machine_mode mode;
+{
+ jmp_buf output_constant_handler;
+
+ if (setjmp (output_constant_handler))
+ {
+ error ("floating point trap outputting a constant");
+#ifdef REAL_IS_NOT_DOUBLE
+ bzero ((char *) &d, sizeof d);
+ d = dconst0;
+#else
+ d = 0;
+#endif
+ }
+
+ set_float_handler (output_constant_handler);
+
+ switch (mode)
+ {
+#ifdef ASM_OUTPUT_BYTE_FLOAT
+ case QFmode:
+ ASM_OUTPUT_BYTE_FLOAT (asm_out_file, d);
+ break;
+#endif
+#ifdef ASM_OUTPUT_SHORT_FLOAT
+ case HFmode:
+ ASM_OUTPUT_SHORT_FLOAT (asm_out_file, d);
+ break;
+#endif
+#ifdef ASM_OUTPUT_THREE_QUARTER_FLOAT
+ case TQFmode:
+ ASM_OUTPUT_THREE_QUARTER_FLOAT (asm_out_file, d);
+ break;
+#endif
+#ifdef ASM_OUTPUT_FLOAT
+ case SFmode:
+ ASM_OUTPUT_FLOAT (asm_out_file, d);
+ break;
+#endif
+
+#ifdef ASM_OUTPUT_DOUBLE
+ case DFmode:
+ ASM_OUTPUT_DOUBLE (asm_out_file, d);
+ break;
+#endif
+
+#ifdef ASM_OUTPUT_LONG_DOUBLE
+ case XFmode:
+ case TFmode:
+ ASM_OUTPUT_LONG_DOUBLE (asm_out_file, d);
+ break;
+#endif
+
+ default:
+ abort ();
+ }
+
+ set_float_handler (NULL_PTR);
+}
+
+/* Here we combine duplicate floating constants to make
+ CONST_DOUBLE rtx's, and force those out to memory when necessary. */
+
+/* Chain of all CONST_DOUBLE rtx's constructed for the current function.
+ They are chained through the CONST_DOUBLE_CHAIN.
+ A CONST_DOUBLE rtx has CONST_DOUBLE_MEM != cc0_rtx iff it is on this chain.
+ In that case, CONST_DOUBLE_MEM is either a MEM,
+ or const0_rtx if no MEM has been made for this CONST_DOUBLE yet.
+
+ (CONST_DOUBLE_MEM is used only for top-level functions.
+ See force_const_mem for explanation.) */
+
+static rtx const_double_chain;
+
+/* Return a CONST_DOUBLE or CONST_INT for a value specified as a pair of ints.
+ For an integer, I0 is the low-order word and I1 is the high-order word.
+ For a real number, I0 is the word with the low address
+ and I1 is the word with the high address. */
+
+rtx
+immed_double_const (i0, i1, mode)
+ HOST_WIDE_INT i0, i1;
+ enum machine_mode mode;
+{
+ register rtx r;
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ {
+ /* We clear out all bits that don't belong in MODE, unless they and our
+ sign bit are all one. So we get either a reasonable negative value
+ or a reasonable unsigned value for this mode. */
+ int width = GET_MODE_BITSIZE (mode);
+ if (width < HOST_BITS_PER_WIDE_INT
+ && ((i0 & ((HOST_WIDE_INT) (-1) << (width - 1)))
+ != ((HOST_WIDE_INT) (-1) << (width - 1))))
+ i0 &= ((HOST_WIDE_INT) 1 << width) - 1, i1 = 0;
+ else if (width == HOST_BITS_PER_WIDE_INT
+ && ! (i1 == ~0 && i0 < 0))
+ i1 = 0;
+ else if (width > 2 * HOST_BITS_PER_WIDE_INT)
+ /* We cannot represent this value as a constant. */
+ abort ();
+
+ /* If this would be an entire word for the target, but is not for
+ the host, then sign-extend on the host so that the number will look
+ the same way on the host that it would on the target.
+
+ For example, when building a 64 bit alpha hosted 32 bit sparc
+ targeted compiler, then we want the 32 bit unsigned value -1 to be
+ represented as a 64 bit value -1, and not as 0x00000000ffffffff.
+ The latter confuses the sparc backend. */
+
+ if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
+ && (i0 & ((HOST_WIDE_INT) 1 << (width - 1))))
+ i0 |= ((HOST_WIDE_INT) (-1) << width);
+
+ /* If MODE fits within HOST_BITS_PER_WIDE_INT, always use a CONST_INT.
+
+ ??? Strictly speaking, this is wrong if we create a CONST_INT
+ for a large unsigned constant with the size of MODE being
+ HOST_BITS_PER_WIDE_INT and later try to interpret that constant in a
+ wider mode. In that case we will mis-interpret it as a negative
+ number.
+
+ Unfortunately, the only alternative is to make a CONST_DOUBLE
+ for any constant in any mode if it is an unsigned constant larger
+ than the maximum signed integer in an int on the host. However,
+ doing this will break everyone that always expects to see a CONST_INT
+ for SImode and smaller.
+
+ We have always been making CONST_INTs in this case, so nothing new
+ is being broken. */
+
+ if (width <= HOST_BITS_PER_WIDE_INT)
+ i1 = (i0 < 0) ? ~0 : 0;
+
+ /* If this integer fits in one word, return a CONST_INT. */
+ if ((i1 == 0 && i0 >= 0)
+ || (i1 == ~0 && i0 < 0))
+ return GEN_INT (i0);
+
+ /* We use VOIDmode for integers. */
+ mode = VOIDmode;
+ }
+
+ /* Search the chain for an existing CONST_DOUBLE with the right value.
+ If one is found, return it. */
+
+ for (r = const_double_chain; r; r = CONST_DOUBLE_CHAIN (r))
+ if (CONST_DOUBLE_LOW (r) == i0 && CONST_DOUBLE_HIGH (r) == i1
+ && GET_MODE (r) == mode)
+ return r;
+
+ /* No; make a new one and add it to the chain.
+
+ We may be called by an optimizer which may be discarding any memory
+ allocated during its processing (such as combine and loop). However,
+ we will be leaving this constant on the chain, so we cannot tolerate
+ freed memory. So switch to saveable_obstack for this allocation
+ and then switch back if we were in current_obstack. */
+
+ push_obstacks_nochange ();
+ rtl_in_saveable_obstack ();
+ r = gen_rtx (CONST_DOUBLE, mode, 0, i0, i1);
+ pop_obstacks ();
+
+ /* Don't touch const_double_chain in nested function; see force_const_mem.
+ Also, don't touch it if not inside any function. */
+ if (outer_function_chain == 0 && current_function_decl != 0)
+ {
+ CONST_DOUBLE_CHAIN (r) = const_double_chain;
+ const_double_chain = r;
+ }
+
+ /* Store const0_rtx in mem-slot since this CONST_DOUBLE is on the chain.
+ Actual use of mem-slot is only through force_const_mem. */
+
+ CONST_DOUBLE_MEM (r) = const0_rtx;
+
+ return r;
+}
+
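+/* Worked example of the narrowing above, on a 32-bit host
+ (HOST_BITS_PER_WIDE_INT == 32) with QImode (width == 8) and
+ i0 == 0x17f: the bits from the sign bit of the mode upward are not
+ all one, so
+
+ i0 &= ((HOST_WIDE_INT) 1 << 8) - 1, i1 = 0
+
+ leaves i0 == 0x7f, and since i1 == 0 && i0 >= 0 the result is
+ GEN_INT (0x7f). */
+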
+/* Return a CONST_DOUBLE for a specified `double' value
+ and machine mode. */
+
+rtx
+immed_real_const_1 (d, mode)
+ REAL_VALUE_TYPE d;
+ enum machine_mode mode;
+{
+ union real_extract u;
+ register rtx r;
+
+ /* Get the desired `double' value as a sequence of ints
+ since that is how they are stored in a CONST_DOUBLE. */
+
+ u.d = d;
+
+ /* Detect special cases. */
+
+ /* Avoid REAL_VALUES_EQUAL here in order to distinguish minus zero. */
+ if (!bcmp ((char *) &dconst0, (char *) &d, sizeof d))
+ return CONST0_RTX (mode);
+ /* Check for NaN first, because some ports (specifically the i386) do not
+ emit correct ieee-fp code by default, and thus will generate a core
+ dump here if we pass a NaN to REAL_VALUES_EQUAL and if REAL_VALUES_EQUAL
+ does a floating point comparison. */
+ else if (! REAL_VALUE_ISNAN (d) && REAL_VALUES_EQUAL (dconst1, d))
+ return CONST1_RTX (mode);
+
+ if (sizeof u == 2 * sizeof (HOST_WIDE_INT))
+ return immed_double_const (u.i[0], u.i[1], mode);
+
+ /* The rest of this function handles the case where
+ a float value requires more than 2 ints of space.
+ It will be deleted as dead code on machines that don't need it. */
+
+ /* Search the chain for an existing CONST_DOUBLE with the right value.
+ If one is found, return it. */
+
+ for (r = const_double_chain; r; r = CONST_DOUBLE_CHAIN (r))
+ if (! bcmp ((char *) &CONST_DOUBLE_LOW (r), (char *) &u, sizeof u)
+ && GET_MODE (r) == mode)
+ return r;
+
+ /* No; make a new one and add it to the chain.
+
+ We may be called by an optimizer which may be discarding any memory
+ allocated during its processing (such as combine and loop). However,
+ we will be leaving this constant on the chain, so we cannot tolerate
+ freed memory. So switch to saveable_obstack for this allocation
+ and then switch back if we were in current_obstack. */
+
+ push_obstacks_nochange ();
+ rtl_in_saveable_obstack ();
+ r = rtx_alloc (CONST_DOUBLE);
+ PUT_MODE (r, mode);
+ bcopy ((char *) &u, (char *) &CONST_DOUBLE_LOW (r), sizeof u);
+ pop_obstacks ();
+
+ /* Don't touch const_double_chain in nested function; see force_const_mem.
+ Also, don't touch it if not inside any function. */
+ if (outer_function_chain == 0 && current_function_decl != 0)
+ {
+ CONST_DOUBLE_CHAIN (r) = const_double_chain;
+ const_double_chain = r;
+ }
+
+ /* Store const0_rtx in CONST_DOUBLE_MEM since this CONST_DOUBLE is on the
+ chain, but has not been allocated memory. Actual use of CONST_DOUBLE_MEM
+ is only through force_const_mem. */
+
+ CONST_DOUBLE_MEM (r) = const0_rtx;
+
+ return r;
+}
+
+/* Return a CONST_DOUBLE rtx for a value specified by EXP,
+ which must be a REAL_CST tree node. */
+
+rtx
+immed_real_const (exp)
+ tree exp;
+{
+ return immed_real_const_1 (TREE_REAL_CST (exp), TYPE_MODE (TREE_TYPE (exp)));
+}
+
+/* At the end of a function, forget the memory-constants
+ previously made for CONST_DOUBLEs. Mark them as no longer on
+ const_double_chain, then clear const_double_chain itself and all
+ the chain-pointers. */
+
+void
+clear_const_double_mem ()
+{
+ register rtx r, next;
+
+ /* Don't touch CONST_DOUBLE_MEM for nested functions.
+ See force_const_mem for explanation. */
+ if (outer_function_chain != 0)
+ return;
+
+ for (r = const_double_chain; r; r = next)
+ {
+ next = CONST_DOUBLE_CHAIN (r);
+ CONST_DOUBLE_CHAIN (r) = 0;
+ CONST_DOUBLE_MEM (r) = cc0_rtx;
+ }
+ const_double_chain = 0;
+}
+
+/* Given an expression EXP with a constant value,
+ reduce it to the sum of an assembler symbol and an integer.
+ Store them both in the structure *VALUE.
+ Abort if EXP does not reduce. */
+
+struct addr_const
+{
+ rtx base;
+ HOST_WIDE_INT offset;
+};
+
+static void
+decode_addr_const (exp, value)
+ tree exp;
+ struct addr_const *value;
+{
+ register tree target = TREE_OPERAND (exp, 0);
+ register int offset = 0;
+ register rtx x;
+
+ while (1)
+ {
+ if (TREE_CODE (target) == COMPONENT_REF
+ && (TREE_CODE (DECL_FIELD_BITPOS (TREE_OPERAND (target, 1)))
+ == INTEGER_CST))
+ {
+ offset += TREE_INT_CST_LOW (DECL_FIELD_BITPOS (TREE_OPERAND (target, 1))) / BITS_PER_UNIT;
+ target = TREE_OPERAND (target, 0);
+ }
+ else if (TREE_CODE (target) == ARRAY_REF)
+ {
+ if (TREE_CODE (TREE_OPERAND (target, 1)) != INTEGER_CST
+ || TREE_CODE (TYPE_SIZE (TREE_TYPE (target))) != INTEGER_CST)
+ abort ();
+ offset += ((TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (target)))
+ * TREE_INT_CST_LOW (TREE_OPERAND (target, 1)))
+ / BITS_PER_UNIT);
+ target = TREE_OPERAND (target, 0);
+ }
+ else
+ break;
+ }
+
+ switch (TREE_CODE (target))
+ {
+ case VAR_DECL:
+ case FUNCTION_DECL:
+ x = DECL_RTL (target);
+ break;
+
+ case LABEL_DECL:
+ if (output_bytecode)
+ /* FIXME: this may not be correct, check it */
+ x = bc_gen_rtx (TREE_STRING_POINTER (target), 0, (struct bc_label *) 0);
+ else
+ x = gen_rtx (MEM, FUNCTION_MODE,
+ gen_rtx (LABEL_REF, VOIDmode,
+ label_rtx (TREE_OPERAND (exp, 0))));
+ break;
+
+ case REAL_CST:
+ case STRING_CST:
+ case COMPLEX_CST:
+ case CONSTRUCTOR:
+ x = TREE_CST_RTL (target);
+ break;
+
+ default:
+ abort ();
+ }
+
+ if (!output_bytecode)
+ {
+ if (GET_CODE (x) != MEM)
+ abort ();
+ x = XEXP (x, 0);
+ }
+
+ value->base = x;
+ value->offset = offset;
+}
+
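+/* For example, for &arr[2] where ARR is a file-scope array of 4-byte
+ (32-bit) ints, the ARRAY_REF arm above accumulates
+
+ offset = (32 * 2) / BITS_PER_UNIT = 8
+
+ and leaves TARGET as the VAR_DECL for ARR, so VALUE becomes the
+ symbol for ARR plus 8. */
+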
+/* Uniquize all constants that appear in memory.
+ Each constant in memory thus far output is recorded
+ in `const_hash_table' with a `struct constant_descriptor'
+ that contains a Polish-prefix representation of the value of
+ the constant.
+
+ We cannot store the trees in the hash table
+ because the trees may be temporary. */
+
+struct constant_descriptor
+{
+ struct constant_descriptor *next;
+ char *label;
+ char contents[1];
+};
+
+#define HASHBITS 30
+#define MAX_HASH_TABLE 1009
+static struct constant_descriptor *const_hash_table[MAX_HASH_TABLE];
+
+/* Compute a hash code for a constant expression. */
+
+int
+const_hash (exp)
+ tree exp;
+{
+ register char *p;
+ register int len, hi, i;
+ register enum tree_code code = TREE_CODE (exp);
+
+ if (code == INTEGER_CST)
+ {
+ p = (char *) &TREE_INT_CST_LOW (exp);
+ len = 2 * sizeof TREE_INT_CST_LOW (exp);
+ }
+ else if (code == REAL_CST)
+ {
+ p = (char *) &TREE_REAL_CST (exp);
+ len = sizeof TREE_REAL_CST (exp);
+ }
+ else if (code == STRING_CST)
+ p = TREE_STRING_POINTER (exp), len = TREE_STRING_LENGTH (exp);
+ else if (code == COMPLEX_CST)
+ return const_hash (TREE_REALPART (exp)) * 5
+ + const_hash (TREE_IMAGPART (exp));
+ else if (code == CONSTRUCTOR)
+ {
+ register tree link;
+
+ /* For record type, include the type in the hashing.
+ We do not do so for array types
+ because (1) the sizes of the elements are sufficient
+ and (2) distinct array types can have the same constructor.
+ Instead, we include the array size because the constructor could
+ be shorter. */
+ if (TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE)
+ hi = ((HOST_WIDE_INT) TREE_TYPE (exp) & ((1 << HASHBITS) - 1))
+ % MAX_HASH_TABLE;
+ else
+ hi = ((5 + int_size_in_bytes (TREE_TYPE (exp)))
+ & ((1 << HASHBITS) - 1)) % MAX_HASH_TABLE;
+
+ for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
+ if (TREE_VALUE (link))
+ hi = (hi * 603 + const_hash (TREE_VALUE (link))) % MAX_HASH_TABLE;
+
+ return hi;
+ }
+ else if (code == ADDR_EXPR)
+ {
+ struct addr_const value;
+ decode_addr_const (exp, &value);
+ if (GET_CODE (value.base) == SYMBOL_REF)
+ {
+ /* Don't hash the address of the SYMBOL_REF;
+ only use the offset and the symbol name. */
+ hi = value.offset;
+ p = XSTR (value.base, 0);
+ for (i = 0; p[i] != 0; i++)
+ hi = ((hi * 613) + (unsigned)(p[i]));
+ }
+ else if (GET_CODE (value.base) == LABEL_REF)
+ hi = value.offset + CODE_LABEL_NUMBER (XEXP (value.base, 0)) * 13;
+
+ hi &= (1 << HASHBITS) - 1;
+ hi %= MAX_HASH_TABLE;
+ return hi;
+ }
+ else if (code == PLUS_EXPR || code == MINUS_EXPR)
+ return const_hash (TREE_OPERAND (exp, 0)) * 9
+ + const_hash (TREE_OPERAND (exp, 1));
+ else if (code == NOP_EXPR || code == CONVERT_EXPR)
+ return const_hash (TREE_OPERAND (exp, 0)) * 7 + 2;
+
+ /* Compute hashing function */
+ hi = len;
+ for (i = 0; i < len; i++)
+ hi = ((hi * 613) + (unsigned)(p[i]));
+
+ hi &= (1 << HASHBITS) - 1;
+ hi %= MAX_HASH_TABLE;
+ return hi;
+}
+
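+/* Example of the fold at the end of const_hash: for a single byte
+ `a' (97) with LEN == 1, HI starts at 1 and becomes
+ 1 * 613 + 97 == 710, which is below both 1 << HASHBITS and
+ MAX_HASH_TABLE, so the bucket is 710. */
+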
+/* Compare a constant expression EXP with a constant-descriptor DESC.
+ Return 1 if DESC describes a constant with the same value as EXP. */
+
+static int
+compare_constant (exp, desc)
+ tree exp;
+ struct constant_descriptor *desc;
+{
+ return 0 != compare_constant_1 (exp, desc->contents);
+}
+
+/* Compare constant expression EXP with a substring P of a constant descriptor.
+ If they match, return a pointer to the end of the substring matched.
+ If they do not match, return 0.
+
+ Since descriptors are written in Polish prefix notation,
+ this function can be used recursively to test one operand of EXP
+ against a subdescriptor, and if it succeeds it returns the
+ address of the subdescriptor for the next operand. */
+
+static char *
+compare_constant_1 (exp, p)
+ tree exp;
+ char *p;
+{
+ register char *strp;
+ register int len;
+ register enum tree_code code = TREE_CODE (exp);
+
+ if (code != (enum tree_code) *p++)
+ return 0;
+
+ if (code == INTEGER_CST)
+ {
+ /* Integer constants are the same only if the same width of type. */
+ if (*p++ != TYPE_PRECISION (TREE_TYPE (exp)))
+ return 0;
+ strp = (char *) &TREE_INT_CST_LOW (exp);
+ len = 2 * sizeof TREE_INT_CST_LOW (exp);
+ }
+ else if (code == REAL_CST)
+ {
+ /* Real constants are the same only if the same width of type. */
+ if (*p++ != TYPE_PRECISION (TREE_TYPE (exp)))
+ return 0;
+ strp = (char *) &TREE_REAL_CST (exp);
+ len = sizeof TREE_REAL_CST (exp);
+ }
+ else if (code == STRING_CST)
+ {
+ if (flag_writable_strings)
+ return 0;
+ strp = TREE_STRING_POINTER (exp);
+ len = TREE_STRING_LENGTH (exp);
+ if (bcmp ((char *) &TREE_STRING_LENGTH (exp), p,
+ sizeof TREE_STRING_LENGTH (exp)))
+ return 0;
+ p += sizeof TREE_STRING_LENGTH (exp);
+ }
+ else if (code == COMPLEX_CST)
+ {
+ p = compare_constant_1 (TREE_REALPART (exp), p);
+ if (p == 0) return 0;
+ p = compare_constant_1 (TREE_IMAGPART (exp), p);
+ return p;
+ }
+ else if (code == CONSTRUCTOR)
+ {
+ register tree link;
+ int length = list_length (CONSTRUCTOR_ELTS (exp));
+ tree type;
+
+ if (bcmp ((char *) &length, p, sizeof length))
+ return 0;
+ p += sizeof length;
+
+ /* For record constructors, insist that the types match.
+ For arrays, just verify both constructors are for arrays. */
+ if (TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE)
+ type = TREE_TYPE (exp);
+ else
+ type = 0;
+ if (bcmp ((char *) &type, p, sizeof type))
+ return 0;
+ p += sizeof type;
+
+ /* For arrays, insist that the size in bytes match. */
+ if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE)
+ {
+ int size = int_size_in_bytes (TREE_TYPE (exp));
+ if (bcmp ((char *) &size, p, sizeof size))
+ return 0;
+ p += sizeof size;
+ }
+
+ for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
+ {
+ if (TREE_VALUE (link))
+ {
+ if ((p = compare_constant_1 (TREE_VALUE (link), p)) == 0)
+ return 0;
+ }
+ else
+ {
+ tree zero = 0;
+
+ if (bcmp ((char *) &zero, p, sizeof zero))
+ return 0;
+ p += sizeof zero;
+ }
+ }
+
+ return p;
+ }
+ else if (code == ADDR_EXPR)
+ {
+ struct addr_const value;
+ decode_addr_const (exp, &value);
+ strp = (char *) &value.offset;
+ len = sizeof value.offset;
+ /* Compare the offset. */
+ while (--len >= 0)
+ if (*p++ != *strp++)
+ return 0;
+ /* Compare symbol name. */
+ strp = XSTR (value.base, 0);
+ len = strlen (strp) + 1;
+ }
+ else if (code == PLUS_EXPR || code == MINUS_EXPR)
+ {
+ p = compare_constant_1 (TREE_OPERAND (exp, 0), p);
+ if (p == 0) return 0;
+ p = compare_constant_1 (TREE_OPERAND (exp, 1), p);
+ return p;
+ }
+ else if (code == NOP_EXPR || code == CONVERT_EXPR)
+ {
+ p = compare_constant_1 (TREE_OPERAND (exp, 0), p);
+ return p;
+ }
+
+ /* Compare constant contents. */
+ while (--len >= 0)
+ if (*p++ != *strp++)
+ return 0;
+
+ return p;
+}
+
+/* Construct a constant descriptor for the expression EXP.
+ It is up to the caller to enter the descriptor in the hash table. */
+
+static struct constant_descriptor *
+record_constant (exp)
+ tree exp;
+{
+ struct constant_descriptor *next = 0;
+ char *label = 0;
+
+ /* Make a struct constant_descriptor. The first two pointers will
+ be filled in later. Here we just leave space for them. */
+
+ obstack_grow (&permanent_obstack, (char *) &next, sizeof next);
+ obstack_grow (&permanent_obstack, (char *) &label, sizeof label);
+ record_constant_1 (exp);
+ return (struct constant_descriptor *) obstack_finish (&permanent_obstack);
+}
+
+/* Add a description of constant expression EXP
+ to the object growing in `permanent_obstack'.
+ No need to return its address; the caller will get that
+ from the obstack when the object is complete. */
+
+static void
+record_constant_1 (exp)
+ tree exp;
+{
+ register char *strp;
+ register int len;
+ register enum tree_code code = TREE_CODE (exp);
+
+ obstack_1grow (&permanent_obstack, (unsigned int) code);
+
+ if (code == INTEGER_CST)
+ {
+ obstack_1grow (&permanent_obstack, TYPE_PRECISION (TREE_TYPE (exp)));
+ strp = (char *) &TREE_INT_CST_LOW (exp);
+ len = 2 * sizeof TREE_INT_CST_LOW (exp);
+ }
+ else if (code == REAL_CST)
+ {
+ obstack_1grow (&permanent_obstack, TYPE_PRECISION (TREE_TYPE (exp)));
+ strp = (char *) &TREE_REAL_CST (exp);
+ len = sizeof TREE_REAL_CST (exp);
+ }
+ else if (code == STRING_CST)
+ {
+ if (flag_writable_strings)
+ return;
+ strp = TREE_STRING_POINTER (exp);
+ len = TREE_STRING_LENGTH (exp);
+ obstack_grow (&permanent_obstack, (char *) &TREE_STRING_LENGTH (exp),
+ sizeof TREE_STRING_LENGTH (exp));
+ }
+ else if (code == COMPLEX_CST)
+ {
+ record_constant_1 (TREE_REALPART (exp));
+ record_constant_1 (TREE_IMAGPART (exp));
+ return;
+ }
+ else if (code == CONSTRUCTOR)
+ {
+ register tree link;
+ int length = list_length (CONSTRUCTOR_ELTS (exp));
+ tree type;
+
+ obstack_grow (&permanent_obstack, (char *) &length, sizeof length);
+
+ /* For record constructors, insist that the types match.
+ For arrays, just verify both constructors are for arrays. */
+ if (TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE)
+ type = TREE_TYPE (exp);
+ else
+ type = 0;
+ obstack_grow (&permanent_obstack, (char *) &type, sizeof type);
+
+ /* For arrays, insist that the size in bytes match. */
+ if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE)
+ {
+ int size = int_size_in_bytes (TREE_TYPE (exp));
+ obstack_grow (&permanent_obstack, (char *) &size, sizeof size);
+ }
+
+ for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
+ {
+ if (TREE_VALUE (link))
+ record_constant_1 (TREE_VALUE (link));
+ else
+ {
+ tree zero = 0;
+
+ obstack_grow (&permanent_obstack, (char *) &zero, sizeof zero);
+ }
+ }
+
+ return;
+ }
+ else if (code == ADDR_EXPR)
+ {
+ struct addr_const value;
+ decode_addr_const (exp, &value);
+ /* Record the offset. */
+ obstack_grow (&permanent_obstack,
+ (char *) &value.offset, sizeof value.offset);
+ /* Record the symbol name. */
+ obstack_grow (&permanent_obstack, XSTR (value.base, 0),
+ strlen (XSTR (value.base, 0)) + 1);
+ return;
+ }
+ else if (code == PLUS_EXPR || code == MINUS_EXPR)
+ {
+ record_constant_1 (TREE_OPERAND (exp, 0));
+ record_constant_1 (TREE_OPERAND (exp, 1));
+ return;
+ }
+ else if (code == NOP_EXPR || code == CONVERT_EXPR)
+ {
+ record_constant_1 (TREE_OPERAND (exp, 0));
+ return;
+ }
+
+ /* Record constant contents. */
+ obstack_grow (&permanent_obstack, strp, len);
+}
+
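+/* Layout example for the descriptors built above: recording the
+ INTEGER_CST 5 of a 32-bit type grows, after the two pointer-sized
+ header slots reserved by record_constant,
+
+ 1 byte: the INTEGER_CST tree code
+ 1 byte: TYPE_PRECISION == 32
+ 2 * sizeof TREE_INT_CST_LOW bytes: the low and high words
+
+ which is exactly the byte stream compare_constant_1 walks, one tag
+ or field at a time, in the same order. */
+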
+/* Record a list of constant expressions that were passed to
+ output_constant_def but that could not be output right away. */
+
+struct deferred_constant
+{
+ struct deferred_constant *next;
+ tree exp;
+ int reloc;
+ int labelno;
+};
+
+static struct deferred_constant *deferred_constants;
+
+/* Nonzero means defer output of addressed subconstants
+ (i.e., those for which output_constant_def is called). */
+static int defer_addressed_constants_flag;
+
+/* Start deferring output of subconstants. */
+
+void
+defer_addressed_constants ()
+{
+ defer_addressed_constants_flag++;
+}
+
+/* Stop deferring output of subconstants,
+ and output now all those that have been deferred. */
+
+void
+output_deferred_addressed_constants ()
+{
+ struct deferred_constant *p, *next;
+
+ defer_addressed_constants_flag--;
+
+ if (defer_addressed_constants_flag > 0)
+ return;
+
+ for (p = deferred_constants; p; p = next)
+ {
+ output_constant_def_contents (p->exp, p->reloc, p->labelno);
+ next = p->next;
+ free (p);
+ }
+
+ deferred_constants = 0;
+}
+
+/* Make a copy of the whole tree structure for a constant.
+ This handles the same types of nodes that compare_constant
+ and record_constant handle. */
+
+static tree
+copy_constant (exp)
+ tree exp;
+{
+ switch (TREE_CODE (exp))
+ {
+ case INTEGER_CST:
+ case REAL_CST:
+ case STRING_CST:
+ case ADDR_EXPR:
+ /* For ADDR_EXPR, we do not want to copy the decl
+ whose address is requested. */
+ return copy_node (exp);
+
+ case COMPLEX_CST:
+ return build_complex (copy_constant (TREE_REALPART (exp)),
+ copy_constant (TREE_IMAGPART (exp)));
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ return build (TREE_CODE (exp), TREE_TYPE (exp),
+ copy_constant (TREE_OPERAND (exp, 0)),
+ copy_constant (TREE_OPERAND (exp, 1)));
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ return build1 (TREE_CODE (exp), TREE_TYPE (exp),
+ copy_constant (TREE_OPERAND (exp, 0)));
+
+ case CONSTRUCTOR:
+ {
+ tree copy = copy_node (exp);
+ tree list = copy_list (CONSTRUCTOR_ELTS (exp));
+ tree tail;
+
+ CONSTRUCTOR_ELTS (copy) = list;
+ for (tail = list; tail; tail = TREE_CHAIN (tail))
+ TREE_VALUE (tail) = copy_constant (TREE_VALUE (tail));
+
+ return copy;
+ }
+
+ default:
+ abort ();
+ }
+}
+
+/* Return an rtx representing a reference to constant data in memory
+ for the constant expression EXP.
+
+ If assembler code for such a constant has already been output,
+ return an rtx to refer to it.
+ Otherwise, output such a constant in memory (or defer it for later)
+ and generate an rtx for it.
+
+ The TREE_CST_RTL of EXP is set up to point to that rtx.
+ The const_hash_table records which constants already have label strings. */
+
+rtx
+output_constant_def (exp)
+ tree exp;
+{
+ register int hash;
+ register struct constant_descriptor *desc;
+ char label[256];
+ char *found = 0;
+ int reloc;
+ register rtx def;
+
+ if (TREE_CODE (exp) == INTEGER_CST)
+ abort (); /* No TREE_CST_RTL slot in these. */
+
+ if (TREE_CST_RTL (exp))
+ return TREE_CST_RTL (exp);
+
+ /* Make sure any other constants whose addresses appear in EXP
+ are assigned label numbers. */
+
+ reloc = output_addressed_constants (exp);
+
+ /* Compute hash code of EXP. Search the descriptors for that hash code
+ to see if any of them describes EXP. If yes, the descriptor records
+ the label number already assigned. */
+
+ hash = const_hash (exp) % MAX_HASH_TABLE;
+
+ for (desc = const_hash_table[hash]; desc; desc = desc->next)
+ if (compare_constant (exp, desc))
+ {
+ found = desc->label;
+ break;
+ }
+
+ if (found == 0)
+ {
+ /* No constant equal to EXP is known to have been output.
+ Make a constant descriptor to enter EXP in the hash table.
+ Assign the label number and record it in the descriptor for
+ future calls to this function to find. */
+
+ /* Create a string containing the label name, in LABEL. */
+ ASM_GENERATE_INTERNAL_LABEL (label, "LC", const_labelno);
+
+ desc = record_constant (exp);
+ desc->next = const_hash_table[hash];
+ desc->label
+ = (char *) obstack_copy0 (&permanent_obstack, label, strlen (label));
+ const_hash_table[hash] = desc;
+ }
+ else
+ {
+ /* Create a string containing the label name, in LABEL. */
+ ASM_GENERATE_INTERNAL_LABEL (label, "LC", const_labelno);
+ }
+
+ /* We have a symbol name; construct the SYMBOL_REF and the MEM. */
+
+ push_obstacks_nochange ();
+ if (TREE_PERMANENT (exp))
+ end_temporary_allocation ();
+
+ def = gen_rtx (SYMBOL_REF, Pmode, desc->label);
+
+ TREE_CST_RTL (exp)
+ = gen_rtx (MEM, TYPE_MODE (TREE_TYPE (exp)), def);
+ RTX_UNCHANGING_P (TREE_CST_RTL (exp)) = 1;
+ if (AGGREGATE_TYPE_P (TREE_TYPE (exp)))
+ MEM_IN_STRUCT_P (TREE_CST_RTL (exp)) = 1;
+
+ pop_obstacks ();
+
+ /* Optionally set flags or add text to the name to record information
+ such as that it is a function name. If the name is changed, the macro
+ ASM_OUTPUT_LABELREF will have to know how to strip this information. */
+#ifdef ENCODE_SECTION_INFO
+ ENCODE_SECTION_INFO (exp);
+#endif
+
+ /* If this is the first time we've seen this particular constant,
+ output it (or defer its output for later). */
+ if (found == 0)
+ {
+ if (defer_addressed_constants_flag)
+ {
+ struct deferred_constant *p;
+ p = (struct deferred_constant *) xmalloc (sizeof (struct deferred_constant));
+
+ push_obstacks_nochange ();
+ suspend_momentary ();
+ p->exp = copy_constant (exp);
+ pop_obstacks ();
+ p->reloc = reloc;
+ p->labelno = const_labelno++;
+ p->next = deferred_constants;
+ deferred_constants = p;
+ }
+ else
+ output_constant_def_contents (exp, reloc, const_labelno++);
+ }
+
+ return TREE_CST_RTL (exp);
+}
+
+/* Now output assembler code to define the label for EXP,
+ and follow it with the data of EXP. */
+
+static void
+output_constant_def_contents (exp, reloc, labelno)
+ tree exp;
+ int reloc;
+ int labelno;
+{
+ int align;
+
+ if (IN_NAMED_SECTION (exp))
+ named_section (TREE_STRING_POINTER (DECL_SECTION_NAME (exp)));
+ else
+ {
+ /* First switch to text section, except for writable strings. */
+#ifdef SELECT_SECTION
+ SELECT_SECTION (exp, reloc);
+#else
+ if (((TREE_CODE (exp) == STRING_CST) && flag_writable_strings)
+ || (flag_pic && reloc))
+ data_section ();
+ else
+ readonly_data_section ();
+#endif
+ }
+
+ /* Align the location counter as required by EXP's data type. */
+ align = TYPE_ALIGN (TREE_TYPE (exp));
+#ifdef CONSTANT_ALIGNMENT
+ align = CONSTANT_ALIGNMENT (exp, align);
+#endif
+
+ if (align > BITS_PER_UNIT)
+ {
+ if (!output_bytecode)
+ {
+ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
+ }
+ else
+ {
+ BC_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
+ }
+ }
+
+ /* Output the label itself. */
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LC", labelno);
+
+ /* Output the value of EXP. */
+ output_constant (exp,
+ (TREE_CODE (exp) == STRING_CST
+ ? TREE_STRING_LENGTH (exp)
+ : int_size_in_bytes (TREE_TYPE (exp))));
+
+}
+
+/* Similar hash facility for making memory-constants
+ from constant rtl-expressions. It is used on RISC machines
+ where immediate integer arguments and constant addresses are restricted
+ so that such constants must be stored in memory.
+
+ This pool of constants is reinitialized for each function
+ so each function gets its own constant pool that comes right before it.
+
+ All structures allocated here are discarded when functions are saved for
+ inlining, so they do not need to be allocated permanently. */
+
+#define MAX_RTX_HASH_TABLE 61
+static struct constant_descriptor **const_rtx_hash_table;
+
+/* Structure to represent sufficient information about a constant so that
+ it can be output when the constant pool is output, so that function
+ integration can be done, and to simplify handling on machines that
+ reference the constant pool as base+displacement. */
+
+struct pool_constant
+{
+ struct constant_descriptor *desc;
+ struct pool_constant *next;
+ enum machine_mode mode;
+ rtx constant;
+ int labelno;
+ int align;
+ int offset;
+};
+
+/* Pointers to first and last constant in pool. */
+
+static struct pool_constant *first_pool, *last_pool;
+
+/* Current offset in constant pool (does not include any machine-specific
+ header). */
+
+static int pool_offset;
+
+/* Structure used to maintain hash table mapping symbols used to their
+ corresponding constants. */
+
+struct pool_sym
+{
+ char *label;
+ struct pool_constant *pool;
+ struct pool_sym *next;
+};
+
+static struct pool_sym **const_rtx_sym_hash_table;
+
+/* Hash code for a SYMBOL_REF with CONSTANT_POOL_ADDRESS_P true.
+ The argument is XSTR (... , 0) */
+
+#define SYMHASH(LABEL) \
+ ((((HOST_WIDE_INT) (LABEL)) & ((1 << HASHBITS) - 1)) % MAX_RTX_HASH_TABLE)
+
+/* Initialize constant pool hashing for next function. */
+
+void
+init_const_rtx_hash_table ()
+{
+ const_rtx_hash_table
+ = ((struct constant_descriptor **)
+ oballoc (MAX_RTX_HASH_TABLE * sizeof (struct constant_descriptor *)));
+ const_rtx_sym_hash_table
+ = ((struct pool_sym **)
+ oballoc (MAX_RTX_HASH_TABLE * sizeof (struct pool_sym *)));
+ bzero ((char *) const_rtx_hash_table,
+ MAX_RTX_HASH_TABLE * sizeof (struct constant_descriptor *));
+ bzero ((char *) const_rtx_sym_hash_table,
+ MAX_RTX_HASH_TABLE * sizeof (struct pool_sym *));
+
+ first_pool = last_pool = 0;
+ pool_offset = 0;
+}
+
+/* Save and restore the constant-pool state for a nested function. */
+
+void
+save_varasm_status (p)
+ struct function *p;
+{
+ p->const_rtx_hash_table = const_rtx_hash_table;
+ p->const_rtx_sym_hash_table = const_rtx_sym_hash_table;
+ p->first_pool = first_pool;
+ p->last_pool = last_pool;
+ p->pool_offset = pool_offset;
+}
+
+void
+restore_varasm_status (p)
+ struct function *p;
+{
+ const_rtx_hash_table = p->const_rtx_hash_table;
+ const_rtx_sym_hash_table = p->const_rtx_sym_hash_table;
+ first_pool = p->first_pool;
+ last_pool = p->last_pool;
+ pool_offset = p->pool_offset;
+}
+
+enum kind { RTX_DOUBLE, RTX_INT };
+
+struct rtx_const
+{
+#ifdef ONLY_INT_FIELDS
+ unsigned int kind : 16;
+ unsigned int mode : 16;
+#else
+ enum kind kind : 16;
+ enum machine_mode mode : 16;
+#endif
+ union {
+ union real_extract du;
+ struct addr_const addr;
+ } un;
+};
+
+/* Express an rtx for a constant integer (perhaps symbolic)
+ as the sum of a symbol or label plus an explicit integer.
+ They are stored into VALUE. */
+
+static void
+decode_rtx_const (mode, x, value)
+ enum machine_mode mode;
+ rtx x;
+ struct rtx_const *value;
+{
+ /* Clear the whole structure, including any gaps. */
+
+ {
+ int *p = (int *) value;
+ int *end = (int *) (value + 1);
+ while (p < end)
+ *p++ = 0;
+ }
+
+ value->kind = RTX_INT; /* Most usual kind. */
+ value->mode = mode;
+
+ switch (GET_CODE (x))
+ {
+ case CONST_DOUBLE:
+ value->kind = RTX_DOUBLE;
+ if (GET_MODE (x) != VOIDmode)
+ value->mode = GET_MODE (x);
+ bcopy ((char *) &CONST_DOUBLE_LOW (x),
+ (char *) &value->un.du, sizeof value->un.du);
+ break;
+
+ case CONST_INT:
+ value->un.addr.offset = INTVAL (x);
+ break;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case PC:
+ value->un.addr.base = x;
+ break;
+
+ case CONST:
+ x = XEXP (x, 0);
+ if (GET_CODE (x) == PLUS)
+ {
+ value->un.addr.base = XEXP (x, 0);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ abort ();
+ value->un.addr.offset = INTVAL (XEXP (x, 1));
+ }
+ else if (GET_CODE (x) == MINUS)
+ {
+ value->un.addr.base = XEXP (x, 0);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ abort ();
+ value->un.addr.offset = - INTVAL (XEXP (x, 1));
+ }
+ else
+ abort ();
+ break;
+
+ default:
+ abort ();
+ }
+
+ if (value->kind == RTX_INT && value->un.addr.base != 0)
+ switch (GET_CODE (value->un.addr.base))
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ /* Use the string's address, not the SYMBOL_REF's address,
+ for the sake of addresses of library routines.
+ For a LABEL_REF, compare labels. */
+ value->un.addr.base = XEXP (value->un.addr.base, 0);
+ }
+}
+
+/* Given a MINUS expression, simplify it if both sides
+ include the same symbol. */
+
+rtx
+simplify_subtraction (x)
+ rtx x;
+{
+ struct rtx_const val0, val1;
+
+ decode_rtx_const (GET_MODE (x), XEXP (x, 0), &val0);
+ decode_rtx_const (GET_MODE (x), XEXP (x, 1), &val1);
+
+ if (val0.un.addr.base == val1.un.addr.base)
+ return GEN_INT (val0.un.addr.offset - val1.un.addr.offset);
+ return x;
+}
+
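+/* For example, (minus (const (plus (symbol_ref FOO) 8))
+ (const (plus (symbol_ref FOO) 3)))
+ decodes to base FOO, offset 8 and base FOO, offset 3, and folds to
+ (const_int 5); if the two bases differ, the MINUS is returned
+ unchanged. */
+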
+/* Compute a hash code for a constant RTL expression. */
+
+int
+const_hash_rtx (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ register int hi, i;
+
+ struct rtx_const value;
+ decode_rtx_const (mode, x, &value);
+
+ /* Compute hashing function */
+ hi = 0;
+ for (i = 0; i < sizeof value / sizeof (int); i++)
+ hi += ((int *) &value)[i];
+
+ hi &= (1 << HASHBITS) - 1;
+ hi %= MAX_RTX_HASH_TABLE;
+ return hi;
+}
+
+/* Compare a constant rtl object X with a constant-descriptor DESC.
+ Return 1 if DESC describes a constant with the same value as X. */
+
+static int
+compare_constant_rtx (mode, x, desc)
+ enum machine_mode mode;
+ rtx x;
+ struct constant_descriptor *desc;
+{
+ register int *p = (int *) desc->contents;
+ register int *strp;
+ register int len;
+ struct rtx_const value;
+
+ decode_rtx_const (mode, x, &value);
+ strp = (int *) &value;
+ len = sizeof value / sizeof (int);
+
+ /* Compare constant contents. */
+ while (--len >= 0)
+ if (*p++ != *strp++)
+ return 0;
+
+ return 1;
+}
+
+/* Construct a constant descriptor for the rtl-expression X.
+ It is up to the caller to enter the descriptor in the hash table. */
+
+static struct constant_descriptor *
+record_constant_rtx (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ struct constant_descriptor *ptr;
+ char *label;
+ struct rtx_const value;
+
+ decode_rtx_const (mode, x, &value);
+
+ /* Put these things in the saveable obstack so we can ensure it won't
+ be freed if we are called from combine or some other phase that discards
+ memory allocated from function_obstack (current_obstack). */
+ obstack_grow (saveable_obstack, &ptr, sizeof ptr);
+ obstack_grow (saveable_obstack, &label, sizeof label);
+
+ /* Record constant contents. */
+ obstack_grow (saveable_obstack, &value, sizeof value);
+
+ return (struct constant_descriptor *) obstack_finish (saveable_obstack);
+}
+
+/* Given a constant rtx X, make (or find) a memory constant for its value
+ and return a MEM rtx to refer to it in memory. */
+
+rtx
+force_const_mem (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ register int hash;
+ register struct constant_descriptor *desc;
+ char label[256];
+ char *found = 0;
+ rtx def;
+
+ /* If we want this CONST_DOUBLE in the same mode as it is in memory
+ (this will always be true for floating CONST_DOUBLEs that have been
+ placed in memory, but not for VOIDmode (integer) CONST_DOUBLEs),
+ use the previous copy. Otherwise, make a new one. Note that in
+ the unlikely event that this same CONST_DOUBLE is used in two different
+ modes in an alternating fashion, we will allocate a lot of different
+ memory locations, but this should be extremely rare. */
+
+ /* Don't use CONST_DOUBLE_MEM in a nested function.
+ Nested functions have their own constant pools,
+ so they can't share the same values in CONST_DOUBLE_MEM
+ with the containing function. */
+ if (outer_function_chain == 0)
+ if (GET_CODE (x) == CONST_DOUBLE
+ && GET_CODE (CONST_DOUBLE_MEM (x)) == MEM
+ && GET_MODE (CONST_DOUBLE_MEM (x)) == mode)
+ return CONST_DOUBLE_MEM (x);
+
+ /* Compute hash code of X. Search the descriptors for that hash code
+ to see if any of them describes X. If yes, the descriptor records
+ the label number already assigned. */
+
+ hash = const_hash_rtx (mode, x);
+
+ for (desc = const_rtx_hash_table[hash]; desc; desc = desc->next)
+ if (compare_constant_rtx (mode, x, desc))
+ {
+ found = desc->label;
+ break;
+ }
+
+ if (found == 0)
+ {
+ register struct pool_constant *pool;
+ register struct pool_sym *sym;
+ int align;
+
+ /* No constant equal to X is known to have been output.
+ Make a constant descriptor to enter X in the hash table.
+ Assign the label number and record it in the descriptor for
+ future calls to this function to find. */
+
+ desc = record_constant_rtx (mode, x);
+ desc->next = const_rtx_hash_table[hash];
+ const_rtx_hash_table[hash] = desc;
+
+ /* Align the location counter as required by the constant's machine mode. */
+ align = (mode == VOIDmode) ? UNITS_PER_WORD : GET_MODE_SIZE (mode);
+ if (align > BIGGEST_ALIGNMENT / BITS_PER_UNIT)
+ align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;
+
+ pool_offset += align - 1;
+ pool_offset &= ~ (align - 1);
+
+ /* If RTL is not being placed into the saveable obstack, make a
+ copy of X that is in the saveable obstack in case we are being
+ called from combine or some other phase that discards memory
+ it allocates. We need only do this if it is a CONST, since
+ no other RTX should be allocated in this situation. */
+ if (rtl_obstack != saveable_obstack
+ && GET_CODE (x) == CONST)
+ {
+ push_obstacks_nochange ();
+ rtl_in_saveable_obstack ();
+
+ x = gen_rtx (CONST, GET_MODE (x),
+ gen_rtx (PLUS, GET_MODE (x),
+ XEXP (XEXP (x, 0), 0), XEXP (XEXP (x, 0), 1)));
+ pop_obstacks ();
+ }
+
+ /* Allocate a pool constant descriptor, fill it in, and chain it in. */
+
+ pool = (struct pool_constant *) savealloc (sizeof (struct pool_constant));
+ pool->desc = desc;
+ pool->constant = x;
+ pool->mode = mode;
+ pool->labelno = const_labelno;
+ pool->align = align;
+ pool->offset = pool_offset;
+ pool->next = 0;
+
+ if (last_pool == 0)
+ first_pool = pool;
+ else
+ last_pool->next = pool;
+
+ last_pool = pool;
+ pool_offset += GET_MODE_SIZE (mode);
+
+ /* Create a string containing the label name, in LABEL. */
+ ASM_GENERATE_INTERNAL_LABEL (label, "LC", const_labelno);
+
+ ++const_labelno;
+
+ desc->label = found
+ = (char *) obstack_copy0 (saveable_obstack, label, strlen (label));
+
+ /* Add label to symbol hash table. */
+ hash = SYMHASH (found);
+ sym = (struct pool_sym *) savealloc (sizeof (struct pool_sym));
+ sym->label = found;
+ sym->pool = pool;
+ sym->next = const_rtx_sym_hash_table[hash];
+ const_rtx_sym_hash_table[hash] = sym;
+ }
+
+ /* We have a symbol name; construct the SYMBOL_REF and the MEM. */
+
+ def = gen_rtx (MEM, mode, gen_rtx (SYMBOL_REF, Pmode, found));
+
+ RTX_UNCHANGING_P (def) = 1;
+ /* Mark the symbol_ref as belonging to this constants pool. */
+ CONSTANT_POOL_ADDRESS_P (XEXP (def, 0)) = 1;
+ current_function_uses_const_pool = 1;
+
+ if (outer_function_chain == 0)
+ if (GET_CODE (x) == CONST_DOUBLE)
+ {
+ if (CONST_DOUBLE_MEM (x) == cc0_rtx)
+ {
+ CONST_DOUBLE_CHAIN (x) = const_double_chain;
+ const_double_chain = x;
+ }
+ CONST_DOUBLE_MEM (x) = def;
+ }
+
+ return def;
+}
+
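+/* A note on the offset arithmetic above: since ALIGN is a power of
+ two, the pair
+
+ pool_offset += align - 1;
+ pool_offset &= ~ (align - 1);
+
+ rounds pool_offset up to the next multiple of ALIGN. E.g. for
+ align == 8 and pool_offset == 13: 13 + 7 == 20, and 20 & ~7 == 16. */
+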
+/* Given a SYMBOL_REF with CONSTANT_POOL_ADDRESS_P true, return a pointer to
+ the corresponding pool_constant structure. */
+
+static struct pool_constant *
+find_pool_constant (addr)
+ rtx addr;
+{
+ struct pool_sym *sym;
+ char *label = XSTR (addr, 0);
+
+ for (sym = const_rtx_sym_hash_table[SYMHASH (label)]; sym; sym = sym->next)
+ if (sym->label == label)
+ return sym->pool;
+
+ abort ();
+}
+
+/* Given a constant pool SYMBOL_REF, return the corresponding constant. */
+
+rtx
+get_pool_constant (addr)
+ rtx addr;
+{
+ return (find_pool_constant (addr))->constant;
+}
+
+/* Similar, return the mode. */
+
+enum machine_mode
+get_pool_mode (addr)
+ rtx addr;
+{
+ return (find_pool_constant (addr))->mode;
+}
+
+/* Similar, return the offset in the constant pool. */
+
+int
+get_pool_offset (addr)
+ rtx addr;
+{
+ return (find_pool_constant (addr))->offset;
+}
+
+/* Return the size of the constant pool. */
+
+int
+get_pool_size ()
+{
+ return pool_offset;
+}
+
+/* Write all the constants in the constant pool. */
+
+void
+output_constant_pool (fnname, fndecl)
+ char *fnname;
+ tree fndecl;
+{
+ struct pool_constant *pool;
+ rtx x;
+ union real_extract u;
+
+#ifdef ASM_OUTPUT_POOL_PROLOGUE
+ ASM_OUTPUT_POOL_PROLOGUE (asm_out_file, fnname, fndecl, pool_offset);
+#endif
+
+ for (pool = first_pool; pool; pool = pool->next)
+ {
+ x = pool->constant;
+
+ /* See if X is a LABEL_REF (or a CONST referring to a LABEL_REF)
+ whose CODE_LABEL has been deleted. This can occur if a jump table
+ is eliminated by optimization. If so, write a constant of zero
+ instead. Note that this can also happen by turning the
+ CODE_LABEL into a NOTE. */
+ if (((GET_CODE (x) == LABEL_REF
+ && (INSN_DELETED_P (XEXP (x, 0))
+ || GET_CODE (XEXP (x, 0)) == NOTE)))
+ || (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
+ && (INSN_DELETED_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
+ || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == NOTE)))
+ x = const0_rtx;
+
+ /* First switch to correct section. */
+#ifdef SELECT_RTX_SECTION
+ SELECT_RTX_SECTION (pool->mode, x);
+#else
+ readonly_data_section ();
+#endif
+
+#ifdef ASM_OUTPUT_SPECIAL_POOL_ENTRY
+ ASM_OUTPUT_SPECIAL_POOL_ENTRY (asm_out_file, x, pool->mode,
+ pool->align, pool->labelno, done);
+#endif
+
+ if (pool->align > 1)
+ ASM_OUTPUT_ALIGN (asm_out_file, exact_log2 (pool->align));
+
+ /* Output the label. */
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LC", pool->labelno);
+
+ /* Output the value of the constant itself. */
+ switch (GET_MODE_CLASS (pool->mode))
+ {
+ case MODE_FLOAT:
+ if (GET_CODE (x) != CONST_DOUBLE)
+ abort ();
+
+ bcopy ((char *) &CONST_DOUBLE_LOW (x), (char *) &u, sizeof u);
+ assemble_real (u.d, pool->mode);
+ break;
+
+ case MODE_INT:
+ case MODE_PARTIAL_INT:
+ assemble_integer (x, GET_MODE_SIZE (pool->mode), 1);
+ break;
+
+ default:
+ abort ();
+ }
+
+ done: ;
+ }
+
+ /* Done with this pool. */
+ first_pool = last_pool = 0;
+}
+
+/* Find all the constants whose addresses are referenced inside of EXP,
+ and make sure assembler code with a label has been output for each one.
+ Indicate whether an ADDR_EXPR has been encountered. */
+
+int
+output_addressed_constants (exp)
+ tree exp;
+{
+ int reloc = 0;
+
+ switch (TREE_CODE (exp))
+ {
+ case ADDR_EXPR:
+ {
+ register tree constant = TREE_OPERAND (exp, 0);
+
+ while (TREE_CODE (constant) == COMPONENT_REF)
+ {
+ constant = TREE_OPERAND (constant, 0);
+ }
+
+ if (TREE_CODE_CLASS (TREE_CODE (constant)) == 'c'
+ || TREE_CODE (constant) == CONSTRUCTOR)
+ /* No need to do anything here
+ for addresses of variables or functions. */
+ output_constant_def (constant);
+ }
+ reloc = 1;
+ break;
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ reloc = output_addressed_constants (TREE_OPERAND (exp, 0));
+ reloc |= output_addressed_constants (TREE_OPERAND (exp, 1));
+ break;
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case NON_LVALUE_EXPR:
+ reloc = output_addressed_constants (TREE_OPERAND (exp, 0));
+ break;
+
+ case CONSTRUCTOR:
+ {
+ register tree link;
+ for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
+ if (TREE_VALUE (link) != 0)
+ reloc |= output_addressed_constants (TREE_VALUE (link));
+ }
+ break;
+
+ case ERROR_MARK:
+ break;
+ }
+ return reloc;
+}
+
+
+/* Output assembler for a byte constant. */
+void
+output_byte_asm (byte)
+ int byte;
+{
+ if (output_bytecode)
+ bc_emit_const ((char *) &byte, sizeof (char));
+#ifdef ASM_OUTPUT_BYTE
+ else
+ {
+ ASM_OUTPUT_BYTE (asm_out_file, byte);
+ }
+#endif
+}
+
+/* Output assembler code for constant EXP to FILE, with no label.
+ This includes the pseudo-op such as ".int" or ".byte", and a newline.
+ Assumes output_addressed_constants has been done on EXP already.
+
+ Generate exactly SIZE bytes of assembler data, padding at the end
+ with zeros if necessary. SIZE must always be specified.
+
+ SIZE is important for structure constructors,
+ since trailing members may have been omitted from the constructor.
+ It is also important for initialization of arrays from string constants
+ since the full length of the string constant might not be wanted.
+ It is also needed for initialization of unions, where the initializer's
+ type is just one member, and that may not be as long as the union.
+
+ There is a case in which we would fail to output exactly SIZE bytes:
+ for a structure constructor that wants to produce more than SIZE bytes.
+ But such constructors will never be generated for any possible input. */
+
+void
+output_constant (exp, size)
+ register tree exp;
+ register int size;
+{
+ register enum tree_code code = TREE_CODE (TREE_TYPE (exp));
+ rtx x;
+
+ if (size == 0)
+ return;
+
+ /* Eliminate the NON_LVALUE_EXPR that makes a cast not be an lvalue.
+ That way we get the constant (we hope) inside it. Also, strip
+ off any NOP_EXPR that converts between two record or union types. */
+ while ((TREE_CODE (exp) == NOP_EXPR
+ && (TREE_TYPE (exp) == TREE_TYPE (TREE_OPERAND (exp, 0))
+ || TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE
+ || TREE_CODE (TREE_TYPE (exp)) == UNION_TYPE
+ || TREE_CODE (TREE_TYPE (exp)) == QUAL_UNION_TYPE))
+ || TREE_CODE (exp) == NON_LVALUE_EXPR)
+ exp = TREE_OPERAND (exp, 0);
+
+ /* Allow a constructor with no elements for any data type.
+ This means to fill the space with zeros. */
+ if (TREE_CODE (exp) == CONSTRUCTOR && CONSTRUCTOR_ELTS (exp) == 0)
+ {
+ if (output_bytecode)
+ bc_emit_const_skip (size);
+ else
+ assemble_zeros (size);
+ return;
+ }
+
+ switch (code)
+ {
+ case CHAR_TYPE:
+ case BOOLEAN_TYPE:
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* ??? What about (int)((float)(int)&foo + 4) */
+ while (TREE_CODE (exp) == NOP_EXPR || TREE_CODE (exp) == CONVERT_EXPR
+ || TREE_CODE (exp) == NON_LVALUE_EXPR)
+ exp = TREE_OPERAND (exp, 0);
+
+ if (! assemble_integer (expand_expr (exp, NULL_RTX, VOIDmode,
+ EXPAND_INITIALIZER),
+ size, 0))
+ error ("initializer for integer value is too complicated");
+ size = 0;
+ break;
+
+ case REAL_TYPE:
+ if (TREE_CODE (exp) != REAL_CST)
+ error ("initializer for floating value is not a floating constant");
+
+ assemble_real (TREE_REAL_CST (exp),
+ mode_for_size (size * BITS_PER_UNIT, MODE_FLOAT, 0));
+ size = 0;
+ break;
+
+ case COMPLEX_TYPE:
+ output_constant (TREE_REALPART (exp), size / 2);
+ output_constant (TREE_IMAGPART (exp), size / 2);
+ size -= (size / 2) * 2;
+ break;
+
+ case ARRAY_TYPE:
+ if (TREE_CODE (exp) == CONSTRUCTOR)
+ {
+ output_constructor (exp, size);
+ return;
+ }
+ else if (TREE_CODE (exp) == STRING_CST)
+ {
+ int excess = 0;
+
+ if (size > TREE_STRING_LENGTH (exp))
+ {
+ excess = size - TREE_STRING_LENGTH (exp);
+ size = TREE_STRING_LENGTH (exp);
+ }
+
+ assemble_string (TREE_STRING_POINTER (exp), size);
+ size = excess;
+ }
+ else
+ abort ();
+ break;
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ if (TREE_CODE (exp) == CONSTRUCTOR)
+ output_constructor (exp, size);
+ else
+ abort ();
+ return;
+ }
+
+ if (size > 0)
+ assemble_zeros (size);
+}
+
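+/* Padding example for output_constant: initializing a 10-byte char
+ array from a string constant whose TREE_STRING_LENGTH is 7 takes
+ the STRING_CST arm with EXCESS == 3, emits the 7 string bytes, and
+ leaves SIZE == 3 for the final assemble_zeros. */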
+
+/* Bytecode specific code to output assembler for integer. */
+static void
+bc_assemble_integer (exp, size)
+ tree exp;
+ int size;
+{
+ tree const_part;
+ tree addr_part;
+ tree tmp;
+
+ /* FIXME: is this fold() business going to be as good as the
+ expand_expr() using EXPAND_SUM above in the RTL case? I
+ hate RMS.
+ FIXME: Copied as is from BC-GCC1; may need work. Don't hate. -bson */
+
+ exp = fold (exp);
+
+ while (TREE_CODE (exp) == NOP_EXPR || TREE_CODE (exp) == CONVERT_EXPR)
+ exp = TREE_OPERAND (exp, 0);
+ if (TREE_CODE (exp) == INTEGER_CST)
+ {
+ const_part = exp;
+ addr_part = 0;
+ }
+ else if (TREE_CODE (exp) == PLUS_EXPR)
+ {
+ const_part = TREE_OPERAND (exp, 0);
+ while (TREE_CODE (const_part) == NOP_EXPR
+ || TREE_CODE (const_part) == CONVERT_EXPR)
+ const_part = TREE_OPERAND (const_part, 0);
+ addr_part = TREE_OPERAND (exp, 1);
+ while (TREE_CODE (addr_part) == NOP_EXPR
+ || TREE_CODE (addr_part) == CONVERT_EXPR)
+ addr_part = TREE_OPERAND (addr_part, 0);
+ if (TREE_CODE (const_part) != INTEGER_CST)
+ tmp = const_part, const_part = addr_part, addr_part = tmp;
+ if (TREE_CODE (const_part) != INTEGER_CST
+ || TREE_CODE (addr_part) != ADDR_EXPR)
+ abort (); /* FIXME: we really haven't considered
+ all the possible cases here. */
+ }
+ else if (TREE_CODE (exp) == ADDR_EXPR)
+ {
+ const_part = integer_zero_node;
+ addr_part = exp;
+ }
+ else
+ abort (); /* FIXME: ditto previous. */
+
+ if (addr_part == 0)
+ {
+ if (size == 1)
+ {
+ char c = TREE_INT_CST_LOW (const_part);
+ bc_emit (&c, 1);
+ size -= 1;
+ }
+ else if (size == 2)
+ {
+ short s = TREE_INT_CST_LOW (const_part);
+ bc_emit ((char *) &s, 2);
+ size -= 2;
+ }
+ else if (size == 4)
+ {
+ int i = TREE_INT_CST_LOW (const_part);
+ bc_emit ((char *) &i, 4);
+ size -= 4;
+ }
+ else if (size == 8)
+ {
+#if WORDS_BIG_ENDIAN
+ int i = TREE_INT_CST_HIGH (const_part);
+ bc_emit ((char *) &i, 4);
+ i = TREE_INT_CST_LOW (const_part);
+ bc_emit ((char *) &i, 4);
+#else
+ int i = TREE_INT_CST_LOW (const_part);
+ bc_emit ((char *) &i, 4);
+ i = TREE_INT_CST_HIGH (const_part);
+ bc_emit ((char *) &i, 4);
+#endif
+ size -= 8;
+ }
+ }
+ else
+ if (size == 4
+ && TREE_CODE (TREE_OPERAND (addr_part, 0)) == VAR_DECL)
+ bc_emit_labelref (DECL_ASSEMBLER_NAME (TREE_OPERAND (addr_part, 0)),
+ TREE_INT_CST_LOW (const_part));
+ else
+ abort (); /* FIXME: there may be more cases. */
+}
+
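+/* Byte-order note for the 8-byte case above: TREE_INT_CST_HIGH holds
+ the most significant word, so on a WORDS_BIG_ENDIAN target it is
+ emitted first and TREE_INT_CST_LOW second; otherwise the order is
+ reversed. The constant 0x0000000100000002 thus goes out as the
+ 4-byte words 1, 2 big-endian, and 2, 1 little-endian. */
+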
+/* Subroutine of output_constant, used for CONSTRUCTORs
+ (aggregate constants).
+ Generate at least SIZE bytes, padding if necessary. */
+
+void
+output_constructor (exp, size)
+ tree exp;
+ int size;
+{
+ register tree link, field = 0;
+ HOST_WIDE_INT min_index = 0;
+ /* Number of bytes output or skipped so far.
+ In other words, current position within the constructor. */
+ int total_bytes = 0;
+ /* Non-zero means BYTE contains part of a byte, to be output. */
+ int byte_buffer_in_use = 0;
+ register int byte;
+
+ if (HOST_BITS_PER_WIDE_INT < BITS_PER_UNIT)
+ abort ();
+
+ if (TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE)
+ field = TYPE_FIELDS (TREE_TYPE (exp));
+
+ if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE
+ && TYPE_DOMAIN (TREE_TYPE (exp)) != 0)
+ min_index
+ = TREE_INT_CST_LOW (TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (exp))));
+
+ /* As LINK goes through the elements of the constant,
+ FIELD goes through the structure fields, if the constant is a structure.
+ if the constant is a union, then we override this,
+ by getting the field from the TREE_LIST element.
+ But the constant could also be an array. Then FIELD is zero. */
+ for (link = CONSTRUCTOR_ELTS (exp);
+ link;
+ link = TREE_CHAIN (link),
+ field = field ? TREE_CHAIN (field) : 0)
+ {
+ tree val = TREE_VALUE (link);
+ tree index = 0;
+
+ /* The element in a union constructor specifies the proper field. */
+
+ if (TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE
+ || TREE_CODE (TREE_TYPE (exp)) == UNION_TYPE)
+ {
+ /* If available, use the field given by the link. */
+ if (TREE_PURPOSE (link) != 0)
+ field = TREE_PURPOSE (link);
+ }
+
+ if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE)
+ index = TREE_PURPOSE (link);
+
+ /* Eliminate the marker that makes a cast not be an lvalue. */
+ if (val != 0)
+ STRIP_NOPS (val);
+
+ if (field == 0 || !DECL_BIT_FIELD (field))
+ {
+ /* An element that is not a bit-field. */
+
+ register int fieldsize;
+ /* Since this structure is static,
+ we know the positions are constant. */
+ int bitpos = (field ? (TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field))
+ / BITS_PER_UNIT)
+ : 0);
+ if (index != 0)
+ bitpos = (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (val)))
+ / BITS_PER_UNIT
+ * (TREE_INT_CST_LOW (index) - min_index));
+
+ /* Output any buffered-up bit-fields preceding this element. */
+ if (byte_buffer_in_use)
+ {
+ ASM_OUTPUT_BYTE (asm_out_file, byte);
+ total_bytes++;
+ byte_buffer_in_use = 0;
+ }
+
+ /* Advance to offset of this element.
+ Note no alignment needed in an array, since that is guaranteed
+ if each element has the proper size. */
+ if ((field != 0 || index != 0) && bitpos != total_bytes)
+ {
+ if (!output_bytecode)
+ assemble_zeros (bitpos - total_bytes);
+ else
+ bc_emit_const_skip (bitpos - total_bytes);
+ total_bytes = bitpos;
+ }
+
+ /* Determine size this element should occupy. */
+ if (field)
+ {
+ if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST)
+ abort ();
+ if (TREE_INT_CST_LOW (DECL_SIZE (field)) > 100000)
+ {
+ /* This avoids overflow trouble. */
+ tree size_tree = size_binop (CEIL_DIV_EXPR,
+ DECL_SIZE (field),
+ size_int (BITS_PER_UNIT));
+ fieldsize = TREE_INT_CST_LOW (size_tree);
+ }
+ else
+ {
+ fieldsize = TREE_INT_CST_LOW (DECL_SIZE (field));
+ fieldsize = (fieldsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
+ }
+ }
+ else
+ fieldsize = int_size_in_bytes (TREE_TYPE (TREE_TYPE (exp)));
+
+ /* Output the element's initial value. */
+ if (val == 0)
+ assemble_zeros (fieldsize);
+ else
+ output_constant (val, fieldsize);
+
+ /* Count its size. */
+ total_bytes += fieldsize;
+ }
+ else if (val != 0 && TREE_CODE (val) != INTEGER_CST)
+ error ("invalid initial value for member `%s'",
+ IDENTIFIER_POINTER (DECL_NAME (field)));
+ else
+ {
+ /* Element that is a bit-field. */
+
+ int next_offset = TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field));
+ int end_offset
+ = (next_offset + TREE_INT_CST_LOW (DECL_SIZE (field)));
+
+ if (val == 0)
+ val = integer_zero_node;
+
+ /* If this field does not start in this (or, next) byte,
+ skip some bytes. */
+ if (next_offset / BITS_PER_UNIT != total_bytes)
+ {
+ /* Output remnant of any bit field in previous bytes. */
+ if (byte_buffer_in_use)
+ {
+ ASM_OUTPUT_BYTE (asm_out_file, byte);
+ total_bytes++;
+ byte_buffer_in_use = 0;
+ }
+
+ /* If still not at proper byte, advance to there. */
+ if (next_offset / BITS_PER_UNIT != total_bytes)
+ {
+ assemble_zeros (next_offset / BITS_PER_UNIT - total_bytes);
+ total_bytes = next_offset / BITS_PER_UNIT;
+ }
+ }
+
+ if (! byte_buffer_in_use)
+ byte = 0;
+
+ /* We must split the element into pieces that fall within
+ separate bytes, and combine each byte with previous or
+ following bit-fields. */
+
+	      /* next_offset is the offset in bits from the beginning of
+ the structure to the next bit of this element to be processed.
+ end_offset is the offset of the first bit past the end of
+ this element. */
+ while (next_offset < end_offset)
+ {
+ int this_time;
+ int shift, value;
+ int next_byte = next_offset / BITS_PER_UNIT;
+ int next_bit = next_offset % BITS_PER_UNIT;
+
+ /* Advance from byte to byte
+ within this element when necessary. */
+ while (next_byte != total_bytes)
+ {
+ ASM_OUTPUT_BYTE (asm_out_file, byte);
+ total_bytes++;
+ byte = 0;
+ }
+
+ /* Number of bits we can process at once
+ (all part of the same byte). */
+ this_time = MIN (end_offset - next_offset,
+ BITS_PER_UNIT - next_bit);
+#if BYTES_BIG_ENDIAN
+ /* On big-endian machine, take the most significant bits
+ first (of the bits that are significant)
+ and put them into bytes from the most significant end. */
+ shift = end_offset - next_offset - this_time;
+ /* Don't try to take a bunch of bits that cross
+ the word boundary in the INTEGER_CST. */
+ if (shift < HOST_BITS_PER_WIDE_INT
+ && shift + this_time > HOST_BITS_PER_WIDE_INT)
+ {
+ this_time -= (HOST_BITS_PER_WIDE_INT - shift);
+ shift = HOST_BITS_PER_WIDE_INT;
+ }
+
+ /* Now get the bits from the appropriate constant word. */
+ if (shift < HOST_BITS_PER_WIDE_INT)
+ {
+ value = TREE_INT_CST_LOW (val);
+ }
+ else if (shift < 2 * HOST_BITS_PER_WIDE_INT)
+ {
+ value = TREE_INT_CST_HIGH (val);
+ shift -= HOST_BITS_PER_WIDE_INT;
+ }
+ else
+ abort ();
+ byte |= (((value >> shift)
+ & (((HOST_WIDE_INT) 1 << this_time) - 1))
+ << (BITS_PER_UNIT - this_time - next_bit));
+#else
+ /* On little-endian machines,
+ take first the least significant bits of the value
+ and pack them starting at the least significant
+ bits of the bytes. */
+ shift = (next_offset
+ - TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field)));
+ /* Don't try to take a bunch of bits that cross
+ the word boundary in the INTEGER_CST. */
+ if (shift < HOST_BITS_PER_WIDE_INT
+ && shift + this_time > HOST_BITS_PER_WIDE_INT)
+ {
+ this_time -= (HOST_BITS_PER_WIDE_INT - shift);
+ shift = HOST_BITS_PER_WIDE_INT;
+ }
+
+ /* Now get the bits from the appropriate constant word. */
+		  if (shift < HOST_BITS_PER_WIDE_INT)
+ value = TREE_INT_CST_LOW (val);
+ else if (shift < 2 * HOST_BITS_PER_WIDE_INT)
+ {
+ value = TREE_INT_CST_HIGH (val);
+ shift -= HOST_BITS_PER_WIDE_INT;
+ }
+ else
+ abort ();
+ byte |= ((value >> shift)
+ & (((HOST_WIDE_INT) 1 << this_time) - 1)) << next_bit;
+#endif
+ next_offset += this_time;
+ byte_buffer_in_use = 1;
+ }
+ }
+ }
+ if (byte_buffer_in_use)
+ {
+ ASM_OUTPUT_BYTE (asm_out_file, byte);
+ total_bytes++;
+ }
+ if (total_bytes < size)
+ assemble_zeros (size - total_bytes);
+}
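+
+/* As a worked example of the bit-field packing above (a sketch, assuming
+   a little-endian target with 8-bit bytes and hypothetical names): for
+	struct s { unsigned a : 3; unsigned b : 6; } x = { 5, 21 };
+   `a' (binary 101) fills bits 0-2 of the first byte, the low five bits
+   of `b' (binary 10101) fill bits 3-7, giving 0xad; the remaining high
+   bit of `b' (0) lands in bit 0 of the second byte.  */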
+
+
+#ifdef HANDLE_SYSV_PRAGMA
+
+/* Support #pragma weak by default if WEAK_ASM_OP and ASM_OUTPUT_DEF
+ are defined. */
+#if defined (WEAK_ASM_OP) && defined (ASM_OUTPUT_DEF)
+
+/* See c-pragma.c for an identical definition. */
+enum pragma_state
+{
+ ps_start,
+ ps_done,
+ ps_bad,
+ ps_weak,
+ ps_name,
+ ps_equals,
+ ps_value,
+ ps_pack,
+ ps_left,
+ ps_align,
+ ps_right
+};
+
+/* Output asm to handle ``#pragma weak'' */
+void
+handle_pragma_weak (what, asm_out_file, name, value)
+ enum pragma_state what;
+ FILE *asm_out_file;
+ char *name, *value;
+{
+ if (what == ps_name || what == ps_value)
+ {
+ fprintf (asm_out_file, "\t%s\t", WEAK_ASM_OP);
+
+ if (output_bytecode)
+ BC_OUTPUT_LABELREF (asm_out_file, name);
+ else
+ ASM_OUTPUT_LABELREF (asm_out_file, name);
+
+ fputc ('\n', asm_out_file);
+ if (what == ps_value)
+ ASM_OUTPUT_DEF (asm_out_file, name, value);
+ }
+ else if (! (what == ps_done || what == ps_start))
+ warning ("malformed `#pragma weak'");
+}
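+
+/* For instance (a sketch; the exact strings depend on the target's
+   WEAK_ASM_OP and ASM_OUTPUT_DEF definitions), on a target where
+   WEAK_ASM_OP is ".weak", the directive
+	#pragma weak stat = _stat
+   arrives here with WHAT == ps_value and emits roughly
+	.weak	stat
+	stat = _stat
+   into the assembly output.  */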
+
+#endif /* WEAK_ASM_OP && ASM_OUTPUT_DEF */
+
+#endif /* HANDLE_SYSV_PRAGMA */
diff --git a/gnu/usr.bin/cc/cc_int/version.c b/gnu/usr.bin/cc/cc_int/version.c
new file mode 100644
index 0000000..a77e9ff
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/version.c
@@ -0,0 +1 @@
+char *version_string = "2.6.0";
diff --git a/gnu/usr.bin/cc/cc_int/xcoffout.c b/gnu/usr.bin/cc/cc_int/xcoffout.c
new file mode 100644
index 0000000..42b01f9
--- /dev/null
+++ b/gnu/usr.bin/cc/cc_int/xcoffout.c
@@ -0,0 +1,536 @@
+/* Output xcoff-format symbol table information from GNU compiler.
+ Copyright (C) 1992 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Output xcoff-format symbol table data. The main functionality is contained
+ in dbxout.c. This file implements the sdbout-like parts of the xcoff
+ interface. Many functions are very similar to their counterparts in
+ sdbout.c. */
+
+/* Include this first, because it may define MIN and MAX. */
+#include <stdio.h>
+
+#include "config.h"
+#include "tree.h"
+#include "rtl.h"
+#include "flags.h"
+
+#ifdef XCOFF_DEBUGGING_INFO
+
+/* This defines the C_* storage classes. */
+#include <dbxstclass.h>
+
+#include "xcoffout.h"
+
+#if defined (USG) || defined (NO_STAB_H)
+#include "gstab.h"
+#else
+#include <stab.h>
+
+/* This is a GNU extension we need to reference in this file. */
+#ifndef N_CATCH
+#define N_CATCH 0x54
+#endif
+#endif
+
+/* Line number of beginning of current function, minus one.
+ Negative means not in a function or not using xcoff. */
+
+int xcoff_begin_function_line = -1;
+
+/* Name of the current include file. */
+
+char *xcoff_current_include_file;
+
+/* Name of the current function file. This is the file the `.bf' is
+   emitted from.  If a line is emitted from a different file (by
+   including that file, of course), the line number will be
+   absolute.  */
+
+char *xcoff_current_function_file;
+
+/* Names of bss and data sections. These should be unique names for each
+ compilation unit. */
+
+char *xcoff_bss_section_name;
+char *xcoff_private_data_section_name;
+char *xcoff_read_only_section_name;
+
+/* Last source file name mentioned in a NOTE insn. */
+
+char *xcoff_lastfile;
+
+/* Macro definitions used below. */
+
+#define ABS_OR_RELATIVE_LINENO(LINENO) \
+ (xcoff_current_include_file ? (LINENO) : (LINENO) - xcoff_begin_function_line)
+
+/* Output source line numbers via ".line" rather than ".stabd". */
+#define ASM_OUTPUT_SOURCE_LINE(FILE,LINENUM) \
+ do { \
+ if (xcoff_begin_function_line >= 0) \
+ fprintf (FILE, "\t.line\t%d\n", ABS_OR_RELATIVE_LINENO (LINENUM)); \
+ } while (0)
+
+#define ASM_OUTPUT_LFB(FILE,LINENUM) \
+{ \
+ if (xcoff_begin_function_line == -1) \
+ { \
+ xcoff_begin_function_line = (LINENUM) - 1;\
+ fprintf (FILE, "\t.bf\t%d\n", (LINENUM)); \
+ } \
+ xcoff_current_function_file \
+ = (xcoff_current_include_file \
+ ? xcoff_current_include_file : main_input_filename); \
+}
+
+#define ASM_OUTPUT_LFE(FILE,LINENUM) \
+ do { \
+ fprintf (FILE, "\t.ef\t%d\n", (LINENUM)); \
+ xcoff_begin_function_line = -1; \
+ } while (0)
+
+#define ASM_OUTPUT_LBB(FILE,LINENUM,BLOCKNUM) \
+ fprintf (FILE, "\t.bb\t%d\n", ABS_OR_RELATIVE_LINENO (LINENUM))
+
+#define ASM_OUTPUT_LBE(FILE,LINENUM,BLOCKNUM) \
+ fprintf (FILE, "\t.eb\t%d\n", ABS_OR_RELATIVE_LINENO (LINENUM))
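+
+/* To illustrate the scheme above (a sketch): for a function whose body
+   begins at source line 10, ASM_OUTPUT_LFB records
+   xcoff_begin_function_line = 9 and emits `.bf 10'; a statement on
+   line 14 of the same file is then emitted as `.line 5' (14 - 9),
+   while lines coming from an include file keep their absolute
+   numbers.  */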
+
+/* Support routines for XCOFF debugging info. */
+
+/* Assign NUMBER as the stabx type number for the type described by NAME.
+ Search all decls in the list SYMS to find the type NAME. */
+
+static void
+assign_type_number (syms, name, number)
+ tree syms;
+ char *name;
+ int number;
+{
+ tree decl;
+
+ for (decl = syms; decl; decl = TREE_CHAIN (decl))
+ if (DECL_NAME (decl)
+ && strcmp (IDENTIFIER_POINTER (DECL_NAME (decl)), name) == 0)
+ {
+ TREE_ASM_WRITTEN (decl) = 1;
+ TYPE_SYMTAB_ADDRESS (TREE_TYPE (decl)) = number;
+ }
+}
+
+/* Setup gcc primitive types to use the XCOFF built-in type numbers where
+ possible. */
+
+void
+xcoff_output_standard_types (syms)
+ tree syms;
+{
+ /* Handle built-in C types here. */
+
+ assign_type_number (syms, "int", -1);
+ assign_type_number (syms, "char", -2);
+ assign_type_number (syms, "short int", -3);
+ assign_type_number (syms, "long int", -4);
+ assign_type_number (syms, "unsigned char", -5);
+ assign_type_number (syms, "signed char", -6);
+ assign_type_number (syms, "short unsigned int", -7);
+ assign_type_number (syms, "unsigned int", -8);
+ /* No such type "unsigned". */
+ assign_type_number (syms, "long unsigned int", -10);
+ assign_type_number (syms, "void", -11);
+ assign_type_number (syms, "float", -12);
+ assign_type_number (syms, "double", -13);
+ assign_type_number (syms, "long double", -14);
+ /* Pascal and Fortran types run from -15 to -29. */
+ /* No such type "wchar". */
+
+ /* "long long int", and "long long unsigned int", are not handled here,
+ because there are no predefined types that match them. */
+
+ /* ??? Should also handle built-in C++ and Obj-C types. There perhaps
+ aren't any that C doesn't already have. */
+}
+
+/* Print an error message for unrecognized stab codes. */
+
+#define UNKNOWN_STAB(STR) \
+ do { \
+    fprintf (stderr, "Error, unknown stab %s: 0x%x\n", STR, stab);	\
+ fflush (stderr); \
+ } while (0)
+
+/* Conversion routine from BSD stabs to AIX storage classes. */
+
+int
+stab_to_sclass (stab)
+ int stab;
+{
+ switch (stab)
+ {
+ case N_GSYM:
+ return C_GSYM;
+
+ case N_FNAME:
+ UNKNOWN_STAB ("N_FNAME");
+ abort();
+
+ case N_FUN:
+ return C_FUN;
+
+ case N_STSYM:
+ case N_LCSYM:
+ return C_STSYM;
+
+#ifdef N_MAIN
+ case N_MAIN:
+ UNKNOWN_STAB ("N_MAIN");
+ abort ();
+#endif
+
+ case N_RSYM:
+ return C_RSYM;
+
+ case N_SSYM:
+ UNKNOWN_STAB ("N_SSYM");
+ abort ();
+
+ case N_RPSYM:
+ return C_RPSYM;
+
+ case N_PSYM:
+ return C_PSYM;
+ case N_LSYM:
+ return C_LSYM;
+ case N_DECL:
+ return C_DECL;
+ case N_ENTRY:
+ return C_ENTRY;
+
+ case N_SO:
+ UNKNOWN_STAB ("N_SO");
+ abort ();
+
+ case N_SOL:
+ UNKNOWN_STAB ("N_SOL");
+ abort ();
+
+ case N_SLINE:
+ UNKNOWN_STAB ("N_SLINE");
+ abort ();
+
+#ifdef N_DSLINE
+ case N_DSLINE:
+ UNKNOWN_STAB ("N_DSLINE");
+ abort ();
+#endif
+
+#ifdef N_BSLINE
+ case N_BSLINE:
+ UNKNOWN_STAB ("N_BSLINE");
+ abort ();
+#endif
+#if 0
+ /* This has the same value as N_BSLINE. */
+ case N_BROWS:
+ UNKNOWN_STAB ("N_BROWS");
+ abort ();
+#endif
+
+#ifdef N_BINCL
+ case N_BINCL:
+ UNKNOWN_STAB ("N_BINCL");
+ abort ();
+#endif
+
+#ifdef N_EINCL
+ case N_EINCL:
+ UNKNOWN_STAB ("N_EINCL");
+ abort ();
+#endif
+
+#ifdef N_EXCL
+ case N_EXCL:
+ UNKNOWN_STAB ("N_EXCL");
+ abort ();
+#endif
+
+ case N_LBRAC:
+ UNKNOWN_STAB ("N_LBRAC");
+ abort ();
+
+ case N_RBRAC:
+ UNKNOWN_STAB ("N_RBRAC");
+ abort ();
+
+ case N_BCOMM:
+ return C_BCOMM;
+ case N_ECOMM:
+ return C_ECOMM;
+ case N_ECOML:
+ return C_ECOML;
+
+ case N_LENG:
+ UNKNOWN_STAB ("N_LENG");
+ abort ();
+
+ case N_PC:
+ UNKNOWN_STAB ("N_PC");
+ abort ();
+
+#ifdef N_M2C
+ case N_M2C:
+ UNKNOWN_STAB ("N_M2C");
+ abort ();
+#endif
+
+#ifdef N_SCOPE
+ case N_SCOPE:
+ UNKNOWN_STAB ("N_SCOPE");
+ abort ();
+#endif
+
+ case N_CATCH:
+ UNKNOWN_STAB ("N_CATCH");
+ abort ();
+
+ default:
+ UNKNOWN_STAB ("default");
+ abort ();
+ }
+}
+
+/* In XCOFF, we have to have this .bf before the function prologue.
+   Rely on the value of `xcoff_begin_function_line' not to duplicate .bf.  */
+
+void
+xcoffout_output_first_source_line (file, last_linenum)
+ FILE *file;
+ int last_linenum;
+{
+ ASM_OUTPUT_LFB (file, last_linenum);
+ dbxout_parms (DECL_ARGUMENTS (current_function_decl));
+ ASM_OUTPUT_SOURCE_LINE (file, last_linenum);
+}
+
+/* Output debugging info to FILE to switch to sourcefile FILENAME.
+ INLINE_P is true if this is from an inlined function. */
+
+void
+xcoffout_source_file (file, filename, inline_p)
+ FILE *file;
+ char *filename;
+ int inline_p;
+{
+ if (filename
+ && (xcoff_lastfile == 0 || strcmp (filename, xcoff_lastfile)
+ || (inline_p && ! xcoff_current_include_file)
+ || (! inline_p && xcoff_current_include_file)))
+ {
+ if (xcoff_current_include_file)
+ {
+ fprintf (file, "\t.ei\t");
+ output_quoted_string (file, xcoff_current_include_file);
+ fprintf (file, "\n");
+ xcoff_current_include_file = NULL;
+ }
+ if (strcmp (main_input_filename, filename) || inline_p)
+ {
+ fprintf (file, "\t.bi\t");
+ output_quoted_string (file, filename);
+ fprintf (file, "\n");
+ xcoff_current_include_file = filename;
+ }
+
+ xcoff_lastfile = filename;
+ }
+}
+
+/* Output a line number symbol entry into output stream FILE,
+ for source file FILENAME and line number NOTE. */
+
+void
+xcoffout_source_line (file, filename, note)
+ FILE *file;
+ char *filename;
+ rtx note;
+{
+ xcoffout_source_file (file, filename, RTX_INTEGRATED_P (note));
+
+ ASM_OUTPUT_SOURCE_LINE (file, NOTE_LINE_NUMBER (note));
+}
+
+/* Output the symbols defined in block number DO_BLOCK.
+ Set NEXT_BLOCK_NUMBER to 0 before calling.
+
+ This function works by walking the tree structure of blocks,
+ counting blocks until it finds the desired block. */
+
+static int do_block = 0;
+
+static int next_block_number;
+
+static void
+xcoffout_block (block, depth, args)
+ register tree block;
+ int depth;
+ tree args;
+{
+ while (block)
+ {
+      /* Ignore blocks never expanded or not otherwise marked as real.  */
+ if (TREE_USED (block))
+ {
+ /* When we reach the specified block, output its symbols. */
+ if (next_block_number == do_block)
+ {
+ /* Output the syms of the block. */
+ if (debug_info_level != DINFO_LEVEL_TERSE || depth == 0)
+ dbxout_syms (BLOCK_VARS (block));
+ if (args)
+ dbxout_reg_parms (args);
+
+ /* We are now done with the block. Don't go to inner blocks. */
+ return;
+ }
+ /* If we are past the specified block, stop the scan. */
+ else if (next_block_number >= do_block)
+ return;
+
+ next_block_number++;
+
+ /* Output the subblocks. */
+ xcoffout_block (BLOCK_SUBBLOCKS (block), depth + 1, NULL_TREE);
+ }
+ block = BLOCK_CHAIN (block);
+ }
+}
+
+/* Describe the beginning of an internal block within a function.
+ Also output descriptions of variables defined in this block.
+
+ N is the number of the block, by order of beginning, counting from 1,
+ and not counting the outermost (function top-level) block.
+ The blocks match the BLOCKs in DECL_INITIAL (current_function_decl),
+ if the count starts at 0 for the outermost one. */
+
+void
+xcoffout_begin_block (file, line, n)
+ FILE *file;
+ int line;
+ int n;
+{
+ tree decl = current_function_decl;
+
+ /* The IBM AIX compiler does not emit a .bb for the function level scope,
+ so we avoid it here also. */
+ if (n != 1)
+ ASM_OUTPUT_LBB (file, line, n);
+
+ do_block = n;
+ next_block_number = 0;
+ xcoffout_block (DECL_INITIAL (decl), 0, DECL_ARGUMENTS (decl));
+}
+
+/* Describe the end line-number of an internal block within a function. */
+
+void
+xcoffout_end_block (file, line, n)
+ FILE *file;
+ int line;
+ int n;
+{
+ if (n != 1)
+ ASM_OUTPUT_LBE (file, line, n);
+}
+
+/* Called at beginning of function (before prologue).
+ Declare function as needed for debugging. */
+
+void
+xcoffout_declare_function (file, decl, name)
+ FILE *file;
+ tree decl;
+ char *name;
+{
+ char *n = name;
+ int i;
+
+ for (i = 0; name[i]; ++i)
+ {
+ if (name[i] == '[')
+ {
+ n = (char *) alloca (i + 1);
+ strncpy (n, name, i);
+ n[i] = '\0';
+ break;
+ }
+ }
+
+  /* Any pending .bi or .ei must occur before the .function pseudo op.
+ Otherwise debuggers will think that the function is in the previous
+ file and/or at the wrong line number. */
+ xcoffout_source_file (file, DECL_SOURCE_FILE (decl), 0);
+ dbxout_symbol (decl, 0);
+ fprintf (file, "\t.function .%s,.%s,16,044,FE..%s-.%s\n", n, n, n, n);
+}
+
+/* Called at beginning of function body (after prologue).
+ Record the function's starting line number, so we can output
+ relative line numbers for the other lines.
+ Record the file name that this function is contained in. */
+
+void
+xcoffout_begin_function (file, last_linenum)
+ FILE *file;
+ int last_linenum;
+{
+ ASM_OUTPUT_LFB (file, last_linenum);
+}
+
+/* Called at end of function (before epilogue).
+ Describe end of outermost block. */
+
+void
+xcoffout_end_function (file, last_linenum)
+ FILE *file;
+ int last_linenum;
+{
+ ASM_OUTPUT_LFE (file, last_linenum);
+}
+
+/* Output xcoff info for the absolute end of a function.
+ Called after the epilogue is output. */
+
+void
+xcoffout_end_epilogue (file)
+ FILE *file;
+{
+ /* We need to pass the correct function size to .function, otherwise,
+ the xas assembler can't figure out the correct size for the function
+ aux entry. So, we emit a label after the last instruction which can
+ be used by the .function pseudo op to calculate the function size. */
+
+ char *fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+ if (*fname == '*')
+ ++fname;
+ fprintf (file, "FE..");
+ ASM_OUTPUT_LABEL (file, fname);
+}
+#endif /* XCOFF_DEBUGGING_INFO */
diff --git a/gnu/usr.bin/cc/cpp/Makefile b/gnu/usr.bin/cc/cpp/Makefile
new file mode 100644
index 0000000..8467da4
--- /dev/null
+++ b/gnu/usr.bin/cc/cpp/Makefile
@@ -0,0 +1,12 @@
+#
+# $FreeBSD$
+#
+
+PROG = cpp
+SRCS = cccp.c cexp.c
+BINDIR= /usr/libexec
+LDDESTDIR+= -L${.CURDIR}/../cc_int/obj
+LDDESTDIR+= -L${.CURDIR}/../cc_int
+LDADD+= -lcc_int
+
+.include <bsd.prog.mk>
diff --git a/gnu/usr.bin/cc/cpp/cccp.c b/gnu/usr.bin/cc/cpp/cccp.c
new file mode 100644
index 0000000..640b36b
--- /dev/null
+++ b/gnu/usr.bin/cc/cpp/cccp.c
@@ -0,0 +1,9804 @@
+/* C Compatible Compiler Preprocessor (CCCP)
+ Copyright (C) 1986, 87, 89, 92, 93, 1994 Free Software Foundation, Inc.
+ Written by Paul Rubin, June 1986
+ Adapted to ANSI C, Richard Stallman, Jan 1987
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding! */
+
+typedef unsigned char U_CHAR;
+
+#ifdef EMACS
+#define NO_SHORTNAMES
+#include "../src/config.h"
+#ifdef open
+#undef open
+#undef read
+#undef write
+#endif /* open */
+#endif /* EMACS */
+
+/* The macro EMACS is defined when cpp is distributed as part of Emacs,
+ for the sake of machines with limited C compilers. */
+#ifndef EMACS
+#include "config.h"
+#endif /* not EMACS */
+
+#ifndef STANDARD_INCLUDE_DIR
+#define STANDARD_INCLUDE_DIR "/usr/include"
+#endif
+
+#ifndef LOCAL_INCLUDE_DIR
+#define LOCAL_INCLUDE_DIR "/usr/local/include"
+#endif
+
+#if 0 /* We can't get ptrdiff_t, so I arranged not to need PTR_INT_TYPE. */
+#ifdef __STDC__
+#define PTR_INT_TYPE ptrdiff_t
+#else
+#define PTR_INT_TYPE long
+#endif
+#endif /* 0 */
+
+#include "pcp.h"
+
+#ifndef STDC_VALUE
+#define STDC_VALUE 1
+#endif
+
+/* By default, colon separates directories in a path. */
+#ifndef PATH_SEPARATOR
+#define PATH_SEPARATOR ':'
+#endif
+
+/* In case config.h defines these. */
+#undef bcopy
+#undef bzero
+#undef bcmp
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <signal.h>
+
+#ifndef VMS
+#ifndef USG
+#include <sys/time.h> /* for __DATE__ and __TIME__ */
+#include <sys/resource.h>
+#else
+#include <time.h>
+#include <fcntl.h>
+#endif /* USG */
+#endif /* not VMS */
+
+/* This defines "errno" properly for VMS, and gives us EACCES. */
+#include <errno.h>
+
+/* VMS-specific definitions */
+#ifdef VMS
+#include <time.h>
+#include <perror.h> /* This defines sys_errlist/sys_nerr properly */
+#include <descrip.h>
+#define O_RDONLY 0 /* Open arg for Read/Only */
+#define O_WRONLY 1 /* Open arg for Write/Only */
+#define read(fd,buf,size) VMS_read (fd,buf,size)
+#define write(fd,buf,size) VMS_write (fd,buf,size)
+#define open(fname,mode,prot) VMS_open (fname,mode,prot)
+#define fopen(fname,mode) VMS_fopen (fname,mode)
+#define freopen(fname,mode,ofile) VMS_freopen (fname,mode,ofile)
+#define strncat(dst,src,cnt) VMS_strncat (dst,src,cnt)
+static char * VMS_strncat ();
+static int VMS_read ();
+static int VMS_write ();
+static int VMS_open ();
+static FILE * VMS_fopen ();
+static FILE * VMS_freopen ();
+static void hack_vms_include_specification ();
+typedef struct { unsigned :16, :16, :16; } vms_ino_t;
+#define ino_t vms_ino_t
+#define INCLUDE_LEN_FUDGE 10 /* leave room for VMS syntax conversion */
+#ifdef __GNUC__
+#define BSTRING /* VMS/GCC supplies the bstring routines */
+#endif /* __GNUC__ */
+#endif /* VMS */
+
+extern char *index ();
+extern char *rindex ();
+
+#ifndef O_RDONLY
+#define O_RDONLY 0
+#endif
+
+#undef MIN
+#undef MAX
+#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
+#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
+
+/* Find the largest host integer type and set its size and type. */
+
+#ifndef HOST_BITS_PER_WIDE_INT
+
+#if HOST_BITS_PER_LONG > HOST_BITS_PER_INT
+#define HOST_BITS_PER_WIDE_INT HOST_BITS_PER_LONG
+#define HOST_WIDE_INT long
+#else
+#define HOST_BITS_PER_WIDE_INT HOST_BITS_PER_INT
+#define HOST_WIDE_INT int
+#endif
+
+#endif
+
+#ifndef S_ISREG
+#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
+#endif
+
+#ifndef S_ISDIR
+#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
+#endif
+
+/* Define a generic NULL if one hasn't already been defined. */
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef GENERIC_PTR
+#if defined (USE_PROTOTYPES) ? USE_PROTOTYPES : defined (__STDC__)
+#define GENERIC_PTR void *
+#else
+#define GENERIC_PTR char *
+#endif
+#endif
+
+#ifndef NULL_PTR
+#define NULL_PTR ((GENERIC_PTR)0)
+#endif
+
+#ifndef INCLUDE_LEN_FUDGE
+#define INCLUDE_LEN_FUDGE 0
+#endif
+
+/* Forward declarations. */
+
+char *xmalloc ();
+void error ();
+void warning ();
+
+/* External declarations. */
+
+extern char *getenv ();
+extern FILE *fdopen ();
+extern char *version_string;
+extern struct tm *localtime ();
+extern int sys_nerr;
+#if defined(bsd4_4) || defined(__NetBSD__)
+extern const char *const sys_errlist[];
+#else
+extern char *sys_errlist[];
+#endif
+extern int parse_escape ();
+
+#ifndef errno
+extern int errno;
+#endif
+
+/* Forward declarations. */
+
+struct directive;
+struct file_buf;
+struct arglist;
+struct argdata;
+
+#if defined(USG) || defined(VMS)
+#ifndef BSTRING
+void bcopy ();
+void bzero ();
+int bcmp ();
+#endif
+#endif
+
+/* These functions are declared to return int instead of void since they
+ are going to be placed in a table and some old compilers have trouble with
+ pointers to functions returning void. */
+
+static int do_define ();
+static int do_line ();
+static int do_include ();
+static int do_undef ();
+static int do_error ();
+static int do_pragma ();
+static int do_ident ();
+static int do_if ();
+static int do_xifdef ();
+static int do_else ();
+static int do_elif ();
+static int do_endif ();
+static int do_sccs ();
+static int do_once ();
+static int do_assert ();
+static int do_unassert ();
+static int do_warning ();
+
+static void add_import ();
+static void append_include_chain ();
+static void deps_output ();
+static void make_undef ();
+static void make_definition ();
+static void make_assertion ();
+static void path_include ();
+static void initialize_builtins ();
+static void initialize_char_syntax ();
+static void dump_arg_n ();
+static void dump_defn_1 ();
+static void delete_macro ();
+static void trigraph_pcp ();
+static void rescan ();
+static void finclude ();
+static void validate_else ();
+static int comp_def_part ();
+static void error_from_errno ();
+static void error_with_line ();
+void pedwarn ();
+void pedwarn_with_line ();
+static void pedwarn_with_file_and_line ();
+static void fatal ();
+void fancy_abort ();
+static void pfatal_with_name ();
+static void perror_with_name ();
+static void pipe_closed ();
+static void print_containing_files ();
+static int lookup_import ();
+static int redundant_include_p ();
+static is_system_include ();
+static struct file_name_map *read_name_map ();
+static char *read_filename_string ();
+static int open_include_file ();
+static int check_preconditions ();
+static void pcfinclude ();
+static void pcstring_used ();
+static void write_output ();
+static int check_macro_name ();
+static int compare_defs ();
+static int compare_token_lists ();
+static int eval_if_expression ();
+static int discard_comments ();
+static int change_newlines ();
+static int line_for_error ();
+static int hashf ();
+static int file_size_and_mode ();
+
+static struct arglist *read_token_list ();
+static void free_token_list ();
+
+static struct hashnode *install ();
+struct hashnode *lookup ();
+
+static struct assertion_hashnode *assertion_install ();
+static struct assertion_hashnode *assertion_lookup ();
+
+static char *xrealloc ();
+static char *xcalloc ();
+static char *savestring ();
+
+static void delete_assertion ();
+static void macroexpand ();
+static void dump_all_macros ();
+static void conditional_skip ();
+static void skip_if_group ();
+static void output_line_command ();
+
+/* Last arg to output_line_command. */
+enum file_change_code {same_file, enter_file, leave_file};
+
+static int grow_outbuf ();
+static int handle_directive ();
+static void memory_full ();
+
+static U_CHAR *macarg1 ();
+static char *macarg ();
+
+static U_CHAR *skip_to_end_of_comment ();
+static U_CHAR *skip_quoted_string ();
+static U_CHAR *skip_paren_group ();
+static char *quote_string ();
+
+static char *check_precompiled ();
+/* static struct macrodef create_definition (); [moved below] */
+static void dump_single_macro ();
+static void output_dots ();
+
+#ifndef FAILURE_EXIT_CODE
+#define FAILURE_EXIT_CODE 33 /* gnu cc command understands this */
+#endif
+
+#ifndef SUCCESS_EXIT_CODE
+#define SUCCESS_EXIT_CODE 0 /* 0 means success on Unix. */
+#endif
+
+/* Name under which this program was invoked. */
+
+static char *progname;
+
+/* Nonzero means use extra default include directories for C++. */
+
+static int cplusplus;
+
+/* Nonzero means handle cplusplus style comments */
+
+static int cplusplus_comments;
+
+/* Nonzero means handle #import, for objective C. */
+
+static int objc;
+
+/* Nonzero means this is an assembly file, and allow
+ unknown directives, which could be comments. */
+
+static int lang_asm;
+
+/* Current maximum length of directory names in the search path
+ for include files. (Altered as we get more of them.) */
+
+static int max_include_len;
+
+/* Nonzero means turn NOTREACHED into #pragma NOTREACHED etc */
+
+static int for_lint = 0;
+
+/* Nonzero means copy comments into the output file. */
+
+static int put_out_comments = 0;
+
+/* Nonzero means don't process the ANSI trigraph sequences. */
+
+static int no_trigraphs = 0;
+
+/* Nonzero means print the names of included files rather than
+ the preprocessed output. 1 means just the #include "...",
+ 2 means #include <...> as well. */
+
+static int print_deps = 0;
+
+/* Nonzero if missing .h files in -M output are assumed to be generated
+ files and not errors. */
+
+static int print_deps_missing_files = 0;
+
+/* Nonzero means print names of header files (-H). */
+
+static int print_include_names = 0;
+
+/* Nonzero means don't output line number information. */
+
+static int no_line_commands;
+
+/* dump_only means inhibit output of the preprocessed text
+     and instead output the definitions of all user-defined
+     macros in a form suitable for use as input to cccp.
+   dump_names means pass #define and the macro name through to output.
+   dump_definitions means pass the whole definition (plus #define)
+   through.  */
+
+static enum {dump_none, dump_only, dump_names, dump_definitions}
+ dump_macros = dump_none;
+
+/* Nonzero means pass all #define and #undef directives which we actually
+ process through to the output stream. This feature is used primarily
+ to allow cc1 to record the #defines and #undefs for the sake of
+ debuggers which understand about preprocessor macros, but it may
+ also be useful with -E to figure out how symbols are defined, and
+ where they are defined. */
+static int debug_output = 0;
+
+/* Nonzero indicates special processing used by the pcp program. The
+ special effects of this mode are:
+
+ Inhibit all macro expansion, except those inside #if directives.
+
+ Process #define directives normally, and output their contents
+ to the output file.
+
+ Output preconditions to pcp_outfile indicating all the relevant
+ preconditions for use of this file in a later cpp run.
+*/
+static FILE *pcp_outfile;
+
+/* Nonzero means we are inside an IF during a -pcp run. In this mode
+ macro expansion is done, and preconditions are output for all macro
+ uses requiring them. */
+static int pcp_inside_if;
+
+/* Nonzero means never to include precompiled files.
+ This is 1 since there's no way now to make precompiled files,
+ so it's not worth testing for them. */
+static int no_precomp = 1;
+
+/* Nonzero means give all the error messages the ANSI standard requires. */
+
+int pedantic;
+
+/* Nonzero means try to make failure to fit ANSI C an error. */
+
+static int pedantic_errors;
+
+/* Nonzero means don't print warning messages. -w. */
+
+static int inhibit_warnings = 0;
+
+/* Nonzero means warn if slash-star appears in a comment. */
+
+static int warn_comments;
+
+/* Nonzero means warn if a macro argument is (or would be)
+ stringified with -traditional. */
+
+static int warn_stringify;
+
+/* Nonzero means warn if there are any trigraphs. */
+
+static int warn_trigraphs;
+
+/* Nonzero means warn if #import is used. */
+
+static int warn_import = 1;
+
+/* Nonzero means turn warnings into errors. */
+
+static int warnings_are_errors;
+
+/* Nonzero means try to imitate old fashioned non-ANSI preprocessor. */
+
+int traditional;
+
+/* Nonzero causes output not to be done,
+ but directives such as #define that have side effects
+ are still obeyed. */
+
+static int no_output;
+
+/* Nonzero means this file was included with a -imacros or -include
+ command line and should not be recorded as an include file. */
+
+static int no_record_file;
+
+/* Nonzero means that we have finished processing the command line options.
+ This flag is used to decide whether or not to issue certain errors
+ and/or warnings. */
+
+static int done_initializing = 0;
+
+/* Line where a newline was first seen in a string constant. */
+
+static int multiline_string_line = 0;
+
+/* I/O buffer structure.
+ The `fname' field is nonzero for source files and #include files
+ and for the dummy text used for -D and -U.
+ It is zero for rescanning results of macro expansion
+ and for expanding macro arguments. */
+#define INPUT_STACK_MAX 400
+static struct file_buf {
+ char *fname;
+ /* Filename specified with #line command. */
+ char *nominal_fname;
+ /* Record where in the search path this file was found.
+ For #include_next. */
+ struct file_name_list *dir;
+ int lineno;
+ int length;
+ U_CHAR *buf;
+ U_CHAR *bufp;
+ /* Macro that this level is the expansion of.
+ Included so that we can reenable the macro
+ at the end of this level. */
+ struct hashnode *macro;
+ /* Value of if_stack at start of this file.
+ Used to prohibit unmatched #endif (etc) in an include file. */
+ struct if_stack *if_stack;
+ /* Object to be freed at end of input at this level. */
+ U_CHAR *free_ptr;
+ /* True if this is a header file included using <FILENAME>. */
+ char system_header_p;
+} instack[INPUT_STACK_MAX];
+
+static int last_error_tick; /* Incremented each time we print it. */
+static int input_file_stack_tick; /* Incremented when the status changes. */
+
+/* Current nesting level of input sources.
+ `instack[indepth]' is the level currently being read. */
+static int indepth = -1;
+#define CHECK_DEPTH(code) \
+ if (indepth >= (INPUT_STACK_MAX - 1)) \
+ { \
+ error_with_line (line_for_error (instack[indepth].lineno), \
+ "macro or `#include' recursion too deep"); \
+ code; \
+ }
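+
+/* CHECK_DEPTH is meant to be used just before pushing a new level on
+   INSTACK, e.g. (a sketch):
+	CHECK_DEPTH (return;);
+	ip = &instack[++indepth];
+   so that runaway `#include' or macro recursion is diagnosed instead
+   of overflowing the stack.  */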
+
+/* Current depth in #include directives that use <...>. */
+static int system_include_depth = 0;
+
+typedef struct file_buf FILE_BUF;
+
+/* The output buffer. Its LENGTH field is the amount of room allocated
+ for the buffer, not the number of chars actually present. To get
+ that, subtract outbuf.buf from outbuf.bufp. */
+
+#define OUTBUF_SIZE 10 /* initial size of output buffer */
+static FILE_BUF outbuf;
+
+/* Grow output buffer OBUF points at
+ so it can hold at least NEEDED more chars. */
+
+#define check_expand(OBUF, NEEDED) \
+ (((OBUF)->length - ((OBUF)->bufp - (OBUF)->buf) <= (NEEDED)) \
+ ? grow_outbuf ((OBUF), (NEEDED)) : 0)
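+
+/* Typical use (a sketch): before appending N bytes to the output,
+	check_expand (op, N);
+	bcopy (src, (char *) op->bufp, N);
+	op->bufp += N;
+   where grow_outbuf enlarges the buffer only when the room is lacking.  */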
+
+struct file_name_list
+ {
+ struct file_name_list *next;
+ char *fname;
+ /* If the following is nonzero, it is a macro name.
+ Don't include the file again if that macro is defined. */
+ U_CHAR *control_macro;
+ /* If the following is nonzero, it is a C-language system include
+ directory. */
+ int c_system_include_path;
+ /* Mapping of file names for this directory. */
+ struct file_name_map *name_map;
+ /* Non-zero if name_map is valid. */
+ int got_name_map;
+ };
+
+/* #include "file" looks in source file dir, then stack. */
+/* #include <file> just looks in the stack. */
+/* -I directories are added to the end, then the defaults are added. */
+/* The */
+static struct default_include {
+ char *fname; /* The name of the directory. */
+ int cplusplus; /* Only look here if we're compiling C++. */
+ int cxx_aware; /* Includes in this directory don't need to
+ be wrapped in extern "C" when compiling
+ C++. */
+} include_defaults_array[]
+#ifdef INCLUDE_DEFAULTS
+ = INCLUDE_DEFAULTS;
+#else
+ = {
+ /* Pick up GNU C++ specific include files. */
+ { GPLUSPLUS_INCLUDE_DIR, 1, 1 },
+#ifdef CROSS_COMPILE
+ /* This is the dir for fixincludes. Put it just before
+ the files that we fix. */
+ { GCC_INCLUDE_DIR, 0, 0 },
+ /* For cross-compilation, this dir name is generated
+ automatically in Makefile.in. */
+ { CROSS_INCLUDE_DIR, 0, 0 },
+ /* This is another place that the target system's headers might be. */
+ { TOOL_INCLUDE_DIR, 0, 1 },
+#else /* not CROSS_COMPILE */
+ /* This should be /usr/local/include and should come before
+ the fixincludes-fixed header files. */
+ { LOCAL_INCLUDE_DIR, 0, 1 },
+ /* This is here ahead of GCC_INCLUDE_DIR because assert.h goes here.
+ Likewise, behind LOCAL_INCLUDE_DIR, where glibc puts its assert.h. */
+ { TOOL_INCLUDE_DIR, 0, 1 },
+ /* This is the dir for fixincludes. Put it just before
+ the files that we fix. */
+ { GCC_INCLUDE_DIR, 0, 0 },
+ /* Some systems have an extra dir of include files. */
+#ifdef SYSTEM_INCLUDE_DIR
+ { SYSTEM_INCLUDE_DIR, 0, 0 },
+#endif
+ { STANDARD_INCLUDE_DIR, 0, 0 },
+#endif /* not CROSS_COMPILE */
+ { 0, 0, 0 }
+ };
+#endif /* no INCLUDE_DEFAULTS */
+
+/* The code looks at the defaults through this pointer, rather than through
+ the constant structure above. This pointer gets changed if an environment
+ variable specifies other defaults. */
+static struct default_include *include_defaults = include_defaults_array;
+
+static struct file_name_list *include = 0;	/* First dir to search */
+/* This is the first element to use for #include <...>;
+   that is, the first dir searched for <file>.
+   If it is 0, use the entire chain for such includes.  */
+static struct file_name_list *first_bracket_include = 0;
+/* This is the first element in the chain that corresponds to
+ a directory of system header files. */
+static struct file_name_list *first_system_include = 0;
+static struct file_name_list *last_include = 0; /* Last in chain */
+
+/* Chain of include directories to put at the end of the other chain. */
+static struct file_name_list *after_include = 0;
+static struct file_name_list *last_after_include = 0; /* Last in chain */
+
+/* Chain to put at the start of the system include files. */
+static struct file_name_list *before_system = 0;
+static struct file_name_list *last_before_system = 0; /* Last in chain */
+
+/* List of included files that contained #pragma once. */
+static struct file_name_list *dont_repeat_files = 0;
+
+/* List of other included files.
+   If ->control_macro is nonzero, the file had a #ifndef
+ around the entire contents, and ->control_macro gives the macro name. */
+static struct file_name_list *all_include_files = 0;
+
+/* Directory prefix that should replace `/usr' in the standard
+ include file directories. */
+static char *include_prefix;
+
+/* Global list of strings read in from precompiled files. This list
+ is kept in the order the strings are read in, with new strings being
+ added at the end through stringlist_tailp. We use this list to output
+ the strings at the end of the run.
+*/
+static STRINGDEF *stringlist;
+static STRINGDEF **stringlist_tailp = &stringlist;
+
+
+/* Structure returned by create_definition */
+typedef struct macrodef MACRODEF;
+struct macrodef
+{
+ struct definition *defn;
+ U_CHAR *symnam;
+ int symlen;
+};
+
+static struct macrodef create_definition ();
+
+
+/* Structure allocated for every #define. For a simple replacement
+ such as
+ #define foo bar ,
+ nargs = -1, the `pattern' list is null, and the expansion is just
+ the replacement text. Nargs = 0 means a functionlike macro with no args,
+ e.g.,
+ #define getchar() getc (stdin) .
+ When there are args, the expansion is the replacement text with the
+ args squashed out, and the reflist is a list describing how to
+ build the output from the input: e.g., "3 chars, then the 1st arg,
+ then 9 chars, then the 3rd arg, then 0 chars, then the 2nd arg".
+ The chars here come from the expansion. Whatever is left of the
+ expansion after the last arg-occurrence is copied after that arg.
+ Note that the reflist can be arbitrarily long---
+ its length depends on the number of times the arguments appear in
+ the replacement text, not how many args there are. Example:
+ #define f(x) x+x+x+x+x+x+x would have replacement text "++++++" and
+ pattern list
+ { (0, 1), (1, 1), (1, 1), ..., (1, 1), NULL }
+ where (x, y) means (nchars, argno). */
+
+typedef struct definition DEFINITION;
+struct definition {
+ int nargs;
+ int length; /* length of expansion string */
+ int predefined; /* True if the macro was builtin or */
+ /* came from the command line */
+ U_CHAR *expansion;
+ int line; /* Line number of definition */
+ char *file; /* File of definition */
+ char rest_args; /* Nonzero if last arg. absorbs the rest */
+ struct reflist {
+ struct reflist *next;
+ char stringify; /* nonzero if this arg was preceded by a
+ # operator. */
+ char raw_before; /* Nonzero if a ## operator before arg. */
+ char raw_after; /* Nonzero if a ## operator after arg. */
+ char rest_args; /* Nonzero if this arg. absorbs the rest */
+ int nchars; /* Number of literal chars to copy before
+ this arg occurrence. */
+ int argno; /* Number of arg to substitute (origin-0) */
+ } *pattern;
+ union {
+ /* Names of macro args, concatenated in reverse order
+ with comma-space between them.
+ The only use of this is that we warn on redefinition
+ if this differs between the old and new definitions. */
+ U_CHAR *argnames;
+ } args;
+};
+
+/* different kinds of things that can appear in the value field
+ of a hash node. Actually, this may be useless now. */
+union hashval {
+ int ival;
+ char *cpval;
+ DEFINITION *defn;
+ KEYDEF *keydef;
+};
+
+/*
+ * special extension string that can be added to the last macro argument to
+ * allow it to absorb the "rest" of the arguments when expanded. Ex:
+ * #define wow(a, b...) process (b, a, b)
+ * { wow (1, 2, 3); } -> { process (2, 3, 1, 2, 3); }
+ * { wow (one, two); } -> { process (two, one, two); }
+ * if this "rest_arg" is used with the concat token '##' and if it is not
+ * supplied then the token attached to with ## will not be outputted. Ex:
+ * #define wow (a, b...) process (b ## , a, ## b)
+ * { wow (1, 2); } -> { process (2, 1, 2); }
+ * { wow (one); } -> { process (one); {
+ */
+static char rest_extension[] = "...";
+#define REST_EXTENSION_LENGTH (sizeof (rest_extension) - 1)
+
+/* The structure of a node in the hash table. The hash table
+ has entries for all tokens defined by #define commands (type T_MACRO),
+ plus some special tokens like __LINE__ (these each have their own
+ type, and the appropriate code is run when that type of node is seen.
+ It does not contain control words like "#define", which are recognized
+ by a separate piece of code. */
+
+/* different flavors of hash nodes --- also used in keyword table */
+enum node_type {
+ T_DEFINE = 1, /* the `#define' keyword */
+ T_INCLUDE, /* the `#include' keyword */
+ T_INCLUDE_NEXT, /* the `#include_next' keyword */
+ T_IMPORT, /* the `#import' keyword */
+ T_IFDEF, /* the `#ifdef' keyword */
+ T_IFNDEF, /* the `#ifndef' keyword */
+ T_IF, /* the `#if' keyword */
+ T_ELSE, /* `#else' */
+ T_PRAGMA, /* `#pragma' */
+ T_ELIF, /* `#elif' */
+ T_UNDEF, /* `#undef' */
+ T_LINE, /* `#line' */
+ T_ERROR, /* `#error' */
+ T_WARNING, /* `#warning' */
+ T_ENDIF, /* `#endif' */
+ T_SCCS, /* `#sccs', used on system V. */
+ T_IDENT, /* `#ident', used on system V. */
+ T_ASSERT, /* `#assert', taken from system V. */
+ T_UNASSERT, /* `#unassert', taken from system V. */
+ T_SPECLINE, /* special symbol `__LINE__' */
+ T_DATE, /* `__DATE__' */
+ T_FILE, /* `__FILE__' */
+ T_BASE_FILE, /* `__BASE_FILE__' */
+ T_INCLUDE_LEVEL, /* `__INCLUDE_LEVEL__' */
+ T_VERSION, /* `__VERSION__' */
+ T_SIZE_TYPE, /* `__SIZE_TYPE__' */
+ T_PTRDIFF_TYPE, /* `__PTRDIFF_TYPE__' */
+ T_WCHAR_TYPE, /* `__WCHAR_TYPE__' */
+ T_USER_LABEL_PREFIX_TYPE, /* `__USER_LABEL_PREFIX__' */
+ T_REGISTER_PREFIX_TYPE, /* `__REGISTER_PREFIX__' */
+ T_TIME, /* `__TIME__' */
+ T_CONST, /* Constant value, used by `__STDC__' */
+ T_MACRO, /* macro defined by `#define' */
+ T_DISABLED, /* macro temporarily turned off for rescan */
+ T_SPEC_DEFINED, /* special `defined' macro for use in #if statements */
+ T_PCSTRING, /* precompiled string (hashval is KEYDEF *) */
+ T_UNUSED /* Used for something not defined. */
+ };
+
+struct hashnode {
+ struct hashnode *next; /* double links for easy deletion */
+ struct hashnode *prev;
+ struct hashnode **bucket_hdr; /* also, a back pointer to this node's hash
+ chain is kept, in case the node is the head
+ of the chain and gets deleted. */
+ enum node_type type; /* type of special token */
+ int length; /* length of token, for quick comparison */
+ U_CHAR *name; /* the actual name */
+ union hashval value; /* pointer to expansion, or whatever */
+};
+
+typedef struct hashnode HASHNODE;
+
+/* Some definitions for the hash table. The hash function MUST be
+ computed as shown in hashf () below. That is because the rescan
+ loop computes the hash value `on the fly' for most tokens,
+ in order to avoid the overhead of a lot of procedure calls to
+ the hashf () function. Hashf () only exists for the sake of
+ politeness, for use when speed isn't so important. */
+
+#define HASHSIZE 1403
+static HASHNODE *hashtab[HASHSIZE];
+#define HASHSTEP(old, c) ((old << 2) + c)
+#define MAKE_POS(v) (v & 0x7fffffff) /* make number positive */
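+
+/* The `on the fly' computation mentioned above amounts to (a sketch):
+	for (hash = 0; len--; )
+	  hash = HASHSTEP (hash, *name++);
+	hash = MAKE_POS (hash) % HASHSIZE;
+   and hashf () must reproduce exactly this value.  */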
+
+/* Symbols to predefine. */
+
+#ifdef CPP_PREDEFINES
+static char *predefs = CPP_PREDEFINES;
+#else
+static char *predefs = "";
+#endif
+
+/* We let tm.h override the types used here, to handle trivial differences
+ such as the choice of unsigned int or long unsigned int for size_t.
+ When machines start needing nontrivial differences in the size type,
+ it would be best to do something here to figure out automatically
+ from other information what type to use. */
+
+/* The string value for __SIZE_TYPE__. */
+
+#ifndef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+#endif
+
+/* The string value for __PTRDIFF_TYPE__. */
+
+#ifndef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+#endif
+
+/* The string value for __WCHAR_TYPE__. */
+
+#ifndef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+#endif
+char * wchar_type = WCHAR_TYPE;
+#undef WCHAR_TYPE
+
+/* The string value for __USER_LABEL_PREFIX__ */
+
+#ifndef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+#endif
+
+/* The string value for __REGISTER_PREFIX__ */
+
+#ifndef REGISTER_PREFIX
+#define REGISTER_PREFIX ""
+#endif
+
+/* In the definition of a #assert name, this structure forms
+ a list of the individual values asserted.
+ Each value is itself a list of "tokens".
+ These are strings that are compared by name. */
+
+struct tokenlist_list {
+ struct tokenlist_list *next;
+ struct arglist *tokens;
+};
+
+struct assertion_hashnode {
+ struct assertion_hashnode *next; /* double links for easy deletion */
+ struct assertion_hashnode *prev;
+ /* also, a back pointer to this node's hash
+ chain is kept, in case the node is the head
+ of the chain and gets deleted. */
+ struct assertion_hashnode **bucket_hdr;
+ int length; /* length of token, for quick comparison */
+ U_CHAR *name; /* the actual name */
+ /* List of token-sequences. */
+ struct tokenlist_list *value;
+};
+
+typedef struct assertion_hashnode ASSERTION_HASHNODE;
+
+/* Some definitions for the hash table. The hash function MUST be
+ computed as shown in hashf below. That is because the rescan
+ loop computes the hash value `on the fly' for most tokens,
+ in order to avoid the overhead of a lot of procedure calls to
+ the hashf function. hashf only exists for the sake of
+ politeness, for use when speed isn't so important. */
+
+#define ASSERTION_HASHSIZE 37
+static ASSERTION_HASHNODE *assertion_hashtab[ASSERTION_HASHSIZE];
+
+/* Nonzero means inhibit macroexpansion of what seem to be
+ assertion tests, in rescan. For #if. */
+static int assertions_flag;
+
+/* `struct directive' defines one #-directive, including how to handle it. */
+
+struct directive {
+ int length; /* Length of name */
+ int (*func)(); /* Function to handle directive */
+ char *name; /* Name of directive */
+ enum node_type type; /* Code which describes which directive. */
+ char angle_brackets; /* Nonzero => <...> is special. */
+ char traditional_comments; /* Nonzero: keep comments if -traditional. */
+ char pass_thru; /* Copy preprocessed directive to output file. */
+};
+
+/* Here is the actual list of #-directives, most-often-used first. */
+
+static struct directive directive_table[] = {
+ { 6, do_define, "define", T_DEFINE, 0, 1},
+ { 2, do_if, "if", T_IF},
+ { 5, do_xifdef, "ifdef", T_IFDEF},
+ { 6, do_xifdef, "ifndef", T_IFNDEF},
+ { 5, do_endif, "endif", T_ENDIF},
+ { 4, do_else, "else", T_ELSE},
+ { 4, do_elif, "elif", T_ELIF},
+ { 4, do_line, "line", T_LINE},
+ { 7, do_include, "include", T_INCLUDE, 1},
+ { 12, do_include, "include_next", T_INCLUDE_NEXT, 1},
+ { 6, do_include, "import", T_IMPORT, 1},
+ { 5, do_undef, "undef", T_UNDEF},
+ { 5, do_error, "error", T_ERROR},
+ { 7, do_warning, "warning", T_WARNING},
+#ifdef SCCS_DIRECTIVE
+ { 4, do_sccs, "sccs", T_SCCS},
+#endif
+ { 6, do_pragma, "pragma", T_PRAGMA, 0, 0, 1},
+ { 5, do_ident, "ident", T_IDENT},
+ { 6, do_assert, "assert", T_ASSERT},
+ { 8, do_unassert, "unassert", T_UNASSERT},
+ { -1, 0, "", T_UNUSED},
+};
+
+/* When a directive handler is called,
+ this points to the # that started the directive. */
+U_CHAR *directive_start;
+
+/* table to tell if char can be part of a C identifier. */
+U_CHAR is_idchar[256];
+/* table to tell if char can be first char of a c identifier. */
+U_CHAR is_idstart[256];
+/* table to tell if c is horizontal space. */
+U_CHAR is_hor_space[256];
+/* table to tell if c is horizontal or vertical space. */
+static U_CHAR is_space[256];
+
+#define SKIP_WHITE_SPACE(p) do { while (is_hor_space[*p]) p++; } while (0)
+#define SKIP_ALL_WHITE_SPACE(p) do { while (is_space[*p]) p++; } while (0)
+
+static int errors = 0; /* Error counter for exit code */
+
+/* Name of output file, for error messages. */
+static char *out_fname;
+
+/* Zero means dollar signs are punctuation.
+ -$ stores 0; -traditional may store 1. Default is 1 for VMS, 0 otherwise.
+ This must be 0 for correct processing of this ANSI C program:
+ #define foo(a) #a
+ #define lose(b) foo (b)
+ #define test$
+ lose (test) */
+static int dollars_in_ident;
+#ifndef DOLLARS_IN_IDENTIFIERS
+#define DOLLARS_IN_IDENTIFIERS 1
+#endif
+
+static FILE_BUF expand_to_temp_buffer ();
+
+static DEFINITION *collect_expansion ();
+
+/* Stack of conditionals currently in progress
+ (including both successful and failing conditionals). */
+
+struct if_stack {
+ struct if_stack *next; /* for chaining to the next stack frame */
+ char *fname; /* copied from input when frame is made */
+ int lineno; /* similarly */
+ int if_succeeded; /* true if a leg of this if-group
+ has been passed through rescan */
+ U_CHAR *control_macro; /* For #ifndef at start of file,
+ this is the macro name tested. */
+ enum node_type type; /* type of last directive seen in this group */
+};
+typedef struct if_stack IF_STACK_FRAME;
+static IF_STACK_FRAME *if_stack = NULL;
+
+/* Buffer of -M output. */
+static char *deps_buffer;
+
+/* Number of bytes allocated in above. */
+static int deps_allocated_size;
+
+/* Number of bytes used. */
+static int deps_size;
+
+/* Number of bytes since the last newline. */
+static int deps_column;
+
+/* Nonzero means -I- has been seen,
+ so don't look for #include "foo" the source-file directory. */
+static int ignore_srcdir;
+
+/* Read LEN bytes at PTR from descriptor DESC, for file FILENAME,
+ retrying if necessary. Return a negative value if an error occurs,
+ otherwise return the actual number of bytes read,
+ which must be LEN unless end-of-file was reached. */
+
+static int
+safe_read (desc, ptr, len)
+ int desc;
+ char *ptr;
+ int len;
+{
+ int left = len;
+ while (left > 0) {
+ int nchars = read (desc, ptr, left);
+ if (nchars < 0)
+ {
+#ifdef EINTR
+ if (errno == EINTR)
+ continue;
+#endif
+ return nchars;
+ }
+ if (nchars == 0)
+ break;
+ ptr += nchars;
+ left -= nchars;
+ }
+ return len - left;
+}
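+
+/* Typical use (a sketch, with hypothetical names): a caller that knows
+   the file's size checks for a short count, e.g.
+	if (safe_read (f, (char *) fp->buf, st_size) != st_size)
+	  pfatal_with_name (fname);
+   since anything short of LEN can only mean end-of-file or an error.  */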
+
+/* Write LEN bytes at PTR to descriptor DESC,
+ retrying if necessary, and treating any real error as fatal. */
+
+static void
+safe_write (desc, ptr, len)
+ int desc;
+ char *ptr;
+ int len;
+{
+ while (len > 0) {
+ int written = write (desc, ptr, len);
+ if (written < 0)
+ {
+#ifdef EINTR
+ if (errno == EINTR)
+ continue;
+#endif
+ pfatal_with_name (out_fname);
+ }
+ ptr += written;
+ len -= written;
+ }
+}
+
+int
+main (argc, argv)
+ int argc;
+ char **argv;
+{
+ int st_mode;
+ long st_size;
+ char *in_fname;
+ char *p;
+ int f, i;
+ FILE_BUF *fp;
+ char **pend_files = (char **) xmalloc (argc * sizeof (char *));
+ char **pend_defs = (char **) xmalloc (argc * sizeof (char *));
+ char **pend_undefs = (char **) xmalloc (argc * sizeof (char *));
+ char **pend_assertions = (char **) xmalloc (argc * sizeof (char *));
+ char **pend_includes = (char **) xmalloc (argc * sizeof (char *));
+
+ /* Record the option used with each element of pend_assertions.
+ This is preparation for supporting more than one option for making
+ an assertion. */
+ char **pend_assertion_options = (char **) xmalloc (argc * sizeof (char *));
+ int inhibit_predefs = 0;
+ int no_standard_includes = 0;
+ int no_standard_cplusplus_includes = 0;
+ int missing_newline = 0;
+
+ /* Non-0 means don't output the preprocessed program. */
+ int inhibit_output = 0;
+ /* Non-0 means -v, so print the full set of include dirs. */
+ int verbose = 0;
+
+ /* File name which deps are being written to.
+ This is 0 if deps are being written to stdout. */
+ char *deps_file = 0;
+ /* Fopen file mode to open deps_file with. */
+ char *deps_mode = "a";
+ /* Stream on which to print the dependency information. */
+ FILE *deps_stream = 0;
+ /* Target-name to write with the dependency information. */
+ char *deps_target = 0;
+
+#ifdef RLIMIT_STACK
+ /* Get rid of any avoidable limit on stack size. */
+ {
+ struct rlimit rlim;
+
+ /* Set the stack limit huge so that alloca (particularly stringtab
+ * in dbxread.c) does not fail. */
+ getrlimit (RLIMIT_STACK, &rlim);
+ rlim.rlim_cur = rlim.rlim_max;
+ setrlimit (RLIMIT_STACK, &rlim);
+ }
+#endif /* RLIMIT_STACK defined */
+
+#ifdef SIGPIPE
+ signal (SIGPIPE, pipe_closed);
+#endif
+
+ p = argv[0] + strlen (argv[0]);
+ while (p != argv[0] && p[-1] != '/') --p;
+ progname = p;
+
+#ifdef VMS
+ {
+ /* Remove directories from PROGNAME. */
+ char *s;
+
+ progname = savestring (argv[0]);
+
+ if (!(s = rindex (progname, ']')))
+ s = rindex (progname, ':');
+ if (s)
+ strcpy (progname, s+1);
+ if (s = rindex (progname, '.'))
+ *s = '\0';
+ }
+#endif
+
+ in_fname = NULL;
+ out_fname = NULL;
+
+ /* Initialize is_idchar to allow $. */
+ dollars_in_ident = 1;
+ initialize_char_syntax ();
+ dollars_in_ident = DOLLARS_IN_IDENTIFIERS > 0;
+
+ no_line_commands = 0;
+ no_trigraphs = 1;
+ dump_macros = dump_none;
+ no_output = 0;
+ cplusplus = 0;
+ cplusplus_comments = 0;
+
+ bzero ((char *) pend_files, argc * sizeof (char *));
+ bzero ((char *) pend_defs, argc * sizeof (char *));
+ bzero ((char *) pend_undefs, argc * sizeof (char *));
+ bzero ((char *) pend_assertions, argc * sizeof (char *));
+ bzero ((char *) pend_includes, argc * sizeof (char *));
+
+ /* Process switches and find input file name. */
+
+ for (i = 1; i < argc; i++) {
+ if (argv[i][0] != '-') {
+ if (out_fname != NULL)
+ fatal ("Usage: %s [switches] input output", argv[0]);
+ else if (in_fname != NULL)
+ out_fname = argv[i];
+ else
+ in_fname = argv[i];
+ } else {
+ switch (argv[i][1]) {
+
+ case 'i':
+ if (!strcmp (argv[i], "-include")) {
+ if (i + 1 == argc)
+ fatal ("Filename missing after `-include' option");
+ else
+ pend_includes[i] = argv[i+1], i++;
+ }
+ if (!strcmp (argv[i], "-imacros")) {
+ if (i + 1 == argc)
+ fatal ("Filename missing after `-imacros' option");
+ else
+ pend_files[i] = argv[i+1], i++;
+ }
+ if (!strcmp (argv[i], "-iprefix")) {
+ if (i + 1 == argc)
+ fatal ("Filename missing after `-iprefix' option");
+ else
+ include_prefix = argv[++i];
+ }
+ if (!strcmp (argv[i], "-isystem")) {
+ struct file_name_list *dirtmp;
+
+ if (i + 1 == argc)
+ fatal ("Filename missing after `-isystem' option");
+
+ dirtmp = (struct file_name_list *)
+ xmalloc (sizeof (struct file_name_list));
+ dirtmp->next = 0;
+ dirtmp->control_macro = 0;
+ dirtmp->c_system_include_path = 1;
+ dirtmp->fname = (char *) xmalloc (strlen (argv[i+1]) + 1);
+ strcpy (dirtmp->fname, argv[++i]);
+ dirtmp->got_name_map = 0;
+
+ if (before_system == 0)
+ before_system = dirtmp;
+ else
+ last_before_system->next = dirtmp;
+ last_before_system = dirtmp; /* Tail follows the last one */
+ }
+ /* Add directory to end of path for includes,
+ with the default prefix at the front of its name. */
+ if (!strcmp (argv[i], "-iwithprefix")) {
+ struct file_name_list *dirtmp;
+ char *prefix;
+
+ if (include_prefix != 0)
+ prefix = include_prefix;
+ else {
+ prefix = savestring (GCC_INCLUDE_DIR);
+ /* Remove the `include' from /usr/local/lib/gcc.../include. */
+ if (!strcmp (prefix + strlen (prefix) - 8, "/include"))
+ prefix[strlen (prefix) - 7] = 0;
+ }
+
+ dirtmp = (struct file_name_list *)
+ xmalloc (sizeof (struct file_name_list));
+ dirtmp->next = 0; /* New one goes on the end */
+ dirtmp->control_macro = 0;
+ dirtmp->c_system_include_path = 0;
+ if (i + 1 == argc)
+ fatal ("Directory name missing after `-iwithprefix' option");
+
+ dirtmp->fname = (char *) xmalloc (strlen (argv[i+1])
+ + strlen (prefix) + 1);
+ strcpy (dirtmp->fname, prefix);
+ strcat (dirtmp->fname, argv[++i]);
+ dirtmp->got_name_map = 0;
+
+ if (after_include == 0)
+ after_include = dirtmp;
+ else
+ last_after_include->next = dirtmp;
+ last_after_include = dirtmp; /* Tail follows the last one */
+ }
+ /* Add directory to main path for includes,
+ with the default prefix at the front of its name. */
+ if (!strcmp (argv[i], "-iwithprefixbefore")) {
+ struct file_name_list *dirtmp;
+ char *prefix;
+
+ if (include_prefix != 0)
+ prefix = include_prefix;
+ else {
+ prefix = savestring (GCC_INCLUDE_DIR);
+ /* Remove the `include' from /usr/local/lib/gcc.../include. */
+ if (!strcmp (prefix + strlen (prefix) - 8, "/include"))
+ prefix[strlen (prefix) - 7] = 0;
+ }
+
+ dirtmp = (struct file_name_list *)
+ xmalloc (sizeof (struct file_name_list));
+ dirtmp->next = 0; /* New one goes on the end */
+ dirtmp->control_macro = 0;
+ dirtmp->c_system_include_path = 0;
+ if (i + 1 == argc)
+ fatal ("Directory name missing after `-iwithprefixbefore' option");
+
+ dirtmp->fname = (char *) xmalloc (strlen (argv[i+1])
+ + strlen (prefix) + 1);
+ strcpy (dirtmp->fname, prefix);
+ strcat (dirtmp->fname, argv[++i]);
+ dirtmp->got_name_map = 0;
+
+ append_include_chain (dirtmp, dirtmp);
+ }
+ /* Add directory to end of path for includes. */
+ if (!strcmp (argv[i], "-idirafter")) {
+ struct file_name_list *dirtmp;
+
+ dirtmp = (struct file_name_list *)
+ xmalloc (sizeof (struct file_name_list));
+ dirtmp->next = 0; /* New one goes on the end */
+ dirtmp->control_macro = 0;
+ dirtmp->c_system_include_path = 0;
+ if (i + 1 == argc)
+ fatal ("Directory name missing after `-idirafter' option");
+ else
+ dirtmp->fname = argv[++i];
+ dirtmp->got_name_map = 0;
+
+ if (after_include == 0)
+ after_include = dirtmp;
+ else
+ last_after_include->next = dirtmp;
+ last_after_include = dirtmp; /* Tail follows the last one */
+ }
+ break;
+
+ case 'o':
+ if (out_fname != NULL)
+ fatal ("Output filename specified twice");
+ if (i + 1 == argc)
+ fatal ("Filename missing after -o option");
+ out_fname = argv[++i];
+ if (!strcmp (out_fname, "-"))
+ out_fname = "";
+ break;
+
+ case 'p':
+ if (!strcmp (argv[i], "-pedantic"))
+ pedantic = 1;
+ else if (!strcmp (argv[i], "-pedantic-errors")) {
+ pedantic = 1;
+ pedantic_errors = 1;
+ } else if (!strcmp (argv[i], "-pcp")) {
+ char *pcp_fname = argv[++i];
+ pcp_outfile =
+ ((pcp_fname[0] != '-' || pcp_fname[1] != '\0')
+ ? fopen (pcp_fname, "w")
+ : fdopen (dup (fileno (stdout)), "w"));
+ if (pcp_outfile == 0)
+ pfatal_with_name (pcp_fname);
+ no_precomp = 1;
+ }
+ break;
+
+ case 't':
+ if (!strcmp (argv[i], "-traditional")) {
+ traditional = 1;
+ if (dollars_in_ident > 0)
+ dollars_in_ident = 1;
+ } else if (!strcmp (argv[i], "-trigraphs")) {
+ no_trigraphs = 0;
+ }
+ break;
+
+ case 'l':
+ if (! strcmp (argv[i], "-lang-c"))
+ cplusplus = 0, cplusplus_comments = 0, objc = 0;
+ if (! strcmp (argv[i], "-lang-c++"))
+ cplusplus = 1, cplusplus_comments = 1, objc = 0;
+ if (! strcmp (argv[i], "-lang-c-c++-comments"))
+ cplusplus = 0, cplusplus_comments = 1, objc = 0;
+ if (! strcmp (argv[i], "-lang-objc"))
+ objc = 1, cplusplus = 0, cplusplus_comments = 1;
+ if (! strcmp (argv[i], "-lang-objc++"))
+ objc = 1, cplusplus = 1, cplusplus_comments = 1;
+ if (! strcmp (argv[i], "-lang-asm"))
+ lang_asm = 1;
+ if (! strcmp (argv[i], "-lint"))
+ for_lint = 1;
+ break;
+
+ case '+':
+ cplusplus = 1, cplusplus_comments = 1;
+ break;
+
+ case 'w':
+ inhibit_warnings = 1;
+ break;
+
+ case 'W':
+ if (!strcmp (argv[i], "-Wtrigraphs"))
+ warn_trigraphs = 1;
+ else if (!strcmp (argv[i], "-Wno-trigraphs"))
+ warn_trigraphs = 0;
+ else if (!strcmp (argv[i], "-Wcomment"))
+ warn_comments = 1;
+ else if (!strcmp (argv[i], "-Wno-comment"))
+ warn_comments = 0;
+ else if (!strcmp (argv[i], "-Wcomments"))
+ warn_comments = 1;
+ else if (!strcmp (argv[i], "-Wno-comments"))
+ warn_comments = 0;
+ else if (!strcmp (argv[i], "-Wtraditional"))
+ warn_stringify = 1;
+ else if (!strcmp (argv[i], "-Wno-traditional"))
+ warn_stringify = 0;
+ else if (!strcmp (argv[i], "-Wimport"))
+ warn_import = 1;
+ else if (!strcmp (argv[i], "-Wno-import"))
+ warn_import = 0;
+ else if (!strcmp (argv[i], "-Werror"))
+ warnings_are_errors = 1;
+ else if (!strcmp (argv[i], "-Wno-error"))
+ warnings_are_errors = 0;
+ else if (!strcmp (argv[i], "-Wall"))
+ {
+ warn_trigraphs = 1;
+ warn_comments = 1;
+ }
+ break;
+
+ case 'M':
+ /* The style of the choices here is a bit mixed.
+ The chosen scheme is a hybrid of keeping all options in one string
+ and specifying each option in a separate argument:
+ -M|-MM|-MD file|-MMD file [-MG]. An alternative is:
+ -M|-MM|-MD file|-MMD file|-MG|-MMG; or more concisely:
+ -M[M][G][D file]. This is awkward to handle in specs, and is not
+ as extensible. */
+ /* ??? -MG must be specified in addition to one of -M or -MM.
+ This can be relaxed in the future without breaking anything.
+ The converse isn't true. */
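+
+      /* A rough illustration (hypothetical invocation): `cpp -M foo.c'
+	 writes something like `foo.o: foo.c foo.h' on stdout and
+	 suppresses the normal preprocessed output, while
+	 `cpp -MD foo.d foo.c' writes the same rule to foo.d and still
+	 preprocesses as usual.  */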
+
+ /* -MG isn't valid with -MD or -MMD. This is checked for later. */
+ if (!strcmp (argv[i], "-MG"))
+ {
+ print_deps_missing_files = 1;
+ break;
+ }
+ if (!strcmp (argv[i], "-M"))
+ print_deps = 2;
+ else if (!strcmp (argv[i], "-MM"))
+ print_deps = 1;
+ else if (!strcmp (argv[i], "-MD"))
+ print_deps = 2;
+ else if (!strcmp (argv[i], "-MMD"))
+ print_deps = 1;
+ /* For -MD and -MMD options, write deps on file named by next arg. */
+ if (!strcmp (argv[i], "-MD")
+ || !strcmp (argv[i], "-MMD")) {
+ i++;
+ deps_file = argv[i];
+ deps_mode = "w";
+ } else {
+ /* For -M and -MM, write deps on standard output
+ and suppress the usual output. */
+ deps_stream = stdout;
+ inhibit_output = 1;
+ }
+ break;
+
+ case 'd':
+ {
+ char *p = argv[i] + 2;
+ char c;
+ while (c = *p++) {
+ /* Arg to -d specifies what parts of macros to dump */
+ switch (c) {
+ case 'M':
+ dump_macros = dump_only;
+ no_output = 1;
+ break;
+ case 'N':
+ dump_macros = dump_names;
+ break;
+ case 'D':
+ dump_macros = dump_definitions;
+ break;
+ }
+ }
+ }
+ break;
+
+ case 'g':
+ if (argv[i][2] == '3')
+ debug_output = 1;
+ break;
+
+ case 'v':
+ fprintf (stderr, "GNU CPP version %s", version_string);
+#ifdef TARGET_VERSION
+ TARGET_VERSION;
+#endif
+ fprintf (stderr, "\n");
+ verbose = 1;
+ break;
+
+ case 'H':
+ print_include_names = 1;
+ break;
+
+ case 'D':
+ if (argv[i][2] != 0)
+ pend_defs[i] = argv[i] + 2;
+ else if (i + 1 == argc)
+ fatal ("Macro name missing after -D option");
+ else
+ i++, pend_defs[i] = argv[i];
+ break;
+
+ case 'A':
+ {
+ char *p;
+
+ if (argv[i][2] != 0)
+ p = argv[i] + 2;
+ else if (i + 1 == argc)
+ fatal ("Assertion missing after -A option");
+ else
+ p = argv[++i];
+
+ if (!strcmp (p, "-")) {
+ /* -A- eliminates all predefined macros and assertions.
+ Let's include also any that were specified earlier
+ on the command line. That way we can get rid of any
+ that were passed automatically in from GCC. */
+ int j;
+ inhibit_predefs = 1;
+ for (j = 0; j < i; j++)
+ pend_defs[j] = pend_assertions[j] = 0;
+ } else {
+ pend_assertions[i] = p;
+ pend_assertion_options[i] = "-A";
+ }
+ }
+ break;
+
+ case 'U': /* JF #undef something */
+ if (argv[i][2] != 0)
+ pend_undefs[i] = argv[i] + 2;
+ else if (i + 1 == argc)
+ fatal ("Macro name missing after -U option");
+ else
+ pend_undefs[i] = argv[i+1], i++;
+ break;
+
+ case 'C':
+ put_out_comments = 1;
+ break;
+
+ case 'E': /* -E comes from cc -E; ignore it. */
+ break;
+
+ case 'P':
+ no_line_commands = 1;
+ break;
+
+ case '$': /* Don't include $ in identifiers. */
+ dollars_in_ident = 0;
+ break;
+
+ case 'I': /* Add directory to path for includes. */
+ {
+ struct file_name_list *dirtmp;
+
+ if (! ignore_srcdir && !strcmp (argv[i] + 2, "-")) {
+ ignore_srcdir = 1;
+ /* Don't use any preceding -I directories for #include <...>. */
+ first_bracket_include = 0;
+ }
+ else {
+ dirtmp = (struct file_name_list *)
+ xmalloc (sizeof (struct file_name_list));
+ dirtmp->next = 0; /* New one goes on the end */
+ dirtmp->control_macro = 0;
+ dirtmp->c_system_include_path = 0;
+ if (argv[i][2] != 0)
+ dirtmp->fname = argv[i] + 2;
+ else if (i + 1 == argc)
+ fatal ("Directory name missing after -I option");
+ else
+ dirtmp->fname = argv[++i];
+ dirtmp->got_name_map = 0;
+ append_include_chain (dirtmp, dirtmp);
+ }
+ }
+ break;
+
+ case 'n':
+ if (!strcmp (argv[i], "-nostdinc"))
+ /* -nostdinc causes no default include directories.
+ You must specify all include-file directories with -I. */
+ no_standard_includes = 1;
+ else if (!strcmp (argv[i], "-nostdinc++"))
+ /* -nostdinc++ causes no default C++-specific include directories. */
+ no_standard_cplusplus_includes = 1;
+ else if (!strcmp (argv[i], "-noprecomp"))
+ no_precomp = 1;
+ break;
+
+ case 'u':
+ /* Sun compiler passes undocumented switch "-undef".
+ Let's assume it means to inhibit the predefined symbols. */
+ inhibit_predefs = 1;
+ break;
+
+ case '\0': /* JF handle '-' as file name meaning stdin or stdout */
+ if (in_fname == NULL) {
+ in_fname = "";
+ break;
+ } else if (out_fname == NULL) {
+ out_fname = "";
+ break;
+ } /* else fall through into error */
+
+ default:
+ fatal ("Invalid option `%s'", argv[i]);
+ }
+ }
+ }
+
+ /* Add dirs from CPATH after dirs from -I. */
+ /* There seems to be confusion about what CPATH should do,
+ so for the moment it is not documented. */
+ /* Some people say that CPATH should replace the standard include dirs,
+ but that seems pointless: it comes before them, so it overrides them
+ anyway. */
+ p = (char *) getenv ("CPATH");
+ if (p != 0 && ! no_standard_includes)
+ path_include (p);
+
+ /* Now that dollars_in_ident is known, initialize is_idchar. */
+ initialize_char_syntax ();
+
+ /* Initialize output buffer */
+
+ outbuf.buf = (U_CHAR *) xmalloc (OUTBUF_SIZE);
+ outbuf.bufp = outbuf.buf;
+ outbuf.length = OUTBUF_SIZE;
+
+ /* Do partial setup of input buffer for the sake of generating
+ early #line directives (when -g is in effect). */
+
+ fp = &instack[++indepth];
+ if (in_fname == NULL)
+ in_fname = "";
+ fp->nominal_fname = fp->fname = in_fname;
+ fp->lineno = 0;
+
+ /* In C++, wchar_t is a distinct basic type, and we can expect
+ __wchar_t to be defined by cc1plus. */
+ if (cplusplus)
+ wchar_type = "__wchar_t";
+
+ /* Install __LINE__, etc. Must follow initialize_char_syntax
+ and option processing. */
+ initialize_builtins (fp, &outbuf);
+
+ /* Do standard #defines and assertions
+ that identify system and machine type. */
+
+ if (!inhibit_predefs) {
+ char *p = (char *) alloca (strlen (predefs) + 1);
+ strcpy (p, predefs);
+ while (*p) {
+ char *q;
+ while (*p == ' ' || *p == '\t')
+ p++;
+ /* Handle -D options. */
+ if (p[0] == '-' && p[1] == 'D') {
+ q = &p[2];
+ while (*p && *p != ' ' && *p != '\t')
+ p++;
+ if (*p != 0)
+ *p++= 0;
+ if (debug_output)
+ output_line_command (fp, &outbuf, 0, same_file);
+ make_definition (q, &outbuf);
+ while (*p == ' ' || *p == '\t')
+ p++;
+ } else if (p[0] == '-' && p[1] == 'A') {
+ /* Handle -A options (assertions). */
+ char *assertion;
+ char *past_name;
+ char *value;
+ char *past_value;
+ char *termination;
+ int save_char;
+
+ assertion = &p[2];
+ past_name = assertion;
+ /* Locate end of name. */
+ while (*past_name && *past_name != ' '
+ && *past_name != '\t' && *past_name != '(')
+ past_name++;
+ /* Locate `(' at start of value. */
+ value = past_name;
+ while (*value && (*value == ' ' || *value == '\t'))
+ value++;
+ if (*value++ != '(')
+ abort ();
+ while (*value && (*value == ' ' || *value == '\t'))
+ value++;
+ past_value = value;
+ /* Locate end of value. */
+ while (*past_value && *past_value != ' '
+ && *past_value != '\t' && *past_value != ')')
+ past_value++;
+ termination = past_value;
+ while (*termination && (*termination == ' ' || *termination == '\t'))
+ termination++;
+ if (*termination++ != ')')
+ abort ();
+ if (*termination && *termination != ' ' && *termination != '\t')
+ abort ();
+ /* Temporarily null-terminate the value. */
+ save_char = *termination;
+ *termination = '\0';
+ /* Install the assertion. */
+ make_assertion ("-A", assertion);
+ *termination = (char) save_char;
+ p = termination;
+ while (*p == ' ' || *p == '\t')
+ p++;
+ } else {
+ abort ();
+ }
+ }
+ }
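+
+  /* Rough illustration (hypothetical value): the predefs string
+     parsed above might look like
+	"-Dunix -Di386 -Asystem(unix) -Acpu(i386)"
+     i.e. whitespace-separated -D and -A specs, with each -A value
+     wrapped in parentheses.  */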
+
+ /* Now handle the command line options. */
+
+ /* Do -U's, -D's and -A's in the order they were seen. */
+ for (i = 1; i < argc; i++) {
+ if (pend_undefs[i]) {
+ if (debug_output)
+ output_line_command (fp, &outbuf, 0, same_file);
+ make_undef (pend_undefs[i], &outbuf);
+ }
+ if (pend_defs[i]) {
+ if (debug_output)
+ output_line_command (fp, &outbuf, 0, same_file);
+ make_definition (pend_defs[i], &outbuf);
+ }
+ if (pend_assertions[i])
+ make_assertion (pend_assertion_options[i], pend_assertions[i]);
+ }
+
+ done_initializing = 1;
+
+  { /* Read the appropriate environment variable and, if it is set,
+	prepend the listed directories to the compiled-in defaults in
+	include_defaults (the defaults are copied back in at the end). */
+ char *epath = 0;
+ switch ((objc << 1) + cplusplus)
+ {
+ case 0:
+ epath = getenv ("C_INCLUDE_PATH");
+ break;
+ case 1:
+ epath = getenv ("CPLUS_INCLUDE_PATH");
+ break;
+ case 2:
+ epath = getenv ("OBJC_INCLUDE_PATH");
+ break;
+ case 3:
+ epath = getenv ("OBJCPLUS_INCLUDE_PATH");
+ break;
+ }
+ /* If the environment var for this language is set,
+ add to the default list of include directories. */
+ if (epath) {
+ char *nstore = (char *) alloca (strlen (epath) + 2);
+ int num_dirs;
+ char *startp, *endp;
+
+ for (num_dirs = 1, startp = epath; *startp; startp++)
+ if (*startp == PATH_SEPARATOR)
+ num_dirs++;
+ include_defaults
+ = (struct default_include *) xmalloc ((num_dirs
+ * sizeof (struct default_include))
+ + sizeof (include_defaults_array));
+ startp = endp = epath;
+ num_dirs = 0;
+ while (1) {
+ /* Handle cases like c:/usr/lib:d:/gcc/lib */
+ if ((*endp == PATH_SEPARATOR
+#if 0 /* Obsolete, now that we use semicolons as the path separator. */
+#ifdef __MSDOS__
+ && (endp-startp != 1 || !isalpha (*startp))
+#endif
+#endif
+ )
+ || *endp == 0) {
+ strncpy (nstore, startp, endp-startp);
+ if (endp == startp)
+ strcpy (nstore, ".");
+ else
+ nstore[endp-startp] = '\0';
+
+ include_defaults[num_dirs].fname = savestring (nstore);
+ include_defaults[num_dirs].cplusplus = cplusplus;
+ include_defaults[num_dirs].cxx_aware = 1;
+ num_dirs++;
+ if (*endp == '\0')
+ break;
+ endp = startp = endp + 1;
+ } else
+ endp++;
+ }
+ /* Put the usual defaults back in at the end. */
+ bcopy ((char *) include_defaults_array,
+ (char *) &include_defaults[num_dirs],
+ sizeof (include_defaults_array));
+ }
+ }
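+
+  /* Rough illustration (hypothetical value): for plain C with
+     C_INCLUDE_PATH set to "/opt/include:/usr/foo", the loop above
+     rebuilds include_defaults as /opt/include, /usr/foo, followed by
+     the usual compiled-in defaults copied back in at the end.  */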
+
+ append_include_chain (before_system, last_before_system);
+ first_system_include = before_system;
+
+  /* Unless -nostdinc,
+     tack the standard include file dirs onto the specified list.  */
+ if (!no_standard_includes) {
+ struct default_include *p = include_defaults;
+ char *specd_prefix = include_prefix;
+ char *default_prefix = savestring (GCC_INCLUDE_DIR);
+ int default_len = 0;
+ /* Remove the `include' from /usr/local/lib/gcc.../include. */
+ if (!strcmp (default_prefix + strlen (default_prefix) - 8, "/include")) {
+ default_len = strlen (default_prefix) - 7;
+ default_prefix[default_len] = 0;
+ }
+ /* Search "translated" versions of GNU directories.
+ These have /usr/local/lib/gcc... replaced by specd_prefix. */
+ if (specd_prefix != 0 && default_len != 0)
+ for (p = include_defaults; p->fname; p++) {
+ /* Some standard dirs are only for C++. */
+ if (!p->cplusplus || (cplusplus && !no_standard_cplusplus_includes)) {
+ /* Does this dir start with the prefix? */
+ if (!strncmp (p->fname, default_prefix, default_len)) {
+ /* Yes; change prefix and add to search list. */
+ struct file_name_list *new
+ = (struct file_name_list *) xmalloc (sizeof (struct file_name_list));
+ int this_len = strlen (specd_prefix) + strlen (p->fname) - default_len;
+ char *str = (char *) xmalloc (this_len + 1);
+ strcpy (str, specd_prefix);
+ strcat (str, p->fname + default_len);
+ new->fname = str;
+ new->control_macro = 0;
+ new->c_system_include_path = !p->cxx_aware;
+ new->got_name_map = 0;
+ append_include_chain (new, new);
+ if (first_system_include == 0)
+ first_system_include = new;
+ }
+ }
+ }
+ /* Search ordinary names for GNU include directories. */
+ for (p = include_defaults; p->fname; p++) {
+ /* Some standard dirs are only for C++. */
+ if (!p->cplusplus || (cplusplus && !no_standard_cplusplus_includes)) {
+ struct file_name_list *new
+ = (struct file_name_list *) xmalloc (sizeof (struct file_name_list));
+ new->control_macro = 0;
+ new->c_system_include_path = !p->cxx_aware;
+ new->fname = p->fname;
+ new->got_name_map = 0;
+ append_include_chain (new, new);
+ if (first_system_include == 0)
+ first_system_include = new;
+ }
+ }
+ }
+
+ /* Tack the after_include chain at the end of the include chain. */
+ append_include_chain (after_include, last_after_include);
+ if (first_system_include == 0)
+ first_system_include = after_include;
+
+ /* With -v, print the list of dirs to search. */
+ if (verbose) {
+ struct file_name_list *p;
+ fprintf (stderr, "#include \"...\" search starts here:\n");
+ for (p = include; p; p = p->next) {
+ if (p == first_bracket_include)
+ fprintf (stderr, "#include <...> search starts here:\n");
+ fprintf (stderr, " %s\n", p->fname);
+ }
+ fprintf (stderr, "End of search list.\n");
+ }
+
+ /* Scan the -imacros files before the main input.
+ Much like #including them, but with no_output set
+ so that only their macro definitions matter. */
+
+ no_output++; no_record_file++;
+ for (i = 1; i < argc; i++)
+ if (pend_files[i]) {
+ int fd = open (pend_files[i], O_RDONLY, 0666);
+ if (fd < 0) {
+ perror_with_name (pend_files[i]);
+ return FAILURE_EXIT_CODE;
+ }
+ finclude (fd, pend_files[i], &outbuf, 0, NULL_PTR);
+ }
+ no_output--; no_record_file--;
+
+ /* Copy the entire contents of the main input file into
+ the stacked input buffer previously allocated for it. */
+
+ /* JF check for stdin */
+ if (in_fname == NULL || *in_fname == 0) {
+ in_fname = "";
+ f = 0;
+ } else if ((f = open (in_fname, O_RDONLY, 0666)) < 0)
+ goto perror;
+
+ /* -MG doesn't select the form of output and must be specified with one of
+ -M or -MM. -MG doesn't make sense with -MD or -MMD since they don't
+ inhibit compilation. */
+ if (print_deps_missing_files && (print_deps == 0 || !inhibit_output))
+ fatal ("-MG must be specified with one of -M or -MM");
+
+ /* Either of two environment variables can specify output of deps.
+ Its value is either "OUTPUT_FILE" or "OUTPUT_FILE DEPS_TARGET",
+ where OUTPUT_FILE is the file to write deps info to
+ and DEPS_TARGET is the target to mention in the deps. */
+
+ if (print_deps == 0
+ && (getenv ("SUNPRO_DEPENDENCIES") != 0
+ || getenv ("DEPENDENCIES_OUTPUT") != 0)) {
+ char *spec = getenv ("DEPENDENCIES_OUTPUT");
+ char *s;
+ char *output_file;
+
+ if (spec == 0) {
+ spec = getenv ("SUNPRO_DEPENDENCIES");
+ print_deps = 2;
+ }
+ else
+ print_deps = 1;
+
+ s = spec;
+ /* Find the space before the DEPS_TARGET, if there is one. */
+ /* This should use index. (mrs) */
+ while (*s != 0 && *s != ' ') s++;
+ if (*s != 0) {
+ deps_target = s + 1;
+ output_file = (char *) xmalloc (s - spec + 1);
+ bcopy (spec, output_file, s - spec);
+ output_file[s - spec] = 0;
+ }
+ else {
+ deps_target = 0;
+ output_file = spec;
+ }
+
+ deps_file = output_file;
+ deps_mode = "a";
+ }
+
+ /* For -M, print the expected object file name
+ as the target of this Make-rule. */
+ if (print_deps) {
+ deps_allocated_size = 200;
+ deps_buffer = (char *) xmalloc (deps_allocated_size);
+ deps_buffer[0] = 0;
+ deps_size = 0;
+ deps_column = 0;
+
+ if (deps_target) {
+ deps_output (deps_target, ':');
+ } else if (*in_fname == 0) {
+ deps_output ("-", ':');
+ } else {
+ char *p, *q;
+ int len;
+
+ /* Discard all directory prefixes from filename. */
+ if ((q = rindex (in_fname, '/')) != NULL)
+ ++q;
+ else
+ q = in_fname;
+
+ /* Copy remainder to mungable area. */
+ p = (char *) alloca (strlen(q) + 8);
+ strcpy (p, q);
+
+ /* Output P, but remove known suffixes. */
+ len = strlen (p);
+ q = p + len;
+ if (len >= 2
+ && p[len - 2] == '.'
+ && index("cCsSm", p[len - 1]))
+ q = p + (len - 2);
+ else if (len >= 3
+ && p[len - 3] == '.'
+ && p[len - 2] == 'c'
+ && p[len - 1] == 'c')
+ q = p + (len - 3);
+ else if (len >= 4
+ && p[len - 4] == '.'
+ && p[len - 3] == 'c'
+ && p[len - 2] == 'x'
+ && p[len - 1] == 'x')
+ q = p + (len - 4);
+ else if (len >= 4
+ && p[len - 4] == '.'
+ && p[len - 3] == 'c'
+ && p[len - 2] == 'p'
+ && p[len - 1] == 'p')
+ q = p + (len - 4);
+
+ /* Supply our own suffix. */
+#ifndef VMS
+ strcpy (q, ".o");
+#else
+ strcpy (q, ".obj");
+#endif
+
+ deps_output (p, ':');
+ deps_output (in_fname, ' ');
+ }
+ }
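+
+  /* Rough illustration (hypothetical file name): an input of
+     sub/dir/foo.cc loses its directory prefix and its known suffix
+     here, so the emitted rule starts `foo.o: sub/dir/foo.cc'
+     (`foo.obj' on VMS).  */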
+
+ file_size_and_mode (f, &st_mode, &st_size);
+ fp->nominal_fname = fp->fname = in_fname;
+ fp->lineno = 1;
+ fp->system_header_p = 0;
+ /* JF all this is mine about reading pipes and ttys */
+ if (! S_ISREG (st_mode)) {
+ /* Read input from a file that is not a normal disk file.
+ We cannot preallocate a buffer with the correct size,
+       so we must read in the file a piece at a time and make it bigger. */
+ int size;
+ int bsize;
+ int cnt;
+
+ bsize = 2000;
+ size = 0;
+ fp->buf = (U_CHAR *) xmalloc (bsize + 2);
+ for (;;) {
+ cnt = safe_read (f, fp->buf + size, bsize - size);
+ if (cnt < 0) goto perror; /* error! */
+ size += cnt;
+ if (size != bsize) break; /* End of file */
+ bsize *= 2;
+ fp->buf = (U_CHAR *) xrealloc (fp->buf, bsize + 2);
+ }
+ fp->length = size;
+ } else {
+ /* Read a file whose size we can determine in advance.
+ For the sake of VMS, st_size is just an upper bound. */
+ fp->buf = (U_CHAR *) xmalloc (st_size + 2);
+ fp->length = safe_read (f, fp->buf, st_size);
+ if (fp->length < 0) goto perror;
+ }
+ fp->bufp = fp->buf;
+ fp->if_stack = if_stack;
+
+ /* Make sure data ends with a newline. And put a null after it. */
+
+ if ((fp->length > 0 && fp->buf[fp->length - 1] != '\n')
+ /* Backslash-newline at end is not good enough. */
+ || (fp->length > 1 && fp->buf[fp->length - 2] == '\\')) {
+ fp->buf[fp->length++] = '\n';
+ missing_newline = 1;
+ }
+ fp->buf[fp->length] = '\0';
+
+ /* Unless inhibited, convert trigraphs in the input. */
+
+ if (!no_trigraphs)
+ trigraph_pcp (fp);
+
+ /* Now that we know the input file is valid, open the output. */
+
+ if (!out_fname || !strcmp (out_fname, ""))
+ out_fname = "stdout";
+ else if (! freopen (out_fname, "w", stdout))
+ pfatal_with_name (out_fname);
+
+ output_line_command (fp, &outbuf, 0, same_file);
+
+ /* Scan the -include files before the main input. */
+
+ no_record_file++;
+ for (i = 1; i < argc; i++)
+ if (pend_includes[i]) {
+ int fd = open (pend_includes[i], O_RDONLY, 0666);
+ if (fd < 0) {
+ perror_with_name (pend_includes[i]);
+ return FAILURE_EXIT_CODE;
+ }
+ finclude (fd, pend_includes[i], &outbuf, 0, NULL_PTR);
+ }
+ no_record_file--;
+
+ /* Scan the input, processing macros and directives. */
+
+ rescan (&outbuf, 0);
+
+ if (missing_newline)
+ fp->lineno--;
+
+ if (pedantic && missing_newline)
+ pedwarn ("file does not end in newline");
+
+  /* Now we have processed the entire input.
+ Write whichever kind of output has been requested. */
+
+ if (dump_macros == dump_only)
+ dump_all_macros ();
+ else if (! inhibit_output) {
+ write_output ();
+ }
+
+ if (print_deps) {
+ /* Don't actually write the deps file if compilation has failed. */
+ if (errors == 0) {
+ if (deps_file && ! (deps_stream = fopen (deps_file, deps_mode)))
+ pfatal_with_name (deps_file);
+ fputs (deps_buffer, deps_stream);
+ putc ('\n', deps_stream);
+ if (deps_file) {
+ if (ferror (deps_stream) || fclose (deps_stream) != 0)
+ fatal ("I/O error on output");
+ }
+ }
+ }
+
+ if (pcp_outfile && pcp_outfile != stdout
+ && (ferror (pcp_outfile) || fclose (pcp_outfile) != 0))
+ fatal ("I/O error on `-pcp' output");
+
+ if (ferror (stdout) || fclose (stdout) != 0)
+ fatal ("I/O error on output");
+
+ if (errors)
+ exit (FAILURE_EXIT_CODE);
+ exit (SUCCESS_EXIT_CODE);
+
+ perror:
+ pfatal_with_name (in_fname);
+ return 0;
+}
+
+/* Given a colon-separated list of file names PATH,
+ add all the names to the search path for include files. */
+
+static void
+path_include (path)
+ char *path;
+{
+ char *p;
+
+ p = path;
+
+ if (*p)
+ while (1) {
+ char *q = p;
+ char *name;
+ struct file_name_list *dirtmp;
+
+ /* Find the end of this name. */
+ while (*q != 0 && *q != PATH_SEPARATOR) q++;
+ if (p == q) {
+ /* An empty name in the path stands for the current directory. */
+ name = (char *) xmalloc (2);
+ name[0] = '.';
+ name[1] = 0;
+ } else {
+ /* Otherwise use the directory that is named. */
+ name = (char *) xmalloc (q - p + 1);
+ bcopy (p, name, q - p);
+ name[q - p] = 0;
+ }
+
+ dirtmp = (struct file_name_list *)
+ xmalloc (sizeof (struct file_name_list));
+ dirtmp->next = 0; /* New one goes on the end */
+ dirtmp->control_macro = 0;
+ dirtmp->c_system_include_path = 0;
+ dirtmp->fname = name;
+ dirtmp->got_name_map = 0;
+ append_include_chain (dirtmp, dirtmp);
+
+ /* Advance past this name. */
+ p = q;
+ if (*p == 0)
+ break;
+ /* Skip the colon. */
+ p++;
+ }
+}
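+
+/* Rough illustration (hypothetical value): with PATH_SEPARATOR `:',
+   a CPATH of "dir1::dir2" appends dir1, then "." for the empty
+   middle name, then dir2 to the include search chain.  */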
+
+/* Pre-C-Preprocessor to translate ANSI trigraph idiocy in BUF
+ before main CCCP processing. Name `pcp' is also in honor of the
+ drugs the trigraph designers must have been on.
+
+ Using an extra pass through the buffer takes a little extra time,
+ but is infinitely less hairy than trying to handle trigraphs inside
+ strings, etc. everywhere, and also makes sure that trigraphs are
+ only translated in the top level of processing. */
+
+static void
+trigraph_pcp (buf)
+ FILE_BUF *buf;
+{
+ register U_CHAR c, *fptr, *bptr, *sptr;
+ int len;
+
+ fptr = bptr = sptr = buf->buf;
+ while ((sptr = (U_CHAR *) index (sptr, '?')) != NULL) {
+ if (*++sptr != '?')
+ continue;
+ switch (*++sptr) {
+ case '=':
+ c = '#';
+ break;
+ case '(':
+ c = '[';
+ break;
+ case '/':
+ c = '\\';
+ break;
+ case ')':
+ c = ']';
+ break;
+ case '\'':
+ c = '^';
+ break;
+ case '<':
+ c = '{';
+ break;
+ case '!':
+ c = '|';
+ break;
+ case '>':
+ c = '}';
+ break;
+ case '-':
+ c = '~';
+ break;
+ case '?':
+ sptr--;
+ continue;
+ default:
+ continue;
+ }
+ len = sptr - fptr - 2;
+
+ /* BSD doc says bcopy () works right for overlapping strings. In ANSI
+ C, this will be memmove (). */
+ if (bptr != fptr && len > 0)
+ bcopy ((char *) fptr, (char *) bptr, len);
+
+ bptr += len;
+ *bptr++ = c;
+ fptr = ++sptr;
+ }
+ len = buf->length - (fptr - buf->buf);
+ if (bptr != fptr && len > 0)
+ bcopy ((char *) fptr, (char *) bptr, len);
+ buf->length -= fptr - bptr;
+ buf->buf[buf->length] = '\0';
+ if (warn_trigraphs && fptr != bptr)
+ warning ("%d trigraph(s) encountered", (fptr - bptr) / 2);
+}
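+
+/* Illustration (summarizing the switch above): the nine ANSI
+   trigraphs translated are
+	??=  ->  #	??(  ->  [	??)  ->  ]
+	??'  ->  ^	??<  ->  {	??>  ->  }
+	??!  ->  |	??-  ->  ~	??/  ->  backslash
+   so a source line `??=define X 1' becomes `#define X 1'.  */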
+
+/* Move all backslash-newline pairs out of embarrassing places.
+ Exchange all such pairs following BP
+ with any potentially-embarrassing characters that follow them.
+ Potentially-embarrassing characters are / and *
+ (because a backslash-newline inside a comment delimiter
+ would cause it not to be recognized). */
+
+static void
+newline_fix (bp)
+ U_CHAR *bp;
+{
+ register U_CHAR *p = bp;
+ register int count = 0;
+
+ /* First count the backslash-newline pairs here. */
+
+ while (1) {
+ if (p[0] == '\\') {
+ if (p[1] == '\n')
+ p += 2, count++;
+ else if (p[1] == '\r' && p[2] == '\n')
+ p += 3, count++;
+ else
+ break;
+ } else
+ break;
+ }
+
+ /* What follows the backslash-newlines is not embarrassing. */
+
+ if (count == 0 || (*p != '/' && *p != '*'))
+ return;
+
+ /* Copy all potentially embarrassing characters
+ that follow the backslash-newline pairs
+ down to where the pairs originally started. */
+
+ while (*p == '*' || *p == '/')
+ *bp++ = *p++;
+
+ /* Now write the same number of pairs after the embarrassing chars. */
+ while (count-- > 0) {
+ *bp++ = '\\';
+ *bp++ = '\n';
+ }
+}
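+
+/* Rough illustration: a backslash-newline pair sitting between the
+   `/' and `*' of a comment opener is exchanged with the `*', so the
+   two delimiter characters become adjacent and the pair follows
+   them.  */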
+
+/* Like newline_fix but for use within a directive-name.
+ Move any backslash-newlines up past any following symbol constituents. */
+
+static void
+name_newline_fix (bp)
+ U_CHAR *bp;
+{
+ register U_CHAR *p = bp;
+ register int count = 0;
+
+ /* First count the backslash-newline pairs here. */
+ while (1) {
+ if (p[0] == '\\') {
+ if (p[1] == '\n')
+ p += 2, count++;
+ else if (p[1] == '\r' && p[2] == '\n')
+ p += 3, count++;
+ else
+ break;
+ } else
+ break;
+ }
+
+ /* What follows the backslash-newlines is not embarrassing. */
+
+ if (count == 0 || !is_idchar[*p])
+ return;
+
+ /* Copy all potentially embarrassing characters
+ that follow the backslash-newline pairs
+ down to where the pairs originally started. */
+
+ while (is_idchar[*p])
+ *bp++ = *p++;
+
+ /* Now write the same number of pairs after the embarrassing chars. */
+ while (count-- > 0) {
+ *bp++ = '\\';
+ *bp++ = '\n';
+ }
+}
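+
+/* Rough illustration: same idea as newline_fix, but for directive
+   names -- a backslash-newline pair splitting, say, `define' is
+   pushed past the remaining letters, so the scanner sees the whole
+   name as one contiguous token with the pair after it.  */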
+
+/* Look for lint commands in comments.
+
+ When we come in here, ibp points into a comment. Limit is as one expects.
+   Scan within the comment -- it should start, after linear whitespace
+   (lwsp), with a lint command.  If so, that command is returned as a
+   (constant) string.
+
+ Upon return, any arg will be pointed to with argstart and will be
+ arglen long. Note that we don't parse that arg since it will just
+ be printed out again.
+*/
+
+static char *
+get_lintcmd (ibp, limit, argstart, arglen, cmdlen)
+ register U_CHAR *ibp;
+ register U_CHAR *limit;
+ U_CHAR **argstart; /* point to command arg */
+ int *arglen, *cmdlen; /* how long they are */
+{
+ long linsize;
+ register U_CHAR *numptr; /* temp for arg parsing */
+
+ *arglen = 0;
+
+ SKIP_WHITE_SPACE (ibp);
+
+ if (ibp >= limit) return NULL;
+
+ linsize = limit - ibp;
+
+ /* Oh, I wish C had lexical functions... hell, I'll just open-code the set */
+ if ((linsize >= 10) && !strncmp (ibp, "NOTREACHED", 10)) {
+ *cmdlen = 10;
+ return "NOTREACHED";
+ }
+ if ((linsize >= 8) && !strncmp (ibp, "ARGSUSED", 8)) {
+ *cmdlen = 8;
+ return "ARGSUSED";
+ }
+ if ((linsize >= 11) && !strncmp (ibp, "LINTLIBRARY", 11)) {
+ *cmdlen = 11;
+ return "LINTLIBRARY";
+ }
+ if ((linsize >= 7) && !strncmp (ibp, "VARARGS", 7)) {
+ *cmdlen = 7;
+ ibp += 7; linsize -= 7;
+ if ((linsize == 0) || ! isdigit (*ibp)) return "VARARGS";
+
+ /* OK, read a number */
+ for (numptr = *argstart = ibp; (numptr < limit) && isdigit (*numptr);
+ numptr++);
+ *arglen = numptr - *argstart;
+ return "VARARGS";
+ }
+ return NULL;
+}
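+
+/* Rough illustration: a comment whose text begins with "VARARGS2"
+   makes get_lintcmd return "VARARGS" with *cmdlen 7, *argstart
+   pointing at the "2", and *arglen 1; rescan then reemits it as a
+   `#pragma lint VARARGS 2' line (see the for_lint code below).  */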
+
+/*
+ * The main loop of the program.
+ *
+ * Read characters from the input stack, transferring them to the
+ * output buffer OP.
+ *
+ * Macros are expanded and push levels on the input stack.
+ * At the end of such a level it is popped off and we keep reading.
+ * At the end of any other kind of level, we return.
+ * #-directives are handled, except within macros.
+ *
+ * If OUTPUT_MARKS is nonzero, keep Newline markers found in the input
+ * and insert them when appropriate. This is set while scanning macro
+ * arguments before substitution. It is zero when scanning for final output.
+ * There are two types of Newline markers:
+ * * Newline - follows a macro name that was not expanded
+ * because it appeared inside an expansion of the same macro.
+ * This marker prevents future expansion of that identifier.
+ * When the input is rescanned into the final output, these are deleted.
+ * These are also deleted by ## concatenation.
+ * * Newline Space (or Newline and any other whitespace character)
+ * stands for a place that tokens must be separated or whitespace
+ * is otherwise desirable, but where the ANSI standard specifies there
+ * is no whitespace. This marker turns into a Space (or whichever other
+ * whitespace char appears in the marker) in the final output,
+ * but it turns into nothing in an argument that is stringified with #.
+ * Such stringified arguments are the only place where the ANSI standard
+ * specifies with precision that whitespace may not appear.
+ *
+ * During this function, IP->bufp is kept cached in IBP for speed of access.
+ * Likewise, OP->bufp is kept in OBP. Before calling a subroutine
+ * IBP, IP and OBP must be copied back to memory. IP and IBP are
+ * copied back with the RECACHE macro. OBP must be copied back from OP->bufp
+ * explicitly, and before RECACHE, since RECACHE uses OBP.
+ */
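+
+/* Rough illustration (not from the original text): with
+   #define self self, expanding `self' while scanning a macro
+   argument leaves `self' in the output followed by a Newline -
+   marker, which keeps that occurrence from being expanded again;
+   on the final rescan the marker is deleted.  */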
+
+static void
+rescan (op, output_marks)
+ FILE_BUF *op;
+ int output_marks;
+{
+ /* Character being scanned in main loop. */
+ register U_CHAR c;
+
+ /* Length of pending accumulated identifier. */
+ register int ident_length = 0;
+
+ /* Hash code of pending accumulated identifier. */
+ register int hash = 0;
+
+ /* Current input level (&instack[indepth]). */
+ FILE_BUF *ip;
+
+ /* Pointer for scanning input. */
+ register U_CHAR *ibp;
+
+ /* Pointer to end of input. End of scan is controlled by LIMIT. */
+ register U_CHAR *limit;
+
+ /* Pointer for storing output. */
+ register U_CHAR *obp;
+
+ /* REDO_CHAR is nonzero if we are processing an identifier
+ after backing up over the terminating character.
+ Sometimes we process an identifier without backing up over
+ the terminating character, if the terminating character
+ is not special. Backing up is done so that the terminating character
+ will be dispatched on again once the identifier is dealt with. */
+ int redo_char = 0;
+
+ /* 1 if within an identifier inside of which a concatenation
+ marker (Newline -) has been seen. */
+ int concatenated = 0;
+
+ /* While scanning a comment or a string constant,
+ this records the line it started on, for error messages. */
+ int start_line;
+
+ /* Record position of last `real' newline. */
+ U_CHAR *beg_of_line;
+
+/* Pop the innermost input stack level, assuming it is a macro expansion. */
+
+#define POPMACRO \
+do { ip->macro->type = T_MACRO; \
+ if (ip->free_ptr) free (ip->free_ptr); \
+ --indepth; } while (0)
+
+/* Reload `rescan's local variables that describe the current
+ level of the input stack. */
+
+#define RECACHE \
+do { ip = &instack[indepth]; \
+ ibp = ip->bufp; \
+ limit = ip->buf + ip->length; \
+ op->bufp = obp; \
+ check_expand (op, limit - ibp); \
+ beg_of_line = 0; \
+ obp = op->bufp; } while (0)
+
+ if (no_output && instack[indepth].fname != 0)
+ skip_if_group (&instack[indepth], 1);
+
+ obp = op->bufp;
+ RECACHE;
+
+ beg_of_line = ibp;
+
+ /* Our caller must always put a null after the end of
+ the input at each input stack level. */
+ if (*limit != 0)
+ abort ();
+
+ while (1) {
+ c = *ibp++;
+ *obp++ = c;
+
+ switch (c) {
+ case '\\':
+ if (ibp >= limit)
+ break;
+ if (*ibp == '\n') {
+ /* Always merge lines ending with backslash-newline,
+ even in middle of identifier. */
+ ++ibp;
+ ++ip->lineno;
+ --obp; /* remove backslash from obuf */
+ break;
+ }
+ /* Otherwise, backslash suppresses specialness of following char,
+ so copy it here to prevent the switch from seeing it.
+ But first get any pending identifier processed. */
+ if (ident_length > 0)
+ goto specialchar;
+ *obp++ = *ibp++;
+ break;
+
+ case '#':
+ if (assertions_flag) {
+ /* Copy #foo (bar lose) without macro expansion. */
+ SKIP_WHITE_SPACE (ibp);
+ while (is_idchar[*ibp])
+ *obp++ = *ibp++;
+ SKIP_WHITE_SPACE (ibp);
+ if (*ibp == '(') {
+ ip->bufp = ibp;
+ skip_paren_group (ip);
+ bcopy ((char *) ibp, (char *) obp, ip->bufp - ibp);
+ obp += ip->bufp - ibp;
+ ibp = ip->bufp;
+ }
+ }
+
+ /* If this is expanding a macro definition, don't recognize
+ preprocessor directives. */
+ if (ip->macro != 0)
+ goto randomchar;
+ /* If this is expand_into_temp_buffer, recognize them
+ only after an actual newline at this level,
+ not at the beginning of the input level. */
+ if (ip->fname == 0 && beg_of_line == ip->buf)
+ goto randomchar;
+ if (ident_length)
+ goto specialchar;
+
+
+ /* # keyword: a # must be first nonblank char on the line */
+ if (beg_of_line == 0)
+ goto randomchar;
+ {
+ U_CHAR *bp;
+
+ /* Scan from start of line, skipping whitespace, comments
+ and backslash-newlines, and see if we reach this #.
+ If not, this # is not special. */
+ bp = beg_of_line;
+ /* If -traditional, require # to be at beginning of line. */
+ if (!traditional)
+ while (1) {
+ if (is_hor_space[*bp])
+ bp++;
+ else if (*bp == '\\' && bp[1] == '\n')
+ bp += 2;
+ else if (*bp == '/' && bp[1] == '*') {
+ bp += 2;
+ while (!(*bp == '*' && bp[1] == '/'))
+ bp++;
+ bp += 2;
+ }
+ /* There is no point in trying to deal with C++ // comments here,
+ because if there is one, then this # must be part of the
+ comment and we would never reach here. */
+ else break;
+ }
+ if (bp + 1 != ibp)
+ goto randomchar;
+ }
+
+ /* This # can start a directive. */
+
+ --obp; /* Don't copy the '#' */
+
+ ip->bufp = ibp;
+ op->bufp = obp;
+ if (! handle_directive (ip, op)) {
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ /* Not a known directive: treat it as ordinary text.
+ IP, OP, IBP, etc. have not been changed. */
+ if (no_output && instack[indepth].fname) {
+ /* If not generating expanded output,
+ what we do with ordinary text is skip it.
+ Discard everything until next # directive. */
+ skip_if_group (&instack[indepth], 1);
+ RECACHE;
+ beg_of_line = ibp;
+ break;
+ }
+ ++obp; /* Copy the '#' after all */
+ /* Don't expand an identifier that could be a macro directive.
+ (Section 3.8.3 of the ANSI C standard) */
+ SKIP_WHITE_SPACE (ibp);
+ if (is_idstart[*ibp])
+ {
+ *obp++ = *ibp++;
+ while (is_idchar[*ibp])
+ *obp++ = *ibp++;
+ }
+ goto randomchar;
+ }
+#ifdef USE_C_ALLOCA
+ alloca (0);
+#endif
+ /* A # directive has been successfully processed. */
+ /* If not generating expanded output, ignore everything until
+ next # directive. */
+ if (no_output && instack[indepth].fname)
+ skip_if_group (&instack[indepth], 1);
+ obp = op->bufp;
+ RECACHE;
+ beg_of_line = ibp;
+ break;
+
+ case '\"': /* skip quoted string */
+ case '\'':
+ /* A single quoted string is treated like a double -- some
+ programs (e.g., troff) are perverse this way */
+
+ if (ident_length)
+ goto specialchar;
+
+ start_line = ip->lineno;
+
+ /* Skip ahead to a matching quote. */
+
+ while (1) {
+ if (ibp >= limit) {
+ if (ip->macro != 0) {
+ /* try harder: this string crosses a macro expansion boundary.
+ This can happen naturally if -traditional.
+ Otherwise, only -D can make a macro with an unmatched quote. */
+ POPMACRO;
+ RECACHE;
+ continue;
+ }
+ if (!traditional) {
+ error_with_line (line_for_error (start_line),
+ "unterminated string or character constant");
+ error_with_line (multiline_string_line,
+ "possible real start of unterminated constant");
+ multiline_string_line = 0;
+ }
+ break;
+ }
+ *obp++ = *ibp;
+ switch (*ibp++) {
+ case '\n':
+ ++ip->lineno;
+ ++op->lineno;
+ /* Traditionally, end of line ends a string constant with no error.
+ So exit the loop and record the new line. */
+ if (traditional) {
+ beg_of_line = ibp;
+ goto while2end;
+ }
+ if (c == '\'') {
+ error_with_line (line_for_error (start_line),
+ "unterminated character constant");
+ goto while2end;
+ }
+ if (pedantic && multiline_string_line == 0) {
+ pedwarn_with_line (line_for_error (start_line),
+ "string constant runs past end of line");
+ }
+ if (multiline_string_line == 0)
+ multiline_string_line = ip->lineno - 1;
+ break;
+
+ case '\\':
+ if (ibp >= limit)
+ break;
+ if (*ibp == '\n') {
+ /* Backslash newline is replaced by nothing at all,
+ but keep the line counts correct. */
+ --obp;
+ ++ibp;
+ ++ip->lineno;
+ } else {
+ /* ANSI stupidly requires that in \\ the second \
+ is *not* prevented from combining with a newline. */
+ while (*ibp == '\\' && ibp[1] == '\n') {
+ ibp += 2;
+ ++ip->lineno;
+ }
+ *obp++ = *ibp++;
+ }
+ break;
+
+ case '\"':
+ case '\'':
+ if (ibp[-1] == c)
+ goto while2end;
+ break;
+ }
+ }
+ while2end:
+ break;
+
+ case '/':
+ if (*ibp == '\\' && ibp[1] == '\n')
+ newline_fix (ibp);
+
+ if (*ibp != '*'
+ && !(cplusplus_comments && *ibp == '/'))
+ goto randomchar;
+ if (ip->macro != 0)
+ goto randomchar;
+ if (ident_length)
+ goto specialchar;
+
+ if (*ibp == '/') {
+ /* C++ style comment... */
+ start_line = ip->lineno;
+
+ --ibp; /* Back over the slash */
+ --obp;
+
+ /* Comments are equivalent to spaces. */
+ if (! put_out_comments)
+ *obp++ = ' ';
+ else {
+ /* must fake up a comment here */
+ *obp++ = '/';
+ *obp++ = '/';
+ }
+ {
+ U_CHAR *before_bp = ibp+2;
+
+ while (ibp < limit) {
+ if (ibp[-1] != '\\' && *ibp == '\n') {
+ if (put_out_comments) {
+ bcopy ((char *) before_bp, (char *) obp, ibp - before_bp);
+ obp += ibp - before_bp;
+ }
+ break;
+ } else {
+ if (*ibp == '\n') {
+ ++ip->lineno;
+ /* Copy the newline into the output buffer, in order to
+ avoid the pain of a #line every time a multiline comment
+ is seen. */
+ if (!put_out_comments)
+ *obp++ = '\n';
+ ++op->lineno;
+ }
+ ibp++;
+ }
+ }
+ break;
+ }
+ }
+
+ /* Ordinary C comment. Skip it, optionally copying it to output. */
+
+ start_line = ip->lineno;
+
+ ++ibp; /* Skip the star. */
+
+ /* If this cpp is for lint, we peek inside the comments: */
+ if (for_lint) {
+ U_CHAR *argbp;
+ int cmdlen, arglen;
+ char *lintcmd = get_lintcmd (ibp, limit, &argbp, &arglen, &cmdlen);
+
+ if (lintcmd != NULL) {
+ /* I believe it is always safe to emit this newline: */
+ obp[-1] = '\n';
+ bcopy ("#pragma lint ", (char *) obp, 13);
+ obp += 13;
+ bcopy (lintcmd, (char *) obp, cmdlen);
+ obp += cmdlen;
+
+ if (arglen != 0) {
+ *(obp++) = ' ';
+ bcopy (argbp, (char *) obp, arglen);
+ obp += arglen;
+ }
+
+ /* OK, now bring us back to the state we were in before we entered
+	   this branch.  We need a #line because the newline emitted for
+	   the pragma could throw the line numbering off. */
+ output_line_command (ip, op, 0, same_file);
+ *(obp++) = ' '; /* just in case, if comments are copied thru */
+ *(obp++) = '/';
+ }
+ }
+
+ /* Comments are equivalent to spaces.
+ Note that we already output the slash; we might not want it.
+ For -traditional, a comment is equivalent to nothing. */
+ if (! put_out_comments) {
+ if (traditional)
+ obp--;
+ else
+ obp[-1] = ' ';
+ }
+ else
+ *obp++ = '*';
+
+ {
+ U_CHAR *before_bp = ibp;
+
+ while (ibp < limit) {
+ switch (*ibp++) {
+ case '/':
+ if (warn_comments && ibp < limit && *ibp == '*')
+ warning ("`/*' within comment");
+ break;
+ case '*':
+ if (*ibp == '\\' && ibp[1] == '\n')
+ newline_fix (ibp);
+ if (ibp >= limit || *ibp == '/')
+ goto comment_end;
+ break;
+ case '\n':
+ ++ip->lineno;
+ /* Copy the newline into the output buffer, in order to
+ avoid the pain of a #line every time a multiline comment
+ is seen. */
+ if (!put_out_comments)
+ *obp++ = '\n';
+ ++op->lineno;
+ }
+ }
+ comment_end:
+
+ if (ibp >= limit)
+ error_with_line (line_for_error (start_line),
+ "unterminated comment");
+ else {
+ ibp++;
+ if (put_out_comments) {
+ bcopy ((char *) before_bp, (char *) obp, ibp - before_bp);
+ obp += ibp - before_bp;
+ }
+ }
+ }
+ break;
+
+ case '$':
+ if (!dollars_in_ident)
+ goto randomchar;
+ goto letter;
+
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ /* If digit is not part of identifier, it starts a number,
+ which means that following letters are not an identifier.
+ "0x5" does not refer to an identifier "x5".
+ So copy all alphanumerics that follow without accumulating
+ as an identifier. Periods also, for sake of "3.e7". */
+
+ if (ident_length == 0) {
+ while (ibp < limit) {
+ while (ibp < limit && ibp[0] == '\\' && ibp[1] == '\n') {
+ ++ip->lineno;
+ ibp += 2;
+ }
+ c = *ibp++;
+ /* ".." terminates a preprocessing number. This is useless for C
+ code but useful for preprocessing other things. */
+ if (!isalnum (c) && (c != '.' || *ibp == '.') && c != '_') {
+ --ibp;
+ break;
+ }
+ *obp++ = c;
+ /* A sign can be part of a preprocessing number
+ if it follows an e. */
+ if (c == 'e' || c == 'E') {
+ while (ibp < limit && ibp[0] == '\\' && ibp[1] == '\n') {
+ ++ip->lineno;
+ ibp += 2;
+ }
+ if (ibp < limit && (*ibp == '+' || *ibp == '-')) {
+ *obp++ = *ibp++;
+ /* But traditional C does not let the token go past the sign. */
+ if (traditional)
+ break;
+ }
+ }
+ }
+ break;
+ }
+ /* fall through */
+
+ case '_':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
+ case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
+ case 's': case 't': case 'u': case 'v': case 'w': case 'x':
+ case 'y': case 'z':
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
+ case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
+ case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
+ case 'Y': case 'Z':
+ letter:
+ ident_length++;
+ /* Compute step of hash function, to avoid a proc call on every token */
+ hash = HASHSTEP (hash, c);
+ break;
+
+ case '\n':
+ if (ip->fname == 0 && *ibp == '-') {
+ /* Newline - inhibits expansion of preceding token.
+ If expanding a macro arg, we keep the newline -.
+ In final output, it is deleted.
+ We recognize Newline - in macro bodies and macro args. */
+ if (! concatenated) {
+ ident_length = 0;
+ hash = 0;
+ }
+ ibp++;
+ if (!output_marks) {
+ obp--;
+ } else {
+ /* If expanding a macro arg, keep the newline -. */
+ *obp++ = '-';
+ }
+ break;
+ }
+
+ /* If reprocessing a macro expansion, newline is a special marker. */
+ else if (ip->macro != 0) {
+ /* Newline White is a "funny space" to separate tokens that are
+ supposed to be separate but without space between.
+ Here White means any whitespace character.
+ Newline - marks a recursive macro use that is not
+ supposed to be expandable. */
+
+ if (is_space[*ibp]) {
+ /* Newline Space does not prevent expansion of preceding token
+ so expand the preceding token and then come back. */
+ if (ident_length > 0)
+ goto specialchar;
+
+ /* If generating final output, newline space makes a space. */
+ if (!output_marks) {
+ obp[-1] = *ibp++;
+ /* And Newline Newline makes a newline, so count it. */
+ if (obp[-1] == '\n')
+ op->lineno++;
+ } else {
+ /* If expanding a macro arg, keep the newline space.
+ If the arg gets stringified, newline space makes nothing. */
+ *obp++ = *ibp++;
+ }
+ } else abort (); /* Newline followed by something random? */
+ break;
+ }
+
+ /* If there is a pending identifier, handle it and come back here. */
+ if (ident_length > 0)
+ goto specialchar;
+
+ beg_of_line = ibp;
+
+ /* Update the line counts and output a #line if necessary. */
+ ++ip->lineno;
+ ++op->lineno;
+ if (ip->lineno != op->lineno) {
+ op->bufp = obp;
+ output_line_command (ip, op, 1, same_file);
+ check_expand (op, ip->length - (ip->bufp - ip->buf));
+ obp = op->bufp;
+ }
+ break;
+
+ /* Come here either after (1) a null character that is part of the input
+ or (2) at the end of the input, because there is a null there. */
+ case 0:
+ if (ibp <= limit)
+ /* Our input really contains a null character. */
+ goto randomchar;
+
+ /* At end of a macro-expansion level, pop it and read next level. */
+ if (ip->macro != 0) {
+ obp--;
+ ibp--;
+ /* If traditional, and we have an identifier that ends here,
+ process it now, so we get the right error for recursion. */
+ if (traditional && ident_length
+ && ! is_idchar[*instack[indepth - 1].bufp]) {
+ redo_char = 1;
+ goto randomchar;
+ }
+ POPMACRO;
+ RECACHE;
+ break;
+ }
+
+ /* If we don't have a pending identifier,
+ return at end of input. */
+ if (ident_length == 0) {
+ obp--;
+ ibp--;
+ op->bufp = obp;
+ ip->bufp = ibp;
+ goto ending;
+ }
+
+ /* If we do have a pending identifier, just consider this null
+ a special character and arrange to dispatch on it again.
+ The second time, IDENT_LENGTH will be zero so we will return. */
+
+ /* Fall through */
+
+specialchar:
+
+ /* Handle the case of a character such as /, ', " or null
+ seen following an identifier. Back over it so that
+ after the identifier is processed the special char
+ will be dispatched on again. */
+
+ ibp--;
+ obp--;
+ redo_char = 1;
+
+ default:
+
+randomchar:
+
+ if (ident_length > 0) {
+ register HASHNODE *hp;
+
+ /* We have just seen an identifier end. If it's a macro, expand it.
+
+ IDENT_LENGTH is the length of the identifier
+ and HASH is its hash code.
+
+ The identifier has already been copied to the output,
+ so if it is a macro we must remove it.
+
+ If REDO_CHAR is 0, the char that terminated the identifier
+ has been skipped in the output and the input.
+ OBP-IDENT_LENGTH-1 points to the identifier.
+ If the identifier is a macro, we must back over the terminator.
+
+ If REDO_CHAR is 1, the terminating char has already been
+ backed over. OBP-IDENT_LENGTH points to the identifier. */
+
+ if (!pcp_outfile || pcp_inside_if) {
+startagain:
+ for (hp = hashtab[MAKE_POS (hash) % HASHSIZE]; hp != NULL;
+ hp = hp->next) {
+
+ if (hp->length == ident_length) {
+ int obufp_before_macroname;
+ int op_lineno_before_macroname;
+ register int i = ident_length;
+ register U_CHAR *p = hp->name;
+ register U_CHAR *q = obp - i;
+ int disabled;
+
+ if (! redo_char)
+ q--;
+
+ do { /* All this to avoid a strncmp () */
+ if (*p++ != *q++)
+ goto hashcollision;
+ } while (--i);
+
+ /* We found a use of a macro name.
+ see if the context shows it is a macro call. */
+
+ /* Back up over terminating character if not already done. */
+ if (! redo_char) {
+ ibp--;
+ obp--;
+ }
+
+ /* Save this as a displacement from the beginning of the output
+ buffer. We can not save this as a position in the output
+ buffer, because it may get realloc'ed by RECACHE. */
+ obufp_before_macroname = (obp - op->buf) - ident_length;
+ op_lineno_before_macroname = op->lineno;
+
+ if (hp->type == T_PCSTRING) {
+ pcstring_used (hp); /* Mark the definition of this key
+ as needed, ensuring that it
+ will be output. */
+ break; /* Exit loop, since the key cannot have a
+ definition any longer. */
+ }
+
+ /* Record whether the macro is disabled. */
+ disabled = hp->type == T_DISABLED;
+
+ /* This looks like a macro ref, but if the macro was disabled,
+ just copy its name and put in a marker if requested. */
+
+ if (disabled) {
+#if 0
+ /* This error check caught useful cases such as
+ #define foo(x,y) bar (x (y,0), y)
+ foo (foo, baz) */
+ if (traditional)
+ error ("recursive use of macro `%s'", hp->name);
+#endif
+
+ if (output_marks) {
+ check_expand (op, limit - ibp + 2);
+ *obp++ = '\n';
+ *obp++ = '-';
+ }
+ break;
+ }
+
+ /* If macro wants an arglist, verify that a '(' follows.
+ first skip all whitespace, copying it to the output
+ after the macro name. Then, if there is no '(',
+ decide this is not a macro call and leave things that way. */
+ if ((hp->type == T_MACRO || hp->type == T_DISABLED)
+ && hp->value.defn->nargs >= 0)
+ {
+ U_CHAR *old_ibp = ibp;
+ U_CHAR *old_obp = obp;
+ int old_iln = ip->lineno;
+ int old_oln = op->lineno;
+
+ while (1) {
+ /* Scan forward over whitespace, copying it to the output. */
+ if (ibp == limit && ip->macro != 0) {
+ POPMACRO;
+ RECACHE;
+ old_ibp = ibp;
+ old_obp = obp;
+ old_iln = ip->lineno;
+ old_oln = op->lineno;
+ }
+ /* A comment: copy it unchanged or discard it. */
+ else if (*ibp == '/' && ibp+1 != limit && ibp[1] == '*') {
+ if (put_out_comments) {
+ *obp++ = '/';
+ *obp++ = '*';
+ } else if (! traditional) {
+ *obp++ = ' ';
+ }
+ ibp += 2;
+ while (ibp + 1 != limit
+ && !(ibp[0] == '*' && ibp[1] == '/')) {
+ /* We need not worry about newline-marks,
+ since they are never found in comments. */
+ if (*ibp == '\n') {
+ /* Newline in a file. Count it. */
+ ++ip->lineno;
+ ++op->lineno;
+ }
+ if (put_out_comments)
+ *obp++ = *ibp++;
+ else
+ ibp++;
+ }
+ ibp += 2;
+ if (put_out_comments) {
+ *obp++ = '*';
+ *obp++ = '/';
+ }
+ }
+ else if (is_space[*ibp]) {
+ *obp++ = *ibp++;
+ if (ibp[-1] == '\n') {
+ if (ip->macro == 0) {
+ /* Newline in a file. Count it. */
+ ++ip->lineno;
+ ++op->lineno;
+ } else if (!output_marks) {
+ /* A newline mark, and we don't want marks
+ in the output. If it is newline-hyphen,
+ discard it entirely. Otherwise, it is
+ newline-whitechar, so keep the whitechar. */
+ obp--;
+ if (*ibp == '-')
+ ibp++;
+ else {
+ if (*ibp == '\n')
+ ++op->lineno;
+ *obp++ = *ibp++;
+ }
+ } else {
+ /* A newline mark; copy both chars to the output. */
+ *obp++ = *ibp++;
+ }
+ }
+ }
+ else break;
+ }
+ if (*ibp != '(') {
+ /* It isn't a macro call.
+ Put back the space that we just skipped. */
+ ibp = old_ibp;
+ obp = old_obp;
+ ip->lineno = old_iln;
+ op->lineno = old_oln;
+ /* Exit the for loop. */
+ break;
+ }
+ }
+
+ /* This is now known to be a macro call.
+ Discard the macro name from the output,
+ along with any following whitespace just copied. */
+ obp = op->buf + obufp_before_macroname;
+ op->lineno = op_lineno_before_macroname;
+
+ /* Prevent accidental token-pasting with a character
+ before the macro call. */
+ if (!traditional && obp != op->buf
+	      && (obp[-1] == '-' || obp[-1] == '+' || obp[-1] == '&'
+		  || obp[-1] == '|' || obp[-1] == '<' || obp[-1] == '>')) {
+ /* If we are expanding a macro arg, make a newline marker
+ to separate the tokens. If we are making real output,
+ a plain space will do. */
+ if (output_marks)
+ *obp++ = '\n';
+ *obp++ = ' ';
+ }
+
+ /* Expand the macro, reading arguments as needed,
+ and push the expansion on the input stack. */
+ ip->bufp = ibp;
+ op->bufp = obp;
+ macroexpand (hp, op);
+
+ /* Reexamine input stack, since macroexpand has pushed
+ a new level on it. */
+ obp = op->bufp;
+ RECACHE;
+ break;
+ }
+hashcollision:
+ ;
+ } /* End hash-table-search loop */
+ }
+ ident_length = hash = 0; /* Stop collecting identifier */
+ redo_char = 0;
+ concatenated = 0;
+ } /* End if (ident_length > 0) */
+ } /* End switch */
+ } /* End per-char loop */
+
+ /* Come here to return -- but first give an error message
+ if there was an unterminated successful conditional. */
+ ending:
+ if (if_stack != ip->if_stack)
+ {
+ char *str = "unknown";
+
+ switch (if_stack->type)
+ {
+ case T_IF:
+ str = "if";
+ break;
+ case T_IFDEF:
+ str = "ifdef";
+ break;
+ case T_IFNDEF:
+ str = "ifndef";
+ break;
+ case T_ELSE:
+ str = "else";
+ break;
+ case T_ELIF:
+ str = "elif";
+ break;
+ }
+
+ error_with_line (line_for_error (if_stack->lineno),
+ "unterminated `#%s' conditional", str);
+ }
+ if_stack = ip->if_stack;
+}
+
+/*
+ * Rescan a string into a temporary buffer and return the result
+ * as a FILE_BUF. Note this function returns a struct, not a pointer.
+ *
+ * OUTPUT_MARKS nonzero means keep Newline markers found in the input
+ * and insert such markers when appropriate. See `rescan' for details.
+ * OUTPUT_MARKS is 1 for macroexpanding a macro argument separately
+ * before substitution; it is 0 for other uses.
+ */
+static FILE_BUF
+expand_to_temp_buffer (buf, limit, output_marks, assertions)
+ U_CHAR *buf, *limit;
+ int output_marks, assertions;
+{
+ register FILE_BUF *ip;
+ FILE_BUF obuf;
+ int length = limit - buf;
+ U_CHAR *buf1;
+ int odepth = indepth;
+ int save_assertions_flag = assertions_flag;
+
+ assertions_flag = assertions;
+
+ if (length < 0)
+ abort ();
+
+ /* Set up the input on the input stack. */
+
+ buf1 = (U_CHAR *) alloca (length + 1);
+ {
+ register U_CHAR *p1 = buf;
+ register U_CHAR *p2 = buf1;
+
+ while (p1 != limit)
+ *p2++ = *p1++;
+ }
+ buf1[length] = 0;
+
+ /* Set up to receive the output. */
+
+ obuf.length = length * 2 + 100; /* Usually enough. Why be stingy? */
+ obuf.bufp = obuf.buf = (U_CHAR *) xmalloc (obuf.length);
+ obuf.fname = 0;
+ obuf.macro = 0;
+ obuf.free_ptr = 0;
+
+ CHECK_DEPTH ({return obuf;});
+
+ ++indepth;
+
+ ip = &instack[indepth];
+ ip->fname = 0;
+ ip->nominal_fname = 0;
+ ip->system_header_p = 0;
+ ip->macro = 0;
+ ip->free_ptr = 0;
+ ip->length = length;
+ ip->buf = ip->bufp = buf1;
+ ip->if_stack = if_stack;
+
+ ip->lineno = obuf.lineno = 1;
+
+ /* Scan the input, create the output. */
+ rescan (&obuf, output_marks);
+
+ /* Pop input stack to original state. */
+ --indepth;
+
+ if (indepth != odepth)
+ abort ();
+
+ /* Record the output. */
+ obuf.length = obuf.bufp - obuf.buf;
+
+ assertions_flag = save_assertions_flag;
+ return obuf;
+}
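+
+/* A minimal usage sketch of expand_to_temp_buffer (illustrative
+ only; the input text is made up). The caller owns the returned
+ buffer and must free obuf.buf when done. */
+#if 0
+ {
+ U_CHAR *text = (U_CHAR *) "FOO (1, 2)";
+ FILE_BUF obuf;
+
+ obuf = expand_to_temp_buffer (text, text + strlen ((char *) text), 0, 0);
+ fwrite (obuf.buf, 1, obuf.length, stderr); /* the expansion */
+ free (obuf.buf);
+ }
+#endif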
+
+/*
+ * Process a # directive. Expects IP->bufp to point after the '#', as in
+ * `#define foo bar'. Passes to the command handler
+ * (do_define, do_include, etc.): the addresses of the 1st and
+ * last chars of the command (starting immediately after the #
+ * keyword), plus op and the keyword table pointer. If the command
+ * contains comments, it is copied into a temporary buffer sans comments
+ * and the temporary buffer is passed to the command handler instead.
+ * Likewise for backslash-newlines.
+ *
+ * Returns nonzero if this was a known # directive.
+ * Otherwise, returns zero, without advancing the input pointer.
+ */
+
+static int
+handle_directive (ip, op)
+ FILE_BUF *ip, *op;
+{
+ register U_CHAR *bp, *cp;
+ register struct directive *kt;
+ register int ident_length;
+ U_CHAR *resume_p;
+
+ /* Nonzero means we must copy the entire command
+ to get rid of comments or backslash-newlines. */
+ int copy_command = 0;
+
+ U_CHAR *ident, *after_ident;
+
+ bp = ip->bufp;
+
+ /* Record where the directive started. do_xifdef needs this. */
+ directive_start = bp - 1;
+
+ /* Skip whitespace and \-newline. */
+ while (1) {
+ if (is_hor_space[*bp]) {
+ if ((*bp == '\f' || *bp == '\v') && pedantic)
+ pedwarn ("%s in preprocessing directive",
+ *bp == '\f' ? "formfeed" : "vertical tab");
+ bp++;
+ } else if (*bp == '/' && (bp[1] == '*'
+ || (cplusplus_comments && bp[1] == '/'))) {
+ ip->bufp = bp + 2;
+ skip_to_end_of_comment (ip, &ip->lineno, 0);
+ bp = ip->bufp;
+ } else if (*bp == '\\' && bp[1] == '\n') {
+ bp += 2; ip->lineno++;
+ } else break;
+ }
+
+ /* Now find end of directive name.
+ If we encounter a backslash-newline, exchange it with any following
+ symbol-constituents so that we end up with a contiguous name. */
+
+ cp = bp;
+ while (1) {
+ if (is_idchar[*cp])
+ cp++;
+ else {
+ if (*cp == '\\' && cp[1] == '\n')
+ name_newline_fix (cp);
+ if (is_idchar[*cp])
+ cp++;
+ else break;
+ }
+ }
+ ident_length = cp - bp;
+ ident = bp;
+ after_ident = cp;
+
+ /* A line of just `#' becomes blank. */
+
+ if (ident_length == 0 && *after_ident == '\n') {
+ ip->bufp = after_ident;
+ return 1;
+ }
+
+ if (ident_length == 0 || !is_idstart[*ident]) {
+ U_CHAR *p = ident;
+ while (is_idchar[*p]) {
+ if (*p < '0' || *p > '9')
+ break;
+ p++;
+ }
+ /* Handle # followed by a line number. */
+ if (p != ident && !is_idchar[*p]) {
+ static struct directive line_directive_table[] = {
+ { 4, do_line, "line", T_LINE},
+ };
+ if (pedantic)
+ pedwarn ("`#' followed by integer");
+ after_ident = ident;
+ kt = line_directive_table;
+ goto old_linenum;
+ }
+
+ /* Avoid error for `###' and similar cases unless -pedantic. */
+ if (p == ident) {
+ while (*p == '#' || is_hor_space[*p]) p++;
+ if (*p == '\n') {
+ if (pedantic && !lang_asm)
+ warning ("invalid preprocessor directive");
+ return 0;
+ }
+ }
+
+ if (!lang_asm)
+ error ("invalid preprocessor directive name");
+
+ return 0;
+ }
+
+ /*
+ * Decode the keyword and call the appropriate expansion
+ * routine, after moving the input pointer up to the next line.
+ */
+ for (kt = directive_table; kt->length > 0; kt++) {
+ if (kt->length == ident_length && !strncmp (kt->name, ident, ident_length)) {
+ register U_CHAR *buf;
+ register U_CHAR *limit;
+ int unterminated;
+ int junk;
+ int *already_output;
+
+ /* Nonzero means do not delete comments within the directive.
+ #define needs this when -traditional. */
+ int keep_comments;
+
+ old_linenum:
+
+ limit = ip->buf + ip->length;
+ unterminated = 0;
+ already_output = 0;
+ keep_comments = traditional && kt->traditional_comments;
+ /* #import is defined only in Objective C, or when on the NeXT. */
+ if (kt->type == T_IMPORT && !(objc || lookup ("__NeXT__", -1, -1)))
+ break;
+
+ /* Find the end of this command (first newline not backslashed
+ and not in a string or comment).
+ Set COPY_COMMAND if the command must be copied
+ (it contains a backslash-newline or a comment). */
+
+ buf = bp = after_ident;
+ while (bp < limit) {
+ register U_CHAR c = *bp++;
+ switch (c) {
+ case '\\':
+ if (bp < limit) {
+ if (*bp == '\n') {
+ ip->lineno++;
+ copy_command = 1;
+ }
+ bp++;
+ }
+ break;
+
+ case '\'':
+ case '\"':
+ bp = skip_quoted_string (bp - 1, limit, ip->lineno, &ip->lineno, &copy_command, &unterminated);
+ /* Don't bother calling the directive if we already got an error
+ message due to unterminated string. Skip everything and pretend
+ we called the directive. */
+ if (unterminated) {
+ if (traditional) {
+ /* Traditional preprocessing permits unterminated strings. */
+ ip->bufp = bp;
+ goto endloop1;
+ }
+ ip->bufp = bp;
+ return 1;
+ }
+ break;
+
+ /* <...> is special for #include. */
+ case '<':
+ if (!kt->angle_brackets)
+ break;
+ while (*bp && *bp != '>') bp++;
+ break;
+
+ case '/':
+ if (*bp == '\\' && bp[1] == '\n')
+ newline_fix (bp);
+ if (*bp == '*'
+ || (cplusplus_comments && *bp == '/')) {
+ U_CHAR *obp = bp - 1;
+ ip->bufp = bp + 1;
+ skip_to_end_of_comment (ip, &ip->lineno, 0);
+ bp = ip->bufp;
+ /* No need to copy the command because of a comment at the end;
+ just don't include the comment in the directive. */
+ if (bp == limit || *bp == '\n') {
+ bp = obp;
+ goto endloop1;
+ }
+ /* Don't remove the comments if -traditional. */
+ if (! keep_comments)
+ copy_command++;
+ }
+ break;
+
+ case '\f':
+ case '\v':
+ if (pedantic)
+ pedwarn ("%s in preprocessing directive",
+ c == '\f' ? "formfeed" : "vertical tab");
+ break;
+
+ case '\n':
+ --bp; /* Point to the newline */
+ ip->bufp = bp;
+ goto endloop1;
+ }
+ }
+ ip->bufp = bp;
+
+ endloop1:
+ resume_p = ip->bufp;
+ /* BP is the end of the directive.
+ RESUME_P is the next interesting data after the directive.
+ A comment may come between. */
+
+ /* If a directive should be copied through, and -E was given,
+ pass it through before removing comments. */
+ if (!no_output && kt->pass_thru && put_out_comments) {
+ int len;
+
+ /* Output directive name. */
+ check_expand (op, kt->length + 2);
+ /* Make sure # is at the start of a line */
+ if (op->bufp > op->buf && op->bufp[-1] != '\n') {
+ op->lineno++;
+ *op->bufp++ = '\n';
+ }
+ *op->bufp++ = '#';
+ bcopy (kt->name, op->bufp, kt->length);
+ op->bufp += kt->length;
+
+ /* Output arguments. */
+ len = (bp - buf);
+ check_expand (op, len);
+ bcopy (buf, (char *) op->bufp, len);
+ op->bufp += len;
+ /* Take account of any (escaped) newlines just output. */
+ while (--len >= 0)
+ if (buf[len] == '\n')
+ op->lineno++;
+
+ already_output = &junk;
+ } /* Don't we need a newline or #line? */
+
+ if (copy_command) {
+ register U_CHAR *xp = buf;
+ /* Need to copy entire command into temp buffer before dispatching */
+
+ cp = (U_CHAR *) alloca (bp - buf + 5); /* room for cmd plus
+ some slop */
+ buf = cp;
+
+ /* Copy to the new buffer, deleting comments
+ and backslash-newlines (and whitespace surrounding the latter). */
+
+ while (xp < bp) {
+ register U_CHAR c = *xp++;
+ *cp++ = c;
+
+ switch (c) {
+ case '\n':
+ abort (); /* A bare newline should never be part of the line. */
+ break;
+
+ /* <...> is special for #include. */
+ case '<':
+ if (!kt->angle_brackets)
+ break;
+ while (xp < bp && c != '>') {
+ c = *xp++;
+ if (c == '\\' && xp < bp && *xp == '\n')
+ xp++;
+ else
+ *cp++ = c;
+ }
+ break;
+
+ case '\\':
+ if (*xp == '\n') {
+ xp++;
+ cp--;
+ if (cp != buf && is_space[cp[-1]]) {
+ while (cp != buf && is_space[cp[-1]]) cp--;
+ cp++;
+ SKIP_WHITE_SPACE (xp);
+ } else if (is_space[*xp]) {
+ *cp++ = *xp++;
+ SKIP_WHITE_SPACE (xp);
+ }
+ } else {
+ *cp++ = *xp++;
+ }
+ break;
+
+ case '\'':
+ case '\"':
+ {
+ register U_CHAR *bp1
+ = skip_quoted_string (xp - 1, bp, ip->lineno,
+ NULL_PTR, NULL_PTR, NULL_PTR);
+ while (xp != bp1)
+ if (*xp == '\\') {
+ if (*++xp != '\n')
+ *cp++ = '\\';
+ else
+ xp++;
+ } else
+ *cp++ = *xp++;
+ }
+ break;
+
+ case '/':
+ if (*xp == '*'
+ || (cplusplus_comments && *xp == '/')) {
+ ip->bufp = xp + 1;
+ /* If we already copied the command through,
+ already_output != 0 prevents outputting comment now. */
+ skip_to_end_of_comment (ip, already_output, 0);
+ if (keep_comments)
+ while (xp != ip->bufp)
+ *cp++ = *xp++;
+ /* Delete or replace the slash. */
+ else if (traditional)
+ cp--;
+ else
+ cp[-1] = ' ';
+ xp = ip->bufp;
+ }
+ }
+ }
+
+ /* Null-terminate the copy. */
+
+ *cp = 0;
+ } else
+ cp = bp;
+
+ ip->bufp = resume_p;
+
+ /* Some directives should be written out for cc1 to process,
+ just as if they were not defined. And sometimes we're copying
+ definitions through. */
+
+ if (!no_output && already_output == 0
+ && (kt->pass_thru
+ || (kt->type == T_DEFINE
+ && (dump_macros == dump_names
+ || dump_macros == dump_definitions)))) {
+ int len;
+
+ /* Output directive name. */
+ check_expand (op, kt->length + 1);
+ *op->bufp++ = '#';
+ bcopy (kt->name, (char *) op->bufp, kt->length);
+ op->bufp += kt->length;
+
+ if (kt->pass_thru || dump_macros == dump_definitions) {
+ /* Output arguments. */
+ len = (cp - buf);
+ check_expand (op, len);
+ bcopy (buf, (char *) op->bufp, len);
+ op->bufp += len;
+ } else if (kt->type == T_DEFINE && dump_macros == dump_names) {
+ U_CHAR *xp = buf;
+ U_CHAR *yp;
+ SKIP_WHITE_SPACE (xp);
+ yp = xp;
+ while (is_idchar[*xp]) xp++;
+ len = (xp - yp);
+ check_expand (op, len + 1);
+ *op->bufp++ = ' ';
+ bcopy (yp, op->bufp, len);
+ op->bufp += len;
+ }
+ } /* Don't we need a newline or #line? */
+
+ /* Call the appropriate command handler. buf now points to
+ either the appropriate place in the input buffer, or to
+ the temp buffer if it was necessary to make one. cp
+ points to the first char after the contents of the (possibly
+ copied) command, in either case. */
+ (*kt->func) (buf, cp, op, kt);
+ check_expand (op, ip->length - (ip->bufp - ip->buf));
+
+ return 1;
+ }
+ }
+
+ /* It is deliberate that we don't warn about undefined directives.
+ That is the responsibility of cc1. */
+ return 0;
+}
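+
+/* A sketch of the keyword dispatch above, using a hypothetical
+ two-entry table (the real directive_table is defined elsewhere in
+ this file; the field order matches line_directive_table above).
+ Because the loop compares kt->length to ident_length before calling
+ strncmp, an identifier like `definex' (length 7) can never match
+ the `define' entry (length 6). */
+#if 0
+static struct directive demo_table[] = {
+ { 6, do_define, "define", T_DEFINE},
+ { 7, do_include, "include", T_INCLUDE},
+};
+#endif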
+
+static struct tm *
+timestamp ()
+{
+ static struct tm *timebuf;
+ if (!timebuf) {
+ time_t t = time (0);
+ timebuf = localtime (&t);
+ }
+ return timebuf;
+}
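+
+/* Because the result is cached, __DATE__ and __TIME__ describe the
+ same instant for the entire run; e.g. a run begun at 20:15:59 on
+ Aug 2 1994 yields "Aug  2 1994" (note the %2d day padding used
+ below) and "20:15:59" everywhere they are expanded. */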
+
+static char *monthnames[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
+ };
+
+/*
+ * expand things like __FILE__. Place the expansion into the output
+ * buffer *without* rescanning.
+ */
+
+static void
+special_symbol (hp, op)
+ HASHNODE *hp;
+ FILE_BUF *op;
+{
+ char *buf;
+ int i, len;
+ int true_indepth;
+ FILE_BUF *ip = NULL;
+ struct tm *timebuf;
+
+ int paren = 0; /* For special `defined' keyword */
+
+ if (pcp_outfile && pcp_inside_if
+ && hp->type != T_SPEC_DEFINED && hp->type != T_CONST)
+ error ("Predefined macro `%s' used inside `#if' during precompilation",
+ hp->name);
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+ if (ip == NULL) {
+ error ("cccp error: not in any file?!");
+ return; /* the show must go on */
+ }
+
+ switch (hp->type) {
+ case T_FILE:
+ case T_BASE_FILE:
+ {
+ char *string;
+ if (hp->type == T_FILE)
+ string = ip->nominal_fname;
+ else
+ string = instack[0].nominal_fname;
+
+ if (string)
+ {
+ buf = (char *) alloca (3 + 4 * strlen (string));
+ quote_string (buf, string);
+ }
+ else
+ buf = "\"\"";
+
+ break;
+ }
+
+ case T_INCLUDE_LEVEL:
+ true_indepth = 0;
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL)
+ true_indepth++;
+
+ buf = (char *) alloca (8); /* Eight bytes ought to be more than enough */
+ sprintf (buf, "%d", true_indepth - 1);
+ break;
+
+ case T_VERSION:
+ buf = (char *) alloca (3 + strlen (version_string));
+ sprintf (buf, "\"%s\"", version_string);
+ break;
+
+#ifndef NO_BUILTIN_SIZE_TYPE
+ case T_SIZE_TYPE:
+ buf = SIZE_TYPE;
+ break;
+#endif
+
+#ifndef NO_BUILTIN_PTRDIFF_TYPE
+ case T_PTRDIFF_TYPE:
+ buf = PTRDIFF_TYPE;
+ break;
+#endif
+
+ case T_WCHAR_TYPE:
+ buf = wchar_type;
+ break;
+
+ case T_USER_LABEL_PREFIX_TYPE:
+ buf = USER_LABEL_PREFIX;
+ break;
+
+ case T_REGISTER_PREFIX_TYPE:
+ buf = REGISTER_PREFIX;
+ break;
+
+ case T_CONST:
+ buf = (char *) alloca (4 * sizeof (int));
+ sprintf (buf, "%d", hp->value.ival);
+ if (pcp_inside_if && pcp_outfile)
+ /* Output a precondition for this macro use */
+ fprintf (pcp_outfile, "#define %s %d\n", hp->name, hp->value.ival);
+ break;
+
+ case T_SPECLINE:
+ buf = (char *) alloca (10);
+ sprintf (buf, "%d", ip->lineno);
+ break;
+
+ case T_DATE:
+ case T_TIME:
+ buf = (char *) alloca (20);
+ timebuf = timestamp ();
+ if (hp->type == T_DATE)
+ sprintf (buf, "\"%s %2d %4d\"", monthnames[timebuf->tm_mon],
+ timebuf->tm_mday, timebuf->tm_year + 1900);
+ else
+ sprintf (buf, "\"%02d:%02d:%02d\"", timebuf->tm_hour, timebuf->tm_min,
+ timebuf->tm_sec);
+ break;
+
+ case T_SPEC_DEFINED:
+ buf = " 0 "; /* Assume symbol is not defined */
+ ip = &instack[indepth];
+ SKIP_WHITE_SPACE (ip->bufp);
+ if (*ip->bufp == '(') {
+ paren++;
+ ip->bufp++; /* Skip over the paren */
+ SKIP_WHITE_SPACE (ip->bufp);
+ }
+
+ if (!is_idstart[*ip->bufp])
+ goto oops;
+ if (hp = lookup (ip->bufp, -1, -1)) {
+ if (pcp_outfile && pcp_inside_if
+ && hp->value.defn->predefined)
+ /* Output a precondition for this macro use. */
+ fprintf (pcp_outfile, "#define %s\n", hp->name);
+ buf = " 1 ";
+ }
+ else
+ if (pcp_outfile && pcp_inside_if) {
+ /* Output a precondition for this macro use */
+ U_CHAR *cp = ip->bufp;
+ fprintf (pcp_outfile, "#undef ");
+ while (is_idchar[*cp]) /* Ick! */
+ fputc (*cp++, pcp_outfile);
+ putc ('\n', pcp_outfile);
+ }
+ while (is_idchar[*ip->bufp])
+ ++ip->bufp;
+ SKIP_WHITE_SPACE (ip->bufp);
+ if (paren) {
+ if (*ip->bufp != ')')
+ goto oops;
+ ++ip->bufp;
+ }
+ break;
+
+oops:
+
+ error ("`defined' without an identifier");
+ break;
+
+ default:
+ error ("cccp error: invalid special hash type"); /* time for gdb */
+ abort ();
+ }
+ len = strlen (buf);
+ check_expand (op, len);
+ bcopy (buf, (char *) op->bufp, len);
+ op->bufp += len;
+
+ return;
+}
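+
+/* Worked examples (file names hypothetical): within "foo.c",
+ __FILE__ expands to the quoted string "foo.c" (via quote_string
+ above), __INCLUDE_LEVEL__ to 0 in the main file and 1 inside a
+ header it includes, and `defined (FOO)' or `defined FOO' to " 1 "
+ or " 0 " according to whether FOO is in the hash table. */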
+
+
+/* Routines to handle #directives */
+
+/* Handle #include and #import.
+ This function expects to see "fname" or <fname> on the input. */
+
+static int
+do_include (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ int importing = (keyword->type == T_IMPORT);
+ int skip_dirs = (keyword->type == T_INCLUDE_NEXT);
+ static int import_warning = 0;
+ char *fname; /* Dynamically allocated fname buffer */
+ char *pcftry;
+ char *pcfname;
+ U_CHAR *fbeg, *fend; /* Beginning and end of fname */
+
+ struct file_name_list *search_start = include; /* Chain of dirs to search */
+ struct file_name_list dsp[1]; /* First in chain, if #include "..." */
+ struct file_name_list *searchptr = 0;
+ int flen;
+
+ int f; /* file number */
+
+ int retried = 0; /* Have already tried macro
+ expanding the include line*/
+ FILE_BUF trybuf; /* It got expanded into here */
+ int angle_brackets = 0; /* 0 for "...", 1 for <...> */
+ int pcf = -1;
+ char *pcfbuf;
+ int pcfbuflimit;
+ int pcfnum;
+ f = -1; /* JF we iz paranoid! */
+
+ if (importing && warn_import && !inhibit_warnings
+ && !instack[indepth].system_header_p && !import_warning) {
+ import_warning = 1;
+ warning ("using `#import' is not recommended");
+ fprintf (stderr, "The fact that a certain header file need not be processed more than once\n");
+ fprintf (stderr, "should be indicated in the header file, not where it is used.\n");
+ fprintf (stderr, "The best way to do this is with a conditional of this form:\n\n");
+ fprintf (stderr, " #ifndef _FOO_H_INCLUDED\n");
+ fprintf (stderr, " #define _FOO_H_INCLUDED\n");
+ fprintf (stderr, " ... <real contents of file> ...\n");
+ fprintf (stderr, " #endif /* Not _FOO_H_INCLUDED */\n\n");
+ fprintf (stderr, "Then users can use `#include' any number of times.\n");
+ fprintf (stderr, "GNU C automatically avoids processing the file more than once\n");
+ fprintf (stderr, "when it is equipped with such a conditional.\n");
+ }
+
+get_filename:
+
+ fbeg = buf;
+ SKIP_WHITE_SPACE (fbeg);
+ /* Discard trailing whitespace so we can easily see
+ if we have parsed all the significant chars we were given. */
+ while (limit != fbeg && is_hor_space[limit[-1]]) limit--;
+
+ switch (*fbeg++) {
+ case '\"':
+ {
+ FILE_BUF *fp;
+ /* Copy the operand text, concatenating the strings. */
+ {
+ U_CHAR *fin = fbeg;
+ fbeg = (U_CHAR *) alloca (limit - fbeg + 1);
+ fend = fbeg;
+ while (fin != limit) {
+ while (fin != limit && *fin != '\"')
+ *fend++ = *fin++;
+ fin++;
+ if (fin == limit)
+ break;
+ /* If not at the end, there had better be another string. */
+ /* Skip just horiz space, and don't go past limit. */
+ while (fin != limit && is_hor_space[*fin]) fin++;
+ if (fin != limit && *fin == '\"')
+ fin++;
+ else
+ goto fail;
+ }
+ }
+ *fend = 0;
+
+ /* We have "filename". Figure out directory this source
+ file is coming from and put it on the front of the list. */
+
+ /* If -I- was specified, don't search current dir, only spec'd ones. */
+ if (ignore_srcdir) break;
+
+ for (fp = &instack[indepth]; fp >= instack; fp--)
+ {
+ int n;
+ char *ep,*nam;
+
+ if ((nam = fp->nominal_fname) != NULL) {
+ /* Found a named file. Figure out dir of the file,
+ and put it in front of the search list. */
+ dsp[0].next = search_start;
+ search_start = dsp;
+#ifndef VMS
+ ep = rindex (nam, '/');
+#else /* VMS */
+ ep = rindex (nam, ']');
+ if (ep == NULL) ep = rindex (nam, '>');
+ if (ep == NULL) ep = rindex (nam, ':');
+ if (ep != NULL) ep++;
+#endif /* VMS */
+ if (ep != NULL) {
+ n = ep - nam;
+ dsp[0].fname = (char *) alloca (n + 1);
+ strncpy (dsp[0].fname, nam, n);
+ dsp[0].fname[n] = '\0';
+ if (n + INCLUDE_LEN_FUDGE > max_include_len)
+ max_include_len = n + INCLUDE_LEN_FUDGE;
+ } else {
+ dsp[0].fname = 0; /* Current directory */
+ }
+ dsp[0].got_name_map = 0;
+ break;
+ }
+ }
+ break;
+ }
+
+ case '<':
+ fend = fbeg;
+ while (fend != limit && *fend != '>') fend++;
+ if (*fend == '>' && fend + 1 == limit) {
+ angle_brackets = 1;
+ /* If -I-, start with the first -I dir after the -I-. */
+ if (first_bracket_include)
+ search_start = first_bracket_include;
+ break;
+ }
+ goto fail;
+
+ default:
+#ifdef VMS
+ /*
+ * Support '#include xyz' like VAX-C to allow for easy use of all the
+ * decwindow include files. It defaults to '#include <xyz.h>' (so the
+ * code from case '<' is repeated here) and generates a warning.
+ */
+ if (isalpha(*(--fbeg))) {
+ fend = fbeg;
+ while (fend != limit && (!isspace(*fend))) fend++;
+ warning ("VAX-C-style include specification found, use '#include <filename.h>' !");
+ if (fend == limit) {
+ angle_brackets = 1;
+ /* If -I-, start with the first -I dir after the -I-. */
+ if (first_bracket_include)
+ search_start = first_bracket_include;
+ break;
+ }
+ }
+#endif
+
+ fail:
+ if (retried) {
+ error ("`#%s' expects \"FILENAME\" or <FILENAME>", keyword->name);
+ return 0;
+ } else {
+ trybuf = expand_to_temp_buffer (buf, limit, 0, 0);
+ buf = (U_CHAR *) alloca (trybuf.bufp - trybuf.buf + 1);
+ bcopy ((char *) trybuf.buf, (char *) buf, trybuf.bufp - trybuf.buf);
+ limit = buf + (trybuf.bufp - trybuf.buf);
+ free (trybuf.buf);
+ retried++;
+ goto get_filename;
+ }
+ }
+
+ /* For #include_next, skip in the search path
+ past the dir in which the containing file was found. */
+ if (skip_dirs) {
+ FILE_BUF *fp;
+ for (fp = &instack[indepth]; fp >= instack; fp--)
+ if (fp->fname != NULL) {
+ /* fp->dir is null if the containing file was specified
+ with an absolute file name. In that case, don't skip anything. */
+ if (fp->dir)
+ search_start = fp->dir->next;
+ break;
+ }
+ }
+
+ flen = fend - fbeg;
+
+ if (flen == 0)
+ {
+ error ("empty file name in `#%s'", keyword->name);
+ return 0;
+ }
+
+ /* Allocate this permanently, because it gets stored in the definitions
+ of macros. */
+ fname = (char *) xmalloc (max_include_len + flen + 4);
+ /* + 2 above for slash and terminating null. */
+ /* + 2 added for '.h' on VMS (to support '#include filename') */
+
+ /* If specified file name is absolute, just open it. */
+
+ if (*fbeg == '/') {
+ strncpy (fname, fbeg, flen);
+ fname[flen] = 0;
+ if (redundant_include_p (fname))
+ return 0;
+ if (importing)
+ f = lookup_import (fname, NULL_PTR);
+ else
+ f = open_include_file (fname, NULL_PTR);
+ if (f == -2)
+ return 0; /* Already included this file */
+ } else {
+ /* Search directory path, trying to open the file.
+ Copy each filename tried into FNAME. */
+
+ for (searchptr = search_start; searchptr; searchptr = searchptr->next) {
+ if (searchptr->fname) {
+ /* The empty string in a search path is ignored.
+ This makes it possible to turn off entirely
+ a standard piece of the list. */
+ if (searchptr->fname[0] == 0)
+ continue;
+ strcpy (fname, searchptr->fname);
+ strcat (fname, "/");
+ fname[strlen (fname) + flen] = 0;
+ } else {
+ fname[0] = 0;
+ }
+ strncat (fname, fbeg, flen);
+#ifdef VMS
+ /* Change this 1/2 Unix 1/2 VMS file specification into a
+ full VMS file specification */
+ if (searchptr->fname && (searchptr->fname[0] != 0)) {
+ /* Fix up the filename */
+ hack_vms_include_specification (fname);
+ } else {
+ /* This is a normal VMS filespec, so use it unchanged. */
+ strncpy (fname, fbeg, flen);
+ fname[flen] = 0;
+ /* if it's '#include filename', add the missing .h */
+ if (index(fname,'.')==NULL) {
+ strcat (fname, ".h");
+ }
+ }
+#endif /* VMS */
+ if (importing)
+ f = lookup_import (fname, searchptr);
+ else
+ f = open_include_file (fname, searchptr);
+ if (f == -2)
+ return 0; /* Already included this file */
+#ifdef EACCES
+ else if (f == -1 && errno == EACCES)
+ warning ("Header file %s exists, but is not readable", fname);
+#endif
+ if (redundant_include_p (fname)) {
+ close (f);
+ return 0;
+ }
+ if (f >= 0)
+ break;
+ }
+ }
+
+ if (f < 0) {
+ /* A file that was not found. */
+
+ strncpy (fname, fbeg, flen);
+ fname[flen] = 0;
+ /* If generating dependencies and -MG was specified, we assume missing
+ files are leaf files, living in the same directory as the source file
+ or other similar place; these missing files may be generated from
+ other files and may not exist yet (e.g. y.tab.h). */
+ if (print_deps_missing_files
+ && print_deps > (angle_brackets || (system_include_depth > 0)))
+ {
+ /* If it was requested as a system header file,
+ then assume it belongs in the first place to look for such. */
+ if (angle_brackets)
+ {
+ for (searchptr = search_start; searchptr; searchptr = searchptr->next)
+ {
+ if (searchptr->fname)
+ {
+ char *p;
+
+ if (searchptr->fname[0] == 0)
+ continue;
+ p = xmalloc (strlen (searchptr->fname)
+ + strlen (fname) + 2);
+ strcpy (p, searchptr->fname);
+ strcat (p, "/");
+ strcat (p, fname);
+ deps_output (p, ' ');
+ break;
+ }
+ }
+ }
+ else
+ {
+ /* Otherwise, omit the directory, as if the file existed
+ in the directory with the source. */
+ deps_output (fname, ' ');
+ }
+ }
+ /* If -M was specified, and this header file won't be added to the
+ dependency list, then don't count this as an error, because we can
+ still produce correct output. Otherwise, we can't produce correct
+ output, because there may be dependencies we need inside the missing
+ file, and we don't know what directory this missing file exists in. */
+ else if (print_deps
+ && (print_deps <= (angle_brackets || (system_include_depth > 0))))
+ warning ("No include path in which to find %s", fname);
+ else if (search_start)
+ error_from_errno (fname);
+ else
+ error ("No include path in which to find %s", fname);
+ } else {
+ struct stat stat_f;
+
+ /* Check to see if this include file is a once-only include file.
+ If so, give up. */
+
+ struct file_name_list* ptr;
+
+ for (ptr = dont_repeat_files; ptr; ptr = ptr->next) {
+ if (!strcmp (ptr->fname, fname)) {
+ close (f);
+ return 0; /* This file was once'd. */
+ }
+ }
+
+ for (ptr = all_include_files; ptr; ptr = ptr->next) {
+ if (!strcmp (ptr->fname, fname))
+ break; /* This file was included before. */
+ }
+
+ if (ptr == 0) {
+ /* This is the first time for this file. */
+ /* Add it to list of files included. */
+
+ ptr = (struct file_name_list *) xmalloc (sizeof (struct file_name_list));
+ ptr->control_macro = 0;
+ ptr->c_system_include_path = 0;
+ ptr->next = all_include_files;
+ all_include_files = ptr;
+ ptr->fname = savestring (fname);
+ ptr->got_name_map = 0;
+
+ /* For -M, add this file to the dependencies. */
+ if (print_deps > (angle_brackets || (system_include_depth > 0)))
+ deps_output (fname, ' ');
+ }
+
+ /* Handle -H option. */
+ if (print_include_names) {
+ output_dots (stderr, indepth);
+ fprintf (stderr, "%s\n", fname);
+ }
+
+ if (angle_brackets)
+ system_include_depth++;
+
+ /* Actually process the file. */
+ add_import (f, fname); /* Record file on "seen" list for #import. */
+
+ pcftry = (char *) alloca (strlen (fname) + 30);
+ pcfbuf = 0;
+ pcfnum = 0;
+
+ fstat (f, &stat_f);
+
+ if (!no_precomp)
+ do {
+ sprintf (pcftry, "%s%d", fname, pcfnum++);
+
+ pcf = open (pcftry, O_RDONLY, 0666);
+ if (pcf != -1)
+ {
+ struct stat s;
+
+ fstat (pcf, &s);
+ if (bcmp ((char *) &stat_f.st_ino, (char *) &s.st_ino,
+ sizeof (s.st_ino))
+ || stat_f.st_dev != s.st_dev)
+ {
+ pcfbuf = check_precompiled (pcf, fname, &pcfbuflimit);
+ /* Don't need it any more. */
+ close (pcf);
+ }
+ else
+ {
+ /* Don't need it at all. */
+ close (pcf);
+ break;
+ }
+ }
+ } while (pcf != -1 && !pcfbuf);
+
+ /* Actually process the file */
+ if (pcfbuf) {
+ pcfname = xmalloc (strlen (pcftry) + 1);
+ strcpy (pcfname, pcftry);
+ pcfinclude (pcfbuf, pcfbuflimit, fname, op);
+ }
+ else
+ finclude (f, fname, op, is_system_include (fname), searchptr);
+
+ if (angle_brackets)
+ system_include_depth--;
+ }
+ return 0;
+}
+
+/* Return nonzero if there is no need to include file NAME
+ because it has already been included and it contains a conditional
+ to make a repeated include do nothing. */
+
+static int
+redundant_include_p (name)
+ char *name;
+{
+ struct file_name_list *l = all_include_files;
+ for (; l; l = l->next)
+ if (! strcmp (name, l->fname)
+ && l->control_macro
+ && lookup (l->control_macro, -1, -1))
+ return 1;
+ return 0;
+}
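+
+/* Example (hypothetical header): once a file of the form
+
+ #ifndef FOO_H
+ #define FOO_H
+ ...contents...
+ #endif
+
+ has been read, FOO_H is recorded as its control macro, and a later
+ #include of the same file is skipped here without reopening it,
+ since lookup ("FOO_H", -1, -1) now succeeds. */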
+
+/* Return nonzero if the given FILENAME is an absolute pathname which
+ designates a file within one of the known "system" include file
+ directories. We assume here that if the given FILENAME looks like
+ it is the name of a file which resides either directly in a "system"
+ include file directory, or within any subdirectory thereof, then the
+ given file must be a "system" include file. This function tells us
+ if we should suppress pedantic errors/warnings for the given FILENAME.
+
+ The value is 2 if the file is a C-language system header file
+ for which C++ should (on most systems) assume `extern "C"'. */
+
+static int
+is_system_include (filename)
+ register char *filename;
+{
+ struct file_name_list *searchptr;
+
+ for (searchptr = first_system_include; searchptr;
+ searchptr = searchptr->next)
+ if (searchptr->fname) {
+ register char *sys_dir = searchptr->fname;
+ register unsigned length = strlen (sys_dir);
+
+ if (! strncmp (sys_dir, filename, length) && filename[length] == '/')
+ {
+ if (searchptr->c_system_include_path)
+ return 2;
+ else
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* The file_name_map structure holds a mapping of file names for a
+ particular directory. This mapping is read from the file named
+ FILE_NAME_MAP_FILE in that directory. Such a file can be used to
+ map filenames on a file system with severe filename restrictions,
+ such as DOS. The format of the file name map file is just a series
+ of lines with two tokens on each line. The first token is the name
+ to map, and the second token is the actual name to use. */
+
+struct file_name_map
+{
+ struct file_name_map *map_next;
+ char *map_from;
+ char *map_to;
+};
+
+#define FILE_NAME_MAP_FILE "header.gcc"
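+
+/* A sample header.gcc, as a DOS-hosted tree might carry (names are
+ hypothetical). Each line gives the name used in #include, then the
+ name actually present on the file system:
+
+ sys/types.h systypes.h
+ longfilename.h longfile1.h
+*/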
+
+/* Read a space-delimited string of unlimited length from a stdio
+ file. */
+
+static char *
+read_filename_string (ch, f)
+ int ch;
+ FILE *f;
+{
+ char *alloc, *set;
+ int len;
+
+ len = 20;
+ set = alloc = xmalloc (len + 1);
+ if (! is_space[ch])
+ {
+ *set++ = ch;
+ while ((ch = getc (f)) != EOF && ! is_space[ch])
+ {
+ if (set - alloc == len)
+ {
+ len *= 2;
+ alloc = xrealloc (alloc, len + 1);
+ set = alloc + len / 2;
+ }
+ *set++ = ch;
+ }
+ }
+ *set = '\0';
+ ungetc (ch, f);
+ return alloc;
+}
+
+/* Read the file name map file for DIRNAME. */
+
+static struct file_name_map *
+read_name_map (dirname)
+ char *dirname;
+{
+ /* This structure holds a linked list of file name maps, one per
+ directory. */
+ struct file_name_map_list
+ {
+ struct file_name_map_list *map_list_next;
+ char *map_list_name;
+ struct file_name_map *map_list_map;
+ };
+ static struct file_name_map_list *map_list;
+ register struct file_name_map_list *map_list_ptr;
+ char *name;
+ FILE *f;
+
+ for (map_list_ptr = map_list; map_list_ptr;
+ map_list_ptr = map_list_ptr->map_list_next)
+ if (! strcmp (map_list_ptr->map_list_name, dirname))
+ return map_list_ptr->map_list_map;
+
+ map_list_ptr = ((struct file_name_map_list *)
+ xmalloc (sizeof (struct file_name_map_list)));
+ map_list_ptr->map_list_name = savestring (dirname);
+ map_list_ptr->map_list_map = NULL;
+
+ name = (char *) alloca (strlen (dirname) + strlen (FILE_NAME_MAP_FILE) + 2);
+ strcpy (name, dirname);
+ if (*dirname)
+ strcat (name, "/");
+ strcat (name, FILE_NAME_MAP_FILE);
+ f = fopen (name, "r");
+ if (!f)
+ map_list_ptr->map_list_map = NULL;
+ else
+ {
+ int ch;
+ int dirlen = strlen (dirname);
+
+ while ((ch = getc (f)) != EOF)
+ {
+ char *from, *to;
+ struct file_name_map *ptr;
+
+ if (is_space[ch])
+ continue;
+ from = read_filename_string (ch, f);
+ while ((ch = getc (f)) != EOF && is_hor_space[ch])
+ ;
+ to = read_filename_string (ch, f);
+
+ ptr = ((struct file_name_map *)
+ xmalloc (sizeof (struct file_name_map)));
+ ptr->map_from = from;
+
+ /* Make the real filename absolute. */
+ if (*to == '/')
+ ptr->map_to = to;
+ else
+ {
+ ptr->map_to = xmalloc (dirlen + strlen (to) + 2);
+ strcpy (ptr->map_to, dirname);
+ ptr->map_to[dirlen] = '/';
+ strcpy (ptr->map_to + dirlen + 1, to);
+ free (to);
+ }
+
+ ptr->map_next = map_list_ptr->map_list_map;
+ map_list_ptr->map_list_map = ptr;
+
+ while ((ch = getc (f)) != '\n')
+ if (ch == EOF)
+ break;
+ }
+ fclose (f);
+ }
+
+ map_list_ptr->map_list_next = map_list;
+ map_list = map_list_ptr;
+
+ return map_list_ptr->map_list_map;
+}
+
+/* Try to open include file FILENAME. SEARCHPTR is the directory
+ being tried from the include file search path. This function maps
+ filenames on file systems based on information read by
+ read_name_map. */
+
+static int
+open_include_file (filename, searchptr)
+ char *filename;
+ struct file_name_list *searchptr;
+{
+ register struct file_name_map *map;
+ register char *from;
+ char *p, *dir;
+
+ if (searchptr && ! searchptr->got_name_map)
+ {
+ searchptr->name_map = read_name_map (searchptr->fname
+ ? searchptr->fname : ".");
+ searchptr->got_name_map = 1;
+ }
+
+ /* First check the mapping for the directory we are using. */
+ if (searchptr && searchptr->name_map)
+ {
+ from = filename;
+ if (searchptr->fname)
+ from += strlen (searchptr->fname) + 1;
+ for (map = searchptr->name_map; map; map = map->map_next)
+ {
+ if (! strcmp (map->map_from, from))
+ {
+ /* Found a match. */
+ return open (map->map_to, O_RDONLY, 0666);
+ }
+ }
+ }
+
+ /* Try to find a mapping file for the particular directory we are
+ looking in. Thus #include <sys/types.h> will look up sys/types.h
+ in /usr/include/header.gcc and look up types.h in
+ /usr/include/sys/header.gcc. */
+ p = rindex (filename, '/');
+ if (! p)
+ p = filename;
+ if (searchptr
+ && searchptr->fname
+ && strlen (searchptr->fname) == p - filename
+ && ! strncmp (searchptr->fname, filename, p - filename))
+ {
+ /* FILENAME is in SEARCHPTR, which we've already checked. */
+ return open (filename, O_RDONLY, 0666);
+ }
+
+ if (p == filename)
+ {
+ dir = ".";
+ from = filename;
+ }
+ else
+ {
+ dir = (char *) alloca (p - filename + 1);
+ bcopy (filename, dir, p - filename);
+ dir[p - filename] = '\0';
+ from = p + 1;
+ }
+ for (map = read_name_map (dir); map; map = map->map_next)
+ if (! strcmp (map->map_from, from))
+ return open (map->map_to, O_RDONLY, 0666);
+
+ return open (filename, O_RDONLY, 0666);
+}
+
+/* Process the contents of include file FNAME, already open on descriptor F,
+ with output to OP.
+ SYSTEM_HEADER_P is 1 if this file resides in any one of the known
+ "system" include directories (as decided by the `is_system_include'
+ function above).
+ DIRPTR is the link in the dir path through which this file was found,
+ or 0 if the file name was absolute. */
+
+static void
+finclude (f, fname, op, system_header_p, dirptr)
+ int f;
+ char *fname;
+ FILE_BUF *op;
+ int system_header_p;
+ struct file_name_list *dirptr;
+{
+ int st_mode;
+ long st_size;
+ long i;
+ FILE_BUF *fp; /* For input stack frame */
+ int missing_newline = 0;
+
+ CHECK_DEPTH (return;);
+
+ if (file_size_and_mode (f, &st_mode, &st_size) < 0)
+ {
+ perror_with_name (fname);
+ close (f);
+ return;
+ }
+
+ fp = &instack[indepth + 1];
+ bzero ((char *) fp, sizeof (FILE_BUF));
+ fp->nominal_fname = fp->fname = fname;
+ fp->length = 0;
+ fp->lineno = 1;
+ fp->if_stack = if_stack;
+ fp->system_header_p = system_header_p;
+ fp->dir = dirptr;
+
+ if (S_ISREG (st_mode)) {
+ fp->buf = (U_CHAR *) xmalloc (st_size + 2);
+ fp->bufp = fp->buf;
+
+ /* Read the file contents, knowing that st_size is an upper bound
+ on the number of bytes we can read. */
+ fp->length = safe_read (f, fp->buf, st_size);
+ if (fp->length < 0) goto nope;
+ }
+ else if (S_ISDIR (st_mode)) {
+ error ("directory `%s' specified in #include", fname);
+ close (f);
+ return;
+ } else {
+ /* We cannot determine the file size in advance (e.g. a pipe
+ or a terminal), so read the entire file into a heap buffer,
+ doubling its size as needed. */
+
+ int bsize = 2000;
+
+ st_size = 0;
+ fp->buf = (U_CHAR *) xmalloc (bsize + 2);
+
+ for (;;) {
+ i = safe_read (f, fp->buf + st_size, bsize - st_size);
+ if (i < 0)
+ goto nope; /* error! */
+ st_size += i;
+ if (st_size != bsize)
+ break; /* End of file */
+ bsize *= 2;
+ fp->buf = (U_CHAR *) xrealloc (fp->buf, bsize + 2);
+ }
+ fp->bufp = fp->buf;
+ fp->length = st_size;
+ }
+
+ if ((fp->length > 0 && fp->buf[fp->length - 1] != '\n')
+ /* Backslash-newline at end is not good enough. */
+ || (fp->length > 1 && fp->buf[fp->length - 2] == '\\')) {
+ fp->buf[fp->length++] = '\n';
+ missing_newline = 1;
+ }
+ fp->buf[fp->length] = '\0';
+
+ /* Close descriptor now, so nesting does not use lots of descriptors. */
+ close (f);
+
+ /* Must do this before calling trigraph_pcp, so that the correct file name
+ will be printed in warning messages. */
+
+ indepth++;
+ input_file_stack_tick++;
+
+ if (!no_trigraphs)
+ trigraph_pcp (fp);
+
+ output_line_command (fp, op, 0, enter_file);
+ rescan (op, 0);
+
+ if (missing_newline)
+ fp->lineno--;
+
+ if (pedantic && missing_newline)
+ pedwarn ("file does not end in newline");
+
+ indepth--;
+ input_file_stack_tick++;
+ output_line_command (&instack[indepth], op, 0, leave_file);
+ free (fp->buf);
+ return;
+
+ nope:
+
+ perror_with_name (fname);
+ close (f);
+ free (fp->buf);
+}
+
+/* Record that inclusion of the file named FILE
+ should be controlled by the macro named MACRO_NAME.
+ This means that trying to include the file again
+ will do something if that macro is defined. */
+
+static void
+record_control_macro (file, macro_name)
+ char *file;
+ U_CHAR *macro_name;
+{
+ struct file_name_list *new;
+
+ for (new = all_include_files; new; new = new->next) {
+ if (!strcmp (new->fname, file)) {
+ new->control_macro = macro_name;
+ return;
+ }
+ }
+
+ /* If the file is not in all_include_files, something's wrong. */
+ abort ();
+}
+
+/* Maintain and search list of included files, for #import. */
+
+#define IMPORT_HASH_SIZE 31
+
+struct import_file {
+ char *name;
+ ino_t inode;
+ dev_t dev;
+ struct import_file *next;
+};
+
+/* Hash table of files already included with #include or #import. */
+
+static struct import_file *import_hash_table[IMPORT_HASH_SIZE];
+
+/* Hash a file name for import_hash_table. */
+
+static int
+import_hash (f)
+ char *f;
+{
+ int val = 0;
+
+ while (*f) val += *f++;
+ return (val%IMPORT_HASH_SIZE);
+}
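+
+/* Note the hash is order-insensitive (a plain byte sum mod 31), so
+ permuted names collide: import_hash ("ab") == import_hash ("ba")
+ == ('a' + 'b') % 31 == 195 % 31 == 9. Collisions are handled by
+ the chains walked in lookup_import below. */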
+
+/* Search for file FILENAME in import_hash_table.
+ Return -2 if found, either a matching name or a matching inode.
+ Otherwise, open the file and return a file descriptor if successful
+ or -1 if unsuccessful. */
+
+static int
+lookup_import (filename, searchptr)
+ char *filename;
+ struct file_name_list *searchptr;
+{
+ struct import_file *i;
+ int h;
+ int hashval;
+ struct stat sb;
+ int fd;
+
+ hashval = import_hash (filename);
+
+ /* Attempt to find file in list of already included files */
+ i = import_hash_table[hashval];
+
+ while (i) {
+ if (!strcmp (filename, i->name))
+ return -2; /* return found */
+ i = i->next;
+ }
+ /* Open it and try a match on inode/dev */
+ fd = open_include_file (filename, searchptr);
+ if (fd < 0)
+ return fd;
+ fstat (fd, &sb);
+ for (h = 0; h < IMPORT_HASH_SIZE; h++) {
+ i = import_hash_table[h];
+ while (i) {
+ /* Compare the inode and the device.
+ Supposedly on some systems the inode is not a scalar. */
+ if (!bcmp ((char *) &i->inode, (char *) &sb.st_ino, sizeof (sb.st_ino))
+ && i->dev == sb.st_dev) {
+ close (fd);
+ return -2; /* return found */
+ }
+ i = i->next;
+ }
+ }
+ return fd; /* Not found, return open file */
+}
+
+/* Add the file FNAME, open on descriptor FD, to import_hash_table. */
+
+static void
+add_import (fd, fname)
+ int fd;
+ char *fname;
+{
+ struct import_file *i;
+ int hashval;
+ struct stat sb;
+
+ hashval = import_hash (fname);
+ fstat (fd, &sb);
+ i = (struct import_file *)xmalloc (sizeof (struct import_file));
+ i->name = (char *)xmalloc (strlen (fname)+1);
+ strcpy (i->name, fname);
+ bcopy ((char *) &sb.st_ino, (char *) &i->inode, sizeof (sb.st_ino));
+ i->dev = sb.st_dev;
+ i->next = import_hash_table[hashval];
+ import_hash_table[hashval] = i;
+}
+
+/* Load the specified precompiled header into core, and verify its
+ preconditions. PCF indicates the file descriptor to read, which must
+ be a regular file. FNAME indicates the file name of the original
+ header. *LIMIT will be set to an address one past the end of the file.
+ If the preconditions of the file are not satisfied, the buffer is
+ freed and we return 0. If the preconditions are satisfied, return
+ the address of the buffer following the preconditions. The buffer, in
+ this case, should never be freed because various pieces of it will
+ be referred to until all precompiled strings are output at the end of
+ the run.
+*/
+static char *
+check_precompiled (pcf, fname, limit)
+ int pcf;
+ char *fname;
+ char **limit;
+{
+ int st_mode;
+ long st_size;
+ int length = 0;
+ char *buf;
+ char *cp;
+
+ if (pcp_outfile)
+ return 0;
+
+ if (file_size_and_mode (pcf, &st_mode, &st_size) < 0)
+ return 0;
+
+ if (S_ISREG (st_mode))
+ {
+ buf = xmalloc (st_size + 2);
+ length = safe_read (pcf, buf, st_size);
+ if (length < 0)
+ goto nope;
+ }
+ else
+ abort ();
+
+ if (length > 0 && buf[length-1] != '\n')
+ buf[length++] = '\n';
+ buf[length] = '\0';
+
+ *limit = buf + length;
+
+ /* File is in core. Check the preconditions. */
+ if (!check_preconditions (buf))
+ goto nope;
+ for (cp = buf; *cp; cp++)
+ ;
+#ifdef DEBUG_PCP
+ fprintf (stderr, "Using preinclude %s\n", fname);
+#endif
+ return cp + 1;
+
+ nope:
+#ifdef DEBUG_PCP
+ fprintf (stderr, "Cannot use preinclude %s\n", fname);
+#endif
+ free (buf);
+ return 0;
+}
+
+/* PREC (null terminated) points to the preconditions of a
+ precompiled header. These are a series of #define and #undef
+ lines which must match the current contents of the hash
+ table. */
+static int
+check_preconditions (prec)
+ char *prec;
+{
+ MACRODEF mdef;
+ char *lineend;
+
+ while (*prec) {
+ lineend = (char *) index (prec, '\n');
+
+ if (*prec++ != '#') {
+ error ("Bad format encountered while reading precompiled file");
+ return 0;
+ }
+ if (!strncmp (prec, "define", 6)) {
+ HASHNODE *hp;
+
+ prec += 6;
+ mdef = create_definition (prec, lineend, NULL_PTR);
+
+ if (mdef.defn == 0)
+ abort ();
+
+ if ((hp = lookup (mdef.symnam, mdef.symlen, -1)) == NULL
+ || (hp->type != T_MACRO && hp->type != T_CONST)
+ || (hp->type == T_MACRO
+ && !compare_defs (mdef.defn, hp->value.defn)
+ && (mdef.defn->length != 2
+ || mdef.defn->expansion[0] != '\n'
+ || mdef.defn->expansion[1] != ' ')))
+ return 0;
+ } else if (!strncmp (prec, "undef", 5)) {
+ char *name;
+ int len;
+
+ prec += 5;
+ while (is_hor_space[(U_CHAR) *prec])
+ prec++;
+ name = prec;
+ while (is_idchar[(U_CHAR) *prec])
+ prec++;
+ len = prec - name;
+
+ if (lookup (name, len, -1))
+ return 0;
+ } else {
+ error ("Bad format encountered while reading precompiled file");
+ return 0;
+ }
+ prec = lineend + 1;
+ }
+ /* They all passed successfully */
+ return 1;
+}
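+
+/* Example precondition section (contents hypothetical):
+
+ #define DEBUG 1
+ #undef NDEBUG
+
+ The precompiled file is used only if DEBUG is still defined with an
+ identical body and NDEBUG is still undefined; otherwise
+ check_preconditions returns 0 and the plain header is read. */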
+
+/* Process the main body of a precompiled file. BUF points to the
+ string section of the file, following the preconditions. LIMIT is one
+ character past the end. NAME is the name of the file being read
+ in. OP is the main output buffer */
+static void
+pcfinclude (buf, limit, name, op)
+ U_CHAR *buf, *limit, *name;
+ FILE_BUF *op;
+{
+ FILE_BUF tmpbuf;
+ int nstrings;
+ U_CHAR *cp = buf;
+
+ /* First in the file come 4 bytes indicating the number of strings,
+ in network byte order (MSB first). */
+ nstrings = *cp++;
+ nstrings = (nstrings << 8) | *cp++;
+ nstrings = (nstrings << 8) | *cp++;
+ nstrings = (nstrings << 8) | *cp++;
+
+ /* Looping over each string... */
+ while (nstrings--) {
+ U_CHAR *string_start;
+ U_CHAR *endofthiskey;
+ STRINGDEF *str;
+ int nkeys;
+
+ /* Each string starts with a STRINGDEF structure (str), followed
+ by the text of the string (string_start). */
+
+ /* First skip to a longword boundary */
+ /* ??? Why a 4-byte boundary? On all machines? */
+ /* NOTE: This works correctly even if HOST_WIDE_INT
+ is narrower than a pointer.
+ Do not try risky measures here to get another type to use!
+ Do not include stddef.h--it will fail! */
+ if ((HOST_WIDE_INT) cp & 3)
+ cp += 4 - ((HOST_WIDE_INT) cp & 3);
+
+ /* Now get the string. */
+ str = (STRINGDEF *) cp;
+ string_start = cp += sizeof (STRINGDEF);
+
+ for (; *cp; cp++) /* skip the string */
+ ;
+
+ /* We need to macro expand the string here to ensure that the
+ proper definition environment is in place. If it were only
+ expanded when we find out it is needed, macros necessary for
+ its proper expansion might have had their definitions changed. */
+ tmpbuf = expand_to_temp_buffer (string_start, cp++, 0, 0);
+ /* Lineno is already set in the precompiled file */
+ str->contents = tmpbuf.buf;
+ str->len = tmpbuf.length;
+ str->writeflag = 0;
+ str->filename = name;
+ str->output_mark = outbuf.bufp - outbuf.buf;
+
+ str->chain = 0;
+ *stringlist_tailp = str;
+ stringlist_tailp = &str->chain;
+
+ /* Next comes a four-byte number indicating the number of keys
+ for this string. */
+ nkeys = *cp++;
+ nkeys = (nkeys << 8) | *cp++;
+ nkeys = (nkeys << 8) | *cp++;
+ nkeys = (nkeys << 8) | *cp++;
+
+ /* If this number is -1, then the string is mandatory. */
+ if (nkeys == -1)
+ str->writeflag = 1;
+ else
+ /* Otherwise, for each key, */
+ for (; nkeys--; free (tmpbuf.buf), cp = endofthiskey + 1) {
+ KEYDEF *kp = (KEYDEF *) cp;
+ HASHNODE *hp;
+
+ /* It starts with a KEYDEF structure */
+ cp += sizeof (KEYDEF);
+
+ /* Find the end of the key. At the end of this for loop we
+ advance CP to the start of the next key using this variable. */
+ endofthiskey = cp + strlen (cp);
+ kp->str = str;
+
+ /* Expand the key, and enter it into the hash table. */
+ tmpbuf = expand_to_temp_buffer (cp, endofthiskey, 0, 0);
+ tmpbuf.bufp = tmpbuf.buf;
+
+ while (is_hor_space[*tmpbuf.bufp])
+ tmpbuf.bufp++;
+ if (!is_idstart[*tmpbuf.bufp]
+ || tmpbuf.bufp == tmpbuf.buf + tmpbuf.length) {
+ str->writeflag = 1;
+ continue;
+ }
+
+ hp = lookup (tmpbuf.bufp, -1, -1);
+ if (hp == NULL) {
+ kp->chain = 0;
+ install (tmpbuf.bufp, -1, T_PCSTRING, 0, (char *) kp, -1);
+ }
+ else if (hp->type == T_PCSTRING) {
+ kp->chain = hp->value.keydef;
+ hp->value.keydef = kp;
+ }
+ else
+ str->writeflag = 1;
+ }
+ }
+ /* This output_line_command serves to switch us back to the current
+ input file in case some of these strings get output (which will
+ result in line commands for the header file being output). */
+ output_line_command (&instack[indepth], op, 0, enter_file);
+}
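+
+/* The counts above are assembled a byte at a time so the file format
+ does not depend on host byte order. An equivalent standalone
+ helper (hypothetical name, assuming 8-bit bytes): */
+#if 0
+static unsigned long
+read_be32 (p)
+ U_CHAR *p;
+{
+ return ((unsigned long) p[0] << 24) | ((unsigned long) p[1] << 16)
+ | ((unsigned long) p[2] << 8) | (unsigned long) p[3];
+}
+#endif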
+
+/* Called from rescan when it hits a key for strings.
+ Mark them all used and clean up. */
+static void
+pcstring_used (hp)
+ HASHNODE *hp;
+{
+ KEYDEF *kp;
+
+ for (kp = hp->value.keydef; kp; kp = kp->chain)
+ kp->str->writeflag = 1;
+ delete_macro (hp);
+}
+
+/* Write the output, interspersing precompiled strings
+ in their appropriate places. */
+static void
+write_output ()
+{
+ STRINGDEF *next_string;
+ U_CHAR *cur_buf_loc;
+ int line_command_len = 80;
+ char *line_command = xmalloc (line_command_len);
+ int len;
+
+ /* In each run through the loop, either cur_buf_loc ==
+ next_string_loc, in which case we print a series of strings, or
+ it is less than next_string_loc, in which case we write some of
+ the buffer. */
+ cur_buf_loc = outbuf.buf;
+ next_string = stringlist;
+
+ while (cur_buf_loc < outbuf.bufp || next_string) {
+ if (next_string
+ && cur_buf_loc - outbuf.buf == next_string->output_mark) {
+ if (next_string->writeflag) {
+ len = 4 * strlen (next_string->filename) + 32;
+ while (len > line_command_len)
+ line_command = xrealloc (line_command,
+ line_command_len *= 2);
+ sprintf (line_command, "\n# %d ", next_string->lineno);
+ strcpy (quote_string (line_command + strlen (line_command),
+ next_string->filename),
+ "\n");
+ safe_write (fileno (stdout), line_command, strlen (line_command));
+ safe_write (fileno (stdout), next_string->contents, next_string->len);
+ }
+ next_string = next_string->chain;
+ }
+ else {
+ len = (next_string
+ ? (next_string->output_mark
+ - (cur_buf_loc - outbuf.buf))
+ : outbuf.bufp - cur_buf_loc);
+
+ safe_write (fileno (stdout), cur_buf_loc, len);
+ cur_buf_loc += len;
+ }
+ }
+ free (line_command);
+}
+
+/* Pass a directive through to the output file.
+ BUF points to the contents of the directive, as a contiguous string.
+ LIMIT points to the first character past the end of the directive.
+ KEYWORD is the keyword-table entry for the directive. */
+
+static void
+pass_thru_directive (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ register unsigned keyword_length = keyword->length;
+
+ check_expand (op, 1 + keyword_length + (limit - buf));
+ *op->bufp++ = '#';
+ bcopy (keyword->name, (char *) op->bufp, keyword_length);
+ op->bufp += keyword_length;
+ if (limit != buf && buf[0] != ' ')
+ *op->bufp++ = ' ';
+ bcopy ((char *) buf, (char *) op->bufp, limit - buf);
+ op->bufp += (limit - buf);
+#if 0
+ *op->bufp++ = '\n';
+ /* Count the line we have just made in the output,
+ to get in sync properly. */
+ op->lineno++;
+#endif
+}
+
+/* The arglist structure is built by do_define to tell
+ collect_expansion where the argument names begin. That
+ is, for a define like "#define f(x,y,z) foo+x-bar*y", the arglist
+ would contain pointers to the strings x, y, and z.
+ Collect_expansion would then build a DEFINITION node,
+ with reflist nodes pointing to the places x, y, and z had
+ appeared. So the arglist is just convenience data passed
+ between these two routines. It is not kept around after
+ the current #define has been processed and entered into the
+ hash table. */
+
+struct arglist {
+ struct arglist *next;
+ U_CHAR *name;
+ int length;
+ int argno;
+ char rest_args;
+};
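+
+/* For "#define f(x,y,z) foo+x-bar*y" the list built in
+ create_definition below (newest first, since entries are pushed on
+ the head) would be:
+ { name "z", argno 2 } -> { name "y", argno 1 } -> { name "x", argno 0 }
+ each with length 1 and rest_args 0. */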
+
+/* Create a DEFINITION node from a #define directive. Arguments are
+ as for do_define. */
+static MACRODEF
+create_definition (buf, limit, op)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+{
+ U_CHAR *bp; /* temp ptr into input buffer */
+ U_CHAR *symname; /* remember where symbol name starts */
+ int sym_length; /* and how long it is */
+ int line = instack[indepth].lineno;
+ char *file = instack[indepth].nominal_fname;
+ int rest_args = 0;
+
+ DEFINITION *defn;
+ int arglengths = 0; /* Accumulate lengths of arg names
+ plus number of args. */
+ MACRODEF mdef;
+
+ bp = buf;
+
+ while (is_hor_space[*bp])
+ bp++;
+
+ symname = bp; /* remember where it starts */
+ sym_length = check_macro_name (bp, "macro");
+ bp += sym_length;
+
+ /* Lossage will occur if identifiers or control keywords are broken
+ across lines using backslash. This is not the right place to take
+ care of that. */
+
+ if (*bp == '(') {
+ struct arglist *arg_ptrs = NULL;
+ int argno = 0;
+
+ bp++; /* skip '(' */
+ SKIP_WHITE_SPACE (bp);
+
+ /* Loop over macro argument names. */
+ while (*bp != ')') {
+ struct arglist *temp;
+
+ temp = (struct arglist *) alloca (sizeof (struct arglist));
+ temp->name = bp;
+ temp->next = arg_ptrs;
+ temp->argno = argno++;
+ temp->rest_args = 0;
+ arg_ptrs = temp;
+
+ if (rest_args)
+ pedwarn ("another parameter follows `%s'",
+ rest_extension);
+
+ if (!is_idstart[*bp])
+ pedwarn ("invalid character in macro parameter name");
+
+ /* Find the end of the arg name. */
+ while (is_idchar[*bp]) {
+ bp++;
+ /* do we have a "special" rest-args extension here? */
+ if (limit - bp > REST_EXTENSION_LENGTH &&
+ strncmp (rest_extension, bp, REST_EXTENSION_LENGTH) == 0) {
+ rest_args = 1;
+ temp->rest_args = 1;
+ break;
+ }
+ }
+ temp->length = bp - temp->name;
+ if (rest_args == 1)
+ bp += REST_EXTENSION_LENGTH;
+ arglengths += temp->length + 2;
+ SKIP_WHITE_SPACE (bp);
+ if (temp->length == 0 || (*bp != ',' && *bp != ')')) {
+ error ("badly punctuated parameter list in `#define'");
+ goto nope;
+ }
+ if (*bp == ',') {
+ bp++;
+ SKIP_WHITE_SPACE (bp);
+ }
+ if (bp >= limit) {
+ error ("unterminated parameter list in `#define'");
+ goto nope;
+ }
+ {
+ struct arglist *otemp;
+
+ for (otemp = temp->next; otemp != NULL; otemp = otemp->next)
+ if (temp->length == otemp->length &&
+ strncmp (temp->name, otemp->name, temp->length) == 0) {
+ U_CHAR *name;
+
+ name = (U_CHAR *) alloca (temp->length + 1);
+ (void) strncpy (name, temp->name, temp->length);
+ name[temp->length] = '\0';
+ error ("duplicate argument name `%s' in `#define'", name);
+ goto nope;
+ }
+ }
+ }
+
+ ++bp; /* skip paren */
+ /* Skip spaces and tabs if any. */
+ while (bp < limit && (*bp == ' ' || *bp == '\t'))
+ ++bp;
+ /* now everything from bp before limit is the definition. */
+ defn = collect_expansion (bp, limit, argno, arg_ptrs);
+ defn->rest_args = rest_args;
+
+ /* Now set defn->args.argnames to the result of concatenating
+ the argument names in reverse order
+ with comma-space between them. */
+ defn->args.argnames = (U_CHAR *) xmalloc (arglengths + 1);
+ {
+ struct arglist *temp;
+ int i = 0;
+ for (temp = arg_ptrs; temp; temp = temp->next) {
+ bcopy (temp->name, &defn->args.argnames[i], temp->length);
+ i += temp->length;
+ if (temp->next != 0) {
+ defn->args.argnames[i++] = ',';
+ defn->args.argnames[i++] = ' ';
+ }
+ }
+ defn->args.argnames[i] = 0;
+ }
+ } else {
+ /* Simple expansion or empty definition. */
+
+ /* Skip spaces and tabs if any. */
+ while (bp < limit && (*bp == ' ' || *bp == '\t'))
+ ++bp;
+ /* Now everything from bp before limit is the definition. */
+ defn = collect_expansion (bp, limit, -1, NULL_PTR);
+ defn->args.argnames = (U_CHAR *) "";
+ }
+
+ defn->line = line;
+ defn->file = file;
+
+ /* OP is null if this is a predefinition */
+ defn->predefined = !op;
+ mdef.defn = defn;
+ mdef.symnam = symname;
+ mdef.symlen = sym_length;
+
+ return mdef;
+
+ nope:
+ mdef.defn = 0;
+ return mdef;
+}
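+
+/* Example of the rest-args extension recognized above (GNU syntax of
+ this era; rest_extension is the "..." suffix on the last parameter
+ name):
+
+ #define eprintf(format, args...) fprintf (stderr, format , ## args)
+
+ Here `args' is flagged rest_args and absorbs all remaining actual
+ arguments, possibly none at all. */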
+
+/* Process a #define command.
+BUF points to the contents of the #define command, as a contiguous string.
+LIMIT points to the first character past the end of the definition.
+KEYWORD is the keyword-table entry for #define. */
+
+static int
+do_define (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ int hashcode;
+ MACRODEF mdef;
+
+ /* If this is a precompiler run (with -pcp) pass thru #define commands. */
+ if (pcp_outfile && op)
+ pass_thru_directive (buf, limit, op, keyword);
+
+ mdef = create_definition (buf, limit, op);
+ if (mdef.defn == 0)
+ goto nope;
+
+ hashcode = hashf (mdef.symnam, mdef.symlen, HASHSIZE);
+
+ {
+ HASHNODE *hp;
+ if ((hp = lookup (mdef.symnam, mdef.symlen, hashcode)) != NULL) {
+ int ok = 0;
+ /* Redefining a precompiled key is ok. */
+ if (hp->type == T_PCSTRING)
+ ok = 1;
+ /* Redefining a macro is ok if the definitions are the same. */
+ else if (hp->type == T_MACRO)
+ ok = ! compare_defs (mdef.defn, hp->value.defn);
+ /* Redefining a constant is ok with -D. */
+ else if (hp->type == T_CONST)
+ ok = ! done_initializing;
+ /* Print the warning if it's not ok. */
+ if (!ok) {
+ U_CHAR *msg; /* what pain... */
+
+ /* If we are passing through #define and #undef directives, do
+ that for this re-definition now. */
+ if (debug_output && op)
+ pass_thru_directive (buf, limit, op, keyword);
+
+ msg = (U_CHAR *) alloca (mdef.symlen + 22);
+ *msg = '`';
+ bcopy ((char *) mdef.symnam, (char *) (msg + 1), mdef.symlen);
+ strcpy ((char *) (msg + mdef.symlen + 1), "' redefined");
+ pedwarn (msg);
+ if (hp->type == T_MACRO)
+ pedwarn_with_file_and_line (hp->value.defn->file, hp->value.defn->line,
+ "this is the location of the previous definition");
+ }
+ /* Replace the old definition. */
+ hp->type = T_MACRO;
+ hp->value.defn = mdef.defn;
+ } else {
+ /* If we are passing through #define and #undef directives, do
+ that for this new definition now. */
+ if (debug_output && op)
+ pass_thru_directive (buf, limit, op, keyword);
+ install (mdef.symnam, mdef.symlen, T_MACRO, 0,
+ (char *) mdef.defn, hashcode);
+ }
+ }
+
+ return 0;
+
+nope:
+
+ return 1;
+}
+
+/* Check a purported macro name SYMNAME, and yield its length.
+ USAGE is the kind of name this is intended for. */
+
+static int
+check_macro_name (symname, usage)
+ U_CHAR *symname;
+ char *usage;
+{
+ U_CHAR *p;
+ int sym_length;
+
+ for (p = symname; is_idchar[*p]; p++)
+ ;
+ sym_length = p - symname;
+ if (sym_length == 0)
+ error ("invalid %s name", usage);
+ else if (!is_idstart[*symname]) {
+ U_CHAR *msg; /* what pain... */
+ msg = (U_CHAR *) alloca (sym_length + 1);
+ bcopy ((char *) symname, (char *) msg, sym_length);
+ msg[sym_length] = 0;
+ error ("invalid %s name `%s'", usage, msg);
+ } else {
+ if (! strncmp (symname, "defined", 7) && sym_length == 7)
+ error ("invalid %s name `defined'", usage);
+ }
+ return sym_length;
+}
+
+/*
+ * return zero if two DEFINITIONs are isomorphic
+ */
+static int
+compare_defs (d1, d2)
+ DEFINITION *d1, *d2;
+{
+ register struct reflist *a1, *a2;
+ register U_CHAR *p1 = d1->expansion;
+ register U_CHAR *p2 = d2->expansion;
+ int first = 1;
+
+ if (d1->nargs != d2->nargs)
+ return 1;
+ if (strcmp ((char *)d1->args.argnames, (char *)d2->args.argnames))
+ return 1;
+ for (a1 = d1->pattern, a2 = d2->pattern; a1 && a2;
+ a1 = a1->next, a2 = a2->next) {
+ if (!((a1->nchars == a2->nchars && ! strncmp (p1, p2, a1->nchars))
+ || ! comp_def_part (first, p1, a1->nchars, p2, a2->nchars, 0))
+ || a1->argno != a2->argno
+ || a1->stringify != a2->stringify
+ || a1->raw_before != a2->raw_before
+ || a1->raw_after != a2->raw_after)
+ return 1;
+ first = 0;
+ p1 += a1->nchars;
+ p2 += a2->nchars;
+ }
+ if (a1 != a2)
+ return 1;
+ if (comp_def_part (first, p1, d1->length - (p1 - d1->expansion),
+ p2, d2->length - (p2 - d2->expansion), 1))
+ return 1;
+ return 0;
+}
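+
+/* Two definitions are isomorphic when they differ only in the amount
+ of whitespace: a run of blanks matches any other run, but never the
+ absence of whitespace. So "#define TWO (1 + 1)" matches a
+ redefinition as "(1  +  1)" but not as "(1+1)". */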
+
+/* Return 1 if two parts of two macro definitions are effectively different.
+ One of the parts starts at BEG1 and has LEN1 chars;
+ the other has LEN2 chars at BEG2.
+ Any sequence of whitespace matches any other sequence of whitespace.
+ FIRST means these parts are the first of a macro definition;
+ so ignore leading whitespace entirely.
+ LAST means these parts are the last of a macro definition;
+ so ignore trailing whitespace entirely. */
+
+static int
+comp_def_part (first, beg1, len1, beg2, len2, last)
+ int first;
+ U_CHAR *beg1, *beg2;
+ int len1, len2;
+ int last;
+{
+ register U_CHAR *end1 = beg1 + len1;
+ register U_CHAR *end2 = beg2 + len2;
+ if (first) {
+ while (beg1 != end1 && is_space[*beg1]) beg1++;
+ while (beg2 != end2 && is_space[*beg2]) beg2++;
+ }
+ if (last) {
+ while (beg1 != end1 && is_space[end1[-1]]) end1--;
+ while (beg2 != end2 && is_space[end2[-1]]) end2--;
+ }
+ while (beg1 != end1 && beg2 != end2) {
+ if (is_space[*beg1] && is_space[*beg2]) {
+ while (beg1 != end1 && is_space[*beg1]) beg1++;
+ while (beg2 != end2 && is_space[*beg2]) beg2++;
+ } else if (*beg1 == *beg2) {
+ beg1++; beg2++;
+ } else break;
+ }
+ return (beg1 != end1) || (beg2 != end2);
+}
+
+/* Read a replacement list for a macro with parameters.
+ Build the DEFINITION structure.
+ Reads characters of text starting at BUF until END.
+ ARGLIST specifies the formal parameters to look for
+ in the text of the definition; NARGS is the number of args
+ in that list, or -1 for a macro name that wants no argument list.
+ MACRONAME is the macro name itself (so we can avoid recursive expansion)
+ and NAMELEN is its length in characters.
+
+Note that comments and backslash-newlines have already been deleted
+from the argument. */
+
+/* Leading and trailing Space, Tab, etc. are converted to markers
+ Newline Space, Newline Tab, etc.
+ Newline Space makes a space in the final output
+ but is discarded if stringified. (Newline Tab is similar but
+ makes a Tab instead.)
+
+ If there is no trailing whitespace, a Newline Space is added at the end
+ to prevent concatenation that would be contrary to the standard. */
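+/* (Illustrative sketch: a trailing blank in the replacement text is
+   stored as the two characters Newline, Space; it comes out as a
+   single space in normal expansion and as nothing under `#'.)  */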
+
+static DEFINITION *
+collect_expansion (buf, end, nargs, arglist)
+ U_CHAR *buf, *end;
+ int nargs;
+ struct arglist *arglist;
+{
+ DEFINITION *defn;
+ register U_CHAR *p, *limit, *lastp, *exp_p;
+ struct reflist *endpat = NULL;
+ /* Pointer to first nonspace after last ## seen. */
+ U_CHAR *concat = 0;
+ /* Pointer to first nonspace after last single-# seen. */
+ U_CHAR *stringify = 0;
+ int maxsize;
+ int expected_delimiter = '\0';
+
+  /* Scan thru the replacement list, ignoring comments and quoted
+     strings, picking out uses of the macro's formal parameters.  It
+     does a linear search thru the arg list on every potential symbol.
+     Profiling might say that something smarter should happen. */
+
+ if (end < buf)
+ abort ();
+
+  /* Find the beginning of the trailing whitespace,
+     then the end of the leading whitespace. */
+ limit = end;
+ p = buf;
+ while (p < limit && is_space[limit[-1]]) limit--;
+ while (p < limit && is_space[*p]) p++;
+
+ /* Allocate space for the text in the macro definition.
+ Leading and trailing whitespace chars need 2 bytes each.
+ Each other input char may or may not need 1 byte,
+ so this is an upper bound.
+ The extra 2 are for invented trailing newline-marker and final null. */
+ maxsize = (sizeof (DEFINITION)
+ + 2 * (end - limit) + 2 * (p - buf)
+ + (limit - p) + 3);
+ defn = (DEFINITION *) xcalloc (1, maxsize);
+
+ defn->nargs = nargs;
+ exp_p = defn->expansion = (U_CHAR *) defn + sizeof (DEFINITION);
+ lastp = exp_p;
+
+ p = buf;
+
+ /* Convert leading whitespace to Newline-markers. */
+ while (p < limit && is_space[*p]) {
+ *exp_p++ = '\n';
+ *exp_p++ = *p++;
+ }
+
+ if (limit - p >= 2 && p[0] == '#' && p[1] == '#') {
+ error ("`##' at start of macro definition");
+ p += 2;
+ }
+
+ /* Process the main body of the definition. */
+ while (p < limit) {
+ int skipped_arg = 0;
+ register U_CHAR c = *p++;
+
+ *exp_p++ = c;
+
+ if (!traditional) {
+ switch (c) {
+ case '\'':
+ case '\"':
+ if (expected_delimiter != '\0') {
+ if (c == expected_delimiter)
+ expected_delimiter = '\0';
+ } else
+ expected_delimiter = c;
+ break;
+
+ /* Special hack: if a \# is written in the #define
+ include a # in the definition. This is useless for C code
+ but useful for preprocessing other things. */
+
+ case '\\':
+ /* \# quotes a # even outside of strings. */
+ if (p < limit && *p == '#' && !expected_delimiter) {
+ exp_p--;
+ *exp_p++ = *p++;
+ } else if (p < limit && expected_delimiter) {
+ /* In a string, backslash goes through
+ and makes next char ordinary. */
+ *exp_p++ = *p++;
+ }
+ break;
+
+ case '#':
+ /* # is ordinary inside a string. */
+ if (expected_delimiter)
+ break;
+ if (p < limit && *p == '#') {
+ /* ##: concatenate preceding and following tokens. */
+ /* Take out the first #, discard preceding whitespace. */
+ exp_p--;
+ while (exp_p > lastp && is_hor_space[exp_p[-1]])
+ --exp_p;
+ /* Skip the second #. */
+ p++;
+ /* Discard following whitespace. */
+ SKIP_WHITE_SPACE (p);
+ concat = p;
+ if (p == limit)
+ error ("`##' at end of macro definition");
+ } else if (nargs >= 0) {
+ /* Single #: stringify following argument ref.
+ Don't leave the # in the expansion. */
+ exp_p--;
+ SKIP_WHITE_SPACE (p);
+ if (p == limit || ! is_idstart[*p])
+ error ("`#' operator is not followed by a macro argument name");
+ else
+ stringify = p;
+ }
+ break;
+ }
+ } else {
+      /* In -traditional mode, recognize arguments inside strings and
+	 character constants, and ignore special properties of #.
+ Arguments inside strings are considered "stringified", but no
+ extra quote marks are supplied. */
+ switch (c) {
+ case '\'':
+ case '\"':
+ if (expected_delimiter != '\0') {
+ if (c == expected_delimiter)
+ expected_delimiter = '\0';
+ } else
+ expected_delimiter = c;
+ break;
+
+ case '\\':
+ /* Backslash quotes delimiters and itself, but not macro args. */
+ if (expected_delimiter != 0 && p < limit
+ && (*p == expected_delimiter || *p == '\\')) {
+ *exp_p++ = *p++;
+ continue;
+ }
+ break;
+
+ case '/':
+ if (expected_delimiter != '\0') /* No comments inside strings. */
+ break;
+ if (*p == '*') {
+ /* If we find a comment that wasn't removed by handle_directive,
+ this must be -traditional. So replace the comment with
+ nothing at all. */
+ exp_p--;
+ p += 1;
+ while (p < limit && !(p[-2] == '*' && p[-1] == '/'))
+ p++;
+#if 0
+ /* Mark this as a concatenation-point, as if it had been ##. */
+ concat = p;
+#endif
+ }
+ break;
+ }
+ }
+
+ /* Handle the start of a symbol. */
+ if (is_idchar[c] && nargs > 0) {
+ U_CHAR *id_beg = p - 1;
+ int id_len;
+
+ --exp_p;
+ while (p != limit && is_idchar[*p]) p++;
+ id_len = p - id_beg;
+
+ if (is_idstart[c]) {
+ register struct arglist *arg;
+
+ for (arg = arglist; arg != NULL; arg = arg->next) {
+ struct reflist *tpat;
+
+ if (arg->name[0] == c
+ && arg->length == id_len
+ && strncmp (arg->name, id_beg, id_len) == 0) {
+ if (expected_delimiter && warn_stringify) {
+ if (traditional) {
+ warning ("macro argument `%.*s' is stringified.",
+ id_len, arg->name);
+ } else {
+ warning ("macro arg `%.*s' would be stringified with -traditional.",
+ id_len, arg->name);
+ }
+ }
+ /* If ANSI, don't actually substitute inside a string. */
+ if (!traditional && expected_delimiter)
+ break;
+ /* make a pat node for this arg and append it to the end of
+ the pat list */
+ tpat = (struct reflist *) xmalloc (sizeof (struct reflist));
+ tpat->next = NULL;
+ tpat->raw_before = concat == id_beg;
+ tpat->raw_after = 0;
+ tpat->rest_args = arg->rest_args;
+ tpat->stringify = (traditional ? expected_delimiter != '\0'
+ : stringify == id_beg);
+
+ if (endpat == NULL)
+ defn->pattern = tpat;
+ else
+ endpat->next = tpat;
+ endpat = tpat;
+
+ tpat->argno = arg->argno;
+ tpat->nchars = exp_p - lastp;
+ {
+ register U_CHAR *p1 = p;
+ SKIP_WHITE_SPACE (p1);
+ if (p1 + 2 <= limit && p1[0] == '#' && p1[1] == '#')
+ tpat->raw_after = 1;
+ }
+ lastp = exp_p; /* place to start copying from next time */
+ skipped_arg = 1;
+ break;
+ }
+ }
+ }
+
+ /* If this was not a macro arg, copy it into the expansion. */
+ if (! skipped_arg) {
+ register U_CHAR *lim1 = p;
+ p = id_beg;
+ while (p != lim1)
+ *exp_p++ = *p++;
+ if (stringify == id_beg)
+ error ("`#' operator should be followed by a macro argument name");
+ }
+ }
+ }
+
+ if (!traditional && expected_delimiter == 0) {
+ /* There is no trailing whitespace, so invent some in ANSI mode.
+       But not if we are "inside a string" (which in ANSI mode
+       happens only for the -D option).  */
+ *exp_p++ = '\n';
+ *exp_p++ = ' ';
+ }
+
+ *exp_p = '\0';
+
+ defn->length = exp_p - defn->expansion;
+
+ /* Crash now if we overrun the allocated size. */
+ if (defn->length + 1 > maxsize)
+ abort ();
+
+#if 0
+/* This isn't worth the time it takes. */
+ /* give back excess storage */
+ defn->expansion = (U_CHAR *) xrealloc (defn->expansion, defn->length + 1);
+#endif
+
+ return defn;
+}
+
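+/* Handle a #assert directive: record the parenthesized token-sequence
+   that follows the predicate name, e.g. `#assert machine (vax)', so it
+   can later be tested with `#if #machine (vax)'.  */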
+static int
+do_assert (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ U_CHAR *bp; /* temp ptr into input buffer */
+ U_CHAR *symname; /* remember where symbol name starts */
+ int sym_length; /* and how long it is */
+ struct arglist *tokens = NULL;
+
+ if (pedantic && done_initializing && !instack[indepth].system_header_p)
+ pedwarn ("ANSI C does not allow `#assert'");
+
+ bp = buf;
+
+ while (is_hor_space[*bp])
+ bp++;
+
+ symname = bp; /* remember where it starts */
+ sym_length = check_macro_name (bp, "assertion");
+ bp += sym_length;
+ /* #define doesn't do this, but we should. */
+ SKIP_WHITE_SPACE (bp);
+
+ /* Lossage will occur if identifiers or control tokens are broken
+ across lines using backslash. This is not the right place to take
+ care of that. */
+
+ if (*bp != '(') {
+ error ("missing token-sequence in `#assert'");
+ return 1;
+ }
+
+ {
+ int error_flag = 0;
+
+ bp++; /* skip '(' */
+ SKIP_WHITE_SPACE (bp);
+
+ tokens = read_token_list (&bp, limit, &error_flag);
+ if (error_flag)
+ return 1;
+ if (tokens == 0) {
+ error ("empty token-sequence in `#assert'");
+ return 1;
+ }
+
+ ++bp; /* skip paren */
+ SKIP_WHITE_SPACE (bp);
+ }
+
+ /* If this name isn't already an assertion name, make it one.
+ Error if it was already in use in some other way. */
+
+ {
+ ASSERTION_HASHNODE *hp;
+ int hashcode = hashf (symname, sym_length, ASSERTION_HASHSIZE);
+ struct tokenlist_list *value
+ = (struct tokenlist_list *) xmalloc (sizeof (struct tokenlist_list));
+
+ hp = assertion_lookup (symname, sym_length, hashcode);
+ if (hp == NULL) {
+ if (sym_length == 7 && ! strncmp (symname, "defined", sym_length))
+ error ("`defined' redefined as assertion");
+ hp = assertion_install (symname, sym_length, hashcode);
+ }
+
+ /* Add the spec'd token-sequence to the list of such. */
+ value->tokens = tokens;
+ value->next = hp->value;
+ hp->value = value;
+ }
+
+ return 0;
+}
+
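+/* Handle a #unassert directive: with no token-sequence, forget every
+   answer recorded for the predicate; with one, forget only the
+   matching answer.  */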
+static int
+do_unassert (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ U_CHAR *bp; /* temp ptr into input buffer */
+ U_CHAR *symname; /* remember where symbol name starts */
+ int sym_length; /* and how long it is */
+
+ struct arglist *tokens = NULL;
+ int tokens_specified = 0;
+
+ if (pedantic && done_initializing && !instack[indepth].system_header_p)
+ pedwarn ("ANSI C does not allow `#unassert'");
+
+ bp = buf;
+
+ while (is_hor_space[*bp])
+ bp++;
+
+ symname = bp; /* remember where it starts */
+ sym_length = check_macro_name (bp, "assertion");
+ bp += sym_length;
+ /* #define doesn't do this, but we should. */
+ SKIP_WHITE_SPACE (bp);
+
+ /* Lossage will occur if identifiers or control tokens are broken
+ across lines using backslash. This is not the right place to take
+ care of that. */
+
+ if (*bp == '(') {
+ int error_flag = 0;
+
+ bp++; /* skip '(' */
+ SKIP_WHITE_SPACE (bp);
+
+ tokens = read_token_list (&bp, limit, &error_flag);
+ if (error_flag)
+ return 1;
+ if (tokens == 0) {
+ error ("empty token list in `#unassert'");
+ return 1;
+ }
+
+ tokens_specified = 1;
+
+ ++bp; /* skip paren */
+ SKIP_WHITE_SPACE (bp);
+ }
+
+ {
+ ASSERTION_HASHNODE *hp;
+ int hashcode = hashf (symname, sym_length, ASSERTION_HASHSIZE);
+ struct tokenlist_list *tail, *prev;
+
+ hp = assertion_lookup (symname, sym_length, hashcode);
+ if (hp == NULL)
+ return 1;
+
+ /* If no token list was specified, then eliminate this assertion
+ entirely. */
+ if (! tokens_specified) {
+ struct tokenlist_list *next;
+ for (tail = hp->value; tail; tail = next) {
+ next = tail->next;
+ free_token_list (tail->tokens);
+ free (tail);
+ }
+ delete_assertion (hp);
+ } else {
+ /* If a list of tokens was given, then delete any matching list. */
+
+ tail = hp->value;
+ prev = 0;
+ while (tail) {
+ struct tokenlist_list *next = tail->next;
+ if (compare_token_lists (tail->tokens, tokens)) {
+ if (prev)
+ prev->next = next;
+ else
+ hp->value = tail->next;
+ free_token_list (tail->tokens);
+ free (tail);
+ } else {
+ prev = tail;
+ }
+ tail = next;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Test whether there is an assertion named NAME
+ and optionally whether it has an asserted token list TOKENS.
+ NAME is not null terminated; its length is SYM_LENGTH.
+ If TOKENS_SPECIFIED is 0, then don't check for any token list. */
+
+int
+check_assertion (name, sym_length, tokens_specified, tokens)
+ U_CHAR *name;
+ int sym_length;
+ int tokens_specified;
+ struct arglist *tokens;
+{
+ ASSERTION_HASHNODE *hp;
+ int hashcode = hashf (name, sym_length, ASSERTION_HASHSIZE);
+
+ if (pedantic && !instack[indepth].system_header_p)
+ pedwarn ("ANSI C does not allow testing assertions");
+
+ hp = assertion_lookup (name, sym_length, hashcode);
+ if (hp == NULL)
+ /* It is not an assertion; just return false. */
+ return 0;
+
+ /* If no token list was specified, then value is 1. */
+ if (! tokens_specified)
+ return 1;
+
+ {
+ struct tokenlist_list *tail;
+
+ tail = hp->value;
+
+ /* If a list of tokens was given,
+ then succeed if the assertion records a matching list. */
+
+ while (tail) {
+ if (compare_token_lists (tail->tokens, tokens))
+ return 1;
+ tail = tail->next;
+ }
+
+ /* Fail if the assertion has no matching list. */
+ return 0;
+ }
+}
+
+/* Compare two lists of tokens for equality including order of tokens. */
+
+static int
+compare_token_lists (l1, l2)
+ struct arglist *l1, *l2;
+{
+ while (l1 && l2) {
+ if (l1->length != l2->length)
+ return 0;
+ if (strncmp (l1->name, l2->name, l1->length))
+ return 0;
+ l1 = l1->next;
+ l2 = l2->next;
+ }
+
+ /* Succeed if both lists end at the same time. */
+ return l1 == l2;
+}
+
+/* Read a space-separated list of tokens ending in a close parenthesis.
+ Return a list of strings, in the order they were written.
+ (In case of error, return 0 and store -1 in *ERROR_FLAG.)
+ Parse the text starting at *BPP, and update *BPP.
+ Don't parse beyond LIMIT. */
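+/* (For example, given the text `vax 750)' this yields the two-element
+   list "vax", "750" and leaves *BPP pointing at the close paren.)  */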
+
+static struct arglist *
+read_token_list (bpp, limit, error_flag)
+ U_CHAR **bpp;
+ U_CHAR *limit;
+ int *error_flag;
+{
+ struct arglist *token_ptrs = 0;
+ U_CHAR *bp = *bpp;
+ int depth = 1;
+
+ *error_flag = 0;
+
+ /* Loop over the assertion value tokens. */
+ while (depth > 0) {
+ struct arglist *temp;
+ int eofp = 0;
+ U_CHAR *beg = bp;
+
+ /* Find the end of the token. */
+ if (*bp == '(') {
+ bp++;
+ depth++;
+ } else if (*bp == ')') {
+ depth--;
+ if (depth == 0)
+ break;
+ bp++;
+ } else if (*bp == '"' || *bp == '\'')
+ bp = skip_quoted_string (bp, limit, 0, NULL_PTR, NULL_PTR, &eofp);
+ else
+ while (! is_hor_space[*bp] && *bp != '(' && *bp != ')'
+ && *bp != '"' && *bp != '\'' && bp != limit)
+ bp++;
+
+ temp = (struct arglist *) xmalloc (sizeof (struct arglist));
+ temp->name = (U_CHAR *) xmalloc (bp - beg + 1);
+ bcopy ((char *) beg, (char *) temp->name, bp - beg);
+ temp->name[bp - beg] = 0;
+ temp->next = token_ptrs;
+ token_ptrs = temp;
+ temp->length = bp - beg;
+
+ SKIP_WHITE_SPACE (bp);
+
+ if (bp >= limit) {
+ error ("unterminated token sequence in `#assert' or `#unassert'");
+ *error_flag = -1;
+ return 0;
+ }
+ }
+ *bpp = bp;
+
+ /* We accumulated the names in reverse order.
+ Now reverse them to get the proper order. */
+ {
+ register struct arglist *prev = 0, *this, *next;
+ for (this = token_ptrs; this; this = next) {
+ next = this->next;
+ this->next = prev;
+ prev = this;
+ }
+ return prev;
+ }
+}
+
+static void
+free_token_list (tokens)
+ struct arglist *tokens;
+{
+ while (tokens) {
+ struct arglist *next = tokens->next;
+ free (tokens->name);
+ free (tokens);
+ tokens = next;
+ }
+}
+
+/*
+ * Install a name in the assertion hash table.
+ *
+ * If LEN is >= 0, it is the length of the name.
+ * Otherwise, compute the length by scanning the entire name.
+ *
+ * If HASH is >= 0, it is the precomputed hash code.
+ * Otherwise, compute the hash code.
+ */
+static ASSERTION_HASHNODE *
+assertion_install (name, len, hash)
+ U_CHAR *name;
+ int len;
+ int hash;
+{
+ register ASSERTION_HASHNODE *hp;
+ register int i, bucket;
+ register U_CHAR *p, *q;
+
+ i = sizeof (ASSERTION_HASHNODE) + len + 1;
+ hp = (ASSERTION_HASHNODE *) xmalloc (i);
+ bucket = hash;
+ hp->bucket_hdr = &assertion_hashtab[bucket];
+ hp->next = assertion_hashtab[bucket];
+ assertion_hashtab[bucket] = hp;
+ hp->prev = NULL;
+ if (hp->next != NULL)
+ hp->next->prev = hp;
+ hp->length = len;
+ hp->value = 0;
+ hp->name = ((U_CHAR *) hp) + sizeof (ASSERTION_HASHNODE);
+ p = hp->name;
+ q = name;
+ for (i = 0; i < len; i++)
+ *p++ = *q++;
+ hp->name[len] = 0;
+ return hp;
+}
+
+/*
+ * find the most recent hash node for the name NAME (ending with the
+ * first non-identifier char) installed by assertion_install
+ *
+ * If LEN is >= 0, it is the length of the name.
+ * Otherwise, compute the length by scanning the entire name.
+ *
+ * If HASH is >= 0, it is the precomputed hash code.
+ * Otherwise, compute the hash code.
+ */
+static ASSERTION_HASHNODE *
+assertion_lookup (name, len, hash)
+ U_CHAR *name;
+ int len;
+ int hash;
+{
+ register ASSERTION_HASHNODE *bucket;
+
+ bucket = assertion_hashtab[hash];
+ while (bucket) {
+ if (bucket->length == len && strncmp (bucket->name, name, len) == 0)
+ return bucket;
+ bucket = bucket->next;
+ }
+ return NULL;
+}
+
+static void
+delete_assertion (hp)
+ ASSERTION_HASHNODE *hp;
+{
+
+ if (hp->prev != NULL)
+ hp->prev->next = hp->next;
+ if (hp->next != NULL)
+ hp->next->prev = hp->prev;
+
+  /* make sure that the bucket chain header that the deleted
+     node was on points to the right thing afterwards. */
+ if (hp == *hp->bucket_hdr)
+ *hp->bucket_hdr = hp->next;
+
+ free (hp);
+}
+
+/*
+ * interpret #line command. Remembers previously seen fnames
+ * in its very own hash table.
+ */
+#define FNAME_HASHSIZE 37
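+
+/* (The forms accepted here include the ones the preprocessor itself
+   emits: `# 10 "file.c"', optionally followed by `1' (entering an
+   include file), `2' (returning to the includer), `3' (system header),
+   and `4' (implicit extern "C").)  */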
+
+static int
+do_line (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ register U_CHAR *bp;
+ FILE_BUF *ip = &instack[indepth];
+ FILE_BUF tem;
+ int new_lineno;
+ enum file_change_code file_change = same_file;
+
+ /* Expand any macros. */
+ tem = expand_to_temp_buffer (buf, limit, 0, 0);
+
+ /* Point to macroexpanded line, which is null-terminated now. */
+ bp = tem.buf;
+ SKIP_WHITE_SPACE (bp);
+
+ if (!isdigit (*bp)) {
+ error ("invalid format `#line' command");
+ return 0;
+ }
+
+ /* The Newline at the end of this line remains to be processed.
+ To put the next line at the specified line number,
+ we must store a line number now that is one less. */
+ new_lineno = atoi (bp) - 1;
+
+ /* NEW_LINENO is one less than the actual line number here. */
+ if (pedantic && new_lineno < 0)
+ pedwarn ("line number out of range in `#line' command");
+
+ /* skip over the line number. */
+ while (isdigit (*bp))
+ bp++;
+
+#if 0 /* #line 10"foo.c" is supposed to be allowed. */
+ if (*bp && !is_space[*bp]) {
+ error ("invalid format `#line' command");
+ return;
+ }
+#endif
+
+ SKIP_WHITE_SPACE (bp);
+
+ if (*bp == '\"') {
+ static HASHNODE *fname_table[FNAME_HASHSIZE];
+ HASHNODE *hp, **hash_bucket;
+ U_CHAR *fname, *p;
+ int fname_length;
+
+ fname = ++bp;
+
+ /* Turn the file name, which is a character string literal,
+ into a null-terminated string. Do this in place. */
+ p = bp;
+ for (;;)
+ switch ((*p++ = *bp++)) {
+ case '\0':
+ error ("invalid format `#line' command");
+ return 0;
+
+ case '\\':
+ {
+ char *bpc = (char *) bp;
+ int c = parse_escape (&bpc);
+ bp = (U_CHAR *) bpc;
+ if (c < 0)
+ p--;
+ else
+ p[-1] = c;
+ }
+ break;
+
+ case '\"':
+ p[-1] = 0;
+ goto fname_done;
+ }
+ fname_done:
+ fname_length = p - fname;
+
+ SKIP_WHITE_SPACE (bp);
+ if (*bp) {
+ if (pedantic)
+ pedwarn ("garbage at end of `#line' command");
+ if (*bp == '1')
+ file_change = enter_file;
+ else if (*bp == '2')
+ file_change = leave_file;
+ else if (*bp == '3')
+ ip->system_header_p = 1;
+ else if (*bp == '4')
+ ip->system_header_p = 2;
+ else {
+ error ("invalid format `#line' command");
+ return 0;
+ }
+
+ bp++;
+ SKIP_WHITE_SPACE (bp);
+ if (*bp == '3') {
+ ip->system_header_p = 1;
+ bp++;
+ SKIP_WHITE_SPACE (bp);
+ }
+ if (*bp == '4') {
+ ip->system_header_p = 2;
+ bp++;
+ SKIP_WHITE_SPACE (bp);
+ }
+ if (*bp) {
+ error ("invalid format `#line' command");
+ return 0;
+ }
+ }
+
+ hash_bucket =
+ &fname_table[hashf (fname, fname_length, FNAME_HASHSIZE)];
+ for (hp = *hash_bucket; hp != NULL; hp = hp->next)
+ if (hp->length == fname_length &&
+ strncmp (hp->value.cpval, fname, fname_length) == 0) {
+ ip->nominal_fname = hp->value.cpval;
+ break;
+ }
+ if (hp == 0) {
+ /* Didn't find it; cons up a new one. */
+ hp = (HASHNODE *) xcalloc (1, sizeof (HASHNODE) + fname_length + 1);
+ hp->next = *hash_bucket;
+ *hash_bucket = hp;
+
+ hp->length = fname_length;
+ ip->nominal_fname = hp->value.cpval = ((char *) hp) + sizeof (HASHNODE);
+ bcopy (fname, hp->value.cpval, fname_length);
+ }
+ } else if (*bp) {
+ error ("invalid format `#line' command");
+ return 0;
+ }
+
+ ip->lineno = new_lineno;
+ output_line_command (ip, op, 0, file_change);
+ check_expand (op, ip->length - (ip->bufp - ip->buf));
+ return 0;
+}
+
+/*
+ * remove the definition of a symbol from the symbol table.
+ * according to un*x /lib/cpp, it is not an error to undef
+ * something that has no definitions, so it isn't one here either.
+ */
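+/* (The loop below deletes until lookup fails, so every live definition
+   of the name is removed; undefining something that is not a plain
+   macro draws a warning first.)  */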
+
+static int
+do_undef (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ int sym_length;
+ HASHNODE *hp;
+ U_CHAR *orig_buf = buf;
+
+ /* If this is a precompiler run (with -pcp) pass thru #undef commands. */
+ if (pcp_outfile && op)
+ pass_thru_directive (buf, limit, op, keyword);
+
+ SKIP_WHITE_SPACE (buf);
+ sym_length = check_macro_name (buf, "macro");
+
+ while ((hp = lookup (buf, sym_length, -1)) != NULL) {
+ /* If we are generating additional info for debugging (with -g) we
+ need to pass through all effective #undef commands. */
+ if (debug_output && op)
+ pass_thru_directive (orig_buf, limit, op, keyword);
+ if (hp->type != T_MACRO)
+ warning ("undefining `%s'", hp->name);
+ delete_macro (hp);
+ }
+
+ if (pedantic) {
+ buf += sym_length;
+ SKIP_WHITE_SPACE (buf);
+ if (buf != limit)
+ pedwarn ("garbage after `#undef' directive");
+ }
+ return 0;
+}
+
+/*
+ * Report an error detected by the program we are processing.
+ * Use the text of the line in the error message.
+ * (We use error because it prints the filename & line#.)
+ */
+
+static int
+do_error (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ int length = limit - buf;
+ U_CHAR *copy = (U_CHAR *) xmalloc (length + 1);
+ bcopy ((char *) buf, (char *) copy, length);
+ copy[length] = 0;
+ SKIP_WHITE_SPACE (copy);
+ error ("#error %s", copy);
+ return 0;
+}
+
+/*
+ * Report a warning detected by the program we are processing.
+ * Use the text of the line in the warning message, then continue.
+ * (We use warning because it prints the filename & line#.)
+ */
+
+static int
+do_warning (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ int length = limit - buf;
+ U_CHAR *copy = (U_CHAR *) xmalloc (length + 1);
+ bcopy ((char *) buf, (char *) copy, length);
+ copy[length] = 0;
+ SKIP_WHITE_SPACE (copy);
+ warning ("#warning %s", copy);
+ return 0;
+}
+
+/* Remember the name of the current file being read from so that we can
+ avoid ever including it again. */
+
+static int
+do_once ()
+{
+ int i;
+ FILE_BUF *ip = NULL;
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+
+ if (ip != NULL) {
+ struct file_name_list *new;
+
+ new = (struct file_name_list *) xmalloc (sizeof (struct file_name_list));
+ new->next = dont_repeat_files;
+ dont_repeat_files = new;
+ new->fname = savestring (ip->fname);
+ new->control_macro = 0;
+ new->got_name_map = 0;
+ new->c_system_include_path = 0;
+ }
+ return 0;
+}
+
+/* #ident has already been copied to the output file, so just ignore it. */
+
+static int
+do_ident (buf, limit)
+ U_CHAR *buf, *limit;
+{
+ FILE_BUF trybuf;
+ int len;
+ FILE_BUF *op = &outbuf;
+
+  /* Allow #ident in system headers, since that's not the user's fault. */
+ if (pedantic && !instack[indepth].system_header_p)
+ pedwarn ("ANSI C does not allow `#ident'");
+
+ trybuf = expand_to_temp_buffer (buf, limit, 0, 0);
+ buf = (U_CHAR *) alloca (trybuf.bufp - trybuf.buf + 1);
+ bcopy ((char *) trybuf.buf, (char *) buf, trybuf.bufp - trybuf.buf);
+ limit = buf + (trybuf.bufp - trybuf.buf);
+ len = (limit - buf);
+ free (trybuf.buf);
+
+ /* Output directive name. */
+ check_expand (op, 8);
+ bcopy ("#ident ", (char *) op->bufp, 7);
+ op->bufp += 7;
+
+ /* Output the expanded argument line. */
+ check_expand (op, len);
+ bcopy ((char *) buf, (char *) op->bufp, len);
+ op->bufp += len;
+
+ return 0;
+}
+
+/* #pragma and its argument line have already been copied to the output file.
+ Just check for some recognized pragmas that need validation here. */
+
+static int
+do_pragma (buf, limit)
+ U_CHAR *buf, *limit;
+{
+ while (*buf == ' ' || *buf == '\t')
+ buf++;
+ if (!strncmp (buf, "once", 4)) {
+ /* Allow #pragma once in system headers, since that's not the user's
+ fault. */
+ if (!instack[indepth].system_header_p)
+ warning ("`#pragma once' is obsolete");
+ do_once ();
+ }
+
+ if (!strncmp (buf, "implementation", 14)) {
+ /* Be quiet about `#pragma implementation' for a file only if it hasn't
+ been included yet. */
+ struct file_name_list *ptr;
+ U_CHAR *p = buf + 14, *fname, *inc_fname;
+ SKIP_WHITE_SPACE (p);
+ if (*p == '\n' || *p != '\"')
+ return 0;
+
+ fname = p + 1;
+ if (p = (U_CHAR *) index (fname, '\"'))
+ *p = '\0';
+
+ for (ptr = all_include_files; ptr; ptr = ptr->next) {
+ inc_fname = (U_CHAR *) rindex (ptr->fname, '/');
+ inc_fname = inc_fname ? inc_fname + 1 : (U_CHAR *) ptr->fname;
+ if (inc_fname && !strcmp (inc_fname, fname))
+ warning ("`#pragma implementation' for `%s' appears after file is included",
+ fname);
+ }
+ }
+
+ return 0;
+}
+
+#if 0
+/* This was a fun hack, but #pragma is starting to be useful.
+ By failing to recognize it, we pass it through unchanged to cc1. */
+
+/*
+ * the behavior of the #pragma directive is implementation defined.
+ * this implementation defines it as follows.
+ */
+
+static int
+do_pragma ()
+{
+ close (0);
+ if (open ("/dev/tty", O_RDONLY, 0666) != 0)
+ goto nope;
+ close (1);
+ if (open ("/dev/tty", O_WRONLY, 0666) != 1)
+ goto nope;
+ execl ("/usr/games/hack", "#pragma", 0);
+ execl ("/usr/games/rogue", "#pragma", 0);
+ execl ("/usr/new/emacs", "-f", "hanoi", "9", "-kill", 0);
+ execl ("/usr/local/emacs", "-f", "hanoi", "9", "-kill", 0);
+nope:
+ fatal ("You are in a maze of twisty compiler features, all different");
+}
+#endif
+
+/* Just ignore #sccs, on systems where we define it at all. */
+
+static int
+do_sccs ()
+{
+ if (pedantic)
+ pedwarn ("ANSI C does not allow `#sccs'");
+ return 0;
+}
+
+/*
+ * handle #if command by
+ * 1) inserting special `defined' keyword into the hash table
+ * that gets turned into 0 or 1 by special_symbol (thus,
+ * if the user has a symbol called `defined' already, it won't
+ * work inside the #if command)
+ * 2) rescan the input into a temporary output buffer
+ * 3) pass the output buffer to the yacc parser and collect a value
+ * 4) clean up the mess left from steps 1 and 2.
+ * 5) call conditional_skip to skip til the next #endif (etc.),
+ * or not, depending on the value from step 3.
+ */
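+/* (Thus for `#if defined (FOO)' the rescan in step 2 has already
+   turned `defined (FOO)' into `1' or `0' before the parser in step 3
+   ever sees the text.)  */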
+
+static int
+do_if (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ int value;
+ FILE_BUF *ip = &instack[indepth];
+
+ value = eval_if_expression (buf, limit - buf);
+ conditional_skip (ip, value == 0, T_IF, NULL_PTR);
+ return 0;
+}
+
+/*
+ * handle a #elif directive by not changing if_stack either.
+ * see the comment above do_else.
+ */
+
+static int
+do_elif (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ int value;
+ FILE_BUF *ip = &instack[indepth];
+
+ if (if_stack == instack[indepth].if_stack) {
+ error ("`#elif' not within a conditional");
+ return 0;
+ } else {
+ if (if_stack->type != T_IF && if_stack->type != T_ELIF) {
+ error ("`#elif' after `#else'");
+ fprintf (stderr, " (matches line %d", if_stack->lineno);
+ if (if_stack->fname != NULL && ip->fname != NULL &&
+ strcmp (if_stack->fname, ip->nominal_fname) != 0)
+ fprintf (stderr, ", file %s", if_stack->fname);
+ fprintf (stderr, ")\n");
+ }
+ if_stack->type = T_ELIF;
+ }
+
+ if (if_stack->if_succeeded)
+ skip_if_group (ip, 0);
+ else {
+ value = eval_if_expression (buf, limit - buf);
+ if (value == 0)
+ skip_if_group (ip, 0);
+ else {
+ ++if_stack->if_succeeded; /* continue processing input */
+ output_line_command (ip, op, 1, same_file);
+ }
+ }
+ return 0;
+}
+
+/*
+ * evaluate a #if expression in BUF, of length LENGTH,
+ * then parse the result as a C expression and return the value as an int.
+ */
+static int
+eval_if_expression (buf, length)
+ U_CHAR *buf;
+ int length;
+{
+ FILE_BUF temp_obuf;
+ HASHNODE *save_defined;
+ int value;
+
+ save_defined = install ("defined", -1, T_SPEC_DEFINED, 0, 0, -1);
+ pcp_inside_if = 1;
+ temp_obuf = expand_to_temp_buffer (buf, buf + length, 0, 1);
+ pcp_inside_if = 0;
+ delete_macro (save_defined); /* clean up special symbol */
+
+ value = parse_c_expression (temp_obuf.buf);
+
+ free (temp_obuf.buf);
+
+ return value;
+}
+
+/*
+ * routine to handle ifdef/ifndef. Try to look up the symbol,
+ * then do or don't skip to the #endif/#else/#elif depending
+ * on what directive is actually being processed.
+ */
+
+static int
+do_xifdef (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ int skip;
+ FILE_BUF *ip = &instack[indepth];
+ U_CHAR *end;
+ int start_of_file = 0;
+ U_CHAR *control_macro = 0;
+
+ /* Detect a #ifndef at start of file (not counting comments). */
+ if (ip->fname != 0 && keyword->type == T_IFNDEF) {
+ U_CHAR *p = ip->buf;
+ while (p != directive_start) {
+ U_CHAR c = *p++;
+ if (is_space[c])
+ ;
+ else if (c == '/' && p != ip->bufp && *p == '*') {
+ /* Skip this comment. */
+ int junk = 0;
+ U_CHAR *save_bufp = ip->bufp;
+ ip->bufp = p + 1;
+ p = skip_to_end_of_comment (ip, &junk, 1);
+ ip->bufp = save_bufp;
+ } else {
+ goto fail;
+ }
+ }
+ /* If we get here, this conditional is the beginning of the file. */
+ start_of_file = 1;
+ fail: ;
+ }
+
+ /* Discard leading and trailing whitespace. */
+ SKIP_WHITE_SPACE (buf);
+ while (limit != buf && is_hor_space[limit[-1]]) limit--;
+
+ /* Find the end of the identifier at the beginning. */
+ for (end = buf; is_idchar[*end]; end++);
+
+ if (end == buf) {
+ skip = (keyword->type == T_IFDEF);
+ if (! traditional)
+ pedwarn (end == limit ? "`#%s' with no argument"
+ : "`#%s' argument starts with punctuation",
+ keyword->name);
+ } else {
+ HASHNODE *hp;
+
+ if (pedantic && buf[0] >= '0' && buf[0] <= '9')
+ pedwarn ("`#%s' argument starts with a digit", keyword->name);
+ else if (end != limit && !traditional)
+ pedwarn ("garbage at end of `#%s' argument", keyword->name);
+
+ hp = lookup (buf, end-buf, -1);
+
+ if (pcp_outfile) {
+ /* Output a precondition for this macro. */
+ if (hp && hp->value.defn->predefined)
+ fprintf (pcp_outfile, "#define %s\n", hp->name);
+ else {
+ U_CHAR *cp = buf;
+ fprintf (pcp_outfile, "#undef ");
+ while (is_idchar[*cp]) /* Ick! */
+ fputc (*cp++, pcp_outfile);
+ putc ('\n', pcp_outfile);
+ }
+ }
+
+ skip = (hp == NULL) ^ (keyword->type == T_IFNDEF);
+ if (start_of_file && !skip) {
+ control_macro = (U_CHAR *) xmalloc (end - buf + 1);
+ bcopy ((char *) buf, (char *) control_macro, end - buf);
+ control_macro[end - buf] = 0;
+ }
+ }
+
+ conditional_skip (ip, skip, T_IF, control_macro);
+ return 0;
+}
+
+/* Push TYPE on stack; then, if SKIP is nonzero, skip ahead.
+ If this is a #ifndef starting at the beginning of a file,
+ CONTROL_MACRO is the macro name tested by the #ifndef.
+ Otherwise, CONTROL_MACRO is 0. */
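+/* (The CONTROL_MACRO record is what later lets do_endif arrange, via
+   record_control_macro, that a file wrapped in `#ifndef FOO ... #endif'
+   is not read again once FOO is defined.)  */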
+
+static void
+conditional_skip (ip, skip, type, control_macro)
+ FILE_BUF *ip;
+ int skip;
+ enum node_type type;
+ U_CHAR *control_macro;
+{
+ IF_STACK_FRAME *temp;
+
+ temp = (IF_STACK_FRAME *) xcalloc (1, sizeof (IF_STACK_FRAME));
+ temp->fname = ip->nominal_fname;
+ temp->lineno = ip->lineno;
+ temp->next = if_stack;
+ temp->control_macro = control_macro;
+ if_stack = temp;
+
+ if_stack->type = type;
+
+ if (skip != 0) {
+ skip_if_group (ip, 0);
+ return;
+ } else {
+ ++if_stack->if_succeeded;
+ output_line_command (ip, &outbuf, 1, same_file);
+ }
+}
+
+/*
+ * skip to #endif, #else, or #elif. adjust line numbers, etc.
+ * leaves input ptr at the sharp sign found.
+ * If ANY is nonzero, return at next directive of any sort.
+ */
+static void
+skip_if_group (ip, any)
+ FILE_BUF *ip;
+ int any;
+{
+ register U_CHAR *bp = ip->bufp, *cp;
+ register U_CHAR *endb = ip->buf + ip->length;
+ struct directive *kt;
+ IF_STACK_FRAME *save_if_stack = if_stack; /* don't pop past here */
+ U_CHAR *beg_of_line = bp;
+ register int ident_length;
+ U_CHAR *ident, *after_ident;
+
+ while (bp < endb) {
+ switch (*bp++) {
+ case '/': /* possible comment */
+ if (*bp == '\\' && bp[1] == '\n')
+ newline_fix (bp);
+ if (*bp == '*'
+ || (cplusplus_comments && *bp == '/')) {
+ ip->bufp = ++bp;
+ bp = skip_to_end_of_comment (ip, &ip->lineno, 0);
+ }
+ break;
+ case '\"':
+ case '\'':
+ bp = skip_quoted_string (bp - 1, endb, ip->lineno, &ip->lineno,
+ NULL_PTR, NULL_PTR);
+ break;
+ case '\\':
+ /* Char after backslash loses its special meaning. */
+ if (bp < endb) {
+ if (*bp == '\n')
+ ++ip->lineno; /* But do update the line-count. */
+ bp++;
+ }
+ break;
+ case '\n':
+ ++ip->lineno;
+ beg_of_line = bp;
+ break;
+ case '#':
+ ip->bufp = bp - 1;
+
+ /* # keyword: a # must be first nonblank char on the line */
+ if (beg_of_line == 0)
+ break;
+ /* Scan from start of line, skipping whitespace, comments
+ and backslash-newlines, and see if we reach this #.
+ If not, this # is not special. */
+ bp = beg_of_line;
+ /* If -traditional, require # to be at beginning of line. */
+ if (!traditional)
+ while (1) {
+ if (is_hor_space[*bp])
+ bp++;
+ else if (*bp == '\\' && bp[1] == '\n')
+ bp += 2;
+ else if (*bp == '/' && bp[1] == '*') {
+ bp += 2;
+ while (!(*bp == '*' && bp[1] == '/'))
+ bp++;
+ bp += 2;
+ }
+ /* There is no point in trying to deal with C++ // comments here,
+ because if there is one, then this # must be part of the
+ comment and we would never reach here. */
+ else break;
+ }
+ if (bp != ip->bufp) {
+ bp = ip->bufp + 1; /* Reset bp to after the #. */
+ break;
+ }
+
+ bp = ip->bufp + 1; /* Point after the '#' */
+
+ /* Skip whitespace and \-newline. */
+ while (1) {
+ if (is_hor_space[*bp])
+ bp++;
+ else if (*bp == '\\' && bp[1] == '\n')
+ bp += 2;
+ else if (*bp == '/' && bp[1] == '*') {
+ bp += 2;
+ while (!(*bp == '*' && bp[1] == '/')) {
+ if (*bp == '\n')
+ ip->lineno++;
+ bp++;
+ }
+ bp += 2;
+ } else if (cplusplus_comments && *bp == '/' && bp[1] == '/') {
+ bp += 2;
+ while (bp[-1] == '\\' || *bp != '\n') {
+ if (*bp == '\n')
+ ip->lineno++;
+ bp++;
+ }
+ }
+ else break;
+ }
+
+ cp = bp;
+
+ /* Now find end of directive name.
+ If we encounter a backslash-newline, exchange it with any following
+ symbol-constituents so that we end up with a contiguous name. */
+
+ while (1) {
+ if (is_idchar[*bp])
+ bp++;
+ else {
+ if (*bp == '\\' && bp[1] == '\n')
+ name_newline_fix (bp);
+ if (is_idchar[*bp])
+ bp++;
+ else break;
+ }
+ }
+ ident_length = bp - cp;
+ ident = cp;
+ after_ident = bp;
+
+ /* A line of just `#' becomes blank. */
+
+ if (ident_length == 0 && *after_ident == '\n') {
+ continue;
+ }
+
+ if (ident_length == 0 || !is_idstart[*ident]) {
+ U_CHAR *p = ident;
+ while (is_idchar[*p]) {
+ if (*p < '0' || *p > '9')
+ break;
+ p++;
+ }
+ /* Handle # followed by a line number. */
+ if (p != ident && !is_idchar[*p]) {
+ if (pedantic)
+ pedwarn ("`#' followed by integer");
+ continue;
+ }
+
+ /* Avoid error for `###' and similar cases unless -pedantic. */
+ if (p == ident) {
+ while (*p == '#' || is_hor_space[*p]) p++;
+ if (*p == '\n') {
+ if (pedantic && !lang_asm)
+ pedwarn ("invalid preprocessor directive");
+ continue;
+ }
+ }
+
+ if (!lang_asm && pedantic)
+ pedwarn ("invalid preprocessor directive name");
+ continue;
+ }
+
+ for (kt = directive_table; kt->length >= 0; kt++) {
+ IF_STACK_FRAME *temp;
+ if (ident_length == kt->length
+ && strncmp (cp, kt->name, kt->length) == 0) {
+ /* If we are asked to return on next directive, do so now. */
+ if (any)
+ return;
+
+ switch (kt->type) {
+ case T_IF:
+ case T_IFDEF:
+ case T_IFNDEF:
+ temp = (IF_STACK_FRAME *) xcalloc (1, sizeof (IF_STACK_FRAME));
+ temp->next = if_stack;
+ if_stack = temp;
+ temp->lineno = ip->lineno;
+ temp->fname = ip->nominal_fname;
+ temp->type = kt->type;
+ break;
+ case T_ELSE:
+ case T_ENDIF:
+ if (pedantic && if_stack != save_if_stack)
+ validate_else (bp);
+ case T_ELIF:
+ if (if_stack == instack[indepth].if_stack) {
+ error ("`#%s' not within a conditional", kt->name);
+ break;
+ }
+ else if (if_stack == save_if_stack)
+ return; /* found what we came for */
+
+ if (kt->type != T_ENDIF) {
+ if (if_stack->type == T_ELSE)
+ error ("`#else' or `#elif' after `#else'");
+ if_stack->type = kt->type;
+ break;
+ }
+
+ temp = if_stack;
+ if_stack = if_stack->next;
+ free (temp);
+ break;
+ }
+ break;
+ }
+ }
+ /* Don't let erroneous code go by. */
+ if (kt->length < 0 && !lang_asm && pedantic)
+ pedwarn ("invalid preprocessor directive name");
+ }
+ }
+ ip->bufp = bp;
+ /* after this returns, rescan will exit because ip->bufp
+ now points to the end of the buffer.
+ rescan is responsible for the error message also. */
+}
+
+/*
+ * handle a #else directive. Do this by just continuing processing
+ * without changing if_stack; this is so that the error message
+ * for missing #endif's etc. will point to the original #if. It
+ * is possible that something different would be better.
+ */
+
+static int
+do_else (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ FILE_BUF *ip = &instack[indepth];
+
+ if (pedantic) {
+ SKIP_WHITE_SPACE (buf);
+ if (buf != limit)
+ pedwarn ("text following `#else' violates ANSI standard");
+ }
+
+ if (if_stack == instack[indepth].if_stack) {
+ error ("`#else' not within a conditional");
+ return 0;
+ } else {
+ /* #ifndef can't have its special treatment for containing the whole file
+ if it has a #else clause. */
+ if_stack->control_macro = 0;
+
+ if (if_stack->type != T_IF && if_stack->type != T_ELIF) {
+ error ("`#else' after `#else'");
+ fprintf (stderr, " (matches line %d", if_stack->lineno);
+ if (strcmp (if_stack->fname, ip->nominal_fname) != 0)
+ fprintf (stderr, ", file %s", if_stack->fname);
+ fprintf (stderr, ")\n");
+ }
+ if_stack->type = T_ELSE;
+ }
+
+ if (if_stack->if_succeeded)
+ skip_if_group (ip, 0);
+ else {
+ ++if_stack->if_succeeded; /* continue processing input */
+ output_line_command (ip, op, 1, same_file);
+ }
+ return 0;
+}
+
+/*
+ * unstack after #endif command
+ */
+
+static int
+do_endif (buf, limit, op, keyword)
+ U_CHAR *buf, *limit;
+ FILE_BUF *op;
+ struct directive *keyword;
+{
+ if (pedantic) {
+ SKIP_WHITE_SPACE (buf);
+ if (buf != limit)
+ pedwarn ("text following `#endif' violates ANSI standard");
+ }
+
+ if (if_stack == instack[indepth].if_stack)
+ error ("unbalanced `#endif'");
+ else {
+ IF_STACK_FRAME *temp = if_stack;
+ if_stack = if_stack->next;
+ if (temp->control_macro != 0) {
+ /* This #endif matched a #ifndef at the start of the file.
+ See if it is at the end of the file. */
+ FILE_BUF *ip = &instack[indepth];
+ U_CHAR *p = ip->bufp;
+ U_CHAR *ep = ip->buf + ip->length;
+
+ while (p != ep) {
+ U_CHAR c = *p++;
+ switch (c) {
+ case ' ':
+ case '\t':
+ case '\n':
+ break;
+ case '/':
+ if (p != ep && *p == '*') {
+ /* Skip this comment. */
+ int junk = 0;
+ U_CHAR *save_bufp = ip->bufp;
+ ip->bufp = p + 1;
+ p = skip_to_end_of_comment (ip, &junk, 1);
+ ip->bufp = save_bufp;
+ }
+ break;
+ default:
+ goto fail;
+ }
+ }
+ /* If we get here, this #endif ends a #ifndef
+ that contains all of the file (aside from whitespace).
+ Arrange not to include the file again
+ if the macro that was tested is defined.
+
+ Do not do this for the top-level file in a -include or any
+ file in a -imacros. */
+ if (indepth != 0
+ && ! (indepth == 1 && no_record_file)
+ && ! (no_record_file && no_output))
+ record_control_macro (ip->fname, temp->control_macro);
+ fail: ;
+ }
+ free (temp);
+ output_line_command (&instack[indepth], op, 1, same_file);
+ }
+ return 0;
+}
+
+/* When an #else or #endif is found while skipping a failed conditional,
+ if -pedantic was specified, this is called to warn about text after
+ the command name. P points to the first char after the command name. */
+
+static void
+validate_else (p)
+ register U_CHAR *p;
+{
+ /* Advance P over whitespace and comments. */
+ while (1) {
+ if (*p == '\\' && p[1] == '\n')
+ p += 2;
+ if (is_hor_space[*p])
+ p++;
+ else if (*p == '/') {
+ if (p[1] == '\\' && p[2] == '\n')
+ newline_fix (p + 1);
+ if (p[1] == '*') {
+ p += 2;
+ /* Don't bother warning about unterminated comments
+ since that will happen later. Just be sure to exit. */
+ while (*p) {
+ if (p[1] == '\\' && p[2] == '\n')
+ newline_fix (p + 1);
+ if (*p == '*' && p[1] == '/') {
+ p += 2;
+ break;
+ }
+ p++;
+ }
+ }
+ else if (cplusplus_comments && p[1] == '/') {
+ p += 2;
+ while (*p && (*p != '\n' || p[-1] == '\\'))
+ p++;
+ }
+ } else break;
+ }
+ if (*p && *p != '\n')
+ pedwarn ("text following `#else' or `#endif' violates ANSI standard");
+}
+
+/* Skip a comment, assuming the input ptr immediately follows the
+ initial slash-star. Bump *LINE_COUNTER for each newline.
+ (The canonical line counter is &ip->lineno.)
+ Don't use this routine (or the next one) if bumping the line
+ counter is not sufficient to deal with newlines in the string.
+
+ If NOWARN is nonzero, don't warn about slash-star inside a comment.
+ This feature is useful when processing a comment that is going to be
+ processed or was processed at another point in the preprocessor,
+ to avoid a duplicate warning. Likewise for unterminated comment errors. */
+
+static U_CHAR *
+skip_to_end_of_comment (ip, line_counter, nowarn)
+ register FILE_BUF *ip;
+ int *line_counter; /* place to remember newlines, or NULL */
+ int nowarn;
+{
+ register U_CHAR *limit = ip->buf + ip->length;
+ register U_CHAR *bp = ip->bufp;
+ FILE_BUF *op = &outbuf; /* JF */
+ int output = put_out_comments && !line_counter;
+ int start_line = line_counter ? *line_counter : 0;
+
+ /* JF this line_counter stuff is a crock to make sure the
+ comment is only put out once, no matter how many times
+     the comment is skipped.  It almost works. */
+ if (output) {
+ *op->bufp++ = '/';
+ *op->bufp++ = '*';
+ }
+ if (cplusplus_comments && bp[-1] == '/') {
+ if (output) {
+ while (bp < limit) {
+ *op->bufp++ = *bp;
+ if (*bp == '\n' && bp[-1] != '\\')
+ break;
+ if (*bp == '\n') {
+ ++*line_counter;
+ ++op->lineno;
+ }
+ bp++;
+ }
+ op->bufp[-1] = '*';
+ *op->bufp++ = '/';
+ *op->bufp++ = '\n';
+ } else {
+ while (bp < limit) {
+ if (bp[-1] != '\\' && *bp == '\n') {
+ break;
+ } else {
+ if (*bp == '\n' && line_counter)
+ ++*line_counter;
+ bp++;
+ }
+ }
+ }
+ ip->bufp = bp;
+ return bp;
+ }
+ while (bp < limit) {
+ if (output)
+ *op->bufp++ = *bp;
+ switch (*bp++) {
+ case '/':
+ if (warn_comments && !nowarn && bp < limit && *bp == '*')
+ warning ("`/*' within comment");
+ break;
+ case '\n':
+ /* If this is the end of the file, we have an unterminated comment.
+ Don't swallow the newline. We are guaranteed that there will be a
+ trailing newline and various pieces assume it's there. */
+ if (bp == limit)
+ {
+ --bp;
+ --limit;
+ break;
+ }
+ if (line_counter != NULL)
+ ++*line_counter;
+ if (output)
+ ++op->lineno;
+ break;
+ case '*':
+ if (*bp == '\\' && bp[1] == '\n')
+ newline_fix (bp);
+ if (*bp == '/') {
+ if (output)
+ *op->bufp++ = '/';
+ ip->bufp = ++bp;
+ return bp;
+ }
+ break;
+ }
+ }
+
+ if (!nowarn)
+ error_with_line (line_for_error (start_line), "unterminated comment");
+ ip->bufp = bp;
+ return bp;
+}
+
+/*
+ * Skip over a quoted string. BP points to the opening quote.
+ * Returns a pointer after the closing quote. Don't go past LIMIT.
+ * START_LINE is the line number of the starting point (but it need
+ * not be valid if the starting point is inside a macro expansion).
+ *
+ * The input stack state is not changed.
+ *
+ * If COUNT_NEWLINES is nonzero, it points to an int to increment
+ * for each newline passed.
+ *
+ * If BACKSLASH_NEWLINES_P is nonzero, store 1 thru it
+ * if we pass a backslash-newline.
+ *
+ * If EOFP is nonzero, set *EOFP to 1 if the string is unterminated.
+ */
+static U_CHAR *
+skip_quoted_string (bp, limit, start_line, count_newlines, backslash_newlines_p, eofp)
+ register U_CHAR *bp;
+ register U_CHAR *limit;
+ int start_line;
+ int *count_newlines;
+ int *backslash_newlines_p;
+ int *eofp;
+{
+ register U_CHAR c, match;
+
+ match = *bp++;
+ while (1) {
+ if (bp >= limit) {
+ error_with_line (line_for_error (start_line),
+ "unterminated string or character constant");
+ error_with_line (multiline_string_line,
+ "possible real start of unterminated constant");
+ multiline_string_line = 0;
+ if (eofp)
+ *eofp = 1;
+ break;
+ }
+ c = *bp++;
+ if (c == '\\') {
+ while (*bp == '\\' && bp[1] == '\n') {
+ if (backslash_newlines_p)
+ *backslash_newlines_p = 1;
+ if (count_newlines)
+ ++*count_newlines;
+ bp += 2;
+ }
+ if (*bp == '\n' && count_newlines) {
+ if (backslash_newlines_p)
+ *backslash_newlines_p = 1;
+ ++*count_newlines;
+ }
+ bp++;
+ } else if (c == '\n') {
+ if (traditional) {
+ /* Unterminated strings and character constants are 'legal'. */
+ bp--; /* Don't consume the newline. */
+ if (eofp)
+ *eofp = 1;
+ break;
+ }
+ if (pedantic || match == '\'') {
+ error_with_line (line_for_error (start_line),
+ "unterminated string or character constant");
+ bp--;
+ if (eofp)
+ *eofp = 1;
+ break;
+ }
+ /* If not traditional, then allow newlines inside strings. */
+ if (count_newlines)
+ ++*count_newlines;
+ if (multiline_string_line == 0)
+ multiline_string_line = start_line;
+ } else if (c == match)
+ break;
+ }
+ return bp;
+}
+
+/* Place into DST a quoted string representing the string SRC.
+ Return the address of DST's terminating null. */
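+/* (For example, the source string `a"b' is written out as `"a\"b"',
+   with nonprinting characters rendered as \ooo octal escapes.)  */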
+static char *
+quote_string (dst, src)
+ char *dst, *src;
+{
+ U_CHAR c;
+
+ *dst++ = '\"';
+ for (;;)
+ switch ((c = *src++))
+ {
+ default:
+ if (isprint (c))
+ *dst++ = c;
+ else
+ {
+ sprintf (dst, "\\%03o", c);
+ dst += 4;
+ }
+ break;
+
+ case '\"':
+ case '\\':
+ *dst++ = '\\';
+ *dst++ = c;
+ break;
+
+ case '\0':
+ *dst++ = '\"';
+ *dst = '\0';
+ return dst;
+ }
+}
+
+/* Skip across a group of balanced parens, starting from IP->bufp.
+ IP->bufp is updated. Use this with IP->bufp pointing at an open-paren.
+
+ This does not handle newlines, because it's used for the arg of #if,
+ where there aren't any newlines. Also, backslash-newline can't appear. */
+
+static U_CHAR *
+skip_paren_group (ip)
+ register FILE_BUF *ip;
+{
+ U_CHAR *limit = ip->buf + ip->length;
+ U_CHAR *p = ip->bufp;
+ int depth = 0;
+ int lines_dummy = 0;
+
+ while (p != limit) {
+ int c = *p++;
+ switch (c) {
+ case '(':
+ depth++;
+ break;
+
+ case ')':
+ depth--;
+ if (depth == 0)
+ return ip->bufp = p;
+ break;
+
+    case '/':
+      if (*p == '*') {
+	ip->bufp = p;
+	p = skip_to_end_of_comment (ip, &lines_dummy, 0);
+	p = ip->bufp;
+      }
+      /* Don't fall through into the string case below.  */
+      break;
+
+ case '"':
+ case '\'':
+ {
+ int eofp = 0;
+ p = skip_quoted_string (p - 1, limit, 0, NULL_PTR, NULL_PTR, &eofp);
+ if (eofp)
+ return ip->bufp = p;
+ }
+ break;
+ }
+ }
+
+ ip->bufp = p;
+ return p;
+}
+
+/*
+ * write out a #line command, for instance, after an #include file.
+ * If CONDITIONAL is nonzero, we can omit the #line if it would
+ * appear to be a no-op, and we can output a few newlines instead
+ * if we want to increase the line number by a small amount.
+ * FILE_CHANGE says whether we are entering a file, leaving, or neither.
+ */
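+/* (A typical emitted command is `# 15 "foo.h" 1 3': line 15 of foo.h,
+   which has just been entered and is a system header.)  */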
+
+static void
+output_line_command (ip, op, conditional, file_change)
+ FILE_BUF *ip, *op;
+ int conditional;
+ enum file_change_code file_change;
+{
+ int len;
+ char *line_cmd_buf, *line_end;
+
+ if (no_line_commands
+ || ip->fname == NULL
+ || no_output) {
+ op->lineno = ip->lineno;
+ return;
+ }
+
+ if (conditional) {
+ if (ip->lineno == op->lineno)
+ return;
+
+ /* If the inherited line number is a little too small,
+ output some newlines instead of a #line command. */
+ if (ip->lineno > op->lineno && ip->lineno < op->lineno + 8) {
+ check_expand (op, 10);
+ while (ip->lineno > op->lineno) {
+ *op->bufp++ = '\n';
+ op->lineno++;
+ }
+ return;
+ }
+ }
+
+ /* Don't output a line number of 0 if we can help it. */
+ if (ip->lineno == 0 && ip->bufp - ip->buf < ip->length
+ && *ip->bufp == '\n') {
+ ip->lineno++;
+ ip->bufp++;
+ }
+
+ line_cmd_buf = (char *) alloca (4 * strlen (ip->nominal_fname) + 100);
+#ifdef OUTPUT_LINE_COMMANDS
+ sprintf (line_cmd_buf, "#line %d ", ip->lineno);
+#else
+ sprintf (line_cmd_buf, "# %d ", ip->lineno);
+#endif
+ line_end = quote_string (line_cmd_buf + strlen (line_cmd_buf),
+ ip->nominal_fname);
+ if (file_change != same_file) {
+ *line_end++ = ' ';
+ *line_end++ = file_change == enter_file ? '1' : '2';
+ }
+ /* Tell cc1 if following text comes from a system header file. */
+ if (ip->system_header_p) {
+ *line_end++ = ' ';
+ *line_end++ = '3';
+ }
+#ifndef NO_IMPLICIT_EXTERN_C
+ /* Tell cc1plus if following text should be treated as C. */
+ if (ip->system_header_p == 2 && cplusplus) {
+ *line_end++ = ' ';
+ *line_end++ = '4';
+ }
+#endif
+ *line_end++ = '\n';
+ len = line_end - line_cmd_buf;
+ check_expand (op, len + 1);
+ if (op->bufp > op->buf && op->bufp[-1] != '\n')
+ *op->bufp++ = '\n';
+ bcopy ((char *) line_cmd_buf, (char *) op->bufp, len);
+ op->bufp += len;
+ op->lineno = ip->lineno;
+}
+
+/* This structure represents one parsed argument in a macro call.
+ `raw' points to the argument text as written (`raw_length' is its length).
+ `expanded' points to the argument's macro-expansion
+ (its length is `expand_length').
+ `stringified_length' is the length the argument would have
+ if stringified.
+ `use_count' is the number of times this macro arg is substituted
+ into the macro. If the actual use count exceeds 10,
+ the value stored is 10.
+ `free1' and `free2', if nonzero, point to blocks to be freed
+ when the macro argument data is no longer needed. */
+
+struct argdata {
+ U_CHAR *raw, *expanded;
+ int raw_length, expand_length;
+ int stringified_length;
+ U_CHAR *free1, *free2;
+ char newlines;
+ char comments;
+ char use_count;
+};
+
+/* Expand a macro call.
+ HP points to the symbol that is the macro being called.
+ Put the result of expansion onto the input stack
+ so that subsequent input by our caller will use it.
+
+ If macro wants arguments, caller has already verified that
+ an argument list follows; arguments come from the input stack. */
+
+static void
+macroexpand (hp, op)
+ HASHNODE *hp;
+ FILE_BUF *op;
+{
+ int nargs;
+ DEFINITION *defn = hp->value.defn;
+ register U_CHAR *xbuf;
+ int xbuf_len;
+ int start_line = instack[indepth].lineno;
+ int rest_args, rest_zero;
+
+ CHECK_DEPTH (return;);
+
+ /* it might not actually be a macro. */
+ if (hp->type != T_MACRO) {
+ special_symbol (hp, op);
+ return;
+ }
+
+ /* This macro is being used inside a #if, which means it must be */
+ /* recorded as a precondition. */
+ if (pcp_inside_if && pcp_outfile && defn->predefined)
+ dump_single_macro (hp, pcp_outfile);
+
+ nargs = defn->nargs;
+
+ if (nargs >= 0) {
+ register int i;
+ struct argdata *args;
+ char *parse_error = 0;
+
+ args = (struct argdata *) alloca ((nargs + 1) * sizeof (struct argdata));
+
+ for (i = 0; i < nargs; i++) {
+ args[i].raw = (U_CHAR *) "";
+ args[i].expanded = 0;
+ args[i].raw_length = args[i].expand_length
+ = args[i].stringified_length = 0;
+ args[i].free1 = args[i].free2 = 0;
+ args[i].use_count = 0;
+ }
+
+ /* Parse all the macro args that are supplied. I counts them.
+ The first NARGS args are stored in ARGS.
+ The rest are discarded.
+ If rest_args is set then we assume macarg absorbed the rest of the args.
+ */
+ i = 0;
+ rest_args = 0;
+ do {
+ /* Discard the open-parenthesis or comma before the next arg. */
+ ++instack[indepth].bufp;
+ if (rest_args)
+ continue;
+ if (i < nargs || (nargs == 0 && i == 0)) {
+ /* if we are working on last arg which absorbs rest of args... */
+ if (i == nargs - 1 && defn->rest_args)
+ rest_args = 1;
+ parse_error = macarg (&args[i], rest_args);
+ }
+ else
+ parse_error = macarg (NULL_PTR, 0);
+ if (parse_error) {
+ error_with_line (line_for_error (start_line), parse_error);
+ break;
+ }
+ i++;
+ } while (*instack[indepth].bufp != ')');
+
+ /* If we got one arg but it was just whitespace, call that 0 args. */
+ if (i == 1) {
+ register U_CHAR *bp = args[0].raw;
+ register U_CHAR *lim = bp + args[0].raw_length;
+ /* cpp.texi says for foo ( ) we provide one argument.
+ However, if foo wants just 0 arguments, treat this as 0. */
+ if (nargs == 0)
+ while (bp != lim && is_space[*bp]) bp++;
+ if (bp == lim)
+ i = 0;
+ }
+
+ /* Don't output an error message if we have already output one for
+ a parse error above. */
+ rest_zero = 0;
+ if (nargs == 0 && i > 0) {
+ if (! parse_error)
+ error ("arguments given to macro `%s'", hp->name);
+ } else if (i < nargs) {
+ /* traditional C allows foo() if foo wants one argument. */
+ if (nargs == 1 && i == 0 && traditional)
+ ;
+ /* the rest args token is allowed to absorb 0 tokens */
+ else if (i == nargs - 1 && defn->rest_args)
+ rest_zero = 1;
+ else if (parse_error)
+ ;
+ else if (i == 0)
+ error ("macro `%s' used without args", hp->name);
+ else if (i == 1)
+ error ("macro `%s' used with just one arg", hp->name);
+ else
+ error ("macro `%s' used with only %d args", hp->name, i);
+ } else if (i > nargs) {
+ if (! parse_error)
+ error ("macro `%s' used with too many (%d) args", hp->name, i);
+ }
+
+ /* Swallow the closeparen. */
+ ++instack[indepth].bufp;
+
+ /* If macro wants zero args, we parsed the arglist for checking only.
+ Read directly from the macro definition. */
+ if (nargs == 0) {
+ xbuf = defn->expansion;
+ xbuf_len = defn->length;
+ } else {
+ register U_CHAR *exp = defn->expansion;
+ register int offset; /* offset in expansion,
+ copied a piece at a time */
+ register int totlen; /* total amount of exp buffer filled so far */
+
+ register struct reflist *ap, *last_ap;
+
+ /* Macro really takes args. Compute the expansion of this call. */
+
+ /* Compute length in characters of the macro's expansion.
+ Also count number of times each arg is used. */
+ xbuf_len = defn->length;
+ for (ap = defn->pattern; ap != NULL; ap = ap->next) {
+ if (ap->stringify)
+ xbuf_len += args[ap->argno].stringified_length;
+ else if (ap->raw_before || ap->raw_after || traditional)
+ /* Add 4 for two newline-space markers to prevent
+ token concatenation. */
+ xbuf_len += args[ap->argno].raw_length + 4;
+ else {
+ /* We have an ordinary (expanded) occurrence of the arg.
+ So compute its expansion, if we have not already. */
+ if (args[ap->argno].expanded == 0) {
+ FILE_BUF obuf;
+ obuf = expand_to_temp_buffer (args[ap->argno].raw,
+ args[ap->argno].raw + args[ap->argno].raw_length,
+ 1, 0);
+
+ args[ap->argno].expanded = obuf.buf;
+ args[ap->argno].expand_length = obuf.length;
+ args[ap->argno].free2 = obuf.buf;
+ }
+
+ /* Add 4 for two newline-space markers to prevent
+ token concatenation. */
+ xbuf_len += args[ap->argno].expand_length + 4;
+ }
+ if (args[ap->argno].use_count < 10)
+ args[ap->argno].use_count++;
+ }
+
+ xbuf = (U_CHAR *) xmalloc (xbuf_len + 1);
+
+ /* Generate in XBUF the complete expansion
+ with arguments substituted in.
+ TOTLEN is the total size generated so far.
+ OFFSET is the index in the definition
+ of where we are copying from. */
+ offset = totlen = 0;
+ for (last_ap = NULL, ap = defn->pattern; ap != NULL;
+ last_ap = ap, ap = ap->next) {
+ register struct argdata *arg = &args[ap->argno];
+ int count_before = totlen;
+
+ /* Add chars to XBUF. */
+ for (i = 0; i < ap->nchars; i++, offset++)
+ xbuf[totlen++] = exp[offset];
+
+ /* If followed by an empty rest arg with concatenation,
+ delete the last run of nonwhite chars. */
+ if (rest_zero && totlen > count_before
+ && ((ap->rest_args && ap->raw_before)
+ || (last_ap != NULL && last_ap->rest_args
+ && last_ap->raw_after))) {
+ /* Delete final whitespace. */
+ while (totlen > count_before && is_space[xbuf[totlen - 1]]) {
+ totlen--;
+ }
+
+ /* Delete the nonwhites before them. */
+ while (totlen > count_before && ! is_space[xbuf[totlen - 1]]) {
+ totlen--;
+ }
+ }
+
+ if (ap->stringify != 0) {
+ int arglen = arg->raw_length;
+ int escaped = 0;
+ int in_string = 0;
+ int c;
+ i = 0;
+ while (i < arglen
+ && (c = arg->raw[i], is_space[c]))
+ i++;
+ while (i < arglen
+ && (c = arg->raw[arglen - 1], is_space[c]))
+ arglen--;
+ if (!traditional)
+ xbuf[totlen++] = '\"'; /* insert beginning quote */
+ for (; i < arglen; i++) {
+ c = arg->raw[i];
+
+ /* Special markers Newline Space
+ generate nothing for a stringified argument. */
+ if (c == '\n' && arg->raw[i+1] != '\n') {
+ i++;
+ continue;
+ }
+
+ /* Internal sequences of whitespace are replaced by one space
+ except within a string or char token. */
+ if (! in_string
+ && (c == '\n' ? arg->raw[i+1] == '\n' : is_space[c])) {
+ while (1) {
+ /* Note that Newline Space does occur within whitespace
+ sequences; consider it part of the sequence. */
+ if (c == '\n' && is_space[arg->raw[i+1]])
+ i += 2;
+ else if (c != '\n' && is_space[c])
+ i++;
+ else break;
+ c = arg->raw[i];
+ }
+ i--;
+ c = ' ';
+ }
+
+ if (escaped)
+ escaped = 0;
+ else {
+ if (c == '\\')
+ escaped = 1;
+ if (in_string) {
+ if (c == in_string)
+ in_string = 0;
+ } else if (c == '\"' || c == '\'')
+ in_string = c;
+ }
+
+ /* Escape these chars */
+ if (c == '\"' || (in_string && c == '\\'))
+ xbuf[totlen++] = '\\';
+ if (isprint (c))
+ xbuf[totlen++] = c;
+ else {
+ sprintf ((char *) &xbuf[totlen], "\\%03o", (unsigned int) c);
+ totlen += 4;
+ }
+ }
+ if (!traditional)
+ xbuf[totlen++] = '\"'; /* insert ending quote */
+ } else if (ap->raw_before || ap->raw_after || traditional) {
+ U_CHAR *p1 = arg->raw;
+ U_CHAR *l1 = p1 + arg->raw_length;
+ if (ap->raw_before) {
+ while (p1 != l1 && is_space[*p1]) p1++;
+ while (p1 != l1 && is_idchar[*p1])
+ xbuf[totlen++] = *p1++;
+ /* Delete any no-reexpansion marker that follows
+ an identifier at the beginning of the argument
+ if the argument is concatenated with what precedes it. */
+ if (p1[0] == '\n' && p1[1] == '-')
+ p1 += 2;
+ } else if (!traditional) {
+ /* Ordinary expanded use of the argument.
+ Put in newline-space markers to prevent token pasting. */
+ xbuf[totlen++] = '\n';
+ xbuf[totlen++] = ' ';
+ }
+ if (ap->raw_after) {
+ /* Arg is concatenated after: delete trailing whitespace,
+ whitespace markers, and no-reexpansion markers. */
+ while (p1 != l1) {
+ if (is_space[l1[-1]]) l1--;
+ else if (l1[-1] == '-') {
+ U_CHAR *p2 = l1 - 1;
+ /* If a `-' is preceded by an odd number of newlines then it
+ and the last newline are a no-reexpansion marker. */
+ while (p2 != p1 && p2[-1] == '\n') p2--;
+ if ((l1 - 1 - p2) & 1) {
+ l1 -= 2;
+ }
+ else break;
+ }
+ else break;
+ }
+ }
+
+ bcopy ((char *) p1, (char *) (xbuf + totlen), l1 - p1);
+ totlen += l1 - p1;
+ if (!traditional && !ap->raw_after) {
+ /* Ordinary expanded use of the argument.
+ Put in newline-space markers to prevent token pasting. */
+ xbuf[totlen++] = '\n';
+ xbuf[totlen++] = ' ';
+ }
+ } else {
+ /* Ordinary expanded use of the argument.
+ Put in newline-space markers to prevent token pasting. */
+ if (!traditional) {
+ xbuf[totlen++] = '\n';
+ xbuf[totlen++] = ' ';
+ }
+ bcopy ((char *) arg->expanded, (char *) (xbuf + totlen),
+ arg->expand_length);
+ totlen += arg->expand_length;
+ if (!traditional) {
+ xbuf[totlen++] = '\n';
+ xbuf[totlen++] = ' ';
+ }
+ /* If a macro argument with newlines is used multiple times,
+ then only expand the newlines once. This avoids creating output
+ lines which don't correspond to any input line, which confuses
+ gdb and gcov. */
+ if (arg->use_count > 1 && arg->newlines > 0) {
+ /* Don't bother doing change_newlines for subsequent
+ uses of arg. */
+ arg->use_count = 1;
+ arg->expand_length
+ = change_newlines (arg->expanded, arg->expand_length);
+ }
+ }
+
+ if (totlen > xbuf_len)
+ abort ();
+ }
+
+ /* if there is anything left of the definition
+ after handling the arg list, copy that in too. */
+
+ for (i = offset; i < defn->length; i++) {
+ /* if we've reached the end of the macro */
+ if (exp[i] == ')')
+ rest_zero = 0;
+ if (! (rest_zero && last_ap != NULL && last_ap->rest_args
+ && last_ap->raw_after))
+ xbuf[totlen++] = exp[i];
+ }
+
+ xbuf[totlen] = 0;
+ xbuf_len = totlen;
+
+ for (i = 0; i < nargs; i++) {
+ if (args[i].free1 != 0)
+ free (args[i].free1);
+ if (args[i].free2 != 0)
+ free (args[i].free2);
+ }
+ }
+ } else {
+ xbuf = defn->expansion;
+ xbuf_len = defn->length;
+ }
+
+ /* Now put the expansion on the input stack
+ so our caller will commence reading from it. */
+ {
+ register FILE_BUF *ip2;
+
+ ip2 = &instack[++indepth];
+
+ ip2->fname = 0;
+ ip2->nominal_fname = 0;
+ /* This may not be exactly correct, but will give much better error
+ messages for nested macro calls than using a line number of zero. */
+ ip2->lineno = start_line;
+ ip2->buf = xbuf;
+ ip2->length = xbuf_len;
+ ip2->bufp = xbuf;
+ ip2->free_ptr = (nargs > 0) ? xbuf : 0;
+ ip2->macro = hp;
+ ip2->if_stack = if_stack;
+ ip2->system_header_p = 0;
+
+ /* Recursive macro use sometimes works traditionally.
+ #define foo(x,y) bar (x (y,0), y)
+ foo (foo, baz) */
+
+ if (!traditional)
+ hp->type = T_DISABLED;
+ }
+}
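+
+/* Illustrative note (added commentary, not part of the original source):
+ because HP is marked T_DISABLED while its expansion is rescanned,
+ a self-referential macro stops after one level in ANSI mode: */
+#if 0
+#define foo (4 + foo)
+foo /* expands to (4 + foo), nothing more */
+#endif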
+
+/*
+ * Parse a macro argument and store the info on it into *ARGPTR.
+ * REST_ARGS is passed to macarg1 to make it absorb the rest of the args.
+ * Return nonzero to indicate a syntax error.
+ */
+
+static char *
+macarg (argptr, rest_args)
+ register struct argdata *argptr;
+ int rest_args;
+{
+ FILE_BUF *ip = &instack[indepth];
+ int paren = 0;
+ int newlines = 0;
+ int comments = 0;
+
+ /* Try to parse as much of the argument as exists at this
+ input stack level. */
+ U_CHAR *bp = macarg1 (ip->bufp, ip->buf + ip->length,
+ &paren, &newlines, &comments, rest_args);
+
+ /* If we find the end of the argument at this level,
+ set up *ARGPTR to point at it in the input stack. */
+ if (!(ip->fname != 0 && (newlines != 0 || comments != 0))
+ && bp != ip->buf + ip->length) {
+ if (argptr != 0) {
+ argptr->raw = ip->bufp;
+ argptr->raw_length = bp - ip->bufp;
+ argptr->newlines = newlines;
+ }
+ ip->bufp = bp;
+ } else {
+ /* This input stack level ends before the macro argument does.
+ We must pop levels and keep parsing.
+ Therefore, we must allocate a temporary buffer and copy
+ the macro argument into it. */
+ int bufsize = bp - ip->bufp;
+ int extra = newlines;
+ U_CHAR *buffer = (U_CHAR *) xmalloc (bufsize + extra + 1);
+ int final_start = 0;
+
+ bcopy ((char *) ip->bufp, (char *) buffer, bufsize);
+ ip->bufp = bp;
+ ip->lineno += newlines;
+
+ while (bp == ip->buf + ip->length) {
+ if (instack[indepth].macro == 0) {
+ free (buffer);
+ return "unterminated macro call";
+ }
+ ip->macro->type = T_MACRO;
+ if (ip->free_ptr)
+ free (ip->free_ptr);
+ ip = &instack[--indepth];
+ newlines = 0;
+ comments = 0;
+ bp = macarg1 (ip->bufp, ip->buf + ip->length, &paren,
+ &newlines, &comments, rest_args);
+ final_start = bufsize;
+ bufsize += bp - ip->bufp;
+ extra += newlines;
+ buffer = (U_CHAR *) xrealloc (buffer, bufsize + extra + 1);
+ bcopy ((char *) ip->bufp, (char *) (buffer + bufsize - (bp - ip->bufp)),
+ bp - ip->bufp);
+ ip->bufp = bp;
+ ip->lineno += newlines;
+ }
+
+ /* Now, if arg is actually wanted, record its raw form,
+ discarding comments and duplicating newlines in whatever
+ part of it did not come from a macro expansion.
+ EXTRA space has been preallocated for duplicating the newlines.
+ FINAL_START is the index of the start of that part. */
+ if (argptr != 0) {
+ argptr->raw = buffer;
+ argptr->raw_length = bufsize;
+ argptr->free1 = buffer;
+ argptr->newlines = newlines;
+ argptr->comments = comments;
+ if ((newlines || comments) && ip->fname != 0)
+ argptr->raw_length
+ = final_start +
+ discard_comments (argptr->raw + final_start,
+ argptr->raw_length - final_start,
+ newlines);
+ argptr->raw[argptr->raw_length] = 0;
+ if (argptr->raw_length > bufsize + extra)
+ abort ();
+ }
+ }
+
+ /* If we are not discarding this argument,
+ macroexpand it and compute its length as stringified.
+ All this info goes into *ARGPTR. */
+
+ if (argptr != 0) {
+ register U_CHAR *buf, *lim;
+ register int totlen;
+
+ buf = argptr->raw;
+ lim = buf + argptr->raw_length;
+
+ while (buf != lim && is_space[*buf])
+ buf++;
+ while (buf != lim && is_space[lim[-1]])
+ lim--;
+ totlen = traditional ? 0 : 2; /* Count opening and closing quote. */
+ while (buf != lim) {
+ register U_CHAR c = *buf++;
+ totlen++;
+ /* Internal sequences of whitespace are replaced by one space
+ in most cases, but not always. So count all the whitespace
+ in case we need to keep it all. */
+#if 0
+ if (is_space[c])
+ SKIP_ALL_WHITE_SPACE (buf);
+ else
+#endif
+ if (c == '\"' || c == '\\') /* escape these chars */
+ totlen++;
+ else if (!isprint (c))
+ totlen += 3;
+ }
+ argptr->stringified_length = totlen;
+ }
+ return 0;
+}
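+
+/* Worked example (added commentary, not part of the original source):
+ for a raw argument `ab"c' the loop above counts 2 for the quotes,
+ 4 for the characters, and 1 more for escaping the `"', giving
+ stringified_length 7 -- the length of `"ab\"c"' that the stringify
+ code in macroexpand will emit. */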
+
+/* Scan text from START (inclusive) up to LIMIT (exclusive),
+ counting parens in *DEPTHPTR,
+ and return when LIMIT is reached,
+ or before a `)' that would make *DEPTHPTR negative,
+ or before a comma when *DEPTHPTR is zero.
+ Single and double quotes are matched and termination
+ is inhibited within them. Comments also inhibit it.
+ Value returned is pointer to stopping place.
+
+ Increment *NEWLINES each time a newline is passed.
+ REST_ARGS notifies macarg1 that it should absorb the rest of the args.
+ Set *COMMENTS to 1 if a comment is seen. */
+
+static U_CHAR *
+macarg1 (start, limit, depthptr, newlines, comments, rest_args)
+ U_CHAR *start;
+ register U_CHAR *limit;
+ int *depthptr, *newlines, *comments;
+ int rest_args;
+{
+ register U_CHAR *bp = start;
+
+ while (bp < limit) {
+ switch (*bp) {
+ case '(':
+ (*depthptr)++;
+ break;
+ case ')':
+ if (--(*depthptr) < 0)
+ return bp;
+ break;
+ case '\\':
+ /* Traditionally, backslash makes following char not special. */
+ if (bp + 1 < limit && traditional)
+ {
+ bp++;
+ /* But count source lines anyway. */
+ if (*bp == '\n')
+ ++*newlines;
+ }
+ break;
+ case '\n':
+ ++*newlines;
+ break;
+ case '/':
+ if (bp[1] == '\\' && bp[2] == '\n')
+ newline_fix (bp + 1);
+ if (cplusplus_comments && bp[1] == '/') {
+ *comments = 1;
+ bp += 2;
+ while (bp < limit && (*bp != '\n' || bp[-1] == '\\')) {
+ if (*bp == '\n') ++*newlines;
+ bp++;
+ }
+ break;
+ }
+ if (bp[1] != '*' || bp + 1 >= limit)
+ break;
+ *comments = 1;
+ bp += 2;
+ while (bp + 1 < limit) {
+ if (bp[0] == '*'
+ && bp[1] == '\\' && bp[2] == '\n')
+ newline_fix (bp + 1);
+ if (bp[0] == '*' && bp[1] == '/')
+ break;
+ if (*bp == '\n') ++*newlines;
+ bp++;
+ }
+ break;
+ case '\'':
+ case '\"':
+ {
+ int quotec;
+ for (quotec = *bp++; bp + 1 < limit && *bp != quotec; bp++) {
+ if (*bp == '\\') {
+ bp++;
+ if (*bp == '\n')
+ ++*newlines;
+ while (*bp == '\\' && bp[1] == '\n') {
+ bp += 2;
+ }
+ } else if (*bp == '\n') {
+ ++*newlines;
+ if (quotec == '\'')
+ break;
+ }
+ }
+ }
+ break;
+ case ',':
+ /* if we've returned to the lowest level and we aren't absorbing all args */
+ if ((*depthptr) == 0 && rest_args == 0)
+ return bp;
+ break;
+ }
+ bp++;
+ }
+
+ return bp;
+}
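+
+/* Illustrative sketch (added, not part of the original source): */
+#if 0
+static void
+macarg1_example ()
+{
+ U_CHAR text[] = "a, f(b, c), d)";
+ int depth = 0, newlines = 0, comments = 0;
+ U_CHAR *bp;
+
+ /* Stops before the comma after `a'; the commas inside f(b, c)
+ are protected by the paren depth count. */
+ bp = macarg1 (text, text + sizeof text - 1,
+ &depth, &newlines, &comments, 0);
+ /* With REST_ARGS nonzero, commas no longer terminate, and the
+ same call would stop only before the final unmatched `)'. */
+}
+#endif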
+
+/* Discard comments and duplicate newlines
+ in the string of length LENGTH at START,
+ except inside of string constants.
+ The string is copied into itself with its beginning staying fixed.
+
+ NEWLINES is the number of newlines that must be duplicated.
+ We assume that that much extra space is available past the end
+ of the string. */
+
+static int
+discard_comments (start, length, newlines)
+ U_CHAR *start;
+ int length;
+ int newlines;
+{
+ register U_CHAR *ibp;
+ register U_CHAR *obp;
+ register U_CHAR *limit;
+ register int c;
+
+ /* If we have newlines to duplicate, copy everything
+ that many characters up. Then, in the second part,
+ we will have room to insert the newlines
+ while copying down.
+ NEWLINES may actually be too large, because it counts
+ newlines in string constants, and we don't duplicate those.
+ But that does no harm. */
+ if (newlines > 0) {
+ ibp = start + length;
+ obp = ibp + newlines;
+ limit = start;
+ while (limit != ibp)
+ *--obp = *--ibp;
+ }
+
+ ibp = start + newlines;
+ limit = start + length + newlines;
+ obp = start;
+
+ while (ibp < limit) {
+ *obp++ = c = *ibp++;
+ switch (c) {
+ case '\n':
+ /* Duplicate the newline. */
+ *obp++ = '\n';
+ break;
+
+ case '\\':
+ if (*ibp == '\n') {
+ obp--;
+ ibp++;
+ }
+ break;
+
+ case '/':
+ if (*ibp == '\\' && ibp[1] == '\n')
+ newline_fix (ibp);
+ /* Delete any comment. */
+ if (cplusplus_comments && ibp[0] == '/') {
+ /* Comments are equivalent to spaces. */
+ obp[-1] = ' ';
+ ibp++;
+ while (ibp < limit && (*ibp != '\n' || ibp[-1] == '\\'))
+ ibp++;
+ break;
+ }
+ if (ibp[0] != '*' || ibp + 1 >= limit)
+ break;
+ /* Comments are equivalent to spaces. */
+ obp[-1] = ' ';
+ ibp++;
+ while (ibp + 1 < limit) {
+ if (ibp[0] == '*'
+ && ibp[1] == '\\' && ibp[2] == '\n')
+ newline_fix (ibp + 1);
+ if (ibp[0] == '*' && ibp[1] == '/')
+ break;
+ ibp++;
+ }
+ ibp += 2;
+ break;
+
+ case '\'':
+ case '\"':
+ /* Notice and skip strings, so that we don't
+ think that comments start inside them,
+ and so we don't duplicate newlines in them. */
+ {
+ int quotec = c;
+ while (ibp < limit) {
+ *obp++ = c = *ibp++;
+ if (c == quotec)
+ break;
+ if (c == '\n' && quotec == '\'')
+ break;
+ if (c == '\\' && ibp < limit) {
+ while (*ibp == '\\' && ibp[1] == '\n')
+ ibp += 2;
+ *obp++ = *ibp++;
+ }
+ }
+ }
+ break;
+ }
+ }
+
+ return obp - start;
+}
+
+/* Turn newlines to spaces in the string of length LENGTH at START,
+ except inside of string constants.
+ The string is copied into itself with its beginning staying fixed. */
+
+static int
+change_newlines (start, length)
+ U_CHAR *start;
+ int length;
+{
+ register U_CHAR *ibp;
+ register U_CHAR *obp;
+ register U_CHAR *limit;
+ register int c;
+
+ ibp = start;
+ limit = start + length;
+ obp = start;
+
+ while (ibp < limit) {
+ *obp++ = c = *ibp++;
+ switch (c) {
+ case '\n':
+ /* If this is a NEWLINE NEWLINE, then this is a real newline in the
+ string. Skip past the newline and its duplicate.
+ Put a space in the output. */
+ if (*ibp == '\n')
+ {
+ ibp++;
+ obp--;
+ *obp++ = ' ';
+ }
+ break;
+
+ case '\'':
+ case '\"':
+ /* Notice and skip strings, so that we don't delete newlines in them. */
+ {
+ int quotec = c;
+ while (ibp < limit) {
+ *obp++ = c = *ibp++;
+ if (c == quotec)
+ break;
+ if (c == '\n' && quotec == '\'')
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ return obp - start;
+}
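+
+/* Worked example (added, not part of the original source):
+ discard_comments doubles each source newline, and change_newlines
+ later folds each doubled newline into a single space. */
+#if 0
+static void
+newline_marker_example ()
+{
+ U_CHAR buf[8] = "a\nb"; /* extra room for the duplicated newline */
+ int len;
+
+ len = discard_comments (buf, 3, 1); /* buf = "a\n\nb", len = 4 */
+ len = change_newlines (buf, len); /* buf = "a b", len = 3 */
+}
+#endif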
+
+/*
+ * error - print error message and increment count of errors.
+ */
+
+void
+error (msg, arg1, arg2, arg3)
+ char *msg;
+ char *arg1, *arg2, *arg3;
+{
+ int i;
+ FILE_BUF *ip = NULL;
+
+ print_containing_files ();
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+
+ if (ip != NULL)
+ fprintf (stderr, "%s:%d: ", ip->nominal_fname, ip->lineno);
+ fprintf (stderr, msg, arg1, arg2, arg3);
+ fprintf (stderr, "\n");
+ errors++;
+}
+
+/* Error including a message from `errno'. */
+
+static void
+error_from_errno (name)
+ char *name;
+{
+ int i;
+ FILE_BUF *ip = NULL;
+
+ print_containing_files ();
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+
+ if (ip != NULL)
+ fprintf (stderr, "%s:%d: ", ip->nominal_fname, ip->lineno);
+
+ if (errno < sys_nerr)
+ fprintf (stderr, "%s: %s\n", name, sys_errlist[errno]);
+ else
+ fprintf (stderr, "%s: undocumented I/O error\n", name);
+
+ errors++;
+}
+
+/* Print a warning message; it is counted as an error only if
+ warnings are being treated as errors. */
+
+void
+warning (msg, arg1, arg2, arg3)
+ char *msg;
+ char *arg1, *arg2, *arg3;
+{
+ int i;
+ FILE_BUF *ip = NULL;
+
+ if (inhibit_warnings)
+ return;
+
+ if (warnings_are_errors)
+ errors++;
+
+ print_containing_files ();
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+
+ if (ip != NULL)
+ fprintf (stderr, "%s:%d: ", ip->nominal_fname, ip->lineno);
+ fprintf (stderr, "warning: ");
+ fprintf (stderr, msg, arg1, arg2, arg3);
+ fprintf (stderr, "\n");
+}
+
+static void
+error_with_line (line, msg, arg1, arg2, arg3)
+ int line;
+ char *msg;
+ char *arg1, *arg2, *arg3;
+{
+ int i;
+ FILE_BUF *ip = NULL;
+
+ print_containing_files ();
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+
+ if (ip != NULL)
+ fprintf (stderr, "%s:%d: ", ip->nominal_fname, line);
+ fprintf (stderr, msg, arg1, arg2, arg3);
+ fprintf (stderr, "\n");
+ errors++;
+}
+
+static void
+warning_with_line (line, msg, arg1, arg2, arg3)
+ int line;
+ char *msg;
+ char *arg1, *arg2, *arg3;
+{
+ int i;
+ FILE_BUF *ip = NULL;
+
+ if (inhibit_warnings)
+ return;
+
+ if (warnings_are_errors)
+ errors++;
+
+ print_containing_files ();
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+
+ if (ip != NULL)
+ fprintf (stderr, "%s:%d: ", ip->nominal_fname, line);
+ fprintf (stderr, "warning: ");
+ fprintf (stderr, msg, arg1, arg2, arg3);
+ fprintf (stderr, "\n");
+}
+
+/* Print an error message if pedantic_errors, otherwise a warning. */
+
+void
+pedwarn (msg, arg1, arg2, arg3)
+ char *msg;
+ char *arg1, *arg2, *arg3;
+{
+ if (pedantic_errors)
+ error (msg, arg1, arg2, arg3);
+ else
+ warning (msg, arg1, arg2, arg3);
+}
+
+void
+pedwarn_with_line (line, msg, arg1, arg2, arg3)
+ int line;
+ char *msg;
+ char *arg1, *arg2, *arg3;
+{
+ if (pedantic_errors)
+ error_with_line (line, msg, arg1, arg2, arg3);
+ else
+ warning_with_line (line, msg, arg1, arg2, arg3);
+}
+
+/* Report a warning (or an error if pedantic_errors)
+ giving specified file name and line number, not current. */
+
+static void
+pedwarn_with_file_and_line (file, line, msg, arg1, arg2, arg3)
+ char *file;
+ int line;
+ char *msg;
+ char *arg1, *arg2, *arg3;
+{
+ if (!pedantic_errors && inhibit_warnings)
+ return;
+ if (file != NULL)
+ fprintf (stderr, "%s:%d: ", file, line);
+ if (pedantic_errors)
+ errors++;
+ if (!pedantic_errors)
+ fprintf (stderr, "warning: ");
+ fprintf (stderr, msg, arg1, arg2, arg3);
+ fprintf (stderr, "\n");
+}
+
+/* Print the file names and line numbers of the #include
+ commands which led to the current file. */
+
+static void
+print_containing_files ()
+{
+ FILE_BUF *ip = NULL;
+ int i;
+ int first = 1;
+
+ /* If stack of files hasn't changed since we last printed
+ this info, don't repeat it. */
+ if (last_error_tick == input_file_stack_tick)
+ return;
+
+ for (i = indepth; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ break;
+ }
+
+ /* Give up if we don't find a source file. */
+ if (ip == NULL)
+ return;
+
+ /* Find the other, outer source files. */
+ for (i--; i >= 0; i--)
+ if (instack[i].fname != NULL) {
+ ip = &instack[i];
+ if (first) {
+ first = 0;
+ fprintf (stderr, "In file included");
+ } else {
+ fprintf (stderr, ",\n ");
+ }
+
+ fprintf (stderr, " from %s:%d", ip->nominal_fname, ip->lineno);
+ }
+ if (! first)
+ fprintf (stderr, ":\n");
+
+ /* Record we have printed the status as of this time. */
+ last_error_tick = input_file_stack_tick;
+}
+
+/* Return the line at which an error occurred.
+ The error is not necessarily associated with the current spot
+ in the input stack, so LINE says where. LINE will have been
+ copied from ip->lineno for the current input level.
+ If the current level is for a file, we return LINE.
+ But if the current level is not for a file, LINE is meaningless.
+ In that case, we return the lineno of the innermost file. */
+
+static int
+line_for_error (line)
+ int line;
+{
+ int i;
+ int line1 = line;
+
+ for (i = indepth; i >= 0; ) {
+ if (instack[i].fname != 0)
+ return line1;
+ i--;
+ if (i < 0)
+ return 0;
+ line1 = instack[i].lineno;
+ }
+ abort ();
+ /*NOTREACHED*/
+ return 0;
+}
+
+/*
+ * If OBUF doesn't have NEEDED bytes after OPTR, make it bigger.
+ *
+ * As things stand, nothing is ever placed in the output buffer to be
+ * removed again except when it's KNOWN to be part of an identifier,
+ * so flushing and moving down everything left, instead of expanding,
+ * should work ok.
+ */
+
+/* You might think void was cleaner for the return type,
+ but that would get type mismatch in check_expand in strict ANSI. */
+static int
+grow_outbuf (obuf, needed)
+ register FILE_BUF *obuf;
+ register int needed;
+{
+ register U_CHAR *p;
+ int minsize;
+
+ if (obuf->length - (obuf->bufp - obuf->buf) > needed)
+ return 0;
+
+ /* Make it at least twice as big as it is now. */
+ obuf->length *= 2;
+ /* Make it have at least 150% of the free space we will need. */
+ minsize = (3 * needed) / 2 + (obuf->bufp - obuf->buf);
+ if (minsize > obuf->length)
+ obuf->length = minsize;
+
+ if ((p = (U_CHAR *) xrealloc (obuf->buf, obuf->length)) == NULL)
+ memory_full ();
+
+ obuf->bufp = p + (obuf->bufp - obuf->buf);
+ obuf->buf = p;
+
+ return 0;
+}
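+
+/* Typical use (illustrative, added; SRC and LEN are placeholders for
+ the caller's data -- the real callers go through check_expand): */
+#if 0
+ grow_outbuf (obuf, len); /* no-op if there is already room */
+ bcopy ((char *) src, (char *) obuf->bufp, len);
+ obuf->bufp += len;
+#endif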
+
+/* Symbol table for macro names and special symbols */
+
+/*
+ * install a name in the main hash table, even if it is already there.
+ * name stops with the first non-alphanumeric char, except a leading '#'.
+ * caller must check against redefinition if that is desired.
+ * delete_macro () removes things installed by install () in fifo order.
+ * this is important because of the `defined' special symbol used
+ * in #if, and also if pushdef/popdef directives are ever implemented.
+ *
+ * If LEN is >= 0, it is the length of the name.
+ * Otherwise, compute the length by scanning the entire name.
+ *
+ * If HASH is >= 0, it is the precomputed hash code.
+ * Otherwise, compute the hash code.
+ */
+static HASHNODE *
+install (name, len, type, ivalue, value, hash)
+ U_CHAR *name;
+ int len;
+ enum node_type type;
+ int ivalue;
+ char *value;
+ int hash;
+{
+ register HASHNODE *hp;
+ register int i, bucket;
+ register U_CHAR *p, *q;
+
+ if (len < 0) {
+ p = name;
+ while (is_idchar[*p])
+ p++;
+ len = p - name;
+ }
+
+ if (hash < 0)
+ hash = hashf (name, len, HASHSIZE);
+
+ i = sizeof (HASHNODE) + len + 1;
+ hp = (HASHNODE *) xmalloc (i);
+ bucket = hash;
+ hp->bucket_hdr = &hashtab[bucket];
+ hp->next = hashtab[bucket];
+ hashtab[bucket] = hp;
+ hp->prev = NULL;
+ if (hp->next != NULL)
+ hp->next->prev = hp;
+ hp->type = type;
+ hp->length = len;
+ if (hp->type == T_CONST)
+ hp->value.ival = ivalue;
+ else
+ hp->value.cpval = value;
+ hp->name = ((U_CHAR *) hp) + sizeof (HASHNODE);
+ p = hp->name;
+ q = name;
+ for (i = 0; i < len; i++)
+ *p++ = *q++;
+ hp->name[len] = 0;
+ return hp;
+}
+
+/*
+ * find the most recent hash node for the name NAME (ending with the
+ * first non-identifier char) installed by install
+ *
+ * If LEN is >= 0, it is the length of the name.
+ * Otherwise, compute the length by scanning the entire name.
+ *
+ * If HASH is >= 0, it is the precomputed hash code.
+ * Otherwise, compute the hash code.
+ */
+HASHNODE *
+lookup (name, len, hash)
+ U_CHAR *name;
+ int len;
+ int hash;
+{
+ register U_CHAR *bp;
+ register HASHNODE *bucket;
+
+ if (len < 0) {
+ for (bp = name; is_idchar[*bp]; bp++) ;
+ len = bp - name;
+ }
+
+ if (hash < 0)
+ hash = hashf (name, len, HASHSIZE);
+
+ bucket = hashtab[hash];
+ while (bucket) {
+ if (bucket->length == len && strncmp (bucket->name, name, len) == 0)
+ return bucket;
+ bucket = bucket->next;
+ }
+ return NULL;
+}
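+
+/* Typical use (illustrative, added): LEN and HASH may both be passed
+ as -1, in which case lookup computes them itself. */
+#if 0
+ HASHNODE *hp = lookup ((U_CHAR *) "__LINE__", -1, -1);
+ if (hp != NULL && hp->type == T_SPECLINE)
+ ... /* found the builtin */
+#endif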
+
+/*
+ * Delete a hash node. Some weirdness to free junk from macros.
+ * More such weirdness will have to be added if you define more hash
+ * types that need it.
+ */
+
+/* Note that the DEFINITION of a macro is removed from the hash table
+ but its storage is not freed. This would be a storage leak
+ except that it is not reasonable to keep undefining and redefining
+ large numbers of macros many times.
+ In any case, this is necessary, because a macro can be #undef'd
+ in the middle of reading the arguments to a call to it.
+ If #undef freed the DEFINITION, that would crash. */
+
+static void
+delete_macro (hp)
+ HASHNODE *hp;
+{
+
+ if (hp->prev != NULL)
+ hp->prev->next = hp->next;
+ if (hp->next != NULL)
+ hp->next->prev = hp->prev;
+
+ /* make sure that the bucket chain header that
+ the deleted guy was on points to the right thing afterwards. */
+ if (hp == *hp->bucket_hdr)
+ *hp->bucket_hdr = hp->next;
+
+#if 0
+ if (hp->type == T_MACRO) {
+ DEFINITION *d = hp->value.defn;
+ struct reflist *ap, *nextap;
+
+ for (ap = d->pattern; ap != NULL; ap = nextap) {
+ nextap = ap->next;
+ free (ap);
+ }
+ free (d);
+ }
+#endif
+ free (hp);
+}
+
+/*
+ * return hash function on name. must be compatible with the one
+ * computed a step at a time, elsewhere
+ */
+static int
+hashf (name, len, hashsize)
+ register U_CHAR *name;
+ register int len;
+ int hashsize;
+{
+ register int r = 0;
+
+ while (len--)
+ r = HASHSTEP (r, *name++);
+
+ return MAKE_POS (r) % hashsize;
+}
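+
+/* Consistency check (illustrative, added): the scanning code hashes
+ identifiers a character at a time with HASHSTEP; hashf must always
+ land on the same bucket. */
+#if 0
+static void
+hashf_check ()
+{
+ U_CHAR name[] = "FOO";
+ int r = 0, i;
+
+ for (i = 0; i < 3; i++)
+ r = HASHSTEP (r, name[i]);
+ if (MAKE_POS (r) % HASHSIZE != hashf (name, 3, HASHSIZE))
+ abort ();
+}
+#endif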
+
+
+/* Dump the definition of a single macro HP to OF. */
+static void
+dump_single_macro (hp, of)
+ register HASHNODE *hp;
+ FILE *of;
+{
+ register DEFINITION *defn = hp->value.defn;
+ struct reflist *ap;
+ int offset;
+ int concat;
+
+
+ /* Print the definition of the macro HP. */
+
+ fprintf (of, "#define %s", hp->name);
+
+ if (defn->nargs >= 0) {
+ int i;
+
+ fprintf (of, "(");
+ for (i = 0; i < defn->nargs; i++) {
+ dump_arg_n (defn, i, of);
+ if (i + 1 < defn->nargs)
+ fprintf (of, ", ");
+ }
+ fprintf (of, ")");
+ }
+
+ fprintf (of, " ");
+
+ offset = 0;
+ concat = 0;
+ for (ap = defn->pattern; ap != NULL; ap = ap->next) {
+ dump_defn_1 (defn->expansion, offset, ap->nchars, of);
+ if (ap->nchars != 0)
+ concat = 0;
+ offset += ap->nchars;
+ if (ap->stringify)
+ fprintf (of, " #");
+ if (ap->raw_before && !concat)
+ fprintf (of, " ## ");
+ concat = 0;
+ dump_arg_n (defn, ap->argno, of);
+ if (ap->raw_after) {
+ fprintf (of, " ## ");
+ concat = 1;
+ }
+ }
+ dump_defn_1 (defn->expansion, offset, defn->length - offset, of);
+ fprintf (of, "\n");
+}
+
+/* Dump all macro definitions as #defines to stdout. */
+
+static void
+dump_all_macros ()
+{
+ int bucket;
+
+ for (bucket = 0; bucket < HASHSIZE; bucket++) {
+ register HASHNODE *hp;
+
+ for (hp = hashtab[bucket]; hp; hp = hp->next) {
+ if (hp->type == T_MACRO)
+ dump_single_macro (hp, stdout);
+ }
+ }
+}
+
+/* Output to OF a substring of a macro definition.
+ BASE is the beginning of the definition.
+ Output characters START thru LENGTH.
+ Discard newlines outside of strings, thus
+ converting funny-space markers to ordinary spaces. */
+
+static void
+dump_defn_1 (base, start, length, of)
+ U_CHAR *base;
+ int start;
+ int length;
+ FILE *of;
+{
+ U_CHAR *p = base + start;
+ U_CHAR *limit = base + start + length;
+
+ while (p < limit) {
+ if (*p == '\"' || *p == '\'') {
+ /* Copy quoted strings verbatim, newlines and all. */
+ U_CHAR *p1 = skip_quoted_string (p, limit, 0, NULL_PTR,
+ NULL_PTR, NULL_PTR);
+ fwrite (p, p1 - p, 1, of);
+ p = p1;
+ } else {
+ if (*p != '\n')
+ putc (*p, of);
+ p++;
+ }
+ }
+}
+
+/* Print the name of argument number ARGNUM of macro definition DEFN
+ to OF.
+ Recall that DEFN->args.argnames contains all the arg names
+ concatenated in reverse order with comma-space in between. */
+
+static void
+dump_arg_n (defn, argnum, of)
+ DEFINITION *defn;
+ int argnum;
+ FILE *of;
+{
+ register U_CHAR *p = defn->args.argnames;
+ while (argnum + 1 < defn->nargs) {
+ p = (U_CHAR *) index (p, ' ') + 1;
+ argnum++;
+ }
+
+ while (*p && *p != ',') {
+ putc (*p, of);
+ p++;
+ }
+}
+
+/* Initialize syntactic classifications of characters. */
+
+static void
+initialize_char_syntax ()
+{
+ register int i;
+
+ /*
+ * Set up is_idchar and is_idstart tables. These should be
+ * faster than saying (is_alpha (c) || c == '_'), etc.
+ * Set up these things before calling any routines tthat
+ * refer to them.
+ */
+ for (i = 'a'; i <= 'z'; i++) {
+ is_idchar[i - 'a' + 'A'] = 1;
+ is_idchar[i] = 1;
+ is_idstart[i - 'a' + 'A'] = 1;
+ is_idstart[i] = 1;
+ }
+ for (i = '0'; i <= '9'; i++)
+ is_idchar[i] = 1;
+ is_idchar['_'] = 1;
+ is_idstart['_'] = 1;
+ is_idchar['$'] = dollars_in_ident;
+ is_idstart['$'] = dollars_in_ident;
+
+ /* horizontal space table */
+ is_hor_space[' '] = 1;
+ is_hor_space['\t'] = 1;
+ is_hor_space['\v'] = 1;
+ is_hor_space['\f'] = 1;
+ is_hor_space['\r'] = 1;
+
+ is_space[' '] = 1;
+ is_space['\t'] = 1;
+ is_space['\v'] = 1;
+ is_space['\f'] = 1;
+ is_space['\n'] = 1;
+ is_space['\r'] = 1;
+}
+
+/* Initialize the built-in macros. */
+
+static void
+initialize_builtins (inp, outp)
+ FILE_BUF *inp;
+ FILE_BUF *outp;
+{
+ install ("__LINE__", -1, T_SPECLINE, 0, 0, -1);
+ install ("__DATE__", -1, T_DATE, 0, 0, -1);
+ install ("__FILE__", -1, T_FILE, 0, 0, -1);
+ install ("__BASE_FILE__", -1, T_BASE_FILE, 0, 0, -1);
+ install ("__INCLUDE_LEVEL__", -1, T_INCLUDE_LEVEL, 0, 0, -1);
+ install ("__VERSION__", -1, T_VERSION, 0, 0, -1);
+#ifndef NO_BUILTIN_SIZE_TYPE
+ install ("__SIZE_TYPE__", -1, T_SIZE_TYPE, 0, 0, -1);
+#endif
+#ifndef NO_BUILTIN_PTRDIFF_TYPE
+ install ("__PTRDIFF_TYPE__ ", -1, T_PTRDIFF_TYPE, 0, 0, -1);
+#endif
+ install ("__WCHAR_TYPE__", -1, T_WCHAR_TYPE, 0, 0, -1);
+ install ("__USER_LABEL_PREFIX__", -1, T_USER_LABEL_PREFIX_TYPE, 0, 0, -1);
+ install ("__REGISTER_PREFIX__", -1, T_REGISTER_PREFIX_TYPE, 0, 0, -1);
+ install ("__TIME__", -1, T_TIME, 0, 0, -1);
+ if (!traditional)
+ install ("__STDC__", -1, T_CONST, STDC_VALUE, 0, -1);
+ if (objc)
+ install ("__OBJC__", -1, T_CONST, 1, 0, -1);
+/* This is supplied using a -D by the compiler driver
+ so that it is present only when truly compiling with GNU C. */
+/* install ("__GNUC__", -1, T_CONST, 2, 0, -1); */
+
+ if (debug_output)
+ {
+ char directive[2048];
+ register struct directive *dp = &directive_table[0];
+ struct tm *timebuf = timestamp ();
+
+ sprintf (directive, " __BASE_FILE__ \"%s\"\n",
+ instack[0].nominal_fname);
+ output_line_command (inp, outp, 0, same_file);
+ pass_thru_directive (directive, &directive[strlen (directive)], outp, dp);
+
+ sprintf (directive, " __VERSION__ \"%s\"\n", version_string);
+ output_line_command (inp, outp, 0, same_file);
+ pass_thru_directive (directive, &directive[strlen (directive)], outp, dp);
+
+#ifndef NO_BUILTIN_SIZE_TYPE
+ sprintf (directive, " __SIZE_TYPE__ %s\n", SIZE_TYPE);
+ output_line_command (inp, outp, 0, same_file);
+ pass_thru_directive (directive, &directive[strlen (directive)], outp, dp);
+#endif
+
+#ifndef NO_BUILTIN_PTRDIFF_TYPE
+ sprintf (directive, " __PTRDIFF_TYPE__ %s\n", PTRDIFF_TYPE);
+ output_line_command (inp, outp, 0, same_file);
+ pass_thru_directive (directive, &directive[strlen (directive)], outp, dp);
+#endif
+
+ sprintf (directive, " __WCHAR_TYPE__ %s\n", wchar_type);
+ output_line_command (inp, outp, 0, same_file);
+ pass_thru_directive (directive, &directive[strlen (directive)], outp, dp);
+
+ sprintf (directive, " __DATE__ \"%s %2d %4d\"\n",
+ monthnames[timebuf->tm_mon],
+ timebuf->tm_mday, timebuf->tm_year + 1900);
+ output_line_command (inp, outp, 0, same_file);
+ pass_thru_directive (directive, &directive[strlen (directive)], outp, dp);
+
+ sprintf (directive, " __TIME__ \"%02d:%02d:%02d\"\n",
+ timebuf->tm_hour, timebuf->tm_min, timebuf->tm_sec);
+ output_line_command (inp, outp, 0, same_file);
+ pass_thru_directive (directive, &directive[strlen (directive)], outp, dp);
+
+ if (!traditional)
+ {
+ sprintf (directive, " __STDC__ 1");
+ output_line_command (inp, outp, 0, same_file);
+ pass_thru_directive (directive, &directive[strlen (directive)],
+ outp, dp);
+ }
+ if (objc)
+ {
+ sprintf (directive, " __OBJC__ 1");
+ output_line_command (inp, outp, 0, same_file);
+ pass_thru_directive (directive, &directive[strlen (directive)],
+ outp, dp);
+ }
+ }
+}
+
+/*
+ * process a given definition string, for initialization
+ * If STR is just an identifier, define it with value 1.
+ * If STR has anything after the identifier, then it should
+ * be identifier=definition.
+ */
+
+static void
+make_definition (str, op)
+ U_CHAR *str;
+ FILE_BUF *op;
+{
+ FILE_BUF *ip;
+ struct directive *kt;
+ U_CHAR *buf, *p;
+
+ buf = str;
+ p = str;
+ if (!is_idstart[*p]) {
+ error ("malformed option `-D %s'", str);
+ return;
+ }
+ while (is_idchar[*++p])
+ ;
+ if (*p == 0) {
+ buf = (U_CHAR *) alloca (p - buf + 4);
+ strcpy ((char *)buf, str);
+ strcat ((char *)buf, " 1");
+ } else if (*p != '=') {
+ error ("malformed option `-D %s'", str);
+ return;
+ } else {
+ U_CHAR *q;
+ /* Copy the entire option so we can modify it. */
+ buf = (U_CHAR *) alloca (2 * strlen (str) + 1);
+ strncpy (buf, str, p - str);
+ /* Change the = to a space. */
+ buf[p - str] = ' ';
+ /* Scan for any backslash-newline and remove it. */
+ p++;
+ q = &buf[p - str];
+ while (*p) {
+ if (*p == '\\' && p[1] == '\n')
+ p += 2;
+ /* Change newline chars into newline-markers. */
+ else if (*p == '\n')
+ {
+ *q++ = '\n';
+ *q++ = '\n';
+ p++;
+ }
+ else
+ *q++ = *p++;
+ }
+ *q = 0;
+ }
+
+ ip = &instack[++indepth];
+ ip->nominal_fname = ip->fname = "*Initialization*";
+
+ ip->buf = ip->bufp = buf;
+ ip->length = strlen (buf);
+ ip->lineno = 1;
+ ip->macro = 0;
+ ip->free_ptr = 0;
+ ip->if_stack = if_stack;
+ ip->system_header_p = 0;
+
+ for (kt = directive_table; kt->type != T_DEFINE; kt++)
+ ;
+
+ /* Pass NULL instead of OP, since this is a "predefined" macro. */
+ do_define (buf, buf + strlen (buf), NULL, kt);
+ --indepth;
+}
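+
+/* Examples (illustrative, added): the two accepted -D forms. */
+#if 0
+ make_definition ((U_CHAR *) "DEBUG", op); /* like `#define DEBUG 1' */
+ make_definition ((U_CHAR *) "MAX=10", op); /* like `#define MAX 10' */
+#endif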
+
+/* JF, this does the work for the -U option */
+
+static void
+make_undef (str, op)
+ U_CHAR *str;
+ FILE_BUF *op;
+{
+ FILE_BUF *ip;
+ struct directive *kt;
+
+ ip = &instack[++indepth];
+ ip->nominal_fname = ip->fname = "*undef*";
+
+ ip->buf = ip->bufp = str;
+ ip->length = strlen (str);
+ ip->lineno = 1;
+ ip->macro = 0;
+ ip->free_ptr = 0;
+ ip->if_stack = if_stack;
+ ip->system_header_p = 0;
+
+ for (kt = directive_table; kt->type != T_UNDEF; kt++)
+ ;
+
+ do_undef (str, str + strlen (str), op, kt);
+ --indepth;
+}
+
+/* Process the string STR as if it appeared as the body of a #assert.
+ OPTION is the option name for which STR was the argument. */
+
+static void
+make_assertion (option, str)
+ char *option;
+ U_CHAR *str;
+{
+ FILE_BUF *ip;
+ struct directive *kt;
+ U_CHAR *buf, *p, *q;
+
+ /* Copy the entire option so we can modify it. */
+ buf = (U_CHAR *) alloca (strlen (str) + 1);
+ strcpy ((char *) buf, str);
+ /* Scan for any backslash-newline and remove it. */
+ p = q = buf;
+ while (*p) {
+ if (*p == '\\' && p[1] == '\n')
+ p += 2;
+ else
+ *q++ = *p++;
+ }
+ *q = 0;
+
+ p = buf;
+ if (!is_idstart[*p]) {
+ error ("malformed option `%s %s'", option, str);
+ return;
+ }
+ while (is_idchar[*++p])
+ ;
+ while (*p == ' ' || *p == '\t') p++;
+ if (! (*p == 0 || *p == '(')) {
+ error ("malformed option `%s %s'", option, str);
+ return;
+ }
+
+ ip = &instack[++indepth];
+ ip->nominal_fname = ip->fname = "*Initialization*";
+
+ ip->buf = ip->bufp = buf;
+ ip->length = strlen (buf);
+ ip->lineno = 1;
+ ip->macro = 0;
+ ip->free_ptr = 0;
+ ip->if_stack = if_stack;
+ ip->system_header_p = 0;
+
+ for (kt = directive_table; kt->type != T_ASSERT; kt++)
+ ;
+
+ /* pass NULL as output ptr to do_define since we KNOW it never
+ does any output.... */
+ do_assert (buf, buf + strlen (buf), NULL_PTR, kt);
+ --indepth;
+}
+
+/* Append a chain of `struct file_name_list's
+ to the end of the main include chain.
+ FIRST is the beginning of the chain to append, and LAST is the end. */
+
+static void
+append_include_chain (first, last)
+ struct file_name_list *first, *last;
+{
+ struct file_name_list *dir;
+
+ if (!first || !last)
+ return;
+
+ if (include == 0)
+ include = first;
+ else
+ last_include->next = first;
+
+ if (first_bracket_include == 0)
+ first_bracket_include = first;
+
+ for (dir = first; ; dir = dir->next) {
+ int len = strlen (dir->fname) + INCLUDE_LEN_FUDGE;
+ if (len > max_include_len)
+ max_include_len = len;
+ if (dir == last)
+ break;
+ }
+
+ last->next = NULL;
+ last_include = last;
+}
+
+/* Add output to `deps_buffer' for the -M switch.
+ STRING points to the text to be output.
+ SPACER is ':' for targets, ' ' for dependencies, zero for text
+ to be inserted literally. */
+
+static void
+deps_output (string, spacer)
+ char *string;
+ int spacer;
+{
+ int size = strlen (string);
+
+ if (size == 0)
+ return;
+
+#ifndef MAX_OUTPUT_COLUMNS
+#define MAX_OUTPUT_COLUMNS 72
+#endif
+ if (spacer
+ && deps_column > 0
+ && (deps_column + size) > MAX_OUTPUT_COLUMNS)
+ {
+ deps_output (" \\\n ", 0);
+ deps_column = 0;
+ }
+
+ if (deps_size + size + 8 > deps_allocated_size) {
+ deps_allocated_size = (deps_size + size + 50) * 2;
+ deps_buffer = (char *) xrealloc (deps_buffer, deps_allocated_size);
+ }
+ if (spacer == ' ' && deps_column > 0)
+ deps_buffer[deps_size++] = ' ';
+ bcopy (string, &deps_buffer[deps_size], size);
+ deps_size += size;
+ deps_column += size;
+ if (spacer == ':')
+ deps_buffer[deps_size++] = ':';
+ deps_buffer[deps_size] = 0;
+}
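+
+/* Illustrative call sequence (added): emitting `foo.o: foo.c foo.h'
+ for -M. Lines longer than MAX_OUTPUT_COLUMNS are folded with a
+ backslash-newline by the recursive call above. */
+#if 0
+ deps_output ("foo.o", ':');
+ deps_output ("foo.c", ' ');
+ deps_output ("foo.h", ' ');
+#endif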
+
+#if defined(USG) || defined(VMS)
+#ifndef BSTRING
+
+void
+bzero (b, length)
+ register char *b;
+ register unsigned length;
+{
+ while (length-- > 0)
+ *b++ = 0;
+}
+
+void
+bcopy (b1, b2, length)
+ register char *b1;
+ register char *b2;
+ register unsigned length;
+{
+ while (length-- > 0)
+ *b2++ = *b1++;
+}
+
+int
+bcmp (b1, b2, length) /* This could be a macro! */
+ register char *b1;
+ register char *b2;
+ register unsigned length;
+{
+ while (length-- > 0)
+ if (*b1++ != *b2++)
+ return 1;
+
+ return 0;
+}
+#endif /* not BSTRING */
+#endif /* USG or VMS */
+
+
+static void
+fatal (str, arg)
+ char *str, *arg;
+{
+ fprintf (stderr, "%s: ", progname);
+ fprintf (stderr, str, arg);
+ fprintf (stderr, "\n");
+ exit (FAILURE_EXIT_CODE);
+}
+
+/* More 'friendly' abort that prints the line and file.
+ config.h can #define abort fancy_abort if you like that sort of thing. */
+
+void
+fancy_abort ()
+{
+ fatal ("Internal gcc abort.");
+}
+
+static void
+perror_with_name (name)
+ char *name;
+{
+ fprintf (stderr, "%s: ", progname);
+ if (errno < sys_nerr)
+ fprintf (stderr, "%s: %s\n", name, sys_errlist[errno]);
+ else
+ fprintf (stderr, "%s: undocumented I/O error\n", name);
+ errors++;
+}
+
+static void
+pfatal_with_name (name)
+ char *name;
+{
+ perror_with_name (name);
+#ifdef VMS
+ exit (vaxc$errno);
+#else
+ exit (FAILURE_EXIT_CODE);
+#endif
+}
+
+/* Handler for SIGPIPE. */
+
+static void
+pipe_closed (signo)
+ /* If this is missing, some compilers complain. */
+ int signo;
+{
+ fatal ("output pipe has been closed");
+}
+
+static void
+memory_full ()
+{
+ fatal ("Memory exhausted.");
+}
+
+
+char *
+xmalloc (size)
+ unsigned size;
+{
+ register char *ptr = (char *) malloc (size);
+ if (ptr != 0) return (ptr);
+ memory_full ();
+ /*NOTREACHED*/
+ return 0;
+}
+
+static char *
+xrealloc (old, size)
+ char *old;
+ unsigned size;
+{
+ register char *ptr = (char *) realloc (old, size);
+ if (ptr != 0) return (ptr);
+ memory_full ();
+ /*NOTREACHED*/
+ return 0;
+}
+
+static char *
+xcalloc (number, size)
+ unsigned number, size;
+{
+ register unsigned total = number * size;
+ register char *ptr = (char *) malloc (total);
+ if (ptr != 0) {
+ if (total > 100)
+ bzero (ptr, total);
+ else {
+ /* It's not too long, so loop, zeroing by longs.
+ It must be safe because malloc values are always well aligned.
+ (The constant 4 below assumes sizeof (long) == 4.) */
+ register long *zp = (long *) ptr;
+ register long *zl = (long *) (ptr + total - 4);
+ register int i = total - 4;
+ while (zp < zl)
+ *zp++ = 0;
+ if (i < 0)
+ i = 0;
+ while (i < total)
+ ptr[i++] = 0;
+ }
+ return ptr;
+ }
+ memory_full ();
+ /*NOTREACHED*/
+ return 0;
+}
+
+static char *
+savestring (input)
+ char *input;
+{
+ unsigned size = strlen (input);
+ char *output = xmalloc (size + 1);
+ strcpy (output, input);
+ return output;
+}
+
+/* Get the file-mode and data size of the file open on FD
+ and store them in *MODE_POINTER and *SIZE_POINTER. */
+
+static int
+file_size_and_mode (fd, mode_pointer, size_pointer)
+ int fd;
+ int *mode_pointer;
+ long int *size_pointer;
+{
+ struct stat sbuf;
+
+ if (fstat (fd, &sbuf) < 0) return (-1);
+ if (mode_pointer) *mode_pointer = sbuf.st_mode;
+ if (size_pointer) *size_pointer = sbuf.st_size;
+ return 0;
+}
+
+static void
+output_dots (fd, depth)
+ FILE* fd;
+ int depth;
+{
+ while (depth > 0) {
+ putc ('.', fd);
+ depth--;
+ }
+}
+
+
+#ifdef VMS
+
+/* Under VMS we need to fix up the "include" specification
+ filename so that everything following the 1st slash is
+ changed into its correct VMS file specification. */
+
+static void
+hack_vms_include_specification (fname)
+ char *fname;
+{
+ register char *cp, *cp1, *cp2;
+ int f, check_filename_before_returning, no_prefix_seen;
+ char Local[512];
+
+ check_filename_before_returning = 0;
+ no_prefix_seen = 0;
+
+ /* Ignore leading "./"s */
+ while (fname[0] == '.' && fname[1] == '/') {
+ strcpy (fname, fname+2);
+ no_prefix_seen = 1; /* mark this for later */
+ }
+ /* Look for the boundary between the VMS and UNIX filespecs */
+ cp = rindex (fname, ']'); /* Look for end of dirspec. */
+ if (cp == 0) cp = rindex (fname, '>'); /* ... Ditto */
+ if (cp == 0) cp = rindex (fname, ':'); /* Look for end of devspec. */
+ if (cp) {
+ cp++;
+ } else {
+ cp = index (fname, '/'); /* Look for the "/" */
+ }
+
+ /*
+ * Check if we have a vax-c style '#include filename'
+ * and add the missing .h
+ */
+ if (cp == 0) {
+ if (index (fname, '.') == 0)
+ strcat (fname, ".h");
+ } else {
+ if (index (cp, '.') == 0)
+ strcat (cp, ".h");
+ }
+
+ cp2 = Local; /* initialize */
+
+ /* We are trying to do a number of things here. First of all, we are
+ trying to hammer the filenames into a standard format, such that later
+ processing can handle them.
+
+ If the file name contains something like [dir.], then it recognizes this
+ as a root, and strips the ".]". Later processing will add whatever is
+ needed to get things working properly.
+
+ If no device is specified, then the first directory name is taken to be
+ a device name (or a rooted logical). */
+
+ /* See if we found that 1st slash */
+ if (cp == 0) return; /* Nothing to do!!! */
+ if (*cp != '/') return; /* Nothing to do!!! */
+ /* Point to the UNIX filename part (which needs to be fixed!) */
+ cp1 = cp+1;
+ /* If the directory spec is not rooted, we can just copy
+ the UNIX filename part and we are done */
+ if (((cp - fname) > 1) && ((cp[-1] == ']') || (cp[-1] == '>'))) {
+ if (cp[-2] != '.') {
+ /*
+ * The VMS part ends in a `]', and the preceding character is not a `.'.
+ * We strip the `]', and then splice the two parts of the name in the
+ * usual way. Given the default locations for include files in cccp.c,
+ * we will only use this code if the user specifies alternate locations
+ * with the /include (-I) switch on the command line. */
+ cp -= 1; /* Strip "]" */
+ cp1--; /* backspace */
+ } else {
+ /*
+ * The VMS part has a ".]" at the end, and this will not do. Later
+ * processing will add a second directory spec, and this would be a syntax
+ * error. Thus we strip the ".]", and thus merge the directory specs.
+ * We also backspace cp1, so that it points to a '/'. This inhibits the
+ * generation of the 000000 root directory spec (which does not belong here
+ * in this case).
+ */
+ cp -= 2; /* Strip ".]" */
+ cp1--; /* backspace */
+ }
+ } else {
+
+ /* We drop in here if there is no VMS style directory specification yet.
+ * If there is no device specification either, we make the first dir a
+ * device and try that. If we do not do this, then we will be essentially
+ * searching the user's default directory (as if they did a #include "asdf.h").
+ *
+ * Then all we need to do is to push a '[' into the output string. Later
+ * processing will fill this in, and close the bracket.
+ */
+ if (cp[-1] != ':') *cp2++ = ':'; /* dev not in spec. take first dir */
+ *cp2++ = '['; /* Open the directory specification */
+ }
+
+ /* at this point we assume that we have the device spec, and (at least
+ the opening "[" for a directory specification. We may have directories
+ specified already */
+
+ /* If there are no other slashes then the filename will be
+ in the "root" directory. Otherwise, we need to add
+ directory specifications. */
+ if (index (cp1, '/') == 0) {
+ /* Just add "000000]" as the directory string */
+ strcpy (cp2, "000000]");
+ cp2 += strlen (cp2);
+ check_filename_before_returning = 1; /* we might need to fool with this later */
+ } else {
+ /* As long as there are still subdirectories to add, do them. */
+ while (index (cp1, '/') != 0) {
+ /* If this token is "." we can ignore it */
+ if ((cp1[0] == '.') && (cp1[1] == '/')) {
+ cp1 += 2;
+ continue;
+ }
+ /* Add a subdirectory spec. Do not duplicate "." */
+ if (cp2[-1] != '.' && cp2[-1] != '[' && cp2[-1] != '<')
+ *cp2++ = '.';
+ /* If this is ".." then the spec becomes "-" */
+ if ((cp1[0] == '.') && (cp1[1] == '.') && (cp[2] == '/')) {
+ /* Add "-" and skip the ".." */
+ *cp2++ = '-';
+ cp1 += 3;
+ continue;
+ }
+ /* Copy the subdirectory */
+ while (*cp1 != '/') *cp2++= *cp1++;
+ cp1++; /* Skip the "/" */
+ }
+ /* Close the directory specification */
+ if (cp2[-1] == '.') /* no trailing periods */
+ cp2--;
+ *cp2++ = ']';
+ }
+ /* Now add the filename */
+ while (*cp1) *cp2++ = *cp1++;
+ *cp2 = 0;
+ /* Now append it to the original VMS spec. */
+ strcpy (cp, Local);
+
+ /* If we put a [000000] in the filename, try to open it first. If this fails,
+ remove the [000000], and return that name. This provides flexibility
+ to the user in that they can use both rooted and non-rooted logical names
+ to point to the location of the file. */
+
+ if (check_filename_before_returning && no_prefix_seen) {
+ f = open (fname, O_RDONLY, 0666);
+ if (f >= 0) {
+ /* The file name is OK as it is, so return it as is. */
+ close (f);
+ return;
+ }
+ /* The filename did not work. Try to remove the [000000] from the name,
+ and return it. */
+ cp = index (fname, '[');
+ cp2 = index (fname, ']') + 1;
+ strcpy (cp, cp2); /* this gets rid of it */
+ }
+ return;
+}
+#endif /* VMS */
+
+#ifdef VMS
+
+/* These are the read/write replacement routines for
+ VAX-11 "C". They make read/write behave enough
+ like their UNIX counterparts that CCCP will work */
+
+static int
+read (fd, buf, size)
+ int fd;
+ char *buf;
+ int size;
+{
+#undef read /* Get back the REAL read routine */
+ register int i;
+ register int total = 0;
+
+ /* Read until the buffer is exhausted */
+ while (size > 0) {
+ /* Limit each read to 32KB */
+ i = (size > (32*1024)) ? (32*1024) : size;
+ i = read (fd, buf, i);
+ if (i <= 0) {
+ if (i == 0) return (total);
+ return (i);
+ }
+ /* Account for this read */
+ total += i;
+ buf += i;
+ size -= i;
+ }
+ return (total);
+}
+
+static int
+write (fd, buf, size)
+ int fd;
+ char *buf;
+ int size;
+{
+#undef write /* Get back the REAL write routine */
+ int i;
+ int j;
+
+ /* Limit individual writes to 32Kb */
+ i = size;
+ while (i > 0) {
+ j = (i > (32*1024)) ? (32*1024) : i;
+ if (write (fd, buf, j) < 0) return (-1);
+ /* Account for the data written */
+ buf += j;
+ i -= j;
+ }
+ return (size);
+}
+
+/* The following wrapper functions supply additional arguments to the VMS
+ I/O routines to optimize performance with file handling. The arguments
+ are:
+ "mbc=16" - Set multi-block count to 16 (use a 8192 byte buffer).
+ "deq=64" - When extending the file, extend it in chunks of 32Kbytes.
+ "fop=tef"- Truncate unused portions of file when closing file.
+ "shr=nil"- Disallow file sharing while file is open.
+ */
+
+static FILE *
+freopen (fname, type, oldfile)
+ char *fname;
+ char *type;
+ FILE *oldfile;
+{
+#undef freopen /* Get back the REAL fopen routine */
+ if (strcmp (type, "w") == 0)
+ return freopen (fname, type, oldfile, "mbc=16", "deq=64", "fop=tef", "shr=nil");
+ return freopen (fname, type, oldfile, "mbc=16");
+}
+
+static FILE *
+fopen (fname, type)
+ char *fname;
+ char *type;
+{
+#undef fopen /* Get back the REAL fopen routine */
+ if (strcmp (type, "w") == 0)
+ return fopen (fname, type, "mbc=16", "deq=64", "fop=tef", "shr=nil");
+ return fopen (fname, type, "mbc=16");
+}
+
+static int
+open (fname, flags, prot)
+ char *fname;
+ int flags;
+ int prot;
+{
+#undef open /* Get back the REAL open routine */
+ return open (fname, flags, prot, "mbc=16", "deq=64", "fop=tef");
+}
+
+/* Avoid run-time library bug, where copying M out of N+M characters with
+ N >= 65535 results in VAXCRTL's strncat falling into an infinite loop.
+ gcc-cpp exercises this particular bug. */
+
+static char *
+strncat (dst, src, cnt)
+ char *dst;
+ const char *src;
+ unsigned cnt;
+{
+ register char *d = dst, *s = (char *) src;
+ register int n = cnt; /* convert to _signed_ type */
+
+ while (*d) d++; /* advance to end */
+ while (--n >= 0)
+ if (!(*d++ = *s++)) break;
+ if (n < 0) *d = '\0';
+ return dst;
+}
+#endif /* VMS */
diff --git a/gnu/usr.bin/cc/cpp/cexp.c b/gnu/usr.bin/cc/cpp/cexp.c
new file mode 100644
index 0000000..d1471aa
--- /dev/null
+++ b/gnu/usr.bin/cc/cpp/cexp.c
@@ -0,0 +1,1926 @@
+
+/* A Bison parser, made from cexp.y with GNU Bison version 1.22. */
+
+#define YYBISON 1 /* Identify Bison output. */
+
+#define INT 258
+#define CHAR 259
+#define NAME 260
+#define ERROR 261
+#define OR 262
+#define AND 263
+#define EQUAL 264
+#define NOTEQUAL 265
+#define LEQ 266
+#define GEQ 267
+#define LSH 268
+#define RSH 269
+#define UNARY 270
+
+#line 26 "cexp.y"
+
+#include "config.h"
+#include <setjmp.h>
+/* #define YYDEBUG 1 */
+
+#ifdef MULTIBYTE_CHARS
+#include <stdlib.h>
+#include <locale.h>
+#endif
+
+#include <stdio.h>
+
+typedef unsigned char U_CHAR;
+
+/* This is used for communicating lists of keywords with cccp.c. */
+struct arglist {
+ struct arglist *next;
+ U_CHAR *name;
+ int length;
+ int argno;
+};
+
+/* Define a generic NULL if one hasn't already been defined. */
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef GENERIC_PTR
+#if defined (USE_PROTOTYPES) ? USE_PROTOTYPES : defined (__STDC__)
+#define GENERIC_PTR void *
+#else
+#define GENERIC_PTR char *
+#endif
+#endif
+
+#ifndef NULL_PTR
+#define NULL_PTR ((GENERIC_PTR)0)
+#endif
+
+int yylex ();
+void yyerror ();
+int expression_value;
+
+static jmp_buf parse_return_error;
+
+/* Nonzero means count most punctuation as part of a name. */
+static int keyword_parsing = 0;
+
+/* some external tables of character types */
+extern unsigned char is_idstart[], is_idchar[], is_hor_space[];
+
+extern char *xmalloc ();
+
+/* Flag for -pedantic. */
+extern int pedantic;
+
+/* Flag for -traditional. */
+extern int traditional;
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE INT_TYPE_SIZE
+#endif
+
+#ifndef MAX_CHAR_TYPE_SIZE
+#define MAX_CHAR_TYPE_SIZE CHAR_TYPE_SIZE
+#endif
+
+#ifndef MAX_INT_TYPE_SIZE
+#define MAX_INT_TYPE_SIZE INT_TYPE_SIZE
+#endif
+
+#ifndef MAX_LONG_TYPE_SIZE
+#define MAX_LONG_TYPE_SIZE LONG_TYPE_SIZE
+#endif
+
+#ifndef MAX_WCHAR_TYPE_SIZE
+#define MAX_WCHAR_TYPE_SIZE WCHAR_TYPE_SIZE
+#endif
+
+/* Yield nonzero if adding two numbers with A's and B's signs can yield a
+ number with SUM's sign, where A, B, and SUM are all C integers. */
+#define possible_sum_sign(a, b, sum) ((((a) ^ (b)) | ~ ((a) ^ (sum))) < 0)
+
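+/* Worked example (added, not part of the original source): */
+#if 0
+static void
+possible_sum_sign_example ()
+{
+ long a = 1, b = 1;
+
+ if (! possible_sum_sign (a, b, a + b))
+ abort (); /* same-sign operands, same-sign sum: no overflow */
+ /* Two large positives whose sum wraps negative fail the test,
+ and the callers then report an integer overflow: */
+ a = b = 0x40000000L; /* assumes 32-bit long, for illustration */
+ if (possible_sum_sign (a, b, a + b))
+ abort ();
+}
+#endif
+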
+static void integer_overflow ();
+static long left_shift ();
+static long right_shift ();
+
+#line 127 "cexp.y"
+typedef union {
+ struct constant {long value; int unsignedp;} integer;
+ struct name {U_CHAR *address; int length;} name;
+ struct arglist *keywords;
+ int voidval;
+ char *sval;
+} YYSTYPE;
+
+#ifndef YYLTYPE
+typedef
+ struct yyltype
+ {
+ int timestamp;
+ int first_line;
+ int first_column;
+ int last_line;
+ int last_column;
+ char *text;
+ }
+ yyltype;
+
+#define YYLTYPE yyltype
+#endif
+
+#include <stdio.h>
+
+#ifndef __cplusplus
+#ifndef __STDC__
+#define const
+#endif
+#endif
+
+
+
+#define YYFINAL 73
+#define YYFLAG -32768
+#define YYNTBASE 34
+
+#define YYTRANSLATE(x) ((unsigned)(x) <= 270 ? yytranslate[x] : 39)
+
+static const char yytranslate[] = { 0,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 29, 2, 31, 2, 27, 14, 2, 32,
+ 33, 25, 23, 9, 24, 2, 26, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 8, 2, 17,
+ 2, 18, 7, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 13, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 12, 2, 30, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 2, 3, 4, 5,
+ 6, 10, 11, 15, 16, 19, 20, 21, 22, 28
+};
+
+#if YYDEBUG != 0
+static const short yyprhs[] = { 0,
+ 0, 2, 4, 8, 11, 14, 17, 20, 23, 24,
+ 31, 35, 39, 43, 47, 51, 55, 59, 63, 67,
+ 71, 75, 79, 83, 87, 91, 95, 99, 103, 107,
+ 113, 115, 117, 119, 120, 125
+};
+
+static const short yyrhs[] = { 35,
+ 0, 36, 0, 35, 9, 36, 0, 24, 36, 0,
+ 29, 36, 0, 23, 36, 0, 30, 36, 0, 31,
+ 5, 0, 0, 31, 5, 37, 32, 38, 33, 0,
+ 32, 35, 33, 0, 36, 25, 36, 0, 36, 26,
+ 36, 0, 36, 27, 36, 0, 36, 23, 36, 0,
+ 36, 24, 36, 0, 36, 21, 36, 0, 36, 22,
+ 36, 0, 36, 15, 36, 0, 36, 16, 36, 0,
+ 36, 19, 36, 0, 36, 20, 36, 0, 36, 17,
+ 36, 0, 36, 18, 36, 0, 36, 14, 36, 0,
+ 36, 13, 36, 0, 36, 12, 36, 0, 36, 11,
+ 36, 0, 36, 10, 36, 0, 36, 7, 36, 8,
+ 36, 0, 3, 0, 4, 0, 5, 0, 0, 32,
+ 38, 33, 38, 0, 5, 38, 0
+};
+
+#endif
+
+#if YYDEBUG != 0
+static const short yyrline[] = { 0,
+ 159, 164, 165, 172, 177, 180, 182, 185, 189, 191,
+ 196, 201, 213, 228, 239, 246, 253, 259, 265, 268,
+ 271, 277, 283, 289, 295, 298, 301, 304, 307, 310,
+ 313, 315, 317, 322, 324, 337
+};
+
+static const char * const yytname[] = { "$","error","$illegal.","INT","CHAR",
+"NAME","ERROR","'?'","':'","','","OR","AND","'|'","'^'","'&'","EQUAL","NOTEQUAL",
+"'<'","'>'","LEQ","GEQ","LSH","RSH","'+'","'-'","'*'","'/'","'%'","UNARY","'!'",
+"'~'","'#'","'('","')'","start","exp1","exp","@1","keywords",""
+};
+#endif
+
+static const short yyr1[] = { 0,
+ 34, 35, 35, 36, 36, 36, 36, 36, 37, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
+ 36, 36, 36, 38, 38, 38
+};
+
+static const short yyr2[] = { 0,
+ 1, 1, 3, 2, 2, 2, 2, 2, 0, 6,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 5,
+ 1, 1, 1, 0, 4, 2
+};
+
+static const short yydefact[] = { 0,
+ 31, 32, 33, 0, 0, 0, 0, 0, 0, 1,
+ 2, 6, 4, 5, 7, 8, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 11, 3,
+ 0, 29, 28, 27, 26, 25, 19, 20, 23, 24,
+ 21, 22, 17, 18, 15, 16, 12, 13, 14, 34,
+ 0, 34, 34, 0, 30, 36, 0, 10, 34, 35,
+ 0, 0, 0
+};
+
+static const short yydefgoto[] = { 71,
+ 10, 11, 38, 64
+};
+
+static const short yypact[] = { 31,
+-32768,-32768,-32768, 31, 31, 31, 31, 4, 31, 3,
+ 80,-32768,-32768,-32768,-32768, 6, 32, 31, 31, 31,
+ 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31, 31, 31, 31, 31, 7,-32768, 80,
+ 59, 97, 113, 128, 142, 155, 25, 25, 162, 162,
+ 162, 162, 167, 167, -19, -19,-32768,-32768,-32768, 5,
+ 31, 5, 5, -20, 80,-32768, 20,-32768, 5,-32768,
+ 40, 56,-32768
+};
+
+static const short yypgoto[] = {-32768,
+ 49, -4,-32768, -58
+};
+
+
+#define YYLAST 194
+
+
+static const short yytable[] = { 12,
+ 13, 14, 15, 66, 67, 35, 36, 37, 16, 62,
+ 70, 18, 68, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 1, 2, 3, 63, -9, 60, 72,
+ 18, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 69, 4, 5, 73, 65, 17, 0, 6,
+ 7, 8, 9, 0, 39, 19, 61, 0, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 19, 0, 0, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 31, 32, 33, 34, 35, 36, 37, 33,
+ 34, 35, 36, 37
+};
+
+static const short yycheck[] = { 4,
+ 5, 6, 7, 62, 63, 25, 26, 27, 5, 5,
+ 69, 9, 33, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 3, 4, 5, 32, 32, 32, 0,
+ 9, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 33, 23, 24, 0, 61, 9, -1, 29,
+ 30, 31, 32, -1, 33, 7, 8, -1, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 7, -1, -1, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 23, 24, 25, 26, 27, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 21, 22, 23, 24, 25, 26, 27, 23,
+ 24, 25, 26, 27
+};
+/* -*-C-*- Note some compilers choke on comments on `#line' lines. */
+#line 3 "/usr/local/lib/bison.simple"
+
+/* Skeleton output parser for bison,
+ Copyright (C) 1984, 1989, 1990 Bob Corbett and Richard Stallman
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 1, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#ifndef alloca
+#ifdef __GNUC__
+#define alloca __builtin_alloca
+#else /* not GNU C. */
+#if (!defined (__STDC__) && defined (sparc)) || defined (__sparc__) || defined (__sparc) || defined (__sgi)
+#include <alloca.h>
+#else /* not sparc */
+#if defined (MSDOS) && !defined (__TURBOC__)
+#include <malloc.h>
+#else /* not MSDOS, or __TURBOC__ */
+#if defined(_AIX)
+#include <malloc.h>
+ #pragma alloca
+#else /* not MSDOS, __TURBOC__, or _AIX */
+#ifdef __hpux
+#ifdef __cplusplus
+extern "C" {
+void *alloca (unsigned int);
+};
+#else /* not __cplusplus */
+void *alloca ();
+#endif /* not __cplusplus */
+#endif /* __hpux */
+#endif /* not _AIX */
+#endif /* not MSDOS, or __TURBOC__ */
+#endif /* not sparc. */
+#endif /* not GNU C. */
+#endif /* alloca not defined. */
+
+/* This is the parser code that is written into each bison parser
+ when the %semantic_parser declaration is not specified in the grammar.
+ It was written by Richard Stallman by simplifying the hairy parser
+ used when %semantic_parser is specified. */
+
+/* Note: there must be only one dollar sign in this file.
+ It is replaced by the list of actions, each action
+ as one case of the switch. */
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY -2
+#define YYEOF 0
+#define YYACCEPT return(0)
+#define YYABORT return(1)
+#define YYERROR goto yyerrlab1
+/* Like YYERROR except do call yyerror.
+ This remains here temporarily to ease the
+ transition to the new meaning of YYERROR, for GCC.
+ Once GCC version 2 has supplanted version 1, this can go. */
+#define YYFAIL goto yyerrlab
+#define YYRECOVERING() (!!yyerrstatus)
+#define YYBACKUP(token, value) \
+do \
+ if (yychar == YYEMPTY && yylen == 1) \
+ { yychar = (token), yylval = (value); \
+ yychar1 = YYTRANSLATE (yychar); \
+ YYPOPSTACK; \
+ goto yybackup; \
+ } \
+ else \
+ { yyerror ("syntax error: cannot back up"); YYERROR; } \
+while (0)
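+
+/* Editor's note (hypothetical illustration, not part of the imported
+   source): YYBACKUP may only be used in the action of a one-symbol
+   rule while no lookahead is pending; it substitutes a new lookahead,
+   e.g.
+
+     id: NAME  { if (is_a_type ($1)) YYBACKUP (TYPENAME, $1); }
+
+   where is_a_type and TYPENAME are invented names.  In any other
+   situation it reports "syntax error: cannot back up".  */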
+
+#define YYTERROR 1
+#define YYERRCODE 256
+
+#ifndef YYPURE
+#define YYLEX yylex()
+#endif
+
+#ifdef YYPURE
+#ifdef YYLSP_NEEDED
+#define YYLEX yylex(&yylval, &yylloc)
+#else
+#define YYLEX yylex(&yylval)
+#endif
+#endif
+
+/* If nonreentrant, generate the variables here */
+
+#ifndef YYPURE
+
+int yychar; /* the lookahead symbol */
+YYSTYPE yylval; /* the semantic value of the */
+ /* lookahead symbol */
+
+#ifdef YYLSP_NEEDED
+YYLTYPE yylloc; /* location data for the lookahead */
+ /* symbol */
+#endif
+
+int yynerrs; /* number of parse errors so far */
+#endif /* not YYPURE */
+
+#if YYDEBUG != 0
+int yydebug; /* nonzero means print parse trace */
+/* Since this is uninitialized, it does not stop multiple parsers
+ from coexisting. */
+#endif
+
+/* YYINITDEPTH indicates the initial size of the parser's stacks */
+
+#ifndef YYINITDEPTH
+#define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH is the maximum size the stacks can grow to
+ (effective only if the built-in stack extension method is used). */
+
+#if YYMAXDEPTH == 0
+#undef YYMAXDEPTH
+#endif
+
+#ifndef YYMAXDEPTH
+#define YYMAXDEPTH 10000
+#endif
+
+/* Prevent warning if -Wstrict-prototypes. */
+#ifdef __GNUC__
+int yyparse (void);
+#endif
+
+#if __GNUC__ > 1 /* GNU C and GNU C++ define this. */
+#define __yy_bcopy(FROM,TO,COUNT) __builtin_memcpy(TO,FROM,COUNT)
+#else /* not GNU C or C++ */
+#ifndef __cplusplus
+
+/* This is the most reliable way to avoid incompatibilities
+ in available built-in functions on various systems. */
+static void
+__yy_bcopy (from, to, count)
+ char *from;
+ char *to;
+ int count;
+{
+ register char *f = from;
+ register char *t = to;
+ register int i = count;
+
+ while (i-- > 0)
+ *t++ = *f++;
+}
+
+#else /* __cplusplus */
+
+/* This is the most reliable way to avoid incompatibilities
+ in available built-in functions on various systems. */
+static void
+__yy_bcopy (char *from, char *to, int count)
+{
+ register char *f = from;
+ register char *t = to;
+ register int i = count;
+
+ while (i-- > 0)
+ *t++ = *f++;
+}
+
+#endif
+#endif
+
+#line 184 "/usr/local/lib/bison.simple"
+
+/* The user can define YYPARSE_PARAM as the name of an argument to be passed
+ into yyparse. The argument should have type void *.
+ It should actually point to an object.
+ Grammar actions can access the variable by casting it
+ to the proper pointer type. */
+
+#ifdef YYPARSE_PARAM
+#define YYPARSE_PARAM_DECL void *YYPARSE_PARAM;
+#else
+#define YYPARSE_PARAM
+#define YYPARSE_PARAM_DECL
+#endif
+
+int
+yyparse(YYPARSE_PARAM)
+ YYPARSE_PARAM_DECL
+{
+ register int yystate;
+ register int yyn;
+ register short *yyssp;
+ register YYSTYPE *yyvsp;
+ int yyerrstatus; /* number of tokens to shift before error messages enabled */
+ int yychar1 = 0; /* lookahead token as an internal (translated) token number */
+
+ short yyssa[YYINITDEPTH]; /* the state stack */
+ YYSTYPE yyvsa[YYINITDEPTH]; /* the semantic value stack */
+
+ short *yyss = yyssa; /* refer to the stacks thru separate pointers */
+ YYSTYPE *yyvs = yyvsa; /* to allow yyoverflow to reallocate them elsewhere */
+
+#ifdef YYLSP_NEEDED
+ YYLTYPE yylsa[YYINITDEPTH]; /* the location stack */
+ YYLTYPE *yyls = yylsa;
+ YYLTYPE *yylsp;
+
+#define YYPOPSTACK (yyvsp--, yyssp--, yylsp--)
+#else
+#define YYPOPSTACK (yyvsp--, yyssp--)
+#endif
+
+ int yystacksize = YYINITDEPTH;
+
+#ifdef YYPURE
+ int yychar;
+ YYSTYPE yylval;
+ int yynerrs;
+#ifdef YYLSP_NEEDED
+ YYLTYPE yylloc;
+#endif
+#endif
+
+ YYSTYPE yyval; /* the variable used to return */
+ /* semantic values from the action */
+ /* routines */
+
+ int yylen;
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Starting parse\n");
+#endif
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+
+ /* Initialize stack pointers.
+ Waste one element of value and location stack
+ so that they stay on the same level as the state stack.
+ The wasted elements are never initialized. */
+
+ yyssp = yyss - 1;
+ yyvsp = yyvs;
+#ifdef YYLSP_NEEDED
+ yylsp = yyls;
+#endif
+
+/* Push a new state, which is found in yystate. */
+/* In all cases, when you get here, the value and location stacks
+   have just been pushed.  So pushing a state here evens the stacks. */
+yynewstate:
+
+ *++yyssp = yystate;
+
+ if (yyssp >= yyss + yystacksize - 1)
+ {
+ /* Give user a chance to reallocate the stack */
+ /* Use copies of these so that the &'s don't force the real ones into memory. */
+ YYSTYPE *yyvs1 = yyvs;
+ short *yyss1 = yyss;
+#ifdef YYLSP_NEEDED
+ YYLTYPE *yyls1 = yyls;
+#endif
+
+ /* Get the current used size of the three stacks, in elements. */
+ int size = yyssp - yyss + 1;
+
+#ifdef yyoverflow
+ /* Each stack pointer address is followed by the size of
+ the data in use in that stack, in bytes. */
+#ifdef YYLSP_NEEDED
+ /* This used to be a conditional around just the two extra args,
+ but that might be undefined if yyoverflow is a macro. */
+ yyoverflow("parser stack overflow",
+ &yyss1, size * sizeof (*yyssp),
+ &yyvs1, size * sizeof (*yyvsp),
+ &yyls1, size * sizeof (*yylsp),
+ &yystacksize);
+#else
+ yyoverflow("parser stack overflow",
+ &yyss1, size * sizeof (*yyssp),
+ &yyvs1, size * sizeof (*yyvsp),
+ &yystacksize);
+#endif
+
+ yyss = yyss1; yyvs = yyvs1;
+#ifdef YYLSP_NEEDED
+ yyls = yyls1;
+#endif
+#else /* no yyoverflow */
+ /* Extend the stack our own way. */
+ if (yystacksize >= YYMAXDEPTH)
+ {
+ yyerror("parser stack overflow");
+ return 2;
+ }
+ yystacksize *= 2;
+ if (yystacksize > YYMAXDEPTH)
+ yystacksize = YYMAXDEPTH;
+ yyss = (short *) alloca (yystacksize * sizeof (*yyssp));
+ __yy_bcopy ((char *)yyss1, (char *)yyss, size * sizeof (*yyssp));
+ yyvs = (YYSTYPE *) alloca (yystacksize * sizeof (*yyvsp));
+ __yy_bcopy ((char *)yyvs1, (char *)yyvs, size * sizeof (*yyvsp));
+#ifdef YYLSP_NEEDED
+ yyls = (YYLTYPE *) alloca (yystacksize * sizeof (*yylsp));
+ __yy_bcopy ((char *)yyls1, (char *)yyls, size * sizeof (*yylsp));
+#endif
+#endif /* no yyoverflow */
+
+ yyssp = yyss + size - 1;
+ yyvsp = yyvs + size - 1;
+#ifdef YYLSP_NEEDED
+ yylsp = yyls + size - 1;
+#endif
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Stack size increased to %d\n", yystacksize);
+#endif
+
+ if (yyssp >= yyss + yystacksize - 1)
+ YYABORT;
+ }
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Entering state %d\n", yystate);
+#endif
+
+ goto yybackup;
+ yybackup:
+
+/* Do appropriate processing given the current state. */
+/* Read a lookahead token if we need one and don't already have one. */
+/* yyresume: */
+
+ /* First try to decide what to do without reference to lookahead token. */
+
+ yyn = yypact[yystate];
+ if (yyn == YYFLAG)
+ goto yydefault;
+
+ /* Not known => get a lookahead token if don't already have one. */
+
+ /* yychar is either YYEMPTY or YYEOF
+ or a valid token in external form. */
+
+ if (yychar == YYEMPTY)
+ {
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Reading a token: ");
+#endif
+ yychar = YYLEX;
+ }
+
+  /* Convert token to internal form (in yychar1) for indexing tables with it. */
+
+ if (yychar <= 0) /* This means end of input. */
+ {
+ yychar1 = 0;
+ yychar = YYEOF; /* Don't call YYLEX any more */
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Now at end of input.\n");
+#endif
+ }
+ else
+ {
+ yychar1 = YYTRANSLATE(yychar);
+
+#if YYDEBUG != 0
+ if (yydebug)
+ {
+ fprintf (stderr, "Next token is %d (%s", yychar, yytname[yychar1]);
+ /* Give the individual parser a way to print the precise meaning
+ of a token, for further debugging info. */
+#ifdef YYPRINT
+ YYPRINT (stderr, yychar, yylval);
+#endif
+ fprintf (stderr, ")\n");
+ }
+#endif
+ }
+
+ yyn += yychar1;
+ if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != yychar1)
+ goto yydefault;
+
+ yyn = yytable[yyn];
+
+ /* yyn is what to do for this token type in this state.
+ Negative => reduce, -yyn is rule number.
+ Positive => shift, yyn is new state.
+ New state is final state => don't bother to shift,
+ just return success.
+ 0, or most negative number => error. */
+
+ if (yyn < 0)
+ {
+ if (yyn == YYFLAG)
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+ else if (yyn == 0)
+ goto yyerrlab;
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+ /* Shift the lookahead token. */
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Shifting token %d (%s), ", yychar, yytname[yychar1]);
+#endif
+
+ /* Discard the token being shifted unless it is eof. */
+ if (yychar != YYEOF)
+ yychar = YYEMPTY;
+
+ *++yyvsp = yylval;
+#ifdef YYLSP_NEEDED
+ *++yylsp = yylloc;
+#endif
+
+ /* count tokens shifted since error; after three, turn off error status. */
+ if (yyerrstatus) yyerrstatus--;
+
+ yystate = yyn;
+ goto yynewstate;
+
+/* Do the default action for the current state. */
+yydefault:
+
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+
+/* Do a reduction. yyn is the number of a rule to reduce with. */
+yyreduce:
+ yylen = yyr2[yyn];
+ if (yylen > 0)
+ yyval = yyvsp[1-yylen]; /* implement default value of the action */
+
+#if YYDEBUG != 0
+ if (yydebug)
+ {
+ int i;
+
+ fprintf (stderr, "Reducing via rule %d (line %d), ",
+ yyn, yyrline[yyn]);
+
+ /* Print the symbols being reduced, and their result. */
+ for (i = yyprhs[yyn]; yyrhs[i] > 0; i++)
+ fprintf (stderr, "%s ", yytname[yyrhs[i]]);
+ fprintf (stderr, " -> %s\n", yytname[yyr1[yyn]]);
+ }
+#endif
+
+
+ switch (yyn) {
+
+case 1:
+#line 160 "cexp.y"
+{ expression_value = yyvsp[0].integer.value; ;
+ break;}
+case 3:
+#line 166 "cexp.y"
+{ if (pedantic)
+ pedwarn ("comma operator in operand of `#if'");
+ yyval.integer = yyvsp[0].integer; ;
+ break;}
+case 4:
+#line 173 "cexp.y"
+{ yyval.integer.value = - yyvsp[0].integer.value;
+ if ((yyval.integer.value & yyvsp[0].integer.value) < 0 && ! yyvsp[0].integer.unsignedp)
+ integer_overflow ();
+ yyval.integer.unsignedp = yyvsp[0].integer.unsignedp; ;
+ break;}
+case 5:
+#line 178 "cexp.y"
+{ yyval.integer.value = ! yyvsp[0].integer.value;
+ yyval.integer.unsignedp = 0; ;
+ break;}
+case 6:
+#line 181 "cexp.y"
+{ yyval.integer = yyvsp[0].integer; ;
+ break;}
+case 7:
+#line 183 "cexp.y"
+{ yyval.integer.value = ~ yyvsp[0].integer.value;
+ yyval.integer.unsignedp = yyvsp[0].integer.unsignedp; ;
+ break;}
+case 8:
+#line 186 "cexp.y"
+{ yyval.integer.value = check_assertion (yyvsp[0].name.address, yyvsp[0].name.length,
+ 0, NULL_PTR);
+ yyval.integer.unsignedp = 0; ;
+ break;}
+case 9:
+#line 190 "cexp.y"
+{ keyword_parsing = 1; ;
+ break;}
+case 10:
+#line 192 "cexp.y"
+{ yyval.integer.value = check_assertion (yyvsp[-4].name.address, yyvsp[-4].name.length,
+ 1, yyvsp[-1].keywords);
+ keyword_parsing = 0;
+ yyval.integer.unsignedp = 0; ;
+ break;}
+case 11:
+#line 197 "cexp.y"
+{ yyval.integer = yyvsp[-1].integer; ;
+ break;}
+case 12:
+#line 202 "cexp.y"
+{ yyval.integer.unsignedp = yyvsp[-2].integer.unsignedp || yyvsp[0].integer.unsignedp;
+ if (yyval.integer.unsignedp)
+ yyval.integer.value = (unsigned long) yyvsp[-2].integer.value * yyvsp[0].integer.value;
+ else
+ {
+ yyval.integer.value = yyvsp[-2].integer.value * yyvsp[0].integer.value;
+ if (yyvsp[-2].integer.value
+ && (yyval.integer.value / yyvsp[-2].integer.value != yyvsp[0].integer.value
+ || (yyval.integer.value & yyvsp[-2].integer.value & yyvsp[0].integer.value) < 0))
+ integer_overflow ();
+ } ;
+ break;}
+case 13:
+#line 214 "cexp.y"
+{ if (yyvsp[0].integer.value == 0)
+ {
+ error ("division by zero in #if");
+ yyvsp[0].integer.value = 1;
+ }
+ yyval.integer.unsignedp = yyvsp[-2].integer.unsignedp || yyvsp[0].integer.unsignedp;
+ if (yyval.integer.unsignedp)
+ yyval.integer.value = (unsigned long) yyvsp[-2].integer.value / yyvsp[0].integer.value;
+ else
+ {
+ yyval.integer.value = yyvsp[-2].integer.value / yyvsp[0].integer.value;
+ if ((yyval.integer.value & yyvsp[-2].integer.value & yyvsp[0].integer.value) < 0)
+ integer_overflow ();
+ } ;
+ break;}
+case 14:
+#line 229 "cexp.y"
+{ if (yyvsp[0].integer.value == 0)
+ {
+ error ("division by zero in #if");
+ yyvsp[0].integer.value = 1;
+ }
+ yyval.integer.unsignedp = yyvsp[-2].integer.unsignedp || yyvsp[0].integer.unsignedp;
+ if (yyval.integer.unsignedp)
+ yyval.integer.value = (unsigned long) yyvsp[-2].integer.value % yyvsp[0].integer.value;
+ else
+ yyval.integer.value = yyvsp[-2].integer.value % yyvsp[0].integer.value; ;
+ break;}
+case 15:
+#line 240 "cexp.y"
+{ yyval.integer.value = yyvsp[-2].integer.value + yyvsp[0].integer.value;
+ yyval.integer.unsignedp = yyvsp[-2].integer.unsignedp || yyvsp[0].integer.unsignedp;
+ if (! yyval.integer.unsignedp
+ && ! possible_sum_sign (yyvsp[-2].integer.value, yyvsp[0].integer.value,
+ yyval.integer.value))
+ integer_overflow (); ;
+ break;}
+case 16:
+#line 247 "cexp.y"
+{ yyval.integer.value = yyvsp[-2].integer.value - yyvsp[0].integer.value;
+ yyval.integer.unsignedp = yyvsp[-2].integer.unsignedp || yyvsp[0].integer.unsignedp;
+ if (! yyval.integer.unsignedp
+ && ! possible_sum_sign (yyval.integer.value, yyvsp[0].integer.value,
+ yyvsp[-2].integer.value))
+ integer_overflow (); ;
+ break;}
+case 17:
+#line 254 "cexp.y"
+{ yyval.integer.unsignedp = yyvsp[-2].integer.unsignedp;
+ if (yyvsp[0].integer.value < 0 && ! yyvsp[0].integer.unsignedp)
+ yyval.integer.value = right_shift (&yyvsp[-2].integer, -yyvsp[0].integer.value);
+ else
+ yyval.integer.value = left_shift (&yyvsp[-2].integer, yyvsp[0].integer.value); ;
+ break;}
+case 18:
+#line 260 "cexp.y"
+{ yyval.integer.unsignedp = yyvsp[-2].integer.unsignedp;
+ if (yyvsp[0].integer.value < 0 && ! yyvsp[0].integer.unsignedp)
+ yyval.integer.value = left_shift (&yyvsp[-2].integer, -yyvsp[0].integer.value);
+ else
+ yyval.integer.value = right_shift (&yyvsp[-2].integer, yyvsp[0].integer.value); ;
+ break;}
+case 19:
+#line 266 "cexp.y"
+{ yyval.integer.value = (yyvsp[-2].integer.value == yyvsp[0].integer.value);
+ yyval.integer.unsignedp = 0; ;
+ break;}
+case 20:
+#line 269 "cexp.y"
+{ yyval.integer.value = (yyvsp[-2].integer.value != yyvsp[0].integer.value);
+ yyval.integer.unsignedp = 0; ;
+ break;}
+case 21:
+#line 272 "cexp.y"
+{ yyval.integer.unsignedp = 0;
+ if (yyvsp[-2].integer.unsignedp || yyvsp[0].integer.unsignedp)
+ yyval.integer.value = (unsigned long) yyvsp[-2].integer.value <= yyvsp[0].integer.value;
+ else
+ yyval.integer.value = yyvsp[-2].integer.value <= yyvsp[0].integer.value; ;
+ break;}
+case 22:
+#line 278 "cexp.y"
+{ yyval.integer.unsignedp = 0;
+ if (yyvsp[-2].integer.unsignedp || yyvsp[0].integer.unsignedp)
+ yyval.integer.value = (unsigned long) yyvsp[-2].integer.value >= yyvsp[0].integer.value;
+ else
+ yyval.integer.value = yyvsp[-2].integer.value >= yyvsp[0].integer.value; ;
+ break;}
+case 23:
+#line 284 "cexp.y"
+{ yyval.integer.unsignedp = 0;
+ if (yyvsp[-2].integer.unsignedp || yyvsp[0].integer.unsignedp)
+ yyval.integer.value = (unsigned long) yyvsp[-2].integer.value < yyvsp[0].integer.value;
+ else
+ yyval.integer.value = yyvsp[-2].integer.value < yyvsp[0].integer.value; ;
+ break;}
+case 24:
+#line 290 "cexp.y"
+{ yyval.integer.unsignedp = 0;
+ if (yyvsp[-2].integer.unsignedp || yyvsp[0].integer.unsignedp)
+ yyval.integer.value = (unsigned long) yyvsp[-2].integer.value > yyvsp[0].integer.value;
+ else
+ yyval.integer.value = yyvsp[-2].integer.value > yyvsp[0].integer.value; ;
+ break;}
+case 25:
+#line 296 "cexp.y"
+{ yyval.integer.value = yyvsp[-2].integer.value & yyvsp[0].integer.value;
+ yyval.integer.unsignedp = yyvsp[-2].integer.unsignedp || yyvsp[0].integer.unsignedp; ;
+ break;}
+case 26:
+#line 299 "cexp.y"
+{ yyval.integer.value = yyvsp[-2].integer.value ^ yyvsp[0].integer.value;
+ yyval.integer.unsignedp = yyvsp[-2].integer.unsignedp || yyvsp[0].integer.unsignedp; ;
+ break;}
+case 27:
+#line 302 "cexp.y"
+{ yyval.integer.value = yyvsp[-2].integer.value | yyvsp[0].integer.value;
+ yyval.integer.unsignedp = yyvsp[-2].integer.unsignedp || yyvsp[0].integer.unsignedp; ;
+ break;}
+case 28:
+#line 305 "cexp.y"
+{ yyval.integer.value = (yyvsp[-2].integer.value && yyvsp[0].integer.value);
+ yyval.integer.unsignedp = 0; ;
+ break;}
+case 29:
+#line 308 "cexp.y"
+{ yyval.integer.value = (yyvsp[-2].integer.value || yyvsp[0].integer.value);
+ yyval.integer.unsignedp = 0; ;
+ break;}
+case 30:
+#line 311 "cexp.y"
+{ yyval.integer.value = yyvsp[-4].integer.value ? yyvsp[-2].integer.value : yyvsp[0].integer.value;
+ yyval.integer.unsignedp = yyvsp[-2].integer.unsignedp || yyvsp[0].integer.unsignedp; ;
+ break;}
+case 31:
+#line 314 "cexp.y"
+{ yyval.integer = yylval.integer; ;
+ break;}
+case 32:
+#line 316 "cexp.y"
+{ yyval.integer = yylval.integer; ;
+ break;}
+case 33:
+#line 318 "cexp.y"
+{ yyval.integer.value = 0;
+ yyval.integer.unsignedp = 0; ;
+ break;}
+case 34:
+#line 323 "cexp.y"
+{ yyval.keywords = 0; ;
+ break;}
+case 35:
+#line 325 "cexp.y"
+{ struct arglist *temp;
+ yyval.keywords = (struct arglist *) xmalloc (sizeof (struct arglist));
+ yyval.keywords->next = yyvsp[-2].keywords;
+ yyval.keywords->name = (U_CHAR *) "(";
+ yyval.keywords->length = 1;
+ temp = yyval.keywords;
+ while (temp != 0 && temp->next != 0)
+ temp = temp->next;
+ temp->next = (struct arglist *) xmalloc (sizeof (struct arglist));
+ temp->next->next = yyvsp[0].keywords;
+ temp->next->name = (U_CHAR *) ")";
+ temp->next->length = 1; ;
+ break;}
+case 36:
+#line 338 "cexp.y"
+{ yyval.keywords = (struct arglist *) xmalloc (sizeof (struct arglist));
+ yyval.keywords->name = yyvsp[-1].name.address;
+ yyval.keywords->length = yyvsp[-1].name.length;
+ yyval.keywords->next = yyvsp[0].keywords; ;
+ break;}
+}
+ /* the action file gets copied in, in place of this dollar sign */
+#line 480 "/usr/local/lib/bison.simple"
+
+ yyvsp -= yylen;
+ yyssp -= yylen;
+#ifdef YYLSP_NEEDED
+ yylsp -= yylen;
+#endif
+
+#if YYDEBUG != 0
+ if (yydebug)
+ {
+ short *ssp1 = yyss - 1;
+ fprintf (stderr, "state stack now");
+ while (ssp1 != yyssp)
+ fprintf (stderr, " %d", *++ssp1);
+ fprintf (stderr, "\n");
+ }
+#endif
+
+ *++yyvsp = yyval;
+
+#ifdef YYLSP_NEEDED
+ yylsp++;
+ if (yylen == 0)
+ {
+ yylsp->first_line = yylloc.first_line;
+ yylsp->first_column = yylloc.first_column;
+ yylsp->last_line = (yylsp-1)->last_line;
+ yylsp->last_column = (yylsp-1)->last_column;
+ yylsp->text = 0;
+ }
+ else
+ {
+ yylsp->last_line = (yylsp+yylen-1)->last_line;
+ yylsp->last_column = (yylsp+yylen-1)->last_column;
+ }
+#endif
+
+ /* Now "shift" the result of the reduction.
+ Determine what state that goes to,
+ based on the state we popped back to
+ and the rule number reduced by. */
+
+ yyn = yyr1[yyn];
+
+ yystate = yypgoto[yyn - YYNTBASE] + *yyssp;
+ if (yystate >= 0 && yystate <= YYLAST && yycheck[yystate] == *yyssp)
+ yystate = yytable[yystate];
+ else
+ yystate = yydefgoto[yyn - YYNTBASE];
+
+ goto yynewstate;
+
+yyerrlab: /* here on detecting error */
+
+ if (! yyerrstatus)
+ /* If not already recovering from an error, report this error. */
+ {
+ ++yynerrs;
+
+#ifdef YYERROR_VERBOSE
+ yyn = yypact[yystate];
+
+ if (yyn > YYFLAG && yyn < YYLAST)
+ {
+ int size = 0;
+ char *msg;
+ int x, count;
+
+ count = 0;
+      /* Start X at -yyn if necessary to avoid negative indexes in yycheck. */
+ for (x = (yyn < 0 ? -yyn : 0);
+ x < (sizeof(yytname) / sizeof(char *)); x++)
+ if (yycheck[x + yyn] == x)
+ size += strlen(yytname[x]) + 15, count++;
+ msg = (char *) malloc(size + 15);
+ if (msg != 0)
+ {
+ strcpy(msg, "parse error");
+
+ if (count < 5)
+ {
+ count = 0;
+ for (x = (yyn < 0 ? -yyn : 0);
+ x < (sizeof(yytname) / sizeof(char *)); x++)
+ if (yycheck[x + yyn] == x)
+ {
+ strcat(msg, count == 0 ? ", expecting `" : " or `");
+ strcat(msg, yytname[x]);
+ strcat(msg, "'");
+ count++;
+ }
+ }
+ yyerror(msg);
+ free(msg);
+ }
+ else
+ yyerror ("parse error; also virtual memory exceeded");
+ }
+ else
+#endif /* YYERROR_VERBOSE */
+ yyerror("parse error");
+ }
+
+ goto yyerrlab1;
+yyerrlab1: /* here on error raised explicitly by an action */
+
+ if (yyerrstatus == 3)
+ {
+ /* if just tried and failed to reuse lookahead token after an error, discard it. */
+
+ /* return failure if at end of input */
+ if (yychar == YYEOF)
+ YYABORT;
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Discarding token %d (%s).\n", yychar, yytname[yychar1]);
+#endif
+
+ yychar = YYEMPTY;
+ }
+
+ /* Else will try to reuse lookahead token
+ after shifting the error token. */
+
+ yyerrstatus = 3; /* Each real token shifted decrements this */
+
+ goto yyerrhandle;
+
+yyerrdefault: /* current state does not do anything special for the error token. */
+
+#if 0
+ /* This is wrong; only states that explicitly want error tokens
+ should shift them. */
+  yyn = yydefact[yystate];  /* If its default is to accept any token, ok.  Otherwise pop it. */
+ if (yyn) goto yydefault;
+#endif
+
+yyerrpop: /* pop the current state because it cannot handle the error token */
+
+ if (yyssp == yyss) YYABORT;
+ yyvsp--;
+ yystate = *--yyssp;
+#ifdef YYLSP_NEEDED
+ yylsp--;
+#endif
+
+#if YYDEBUG != 0
+ if (yydebug)
+ {
+ short *ssp1 = yyss - 1;
+ fprintf (stderr, "Error: state stack now");
+ while (ssp1 != yyssp)
+ fprintf (stderr, " %d", *++ssp1);
+ fprintf (stderr, "\n");
+ }
+#endif
+
+yyerrhandle:
+
+ yyn = yypact[yystate];
+ if (yyn == YYFLAG)
+ goto yyerrdefault;
+
+ yyn += YYTERROR;
+ if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != YYTERROR)
+ goto yyerrdefault;
+
+ yyn = yytable[yyn];
+ if (yyn < 0)
+ {
+ if (yyn == YYFLAG)
+ goto yyerrpop;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+ else if (yyn == 0)
+ goto yyerrpop;
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+#if YYDEBUG != 0
+ if (yydebug)
+ fprintf(stderr, "Shifting error token, ");
+#endif
+
+ *++yyvsp = yylval;
+#ifdef YYLSP_NEEDED
+ *++yylsp = yylloc;
+#endif
+
+ yystate = yyn;
+ goto yynewstate;
+}
+#line 343 "cexp.y"
+
+
+/* During parsing of a C expression, the pointer to the next character
+ is in this variable. */
+
+static char *lexptr;
+
+/* Take care of parsing a number (anything that starts with a digit).
+ Set yylval and return the token type; update lexptr.
+ LEN is the number of characters in it. */
+
+/* maybe needs to actually deal with floating point numbers */
+
+int
+parse_number (olen)
+ int olen;
+{
+ register char *p = lexptr;
+ register int c;
+ register unsigned long n = 0, nd, ULONG_MAX_over_base;
+ register int base = 10;
+ register int len = olen;
+ register int overflow = 0;
+ register int digit, largest_digit = 0;
+ int spec_long = 0;
+
+ for (c = 0; c < len; c++)
+ if (p[c] == '.') {
+ /* It's a float since it contains a point. */
+ yyerror ("floating point numbers not allowed in #if expressions");
+ return ERROR;
+ }
+
+ yylval.integer.unsignedp = 0;
+
+ if (len >= 3 && (!strncmp (p, "0x", 2) || !strncmp (p, "0X", 2))) {
+ p += 2;
+ base = 16;
+ len -= 2;
+ }
+ else if (*p == '0')
+ base = 8;
+
+ ULONG_MAX_over_base = (unsigned long) -1 / base;
+
+ for (; len > 0; len--) {
+ c = *p++;
+
+ if (c >= '0' && c <= '9')
+ digit = c - '0';
+ else if (base == 16 && c >= 'a' && c <= 'f')
+ digit = c - 'a' + 10;
+ else if (base == 16 && c >= 'A' && c <= 'F')
+ digit = c - 'A' + 10;
+ else {
+ /* `l' means long, and `u' means unsigned. */
+ while (1) {
+ if (c == 'l' || c == 'L')
+ {
+ if (spec_long)
+ yyerror ("two `l's in integer constant");
+ spec_long = 1;
+ }
+ else if (c == 'u' || c == 'U')
+ {
+ if (yylval.integer.unsignedp)
+ yyerror ("two `u's in integer constant");
+ yylval.integer.unsignedp = 1;
+ }
+ else
+ break;
+
+ if (--len == 0)
+ break;
+ c = *p++;
+ }
+ /* Don't look for any more digits after the suffixes. */
+ break;
+ }
+ if (largest_digit < digit)
+ largest_digit = digit;
+ nd = n * base + digit;
+    overflow |= (ULONG_MAX_over_base < n) | (nd < n);
+ n = nd;
+ }
+
+ if (len != 0) {
+ yyerror ("Invalid number in #if expression");
+ return ERROR;
+ }
+
+ if (base <= largest_digit)
+ warning ("integer constant contains digits beyond the radix");
+
+ if (overflow)
+ warning ("integer constant out of range");
+
+ /* If too big to be signed, consider it unsigned. */
+ if ((long) n < 0 && ! yylval.integer.unsignedp)
+ {
+ if (base == 10)
+ warning ("integer constant is so large that it is unsigned");
+ yylval.integer.unsignedp = 1;
+ }
+
+ lexptr = p;
+ yylval.integer.value = n;
+ return INT;
+}
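+
+/* Illustrative trace (editor's note, not part of the imported source):
+   with lexptr pointing at "0x1fU)" and olen == 5, parse_number reads
+   the "0x" prefix (base becomes 16), accumulates the digits 1 and f
+   (n == 31), consumes the `U' suffix (unsignedp becomes 1), and
+   returns INT with yylval.integer.value == 31.  */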
+
+struct token {
+ char *operator;
+ int token;
+};
+
+static struct token tokentab2[] = {
+ {"&&", AND},
+ {"||", OR},
+ {"<<", LSH},
+ {">>", RSH},
+ {"==", EQUAL},
+ {"!=", NOTEQUAL},
+ {"<=", LEQ},
+ {">=", GEQ},
+ {"++", ERROR},
+ {"--", ERROR},
+ {NULL, ERROR}
+};
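+
+/* Editor's note (illustrative, not part of the imported source): with
+   lexptr at "<<3", the scan in yylex below matches the "<<" entry,
+   advances lexptr by two, and returns LSH; "++" and "--" match but
+   deliberately yield ERROR, as they are not valid in `#if' operands.  */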
+
+/* Read one token, getting characters through lexptr. */
+
+int
+yylex ()
+{
+ register int c;
+ register int namelen;
+ register unsigned char *tokstart;
+ register struct token *toktab;
+ int wide_flag;
+
+ retry:
+
+ tokstart = (unsigned char *) lexptr;
+ c = *tokstart;
+ /* See if it is a special token of length 2. */
+ if (! keyword_parsing)
+ for (toktab = tokentab2; toktab->operator != NULL; toktab++)
+ if (c == *toktab->operator && tokstart[1] == toktab->operator[1]) {
+ lexptr += 2;
+ if (toktab->token == ERROR)
+ {
+ char *buf = (char *) alloca (40);
+ sprintf (buf, "`%s' not allowed in operand of `#if'", toktab->operator);
+ yyerror (buf);
+ }
+ return toktab->token;
+ }
+
+ switch (c) {
+ case 0:
+ return 0;
+
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ lexptr++;
+ goto retry;
+
+ case 'L':
+ /* Capital L may start a wide-string or wide-character constant. */
+ if (lexptr[1] == '\'')
+ {
+ lexptr++;
+ wide_flag = 1;
+ goto char_constant;
+ }
+ if (lexptr[1] == '"')
+ {
+ lexptr++;
+ wide_flag = 1;
+ goto string_constant;
+ }
+ break;
+
+ case '\'':
+ wide_flag = 0;
+ char_constant:
+ lexptr++;
+ if (keyword_parsing) {
+ char *start_ptr = lexptr - 1;
+ while (1) {
+ c = *lexptr++;
+ if (c == '\\')
+ c = parse_escape (&lexptr);
+ else if (c == '\'')
+ break;
+ }
+ yylval.name.address = tokstart;
+ yylval.name.length = lexptr - start_ptr;
+ return NAME;
+ }
+
+ /* This code for reading a character constant
+ handles multicharacter constants and wide characters.
+ It is mostly copied from c-lex.c. */
+ {
+ register int result = 0;
+    register int num_chars = 0;
+ unsigned width = MAX_CHAR_TYPE_SIZE;
+ int max_chars;
+ char *token_buffer;
+
+ if (wide_flag)
+ {
+ width = MAX_WCHAR_TYPE_SIZE;
+#ifdef MULTIBYTE_CHARS
+ max_chars = MB_CUR_MAX;
+#else
+ max_chars = 1;
+#endif
+ }
+ else
+ max_chars = MAX_LONG_TYPE_SIZE / width;
+
+ token_buffer = (char *) alloca (max_chars + 1);
+
+ while (1)
+ {
+ c = *lexptr++;
+
+ if (c == '\'' || c == EOF)
+ break;
+
+ if (c == '\\')
+ {
+ c = parse_escape (&lexptr);
+ if (width < HOST_BITS_PER_INT
+ && (unsigned) c >= (1 << width))
+ pedwarn ("escape sequence out of range for character");
+ }
+
+ num_chars++;
+
+ /* Merge character into result; ignore excess chars. */
+ if (num_chars < max_chars + 1)
+ {
+ if (width < HOST_BITS_PER_INT)
+ result = (result << width) | (c & ((1 << width) - 1));
+ else
+ result = c;
+ token_buffer[num_chars - 1] = c;
+ }
+ }
+
+ token_buffer[num_chars] = 0;
+
+ if (c != '\'')
+      error ("malformed character constant");
+ else if (num_chars == 0)
+ error ("empty character constant");
+ else if (num_chars > max_chars)
+ {
+ num_chars = max_chars;
+ error ("character constant too long");
+ }
+ else if (num_chars != 1 && ! traditional)
+ warning ("multi-character character constant");
+
+ /* If char type is signed, sign-extend the constant. */
+ if (! wide_flag)
+ {
+ int num_bits = num_chars * width;
+
+ if (lookup ("__CHAR_UNSIGNED__", sizeof ("__CHAR_UNSIGNED__")-1, -1)
+ || ((result >> (num_bits - 1)) & 1) == 0)
+ yylval.integer.value
+ = result & ((unsigned long) ~0 >> (HOST_BITS_PER_LONG - num_bits));
+ else
+ yylval.integer.value
+ = result | ~((unsigned long) ~0 >> (HOST_BITS_PER_LONG - num_bits));
+ }
+ else
+ {
+#ifdef MULTIBYTE_CHARS
+ /* Set the initial shift state and convert the next sequence. */
+ result = 0;
+ /* In all locales L'\0' is zero and mbtowc will return zero,
+ so don't use it. */
+ if (num_chars > 1
+ || (num_chars == 1 && token_buffer[0] != '\0'))
+ {
+ wchar_t wc;
+ (void) mbtowc (NULL_PTR, NULL_PTR, 0);
+ if (mbtowc (& wc, token_buffer, num_chars) == num_chars)
+ result = wc;
+ else
+ warning ("Ignoring invalid multibyte character");
+ }
+#endif
+ yylval.integer.value = result;
+ }
+ }
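+
+  /* Editor's worked example (not part of the imported source): on a
+     host where MAX_CHAR_TYPE_SIZE is 8, the two-character constant
+     'ab' gives result == ('a' << 8) | 'b' == 0x6162, and draws the
+     "multi-character character constant" warning unless -traditional
+     is in effect.  */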
+
+ /* This is always a signed type. */
+ yylval.integer.unsignedp = 0;
+
+ return CHAR;
+
+ /* some of these chars are invalid in constant expressions;
+ maybe do something about them later */
+ case '/':
+ case '+':
+ case '-':
+ case '*':
+ case '%':
+ case '|':
+ case '&':
+ case '^':
+ case '~':
+ case '!':
+ case '@':
+ case '<':
+ case '>':
+ case '[':
+ case ']':
+ case '.':
+ case '?':
+ case ':':
+ case '=':
+ case '{':
+ case '}':
+ case ',':
+ case '#':
+ if (keyword_parsing)
+ break;
+ case '(':
+ case ')':
+ lexptr++;
+ return c;
+
+ case '"':
+ string_constant:
+ if (keyword_parsing) {
+ char *start_ptr = lexptr;
+ lexptr++;
+ while (1) {
+ c = *lexptr++;
+ if (c == '\\')
+ c = parse_escape (&lexptr);
+ else if (c == '"')
+ break;
+ }
+ yylval.name.address = tokstart;
+ yylval.name.length = lexptr - start_ptr;
+ return NAME;
+ }
+ yyerror ("string constants not allowed in #if expressions");
+ return ERROR;
+ }
+
+ if (c >= '0' && c <= '9' && !keyword_parsing) {
+ /* It's a number */
+ for (namelen = 0;
+ c = tokstart[namelen], is_idchar[c] || c == '.';
+ namelen++)
+ ;
+ return parse_number (namelen);
+ }
+
+ /* It is a name. See how long it is. */
+
+ if (keyword_parsing) {
+ for (namelen = 0;; namelen++) {
+ if (is_hor_space[tokstart[namelen]])
+ break;
+ if (tokstart[namelen] == '(' || tokstart[namelen] == ')')
+ break;
+ if (tokstart[namelen] == '"' || tokstart[namelen] == '\'')
+ break;
+ }
+ } else {
+ if (!is_idstart[c]) {
+ yyerror ("Invalid token in expression");
+ return ERROR;
+ }
+
+ for (namelen = 0; is_idchar[tokstart[namelen]]; namelen++)
+ ;
+ }
+
+ lexptr += namelen;
+ yylval.name.address = tokstart;
+ yylval.name.length = namelen;
+ return NAME;
+}
+
+
+/* Parse a C escape sequence. STRING_PTR points to a variable
+ containing a pointer to the string to parse. That pointer
+ is updated past the characters we use. The value of the
+ escape sequence is returned.
+
+ A negative value means the sequence \ newline was seen,
+ which is supposed to be equivalent to nothing at all.
+
+   If \ is followed by a null character, we return 0 and leave the
+   string pointer pointing at the null character.
+
+ If \ is followed by 000, we return 0 and leave the string pointer
+ after the zeros. A value of 0 does not mean end of string. */
+
+int
+parse_escape (string_ptr)
+ char **string_ptr;
+{
+ register int c = *(*string_ptr)++;
+ switch (c)
+ {
+ case 'a':
+ return TARGET_BELL;
+ case 'b':
+ return TARGET_BS;
+ case 'e':
+ case 'E':
+ if (pedantic)
+ pedwarn ("non-ANSI-standard escape sequence, `\\%c'", c);
+ return 033;
+ case 'f':
+ return TARGET_FF;
+ case 'n':
+ return TARGET_NEWLINE;
+ case 'r':
+ return TARGET_CR;
+ case 't':
+ return TARGET_TAB;
+ case 'v':
+ return TARGET_VT;
+ case '\n':
+ return -2;
+ case 0:
+ (*string_ptr)--;
+ return 0;
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ {
+ register int i = c - '0';
+ register int count = 0;
+ while (++count < 3)
+ {
+ c = *(*string_ptr)++;
+ if (c >= '0' && c <= '7')
+ i = (i << 3) + c - '0';
+ else
+ {
+ (*string_ptr)--;
+ break;
+ }
+ }
+ if ((i & ~((1 << MAX_CHAR_TYPE_SIZE) - 1)) != 0)
+ {
+ i &= (1 << MAX_CHAR_TYPE_SIZE) - 1;
+ warning ("octal character constant does not fit in a byte");
+ }
+ return i;
+ }
+ case 'x':
+ {
+ register unsigned i = 0, overflow = 0, digits_found = 0, digit;
+ for (;;)
+ {
+ c = *(*string_ptr)++;
+ if (c >= '0' && c <= '9')
+ digit = c - '0';
+ else if (c >= 'a' && c <= 'f')
+ digit = c - 'a' + 10;
+ else if (c >= 'A' && c <= 'F')
+ digit = c - 'A' + 10;
+ else
+ {
+ (*string_ptr)--;
+ break;
+ }
+ overflow |= i ^ (i << 4 >> 4);
+ i = (i << 4) + digit;
+ digits_found = 1;
+ }
+ if (!digits_found)
+ yyerror ("\\x used with no following hex digits");
+ if (overflow | (i & ~((1 << BITS_PER_UNIT) - 1)))
+ {
+ i &= (1 << BITS_PER_UNIT) - 1;
+ warning ("hex character constant does not fit in a byte");
+ }
+ return i;
+ }
+ default:
+ return c;
+ }
+}
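+
+/* Worked examples (editor's note, not part of the imported source),
+   with *string_ptr pointing just past the backslash:
+
+     at "n"    the switch returns TARGET_NEWLINE;
+     at "101"  up to three octal digits are read, returning 0101 == 65;
+     at "x41"  hex digits are read, returning 0x41 == 65;
+     at a real newline it returns -2, the "nothing at all" case.  */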
+
+void
+yyerror (s)
+ char *s;
+{
+ error (s);
+ longjmp (parse_return_error, 1);
+}
+
+static void
+integer_overflow ()
+{
+ if (pedantic)
+ pedwarn ("integer overflow in preprocessor expression");
+}
+
+static long
+left_shift (a, b)
+ struct constant *a;
+ unsigned long b;
+{
+ if (b >= HOST_BITS_PER_LONG)
+ {
+ if (! a->unsignedp && a->value != 0)
+ integer_overflow ();
+ return 0;
+ }
+ else if (a->unsignedp)
+ return (unsigned long) a->value << b;
+ else
+ {
+ long l = a->value << b;
+ if (l >> b != a->value)
+ integer_overflow ();
+ return l;
+ }
+}
+
+static long
+right_shift (a, b)
+ struct constant *a;
+ unsigned long b;
+{
+ if (b >= HOST_BITS_PER_LONG)
+ return a->unsignedp ? 0 : a->value >> (HOST_BITS_PER_LONG - 1);
+ else if (a->unsignedp)
+ return (unsigned long) a->value >> b;
+ else
+ return a->value >> b;
+}
+
+/* This page contains the entry point to this file. */
+
+/* Parse STRING as an expression, and complain if this fails
+ to use up all of the contents of STRING. */
+/* We do not support C comments. They should be removed before
+ this function is called. */
+
+int
+parse_c_expression (string)
+ char *string;
+{
+ lexptr = string;
+
+ if (lexptr == 0 || *lexptr == 0) {
+ error ("empty #if expression");
+ return 0; /* don't include the #if group */
+ }
+
+  /* If there is some sort of scanning error, just return 0 and assume
+     the parsing routine has printed an error message somewhere.
+     There is surely a better thing to do than this. */
+ if (setjmp (parse_return_error))
+ return 0;
+
+ if (yyparse ())
+ return 0; /* actually this is never reached
+ the way things stand. */
+ if (*lexptr)
+ error ("Junk after end of expression.");
+
+ return expression_value; /* set by yyparse () */
+}
+
+#ifdef TEST_EXP_READER
+extern int yydebug;
+
+/* Main program for testing purposes. */
+int
+main ()
+{
+ int n, c;
+ char buf[1024];
+
+/*
+ yydebug = 1;
+*/
+ initialize_random_junk ();
+
+ for (;;) {
+ printf ("enter expression: ");
+ n = 0;
+ while ((buf[n] = getchar ()) != '\n' && buf[n] != EOF)
+ n++;
+ if (buf[n] == EOF)
+ break;
+ buf[n] = '\0';
+ printf ("parser returned %d\n", parse_c_expression (buf));
+ }
+
+ return 0;
+}
+
+/* table to tell if char can be part of a C identifier. */
+unsigned char is_idchar[256];
+/* table to tell if char can be first char of a c identifier. */
+unsigned char is_idstart[256];
+/* table to tell if c is horizontal space. isspace () thinks that
+ newline is space; this is not a good idea for this program. */
+char is_hor_space[256];
+
+/*
+ * initialize random junk in the hash table and maybe other places
+ */
+initialize_random_junk ()
+{
+ register int i;
+
+ /*
+ * Set up is_idchar and is_idstart tables. These should be
+ * faster than saying (is_alpha (c) || c == '_'), etc.
+   * These things must be set up before calling any routines that
+   * refer to them.
+ */
+ for (i = 'a'; i <= 'z'; i++) {
+ ++is_idchar[i - 'a' + 'A'];
+ ++is_idchar[i];
+ ++is_idstart[i - 'a' + 'A'];
+ ++is_idstart[i];
+ }
+ for (i = '0'; i <= '9'; i++)
+ ++is_idchar[i];
+ ++is_idchar['_'];
+ ++is_idstart['_'];
+#if DOLLARS_IN_IDENTIFIERS
+ ++is_idchar['$'];
+ ++is_idstart['$'];
+#endif
+
+ /* horizontal space table */
+ ++is_hor_space[' '];
+ ++is_hor_space['\t'];
+}
+
+error (msg)
+{
+ printf ("error: %s\n", msg);
+}
+
+warning (msg)
+{
+ printf ("warning: %s\n", msg);
+}
+
+struct hashnode *
+lookup (name, len, hash)
+ char *name;
+ int len;
+ int hash;
+{
+ return (DEFAULT_SIGNED_CHAR) ? 0 : ((struct hashnode *) -1);
+}
+#endif
diff --git a/gnu/usr.bin/cc/cpp/cpp.1 b/gnu/usr.bin/cc/cpp/cpp.1
new file mode 100644
index 0000000..54c4dfb
--- /dev/null
+++ b/gnu/usr.bin/cc/cpp/cpp.1
@@ -0,0 +1 @@
+.so man1/cccp.1
diff --git a/gnu/usr.bin/cc/include/basic-block.h b/gnu/usr.bin/cc/include/basic-block.h
new file mode 100644
index 0000000..b1bc002
--- /dev/null
+++ b/gnu/usr.bin/cc/include/basic-block.h
@@ -0,0 +1,68 @@
+/* Define control and data flow tables, and regsets.
+ Copyright (C) 1987 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Number of bits in each actual element of a regset. */
+
+#define REGSET_ELT_BITS HOST_BITS_PER_WIDE_INT
+
+/* Type to use for a regset element. Note that lots of code assumes
+ that the initial part of a regset that contains information on the
+ hard registers is the same format as a HARD_REG_SET. */
+
+#define REGSET_ELT_TYPE unsigned HOST_WIDE_INT
+
+/* Define the type for a pointer to a set with a bit for each
+ (hard or pseudo) register. */
+
+typedef REGSET_ELT_TYPE *regset;
+
+/* Size of a regset for the current function,
+ in (1) bytes and (2) elements. */
+
+extern int regset_bytes;
+extern int regset_size;
+
+/* Number of basic blocks in the current function. */
+
+extern int n_basic_blocks;
+
+/* Index by basic block number, get first insn in the block. */
+
+extern rtx *basic_block_head;
+
+/* Index by basic block number, get last insn in the block. */
+
+extern rtx *basic_block_end;
+
+/* Index by basic block number, get address of regset
+ describing the registers live at the start of that block. */
+
+extern regset *basic_block_live_at_start;
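+
+/* Editor's illustrative sketch (not part of the imported header):
+   register R is live on entry to basic block B when
+
+     basic_block_live_at_start[B][R / REGSET_ELT_BITS]
+       & ((REGSET_ELT_TYPE) 1 << (R % REGSET_ELT_BITS))
+
+   is nonzero.  */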
+
+/* Indexed by n, gives number of basic block that (REG n) is used in.
+ If the value is REG_BLOCK_GLOBAL (-2),
+ it means (REG n) is used in more than one basic block.
+ REG_BLOCK_UNKNOWN (-1) means it hasn't been seen yet so we don't know.
+ This information remains valid for the rest of the compilation
+ of the current function; it is used to control register allocation. */
+
+#define REG_BLOCK_UNKNOWN -1
+#define REG_BLOCK_GLOBAL -2
+extern int *reg_basic_block;
diff --git a/gnu/usr.bin/cc/include/bc-arity.h b/gnu/usr.bin/cc/include/bc-arity.h
new file mode 100644
index 0000000..d311745
--- /dev/null
+++ b/gnu/usr.bin/cc/include/bc-arity.h
@@ -0,0 +1,232 @@
+{ 0, 0, 0, {0}},
+{ 1, 0, 0, {0}},
+{ 1, 2, 0, {0}},
+{ 1, 2, 0, {0}},
+{ 0, 0, 1, {SIcode, }},
+{ 0, 0, 1, {SIcode, }},
+{ 0, 1, 1, {QIcode, }},
+{ 0, 1, 1, {HIcode, }},
+{ 0, 1, 1, {SIcode, }},
+{ 0, 1, 1, {DIcode, }},
+{ 0, 1, 1, {SFcode, }},
+{ 0, 1, 1, {DFcode, }},
+{ 0, 1, 1, {XFcode, }},
+{ 0, 1, 1, {Pcode, }},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 2, 0, 0, {0}},
+{ 2, 0, 0, {0}},
+{ 2, 0, 0, {0}},
+{ 2, 0, 0, {0}},
+{ 2, 0, 0, {0}},
+{ 2, 0, 0, {0}},
+{ 2, 0, 0, {0}},
+{ 2, 0, 0, {0}},
+{ 3, 0, 0, {0}},
+{ 2, 0, 0, {0}},
+{ 1, 1, 1, {SIcode, }},
+{ 1, 1, 0, {0}},
+{ 0, 1, 1, {SIcode, }},
+{ 0, 1, 1, {SIcode, }},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 3, 1, 0, {0}},
+{ 3, 1, 0, {0}},
+{ 4, 0, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 1, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 4, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 4, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 4, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 2, 1, 0, {0}},
+{ 4, 1, 0, {0}},
+{ 1, 0, 1, {SIcode, }},
+{ 1, 0, 1, {SIcode, }},
+{ 0, 0, 1, {SIcode, }},
+{ 0, 0, 0, {0}},
+{ 1, 0, 3, {SIcode, SIcode, SIcode, }},
+{ 1, 0, 3, {SIcode, SIcode, SIcode, }},
+{ 1, 0, 3, {SIcode, SIcode, SIcode, }},
+{ 1, 0, 3, {SIcode, SIcode, SIcode, }},
+{ 3, 0, 0, {0}},
+{ 0, 1, 0, {0}},
+{ 0, 0, 0, {0}},
+{ 0, 0, 1, {SIcode, }},
diff --git a/gnu/usr.bin/cc/include/bc-emit.h b/gnu/usr.bin/cc/include/bc-emit.h
new file mode 100644
index 0000000..c00da5b
--- /dev/null
+++ b/gnu/usr.bin/cc/include/bc-emit.h
@@ -0,0 +1,133 @@
+/* bc-emit.h - declare entry points for producing object files of bytecodes. */
+
+/* Internal format of symbol table for the object file. */
+struct bc_sym
+{
+ /* Private copy separately malloc'd. */
+ char *name;
+
+ /* Symbol has a defined value. */
+ unsigned int defined:1;
+
+ /* Symbol has been globalized. */
+ unsigned int global:1;
+
+ /* Symbol is common. */
+ unsigned int common:1;
+
+ /* Value if defined. */
+ unsigned long int val;
+
+ /* Used in internal symbol table structure. */
+ struct bc_sym *next;
+};
+
+
+/* List of symbols defined in a particular segment. */
+struct bc_segsym
+{
+ struct bc_sym *sym;
+ struct bc_segsym *next;
+};
+
+
+/* List of relocations needed in a particular segment. */
+struct bc_segreloc
+{
+ /* Offset of datum to be relocated. */
+ unsigned int offset;
+
+ /* Symbol to be relocated by. */
+ struct bc_sym *sym;
+
+ struct bc_segreloc *next;
+};
+
+
+/* Segment of an object file. */
+struct bc_seg
+{
+ /* Size allocated to contents. */
+ unsigned int alloc;
+
+ /* Pointer to base of contents. */
+ char *data;
+
+ /* Actual size of contents. */
+ unsigned int size;
+
+ /* List of symbols defined in this segment. */
+ struct bc_segsym *syms;
+
+ /* List of relocations for this segment. */
+ struct bc_segreloc *relocs;
+};
+
+
+/* Anonymous bytecode label within a single function. */
+struct bc_label
+{
+ /* Offset of label from start of segment. */
+ unsigned int offset;
+
+ /* True when offset is valid. */
+ unsigned int defined:1;
+
+ /* Unique bytecode ID, used to determine innermost
+ block containment */
+ int uid;
+
+ /* Next node in list */
+ struct bc_label *next;
+};
+
+
+/* Reference to a bc_label; a list of all such references is kept for
+ the function, then when it is finished they are backpatched to
+ contain the correct values. */
+
+struct bc_labelref
+{
+ /* Label referenced. */
+ struct bc_label *label;
+
+ /* Code offset of reference. */
+ unsigned int offset;
+
+ /* Next labelref in list */
+ struct bc_labelref *next;
+};
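+
+/* Editor's sketch of the intended flow (illustrative; the entry points
+   are declared below and the details live in bc-emit.c):
+
+     struct bc_label *l = bc_get_bytecode_label ();
+     bc_emit_bytecode (jump);
+     bc_emit_bytecode_labelref (l);      forward reference recorded
+     ...
+     bc_emit_bytecode_labeldef (l);      offset now known; references
+                                         are backpatched when the
+                                         function is finished
+*/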
+
+
+
+extern void bc_initialize();
+extern int bc_begin_function();
+extern char *bc_emit_trampoline();
+extern void bc_emit_bytecode();
+extern void bc_emit_bytecode_const();
+extern struct bc_label *bc_get_bytecode_label();
+extern int bc_emit_bytecode_labeldef();
+extern void bc_emit_bytecode_labelref();
+extern void bc_emit_code_labelref();
+extern char *bc_end_function();
+extern void bc_align_const();
+extern void bc_emit_const();
+extern void bc_emit_const_skip();
+extern int bc_emit_const_labeldef();
+extern void bc_emit_const_labelref();
+extern void bc_align_data();
+extern void bc_emit_data();
+extern void bc_emit_data_skip();
+extern int bc_emit_data_labeldef();
+extern void bc_emit_data_labelref();
+extern int bc_define_pointer ();
+extern int bc_emit_common();
+extern void bc_globalize_label();
+extern void bc_text();
+extern void bc_data();
+extern void bc_align();
+extern void bc_emit();
+extern void bc_emit_skip();
+extern int bc_emit_labeldef();
+extern void bc_emit_labelref();
+extern void bc_write_file();
diff --git a/gnu/usr.bin/cc/include/bc-opcode.h b/gnu/usr.bin/cc/include/bc-opcode.h
new file mode 100644
index 0000000..ba5cafe
--- /dev/null
+++ b/gnu/usr.bin/cc/include/bc-opcode.h
@@ -0,0 +1,238 @@
+/* This file is automatically generated from bytecode.def,
+do not make any changes here. Instead edit bytecode.def. */
+
+enum bytecode_opcode
+{ neverneverland,
+ drop,
+ duplicate,
+ over,
+ setstackSI,
+ adjstackSI,
+ constQI,
+ constHI,
+ constSI,
+ constDI,
+ constSF,
+ constDF,
+ constXF,
+ constP,
+ loadQI,
+ loadHI,
+ loadSI,
+ loadDI,
+ loadSF,
+ loadDF,
+ loadXF,
+ loadP,
+ storeQI,
+ storeHI,
+ storeSI,
+ storeDI,
+ storeSF,
+ storeDF,
+ storeXF,
+ storeP,
+ storeBLK,
+ clearBLK,
+ addconstPSI,
+ newlocalSI,
+ localP,
+ argP,
+ convertQIHI,
+ convertHISI,
+ convertSIDI,
+ convertQISI,
+ convertQUHU,
+ convertHUSU,
+ convertSUDU,
+ convertQUSU,
+ convertSFDF,
+ convertDFXF,
+ convertHIQI,
+ convertSIHI,
+ convertDISI,
+ convertSIQI,
+ convertSUQU,
+ convertDFSF,
+ convertXFDF,
+ convertSISF,
+ convertSIDF,
+ convertSIXF,
+ convertSUSF,
+ convertSUDF,
+ convertSUXF,
+ convertDISF,
+ convertDIDF,
+ convertDIXF,
+ convertDUSF,
+ convertDUDF,
+ convertDUXF,
+ convertSFSI,
+ convertDFSI,
+ convertXFSI,
+ convertSFSU,
+ convertDFSU,
+ convertXFSU,
+ convertSFDI,
+ convertDFDI,
+ convertXFDI,
+ convertSFDU,
+ convertDFDU,
+ convertXFDU,
+ convertPSI,
+ convertSIP,
+ convertSIT,
+ convertDIT,
+ convertSFT,
+ convertDFT,
+ convertXFT,
+ convertPT,
+ zxloadBI,
+ sxloadBI,
+ sstoreBI,
+ addSI,
+ addDI,
+ addSF,
+ addDF,
+ addXF,
+ addPSI,
+ subSI,
+ subDI,
+ subSF,
+ subDF,
+ subXF,
+ subPP,
+ mulSI,
+ mulDI,
+ mulSU,
+ mulDU,
+ mulSF,
+ mulDF,
+ mulXF,
+ divSI,
+ divDI,
+ divSU,
+ divDU,
+ divSF,
+ divDF,
+ divXF,
+ modSI,
+ modDI,
+ modSU,
+ modDU,
+ andSI,
+ andDI,
+ iorSI,
+ iorDI,
+ xorSI,
+ xorDI,
+ lshiftSI,
+ lshiftSU,
+ lshiftDI,
+ lshiftDU,
+ rshiftSI,
+ rshiftSU,
+ rshiftDI,
+ rshiftDU,
+ ltSI,
+ ltSU,
+ ltDI,
+ ltDU,
+ ltSF,
+ ltDF,
+ ltXF,
+ ltP,
+ leSI,
+ leSU,
+ leDI,
+ leDU,
+ leSF,
+ leDF,
+ leXF,
+ leP,
+ geSI,
+ geSU,
+ geDI,
+ geDU,
+ geSF,
+ geDF,
+ geXF,
+ geP,
+ gtSI,
+ gtSU,
+ gtDI,
+ gtDU,
+ gtSF,
+ gtDF,
+ gtXF,
+ gtP,
+ eqSI,
+ eqDI,
+ eqSF,
+ eqDF,
+ eqXF,
+ eqP,
+ neSI,
+ neDI,
+ neSF,
+ neDF,
+ neXF,
+ neP,
+ negSI,
+ negDI,
+ negSF,
+ negDF,
+ negXF,
+ notSI,
+ notDI,
+ notT,
+ predecQI,
+ predecHI,
+ predecSI,
+ predecDI,
+ predecP,
+ predecSF,
+ predecDF,
+ predecXF,
+ predecBI,
+ preincQI,
+ preincHI,
+ preincSI,
+ preincDI,
+ preincP,
+ preincSF,
+ preincDF,
+ preincXF,
+ preincBI,
+ postdecQI,
+ postdecHI,
+ postdecSI,
+ postdecDI,
+ postdecP,
+ postdecSF,
+ postdecDF,
+ postdecXF,
+ postdecBI,
+ postincQI,
+ postincHI,
+ postincSI,
+ postincDI,
+ postincP,
+ postincSF,
+ postincDF,
+ postincXF,
+ postincBI,
+ xjumpif,
+ xjumpifnot,
+ jump,
+ jumpP,
+ caseSI,
+ caseSU,
+ caseDI,
+ caseDU,
+ call,
+ returnP,
+ ret,
+ linenote,
+ LAST_AND_UNUSED_OPCODE
+};
diff --git a/gnu/usr.bin/cc/include/bc-optab.h b/gnu/usr.bin/cc/include/bc-optab.h
new file mode 100644
index 0000000..f42485f
--- /dev/null
+++ b/gnu/usr.bin/cc/include/bc-optab.h
@@ -0,0 +1,74 @@
+/* Bytecode token definitions for GNU C-compiler.
+ Copyright (C) 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+extern void bc_expand_conversion ();
+extern void bc_expand_truth_conversion ();
+extern void bc_expand_binary_operation ();
+extern void bc_expand_unary_operation ();
+
+struct binary_operator
+{
+ enum bytecode_opcode opcode;
+ enum typecode result;
+ enum typecode arg0;
+ enum typecode arg1;
+};
+
+extern struct binary_operator optab_plus_expr[];
+extern struct binary_operator optab_minus_expr[];
+extern struct binary_operator optab_mult_expr[];
+extern struct binary_operator optab_trunc_div_expr[];
+extern struct binary_operator optab_trunc_mod_expr[];
+extern struct binary_operator optab_rdiv_expr[];
+extern struct binary_operator optab_bit_and_expr[];
+extern struct binary_operator optab_bit_ior_expr[];
+extern struct binary_operator optab_bit_xor_expr[];
+extern struct binary_operator optab_lshift_expr[];
+extern struct binary_operator optab_rshift_expr[];
+extern struct binary_operator optab_truth_and_expr[];
+extern struct binary_operator optab_truth_or_expr[];
+extern struct binary_operator optab_lt_expr[];
+extern struct binary_operator optab_le_expr[];
+extern struct binary_operator optab_ge_expr[];
+extern struct binary_operator optab_gt_expr[];
+extern struct binary_operator optab_eq_expr[];
+extern struct binary_operator optab_ne_expr[];
+
+struct unary_operator
+{
+ enum bytecode_opcode opcode;
+ enum typecode result;
+ enum typecode arg0;
+};
+
+extern struct unary_operator optab_negate_expr[];
+extern struct unary_operator optab_bit_not_expr[];
+extern struct unary_operator optab_truth_not_expr[];
+
+struct increment_operator
+{
+ enum bytecode_opcode opcode;
+ enum typecode arg;
+};
+
+extern struct increment_operator optab_predecrement_expr[];
+extern struct increment_operator optab_preincrement_expr[];
+extern struct increment_operator optab_postdecrement_expr[];
+extern struct increment_operator optab_postincrement_expr[];
diff --git a/gnu/usr.bin/cc/include/bc-typecd.def b/gnu/usr.bin/cc/include/bc-typecd.def
new file mode 100644
index 0000000..fd92cdd
--- /dev/null
+++ b/gnu/usr.bin/cc/include/bc-typecd.def
@@ -0,0 +1,21 @@
+/* Typecodes used by the interpreter and their related
+ machine modes and types.
+
+ The last argument is used for retrieving the given
+ type from a varargs list. Due to a bug in varargs,
+   the type has to be the generic machine type or
+   larger. */
+
+DEFTYPECODE (QIcode, "QI", QImode, SItype)
+DEFTYPECODE (QUcode, "QU", QImode, SUtype)
+DEFTYPECODE (HIcode, "HI", HImode, SItype)
+DEFTYPECODE (HUcode, "HU", HImode, SUtype)
+DEFTYPECODE (SIcode, "SI", SImode, SItype)
+DEFTYPECODE (SUcode, "SU", SImode, SUtype)
+DEFTYPECODE (DIcode, "DI", DImode, DItype)
+DEFTYPECODE (DUcode, "DU", DImode, DUtype)
+DEFTYPECODE (SFcode, "SF", SFmode, SFtype)
+DEFTYPECODE (DFcode, "DF", DFmode, DFtype)
+DEFTYPECODE (XFcode, "XF", XFmode, XFtype)
+DEFTYPECODE (Pcode, "P", PSImode, Ptype)
+DEFTYPECODE (Tcode, "T", SImode, SItype)
diff --git a/gnu/usr.bin/cc/include/bc-typecd.h b/gnu/usr.bin/cc/include/bc-typecd.h
new file mode 100644
index 0000000..097cd62
--- /dev/null
+++ b/gnu/usr.bin/cc/include/bc-typecd.h
@@ -0,0 +1,53 @@
+/* Typecode definitions for Bytecode Interpreter.
+ Copyright (C) 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifndef TYPECODE_H
+#define TYPECODE_H
+
+enum typecode
+{
+#define DEFTYPECODE(CODE, NAME, MACHMODE, TYPE) CODE,
+#include "bc-typecd.def"
+#undef DEFTYPECODE
+
+ LAST_AND_UNUSED_TYPECODE
+};
+
+/* Determine if a given type is integer. */
+#define TYPECODE_INTEGER_P(TYPECODE) ((int) (TYPECODE) < (int) SFcode)
+
+/* Determine if a given type is unsigned. */
+#define TYPECODE_UNSIGNED_P(TYPECODE) \
+ (TYPECODE_INTEGER_P(TYPECODE) && (int) (TYPECODE) & 1)
+
+/* Determine if a given type is signed. */
+#define TYPECODE_SIGNED_P(TYPECODE) \
+ (TYPECODE_INTEGER_P(TYPECODE) && !((int) (TYPECODE) & 1))
+
+/* Determine if a given type is floating. */
+#define TYPECODE_FLOAT_P(TYPECODE) \
+ ((int) (TYPECODE) < (int) Pcode && !TYPECODE_INTEGER_P(TYPECODE))
+
+/* Determine if the given type is arithmetic. */
+#define TYPECODE_ARITH_P(TYPECODE) \
+ (TYPECODE_INTEGER_P(TYPECODE) || TYPECODE_FLOAT_P(TYPECODE))
+
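+/* Illustrative example (not used anywhere): the predicates above rely
+   solely on the ordering in bc-typecd.def -- signed/unsigned integer
+   codes alternate, the float codes follow them, and Pcode/Tcode come
+   last.  A classifier written in terms of them might look like this.  */
+#if 0
+static char *
+typecode_class (code)
+     enum typecode code;
+{
+  if (TYPECODE_FLOAT_P (code))
+    return "float";
+  if (TYPECODE_UNSIGNED_P (code))
+    return "unsigned integer";
+  if (TYPECODE_SIGNED_P (code))
+    return "signed integer";
+  return "pointer or other";	/* Pcode, Tcode */
+}
+#endif
+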
+#define NUM_TYPECODES ((int) LAST_AND_UNUSED_TYPECODE)
+
+#endif
diff --git a/gnu/usr.bin/cc/include/bi-run.h b/gnu/usr.bin/cc/include/bi-run.h
new file mode 100644
index 0000000..669f2ab
--- /dev/null
+++ b/gnu/usr.bin/cc/include/bi-run.h
@@ -0,0 +1,165 @@
+/* Definitions for Bytecode Interpreter.
+ Copyright (C) 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#define MAXLITERALS 5
+
+struct arityvec
+{
+ char ninputs;
+ char noutputs;
+ char nliterals;
+ char literals[MAXLITERALS];
+};
+
+struct argtype
+{
+ int modealign; /* Argument mode:alignment */
+ int size; /* Argument size, in bytes */
+};
+
+struct callinfo
+{
+ int nargs; /* Number of arguments in call */
+ struct argtype retvaltype; /* Type of return value */
+ struct argtype argtypes[1]; /* Argument types */
+};
+
+/* Structure describing a bytecode function. If this changes, we also
+ need to change expand_function_end () in bc-trans.c */
+struct bytecode
+{
+ int stacksize; /* Depth required of evaluation stack. */
+ int localsize; /* Size in bytes of local variables. */
+ unsigned char *pc0; /* Initial program counter. */
+ void **ptrlit; /* Vector of (relocatable) pointer literals. */
+ struct callinfo *callinfo; /* Vector of procedure call type info. */
+};
+
+
+#define INTERP_BPC 8 /* Bits per char */
+#define INTERP_BPI \
+ (sizeof (int) * INTERP_BPC) /* Bits per int */
+
+
+#ifndef min
+#define min(L, R) ((L) < (R) ? (L) : (R))
+#endif
+
+
+/* bit field operations. */
+
+/* Low (high) mask: int with low (high) N bits set */
+
+#define LM(N) ((1 << (N)) - 1)
+#define HM(N) ((~LM (INTERP_BPI - (N))))
+
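+/* For instance, with INTERP_BPI == 32, LM (3) is 0x00000007 and
+   HM (3) is ~LM (29) == 0xe0000000.  */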
+
+/* Sign-extend the SIZE low bits of VALUE to an integer (typeof VALUE).
+ Signed bitfields are loaded from memory by the sxloadBI instruction,
+ which first retrieves the bitfield with XFIELD and then sign extends
+ it to an SItype. */
+
+#define EXTEND(SIZE, VALUE) \
+ ({ SUtype value = (SUtype) (VALUE); \
+ (value & (1 << ((SIZE) - 1)) ? value | ~LM (SIZE) : value); })
+
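+/* Worked example: EXTEND (3, 5) sees the sign bit of the 3-bit value
+   101 set, so the result is 5 | ~LM (3), i.e. ...11111101 == -3.  */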
+
+/* Given OFFSET:SIZE for a bitfield, calculate:
+
+ [1] BYTE_OFFSET = the byte offset of the bit field.
+ [2] BIT_OFFSET = the bit offset of the bit field (less than INTERP_BPC).
+ [3] NBYTES = the number of integral bytes in the bit field.
+ [4] TRAILING_BITS= the number of trailing bits (less than INTERP_BPC).
+
+
+ , , , , , (memory bytes)
+ ---------------- (bitfield)
+ | | || | | (divisions)
+ ^ ^ ^ ^
+ | | | |__ [4] (bits)
+ | | |_________ [3] (bytes)
+ | |_________________ [2] (bits)
+ |___________________________ [1] (bytes)
+
+
+ The above applies to little-endian (!BYTES_BIG_ENDIAN) machines. On
+ big-endian (BYTES_BIG_ENDIAN) machines the bit numbering is reversed
+ (i.e. bit 0 is the sign bit).
+
+ (Alright, so I drew this to keep my tongue in cheek while writing the code below,
+ not because I'm into ASCII art.) */
+
+
+#define BI_PARAMS(OFFSET, SIZE, BYTE_OFFSET, BIT_OFFSET, NBYTES, TRAILING_BITS) \
+ { BYTE_OFFSET = (OFFSET) / (INTERP_BPC); \
+ BIT_OFFSET = (OFFSET) % (INTERP_BPC); \
+ NBYTES = ((SIZE) - (INTERP_BPC - (BIT_OFFSET))) / INTERP_BPC; \
+ if ((NBYTES) < 0 || ((NBYTES) > 64)) \
+ NBYTES = 0; \
+ if ((SIZE) + (BIT_OFFSET) <= INTERP_BPC) \
+ TRAILING_BITS = 0; \
+ else \
+ TRAILING_BITS = ((SIZE) - (INTERP_BPC - (BIT_OFFSET))) % INTERP_BPC; }
+
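+/* Worked example, with INTERP_BPC == 8: OFFSET == 13 and SIZE == 12
+   yield BYTE_OFFSET == 1, BIT_OFFSET == 5, NBYTES == (12 - 3) / 8 == 1
+   and TRAILING_BITS == (12 - 3) % 8 == 1: three bits in the first
+   byte, one whole byte, and one trailing bit.  */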
+
+/* SHIFT_IN_BITS retrieves NBITS bits from SOURCE and shifts them into
+ DEST. The bit field starts OFFSET bits into SOURCE.
+
+ OR_IN_BITS copies the NBITS low bits from VALUE into the bitfield in
+ DEST offset by OFFSET bits. */
+
+
+#if BYTES_BIG_ENDIAN
+
+#define SHIFT_IN_BITS(DEST, SOURCE, OFFSET, NBITS) \
+ (DEST = ((DEST) << (NBITS)) \
+ | (LM ((NBITS)) \
+ & ((SOURCE) >> (INTERP_BPC - (OFFSET) - (NBITS)))))
+
+#define OR_IN_BITS(DEST, VALUE, OFFSET, NBITS) \
+ (DEST = ((DEST) & ~(LM ((NBITS)) << (INTERP_BPC - (OFFSET) - (NBITS)))) \
+ | (((VALUE) & LM ((NBITS))) << (INTERP_BPC - (OFFSET) - (NBITS))))
+
+#else
+
+#define SHIFT_IN_BITS(DEST, SOURCE, OFFSET, NBITS) \
+ (DEST = ((DEST) << (NBITS)) \
+ | (LM ((NBITS)) \
+ & ((SOURCE) >> (OFFSET))))
+
+#define OR_IN_BITS(DEST, VALUE, OFFSET, NBITS) \
+ (DEST = ((DEST) & ~(LM ((NBITS)) << (OFFSET))) \
+ | (((VALUE) & LM ((NBITS))) << (OFFSET)))
+
+#endif
+
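+/* Worked example (little-endian case): with SOURCE == 0xb4, OFFSET == 2
+   and NBITS == 3, SHIFT_IN_BITS extracts (0xb4 >> 2) & LM (3) == 5 and
+   shifts it into the low end of DEST.  OR_IN_BITS with DEST == 0xff,
+   VALUE == 0, OFFSET == 2 and NBITS == 3 clears those three bits,
+   leaving 0xe3.  */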
+
+/* Procedure call; arguments are a pointer to the function to be called,
+ a pointer to a place to store the return value, a pointer to a vector
+ describing the type of procedure call, and the interpreter's stack pointer,
+ which will point to the first of the arguments at this point. */
+
+#define CALL(FUNC, CALLDESC, RETVAL, SP) __call(FUNC, CALLDESC, RETVAL, SP)
+
+
+/* Procedure return; arguments are a pointer to the calldesc for this
+ function, and a pointer to the place where the value to be returned
+ may be found. Generally the MACHARGS above contain a machine-dependent
+ cookie that is used to determine where to jump. */
+
+#define PROCRET(CALLDESC, RETVAL) return
diff --git a/gnu/usr.bin/cc/include/bytecode.h b/gnu/usr.bin/cc/include/bytecode.h
new file mode 100644
index 0000000..87030be
--- /dev/null
+++ b/gnu/usr.bin/cc/include/bytecode.h
@@ -0,0 +1,91 @@
+/* Bytecode definitions for GNU C-compiler.
+ Copyright (C) 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+extern int output_bytecode;
+extern int stack_depth;
+extern int max_stack_depth;
+
+/* Emit a DI constant according to the target machine's word ordering. */
+
+#if WORDS_BIG_ENDIAN
+
+#define bc_emit_bytecode_DI_const(CST) \
+{ int opcode; \
+ opcode = TREE_INT_CST_HIGH (CST); \
+ bc_emit_bytecode_const ((char *) &opcode, sizeof opcode); \
+ opcode = TREE_INT_CST_LOW (CST); \
+ bc_emit_bytecode_const ((char *) &opcode, sizeof opcode); \
+}
+
+#else
+
+#define bc_emit_bytecode_DI_const(CST) \
+{ int opcode; \
+ opcode = TREE_INT_CST_LOW (CST); \
+ bc_emit_bytecode_const ((char *) &opcode, sizeof opcode); \
+ opcode = TREE_INT_CST_HIGH (CST); \
+ bc_emit_bytecode_const ((char *) &opcode, sizeof opcode); \
+}
+
+#endif
+
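+/* For example, on a little-endian-word target a DI constant with
+   TREE_INT_CST_HIGH == 1 and TREE_INT_CST_LOW == 2 is emitted as the
+   word 2 followed by the word 1; a big-endian-word target reverses
+   the order.  */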
+
+extern void bc_expand_expr ();
+extern void bc_output_data_constructor ();
+extern void bc_store_field ();
+extern void bc_load_bit_field ();
+extern void bc_store_bit_field ();
+extern void bc_push_offset_and_size ();
+extern void bc_init_mode_to_code_map ();
+
+/* These are just stubs, so the compiler will compile for targets
+ that aren't yet supported by the bytecode generator. */
+
+#ifndef TARGET_SUPPORTS_BYTECODE
+
+#define MACHINE_SEG_ALIGN 1
+#define INT_ALIGN 1
+#define PTR_ALIGN 1
+#define NAMES_HAVE_UNDERSCORES
+#define BC_NOP (0)
+#define BC_GLOBALIZE_LABEL(FP, NAME) BC_NOP
+#define BC_OUTPUT_COMMON(FP, NAME, SIZE, ROUNDED) BC_NOP
+#define BC_OUTPUT_LOCAL(FP, NAME, SIZE, ROUNDED) BC_NOP
+#define BC_OUTPUT_ALIGN(FP, ALIGN) BC_NOP
+#define BC_OUTPUT_LABEL(FP, NAME) BC_NOP
+#define BC_OUTPUT_SKIP(FP, SIZE) BC_NOP
+#define BC_OUTPUT_LABELREF(FP, NAME) BC_NOP
+#define BC_OUTPUT_FLOAT(FP, VAL) BC_NOP
+#define BC_OUTPUT_DOUBLE(FP, VAL) BC_NOP
+#define BC_OUTPUT_BYTE(FP, VAL) BC_NOP
+#define BC_OUTPUT_FILE ASM_OUTPUT_FILE
+#define BC_OUTPUT_ASCII ASM_OUTPUT_ASCII
+#define BC_OUTPUT_IDENT ASM_OUTPUT_IDENT
+#define BCXSTR(RTX) ((RTX)->bc_label)
+#define BC_WRITE_FILE(FP) BC_NOP
+#define BC_WRITE_SEGSYM(SEGSYM, FP) BC_NOP
+#define BC_WRITE_RELOC_ENTRY(SEGRELOC, FP, OFFSET) BC_NOP
+#define BC_START_BYTECODE_LINE(FP) BC_NOP
+#define BC_WRITE_BYTECODE(SEP, VAL, FP) BC_NOP
+#define BC_WRITE_RTL(R, FP) BC_NOP
+#define BC_EMIT_TRAMPOLINE(TRAMPSEG, CALLINFO) BC_NOP
+#define VALIDATE_STACK BC_NOP
+
+#endif /* !TARGET_SUPPORTS_BYTECODE */
diff --git a/gnu/usr.bin/cc/include/bytetypes.h b/gnu/usr.bin/cc/include/bytetypes.h
new file mode 100644
index 0000000..f915669
--- /dev/null
+++ b/gnu/usr.bin/cc/include/bytetypes.h
@@ -0,0 +1,35 @@
+/* These should come from genemit */
+
+/* Use __signed__ in case compiling with -traditional. */
+
+typedef __signed__ char QItype;
+typedef unsigned char QUtype;
+typedef __signed__ short int HItype;
+typedef unsigned short int HUtype;
+typedef __signed__ long int SItype;
+typedef unsigned long int SUtype;
+typedef __signed__ long long int DItype;
+typedef unsigned long long int DUtype;
+typedef float SFtype;
+typedef double DFtype;
+typedef long double XFtype;
+typedef char *Ptype;
+typedef int Ttype;
+
+
+typedef union stacktype
+{
+ QItype QIval;
+ QUtype QUval;
+ HItype HIval;
+ HUtype HUval;
+ SItype SIval;
+ SUtype SUval;
+ DItype DIval;
+ DUtype DUval;
+ SFtype SFval;
+ DFtype DFval;
+ XFtype XFval;
+ Ptype Pval;
+ Ttype Tval;
+} stacktype;
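+
+/* Illustrative example (not used anywhere): the interpreter's
+   evaluation stack can be treated as an array of these unions, one
+   member per typecode.  */
+#if 0
+static void
+example_push_pop ()
+{
+  stacktype stack[16];
+  stacktype *sp = stack;
+  SItype i;
+  DFtype d;
+
+  (sp++)->SIval = -42;		/* push a signed word */
+  (sp++)->DFval = 2.5;		/* push a double */
+  d = (--sp)->DFval;		/* pop them back off */
+  i = (--sp)->SIval;
+}
+#endif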
diff --git a/gnu/usr.bin/cc/include/c-gperf.h b/gnu/usr.bin/cc/include/c-gperf.h
new file mode 100644
index 0000000..edaaf22
--- /dev/null
+++ b/gnu/usr.bin/cc/include/c-gperf.h
@@ -0,0 +1,184 @@
+/* C code produced by gperf version 2.5 (GNU C++ version) */
+/* Command-line: gperf -p -j1 -i 1 -g -o -t -G -N is_reserved_word -k1,3,$ c-parse.gperf */
+struct resword { char *name; short token; enum rid rid; };
+
+#define TOTAL_KEYWORDS 79
+#define MIN_WORD_LENGTH 2
+#define MAX_WORD_LENGTH 20
+#define MIN_HASH_VALUE 10
+#define MAX_HASH_VALUE 144
+/* maximum key range = 135, duplicates = 0 */
+
+#ifdef __GNUC__
+__inline
+#endif
+static unsigned int
+hash (str, len)
+ register char *str;
+ register int unsigned len;
+{
+ static unsigned char asso_values[] =
+ {
+ 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+ 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+ 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+ 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+ 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+ 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+ 145, 145, 145, 145, 25, 145, 145, 145, 145, 145,
+ 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+ 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
+ 145, 145, 145, 145, 145, 1, 145, 46, 8, 15,
+ 61, 6, 36, 48, 3, 5, 145, 18, 63, 25,
+ 29, 76, 1, 145, 13, 2, 1, 51, 37, 9,
+ 9, 1, 3, 145, 145, 145, 145, 145,
+ };
+ register int hval = len;
+
+ switch (hval)
+ {
+ default:
+ case 3:
+ hval += asso_values[str[2]];
+ case 2:
+ case 1:
+ hval += asso_values[str[0]];
+ }
+ return hval + asso_values[str[len - 1]];
+}
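+
+/* Worked example: hash ("int", 3) starts with hval == 3, adds
+   asso_values['t'] == 1 for str[2], falls through to add
+   asso_values['i'] == 5 for str[0], then adds asso_values['t'] == 1
+   for the last character: 3 + 1 + 5 + 1 == 10, the index of "int"
+   in wordlist below.  */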
+
+static struct resword wordlist[] =
+{
+ {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+ {"",},
+ {"int", TYPESPEC, RID_INT},
+ {"",}, {"",},
+ {"__typeof__", TYPEOF, NORID},
+ {"__signed__", TYPESPEC, RID_SIGNED},
+ {"__imag__", IMAGPART, NORID},
+ {"switch", SWITCH, NORID},
+ {"__inline__", SCSPEC, RID_INLINE},
+ {"else", ELSE, NORID},
+ {"__iterator__", SCSPEC, RID_ITERATOR},
+ {"__inline", SCSPEC, RID_INLINE},
+ {"__extension__", EXTENSION, NORID},
+ {"struct", STRUCT, NORID},
+ {"__real__", REALPART, NORID},
+ {"__const", TYPE_QUAL, RID_CONST},
+ {"while", WHILE, NORID},
+ {"__const__", TYPE_QUAL, RID_CONST},
+ {"case", CASE, NORID},
+ {"__complex__", TYPESPEC, RID_COMPLEX},
+ {"__iterator", SCSPEC, RID_ITERATOR},
+ {"bycopy", TYPE_QUAL, RID_BYCOPY},
+ {"",}, {"",}, {"",},
+ {"__complex", TYPESPEC, RID_COMPLEX},
+ {"",},
+ {"in", TYPE_QUAL, RID_IN},
+ {"break", BREAK, NORID},
+ {"@defs", DEFS, NORID},
+ {"",}, {"",}, {"",},
+ {"extern", SCSPEC, RID_EXTERN},
+ {"if", IF, NORID},
+ {"typeof", TYPEOF, NORID},
+ {"typedef", SCSPEC, RID_TYPEDEF},
+ {"__typeof", TYPEOF, NORID},
+ {"sizeof", SIZEOF, NORID},
+ {"",},
+ {"return", RETURN, NORID},
+ {"const", TYPE_QUAL, RID_CONST},
+ {"__volatile__", TYPE_QUAL, RID_VOLATILE},
+ {"@private", PRIVATE, NORID},
+ {"@selector", SELECTOR, NORID},
+ {"__volatile", TYPE_QUAL, RID_VOLATILE},
+ {"__asm__", ASM_KEYWORD, NORID},
+ {"",}, {"",},
+ {"continue", CONTINUE, NORID},
+ {"__alignof__", ALIGNOF, NORID},
+ {"__imag", IMAGPART, NORID},
+ {"__attribute__", ATTRIBUTE, NORID},
+ {"",}, {"",},
+ {"__attribute", ATTRIBUTE, NORID},
+ {"for", FOR, NORID},
+ {"",},
+ {"@encode", ENCODE, NORID},
+ {"id", OBJECTNAME, RID_ID},
+ {"static", SCSPEC, RID_STATIC},
+ {"@interface", INTERFACE, NORID},
+ {"",},
+ {"__signed", TYPESPEC, RID_SIGNED},
+ {"",},
+ {"__label__", LABEL, NORID},
+ {"",}, {"",},
+ {"__asm", ASM_KEYWORD, NORID},
+ {"char", TYPESPEC, RID_CHAR},
+ {"",},
+ {"inline", SCSPEC, RID_INLINE},
+ {"out", TYPE_QUAL, RID_OUT},
+ {"register", SCSPEC, RID_REGISTER},
+ {"__real", REALPART, NORID},
+ {"short", TYPESPEC, RID_SHORT},
+ {"",},
+ {"enum", ENUM, NORID},
+ {"inout", TYPE_QUAL, RID_INOUT},
+ {"",},
+ {"oneway", TYPE_QUAL, RID_ONEWAY},
+ {"union", UNION, NORID},
+ {"",},
+ {"__alignof", ALIGNOF, NORID},
+ {"",},
+ {"@implementation", IMPLEMENTATION, NORID},
+ {"",},
+ {"@class", CLASS, NORID},
+ {"",},
+ {"@public", PUBLIC, NORID},
+ {"asm", ASM_KEYWORD, NORID},
+ {"",}, {"",}, {"",}, {"",}, {"",},
+ {"default", DEFAULT, NORID},
+ {"",},
+ {"void", TYPESPEC, RID_VOID},
+ {"",},
+ {"@protected", PROTECTED, NORID},
+ {"@protocol", PROTOCOL, NORID},
+ {"",}, {"",}, {"",},
+ {"volatile", TYPE_QUAL, RID_VOLATILE},
+ {"",}, {"",},
+ {"signed", TYPESPEC, RID_SIGNED},
+ {"float", TYPESPEC, RID_FLOAT},
+ {"@end", END, NORID},
+ {"",}, {"",},
+ {"unsigned", TYPESPEC, RID_UNSIGNED},
+ {"@compatibility_alias", ALIAS, NORID},
+ {"double", TYPESPEC, RID_DOUBLE},
+ {"",}, {"",},
+ {"auto", SCSPEC, RID_AUTO},
+ {"",},
+ {"goto", GOTO, NORID},
+ {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",}, {"",},
+ {"do", DO, NORID},
+ {"",}, {"",}, {"",}, {"",},
+ {"long", TYPESPEC, RID_LONG},
+};
+
+#ifdef __GNUC__
+__inline
+#endif
+struct resword *
+is_reserved_word (str, len)
+ register char *str;
+ register unsigned int len;
+{
+ if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH)
+ {
+ register int key = hash (str, len);
+
+ if (key <= MAX_HASH_VALUE && key >= 0)
+ {
+ register char *s = wordlist[key].name;
+
+ if (*s == *str && !strcmp (str + 1, s + 1))
+ return &wordlist[key];
+ }
+ }
+ return 0;
+}
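+
+/* Illustrative example (not used anywhere): a caller probes the table
+   once per identifier; a null return means the spelling is an ordinary
+   identifier rather than a keyword.  */
+#if 0
+static int
+example_lookup (name, len)
+     char *name;
+     int len;
+{
+  struct resword *r = is_reserved_word (name, len);
+  return r ? r->token : IDENTIFIER;	/* IDENTIFIER from c-parse.h */
+}
+#endif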
diff --git a/gnu/usr.bin/cc/include/c-lex.h b/gnu/usr.bin/cc/include/c-lex.h
new file mode 100644
index 0000000..ae67d4c
--- /dev/null
+++ b/gnu/usr.bin/cc/include/c-lex.h
@@ -0,0 +1,79 @@
+/* Define constants for communication with c-parse.y.
+ Copyright (C) 1987, 1992 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+
+enum rid
+{
+ RID_UNUSED,
+ RID_INT,
+ RID_CHAR,
+ RID_FLOAT,
+ RID_DOUBLE,
+ RID_VOID,
+ RID_UNUSED1,
+
+ RID_UNSIGNED,
+ RID_SHORT,
+ RID_LONG,
+ RID_AUTO,
+ RID_STATIC,
+ RID_EXTERN,
+ RID_REGISTER,
+ RID_TYPEDEF,
+ RID_SIGNED,
+ RID_CONST,
+ RID_VOLATILE,
+ RID_INLINE,
+ RID_NOALIAS,
+ RID_ITERATOR,
+ RID_COMPLEX,
+
+ RID_IN,
+ RID_OUT,
+ RID_INOUT,
+ RID_BYCOPY,
+ RID_ONEWAY,
+ RID_ID,
+
+ RID_MAX
+};
+
+#define NORID RID_UNUSED
+
+#define RID_FIRST_MODIFIER RID_UNSIGNED
+
+/* The elements of `ridpointers' are identifier nodes
+ for the reserved type names and storage classes.
+ It is indexed by a RID_... value. */
+extern tree ridpointers[(int) RID_MAX];
+
+/* The declaration found for the last IDENTIFIER token read in.
+ yylex must look this up to detect typedefs, which get token type TYPENAME,
+ so it is left around in case the identifier is not a typedef but is
+ used in a context which makes it a reference to a variable. */
+extern tree lastiddecl;
+
+extern char *token_buffer; /* Pointer to token buffer. */
+
+extern tree make_pointer_declarator ();
+extern void reinit_parse_for_function ();
+extern int yylex ();
+
+extern char *get_directive_line ();
diff --git a/gnu/usr.bin/cc/include/c-parse.h b/gnu/usr.bin/cc/include/c-parse.h
new file mode 100644
index 0000000..dab903e
--- /dev/null
+++ b/gnu/usr.bin/cc/include/c-parse.h
@@ -0,0 +1,65 @@
+typedef union {long itype; tree ttype; enum tree_code code;
+ char *filename; int lineno; } YYSTYPE;
+#define IDENTIFIER 258
+#define TYPENAME 259
+#define SCSPEC 260
+#define TYPESPEC 261
+#define TYPE_QUAL 262
+#define CONSTANT 263
+#define STRING 264
+#define ELLIPSIS 265
+#define SIZEOF 266
+#define ENUM 267
+#define STRUCT 268
+#define UNION 269
+#define IF 270
+#define ELSE 271
+#define WHILE 272
+#define DO 273
+#define FOR 274
+#define SWITCH 275
+#define CASE 276
+#define DEFAULT 277
+#define BREAK 278
+#define CONTINUE 279
+#define RETURN 280
+#define GOTO 281
+#define ASM_KEYWORD 282
+#define TYPEOF 283
+#define ALIGNOF 284
+#define ALIGN 285
+#define ATTRIBUTE 286
+#define EXTENSION 287
+#define LABEL 288
+#define REALPART 289
+#define IMAGPART 290
+#define ASSIGN 291
+#define OROR 292
+#define ANDAND 293
+#define EQCOMPARE 294
+#define ARITHCOMPARE 295
+#define LSHIFT 296
+#define RSHIFT 297
+#define UNARY 298
+#define PLUSPLUS 299
+#define MINUSMINUS 300
+#define HYPERUNARY 301
+#define POINTSAT 302
+#define INTERFACE 303
+#define IMPLEMENTATION 304
+#define END 305
+#define SELECTOR 306
+#define DEFS 307
+#define ENCODE 308
+#define CLASSNAME 309
+#define PUBLIC 310
+#define PRIVATE 311
+#define PROTECTED 312
+#define PROTOCOL 313
+#define OBJECTNAME 314
+#define CLASS 315
+#define ALIAS 316
+#define OBJC_STRING 317
+
+
+extern YYSTYPE yylval;
diff --git a/gnu/usr.bin/cc/include/c-tree.h b/gnu/usr.bin/cc/include/c-tree.h
new file mode 100644
index 0000000..2300351
--- /dev/null
+++ b/gnu/usr.bin/cc/include/c-tree.h
@@ -0,0 +1,483 @@
+/* Definitions for C parsing and type checking.
+ Copyright (C) 1987, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifndef _C_TREE_H
+#define _C_TREE_H
+
+/* Language-dependent contents of an identifier. */
+
+/* The limbo_value is used for block level extern declarations, which need
+ to be type checked against subsequent extern declarations. They can't
+ be referenced after they fall out of scope, so they can't be global. */
+
+struct lang_identifier
+{
+ struct tree_identifier ignore;
+ tree global_value, local_value, label_value, implicit_decl;
+ tree error_locus, limbo_value;
+};
+
+/* Macros for access to language-specific slots in an identifier. */
+/* Each of these slots contains a DECL node or null. */
+
+/* This represents the value which the identifier has in the
+ file-scope namespace. */
+#define IDENTIFIER_GLOBAL_VALUE(NODE) \
+ (((struct lang_identifier *)(NODE))->global_value)
+/* This represents the value which the identifier has in the current
+ scope. */
+#define IDENTIFIER_LOCAL_VALUE(NODE) \
+ (((struct lang_identifier *)(NODE))->local_value)
+/* This represents the value which the identifier has as a label in
+ the current label scope. */
+#define IDENTIFIER_LABEL_VALUE(NODE) \
+ (((struct lang_identifier *)(NODE))->label_value)
+/* This records the extern decl of this identifier, if it has had one
+ at any point in this compilation. */
+#define IDENTIFIER_LIMBO_VALUE(NODE) \
+ (((struct lang_identifier *)(NODE))->limbo_value)
+/* This records the implicit function decl of this identifier, if it
+ has had one at any point in this compilation. */
+#define IDENTIFIER_IMPLICIT_DECL(NODE) \
+ (((struct lang_identifier *)(NODE))->implicit_decl)
+/* This is the last function in which we printed an "undefined variable"
+ message for this identifier. Value is a FUNCTION_DECL or null. */
+#define IDENTIFIER_ERROR_LOCUS(NODE) \
+ (((struct lang_identifier *)(NODE))->error_locus)
+
+/* In identifiers, C uses the following fields in a special way:
+ TREE_PUBLIC to record that there was a previous local extern decl.
+ TREE_USED to record that such a decl was used.
+ TREE_ADDRESSABLE to record that the address of such a decl was used. */
+
+/* Nonzero means reject anything that ANSI standard C forbids. */
+extern int pedantic;
+
+/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
+#define C_TYPE_FIELDS_READONLY(type) TREE_LANG_FLAG_1 (type)
+
+/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
+#define C_TYPE_FIELDS_VOLATILE(type) TREE_LANG_FLAG_2 (type)
+
+/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
+ nonzero if the definition of the type has already started. */
+#define C_TYPE_BEING_DEFINED(type) TYPE_LANG_FLAG_0 (type)
+
+/* In a RECORD_TYPE, a sorted array of the fields of the type. */
+struct lang_type
+{
+ int len;
+ tree elts[1];
+};
+
+/* Mark which labels are explicitly declared.
+ These may be shadowed, and may be referenced from nested functions. */
+#define C_DECLARED_LABEL_FLAG(label) TREE_LANG_FLAG_1 (label)
+
+/* Record whether a type or decl was written with nonconstant size.
+ Note that TYPE_SIZE may have simplified to a constant. */
+#define C_TYPE_VARIABLE_SIZE(type) TYPE_LANG_FLAG_1 (type)
+#define C_DECL_VARIABLE_SIZE(type) DECL_LANG_FLAG_0 (type)
+
+/* Record in each node resulting from a binary operator
+ what operator was specified for it. */
+#define C_EXP_ORIGINAL_CODE(exp) ((enum tree_code) TREE_COMPLEXITY (exp))
+
+#if 0 /* Not used. */
+/* Record whether a decl for a function or function pointer has
+ already been mentioned (in a warning) because it was called
+ but didn't have a prototype. */
+#define C_MISSING_PROTOTYPE_WARNED(decl) DECL_LANG_FLAG_2(decl)
+#endif
+
+/* Store a value in that field. */
+#define C_SET_EXP_ORIGINAL_CODE(exp, code) \
+ (TREE_COMPLEXITY (exp) = (int)(code))
+
+/* Record whether a typedef for type `int' was actually `signed int'. */
+#define C_TYPEDEF_EXPLICITLY_SIGNED(exp) DECL_LANG_FLAG_1 ((exp))
+
+/* Nonzero for a declaration of a built-in function if there has been no
+ occasion that would declare the function in ordinary C.
+ Using the function draws a pedantic warning in this case. */
+#define C_DECL_ANTICIPATED(exp) DECL_LANG_FLAG_3 ((exp))
+
+/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
+ TYPE_ARG_TYPES for functions with prototypes, but created for functions
+ without prototypes. */
+#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_NONCOPIED_PARTS (NODE)
+
+/* Nonzero if the type T promotes to itself.
+ ANSI C states explicitly the list of types that promote;
+ in particular, short promotes to int even if they have the same width. */
+#define C_PROMOTING_INTEGER_TYPE_P(t) \
+ (TREE_CODE ((t)) == INTEGER_TYPE \
+ && (TYPE_MAIN_VARIANT (t) == char_type_node \
+ || TYPE_MAIN_VARIANT (t) == signed_char_type_node \
+ || TYPE_MAIN_VARIANT (t) == unsigned_char_type_node \
+ || TYPE_MAIN_VARIANT (t) == short_integer_type_node \
+ || TYPE_MAIN_VARIANT (t) == short_unsigned_type_node))
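+
+/* For example, `short' satisfies this predicate even on machines where
+   short and int have the same width, while `int' itself does not:
+   int promotes to itself.  */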
+
+/* In a VAR_DECL, means the variable is really an iterator. */
+#define ITERATOR_P(D) (DECL_LANG_FLAG_4(D))
+
+/* In a VAR_DECL for an iterator, means we are within
+ an explicit loop over that iterator. */
+#define ITERATOR_BOUND_P(NODE) ((NODE)->common.readonly_flag)
+
+/* in c-lang.c and objc-act.c */
+extern tree lookup_interface PROTO((tree));
+extern tree is_class_name PROTO((tree));
+extern void maybe_objc_check_decl PROTO((tree));
+extern int maybe_objc_comptypes PROTO((tree, tree, int));
+extern tree maybe_building_objc_message_expr PROTO((void));
+extern tree maybe_objc_method_name PROTO((tree));
+extern int recognize_objc_keyword PROTO((void));
+extern tree build_objc_string PROTO((int, char *));
+
+/* in c-aux-info.c */
+extern void gen_aux_info_record PROTO((tree, int, int, int));
+
+/* in c-common.c */
+extern void declare_function_name PROTO((void));
+extern void decl_attributes PROTO((tree, tree));
+extern void init_function_format_info PROTO((void));
+extern void record_function_format PROTO((tree, tree, int, int, int));
+extern void check_function_format PROTO((tree, tree, tree));
+/* Print an error message for invalid operands to arith operation CODE.
+ NOP_EXPR is used as a special case (see truthvalue_conversion). */
+extern void binary_op_error PROTO((enum tree_code));
+extern void c_expand_expr_stmt PROTO((tree));
+/* Validate the expression after `case' and apply default promotions. */
+extern tree check_case_value PROTO((tree));
+/* Concatenate a list of STRING_CST nodes into one STRING_CST. */
+extern tree combine_strings PROTO((tree));
+extern void constant_expression_warning PROTO((tree));
+extern tree convert_and_check PROTO((tree, tree));
+extern void overflow_warning PROTO((tree));
+extern void unsigned_conversion_warning PROTO((tree, tree));
+/* Read the rest of the current #-directive line. */
+extern char *get_directive_line STDIO_PROTO((FILE *));
+/* Subroutine of build_binary_op, used for comparison operations.
+ See if the operands have both been converted from subword integer types
+ and, if so, perhaps change them both back to their original type. */
+extern tree shorten_compare PROTO((tree *, tree *, tree *, enum tree_code *));
+/* Prepare expr to be an argument of a TRUTH_NOT_EXPR,
+ or validate its data type for an `if' or `while' statement or `?:' exp. */
+extern tree truthvalue_conversion PROTO((tree));
+extern tree type_for_mode PROTO((enum machine_mode, int));
+extern tree type_for_size PROTO((unsigned, int));
+
+/* in c-convert.c */
+extern tree convert PROTO((tree, tree));
+
+/* in c-decl.c */
+/* Standard named or nameless data types of the C compiler. */
+extern tree char_array_type_node;
+extern tree char_type_node;
+extern tree const_ptr_type_node;
+extern tree const_string_type_node;
+extern tree default_function_type;
+extern tree double_ftype_double;
+extern tree double_ftype_double_double;
+extern tree double_type_node;
+extern tree float_type_node;
+extern tree intDI_type_node;
+extern tree intHI_type_node;
+extern tree intQI_type_node;
+extern tree intSI_type_node;
+extern tree int_array_type_node;
+extern tree int_ftype_cptr_cptr_sizet;
+extern tree int_ftype_int;
+extern tree int_ftype_ptr_ptr_int;
+extern tree int_ftype_string_string;
+extern tree integer_type_node;
+extern tree long_double_type_node;
+extern tree long_ftype_long;
+extern tree long_integer_type_node;
+extern tree long_long_integer_type_node;
+extern tree long_long_unsigned_type_node;
+extern tree long_unsigned_type_node;
+extern tree complex_integer_type_node;
+extern tree complex_float_type_node;
+extern tree complex_double_type_node;
+extern tree complex_long_double_type_node;
+extern tree ptr_type_node;
+extern tree ptrdiff_type_node;
+extern tree short_integer_type_node;
+extern tree short_unsigned_type_node;
+extern tree signed_char_type_node;
+extern tree signed_wchar_type_node;
+extern tree string_ftype_ptr_ptr;
+extern tree string_type_node;
+extern tree unsigned_char_type_node;
+extern tree unsigned_intDI_type_node;
+extern tree unsigned_intHI_type_node;
+extern tree unsigned_intQI_type_node;
+extern tree unsigned_intSI_type_node;
+extern tree unsigned_type_node;
+extern tree unsigned_wchar_type_node;
+extern tree void_ftype_ptr_int_int;
+extern tree void_ftype_ptr_ptr_int;
+extern tree void_type_node;
+extern tree wchar_array_type_node;
+extern tree wchar_type_node;
+
+extern tree build_enumerator PROTO((tree, tree));
+/* Declare a predefined function. Return the declaration. */
+extern tree builtin_function PROTO((char *, tree, enum built_in_function function_, char *));
+/* Add qualifiers to a type, in the fashion for C. */
+extern tree c_build_type_variant PROTO((tree, int, int));
+extern int c_decode_option PROTO((char *));
+extern void c_mark_varargs PROTO((void));
+extern tree check_identifier PROTO((tree, tree));
+extern void clear_parm_order PROTO((void));
+extern tree combine_parm_decls PROTO((tree, tree, int));
+extern int complete_array_type PROTO((tree, tree, int));
+extern void declare_parm_level PROTO((int));
+extern tree define_label PROTO((char *, int, tree));
+extern void delete_block PROTO((tree));
+extern void finish_decl PROTO((tree, tree, tree));
+extern tree finish_enum PROTO((tree, tree));
+extern void finish_function PROTO((int));
+extern tree finish_struct PROTO((tree, tree));
+extern tree get_parm_info PROTO((int));
+extern tree getdecls PROTO((void));
+extern tree gettags PROTO((void));
+extern int global_bindings_p PROTO((void));
+extern tree grokfield PROTO((char *, int, tree, tree, tree));
+extern tree groktypename PROTO((tree));
+extern tree groktypename_in_parm_context PROTO((tree));
+extern tree implicitly_declare PROTO((tree));
+extern int in_parm_level_p PROTO((void));
+extern void init_decl_processing PROTO((void));
+extern void insert_block PROTO((tree));
+extern void keep_next_level PROTO((void));
+extern int kept_level_p PROTO((void));
+extern tree lookup_label PROTO((tree));
+extern tree lookup_name PROTO((tree));
+extern tree lookup_name_current_level PROTO((tree));
+extern tree lookup_name_current_level_global PROTO((tree));
+extern tree maybe_build_cleanup PROTO((tree));
+extern void parmlist_tags_warning PROTO((void));
+extern void pending_xref_error PROTO((void));
+extern void pop_c_function_context PROTO((void));
+extern void pop_label_level PROTO((void));
+extern tree poplevel PROTO((int, int, int));
+extern void print_lang_decl STDIO_PROTO((FILE *, tree,
+ int));
+extern void print_lang_identifier STDIO_PROTO((FILE *, tree,
+ int));
+extern void print_lang_type STDIO_PROTO((FILE *, tree,
+ int));
+extern void push_c_function_context PROTO((void));
+extern void push_label_level PROTO((void));
+extern void push_parm_decl PROTO((tree));
+extern tree pushdecl PROTO((tree));
+extern tree pushdecl_top_level PROTO((tree));
+extern void pushlevel PROTO((int));
+extern void pushtag PROTO((tree, tree));
+extern void set_block PROTO((tree));
+extern tree shadow_label PROTO((tree));
+extern void shadow_record_fields PROTO((tree));
+extern void shadow_tag PROTO((tree));
+extern void shadow_tag_warned PROTO((tree, int));
+extern tree start_enum PROTO((tree));
+extern int start_function PROTO((tree, tree, int));
+extern tree start_decl PROTO((tree, tree, int));
+extern tree start_struct PROTO((enum tree_code, tree));
+extern void store_parm_decls PROTO((void));
+extern tree xref_tag PROTO((enum tree_code, tree));
+
+/* in c-typeck.c */
+extern tree require_complete_type PROTO((tree));
+extern void incomplete_type_error PROTO((tree, tree));
+/* Given two integer or real types, return the type for their sum.
+ Given two compatible ANSI C types, return the merged type. */
+extern tree common_type PROTO((tree, tree));
+extern int comptypes PROTO((tree, tree));
+extern int self_promoting_args_p PROTO((tree));
+extern tree c_sizeof PROTO((tree));
+extern tree c_sizeof_nowarn PROTO((tree));
+extern tree c_size_in_bytes PROTO((tree));
+extern tree c_alignof PROTO((tree));
+extern tree c_alignof_expr PROTO((tree));
+extern tree default_conversion PROTO((tree));
+extern tree build_component_ref PROTO((tree, tree));
+extern tree build_indirect_ref PROTO((tree, char *));
+extern tree build_array_ref PROTO((tree, tree));
+extern tree build_function_call PROTO((tree, tree));
+extern tree parser_build_binary_op PROTO((enum tree_code,
+ tree, tree));
+extern tree build_binary_op PROTO((enum tree_code,
+ tree, tree, int));
+extern tree build_unary_op PROTO((enum tree_code,
+ tree, int));
+extern int lvalue_p PROTO((tree));
+extern int lvalue_or_else PROTO((tree, char *));
+extern void readonly_warning PROTO((tree, char *));
+extern int mark_addressable PROTO((tree));
+extern tree build_conditional_expr PROTO((tree, tree, tree));
+extern tree build_compound_expr PROTO((tree));
+extern tree build_c_cast PROTO((tree, tree));
+extern tree build_modify_expr PROTO((tree, enum tree_code,
+ tree));
+extern tree initializer_constant_valid_p PROTO((tree, tree));
+extern void store_init_value PROTO((tree, tree));
+extern void error_init PROTO((char *, char *,
+ char *));
+extern void pedwarn_init PROTO((char *, char *,
+ char *));
+extern void start_init PROTO((tree, tree, int));
+extern void finish_init PROTO((void));
+extern void really_start_incremental_init PROTO((tree));
+extern void push_init_level PROTO((int));
+extern tree pop_init_level PROTO((int));
+extern void set_init_index PROTO((tree, tree));
+extern void set_init_label PROTO((tree));
+extern void process_init_element PROTO((tree));
+extern void c_expand_asm_operands PROTO((tree, tree, tree, tree,
+ int, char *, int));
+extern void c_expand_return PROTO((tree));
+extern tree c_expand_start_case PROTO((tree));
+
+/* in c-iterate.c */
+extern void iterator_expand PROTO((tree));
+extern void iterator_for_loop_start PROTO((tree));
+extern void iterator_for_loop_end PROTO((tree));
+extern void iterator_for_loop_record PROTO((tree));
+extern void push_iterator_stack PROTO((void));
+extern void pop_iterator_stack PROTO((void));
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement that specifies a return value is seen. */
+
+extern int current_function_returns_value;
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement with no argument is seen. */
+
+extern int current_function_returns_null;
+
+/* Nonzero means `$' can be in an identifier. */
+
+extern int dollars_in_ident;
+
+/* Nonzero means allow type mismatches in conditional expressions;
+ just make their values `void'. */
+
+extern int flag_cond_mismatch;
+
+/* Nonzero means don't recognize the keyword `asm'. */
+
+extern int flag_no_asm;
+
+/* Nonzero means ignore `#ident' directives. */
+
+extern int flag_no_ident;
+
+/* Nonzero means warn about implicit declarations. */
+
+extern int warn_implicit;
+
+/* Nonzero means give string constants the type `const char *'
+ to get extra warnings from them. These warnings will be too numerous
+ to be useful, except in thoroughly ANSIfied programs. */
+
+extern int warn_write_strings;
+
+/* Nonzero means warn about sizeof (function) or addition/subtraction
+ of function pointers. */
+
+extern int warn_pointer_arith;
+
+/* Nonzero means warn for all old-style non-prototype function decls. */
+
+extern int warn_strict_prototypes;
+
+/* Nonzero means warn about multiple (redundant) decls for the same single
+ variable or function. */
+
+extern int warn_redundant_decls;
+
+/* Nonzero means warn about extern declarations of objects not at
+ file-scope level and about *all* declarations of functions (whether
+ extern or static) not at file-scope level. Note that we exclude
+ implicit function declarations. To get warnings about those, use
+ -Wimplicit. */
+
+extern int warn_nested_externs;
+
+/* Nonzero means warn about pointer casts that can drop a type qualifier
+ from the pointer target type. */
+
+extern int warn_cast_qual;
+
+/* Nonzero means warn when casting a function call to a type that does
+ not match the return type (e.g. (float)sqrt() or (anything*)malloc())
+ when there is no previous declaration of sqrt or malloc. */
+
+extern int warn_bad_function_cast;
+
+/* Warn about traditional constructs whose meanings changed in ANSI C. */
+
+extern int warn_traditional;
+
+/* Warn about *printf or *scanf format/argument anomalies. */
+
+extern int warn_format;
+
+/* Warn about a subscript that has type char. */
+
+extern int warn_char_subscripts;
+
+/* Warn if a type conversion is done that might have confusing results. */
+
+extern int warn_conversion;
+
+/* Nonzero means do some things the same way PCC does. */
+
+extern int flag_traditional;
+
+/* Nonzero means to allow single precision math even if we're generally
+ being traditional. */
+extern int flag_allow_single_precision;
+
+/* Nonzero means warn in cases where adding ()'s is suggested. */
+
+extern int warn_parentheses;
+
+/* Warn if initializer is not completely bracketed. */
+
+extern int warn_missing_braces;
+
+/* If nonzero, a function to call to perform comptypes
+ on two record types. */
+
+extern int (*comptypes_record_hook) ();
+
+/* Nonzero means we are reading code that came from a system header file. */
+
+extern int system_header_p;
+
+/* Nonzero enables Objective-C features. */
+
+extern int doing_objc_thang;
+
+#endif /* not _C_TREE_H */
diff --git a/gnu/usr.bin/cc/include/conditions.h b/gnu/usr.bin/cc/include/conditions.h
new file mode 100644
index 0000000..e7319377
--- /dev/null
+++ b/gnu/usr.bin/cc/include/conditions.h
@@ -0,0 +1,115 @@
+/* Definitions for condition code handling in final.c and output routines.
+ Copyright (C) 1987 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* None of the things in this file exist if we don't use CC0. */
+
+#ifdef HAVE_cc0
+
+/* The variable cc_status says how to interpret the condition code.
+ It is set by output routines for an instruction that sets the cc's
+ and examined by output routines for jump instructions.
+
+ cc_status contains two components named `value1' and `value2'
+ that record two equivalent expressions for the values that the
+ condition codes were set from. (Either or both may be null if
+ there is no useful expression to record.) These fields are
+ used for eliminating redundant test and compare instructions
+ in the cases where the condition codes were already set by the
+ previous instruction.
+
+ cc_status.flags contains flags which say that the condition codes
+ were set in a nonstandard manner. The output of jump instructions
+ uses these flags to compensate and produce the standard result
+ with the nonstandard condition codes. Standard flags are defined here.
+ The tm.h file can also define other machine-dependent flags.
+
+ cc_status also contains a machine-dependent component `mdep'
+ whose type, `CC_STATUS_MDEP', may be defined as a macro in the
+ tm.h file. */
+
+#ifndef CC_STATUS_MDEP
+#define CC_STATUS_MDEP int
+#endif
+
+#ifndef CC_STATUS_MDEP_INIT
+#define CC_STATUS_MDEP_INIT 0
+#endif
+
+typedef struct {int flags; rtx value1, value2; CC_STATUS_MDEP mdep;} CC_STATUS;
+
+/* While outputting an insn as assembler code,
+ this is the status BEFORE that insn. */
+extern CC_STATUS cc_prev_status;
+
+/* While outputting an insn as assembler code,
+ this is being altered to the status AFTER that insn. */
+extern CC_STATUS cc_status;
+
+/* These are the machine-independent flags: */
+
+/* Set if the sign of the cc value is inverted:
+ output a following jump-if-less as a jump-if-greater, etc. */
+#define CC_REVERSED 1
+
+/* This bit means that the current setting of the N bit is bogus
+ and conditional jumps should use the Z bit in its place.
+ This state obtains when an extraction of a signed single-bit field
+ or an arithmetic shift right of a byte by 7 bits
+ is turned into a btst, because btst does not set the N bit. */
+#define CC_NOT_POSITIVE 2
+
+/* This bit means that the current setting of the N bit is bogus
+ and conditional jumps should pretend that the N bit is clear.
+ Used after extraction of an unsigned bit
+ or logical shift right of a byte by 7 bits is turned into a btst.
+ The btst does not alter the N bit, but the result of that shift
+ or extract is never negative. */
+#define CC_NOT_NEGATIVE 4
+
+/* This bit means that the current setting of the overflow flag
+ is bogus and conditional jumps should pretend there is no overflow. */
+#define CC_NO_OVERFLOW 010
+
+/* This bit means that what ought to be in the Z bit
+ should be tested as the complement of the N bit. */
+#define CC_Z_IN_NOT_N 020
+
+/* This bit means that what ought to be in the Z bit
+ should be tested as the N bit. */
+#define CC_Z_IN_N 040
+
+/* Nonzero if we must invert the sense of the following branch, i.e.
+ change EQ to NE. This is not safe for IEEE floating point operations!
+ It is intended for use only when a combination of arithmetic
+ or logical insns can leave the condition codes set in a fortuitous
+ (though inverted) state. */
+#define CC_INVERTED 0100
+
+/* Nonzero if we must convert signed condition operators to unsigned.
+ This is only used by machine description files. */
+#define CC_NOT_SIGNED 0200
+
+/* This is how to initialize the variable cc_status.
+ final does this at appropriate moments. */
+
+#define CC_STATUS_INIT \
+ (cc_status.flags = 0, cc_status.value1 = 0, cc_status.value2 = 0, \
+ CC_STATUS_MDEP_INIT)
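+
+/* Illustrative example (not used anywhere): an output routine can use
+   the recorded values to elide a redundant test instruction.  "tst %0"
+   stands in for whatever the target's test instruction is.  */
+#if 0
+static char *
+example_output_test (operands)
+     rtx *operands;
+{
+  if (cc_status.value1 != 0
+      && rtx_equal_p (cc_status.value1, operands[0]))
+    return "";			/* cc's already reflect operands[0] */
+  return "tst %0";
+}
+#endif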
+
+#endif
diff --git a/gnu/usr.bin/cc/include/config.h b/gnu/usr.bin/cc/include/config.h
new file mode 100644
index 0000000..7886724
--- /dev/null
+++ b/gnu/usr.bin/cc/include/config.h
@@ -0,0 +1,42 @@
+/* Configuration for GNU C-compiler for Intel 80386.
+ Copyright (C) 1988, 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifndef i386
+#define i386
+#endif
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+/* Target machine dependencies.
+ tm.h is a symbolic link to the actual target-specific file. */
+
+#include "tm.h"
diff --git a/gnu/usr.bin/cc/include/convert.h b/gnu/usr.bin/cc/include/convert.h
new file mode 100644
index 0000000..b2c8c79
--- /dev/null
+++ b/gnu/usr.bin/cc/include/convert.h
@@ -0,0 +1,23 @@
+/* Definition of functions in convert.c.
+ Copyright (C) 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+extern tree convert_to_integer PROTO ((tree, tree));
+extern tree convert_to_pointer PROTO ((tree, tree));
+extern tree convert_to_real PROTO ((tree, tree));
+extern tree convert_to_complex PROTO ((tree, tree));
diff --git a/gnu/usr.bin/cc/include/defaults.h b/gnu/usr.bin/cc/include/defaults.h
new file mode 100644
index 0000000..df5ce1c
--- /dev/null
+++ b/gnu/usr.bin/cc/include/defaults.h
@@ -0,0 +1,133 @@
+/* Definitions of various defaults for how to do assembler output
+ (most of which are designed to be appropriate for GAS or for
+ some BSD assembler).
+
+ Written by Ron Guilmette (rfg@netcom.com)
+
+Copyright (C) 1992 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Store in OUTPUT a string (made with alloca) containing
+ an assembler-name for a local static variable or function named NAME.
+ LABELNO is an integer which is different for each call. */
+
+#ifndef ASM_FORMAT_PRIVATE_NAME
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+ do { \
+ int len = strlen (NAME); \
+ char *temp = (char *) alloca (len + 3); \
+ temp[0] = 'L'; \
+ strcpy (&temp[1], (NAME)); \
+ temp[len + 1] = '.'; \
+ temp[len + 2] = 0; \
+ (OUTPUT) = (char *) alloca (strlen (NAME) + 11); \
+ ASM_GENERATE_INTERNAL_LABEL (OUTPUT, temp, LABELNO); \
+ } while (0)
+#endif
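+
+/* For example, NAME "foo" with LABELNO 3 builds the template "Lfoo."
+   and hands it to ASM_GENERATE_INTERNAL_LABEL, which on typical
+   targets produces an assembler name along the lines of "Lfoo.3".  */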
+
+#ifndef ASM_STABD_OP
+#define ASM_STABD_OP ".stabd"
+#endif
+
+/* This is how to output an element of a case-vector that is absolute.
+ Some targets don't use this, but we have to define it anyway. */
+
+#ifndef ASM_OUTPUT_ADDR_VEC_ELT
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+do { fprintf (FILE, "\t%s\t", ASM_LONG); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "L", (VALUE)); \
+ fputc ('\n', FILE); \
+ } while (0)
+#endif
+
+/* This is how to output an element of a case-vector that is relative.
+ Some targets don't use this, but we have to define it anyway. */
+
+#ifndef ASM_OUTPUT_ADDR_DIFF_ELT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+do { fprintf (FILE, "\t%s\t", ASM_SHORT); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "L", (VALUE)); \
+ fputc ('-', FILE); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "L", (REL)); \
+ fputc ('\n', FILE); \
+ } while (0)
+#endif
+
+/* Choose a reasonable default for ASM_OUTPUT_ASCII. */
+
+#ifndef ASM_OUTPUT_ASCII
+#define ASM_OUTPUT_ASCII(MYFILE, MYSTRING, MYLENGTH) \
+ do { \
+ FILE *_hide_asm_out_file = (MYFILE); \
+ unsigned char *_hide_p = (unsigned char *) (MYSTRING); \
+ int _hide_thissize = (MYLENGTH); \
+ { \
+ FILE *asm_out_file = _hide_asm_out_file; \
+ unsigned char *p = _hide_p; \
+ int thissize = _hide_thissize; \
+ int i; \
+ fprintf (asm_out_file, "\t.ascii \""); \
+ \
+ for (i = 0; i < thissize; i++) \
+ { \
+ register int c = p[i]; \
+ if (c == '\"' || c == '\\') \
+ putc ('\\', asm_out_file); \
+ if (c >= ' ' && c < 0177) \
+ putc (c, asm_out_file); \
+ else \
+ { \
+ fprintf (asm_out_file, "\\%o", c); \
+ /* After an octal-escape, if a digit follows, \
+ terminate one string constant and start another. \
+ The Vax assembler fails to stop reading the escape \
+ after three digits, so this is the only way we \
+ can get it to parse the data properly. */ \
+ if (i < thissize - 1 \
+ && p[i + 1] >= '0' && p[i + 1] <= '9') \
+ fprintf (asm_out_file, "\"\n\t.ascii \""); \
+ } \
+ } \
+ fprintf (asm_out_file, "\"\n"); \
+ } \
+ } \
+ while (0)
+#endif
+
+#ifndef ASM_IDENTIFY_GCC
+ /* Default the definition only if ASM_IDENTIFY_GCC is not set,
+ because if it is set, we might not want ASM_IDENTIFY_LANGUAGE
+ outputting labels; if we do want it to, it must be defined
+ in the tm.h file. */
+#ifndef ASM_IDENTIFY_LANGUAGE
+#define ASM_IDENTIFY_LANGUAGE(FILE) output_lang_identify (FILE);
+#endif
+#endif
+
+/* This is how we tell the assembler to equate two values. */
+#ifdef SET_ASM_OP
+#ifndef ASM_OUTPUT_DEF
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t%s\t", SET_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+#endif
+#endif
diff --git a/gnu/usr.bin/cc/include/expr.h b/gnu/usr.bin/cc/include/expr.h
new file mode 100644
index 0000000..3bb9490
--- /dev/null
+++ b/gnu/usr.bin/cc/include/expr.h
@@ -0,0 +1,834 @@
+/* Definitions for code generation pass of GNU compiler.
+ Copyright (C) 1987, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#ifndef __STDC__
+#ifndef const
+#define const
+#endif
+#endif
+
+/* The default branch cost is 1. */
+#ifndef BRANCH_COST
+#define BRANCH_COST 1
+#endif
+
+/* Macros to access the slots of a QUEUED rtx.
+ Here rather than in rtl.h because only the expansion pass
+ should ever encounter a QUEUED. */
+
+/* The variable for which an increment is queued. */
+#define QUEUED_VAR(P) XEXP (P, 0)
+/* If the increment has been emitted, this is the insn
+ that does the increment. It is zero before the increment is emitted. */
+#define QUEUED_INSN(P) XEXP (P, 1)
+/* If a pre-increment copy has been generated, this is the copy
+ (it is a temporary reg). Zero if no copy made yet. */
+#define QUEUED_COPY(P) XEXP (P, 2)
+/* This is the body to use for the insn to do the increment.
+ It is used to emit the increment. */
+#define QUEUED_BODY(P) XEXP (P, 3)
+/* Next QUEUED in the queue. */
+#define QUEUED_NEXT(P) XEXP (P, 4)
+
+/* This is the 4th arg to `expand_expr'.
+ EXPAND_SUM means it is ok to return a PLUS rtx or MULT rtx.
+ EXPAND_INITIALIZER is similar but also records any labels on forced_labels.
+ EXPAND_CONST_ADDRESS means it is ok to return a MEM whose address
+ is a constant that is not a legitimate address. */
+enum expand_modifier {EXPAND_NORMAL, EXPAND_SUM,
+ EXPAND_CONST_ADDRESS, EXPAND_INITIALIZER};
+
+/* List of labels that must never be deleted. */
+extern rtx forced_labels;
+
+/* List (chain of EXPR_LISTs) of pseudo-regs of SAVE_EXPRs.
+ So we can mark them all live at the end of the function, if stupid. */
+extern rtx save_expr_regs;
+
+extern int current_function_calls_alloca;
+extern int current_function_outgoing_args_size;
+
+/* This is the offset from the arg pointer to the place where the first
+ anonymous arg can be found, if there is one. */
+extern rtx current_function_arg_offset_rtx;
+
+/* This is nonzero if the current function uses the constant pool. */
+extern int current_function_uses_const_pool;
+
+/* This is nonzero if the current function uses pic_offset_table_rtx. */
+extern int current_function_uses_pic_offset_table;
+
+/* The arg pointer hard register, or the pseudo into which it was copied. */
+extern rtx current_function_internal_arg_pointer;
+
+/* Nonzero means stack pops must not be deferred, and deferred stack
+ pops must not be output. It is nonzero inside a function call,
+ inside a conditional expression, inside a statement expression,
+ and in other cases as well. */
+extern int inhibit_defer_pop;
+
+/* Number of function calls seen so far in current function. */
+
+extern int function_call_count;
+
+/* RTX for stack slot that holds the current handler for nonlocal gotos.
+ Zero when function does not have nonlocal labels. */
+
+extern rtx nonlocal_goto_handler_slot;
+
+/* RTX for stack slot that holds the stack pointer value to restore
+ for a nonlocal goto.
+ Zero when function does not have nonlocal labels. */
+
+extern rtx nonlocal_goto_stack_level;
+
+/* List (chain of TREE_LIST) of LABEL_DECLs for all nonlocal labels
+ (labels to which there can be nonlocal gotos from nested functions)
+ in this function. */
+
+#ifdef TREE_CODE /* Don't lose if tree.h not included. */
+extern tree nonlocal_labels;
+#endif
+
+#define NO_DEFER_POP (inhibit_defer_pop += 1)
+#define OK_DEFER_POP (inhibit_defer_pop -= 1)
+
+/* Number of units that we should eventually pop off the stack.
+ These are the arguments to function calls that have already returned. */
+extern int pending_stack_adjust;
+
+/* A list of all cleanups which belong to the arguments of
+ function calls being expanded by expand_call. */
+#ifdef TREE_CODE /* Don't lose if tree.h not included. */
+extern tree cleanups_this_call;
+#endif
+
+/* When temporaries are created by TARGET_EXPRs, they are created at
+ this level of temp_slot_level, so that they can remain allocated
+ until no longer needed. CLEANUP_POINT_EXPRs define the lifetime
+ of TARGET_EXPRs. */
+extern int target_temp_slot_level;
+
+#ifdef TREE_CODE /* Don't lose if tree.h not included. */
+/* Structure to record the size of a sequence of arguments
+ as the sum of a tree-expression and a constant. */
+
+struct args_size
+{
+ int constant;
+ tree var;
+};
+#endif
+
+/* Add the value of the tree INC to the `struct args_size' TO. */
+
+#define ADD_PARM_SIZE(TO, INC) \
+{ tree inc = (INC); \
+ if (TREE_CODE (inc) == INTEGER_CST) \
+ (TO).constant += TREE_INT_CST_LOW (inc); \
+ else if ((TO).var == 0) \
+ (TO).var = inc; \
+ else \
+ (TO).var = size_binop (PLUS_EXPR, (TO).var, inc); }
+
+#define SUB_PARM_SIZE(TO, DEC) \
+{ tree dec = (DEC); \
+ if (TREE_CODE (dec) == INTEGER_CST) \
+ (TO).constant -= TREE_INT_CST_LOW (dec); \
+ else if ((TO).var == 0) \
+ (TO).var = size_binop (MINUS_EXPR, integer_zero_node, dec); \
+ else \
+ (TO).var = size_binop (MINUS_EXPR, (TO).var, dec); }
+
+/* Convert the implicit sum in a `struct args_size' into an rtx. */
+#define ARGS_SIZE_RTX(SIZE) \
+((SIZE).var == 0 ? GEN_INT ((SIZE).constant) \
+ : expand_expr (size_binop (PLUS_EXPR, (SIZE).var, \
+ size_int ((SIZE).constant)), \
+ NULL_RTX, VOIDmode, 0))
+
+/* Convert the implicit sum in a `struct args_size' into a tree. */
+#define ARGS_SIZE_TREE(SIZE) \
+((SIZE).var == 0 ? size_int ((SIZE).constant) \
+ : size_binop (PLUS_EXPR, (SIZE).var, size_int ((SIZE).constant)))
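+
+/* Illustrative example (not used anywhere): accumulating two parameter
+   sizes, one constant and one variable; `varsize' stands in for some
+   non-constant size tree.  */
+#if 0
+  {
+    struct args_size size;
+    size.constant = 0;
+    size.var = 0;
+    ADD_PARM_SIZE (size, size_int (8));	/* goes into size.constant */
+    ADD_PARM_SIZE (size, varsize);	/* goes into size.var */
+    /* ARGS_SIZE_TREE (size) now folds both parts into one tree.  */
+  }
+#endif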
+
+/* Supply a default definition for FUNCTION_ARG_PADDING:
+ usually pad upward, but pad short args downward on
+ big-endian machines. */
+
+enum direction {none, upward, downward}; /* Value has this type. */
+
+#ifndef FUNCTION_ARG_PADDING
+#if BYTES_BIG_ENDIAN
+#define FUNCTION_ARG_PADDING(MODE, TYPE) \
+ (((MODE) == BLKmode \
+ ? ((TYPE) && TREE_CODE (TYPE_SIZE (TYPE)) == INTEGER_CST \
+ && int_size_in_bytes (TYPE) < (PARM_BOUNDARY / BITS_PER_UNIT)) \
+ : GET_MODE_BITSIZE (MODE) < PARM_BOUNDARY) \
+ ? downward : upward)
+#else
+#define FUNCTION_ARG_PADDING(MODE, TYPE) upward
+#endif
+#endif
+
+/* Supply a default definition for FUNCTION_ARG_BOUNDARY. Normally, we let
+ FUNCTION_ARG_PADDING, which also pads the length, handle any needed
+ alignment. */
+
+#ifndef FUNCTION_ARG_BOUNDARY
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) PARM_BOUNDARY
+#endif
+
+/* Nonzero if we do not know how to pass TYPE solely in registers.
+ We cannot do so in the following cases:
+
+ - if the type has variable size
+ - if the type is marked as addressable (it is required to be constructed
+ into the stack)
+ - if the padding and mode of the type is such that a copy into a register
+ would put it into the wrong part of the register.
+
+ Which padding can't be supported depends on the byte endianness.
+
+ A value in a register is implicitly padded at the most significant end.
+ On a big-endian machine, that is the lower end in memory.
+ So a value padded in memory at the upper end can't go in a register.
+ For a little-endian machine, the reverse is true. */
+
+#if BYTES_BIG_ENDIAN
+#define MUST_PASS_IN_STACK_BAD_PADDING upward
+#else
+#define MUST_PASS_IN_STACK_BAD_PADDING downward
+#endif
+
+#define MUST_PASS_IN_STACK(MODE,TYPE) \
+ ((TYPE) != 0 \
+ && (TREE_CODE (TYPE_SIZE (TYPE)) != INTEGER_CST \
+ || TREE_ADDRESSABLE (TYPE) \
+ || ((MODE) == BLKmode \
+ && ! ((TYPE) != 0 && TREE_CODE (TYPE_SIZE (TYPE)) == INTEGER_CST \
+ && 0 == (int_size_in_bytes (TYPE) \
+ % (PARM_BOUNDARY / BITS_PER_UNIT))) \
+ && (FUNCTION_ARG_PADDING (MODE, TYPE) \
+ == MUST_PASS_IN_STACK_BAD_PADDING))))
+
+/* Nonzero if type TYPE should be returned in memory.
+ Most machines can use the following default definition. */
+
+#ifndef RETURN_IN_MEMORY
+#define RETURN_IN_MEMORY(TYPE) (TYPE_MODE (TYPE) == BLKmode)
+#endif
+
+/* Optabs are tables saying how to generate insn bodies
+ for various machine modes and numbers of operands.
+ Each optab applies to one operation.
+ For example, add_optab applies to addition.
+
+ The insn_code slot is the enum insn_code that says how to
+ generate an insn for this operation on a particular machine mode.
+ It is CODE_FOR_nothing if there is no such insn on the target machine.
+
+   The `libfunc' slot is the name of the library function that
+ can be used to perform the operation.
+
+ A few optabs, such as move_optab and cmp_optab, are used
+ by special code. */
+
+/* Everything that uses expr.h needs to define enum insn_code
+ but we don't list it in the Makefile dependencies just for that. */
+#include "insn-codes.h"
+
+typedef struct optab
+{
+ enum rtx_code code;
+ struct {
+ enum insn_code insn_code;
+ rtx libfunc;
+ } handlers [NUM_MACHINE_MODES];
+} * optab;
+
+/* Given an enum insn_code, access the function to construct
+ the body of that kind of insn. */
+#ifdef FUNCTION_CONVERSION_BUG
+/* Some compilers fail to convert a function properly to a
+ pointer-to-function when used as an argument.
+ So produce the pointer-to-function directly.
+ Luckily, these compilers seem to work properly when you
+ call the pointer-to-function. */
+#define GEN_FCN(CODE) (insn_gen_function[(int) (CODE)])
+#else
+#define GEN_FCN(CODE) (*insn_gen_function[(int) (CODE)])
+#endif
+
+extern rtx (*const insn_gen_function[]) ();
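+
+/* Usage sketch (a minimal illustration, not compiled): once an
+   insn_code `icode' is known not to be CODE_FOR_nothing, construct
+   the insn body and emit it.  */
+#if 0
+{
+  rtx pat = GEN_FCN (icode) (operand0, operand1);
+  emit_insn (pat);
+}
+#endif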
+
+extern optab add_optab;
+extern optab sub_optab;
+extern optab smul_optab; /* Signed and floating-point multiply */
+extern optab smul_highpart_optab; /* Signed multiply, return high word */
+extern optab umul_highpart_optab;
+extern optab smul_widen_optab; /* Signed multiply with result
+ one machine mode wider than args */
+extern optab umul_widen_optab;
+extern optab sdiv_optab; /* Signed divide */
+extern optab sdivmod_optab; /* Signed divide-and-remainder in one */
+extern optab udiv_optab;
+extern optab udivmod_optab;
+extern optab smod_optab; /* Signed remainder */
+extern optab umod_optab;
+extern optab flodiv_optab; /* Optab for floating divide. */
+extern optab ftrunc_optab; /* Convert float to integer in float fmt */
+extern optab and_optab; /* Logical and */
+extern optab ior_optab; /* Logical or */
+extern optab xor_optab; /* Logical xor */
+extern optab ashl_optab; /* Arithmetic shift left */
+extern optab ashr_optab; /* Arithmetic shift right */
+extern optab lshr_optab; /* Logical shift right */
+extern optab rotl_optab; /* Rotate left */
+extern optab rotr_optab; /* Rotate right */
+extern optab smin_optab; /* Signed and floating-point minimum value */
+extern optab smax_optab; /* Signed and floating-point maximum value */
+extern optab umin_optab; /* Unsigned minimum value */
+extern optab umax_optab; /* Unsigned maximum value */
+
+extern optab mov_optab; /* Move instruction. */
+extern optab movstrict_optab; /* Move, preserving high part of register. */
+
+extern optab cmp_optab; /* Compare insn; two operands. */
+extern optab tst_optab; /* tst insn; compare one operand against 0 */
+
+/* Unary operations */
+extern optab neg_optab; /* Negation */
+extern optab abs_optab; /* Abs value */
+extern optab one_cmpl_optab; /* Bitwise not */
+extern optab ffs_optab; /* Find first bit set */
+extern optab sqrt_optab; /* Square root */
+extern optab sin_optab; /* Sine */
+extern optab cos_optab; /* Cosine */
+extern optab strlen_optab; /* String length */
+
+/* Tables of patterns for extending one integer mode to another. */
+extern enum insn_code extendtab[MAX_MACHINE_MODE][MAX_MACHINE_MODE][2];
+
+/* Tables of patterns for converting between fixed and floating point. */
+extern enum insn_code fixtab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
+extern enum insn_code fixtrunctab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
+extern enum insn_code floattab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
+
+/* Contains the optab used for each rtx code. */
+extern optab code_to_optab[NUM_RTX_CODE + 1];
+
+/* Passed to expand_binop and expand_unop to say which options to try to use
+ if the requested operation can't be open-coded on the requisite mode.
+ Either OPTAB_LIB or OPTAB_LIB_WIDEN says try using a library call.
+ Either OPTAB_WIDEN or OPTAB_LIB_WIDEN says try using a wider mode.
+ OPTAB_MUST_WIDEN says try widening and don't try anything else. */
+
+enum optab_methods
+{
+ OPTAB_DIRECT,
+ OPTAB_LIB,
+ OPTAB_WIDEN,
+ OPTAB_LIB_WIDEN,
+ OPTAB_MUST_WIDEN
+};
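+
+/* Usage sketch (a minimal illustration, not compiled; op0, op1 and
+   target are rtx's supplied by the caller): expand a signed SImode
+   addition, allowing widening or a library call as fallbacks.  */
+#if 0
+{
+  rtx sum = expand_binop (SImode, add_optab, op0, op1, target,
+                          0, OPTAB_LIB_WIDEN);
+}
+#endif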
+
+/* SYMBOL_REF rtx's for the library functions that are called
+ implicitly and not via optabs. */
+
+extern rtx extendsfdf2_libfunc;
+extern rtx extendsfxf2_libfunc;
+extern rtx extendsftf2_libfunc;
+extern rtx extenddfxf2_libfunc;
+extern rtx extenddftf2_libfunc;
+
+extern rtx truncdfsf2_libfunc;
+extern rtx truncxfsf2_libfunc;
+extern rtx trunctfsf2_libfunc;
+extern rtx truncxfdf2_libfunc;
+extern rtx trunctfdf2_libfunc;
+
+extern rtx memcpy_libfunc;
+extern rtx bcopy_libfunc;
+extern rtx memcmp_libfunc;
+extern rtx bcmp_libfunc;
+extern rtx memset_libfunc;
+extern rtx bzero_libfunc;
+
+extern rtx eqsf2_libfunc;
+extern rtx nesf2_libfunc;
+extern rtx gtsf2_libfunc;
+extern rtx gesf2_libfunc;
+extern rtx ltsf2_libfunc;
+extern rtx lesf2_libfunc;
+
+extern rtx eqdf2_libfunc;
+extern rtx nedf2_libfunc;
+extern rtx gtdf2_libfunc;
+extern rtx gedf2_libfunc;
+extern rtx ltdf2_libfunc;
+extern rtx ledf2_libfunc;
+
+extern rtx eqxf2_libfunc;
+extern rtx nexf2_libfunc;
+extern rtx gtxf2_libfunc;
+extern rtx gexf2_libfunc;
+extern rtx ltxf2_libfunc;
+extern rtx lexf2_libfunc;
+
+extern rtx eqtf2_libfunc;
+extern rtx netf2_libfunc;
+extern rtx gttf2_libfunc;
+extern rtx getf2_libfunc;
+extern rtx lttf2_libfunc;
+extern rtx letf2_libfunc;
+
+extern rtx floatsisf_libfunc;
+extern rtx floatdisf_libfunc;
+extern rtx floattisf_libfunc;
+
+extern rtx floatsidf_libfunc;
+extern rtx floatdidf_libfunc;
+extern rtx floattidf_libfunc;
+
+extern rtx floatsixf_libfunc;
+extern rtx floatdixf_libfunc;
+extern rtx floattixf_libfunc;
+
+extern rtx floatsitf_libfunc;
+extern rtx floatditf_libfunc;
+extern rtx floattitf_libfunc;
+
+extern rtx fixsfsi_libfunc;
+extern rtx fixsfdi_libfunc;
+extern rtx fixsfti_libfunc;
+
+extern rtx fixdfsi_libfunc;
+extern rtx fixdfdi_libfunc;
+extern rtx fixdfti_libfunc;
+
+extern rtx fixxfsi_libfunc;
+extern rtx fixxfdi_libfunc;
+extern rtx fixxfti_libfunc;
+
+extern rtx fixtfsi_libfunc;
+extern rtx fixtfdi_libfunc;
+extern rtx fixtfti_libfunc;
+
+extern rtx fixunssfsi_libfunc;
+extern rtx fixunssfdi_libfunc;
+extern rtx fixunssfti_libfunc;
+
+extern rtx fixunsdfsi_libfunc;
+extern rtx fixunsdfdi_libfunc;
+extern rtx fixunsdfti_libfunc;
+
+extern rtx fixunsxfsi_libfunc;
+extern rtx fixunsxfdi_libfunc;
+extern rtx fixunsxfti_libfunc;
+
+extern rtx fixunstfsi_libfunc;
+extern rtx fixunstfdi_libfunc;
+extern rtx fixunstfti_libfunc;
+
+typedef rtx (*rtxfun) ();
+
+/* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...),
+ gives the gen_function to make a branch to test that condition. */
+
+extern rtxfun bcc_gen_fctn[NUM_RTX_CODE];
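+
+/* Usage sketch (a minimal illustration, not compiled; `label' is a
+   CODE_LABEL rtx): compare two SImode values, then branch if equal.  */
+#if 0
+{
+  emit_cmp_insn (x, y, EQ, NULL_RTX, SImode, 0, 0);
+  emit_jump_insn ((*bcc_gen_fctn[(int) EQ]) (label));
+}
+#endif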
+
+/* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...),
+ gives the insn code to make a store-condition insn
+ to test that condition. */
+
+extern enum insn_code setcc_gen_code[NUM_RTX_CODE];
+
+/* This array records the insn_code of insns to perform block moves. */
+extern enum insn_code movstr_optab[NUM_MACHINE_MODES];
+
+/* Define functions given in optabs.c. */
+
+/* Expand a binary operation given optab and rtx operands. */
+extern rtx expand_binop PROTO((enum machine_mode, optab, rtx, rtx, rtx,
+ int, enum optab_methods));
+
+/* Expand a binary operation with both signed and unsigned forms. */
+extern rtx sign_expand_binop PROTO((enum machine_mode, optab, optab, rtx,
+ rtx, rtx, int, enum optab_methods));
+
+/* Generate code to perform an operation on two operands with two results. */
+extern int expand_twoval_binop PROTO((optab, rtx, rtx, rtx, rtx, int));
+
+/* Expand a unary arithmetic operation given optab rtx operand. */
+extern rtx expand_unop PROTO((enum machine_mode, optab, rtx, rtx, int));
+
+/* Expand the complex absolute value operation. */
+extern rtx expand_complex_abs PROTO((enum machine_mode, rtx, rtx, int));
+
+/* Generate an instruction with a given INSN_CODE with an output and
+ an input. */
+extern void emit_unop_insn PROTO((int, rtx, rtx, enum rtx_code));
+
+/* Emit code to perform a series of operations on a multi-word quantity, one
+ word at a time. */
+extern rtx emit_no_conflict_block PROTO((rtx, rtx, rtx, rtx, rtx));
+
+/* Emit code to make a call to a constant function or a library call. */
+extern void emit_libcall_block PROTO((rtx, rtx, rtx, rtx));
+
+/* Emit one rtl instruction to store zero in specified rtx. */
+extern void emit_clr_insn PROTO((rtx));
+
+/* Emit one rtl insn to store 1 in specified rtx assuming it contains 0. */
+extern void emit_0_to_1_insn PROTO((rtx));
+
+/* Emit one rtl insn to compare two rtx's. */
+extern void emit_cmp_insn PROTO((rtx, rtx, enum rtx_code, rtx,
+ enum machine_mode, int, int));
+
+/* Nonzero if a compare of mode MODE can be done straightforwardly
+ (without splitting it into pieces). */
+extern int can_compare_p PROTO((enum machine_mode));
+
+/* Emit a library call comparison between floating point X and Y.
+ COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
+extern void emit_float_lib_cmp PROTO((rtx, rtx, enum rtx_code));
+
+/* Generate code to indirectly jump to a location given in the rtx LOC. */
+extern void emit_indirect_jump PROTO((rtx));
+
+/* Create but don't emit one rtl instruction to add one rtx into another.
+ Modes must match; operands must meet the operation's predicates.
+ Likewise for subtraction and for just copying.
+ These do not call protect_from_queue; caller must do so. */
+extern rtx gen_add2_insn PROTO((rtx, rtx));
+extern rtx gen_sub2_insn PROTO((rtx, rtx));
+extern rtx gen_move_insn PROTO((rtx, rtx));
+extern int have_add2_insn PROTO((enum machine_mode));
+extern int have_sub2_insn PROTO((enum machine_mode));
+
+/* Return the INSN_CODE to use for an extend operation. */
+extern enum insn_code can_extend_p PROTO((enum machine_mode,
+ enum machine_mode, int));
+
+/* Generate the body of an insn to extend Y (with mode MFROM)
+ into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
+extern rtx gen_extend_insn PROTO((rtx, rtx, enum machine_mode,
+ enum machine_mode, int));
+
+/* Initialize the tables that control conversion between fixed and
+ floating values. */
+extern void init_fixtab PROTO((void));
+extern void init_floattab PROTO((void));
+
+/* Generate code for a FLOAT_EXPR. */
+extern void expand_float PROTO((rtx, rtx, int));
+
+/* Generate code for a FIX_EXPR. */
+extern void expand_fix PROTO((rtx, rtx, int));
+
+/* Call this once to initialize the contents of the optabs
+ appropriately for the current target machine. */
+extern void init_optabs PROTO((void));
+
+/* Functions from expmed.c: */
+
+/* Arguments MODE, RTX: return an rtx for the negation of that value.
+ May emit insns. */
+extern rtx negate_rtx PROTO((enum machine_mode, rtx));
+
+/* Expand a logical AND operation. */
+extern rtx expand_and PROTO((rtx, rtx, rtx));
+
+/* Emit a store-flag operation. */
+extern rtx emit_store_flag PROTO((rtx, enum rtx_code, rtx, rtx,
+ enum machine_mode, int, int));
+
+/* Functions from loop.c: */
+
+/* Given a JUMP_INSN, return a description of the test being made. */
+extern rtx get_condition PROTO((rtx, rtx *));
+
+/* Functions from expr.c: */
+
+/* This is run once per compilation to set up which modes can be used
+ directly in memory and to initialize the block move optab. */
+extern void init_expr_once PROTO((void));
+
+/* This is run at the start of compiling a function. */
+extern void init_expr PROTO((void));
+
+/* Use protect_from_queue to convert a QUEUED expression
+ into something that you can put immediately into an instruction. */
+extern rtx protect_from_queue PROTO((rtx, int));
+
+/* Perform all the pending incrementations. */
+extern void emit_queue PROTO((void));
+
+/* Emit some rtl insns to move data between rtx's, converting machine modes.
+ Both modes must be floating or both fixed. */
+extern void convert_move PROTO((rtx, rtx, int));
+
+/* Convert an rtx to specified machine mode and return the result. */
+extern rtx convert_to_mode PROTO((enum machine_mode, rtx, int));
+
+/* Convert an rtx to MODE from OLDMODE and return the result. */
+extern rtx convert_modes PROTO((enum machine_mode, enum machine_mode, rtx, int));
+
+/* Emit code to move a block Y to a block X. */
+extern void emit_block_move PROTO((rtx, rtx, rtx, int));
+
+/* Copy all or part of a value X into registers starting at REGNO.
+ The number of registers to be filled is NREGS. */
+extern void move_block_to_reg PROTO((int, rtx, int, enum machine_mode));
+
+/* Copy all or part of a BLKmode value X out of registers starting at REGNO.
+ The number of registers to be filled is NREGS. */
+extern void move_block_from_reg PROTO((int, rtx, int, int));
+
+/* Mark REG as holding a parameter for the next CALL_INSN. */
+extern void use_reg PROTO((rtx*, rtx));
+/* Mark NREGS consecutive regs, starting at REGNO, as holding parameters
+ for the next CALL_INSN. */
+extern void use_regs PROTO((rtx*, int, int));
+
+/* Write zeros through the storage of OBJECT.
+ If OBJECT has BLKmode, SIZE is its length in bytes. */
+extern void clear_storage PROTO((rtx, int));
+
+/* Emit insns to set X from Y. */
+extern rtx emit_move_insn PROTO((rtx, rtx));
+
+/* Emit insns to set X from Y, with no frills. */
+extern rtx emit_move_insn_1 PROTO ((rtx, rtx));
+
+/* Push a block of length SIZE (perhaps variable)
+ and return an rtx to address the beginning of the block. */
+extern rtx push_block PROTO((rtx, int, int));
+
+/* Make an operand to push something on the stack. */
+extern rtx gen_push_operand PROTO((void));
+
+#ifdef TREE_CODE
+/* Generate code to push something onto the stack, given its mode and type. */
+extern void emit_push_insn PROTO((rtx, enum machine_mode, tree, rtx, int,
+ int, rtx, int, rtx, rtx));
+
+/* Emit library call. */
+extern void emit_library_call PVPROTO((rtx orgfun, int no_queue,
+ enum machine_mode outmode, int nargs, ...));
+extern rtx emit_library_call_value PVPROTO((rtx orgfun, rtx value, int no_queue,
+ enum machine_mode outmode, int nargs, ...));
+
+/* Expand an assignment that stores the value of FROM into TO. */
+extern rtx expand_assignment PROTO((tree, tree, int, int));
+
+/* Generate code for computing expression EXP,
+ and storing the value into TARGET.
+ If SUGGEST_REG is nonzero, copy the value through a register
+ and return that register, if that is possible. */
+extern rtx store_expr PROTO((tree, rtx, int));
+#endif
+
+/* Given an rtx that may include add and multiply operations,
+ generate them as insns and return a pseudo-reg containing the value.
+ Useful after calling expand_expr with 1 as sum_ok. */
+extern rtx force_operand PROTO((rtx, rtx));
+
+#ifdef TREE_CODE
+/* Generate code for computing expression EXP.
+ An rtx for the computed value is returned. The value is never null.
+ In the case of a void EXP, const0_rtx is returned. */
+extern rtx expand_expr PROTO((tree, rtx, enum machine_mode,
+ enum expand_modifier));
+#endif
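+
+/* Usage sketch (a minimal illustration, not compiled): the common
+   call, with no suggested target and modifier 0 (EXPAND_NORMAL).  */
+#if 0
+{
+  rtx val = expand_expr (exp, NULL_RTX, VOIDmode, 0);
+}
+#endif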
+
+/* At the start of a function, record that we have no previously-pushed
+ arguments waiting to be popped. */
+extern void init_pending_stack_adjust PROTO((void));
+
+/* When exiting from function, if safe, clear out any pending stack adjust
+ so the adjustment won't get done. */
+extern void clear_pending_stack_adjust PROTO((void));
+
+/* Pop any previously-pushed arguments that have not been popped yet. */
+extern void do_pending_stack_adjust PROTO((void));
+
+#ifdef TREE_CODE
+/* Expand all cleanups up to OLD_CLEANUPS. */
+extern void expand_cleanups_to PROTO((tree));
+
+/* Generate code to evaluate EXP and jump to LABEL if the value is zero. */
+extern void jumpifnot PROTO((tree, rtx));
+
+/* Generate code to evaluate EXP and jump to LABEL if the value is nonzero. */
+extern void jumpif PROTO((tree, rtx));
+
+/* Generate code to evaluate EXP and jump to IF_FALSE_LABEL if
+ the result is zero, or IF_TRUE_LABEL if the result is one. */
+extern void do_jump PROTO((tree, rtx, rtx));
+#endif
+
+/* Generate rtl to compare two rtx's; this will call emit_cmp_insn. */
+extern rtx compare_from_rtx PROTO((rtx, rtx, enum rtx_code, int,
+ enum machine_mode, rtx, int));
+
+/* Generate a tablejump instruction (used for switch statements). */
+extern void do_tablejump PROTO((rtx, enum machine_mode, rtx, rtx, rtx));
+
+#ifdef TREE_CODE
+/* rtl.h and tree.h were included. */
+/* Return an rtx for the size in bytes of the value of an expr. */
+extern rtx expr_size PROTO((tree));
+
+extern rtx lookup_static_chain PROTO((tree));
+
+/* Convert a stack slot address ADDR valid in function FNDECL
+ into an address valid in this function (using a static chain). */
+extern rtx fix_lexical_addr PROTO((rtx, tree));
+
+/* Return the address of the trampoline for entering nested fn FUNCTION. */
+extern rtx trampoline_address PROTO((tree));
+
+/* Return an rtx that refers to the value returned by a function
+ in its original home. This becomes invalid if any more code is emitted. */
+extern rtx hard_function_value PROTO((tree, tree));
+
+extern rtx prepare_call_address PROTO((rtx, tree, rtx *, int));
+
+extern rtx expand_call PROTO((tree, rtx, int));
+
+extern rtx expand_shift PROTO((enum tree_code, enum machine_mode, rtx, tree, rtx, int));
+extern rtx expand_divmod PROTO((int, enum tree_code, enum machine_mode, rtx, rtx, rtx, int));
+extern void locate_and_pad_parm PROTO((enum machine_mode, tree, int, tree, struct args_size *, struct args_size *, struct args_size *));
+extern rtx expand_inline_function PROTO((tree, tree, rtx, int, tree, rtx));
+/* Return the CODE_LABEL rtx for a LABEL_DECL, creating it if necessary. */
+extern rtx label_rtx PROTO((tree));
+#endif
+
+/* Indicate how an input argument register was promoted. */
+extern rtx promoted_input_arg PROTO((int, enum machine_mode *, int *));
+
+/* Return an rtx like arg but sans any constant terms.
+ Returns the original rtx if it has no constant terms.
+ The constant terms are added and stored via a second arg. */
+extern rtx eliminate_constant_term PROTO((rtx, rtx *));
+
+/* Convert arg to a valid memory address for specified machine mode,
+   by emitting insns to perform arithmetic if necessary. */
+extern rtx memory_address PROTO((enum machine_mode, rtx));
+
+/* Like `memory_address' but pretend `flag_force_addr' is 0. */
+extern rtx memory_address_noforce PROTO((enum machine_mode, rtx));
+
+/* Return a memory reference like MEMREF, but with its mode changed
+ to MODE and its address changed to ADDR.
+ (VOIDmode means don't change the mode.
+ NULL for ADDR means don't change the address.) */
+extern rtx change_address PROTO((rtx, enum machine_mode, rtx));
+
+/* Return a memory reference like MEMREF, but which is known to have a
+ valid address. */
+
+extern rtx validize_mem PROTO((rtx));
+
+/* Assemble the static constant template for function entry trampolines. */
+extern rtx assemble_trampoline_template PROTO((void));
+
+/* Return 1 if two rtx's are equivalent in structure and elements. */
+extern int rtx_equal_p PROTO((rtx, rtx));
+
+/* Given rtx, return new rtx whose address won't be affected by
+ any side effects. It has been copied to a new temporary reg. */
+extern rtx stabilize PROTO((rtx));
+
+/* Given an rtx, copy all regs it refers to into new temps
+ and return a modified copy that refers to the new temps. */
+extern rtx copy_all_regs PROTO((rtx));
+
+/* Copy given rtx to a new temp reg and return that. */
+extern rtx copy_to_reg PROTO((rtx));
+
+/* Like copy_to_reg but always make the reg Pmode. */
+extern rtx copy_addr_to_reg PROTO((rtx));
+
+/* Like copy_to_reg but always make the reg the specified mode MODE. */
+extern rtx copy_to_mode_reg PROTO((enum machine_mode, rtx));
+
+/* Copy given rtx to given temp reg and return that. */
+extern rtx copy_to_suggested_reg PROTO((rtx, rtx, enum machine_mode));
+
+/* Copy a value to a register if it isn't already a register.
+ Args are mode (in case value is a constant) and the value. */
+extern rtx force_reg PROTO((enum machine_mode, rtx));
+
+/* Return given rtx, copied into a new temp reg if it was in memory. */
+extern rtx force_not_mem PROTO((rtx));
+
+#ifdef TREE_CODE
+/* Return mode and signedness to use when object is promoted. */
+extern enum machine_mode promote_mode PROTO((tree, enum machine_mode,
+ int *, int));
+#endif
+
+/* Remove some bytes from the stack. An rtx says how many. */
+extern void adjust_stack PROTO((rtx));
+
+/* Add some bytes to the stack. An rtx says how many. */
+extern void anti_adjust_stack PROTO((rtx));
+
+/* This enum is used for the following two functions. */
+enum save_level {SAVE_BLOCK, SAVE_FUNCTION, SAVE_NONLOCAL};
+
+/* Save the stack pointer at the specified level. */
+extern void emit_stack_save PROTO((enum save_level, rtx *, rtx));
+
+/* Restore the stack pointer from a save area of the specified level. */
+extern void emit_stack_restore PROTO((enum save_level, rtx, rtx));
+
+/* Allocate some space on the stack dynamically and return its address. An rtx
+ says how many bytes. */
+extern rtx allocate_dynamic_stack_space PROTO((rtx, rtx, int));
+
+/* Emit code to copy function value to a new temp reg and return that reg. */
+extern rtx function_value ();
+
+/* Return an rtx that refers to the value returned by a library call
+ in its original home. This becomes invalid if any more code is emitted. */
+extern rtx hard_libcall_value PROTO((enum machine_mode));
+
+/* Given an rtx, return an rtx for a value rounded up to a multiple
+ of STACK_BOUNDARY / BITS_PER_UNIT. */
+extern rtx round_push PROTO((rtx));
+
+extern void emit_block_move PROTO((rtx, rtx, rtx, int));
+
+extern rtx store_bit_field PROTO((rtx, int, int, enum machine_mode, rtx, int, int));
+extern rtx extract_bit_field PROTO((rtx, int, int, int, rtx, enum machine_mode, enum machine_mode, int, int));
+extern rtx expand_mult PROTO((enum machine_mode, rtx, rtx, rtx, int));
+extern rtx expand_mult_add PROTO((rtx, rtx, rtx, rtx,enum machine_mode, int));
+
+extern rtx assemble_static_space PROTO((int));
+
+/* Hook called by expand_expr for language-specific tree codes.
+ It is up to the language front end to install a hook
+ if it has any such codes that expand_expr needs to know about. */
+extern rtx (*lang_expand_expr) ();
diff --git a/gnu/usr.bin/cc/include/flags.h b/gnu/usr.bin/cc/include/flags.h
new file mode 100644
index 0000000..07ea734
--- /dev/null
+++ b/gnu/usr.bin/cc/include/flags.h
@@ -0,0 +1,359 @@
+/* Compilation switch flag definitions for GNU CC.
+ Copyright (C) 1987, 1988, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Name of the input .c file being compiled. */
+extern char *main_input_filename;
+
+enum debug_info_type
+{
+ NO_DEBUG, /* Write no debug info. */
+ DBX_DEBUG, /* Write BSD .stabs for DBX (using dbxout.c). */
+ SDB_DEBUG, /* Write COFF for (old) SDB (using sdbout.c). */
+ DWARF_DEBUG, /* Write Dwarf debug info (using dwarfout.c). */
+ XCOFF_DEBUG /* Write IBM/Xcoff debug info (using dbxout.c). */
+};
+
+/* Specify which kind of debugging info to generate. */
+extern enum debug_info_type write_symbols;
+
+enum debug_info_level
+{
+ DINFO_LEVEL_NONE, /* Write no debugging info. */
+ DINFO_LEVEL_TERSE, /* Write minimal info to support tracebacks only. */
+ DINFO_LEVEL_NORMAL, /* Write info for all declarations (and line table). */
+ DINFO_LEVEL_VERBOSE /* Write normal info plus #define/#undef info. */
+};
+
+/* Specify how much debugging info to generate. */
+extern enum debug_info_level debug_info_level;
+
+/* Nonzero means use GNU-only extensions in the generated symbolic
+ debugging information. */
+extern int use_gnu_debug_info_extensions;
+
+/* Nonzero means do optimizations. -opt. */
+
+extern int optimize;
+
+/* Nonzero means do stupid register allocation. -noreg.
+ Currently, this is 1 if `optimize' is 0. */
+
+extern int obey_regdecls;
+
+/* Don't print functions as they are compiled and don't print
+ times taken by the various passes. -quiet. */
+
+extern int quiet_flag;
+
+/* Don't print warning messages. -w. */
+
+extern int inhibit_warnings;
+
+/* Do print extra warnings (such as for uninitialized variables). -W. */
+
+extern int extra_warnings;
+
+/* Nonzero to warn about unused local variables. */
+
+extern int warn_unused;
+
+/* Nonzero means warn if inline function is too large. */
+
+extern int warn_inline;
+
+/* Nonzero to warn about variables used before they are initialized. */
+
+extern int warn_uninitialized;
+
+/* Nonzero means warn about all declarations which shadow others. */
+
+extern int warn_shadow;
+
+/* Warn if a switch on an enum fails to have a case for every enum value. */
+
+extern int warn_switch;
+
+/* Nonzero means warn about function definitions that default the return type
+ or that use a null return and have a return-type other than void. */
+
+extern int warn_return_type;
+
+/* Nonzero means warn about pointer casts that increase the required
+ alignment of the target type (and might therefore lead to a crash
+ due to a misaligned access). */
+
+extern int warn_cast_align;
+
+/* Nonzero means warn that dbx info for template class methods isn't fully
+ supported yet. */
+
+extern int warn_template_debugging;
+
+/* Nonzero means warn about any identifiers that match in the first N
+ characters. The value N is in `id_clash_len'. */
+
+extern int warn_id_clash;
+extern unsigned id_clash_len;
+
+/* Nonzero means warn about any object definitions whose size is larger
+   than N bytes.  Also warn about function definitions whose returned
+   values are larger than N bytes.  The value N is in `larger_than_size'. */
+
+extern int warn_larger_than;
+extern unsigned larger_than_size;
+
+/* Warn if a function returns an aggregate,
+ since there are often incompatible calling conventions for doing this. */
+
+extern int warn_aggregate_return;
+
+/* Nonzero if generating code to do profiling. */
+
+extern int profile_flag;
+
+/* Nonzero if generating code to do profiling on the basis of basic blocks. */
+
+extern int profile_block_flag;
+
+/* Nonzero for -pedantic switch: warn about anything
+ that standard C forbids. */
+
+extern int pedantic;
+
+/* Temporarily suppress certain warnings.
+ This is set while reading code from a system header file. */
+
+extern int in_system_header;
+
+/* Nonzero for -dp: annotate the assembly with a comment describing the
+ pattern and alternative used. */
+
+extern int flag_print_asm_name;
+
+/* Now the symbols that are set with `-f' switches. */
+
+/* Nonzero means `char' should be signed. */
+
+extern int flag_signed_char;
+
+/* Nonzero means give an enum type only as many bytes as it needs. */
+
+extern int flag_short_enums;
+
+/* Nonzero for -fcaller-saves: allocate values in regs that need to
+ be saved across function calls, if that produces overall better code.
+ Optional now, so people can test it. */
+
+extern int flag_caller_saves;
+
+/* Nonzero for -fpcc-struct-return: return values the same way PCC does. */
+
+extern int flag_pcc_struct_return;
+
+/* Nonzero for -fforce-mem: load memory value into a register
+ before arithmetic on it. This makes better cse but slower compilation. */
+
+extern int flag_force_mem;
+
+/* Nonzero for -fforce-addr: load memory address into a register before
+ reference to memory. This makes better cse but slower compilation. */
+
+extern int flag_force_addr;
+
+/* Nonzero for -fdefer-pop: don't pop args after each function call;
+   instead save them up to pop many calls' args with one insn. */
+
+extern int flag_defer_pop;
+
+/* Nonzero for -ffloat-store: don't allocate floats and doubles
+ in extended-precision registers. */
+
+extern int flag_float_store;
+
+/* Nonzero enables strength-reduction in loop.c. */
+
+extern int flag_strength_reduce;
+
+/* Nonzero enables loop unrolling in unroll.c. Only loops for which the
+ number of iterations can be calculated at compile-time (UNROLL_COMPLETELY,
+ UNROLL_MODULO) or at run-time (preconditioned to be UNROLL_MODULO) are
+ unrolled. */
+
+extern int flag_unroll_loops;
+
+/* Nonzero enables loop unrolling in unroll.c. All loops are unrolled.
+ This is generally not a win. */
+
+extern int flag_unroll_all_loops;
+
+/* Nonzero for -fcse-follow-jumps:
+ have cse follow jumps to do a more extensive job. */
+
+extern int flag_cse_follow_jumps;
+
+/* Nonzero for -fcse-skip-blocks:
+ have cse follow a branch around a block. */
+
+extern int flag_cse_skip_blocks;
+
+/* Nonzero for -fexpensive-optimizations:
+ perform miscellaneous relatively-expensive optimizations. */
+extern int flag_expensive_optimizations;
+
+/* Nonzero for -fwritable-strings:
+ store string constants in data segment and don't uniquize them. */
+
+extern int flag_writable_strings;
+
+/* Nonzero means don't put addresses of constant functions in registers.
+ Used for compiling the Unix kernel, where strange substitutions are
+ done on the assembly output. */
+
+extern int flag_no_function_cse;
+
+/* Nonzero for -fomit-frame-pointer:
+ don't make a frame pointer in simple functions that don't require one. */
+
+extern int flag_omit_frame_pointer;
+
+/* Nonzero to inhibit use of define_peephole optimizations. */
+
+extern int flag_no_peephole;
+
+/* Nonzero means all references through pointers are volatile. */
+
+extern int flag_volatile;
+
+/* Nonzero means treat all global and extern variables as volatile. */
+
+extern int flag_volatile_global;
+
+/* Nonzero allows GCC to violate some IEEE or ANSI rules regarding math
+ operations in the interest of optimization. For example it allows
+ GCC to assume arguments to sqrt are nonnegative numbers, allowing
+ faster code for sqrt to be generated. */
+
+extern int flag_fast_math;
+
+/* Nonzero means make functions that look like good inline candidates
+ go inline. */
+
+extern int flag_inline_functions;
+
+/* Nonzero for -fkeep-inline-functions: even if we make a function
+ go inline everywhere, keep its definition around for debugging
+ purposes. */
+
+extern int flag_keep_inline_functions;
+
+/* Nonzero means that functions declared `inline' will be treated
+ as `static'. Prevents generation of zillions of copies of unused
+ static inline functions; instead, `inlines' are written out
+ only when actually used. Used in conjunction with -g. Also
+ does the right thing with #pragma interface. */
+
+extern int flag_no_inline;
+
+/* Nonzero if we are only using the compiler to check for syntax errors. */
+
+extern int flag_syntax_only;
+
+/* Nonzero means we should save auxiliary info into a .X file. */
+
+extern int flag_gen_aux_info;
+
+/* Nonzero means make the text shared if supported. */
+
+extern int flag_shared_data;
+
+/* flag_schedule_insns means schedule insns within basic blocks (before
+ local_alloc).
+ flag_schedule_insns_after_reload means schedule insns after
+ global_alloc. */
+
+extern int flag_schedule_insns;
+extern int flag_schedule_insns_after_reload;
+
+/* Nonzero means put things in delayed-branch slots if supported. */
+
+extern int flag_delayed_branch;
+
+/* Nonzero means to run cleanups after CALL_EXPRs. */
+
+extern int flag_short_temps;
+
+/* Nonzero means pretend it is OK to examine bits of target floats,
+ even if that isn't true. The resulting code will have incorrect constants,
+ but the same series of instructions that the native compiler would make. */
+
+extern int flag_pretend_float;
+
+/* Nonzero means change certain warnings into errors.
+ Usually these are warnings about failure to conform to some standard. */
+
+extern int flag_pedantic_errors;
+
+/* Nonzero means generate position-independent code.
+ This is not fully implemented yet. */
+
+extern int flag_pic;
+
+/* Nonzero means place uninitialized global data in the bss section. */
+
+extern int flag_no_common;
+
+/* -finhibit-size-directive inhibits output of .size for ELF.
+ This is used only for compiling crtstuff.c,
+ and it may be extended to other effects
+ needed for crtstuff.c on other systems. */
+extern int flag_inhibit_size_directive;
+
+/* -fverbose-asm causes extra commentary information to be produced in
+ the generated assembly code (to make it more readable). This option
+ is generally only of use to those who actually need to read the
+ generated assembly code (perhaps while debugging the compiler itself). */
+
+extern int flag_verbose_asm;
+
+/* -fgnu-linker specifies use of the GNU linker for initializations.
+ -fno-gnu-linker says that collect will be used. */
+extern int flag_gnu_linker;
+
+/* Other basic status info about current function. */
+
+/* Nonzero means current function must be given a frame pointer.
+ Set in stmt.c if anything is allocated on the stack there.
+ Set in reload1.c if anything is allocated on the stack there. */
+
+extern int frame_pointer_needed;
+
+/* Set nonzero if jump_optimize finds that control falls through
+ at the end of the function. */
+
+extern int can_reach_end;
+
+/* Nonzero if function being compiled receives nonlocal gotos
+ from nested functions. */
+
+extern int current_function_has_nonlocal_label;
+
+/* Nonzero if function being compiled has nonlocal gotos to parent
+ function. */
+
+extern int current_function_has_nonlocal_goto;
diff --git a/gnu/usr.bin/cc/include/function.h b/gnu/usr.bin/cc/include/function.h
new file mode 100644
index 0000000..b37a59a
--- /dev/null
+++ b/gnu/usr.bin/cc/include/function.h
@@ -0,0 +1,216 @@
+/* Structure for saving state for a nested function.
+ Copyright (C) 1989, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#ifndef NULL_TREE
+#define tree int *
+#endif
+#ifndef GET_CODE
+#define rtx int *
+#endif
+
+struct var_refs_queue
+ {
+ rtx modified;
+ enum machine_mode promoted_mode;
+ int unsignedp;
+ struct var_refs_queue *next;
+ };
+
+/* Stack of pending (incomplete) sequences saved by `start_sequence'.
+ Each element describes one pending sequence.
+ The main insn-chain is saved in the last element of the chain,
+ unless the chain is empty. */
+
+struct sequence_stack
+{
+ /* First and last insns in the chain of the saved sequence. */
+ rtx first, last;
+ tree sequence_rtl_expr;
+ struct sequence_stack *next;
+};
+
+extern struct sequence_stack *sequence_stack;
+
+/* This structure can save all the important global and static variables
+ describing the status of the current function. */
+
+struct function
+{
+ struct function *next;
+
+ /* For function.c. */
+ char *name;
+ tree decl;
+ int pops_args;
+ int returns_struct;
+ int returns_pcc_struct;
+ int needs_context;
+ int calls_setjmp;
+ int calls_longjmp;
+ int calls_alloca;
+ int has_nonlocal_label;
+ int has_nonlocal_goto;
+ rtx nonlocal_goto_handler_slot;
+ rtx nonlocal_goto_stack_level;
+ tree nonlocal_labels;
+ int args_size;
+ int pretend_args_size;
+ rtx arg_offset_rtx;
+ int varargs;
+ int max_parm_reg;
+ rtx *parm_reg_stack_loc;
+ int outgoing_args_size;
+ rtx return_rtx;
+ rtx cleanup_label;
+ rtx return_label;
+ rtx save_expr_regs;
+ rtx stack_slot_list;
+ rtx parm_birth_insn;
+ int frame_offset;
+ rtx tail_recursion_label;
+ rtx tail_recursion_reentry;
+ rtx internal_arg_pointer;
+ rtx arg_pointer_save_area;
+ tree rtl_expr_chain;
+ rtx last_parm_insn;
+ tree context_display;
+ tree trampoline_list;
+ int function_call_count;
+ struct temp_slot *temp_slots;
+ int temp_slot_level;
+  /* This slot is initialized to 0 and has entries added to it
+     while a nested function is being compiled. */
+ struct var_refs_queue *fixup_var_refs_queue;
+
+ /* For stmt.c */
+ struct nesting *block_stack;
+ struct nesting *stack_block_stack;
+ struct nesting *cond_stack;
+ struct nesting *loop_stack;
+ struct nesting *case_stack;
+ struct nesting *nesting_stack;
+ int nesting_depth;
+ int block_start_count;
+ tree last_expr_type;
+ rtx last_expr_value;
+ int expr_stmts_for_value;
+ char *emit_filename;
+ int emit_lineno;
+ struct goto_fixup *goto_fixup_chain;
+
+ /* For expr.c. */
+ int pending_stack_adjust;
+ int inhibit_defer_pop;
+ tree cleanups_this_call;
+ rtx saveregs_value;
+ rtx apply_args_value;
+ rtx forced_labels;
+
+ /* For emit-rtl.c. */
+ int reg_rtx_no;
+ int first_label_num;
+ rtx first_insn;
+ rtx last_insn;
+ tree sequence_rtl_expr;
+ struct sequence_stack *sequence_stack;
+ int cur_insn_uid;
+ int last_linenum;
+ char *last_filename;
+ char *regno_pointer_flag;
+ int regno_pointer_flag_length;
+ rtx *regno_reg_rtx;
+
+ /* For stor-layout.c. */
+ tree permanent_type_chain;
+ tree temporary_type_chain;
+ tree permanent_type_end;
+ tree temporary_type_end;
+ tree pending_sizes;
+ int immediate_size_expand;
+
+ /* For tree.c. */
+ int all_types_permanent;
+ struct momentary_level *momentary_stack;
+ char *maybepermanent_firstobj;
+ char *temporary_firstobj;
+ char *momentary_firstobj;
+ char *momentary_function_firstobj;
+ struct obstack *current_obstack;
+ struct obstack *function_obstack;
+ struct obstack *function_maybepermanent_obstack;
+ struct obstack *expression_obstack;
+ struct obstack *saveable_obstack;
+ struct obstack *rtl_obstack;
+
+ /* For integrate.c. */
+ int uses_const_pool;
+
+ /* For md files. */
+ int uses_pic_offset_table;
+ /* tm.h can use this to store whatever it likes. */
+ struct machine_function *machine;
+
+ /* For reorg. */
+ rtx epilogue_delay_list;
+
+ /* For varasm. */
+ struct constant_descriptor **const_rtx_hash_table;
+ struct pool_sym **const_rtx_sym_hash_table;
+ struct pool_constant *first_pool, *last_pool;
+ int pool_offset;
+};
+
+/* The FUNCTION_DECL for an inline function currently being expanded. */
+extern tree inline_function_decl;
+
+/* Label that will go on function epilogue.
+ Jumping to this label serves as a "return" instruction
+ on machines which require execution of the epilogue on all returns. */
+extern rtx return_label;
+
+/* List (chain of EXPR_LISTs) of all stack slots in this function.
+ Made for the sake of unshare_all_rtl. */
+extern rtx stack_slot_list;
+
+/* Given a function decl for a containing function,
+ return the `struct function' for it. */
+struct function *find_function_data PROTO((tree));
+
+/* Pointer to chain of `struct function' for containing functions. */
+extern struct function *outer_function_chain;
+
+/* Put all this function's BLOCK nodes into a vector and return it.
+ Also store in each NOTE for the beginning or end of a block
+ the index of that block in the vector. */
+extern tree *identify_blocks PROTO((tree, rtx));
+
+/* These variables hold pointers to functions to
+ save and restore machine-specific data,
+ in push_function_context and pop_function_context. */
+extern void (*save_machine_status) ();
+extern void (*restore_machine_status) ();
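+
+/* Usage sketch (a minimal illustration, not compiled): compiling a
+   nested function saves and restores the containing function's state;
+   these hooks are invoked from within the two calls below.  */
+#if 0
+{
+  push_function_context ();
+  /* ... generate rtl for the nested function ... */
+  pop_function_context ();
+}
+#endif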
+
+#ifdef rtx
+#undef rtx
+#endif
+
+#ifdef tree
+#undef tree
+#endif
diff --git a/gnu/usr.bin/cc/include/gbl-ctors.h b/gnu/usr.bin/cc/include/gbl-ctors.h
new file mode 100644
index 0000000..2e7f520
--- /dev/null
+++ b/gnu/usr.bin/cc/include/gbl-ctors.h
@@ -0,0 +1,80 @@
+/* Definitions relating to the special __do_global_init function used
+ for getting g++ file-scope static objects constructed. This file
+ will get included either by libgcc2.c (for systems that don't support
+ a .init section) or by crtstuff.c (for those that do).
+
+ Written by Ron Guilmette (rfg@netcom.com)
+
+Copyright (C) 1991 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* This file contains definitions and declarations of things
+ relating to the normal start-up-time invocation of C++
+ file-scope static object constructors. These declarations
+ and definitions are used by *both* libgcc2.c and by crtstuff.c.
+
+ Note that this file should only be compiled with GCC.
+*/
+
+#ifdef HAVE_ATEXIT
+extern void atexit (void (*) (void));
+#define ON_EXIT(FUNC,ARG) atexit ((FUNC))
+#else
+#ifdef sun
+extern void on_exit (void*, void*);
+#define ON_EXIT(FUNC,ARG) on_exit ((FUNC), (ARG))
+#endif
+#endif
+
+/* Declare a pointer to void function type. */
+
+typedef void (*func_ptr) (void);
+
+/* Declare the set of symbols used as begin and end markers for the lists
+ of global object constructors and global object destructors. */
+
+extern func_ptr __CTOR_LIST__[];
+extern func_ptr __DTOR_LIST__[];
+
+/* Declare the routine which needs to get invoked at program exit time. */
+
+extern void __do_global_dtors ();
+
+/* Define a macro with the code which needs to be executed at program
+ start-up time. This macro is used in two places in crtstuff.c (for
+ systems which support a .init section) and in one place in libgcc2.c
+ (for those system which do *not* support a .init section). For all
+ three places where this code might appear, it must be identical, so
+ we define it once here as a macro to avoid various instances getting
+ out-of-sync with one another. */
+
+/* The first word may or may not contain the number of pointers in the table.
+ In all cases, the table is null-terminated.
+ We ignore the first word and scan up to the null. */
+
+/* Some systems use a different strategy for finding the ctors.
+ For example, svr3. */
+#ifndef DO_GLOBAL_CTORS_BODY
+#define DO_GLOBAL_CTORS_BODY \
+do { \
+ func_ptr *p; \
+ for (p = __CTOR_LIST__ + 1; *p; ) \
+ (*p++) (); \
+} while (0)
+#endif
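+
+/* Usage sketch (a minimal illustration, not compiled): with a table
+   laid out as { ignored-first-word, f1, f2, 0 }, the body above calls
+   f1 and then f2.  */
+#if 0
+static void f1 (void), f2 (void);
+func_ptr __CTOR_LIST__[] = { (func_ptr) -1, f1, f2, 0 };
+
+void
+__do_global_ctors ()
+{
+  DO_GLOBAL_CTORS_BODY;
+}
+#endif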
+
diff --git a/gnu/usr.bin/cc/include/glimits.h b/gnu/usr.bin/cc/include/glimits.h
new file mode 100644
index 0000000..ff25a97
--- /dev/null
+++ b/gnu/usr.bin/cc/include/glimits.h
@@ -0,0 +1,93 @@
+#ifndef _LIMITS_H___
+#ifndef _MACH_MACHLIMITS_H_
+
+/* _MACH_MACHLIMITS_H_ is used on OSF/1. */
+#define _LIMITS_H___
+#define _MACH_MACHLIMITS_H_
+
+/* Number of bits in a `char'. */
+#undef CHAR_BIT
+#define CHAR_BIT 8
+
+/* Maximum length of a multibyte character. */
+#ifndef MB_LEN_MAX
+#define MB_LEN_MAX 1
+#endif
+
+/* Minimum and maximum values a `signed char' can hold. */
+#undef SCHAR_MIN
+#define SCHAR_MIN (-128)
+#undef SCHAR_MAX
+#define SCHAR_MAX 127
+
+/* Maximum value an `unsigned char' can hold. (Minimum is 0). */
+#undef UCHAR_MAX
+#define UCHAR_MAX 255
+
+/* Minimum and maximum values a `char' can hold. */
+#ifdef __CHAR_UNSIGNED__
+#undef CHAR_MIN
+#define CHAR_MIN 0
+#undef CHAR_MAX
+#define CHAR_MAX 255
+#else
+#undef CHAR_MIN
+#define CHAR_MIN (-128)
+#undef CHAR_MAX
+#define CHAR_MAX 127
+#endif
+
+/* Minimum and maximum values a `signed short int' can hold. */
+#undef SHRT_MIN
+#define SHRT_MIN (-32768)
+#undef SHRT_MAX
+#define SHRT_MAX 32767
+
+/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */
+#undef USHRT_MAX
+#define USHRT_MAX 65535
+
+/* Minimum and maximum values a `signed int' can hold. */
+#ifndef __INT_MAX__
+#define __INT_MAX__ 2147483647
+#endif
+#undef INT_MIN
+#define INT_MIN (-INT_MAX-1)
+#undef INT_MAX
+#define INT_MAX __INT_MAX__
+
+/* Maximum value an `unsigned int' can hold. (Minimum is 0). */
+#undef UINT_MAX
+#define UINT_MAX (INT_MAX * 2U + 1)
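+
+/* E.g. with 32-bit ints, UINT_MAX is 2147483647 * 2U + 1, i.e.
+   4294967295; the 2U forces unsigned arithmetic, so the expression
+   itself cannot overflow. */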
+
+/* Minimum and maximum values a `signed long int' can hold.
+ (Same as `int'). */
+#ifndef __LONG_MAX__
+#define __LONG_MAX__ 2147483647L
+#endif
+#undef LONG_MIN
+#define LONG_MIN (-LONG_MAX-1)
+#undef LONG_MAX
+#define LONG_MAX __LONG_MAX__
+
+/* Maximum value an `unsigned long int' can hold. (Minimum is 0). */
+#undef ULONG_MAX
+#define ULONG_MAX (LONG_MAX * 2UL + 1)
+
+#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__)
+/* Minimum and maximum values a `signed long long int' can hold. */
+#ifndef __LONG_LONG_MAX__
+#define __LONG_LONG_MAX__ 9223372036854775807LL
+#endif
+#undef LONG_LONG_MIN
+#define LONG_LONG_MIN (-LONG_LONG_MAX-1)
+#undef LONG_LONG_MAX
+#define LONG_LONG_MAX __LONG_LONG_MAX__
+
+/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */
+#undef ULONG_LONG_MAX
+#define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1)
+#endif
+
+#endif /* _MACH_MACHLIMITS_H_ */
+#endif /* _LIMITS_H___ */
diff --git a/gnu/usr.bin/cc/include/hard-reg-set.h b/gnu/usr.bin/cc/include/hard-reg-set.h
new file mode 100644
index 0000000..6bc668b
--- /dev/null
+++ b/gnu/usr.bin/cc/include/hard-reg-set.h
@@ -0,0 +1,270 @@
+/* Sets (bit vectors) of hard registers, and operations on them.
+ Copyright (C) 1987, 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Define the type of a set of hard registers. */
+
+/* HARD_REG_ELT_TYPE is a typedef of the unsigned integral type which
+ will be used for hard reg sets, either alone or in an array.
+
+ If HARD_REG_SET is a macro, its definition is HARD_REG_ELT_TYPE,
+ and it has enough bits to represent all the target machine's hard
+ registers. Otherwise, it is a typedef for a suitably sized array
+ of HARD_REG_ELT_TYPEs. HARD_REG_SET_LONGS is defined as how many.
+
+ Note that lots of code assumes that the first part of a regset is
+ the same format as a HARD_REG_SET. To help make sure this is true,
+ we only try the widest integer mode (HOST_WIDE_INT) instead of all the
+ smaller types. This approach loses only if there are a very few
+ registers and then only in the few cases where we have an array of
+ HARD_REG_SETs, so it needn't be as complex as it used to be. */
+
+typedef unsigned HOST_WIDE_INT HARD_REG_ELT_TYPE;
+
+#if FIRST_PSEUDO_REGISTER <= HOST_BITS_PER_WIDE_INT
+
+#define HARD_REG_SET HARD_REG_ELT_TYPE
+
+#else
+
+#define HARD_REG_SET_LONGS \
+ ((FIRST_PSEUDO_REGISTER + HOST_BITS_PER_WIDE_INT - 1) \
+ / HOST_BITS_PER_WIDE_INT)
+typedef HARD_REG_ELT_TYPE HARD_REG_SET[HARD_REG_SET_LONGS];
+
+#endif
+
+/* HARD_CONST is used to cast a constant to the appropriate type
+ for use with a HARD_REG_SET. */
+
+#define HARD_CONST(X) ((HARD_REG_ELT_TYPE) (X))
+
+/* Define macros SET_HARD_REG_BIT, CLEAR_HARD_REG_BIT and TEST_HARD_REG_BIT
+ to set, clear or test one bit in a hard reg set of type HARD_REG_SET.
+ All three take two arguments: the set and the register number.
+
+ In the case where sets are arrays of longs, the first argument
+ is actually a pointer to a long.
+
+ Define two macros for initializing a set:
+ CLEAR_HARD_REG_SET and SET_HARD_REG_SET.
+ These take just one argument.
+
+ Also define macros for copying hard reg sets:
+ COPY_HARD_REG_SET and COMPL_HARD_REG_SET.
+ These take two arguments TO and FROM; they read from FROM
+ and store into TO. COMPL_HARD_REG_SET complements each bit.
+
+ Also define macros for combining hard reg sets:
+ IOR_HARD_REG_SET and AND_HARD_REG_SET.
+ These take two arguments TO and FROM; they read from FROM
+ and combine bitwise into TO. Define also two variants
+ IOR_COMPL_HARD_REG_SET and AND_COMPL_HARD_REG_SET
+ which use the complement of the set FROM.
+
+ Also define GO_IF_HARD_REG_SUBSET (X, Y, TO):
+ if X is a subset of Y, go to TO.
+*/
+
+#ifdef HARD_REG_SET
+
+#define SET_HARD_REG_BIT(SET, BIT) \
+ ((SET) |= HARD_CONST (1) << (BIT))
+#define CLEAR_HARD_REG_BIT(SET, BIT) \
+ ((SET) &= ~(HARD_CONST (1) << (BIT)))
+#define TEST_HARD_REG_BIT(SET, BIT) \
+ ((SET) & (HARD_CONST (1) << (BIT)))
+
+#define CLEAR_HARD_REG_SET(TO) ((TO) = HARD_CONST (0))
+#define SET_HARD_REG_SET(TO) ((TO) = ~ HARD_CONST (0))
+
+#define COPY_HARD_REG_SET(TO, FROM) ((TO) = (FROM))
+#define COMPL_HARD_REG_SET(TO, FROM) ((TO) = ~(FROM))
+
+#define IOR_HARD_REG_SET(TO, FROM) ((TO) |= (FROM))
+#define IOR_COMPL_HARD_REG_SET(TO, FROM) ((TO) |= ~ (FROM))
+#define AND_HARD_REG_SET(TO, FROM) ((TO) &= (FROM))
+#define AND_COMPL_HARD_REG_SET(TO, FROM) ((TO) &= ~ (FROM))
+
+#define GO_IF_HARD_REG_SUBSET(X,Y,TO) if (HARD_CONST (0) == ((X) & ~(Y))) goto TO
+
+#define GO_IF_HARD_REG_EQUAL(X,Y,TO) if ((X) == (Y)) goto TO
+
+#else
+
+#define UHOST_BITS_PER_WIDE_INT ((unsigned) HOST_BITS_PER_WIDE_INT)
+
+#define SET_HARD_REG_BIT(SET, BIT) \
+ ((SET)[(BIT) / UHOST_BITS_PER_WIDE_INT] \
+ |= HARD_CONST (1) << ((BIT) % UHOST_BITS_PER_WIDE_INT))
+
+#define CLEAR_HARD_REG_BIT(SET, BIT) \
+ ((SET)[(BIT) / UHOST_BITS_PER_WIDE_INT] \
+ &= ~(HARD_CONST (1) << ((BIT) % UHOST_BITS_PER_WIDE_INT)))
+
+#define TEST_HARD_REG_BIT(SET, BIT) \
+ ((SET)[(BIT) / UHOST_BITS_PER_WIDE_INT] \
+ & (HARD_CONST (1) << ((BIT) % UHOST_BITS_PER_WIDE_INT)))
+
+#define CLEAR_HARD_REG_SET(TO) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ = 0; } while (0)
+
+#define SET_HARD_REG_SET(TO) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ = -1; } while (0)
+
+#define COPY_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ = *scan_fp_++; } while (0)
+
+#define COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ = ~ *scan_fp_++; } while (0)
+
+#define AND_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ &= *scan_fp_++; } while (0)
+
+#define AND_COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ &= ~ *scan_fp_++; } while (0)
+
+#define IOR_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ |= *scan_fp_++; } while (0)
+
+#define IOR_COMPL_HARD_REG_SET(TO, FROM) \
+do { register HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ *scan_tp_++ |= ~ *scan_fp_++; } while (0)
+
+#define GO_IF_HARD_REG_SUBSET(X,Y,TO) \
+do { register HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ if (0 != (*scan_xp_++ & ~ *scan_yp_++)) break; \
+ if (i == HARD_REG_SET_LONGS) goto TO; } while (0)
+
+#define GO_IF_HARD_REG_EQUAL(X,Y,TO) \
+do { register HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \
+ register int i; \
+ for (i = 0; i < HARD_REG_SET_LONGS; i++) \
+ if (*scan_xp_++ != *scan_yp_++) break; \
+ if (i == HARD_REG_SET_LONGS) goto TO; } while (0)
+
+#endif
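+
+/* Usage sketch (a minimal illustration, not compiled; valid for both
+   the scalar and the array representation): build a set, copy it, and
+   compare the two.  */
+#if 0
+{
+  HARD_REG_SET a, b;
+
+  CLEAR_HARD_REG_SET (a);
+  SET_HARD_REG_BIT (a, 0);
+  COPY_HARD_REG_SET (b, a);
+  GO_IF_HARD_REG_EQUAL (a, b, equal);
+  /* sets differ */
+ equal: ;
+}
+#endif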
+
+/* Define some standard sets of registers. */
+
+/* Indexed by hard register number, contains 1 for registers
+ that are fixed use (stack pointer, pc, frame pointer, etc.).
+ These are the registers that cannot be used to allocate
+ a pseudo reg whose life does not cross calls. */
+
+extern char fixed_regs[FIRST_PSEUDO_REGISTER];
+
+/* The same info as a HARD_REG_SET. */
+
+extern HARD_REG_SET fixed_reg_set;
+
+/* Indexed by hard register number, contains 1 for registers
+ that are fixed use or are clobbered by function calls.
+ These are the registers that cannot be used to allocate
+ a pseudo reg whose life crosses calls. */
+
+extern char call_used_regs[FIRST_PSEUDO_REGISTER];
+
+/* The same info as a HARD_REG_SET. */
+
+extern HARD_REG_SET call_used_reg_set;
+
+/* Indexed by hard register number, contains 1 for registers that are
+ fixed use -- i.e. in fixed_regs -- or a function value return register
+ or STRUCT_VALUE_REGNUM or STATIC_CHAIN_REGNUM. These are the
+ registers that cannot hold quantities across calls even if we are
+ willing to save and restore them. */
+
+extern char call_fixed_regs[FIRST_PSEUDO_REGISTER];
+
+/* The same info as a HARD_REG_SET. */
+
+extern HARD_REG_SET call_fixed_reg_set;
+
+/* Indexed by hard register number, contains 1 for registers
+ that are being used for global register decls.
+ These must be exempt from ordinary flow analysis
+ and are also considered fixed. */
+
+extern char global_regs[FIRST_PSEUDO_REGISTER];
+
+/* Table of register numbers in the order in which to try to use them. */
+
+#ifdef REG_ALLOC_ORDER /* Avoid undef symbol in certain broken linkers. */
+extern int reg_alloc_order[FIRST_PSEUDO_REGISTER];
+#endif
+
+/* For each reg class, a HARD_REG_SET saying which registers are in it. */
+
+extern HARD_REG_SET reg_class_contents[];
+
+/* For each reg class, number of regs it contains. */
+
+extern int reg_class_size[N_REG_CLASSES];
+
+/* For each reg class, table listing all the containing classes. */
+
+extern enum reg_class reg_class_superclasses[N_REG_CLASSES][N_REG_CLASSES];
+
+/* For each reg class, table listing all the classes contained in it. */
+
+extern enum reg_class reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES];
+
+/* For each pair of reg classes,
+ a largest reg class contained in their union. */
+
+extern enum reg_class reg_class_subunion[N_REG_CLASSES][N_REG_CLASSES];
+
+/* For each pair of reg classes,
+ the smallest reg class that contains their union. */
+
+extern enum reg_class reg_class_superunion[N_REG_CLASSES][N_REG_CLASSES];
+
+/* Number of non-fixed registers. */
+
+extern int n_non_fixed_regs;
+
+/* Vector indexed by hardware reg giving its name. */
+
+extern char *reg_names[FIRST_PSEUDO_REGISTER];
diff --git a/gnu/usr.bin/cc/include/i386/bsd.h b/gnu/usr.bin/cc/include/i386/bsd.h
new file mode 100644
index 0000000..8aec304
--- /dev/null
+++ b/gnu/usr.bin/cc/include/i386/bsd.h
@@ -0,0 +1,129 @@
+/* Definitions for BSD assembler syntax for Intel 386
+ (actually AT&T syntax for insns and operands,
+ adapted to BSD conventions for symbol names and debugging.)
+ Copyright (C) 1988 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Include common aspects of all 386 Unix assemblers. */
+#include "i386/unix.h"
+
+/* Use the BSD assembler syntax. */
+
+#define TARGET_VERSION fprintf (stderr, " (80386, BSD syntax)");
+
+/* Define the syntax of pseudo-ops, labels and comments. */
+
+/* Prefix for internally generated assembler labels. If we aren't using
+ underscores, we are using prefix `.'s to identify labels that should
+ be ignored, as in `i386/gas.h' --karl@cs.umb.edu */
+#ifdef NO_UNDERSCORES
+#define LPREFIX ".L"
+#else
+#define LPREFIX "L"
+#endif /* not NO_UNDERSCORES */
+
+/* Assembler pseudos to introduce constants of various size. */
+
+#define ASM_BYTE_OP "\t.byte"
+#define ASM_SHORT "\t.word"
+#define ASM_LONG "\t.long"
+#define ASM_DOUBLE "\t.double"
+
+/* Output at beginning of assembler file.
+ ??? I am skeptical of this -- RMS. */
+
+#define ASM_FILE_START(FILE) \
+ do { fprintf (FILE, "\t.file\t"); \
+ output_quoted_string (FILE, dump_base_name); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* This was suggested, but it shouldn't be right for DBX output. -- RMS
+ #define ASM_OUTPUT_SOURCE_FILENAME(FILE, NAME) */
+
+
+/* Define the syntax of labels and symbol definitions/declarations. */
+
+/* This is how to output an assembler line
+ that says to advance the location counter by SIZE bytes. */
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.space %u\n", (SIZE))
+
+/* Define the syntax of labels and symbol definitions/declarations. */
+
+/* This says how to output an assembler line
+ to define a global common symbol. */
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (ROUNDED)))
+
+/* This says how to output an assembler line
+ to define a local common symbol. */
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".lcomm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (ROUNDED)))
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG)!=0) fprintf ((FILE), "\t.align %d\n", (LOG))
+
+/* This is how to store into the string BUF
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#ifdef NO_UNDERSCORES
+#define ASM_GENERATE_INTERNAL_LABEL(BUF,PREFIX,NUMBER) \
+ sprintf ((BUF), "*.%s%d", (PREFIX), (NUMBER))
+#else
+#define ASM_GENERATE_INTERNAL_LABEL(BUF,PREFIX,NUMBER) \
+ sprintf ((BUF), "*%s%d", (PREFIX), (NUMBER))
+#endif
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#ifdef NO_UNDERSCORES
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".%s%d:\n", PREFIX, NUM)
+#else
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, "%s%d:\n", PREFIX, NUM)
+#endif
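+
+/* Editor's sketch (not part of the import): the two macros above pair up.
+   With PREFIX "LC" and number 3 the buffer gets "*.LC3" (or "*LC3"), and
+   the leading `*' makes assemble_name emit the string verbatim instead of
+   passing it through ASM_OUTPUT_LABELREF. */
+#if 0
+  char buf[32];
+  ASM_GENERATE_INTERNAL_LABEL (buf, "LC", 3); /* buf = "*.LC3" / "*LC3" */
+  assemble_name (asm_out_file, buf);          /* prints ".LC3" / "LC3" */
+#endif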
+
+/* This is how to output a reference to a user-level label named NAME. */
+
+#ifdef NO_UNDERSCORES
+#define ASM_OUTPUT_LABELREF(FILE,NAME) fprintf (FILE, "%s", NAME)
+#else
+#define ASM_OUTPUT_LABELREF(FILE,NAME) fprintf (FILE, "_%s", NAME)
+#endif /* not NO_UNDERSCORES */
+
+/* Sequent has some changes in the format of DBX symbols. */
+#define DBX_NO_XREFS 1
+
+/* Don't split DBX symbols into continuations. */
+#define DBX_CONTIN_LENGTH 0
diff --git a/gnu/usr.bin/cc/include/i386/gas.h b/gnu/usr.bin/cc/include/i386/gas.h
new file mode 100644
index 0000000..3e8dba5
--- /dev/null
+++ b/gnu/usr.bin/cc/include/i386/gas.h
@@ -0,0 +1,154 @@
+/* Definitions for Intel 386 running system V with gnu tools
+ Copyright (C) 1988, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Note that i386/seq-gas.h is a GAS configuration that does not use this
+ file. */
+
+#include "i386/i386.h"
+
+#ifndef YES_UNDERSCORES
+/* Define this now, because i386/bsd.h tests it. */
+#define NO_UNDERSCORES
+#endif
+
+/* Use the bsd assembler syntax. */
+/* We need to do this because gas is really a bsd style assembler,
+ * and so doesn't work well with these att-isms:
+ *
+ * ASM_OUTPUT_SKIP is .set .,.+N, which isn't implemented in gas
+ * ASM_OUTPUT_LOCAL is done with .set .,.+N, but that can't be
+ * used to define bss static space
+ *
+ * Next is the question of whether to use underscores.  RMS didn't
+ * like this idea at first, but since it is now obvious that we
+ * need this separate tm file for use with gas, at least to get
+ * dbx debugging info, I think we should also switch to underscores.
+ * We can keep i386v for real att style output, and the few
+ * people who want both forms will have to compile twice.
+ */
+
+#include "i386/bsd.h"
+
+/* These come from i386/bsd.h, but are specific to Sequent. */
+#undef DBX_NO_XREFS
+#undef DBX_CONTIN_LENGTH
+
+/* Ask for COFF symbols. */
+
+#define SDB_DEBUGGING_INFO
+
+/* Specify predefined symbols in preprocessor. */
+
+#define CPP_PREDEFINES "-Dunix -Di386 -Asystem(unix) -Acpu(i386) -Amachine(i386)"
+#define CPP_SPEC "%{posix:-D_POSIX_SOURCE}"
+
+/* Allow #sccs in preprocessor. */
+
+#define SCCS_DIRECTIVE
+
+/* Output #ident as a .ident. */
+
+#define ASM_OUTPUT_IDENT(FILE, NAME) fprintf (FILE, "\t.ident \"%s\"\n", NAME);
+
+/* Implicit library calls should use memcpy, not bcopy, etc. */
+
+#define TARGET_MEM_FUNCTIONS
+
+#if 0 /* People say gas uses the log as the arg to .align. */
+/* When using gas, .align N aligns to an N-byte boundary. */
+
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG)!=0) fprintf ((FILE), "\t.align %d\n", 1<<(LOG))
+#endif
+
+/* Align labels, etc. at 4-byte boundaries.
+ For the 486, align to 16-byte boundary for sake of cache. */
+
+#undef ASM_OUTPUT_ALIGN_CODE
+#define ASM_OUTPUT_ALIGN_CODE(FILE) \
+ fprintf ((FILE), "\t.align %d,0x90\n", \
+ TARGET_486 ? 4 : 2); /* Use log of 16 or log of 4 as arg. */
+
+/* Align start of loop at 4-byte boundary. */
+
+#undef ASM_OUTPUT_LOOP_ALIGN
+#define ASM_OUTPUT_LOOP_ALIGN(FILE) \
+ fprintf ((FILE), "\t.align 2,0x90\n"); /* Use log of 4 as arg. */
+
+/* A C statement or statements which output an assembler instruction
+ opcode to the stdio stream STREAM. The macro-operand PTR is a
+ variable of type `char *' which points to the opcode name in its
+ "internal" form--the form that is written in the machine description.
+
+ GAS version 1.38.1 doesn't understand the `repz' opcode mnemonic.
+ So use `repe' instead. */
+
+#define ASM_OUTPUT_OPCODE(STREAM, PTR) \
+{ \
+ if ((PTR)[0] == 'r' \
+ && (PTR)[1] == 'e' \
+ && (PTR)[2] == 'p') \
+ { \
+ if ((PTR)[3] == 'z') \
+ { \
+ fprintf (STREAM, "repe"); \
+ (PTR) += 4; \
+ } \
+ else if ((PTR)[3] == 'n' && (PTR)[4] == 'z') \
+ { \
+ fprintf (STREAM, "repne"); \
+ (PTR) += 5; \
+ } \
+ } \
+}
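+
+/* Editor's sketch (not part of the import): given the internal mnemonic
+   "repz\tcmpsb", the macro prints "repe" and advances PTR past "repz",
+   so the final output reads "repe\tcmpsb"; "repnz" becomes "repne" the
+   same way.  Any other opcode is left untouched. */
+#if 0
+  char insn[] = "repz\tcmpsb";
+  char *p = insn;
+  ASM_OUTPUT_OPCODE (asm_out_file, p); /* emits "repe"; p -> "\tcmpsb" */
+#endif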
+
+/* Define macro used to output shift-double opcodes when the shift
+ count is in %cl. Some assemblers require %cl as an argument;
+ some don't.
+
+ GAS requires the %cl argument, so override i386/unix.h. */
+
+#undef AS3_SHIFT_DOUBLE
+#define AS3_SHIFT_DOUBLE(a,b,c,d) AS3 (a,b,c,d)
+
+/* Print opcodes the way that GAS expects them. */
+#define GAS_MNEMONICS 1
+
+#ifdef NO_UNDERSCORES /* If user-symbols don't have underscores,
+ then it must take more than `L' to identify
+ a label that should be ignored. */
+
+/* This is how to store into the string BUF
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(BUF,PREFIX,NUMBER) \
+ sprintf ((BUF), ".%s%d", (PREFIX), (NUMBER))
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".%s%d:\n", PREFIX, NUM)
+
+#endif /* NO_UNDERSCORES */
diff --git a/gnu/usr.bin/cc/include/i386/gstabs.h b/gnu/usr.bin/cc/include/i386/gstabs.h
new file mode 100644
index 0000000..5f0ae34
--- /dev/null
+++ b/gnu/usr.bin/cc/include/i386/gstabs.h
@@ -0,0 +1,9 @@
+#include "i386/gas.h"
+
+/* We do not want to output SDB debugging information. */
+
+#undef SDB_DEBUGGING_INFO
+
+/* We want to output DBX debugging information. */
+
+#define DBX_DEBUGGING_INFO
diff --git a/gnu/usr.bin/cc/include/i386/i386.h b/gnu/usr.bin/cc/include/i386/i386.h
new file mode 100644
index 0000000..983bfc5
--- /dev/null
+++ b/gnu/usr.bin/cc/include/i386/i386.h
@@ -0,0 +1,1665 @@
+/* Definitions of target machine for GNU compiler for Intel 80386.
+ Copyright (C) 1988, 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* The purpose of this file is to define the characteristics of the i386,
+ independent of assembler syntax or operating system.
+
+ Three other files build on this one to describe a specific assembler syntax:
+ bsd386.h, att386.h, and sun386.h.
+
+ The actual tm.h file for a particular system should include
+ this file, and then the file for the appropriate assembler syntax.
+
+ Many macros that specify assembler syntax are omitted entirely from
+ this file because they really belong in the files for particular
+ assemblers. These include AS1, AS2, AS3, RP, IP, LPREFIX, L_SIZE,
+ PUT_OP_SIZE, USE_STAR, ADDR_BEG, ADDR_END, PRINT_IREG, PRINT_SCALE,
+ PRINT_B_I_S, and many that start with ASM_ or end in ASM_OP. */
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#define I386 1
+
+/* Stubs for half-pic support if not OSF/1 reference platform. */
+
+#ifndef HALF_PIC_P
+#define HALF_PIC_P() 0
+#define HALF_PIC_NUMBER_PTRS 0
+#define HALF_PIC_NUMBER_REFS 0
+#define HALF_PIC_ENCODE(DECL)
+#define HALF_PIC_DECLARE(NAME)
+#define HALF_PIC_INIT() error ("half-pic init called on systems that don't support it.")
+#define HALF_PIC_ADDRESS_P(X) 0
+#define HALF_PIC_PTR(X) X
+#define HALF_PIC_FINISH(STREAM)
+#endif
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+extern int target_flags;
+
+/* Macros used in the machine description to test the flags. */
+
+/* configure can arrange to make this 2, to force a 486. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT 0
+#endif
+
+/* Compile 80387 insns for floating point (not library calls). */
+#define TARGET_80387 (target_flags & 1)
+/* Compile code for an i486. */
+#define TARGET_486 (target_flags & 2)
+/* Compile using ret insn that pops args.
+ This will not work unless you use prototypes at least
+ for all functions that can take varying numbers of args. */
+#define TARGET_RTD (target_flags & 8)
+/* Compile passing first two args in regs 0 and 1.
+ This exists only to test compiler features that will
+ be needed for RISC chips. It is not usable
+ and is not intended to be usable on this cpu. */
+#define TARGET_REGPARM (target_flags & 020)
+
+/* Put uninitialized locals into bss, not data.
+ Meaningful only on svr3. */
+#define TARGET_SVR3_SHLIB (target_flags & 040)
+
+/* Use IEEE floating point comparisons. These handle correctly the cases
+ where the result of a comparison is unordered. Normally SIGFPE is
+ generated in such cases, in which case this isn't needed. */
+#define TARGET_IEEE_FP (target_flags & 0100)
+
+/* Functions that return a floating point value may return that value
+ in the 387 FPU or in 386 integer registers. If set, this flag causes
+ the 387 to be used, which is compatible with most calling conventions. */
+#define TARGET_FLOAT_RETURNS_IN_80387 (target_flags & 0200)
+
+/* Disable generation of FP sin, cos and sqrt operations for 387.
+ This is because FreeBSD lacks these in the math emulator code. */
+#define TARGET_NO_FANCY_MATH_387 (target_flags & 0400)
+
+/* Macro to define tables used to set the flags.
+ This is a list in braces of pairs in braces,
+ each pair being { "NAME", VALUE }
+ where VALUE is the bits to set or minus the bits to clear.
+ An empty string NAME is used to identify the default VALUE. */
+
+#define TARGET_SWITCHES \
+ { { "80387", 1}, \
+ { "no-80387", -1}, \
+ { "soft-float", -1}, \
+ { "no-soft-float", 1}, \
+ { "486", 2}, \
+ { "no-486", -2}, \
+ { "386", -2}, \
+ { "rtd", 8}, \
+ { "no-rtd", -8}, \
+ { "regparm", 020}, \
+ { "no-regparm", -020}, \
+ { "svr3-shlib", 040}, \
+ { "no-svr3-shlib", -040}, \
+ { "ieee-fp", 0100}, \
+ { "no-ieee-fp", -0100}, \
+ { "fp-ret-in-387", 0200}, \
+ { "no-fp-ret-in-387", -0200}, \
+ { "no-fancy-math-387", 0400}, \
+ { "fancy-math-387", -0400}, \
+ SUBTARGET_SWITCHES \
+ { "", TARGET_DEFAULT | TARGET_CPU_DEFAULT}}
+
+/* This is meant to be redefined in the host dependent files */
+#define SUBTARGET_SWITCHES
+
+#define OVERRIDE_OPTIONS \
+{ \
+ SUBTARGET_OVERRIDE_OPTIONS \
+}
+
+/* This is meant to be redefined in the host dependent files */
+#define SUBTARGET_OVERRIDE_OPTIONS
+
+/* target machine storage layout */
+
+/* Define for XFmode extended real floating point support.
+ This will automatically cause REAL_ARITHMETIC to be defined. */
+#define LONG_DOUBLE_TYPE_SIZE 96
+
+/* Define if you don't want extended real, but do want to use the
+ software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+/* #define REAL_ARITHMETIC */
+
+/* Define this if most significant bit of a byte is the lowest numbered. */
+/* That is not true on the 80386. */
+
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+/* That is not true on the 80386. */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered. */
+/* Not true for 80386 */
+#define WORDS_BIG_ENDIAN 0
+
+/* number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on an 80386, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16. */
+#define BITS_PER_WORD 32
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE 32
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY 32
+
+/* Allocation boundary (in *bits*) for the code of a function.
+ For i486, we get better performance by aligning to a cache
+ line (i.e. 16 byte) boundary. */
+#define FUNCTION_BOUNDARY (TARGET_486 ? 128 : 32)
+
+/* Alignment of field after `int : 0' in a structure. */
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+/* Minimum size in bits of the largest boundary to which any
+ and all fundamental data types supported by the hardware
+ might need to be aligned. No data type wants to be aligned
+ rounder than this. The i386 supports 64-bit floating point
+ quantities, but these can be aligned on any 32-bit boundary. */
+#define BIGGEST_ALIGNMENT 32
+
+/* Set this non-zero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 0
+
+/* If bit field type is int, don't let it cross an int,
+ and give entire struct the alignment of an int. */
+/* Required on the 386 since it doesn't have bitfield insns. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* Align loop starts for optimal branching. */
+#define ASM_OUTPUT_LOOP_ALIGN(FILE) \
+ ASM_OUTPUT_ALIGN (FILE, 2)
+
+/* This is how to align an instruction for optimal branching.
+ On i486 we'll get better performance by aligning on a
+ cache line (i.e. 16 byte) boundary. */
+#define ASM_OUTPUT_ALIGN_CODE(FILE) \
+ ASM_OUTPUT_ALIGN ((FILE), (TARGET_486 ? 4 : 2))
+
+/* Standard register usage. */
+
+/* This processor has special stack-like registers. See reg-stack.c
+ for details. */
+
+#define STACK_REGS
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ In the 80386 we give the 8 general purpose registers the numbers 0-7.
+ We number the floating point registers 8-15.
+ Note that registers 0-7 can be accessed as a short or int,
+ while only 0-3 may be used with byte `mov' instructions.
+
+ Reg 16 does not correspond to any hardware register, but instead
+ appears in the RTL as an argument pointer prior to reload, and is
+ eliminated during reloading in favor of either the stack or frame
+ pointer. */
+
+#define FIRST_PSEUDO_REGISTER 17
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+ On the 80386, the stack pointer is such, as is the arg pointer. */
+#define FIXED_REGISTERS \
+/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7,arg*/ \
+{ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1 }
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+
+#define CALL_USED_REGISTERS \
+/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7,arg*/ \
+{ 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }
+
+/* Order in which to allocate registers. Each register must be
+ listed once, even those in FIXED_REGISTERS. List frame pointer
+ late and fixed registers last. Note that, in general, we prefer
+ registers listed in CALL_USED_REGISTERS, keeping the others
+ available for storage of persistent values. */
+
+#define REG_ALLOC_ORDER \
+/*ax,cx,dx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7,arg*/ \
+{ 0, 2, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }
+
+/* Macro to conditionally modify fixed_regs/call_used_regs. */
+#define CONDITIONAL_REGISTER_USAGE \
+ { \
+ if (flag_pic) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ } \
+ if (! TARGET_80387 && ! TARGET_FLOAT_RETURNS_IN_80387) \
+ { \
+ int i; \
+ HARD_REG_SET x; \
+ COPY_HARD_REG_SET (x, reg_class_contents[(int)FLOAT_REGS]); \
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++ ) \
+ if (TEST_HARD_REG_BIT (x, i)) \
+ fixed_regs[i] = call_used_regs[i] = 1; \
+ } \
+ }
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ Actually there are no two-word move instructions for consecutive
+ registers. And only registers 0-3 may have mov byte instructions
+ applied to them.
+ */
+
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (FP_REGNO_P (REGNO) ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
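+
+/* Editor's illustration (not part of the import): DImode is 8 bytes, so
+   HARD_REGNO_NREGS gives (8 + 4 - 1) / 4 = 2 for a cpu register but 1
+   for any stack register, since one 387 reg holds a whole value. */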
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ On the 80386, the first 4 cpu registers can hold any mode
+ while the floating point registers may hold only floating point.
+ Make it clear that the fp regs could not hold a 16-byte float. */
+
+/* The casts to int placate a compiler on a microvax,
+ for cross-compiler testing. */
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((REGNO) < 2 ? 1 \
+ : (REGNO) < 4 ? 1 \
+ : FP_REGNO_P (REGNO) \
+ ? (((int) GET_MODE_CLASS (MODE) == (int) MODE_FLOAT \
+ || (int) GET_MODE_CLASS (MODE) == (int) MODE_COMPLEX_FLOAT) \
+ && GET_MODE_UNIT_SIZE (MODE) <= 12) \
+ : (int) (MODE) != (int) QImode)
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+
+#define MODES_TIEABLE_P(MODE1, MODE2) ((MODE1) == (MODE2))
+
+/* A C expression returning the cost of moving data from a register of class
+ CLASS1 to one of CLASS2.
+
+ On the i386, copying between floating-point and fixed-point
+ registers is expensive. */
+
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ (((FLOAT_CLASS_P (CLASS1) && ! FLOAT_CLASS_P (CLASS2)) \
+ || (! FLOAT_CLASS_P (CLASS1) && FLOAT_CLASS_P (CLASS2))) ? 10 \
+ : 2)
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* On the 386 the pc register is %eip, and is not usable as a general
+ register; the ordinary mov instructions won't work on it. */
+/* #define PC_REGNUM */
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 7
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 6
+
+/* First floating point reg */
+#define FIRST_FLOAT_REG 8
+
+/* First & last stack-like regs */
+#define FIRST_STACK_REG FIRST_FLOAT_REG
+#define LAST_STACK_REG (FIRST_FLOAT_REG + 7)
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c. */
+#define FRAME_POINTER_REQUIRED 0
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 16
+
+/* Register in which static-chain is passed to a function. */
+#define STATIC_CHAIN_REGNUM 2
+
+/* Register to hold the addressing base for position independent
+ code access to data items. */
+#define PIC_OFFSET_TABLE_REGNUM 3
+
+/* Register in which address to store a structure value
+ arrives in the function. On the 386, the prologue
+ copies this from the stack to register %eax. */
+#define STRUCT_VALUE_INCOMING 0
+
+/* Place in which caller passes the structure value address.
+ 0 means push the value on the stack like an argument. */
+#define STRUCT_VALUE 0
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union.
+
+ It might seem that class BREG is unnecessary, since no useful 386
+ opcode needs reg %ebx. But some systems pass args to the OS in ebx,
+ and the "b" register constraint is useful in asms for syscalls. */
+
+enum reg_class
+{
+ NO_REGS,
+ AREG, DREG, CREG, BREG,
+ AD_REGS, /* %eax/%edx for DImode */
+ Q_REGS, /* %eax %ebx %ecx %edx */
+ SIREG, DIREG,
+ INDEX_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp */
+ GENERAL_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp %esp */
+ FP_TOP_REG, FP_SECOND_REG, /* %st(0) %st(1) */
+ FLOAT_REGS,
+ ALL_REGS, LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define FLOAT_CLASS_P(CLASS) (reg_class_subset_p (CLASS, FLOAT_REGS))
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+{ "NO_REGS", \
+ "AREG", "DREG", "CREG", "BREG", \
+ "AD_REGS", \
+ "Q_REGS", \
+ "SIREG", "DIREG", \
+ "INDEX_REGS", \
+ "GENERAL_REGS", \
+ "FP_TOP_REG", "FP_SECOND_REG", \
+ "FLOAT_REGS", \
+ "ALL_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS \
+{ 0, \
+ 0x1, 0x2, 0x4, 0x8, /* AREG, DREG, CREG, BREG */ \
+ 0x3, /* AD_REGS */ \
+ 0xf, /* Q_REGS */ \
+ 0x10, 0x20, /* SIREG, DIREG */ \
+ 0x1007f, /* INDEX_REGS */ \
+ 0x100ff, /* GENERAL_REGS */ \
+ 0x0100, 0x0200, /* FP_TOP_REG, FP_SECOND_REG */ \
+ 0xff00, /* FLOAT_REGS */ \
+ 0x1ffff }
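+
+/* Editor's illustration (not part of the import): each entry is a bit
+   mask over hard register numbers -- AD_REGS = 0x3 is regs 0 and 1
+   (%eax, %edx), FLOAT_REGS = 0xff00 is regs 8-15 (the 387 stack), and
+   the 0x10000 bit in INDEX_REGS, GENERAL_REGS and ALL_REGS is reg 16,
+   the argument pointer. */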
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+extern enum reg_class regclass_map[FIRST_PSEUDO_REGISTER];
+#define REGNO_REG_CLASS(REGNO) (regclass_map[REGNO])
+
+/* When defined, the compiler allows registers explicitly used in the
+ rtl to be used as spill registers but prevents the compiler from
+ extending the lifetime of these registers. */
+
+#define SMALL_REGISTER_CLASSES
+
+#define QI_REG_P(X) \
+ (REG_P (X) && REGNO (X) < 4)
+#define NON_QI_REG_P(X) \
+ (REG_P (X) && REGNO (X) >= 4 && REGNO (X) < FIRST_PSEUDO_REGISTER)
+
+#define FP_REG_P(X) (REG_P (X) && FP_REGNO_P (REGNO (X)))
+#define FP_REGNO_P(n) ((n) >= FIRST_STACK_REG && (n) <= LAST_STACK_REG)
+
+#define STACK_REG_P(xop) (REG_P (xop) && \
+ REGNO (xop) >= FIRST_STACK_REG && \
+ REGNO (xop) <= LAST_STACK_REG)
+
+#define NON_STACK_REG_P(xop) (REG_P (xop) && ! STACK_REG_P (xop))
+
+#define STACK_TOP_P(xop) (REG_P (xop) && REGNO (xop) == FIRST_STACK_REG)
+
+/* Try to maintain the accuracy of the death notes for regs satisfying the
+ following. Important for stack like regs, to know when to pop. */
+
+/* #define PRESERVE_DEATH_INFO_REGNO_P(x) FP_REGNO_P(x) */
+
+/* 1 if register REGNO can magically overlap other regs.
+ Note that nonzero values work only in very special circumstances. */
+
+/* #define OVERLAPPING_REGNO_P(REGNO) FP_REGNO_P (REGNO) */
+
+/* The class value for index registers, and the one for base regs. */
+
+#define INDEX_REG_CLASS INDEX_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description. */
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'r' ? GENERAL_REGS : \
+ (C) == 'q' ? Q_REGS : \
+ (C) == 'f' ? (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387 \
+ ? FLOAT_REGS \
+ : NO_REGS) : \
+ (C) == 't' ? (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387 \
+ ? FP_TOP_REG \
+ : NO_REGS) : \
+ (C) == 'u' ? (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387 \
+ ? FP_SECOND_REG \
+ : NO_REGS) : \
+ (C) == 'a' ? AREG : \
+ (C) == 'b' ? BREG : \
+ (C) == 'c' ? CREG : \
+ (C) == 'd' ? DREG : \
+ (C) == 'A' ? AD_REGS : \
+ (C) == 'D' ? DIREG : \
+ (C) == 'S' ? SIREG : NO_REGS)
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+
+ I is for non-DImode shifts.
+ J is for DImode shifts.
+ K and L are for an `andsi' optimization.
+ M is for shifts that can be executed by the "lea" opcode.
+ */
+
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? (VALUE) >= 0 && (VALUE) <= 31 : \
+ (C) == 'J' ? (VALUE) >= 0 && (VALUE) <= 63 : \
+ (C) == 'K' ? (VALUE) == 0xff : \
+ (C) == 'L' ? (VALUE) == 0xffff : \
+ (C) == 'M' ? (VALUE) >= 0 && (VALUE) <= 3 : \
+ 0)
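+
+/* Editor's illustration (not part of the import): CONST_OK_FOR_LETTER_P
+   (31, 'I') is 1 -- a valid SImode shift count -- while (32, 'I') is 0;
+   'K' matches only 0xff and 'L' only 0xffff, the masks the `andsi'
+   optimization can treat as byte/word zero-extensions. */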
+
+/* Similar, but for floating constants, and defining letters G and H.
+ Here VALUE is the CONST_DOUBLE rtx itself. We allow constants even if
+ TARGET_80387 isn't set, because the stack register converter may need to
+ load 0.0 into the function value register. */
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'G' ? standard_80387_constant_p (VALUE) : 0)
+
+/* Place additional restrictions on the register class to use when it
+ is necessary to be able to hold a value of mode MODE in a reload
+ register for which class CLASS would ordinarily be used. */
+
+#define LIMIT_RELOAD_CLASS(MODE, CLASS) \
+ ((MODE) == QImode && ((CLASS) == ALL_REGS || (CLASS) == GENERAL_REGS) \
+ ? Q_REGS : (CLASS))
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class.
+ On the 80386 series, we prevent floating constants from being
+ reloaded into floating registers (since no move-insn can do that)
+ and we ensure that QImodes aren't reloaded into the esi or edi reg. */
+
+/* Put float CONST_DOUBLE in the constant pool instead of fp regs.
+ QImode must go into class Q_REGS.
+ Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
+ movdf to do mem-to-mem moves through integer regs. */
+
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ (GET_CODE (X) == CONST_DOUBLE && GET_MODE (X) != VOIDmode ? NO_REGS \
+ : GET_MODE (X) == QImode && ! reg_class_subset_p (CLASS, Q_REGS) ? Q_REGS \
+ : ((CLASS) == ALL_REGS \
+ && GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT) ? GENERAL_REGS \
+ : (CLASS))
+
+/* If we are copying between general and FP registers, we need a memory
+ location. */
+
+#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
+ ((FLOAT_CLASS_P (CLASS1) && ! FLOAT_CLASS_P (CLASS2)) \
+ || (! FLOAT_CLASS_P (CLASS1) && FLOAT_CLASS_P (CLASS2)))
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+/* On the 80386, this is the size of MODE in words,
+ except in the FP regs, where a single reg is always enough. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ (FLOAT_CLASS_P (CLASS) ? 1 : \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by.
+ On 386 pushw decrements by exactly 2 no matter what the position was.
+ On the 386 there is no pushb; we use pushw instead, and this
+ has the effect of rounding up to 2. */
+
+#define PUSH_ROUNDING(BYTES) (((BYTES) + 1) & (-2))
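+
+/* Editor's illustration (not part of the import): PUSH_ROUNDING (1) and
+   PUSH_ROUNDING (2) are both 2, PUSH_ROUNDING (3) is 4 -- a byte push
+   really moves the stack pointer by the full 2-byte pushw. */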
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the 80386, the RTD insn may be used to pop them if the number
+ of args is fixed, but if the number is variable then the caller
+ must pop them all. RTD can't be used for library calls now
+ because the library is compiled with the Unix compiler.
+ Use of RTD is a selectable option, since it is incompatible with
+ standard Unix calling sequences. If the option is not selected,
+ the caller must always pop the args. */
+
+#define RETURN_POPS_ARGS(FUNTYPE,SIZE) \
+ (TREE_CODE (FUNTYPE) == IDENTIFIER_NODE ? 0 \
+ : (TARGET_RTD \
+ && (TYPE_ARG_TYPES (FUNTYPE) == 0 \
+ || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (FUNTYPE))) \
+ == void_type_node))) ? (SIZE) \
+ : (aggregate_value_p (TREE_TYPE (FUNTYPE))) ? GET_MODE_SIZE (Pmode) : 0)
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx (REG, TYPE_MODE (VALTYPE), \
+ VALUE_REGNO (TYPE_MODE (VALTYPE)))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) \
+ gen_rtx (REG, MODE, VALUE_REGNO (MODE))
+
+/* Define the size of the result block used for communication between
+ untyped_call and untyped_return. The block contains a DImode value
+ followed by the block used by fnsave and frstor. */
+
+#define APPLY_RESULT_SIZE (8+108)
+
+/* 1 if N is a possible register number for function argument passing.
+ On the 80386, no registers are used in this way.
+ *NOTE* -mregparm does not work.
+ It exists only to test register calling conventions. */
+
+#define FUNCTION_ARG_REGNO_P(N) 0
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On the 80386, this is a single integer, which is a number of bytes
+ of arguments scanned so far. */
+
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+
+ On the 80386, the offset starts at 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME) \
+ ((CUM) = 0)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ ((CUM) += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3))
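+
+/* Editor's illustration (not part of the import): every argument advances
+   CUM by its size rounded up to a multiple of 4, so a QImode char costs
+   (1 + 3) & ~3 = 4 bytes and a 6-byte BLKmode struct costs 8. */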
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+
+/* On the 80386 all args are pushed, except if -mregparm is specified
+ then the first two words of arguments are passed in EAX, EDX.
+ *NOTE* -mregparm does not work.
+ It exists only to test register calling conventions. */
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+((TARGET_REGPARM && (CUM) < 8) ? gen_rtx (REG, (MODE), (CUM) / 4) : 0)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+((TARGET_REGPARM && (CUM) < 8 \
+ && 8 < ((CUM) + ((MODE) == BLKmode \
+ ? int_size_in_bytes (TYPE) \
+ : GET_MODE_SIZE (MODE)))) \
+ ? 2 - (CUM) / 4 : 0)
+
+/* This macro generates the assembly code for function entry.
+ FILE is a stdio stream to output the code to.
+ SIZE is an int: how many units of temporary storage to allocate.
+ Refer to the array `regs_ever_live' to determine which registers
+ to save; `regs_ever_live[I]' is nonzero if register number I
+ is ever used in the function. This macro is responsible for
+ knowing which registers should not be saved even if used. */
+
+#define FUNCTION_PROLOGUE(FILE, SIZE) \
+ function_prologue (FILE, SIZE)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+{ \
+ if (flag_pic) \
+ { \
+ fprintf (FILE, "\tleal %sP%d@GOTOFF(%%ebx),%%edx\n", \
+ LPREFIX, (LABELNO)); \
+ fprintf (FILE, "\tcall *_mcount@GOT(%%ebx)\n"); \
+ } \
+ else \
+ { \
+ fprintf (FILE, "\tmovl $%sP%d,%%edx\n", LPREFIX, (LABELNO)); \
+ fprintf (FILE, "\tcall _mcount\n"); \
+ } \
+}
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+/* Note that on the 386 it might be more efficient not to define this,
+ since we have to restore the stack pointer ourselves from the frame
+ pointer in order to use pop. */
+
+#define EXIT_IGNORE_STACK 1
+
+/* This macro generates the assembly code for function exit,
+ on machines that need it. If FUNCTION_EPILOGUE is not defined
+ then individual return instructions are generated for each
+ return statement. Args are same as for FUNCTION_PROLOGUE.
+
+ The function epilogue should not depend on the current stack pointer!
+ It should use the frame pointer only. This is mandatory because
+ of alloca; we also take advantage of it to omit stack adjustments
+ before returning.
+
+ If the last non-note insn in the function is a BARRIER, then there
+ is no need to emit a function epilogue, because control does not fall
+ off the end. This happens if the function ends in an "exit" call, or
+ if a `return' insn is emitted directly into the function. */
+
+#define FUNCTION_EPILOGUE(FILE, SIZE) \
+do { \
+ rtx last = get_last_insn (); \
+ if (last && GET_CODE (last) == NOTE) \
+ last = prev_nonnote_insn (last); \
+ if (! last || GET_CODE (last) != BARRIER) \
+ function_epilogue (FILE, SIZE); \
+} while (0)
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts. */
+
+/* On the 386, the trampoline contains three instructions:
+ mov #STATIC,ecx
+ mov #FUNCTION,eax
+ jmp @eax */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ ASM_OUTPUT_CHAR (FILE, GEN_INT (0xb9)); \
+ ASM_OUTPUT_SHORT (FILE, const0_rtx); \
+ ASM_OUTPUT_SHORT (FILE, const0_rtx); \
+ ASM_OUTPUT_CHAR (FILE, GEN_INT (0xb8)); \
+ ASM_OUTPUT_SHORT (FILE, const0_rtx); \
+ ASM_OUTPUT_SHORT (FILE, const0_rtx); \
+ ASM_OUTPUT_CHAR (FILE, GEN_INT (0xff)); \
+ ASM_OUTPUT_CHAR (FILE, GEN_INT (0xe0)); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE 12
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant (TRAMP, 1)), CXT); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant (TRAMP, 6)), FNADDR); \
+}
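+
+/* Editor's illustration (not part of the import) of the 12-byte block the
+   two macros above build:
+     offset 0:      0xb9        movl $CXT,%ecx
+     offsets 1-4:   <CXT>
+     offset 5:      0xb8        movl $FNADDR,%eax
+     offsets 6-9:   <FNADDR>
+     offsets 10-11: 0xff 0xe0   jmp *%eax
+   which is why INITIALIZE_TRAMPOLINE stores at TRAMP+1 and TRAMP+6. */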
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ We have two registers that can be eliminated on the i386. First, the
+ frame pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the argument pointer register can always be
+ eliminated; it is replaced with either the stack or frame pointer. */
+
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ For the i386, if frame pointer elimination is being done, we would like to
+ convert ap into sp, not fp.
+
+ All other eliminations are valid. */
+
+#define CAN_ELIMINATE(FROM, TO) \
+ ((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM \
+ ? ! frame_pointer_needed \
+ : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+ if ((FROM) == ARG_POINTER_REGNUM && (TO) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = 8; /* Skip saved PC and previous frame pointer */ \
+ else \
+ { \
+ int regno; \
+ int offset = 0; \
+ \
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) \
+ if ((regs_ever_live[regno] && ! call_used_regs[regno]) \
+ || (current_function_uses_pic_offset_table \
+ && regno == PIC_OFFSET_TABLE_REGNUM)) \
+ offset += 4; \
+ \
+ (OFFSET) = offset + get_frame_size (); \
+ \
+ if ((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += 4; /* Skip saved PC */ \
+ } \
+}
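+
+/* Editor's illustration (not part of the import): with two call-saved
+   registers live and a 16-byte frame, fp->sp elimination yields
+   2*4 + 16 = 24, ap->sp adds 4 more for the saved return address, and
+   ap->fp is always 8 (saved PC plus saved frame pointer). */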
+
+/* Addressing modes, and classification of registers for them. */
+
+/* #define HAVE_POST_INCREMENT */
+/* #define HAVE_POST_DECREMENT */
+
+/* #define HAVE_PRE_DECREMENT */
+/* #define HAVE_PRE_INCREMENT */
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] < STACK_POINTER_REGNUM)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) <= STACK_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] <= STACK_POINTER_REGNUM)
+
+#define REGNO_OK_FOR_SIREG_P(REGNO) ((REGNO) == 4 || reg_renumber[REGNO] == 4)
+#define REGNO_OK_FOR_DIREG_P(REGNO) ((REGNO) == 5 || reg_renumber[REGNO] == 5)
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as an index or if
+ it is a pseudo reg. */
+
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) < STACK_POINTER_REGNUM \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+ /* ?wfs */
+
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) <= STACK_POINTER_REGNUM \
+ || REGNO (X) == ARG_POINTER_REGNUM \
+ || REGNO(X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_OK_FOR_STRREG_P(X) \
+ (REGNO (X) == 4 || REGNO (X) == 5 || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define REG_OK_FOR_STRREG_P(X) \
+ (REGNO_OK_FOR_DIREG_P (REGNO (X)) || REGNO_OK_FOR_SIREG_P (REGNO (X)))
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS,
+ except for CONSTANT_ADDRESS_P which is usually machine-independent.
+
+ See legitimize_pic_address in i386.c for details as to what
+ constitutes a legitimate address when -fpic is used. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST \
+ || GET_CODE (X) == HIGH)
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
+
+#define LEGITIMATE_CONSTANT_P(X) 1
+
+#define GO_IF_INDEXABLE_BASE(X, ADDR) \
+ if (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X)) goto ADDR
+
+#define LEGITIMATE_INDEX_REG_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X))
+
+/* Return 1 if X is an index or an index times a scale. */
+
+#define LEGITIMATE_INDEX_P(X) \
+ (LEGITIMATE_INDEX_REG_P (X) \
+ || (GET_CODE (X) == MULT \
+ && LEGITIMATE_INDEX_REG_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (INTVAL (XEXP (X, 1)) == 2 \
+ || INTVAL (XEXP (X, 1)) == 4 \
+ || INTVAL (XEXP (X, 1)) == 8)))
+
+/* Go to ADDR if X is an index term, a base reg, or a sum of those. */
+
+#define GO_IF_INDEXING(X, ADDR) \
+{ if (LEGITIMATE_INDEX_P (X)) goto ADDR; \
+ GO_IF_INDEXABLE_BASE (X, ADDR); \
+ if (GET_CODE (X) == PLUS && LEGITIMATE_INDEX_P (XEXP (X, 0))) \
+ { GO_IF_INDEXABLE_BASE (XEXP (X, 1), ADDR); } \
+ if (GET_CODE (X) == PLUS && LEGITIMATE_INDEX_P (XEXP (X, 1))) \
+ { GO_IF_INDEXABLE_BASE (XEXP (X, 0), ADDR); } }
+
+/* We used to allow this, but it isn't ever used.
+ || ((GET_CODE (X) == POST_DEC || GET_CODE (X) == POST_INC) \
+ && REG_P (XEXP (X, 0)) \
+ && REG_OK_FOR_STRREG_P (XEXP (X, 0))) \
+*/
+
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+{ \
+ if (CONSTANT_ADDRESS_P (X) \
+ && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (X))) \
+ goto ADDR; \
+ GO_IF_INDEXING (X, ADDR); \
+ if (GET_CODE (X) == PLUS && CONSTANT_ADDRESS_P (XEXP (X, 1))) \
+ { \
+ rtx x0 = XEXP (X, 0); \
+ if (! flag_pic || ! SYMBOLIC_CONST (XEXP (X, 1))) \
+ { GO_IF_INDEXING (x0, ADDR); } \
+ else if (x0 == pic_offset_table_rtx) \
+ goto ADDR; \
+ else if (GET_CODE (x0) == PLUS) \
+ { \
+ if (XEXP (x0, 0) == pic_offset_table_rtx) \
+ { GO_IF_INDEXABLE_BASE (XEXP (x0, 1), ADDR); } \
+ if (XEXP (x0, 1) == pic_offset_table_rtx) \
+ { GO_IF_INDEXABLE_BASE (XEXP (x0, 0), ADDR); } \
+ } \
+ } \
+}
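+
+/* Editor's illustration (not part of the import): the accepted forms map
+   onto the assembler's disp(%base,%index,scale) -- e.g. the canonical
+   (plus (plus (mult (reg %ebx) (const_int 4)) (reg %esi)) (const_int 8))
+   for 8(%esi,%ebx,4) reaches ADDR via the CONSTANT_ADDRESS_P arm and
+   then GO_IF_INDEXING on the inner sum. */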
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ For the 80386, we handle X+REG by loading X into a register R and
+ using R+REG. R will go in a general reg and indexing will be used.
+ However, if REG is a broken-out memory address or multiplication,
+ nothing needs to be done because REG can certainly go in a general reg.
+
+ When -fpic is used, special handling is needed for symbolic references.
+ See comments by legitimize_pic_address in i386.c for details. */
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \
+{ extern rtx legitimize_pic_address (); \
+ int ch = (X) != (OLDX); \
+ if (flag_pic && SYMBOLIC_CONST (X)) \
+ { \
+ (X) = legitimize_pic_address (X, 0); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; \
+ } \
+ if (GET_CODE (X) == PLUS) \
+ { if (GET_CODE (XEXP (X, 0)) == MULT) \
+ ch = 1, XEXP (X, 0) = force_operand (XEXP (X, 0), 0); \
+ if (GET_CODE (XEXP (X, 1)) == MULT) \
+ ch = 1, XEXP (X, 1) = force_operand (XEXP (X, 1), 0); \
+ if (ch && GET_CODE (XEXP (X, 1)) == REG \
+ && GET_CODE (XEXP (X, 0)) == REG) \
+ goto WIN; \
+ if (flag_pic && SYMBOLIC_CONST (XEXP (X, 1))) \
+ ch = 1, (X) = legitimize_pic_address (X, 0); \
+ if (ch) { GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN); } \
+ if (GET_CODE (XEXP (X, 0)) == REG) \
+ { register rtx temp = gen_reg_rtx (Pmode); \
+ register rtx val = force_operand (XEXP (X, 1), temp); \
+ if (val != temp) emit_move_insn (temp, val); \
+ XEXP (X, 1) = temp; \
+ goto WIN; } \
+ else if (GET_CODE (XEXP (X, 1)) == REG) \
+ { register rtx temp = gen_reg_rtx (Pmode); \
+ register rtx val = force_operand (XEXP (X, 0), temp); \
+ if (val != temp) emit_move_insn (temp, val); \
+ XEXP (X, 0) = temp; \
+ goto WIN; }}}
+
+/* Nonzero if the constant value X is a legitimate general operand
+ when generating PIC code. It is given that flag_pic is on and
+ that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
+
+#define LEGITIMATE_PIC_OPERAND_P(X) \
+ (! SYMBOLIC_CONST (X) \
+ || (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X)))
+
+#define SYMBOLIC_CONST(X) \
+(GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X)))
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+ On the 80386, only postdecrement and postincrement addresses depend thus
+ (the amount of decrement or increment being the length of the operand). */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+ if (GET_CODE (ADDR) == POST_INC || GET_CODE (ADDR) == POST_DEC) goto LABEL
+
+/* Define this macro if references to a symbol must be treated
+ differently depending on something about the variable or
+ function named by the symbol (such as what section it is in).
+
+ On i386, if using PIC, mark a SYMBOL_REF for a non-global symbol
+ so that we may access it directly in the GOT. */
+
+#define ENCODE_SECTION_INFO(DECL) \
+do \
+ { \
+ if (flag_pic) \
+ { \
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (DECL)) != 'd' \
+ ? TREE_CST_RTL (DECL) : DECL_RTL (DECL)); \
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) \
+ = (TREE_CODE_CLASS (TREE_CODE (DECL)) != 'd' \
+ || ! TREE_PUBLIC (DECL)); \
+ } \
+ } \
+while (0)
+
+/* Initialize data used by insn expanders. This is called from
+ init_emit, once for each function, before code is generated.
+ For 386, clear stack slot assignments remembered from previous
+ functions. */
+
+#define INIT_EXPANDERS clear_386_stack_locals ()
+
+/* The `FINALIZE_PIC' macro serves as a hook to emit these special
+ codes once the function is being compiled into assembly code, but
+ not before. (It is not done before, because in the case of
+ compiling an inline function, it would lead to multiple PIC
+ prologues being included in functions which used inline functions
+ and were compiled to assembly language.) */
+
+#define FINALIZE_PIC \
+do \
+ { \
+ extern int current_function_uses_pic_offset_table; \
+ \
+ current_function_uses_pic_offset_table |= profile_flag | profile_block_flag; \
+ } \
+while (0)
+
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE Pmode
+
+/* Define this if the tablejump instruction expects the table
+ to contain offsets from the address of the table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE */
+
+/* Specify the tree operation to be used to convert reals to integers.
+ This should be changed to take advantage of fist --wfs ??
+ */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* MOVE_RATIO is the number of move instructions that is better than a
+ block move. Make this large on i386, since the block move is very
+ inefficient with small blocks, and the hard register needs of the
+ block move require much reload work. */
+#define MOVE_RATIO 5
+
+/* Define this if zero-extension is slow (more than one real instruction). */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Define if shifts truncate the shift count
+ which implies one can omit a sign-extension or zero-extension
+ of a shift count. */
+/* On i386, shifts do truncate the count. But bit opcodes don't. */
+
+/* #define SHIFT_COUNT_TRUNCATED */
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* We assume that the store-condition-codes instructions store 0 for false
+ and some other value for true. This is the value stored for true. */
+
+#define STORE_FLAG_VALUE 1
+
+/* When a prototype says `char' or `short', really pass an `int'.
+ (The 386 can't easily push less than an int.) */
+
+#define PROMOTE_PROTOTYPES
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode SImode
+
+/* A function address in a call instruction
+ is a byte address (for indexing purposes)
+ so give the MEM rtx a byte's mode. */
+#define FUNCTION_MODE QImode
+
+/* Define this if addresses of constant functions
+ shouldn't be put through pseudo regs where they can be cse'd.
+ Desirable on the 386 because a CALL with a constant address is
+ not much slower than one with a register address. */
+#define NO_FUNCTION_CSE
+
+/* Provide the costs of a rtl expression. This is in the body of a
+ switch on CODE. */
+
+#define RTX_COSTS(X,CODE,OUTER_CODE) \
+ case MULT: \
+ return COSTS_N_INSNS (10); \
+ case DIV: \
+ case UDIV: \
+ case MOD: \
+ case UMOD: \
+ return COSTS_N_INSNS (40); \
+ case PLUS: \
+ if (GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ return 1; \
+ break;
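+
+/* A sketch of how this body is used (the surrounding function is an
+   assumption here, modeled on rtx_cost in cse.c):
+
+     int rtx_cost (x, outer_code)
+          rtx x;
+          enum rtx_code outer_code;
+     {
+       enum rtx_code code = GET_CODE (x);
+       switch (code)
+         {
+         RTX_COSTS (x, code, outer_code)
+         CONST_COSTS (x, code, outer_code)
+         default:
+           break;
+         }
+       return COSTS_N_INSNS (1);
+     }
+
+   So on the i386 a MULT costs COSTS_N_INSNS (10), and a PLUS of a reg
+   and a constant costs 1.  */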
+
+
+/* Compute the cost of computing a constant rtl expression RTX
+ whose rtx-code is CODE. The body of this macro is a portion
+ of a switch statement. If the code is computed here,
+ return it with a return statement. Otherwise, break from the switch. */
+
+#define CONST_COSTS(RTX,CODE,OUTER_CODE) \
+ case CONST_INT: \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return flag_pic && SYMBOLIC_CONST (RTX) ? 2 : 0; \
+ case CONST_DOUBLE: \
+ { \
+ int code; \
+ if (GET_MODE (RTX) == VOIDmode) \
+ return 2; \
+ code = standard_80387_constant_p (RTX); \
+ return code == 1 ? 0 : \
+ code == 2 ? 1 : \
+ 2; \
+ }
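+
+/* As used above, standard_80387_constant_p classifies CONST_DOUBLEs the
+   387 can load in one instruction: a return of 1 (e.g. 0.0 via fldz) is
+   free, 2 (e.g. 1.0 via fld1) costs 1, and everything else costs 2.
+   The 0.0/1.0 reading is an inference from the 387 opcodes, not spelled
+   out in this file.  */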
+
+/* Compute the cost of an address. This is meant to approximate the size
+ and/or execution delay of an insn using that address. If the cost is
+ approximated by the RTL complexity, including CONST_COSTS above, as
+ is usually the case for CISC machines, this macro should not be defined.
+ For aggressively RISCy machines, only one insn format is allowed, so
+ this macro should be a constant. The value of this macro only matters
+ for valid addresses.
+
+ For i386, it is better to use a complex address than let gcc copy
+ the address into a reg and make a new pseudo. But not if the address
+   requires two regs - that would mean more pseudos with longer
+ lifetimes. */
+
+#define ADDRESS_COST(RTX) \
+ ((CONSTANT_P (RTX) \
+ || (GET_CODE (RTX) == PLUS && CONSTANT_P (XEXP (RTX, 1)) \
+ && REG_P (XEXP (RTX, 0)))) ? 0 \
+ : REG_P (RTX) ? 1 \
+ : 2)
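+
+/* For example, under the definition above:
+     4(%ebp)         reg + constant   -> 0
+     _foo            constant         -> 0
+     (%eax)          plain reg        -> 1
+     (%ebp,%eax,4)   base + index     -> 2  */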
+
+/* Add any extra modes needed to represent the condition code.
+
+ For the i386, we need separate modes when floating-point equality
+ comparisons are being done. */
+
+#define EXTRA_CC_MODES CCFPEQmode
+
+/* Define the names for the modes specified above. */
+#define EXTRA_CC_NAMES "CCFPEQ"
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison.
+
+ For floating-point equality comparisons, CCFPEQmode should be used.
+ VOIDmode should be used in all other cases. */
+
+#define SELECT_CC_MODE(OP,X,Y) \
+ (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
+ && ((OP) == EQ || (OP) == NE) ? CCFPEQmode : VOIDmode)
+
+/* Define the information needed to generate branch and scc insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *i386_compare_op0, *i386_compare_op1;
+extern struct rtx_def *(*i386_compare_gen)(), *(*i386_compare_gen_eq)();
+
+/* Tell final.c how to eliminate redundant test instructions. */
+
+/* Here we define machine-dependent flags and fields in cc_status
+ (see `conditions.h'). */
+
+/* Set if the cc value is actually in the 80387, so a floating point
+ conditional branch must be output. */
+#define CC_IN_80387 04000
+
+/* Set if the CC value was stored in a nonstandard way, so that
+ the state of equality is indicated by zero in the carry bit. */
+#define CC_Z_IN_NOT_C 010000
+
+/* Store in cc_status the expressions
+ that the condition codes will describe
+ after execution of an instruction whose pattern is EXP.
+ Do not alter them if the instruction would not alter the cc's. */
+
+#define NOTICE_UPDATE_CC(EXP, INSN) \
+ notice_update_cc((EXP))
+
+/* Output a signed jump insn. Use template NORMAL ordinarily, or
+ FLOAT following a floating point comparison.
+ Use NO_OV following an arithmetic insn that set the cc's
+ before a test insn that was deleted.
+ NO_OV may be zero, meaning final should reinsert the test insn
+ because the jump cannot be handled properly without it. */
+
+#define OUTPUT_JUMP(NORMAL, FLOAT, NO_OV) \
+{ \
+ if (cc_prev_status.flags & CC_IN_80387) \
+ return FLOAT; \
+ if (cc_prev_status.flags & CC_NO_OVERFLOW) \
+ return NO_OV; \
+ return NORMAL; \
+}
+
+/* Control the assembler format that we output, to the extent
+ this does not vary between assemblers. */
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+/* In order to refer to the first 8 regs as 32 bit regs, prefix an "e".
+   For non-floating-point regs, the following are the HImode names.
+
+ For float regs, the stack top is sometimes referred to as "%st(0)"
+ instead of just "%st". PRINT_REG handles this with the "y" code. */
+
+#define HI_REGISTER_NAMES \
+{"ax","dx","cx","bx","si","di","bp","sp", \
+ "st","st(1)","st(2)","st(3)","st(4)","st(5)","st(6)","st(7)","" }
+
+#define REGISTER_NAMES HI_REGISTER_NAMES
+
+/* Table of additional register names to use in user input. */
+
+#define ADDITIONAL_REGISTER_NAMES \
+{ "eax", 0, "edx", 1, "ecx", 2, "ebx", 3, \
+ "esi", 4, "edi", 5, "ebp", 6, "esp", 7, \
+ "al", 0, "dl", 1, "cl", 2, "bl", 3, \
+ "ah", 0, "dh", 1, "ch", 2, "bh", 3 }
+
+/* Note we are omitting these since currently I don't know how
+   to get gcc to use them, since they want the same but different
+   number as al and ax.  */
+
+/* Note the last four are not really QImode registers; the md must never
+   output a movb into one of them, only a movw.  There is no movb into
+   the last four regs.  */
+
+#define QI_REGISTER_NAMES \
+{"al", "dl", "cl", "bl", "si", "di", "bp", "sp",}
+
+/* These parallel the array above, and can be used to access bits 8:15
+ of regs 0 through 3. */
+
+#define QI_HIGH_REGISTER_NAMES \
+{"ah", "dh", "ch", "bh", }
+
+/* How to renumber registers for dbx and gdb. */
+
+/* {0,2,1,3,6,7,4,5,12,13,14,15,16,17} */
+#define DBX_REGISTER_NUMBER(n) \
+((n) == 0 ? 0 : \
+ (n) == 1 ? 2 : \
+ (n) == 2 ? 1 : \
+ (n) == 3 ? 3 : \
+ (n) == 4 ? 6 : \
+ (n) == 5 ? 7 : \
+ (n) == 6 ? 4 : \
+ (n) == 7 ? 5 : \
+ (n) + 4)
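+
+/* For example, compiler reg 1 (dx) becomes dbx reg 2 and compiler reg 4
+   (si) becomes dbx reg 6, while the floating point regs are simply
+   shifted up by 4 by the final case.  */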
+
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ (assemble_name (FILE, NAME), fputs (":\n", FILE))
+
+/* This is how to output an assembler line defining a `double' constant. */
+
+#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \
+do { long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ if (sizeof (int) == sizeof (long)) \
+ fprintf (FILE, "%s 0x%x,0x%x\n", ASM_LONG, l[0], l[1]); \
+ else \
+ fprintf (FILE, "%s 0x%lx,0x%lx\n", ASM_LONG, l[0], l[1]); \
+ } while (0)
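+
+/* For example, assuming ASM_LONG is ".long" (it is defined elsewhere),
+   the value 1.0 comes out as
+
+     .long 0x0,0x3ff00000
+
+   low word first, matching the little-endian layout of an IEEE double.  */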
+
+/* This is how to output a `long double' extended real constant. */
+
+#undef ASM_OUTPUT_LONG_DOUBLE
+#define ASM_OUTPUT_LONG_DOUBLE(FILE,VALUE) \
+do { long l[3]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ if (sizeof (int) == sizeof (long)) \
+ fprintf (FILE, "%s 0x%x,0x%x,0x%x\n", ASM_LONG, l[0], l[1], l[2]); \
+ else \
+ fprintf (FILE, "%s 0x%lx,0x%lx,0x%lx\n", ASM_LONG, l[0], l[1], l[2]); \
+ } while (0)
+
+/* This is how to output an assembler line defining a `float' constant. */
+
+#define ASM_OUTPUT_FLOAT(FILE,VALUE) \
+do { long l; \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ if (sizeof (int) == sizeof (long)) \
+ fprintf ((FILE), "%s 0x%x\n", ASM_LONG, l); \
+ else \
+ fprintf ((FILE), "%s 0x%lx\n", ASM_LONG, l); \
+ } while (0)
+
+/* Store in OUTPUT a string (made with alloca) containing
+ an assembler-name for a local static variable named NAME.
+ LABELNO is an integer which is different for each call. */
+
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 10), \
+ sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO)))
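+
+/* For example, NAME "foo" with LABELNO 42 yields "foo.42"; the ten extra
+   bytes allocated leave room for the dot, the digits, and the
+   terminating null.  */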
+
+
+
+/* This is how to output an assembler line defining an `int' constant. */
+
+#define ASM_OUTPUT_INT(FILE,VALUE) \
+( fprintf (FILE, "%s ", ASM_LONG), \
+ output_addr_const (FILE,(VALUE)), \
+ putc('\n',FILE))
+
+/* Likewise for `char' and `short' constants. */
+/* Is this supposed to do alignment too?? */
+
+#define ASM_OUTPUT_SHORT(FILE,VALUE) \
+( fprintf (FILE, "%s ", ASM_SHORT), \
+ output_addr_const (FILE,(VALUE)), \
+ putc('\n',FILE))
+
+/*
+#define ASM_OUTPUT_SHORT(FILE,VALUE) \
+( fprintf (FILE, "%s ", ASM_BYTE_OP), \
+ output_addr_const (FILE,(VALUE)), \
+ fputs (",", FILE), \
+ output_addr_const (FILE,(VALUE)), \
+ fputs (" >> 8\n",FILE))
+*/
+
+
+#define ASM_OUTPUT_CHAR(FILE,VALUE) \
+( fprintf (FILE, "%s ", ASM_BYTE_OP), \
+ output_addr_const (FILE, (VALUE)), \
+ putc ('\n', FILE))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+
+#define ASM_OUTPUT_BYTE(FILE,VALUE) \
+ fprintf ((FILE), "%s 0x%x\n", ASM_BYTE_OP, (VALUE))
+
+/* This is how to output an insn to push a register on the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_PUSH(FILE,REGNO) \
+ fprintf (FILE, "\tpushl e%s\n", reg_names[REGNO])
+
+/* This is how to output an insn to pop a register from the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_POP(FILE,REGNO) \
+ fprintf (FILE, "\tpopl e%s\n", reg_names[REGNO])
+
+/* This is how to output an element of a case-vector that is absolute.  */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, "%s %s%d\n", ASM_LONG, LPREFIX, VALUE)
+
+/* This is how to output an element of a case-vector that is relative.
+   We don't use these on the 386 yet, because the ATT assembler can't
+   forward reference the differences.  */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+ fprintf (FILE, "\t.word %s%d-%s%d\n",LPREFIX, VALUE,LPREFIX, REL)
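+
+/* For example, with the usual LPREFIX "L" (LPREFIX is defined elsewhere,
+   so "L" is an assumption here), VALUE 3 and REL 1 would produce
+
+     .word L3-L1  */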
+
+/* Define the parentheses used to group arithmetic operations
+ in assembler code. */
+
+#define ASM_OPEN_PAREN ""
+#define ASM_CLOSE_PAREN ""
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ The CODE z takes the size of operand from the following digit, and
+   outputs b, w, or l respectively.
+
+ On the 80386, we use several such letters:
+ f -- float insn (print a CONST_DOUBLE as a float rather than in hex).
+ L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
+ R -- print the prefix for register names.
+ z -- print the opcode suffix for the size of the current operand.
+ * -- print a star (in certain assembler syntax)
+ w -- print the operand as if it's a "word" (HImode) even if it isn't.
+ b -- print the operand as if it's a byte (QImode) even if it isn't.
+ c -- don't print special prefixes before constant operands. */
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '*')
+
+/* Print the name of a register based on its machine mode and number.
+ If CODE is 'w', pretend the mode is HImode.
+ If CODE is 'b', pretend the mode is QImode.
+ If CODE is 'k', pretend the mode is SImode.
+ If CODE is 'h', pretend the reg is the `high' byte register.
+ If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
+
+extern char *hi_reg_name[];
+extern char *qi_reg_name[];
+extern char *qi_high_reg_name[];
+
+#define PRINT_REG(X, CODE, FILE) \
+ do { if (REGNO (X) == ARG_POINTER_REGNUM) \
+ abort (); \
+ fprintf (FILE, "%s", RP); \
+ switch ((CODE == 'w' ? 2 \
+ : CODE == 'b' ? 1 \
+ : CODE == 'k' ? 4 \
+ : CODE == 'y' ? 3 \
+ : CODE == 'h' ? 0 \
+ : GET_MODE_SIZE (GET_MODE (X)))) \
+ { \
+ case 3: \
+ if (STACK_TOP_P (X)) \
+ { \
+ fputs ("st(0)", FILE); \
+ break; \
+ } \
+ case 4: \
+ case 8: \
+ case 12: \
+ if (! FP_REG_P (X)) fputs ("e", FILE); \
+ case 2: \
+ fputs (hi_reg_name[REGNO (X)], FILE); \
+ break; \
+ case 1: \
+ fputs (qi_reg_name[REGNO (X)], FILE); \
+ break; \
+ case 0: \
+ fputs (qi_high_reg_name[REGNO (X)], FILE); \
+ break; \
+ } \
+ } while (0)
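+
+/* Note the deliberate fall-throughs above: with the `y' code, a stack
+   reg that is not the stack top falls from case 3 into case 4; cases 4,
+   8 and 12 print the "e" prefix for non-FP regs and then fall into
+   case 2 to print the HImode name, yielding e.g. "%eax".  */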
+
+#define PRINT_OPERAND(FILE, X, CODE) \
+ print_operand (FILE, X, CODE)
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+ print_operand_address (FILE, ADDR)
+
+/* Print the name of a register based on its machine mode and number.
+ This macro is used to print debugging output.
+ This macro is different from PRINT_REG in that it may be used in
+ programs that are not linked with aux-output.o. */
+
+#define DEBUG_PRINT_REG(X, CODE, FILE) \
+ do { static char *hi_name[] = HI_REGISTER_NAMES; \
+ static char *qi_name[] = QI_REGISTER_NAMES; \
+ fprintf (FILE, "%d %s", REGNO (X), RP); \
+ if (REGNO (X) == ARG_POINTER_REGNUM) \
+ { fputs ("argp", FILE); break; } \
+ if (STACK_TOP_P (X)) \
+ { fputs ("st(0)", FILE); break; } \
+ if (FP_REG_P (X)) \
+ { fputs (hi_name[REGNO(X)], FILE); break; } \
+ switch (GET_MODE_SIZE (GET_MODE (X))) \
+ { \
+ default: \
+ fputs ("e", FILE); \
+ case 2: \
+ fputs (hi_name[REGNO (X)], FILE); \
+ break; \
+ case 1: \
+ fputs (qi_name[REGNO (X)], FILE); \
+ break; \
+ } \
+ } while (0)
+
+/* Output the prefix for an immediate operand, or for an offset operand. */
+#define PRINT_IMMED_PREFIX(FILE) fputs (IP, (FILE))
+#define PRINT_OFFSET_PREFIX(FILE) fputs (IP, (FILE))
+
+/* Routines in libgcc that return floats must return them in an fp reg,
+ just as other functions do which return such values.
+ These macros make that happen. */
+
+#define FLOAT_VALUE_TYPE float
+#define INTIFY(FLOATVAL) FLOATVAL
+
+/* Nonzero if INSN magically clobbers register REGNO. */
+
+/* #define INSN_CLOBBERS_REGNO_P(INSN, REGNO) \
+ (FP_REGNO_P (REGNO) \
+ && (GET_CODE (INSN) == JUMP_INSN || GET_CODE (INSN) == BARRIER))
+*/
+
+/* a letter which is not needed by the normal asm syntax, which
+ we can use for operand syntax in the extended asm */
+
+#define ASM_OPERAND_LETTER '#'
+
+#define RET return ""
+#define AT_SP(mode) (gen_rtx (MEM, (mode), stack_pointer_rtx))
+
+/*
+Local variables:
+version-control: t
+End:
+*/
diff --git a/gnu/usr.bin/cc/include/i386/perform.h b/gnu/usr.bin/cc/include/i386/perform.h
new file mode 100644
index 0000000..4fdd7b3
--- /dev/null
+++ b/gnu/usr.bin/cc/include/i386/perform.h
@@ -0,0 +1,97 @@
+/* Definitions for AT&T assembler syntax for the Intel 80386.
+ Copyright (C) 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Defines to be able to build libgcc.a with GCC. */
+
+/* It might seem that these are not important, since gcc 2 will never
+ call libgcc for these functions. But programs might be linked with
+ code compiled by gcc 1, and then these will be used. */
+
+/* The arg names used to be a and b, but `a' appears inside strings
+ and that confuses non-ANSI cpp. */
+
+#define perform_udivsi3(arg0,arg1) \
+{ \
+ register int dx asm("dx"); \
+ register int ax asm("ax"); \
+ \
+ dx = 0; \
+ ax = arg0; \
+ asm ("divl %3" : "=a" (ax), "=d" (dx) : "a" (ax), "g" (arg1), "d" (dx)); \
+ return ax; \
+}
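+
+/* A note on the asm above: divl divides the 64-bit value dx:ax by its
+   operand, leaving the quotient in ax and the remainder in dx, so
+   perform_udivsi3 returns ax while perform_umodsi3 below returns dx.  */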
+
+#define perform_divsi3(arg0,arg1) \
+{ \
+ register int dx asm("dx"); \
+ register int ax asm("ax"); \
+ register int cx asm("cx"); \
+ \
+ ax = arg0; \
+ cx = arg1; \
+ asm ("cltd\n\tidivl %3" : "=a" (ax), "=&d" (dx) : "a" (ax), "c" (cx)); \
+ return ax; \
+}
+
+#define perform_umodsi3(arg0,arg1) \
+{ \
+ register int dx asm("dx"); \
+ register int ax asm("ax"); \
+ \
+ dx = 0; \
+ ax = arg0; \
+ asm ("divl %3" : "=a" (ax), "=d" (dx) : "a" (ax), "g" (arg1), "d" (dx)); \
+ return dx; \
+}
+
+#define perform_modsi3(arg0,arg1) \
+{ \
+ register int dx asm("dx"); \
+ register int ax asm("ax"); \
+ register int cx asm("cx"); \
+ \
+ ax = arg0; \
+ cx = arg1; \
+ asm ("cltd\n\tidivl %3" : "=a" (ax), "=&d" (dx) : "a" (ax), "c" (cx)); \
+ return dx; \
+}
+
+#define perform_fixdfsi(arg0) \
+{ \
+ auto unsigned short ostatus; \
+ auto unsigned short nstatus; \
+ auto int ret; \
+ auto double tmp; \
+ \
+ &ostatus; /* guarantee these land in memory */ \
+ &nstatus; \
+ &ret; \
+ &tmp; \
+ \
+ asm volatile ("fnstcw %0" : "=m" (ostatus)); \
+ nstatus = ostatus | 0x0c00; \
+ asm volatile ("fldcw %0" : /* no outputs */ : "m" (nstatus)); \
+ tmp = arg0; \
+ asm volatile ("fldl %0" : /* no outputs */ : "m" (tmp)); \
+ asm volatile ("fistpl %0" : "=m" (ret)); \
+ asm volatile ("fldcw %0" : /* no outputs */ : "m" (ostatus)); \
+ \
+ return ret; \
+}
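+
+/* ORing 0x0c00 into the control word sets the 387 rounding-control
+   field (bits 10 and 11) to "round toward zero", which is what C's
+   float-to-int conversion requires; the original control word is
+   restored once the value has been stored.  A freestanding sketch of
+   the same trick (an illustration, not part of libgcc):
+
+     int trunc387 (double x)
+     {
+       unsigned short oldcw, newcw;
+       int result;
+       asm volatile ("fnstcw %0" : "=m" (oldcw));
+       newcw = oldcw | 0x0c00;
+       asm volatile ("fldcw %0" : : "m" (newcw));
+       asm volatile ("fldl %1\n\tfistpl %0" : "=m" (result) : "m" (x));
+       asm volatile ("fldcw %0" : : "m" (oldcw));
+       return result;
+     }
+*/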
+
diff --git a/gnu/usr.bin/cc/include/i386/unix.h b/gnu/usr.bin/cc/include/i386/unix.h
new file mode 100644
index 0000000..7209176
--- /dev/null
+++ b/gnu/usr.bin/cc/include/i386/unix.h
@@ -0,0 +1,145 @@
+/* Definitions for Unix assembler syntax for the Intel 80386.
+ Copyright (C) 1988 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* This file defines the aspects of assembler syntax
+ that are the same for all the i386 Unix systems
+ (though they may differ in non-Unix systems). */
+
+/* Define some concatenation macros to concatenate an opcode
+ and one, two or three operands. In other assembler syntaxes
+   they may alter the order of their operands. */
+
+/* Note that the other files fail to use these
+ in some of the places where they should. */
+
+#ifdef __STDC__
+#define AS2(a,b,c) #a " " #b "," #c
+#define AS3(a,b,c,d) #a " " #b "," #c "," #d
+#define AS1(a,b) #a " " #b
+#else
+#define AS1(a,b) "a b"
+#define AS2(a,b,c) "a b,c"
+#define AS3(a,b,c,d) "a b,c,d"
+#endif
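+
+/* For example, under ANSI C
+     AS2 (movl,%0,%1)
+   expands to the string "movl %0,%1"; the K&R branch gets the same
+   effect from the traditional substitution of macro arguments inside
+   string literals.  */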
+
+/* Define macro used to output shift-double opcodes when the shift
+ count is in %cl. Some assemblers require %cl as an argument;
+ some don't. This macro controls what to do: by default, don't
+ print %cl. */
+#define AS3_SHIFT_DOUBLE(a,b,c,d) AS2 (a,c,d)
+
+/* Output the size-letter for an opcode.
+ CODE is the letter used in an operand spec (L, B, W, S or Q).
+ CH is the corresponding lower case letter
+ (except if CODE is `Q' then CH is `l', unless GAS_MNEMONICS). */
+#define PUT_OP_SIZE(CODE,CH,FILE) putc (CH,(FILE))
+
+/* Opcode suffix for fullword insn. */
+#define L_SIZE "l"
+
+/* Prefix for register names in this syntax. */
+#define RP "%"
+
+/* Prefix for immediate operands in this syntax. */
+#define IP "$"
+
+/* Indirect call instructions should use `*'. */
+#define USE_STAR 1
+
+/* Prefix for a memory-operand X. */
+#define PRINT_PTR(X, FILE)
+
+/* Delimiters that surround base reg and index reg. */
+#define ADDR_BEG(FILE) putc('(', (FILE))
+#define ADDR_END(FILE) putc(')', (FILE))
+
+/* Print an index register (whose rtx is IREG). */
+#define PRINT_IREG(FILE,IREG) \
+ do \
+ { fputs (",", (FILE)); PRINT_REG ((IREG), 0, (FILE)); } \
+ while (0)
+
+/* Print an index scale factor SCALE. */
+#define PRINT_SCALE(FILE,SCALE) \
+ if ((SCALE) != 1) fprintf ((FILE), ",%d", (SCALE))
+
+/* Print a base/index combination.
+ BREG is the base reg rtx, IREG is the index reg rtx,
+ and SCALE is the index scale factor (an integer). */
+
+#define PRINT_B_I_S(BREG,IREG,SCALE,FILE) \
+ { ADDR_BEG (FILE); \
+ if (BREG) PRINT_REG ((BREG), 0, (FILE)); \
+ if ((IREG) != 0) \
+ { PRINT_IREG ((FILE), (IREG)); \
+ PRINT_SCALE ((FILE), (SCALE)); } \
+ ADDR_END (FILE); }
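+
+/* For example, base %ebp with index %eax and scale 4 prints as
+   "(%ebp,%eax,4)", and a base register alone prints as "(%ebp)".  */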
+
+/* Define the syntax of pseudo-ops, labels and comments. */
+
+/* String containing the assembler's comment-starter. */
+
+#define ASM_COMMENT_START "/"
+#define COMMENT_BEGIN "/"
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON "/APP\n"
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF "/NO_APP\n"
+
+/* Output before read-only data. */
+
+#define TEXT_SECTION_ASM_OP ".text"
+
+/* Output before writable (initialized) data. */
+
+#define DATA_SECTION_ASM_OP ".data"
+
+/* Output before writable (uninitialized) data. */
+
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+ (fputs (".globl ", FILE), assemble_name (FILE, NAME), fputs ("\n", FILE))
+
+/* By default, the target has an 80387, uses IEEE-compatible arithmetic,
+   and returns float values in the 387, i.e.
+   (TARGET_80387 | TARGET_IEEE_FP | TARGET_FLOAT_RETURNS_IN_80387).  */
+
+#define TARGET_DEFAULT 0301
+
+/* Floating-point return values come in the FP register. */
+
+#define VALUE_REGNO(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && TARGET_FLOAT_RETURNS_IN_80387 ? FIRST_FLOAT_REG : 0)
+
+/* 1 if N is a possible register number for a function value. */
+
+#define FUNCTION_VALUE_REGNO_P(N) \
+ ((N) == 0 || ((N)== FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387))
+
diff --git a/gnu/usr.bin/cc/include/input.h b/gnu/usr.bin/cc/include/input.h
new file mode 100644
index 0000000..39590e2
--- /dev/null
+++ b/gnu/usr.bin/cc/include/input.h
@@ -0,0 +1,46 @@
+/* Declarations for variables relating to reading the source file.
+ Used by parsers, lexical analyzers, and error message routines.
+
+ Copyright (C) 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Source file current line is coming from. */
+extern char *input_filename;
+
+/* Top-level source file. */
+extern char *main_input_filename;
+
+/* Line number in current source file. */
+extern int lineno;
+
+/* Stream for reading from input file. */
+extern FILE *finput;
+
+struct file_stack
+ {
+ char *name;
+ struct file_stack *next;
+ int line;
+ };
+
+/* Stack of currently pending input files.
+ The line member is not accurate for the innermost file on the stack. */
+extern struct file_stack *input_file_stack;
+
+/* Incremented on each change to input_file_stack. */
+extern int input_file_stack_tick;
diff --git a/gnu/usr.bin/cc/include/insn-attr.h b/gnu/usr.bin/cc/include/insn-attr.h
new file mode 100644
index 0000000..5fe9a2f
--- /dev/null
+++ b/gnu/usr.bin/cc/include/insn-attr.h
@@ -0,0 +1,19 @@
+/* Generated automatically by the program `genattr'
+from the machine description file `md'. */
+
+#ifndef PROTO
+#if defined (USE_PROTOTYPES) ? USE_PROTOTYPES : defined (__STDC__)
+#define PROTO(ARGS) ARGS
+#else
+#define PROTO(ARGS) ()
+#endif
+#endif
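+
+/* For example, with an ANSI compiler
+     extern rtx gen_movsi PROTO((rtx, rtx));
+   declares a full prototype, while a traditional compiler sees only
+     extern rtx gen_movsi ();  */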
+#define HAVE_ATTR_alternative
+#define get_attr_alternative(insn) which_alternative
+
+#define ATTR_FLAG_forward 0x1
+#define ATTR_FLAG_backward 0x2
+#define ATTR_FLAG_likely 0x4
+#define ATTR_FLAG_very_likely 0x8
+#define ATTR_FLAG_unlikely 0x10
+#define ATTR_FLAG_very_unlikely 0x20
diff --git a/gnu/usr.bin/cc/include/insn-codes.h b/gnu/usr.bin/cc/include/insn-codes.h
new file mode 100644
index 0000000..59e24c9
--- /dev/null
+++ b/gnu/usr.bin/cc/include/insn-codes.h
@@ -0,0 +1,201 @@
+/* Generated automatically by the program `gencodes'
+from the machine description file `md'. */
+
+#ifndef MAX_INSN_CODE
+
+enum insn_code {
+ CODE_FOR_tstsi_1 = 0,
+ CODE_FOR_tstsi = 1,
+ CODE_FOR_tsthi_1 = 2,
+ CODE_FOR_tsthi = 3,
+ CODE_FOR_tstqi_1 = 4,
+ CODE_FOR_tstqi = 5,
+ CODE_FOR_tstsf_cc = 6,
+ CODE_FOR_tstsf = 7,
+ CODE_FOR_tstdf_cc = 8,
+ CODE_FOR_tstdf = 9,
+ CODE_FOR_tstxf_cc = 10,
+ CODE_FOR_tstxf = 11,
+ CODE_FOR_cmpsi_1 = 12,
+ CODE_FOR_cmpsi = 13,
+ CODE_FOR_cmphi_1 = 14,
+ CODE_FOR_cmphi = 15,
+ CODE_FOR_cmpqi_1 = 16,
+ CODE_FOR_cmpqi = 17,
+ CODE_FOR_cmpsf_cc_1 = 30,
+ CODE_FOR_cmpxf = 34,
+ CODE_FOR_cmpdf = 35,
+ CODE_FOR_cmpsf = 36,
+ CODE_FOR_cmpxf_cc = 37,
+ CODE_FOR_cmpxf_ccfpeq = 38,
+ CODE_FOR_cmpdf_cc = 39,
+ CODE_FOR_cmpdf_ccfpeq = 40,
+ CODE_FOR_cmpsf_cc = 41,
+ CODE_FOR_cmpsf_ccfpeq = 42,
+ CODE_FOR_movsi = 48,
+ CODE_FOR_movhi = 51,
+ CODE_FOR_movstricthi = 52,
+ CODE_FOR_movqi = 54,
+ CODE_FOR_movstrictqi = 55,
+ CODE_FOR_movsf = 57,
+ CODE_FOR_swapdf = 59,
+ CODE_FOR_movdf = 60,
+ CODE_FOR_swapxf = 62,
+ CODE_FOR_movxf = 63,
+ CODE_FOR_movdi = 65,
+ CODE_FOR_zero_extendhisi2 = 66,
+ CODE_FOR_zero_extendqihi2 = 67,
+ CODE_FOR_zero_extendqisi2 = 68,
+ CODE_FOR_zero_extendsidi2 = 69,
+ CODE_FOR_extendsidi2 = 70,
+ CODE_FOR_extendhisi2 = 71,
+ CODE_FOR_extendqihi2 = 72,
+ CODE_FOR_extendqisi2 = 73,
+ CODE_FOR_extendsfdf2 = 74,
+ CODE_FOR_extenddfxf2 = 75,
+ CODE_FOR_extendsfxf2 = 76,
+ CODE_FOR_truncdfsf2 = 77,
+ CODE_FOR_truncxfsf2 = 79,
+ CODE_FOR_truncxfdf2 = 80,
+ CODE_FOR_fixuns_truncxfsi2 = 81,
+ CODE_FOR_fixuns_truncdfsi2 = 82,
+ CODE_FOR_fixuns_truncsfsi2 = 83,
+ CODE_FOR_fix_truncxfdi2 = 84,
+ CODE_FOR_fix_truncdfdi2 = 85,
+ CODE_FOR_fix_truncsfdi2 = 86,
+ CODE_FOR_fix_truncxfsi2 = 90,
+ CODE_FOR_fix_truncdfsi2 = 91,
+ CODE_FOR_fix_truncsfsi2 = 92,
+ CODE_FOR_floatsisf2 = 96,
+ CODE_FOR_floatdisf2 = 97,
+ CODE_FOR_floatsidf2 = 98,
+ CODE_FOR_floatdidf2 = 99,
+ CODE_FOR_floatsixf2 = 100,
+ CODE_FOR_floatdixf2 = 101,
+ CODE_FOR_adddi3 = 108,
+ CODE_FOR_addsi3 = 109,
+ CODE_FOR_addhi3 = 110,
+ CODE_FOR_addqi3 = 111,
+ CODE_FOR_addxf3 = 113,
+ CODE_FOR_adddf3 = 114,
+ CODE_FOR_addsf3 = 115,
+ CODE_FOR_subdi3 = 116,
+ CODE_FOR_subsi3 = 117,
+ CODE_FOR_subhi3 = 118,
+ CODE_FOR_subqi3 = 119,
+ CODE_FOR_subxf3 = 120,
+ CODE_FOR_subdf3 = 121,
+ CODE_FOR_subsf3 = 122,
+ CODE_FOR_mulhi3 = 124,
+ CODE_FOR_mulsi3 = 126,
+ CODE_FOR_umulqihi3 = 127,
+ CODE_FOR_mulqihi3 = 128,
+ CODE_FOR_umulsidi3 = 129,
+ CODE_FOR_mulsidi3 = 130,
+ CODE_FOR_mulxf3 = 131,
+ CODE_FOR_muldf3 = 132,
+ CODE_FOR_mulsf3 = 133,
+ CODE_FOR_divqi3 = 134,
+ CODE_FOR_udivqi3 = 135,
+ CODE_FOR_divxf3 = 136,
+ CODE_FOR_divdf3 = 137,
+ CODE_FOR_divsf3 = 138,
+ CODE_FOR_divmodsi4 = 139,
+ CODE_FOR_divmodhi4 = 140,
+ CODE_FOR_udivmodsi4 = 141,
+ CODE_FOR_udivmodhi4 = 142,
+ CODE_FOR_andsi3 = 143,
+ CODE_FOR_andhi3 = 144,
+ CODE_FOR_andqi3 = 145,
+ CODE_FOR_iorsi3 = 146,
+ CODE_FOR_iorhi3 = 147,
+ CODE_FOR_iorqi3 = 148,
+ CODE_FOR_xorsi3 = 149,
+ CODE_FOR_xorhi3 = 150,
+ CODE_FOR_xorqi3 = 151,
+ CODE_FOR_negdi2 = 152,
+ CODE_FOR_negsi2 = 153,
+ CODE_FOR_neghi2 = 154,
+ CODE_FOR_negqi2 = 155,
+ CODE_FOR_negsf2 = 156,
+ CODE_FOR_negdf2 = 157,
+ CODE_FOR_negxf2 = 159,
+ CODE_FOR_abssf2 = 161,
+ CODE_FOR_absdf2 = 162,
+ CODE_FOR_absxf2 = 164,
+ CODE_FOR_sqrtsf2 = 166,
+ CODE_FOR_sqrtdf2 = 167,
+ CODE_FOR_sqrtxf2 = 169,
+ CODE_FOR_sindf2 = 172,
+ CODE_FOR_sinsf2 = 173,
+ CODE_FOR_cosdf2 = 175,
+ CODE_FOR_cossf2 = 176,
+ CODE_FOR_one_cmplsi2 = 178,
+ CODE_FOR_one_cmplhi2 = 179,
+ CODE_FOR_one_cmplqi2 = 180,
+ CODE_FOR_ashldi3 = 181,
+ CODE_FOR_ashldi3_const_int = 182,
+ CODE_FOR_ashldi3_non_const_int = 183,
+ CODE_FOR_ashlsi3 = 184,
+ CODE_FOR_ashlhi3 = 185,
+ CODE_FOR_ashlqi3 = 186,
+ CODE_FOR_ashrdi3 = 187,
+ CODE_FOR_ashrdi3_const_int = 188,
+ CODE_FOR_ashrdi3_non_const_int = 189,
+ CODE_FOR_ashrsi3 = 190,
+ CODE_FOR_ashrhi3 = 191,
+ CODE_FOR_ashrqi3 = 192,
+ CODE_FOR_lshrdi3 = 193,
+ CODE_FOR_lshrdi3_const_int = 194,
+ CODE_FOR_lshrdi3_non_const_int = 195,
+ CODE_FOR_lshrsi3 = 196,
+ CODE_FOR_lshrhi3 = 197,
+ CODE_FOR_lshrqi3 = 198,
+ CODE_FOR_rotlsi3 = 199,
+ CODE_FOR_rotlhi3 = 200,
+ CODE_FOR_rotlqi3 = 201,
+ CODE_FOR_rotrsi3 = 202,
+ CODE_FOR_rotrhi3 = 203,
+ CODE_FOR_rotrqi3 = 204,
+ CODE_FOR_seq = 211,
+ CODE_FOR_sne = 213,
+ CODE_FOR_sgt = 215,
+ CODE_FOR_sgtu = 217,
+ CODE_FOR_slt = 219,
+ CODE_FOR_sltu = 221,
+ CODE_FOR_sge = 223,
+ CODE_FOR_sgeu = 225,
+ CODE_FOR_sle = 227,
+ CODE_FOR_sleu = 229,
+ CODE_FOR_beq = 231,
+ CODE_FOR_bne = 233,
+ CODE_FOR_bgt = 235,
+ CODE_FOR_bgtu = 237,
+ CODE_FOR_blt = 239,
+ CODE_FOR_bltu = 241,
+ CODE_FOR_bge = 243,
+ CODE_FOR_bgeu = 245,
+ CODE_FOR_ble = 247,
+ CODE_FOR_bleu = 249,
+ CODE_FOR_jump = 261,
+ CODE_FOR_indirect_jump = 262,
+ CODE_FOR_casesi = 263,
+ CODE_FOR_tablejump = 265,
+ CODE_FOR_call_pop = 266,
+ CODE_FOR_call = 269,
+ CODE_FOR_call_value_pop = 272,
+ CODE_FOR_call_value = 275,
+ CODE_FOR_untyped_call = 278,
+ CODE_FOR_untyped_return = 281,
+ CODE_FOR_update_return = 282,
+ CODE_FOR_return = 283,
+ CODE_FOR_nop = 284,
+ CODE_FOR_movstrsi = 285,
+ CODE_FOR_cmpstrsi = 287,
+ CODE_FOR_ffssi2 = 290,
+ CODE_FOR_ffshi2 = 292,
+ CODE_FOR_strlensi = 307,
+ CODE_FOR_nothing };
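+
+/* The gaps in the numbering above correspond to anonymous patterns in
+   the `md' file: gencodes numbers every pattern, but only the named
+   ones get enumerators.  */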
+
+#define MAX_INSN_CODE ((int) CODE_FOR_nothing)
+#endif /* MAX_INSN_CODE */
diff --git a/gnu/usr.bin/cc/include/insn-config.h b/gnu/usr.bin/cc/include/insn-config.h
new file mode 100644
index 0000000..7dba886
--- /dev/null
+++ b/gnu/usr.bin/cc/include/insn-config.h
@@ -0,0 +1,12 @@
+/* Generated automatically by the program `genconfig'
+from the machine description file `md'. */
+
+
+#define MAX_RECOG_OPERANDS 10
+
+#define MAX_DUP_OPERANDS 3
+#ifndef MAX_INSNS_PER_SPLIT
+#define MAX_INSNS_PER_SPLIT 1
+#endif
+#define REGISTER_CONSTRAINTS
+#define HAVE_cc0
diff --git a/gnu/usr.bin/cc/include/insn-flags.h b/gnu/usr.bin/cc/include/insn-flags.h
new file mode 100644
index 0000000..c9dd771
--- /dev/null
+++ b/gnu/usr.bin/cc/include/insn-flags.h
@@ -0,0 +1,598 @@
+/* Generated automatically by the program `genflags'
+from the machine description file `md'. */
+
+#define HAVE_tstsi_1 1
+#define HAVE_tstsi 1
+#define HAVE_tsthi_1 1
+#define HAVE_tsthi 1
+#define HAVE_tstqi_1 1
+#define HAVE_tstqi 1
+#define HAVE_tstsf_cc (TARGET_80387 && ! TARGET_IEEE_FP)
+#define HAVE_tstsf (TARGET_80387 && ! TARGET_IEEE_FP)
+#define HAVE_tstdf_cc (TARGET_80387 && ! TARGET_IEEE_FP)
+#define HAVE_tstdf (TARGET_80387 && ! TARGET_IEEE_FP)
+#define HAVE_tstxf_cc (TARGET_80387 && ! TARGET_IEEE_FP)
+#define HAVE_tstxf (TARGET_80387 && ! TARGET_IEEE_FP)
+#define HAVE_cmpsi_1 (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+#define HAVE_cmpsi 1
+#define HAVE_cmphi_1 (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+#define HAVE_cmphi 1
+#define HAVE_cmpqi_1 (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+#define HAVE_cmpqi 1
+#define HAVE_cmpsf_cc_1 (TARGET_80387 \
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))
+#define HAVE_cmpxf (TARGET_80387)
+#define HAVE_cmpdf (TARGET_80387)
+#define HAVE_cmpsf (TARGET_80387)
+#define HAVE_cmpxf_cc (TARGET_80387)
+#define HAVE_cmpxf_ccfpeq (TARGET_80387)
+#define HAVE_cmpdf_cc (TARGET_80387)
+#define HAVE_cmpdf_ccfpeq (TARGET_80387)
+#define HAVE_cmpsf_cc (TARGET_80387)
+#define HAVE_cmpsf_ccfpeq (TARGET_80387)
+#define HAVE_movsi 1
+#define HAVE_movhi 1
+#define HAVE_movstricthi 1
+#define HAVE_movqi 1
+#define HAVE_movstrictqi 1
+#define HAVE_movsf 1
+#define HAVE_swapdf 1
+#define HAVE_movdf 1
+#define HAVE_swapxf 1
+#define HAVE_movxf 1
+#define HAVE_movdi 1
+#define HAVE_zero_extendhisi2 1
+#define HAVE_zero_extendqihi2 1
+#define HAVE_zero_extendqisi2 1
+#define HAVE_zero_extendsidi2 1
+#define HAVE_extendsidi2 1
+#define HAVE_extendhisi2 1
+#define HAVE_extendqihi2 1
+#define HAVE_extendqisi2 1
+#define HAVE_extendsfdf2 (TARGET_80387)
+#define HAVE_extenddfxf2 (TARGET_80387)
+#define HAVE_extendsfxf2 (TARGET_80387)
+#define HAVE_truncdfsf2 (TARGET_80387)
+#define HAVE_truncxfsf2 (TARGET_80387)
+#define HAVE_truncxfdf2 (TARGET_80387)
+#define HAVE_fixuns_truncxfsi2 (TARGET_80387)
+#define HAVE_fixuns_truncdfsi2 (TARGET_80387)
+#define HAVE_fixuns_truncsfsi2 (TARGET_80387)
+#define HAVE_fix_truncxfdi2 (TARGET_80387)
+#define HAVE_fix_truncdfdi2 (TARGET_80387)
+#define HAVE_fix_truncsfdi2 (TARGET_80387)
+#define HAVE_fix_truncxfsi2 (TARGET_80387)
+#define HAVE_fix_truncdfsi2 (TARGET_80387)
+#define HAVE_fix_truncsfsi2 (TARGET_80387)
+#define HAVE_floatsisf2 (TARGET_80387)
+#define HAVE_floatdisf2 (TARGET_80387)
+#define HAVE_floatsidf2 (TARGET_80387)
+#define HAVE_floatdidf2 (TARGET_80387)
+#define HAVE_floatsixf2 (TARGET_80387)
+#define HAVE_floatdixf2 (TARGET_80387)
+#define HAVE_adddi3 1
+#define HAVE_addsi3 1
+#define HAVE_addhi3 1
+#define HAVE_addqi3 1
+#define HAVE_addxf3 (TARGET_80387)
+#define HAVE_adddf3 (TARGET_80387)
+#define HAVE_addsf3 (TARGET_80387)
+#define HAVE_subdi3 1
+#define HAVE_subsi3 1
+#define HAVE_subhi3 1
+#define HAVE_subqi3 1
+#define HAVE_subxf3 (TARGET_80387)
+#define HAVE_subdf3 (TARGET_80387)
+#define HAVE_subsf3 (TARGET_80387)
+#define HAVE_mulhi3 1
+#define HAVE_mulsi3 1
+#define HAVE_umulqihi3 1
+#define HAVE_mulqihi3 1
+#define HAVE_umulsidi3 1
+#define HAVE_mulsidi3 1
+#define HAVE_mulxf3 (TARGET_80387)
+#define HAVE_muldf3 (TARGET_80387)
+#define HAVE_mulsf3 (TARGET_80387)
+#define HAVE_divqi3 1
+#define HAVE_udivqi3 1
+#define HAVE_divxf3 (TARGET_80387)
+#define HAVE_divdf3 (TARGET_80387)
+#define HAVE_divsf3 (TARGET_80387)
+#define HAVE_divmodsi4 1
+#define HAVE_divmodhi4 1
+#define HAVE_udivmodsi4 1
+#define HAVE_udivmodhi4 1
+#define HAVE_andsi3 1
+#define HAVE_andhi3 1
+#define HAVE_andqi3 1
+#define HAVE_iorsi3 1
+#define HAVE_iorhi3 1
+#define HAVE_iorqi3 1
+#define HAVE_xorsi3 1
+#define HAVE_xorhi3 1
+#define HAVE_xorqi3 1
+#define HAVE_negdi2 1
+#define HAVE_negsi2 1
+#define HAVE_neghi2 1
+#define HAVE_negqi2 1
+#define HAVE_negsf2 (TARGET_80387)
+#define HAVE_negdf2 (TARGET_80387)
+#define HAVE_negxf2 (TARGET_80387)
+#define HAVE_abssf2 (TARGET_80387)
+#define HAVE_absdf2 (TARGET_80387)
+#define HAVE_absxf2 (TARGET_80387)
+#define HAVE_sqrtsf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \
+ && (TARGET_IEEE_FP || flag_fast_math) )
+#define HAVE_sqrtdf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \
+ && (TARGET_IEEE_FP || flag_fast_math) )
+#define HAVE_sqrtxf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \
+ && (TARGET_IEEE_FP || flag_fast_math) )
+#define HAVE_sindf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \
+ && (TARGET_IEEE_FP || flag_fast_math) )
+#define HAVE_sinsf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \
+ && (TARGET_IEEE_FP || flag_fast_math) )
+#define HAVE_cosdf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \
+ && (TARGET_IEEE_FP || flag_fast_math) )
+#define HAVE_cossf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \
+ && (TARGET_IEEE_FP || flag_fast_math) )
+#define HAVE_one_cmplsi2 1
+#define HAVE_one_cmplhi2 1
+#define HAVE_one_cmplqi2 1
+#define HAVE_ashldi3 1
+#define HAVE_ashldi3_const_int 1
+#define HAVE_ashldi3_non_const_int 1
+#define HAVE_ashlsi3 1
+#define HAVE_ashlhi3 1
+#define HAVE_ashlqi3 1
+#define HAVE_ashrdi3 1
+#define HAVE_ashrdi3_const_int 1
+#define HAVE_ashrdi3_non_const_int 1
+#define HAVE_ashrsi3 1
+#define HAVE_ashrhi3 1
+#define HAVE_ashrqi3 1
+#define HAVE_lshrdi3 1
+#define HAVE_lshrdi3_const_int 1
+#define HAVE_lshrdi3_non_const_int 1
+#define HAVE_lshrsi3 1
+#define HAVE_lshrhi3 1
+#define HAVE_lshrqi3 1
+#define HAVE_rotlsi3 1
+#define HAVE_rotlhi3 1
+#define HAVE_rotlqi3 1
+#define HAVE_rotrsi3 1
+#define HAVE_rotrhi3 1
+#define HAVE_rotrqi3 1
+#define HAVE_seq 1
+#define HAVE_sne 1
+#define HAVE_sgt 1
+#define HAVE_sgtu 1
+#define HAVE_slt 1
+#define HAVE_sltu 1
+#define HAVE_sge 1
+#define HAVE_sgeu 1
+#define HAVE_sle 1
+#define HAVE_sleu 1
+#define HAVE_beq 1
+#define HAVE_bne 1
+#define HAVE_bgt 1
+#define HAVE_bgtu 1
+#define HAVE_blt 1
+#define HAVE_bltu 1
+#define HAVE_bge 1
+#define HAVE_bgeu 1
+#define HAVE_ble 1
+#define HAVE_bleu 1
+#define HAVE_jump 1
+#define HAVE_indirect_jump 1
+#define HAVE_casesi (flag_pic)
+#define HAVE_tablejump 1
+#define HAVE_call_pop 1
+#define HAVE_call 1
+#define HAVE_call_value_pop 1
+#define HAVE_call_value 1
+#define HAVE_untyped_call 1
+#define HAVE_untyped_return 1
+#define HAVE_update_return 1
+#define HAVE_return (simple_386_epilogue ())
+#define HAVE_nop 1
+#define HAVE_movstrsi 1
+#define HAVE_cmpstrsi 1
+#define HAVE_ffssi2 1
+#define HAVE_ffshi2 1
+#define HAVE_strlensi 1
+
+#ifndef NO_MD_PROTOTYPES
+extern rtx gen_tstsi_1 PROTO((rtx));
+extern rtx gen_tstsi PROTO((rtx));
+extern rtx gen_tsthi_1 PROTO((rtx));
+extern rtx gen_tsthi PROTO((rtx));
+extern rtx gen_tstqi_1 PROTO((rtx));
+extern rtx gen_tstqi PROTO((rtx));
+extern rtx gen_tstsf_cc PROTO((rtx));
+extern rtx gen_tstsf PROTO((rtx));
+extern rtx gen_tstdf_cc PROTO((rtx));
+extern rtx gen_tstdf PROTO((rtx));
+extern rtx gen_tstxf_cc PROTO((rtx));
+extern rtx gen_tstxf PROTO((rtx));
+extern rtx gen_cmpsi_1 PROTO((rtx, rtx));
+extern rtx gen_cmpsi PROTO((rtx, rtx));
+extern rtx gen_cmphi_1 PROTO((rtx, rtx));
+extern rtx gen_cmphi PROTO((rtx, rtx));
+extern rtx gen_cmpqi_1 PROTO((rtx, rtx));
+extern rtx gen_cmpqi PROTO((rtx, rtx));
+extern rtx gen_cmpsf_cc_1 PROTO((rtx, rtx, rtx));
+extern rtx gen_cmpxf PROTO((rtx, rtx));
+extern rtx gen_cmpdf PROTO((rtx, rtx));
+extern rtx gen_cmpsf PROTO((rtx, rtx));
+extern rtx gen_cmpxf_cc PROTO((rtx, rtx));
+extern rtx gen_cmpxf_ccfpeq PROTO((rtx, rtx));
+extern rtx gen_cmpdf_cc PROTO((rtx, rtx));
+extern rtx gen_cmpdf_ccfpeq PROTO((rtx, rtx));
+extern rtx gen_cmpsf_cc PROTO((rtx, rtx));
+extern rtx gen_cmpsf_ccfpeq PROTO((rtx, rtx));
+extern rtx gen_movsi PROTO((rtx, rtx));
+extern rtx gen_movhi PROTO((rtx, rtx));
+extern rtx gen_movstricthi PROTO((rtx, rtx));
+extern rtx gen_movqi PROTO((rtx, rtx));
+extern rtx gen_movstrictqi PROTO((rtx, rtx));
+extern rtx gen_movsf PROTO((rtx, rtx));
+extern rtx gen_swapdf PROTO((rtx, rtx));
+extern rtx gen_movdf PROTO((rtx, rtx));
+extern rtx gen_swapxf PROTO((rtx, rtx));
+extern rtx gen_movxf PROTO((rtx, rtx));
+extern rtx gen_movdi PROTO((rtx, rtx));
+extern rtx gen_zero_extendhisi2 PROTO((rtx, rtx));
+extern rtx gen_zero_extendqihi2 PROTO((rtx, rtx));
+extern rtx gen_zero_extendqisi2 PROTO((rtx, rtx));
+extern rtx gen_zero_extendsidi2 PROTO((rtx, rtx));
+extern rtx gen_extendsidi2 PROTO((rtx, rtx));
+extern rtx gen_extendhisi2 PROTO((rtx, rtx));
+extern rtx gen_extendqihi2 PROTO((rtx, rtx));
+extern rtx gen_extendqisi2 PROTO((rtx, rtx));
+extern rtx gen_extendsfdf2 PROTO((rtx, rtx));
+extern rtx gen_extenddfxf2 PROTO((rtx, rtx));
+extern rtx gen_extendsfxf2 PROTO((rtx, rtx));
+extern rtx gen_truncdfsf2 PROTO((rtx, rtx));
+extern rtx gen_truncxfsf2 PROTO((rtx, rtx));
+extern rtx gen_truncxfdf2 PROTO((rtx, rtx));
+extern rtx gen_fixuns_truncxfsi2 PROTO((rtx, rtx));
+extern rtx gen_fixuns_truncdfsi2 PROTO((rtx, rtx));
+extern rtx gen_fixuns_truncsfsi2 PROTO((rtx, rtx));
+extern rtx gen_fix_truncxfdi2 PROTO((rtx, rtx));
+extern rtx gen_fix_truncdfdi2 PROTO((rtx, rtx));
+extern rtx gen_fix_truncsfdi2 PROTO((rtx, rtx));
+extern rtx gen_fix_truncxfsi2 PROTO((rtx, rtx));
+extern rtx gen_fix_truncdfsi2 PROTO((rtx, rtx));
+extern rtx gen_fix_truncsfsi2 PROTO((rtx, rtx));
+extern rtx gen_floatsisf2 PROTO((rtx, rtx));
+extern rtx gen_floatdisf2 PROTO((rtx, rtx));
+extern rtx gen_floatsidf2 PROTO((rtx, rtx));
+extern rtx gen_floatdidf2 PROTO((rtx, rtx));
+extern rtx gen_floatsixf2 PROTO((rtx, rtx));
+extern rtx gen_floatdixf2 PROTO((rtx, rtx));
+extern rtx gen_adddi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_addsi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_addhi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_addqi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_addxf3 PROTO((rtx, rtx, rtx));
+extern rtx gen_adddf3 PROTO((rtx, rtx, rtx));
+extern rtx gen_addsf3 PROTO((rtx, rtx, rtx));
+extern rtx gen_subdi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_subsi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_subhi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_subqi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_subxf3 PROTO((rtx, rtx, rtx));
+extern rtx gen_subdf3 PROTO((rtx, rtx, rtx));
+extern rtx gen_subsf3 PROTO((rtx, rtx, rtx));
+extern rtx gen_mulhi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_mulsi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_umulqihi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_mulqihi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_umulsidi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_mulsidi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_mulxf3 PROTO((rtx, rtx, rtx));
+extern rtx gen_muldf3 PROTO((rtx, rtx, rtx));
+extern rtx gen_mulsf3 PROTO((rtx, rtx, rtx));
+extern rtx gen_divqi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_udivqi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_divxf3 PROTO((rtx, rtx, rtx));
+extern rtx gen_divdf3 PROTO((rtx, rtx, rtx));
+extern rtx gen_divsf3 PROTO((rtx, rtx, rtx));
+extern rtx gen_divmodsi4 PROTO((rtx, rtx, rtx, rtx));
+extern rtx gen_divmodhi4 PROTO((rtx, rtx, rtx, rtx));
+extern rtx gen_udivmodsi4 PROTO((rtx, rtx, rtx, rtx));
+extern rtx gen_udivmodhi4 PROTO((rtx, rtx, rtx, rtx));
+extern rtx gen_andsi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_andhi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_andqi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_iorsi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_iorhi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_iorqi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_xorsi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_xorhi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_xorqi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_negdi2 PROTO((rtx, rtx));
+extern rtx gen_negsi2 PROTO((rtx, rtx));
+extern rtx gen_neghi2 PROTO((rtx, rtx));
+extern rtx gen_negqi2 PROTO((rtx, rtx));
+extern rtx gen_negsf2 PROTO((rtx, rtx));
+extern rtx gen_negdf2 PROTO((rtx, rtx));
+extern rtx gen_negxf2 PROTO((rtx, rtx));
+extern rtx gen_abssf2 PROTO((rtx, rtx));
+extern rtx gen_absdf2 PROTO((rtx, rtx));
+extern rtx gen_absxf2 PROTO((rtx, rtx));
+extern rtx gen_sqrtsf2 PROTO((rtx, rtx));
+extern rtx gen_sqrtdf2 PROTO((rtx, rtx));
+extern rtx gen_sqrtxf2 PROTO((rtx, rtx));
+extern rtx gen_sindf2 PROTO((rtx, rtx));
+extern rtx gen_sinsf2 PROTO((rtx, rtx));
+extern rtx gen_cosdf2 PROTO((rtx, rtx));
+extern rtx gen_cossf2 PROTO((rtx, rtx));
+extern rtx gen_one_cmplsi2 PROTO((rtx, rtx));
+extern rtx gen_one_cmplhi2 PROTO((rtx, rtx));
+extern rtx gen_one_cmplqi2 PROTO((rtx, rtx));
+extern rtx gen_ashldi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_ashldi3_const_int PROTO((rtx, rtx, rtx));
+extern rtx gen_ashldi3_non_const_int PROTO((rtx, rtx, rtx));
+extern rtx gen_ashlsi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_ashlhi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_ashlqi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_ashrdi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_ashrdi3_const_int PROTO((rtx, rtx, rtx));
+extern rtx gen_ashrdi3_non_const_int PROTO((rtx, rtx, rtx));
+extern rtx gen_ashrsi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_ashrhi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_ashrqi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_lshrdi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_lshrdi3_const_int PROTO((rtx, rtx, rtx));
+extern rtx gen_lshrdi3_non_const_int PROTO((rtx, rtx, rtx));
+extern rtx gen_lshrsi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_lshrhi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_lshrqi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_rotlsi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_rotlhi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_rotlqi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_rotrsi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_rotrhi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_rotrqi3 PROTO((rtx, rtx, rtx));
+extern rtx gen_seq PROTO((rtx));
+extern rtx gen_sne PROTO((rtx));
+extern rtx gen_sgt PROTO((rtx));
+extern rtx gen_sgtu PROTO((rtx));
+extern rtx gen_slt PROTO((rtx));
+extern rtx gen_sltu PROTO((rtx));
+extern rtx gen_sge PROTO((rtx));
+extern rtx gen_sgeu PROTO((rtx));
+extern rtx gen_sle PROTO((rtx));
+extern rtx gen_sleu PROTO((rtx));
+extern rtx gen_beq PROTO((rtx));
+extern rtx gen_bne PROTO((rtx));
+extern rtx gen_bgt PROTO((rtx));
+extern rtx gen_bgtu PROTO((rtx));
+extern rtx gen_blt PROTO((rtx));
+extern rtx gen_bltu PROTO((rtx));
+extern rtx gen_bge PROTO((rtx));
+extern rtx gen_bgeu PROTO((rtx));
+extern rtx gen_ble PROTO((rtx));
+extern rtx gen_bleu PROTO((rtx));
+extern rtx gen_jump PROTO((rtx));
+extern rtx gen_indirect_jump PROTO((rtx));
+extern rtx gen_casesi PROTO((rtx, rtx, rtx, rtx, rtx));
+extern rtx gen_tablejump PROTO((rtx, rtx));
+extern rtx gen_untyped_call PROTO((rtx, rtx, rtx));
+extern rtx gen_untyped_return PROTO((rtx, rtx));
+extern rtx gen_update_return PROTO((rtx));
+extern rtx gen_return PROTO((void));
+extern rtx gen_nop PROTO((void));
+extern rtx gen_movstrsi PROTO((rtx, rtx, rtx, rtx));
+extern rtx gen_cmpstrsi PROTO((rtx, rtx, rtx, rtx, rtx));
+extern rtx gen_ffssi2 PROTO((rtx, rtx));
+extern rtx gen_ffshi2 PROTO((rtx, rtx));
+extern rtx gen_strlensi PROTO((rtx, rtx, rtx, rtx));
+
+#ifdef MD_CALL_PROTOTYPES
+extern rtx gen_call_pop PROTO((rtx, rtx, rtx));
+extern rtx gen_call PROTO((rtx, rtx));
+extern rtx gen_call_value_pop PROTO((rtx, rtx, rtx, rtx));
+extern rtx gen_call_value PROTO((rtx, rtx, rtx));
+
+#else /* !MD_CALL_PROTOTYPES */
+extern rtx gen_call_pop ();
+extern rtx gen_call ();
+extern rtx gen_call_value_pop ();
+extern rtx gen_call_value ();
+#endif /* !MD_CALL_PROTOTYPES */
+
+#else /* NO_MD_PROTOTYPES */
+extern rtx gen_tstsi_1 ();
+extern rtx gen_tstsi ();
+extern rtx gen_tsthi_1 ();
+extern rtx gen_tsthi ();
+extern rtx gen_tstqi_1 ();
+extern rtx gen_tstqi ();
+extern rtx gen_tstsf_cc ();
+extern rtx gen_tstsf ();
+extern rtx gen_tstdf_cc ();
+extern rtx gen_tstdf ();
+extern rtx gen_tstxf_cc ();
+extern rtx gen_tstxf ();
+extern rtx gen_cmpsi_1 ();
+extern rtx gen_cmpsi ();
+extern rtx gen_cmphi_1 ();
+extern rtx gen_cmphi ();
+extern rtx gen_cmpqi_1 ();
+extern rtx gen_cmpqi ();
+extern rtx gen_cmpsf_cc_1 ();
+extern rtx gen_cmpxf ();
+extern rtx gen_cmpdf ();
+extern rtx gen_cmpsf ();
+extern rtx gen_cmpxf_cc ();
+extern rtx gen_cmpxf_ccfpeq ();
+extern rtx gen_cmpdf_cc ();
+extern rtx gen_cmpdf_ccfpeq ();
+extern rtx gen_cmpsf_cc ();
+extern rtx gen_cmpsf_ccfpeq ();
+extern rtx gen_movsi ();
+extern rtx gen_movhi ();
+extern rtx gen_movstricthi ();
+extern rtx gen_movqi ();
+extern rtx gen_movstrictqi ();
+extern rtx gen_movsf ();
+extern rtx gen_swapdf ();
+extern rtx gen_movdf ();
+extern rtx gen_swapxf ();
+extern rtx gen_movxf ();
+extern rtx gen_movdi ();
+extern rtx gen_zero_extendhisi2 ();
+extern rtx gen_zero_extendqihi2 ();
+extern rtx gen_zero_extendqisi2 ();
+extern rtx gen_zero_extendsidi2 ();
+extern rtx gen_extendsidi2 ();
+extern rtx gen_extendhisi2 ();
+extern rtx gen_extendqihi2 ();
+extern rtx gen_extendqisi2 ();
+extern rtx gen_extendsfdf2 ();
+extern rtx gen_extenddfxf2 ();
+extern rtx gen_extendsfxf2 ();
+extern rtx gen_truncdfsf2 ();
+extern rtx gen_truncxfsf2 ();
+extern rtx gen_truncxfdf2 ();
+extern rtx gen_fixuns_truncxfsi2 ();
+extern rtx gen_fixuns_truncdfsi2 ();
+extern rtx gen_fixuns_truncsfsi2 ();
+extern rtx gen_fix_truncxfdi2 ();
+extern rtx gen_fix_truncdfdi2 ();
+extern rtx gen_fix_truncsfdi2 ();
+extern rtx gen_fix_truncxfsi2 ();
+extern rtx gen_fix_truncdfsi2 ();
+extern rtx gen_fix_truncsfsi2 ();
+extern rtx gen_floatsisf2 ();
+extern rtx gen_floatdisf2 ();
+extern rtx gen_floatsidf2 ();
+extern rtx gen_floatdidf2 ();
+extern rtx gen_floatsixf2 ();
+extern rtx gen_floatdixf2 ();
+extern rtx gen_adddi3 ();
+extern rtx gen_addsi3 ();
+extern rtx gen_addhi3 ();
+extern rtx gen_addqi3 ();
+extern rtx gen_addxf3 ();
+extern rtx gen_adddf3 ();
+extern rtx gen_addsf3 ();
+extern rtx gen_subdi3 ();
+extern rtx gen_subsi3 ();
+extern rtx gen_subhi3 ();
+extern rtx gen_subqi3 ();
+extern rtx gen_subxf3 ();
+extern rtx gen_subdf3 ();
+extern rtx gen_subsf3 ();
+extern rtx gen_mulhi3 ();
+extern rtx gen_mulsi3 ();
+extern rtx gen_umulqihi3 ();
+extern rtx gen_mulqihi3 ();
+extern rtx gen_umulsidi3 ();
+extern rtx gen_mulsidi3 ();
+extern rtx gen_mulxf3 ();
+extern rtx gen_muldf3 ();
+extern rtx gen_mulsf3 ();
+extern rtx gen_divqi3 ();
+extern rtx gen_udivqi3 ();
+extern rtx gen_divxf3 ();
+extern rtx gen_divdf3 ();
+extern rtx gen_divsf3 ();
+extern rtx gen_divmodsi4 ();
+extern rtx gen_divmodhi4 ();
+extern rtx gen_udivmodsi4 ();
+extern rtx gen_udivmodhi4 ();
+extern rtx gen_andsi3 ();
+extern rtx gen_andhi3 ();
+extern rtx gen_andqi3 ();
+extern rtx gen_iorsi3 ();
+extern rtx gen_iorhi3 ();
+extern rtx gen_iorqi3 ();
+extern rtx gen_xorsi3 ();
+extern rtx gen_xorhi3 ();
+extern rtx gen_xorqi3 ();
+extern rtx gen_negdi2 ();
+extern rtx gen_negsi2 ();
+extern rtx gen_neghi2 ();
+extern rtx gen_negqi2 ();
+extern rtx gen_negsf2 ();
+extern rtx gen_negdf2 ();
+extern rtx gen_negxf2 ();
+extern rtx gen_abssf2 ();
+extern rtx gen_absdf2 ();
+extern rtx gen_absxf2 ();
+extern rtx gen_sqrtsf2 ();
+extern rtx gen_sqrtdf2 ();
+extern rtx gen_sqrtxf2 ();
+extern rtx gen_sindf2 ();
+extern rtx gen_sinsf2 ();
+extern rtx gen_cosdf2 ();
+extern rtx gen_cossf2 ();
+extern rtx gen_one_cmplsi2 ();
+extern rtx gen_one_cmplhi2 ();
+extern rtx gen_one_cmplqi2 ();
+extern rtx gen_ashldi3 ();
+extern rtx gen_ashldi3_const_int ();
+extern rtx gen_ashldi3_non_const_int ();
+extern rtx gen_ashlsi3 ();
+extern rtx gen_ashlhi3 ();
+extern rtx gen_ashlqi3 ();
+extern rtx gen_ashrdi3 ();
+extern rtx gen_ashrdi3_const_int ();
+extern rtx gen_ashrdi3_non_const_int ();
+extern rtx gen_ashrsi3 ();
+extern rtx gen_ashrhi3 ();
+extern rtx gen_ashrqi3 ();
+extern rtx gen_lshrdi3 ();
+extern rtx gen_lshrdi3_const_int ();
+extern rtx gen_lshrdi3_non_const_int ();
+extern rtx gen_lshrsi3 ();
+extern rtx gen_lshrhi3 ();
+extern rtx gen_lshrqi3 ();
+extern rtx gen_rotlsi3 ();
+extern rtx gen_rotlhi3 ();
+extern rtx gen_rotlqi3 ();
+extern rtx gen_rotrsi3 ();
+extern rtx gen_rotrhi3 ();
+extern rtx gen_rotrqi3 ();
+extern rtx gen_seq ();
+extern rtx gen_sne ();
+extern rtx gen_sgt ();
+extern rtx gen_sgtu ();
+extern rtx gen_slt ();
+extern rtx gen_sltu ();
+extern rtx gen_sge ();
+extern rtx gen_sgeu ();
+extern rtx gen_sle ();
+extern rtx gen_sleu ();
+extern rtx gen_beq ();
+extern rtx gen_bne ();
+extern rtx gen_bgt ();
+extern rtx gen_bgtu ();
+extern rtx gen_blt ();
+extern rtx gen_bltu ();
+extern rtx gen_bge ();
+extern rtx gen_bgeu ();
+extern rtx gen_ble ();
+extern rtx gen_bleu ();
+extern rtx gen_jump ();
+extern rtx gen_indirect_jump ();
+extern rtx gen_casesi ();
+extern rtx gen_tablejump ();
+extern rtx gen_untyped_call ();
+extern rtx gen_untyped_return ();
+extern rtx gen_update_return ();
+extern rtx gen_return ();
+extern rtx gen_nop ();
+extern rtx gen_movstrsi ();
+extern rtx gen_cmpstrsi ();
+extern rtx gen_ffssi2 ();
+extern rtx gen_ffshi2 ();
+extern rtx gen_strlensi ();
+extern rtx gen_call_pop ();
+extern rtx gen_call ();
+extern rtx gen_call_value_pop ();
+extern rtx gen_call_value ();
+#endif /* NO_MD_PROTOTYPES */
diff --git a/gnu/usr.bin/cc/include/integrate.h b/gnu/usr.bin/cc/include/integrate.h
new file mode 100644
index 0000000..1176ac0
--- /dev/null
+++ b/gnu/usr.bin/cc/include/integrate.h
@@ -0,0 +1,125 @@
+/* Function integration definitions for GNU C-Compiler
+ Copyright (C) 1990 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* This structure is used to remap objects in the function being inlined to
+ those belonging to the calling function. It is passed by
+ expand_inline_function to its children.
+
+ This structure is also used when unrolling loops and otherwise
+ replicating code, although not all fields are needed in this case;
+ only those fields needed by copy_rtx_and_substitute() and its children
+ are used.
+
+ This structure is used instead of static variables because
+ expand_inline_function may be called recursively via expand_expr. */
+
+struct inline_remap
+{
+ /* True if we are doing function integration, false otherwise.
+ Used to control whether RTX_UNCHANGING bits are copied by
+ copy_rtx_and_substitute. */
+ int integrating;
+  /* Definition of the function being inlined.  */
+ union tree_node *fndecl;
+ /* Place to put insns needed at start of function. */
+ rtx insns_at_start;
+ /* Mapping from old registers to new registers.
+     It is allocated and deallocated in `expand_inline_function'.  */
+ rtx *reg_map;
+ /* Mapping from old code-labels to new code-labels.
+ The first element of this map is label_map[min_labelno]. */
+ rtx *label_map;
+ /* Mapping from old insn uid's to copied insns. The first element
+ of this map is insn_map[min_insnno]; the last element is
+ insn_map[max_insnno]. We keep the bounds here for when the map
+ only covers a partial range of insns (such as loop unrolling or
+ code replication). */
+ rtx *insn_map;
+ int min_insnno, max_insnno;
+
+ /* Map pseudo reg number in calling function to equivalent constant. We
+ cannot in general substitute constants into parameter pseudo registers,
+ since some machine descriptions (many RISCs) won't always handle
+ the resulting insns. So if an incoming parameter has a constant
+ equivalent, we record it here, and if the resulting insn is
+ recognizable, we go with it.
+
+ We also use this mechanism to convert references to incoming arguments
+ and stacked variables. copy_rtx_and_substitute will replace the virtual
+ incoming argument and virtual stacked variables registers with new
+ pseudos that contain pointers into the replacement area allocated for
+ this inline instance. These pseudos are then marked as being equivalent
+ to the appropriate address and substituted if valid. */
+ rtx *const_equiv_map;
+ /* Number of entries in const_equiv_map and const_arg_map. */
+ int const_equiv_map_size;
+ /* This is incremented for each new basic block.
+ It is used to store in const_age_map to record the domain of validity
+ of each entry in const_equiv_map.
+ A value of -1 indicates an entry for a reg which is a parm.
+ All other values are "positive". */
+#define CONST_AGE_PARM (-1)
+ unsigned int const_age;
+ /* In parallel with const_equiv_map, record the valid age for each entry.
+ The entry is invalid if its age is less than const_age. */
+ unsigned int *const_age_map;
+ /* Target of the inline function being expanded, or NULL if none. */
+ rtx inline_target;
+ /* When an insn is being copied by copy_rtx_and_substitute,
+ this is nonzero if we have copied an ASM_OPERANDS.
+ In that case, it is the original input-operand vector. */
+ rtvec orig_asm_operands_vector;
+ /* When an insn is being copied by copy_rtx_and_substitute,
+ this is nonzero if we have copied an ASM_OPERANDS.
+ In that case, it is the copied input-operand vector. */
+ rtvec copy_asm_operands_vector;
+ /* Likewise, this is the copied constraints vector. */
+ rtvec copy_asm_constraints_vector;
+
+ /* The next few fields are used for subst_constants to record the SETs
+ that it saw. */
+ int num_sets;
+ struct equiv_table
+ {
+ rtx dest;
+ rtx equiv;
+ } equiv_sets[MAX_RECOG_OPERANDS];
+ /* Record the last thing assigned to pc. This is used for folded
+ conditional branch insns. */
+ rtx last_pc_value;
+#ifdef HAVE_cc0
+ /* Record the last thing assigned to cc0. */
+ rtx last_cc0_value;
+#endif
+};
+
+/* Return a copy of an rtx (as needed), substituting pseudo-register,
+ labels, and frame-pointer offsets as necessary. */
+extern rtx copy_rtx_and_substitute PROTO((rtx, struct inline_remap *));
+
+extern void try_constants PROTO((rtx, struct inline_remap *));
+
+extern void mark_stores PROTO((rtx, rtx));
+
+/* Unfortunately, we need a global copy of const_equiv map for communication
+ with a function called from note_stores. Be *very* careful that this
+ is used properly in the presence of recursion. */
+
+extern rtx *global_const_equiv_map;
+extern int global_const_equiv_map_size;
diff --git a/gnu/usr.bin/cc/include/longlong.h b/gnu/usr.bin/cc/include/longlong.h
new file mode 100644
index 0000000..e811c73
--- /dev/null
+++ b/gnu/usr.bin/cc/include/longlong.h
@@ -0,0 +1,1185 @@
+/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
+ Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
+
+ This definition file is free software; you can redistribute it
+ and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2, or (at your option) any later version.
+
+ This definition file is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifndef SI_TYPE_SIZE
+#define SI_TYPE_SIZE 32
+#endif
+
+#define __BITS4 (SI_TYPE_SIZE / 4)
+#define __ll_B (1L << (SI_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
+#define __ll_highpart(t) ((USItype) (t) / __ll_B)
+
+/* Define auxiliary asm macros.
+
+ 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
+ multiplies two USItype integers MULTIPLIER and MULTIPLICAND,
+ and generates a two-part USItype product in HIGH_PROD and
+ LOW_PROD.
+
+ 2) __umulsidi3(a,b) multiplies two USItype integers A and B,
+ and returns a UDItype product. This is just a variant of umul_ppmm.
+
+ 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ denominator) divides a two-word unsigned integer, composed by the
+ integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
+ places the quotient in QUOTIENT and the remainder in REMAINDER.
+ HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
+ If, in addition, the operation requires the most significant bit of
+ DENOMINATOR to be 1, then the pre-processor symbol
+ UDIV_NEEDS_NORMALIZATION is defined to 1.
+
+ 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ denominator). Like udiv_qrnnd but the numbers are signed. The
+ quotient is rounded towards 0.
+
+ 5) count_leading_zeros(count, x) counts the number of zero-bits from
+ the msb to the first non-zero bit. This is the number of steps X
+ needs to be shifted left to set the msb. Undefined for X == 0.
+
+ 6) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
+ high_addend_2, low_addend_2) adds two two-word unsigned integers,
+ composed by HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
+ LOW_ADDEND_2 respectively. The result is placed in HIGH_SUM and
+ LOW_SUM. Overflow (i.e. carry out) is not stored anywhere, and is
+ lost.
+
+ 7) sub_ddmmss(high_difference, low_difference, high_minuend,
+ low_minuend, high_subtrahend, low_subtrahend) subtracts two
+ two-word unsigned integers, composed by HIGH_MINUEND and
+ LOW_MINUEND, and HIGH_SUBTRAHEND and LOW_SUBTRAHEND
+ respectively. The result is placed in HIGH_DIFFERENCE and
+ LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
+ and is lost.
+
+ If any of these macros are left undefined for a particular CPU,
+ C macros are used. */
+
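+/* A minimal sketch of typical usage (illustrative only; the function
+   name __example_mul_div is hypothetical).  It assumes USItype is the
+   32-bit unsigned type defined by the including file.  umul_ppmm forms
+   a two-word product; udiv_qrnnd then divides it back.  udiv_qrnnd
+   requires n1 < d, which holds here, and the divisor already has its
+   most significant bit set, so it is valid even when
+   UDIV_NEEDS_NORMALIZATION is 1.  */
+#if 0
+static void
+__example_mul_div ()
+{
+  USItype hi, lo, q, r;
+
+  /* hi:lo = 0xdeadbeef * 0x12345678, a full 64-bit product.  */
+  umul_ppmm (hi, lo, (USItype) 0xdeadbeef, (USItype) 0x12345678);
+
+  /* q = hi:lo / d, r = hi:lo % d; valid because hi < d.  */
+  udiv_qrnnd (q, r, hi, lo, (USItype) 0x87654321);
+}
+#endif
+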
+/* The CPUs come in alphabetical order below.
+
+ Please add support for more CPUs here, or improve the current support
+ for the CPUs below!
+ (E.g. WE32100, IBM360.) */
+
+#if defined (__GNUC__) && !defined (NO_ASM)
+
+/* We sometimes need to clobber "cc" with gcc2, but that would not be
+ understood by gcc1. Use cpp to avoid major code duplication. */
+#if __GNUC__ < 2
+#define __CLOBBER_CC
+#define __AND_CLOBBER_CC
+#else /* __GNUC__ >= 2 */
+#define __CLOBBER_CC : "cc"
+#define __AND_CLOBBER_CC , "cc"
+#endif /* __GNUC__ < 2 */
+
+#if defined (__a29k__) || defined (_AM29K)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add %1,%4,%5
+ addc %0,%2,%3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%r" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "%r" ((USItype)(al)), \
+ "rI" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub %1,%4,%5
+ subc %0,%2,%3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "r" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "r" ((USItype)(al)), \
+ "rI" ((USItype)(bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+ do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("multiplu %0,%1,%2" \
+ : "=r" ((USItype)(xl)) \
+ : "r" (__m0), \
+ "r" (__m1)); \
+ __asm__ ("multmu %0,%1,%2" \
+ : "=r" ((USItype)(xh)) \
+ : "r" (__m0), \
+ "r" (__m1)); \
+ } while (0)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("dividu %0,%3,%4" \
+ : "=r" ((USItype)(q)), \
+ "=q" ((USItype)(r)) \
+ : "1" ((USItype)(n1)), \
+ "r" ((USItype)(n0)), \
+ "r" ((USItype)(d)))
+#define count_leading_zeros(count, x) \
+ __asm__ ("clz %0,%1" \
+ : "=r" ((USItype)(count)) \
+ : "r" ((USItype)(x)))
+#endif /* __a29k__ */
+
+#if defined (__arm__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("adds %1, %4, %5
+ adc %0, %2, %3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%r" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "%r" ((USItype)(al)), \
+ "rI" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subs %1, %4, %5
+ sbc %0, %2, %3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "r" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "r" ((USItype)(al)), \
+ "rI" ((USItype)(bl)))
+#define umul_ppmm(xh, xl, a, b) \
+{register USItype __t0, __t1, __t2; \
+ __asm__ ("%@ Inlined umul_ppmm
+ mov %2, %5, lsr #16
+ mov %0, %6, lsr #16
+ bic %3, %5, %2, lsl #16
+ bic %4, %6, %0, lsl #16
+ mul %1, %3, %4
+ mul %4, %2, %4
+ mul %3, %0, %3
+ mul %0, %2, %0
+ adds %3, %4, %3
+ addcs %0, %0, #65536
+ adds %1, %1, %3, lsl #16
+ adc %0, %0, %3, lsr #16" \
+ : "=&r" ((USItype)(xh)), \
+ "=r" ((USItype)(xl)), \
+ "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
+ : "r" ((USItype)(a)), \
+ "r" ((USItype)(b)));}
+#define UMUL_TIME 20
+#define UDIV_TIME 100
+#endif /* __arm__ */
+
+#if defined (__clipper__)
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __asm__ ("mulwux %2,%0" \
+ : "=r" (__xx.__ll) \
+ : "%0" ((USItype)(u)), \
+ "r" ((USItype)(v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define smul_ppmm(w1, w0, u, v) \
+ ({union {DItype __ll; \
+ struct {SItype __l, __h;} __i; \
+ } __xx; \
+ __asm__ ("mulwx %2,%0" \
+ : "=r" (__xx.__ll) \
+ : "%0" ((SItype)(u)), \
+ "r" ((SItype)(v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+ ({UDItype __w; \
+ __asm__ ("mulwux %2,%0" \
+ : "=r" (__w) \
+ : "%0" ((USItype)(u)), \
+ "r" ((USItype)(v))); \
+ __w; })
+#endif /* __clipper__ */
+
+#if defined (__gmicro__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add.w %5,%1
+ addx %3,%0" \
+ : "=g" ((USItype)(sh)), \
+ "=&g" ((USItype)(sl)) \
+ : "%0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub.w %5,%1
+ subx %3,%0" \
+ : "=g" ((USItype)(sh)), \
+ "=&g" ((USItype)(sl)) \
+ : "0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define umul_ppmm(ph, pl, m0, m1) \
+ __asm__ ("mulx %3,%0,%1" \
+ : "=g" ((USItype)(ph)), \
+ "=r" ((USItype)(pl)) \
+ : "%0" ((USItype)(m0)), \
+ "g" ((USItype)(m1)))
+#define udiv_qrnnd(q, r, nh, nl, d) \
+ __asm__ ("divx %4,%0,%1" \
+ : "=g" ((USItype)(q)), \
+ "=r" ((USItype)(r)) \
+ : "1" ((USItype)(nh)), \
+ "0" ((USItype)(nl)), \
+ "g" ((USItype)(d)))
+#define count_leading_zeros(count, x) \
+ __asm__ ("bsch/1 %1,%0" \
+ : "=g" (count) \
+ : "g" ((USItype)(x)), \
+ "0" ((USItype)0))
+#endif
+
+#if defined (__hppa)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add %4,%5,%1
+ addc %2,%3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%rM" ((USItype)(ah)), \
+ "rM" ((USItype)(bh)), \
+ "%rM" ((USItype)(al)), \
+ "rM" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub %4,%5,%1
+ subb %2,%3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "rM" ((USItype)(ah)), \
+ "rM" ((USItype)(bh)), \
+ "rM" ((USItype)(al)), \
+ "rM" ((USItype)(bl)))
+#if defined (_PA_RISC1_1)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ union \
+ { \
+ UDItype __f; \
+ struct {USItype __w1, __w0;} __w1w0; \
+ } __t; \
+ __asm__ ("xmpyu %1,%2,%0" \
+ : "=x" (__t.__f) \
+ : "x" ((USItype)(u)), \
+ "x" ((USItype)(v))); \
+ (w1) = __t.__w1w0.__w1; \
+ (w0) = __t.__w1w0.__w0; \
+ } while (0)
+#define UMUL_TIME 8
+#else
+#define UMUL_TIME 30
+#endif
+#define UDIV_TIME 40
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __tmp; \
+ __asm__ ( \
+ "ldi 1,%0
+ extru,= %1,15,16,%%r0 ; Bits 31..16 zero?
+ extru,tr %1,15,16,%1 ; No. Shift down, skip add.
+ ldo 16(%0),%0 ; Yes. Perform add.
+ extru,= %1,23,8,%%r0 ; Bits 15..8 zero?
+ extru,tr %1,23,8,%1 ; No. Shift down, skip add.
+ ldo 8(%0),%0 ; Yes. Perform add.
+ extru,= %1,27,4,%%r0 ; Bits 7..4 zero?
+ extru,tr %1,27,4,%1 ; No. Shift down, skip add.
+ ldo 4(%0),%0 ; Yes. Perform add.
+ extru,= %1,29,2,%%r0 ; Bits 3..2 zero?
+ extru,tr %1,29,2,%1 ; No. Shift down, skip add.
+ ldo 2(%0),%0 ; Yes. Perform add.
+ extru %1,30,1,%1 ; Extract bit 1.
+ sub %0,%1,%0 ; Subtract it.
+ " : "=r" (count), "=r" (__tmp) : "1" (x)); \
+ } while (0)
+#endif
+
+#if defined (__i386__) || defined (__i486__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addl %5,%1
+ adcl %3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subl %5,%1
+ sbbl %3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mull %3" \
+ : "=a" ((USItype)(w0)), \
+ "=d" ((USItype)(w1)) \
+ : "%0" ((USItype)(u)), \
+ "rm" ((USItype)(v)))
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("divl %4" \
+ : "=a" ((USItype)(q)), \
+ "=d" ((USItype)(r)) \
+ : "0" ((USItype)(n0)), \
+ "1" ((USItype)(n1)), \
+ "rm" ((USItype)(d)))
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __cbtmp; \
+ __asm__ ("bsrl %1,%0" \
+ : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
+ (count) = __cbtmp ^ 31; \
+ } while (0)
+#define UMUL_TIME 40
+#define UDIV_TIME 40
+#endif /* 80x86 */
+
+#if defined (__i860__)
+#if 0
+/* Make sure these patterns really improve the code before
+ switching them on. */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ union \
+ { \
+ DItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __a, __b, __s; \
+ __a.__i.__l = (al); \
+ __a.__i.__h = (ah); \
+ __b.__i.__l = (bl); \
+ __b.__i.__h = (bh); \
+ __asm__ ("fiadd.dd %1,%2,%0" \
+ : "=f" (__s.__ll) \
+ : "%f" (__a.__ll), "f" (__b.__ll)); \
+ (sh) = __s.__i.__h; \
+ (sl) = __s.__i.__l; \
+ } while (0)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ union \
+ { \
+ DItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __a, __b, __s; \
+ __a.__i.__l = (al); \
+ __a.__i.__h = (ah); \
+ __b.__i.__l = (bl); \
+ __b.__i.__h = (bh); \
+ __asm__ ("fisub.dd %1,%2,%0" \
+ : "=f" (__s.__ll) \
+ : "%f" (__a.__ll), "f" (__b.__ll)); \
+ (sh) = __s.__i.__h; \
+ (sl) = __s.__i.__l; \
+ } while (0)
+#endif
+#endif /* __i860__ */
+
+#if defined (__i960__)
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __asm__ ("emul %2,%1,%0" \
+ : "=d" (__xx.__ll) \
+ : "%dI" ((USItype)(u)), \
+ "dI" ((USItype)(v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+ ({UDItype __w; \
+ __asm__ ("emul %2,%1,%0" \
+ : "=d" (__w) \
+ : "%dI" ((USItype)(u)), \
+ "dI" ((USItype)(v))); \
+ __w; })
+#endif /* __i960__ */
+
+#if defined (__mc68000__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add%.l %5,%1
+ addx%.l %3,%0" \
+ : "=d" ((USItype)(sh)), \
+ "=&d" ((USItype)(sl)) \
+ : "%0" ((USItype)(ah)), \
+ "d" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub%.l %5,%1
+ subx%.l %3,%0" \
+ : "=d" ((USItype)(sh)), \
+ "=&d" ((USItype)(sl)) \
+ : "0" ((USItype)(ah)), \
+ "d" ((USItype)(bh)), \
+ "1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#if defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mulu%.l %3,%1:%0" \
+ : "=d" ((USItype)(w0)), \
+ "=d" ((USItype)(w1)) \
+ : "%0" ((USItype)(u)), \
+ "dmi" ((USItype)(v)))
+#define UMUL_TIME 45
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("divu%.l %4,%1:%0" \
+ : "=d" ((USItype)(q)), \
+ "=d" ((USItype)(r)) \
+ : "0" ((USItype)(n0)), \
+ "1" ((USItype)(n1)), \
+ "dmi" ((USItype)(d)))
+#define UDIV_TIME 90
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("divs%.l %4,%1:%0" \
+ : "=d" ((USItype)(q)), \
+ "=d" ((USItype)(r)) \
+ : "0" ((USItype)(n0)), \
+ "1" ((USItype)(n1)), \
+ "dmi" ((USItype)(d)))
+#define count_leading_zeros(count, x) \
+ __asm__ ("bfffo %1{%b2:%b2},%0" \
+ : "=d" ((USItype)(count)) \
+ : "od" ((USItype)(x)), "n" (0))
+#else /* not mc68020 */
+/* %/ inserts REGISTER_PREFIX. */
+#define umul_ppmm(xh, xl, a, b) \
+ __asm__ ("| Inlined umul_ppmm
+ move%.l %2,%/d0
+ move%.l %3,%/d1
+ move%.l %/d0,%/d2
+ swap %/d0
+ move%.l %/d1,%/d3
+ swap %/d1
+ move%.w %/d2,%/d4
+ mulu %/d3,%/d4
+ mulu %/d1,%/d2
+ mulu %/d0,%/d3
+ mulu %/d0,%/d1
+ move%.l %/d4,%/d0
+ eor%.w %/d0,%/d0
+ swap %/d0
+ add%.l %/d0,%/d2
+ add%.l %/d3,%/d2
+ jcc 1f
+ add%.l #65536,%/d1
+1: swap %/d2
+ moveq #0,%/d0
+ move%.w %/d2,%/d0
+ move%.w %/d4,%/d2
+ move%.l %/d2,%1
+ add%.l %/d1,%/d0
+ move%.l %/d0,%0" \
+ : "=g" ((USItype)(xh)), \
+ "=g" ((USItype)(xl)) \
+ : "g" ((USItype)(a)), \
+ "g" ((USItype)(b)) \
+ : "d0", "d1", "d2", "d3", "d4")
+#define UMUL_TIME 100
+#define UDIV_TIME 400
+#endif /* not mc68020 */
+#endif /* mc68000 */
+
+#if defined (__m88000__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addu.co %1,%r4,%r5
+ addu.ci %0,%r2,%r3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%rJ" ((USItype)(ah)), \
+ "rJ" ((USItype)(bh)), \
+ "%rJ" ((USItype)(al)), \
+ "rJ" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subu.co %1,%r4,%r5
+ subu.ci %0,%r2,%r3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "rJ" ((USItype)(ah)), \
+ "rJ" ((USItype)(bh)), \
+ "rJ" ((USItype)(al)), \
+ "rJ" ((USItype)(bl)))
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __cbtmp; \
+ __asm__ ("ff1 %0,%1" \
+ : "=r" (__cbtmp) \
+ : "r" ((USItype)(x))); \
+ (count) = __cbtmp ^ 31; \
+ } while (0)
+#if defined (__mc88110__)
+#define umul_ppmm(wh, wl, u, v) \
+ do { \
+ union {UDItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __xx; \
+ __asm__ ("mulu.d %0,%1,%2" \
+ : "=r" (__xx.__ll) \
+ : "r" ((USItype)(u)), \
+ "r" ((USItype)(v))); \
+ (wh) = __xx.__i.__h; \
+ (wl) = __xx.__i.__l; \
+ } while (0)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ ({union {UDItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __xx; \
+ USItype __q; \
+ __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
+ __asm__ ("divu.d %0,%1,%2" \
+ : "=r" (__q) \
+ : "r" (__xx.__ll), \
+ "r" ((USItype)(d))); \
+ (r) = (n0) - __q * (d); (q) = __q; })
+#define UMUL_TIME 5
+#define UDIV_TIME 25
+#else
+#define UMUL_TIME 17
+#define UDIV_TIME 150
+#endif /* __mc88110__ */
+#endif /* __m88000__ */
+
+#if defined (__mips__)
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("multu %2,%3
+ mflo %0
+ mfhi %1" \
+ : "=d" ((USItype)(w0)), \
+ "=d" ((USItype)(w1)) \
+ : "d" ((USItype)(u)), \
+ "d" ((USItype)(v)))
+#define UMUL_TIME 10
+#define UDIV_TIME 100
+#endif /* __mips__ */
+
+#if defined (__ns32000__)
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __asm__ ("meid %2,%0" \
+ : "=g" (__xx.__ll) \
+ : "%0" ((USItype)(u)), \
+ "g" ((USItype)(v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+ ({UDItype __w; \
+ __asm__ ("meid %2,%0" \
+ : "=g" (__w) \
+ : "%0" ((USItype)(u)), \
+ "g" ((USItype)(v))); \
+ __w; })
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
+ __asm__ ("deid %2,%0" \
+ : "=g" (__xx.__ll) \
+ : "0" (__xx.__ll), \
+ "g" ((USItype)(d))); \
+ (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
+#endif /* __ns32000__ */
+
+#if (defined (_ARCH_PPC) || defined (_IBMR2)) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%r" ((USItype)(ah)), \
+ "%r" ((USItype)(al)), \
+ "rI" ((USItype)(bl))); \
+ else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%r" ((USItype)(ah)), \
+ "%r" ((USItype)(al)), \
+ "rI" ((USItype)(bl))); \
+ else \
+ __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%r" ((USItype)(ah)), \
+ "r" ((USItype)(bh)), \
+ "%r" ((USItype)(al)), \
+ "rI" ((USItype)(bl))); \
+ } while (0)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (ah) && (ah) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "r" ((USItype)(bh)), \
+ "rI" ((USItype)(al)), \
+ "r" ((USItype)(bl))); \
+ else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "r" ((USItype)(bh)), \
+ "rI" ((USItype)(al)), \
+ "r" ((USItype)(bl))); \
+ else if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "r" ((USItype)(ah)), \
+ "rI" ((USItype)(al)), \
+ "r" ((USItype)(bl))); \
+ else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "r" ((USItype)(ah)), \
+ "rI" ((USItype)(al)), \
+ "r" ((USItype)(bl))); \
+ else \
+ __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "r" ((USItype)(ah)), \
+ "r" ((USItype)(bh)), \
+ "rI" ((USItype)(al)), \
+ "r" ((USItype)(bl))); \
+ } while (0)
+#define count_leading_zeros(count, x) \
+ __asm__ ("{cntlz|cntlzw} %0,%1" \
+ : "=r" ((USItype)(count)) \
+ : "r" ((USItype)(x)))
+#if defined (_ARCH_PPC)
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhwu %0,%1,%2" \
+ : "=r" ((USItype) ph) \
+ : "%r" (__m0), \
+ "r" (__m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define UMUL_TIME 15
+#define smul_ppmm(ph, pl, m0, m1) \
+ do { \
+ SItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhw %0,%1,%2" \
+ : "=r" ((SItype) ph) \
+ : "%r" (__m0), \
+ "r" (__m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define SMUL_TIME 14
+#define UDIV_TIME 120
+#else
+#define umul_ppmm(xh, xl, m0, m1) \
+ do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mul %0,%2,%3" \
+ : "=r" ((USItype)(xh)), \
+ "=q" ((USItype)(xl)) \
+ : "r" (__m0), \
+ "r" (__m1)); \
+ (xh) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+ } while (0)
+#define UMUL_TIME 8
+#define smul_ppmm(xh, xl, m0, m1) \
+ __asm__ ("mul %0,%2,%3" \
+ : "=r" ((SItype)(xh)), \
+ "=q" ((SItype)(xl)) \
+ : "r" (m0), \
+ "r" (m1))
+#define SMUL_TIME 4
+#define sdiv_qrnnd(q, r, nh, nl, d) \
+ __asm__ ("div %0,%2,%4" \
+ : "=r" ((SItype)(q)), "=q" ((SItype)(r)) \
+ : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d)))
+#define UDIV_TIME 100
+#endif
+#endif /* Power architecture variants. */
+
+#if defined (__pyr__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addw %5,%1
+ addwc %3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subw %5,%1
+ subwb %3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+/* This insn doesn't work on ancient pyramids. */
+#define umul_ppmm(w1, w0, u, v) \
+ ({union { \
+ UDItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __xx; \
+ __xx.__i.__l = u; \
+ __asm__ ("uemul %3,%0" \
+ : "=r" (__xx.__i.__h), \
+ "=r" (__xx.__i.__l) \
+ : "1" (__xx.__i.__l), \
+ "g" ((USItype)(v))); \
+ (w1) = __xx.__i.__h; \
+ (w0) = __xx.__i.__l;})
+#endif /* __pyr__ */
+
+#if defined (__ibm032__) /* RT/ROMP */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("a %1,%5
+ ae %0,%3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%0" ((USItype)(ah)), \
+ "r" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), \
+ "r" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("s %1,%5
+ se %0,%3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "0" ((USItype)(ah)), \
+ "r" ((USItype)(bh)), \
+ "1" ((USItype)(al)), \
+ "r" ((USItype)(bl)))
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ( \
+ "s r2,r2
+ mts r10,%2
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ m r2,%3
+ cas %0,r2,r0
+ mfs r10,%1" \
+ : "=r" ((USItype)(ph)), \
+ "=r" ((USItype)(pl)) \
+ : "%r" (__m0), \
+ "r" (__m1) \
+ : "r2"); \
+ (ph) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+ } while (0)
+#define UMUL_TIME 20
+#define UDIV_TIME 200
+#define count_leading_zeros(count, x) \
+ do { \
+ if ((x) >= 0x10000) \
+ __asm__ ("clz %0,%1" \
+ : "=r" ((USItype)(count)) \
+ : "r" ((USItype)(x) >> 16)); \
+ else \
+ { \
+ __asm__ ("clz %0,%1" \
+ : "=r" ((USItype)(count)) \
+ : "r" ((USItype)(x))); \
+ (count) += 16; \
+ } \
+ } while (0)
+#endif
+
+#if defined (__sparc__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addcc %r4,%5,%1
+ addx %r2,%3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%rJ" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "%rJ" ((USItype)(al)), \
+ "rI" ((USItype)(bl)) \
+ __CLOBBER_CC)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subcc %r4,%5,%1
+ subx %r2,%3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "rJ" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "rJ" ((USItype)(al)), \
+ "rI" ((USItype)(bl)) \
+ __CLOBBER_CC)
+#if defined (__sparc_v8__)
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("umul %2,%3,%1;rd %%y,%0" \
+ : "=r" ((USItype)(w1)), \
+ "=r" ((USItype)(w0)) \
+ : "r" ((USItype)(u)), \
+ "r" ((USItype)(v)))
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
+ : "=&r" ((USItype)(q)), \
+ "=&r" ((USItype)(r)) \
+ : "r" ((USItype)(n1)), \
+ "r" ((USItype)(n0)), \
+ "r" ((USItype)(d)))
+#else
+#if defined (__sparclite__)
+/* This has hardware multiply but not divide. It also has two additional
+ instructions, scan (ffs from the high bit) and divscc. */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("umul %2,%3,%1;rd %%y,%0" \
+ : "=r" ((USItype)(w1)), \
+ "=r" ((USItype)(w0)) \
+ : "r" ((USItype)(u)), \
+ "r" ((USItype)(v)))
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("! Inlined udiv_qrnnd
+ wr %%g0,%2,%%y ! Not a delayed write for sparclite
+ tst %%g0
+ divscc %3,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%%g1
+ divscc %%g1,%4,%0
+ rd %%y,%1
+ bl,a 1f
+ add %1,%4,%1
+1: ! End of inline udiv_qrnnd" \
+ : "=r" ((USItype)(q)), \
+ "=r" ((USItype)(r)) \
+ : "r" ((USItype)(n1)), \
+ "r" ((USItype)(n0)), \
+ "rI" ((USItype)(d)) \
+ : "%g1" __AND_CLOBBER_CC)
+#define UDIV_TIME 37
+#define count_leading_zeros(count, x) \
+ __asm__ ("scan %1,0,%0" \
+ : "=r" ((USItype)(x)) \
+ : "r" ((USItype)(count)))
+#else
+/* SPARC without integer multiplication and divide instructions.
+ (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("! Inlined umul_ppmm
+ wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
+ sra %3,31,%%g2 ! Don't move this insn
+ and %2,%%g2,%%g2 ! Don't move this insn
+ andcc %%g0,0,%%g1 ! Don't move this insn
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,%3,%%g1
+ mulscc %%g1,0,%%g1
+ add %%g1,%%g2,%0
+ rd %%y,%1" \
+ : "=r" ((USItype)(w1)), \
+ "=r" ((USItype)(w0)) \
+ : "%rI" ((USItype)(u)), \
+ "r" ((USItype)(v)) \
+ : "%g1", "%g2" __AND_CLOBBER_CC)
+#define UMUL_TIME 39 /* 39 instructions */
+/* It's quite necessary to add this much assembler for the sparc.
+ The default udiv_qrnnd (in C) is more than 10 times slower! */
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("! Inlined udiv_qrnnd
+ mov 32,%%g1
+ subcc %1,%2,%%g0
+1: bcs 5f
+ addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
+ sub %1,%2,%1 ! this kills msb of n
+ addx %1,%1,%1 ! so this can't give carry
+ subcc %%g1,1,%%g1
+2: bne 1b
+ subcc %1,%2,%%g0
+ bcs 3f
+ addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb
+ b 3f
+ sub %1,%2,%1 ! this kills msb of n
+4: sub %1,%2,%1
+5: addxcc %1,%1,%1
+ bcc 2b
+ subcc %%g1,1,%%g1
+! Got carry from n. Subtract next step to cancel this carry.
+ bne 4b
+ addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb
+ sub %1,%2,%1
+3: xnor %0,0,%0
+ ! End of inline udiv_qrnnd" \
+ : "=&r" ((USItype)(q)), \
+ "=&r" ((USItype)(r)) \
+ : "r" ((USItype)(d)), \
+ "1" ((USItype)(n1)), \
+ "0" ((USItype)(n0)) : "%g1" __AND_CLOBBER_CC)
+#define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
+#endif /* __sparclite__ */
+#endif /* __sparc_v8__ */
+#endif /* __sparc__ */
+
+#if defined (__vax__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addl2 %5,%1
+ adwc %3,%0" \
+ : "=g" ((USItype)(sh)), \
+ "=&g" ((USItype)(sl)) \
+ : "%0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subl2 %5,%1
+ sbwc %3,%0" \
+ : "=g" ((USItype)(sh)), \
+ "=&g" ((USItype)(sl)) \
+ : "0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+ do { \
+ union { \
+ UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("emul %1,%2,$0,%0" \
+ : "=r" (__xx.__ll) \
+ : "g" (__m0), \
+ "g" (__m1)); \
+ (xh) = __xx.__i.__h; \
+ (xl) = __xx.__i.__l; \
+ (xh) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+ } while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ union {DItype __ll; \
+ struct {SItype __l, __h;} __i; \
+ } __xx; \
+ __xx.__i.__h = n1; __xx.__i.__l = n0; \
+ __asm__ ("ediv %3,%2,%0,%1" \
+ : "=g" (q), "=g" (r) \
+ : "g" (__xx.__ll), "g" (d)); \
+ } while (0)
+#endif /* __vax__ */
+
+#endif /* __GNUC__ */
+
+/* If this machine has no inline assembler, use C macros. */
+
+#if !defined (add_ssaaaa)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ USItype __x; \
+ __x = (al) + (bl); \
+ (sh) = (ah) + (bh) + (__x < (al)); \
+ (sl) = __x; \
+ } while (0)
+#endif
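+
+/* Note (commentary): in the generic version above, the test
+   `__x < (al)' detects unsigned wraparound of the low-word addition --
+   the sum is smaller than an addend exactly when a carry occurred --
+   and that carry is folded into the high word.  The generic sub_ddmmss
+   below uses `__x > (al)' the same way to detect a borrow.  */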
+
+#if !defined (sub_ddmmss)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ USItype __x; \
+ __x = (al) - (bl); \
+ (sh) = (ah) - (bh) - (__x > (al)); \
+ (sl) = __x; \
+ } while (0)
+#endif
+
+#if !defined (umul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ USItype __x0, __x1, __x2, __x3; \
+ USItype __ul, __vl, __uh, __vh; \
+ \
+ __ul = __ll_lowpart (u); \
+ __uh = __ll_highpart (u); \
+ __vl = __ll_lowpart (v); \
+ __vh = __ll_highpart (v); \
+ \
+ __x0 = (USItype) __ul * __vl; \
+ __x1 = (USItype) __ul * __vh; \
+ __x2 = (USItype) __uh * __vl; \
+ __x3 = (USItype) __uh * __vh; \
+ \
+ __x1 += __ll_highpart (__x0);/* this can't give carry */ \
+ __x1 += __x2; /* but this indeed can */ \
+ if (__x1 < __x2) /* did we get it? */ \
+ __x3 += __ll_B; /* yes, add it in the proper pos. */ \
+ \
+ (w1) = __x3 + __ll_highpart (__x1); \
+ (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
+ } while (0)
+#endif
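+
+/* Note (commentary): the generic umul_ppmm above is schoolbook
+   multiplication on half-words.  With B = 2^(SI_TYPE_SIZE/2),
+   u = uh*B + ul and v = vh*B + vl, so
+
+     u * v = uh*vh*B^2 + (uh*vl + ul*vh)*B + ul*vl
+           =  __x3*B^2 + (__x1 + __x2)*B   + __x0
+
+   The only subtlety is that __x1 + __x2 can itself carry into the B^2
+   position, which is what the `if (__x1 < __x2)' test catches.  */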
+
+#if !defined (__umulsidi3)
+#define __umulsidi3(u, v) \
+ ({DIunion __w; \
+ umul_ppmm (__w.s.high, __w.s.low, u, v); \
+ __w.ll; })
+#endif
+
+/* Define this unconditionally, so it can be used for debugging. */
+#define __udiv_qrnnd_c(q, r, n1, n0, d) \
+ do { \
+ USItype __d1, __d0, __q1, __q0; \
+ USItype __r1, __r0, __m; \
+ __d1 = __ll_highpart (d); \
+ __d0 = __ll_lowpart (d); \
+ \
+ __r1 = (n1) % __d1; \
+ __q1 = (n1) / __d1; \
+ __m = (USItype) __q1 * __d0; \
+ __r1 = __r1 * __ll_B | __ll_highpart (n0); \
+ if (__r1 < __m) \
+ { \
+ __q1--, __r1 += (d); \
+ if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
+ if (__r1 < __m) \
+ __q1--, __r1 += (d); \
+ } \
+ __r1 -= __m; \
+ \
+ __r0 = __r1 % __d1; \
+ __q0 = __r1 / __d1; \
+ __m = (USItype) __q0 * __d0; \
+ __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
+ if (__r0 < __m) \
+ { \
+ __q0--, __r0 += (d); \
+ if (__r0 >= (d)) \
+ if (__r0 < __m) \
+ __q0--, __r0 += (d); \
+ } \
+ __r0 -= __m; \
+ \
+ (q) = (USItype) __q1 * __ll_B | __q0; \
+ (r) = __r0; \
+ } while (0)
+
+/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
+ __udiv_w_sdiv (defined in libgcc or elsewhere). */
+#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
+#define udiv_qrnnd(q, r, nh, nl, d) \
+ do { \
+ USItype __r; \
+ (q) = __udiv_w_sdiv (&__r, nh, nl, d); \
+ (r) = __r; \
+ } while (0)
+#endif
+
+/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
+#if !defined (udiv_qrnnd)
+#define UDIV_NEEDS_NORMALIZATION 1
+#define udiv_qrnnd __udiv_qrnnd_c
+#endif
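+
+/* A minimal sketch (illustrative only; the function name
+   __example_normalized_udiv is hypothetical) of what a caller must do
+   when UDIV_NEEDS_NORMALIZATION is 1: shift the divisor until its most
+   significant bit is set, shifting the two-word numerator along with
+   it, then shift the remainder back down.  Assumes n1 < d on entry;
+   the quotient is unaffected by the scaling.  */
+#if 0
+static void
+__example_normalized_udiv (n1, n0, d, qp, rp)
+     USItype n1, n0, d, *qp, *rp;
+{
+  USItype q, r;
+  int bm;
+
+  count_leading_zeros (bm, d);
+  if (bm != 0)
+    {
+      /* Normalize; the bits shifted out of n1 are zero since n1 < d.  */
+      n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm));
+      n0 = n0 << bm;
+      d = d << bm;
+    }
+  udiv_qrnnd (q, r, n1, n0, d);
+  *qp = q;
+  *rp = r >> bm;
+}
+#endif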
+
+#if !defined (count_leading_zeros)
+extern const UQItype __clz_tab[];
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __xr = (x); \
+ USItype __a; \
+ \
+ if (SI_TYPE_SIZE <= 32) \
+ { \
+ __a = __xr < (1<<2*__BITS4) \
+ ? (__xr < (1<<__BITS4) ? 0 : __BITS4) \
+ : (__xr < (1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
+ } \
+ else \
+ { \
+ for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8) \
+ if (((__xr >> __a) & 0xff) != 0) \
+ break; \
+ } \
+ \
+ (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
+ } while (0)
+#endif
+
+#ifndef UDIV_NEEDS_NORMALIZATION
+#define UDIV_NEEDS_NORMALIZATION 0
+#endif
diff --git a/gnu/usr.bin/cc/include/loop.h b/gnu/usr.bin/cc/include/loop.h
new file mode 100644
index 0000000..bb219c3
--- /dev/null
+++ b/gnu/usr.bin/cc/include/loop.h
@@ -0,0 +1,169 @@
+/* Loop optimization definitions for GNU C-Compiler
+ Copyright (C) 1991 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Get the luid of an insn. Catch the error of trying to reference the LUID
+ of an insn added during loop, since these don't have LUIDs. */
+
+#define INSN_LUID(INSN) \
+ (INSN_UID (INSN) < max_uid_for_loop ? uid_luid[INSN_UID (INSN)] \
+ : (abort (), -1))
+
+/* A "basic induction variable" or biv is a pseudo reg that is set
+ (within this loop) only by incrementing or decrementing it. */
+/* A "general induction variable" or giv is a pseudo reg whose
+ value is a linear function of a biv. */
+
+/* Bivs are recognized by `basic_induction_var';
+ Givs by `general_induction_var'. */
+
+/* An enum for the two different types of givs, those that are used
+ as memory addresses and those that are calculated into registers. */
+enum g_types { DEST_ADDR, DEST_REG };
+
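+/* Example (commentary): in a loop such as
+
+     for (i = 0; i < n; i++)
+       a[i] = 0;
+
+   the counter `i' is a biv (it is set only by the increment), while the
+   address computation for `a[i]' -- a linear function like i*4 + &a[0]
+   on a target with 4-byte elements -- is a giv of type DEST_ADDR.  */
+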
+/* A `struct induction' is created for every instruction that sets
+ an induction variable (either a biv or a giv). */
+
+struct induction
+{
+ rtx insn; /* The insn that sets a biv or giv */
+ rtx new_reg; /* New register, containing strength reduced
+ version of this giv. */
+ rtx src_reg; /* Biv from which this giv is computed.
+ (If this is a biv, then this is the biv.) */
+ enum g_types giv_type; /* Indicate whether DEST_ADDR or DEST_REG */
+ rtx dest_reg; /* Destination register for insn: this is the
+ register which was the biv or giv.
+ For a biv, this equals src_reg.
+ For a DEST_ADDR type giv, this is 0. */
+ rtx *location; /* Place in the insn where this giv occurs.
+ If GIV_TYPE is DEST_REG, this is 0. */
+ enum machine_mode mode; /* The mode of this biv or giv */
+ enum machine_mode mem_mode; /* For DEST_ADDR, mode of the memory object. */
+ rtx mult_val; /* Multiplicative factor for src_reg. */
+ rtx add_val; /* Additive constant for that product. */
+ int benefit; /* Gain from eliminating this insn. */
+ rtx final_value; /* If the giv is used outside the loop, and its
+ final value could be calculated, it is put
+ here, and the giv is made replaceable. Set
+ the giv to this value before the loop. */
+ unsigned replaceable : 1; /* 1 if we can substitute the strength-reduced
+ variable for the original variable.
+ 0 means they must be kept separate and the
+ new one must be copied into the old pseudo
+ reg each time the old one is set. */
+ unsigned not_replaceable : 1; /* Used to prevent duplicating work. This is
+ 1 if we know that the giv definitely can
+ not be made replaceable, in which case we
+ don't bother checking the variable again
+ even if further info is available.
+ Both this and the above can be zero. */
+ unsigned ignore : 1; /* 1 prohibits further processing of giv */
+ unsigned always_computable : 1;/* 1 if this set occurs each iteration */
+ unsigned maybe_multiple : 1; /* Only used for a biv and 1 if this biv
+ update may be done multiple times per
+ iteration. */
+ unsigned cant_derive : 1; /* For giv's, 1 if this giv cannot derive
+ another giv. This occurs in many cases
+ where a giv's lifetime spans an update to
+ a biv. */
+ unsigned combined_with : 1; /* 1 if this giv has been combined with. It
+ then cannot combine with any other giv. */
+ unsigned maybe_dead : 1; /* 1 if this giv might be dead. In that case,
+ we won't use it to eliminate a biv; doing
+ so would probably lose. */
+ int lifetime; /* Length of life of this giv */
+ int times_used; /* # times this giv is used. */
+ rtx derive_adjustment; /* If nonzero, is an adjustment to be
+ subtracted from add_val when this giv
+ derives another. This occurs when the
+ giv spans a biv update by incrementation. */
+ struct induction *next_iv; /* For givs, links together all givs that are
+ based on the same biv. For bivs, links
+ together all biv entries that refer to the
+ same biv register. */
+ struct induction *same; /* If this giv has been combined with another
+ giv, this points to the base giv. The base
+ giv will have COMBINED_WITH non-zero. */
+ HOST_WIDE_INT const_adjust; /* Used by loop unrolling, when an address giv
+ is split, and a constant is eliminated from
+ the address, the -constant is stored here
+ for later use. */
+};
+
+/* A `struct iv_class' is created for each biv. */
+
+struct iv_class {
+ int regno; /* Pseudo reg which is the biv. */
+ int biv_count; /* Number of insns setting this reg. */
+ struct induction *biv; /* List of all insns that set this reg. */
+ int giv_count; /* Number of DEST_REG givs computed from this
+ biv. The resulting count is only used in
+ check_dbra_loop. */
+ struct induction *giv; /* List of all insns that compute a giv
+ from this reg. */
+ int total_benefit; /* Sum of BENEFITs of all those givs */
+ rtx initial_value; /* Value of reg at loop start */
+ rtx initial_test; /* Test performed on BIV before loop */
+ struct iv_class *next; /* Links all class structures together */
+ rtx init_insn; /* insn which initializes biv, 0 if none. */
+ rtx init_set; /* SET of INIT_INSN, if any. */
+ unsigned incremented : 1; /* 1 if somewhere incremented/decremented */
+ unsigned eliminable : 1; /* 1 if plausible candidate for elimination. */
+ unsigned nonneg : 1; /* 1 if we added a REG_NONNEG note for this. */
+ unsigned reversed : 1; /* 1 if we reversed the loop that this
+ biv controls. */
+};
+
+/* Definitions used by the basic induction variable discovery code. */
+enum iv_mode { UNKNOWN_INDUCT, BASIC_INDUCT, NOT_BASIC_INDUCT,
+ GENERAL_INDUCT };
+
+/* Variables declared in loop.c, but also needed in unroll.c. */
+
+extern int *uid_luid;
+extern int max_uid_for_loop;
+extern int *uid_loop_num;
+extern int *loop_outer_loop;
+extern rtx *loop_number_exit_labels;
+extern unsigned HOST_WIDE_INT loop_n_iterations;
+extern int max_reg_before_loop;
+
+extern FILE *loop_dump_stream;
+
+extern enum iv_mode *reg_iv_type;
+extern struct induction **reg_iv_info;
+extern struct iv_class **reg_biv_class;
+extern struct iv_class *loop_iv_list;
+
+/* Forward declarations for non-static functions declared in loop.c and
+ unroll.c. */
+int invariant_p PROTO((rtx));
+rtx get_condition_for_loop PROTO((rtx));
+void emit_iv_add_mult PROTO((rtx, rtx, rtx, rtx, rtx));
+
+/* Forward declarations for non-static functions declared in stmt.c. */
+void find_loop_tree_blocks PROTO((void));
+void unroll_block_trees PROTO((void));
+
+void unroll_loop PROTO((rtx, int, rtx, rtx, int));
+rtx biv_total_increment PROTO((struct iv_class *, rtx, rtx));
+unsigned HOST_WIDE_INT loop_iterations PROTO((rtx, rtx));
+rtx final_biv_value PROTO((struct iv_class *, rtx, rtx));
+rtx final_giv_value PROTO((struct induction *, rtx, rtx));
+void emit_unrolled_add PROTO((rtx, rtx, rtx));
diff --git a/gnu/usr.bin/cc/include/machmode.def b/gnu/usr.bin/cc/include/machmode.def
new file mode 100644
index 0000000..24d0ba5
--- /dev/null
+++ b/gnu/usr.bin/cc/include/machmode.def
@@ -0,0 +1,118 @@
+/* This file contains the definitions and documentation for the
+ machine modes used in the GNU compiler.
+ Copyright (C) 1987, 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This file defines all the MACHINE MODES used by GNU CC.
+
+ A machine mode specifies a size and format of data
+ at the machine level.
+
+ Each RTL expression has a machine mode.
+
+ At the syntax tree level, each ..._TYPE and each ..._DECL node
+ has a machine mode which describes data of that type or the
+ data of the variable declared. */
+
+/* The first argument is the internal name of the machine mode
+ used in the C source.
+ By convention these are in UPPER_CASE, except for the word "mode".
+
+ The second argument is the name of the machine mode in the
+ external ASCII format used for reading and printing RTL and trees.
+ By convention these names are in UPPER_CASE.
+
+ Third argument states the kind of representation:
+ MODE_INT - integer
+ MODE_FLOAT - floating
+ MODE_PARTIAL_INT - PSImode and PDImode
+ MODE_CC - modes used for representing the condition code in a register
+ MODE_COMPLEX_INT, MODE_COMPLEX_FLOAT - complex number
+ MODE_RANDOM - anything else
+
+ Fourth argument is the relative size of the object, in bytes.
+ It is zero when the size is meaningless or not determined.
+ A byte's size is determined by BITS_PER_UNIT in tm.h.
+
+ Fifth arg is the relative size of subunits of the object.
+ It is same as the fourth argument except for complexes,
+ since they are really made of two equal size subunits.
+
+ Sixth arg is next wider natural mode of the same class.
+ 0 if there is none. */
+
+/* VOIDmode is used when no mode needs to be specified,
+ as for example on CONST_INT RTL expressions. */
+DEF_MACHMODE (VOIDmode, "VOID", MODE_RANDOM, 0, 0, VOIDmode)
+
+DEF_MACHMODE (QImode, "QI", MODE_INT, 1, 1, HImode) /* int types */
+DEF_MACHMODE (HImode, "HI", MODE_INT, 2, 2, SImode)
+/* Pointers on some machines use this type to distinguish them from ints.
+ Useful if a pointer is 4 bytes but has some bits that are not significant,
+ so it is really not quite as wide as an integer. */
+DEF_MACHMODE (PSImode, "PSI", MODE_PARTIAL_INT, 4, 4, VOIDmode)
+DEF_MACHMODE (SImode, "SI", MODE_INT, 4, 4, DImode)
+DEF_MACHMODE (PDImode, "PDI", MODE_PARTIAL_INT, 8, 8, VOIDmode)
+DEF_MACHMODE (DImode, "DI", MODE_INT, 8, 8, TImode)
+DEF_MACHMODE (TImode, "TI", MODE_INT, 16, 16, OImode)
+DEF_MACHMODE (OImode, "OI", MODE_INT, 32, 32, VOIDmode)
+
+DEF_MACHMODE (QFmode, "QF", MODE_FLOAT, 1, 1, HFmode)
+DEF_MACHMODE (HFmode, "HF", MODE_FLOAT, 2, 2, TQFmode)
+DEF_MACHMODE (TQFmode, "TQF", MODE_FLOAT, 3, 3, SFmode) /* MIL-STD-1750A */
+DEF_MACHMODE (SFmode, "SF", MODE_FLOAT, 4, 4, DFmode)
+DEF_MACHMODE (DFmode, "DF", MODE_FLOAT, 8, 8, XFmode)
+DEF_MACHMODE (XFmode, "XF", MODE_FLOAT, 12, 12, TFmode) /* IEEE extended */
+DEF_MACHMODE (TFmode, "TF", MODE_FLOAT, 16, 16, VOIDmode)
+
+/* Complex modes. */
+DEF_MACHMODE (SCmode, "SC", MODE_COMPLEX_FLOAT, 8, 4, DCmode)
+DEF_MACHMODE (DCmode, "DC", MODE_COMPLEX_FLOAT, 16, 8, XCmode)
+DEF_MACHMODE (XCmode, "XC", MODE_COMPLEX_FLOAT, 24, 12, TCmode)
+DEF_MACHMODE (TCmode, "TC", MODE_COMPLEX_FLOAT, 32, 16, VOIDmode)
+
+DEF_MACHMODE (CQImode, "CQI", MODE_COMPLEX_INT, 2, 1, CHImode)
+DEF_MACHMODE (CHImode, "CHI", MODE_COMPLEX_INT, 4, 2, CSImode)
+DEF_MACHMODE (CSImode, "CSI", MODE_COMPLEX_INT, 8, 4, CDImode)
+DEF_MACHMODE (CDImode, "CDI", MODE_COMPLEX_INT, 16, 8, CTImode)
+DEF_MACHMODE (CTImode, "CTI", MODE_COMPLEX_INT, 32, 16, COImode)
+DEF_MACHMODE (COImode, "COI", MODE_COMPLEX_INT, 64, 32, VOIDmode)
+
+/* BLKmode is used for structures, arrays, etc.
+ that fit no more specific mode. */
+DEF_MACHMODE (BLKmode, "BLK", MODE_RANDOM, 0, 0, VOIDmode)
+
+/* The modes for representing the condition codes come last. CCmode is
+ always defined. Additional modes for the condition code can be specified
+ in the EXTRA_CC_MODES macro. Everything but the names of the modes
+ are copied from CCmode. For these modes, GET_MODE_WIDER_MODE points
+ to the next defined CC mode, if any. */
+
+DEF_MACHMODE (CCmode, "CC", MODE_CC, 4, 4, VOIDmode)
+
+/* The symbol Pmode stands for one of the above machine modes (usually SImode).
+ The tm file specifies which one. It is not a distinct mode. */
+
+/*
+Local variables:
+mode:c
+version-control: t
+End:
+*/
diff --git a/gnu/usr.bin/cc/include/machmode.h b/gnu/usr.bin/cc/include/machmode.h
new file mode 100644
index 0000000..307422b
--- /dev/null
+++ b/gnu/usr.bin/cc/include/machmode.h
@@ -0,0 +1,169 @@
+/* Machine mode definitions for GNU C-Compiler; included by rtl.h and tree.h.
+ Copyright (C) 1991, 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Add prototype support. */
+#ifndef PROTO
+#if defined (USE_PROTOTYPES) ? USE_PROTOTYPES : defined (__STDC__)
+#define PROTO(ARGS) ARGS
+#else
+#define PROTO(ARGS) ()
+#endif
+#endif
+
+#ifndef HAVE_MACHINE_MODES
+
+/* Strictly speaking, this isn't the proper place to include these definitions,
+ but this file is included by every GCC file.
+
+ Some systems define these in, e.g., param.h. We undefine these names
+ here to avoid the warnings. We prefer to use our definitions since we
+ know they are correct. */
+
+#undef MIN
+#undef MAX
+
+#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
+#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
+
+/* Find the largest host integer type and set its size and type. */
+
+#ifndef HOST_BITS_PER_WIDE_INT
+
+#if HOST_BITS_PER_LONG > HOST_BITS_PER_INT
+#define HOST_BITS_PER_WIDE_INT HOST_BITS_PER_LONG
+#define HOST_WIDE_INT long
+#else
+#define HOST_BITS_PER_WIDE_INT HOST_BITS_PER_INT
+#define HOST_WIDE_INT int
+#endif
+
+#endif
+
+/* Provide a default way to print an address in hex via printf. */
+
+#ifndef HOST_PTR_PRINTF
+#define HOST_PTR_PRINTF sizeof (int) == sizeof (char *) ? "%x" : "%lx"
+#endif
+
+/* Make an enum class that gives all the machine modes. */
+
+#define DEF_MACHMODE(SYM, NAME, TYPE, SIZE, UNIT, WIDER) SYM,
+
+enum machine_mode {
+#include "machmode.def"
+
+#ifdef EXTRA_CC_MODES
+ EXTRA_CC_MODES,
+#endif
+MAX_MACHINE_MODE };
+
+#undef DEF_MACHMODE
+
+#define HAVE_MACHINE_MODES
+
+#ifndef NUM_MACHINE_MODES
+#define NUM_MACHINE_MODES (int) MAX_MACHINE_MODE
+#endif
+
+/* Get the name of mode MODE as a string. */
+
+extern char *mode_name[];
+#define GET_MODE_NAME(MODE) (mode_name[(int)(MODE)])
+
+enum mode_class { MODE_RANDOM, MODE_INT, MODE_FLOAT, MODE_PARTIAL_INT, MODE_CC,
+ MODE_COMPLEX_INT, MODE_COMPLEX_FLOAT, MAX_MODE_CLASS};
+
+/* Get the general kind of object that mode MODE represents
+ (integer, floating, complex, etc.) */
+
+extern enum mode_class mode_class[];
+#define GET_MODE_CLASS(MODE) (mode_class[(int)(MODE)])
+
+/* Nonzero if MODE is an integral mode. */
+#define INTEGRAL_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_INT \
+ || GET_MODE_CLASS (MODE) == MODE_PARTIAL_INT \
+ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_INT)
+
+/* Nonzero if MODE is a floating-point mode. */
+#define FLOAT_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT)
+
+/* Get the size in bytes of an object of mode MODE. */
+
+extern int mode_size[];
+#define GET_MODE_SIZE(MODE) (mode_size[(int)(MODE)])
+
+/* Get the size in bytes of the basic parts of an object of mode MODE. */
+
+extern int mode_unit_size[];
+#define GET_MODE_UNIT_SIZE(MODE) (mode_unit_size[(int)(MODE)])
+
+/* Get the number of units in the object. */
+
+#define GET_MODE_NUNITS(MODE) \
+ ((GET_MODE_UNIT_SIZE ((MODE)) == 0) ? 0 \
+ : (GET_MODE_SIZE ((MODE)) / GET_MODE_UNIT_SIZE ((MODE))))
+
+/* Get the size in bits of an object of mode MODE. */
+
+#define GET_MODE_BITSIZE(MODE) (BITS_PER_UNIT * mode_size[(int)(MODE)])
+
+/* Get a bitmask containing 1 for all bits in a word
+ that fit within mode MODE. */
+
+#define GET_MODE_MASK(MODE) \
+ ((GET_MODE_BITSIZE (MODE) >= HOST_BITS_PER_WIDE_INT) \
+ ?(HOST_WIDE_INT) ~0 : (((HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (MODE)) - 1))
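+
+/* Note (commentary): e.g. for HImode on a target with 8-bit units and a
+   32-bit HOST_WIDE_INT this yields
+   ((HOST_WIDE_INT) 1 << 16) - 1 == 0xffff; for a mode at least as wide
+   as HOST_WIDE_INT the shift would be undefined, so ~0 is used instead. */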
+
+/* Get the next wider natural mode (eg, QI -> HI -> SI -> DI -> TI). */
+
+extern enum machine_mode mode_wider_mode[];
+#define GET_MODE_WIDER_MODE(MODE) (mode_wider_mode[(int)(MODE)])
+
+/* Return the mode for data of a given size SIZE and mode class CLASS.
+ If LIMIT is nonzero, then don't use modes bigger than MAX_FIXED_MODE_SIZE.
+ The value is BLKmode if no other mode is found. */
+
+extern enum machine_mode mode_for_size PROTO((unsigned int, enum mode_class, int));
+
+/* Find the best mode to use to access a bit field. */
+
+extern enum machine_mode get_best_mode PROTO((int, int, int, enum machine_mode, int));
+
+/* Determine alignment, 1<=result<=BIGGEST_ALIGNMENT. */
+
+#define GET_MODE_ALIGNMENT(MODE) \
+ MIN (BIGGEST_ALIGNMENT, \
+ MAX (1, (GET_MODE_UNIT_SIZE (MODE) * BITS_PER_UNIT)))
+
+/* For each class, get the narrowest mode in that class. */
+
+extern enum machine_mode class_narrowest_mode[];
+#define GET_CLASS_NARROWEST_MODE(CLASS) class_narrowest_mode[(int)(CLASS)]
+
+/* Define the integer modes whose sizes are BITS_PER_UNIT
+ and BITS_PER_WORD. */
+
+extern enum machine_mode byte_mode;
+extern enum machine_mode word_mode;
+
+#endif /* not HAVE_MACHINE_MODES */
diff --git a/gnu/usr.bin/cc/include/modemap.def b/gnu/usr.bin/cc/include/modemap.def
new file mode 100644
index 0000000..3257640
--- /dev/null
+++ b/gnu/usr.bin/cc/include/modemap.def
@@ -0,0 +1,30 @@
+/* Bytecode specific machine mode info for GNU C-compiler.
+ Copyright (C) 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Map mode to signed, unsigned typecodes, bytecode to push const,
+ to load, to store */
+DEF_MODEMAP(QImode, QIcode, QUcode, constQI, loadQI, storeQI)
+DEF_MODEMAP(HImode, HIcode, HUcode, constHI, loadHI, storeHI)
+DEF_MODEMAP(VOIDmode, SIcode, SUcode, constSI, loadSI, storeSI)
+DEF_MODEMAP(SImode, SIcode, SUcode, constSI, loadSI, storeSI)
+DEF_MODEMAP(DImode, DIcode, DUcode, constDI, loadDI, storeDI)
+DEF_MODEMAP(PSImode, Pcode, Pcode, constP, loadP, storeP)
+DEF_MODEMAP(BLKmode, Pcode, Pcode, constP, loadP, neverneverland)
+DEF_MODEMAP(SFmode, SFcode, SFcode, constSF, loadSF, storeSF)
+DEF_MODEMAP(DFmode, DFcode, DFcode, constDF, loadDF, storeDF)
diff --git a/gnu/usr.bin/cc/include/multilib.h b/gnu/usr.bin/cc/include/multilib.h
new file mode 100644
index 0000000..b2a5790
--- /dev/null
+++ b/gnu/usr.bin/cc/include/multilib.h
@@ -0,0 +1,3 @@
+#define MULTILIB_SELECT "\
+. ;\
+"
diff --git a/gnu/usr.bin/cc/include/obstack.h b/gnu/usr.bin/cc/include/obstack.h
new file mode 100644
index 0000000..0176719
--- /dev/null
+++ b/gnu/usr.bin/cc/include/obstack.h
@@ -0,0 +1,513 @@
+/* obstack.h - object stack macros
+ Copyright (C) 1988, 89, 90, 91, 92, 93, 94 Free Software Foundation, Inc.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Summary:
+
+All the apparent functions defined here are macros. The idea
+is that you would use these pre-tested macros to solve a
+very specific set of problems, and they would run fast.
+Caution: no side-effects in arguments please!! They may be
+evaluated MANY times!!
+
+These macros operate a stack of objects. Each object starts life
+small, and may grow to maturity. (Consider building a word syllable
+by syllable.) An object can move while it is growing. Once it has
+been "finished" it never changes address again. So the "top of the
+stack" is typically an immature growing object, while the rest of the
+stack is of mature, fixed size and fixed address objects.
+
+These routines grab large chunks of memory, using a function you
+supply, called `obstack_chunk_alloc'. On occasion, they free chunks,
+by calling `obstack_chunk_free'. You must define them and declare
+them before using any obstack macros.
+
+Each independent stack is represented by a `struct obstack'.
+Each of the obstack macros expects a pointer to such a structure
+as the first argument.
+
+One motivation for this package is the problem of growing char strings
+in symbol tables. Unless you are "fascist pig with a read-only mind"
+--Gosper's immortal quote from HAKMEM item 154, out of context--you
+would not like to put any arbitrary upper limit on the length of your
+symbols.
+
+In practice this often means you will build many short symbols and a
+few long symbols. At the time you are reading a symbol you don't know
+how long it is. One traditional method is to read a symbol into a
+buffer, realloc()ating the buffer every time you try to read a symbol
+that is longer than the buffer. This is beaut, but you still will
+want to copy the symbol from the buffer to a more permanent
+symbol-table entry say about half the time.
+
+With obstacks, you can work differently. Use one obstack for all symbol
+names. As you read a symbol, grow the name in the obstack gradually.
+When the name is complete, finalize it. Then, if the symbol exists already,
+free the newly read name.
+
+The way we do this is to take a large chunk, allocating memory from
+low addresses. When you want to build a symbol in the chunk you just
+add chars above the current "high water mark" in the chunk. When you
+have finished adding chars, because you got to the end of the symbol,
+you know how long the chars are, and you can create a new object.
+Mostly the chars will not burst over the highest address of the chunk,
+because you would typically expect a chunk to be (say) 100 times as
+long as an average object.
+
+In case that isn't clear, when we have enough chars to make up
+the object, THEY ARE ALREADY CONTIGUOUS IN THE CHUNK (guaranteed)
+so we just point to it where it lies. No moving of chars is
+needed and this is the second win: potentially long strings need
+never be explicitly shuffled. Once an object is formed, it does not
+change its address during its lifetime.
+
+When the chars burst over a chunk boundary, we allocate a larger
+chunk, and then copy the partly formed object from the end of the old
+chunk to the beginning of the new larger chunk. We then carry on
+accreting characters to the end of the object as we normally would.
+
+A special macro is provided to add a single char at a time to a
+growing object. This allows the use of register variables, which
+break the ordinary 'growth' macro.
+
+Summary:
+ We allocate large chunks.
+ We carve out one object at a time from the current chunk.
+ Once carved, an object never moves.
+ We are free to append data of any size to the currently
+ growing object.
+ Exactly one object is growing in an obstack at any one time.
+ You can run one obstack per control block.
+ You may have as many control blocks as you dare.
+ Because of the way we do it, you can `unwind' an obstack
+ back to a previous state. (You may remove objects much
+ as you would with a stack.)
+*/
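+
+/* For illustration only (not compiled): a minimal usage sketch of the
+   interface described above.  It assumes malloc/free as the chunk
+   allocators and a hypothetical driver; real clients supply their own
+   obstack_chunk_alloc and obstack_chunk_free.  */
+#if 0
+#include <stdio.h>
+#include <stdlib.h>
+#include "obstack.h"
+
+#define obstack_chunk_alloc malloc
+#define obstack_chunk_free free
+
+int
+main ()
+{
+  struct obstack stk;
+  char *sym;
+  int c;
+
+  obstack_init (&stk);
+  /* Grow the current object one char at a time, as when reading a
+     symbol whose length is not known in advance.  */
+  while ((c = getchar ()) != EOF && c != '\n')
+    obstack_1grow (&stk, c);
+  obstack_1grow (&stk, 0);
+  /* Finish the object; its address never changes after this.  */
+  sym = (char *) obstack_finish (&stk);
+  printf ("read `%s'\n", sym);
+  /* Unwind the stack: frees SYM and everything allocated after it.  */
+  obstack_free (&stk, sym);
+  return 0;
+}
+#endif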
+
+
+/* Don't do the contents of this file more than once. */
+
+#ifndef __OBSTACK_H__
+#define __OBSTACK_H__
+
+/* We use subtraction of (char *)0 instead of casting to int
+ because on word-addressable machines a simple cast to int
+ may ignore the byte-within-word field of the pointer. */
+
+#ifndef __PTR_TO_INT
+#define __PTR_TO_INT(P) ((P) - (char *)0)
+#endif
+
+#ifndef __INT_TO_PTR
+#define __INT_TO_PTR(P) ((P) + (char *)0)
+#endif
+
+/* We need the type of the resulting object. In ANSI C it is ptrdiff_t
+ but in traditional C it is usually long. If we are in ANSI C and
+ don't already have ptrdiff_t, get it. */
+
+#if defined (__STDC__) && ! defined (offsetof)
+#if defined (__GNUC__) && defined (IN_GCC)
+/* On the NeXT machine, the system's stddef.h screws up if included
+ after we have defined just ptrdiff_t, so include all of stddef.h.
+ Otherwise, define just ptrdiff_t, which is all we need. */
+#ifndef __NeXT__
+#define __need_ptrdiff_t
+#endif
+#endif
+
+#include <stddef.h>
+#endif
+
+#ifdef __STDC__
+#define PTR_INT_TYPE ptrdiff_t
+#else
+#define PTR_INT_TYPE long
+#endif
+
+struct _obstack_chunk /* Lives at front of each chunk. */
+{
+ char *limit; /* 1 past end of this chunk */
+ struct _obstack_chunk *prev; /* address of prior chunk or NULL */
+ char contents[4]; /* objects begin here */
+};
+
+struct obstack /* control current object in current chunk */
+{
+ long chunk_size; /* preferred size to allocate chunks in */
+ struct _obstack_chunk* chunk; /* address of current struct obstack_chunk */
+ char *object_base; /* address of object we are building */
+ char *next_free; /* where to add next char to current object */
+ char *chunk_limit; /* address of char after current chunk */
+ PTR_INT_TYPE temp; /* Temporary for some macros. */
+ int alignment_mask; /* Mask of alignment for each object. */
+ struct _obstack_chunk *(*chunkfun) (); /* User's fcn to allocate a chunk. */
+ void (*freefun) (); /* User's function to free a chunk. */
+ char *extra_arg; /* first arg for chunk alloc/dealloc funcs */
+ unsigned use_extra_arg:1; /* chunk alloc/dealloc funcs take extra arg */
+ unsigned maybe_empty_object:1;/* There is a possibility that the current
+ chunk contains a zero-length object. This
+ prevents freeing the chunk if we allocate
+ a bigger chunk to replace it. */
+ unsigned alloc_failed:1; /* chunk alloc func returned 0 */
+};
+
+/* Declare the external functions we use; they are in obstack.c. */
+
+#ifdef __STDC__
+extern void _obstack_newchunk (struct obstack *, int);
+extern void _obstack_free (struct obstack *, void *);
+extern int _obstack_begin (struct obstack *, int, int,
+ void *(*) (), void (*) ());
+extern int _obstack_begin_1 (struct obstack *, int, int,
+ void *(*) (), void (*) (), void *);
+#else
+extern void _obstack_newchunk ();
+extern void _obstack_free ();
+extern int _obstack_begin ();
+extern int _obstack_begin_1 ();
+#endif
+
+#ifdef __STDC__
+
+/* Do the function-declarations after the structs
+ but before defining the macros. */
+
+void obstack_init (struct obstack *obstack);
+
+void * obstack_alloc (struct obstack *obstack, int size);
+
+void * obstack_copy (struct obstack *obstack, void *address, int size);
+void * obstack_copy0 (struct obstack *obstack, void *address, int size);
+
+void obstack_free (struct obstack *obstack, void *block);
+
+void obstack_blank (struct obstack *obstack, int size);
+
+void obstack_grow (struct obstack *obstack, void *data, int size);
+void obstack_grow0 (struct obstack *obstack, void *data, int size);
+
+void obstack_1grow (struct obstack *obstack, int data_char);
+void obstack_ptr_grow (struct obstack *obstack, void *data);
+void obstack_int_grow (struct obstack *obstack, int data);
+
+void * obstack_finish (struct obstack *obstack);
+
+int obstack_object_size (struct obstack *obstack);
+
+int obstack_room (struct obstack *obstack);
+void obstack_1grow_fast (struct obstack *obstack, int data_char);
+void obstack_ptr_grow_fast (struct obstack *obstack, void *data);
+void obstack_int_grow_fast (struct obstack *obstack, int data);
+void obstack_blank_fast (struct obstack *obstack, int size);
+
+void * obstack_base (struct obstack *obstack);
+void * obstack_next_free (struct obstack *obstack);
+int obstack_alignment_mask (struct obstack *obstack);
+int obstack_chunk_size (struct obstack *obstack);
+
+#endif /* __STDC__ */
+
+/* Non-ANSI C cannot really support alternative functions for these macros,
+ so we do not declare them. */
+
+/* Pointer to beginning of object being allocated or to be allocated next.
+ Note that this might not be the final address of the object
+ because a new chunk might be needed to hold the final size. */
+
+#define obstack_base(h) ((h)->alloc_failed ? 0 : (h)->object_base)
+
+/* Size for allocating ordinary chunks. */
+
+#define obstack_chunk_size(h) ((h)->chunk_size)
+
+/* Pointer to next byte not yet allocated in current chunk. */
+
+#define obstack_next_free(h) ((h)->alloc_failed ? 0 : (h)->next_free)
+
+/* Mask specifying low bits that should be clear in address of an object. */
+
+#define obstack_alignment_mask(h) ((h)->alignment_mask)
+
+#define obstack_init(h) \
+ _obstack_begin ((h), 0, 0, \
+ (void *(*) ()) obstack_chunk_alloc, (void (*) ()) obstack_chunk_free)
+
+#define obstack_begin(h, size) \
+ _obstack_begin ((h), (size), 0, \
+ (void *(*) ()) obstack_chunk_alloc, (void (*) ()) obstack_chunk_free)
+
+#define obstack_specify_allocation(h, size, alignment, chunkfun, freefun) \
+ _obstack_begin ((h), (size), (alignment), \
+ (void *(*) ()) (chunkfun), (void (*) ()) (freefun))
+
+#define obstack_specify_allocation_with_arg(h, size, alignment, chunkfun, freefun, arg) \
+ _obstack_begin_1 ((h), (size), (alignment), \
+ (void *(*) ()) (chunkfun), (void (*) ()) (freefun), (arg))
+
+#define obstack_chunkfun(h, newchunkfun) \
+ ((h) -> chunkfun = (struct _obstack_chunk *(*)()) (newchunkfun))
+
+#define obstack_freefun(h, newfreefun) \
+ ((h) -> freefun = (void (*)()) (newfreefun))
+
+#define obstack_1grow_fast(h,achar) (*((h)->next_free)++ = achar)
+
+#define obstack_blank_fast(h,n) ((h)->next_free += (n))
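+
+/* For illustration only (not compiled): the `fast' macros above omit
+   the bounds check, so check obstack_room first and fall back to the
+   checked macro when the current chunk is full.  grow_string is a
+   hypothetical helper.  */
+#if 0
+static void
+grow_string (struct obstack *stk, char *p, int len)
+{
+  while (len-- > 0)
+    {
+      if (obstack_room (stk) > 0)
+        obstack_1grow_fast (stk, *p);  /* no check; room is known */
+      else
+        obstack_1grow (stk, *p);       /* checked; may allocate a chunk */
+      p++;
+    }
+}
+#endif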
+
+#if defined (__GNUC__) && defined (__STDC__)
+#if __GNUC__ < 2
+#define __extension__
+#endif
+
+/* For GNU C, if not -traditional,
+ we can define these macros to compute all args only once
+ without using a global variable.
+ Also, we can avoid using the `temp' slot, to make faster code. */
+
+#define obstack_object_size(OBSTACK) \
+ __extension__ \
+ ({ struct obstack *__o = (OBSTACK); \
+ __o->alloc_failed ? 0 : \
+ (unsigned) (__o->next_free - __o->object_base); })
+
+#define obstack_room(OBSTACK) \
+ __extension__ \
+ ({ struct obstack *__o = (OBSTACK); \
+ (unsigned) (__o->chunk_limit - __o->next_free); })
+
+#define obstack_grow(OBSTACK,where,length) \
+__extension__ \
+({ struct obstack *__o = (OBSTACK); \
+ int __len = (length); \
+ if (__o->next_free + __len > __o->chunk_limit) \
+ _obstack_newchunk (__o, __len); \
+ if (!__o->alloc_failed) \
+ { \
+ bcopy ((char *) (where), __o->next_free, __len); \
+ __o->next_free += __len; \
+ } \
+ (void) 0; })
+
+#define obstack_grow0(OBSTACK,where,length) \
+__extension__ \
+({ struct obstack *__o = (OBSTACK); \
+ int __len = (length); \
+ if (__o->next_free + __len + 1 > __o->chunk_limit) \
+ _obstack_newchunk (__o, __len + 1); \
+ if (!__o->alloc_failed) \
+ { \
+ bcopy ((char *) (where), __o->next_free, __len); \
+ __o->next_free += __len; \
+ *(__o->next_free)++ = 0; \
+ } \
+ (void) 0; })
+
+#define obstack_1grow(OBSTACK,datum) \
+__extension__ \
+({ struct obstack *__o = (OBSTACK); \
+ if (__o->next_free + 1 > __o->chunk_limit) \
+ _obstack_newchunk (__o, 1); \
+ if (!__o->alloc_failed) \
+ *(__o->next_free)++ = (datum); \
+ (void) 0; })
+
+/* These assume that the obstack alignment is good enough for pointers or ints,
+ and that the data added so far to the current object
+ shares that much alignment. */
+
+#define obstack_ptr_grow(OBSTACK,datum) \
+__extension__ \
+({ struct obstack *__o = (OBSTACK); \
+ if (__o->next_free + sizeof (void *) > __o->chunk_limit) \
+ _obstack_newchunk (__o, sizeof (void *)); \
+ if (!__o->alloc_failed) \
+ *((void **)__o->next_free)++ = ((void *)datum); \
+ (void) 0; })
+
+#define obstack_int_grow(OBSTACK,datum) \
+__extension__ \
+({ struct obstack *__o = (OBSTACK); \
+ if (__o->next_free + sizeof (int) > __o->chunk_limit) \
+ _obstack_newchunk (__o, sizeof (int)); \
+ if (!__o->alloc_failed) \
+ *((int *)__o->next_free)++ = ((int)datum); \
+ (void) 0; })
+
+#define obstack_ptr_grow_fast(h,aptr) (*((void **)(h)->next_free)++ = (void *)aptr)
+#define obstack_int_grow_fast(h,aint) (*((int *)(h)->next_free)++ = (int)aint)
+
+#define obstack_blank(OBSTACK,length) \
+__extension__ \
+({ struct obstack *__o = (OBSTACK); \
+ int __len = (length); \
+ if (__o->chunk_limit - __o->next_free < __len) \
+ _obstack_newchunk (__o, __len); \
+ if (!__o->alloc_failed) \
+ __o->next_free += __len; \
+ (void) 0; })
+
+#define obstack_alloc(OBSTACK,length) \
+__extension__ \
+({ struct obstack *__h = (OBSTACK); \
+ obstack_blank (__h, (length)); \
+ obstack_finish (__h); })
+
+#define obstack_copy(OBSTACK,where,length) \
+__extension__ \
+({ struct obstack *__h = (OBSTACK); \
+ obstack_grow (__h, (where), (length)); \
+ obstack_finish (__h); })
+
+#define obstack_copy0(OBSTACK,where,length) \
+__extension__ \
+({ struct obstack *__h = (OBSTACK); \
+ obstack_grow0 (__h, (where), (length)); \
+ obstack_finish (__h); })
+
+/* The local variable is named __o1 to avoid a name conflict
+ when obstack_blank is called. */
+#define obstack_finish(OBSTACK) \
+__extension__ \
+({ struct obstack *__o1 = (OBSTACK); \
+ void *value; \
+ if (__o1->alloc_failed) \
+ value = 0; \
+ else \
+ { \
+ value = (void *) __o1->object_base; \
+ if (__o1->next_free == value) \
+ __o1->maybe_empty_object = 1; \
+ __o1->next_free \
+ = __INT_TO_PTR ((__PTR_TO_INT (__o1->next_free)+__o1->alignment_mask)\
+ & ~ (__o1->alignment_mask)); \
+ if (__o1->next_free - (char *)__o1->chunk \
+ > __o1->chunk_limit - (char *)__o1->chunk) \
+ __o1->next_free = __o1->chunk_limit; \
+ __o1->object_base = __o1->next_free; \
+ } \
+ value; })
+
+#define obstack_free(OBSTACK, OBJ) \
+__extension__ \
+({ struct obstack *__o = (OBSTACK); \
+ void *__obj = (OBJ); \
+ if (__obj > (void *)__o->chunk && __obj < (void *)__o->chunk_limit) \
+ __o->next_free = __o->object_base = __obj; \
+ else (obstack_free) (__o, __obj); })
+
+#else /* not __GNUC__ or not __STDC__ */
+
+#define obstack_object_size(h) \
+ (unsigned) ((h)->alloc_failed ? 0 : (h)->next_free - (h)->object_base)
+
+#define obstack_room(h) \
+ (unsigned) ((h)->chunk_limit - (h)->next_free)
+
+/* Note that the call to _obstack_newchunk is enclosed in (..., 0)
+ so that we can avoid having void expressions
+ in the arms of the conditional expression.
+ Casting the third operand to void was tried before,
+ but some compilers won't accept it. */
+
+#define obstack_grow(h,where,length) \
+( (h)->temp = (length), \
+ (((h)->next_free + (h)->temp > (h)->chunk_limit) \
+ ? (_obstack_newchunk ((h), (h)->temp), 0) : 0), \
+ ((h)->alloc_failed ? 0 : \
+ (bcopy ((char *) (where), (h)->next_free, (h)->temp), \
+ (h)->next_free += (h)->temp)))
+
+#define obstack_grow0(h,where,length) \
+( (h)->temp = (length), \
+ (((h)->next_free + (h)->temp + 1 > (h)->chunk_limit) \
+ ? (_obstack_newchunk ((h), (h)->temp + 1), 0) : 0), \
+ ((h)->alloc_failed ? 0 : \
+ (bcopy ((char *) (where), (h)->next_free, (h)->temp), \
+ (h)->next_free += (h)->temp, \
+ *((h)->next_free)++ = 0)))
+
+#define obstack_1grow(h,datum) \
+( (((h)->next_free + 1 > (h)->chunk_limit) \
+ ? (_obstack_newchunk ((h), 1), 0) : 0), \
+ ((h)->alloc_failed ? 0 : \
+ (*((h)->next_free)++ = (datum))))
+
+#define obstack_ptr_grow(h,datum) \
+( (((h)->next_free + sizeof (char *) > (h)->chunk_limit) \
+ ? (_obstack_newchunk ((h), sizeof (char *)), 0) : 0), \
+ ((h)->alloc_failed ? 0 : \
+ (*((char **)(((h)->next_free+=sizeof(char *))-sizeof(char *))) = ((char *)datum))))
+
+#define obstack_int_grow(h,datum) \
+( (((h)->next_free + sizeof (int) > (h)->chunk_limit) \
+ ? (_obstack_newchunk ((h), sizeof (int)), 0) : 0), \
+ ((h)->alloc_failed ? 0 : \
+ (*((int *)(((h)->next_free+=sizeof(int))-sizeof(int))) = ((int)datum))))
+
+#define obstack_ptr_grow_fast(h,aptr) (*((char **)(h)->next_free)++ = (char *)aptr)
+#define obstack_int_grow_fast(h,aint) (*((int *)(h)->next_free)++ = (int)aint)
+
+#define obstack_blank(h,length) \
+( (h)->temp = (length), \
+ (((h)->chunk_limit - (h)->next_free < (h)->temp) \
+ ? (_obstack_newchunk ((h), (h)->temp), 0) : 0), \
+ ((h)->alloc_failed ? 0 : \
+ ((h)->next_free += (h)->temp)))
+
+#define obstack_alloc(h,length) \
+ (obstack_blank ((h), (length)), obstack_finish ((h)))
+
+#define obstack_copy(h,where,length) \
+ (obstack_grow ((h), (where), (length)), obstack_finish ((h)))
+
+#define obstack_copy0(h,where,length) \
+ (obstack_grow0 ((h), (where), (length)), obstack_finish ((h)))
+
+#define obstack_finish(h) \
+( (h)->alloc_failed ? 0 : \
+ (((h)->next_free == (h)->object_base \
+ ? (((h)->maybe_empty_object = 1), 0) \
+ : 0), \
+ (h)->temp = __PTR_TO_INT ((h)->object_base), \
+ (h)->next_free \
+ = __INT_TO_PTR ((__PTR_TO_INT ((h)->next_free)+(h)->alignment_mask) \
+ & ~ ((h)->alignment_mask)), \
+ (((h)->next_free - (char *)(h)->chunk \
+ > (h)->chunk_limit - (char *)(h)->chunk) \
+ ? ((h)->next_free = (h)->chunk_limit) : 0), \
+ (h)->object_base = (h)->next_free, \
+ __INT_TO_PTR ((h)->temp)))
+
+#ifdef __STDC__
+#define obstack_free(h,obj) \
+( (h)->temp = (char *)(obj) - (char *) (h)->chunk, \
+ (((h)->temp > 0 && (h)->temp < (h)->chunk_limit - (char *) (h)->chunk)\
+ ? (int) ((h)->next_free = (h)->object_base \
+ = (h)->temp + (char *) (h)->chunk) \
+ : (((obstack_free) ((h), (h)->temp + (char *) (h)->chunk), 0), 0)))
+#else
+#define obstack_free(h,obj) \
+( (h)->temp = (char *)(obj) - (char *) (h)->chunk, \
+ (((h)->temp > 0 && (h)->temp < (h)->chunk_limit - (char *) (h)->chunk)\
+ ? (int) ((h)->next_free = (h)->object_base \
+ = (h)->temp + (char *) (h)->chunk) \
+ : (_obstack_free ((h), (h)->temp + (char *) (h)->chunk), 0)))
+#endif
+
+#endif /* not __GNUC__ or not __STDC__ */
+
+#endif /* not __OBSTACK_H__ */
diff --git a/gnu/usr.bin/cc/include/output.h b/gnu/usr.bin/cc/include/output.h
new file mode 100644
index 0000000..ebd0a2f
--- /dev/null
+++ b/gnu/usr.bin/cc/include/output.h
@@ -0,0 +1,241 @@
+/* Declarations for insn-output.c. These functions are defined in recog.c,
+ final.c, and varasm.c.
+ Copyright (C) 1987, 1991, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Initialize data in final at the beginning of a compilation. */
+extern void init_final PROTO((char *));
+
+/* Called at end of source file,
+ to output the block-profiling table for this entire compilation. */
+extern void end_final PROTO((char *));
+
+/* Enable APP processing of subsequent output.
+ Used before the output from an `asm' statement. */
+extern void app_enable PROTO((void));
+
+/* Disable APP processing of subsequent output.
+ Called from varasm.c before most kinds of output. */
+extern void app_disable PROTO((void));
+
+/* Return the number of slots filled in the current
+ delayed branch sequence (we don't count the insn needing the
+ delay slot). Zero if not in a delayed branch sequence. */
+extern int dbr_sequence_length PROTO((void));
+
+/* Indicate that branch shortening hasn't yet been done. */
+extern void init_insn_lengths PROTO((void));
+
+/* Obtain the current length of an insn. If branch shortening has been done,
+ get its actual length. Otherwise, get its maximum length. */
+extern int get_attr_length PROTO((rtx));
+
+/* Make a pass over all insns and compute their actual lengths by shortening
+ any branches of variable length if possible. */
+extern void shorten_branches PROTO((rtx));
+
+/* Output assembler code for the start of a function,
+ and initialize some of the variables in this file
+ for the new function. The label for the function and associated
+ assembler pseudo-ops have already been output in
+ `assemble_start_function'. */
+extern void final_start_function STDIO_PROTO((rtx, FILE *, int));
+
+/* Output assembler code for the end of a function.
+ For clarity, args are same as those of `final_start_function'
+ even though not all of them are needed. */
+extern void final_end_function STDIO_PROTO((rtx, FILE *, int));
+
+/* Output assembler code for some insns: all or part of a function. */
+extern void final STDIO_PROTO((rtx, FILE *, int, int));
+
+/* The final scan for one insn, INSN. Args are same as in `final', except
+ that INSN is the insn being scanned. Value returned is the next insn to
+ be scanned. */
+extern rtx final_scan_insn STDIO_PROTO((rtx, FILE *, int, int, int));
+
+/* Replace a SUBREG with a REG or a MEM, based on the thing it is a
+ subreg of. */
+extern rtx alter_subreg PROTO((rtx));
+
+/* Report inconsistency between the assembler template and the operands.
+ In an `asm', it's the user's fault; otherwise, the compiler's fault. */
+extern void output_operand_lossage PROTO((char *));
+
+/* Output a string of assembler code, substituting insn operands.
+ Defined in final.c. */
+extern void output_asm_insn PROTO((char *, rtx *));
+
+/* Output a LABEL_REF, or a bare CODE_LABEL, as an assembler symbol. */
+extern void output_asm_label PROTO((rtx));
+
+/* Print a memory reference operand for address X
+ using machine-dependent assembler syntax. */
+extern void output_address PROTO((rtx));
+
+/* Print an integer constant expression in assembler syntax.
+ Addition and subtraction are the only arithmetic
+ that may appear in these expressions. */
+extern void output_addr_const STDIO_PROTO((FILE *, rtx));
+
+/* Output a string of assembler code, substituting numbers, strings
+ and fixed syntactic prefixes. */
+extern void asm_fprintf STDIO_PROTO(PVPROTO((FILE *file,
+ char *p, ...)));
+
+/* Split up a CONST_DOUBLE or integer constant rtx into two rtx's for single
+ words. */
+extern void split_double PROTO((rtx, rtx *, rtx *));
+
+/* Return nonzero if this function has no function calls. */
+extern int leaf_function_p PROTO((void));
+
+/* Return 1 if this function uses only the registers that can be
+ safely renumbered. */
+extern int only_leaf_regs_used PROTO((void));
+
+/* Scan IN_RTX and its subexpressions, and renumber all regs into those
+ available in leaf functions. */
+extern void leaf_renumber_regs_insn PROTO((rtx));
+
+/* Output a name (as found inside a symbol_ref) in assembler syntax. */
+extern void assemble_name STDIO_PROTO((FILE *, char *));
+
+/* When outputting assembler code, indicates which alternative
+ of the constraints was actually satisfied. */
+extern int which_alternative;
+
+/* When outputting delayed branch sequences, this rtx holds the
+ sequence being output. It is null when no delayed branch
+ sequence is being output, so it can be used as a test in the
+ insn output code.
+
+ This variable is defined in final.c. */
+extern rtx final_sequence;
+
+/* Number of bytes of args popped by function being compiled on its return.
+ Zero if no bytes are to be popped.
+ May affect compilation of return insn or of function epilogue. */
+
+extern int current_function_pops_args;
+
+/* Nonzero if function being compiled needs to be given an address
+ where the value should be stored. */
+
+extern int current_function_returns_struct;
+
+/* Nonzero if function being compiled needs to
+ return the address of where it has put a structure value. */
+
+extern int current_function_returns_pcc_struct;
+
+/* Nonzero if function being compiled needs to be passed a static chain. */
+
+extern int current_function_needs_context;
+
+/* Nonzero if function being compiled can call setjmp. */
+
+extern int current_function_calls_setjmp;
+
+/* Nonzero if function being compiled can call longjmp. */
+
+extern int current_function_calls_longjmp;
+
+/* Nonzero if function being compiled can call alloca,
+ either as a subroutine or builtin. */
+
+extern int current_function_calls_alloca;
+
+/* Nonzero if function being compiled receives nonlocal gotos
+ from nested functions. */
+
+extern int current_function_has_nonlocal_label;
+
+/* Nonzero if function being compiled contains nested functions. */
+
+extern int current_function_contains_functions;
+
+/* Nonzero if the current function returns a pointer type. */
+
+extern int current_function_returns_pointer;
+
+/* If function's args have a fixed size, this is that size, in bytes.
+ Otherwise, it is -1.
+ May affect compilation of return insn or of function epilogue. */
+
+extern int current_function_args_size;
+
+/* # bytes the prologue should push and pretend that the caller pushed them.
+ The prologue must do this, but only if parms can be passed in registers. */
+
+extern int current_function_pretend_args_size;
+
+/* # of bytes of outgoing arguments required to be pushed by the prologue.
+ If this is non-zero, it means that ACCUMULATE_OUTGOING_ARGS was defined
+ and no stack adjusts will be done on function calls. */
+
+extern int current_function_outgoing_args_size;
+
+/* Nonzero if current function uses varargs.h or equivalent.
+ Zero for functions that use stdarg.h. */
+
+extern int current_function_varargs;
+
+/* Quantities of various kinds of registers
+ used for the current function's args. */
+
+extern CUMULATIVE_ARGS current_function_args_info;
+
+/* Name of function now being compiled. */
+
+extern char *current_function_name;
+
+/* If non-zero, an RTL expression for that location at which the current
+ function returns its result. Usually equal to
+ DECL_RTL (DECL_RESULT (current_function_decl)). */
+
+extern rtx current_function_return_rtx;
+
+/* If some insns can be deferred to the delay slots of the epilogue, the
+ delay list for them is recorded here. */
+
+extern rtx current_function_epilogue_delay_list;
+
+/* Nonzero means generate position-independent code.
+ This is not fully implemented yet. */
+
+extern int flag_pic;
+
+/* This is nonzero if the current function uses pic_offset_table_rtx. */
+extern int current_function_uses_pic_offset_table;
+
+/* This is nonzero if the current function uses the constant pool. */
+extern int current_function_uses_const_pool;
+
+/* The line number of the beginning of the current function.
+ sdbout.c needs this so that it can output relative linenumbers. */
+
+#ifdef SDB_DEBUGGING_INFO /* Avoid undef sym in certain broken linkers. */
+extern int sdb_begin_function_line;
+#endif
+
+/* File in which assembler code is being written. */
+
+#ifdef BUFSIZ
+extern FILE *asm_out_file;
+#endif
diff --git a/gnu/usr.bin/cc/include/pcp.h b/gnu/usr.bin/cc/include/pcp.h
new file mode 100644
index 0000000..0b86a87
--- /dev/null
+++ b/gnu/usr.bin/cc/include/pcp.h
@@ -0,0 +1,100 @@
+/* pcp.h -- Describes the format of a precompiled file
+ Copyright (C) 1990 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+
+/* Structure allocated for every string in a precompiled file */
+typedef struct stringdef STRINGDEF;
+struct stringdef
+{
+ U_CHAR *contents; /* String to include */
+ int len; /* Its length */
+ int writeflag; /* Whether we write this */
+ int lineno; /* Linenumber of source file */
+ U_CHAR *filename; /* Name of source file */
+ STRINGDEF *chain; /* Global list of strings in natural order */
+ int output_mark; /* Where in the output this goes */
+};
+
+typedef struct keydef KEYDEF;
+struct keydef
+{
+ STRINGDEF *str;
+ KEYDEF *chain;
+};
+
+/* Format: */
+/* A precompiled file starts with a series of #define and #undef
+ statements:
+ #define MAC DEF --- Indicates MAC must be defined with defn DEF
+ #define MAC --- Indicates MAC must be defined with any defn
+ #undef MAC --- Indicates MAC cannot be defined
+
+These preconditions must be true for a precompiled file to be used.
+The preconditions section is null terminated. */
+
+/* Then, there is a four byte number (in network byte order) which
+   indicates the number of strings the file contains. */
+
+/* Each string contains a STRINGDEF structure.  The only component of
+   the STRINGDEF structure which is used is the lineno field, which
+   should hold the line number in the original header file.
+   Then follows the string, followed by a null.  Then comes a four
+   byte number (again, in network byte order) indicating the number
+   of keys for this string.  Each key is a KEYDEF structure, with
+   irrelevant contents, followed by the null-terminated string. */
+
+/* If the number of keys is 0, then there are no keys for the string;
+   in other words, the string will never be included.  If the number
+   of keys is -1, this is a special flag indicating there are no keys
+   in the file, and the string is mandatory (that is, it must be
+   included regardless in the included output). */
+
+/* A file, then, looks like this:
+
+ Precondition 1
+ Precondition 2
+ .
+ .
+ .
+ <NUL>
+ Number of strings
+ STRINGDEF
+ String . . . <NUL>
+ Number of keys
+ KEYDEF
+ Key . . . <NUL>
+ KEYDEF
+ Key . . . <NUL>
+ .
+ .
+ .
+ STRINGDEF
+ String . . . <NUL>
+ Number of keys
+ KEYDEF
+ Key . . . <NUL>
+ .
+ .
+ .
+ .
+ .
+ .
+
+*/
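+
+/* For illustration only (not compiled): reading one of the four-byte
+   network-byte-order counts described above.  pcp_read_count is a
+   hypothetical helper and assumes <stdio.h>.  */
+#if 0
+static long
+pcp_read_count (FILE *f)
+{
+  unsigned char b[4];
+  if (fread ((char *) b, 1, 4, f) != 4)
+    abort ();  /* error handling omitted in this sketch */
+  /* Network byte order is big-endian: most significant byte first.  */
+  return ((long) b[0] << 24) | ((long) b[1] << 16)
+         | ((long) b[2] << 8) | (long) b[3];
+}
+#endif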
diff --git a/gnu/usr.bin/cc/include/real.h b/gnu/usr.bin/cc/include/real.h
new file mode 100644
index 0000000..34d6d67
--- /dev/null
+++ b/gnu/usr.bin/cc/include/real.h
@@ -0,0 +1,437 @@
+/* Front-end tree definitions for GNU compiler.
+ Copyright (C) 1989, 1991, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifndef REAL_H_INCLUDED
+#define REAL_H_INCLUDED
+
+/* Define codes for all the float formats that we know of. */
+#define UNKNOWN_FLOAT_FORMAT 0
+#define IEEE_FLOAT_FORMAT 1
+#define VAX_FLOAT_FORMAT 2
+#define IBM_FLOAT_FORMAT 3
+
+/* Default to IEEE float if not specified. Nearly all machines use it. */
+
+#ifndef TARGET_FLOAT_FORMAT
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+#endif
+
+#ifndef HOST_FLOAT_FORMAT
+#define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+#endif
+
+#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+#define REAL_INFINITY
+#endif
+
+/* If FLOAT_WORDS_BIG_ENDIAN and HOST_FLOAT_WORDS_BIG_ENDIAN are not defined
+ in the header files, then this implies the word-endianness is the same as
+ for integers. */
+
+/* This is defined 0 or 1, like WORDS_BIG_ENDIAN. */
+#ifndef FLOAT_WORDS_BIG_ENDIAN
+#define FLOAT_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
+#endif
+
+/* This is defined 0 or 1, unlike HOST_WORDS_BIG_ENDIAN. */
+#ifndef HOST_FLOAT_WORDS_BIG_ENDIAN
+#ifdef HOST_WORDS_BIG_ENDIAN
+#define HOST_FLOAT_WORDS_BIG_ENDIAN 1
+#else
+#define HOST_FLOAT_WORDS_BIG_ENDIAN 0
+#endif
+#endif
+
+/* Defining REAL_ARITHMETIC invokes a floating point emulator
+ that can produce a target machine format differing by more
+ than just endian-ness from the host's format. The emulator
+ is also used to support extended real XFmode. */
+#ifndef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 64
+#endif
+#if (LONG_DOUBLE_TYPE_SIZE == 96) || (LONG_DOUBLE_TYPE_SIZE == 128)
+#ifndef REAL_ARITHMETIC
+#define REAL_ARITHMETIC
+#endif
+#endif
+#ifdef REAL_ARITHMETIC
+/* **** Start of software floating point emulator interface macros **** */
+
+/* Support 80-bit extended real XFmode if LONG_DOUBLE_TYPE_SIZE
+ has been defined to be 96 in the tm.h machine file. */
+#if (LONG_DOUBLE_TYPE_SIZE == 96)
+#define REAL_IS_NOT_DOUBLE
+#define REAL_ARITHMETIC
+typedef struct {
+ HOST_WIDE_INT r[(11 + sizeof (HOST_WIDE_INT))/(sizeof (HOST_WIDE_INT))];
+} realvaluetype;
+#define REAL_VALUE_TYPE realvaluetype
+
+#else /* no XFmode support */
+
+#if (LONG_DOUBLE_TYPE_SIZE == 128)
+
+#define REAL_IS_NOT_DOUBLE
+#define REAL_ARITHMETIC
+typedef struct {
+ HOST_WIDE_INT r[(19 + sizeof (HOST_WIDE_INT))/(sizeof (HOST_WIDE_INT))];
+} realvaluetype;
+#define REAL_VALUE_TYPE realvaluetype
+
+#else /* not TFmode */
+
+#if HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
+/* If no XFmode support, then a REAL_VALUE_TYPE is 64 bits wide
+ but it is not necessarily a host machine double. */
+#define REAL_IS_NOT_DOUBLE
+typedef struct {
+ HOST_WIDE_INT r[(7 + sizeof (HOST_WIDE_INT))/(sizeof (HOST_WIDE_INT))];
+} realvaluetype;
+#define REAL_VALUE_TYPE realvaluetype
+#else
+/* If host and target formats are compatible, then a REAL_VALUE_TYPE
+ is actually a host machine double. */
+#define REAL_VALUE_TYPE double
+#endif
+
+#endif /* no TFmode support */
+#endif /* no XFmode support */
+
+extern int significand_size PROTO((enum machine_mode));
+
+/* If emulation has been enabled by defining REAL_ARITHMETIC or by
+ setting LONG_DOUBLE_TYPE_SIZE to 96 or 128, then define macros so that
+ they invoke emulator functions. This will succeed only if the machine
+ files have been updated to use these macros in place of any
+ references to host machine `double' or `float' types. */
+#ifdef REAL_ARITHMETIC
+#undef REAL_ARITHMETIC
+#define REAL_ARITHMETIC(value, code, d1, d2) \
+ earith (&(value), (code), &(d1), &(d2))
+
+/* Declare functions in real.c. */
+extern void earith PROTO((REAL_VALUE_TYPE *, int,
+ REAL_VALUE_TYPE *, REAL_VALUE_TYPE *));
+extern REAL_VALUE_TYPE etrunci PROTO((REAL_VALUE_TYPE));
+extern REAL_VALUE_TYPE etruncui PROTO((REAL_VALUE_TYPE));
+extern REAL_VALUE_TYPE ereal_atof PROTO((char *, enum machine_mode));
+extern REAL_VALUE_TYPE ereal_negate PROTO((REAL_VALUE_TYPE));
+extern HOST_WIDE_INT efixi PROTO((REAL_VALUE_TYPE));
+extern unsigned HOST_WIDE_INT efixui PROTO((REAL_VALUE_TYPE));
+extern void ereal_from_int PROTO((REAL_VALUE_TYPE *,
+ HOST_WIDE_INT, HOST_WIDE_INT));
+extern void ereal_from_uint PROTO((REAL_VALUE_TYPE *,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT));
+extern void ereal_to_int PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *,
+ REAL_VALUE_TYPE));
+extern REAL_VALUE_TYPE ereal_ldexp PROTO((REAL_VALUE_TYPE, int));
+
+extern void etartdouble PROTO((REAL_VALUE_TYPE, long *));
+extern void etarldouble PROTO((REAL_VALUE_TYPE, long *));
+extern void etardouble PROTO((REAL_VALUE_TYPE, long *));
+extern long etarsingle PROTO((REAL_VALUE_TYPE));
+extern void ereal_to_decimal PROTO((REAL_VALUE_TYPE, char *));
+extern int ereal_cmp PROTO((REAL_VALUE_TYPE, REAL_VALUE_TYPE));
+extern int ereal_isneg PROTO((REAL_VALUE_TYPE));
+extern REAL_VALUE_TYPE ereal_from_float PROTO((HOST_WIDE_INT));
+extern REAL_VALUE_TYPE ereal_from_double PROTO((HOST_WIDE_INT *));
+
+#define REAL_VALUES_EQUAL(x, y) (ereal_cmp ((x), (y)) == 0)
+/* true if x < y : */
+#define REAL_VALUES_LESS(x, y) (ereal_cmp ((x), (y)) == -1)
+#define REAL_VALUE_LDEXP(x, n) ereal_ldexp (x, n)
+
+/* These return REAL_VALUE_TYPE: */
+#define REAL_VALUE_RNDZINT(x) (etrunci (x))
+#define REAL_VALUE_UNSIGNED_RNDZINT(x) (etruncui (x))
+extern REAL_VALUE_TYPE real_value_truncate ();
+#define REAL_VALUE_TRUNCATE(mode, x) real_value_truncate (mode, x)
+
+/* These return HOST_WIDE_INT: */
+/* Convert a floating-point value to integer, rounding toward zero. */
+#define REAL_VALUE_FIX(x) (efixi (x))
+/* Convert a floating-point value to unsigned integer, rounding
+ toward zero. */
+#define REAL_VALUE_UNSIGNED_FIX(x) (efixui (x))
+
+#define REAL_VALUE_ATOF ereal_atof
+#define REAL_VALUE_NEGATE ereal_negate
+
+#define REAL_VALUE_MINUS_ZERO(x) \
+ ((ereal_cmp (x, dconst0) == 0) && (ereal_isneg (x) != 0 ))
+
+#define REAL_VALUE_TO_INT ereal_to_int
+
+/* Here the cast to HOST_WIDE_INT sign-extends arguments such as ~0. */
+#define REAL_VALUE_FROM_INT(d, lo, hi) \
+ ereal_from_int (&d, (HOST_WIDE_INT) (lo), (HOST_WIDE_INT) (hi))
+
+#define REAL_VALUE_FROM_UNSIGNED_INT(d, lo, hi) (ereal_from_uint (&d, lo, hi))
+
+/* IN is a REAL_VALUE_TYPE. OUT is an array of longs. */
+#if LONG_DOUBLE_TYPE_SIZE == 96
+#define REAL_VALUE_TO_TARGET_LONG_DOUBLE(IN, OUT) (etarldouble ((IN), (OUT)))
+#else
+#define REAL_VALUE_TO_TARGET_LONG_DOUBLE(IN, OUT) (etartdouble ((IN), (OUT)))
+#endif
+#define REAL_VALUE_TO_TARGET_DOUBLE(IN, OUT) (etardouble ((IN), (OUT)))
+
+/* IN is a REAL_VALUE_TYPE. OUT is a long. */
+#define REAL_VALUE_TO_TARGET_SINGLE(IN, OUT) ((OUT) = etarsingle ((IN)))
+
+/* d is an array of HOST_WIDE_INT that holds a double precision
+ value in the target computer's floating point format. */
+#define REAL_VALUE_FROM_TARGET_DOUBLE(d) (ereal_from_double (d))
+
+/* f is a HOST_WIDE_INT containing a single precision target float value. */
+#define REAL_VALUE_FROM_TARGET_SINGLE(f) (ereal_from_float (f))
+
+/* Conversions to decimal ASCII string. */
+#define REAL_VALUE_TO_DECIMAL(r, fmt, s) (ereal_to_decimal (r, s))
+
+#endif /* REAL_ARITHMETIC defined */
+
+/* **** End of software floating point emulator interface macros **** */
+#else /* No XFmode or TFmode and REAL_ARITHMETIC not defined */
+
+/* old interface */
+#ifdef REAL_ARITHMETIC
+/* Defining REAL_IS_NOT_DOUBLE breaks certain initializations
+ when REAL_ARITHMETIC etc. are not defined. */
+
+/* Now see if the host and target machines use the same format.
+ If not, define REAL_IS_NOT_DOUBLE (even if we end up representing
+ reals as doubles because we have no better way in this cross compiler.)
+ This turns off various optimizations that can happen when we know the
+ compiler's float format matches the target's float format.
+ */
+#if HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
+#define REAL_IS_NOT_DOUBLE
+#ifndef REAL_VALUE_TYPE
+typedef struct {
+ HOST_WIDE_INT r[sizeof (double)/sizeof (HOST_WIDE_INT)];
+ } realvaluetype;
+#define REAL_VALUE_TYPE realvaluetype
+#endif /* no REAL_VALUE_TYPE */
+#endif /* formats differ */
+#endif /* REAL_ARITHMETIC */
+
+#endif /* emulator not used */
+
+/* If we are not cross-compiling, use a `double' to represent the
+ floating-point value. Otherwise, use some other type
+ (probably a struct containing an array of longs). */
+#ifndef REAL_VALUE_TYPE
+#define REAL_VALUE_TYPE double
+#else
+#define REAL_IS_NOT_DOUBLE
+#endif
+
+#if HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT
+
+/* Convert a type `double' value in host format first to a type `float'
+ value in host format and then to a single type `long' value which
+ is the bitwise equivalent of the `float' value. */
+#ifndef REAL_VALUE_TO_TARGET_SINGLE
+#define REAL_VALUE_TO_TARGET_SINGLE(IN, OUT) \
+do { float f = (float) (IN); \
+ (OUT) = *(long *) &f; \
+ } while (0)
+#endif
+
+/* Convert a type `double' value in host format to a pair of type `long'
+ values which is its bitwise equivalent, but put the two words into
+ proper word order for the target. */
+#ifndef REAL_VALUE_TO_TARGET_DOUBLE
+#if HOST_FLOAT_WORDS_BIG_ENDIAN == FLOAT_WORDS_BIG_ENDIAN
+#define REAL_VALUE_TO_TARGET_DOUBLE(IN, OUT) \
+do { REAL_VALUE_TYPE in = (IN); /* Make sure it's not in a register. */\
+ (OUT)[0] = ((long *) &in)[0]; \
+ (OUT)[1] = ((long *) &in)[1]; \
+ } while (0)
+#else
+#define REAL_VALUE_TO_TARGET_DOUBLE(IN, OUT) \
+do { REAL_VALUE_TYPE in = (IN); /* Make sure it's not in a register. */\
+ (OUT)[1] = ((long *) &in)[0]; \
+ (OUT)[0] = ((long *) &in)[1]; \
+ } while (0)
+#endif
+#endif
+#endif /* HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT */
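+
+/* For illustration only (not compiled): emitting a host double in the
+   target's word order through the macro above.  emit_double is a
+   hypothetical helper.  */
+#if 0
+static void
+emit_double (REAL_VALUE_TYPE r)
+{
+  long words[2];
+  REAL_VALUE_TO_TARGET_DOUBLE (r, words);
+  /* words[0] is now the first target word and words[1] the second,
+     whatever the host's own word order may be.  */
+}
+#endif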
+
+/* In this configuration, double and long double are the same. */
+#ifndef REAL_VALUE_TO_TARGET_LONG_DOUBLE
+#define REAL_VALUE_TO_TARGET_LONG_DOUBLE(a, b) REAL_VALUE_TO_TARGET_DOUBLE (a, b)
+#endif
+
+/* Compare two floating-point values for equality. */
+#ifndef REAL_VALUES_EQUAL
+#define REAL_VALUES_EQUAL(x, y) ((x) == (y))
+#endif
+
+/* Compare two floating-point values for less than. */
+#ifndef REAL_VALUES_LESS
+#define REAL_VALUES_LESS(x, y) ((x) < (y))
+#endif
+
+/* Truncate toward zero to an integer floating-point value. */
+#ifndef REAL_VALUE_RNDZINT
+#define REAL_VALUE_RNDZINT(x) ((double) ((int) (x)))
+#endif
+
+/* Truncate toward zero to an unsigned integer floating-point value. */
+#ifndef REAL_VALUE_UNSIGNED_RNDZINT
+#define REAL_VALUE_UNSIGNED_RNDZINT(x) ((double) ((unsigned int) (x)))
+#endif
+
+/* Convert a floating-point value to integer, rounding toward zero. */
+#ifndef REAL_VALUE_FIX
+#define REAL_VALUE_FIX(x) ((int) (x))
+#endif
+
+/* Convert a floating-point value to unsigned integer, rounding
+ toward zero. */
+#ifndef REAL_VALUE_UNSIGNED_FIX
+#define REAL_VALUE_UNSIGNED_FIX(x) ((unsigned int) (x))
+#endif
+
+/* Scale X by Y powers of 2. */
+#ifndef REAL_VALUE_LDEXP
+#define REAL_VALUE_LDEXP(x, y) ldexp (x, y)
+extern double ldexp ();
+#endif
+
+/* Convert the string X to a floating-point value. */
+#ifndef REAL_VALUE_ATOF
+#if 1
+/* Use real.c to convert decimal numbers to binary, ... */
+REAL_VALUE_TYPE ereal_atof ();
+#define REAL_VALUE_ATOF(x, s) ereal_atof (x, s)
+#else
+/* ... or, if you like the host computer's atof, go ahead and use it: */
+#define REAL_VALUE_ATOF(x, s) atof (x)
+#if defined (MIPSEL) || defined (MIPSEB)
+/* The MIPS compiler can't handle parens around the function name.
+   This problem does *not* appear to be connected with any
+   macro definition for atof; there does not seem to be one. */
+extern double atof ();
+#else
+extern double (atof) ();
+#endif
+#endif
+#endif
+
+/* Negate the floating-point value X. */
+#ifndef REAL_VALUE_NEGATE
+#define REAL_VALUE_NEGATE(x) (- (x))
+#endif
+
+/* Truncate the floating-point value X to mode MODE. This is correct only
+ for the most common case where the host and target have objects of the same
+ size and where `float' is SFmode. */
+
+/* Don't use REAL_VALUE_TRUNCATE directly--always call real_value_truncate. */
+extern REAL_VALUE_TYPE real_value_truncate ();
+
+#ifndef REAL_VALUE_TRUNCATE
+#define REAL_VALUE_TRUNCATE(mode, x) \
+ (GET_MODE_BITSIZE (mode) == sizeof (float) * HOST_BITS_PER_CHAR \
+ ? (float) (x) : (x))
+#endif
+
+/* Determine whether a floating-point value X is infinite. */
+#ifndef REAL_VALUE_ISINF
+#define REAL_VALUE_ISINF(x) (target_isinf (x))
+#endif
+
+/* Determine whether a floating-point value X is a NaN. */
+#ifndef REAL_VALUE_ISNAN
+#define REAL_VALUE_ISNAN(x) (target_isnan (x))
+#endif
+
+/* Determine whether a floating-point value X is negative. */
+#ifndef REAL_VALUE_NEGATIVE
+#define REAL_VALUE_NEGATIVE(x) (target_negative (x))
+#endif
+
+/* Determine whether a floating-point value X is minus 0. */
+#ifndef REAL_VALUE_MINUS_ZERO
+#define REAL_VALUE_MINUS_ZERO(x) ((x) == 0 && REAL_VALUE_NEGATIVE (x))
+#endif
+
+/* Constant real values 0, 1, 2, and -1. */
+
+extern REAL_VALUE_TYPE dconst0;
+extern REAL_VALUE_TYPE dconst1;
+extern REAL_VALUE_TYPE dconst2;
+extern REAL_VALUE_TYPE dconstm1;
+
+/* Union type used for extracting real values from CONST_DOUBLEs
+ or putting them in. */
+
+union real_extract
+{
+ REAL_VALUE_TYPE d;
+ HOST_WIDE_INT i[sizeof (REAL_VALUE_TYPE) / sizeof (HOST_WIDE_INT)];
+};
+
+/* For a CONST_DOUBLE:
+ The usual two ints that hold the value.
+ For a DImode, that is all there are;
+ and CONST_DOUBLE_LOW is the low-order word and ..._HIGH the high-order.
+ For a float, the number of ints varies,
+ and CONST_DOUBLE_LOW is the one that should come first *in memory*.
+ So use &CONST_DOUBLE_LOW(r) as the address of an array of ints. */
+#define CONST_DOUBLE_LOW(r) XWINT (r, 2)
+#define CONST_DOUBLE_HIGH(r) XWINT (r, 3)
+
+/* Link for chain of all CONST_DOUBLEs in use in current function. */
+#define CONST_DOUBLE_CHAIN(r) XEXP (r, 1)
+/* The MEM which represents this CONST_DOUBLE's value in memory,
+ or const0_rtx if no MEM has been made for it yet,
+ or cc0_rtx if it is not on the chain. */
+#define CONST_DOUBLE_MEM(r) XEXP (r, 0)
+
+/* Function to return a real value (not a tree node)
+ from a given integer constant. */
+REAL_VALUE_TYPE real_value_from_int_cst ();
+
+/* Given a CONST_DOUBLE in FROM, store into TO the value it represents. */
+
+#define REAL_VALUE_FROM_CONST_DOUBLE(to, from) \
+do { union real_extract u; \
+ bcopy ((char *) &CONST_DOUBLE_LOW ((from)), (char *) &u, sizeof u); \
+ to = u.d; } while (0)
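+
+/* For illustration only (not compiled): extracting the value of a
+   CONST_DOUBLE rtx with the macro above.  const_double_value is a
+   hypothetical helper.  */
+#if 0
+static REAL_VALUE_TYPE
+const_double_value (rtx x)
+{
+  REAL_VALUE_TYPE d;
+  REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+  return d;
+}
+#endif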
+
+/* Return a CONST_DOUBLE with value R and mode M. */
+
+#define CONST_DOUBLE_FROM_REAL_VALUE(r, m) immed_real_const_1 (r, m)
+extern struct rtx_def *immed_real_const_1 PROTO((REAL_VALUE_TYPE,
+ enum machine_mode));
+
+
+/* Convert a floating point value `r', that can be interpreted
+ as a host machine float or double, to a decimal ASCII string `s'
+ using printf format string `fmt'. */
+#ifndef REAL_VALUE_TO_DECIMAL
+#define REAL_VALUE_TO_DECIMAL(r, fmt, s) (sprintf (s, fmt, r))
+#endif
+
+#endif /* Not REAL_H_INCLUDED */
diff --git a/gnu/usr.bin/cc/include/recog.h b/gnu/usr.bin/cc/include/recog.h
new file mode 100644
index 0000000..8fc2efb
--- /dev/null
+++ b/gnu/usr.bin/cc/include/recog.h
@@ -0,0 +1,120 @@
+/* Declarations for interface to insn recognizer and insn-output.c.
+ Copyright (C) 1987 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Add prototype support. */
+#ifndef PROTO
+#if defined (USE_PROTOTYPES) ? USE_PROTOTYPES : defined (__STDC__)
+#define PROTO(ARGS) ARGS
+#else
+#define PROTO(ARGS) ()
+#endif
+#endif
+
+/* Recognize an insn and return its insn-code,
+ which is the sequence number of the DEFINE_INSN that it matches.
+ If the insn does not match, return -1. */
+
+extern int recog_memoized PROTO((rtx));
+
+/* Determine whether a proposed change to an insn or MEM would make it
+   invalid, and make the change if not. */
+
+extern int validate_change PROTO((rtx, rtx *, rtx, int));
+
+/* Apply a group of changes if valid. */
+
+extern int apply_change_group PROTO((void));
+
+/* Return the number of changes so far in the current group. */
+
+extern int num_validated_changes PROTO((void));
+
+/* Retract some changes. */
+
+extern void cancel_changes PROTO((int));
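+
+/* For illustration only (not compiled): the usual change-group
+   protocol built from the entry points above.  Changes are queued
+   with IN_GROUP nonzero, then committed all-or-nothing.
+   swap_operands is a hypothetical helper.  */
+#if 0
+static int
+swap_operands (rtx insn, rtx *op0, rtx *op1)
+{
+  rtx tem = *op0;
+  validate_change (insn, op0, *op1, 1);  /* queue; do not apply yet */
+  validate_change (insn, op1, tem, 1);
+  /* Apply both changes if the result is a valid insn; otherwise
+     neither is made and zero is returned.  */
+  return apply_change_group ();
+}
+#endif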
+
+/* Nonzero means volatile operands are recognized. */
+
+extern int volatile_ok;
+
+/* Extract the operands from an insn that has been recognized. */
+
+extern void insn_extract PROTO((rtx));
+
+/* The following vectors hold the results from insn_extract. */
+
+/* Indexed by N, gives value of operand N. */
+extern rtx recog_operand[];
+
+/* Indexed by N, gives location where operand N was found. */
+extern rtx *recog_operand_loc[];
+
+/* Indexed by N, gives location where the Nth duplicate-appearance of
+ an operand was found. This is something that matched MATCH_DUP. */
+extern rtx *recog_dup_loc[];
+
+/* Indexed by N, gives the operand number that was duplicated in the
+ Nth duplicate-appearance of an operand. */
+extern char recog_dup_num[];
+
+#ifndef __STDC__
+#ifndef const
+#define const
+#endif
+#endif
+
+/* Access the output function for CODE. */
+
+#define OUT_FCN(CODE) (*insn_outfun[(int) (CODE)])
+
+/* Tables defined in insn-output.c that give information about
+ each insn-code value. */
+
+/* These are vectors indexed by insn-code. Details in genoutput.c. */
+
+extern char *const insn_template[];
+
+extern char *(*const insn_outfun[]) ();
+
+extern const int insn_n_operands[];
+
+extern const int insn_n_dups[];
+
+/* Indexed by insn code number, gives # of constraint alternatives. */
+
+extern const int insn_n_alternatives[];
+
+/* These are two-dimensional arrays indexed first by the insn-code
+ and second by the operand number. Details in genoutput.c. */
+
+#ifdef REGISTER_CONSTRAINTS /* Avoid undef sym in certain broken linkers. */
+extern char *const insn_operand_constraint[][MAX_RECOG_OPERANDS];
+#endif
+
+#ifndef REGISTER_CONSTRAINTS /* Avoid undef sym in certain broken linkers. */
+extern const char insn_operand_address_p[][MAX_RECOG_OPERANDS];
+#endif
+
+extern const enum machine_mode insn_operand_mode[][MAX_RECOG_OPERANDS];
+
+extern const char insn_operand_strict_low[][MAX_RECOG_OPERANDS];
+
+extern int (*const insn_operand_predicate[][MAX_RECOG_OPERANDS]) ();
+
+extern char *insn_name[];
diff --git a/gnu/usr.bin/cc/include/regs.h b/gnu/usr.bin/cc/include/regs.h
new file mode 100644
index 0000000..47463bf
--- /dev/null
+++ b/gnu/usr.bin/cc/include/regs.h
@@ -0,0 +1,168 @@
+/* Define per-register tables for data flow info and register allocation.
+ Copyright (C) 1987, 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+
+#define REG_BYTES(R) mode_size[(int) GET_MODE (R)]
+
+/* Get the number of consecutive hard regs required to hold the REG rtx R.
+ When something may be an explicit hard reg, REG_SIZE is the only
+ valid way to get this value. You cannot get it from the regno. */
+
+#define REG_SIZE(R) \
+ ((mode_size[(int) GET_MODE (R)] + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Maximum register number used in this function, plus one. */
+
+extern int max_regno;
+
+/* Maximum number of SCRATCH rtx's in each block of this function. */
+
+extern int max_scratch;
+
+/* Indexed by n, gives number of times (REG n) is used or set.
+ References within loops may be counted multiple times. */
+
+extern int *reg_n_refs;
+
+/* Indexed by n, gives number of times (REG n) is set. */
+
+extern short *reg_n_sets;
+
+/* Indexed by N, gives number of insns in which register N dies.
+ Note that if register N is live around loops, it can die
+ in transitions between basic blocks, and that is not counted here.
+ So this is only a reliable indicator of how many regions of life there are
+ for registers that are contained in one basic block. */
+
+extern short *reg_n_deaths;
+
+/* Get the number of consecutive words required to hold pseudo-reg N. */
+
+#define PSEUDO_REGNO_SIZE(N) \
+ ((GET_MODE_SIZE (PSEUDO_REGNO_MODE (N)) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* Get the number of bytes required to hold pseudo-reg N. */
+
+#define PSEUDO_REGNO_BYTES(N) \
+ GET_MODE_SIZE (PSEUDO_REGNO_MODE (N))
+
+/* Get the machine mode of pseudo-reg N. */
+
+#define PSEUDO_REGNO_MODE(N) GET_MODE (regno_reg_rtx[N])
+
+/* Indexed by N, gives number of CALL_INSNS across which (REG n) is live. */
+
+extern int *reg_n_calls_crossed;
+
+/* Total number of instructions at which (REG n) is live.
+ The larger this is, the less priority (REG n) gets for
+ allocation in a hard register (in global-alloc).
+ This is set in flow.c and remains valid for the rest of the compilation
+ of the function; it is used to control register allocation.
+
+ local-alloc.c may alter this number to change the priority.
+
+ Negative values are special.
+ -1 is used to mark a pseudo reg which has a constant or memory equivalent
+ and is used infrequently enough that it should not get a hard register.
+ -2 is used to mark a pseudo reg for a parameter, when a frame pointer
+ is not required. global.c makes an allocno for this but does
+ not try to assign a hard register to it. */
+
+extern int *reg_live_length;
+
+/* Vector of substitutions of register numbers,
+ used to map pseudo regs into hardware regs. */
+
+extern short *reg_renumber;
+
+/* Vector indexed by hardware reg
+ saying whether that reg is ever used. */
+
+extern char regs_ever_live[FIRST_PSEUDO_REGISTER];
+
+/* Vector indexed by hardware reg giving its name. */
+
+extern char *reg_names[FIRST_PSEUDO_REGISTER];
+
+/* For each hard register, the widest mode object that it can contain.
+ This will be a MODE_INT mode if the register can hold integers. Otherwise
+ it will be a MODE_FLOAT or a MODE_CC mode, whichever is valid for the
+ register. */
+
+extern enum machine_mode reg_raw_mode[FIRST_PSEUDO_REGISTER];
+
+/* Vector indexed by regno; gives uid of first insn using that reg.
+ This is computed by reg_scan for use by cse and loop.
+ It is sometimes adjusted for subsequent changes during loop,
+ but not adjusted by cse even if cse invalidates it. */
+
+extern int *regno_first_uid;
+
+/* Vector indexed by regno; gives uid of last insn using that reg.
+ This is computed by reg_scan for use by cse and loop.
+ It is sometimes adjusted for subsequent changes during loop,
+ but not adjusted by cse even if cse invalidates it.
+ This is harmless since cse won't scan through a loop end. */
+
+extern int *regno_last_uid;
+
+/* Similar, but includes insns that mention the reg in their notes. */
+
+extern int *regno_last_note_uid;
+
+/* Vector indexed by regno; contains 1 for a register that is considered a pointer.
+ Reloading, etc. will use a pointer register rather than a non-pointer
+ as the base register in an address, when there is a choice of two regs. */
+
+extern char *regno_pointer_flag;
+#define REGNO_POINTER_FLAG(REGNO) regno_pointer_flag[REGNO]
+
+/* List made of EXPR_LIST rtx's which gives pairs of pseudo registers
+ that have to go in the same hard reg. */
+extern rtx regs_may_share;
+
+/* Vector mapping pseudo regno into the REG rtx for that register.
+ This is computed by reg_scan. */
+
+extern rtx *regno_reg_rtx;
+
+/* Flag set by local-alloc or global-alloc if they decide to allocate
+ something in a call-clobbered register. */
+
+extern int caller_save_needed;
+
+/* Predicate to decide whether to give a hard reg to a pseudo which
+ is referenced REFS times and would need to be saved and restored
+ around a call CALLS times. */
+
+#ifndef CALLER_SAVE_PROFITABLE
+#define CALLER_SAVE_PROFITABLE(REFS, CALLS) (4 * (CALLS) < (REFS))
+#endif
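+
+/* For instance, with the default above a pseudo referenced 10 times
+   that crosses 2 calls is still given a hard reg (4*2 = 8 < 10),
+   while one crossing 3 calls is not (4*3 = 12 >= 10).  */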
+
+/* Allocated in local_alloc. */
+
+/* A list of SCRATCH rtl allocated by local-alloc. */
+extern rtx *scratch_list;
+/* The basic block in which each SCRATCH is used. */
+extern int *scratch_block;
+/* The length of the arrays pointed to by scratch_block and scratch_list. */
+extern int scratch_list_length;
diff --git a/gnu/usr.bin/cc/include/reload.h b/gnu/usr.bin/cc/include/reload.h
new file mode 100644
index 0000000..4478b6a
--- /dev/null
+++ b/gnu/usr.bin/cc/include/reload.h
@@ -0,0 +1,235 @@
+/* Communication between reload.c and reload1.c.
+ Copyright (C) 1987, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* If secondary reloads are the same for inputs and outputs, define those
+ macros here. */
+
+#ifdef SECONDARY_RELOAD_CLASS
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ SECONDARY_RELOAD_CLASS (CLASS, MODE, X)
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ SECONDARY_RELOAD_CLASS (CLASS, MODE, X)
+#endif
+
+/* If either macro is defined, show that we need secondary reloads. */
+#if defined(SECONDARY_INPUT_RELOAD_CLASS) || defined(SECONDARY_OUTPUT_RELOAD_CLASS)
+#define HAVE_SECONDARY_RELOADS
+#endif
+
+/* See reload.c and reload1.c for comments on these variables. */
+
+/* Maximum number of reloads we can need. */
+#define MAX_RELOADS (2 * MAX_RECOG_OPERANDS * (MAX_REGS_PER_ADDRESS + 1))
+
+extern rtx reload_in[MAX_RELOADS];
+extern rtx reload_out[MAX_RELOADS];
+extern rtx reload_in_reg[MAX_RELOADS];
+extern enum reg_class reload_reg_class[MAX_RELOADS];
+extern enum machine_mode reload_inmode[MAX_RELOADS];
+extern enum machine_mode reload_outmode[MAX_RELOADS];
+extern char reload_optional[MAX_RELOADS];
+extern int reload_inc[MAX_RELOADS];
+extern int reload_opnum[MAX_RELOADS];
+extern int reload_secondary_p[MAX_RELOADS];
+extern int reload_secondary_in_reload[MAX_RELOADS];
+extern int reload_secondary_out_reload[MAX_RELOADS];
+#ifdef MAX_INSN_CODE
+extern enum insn_code reload_secondary_in_icode[MAX_RELOADS];
+extern enum insn_code reload_secondary_out_icode[MAX_RELOADS];
+#endif
+extern int n_reloads;
+
+extern rtx reload_reg_rtx[MAX_RELOADS];
+
+/* Encode the usage of a reload. The following codes are supported:
+
+ RELOAD_FOR_INPUT reload of an input operand
+ RELOAD_FOR_OUTPUT likewise, for output
+ RELOAD_FOR_INSN a reload that must not conflict with anything
+ used in the insn, but may conflict with
+ something used before or after the insn
+ RELOAD_FOR_INPUT_ADDRESS reload for parts of the address of an object
+ that is an input reload
+ RELOAD_FOR_OUTPUT_ADDRESS likewise, for output reload
+ RELOAD_FOR_OPERAND_ADDRESS reload for the address of a non-reloaded
+ operand; these don't conflict with
+ any other addresses.
+ RELOAD_FOR_OPADDR_ADDR reload needed for RELOAD_FOR_OPERAND_ADDRESS
+ reloads; usually secondary reloads
+ RELOAD_OTHER none of the above, usually multiple uses
+ RELOAD_FOR_OTHER_ADDRESS reload for part of the address of an input
+ that is marked RELOAD_OTHER.
+
+ This used to be "enum reload_when_needed" but some debuggers have trouble
+ with an enum tag and variable of the same name. */
+
+enum reload_type
+{
+ RELOAD_FOR_INPUT, RELOAD_FOR_OUTPUT, RELOAD_FOR_INSN,
+ RELOAD_FOR_INPUT_ADDRESS, RELOAD_FOR_OUTPUT_ADDRESS,
+ RELOAD_FOR_OPERAND_ADDRESS, RELOAD_FOR_OPADDR_ADDR,
+ RELOAD_OTHER, RELOAD_FOR_OTHER_ADDRESS
+};
+
+extern enum reload_type reload_when_needed[MAX_RELOADS];
+
+extern rtx *reg_equiv_constant;
+extern rtx *reg_equiv_memory_loc;
+extern rtx *reg_equiv_address;
+extern rtx *reg_equiv_mem;
+
+/* All the "earlyclobber" operands of the current insn
+ are recorded here. */
+extern int n_earlyclobbers;
+extern rtx reload_earlyclobbers[MAX_RECOG_OPERANDS];
+
+/* Save the number of operands. */
+extern int reload_n_operands;
+
+/* First uid used by insns created by reload in this function.
+ Used in find_equiv_reg. */
+extern int reload_first_uid;
+
+/* Nonzero if indirect addressing is supported when the innermost MEM is
+ of the form (MEM (SYMBOL_REF sym)). It is assumed that the level to
+ which these are valid is the same as spill_indirect_levels, above. */
+
+extern char indirect_symref_ok;
+
+/* Nonzero if an address (plus (reg frame_pointer) (reg ...)) is valid. */
+extern char double_reg_address_ok;
+
+#ifdef MAX_INSN_CODE
+/* These arrays record the insn_code of insns that may be needed to
+ perform input and output reloads of special objects. They provide a
+ place to pass a scratch register. */
+extern enum insn_code reload_in_optab[];
+extern enum insn_code reload_out_optab[];
+#endif
+
+/* Functions from reload.c: */
+
+/* Return a memory location that will be used to copy X in mode MODE.
+ If we haven't already made a location for this mode in this insn,
+ call find_reloads_address on the location being returned. */
+extern rtx get_secondary_mem PROTO((rtx, enum machine_mode,
+ int, enum reload_type));
+
+/* Clear any secondary memory locations we've made. */
+extern void clear_secondary_mem PROTO((void));
+
+/* Transfer all replacements that used to be in reload FROM to be in
+ reload TO. */
+extern void transfer_replacements PROTO((int, int));
+
+/* Return 1 if ADDR is a valid memory address for mode MODE,
+ and check that each pseudo reg has the proper kind of
+ hard reg. */
+extern int strict_memory_address_p PROTO((enum machine_mode, rtx));
+
+/* Like rtx_equal_p except that it allows a REG and a SUBREG to match
+ if they are the same hard reg, and has special hacks for
+ autoincrement and autodecrement. */
+extern int operands_match_p PROTO((rtx, rtx));
+
+/* Return the number of times character C occurs in string S. */
+extern int n_occurrences PROTO((int, char *));
+
+/* Return 1 if altering OP will not modify the value of CLOBBER. */
+extern int safe_from_earlyclobber PROTO((rtx, rtx));
+
+/* Search the body of INSN for values that need reloading and record them
+ with push_reload. REPLACE nonzero means record also where the values occur
+ so that subst_reloads can be used. */
+extern void find_reloads PROTO((rtx, int, int, int, short *));
+
+/* Compute the sum of X and Y, making canonicalizations assumed in an
+ address, namely: sum constant integers, surround the sum of two
+ constants with a CONST, put the constant as the second operand, and
+ group the constant on the outermost sum. */
+extern rtx form_sum PROTO((rtx, rtx));
+
+/* Substitute into the current INSN the registers into which we have reloaded
+ the things that need reloading. */
+extern void subst_reloads PROTO((void));
+
+/* Make a copy of any replacements being done into X and move those copies
+ to locations in Y, a copy of X. We only look at the highest level of
+ the RTL. */
+extern void copy_replacements PROTO((rtx, rtx));
+
+/* If LOC was scheduled to be replaced by something, return the replacement.
+ Otherwise, return *LOC. */
+extern rtx find_replacement PROTO((rtx *));
+
+/* Return nonzero if register in range [REGNO, ENDREGNO)
+ appears either explicitly or implicitly in X
+ other than being stored into. */
+extern int refers_to_regno_for_reload_p PROTO((int, int, rtx, rtx *));
+
+/* Nonzero if modifying X will affect IN. */
+extern int reg_overlap_mentioned_for_reload_p PROTO((rtx, rtx));
+
+/* Return nonzero if anything in X contains a MEM. Look also for pseudo
+ registers. */
+extern int refers_to_mem_for_reload_p PROTO((rtx));
+
+/* Check the insns before INSN to see if there is a suitable register
+ containing the same value as GOAL. */
+extern rtx find_equiv_reg PROTO((rtx, rtx, enum reg_class, int, short *,
+ int, enum machine_mode));
+
+/* Return 1 if register REGNO is the subject of a clobber in insn INSN. */
+extern int regno_clobbered_p PROTO((int, rtx));
+
+
+/* Functions in reload1.c: */
+
+/* Initialize the reload pass once per compilation. */
+extern void init_reload PROTO((void));
+
+/* The reload pass itself. */
+extern int reload STDIO_PROTO((rtx, int, FILE *));
+
+/* Mark the slots in regs_ever_live for the hard regs
+ used by pseudo-reg number REGNO. */
+extern void mark_home_live PROTO((int));
+
+/* Scan X and replace any eliminable registers (such as fp) with a
+ replacement (such as sp), plus an offset. */
+extern rtx eliminate_regs PROTO((rtx, enum machine_mode, rtx));
+
+/* Emit code to perform an input reload of IN to RELOADREG. IN is from
+ operand OPNUM with reload type TYPE. */
+extern rtx gen_input_reload PROTO((rtx, rtx, int, enum reload_type));
+
+/* Functions in caller-save.c: */
+
+/* Initialize for caller-save. */
+extern void init_caller_save PROTO((void));
+
+/* Initialize save areas by showing that we haven't allocated any yet. */
+extern void init_save_areas PROTO((void));
+
+/* Allocate save areas for any hard registers that might need saving. */
+extern int setup_save_areas PROTO((int *));
+
+/* Find the places where hard regs are live across calls and save them. */
+extern void save_call_clobbered_regs PROTO((enum machine_mode));
diff --git a/gnu/usr.bin/cc/include/rtl.def b/gnu/usr.bin/cc/include/rtl.def
new file mode 100644
index 0000000..686ad21
--- /dev/null
+++ b/gnu/usr.bin/cc/include/rtl.def
@@ -0,0 +1,764 @@
+/* This file contains the definitions and documentation for the
+ Register Transfer Expressions (rtx's) that make up the
+ Register Transfer Language (rtl) used in the Back End of the GNU compiler.
+ Copyright (C) 1987, 1988, 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Expression definitions and descriptions for all targets are in this file.
+ Some will not be used for some targets.
+
+ The fields in the cpp macro call "DEF_RTL_EXPR()"
+ are used to create declarations in the C source of the compiler.
+
+ The fields are:
+
+ 1. The internal name of the rtx used in the C source.
+ It is a tag in the enumeration "enum rtx_code" defined in "rtl.h".
+ By convention these are in UPPER_CASE.
+
+ 2. The name of the rtx in the external ASCII format read by
+ read_rtx(), and printed by print_rtx().
+ These names are stored in rtx_name[].
+ By convention these are the internal (field 1) names in lower_case.
+
+   3. The print format and type of each rtx->fld[] (field) in this rtx.
+      These formats are stored in rtx_format[].
+      The meaning of the formats is documented in front of this array in rtl.c.
+
+ 4. The class of the rtx. These are stored in rtx_class and are accessed
+ via the GET_RTX_CLASS macro. They are defined as follows:
+
+ "o" an rtx code that can be used to represent an object (e.g, REG, MEM)
+ "<" an rtx code for a comparison (e.g, EQ, NE, LT)
+ "1" an rtx code for a unary arithmetic expression (e.g, NEG, NOT)
+ "c" an rtx code for a commutative binary operation (e.g,, PLUS, MULT)
+ "3" an rtx code for a non-bitfield three input operation (IF_THEN_ELSE)
+ "2" an rtx code for a non-commutative binary operation (e.g., MINUS, DIV)
+ "b" an rtx code for a bit-field operation (ZERO_EXTRACT, SIGN_EXTRACT)
+ "i" an rtx code for a machine insn (INSN, JUMP_INSN, CALL_INSN)
+ "m" an rtx code for something that matches in insns (e.g, MATCH_DUP)
+ "x" everything else
+
+ */
+
+/* ---------------------------------------------------------------------
+ Expressions (and "meta" expressions) used for structuring the
+ rtl representation of a program.
+ --------------------------------------------------------------------- */
+
+/* an expression code name unknown to the reader */
+DEF_RTL_EXPR(UNKNOWN, "UnKnown", "*", 'x')
+
+/* (NIL) is used by rtl reader and printer to represent a null pointer. */
+
+DEF_RTL_EXPR(NIL, "nil", "*", 'x')
+
+/* ---------------------------------------------------------------------
+ Expressions used in constructing lists.
+ --------------------------------------------------------------------- */
+
+/* a linked list of expressions */
+DEF_RTL_EXPR(EXPR_LIST, "expr_list", "ee", 'x')
+
+/* a linked list of instructions.
+ The insns are represented in print by their uids. */
+DEF_RTL_EXPR(INSN_LIST, "insn_list", "ue", 'x')
+
+/* ----------------------------------------------------------------------
+ Expression types for machine descriptions.
+ These do not appear in actual rtl code in the compiler.
+ ---------------------------------------------------------------------- */
+
+/* Appears only in machine descriptions.
+ Means use the function named by the second arg (the string)
+ as a predicate; if matched, store the structure that was matched
+ in the operand table at index specified by the first arg (the integer).
+ If the second arg is the null string, the structure is just stored.
+
+   A third string argument tells the register allocator what restrictions
+   apply to where the operand can be allocated.
+
+   If the target needs no restriction on any instruction, this field should
+   be the null string.
+
+   The string may be prefixed with:
+ '=' to indicate the operand is only written to.
+ '+' to indicate the operand is both read and written to.
+
+ Each character in the string represents an allocatable class for an operand.
+ 'g' indicates the operand can be any valid class.
+ 'i' indicates the operand can be immediate (in the instruction) data.
+ 'r' indicates the operand can be in a register.
+ 'm' indicates the operand can be in memory.
+ 'o' a subset of the 'm' class. Those memory addressing modes that
+ can be offset at compile time (have a constant added to them).
+
+ Other characters indicate target dependent operand classes and
+ are described in each target's machine description.
+
+ For instructions with more than one operand, sets of classes can be
+ separated by a comma to indicate the appropriate multi-operand constraints.
+ There must be a 1 to 1 correspondence between these sets of classes in
+ all operands for an instruction.
+ */
+DEF_RTL_EXPR(MATCH_OPERAND, "match_operand", "iss", 'm')
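+
+/* For example (an illustrative sketch, not from any real machine
+   description), an operand 0 that must be a general register and is
+   only written to would be matched by
+
+     (match_operand:SI 0 "register_operand" "=r")
+
+   where "register_operand" is the predicate (the second arg) and "=r"
+   the constraint string (the third arg) described above.  */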
+
+/* Appears only in machine descriptions.
+ Means match a SCRATCH or a register. When used to generate rtl, a
+ SCRATCH is generated. As for MATCH_OPERAND, the mode specifies
+ the desired mode and the first argument is the operand number.
+ The second argument is the constraint. */
+DEF_RTL_EXPR(MATCH_SCRATCH, "match_scratch", "is", 'm')
+
+/* Appears only in machine descriptions.
+ Means match only something equal to what is stored in the operand table
+ at the index specified by the argument. */
+DEF_RTL_EXPR(MATCH_DUP, "match_dup", "i", 'm')
+
+/* Appears only in machine descriptions.
+ Means apply a predicate, AND match recursively the operands of the rtx.
+ Operand 0 is the operand-number, as in match_operand.
+ Operand 1 is a predicate to apply (as a string, a function name).
+ Operand 2 is a vector of expressions, each of which must match
+ one subexpression of the rtx this construct is matching. */
+DEF_RTL_EXPR(MATCH_OPERATOR, "match_operator", "isE", 'm')
+
+/* Appears only in machine descriptions.
+ Means to match a PARALLEL of arbitrary length. The predicate is applied
+ to the PARALLEL and the initial expressions in the PARALLEL are matched.
+ Operand 0 is the operand-number, as in match_operand.
+ Operand 1 is a predicate to apply to the PARALLEL.
+ Operand 2 is a vector of expressions, each of which must match the
+ corresponding element in the PARALLEL. */
+DEF_RTL_EXPR(MATCH_PARALLEL, "match_parallel", "isE", 'm')
+
+/* Appears only in machine descriptions.
+ Means match only something equal to what is stored in the operand table
+ at the index specified by the argument. For MATCH_OPERATOR. */
+DEF_RTL_EXPR(MATCH_OP_DUP, "match_op_dup", "iE", 'm')
+
+/* Appears only in machine descriptions.
+ Means match only something equal to what is stored in the operand table
+ at the index specified by the argument. For MATCH_PARALLEL. */
+DEF_RTL_EXPR(MATCH_PAR_DUP, "match_par_dup", "iE", 'm')
+
+/* Appears only in machine descriptions.
+ Defines the pattern for one kind of instruction.
+ Operand:
+ 0: names this instruction.
+ If the name is the null string, the instruction is in the
+ machine description just to be recognized, and will never be emitted by
+ the tree to rtl expander.
+ 1: is the pattern.
+ 2: is a string which is a C expression
+ giving an additional condition for recognizing this pattern.
+ A null string means no extra condition.
+ 3: is the action to execute if this pattern is matched.
+ If this assembler code template starts with a * then it is a fragment of
+ C code to run to decide on a template to use. Otherwise, it is the
+ template to use.
+ 4: optionally, a vector of attributes for this insn.
+ */
+DEF_RTL_EXPR(DEFINE_INSN, "define_insn", "sEssV", 'x')
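+
+/* A complete example of the fields (illustrative only; loosely modeled
+   on a two-address machine, not copied from any real description):
+
+     (define_insn "addsi3"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+             (plus:SI (match_operand:SI 1 "register_operand" "0")
+                      (match_operand:SI 2 "general_operand" "g")))]
+       ""
+       "add %2,%0")
+
+   The optional attribute vector (operand 4) is omitted here.  */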
+
+/* Definition of a peephole optimization.
+ 1st operand: vector of insn patterns to match
+ 2nd operand: C expression that must be true
+ 3rd operand: template or C code to produce assembler output.
+   4th operand: optionally, a vector of attributes for this insn.
+ */
+DEF_RTL_EXPR(DEFINE_PEEPHOLE, "define_peephole", "EssV", 'x')
+
+/* Definition of a split operation.
+ 1st operand: insn pattern to match
+ 2nd operand: C expression that must be true
+ 3rd operand: vector of insn patterns to place into a SEQUENCE
+ 4th operand: optionally, some C code to execute before generating the
+ insns. This might, for example, create some RTX's and store them in
+ elements of `recog_operand' for use by the vector of insn-patterns.
+ (`operands' is an alias here for `recog_operand'). */
+DEF_RTL_EXPR(DEFINE_SPLIT, "define_split", "EsES", 'x')
+
+/* Definition of a combiner pattern.
+ Operands not defined yet. */
+DEF_RTL_EXPR(DEFINE_COMBINE, "define_combine", "Ess", 'x')
+
+/* Define how to generate multiple insns for a standard insn name.
+ 1st operand: the insn name.
+ 2nd operand: vector of insn-patterns.
+ Use match_operand to substitute an element of `recog_operand'.
+ 3rd operand: C expression that must be true for this to be available.
+ This may not test any operands.
+ 4th operand: Extra C code to execute before generating the insns.
+ This might, for example, create some RTX's and store them in
+ elements of `recog_operand' for use by the vector of insn-patterns.
+ (`operands' is an alias here for `recog_operand'). */
+DEF_RTL_EXPR(DEFINE_EXPAND, "define_expand", "sEss", 'x')
+
+/* Define a requirement for delay slots.
+ 1st operand: Condition involving insn attributes that, if true,
+ indicates that the insn requires the number of delay slots
+ shown.
+   2nd operand: Vector whose length is three times the number of delay
+ slots required.
+ Each entry gives three conditions, each involving attributes.
+ The first must be true for an insn to occupy that delay slot
+ location. The second is true for all insns that can be
+ annulled if the branch is true and the third is true for all
+ insns that can be annulled if the branch is false.
+
+ Multiple DEFINE_DELAYs may be present. They indicate differing
+ requirements for delay slots. */
+DEF_RTL_EXPR(DEFINE_DELAY, "define_delay", "eE", 'x')
+
+/* Define a set of insns that requires a function unit. This means that
+ these insns produce their result after a delay and that there may be
+ restrictions on the number of insns of this type that can be scheduled
+ simultaneously.
+
+ More than one DEFINE_FUNCTION_UNIT can be specified for a function unit.
+ Each gives a set of operations and associated delays. The first three
+ operands must be the same for each operation for the same function unit.
+
+ All delays are specified in cycles.
+
+ 1st operand: Name of function unit (mostly for documentation)
+ 2nd operand: Number of identical function units in CPU
+ 3rd operand: Total number of simultaneous insns that can execute on this
+ function unit; 0 if unlimited.
+   4th operand: Condition involving insn attributes that, if true, specifies
+ those insns that this expression applies to.
+ 5th operand: Constant delay after which insn result will be
+ available.
+ 6th operand: Delay until next insn can be scheduled on the function unit
+ executing this operation. The meaning depends on whether or
+ not the next operand is supplied.
+ 7th operand: If this operand is not specified, the 6th operand gives the
+ number of cycles after the instruction matching the 4th
+ operand begins using the function unit until a subsequent
+ insn can begin. A value of zero should be used for a
+ unit with no issue constraints. If only one operation can
+		be executed at a time and the unit is busy for the entire time,
+		the 3rd operand should be specified as 1, the 6th operand
+		should be specified as 0, and the 7th operand should not
+ be specified.
+
+ If this operand is specified, it is a list of attribute
+ expressions. If an insn for which any of these expressions
+ is true is currently executing on the function unit, the
+ issue delay will be given by the 6th operand. Otherwise,
+ the insn can be immediately scheduled (subject to the limit
+ on the number of simultaneous operations executing on the
+ unit.) */
+DEF_RTL_EXPR(DEFINE_FUNCTION_UNIT, "define_function_unit", "siieiiV", 'x')
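+
+/* A hypothetical example matching the "siieiiV" format above: a single
+   memory unit on which only one load executes at a time, which stays
+   busy for the whole operation, and whose result is ready after two
+   cycles:
+
+     (define_function_unit "memory" 1 1 (eq_attr "type" "load") 2 0)
+ */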
+
+/* Define attribute computation for `asm' instructions. */
+DEF_RTL_EXPR(DEFINE_ASM_ATTRIBUTES, "define_asm_attributes", "V", 'x' )
+
+/* SEQUENCE appears in the result of a `gen_...' function
+ for a DEFINE_EXPAND that wants to make several insns.
+ Its elements are the bodies of the insns that should be made.
+ `emit_insn' takes the SEQUENCE apart and makes separate insns. */
+DEF_RTL_EXPR(SEQUENCE, "sequence", "E", 'x')
+
+/* Refers to the address of its argument.
+ This appears only in machine descriptions, indicating that
+ any expression that would be acceptable as the operand of MEM
+ should be matched. */
+DEF_RTL_EXPR(ADDRESS, "address", "e", 'm')
+
+/* ----------------------------------------------------------------------
+ Expressions used for insn attributes. These also do not appear in
+ actual rtl code in the compiler.
+ ---------------------------------------------------------------------- */
+
+/* Definition of an insn attribute.
+ 1st operand: name of the attribute
+ 2nd operand: comma-separated list of possible attribute values
+ 3rd operand: expression for the default value of the attribute. */
+DEF_RTL_EXPR(DEFINE_ATTR, "define_attr", "sse", 'x')
+
+/* Marker for the name of an attribute. */
+DEF_RTL_EXPR(ATTR, "attr", "s", 'x')
+
+/* For use in the last (optional) operand of DEFINE_INSN or DEFINE_PEEPHOLE and
+ in DEFINE_ASM_INSN to specify an attribute to assign to insns matching that
+ pattern.
+
+ (set_attr "name" "value") is equivalent to
+ (set (attr "name") (const_string "value")) */
+DEF_RTL_EXPR(SET_ATTR, "set_attr", "ss", 'x')
+
+/* In the last operand of DEFINE_INSN and DEFINE_PEEPHOLE, this can be used to
+ specify that attribute values are to be assigned according to the
+ alternative matched.
+
+ The following three expressions are equivalent:
+
+ (set (attr "att") (cond [(eq_attrq "alternative" "1") (const_string "a1")
+ (eq_attrq "alternative" "2") (const_string "a2")]
+ (const_string "a3")))
+ (set_attr_alternative "att" [(const_string "a1") (const_string "a2")
+ (const_string "a3")])
+ (set_attr "att" "a1,a2,a3")
+ */
+DEF_RTL_EXPR(SET_ATTR_ALTERNATIVE, "set_attr_alternative", "sE", 'x')
+
+/* A conditional expression true if the value of the specified attribute of
+ the current insn equals the specified value. The first operand is the
+ attribute name and the second is the comparison value. */
+DEF_RTL_EXPR(EQ_ATTR, "eq_attr", "ss", 'x')
+
+/* A conditional expression which is true if the specified flag is
+ true for the insn being scheduled in reorg.
+
+ genattr.c defines the following flags which can be tested by
+ (attr_flag "foo") expressions in eligible_for_delay.
+
+ forward, backward, very_likely, likely, very_unlikely, and unlikely. */
+
+DEF_RTL_EXPR (ATTR_FLAG, "attr_flag", "s", 'x')
+
+/* ----------------------------------------------------------------------
+ Expression types used for things in the instruction chain.
+
+ All formats must start with "iuu" to handle the chain.
+ Each insn expression holds an rtl instruction and its semantics
+ during back-end processing.
+   See the macros in "rtl.h" for the meaning of each rtx->fld[].
+
+ ---------------------------------------------------------------------- */
+
+/* An instruction that cannot jump. */
+DEF_RTL_EXPR(INSN, "insn", "iuueiee", 'i')
+
+/* An instruction that can possibly jump.
+   Fields ( rtx->fld[] ) have exactly the same meaning as INSN's. */
+DEF_RTL_EXPR(JUMP_INSN, "jump_insn", "iuueiee0", 'i')
+
+/* An instruction that can possibly call a subroutine
+ but which will not change which instruction comes next
+ in the current function.
+ Field ( rtx->fld[7] ) is CALL_INSN_FUNCTION_USAGE.
+   All other fields ( rtx->fld[] ) have exactly the same meaning as INSN's. */
+DEF_RTL_EXPR(CALL_INSN, "call_insn", "iuueieee", 'i')
+
+/* A marker that indicates that control will not flow through. */
+DEF_RTL_EXPR(BARRIER, "barrier", "iuu", 'x')
+
+/* Holds a label that is followed by instructions.
+ Operand:
+ 3: is a number that is unique in the entire compilation.
+ 4: is the user-given name of the label, if any.
+     5: is used in jump.c for the use-count of the label,
+	and in flow.c to point to the chain of label_ref's to this label. */
+DEF_RTL_EXPR(CODE_LABEL, "code_label", "iuuis0", 'x')
+
+/* Say where in the code a source line starts, for the symbol table's sake.
+ Contains a filename and a line number. Line numbers <= 0 are special:
+ 0 is used in a dummy placed at the front of every function
+ just so there will never be a need to delete the first insn;
+ -1 indicates a dummy; insns to be deleted by flow analysis and combining
+ are really changed to NOTEs with a number of -1.
+ -2 means beginning of a name binding contour; output N_LBRAC.
+ -3 means end of a contour; output N_RBRAC. */
+DEF_RTL_EXPR(NOTE, "note", "iuusn", 'x')
+
+/* INLINE_HEADER is used by the inline function machinery. The information
+ it contains helps to build the mapping function between the rtx's of
+ the function to be inlined and the current function being expanded. */
+
+DEF_RTL_EXPR(INLINE_HEADER, "inline_header", "iuuuiiiiiieiiEe", 'x')
+
+/* ----------------------------------------------------------------------
+ Top level constituents of INSN, JUMP_INSN and CALL_INSN.
+ ---------------------------------------------------------------------- */
+
+/* Several operations to be done in parallel. */
+DEF_RTL_EXPR(PARALLEL, "parallel", "E", 'x')
+
+/* A string that is passed through to the assembler as input.
+ One can obviously pass comments through by using the
+ assembler comment syntax.
+ These occur in an insn all by themselves as the PATTERN.
+ They also appear inside an ASM_OPERANDS
+ as a convenient way to hold a string. */
+DEF_RTL_EXPR(ASM_INPUT, "asm_input", "s", 'x')
+
+/* An assembler instruction with operands.
+ 1st operand is the instruction template.
+ 2nd operand is the constraint for the output.
+ 3rd operand is the number of the output this expression refers to.
+ When an insn stores more than one value, a separate ASM_OPERANDS
+ is made for each output; this integer distinguishes them.
+ 4th is a vector of values of input operands.
+ 5th is a vector of modes and constraints for the input operands.
+ Each element is an ASM_INPUT containing a constraint string
+ and whose mode indicates the mode of the input operand.
+ 6th is the name of the containing source file.
+ 7th is the source line number. */
+DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEsi", 'x')
+
+/* A machine-specific operation.
+ 1st operand is a vector of operands being used by the operation so that
+ any needed reloads can be done.
+ 2nd operand is a unique value saying which of a number of machine-specific
+ operations is to be performed.
+ (Note that the vector must be the first operand because of the way that
+   genrecog.c records positions within an insn.)
+ This can occur all by itself in a PATTERN, as a component of a PARALLEL,
+ or inside an expression. */
+DEF_RTL_EXPR(UNSPEC, "unspec", "Ei", 'x')
+
+/* Similar, but a volatile operation and one which may trap. */
+DEF_RTL_EXPR(UNSPEC_VOLATILE, "unspec_volatile", "Ei", 'x')
+
+/* Vector of addresses, stored as full words. */
+/* Each element is a LABEL_REF to a CODE_LABEL whose address we want. */
+DEF_RTL_EXPR(ADDR_VEC, "addr_vec", "E", 'x')
+
+/* Vector of address differences X0 - BASE, X1 - BASE, ...
+ First operand is BASE; the vector contains the X's.
+ The machine mode of this rtx says how much space to leave
+ for each difference. */
+DEF_RTL_EXPR(ADDR_DIFF_VEC, "addr_diff_vec", "eE", 'x')
+
+/* ----------------------------------------------------------------------
+ At the top level of an instruction (perhaps under PARALLEL).
+ ---------------------------------------------------------------------- */
+
+/* Assignment.
+ Operand 1 is the location (REG, MEM, PC, CC0 or whatever) assigned to.
+ Operand 2 is the value stored there.
+   ALL assignments must use SET.
+ Instructions that do multiple assignments must use multiple SET,
+ under PARALLEL. */
+DEF_RTL_EXPR(SET, "set", "ee", 'x')
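+
+/* For example, (set (reg:SI 60) (plus:SI (reg:SI 60) (const_int 4)))
+   adds 4 to pseudo register 60 (an illustrative example).  */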
+
+/* Indicate something is used in a way that we don't want to explain.
+ For example, subroutine calls will use the register
+ in which the static chain is passed. */
+DEF_RTL_EXPR(USE, "use", "e", 'x')
+
+/* Indicate something is clobbered in a way that we don't want to explain.
+ For example, subroutine calls will clobber some physical registers
+ (the ones that are by convention not saved). */
+DEF_RTL_EXPR(CLOBBER, "clobber", "e", 'x')
+
+/* Call a subroutine.
+ Operand 1 is the address to call.
+ Operand 2 is the number of arguments. */
+
+DEF_RTL_EXPR(CALL, "call", "ee", 'x')
+
+/* Return from a subroutine. */
+
+DEF_RTL_EXPR(RETURN, "return", "", 'x')
+
+/* Conditional trap.
+ Operand 1 is the condition.
+ Operand 2 is the trap code.
+ For an unconditional trap, make the condition (const_int 1). */
+DEF_RTL_EXPR(TRAP_IF, "trap_if", "ei", 'x')
+
+/* ----------------------------------------------------------------------
+ Primitive values for use in expressions.
+ ---------------------------------------------------------------------- */
+
+/* numeric integer constant */
+DEF_RTL_EXPR(CONST_INT, "const_int", "w", 'o')
+
+/* numeric double constant.
+ Operand 0 is the MEM that stores this constant in memory,
+ or various other things (see comments at immed_double_const in varasm.c).
+ Operand 1 is a chain of all CONST_DOUBLEs in use in the current function.
+ Remaining operands hold the actual value.
+ The number of operands may be more than 2 if cross-compiling;
+ see init_rtl. */
+DEF_RTL_EXPR(CONST_DOUBLE, "const_double", "e0ww", 'o')
+
+/* String constant. Used only for attributes right now. */
+DEF_RTL_EXPR(CONST_STRING, "const_string", "s", 'o')
+
+/* This is used to encapsulate an expression whose value is constant
+ (such as the sum of a SYMBOL_REF and a CONST_INT) so that it will be
+ recognized as a constant operand rather than by arithmetic instructions. */
+
+DEF_RTL_EXPR(CONST, "const", "e", 'o')
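+
+/* For example, (const:SI (plus:SI (symbol_ref:SI "table") (const_int 8)))
+   is the constant address 8 bytes past the symbol `table' (an
+   illustrative example).  */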
+
+/* program counter. Ordinary jumps are represented
+ by a SET whose first operand is (PC). */
+DEF_RTL_EXPR(PC, "pc", "", 'o')
+
+/* A register. The "operand" is the register number, accessed
+ with the REGNO macro. If this number is less than FIRST_PSEUDO_REGISTER
+   then a hardware register is being referred to. */
+DEF_RTL_EXPR(REG, "reg", "i", 'o')
+
+/* A scratch register. This represents a register used only within a
+ single insn. It will be turned into a REG during register allocation
+ or reload unless the constraint indicates that the register won't be
+ needed, in which case it can remain a SCRATCH. This code is
+ marked as having one operand so it can be turned into a REG. */
+DEF_RTL_EXPR(SCRATCH, "scratch", "0", 'o')
+
+/* One word of a multi-word value.
+ The first operand is the complete value; the second says which word.
+ The WORDS_BIG_ENDIAN flag controls whether word number 0
+ (as numbered in a SUBREG) is the most or least significant word.
+
+ This is also used to refer to a value in a different machine mode.
+ For example, it can be used to refer to a SImode value as if it were
+   QImode, or vice versa. Then the word number is always 0. */
+DEF_RTL_EXPR(SUBREG, "subreg", "ei", 'x')
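+
+/* For example, (subreg:SI (reg:DI 100) 1) is one word of the DImode
+   pseudo register 100; WORDS_BIG_ENDIAN determines whether word 1 is
+   the most or the least significant word (an illustrative example).  */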
+
+/* This one-argument rtx is used for move instructions
+ that are guaranteed to alter only the low part of a destination.
+ Thus, (SET (SUBREG:HI (REG...)) (MEM:HI ...))
+ has an unspecified effect on the high part of REG,
+ but (SET (STRICT_LOW_PART (SUBREG:HI (REG...))) (MEM:HI ...))
+ is guaranteed to alter only the bits of REG that are in HImode.
+
+ The actual instruction used is probably the same in both cases,
+ but the register constraints may be tighter when STRICT_LOW_PART
+ is in use. */
+
+DEF_RTL_EXPR(STRICT_LOW_PART, "strict_low_part", "e", 'x')
+
+/* (CONCAT a b) represents the virtual concatenation of a and b
+ to make a value that has as many bits as a and b put together.
+ This is used for complex values. Normally it appears only
+ in DECL_RTLs and during RTL generation, but not in the insn chain. */
+DEF_RTL_EXPR(CONCAT, "concat", "ee", 'o')
+
+/* A memory location; operand is the address.
+ Can be nested inside a VOLATILE. */
+DEF_RTL_EXPR(MEM, "mem", "e", 'o')
+
+/* Reference to an assembler label in the code for this function.
+ The operand is a CODE_LABEL found in the insn chain.
+ The unprinted fields 1 and 2 are used in flow.c for the
+ LABEL_NEXTREF and CONTAINING_INSN. */
+DEF_RTL_EXPR(LABEL_REF, "label_ref", "u00", 'o')
+
+/* Reference to a named label: the string that is the first operand,
+ with `_' added implicitly in front.
+ Exception: if the first character explicitly given is `*',
+ to give it to the assembler, remove the `*' and do not add `_'. */
+DEF_RTL_EXPR(SYMBOL_REF, "symbol_ref", "s", 'o')
+
+/* The condition code register is represented, in our imagination,
+ as a register holding a value that can be compared to zero.
+ In fact, the machine has already compared them and recorded the
+ results; but instructions that look at the condition code
+ pretend to be looking at the entire value and comparing it. */
+DEF_RTL_EXPR(CC0, "cc0", "", 'o')
+
+/* =====================================================================
+ A QUEUED expression really points to a member of the queue of instructions
+ to be output later for postincrement/postdecrement.
+ QUEUED expressions never become part of instructions.
+ When a QUEUED expression would be put into an instruction,
+ instead either the incremented variable or a copy of its previous
+ value is used.
+
+ Operands are:
+ 0. the variable to be incremented (a REG rtx).
+ 1. the incrementing instruction, or 0 if it hasn't been output yet.
+   2. a REG rtx for a copy of the old value of the variable, or 0 if none yet.
+   3. the body to use for the incrementing instruction.
+ 4. the next QUEUED expression in the queue.
+ ====================================================================== */
+
+DEF_RTL_EXPR(QUEUED, "queued", "eeeee", 'x')
+
+/* ----------------------------------------------------------------------
+ Expressions for operators in an rtl pattern
+ ---------------------------------------------------------------------- */
+
+/* if_then_else. This is used in representing ordinary
+ conditional jump instructions.
+ Operand:
+ 0: condition
+ 1: then expr
+ 2: else expr */
+DEF_RTL_EXPR(IF_THEN_ELSE, "if_then_else", "eee", '3')
+
+/* General conditional. The first operand is a vector composed of pairs of
+ expressions. The first element of each pair is evaluated, in turn.
+ The value of the conditional is the second expression of the first pair
+ whose first expression evaluates non-zero. If none of the expressions is
+ true, the second operand will be used as the value of the conditional.
+
+ This should be replaced with use of IF_THEN_ELSE. */
+DEF_RTL_EXPR(COND, "cond", "Ee", 'x')
+
+/* Comparison, produces a condition code result. */
+DEF_RTL_EXPR(COMPARE, "compare", "ee", '2')
+
+/* plus */
+DEF_RTL_EXPR(PLUS, "plus", "ee", 'c')
+
+/* Operand 0 minus operand 1. */
+DEF_RTL_EXPR(MINUS, "minus", "ee", '2')
+
+/* Minus operand 0. */
+DEF_RTL_EXPR(NEG, "neg", "e", '1')
+
+DEF_RTL_EXPR(MULT, "mult", "ee", 'c')
+
+/* Operand 0 divided by operand 1. */
+DEF_RTL_EXPR(DIV, "div", "ee", '2')
+/* Remainder of operand 0 divided by operand 1. */
+DEF_RTL_EXPR(MOD, "mod", "ee", '2')
+
+/* Unsigned divide and remainder. */
+DEF_RTL_EXPR(UDIV, "udiv", "ee", '2')
+DEF_RTL_EXPR(UMOD, "umod", "ee", '2')
+
+/* Bitwise operations. */
+DEF_RTL_EXPR(AND, "and", "ee", 'c')
+
+DEF_RTL_EXPR(IOR, "ior", "ee", 'c')
+
+DEF_RTL_EXPR(XOR, "xor", "ee", 'c')
+
+DEF_RTL_EXPR(NOT, "not", "e", '1')
+
+/* Operand:
+ 0: value to be shifted.
+ 1: number of bits. */
+DEF_RTL_EXPR(ASHIFT, "ashift", "ee", '2')
+DEF_RTL_EXPR(ROTATE, "rotate", "ee", '2')
+
+/* Right shift operations, for machines where these are not the same
+ as left shifting with a negative argument. */
+
+DEF_RTL_EXPR(ASHIFTRT, "ashiftrt", "ee", '2')
+DEF_RTL_EXPR(LSHIFTRT, "lshiftrt", "ee", '2')
+DEF_RTL_EXPR(ROTATERT, "rotatert", "ee", '2')
+
+/* Minimum and maximum values of two operands. We need both signed and
+ unsigned forms. (We cannot use MIN for SMIN because it conflicts
+ with a macro of the same name.) */
+
+DEF_RTL_EXPR(SMIN, "smin", "ee", 'c')
+DEF_RTL_EXPR(SMAX, "smax", "ee", 'c')
+DEF_RTL_EXPR(UMIN, "umin", "ee", 'c')
+DEF_RTL_EXPR(UMAX, "umax", "ee", 'c')
+
+/* These unary operations are used to represent incrementation
+ and decrementation as they occur in memory addresses.
+   The amount of increment or decrement is not represented
+ because they can be understood from the machine-mode of the
+ containing MEM. These operations exist in only two cases:
+ 1. pushes onto the stack.
+ 2. created automatically by the life_analysis pass in flow.c. */
+DEF_RTL_EXPR(PRE_DEC, "pre_dec", "e", 'x')
+DEF_RTL_EXPR(PRE_INC, "pre_inc", "e", 'x')
+DEF_RTL_EXPR(POST_DEC, "post_dec", "e", 'x')
+DEF_RTL_EXPR(POST_INC, "post_inc", "e", 'x')
+
+/* Comparison operations. The ordered comparisons exist in two
+ flavors, signed and unsigned. */
+DEF_RTL_EXPR(NE, "ne", "ee", '<')
+DEF_RTL_EXPR(EQ, "eq", "ee", '<')
+DEF_RTL_EXPR(GE, "ge", "ee", '<')
+DEF_RTL_EXPR(GT, "gt", "ee", '<')
+DEF_RTL_EXPR(LE, "le", "ee", '<')
+DEF_RTL_EXPR(LT, "lt", "ee", '<')
+DEF_RTL_EXPR(GEU, "geu", "ee", '<')
+DEF_RTL_EXPR(GTU, "gtu", "ee", '<')
+DEF_RTL_EXPR(LEU, "leu", "ee", '<')
+DEF_RTL_EXPR(LTU, "ltu", "ee", '<')
+
+/* Represents the result of sign-extending the sole operand.
+ The machine modes of the operand and of the SIGN_EXTEND expression
+ determine how much sign-extension is going on. */
+DEF_RTL_EXPR(SIGN_EXTEND, "sign_extend", "e", '1')
+
+/* Similar for zero-extension (such as unsigned short to int). */
+DEF_RTL_EXPR(ZERO_EXTEND, "zero_extend", "e", '1')
+
+/* Similar but here the operand has a wider mode. */
+DEF_RTL_EXPR(TRUNCATE, "truncate", "e", '1')
+
+/* Similar for extending floating-point values (such as SFmode to DFmode). */
+DEF_RTL_EXPR(FLOAT_EXTEND, "float_extend", "e", '1')
+DEF_RTL_EXPR(FLOAT_TRUNCATE, "float_truncate", "e", '1')
+
+/* Conversion of fixed point operand to floating point value. */
+DEF_RTL_EXPR(FLOAT, "float", "e", '1')
+
+/* With fixed-point machine mode:
+ Conversion of floating point operand to fixed point value.
+ Value is defined only when the operand's value is an integer.
+ With floating-point machine mode (and operand with same mode):
+ Operand is rounded toward zero to produce an integer value
+ represented in floating point. */
+DEF_RTL_EXPR(FIX, "fix", "e", '1')
+
+/* Conversion of unsigned fixed point operand to floating point value. */
+DEF_RTL_EXPR(UNSIGNED_FLOAT, "unsigned_float", "e", '1')
+
+/* With fixed-point machine mode:
+ Conversion of floating point operand to *unsigned* fixed point value.
+ Value is defined only when the operand's value is an integer. */
+DEF_RTL_EXPR(UNSIGNED_FIX, "unsigned_fix", "e", '1')
+
+/* Absolute value */
+DEF_RTL_EXPR(ABS, "abs", "e", '1')
+
+/* Square root */
+DEF_RTL_EXPR(SQRT, "sqrt", "e", '1')
+
+/* Find first bit that is set.
+   Value is 1 + the number of trailing zeros in the argument,
+   or 0 if the argument is 0. */
+DEF_RTL_EXPR(FFS, "ffs", "e", '1')
+
+/* Reference to a signed bit-field of specified size and position.
+ Operand 0 is the memory unit (usually SImode or QImode) which
+ contains the field's first bit. Operand 1 is the width, in bits.
+ Operand 2 is the number of bits in the memory unit before the
+ first bit of this field.
+ If BITS_BIG_ENDIAN is defined, the first bit is the msb and
+ operand 2 counts from the msb of the memory unit.
+ Otherwise, the first bit is the lsb and operand 2 counts from
+ the lsb of the memory unit. */
+DEF_RTL_EXPR(SIGN_EXTRACT, "sign_extract", "eee", 'b')
+
+/* Similar for unsigned bit-field. */
+DEF_RTL_EXPR(ZERO_EXTRACT, "zero_extract", "eee", 'b')
+
+/* For RISC machines. These save memory when splitting insns. */
+
+/* HIGH are the high-order bits of a constant expression. */
+DEF_RTL_EXPR(HIGH, "high", "e", 'o')
+
+/* LO_SUM is the sum of a register and the low-order bits
+ of a constant expression. */
+DEF_RTL_EXPR(LO_SUM, "lo_sum", "ee", 'o')
+
+/*
+Local variables:
+mode:c
+version-control: t
+End:
+*/
diff --git a/gnu/usr.bin/cc/include/rtl.h b/gnu/usr.bin/cc/include/rtl.h
new file mode 100644
index 0000000..b0eb1c52
--- /dev/null
+++ b/gnu/usr.bin/cc/include/rtl.h
@@ -0,0 +1,957 @@
+/* Register Transfer Language (RTL) definitions for GNU C-Compiler
+ Copyright (C) 1987, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+#include "machmode.h"
+
+#undef FFS /* Some systems predefine this symbol; don't let it interfere. */
+#undef FLOAT /* Likewise. */
+#undef ABS /* Likewise. */
+#undef PC /* Likewise. */
+
+#ifndef TREE_CODE
+union tree_node;
+#endif
+
+/* Register Transfer Language EXPRESSIONS CODES */
+
+#define RTX_CODE enum rtx_code
+enum rtx_code {
+
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) ENUM ,
+#include "rtl.def" /* rtl expressions are documented here */
+#undef DEF_RTL_EXPR
+
+ LAST_AND_UNUSED_RTX_CODE}; /* A convenient way to get a value for
+ NUM_RTX_CODE.
+ Assumes default enum value assignment. */
+
+#define NUM_RTX_CODE ((int)LAST_AND_UNUSED_RTX_CODE)
+			/* The cast here saves many elsewhere. */
+
+extern int rtx_length[];
+#define GET_RTX_LENGTH(CODE) (rtx_length[(int)(CODE)])
+
+extern char *rtx_name[];
+#define GET_RTX_NAME(CODE) (rtx_name[(int)(CODE)])
+
+extern char *rtx_format[];
+#define GET_RTX_FORMAT(CODE) (rtx_format[(int)(CODE)])
+
+extern char rtx_class[];
+#define GET_RTX_CLASS(CODE) (rtx_class[(int)(CODE)])
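+
+/* A sketch of how the tables above can be generated from "rtl.def" with
+   the same trick used for `enum rtx_code' above; this mirrors what
+   rtl.c does and is shown here only as an illustration:
+
+     #define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS)   NAME ,
+     char *rtx_name[] = {
+     #include "rtl.def"
+     };
+     #undef DEF_RTL_EXPR
+
+   Each expansion visits rtl.def in the same order as the enum, so
+   rtx_name[(int) (CODE)] is the printable name of CODE.  */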
+
+/* Common union for an element of an rtx. */
+
+typedef union rtunion_def
+{
+ HOST_WIDE_INT rtwint;
+ int rtint;
+ char *rtstr;
+ struct rtx_def *rtx;
+ struct rtvec_def *rtvec;
+ enum machine_mode rttype;
+} rtunion;
+
+/* RTL expression ("rtx"). */
+
+typedef struct rtx_def
+{
+#ifdef ONLY_INT_FIELDS
+#ifdef CODE_FIELD_BUG
+ unsigned int code : 16;
+#else
+ unsigned short code;
+#endif
+#else
+ /* The kind of expression this is. */
+ enum rtx_code code : 16;
+#endif
+ /* The kind of value the expression has. */
+#ifdef ONLY_INT_FIELDS
+ int mode : 8;
+#else
+ enum machine_mode mode : 8;
+#endif
+ /* 1 in an INSN if it can alter flow of control
+ within this function. Not yet used! */
+ unsigned int jump : 1;
+ /* 1 in an INSN if it can call another function. Not yet used! */
+ unsigned int call : 1;
+ /* 1 in a MEM or REG if value of this expression will never change
+ during the current function, even though it is not
+ manifestly constant.
+ 1 in a SUBREG if it is from a promoted variable that is unsigned.
+ 1 in a SYMBOL_REF if it addresses something in the per-function
+ constants pool.
+ 1 in a CALL_INSN if it is a const call.
+ 1 in a JUMP_INSN if it is a branch that should be annulled. Valid from
+ reorg until end of compilation; cleared before used. */
+ unsigned int unchanging : 1;
+ /* 1 in a MEM expression if contents of memory are volatile.
+ 1 in an INSN, CALL_INSN, JUMP_INSN, CODE_LABEL or BARRIER
+ if it is deleted.
+     1 in a REG expression if it corresponds to a variable declared by the user.
+ 0 for an internally generated temporary.
+ In a SYMBOL_REF, this flag is used for machine-specific purposes.
+ In a LABEL_REF or in a REG_LABEL note, this is LABEL_REF_NONLOCAL_P. */
+ unsigned int volatil : 1;
+ /* 1 in a MEM referring to a field of a structure (not a union!).
+ 0 if the MEM was a variable or the result of a * operator in C;
+ 1 if it was the result of a . or -> operator (on a struct) in C.
+     1 in a REG if the register is used only in the exit code of a loop.
+     1 in a SUBREG expression if it was generated from a variable with a
+ promoted mode.
+ 1 in a CODE_LABEL if the label is used for nonlocal gotos
+ and must not be deleted even if its count is zero.
+ 1 in a LABEL_REF if this is a reference to a label outside the
+ current loop.
+ 1 in an INSN, JUMP_INSN, or CALL_INSN if this insn must be scheduled
+ together with the preceding insn. Valid only within sched.
+ 1 in an INSN, JUMP_INSN, or CALL_INSN if insn is in a delay slot and
+ from the target of a branch. Valid from reorg until end of compilation;
+ cleared before used. */
+ unsigned int in_struct : 1;
+ /* 1 if this rtx is used. This is used for copying shared structure.
+ See `unshare_all_rtl'.
+ In a REG, this is not needed for that purpose, and used instead
+ in `leaf_renumber_regs_insn'.
+ In a SYMBOL_REF, means that emit_library_call
+ has used it as the function. */
+ unsigned int used : 1;
+ /* Nonzero if this rtx came from procedure integration.
+ In a REG, nonzero means this reg refers to the return value
+ of the current function. */
+ unsigned integrated : 1;
+ /* The first element of the operands of this rtx.
+ The number of operands and their types are controlled
+ by the `code' field, according to rtl.def. */
+ rtunion fld[1];
+} *rtx;
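+
+/* The `fld[1]' member is a one-element array used as a variable-length
+   trailer (the common C "struct hack"): each rtx is allocated with room
+   for as many rtunion slots as its code requires.  A minimal sketch of
+   such an allocation (rtx_alloc in rtl.c uses its own allocator; malloc
+   appears here only for illustration):
+
+     rtx x = (rtx) malloc (sizeof (struct rtx_def)
+                           + (GET_RTX_LENGTH (code) - 1) * sizeof (rtunion));
+     PUT_CODE (x, code);
+ */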
+
+
+/* Add prototype support. */
+#ifndef PROTO
+#if defined (USE_PROTOTYPES) ? USE_PROTOTYPES : defined (__STDC__)
+#define PROTO(ARGS) ARGS
+#else
+#define PROTO(ARGS) ()
+#endif
+#endif
+
+#ifndef VPROTO
+#ifdef __STDC__
+#define PVPROTO(ARGS) ARGS
+#define VPROTO(ARGS) ARGS
+#define VA_START(va_list,var) va_start(va_list,var)
+#else
+#define PVPROTO(ARGS) ()
+#define VPROTO(ARGS) (va_alist) va_dcl
+#define VA_START(va_list,var) va_start(va_list)
+#endif
+#endif
+
+#ifndef STDIO_PROTO
+#ifdef BUFSIZ
+#define STDIO_PROTO(ARGS) PROTO(ARGS)
+#else
+#define STDIO_PROTO(ARGS) ()
+#endif
+#endif
+
+#define NULL_RTX (rtx) 0
+
+/* Define a generic NULL if one hasn't already been defined. */
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef GENERIC_PTR
+#if defined (USE_PROTOTYPES) ? USE_PROTOTYPES : defined (__STDC__)
+#define GENERIC_PTR void *
+#else
+#define GENERIC_PTR char *
+#endif
+#endif
+
+#ifndef NULL_PTR
+#define NULL_PTR ((GENERIC_PTR)0)
+#endif
+
+/* Define macros to access the `code' field of the rtx. */
+
+#ifdef SHORT_ENUM_BUG
+#define GET_CODE(RTX) ((enum rtx_code) ((RTX)->code))
+#define PUT_CODE(RTX, CODE) ((RTX)->code = ((short) (CODE)))
+#else
+#define GET_CODE(RTX) ((RTX)->code)
+#define PUT_CODE(RTX, CODE) ((RTX)->code = (CODE))
+#endif
+
+#define GET_MODE(RTX) ((RTX)->mode)
+#define PUT_MODE(RTX, MODE) ((RTX)->mode = (MODE))
+
+#define RTX_INTEGRATED_P(RTX) ((RTX)->integrated)
+#define RTX_UNCHANGING_P(RTX) ((RTX)->unchanging)
+
+/* RTL vector. These appear inside RTX's when there is a need
+   for a variable number of things. The principal use is inside
+ PARALLEL expressions. */
+
+typedef struct rtvec_def{
+ unsigned num_elem; /* number of elements */
+ rtunion elem[1];
+} *rtvec;
+
+#define NULL_RTVEC (rtvec) 0
+
+#define GET_NUM_ELEM(RTVEC) ((RTVEC)->num_elem)
+#define PUT_NUM_ELEM(RTVEC, NUM) ((RTVEC)->num_elem = (unsigned) NUM)
+
+#define RTVEC_ELT(RTVEC, I) ((RTVEC)->elem[(I)].rtx)
+
+/* 1 if X is a REG. */
+
+#define REG_P(X) (GET_CODE (X) == REG)
+
+/* 1 if X is a constant value. */
+
+#define CONSTANT_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE \
+ || GET_CODE (X) == CONST || GET_CODE (X) == HIGH)
+
+/* General accessor macros for accessing the fields of an rtx. */
+
+#define XEXP(RTX, N) ((RTX)->fld[N].rtx)
+#define XINT(RTX, N) ((RTX)->fld[N].rtint)
+#define XWINT(RTX, N) ((RTX)->fld[N].rtwint)
+#define XSTR(RTX, N) ((RTX)->fld[N].rtstr)
+#define XVEC(RTX, N) ((RTX)->fld[N].rtvec)
+#define XVECLEN(RTX, N) ((RTX)->fld[N].rtvec->num_elem)
+#define XVECEXP(RTX,N,M)((RTX)->fld[N].rtvec->elem[M].rtx)
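+
+/* A sketch of the generic traversal these macros make possible (an
+   illustrative helper, not a function declared in this header): visit
+   X and every subexpression of X, using the format string to find the
+   fields that hold rtx's ('e') or vectors of rtx's ('E').
+
+     void
+     walk_rtx (x)
+          rtx x;
+     {
+       register int i;
+       register char *fmt;
+
+       if (x == 0)
+         return;
+       fmt = GET_RTX_FORMAT (GET_CODE (x));
+       for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+         {
+           if (fmt[i] == 'e')
+             walk_rtx (XEXP (x, i));
+           else if (fmt[i] == 'E')
+             {
+               register int j;
+               for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+                 walk_rtx (XVECEXP (x, i, j));
+             }
+         }
+     }
+ */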
+
+/* ACCESS MACROS for particular fields of insns. */
+
+/* Holds a unique number for each insn.
+ These are not necessarily sequentially increasing. */
+#define INSN_UID(INSN) ((INSN)->fld[0].rtint)
+
+/* Chain insns together in sequence. */
+#define PREV_INSN(INSN) ((INSN)->fld[1].rtx)
+#define NEXT_INSN(INSN) ((INSN)->fld[2].rtx)
+
+/* The body of an insn. */
+#define PATTERN(INSN) ((INSN)->fld[3].rtx)
+
+/* Code number of instruction, from when it was recognized.
+ -1 means this instruction has not been recognized yet. */
+#define INSN_CODE(INSN) ((INSN)->fld[4].rtint)
+
+/* Set up in flow.c; empty before then.
+ Holds a chain of INSN_LIST rtx's whose first operands point at
+ previous insns with direct data-flow connections to this one.
+ That means that those insns set variables whose next use is in this insn.
+ They are always in the same basic block as this insn. */
+#define LOG_LINKS(INSN) ((INSN)->fld[5].rtx)
+
+/* 1 if insn has been deleted. */
+#define INSN_DELETED_P(INSN) ((INSN)->volatil)
+
+/* 1 if insn is a call to a const function. */
+#define CONST_CALL_P(INSN) ((INSN)->unchanging)
+
+/* 1 if insn is a branch that should not unconditionally execute its
+ delay slots, i.e., it is an annulled branch. */
+#define INSN_ANNULLED_BRANCH_P(INSN) ((INSN)->unchanging)
+
+/* 1 if insn is in a delay slot and is from the target of the branch. If
+ the branch insn has INSN_ANNULLED_BRANCH_P set, this insn should only be
+ executed if the branch is taken. For annulled branches with this bit
+ clear, the insn should be executed only if the branch is not taken. */
+#define INSN_FROM_TARGET_P(INSN) ((INSN)->in_struct)
+
+/* Holds a list of notes on what this insn does to various REGs.
+ It is a chain of EXPR_LIST rtx's, where the second operand
+ is the chain pointer and the first operand is the REG being described.
+ The mode field of the EXPR_LIST contains not a real machine mode
+ but a value that says what this note says about the REG:
+ REG_DEAD means that the value in REG dies in this insn (i.e., it is
+ not needed past this insn). If REG is set in this insn, the REG_DEAD
+ note may, but need not, be omitted.
+ REG_INC means that the REG is autoincremented or autodecremented.
+ REG_EQUIV describes the insn as a whole; it says that the
+ insn sets a register to a constant value or to be equivalent to
+ a memory address. If the
+ register is spilled to the stack then the constant value
+ should be substituted for it. The contents of the REG_EQUIV
+ is the constant value or memory address, which may be different
+ from the source of the SET although it has the same value.
+ REG_EQUAL is like REG_EQUIV except that the destination
+ is only momentarily equal to the specified rtx. Therefore, it
+ cannot be used for substitution; but it can be used for cse.
+ REG_RETVAL means that this insn copies the return-value of
+ a library call out of the hard reg for return values. This note
+ is actually an INSN_LIST and it points to the first insn involved
+ in setting up arguments for the call. flow.c uses this to delete
+ the entire library call when its result is dead.
+ REG_LIBCALL is the inverse of REG_RETVAL: it goes on the first insn
+ of the library call and points at the one that has the REG_RETVAL.
+ REG_WAS_0 says that the register set in this insn held 0 before the insn.
+ The contents of the note is the insn that stored the 0.
+ If that insn is deleted or patched to a NOTE, the REG_WAS_0 is inoperative.
+ The REG_WAS_0 note is actually an INSN_LIST, not an EXPR_LIST.
+ REG_NONNEG means that the register is always nonnegative during
+ the containing loop. This is used in branches so that decrement and
+ branch instructions terminating on zero can be matched. There must be
+ an insn pattern in the md file named `decrement_and_branch_until_zero'
+ or else this will never be added to any instructions.
+ REG_NO_CONFLICT means there is no conflict *after this insn*
+ between the register in the note and the destination of this insn.
+ REG_UNUSED identifies a register set in this insn and never used.
+ REG_CC_SETTER and REG_CC_USER link a pair of insns that set and use
+ CC0, respectively. Normally, these are required to be consecutive insns,
+ but we permit putting a cc0-setting insn in the delay slot of a branch
+ as long as only one copy of the insn exists. In that case, these notes
+     point from one to the other to allow code generation to find any
+     required information and to properly update CC_STATUS.
+ REG_LABEL points to a CODE_LABEL. Used by non-JUMP_INSNs to
+ say that the CODE_LABEL contained in the REG_LABEL note is used
+ by the insn.
+ REG_DEP_ANTI is used in LOG_LINKS which represent anti (write after read)
+ dependencies. REG_DEP_OUTPUT is used in LOG_LINKS which represent output
+ (write after write) dependencies. Data dependencies, which are the only
+ type of LOG_LINK created by flow, are represented by a 0 reg note kind. */
+
+#define REG_NOTES(INSN) ((INSN)->fld[6].rtx)
+
+/* Don't forget to change reg_note_name in rtl.c. */
+enum reg_note { REG_DEAD = 1, REG_INC = 2, REG_EQUIV = 3, REG_WAS_0 = 4,
+ REG_EQUAL = 5, REG_RETVAL = 6, REG_LIBCALL = 7,
+ REG_NONNEG = 8, REG_NO_CONFLICT = 9, REG_UNUSED = 10,
+ REG_CC_SETTER = 11, REG_CC_USER = 12, REG_LABEL = 13,
+ REG_DEP_ANTI = 14, REG_DEP_OUTPUT = 15 };
+
+/* Define macros to extract and insert the reg-note kind in an EXPR_LIST. */
+#define REG_NOTE_KIND(LINK) ((enum reg_note) GET_MODE (LINK))
+#define PUT_REG_NOTE_KIND(LINK,KIND) PUT_MODE(LINK, (enum machine_mode) (KIND))
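+
+/* For instance (an illustrative sketch): to record that REG dies in
+   INSN, one would push an EXPR_LIST whose mode slot carries the kind:
+
+     REG_NOTES (insn)
+       = gen_rtx (EXPR_LIST, REG_DEAD, reg, REG_NOTES (insn));
+
+   gen_rtx's mode argument doubles as the reg-note kind here, as
+   described above.  */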
+
+/* Names for REG_NOTE's in EXPR_LIST insn's. */
+
+extern char *reg_note_name[];
+#define GET_REG_NOTE_NAME(MODE) (reg_note_name[(int)(MODE)])
+
+/* This field is only present on CALL_INSNs. It holds a chain of EXPR_LIST of
+ USE and CLOBBER expressions.
+ USE expressions list the registers filled with arguments that
+ are passed to the function.
+ CLOBBER expressions document the registers explicitly clobbered
+ by this CALL_INSN.
+ Pseudo registers can not be mentioned in this list. */
+#define CALL_INSN_FUNCTION_USAGE(INSN) ((INSN)->fld[7].rtx)
+
+/* The label-number of a code-label. The assembler label
+ is made from `L' and the label-number printed in decimal.
+ Label numbers are unique in a compilation. */
+#define CODE_LABEL_NUMBER(INSN) ((INSN)->fld[3].rtint)
+
+#define LINE_NUMBER NOTE
+
+/* In a NOTE that is a line number, this is a string for the file name
+ that the line is in. We use the same field to record block numbers
+ temporarily in NOTE_INSN_BLOCK_BEG and NOTE_INSN_BLOCK_END notes.
+ (We avoid lots of casts between ints and pointers if we use a
+   different macro for the block number.) */
+
+#define NOTE_SOURCE_FILE(INSN) ((INSN)->fld[3].rtstr)
+#define NOTE_BLOCK_NUMBER(INSN) ((INSN)->fld[3].rtint)
+
+/* In a NOTE that is a line number, this is the line number.
+ Other kinds of NOTEs are identified by negative numbers here. */
+#define NOTE_LINE_NUMBER(INSN) ((INSN)->fld[4].rtint)
+
+/* Codes that appear in the NOTE_LINE_NUMBER field
+ for kinds of notes that are not line numbers.
+
+ Notice that we do not try to use zero here for any of
+ the special note codes because sometimes the source line
+ actually can be zero! This happens (for example) when we
+ are generating code for the per-translation-unit constructor
+ and destructor routines for some C++ translation unit.
+
+ If you should change any of the following values, or if you
+ should add a new value here, don't forget to change the
+ note_insn_name array in rtl.c. */
+
+/* This note is used to get rid of an insn
+ when it isn't safe to patch the insn out of the chain. */
+#define NOTE_INSN_DELETED -1
+#define NOTE_INSN_BLOCK_BEG -2
+#define NOTE_INSN_BLOCK_END -3
+#define NOTE_INSN_LOOP_BEG -4
+#define NOTE_INSN_LOOP_END -5
+/* This kind of note is generated at the end of the function body,
+ just before the return insn or return label.
+ In an optimizing compilation it is deleted by the first jump optimization,
+ after enabling that optimizer to determine whether control can fall
+ off the end of the function body without a return statement. */
+#define NOTE_INSN_FUNCTION_END -6
+/* This kind of note is generated just after each call to `setjmp', et al. */
+#define NOTE_INSN_SETJMP -7
+/* Generated at the place in a loop that `continue' jumps to. */
+#define NOTE_INSN_LOOP_CONT -8
+/* Generated at the start of a duplicated exit test. */
+#define NOTE_INSN_LOOP_VTOP -9
+/* This marks the point immediately after the last prologue insn. */
+#define NOTE_INSN_PROLOGUE_END -10
+/* This marks the point immediately prior to the first epilogue insn. */
+#define NOTE_INSN_EPILOGUE_BEG -11
+/* Generated in place of user-declared labels when they are deleted. */
+#define NOTE_INSN_DELETED_LABEL -12
+/* This note indicates the start of the real body of the function,
+ i.e. the point just after all of the parms have been moved into
+ their homes, etc. */
+#define NOTE_INSN_FUNCTION_BEG -13
+
+
+#if 0 /* These are not used, and I don't know what they were for. --rms. */
+#define NOTE_DECL_NAME(INSN) ((INSN)->fld[3].rtstr)
+#define NOTE_DECL_CODE(INSN) ((INSN)->fld[4].rtint)
+#define NOTE_DECL_RTL(INSN) ((INSN)->fld[5].rtx)
+#define NOTE_DECL_IDENTIFIER(INSN) ((INSN)->fld[6].rtint)
+#define NOTE_DECL_TYPE(INSN) ((INSN)->fld[7].rtint)
+#endif /* 0 */
+
+/* Names for NOTE insn's other than line numbers. */
+
+extern char *note_insn_name[];
+#define GET_NOTE_INSN_NAME(NOTE_CODE) (note_insn_name[-(NOTE_CODE)])
+
+/* The name of a label, in case it corresponds to an explicit label
+ in the input source code. */
+#define LABEL_NAME(LABEL) ((LABEL)->fld[4].rtstr)
+
+/* In jump.c, each label contains a count of the number
+ of LABEL_REFs that point at it, so unused labels can be deleted. */
+#define LABEL_NUSES(LABEL) ((LABEL)->fld[5].rtint)
+
+/* The rest is used instead of the above, in a CODE_LABEL,
+ if bytecode is being output.
+ We make the slightly klugy assumption that a LABEL has enough slots
+ to hold these things. That happens to be true. */
+
+/* For static or external objects. */
+#define BYTECODE_LABEL(X) (XEXP ((X), 0))
+
+/* For goto labels inside bytecode functions. */
+#define BYTECODE_BC_LABEL(X) (*(struct bc_label **) &XEXP ((X), 1))
+
+/* In jump.c, each JUMP_INSN can point to a label that it can jump to,
+ so that if the JUMP_INSN is deleted, the label's LABEL_NUSES can
+ be decremented and possibly the label can be deleted. */
+#define JUMP_LABEL(INSN) ((INSN)->fld[7].rtx)
+
+/* Once basic blocks are found in flow.c,
+ each CODE_LABEL starts a chain that goes through
+ all the LABEL_REFs that jump to that label.
+ The chain eventually winds up at the CODE_LABEL; it is circular. */
+#define LABEL_REFS(LABEL) ((LABEL)->fld[5].rtx)
+
+/* This is the field in the LABEL_REF through which the circular chain
+ of references to a particular label is linked.
+ This chain is set up in flow.c. */
+
+#define LABEL_NEXTREF(REF) ((REF)->fld[1].rtx)
+
+/* Once basic blocks are found in flow.c,
+ each LABEL_REF points to its containing instruction with this field. */
+
+#define CONTAINING_INSN(RTX) ((RTX)->fld[2].rtx)
+
+/* For a REG rtx, REGNO extracts the register number. */
+
+#define REGNO(RTX) ((RTX)->fld[0].rtint)
+
+/* For a REG rtx, REG_FUNCTION_VALUE_P is nonzero if the reg
+ is the current function's return value. */
+
+#define REG_FUNCTION_VALUE_P(RTX) ((RTX)->integrated)
+
+/* 1 in a REG rtx if it corresponds to a variable declared by the user. */
+#define REG_USERVAR_P(RTX) ((RTX)->volatil)
+
+/* For a CONST_INT rtx, INTVAL extracts the integer. */
+
+#define INTVAL(RTX) ((RTX)->fld[0].rtwint)
+
+/* For a SUBREG rtx, SUBREG_REG extracts the value we want a subreg of.
+ SUBREG_WORD extracts the word-number. */
+
+#define SUBREG_REG(RTX) ((RTX)->fld[0].rtx)
+#define SUBREG_WORD(RTX) ((RTX)->fld[1].rtint)
+
+/* 1 if the REG contained in SUBREG_REG is already known to be
+ sign- or zero-extended from the mode of the SUBREG to the mode of
+ the reg. SUBREG_PROMOTED_UNSIGNED_P gives the signedness of the
+ extension.
+
+ When used as an LHS, it means that this extension must be done
+ when assigning to SUBREG_REG. */
+
+#define SUBREG_PROMOTED_VAR_P(RTX) ((RTX)->in_struct)
+#define SUBREG_PROMOTED_UNSIGNED_P(RTX) ((RTX)->unchanging)
+
+/* Access various components of an ASM_OPERANDS rtx. */
+
+#define ASM_OPERANDS_TEMPLATE(RTX) XSTR ((RTX), 0)
+#define ASM_OPERANDS_OUTPUT_CONSTRAINT(RTX) XSTR ((RTX), 1)
+#define ASM_OPERANDS_OUTPUT_IDX(RTX) XINT ((RTX), 2)
+#define ASM_OPERANDS_INPUT_VEC(RTX) XVEC ((RTX), 3)
+#define ASM_OPERANDS_INPUT_CONSTRAINT_VEC(RTX) XVEC ((RTX), 4)
+#define ASM_OPERANDS_INPUT(RTX, N) XVECEXP ((RTX), 3, (N))
+#define ASM_OPERANDS_INPUT_LENGTH(RTX) XVECLEN ((RTX), 3)
+#define ASM_OPERANDS_INPUT_CONSTRAINT(RTX, N) XSTR (XVECEXP ((RTX), 4, (N)), 0)
+#define ASM_OPERANDS_INPUT_MODE(RTX, N) GET_MODE (XVECEXP ((RTX), 4, (N)))
+#define ASM_OPERANDS_SOURCE_FILE(RTX) XSTR ((RTX), 5)
+#define ASM_OPERANDS_SOURCE_LINE(RTX) XINT ((RTX), 6)
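+
+/* Illustrative sketch (not in the original source): walking the inputs of
+ an ASM_OPERANDS rtx `body' with the accessors above. */
+#if 0
+{
+ int i;
+ for (i = 0; i < ASM_OPERANDS_INPUT_LENGTH (body); i++)
+ {
+ rtx input = ASM_OPERANDS_INPUT (body, i);
+ char *constraint = ASM_OPERANDS_INPUT_CONSTRAINT (body, i);
+ enum machine_mode mode = ASM_OPERANDS_INPUT_MODE (body, i);
+ /* ... examine input, its constraint string, and its mode ... */
+ }
+}
+#endif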
+
+/* For a MEM rtx, 1 if it's a volatile reference.
+ Also in an ASM_OPERANDS rtx. */
+#define MEM_VOLATILE_P(RTX) ((RTX)->volatil)
+
+/* For a MEM rtx, 1 if it refers to a structure or union component. */
+#define MEM_IN_STRUCT_P(RTX) ((RTX)->in_struct)
+
+/* For a LABEL_REF, 1 means that this reference is to a label outside the
+ loop containing the reference. */
+#define LABEL_OUTSIDE_LOOP_P(RTX) ((RTX)->in_struct)
+
+/* For a LABEL_REF, 1 means it is for a nonlocal label. */
+/* Likewise in an EXPR_LIST for a REG_LABEL note. */
+#define LABEL_REF_NONLOCAL_P(RTX) ((RTX)->volatil)
+
+/* For a CODE_LABEL, 1 means always consider this label to be needed. */
+#define LABEL_PRESERVE_P(RTX) ((RTX)->in_struct)
+
+/* For a REG, 1 means the register is used only in an exit test of a loop. */
+#define REG_LOOP_TEST_P(RTX) ((RTX)->in_struct)
+
+/* During sched, for an insn, 1 means that the insn must be scheduled together
+ with the preceding insn. */
+#define SCHED_GROUP_P(INSN) ((INSN)->in_struct)
+
+/* During sched, for the LOG_LINKS of an insn, these cache the adjusted
+ cost of the dependence link. The cost of executing an instruction
+ may vary based on how the results are used. LINK_COST_ZERO is 1 when
+ the adjustment to the cost through the link is zero (i.e., the link
+ adds no extra cost). LINK_COST_FREE is 1 when the cost through the
+ link is zero (i.e., the link makes the dependence free). In other cases,
+ the adjustment to the cost is recomputed each time it is needed. */
+#define LINK_COST_ZERO(X) ((X)->jump)
+#define LINK_COST_FREE(X) ((X)->call)
+
+/* For a SET rtx, SET_DEST is the place that is set
+ and SET_SRC is the value it is set to. */
+#define SET_DEST(RTX) ((RTX)->fld[0].rtx)
+#define SET_SRC(RTX) ((RTX)->fld[1].rtx)
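+
+/* Illustrative sketch (not in the original source): single_set, declared
+ below, yields the lone SET of an insn, whose pieces these macros pick
+ apart. `insn' and `record_reg_value' are placeholders. */
+#if 0
+{
+ rtx set = single_set (insn);
+ if (set != 0 && GET_CODE (SET_DEST (set)) == REG)
+ /* SET_SRC (set) is the value stored in register SET_DEST (set). */
+ record_reg_value (REGNO (SET_DEST (set)), SET_SRC (set));
+}
+#endif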
+
+/* For a TRAP_IF rtx, TRAP_CONDITION is an expression. */
+#define TRAP_CONDITION(RTX) ((RTX)->fld[0].rtx)
+
+/* 1 in a SYMBOL_REF if it addresses this function's constant pool. */
+#define CONSTANT_POOL_ADDRESS_P(RTX) ((RTX)->unchanging)
+
+/* Flag in a SYMBOL_REF for machine-specific purposes. */
+#define SYMBOL_REF_FLAG(RTX) ((RTX)->volatil)
+
+/* 1 means a SYMBOL_REF has been used as the library function in emit_library_call. */
+#define SYMBOL_REF_USED(RTX) ((RTX)->used)
+
+/* For an INLINE_HEADER rtx, FIRST_FUNCTION_INSN is the first insn
+ of the function that is not involved in copying parameters to
+ pseudo-registers. FIRST_PARM_INSN is the very first insn of
+ the function, including the parameter copying.
+ We keep this around in case we must splice
+ this function into the assembly code at the end of the file.
+ FIRST_LABELNO is the first label number used by the function (inclusive).
+ LAST_LABELNO is the last label used by the function (exclusive).
+ MAX_REGNUM is the largest pseudo-register used by that function.
+ FUNCTION_ARGS_SIZE is the size of the argument block in the stack.
+ POPS_ARGS is the number of bytes of input arguments popped by the function.
+ STACK_SLOT_LIST is the list of stack slots.
+ FUNCTION_FLAGS are where single-bit flags are saved.
+ OUTGOING_ARGS_SIZE is the size of the largest outgoing stack parameter list.
+ ORIGINAL_ARG_VECTOR is a vector of the original DECL_RTX values
+ for the function arguments.
+ ORIGINAL_DECL_INITIAL is a pointer to the original DECL_INITIAL for the
+ function.
+
+ We want this to be laid out like an INSN. The PREV_INSN field
+ is always NULL. The NEXT_INSN field always points to the
+ first function insn of the function being squirreled away. */
+
+#define FIRST_FUNCTION_INSN(RTX) ((RTX)->fld[2].rtx)
+#define FIRST_PARM_INSN(RTX) ((RTX)->fld[3].rtx)
+#define FIRST_LABELNO(RTX) ((RTX)->fld[4].rtint)
+#define LAST_LABELNO(RTX) ((RTX)->fld[5].rtint)
+#define MAX_PARMREG(RTX) ((RTX)->fld[6].rtint)
+#define MAX_REGNUM(RTX) ((RTX)->fld[7].rtint)
+#define FUNCTION_ARGS_SIZE(RTX) ((RTX)->fld[8].rtint)
+#define POPS_ARGS(RTX) ((RTX)->fld[9].rtint)
+#define STACK_SLOT_LIST(RTX) ((RTX)->fld[10].rtx)
+#define FUNCTION_FLAGS(RTX) ((RTX)->fld[11].rtint)
+#define OUTGOING_ARGS_SIZE(RTX) ((RTX)->fld[12].rtint)
+#define ORIGINAL_ARG_VECTOR(RTX) ((RTX)->fld[13].rtvec)
+#define ORIGINAL_DECL_INITIAL(RTX) ((RTX)->fld[14].rtx)
+
+/* In FUNCTION_FLAGS we save some variables computed when emitting the code
+ for the function and which must be `or'ed into the current flag values when
+ insns from that function are being inlined. */
+
+/* These ought to be an enum, but non-ANSI compilers don't like that. */
+#define FUNCTION_FLAGS_CALLS_ALLOCA 01
+#define FUNCTION_FLAGS_CALLS_SETJMP 02
+#define FUNCTION_FLAGS_RETURNS_STRUCT 04
+#define FUNCTION_FLAGS_RETURNS_PCC_STRUCT 010
+#define FUNCTION_FLAGS_NEEDS_CONTEXT 020
+#define FUNCTION_FLAGS_HAS_NONLOCAL_LABEL 040
+#define FUNCTION_FLAGS_RETURNS_POINTER 0100
+#define FUNCTION_FLAGS_USES_CONST_POOL 0200
+#define FUNCTION_FLAGS_CALLS_LONGJMP 0400
+#define FUNCTION_FLAGS_USES_PIC_OFFSET_TABLE 01000
+
+/* Define a macro to look for REG_INC notes,
+ but save time on machines where they never exist. */
+
+/* Don't continue this line--convex cc version 4.1 would lose. */
+#if (defined (HAVE_PRE_INCREMENT) || defined (HAVE_PRE_DECREMENT) || defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT))
+#define FIND_REG_INC_NOTE(insn, reg) (find_reg_note ((insn), REG_INC, (reg)))
+#else
+#define FIND_REG_INC_NOTE(insn, reg) 0
+#endif
+
+/* Indicate whether the machine has any sort of auto increment addressing.
+ If not, we can avoid checking for REG_INC notes. */
+
+/* Don't continue this line--convex cc version 4.1 would lose. */
+#if (defined (HAVE_PRE_INCREMENT) || defined (HAVE_PRE_DECREMENT) || defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT))
+#define AUTO_INC_DEC
+#endif
+
+/* Generally useful functions. */
+
+/* The following functions accept a wide integer argument. Rather than
+ having to cast on every function call, we use a macro instead, that is
+ defined here and in tree.h. */
+
+#ifndef exact_log2
+#define exact_log2(N) exact_log2_wide ((HOST_WIDE_INT) (N))
+#define floor_log2(N) floor_log2_wide ((HOST_WIDE_INT) (N))
+#endif
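+
+/* For example: exact_log2 (8) is 3, exact_log2 (12) is -1 (12 is not a
+ power of two), and floor_log2 (12) is 3. */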
+
+#define plus_constant(X,C) plus_constant_wide (X, (HOST_WIDE_INT) (C))
+
+#define plus_constant_for_output(X,C) \
+ plus_constant_for_output_wide (X, (HOST_WIDE_INT) (C))
+
+extern rtx plus_constant_wide PROTO((rtx, HOST_WIDE_INT));
+extern rtx plus_constant_for_output_wide PROTO((rtx, HOST_WIDE_INT));
+
+#define GEN_INT(N) gen_rtx (CONST_INT, VOIDmode, (HOST_WIDE_INT) (N))
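+
+/* Illustrative sketch (not in the original source): GEN_INT does the
+ HOST_WIDE_INT cast itself, so callers need not. */
+#if 0
+ rtx four = GEN_INT (4); /* a CONST_INT rtx; INTVAL (four) == 4 */
+#endif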
+
+extern rtx bc_gen_rtx ();
+
+extern rtx gen_rtx PVPROTO((enum rtx_code,
+ enum machine_mode, ...));
+extern rtvec gen_rtvec PVPROTO((int, ...));
+
+extern rtx read_rtx STDIO_PROTO((FILE *));
+
+#if 0
+/* At present, don't prototype xrealloc, since not all of the callers
+ cast their pointers to char *, and not all of the definitions use
+ void * yet. */
+extern char *xmalloc PROTO((size_t));
+extern char *xrealloc PROTO((void *, size_t));
+#else
+extern char *xmalloc ();
+extern char *xrealloc ();
+#endif
+
+extern char *oballoc PROTO((int));
+extern char *permalloc PROTO((int));
+extern void free PROTO((void *));
+extern rtx rtx_alloc PROTO((RTX_CODE));
+extern rtvec rtvec_alloc PROTO((int));
+extern rtx find_reg_note PROTO((rtx, enum reg_note, rtx));
+extern rtx find_regno_note PROTO((rtx, enum reg_note, int));
+extern int find_reg_fusage PROTO((rtx, enum rtx_code, rtx));
+extern int find_regno_fusage PROTO((rtx, enum rtx_code, int));
+extern HOST_WIDE_INT get_integer_term PROTO((rtx));
+extern rtx get_related_value PROTO((rtx));
+extern rtx single_set PROTO((rtx));
+extern rtx find_last_value PROTO((rtx, rtx *, rtx));
+extern rtx copy_rtx PROTO((rtx));
+extern rtx copy_rtx_if_shared PROTO((rtx));
+extern rtx copy_most_rtx PROTO((rtx, rtx));
+extern rtx replace_rtx PROTO((rtx, rtx, rtx));
+extern rtvec gen_rtvec_v PROTO((int, rtx *));
+extern rtx gen_reg_rtx PROTO((enum machine_mode));
+extern rtx gen_label_rtx PROTO((void));
+extern rtx gen_inline_header_rtx PROTO((rtx, rtx, int, int, int, int, int, int, rtx, int, int, rtvec, rtx));
+extern rtx gen_lowpart_common PROTO((enum machine_mode, rtx));
+extern rtx gen_lowpart PROTO((enum machine_mode, rtx));
+extern rtx gen_lowpart_if_possible PROTO((enum machine_mode, rtx));
+extern rtx gen_highpart PROTO((enum machine_mode, rtx));
+extern rtx gen_realpart PROTO((enum machine_mode, rtx));
+extern rtx gen_imagpart PROTO((enum machine_mode, rtx));
+extern rtx operand_subword PROTO((rtx, int, int, enum machine_mode));
+extern rtx operand_subword_force PROTO((rtx, int, enum machine_mode));
+extern int subreg_lowpart_p PROTO((rtx));
+extern rtx make_safe_from PROTO((rtx, rtx));
+extern rtx memory_address PROTO((enum machine_mode, rtx));
+extern rtx get_insns PROTO((void));
+extern rtx get_last_insn PROTO((void));
+extern rtx get_last_insn_anywhere PROTO((void));
+extern void start_sequence PROTO((void));
+extern void push_to_sequence PROTO((rtx));
+extern void end_sequence PROTO((void));
+extern rtx gen_sequence PROTO((void));
+extern rtx immed_double_const PROTO((HOST_WIDE_INT, HOST_WIDE_INT, enum machine_mode));
+extern rtx force_const_mem PROTO((enum machine_mode, rtx));
+extern rtx force_reg PROTO((enum machine_mode, rtx));
+extern rtx get_pool_constant PROTO((rtx));
+extern enum machine_mode get_pool_mode PROTO((rtx));
+extern int get_pool_offset PROTO((rtx));
+extern rtx simplify_subtraction PROTO((rtx));
+extern rtx assign_stack_local PROTO((enum machine_mode, int, int));
+extern rtx assign_stack_temp PROTO((enum machine_mode, int, int));
+extern rtx protect_from_queue PROTO((rtx, int));
+extern void emit_queue PROTO((void));
+extern rtx emit_move_insn PROTO((rtx, rtx));
+extern rtx emit_insn_before PROTO((rtx, rtx));
+extern rtx emit_jump_insn_before PROTO((rtx, rtx));
+extern rtx emit_call_insn_before PROTO((rtx, rtx));
+extern rtx emit_barrier_before PROTO((rtx));
+extern rtx emit_note_before PROTO((int, rtx));
+extern rtx emit_insn_after PROTO((rtx, rtx));
+extern rtx emit_jump_insn_after PROTO((rtx, rtx));
+extern rtx emit_barrier_after PROTO((rtx));
+extern rtx emit_label_after PROTO((rtx, rtx));
+extern rtx emit_note_after PROTO((int, rtx));
+extern rtx emit_line_note_after PROTO((char *, int, rtx));
+extern rtx emit_insn PROTO((rtx));
+extern rtx emit_insns PROTO((rtx));
+extern rtx emit_insns_before PROTO((rtx, rtx));
+extern rtx emit_jump_insn PROTO((rtx));
+extern rtx emit_call_insn PROTO((rtx));
+extern rtx emit_label PROTO((rtx));
+extern rtx emit_barrier PROTO((void));
+extern rtx emit_line_note PROTO((char *, int));
+extern rtx emit_note PROTO((char *, int));
+extern rtx emit_line_note_force PROTO((char *, int));
+extern rtx make_insn_raw PROTO((rtx));
+extern rtx previous_insn PROTO((rtx));
+extern rtx next_insn PROTO((rtx));
+extern rtx prev_nonnote_insn PROTO((rtx));
+extern rtx next_nonnote_insn PROTO((rtx));
+extern rtx prev_real_insn PROTO((rtx));
+extern rtx next_real_insn PROTO((rtx));
+extern rtx prev_active_insn PROTO((rtx));
+extern rtx next_active_insn PROTO((rtx));
+extern rtx prev_label PROTO((rtx));
+extern rtx next_label PROTO((rtx));
+extern rtx next_cc0_user PROTO((rtx));
+extern rtx prev_cc0_setter PROTO((rtx));
+extern rtx reg_set_last PROTO((rtx, rtx));
+extern rtx next_nondeleted_insn PROTO((rtx));
+extern enum rtx_code reverse_condition PROTO((enum rtx_code));
+extern enum rtx_code swap_condition PROTO((enum rtx_code));
+extern enum rtx_code unsigned_condition PROTO((enum rtx_code));
+extern enum rtx_code signed_condition PROTO((enum rtx_code));
+extern rtx find_equiv_reg PROTO((rtx, rtx, enum reg_class, int, short *, int, enum machine_mode));
+extern rtx squeeze_notes PROTO((rtx, rtx));
+extern rtx delete_insn PROTO((rtx));
+extern void delete_jump PROTO((rtx));
+extern rtx get_label_before PROTO((rtx));
+extern rtx get_label_after PROTO((rtx));
+extern rtx follow_jumps PROTO((rtx));
+extern rtx adj_offsettable_operand PROTO((rtx, int));
+extern rtx try_split PROTO((rtx, rtx, int));
+extern rtx split_insns PROTO((rtx, rtx));
+extern rtx simplify_unary_operation PROTO((enum rtx_code, enum machine_mode, rtx, enum machine_mode));
+extern rtx simplify_binary_operation PROTO((enum rtx_code, enum machine_mode, rtx, rtx));
+extern rtx simplify_ternary_operation PROTO((enum rtx_code, enum machine_mode, enum machine_mode, rtx, rtx, rtx));
+extern rtx simplify_relational_operation PROTO((enum rtx_code, enum machine_mode, rtx, rtx));
+extern rtx nonlocal_label_rtx_list PROTO((void));
+extern rtx gen_move_insn PROTO((rtx, rtx));
+extern rtx gen_jump PROTO((rtx));
+extern rtx gen_beq PROTO((rtx));
+extern rtx gen_bge PROTO((rtx));
+extern rtx gen_ble PROTO((rtx));
+extern rtx eliminate_constant_term PROTO((rtx, rtx *));
+extern rtx expand_complex_abs PROTO((enum machine_mode, rtx, rtx, int));
+extern enum machine_mode choose_hard_reg_mode PROTO((int, int));
+
+/* Maximum number of parallel sets and clobbers in any insn in this fn.
+ Always at least 3, since the combiner could put that many together,
+ and we want this to remain correct for all the remaining passes. */
+
+extern int max_parallel;
+
+extern int asm_noperands PROTO((rtx));
+extern char *decode_asm_operands PROTO((rtx, rtx *, rtx **, char **, enum machine_mode *));
+
+extern enum reg_class reg_preferred_class PROTO((int));
+extern enum reg_class reg_alternate_class PROTO((int));
+
+extern rtx get_first_nonparm_insn PROTO((void));
+
+/* Standard pieces of rtx, to be substituted directly into things. */
+extern rtx pc_rtx;
+extern rtx cc0_rtx;
+extern rtx const0_rtx;
+extern rtx const1_rtx;
+extern rtx const2_rtx;
+extern rtx constm1_rtx;
+extern rtx const_true_rtx;
+
+extern rtx const_tiny_rtx[3][(int) MAX_MACHINE_MODE];
+
+/* Returns a constant 0 rtx in mode MODE. Integer modes are treated the
+ same as VOIDmode. */
+
+#define CONST0_RTX(MODE) (const_tiny_rtx[0][(int) (MODE)])
+
+/* Likewise, for the constants 1 and 2. */
+
+#define CONST1_RTX(MODE) (const_tiny_rtx[1][(int) (MODE)])
+#define CONST2_RTX(MODE) (const_tiny_rtx[2][(int) (MODE)])
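+
+/* Illustrative sketch (not in the original source): these pick the shared,
+ canonical constant of a mode instead of building a fresh rtx. */
+#if 0
+ rtx zero = CONST0_RTX (SFmode); /* the shared floating zero for SFmode */
+ rtx one = CONST1_RTX (SImode); /* the shared CONST_INT 1, i.e. const1_rtx */
+#endif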
+
+/* All references to certain hard regs, except those created
+ by allocating pseudo regs into them (when that's possible),
+ go through these unique rtx objects. */
+extern rtx stack_pointer_rtx;
+extern rtx frame_pointer_rtx;
+extern rtx hard_frame_pointer_rtx;
+extern rtx arg_pointer_rtx;
+extern rtx pic_offset_table_rtx;
+extern rtx struct_value_rtx;
+extern rtx struct_value_incoming_rtx;
+extern rtx static_chain_rtx;
+extern rtx static_chain_incoming_rtx;
+
+/* If HARD_FRAME_POINTER_REGNUM is defined, then a special dummy reg
+ is used to represent the frame pointer. This is because the
+ hard frame pointer and the automatic variables are separated by an amount
+ that cannot be determined until after register allocation. We can assume
+ that in this case ELIMINABLE_REGS will be defined, one action of which
+ will be to eliminate FRAME_POINTER_REGNUM into HARD_FRAME_POINTER_REGNUM. */
+#ifndef HARD_FRAME_POINTER_REGNUM
+#define HARD_FRAME_POINTER_REGNUM FRAME_POINTER_REGNUM
+#endif
+
+/* Virtual registers are used during RTL generation to refer to locations into
+ the stack frame when the actual location isn't known until RTL generation
+ is complete. The routine instantiate_virtual_regs replaces these with
+ the proper value, which is normally {frame,arg,stack}_pointer_rtx plus
+ a constant. */
+
+#define FIRST_VIRTUAL_REGISTER (FIRST_PSEUDO_REGISTER)
+
+/* This points to the first word of the incoming arguments passed on the stack,
+ either by the caller or by the callee when pretending it was passed by the
+ caller. */
+
+extern rtx virtual_incoming_args_rtx;
+
+#define VIRTUAL_INCOMING_ARGS_REGNUM (FIRST_VIRTUAL_REGISTER)
+
+/* If FRAME_GROWS_DOWNWARD, this points to immediately above the first
+ variable on the stack. Otherwise, it points to the first variable on
+ the stack. */
+
+extern rtx virtual_stack_vars_rtx;
+
+#define VIRTUAL_STACK_VARS_REGNUM ((FIRST_VIRTUAL_REGISTER) + 1)
+
+/* This points to the location of dynamically-allocated memory on the stack
+ immediately after the stack pointer has been adjusted by the amount
+ desired. */
+
+extern rtx virtual_stack_dynamic_rtx;
+
+#define VIRTUAL_STACK_DYNAMIC_REGNUM ((FIRST_VIRTUAL_REGISTER) + 2)
+
+/* This points to the location in the stack at which outgoing arguments should
+ be written when the stack is pre-pushed (arguments pushed using push
+ insns always use sp). */
+
+extern rtx virtual_outgoing_args_rtx;
+
+#define VIRTUAL_OUTGOING_ARGS_REGNUM ((FIRST_VIRTUAL_REGISTER) + 3)
+
+#define LAST_VIRTUAL_REGISTER ((FIRST_VIRTUAL_REGISTER) + 3)
+
+extern rtx find_next_ref PROTO((rtx, rtx));
+extern rtx *find_single_use PROTO((rtx, rtx, rtx *));
+
+/* It is hard to write the prototype for expand_expr, since it needs
+ expr.h to be included for the enumeration. */
+
+extern rtx expand_expr ();
+
+extern rtx output_constant_def PROTO((union tree_node *));
+extern rtx immed_real_const PROTO((union tree_node *));
+extern union tree_node *make_tree PROTO((union tree_node *, rtx));
+
+/* Define a default value for STORE_FLAG_VALUE. */
+
+#ifndef STORE_FLAG_VALUE
+#define STORE_FLAG_VALUE 1
+#endif
+
+/* Nonzero after end of reload pass.
+ Set to 1 or 0 by toplev.c. */
+
+extern int reload_completed;
+
+/* Set to 1 while reload_as_needed is operating.
+ Required by some machines to handle any generated moves differently. */
+
+extern int reload_in_progress;
+
+/* If this is nonzero, we do not bother generating VOLATILE
+ around volatile memory references, and we are willing to
+ output indirect addresses. If cse is to follow, we reject
+ indirect addresses so a useful potential cse is generated;
+ if it is used only once, instruction combination will produce
+ the same indirect address eventually. */
+extern int cse_not_expected;
+
+/* Indexed by pseudo register number, gives the rtx for that pseudo.
+ Allocated in parallel with regno_pointer_flag. */
+extern rtx *regno_reg_rtx;
+
+/* Translates rtx code to tree code, for those codes needed by
+ REAL_ARITHMETIC. The function returns an int because the caller may not
+ know what `enum tree_code' means. */
+
+extern int rtx_to_tree_code PROTO((enum rtx_code));
diff --git a/gnu/usr.bin/cc/include/stack.h b/gnu/usr.bin/cc/include/stack.h
new file mode 100644
index 0000000..c5d9a25
--- /dev/null
+++ b/gnu/usr.bin/cc/include/stack.h
@@ -0,0 +1,41 @@
+/* stack.h - structured access to object stacks
+ Copyright (C) 1988 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com).
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Summary: this file contains additional structures that layer
+ on top of obstacks for GNU C++. */
+
+/* Stack of data placed on obstacks. */
+
+struct stack_level
+{
+ /* Pointer back to previous such level. */
+ struct stack_level *prev;
+
+ /* Point to obstack we should return to. */
+ struct obstack *obstack;
+
+ /* First place we start putting data. */
+ tree *first;
+
+ /* Number of entries we can have from `first'.
+ Right now we are dumb: if we overflow, abort. */
+ int limit;
+};
+
+struct stack_level *push_stack_level PROTO((struct obstack *, char *, int));
+struct stack_level *pop_stack_level PROTO((struct stack_level *));
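+
+/* Illustrative sketch (not in the original source): a level records the
+ obstack to return to and is popped when the scope ends. The names
+ `decl_obstack' and `search_stack' are placeholders. */
+#if 0
+ struct stack_level *search_stack;
+
+ search_stack = push_stack_level (&decl_obstack, (char *) 0, 0);
+ /* ... store tree pointers starting at search_stack->first ... */
+ search_stack = pop_stack_level (search_stack);
+#endif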
diff --git a/gnu/usr.bin/cc/include/tconfig.h b/gnu/usr.bin/cc/include/tconfig.h
new file mode 100644
index 0000000..7886724
--- /dev/null
+++ b/gnu/usr.bin/cc/include/tconfig.h
@@ -0,0 +1,42 @@
+/* Configuration for GNU C-compiler for Intel 80386.
+ Copyright (C) 1988, 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifndef i386
+#define i386
+#endif
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+
+#include "tm.h"
diff --git a/gnu/usr.bin/cc/include/tm.h b/gnu/usr.bin/cc/include/tm.h
new file mode 100644
index 0000000..12f7ebc
--- /dev/null
+++ b/gnu/usr.bin/cc/include/tm.h
@@ -0,0 +1,327 @@
+/* Definitions of target machine for GNU compiler for Intel 80386
+ running FreeBSD.
+ Copyright (C) 1988, 1992, 1994 Free Software Foundation, Inc.
+ Contributed by Poul-Henning Kamp <phk@login.dkuug.dk>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* This goes away when the math-emulator is fixed. */
+#define TARGET_CPU_DEFAULT 0400 /* TARGET_NO_FANCY_MATH_387 */
+
+/* This is tested by i386gas.h. */
+#define YES_UNDERSCORES
+
+#include "i386/gstabs.h"
+
+/* Get perform_* macros to build libgcc.a. */
+#include "i386/perform.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dunix -Di386 -D__FreeBSD__ -D__386BSD__ -Asystem(unix) -Asystem(FreeBSD) -Acpu(i386) -Amachine(i386)"
+
+#define INCLUDE_DEFAULTS { \
+ { "/usr/include", 0 }, \
+ { "/usr/include/g++", 1 }, \
+ { 0, 0} \
+ }
+
+#define ASM_SPEC " %| %{fpic:-k} %{fPIC:-k}"
+
+/* Like the default, except no -lg. */
+#define LIB_SPEC "%{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}"
+
+#define LINK_SPEC \
+ "%{!nostdlib:%{!r*:%{!e*:-e start}}} -dc -dp %{static:-Bstatic} %{assert*} \
+ %{p:-Bstatic} %{pg:-Bstatic} %{Z}"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#define WCHAR_UNSIGNED 0
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#define HAVE_ATEXIT
+
+/* Tell final.c that we don't need a label passed to mcount. */
+
+#define NO_PROFILE_DATA
+
+/* Redefine this to not pass an unused label in %edx. */
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+{ \
+ if (flag_pic) \
+ fprintf (FILE, "\tcall *mcount@GOT(%%ebx)\n"); \
+ else \
+ fprintf (FILE, "\tcall mcount\n"); \
+}
+
+#define FUNCTION_PROFILER_EPILOGUE(FILE) \
+{ \
+ if (flag_pic) \
+ fprintf (FILE, "\tcall *mexitcount@GOT(%%ebx)\n"); \
+ else \
+ fprintf (FILE, "\tcall mexitcount\n"); \
+}
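+
+/* For illustration (not in the original source), the non-PIC cases above
+ emit simply
+
+	call mcount			# on function entry
+	call mexitcount			# on function exit
+
+ while the PIC cases reach both counters through the GOT via %ebx. */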
+
+/* There are conflicting reports about whether this system uses
+ a different assembler syntax. wilson@cygnus.com says # is right. */
+#undef COMMENT_BEGIN
+#define COMMENT_BEGIN "#"
+
+#undef ASM_APP_ON
+#define ASM_APP_ON "#APP\n"
+
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "#NO_APP\n"
+
+/* The following macros are stolen from i386v4.h */
+/* These have to be defined to get PIC code correct */
+
+/* This is how to output an element of a case-vector that is relative.
+ This is only used for PIC code. See comments by the `casesi' insn in
+ i386.md for an explanation of the expression this outputs. */
+
+#undef ASM_OUTPUT_ADDR_DIFF_ELT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+ fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", LPREFIX, VALUE)
+
+/* Indicate that jump tables go in the text section. This is
+ necessary when compiling PIC code. */
+
+#define JUMP_TABLES_IN_TEXT_SECTION
+
+/* Don't default to pcc-struct-return, because gcc is the only compiler, and
+ we want to retain compatibility with older gcc versions. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/*
+ * Some imports from svr4.h in support of shared libraries.
+ * Currently, we need the DECLARE_OBJECT_SIZE stuff.
+ */
+
+/* Define the strings used for the special svr4 .type and .size directives.
+ These strings generally do not vary from one system running svr4 to
+ another, but if a given system (e.g. m88k running svr4) needs to use
+ different pseudo-op names for these, they may be overridden in the
+ file which includes this one. */
+
+#define TYPE_ASM_OP ".type"
+#define SIZE_ASM_OP ".size"
+#define WEAK_ASM_OP ".weak"
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various different forms for this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending upon the particulars of your assembler). */
+
+#define TYPE_OPERAND_FMT "@%s"
+
+/* Write the extra assembler code needed to declare a function's result.
+ Most svr4 assemblers don't require any special declaration of the
+ result value, but there are exceptions. */
+
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers need to also have something extra said about the
+ function's return value. We allow for that here. */
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
+ putc ('\n', FILE); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } while (0)
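+
+/* For illustration (not in the original source): for a function `foo' this
+ emits roughly
+
+	.type	 _foo,@function
+	_foo:
+
+ (the leading underscore comes from YES_UNDERSCORES above). */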
+
+/* Write the extra assembler code needed to declare an object properly. */
+
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
+ putc ('\n', FILE); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
+ } \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+do { \
+ char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, name); \
+ fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
+ } \
+ } while (0)
+
+/* This is how to declare the size of a function. */
+
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ char label[256]; \
+ static int labelno; \
+ labelno++; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, (FNAME)); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, (FNAME)); \
+ putc ('\n', FILE); \
+ } \
+ } while (0)
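+
+/* For illustration (not in the original source): for the first function
+ `foo' in a file this emits roughly
+
+	Lfe1:
+	.size	 _foo,Lfe1-_foo
+
+ i.e. the size is the distance from the function's entry to the label
+ placed just past its end. */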
+
+/* This section copied from i386/osfrose.h */
+
+/* A C statement or compound statement to output to FILE some
+ assembler code to initialize basic-block profiling for the current
+ object module. This code should call the subroutine
+ `__bb_init_func' once per object module, passing it as its sole
+ argument the address of a block allocated in the object module.
+
+ The name of the block is a local symbol made with this statement:
+
+ ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);
+
+ Of course, since you are writing the definition of
+ `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
+ can take a short cut in the definition of this macro and use the
+ name that you know will result.
+
+ The first word of this block is a flag which will be nonzero if the
+ object module has already been initialized. So test this word
+ first, and do not call `__bb_init_func' if the flag is nonzero. */
+
+#undef FUNCTION_BLOCK_PROFILER
+#define FUNCTION_BLOCK_PROFILER(STREAM, LABELNO) \
+do \
+ { \
+ if (!flag_pic) \
+ { \
+ fprintf (STREAM, "\tcmpl $0,%sPBX0\n", LPREFIX); \
+ fprintf (STREAM, "\tjne 0f\n"); \
+ fprintf (STREAM, "\tpushl $%sPBX0\n", LPREFIX); \
+ fprintf (STREAM, "\tcall ___bb_init_func\n"); \
+ fprintf (STREAM, "0:\n"); \
+ } \
+ else \
+ { \
+ fprintf (STREAM, "\tpushl %%eax\n"); \
+ fprintf (STREAM, "\tmovl %sPBX0@GOT(%%ebx),%%eax\n", LPREFIX); \
+ fprintf (STREAM, "\tcmpl $0,(%%eax)\n"); \
+ fprintf (STREAM, "\tjne 0f\n"); \
+ fprintf (STREAM, "\tpushl %%eax\n"); \
+ fprintf (STREAM, "\tcall ___bb_init_func@PLT\n"); \
+ fprintf (STREAM, "0:\n"); \
+ fprintf (STREAM, "\tpopl %%eax\n"); \
+ } \
+ } \
+while (0)
+
+/* A C statement or compound statement to increment the count
+ associated with the basic block number BLOCKNO. Basic blocks are
+ numbered separately from zero within each compilation. The count
+ associated with block number BLOCKNO is at index BLOCKNO in a
+ vector of words; the name of this array is a local symbol made
+ with this statement:
+
+ ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 2);
+
+ Of course, since you are writing the definition of
+ `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
+ can take a short cut in the definition of this macro and use the
+ name that you know will result. */
+
+#undef BLOCK_PROFILER
+#define BLOCK_PROFILER(STREAM, BLOCKNO) \
+do \
+ { \
+ if (!flag_pic) \
+ fprintf (STREAM, "\tincl %sPBX2+%d\n", LPREFIX, (BLOCKNO)*4); \
+ else \
+ { \
+ fprintf (STREAM, "\tpushl %%eax\n"); \
+ fprintf (STREAM, "\tmovl %sPBX2@GOT(%%ebx),%%eax\n", LPREFIX); \
+ fprintf (STREAM, "\tincl %d(%%eax)\n", (BLOCKNO)*4); \
+ fprintf (STREAM, "\tpopl %%eax\n"); \
+ } \
+ } \
+while (0)
+
+/* This is defined when gcc is compiled in the BSD directory tree, and must
+ * make up for everything that the GNU makefiles would otherwise have done.
+ */
+
+#ifdef FREEBSD_NATIVE
+
+#undef MD_EXEC_PREFIX
+#define MD_EXEC_PREFIX "/usr/libexec/"
+
+#undef STANDARD_STARTFILE_PREFIX
+#define STANDARD_STARTFILE_PREFIX "/usr/lib"
+
+#define DEFAULT_TARGET_MACHINE "i386-unknown-freebsd_1.0"
+#define GPLUSPLUS_INCLUDE_DIR "/usr/local/lib/gcc-lib/i386-unknown-freebsd_1.0/2.5.8/include"
+#define TOOL_INCLUDE_DIR "/usr/local/i386-unknown-freebsd_1.0/include"
+#define GCC_INCLUDE_DIR "/usr/local/lib/gcc-lib/i386-unknown-freebsd_1.0/2.5.8/include"
+
+#endif /* FREEBSD_NATIVE */
diff --git a/gnu/usr.bin/cc/include/tree.def b/gnu/usr.bin/cc/include/tree.def
new file mode 100644
index 0000000..71d6386
--- /dev/null
+++ b/gnu/usr.bin/cc/include/tree.def
@@ -0,0 +1,695 @@
+/* This file contains the definitions and documentation for the
+ tree codes used in the GNU C compiler.
+ Copyright (C) 1987, 1988, 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* The third argument can be:
+ "x" for an exceptional code (fits no category).
+ "t" for a type object code.
+ "b" for a lexical block.
+ "c" for codes for constants.
+ "d" for codes for declarations (also serving as variable refs).
+ "r" for codes for references to storage.
+ "<" for codes for comparison expressions.
+ "1" for codes for unary arithmetic expressions.
+ "2" for codes for binary arithmetic expressions.
+ "s" for codes for expressions with inherent side effects.
+ "e" for codes for other kinds of expressions. */
+
+/* For `r', `e', `<', `1', `2', `s' and `x' nodes,
+ the 4th element is the number of argument slots to allocate.
+ This determines the size of the tree node object. */
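+
+/* Illustrative sketch (not part of this file): tree.def is meant to be
+ included with DEFTREECODE defined to pick out one column at a time.
+ The enum of tree codes, for instance, is built essentially like this:
+
+	#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) SYM,
+	enum tree_code {
+	#include "tree.def"
+	  LAST_AND_UNUSED_TREE_CODE
+	};
+	#undef DEFTREECODE
+*/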
+
+/* Any erroneous construct is parsed into a node of this type.
+ This type of node is accepted without complaint in all contexts
+ by later parsing activities, to avoid multiple error messages
+ for one error.
+ No fields in these nodes are used except the TREE_CODE. */
+DEFTREECODE (ERROR_MARK, "error_mark", "x", 0)
+
+/* Used to represent a name (such as, in the DECL_NAME of a decl node).
+ Internally it looks like a STRING_CST node.
+ There is only one IDENTIFIER_NODE ever made for any particular name.
+ Use `get_identifier' to get it (or create it, the first time). */
+DEFTREECODE (IDENTIFIER_NODE, "identifier_node", "x", -1)
+
+/* Used to hold information to identify an operator (or combination
+ of two operators) considered as a `noun' rather than a `verb'.
+ The first operand is encoded in the TREE_TYPE field. */
+DEFTREECODE (OP_IDENTIFIER, "op_identifier", "x", 2)
+
+/* Has the TREE_VALUE and TREE_PURPOSE fields. */
+/* These nodes are made into lists by chaining through the
+ TREE_CHAIN field. The elements of the list live in the
+ TREE_VALUE fields, while TREE_PURPOSE fields are occasionally
+ used as well to get the effect of Lisp association lists. */
+DEFTREECODE (TREE_LIST, "tree_list", "x", 2)
+
+/* These nodes contain an array of tree nodes. */
+DEFTREECODE (TREE_VEC, "tree_vec", "x", 2)
+
+/* A symbol binding block. These are arranged in a tree,
+ where the BLOCK_SUBBLOCKS field contains a chain of subblocks
+ chained through the BLOCK_CHAIN field.
+ BLOCK_SUPERCONTEXT points to the parent block.
+ For a block which represents the outermost scope of a function, it
+ points to the FUNCTION_DECL node.
+ BLOCK_VARS points to a chain of decl nodes.
+ BLOCK_TYPE_TAGS points to a chain of types which have their own names.
+ BLOCK_CHAIN points to the next BLOCK at the same level.
+ BLOCK_ABSTRACT_ORIGIN points to the original (abstract) tree node which
+ this block is an instance of, or else is NULL to indicate that this
+ block is not an instance of anything else. When non-NULL, the value
+ could either point to another BLOCK node or it could point to a
+ FUNCTION_DECL node (e.g. in the case of a block representing the
+ outermost scope of a particular inlining of a function).
+ BLOCK_ABSTRACT is non-zero if the block represents an abstract
+ instance of a block (i.e. one which is nested within an abstract
+ instance of an inline function). */
+DEFTREECODE (BLOCK, "block", "b", 0)
+
+/* Each data type is represented by a tree node whose code is one of
+ the following: */
+/* Each node that represents a data type has a component TYPE_SIZE
+ containing a tree that is an expression for the size in bits.
+ The TYPE_MODE contains the machine mode for values of this type.
+ The TYPE_POINTER_TO field contains a type for a pointer to this type,
+ or zero if no such has been created yet.
+ The TYPE_NEXT_VARIANT field is used to chain together types
+ that are variants made by type modifiers such as "const" and "volatile".
+ The TYPE_MAIN_VARIANT field, in any member of such a chain,
+ points to the start of the chain.
+ The TYPE_NONCOPIED_PARTS field is a list specifying which parts
+ of an object of this type should *not* be copied by assignment.
+ The TREE_PURPOSE of each element is the offset of the part
+ and the TREE_VALUE is the size in bits of the part.
+ The TYPE_NAME field contains info on the name used in the program
+ for this type (for GDB symbol table output). It is either a
+ TYPE_DECL node, for types that are typedefs, or an IDENTIFIER_NODE
+ in the case of structs, unions or enums that are known with a tag,
+ or zero for types that have no special name.
+ The TYPE_CONTEXT for any sort of type which could have a name or
+ which could have named members (e.g. tagged types in C/C++) will
+ point to the node which represents the scope of the given type, or
+ will be NULL_TREE if the type has "file scope". For most types, this
+ will point to a BLOCK node or a FUNCTION_DECL node, but it could also
+ point to a FUNCTION_TYPE node (for types whose scope is limited to the
+ formal parameter list of some function type specification) or it
+ could point to a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE node
+ (for C++ "member" types).
+ For non-tagged-types, TYPE_CONTEXT need not be set to anything in
+ particular, since any type which is of some type category (e.g.
+ an array type or a function type) which cannot either have a name
+ itself or have named members doesn't really have a "scope" per se.
+ The TREE_CHAIN field is used as a forward-references to names for
+ ENUMERAL_TYPE, RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE nodes;
+ see below. */
+
+DEFTREECODE (VOID_TYPE, "void_type", "t", 0) /* The void type in C */
+
+/* Integer types in all languages, including char in C.
+ Also used for sub-ranges of other discrete types.
+ Has components TYPE_MIN_VALUE, TYPE_MAX_VALUE (expressions, inclusive)
+ and TYPE_PRECISION (number of bits used by this type).
+ In the case of a subrange type in Pascal, the TREE_TYPE
+ of this will point at the supertype (another INTEGER_TYPE,
+ or an ENUMERAL_TYPE, CHAR_TYPE, or BOOLEAN_TYPE).
+ Otherwise, the TREE_TYPE is zero. */
+DEFTREECODE (INTEGER_TYPE, "integer_type", "t", 0)
+
+/* C's float and double. Different floating types are distinguished
+ by machine mode and by the TYPE_SIZE and the TYPE_PRECISION. */
+DEFTREECODE (REAL_TYPE, "real_type", "t", 0)
+
+/* Complex number types. The TREE_TYPE field is the data type
+ of the real and imaginary parts. */
+DEFTREECODE (COMPLEX_TYPE, "complex_type", "t", 0)
+
+/* C enums. The type node looks just like an INTEGER_TYPE node.
+ The symbols for the values of the enum type are defined by
+ CONST_DECL nodes, but the type does not point to them;
+ however, the TYPE_VALUES is a list in which each element's TREE_PURPOSE
+ is a name and the TREE_VALUE is the value (an INTEGER_CST node). */
+/* A forward reference `enum foo' when no enum named foo is defined yet
+ has zero (a null pointer) in its TYPE_SIZE. The tag name is in
+ the TYPE_NAME field. If the type is later defined, the normal
+ fields are filled in.
+ RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE forward refs are
+ treated similarly. */
+DEFTREECODE (ENUMERAL_TYPE, "enumeral_type", "t", 0)
+
+/* Pascal's boolean type (true or false are the only values);
+ no special fields needed. */
+DEFTREECODE (BOOLEAN_TYPE, "boolean_type", "t", 0)
+
+/* CHAR in Pascal; not used in C.
+ No special fields needed. */
+DEFTREECODE (CHAR_TYPE, "char_type", "t", 0)
+
+/* All pointer-to-x types have code POINTER_TYPE.
+ The TREE_TYPE points to the node for the type pointed to. */
+DEFTREECODE (POINTER_TYPE, "pointer_type", "t", 0)
+
+/* An offset is a pointer relative to an object.
+ The TREE_TYPE field is the type of the object at the offset.
+ The TYPE_OFFSET_BASETYPE points to the node for the type of object
+ that the offset is relative to. */
+DEFTREECODE (OFFSET_TYPE, "offset_type", "t", 0)
+
+/* A reference is like a pointer except that it is coerced
+ automatically to the value it points to. Used in C++. */
+DEFTREECODE (REFERENCE_TYPE, "reference_type", "t", 0)
+
+/* METHOD_TYPE is the type of a function which takes an extra first
+ argument for "self", which is not present in the declared argument list.
+ The TREE_TYPE is the return type of the method. The TYPE_METHOD_BASETYPE
+ is the type of "self". TYPE_ARG_TYPES is the real argument list, which
+ includes the hidden argument for "self". */
+DEFTREECODE (METHOD_TYPE, "method_type", "t", 0)
+
+/* Used for Pascal; details not determined right now. */
+DEFTREECODE (FILE_TYPE, "file_type", "t", 0)
+
+/* Types of arrays. Special fields:
+ TREE_TYPE Type of an array element.
+ TYPE_DOMAIN Type to index by.
+ Its range of values specifies the array length.
+ TYPE_SEP Expression for units from one elt to the next.
+ TYPE_SEP_UNIT Number of bits in a unit for previous.
+ The field TYPE_POINTER_TO (TREE_TYPE (array_type)) is always nonzero
+ and holds the type to coerce a value of that array type to in C.
+ TYPE_STRING_FLAG indicates a string (in contrast to an array of chars)
+ in languages (such as Chill) that make a distinction. */
+/* Array types in C or Pascal */
+DEFTREECODE (ARRAY_TYPE, "array_type", "t", 0)
+
+/* Types of sets for Pascal. Special fields are the same as
+ in an array type. The target type is always a boolean type. */
+DEFTREECODE (SET_TYPE, "set_type", "t", 0)
+
+/* Struct in C, or record in Pascal. */
+/* Special fields:
+ TYPE_FIELDS chain of FIELD_DECLs for the fields of the struct.
+ A few may need to be added for Pascal. */
+/* See the comment above, before ENUMERAL_TYPE, for how
+ forward references to struct tags are handled in C. */
+DEFTREECODE (RECORD_TYPE, "record_type", "t", 0)
+
+/* Union in C. Like a struct, except that the offsets of the fields
+ will all be zero. */
+/* See the comment above, before ENUMERAL_TYPE, for how
+ forward references to union tags are handled in C. */
+DEFTREECODE (UNION_TYPE, "union_type", "t", 0) /* C union type */
+
+/* Similar to UNION_TYPE, except that the expressions in DECL_QUALIFIER
+ in each FIELD_DECL determine what the union contains. The first
+ field whose DECL_QUALIFIER expression is true is deemed to occupy
+ the union. */
+DEFTREECODE (QUAL_UNION_TYPE, "qual_union_type", "t", 0)
+
+/* Type of functions. Special fields:
+ TREE_TYPE type of value returned.
+ TYPE_ARG_TYPES list of types of arguments expected.
+ this list is made of TREE_LIST nodes.
+ Types of "Procedures" in languages where they are different from functions
+ have code FUNCTION_TYPE also, but then TREE_TYPE is zero or void type. */
+DEFTREECODE (FUNCTION_TYPE, "function_type", "t", 0)
+
+/* This is a language-specific kind of type.
+ Its meaning is defined by the language front end.
+ layout_type does not know how to lay this out,
+ so the front-end must do so manually. */
+DEFTREECODE (LANG_TYPE, "lang_type", "t", 0)
+
+/* Expressions */
+
+/* First, the constants. */
+
+/* Contents are in TREE_INT_CST_LOW and TREE_INT_CST_HIGH fields,
+ 32 bits each, giving us a 64 bit constant capability.
+ Note: constants of type char in Pascal are INTEGER_CST,
+ and so are pointer constants such as nil in Pascal or NULL in C.
+ `(int *) 1' in C also results in an INTEGER_CST. */
+DEFTREECODE (INTEGER_CST, "integer_cst", "c", 2)
+
+/* Contents are in TREE_REAL_CST field. Also there is TREE_CST_RTL. */
+DEFTREECODE (REAL_CST, "real_cst", "c", 3)
+
+/* Contents are in TREE_REALPART and TREE_IMAGPART fields,
+ whose contents are other constant nodes.
+ Also there is TREE_CST_RTL. */
+DEFTREECODE (COMPLEX_CST, "complex_cst", "c", 3)
+
+/* Contents are TREE_STRING_LENGTH and TREE_STRING_POINTER fields.
+ Also there is TREE_CST_RTL. */
+DEFTREECODE (STRING_CST, "string_cst", "c", 3)
+
+/* Declarations. All references to names are represented as ..._DECL nodes.
+ The decls in one binding context are chained through the TREE_CHAIN field.
+ Each DECL has a DECL_NAME field which contains an IDENTIFIER_NODE.
+ (Some decls, most often labels, may have zero as the DECL_NAME).
+ DECL_CONTEXT points to the node representing the context in which
+ this declaration has its scope. For FIELD_DECLs, this is the
+ RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE node that the field
+ is a member of. For VAR_DECL, PARM_DECL, FUNCTION_DECL, LABEL_DECL,
+ and CONST_DECL nodes, this points to the FUNCTION_DECL for the
+ containing function, or else yields NULL_TREE if the given decl
+ has "file scope".
+ DECL_ABSTRACT_ORIGIN, if non-NULL, points to the original (abstract)
+ ..._DECL node of which this decl is an (inlined or template expanded)
+ instance.
+ The TREE_TYPE field holds the data type of the object, when relevant.
+ LABEL_DECLs have no data type. For TYPE_DECL, the TREE_TYPE field
+ contents are the type whose name is being declared.
+ The DECL_ALIGN, DECL_SIZE,
+ and DECL_MODE fields exist in decl nodes just as in type nodes.
+ They are unused in LABEL_DECL, TYPE_DECL and CONST_DECL nodes.
+
+ DECL_OFFSET holds an integer number of bits offset for the location.
+ DECL_VOFFSET holds an expression for a variable offset; it is
+ to be multiplied by DECL_VOFFSET_UNIT (an integer).
+ These fields are relevant only in FIELD_DECLs and PARM_DECLs.
+
+ DECL_INITIAL holds the value to initialize a variable to,
+ or the value of a constant. For a function, it holds the body
+ (a node of type BLOCK representing the function's binding contour
+ and whose body contains the function's statements.) For a LABEL_DECL
+ in C, it is a flag, nonzero if the label's definition has been seen.
+
+ PARM_DECLs use a special field:
+ DECL_ARG_TYPE is the type in which the argument is actually
+ passed, which may be different from its type within the function.
+
+ FUNCTION_DECLs use four special fields:
+ DECL_ARGUMENTS holds a chain of PARM_DECL nodes for the arguments.
+ DECL_RESULT holds a RESULT_DECL node for the value of a function,
+ or it is 0 for a function that returns no value.
+ (C functions returning void have zero here.)
+ DECL_RESULT_TYPE holds the type in which the result is actually
+ returned. This is usually the same as the type of DECL_RESULT,
+ but (1) it may be a wider integer type and
+ (2) it remains valid, for the sake of inlining, even after the
+ function's compilation is done.
+ DECL_FUNCTION_CODE is a code number that is nonzero for
+ built-in functions. Its value is an enum built_in_function
+ that says which built-in function it is.
+
+ DECL_SOURCE_FILE holds a filename string and DECL_SOURCE_LINE
+ holds a line number. In some cases these can be the location of
+ a reference, if no definition has been seen.
+
+ DECL_ABSTRACT is non-zero if the decl represents an abstract instance
+ of a decl (i.e. one which is nested within an abstract instance of an
+ inline function). */
+
+DEFTREECODE (FUNCTION_DECL, "function_decl", "d", 0)
+DEFTREECODE (LABEL_DECL, "label_decl", "d", 0)
+DEFTREECODE (CONST_DECL, "const_decl", "d", 0)
+DEFTREECODE (TYPE_DECL, "type_decl", "d", 0)
+DEFTREECODE (VAR_DECL, "var_decl", "d", 0)
+DEFTREECODE (PARM_DECL, "parm_decl", "d", 0)
+DEFTREECODE (RESULT_DECL, "result_decl", "d", 0)
+DEFTREECODE (FIELD_DECL, "field_decl", "d", 0)
+
+/* References to storage. */
+
+/* Value is structure or union component.
+ Operand 0 is the structure or union (an expression);
+ operand 1 is the field (a node of type FIELD_DECL). */
+DEFTREECODE (COMPONENT_REF, "component_ref", "r", 2)
+
+/* Reference to a group of bits within an object. Similar to COMPONENT_REF
+ except the position is given explicitly rather than via a FIELD_DECL.
+ Operand 0 is the structure or union expression;
+ operand 1 is a tree giving the number of bits being referenced;
+ operand 2 is a tree giving the position of the first referenced bit.
+ The field can be either a signed or unsigned field;
+ TREE_UNSIGNED says which. */
+DEFTREECODE (BIT_FIELD_REF, "bit_field_ref", "r", 3)
+
+/* C unary `*' or Pascal `^'. One operand, an expression for a pointer. */
+DEFTREECODE (INDIRECT_REF, "indirect_ref", "r", 1)
+
+/* Reference to the contents of an offset
+ (a value whose type is an OFFSET_TYPE).
+ Operand 0 is the object within which the offset is taken.
+ Operand 1 is the offset. */
+DEFTREECODE (OFFSET_REF, "offset_ref", "r", 2)
+
+/* Pascal `^' on a file. One operand, an expression for the file. */
+DEFTREECODE (BUFFER_REF, "buffer_ref", "r", 1)
+
+/* Array indexing in languages other than C.
+ Operand 0 is the array; operand 1 is a list of indices
+ stored as a chain of TREE_LIST nodes. */
+DEFTREECODE (ARRAY_REF, "array_ref", "r", 2)
+
+/* Constructor: return an aggregate value made from specified components.
+ In C, this is used only for structure and array initializers.
+ The first "operand" is really a pointer to the RTL,
+ for constant constructors only.
+ The second operand is a list of component values
+ made out of a chain of TREE_LIST nodes. */
+DEFTREECODE (CONSTRUCTOR, "constructor", "e", 2)
+
+/* The expression types are mostly straightforward,
+ with the fourth argument of DEFTREECODE saying
+ how many operands there are.
+ Unless otherwise specified, the operands are expressions. */
+
+/* Contains two expressions to compute, one followed by the other.
+ The first value is ignored. The second one's value is used. */
+DEFTREECODE (COMPOUND_EXPR, "compound_expr", "e", 2)
+
+/* Assignment expression. Operand 0 is the what to set; 1, the new value. */
+DEFTREECODE (MODIFY_EXPR, "modify_expr", "e", 2)
+
+/* Initialization expression. Operand 0 is the variable to initialize;
+ Operand 1 is the initializer. */
+DEFTREECODE (INIT_EXPR, "init_expr", "e", 2)
+
+/* For TARGET_EXPR, operand 0 is the target of an initialization,
+ operand 1 is the initializer for the target,
+ and operand 2 is the cleanup for this node, if any. */
+DEFTREECODE (TARGET_EXPR, "target_expr", "e", 3)
+
+/* Conditional expression ( ... ? ... : ... in C).
+ Operand 0 is the condition.
+ Operand 1 is the then-value.
+ Operand 2 is the else-value. */
+DEFTREECODE (COND_EXPR, "cond_expr", "e", 3)
+
+/* Declare local variables, including making RTL and allocating space.
+ Operand 0 is a chain of VAR_DECL nodes for the variables.
+ Operand 1 is the body, the expression to be computed using
+ the variables. The value of operand 1 becomes that of the BIND_EXPR.
+ Operand 2 is the BLOCK that corresponds to these bindings
+ for debugging purposes. If this BIND_EXPR is actually expanded,
+ that sets the TREE_USED flag in the BLOCK.
+
+ The BIND_EXPR is not responsible for informing parsers
+ about these variables. If the body is coming from the input file,
+ then the code that creates the BIND_EXPR is also responsible for
+ informing the parser of the variables.
+
+ If the BIND_EXPR is ever expanded, its TREE_USED flag is set.
+ This tells the code for debugging symbol tables not to ignore the BIND_EXPR.
+ If the BIND_EXPR should be output for debugging but will not be expanded,
+ set the TREE_USED flag by hand.
+
+ In order for the BIND_EXPR to be known at all, the code that creates it
+ must also install it as a subblock in the tree of BLOCK
+ nodes for the function. */
+DEFTREECODE (BIND_EXPR, "bind_expr", "e", 3)
+
+/* Function call. Operand 0 is the function.
+ Operand 1 is the argument list, a list of expressions
+ made out of a chain of TREE_LIST nodes.
+ There is no operand 2. That slot is used for the
+ CALL_EXPR_RTL macro (see preexpand_calls). */
+DEFTREECODE (CALL_EXPR, "call_expr", "e", 3)
+
+/* Call a method. Operand 0 is the method, whose type is a METHOD_TYPE.
+ Operand 1 is the expression for "self".
+ Operand 2 is the list of explicit arguments. */
+DEFTREECODE (METHOD_CALL_EXPR, "method_call_expr", "e", 4)
+
+/* Specify a value to compute along with its corresponding cleanup.
+ Operand 0 argument is an expression whose value needs a cleanup.
+ Operand 1 is an RTL_EXPR which will eventually represent that value.
+ Operand 2 is the cleanup expression for the object.
+ The RTL_EXPR is used in this expression, which is how the expression
+ manages to act on the proper value.
+ The cleanup is executed by the first enclosing CLEANUP_POINT_EXPR, if
+ it exists, otherwise it is the responsibility of the caller to manually
+ call expand_cleanups_to, as needed. */
+DEFTREECODE (WITH_CLEANUP_EXPR, "with_cleanup_expr", "e", 3)
+
+/* Specify a cleanup point.
+ Operand 0 is the expression that has cleanups that we want ensure are
+ cleaned up. */
+DEFTREECODE (CLEANUP_POINT_EXPR, "cleanup_point_expr", "e", 1)
+
+/* The following two codes are used in languages that have types where
+ the position and/or sizes of fields vary from object to object of the
+ same type, i.e., where some other field in the object contains a value
+ that is used in the computation of another field's offset or size.
+
+ For example, a record type with a discriminant in Ada is such a type.
+ This mechanism is also used to create "fat pointers" for unconstrained
+ array types in Ada; the fat pointer is a structure one of whose fields is
+ a pointer to the actual array type and the other field is a pointer to a
+ template, which is a structure containing the bounds of the array. The
+ bounds in the type pointed to by the first field in the fat pointer refer
+ to the values in the template.
+
+ These "self-references" are done using a PLACEHOLDER_EXPR. This is a
+ node that will later be replaced with the object being referenced. Its type
+ is that of the object and selects which object to use from a chain of
+ references (see below).
+
+ When we wish to evaluate a size or offset, we check whether it contains a
+ placeholder. If it does, we construct a WITH_RECORD_EXPR that contains
+ both the expression we wish to evaluate and an expression within which the
+ object may be found. The latter expression is the object itself in
+ the simple case of an Ada record with discriminant, but it can be the
+ array in the case of an unconstrained array.
+
+ In the latter case, we need the fat pointer, because the bounds of the
+ array can only be accessed from it. However, we rely here on the fact that
+ the expression for the array contains the dereference of the fat pointer
+ that obtained the array pointer.
+
+ Accordingly, when looking for the object to substitute in place of
+ a PLACEHOLDER_EXPR, we look down the first operand of the expression
+ passed as the second operand to WITH_RECORD_EXPR until we find something
+ of the desired type or reach a constant. */
+
+/* Denotes a record to later be supplied with a WITH_RECORD_EXPR when
+ evaluating this expression. The type of this expression is used to
+ find the record to replace it. */
+DEFTREECODE (PLACEHOLDER_EXPR, "placeholder_expr", "x", 0)
+
+/* Provide an expression that references a record to be used in place
+ of a PLACEHOLDER_EXPR. The record to be used is the record within
+ operand 1 that has the same type as the PLACEHOLDER_EXPR in
+ operand 0. */
+DEFTREECODE (WITH_RECORD_EXPR, "with_record_expr", "e", 2)
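+
+/* Editor's illustrative sketch (not in the original sources): for a
+   record type REC with fields LEN and DATA whose size depends on LEN,
+   DECL_SIZE of the DATA field might contain a COMPONENT_REF of a
+   PLACEHOLDER_EXPR with type REC in place of the object.  To evaluate
+   that size for a particular object OBJ of type REC, a front end wraps
+   it as
+
+       size = build (WITH_RECORD_EXPR, sizetype,
+                     DECL_SIZE (data_field), obj);
+
+   (data_field and obj being hypothetical trees) and the substitution
+   machinery replaces the PLACEHOLDER_EXPR with OBJ. */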
+
+/* Simple arithmetic. Operands must have the same machine mode
+ and the value shares that mode. */
+DEFTREECODE (PLUS_EXPR, "plus_expr", "2", 2)
+DEFTREECODE (MINUS_EXPR, "minus_expr", "2", 2)
+DEFTREECODE (MULT_EXPR, "mult_expr", "2", 2)
+
+/* Division for integer result that rounds the quotient toward zero. */
+/* Operands must have the same machine mode.
+ In principle they may be real, but that is not currently supported.
+ The result is always fixed point, and it has the same type as the
+ operands if they are fixed point. */
+DEFTREECODE (TRUNC_DIV_EXPR, "trunc_div_expr", "2", 2)
+
+/* Division for integer result that rounds the quotient toward positive
+   infinity. */
+DEFTREECODE (CEIL_DIV_EXPR, "ceil_div_expr", "2", 2)
+
+/* Division for integer result that rounds toward minus infinity. */
+DEFTREECODE (FLOOR_DIV_EXPR, "floor_div_expr", "2", 2)
+
+/* Division for integer result that rounds to the nearest integer. */
+DEFTREECODE (ROUND_DIV_EXPR, "round_div_expr", "2", 2)
+
+/* Four kinds of remainder that go with the four kinds of division. */
+DEFTREECODE (TRUNC_MOD_EXPR, "trunc_mod_expr", "2", 2)
+DEFTREECODE (CEIL_MOD_EXPR, "ceil_mod_expr", "2", 2)
+DEFTREECODE (FLOOR_MOD_EXPR, "floor_mod_expr", "2", 2)
+DEFTREECODE (ROUND_MOD_EXPR, "round_mod_expr", "2", 2)
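+
+/* Editor's worked example, not part of the original comments: dividing
+   -7 by 3 under the four rounding rules, with each remainder chosen so
+   that quotient * 3 + remainder == -7:
+
+       TRUNC_DIV_EXPR -> -2    TRUNC_MOD_EXPR -> -1
+       CEIL_DIV_EXPR  -> -2    CEIL_MOD_EXPR  -> -1
+       FLOOR_DIV_EXPR -> -3    FLOOR_MOD_EXPR ->  2
+       ROUND_DIV_EXPR -> -2    ROUND_MOD_EXPR -> -1
+
+   For 7 / 3 the quotients are 2, 3, 2 and 2 respectively, with
+   remainders 1, -2, 1 and 1. */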
+
+/* Division for real result. The two operands must have the same type.
+ In principle they could be integers, but currently only real
+ operands are supported. The result must have the same type
+ as the operands. */
+DEFTREECODE (RDIV_EXPR, "rdiv_expr", "2", 2)
+
+/* Division which is not supposed to need rounding.
+ Used for pointer subtraction in C. */
+DEFTREECODE (EXACT_DIV_EXPR, "exact_div_expr", "2", 2)
+
+/* Conversion of real to fixed point: four ways to round,
+ like the four ways to divide.
+ CONVERT_EXPR can also be used to convert a real to an integer,
+ and that is what is used in languages that do not have ways of
+ specifying which of these is wanted. Maybe these are not needed. */
+DEFTREECODE (FIX_TRUNC_EXPR, "fix_trunc_expr", "1", 1)
+DEFTREECODE (FIX_CEIL_EXPR, "fix_ceil_expr", "1", 1)
+DEFTREECODE (FIX_FLOOR_EXPR, "fix_floor_expr", "1", 1)
+DEFTREECODE (FIX_ROUND_EXPR, "fix_round_expr", "1", 1)
+
+/* Conversion of an integer to a real. */
+DEFTREECODE (FLOAT_EXPR, "float_expr", "1", 1)
+
+/* Exponentiation. Operands may have any types;
+ constraints on value type are not known yet. */
+DEFTREECODE (EXPON_EXPR, "expon_expr", "2", 2)
+
+/* Unary negation. Value has same type as operand. */
+DEFTREECODE (NEGATE_EXPR, "negate_expr", "1", 1)
+
+DEFTREECODE (MIN_EXPR, "min_expr", "2", 2)
+DEFTREECODE (MAX_EXPR, "max_expr", "2", 2)
+DEFTREECODE (ABS_EXPR, "abs_expr", "1", 1)
+DEFTREECODE (FFS_EXPR, "ffs_expr", "1", 1)
+
+/* Shift operations for shift and rotate.
+ Shift is supposed to mean logical shift if done on an
+ unsigned type, arithmetic shift on a signed type.
+ The second operand is the number of bits to
+ shift by, and must always have mode SImode.
+ The result has the same mode as the first operand. */
+DEFTREECODE (LSHIFT_EXPR, "alshift_expr", "2", 2)
+DEFTREECODE (RSHIFT_EXPR, "arshift_expr", "2", 2)
+DEFTREECODE (LROTATE_EXPR, "lrotate_expr", "2", 2)
+DEFTREECODE (RROTATE_EXPR, "rrotate_expr", "2", 2)
+
+/* Bitwise operations. Operands have same mode as result. */
+DEFTREECODE (BIT_IOR_EXPR, "bit_ior_expr", "2", 2)
+DEFTREECODE (BIT_XOR_EXPR, "bit_xor_expr", "2", 2)
+DEFTREECODE (BIT_AND_EXPR, "bit_and_expr", "2", 2)
+DEFTREECODE (BIT_ANDTC_EXPR, "bit_andtc_expr", "2", 2)
+DEFTREECODE (BIT_NOT_EXPR, "bit_not_expr", "1", 1)
+
+/* Combination of boolean values or of integers considered only
+ as zero or nonzero. ANDIF and ORIF allow the second operand
+ not to be computed if the value of the expression is determined
+ from the first operand. AND, OR, and XOR always compute the second
+ operand whether its value is needed or not (for side effects). */
+DEFTREECODE (TRUTH_ANDIF_EXPR, "truth_andif_expr", "e", 2)
+DEFTREECODE (TRUTH_ORIF_EXPR, "truth_orif_expr", "e", 2)
+DEFTREECODE (TRUTH_AND_EXPR, "truth_and_expr", "e", 2)
+DEFTREECODE (TRUTH_OR_EXPR, "truth_or_expr", "e", 2)
+DEFTREECODE (TRUTH_XOR_EXPR, "truth_xor_expr", "e", 2)
+DEFTREECODE (TRUTH_NOT_EXPR, "truth_not_expr", "e", 1)
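+
+/* Editor's note, illustrative rather than original: C's `a && b' maps
+   to TRUTH_ANDIF_EXPR so that the second operand is skipped when the
+   first is zero, e.g.
+
+       build (TRUTH_ANDIF_EXPR, integer_type_node, a_expr, b_expr);
+
+   whereas TRUTH_AND_EXPR would evaluate b_expr unconditionally
+   (a_expr and b_expr are hypothetical operand trees). */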
+
+/* Relational operators.
+ `EQ_EXPR' and `NE_EXPR' are allowed for any types.
+ The others are allowed only for integer (or pointer or enumeral)
+ or real types.
+ In all cases the operands will have the same type,
+ and the value is always the type used by the language for booleans. */
+DEFTREECODE (LT_EXPR, "lt_expr", "<", 2)
+DEFTREECODE (LE_EXPR, "le_expr", "<", 2)
+DEFTREECODE (GT_EXPR, "gt_expr", "<", 2)
+DEFTREECODE (GE_EXPR, "ge_expr", "<", 2)
+DEFTREECODE (EQ_EXPR, "eq_expr", "<", 2)
+DEFTREECODE (NE_EXPR, "ne_expr", "<", 2)
+
+/* Operations for Pascal sets. Not used now. */
+DEFTREECODE (IN_EXPR, "in_expr", "2", 2)
+DEFTREECODE (SET_LE_EXPR, "set_le_expr", "<", 2)
+DEFTREECODE (CARD_EXPR, "card_expr", "1", 1)
+DEFTREECODE (RANGE_EXPR, "range_expr", "2", 2)
+
+/* Represents a conversion of the type of a value.
+ All conversions, including implicit ones, must be
+ represented by CONVERT_EXPR nodes. */
+DEFTREECODE (CONVERT_EXPR, "convert_expr", "1", 1)
+
+/* Represents a conversion expected to require no code to be generated. */
+DEFTREECODE (NOP_EXPR, "nop_expr", "1", 1)
+
+/* Value is same as argument, but guaranteed not an lvalue. */
+DEFTREECODE (NON_LVALUE_EXPR, "non_lvalue_expr", "1", 1)
+
+/* Represents something we computed once and will use multiple times.
+ First operand is that expression. Second is the function decl
+ in which the SAVE_EXPR was created. The third operand is the RTL,
+ nonzero only after the expression has been computed. */
+DEFTREECODE (SAVE_EXPR, "save_expr", "e", 3)
+
+/* Represents something whose RTL has already been expanded
+ as a sequence which should be emitted when this expression is expanded.
+ The first operand is the RTL to emit. It is the first of a chain of insns.
+ The second is the RTL expression for the result. */
+DEFTREECODE (RTL_EXPR, "rtl_expr", "e", 2)
+
+/* & in C. Value is the address at which the operand's value resides.
+ Operand may have any mode. Result mode is Pmode. */
+DEFTREECODE (ADDR_EXPR, "addr_expr", "e", 1)
+
+/* Non-lvalue reference or pointer to an object. */
+DEFTREECODE (REFERENCE_EXPR, "reference_expr", "e", 1)
+
+/* Operand is a function constant; result is a function variable value
+   of type EPmode.  Used only for languages that need static chains. */
+DEFTREECODE (ENTRY_VALUE_EXPR, "entry_value_expr", "e", 1)
+
+/* Given two real or integer operands of the same type,
+ returns a complex value of the corresponding complex type. */
+DEFTREECODE (COMPLEX_EXPR, "complex_expr", "2", 2)
+
+/* Complex conjugate of operand. Used only on complex types.
+ The value has the same type as the operand. */
+DEFTREECODE (CONJ_EXPR, "conj_expr", "1", 1)
+
+/* Used only on an operand of complex type, these return
+ a value of the corresponding component type. */
+DEFTREECODE (REALPART_EXPR, "realpart_expr", "1", 1)
+DEFTREECODE (IMAGPART_EXPR, "imagpart_expr", "1", 1)
+
+/* Nodes for ++ and -- in C.
+ The second arg is how much to increment or decrement by.
+ For a pointer, it would be the size of the object pointed to. */
+DEFTREECODE (PREDECREMENT_EXPR, "predecrement_expr", "e", 2)
+DEFTREECODE (PREINCREMENT_EXPR, "preincrement_expr", "e", 2)
+DEFTREECODE (POSTDECREMENT_EXPR, "postdecrement_expr", "e", 2)
+DEFTREECODE (POSTINCREMENT_EXPR, "postincrement_expr", "e", 2)
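+
+/* Editor's illustrative sketch (not original text): for `int *p; p++'
+   the front end would build something like
+
+       build (POSTINCREMENT_EXPR, TREE_TYPE (p_ref), p_ref, size_int (4));
+
+   assuming a 4-byte int, so the pointer advances by one element;
+   p_ref is a hypothetical reference to the variable p. */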
+
+/* These types of expressions have no useful value,
+ and always have side effects. */
+
+/* A label definition, encapsulated as a statement.
+ Operand 0 is the LABEL_DECL node for the label that appears here.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (LABEL_EXPR, "label_expr", "s", 1)
+
+/* GOTO. Operand 0 is a LABEL_DECL node.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (GOTO_EXPR, "goto_expr", "s", 1)
+
+/* RETURN. Evaluates operand 0, then returns from the current function.
+ Presumably that operand is an assignment that stores into the
+   RESULT_DECL that holds the value to be returned.
+ The operand may be null.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (RETURN_EXPR, "return_expr", "s", 1)
+
+/* Exit the innermost loop conditionally.  Operand 0 is the condition.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (EXIT_EXPR, "exit_expr", "s", 1)
+
+/* A loop. Operand 0 is the body of the loop.
+   It must contain an EXIT_EXPR, or else it is an infinite loop.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (LOOP_EXPR, "loop_expr", "s", 1)
+
+/*
+Local variables:
+mode:c
+version-control: t
+End:
+*/
diff --git a/gnu/usr.bin/cc/include/tree.h b/gnu/usr.bin/cc/include/tree.h
new file mode 100644
index 0000000..dbe5ff9
--- /dev/null
+++ b/gnu/usr.bin/cc/include/tree.h
@@ -0,0 +1,1638 @@
+/* Front-end tree definitions for GNU compiler.
+ Copyright (C) 1989, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include "machmode.h"
+
+#ifndef RTX_CODE
+struct rtx_def;
+#endif
+
+/* Codes of tree nodes */
+
+#define DEFTREECODE(SYM, STRING, TYPE, NARGS) SYM,
+
+enum tree_code {
+#include "tree.def"
+
+  LAST_AND_UNUSED_TREE_CODE	/* A convenient way to get a value for
+				   NUM_TREE_CODES. */
+};
+
+#undef DEFTREECODE
+
+/* Number of tree codes. */
+#define NUM_TREE_CODES ((int)LAST_AND_UNUSED_TREE_CODE)
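+
+/* Editor's note on the mechanics (illustration, not original text):
+   with the definition above, including tree.def turns each line such as
+
+       DEFTREECODE (PLUS_EXPR, "plus_expr", "2", 2)
+
+   into just `PLUS_EXPR,', one enumerator per tree code.  tree.c
+   redefines DEFTREECODE to build the tree_code_type, tree_code_length
+   and tree_code_name tables from the same file. */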
+
+/* Indexed by enum tree_code, contains a character which is
+   `<' for a comparison expression, `1' for a unary arithmetic
+ expression, `2' for a binary arithmetic expression, `e' for
+ other types of expressions, `r' for a reference, `c' for a
+ constant, `d' for a decl, `t' for a type, `s' for a statement,
+ and `x' for anything else (TREE_LIST, IDENTIFIER, etc). */
+
+extern char **tree_code_type;
+#define TREE_CODE_CLASS(CODE) (*tree_code_type[(int) (CODE)])
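+
+/* Editor's illustrative examples (not in the original header), given
+   the codes defined in tree.def:
+
+       TREE_CODE_CLASS (PLUS_EXPR) == '2'
+       TREE_CODE_CLASS (EQ_EXPR)   == '<'
+       TREE_CODE_CLASS (VAR_DECL)  == 'd'  */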
+
+/* Number of argument-words in each kind of tree-node. */
+
+extern int *tree_code_length;
+
+/* Names of tree components. */
+
+extern char **tree_code_name;
+
+/* Codes that identify the various built-in functions
+ so that expand_call can identify them quickly. */
+
+enum built_in_function
+{
+ NOT_BUILT_IN,
+ BUILT_IN_ALLOCA,
+ BUILT_IN_ABS,
+ BUILT_IN_FABS,
+ BUILT_IN_LABS,
+ BUILT_IN_FFS,
+ BUILT_IN_DIV,
+ BUILT_IN_LDIV,
+ BUILT_IN_FFLOOR,
+ BUILT_IN_FCEIL,
+ BUILT_IN_FMOD,
+ BUILT_IN_FREM,
+ BUILT_IN_MEMCPY,
+ BUILT_IN_MEMCMP,
+ BUILT_IN_MEMSET,
+ BUILT_IN_STRCPY,
+ BUILT_IN_STRCMP,
+ BUILT_IN_STRLEN,
+ BUILT_IN_FSQRT,
+ BUILT_IN_SIN,
+ BUILT_IN_COS,
+ BUILT_IN_GETEXP,
+ BUILT_IN_GETMAN,
+ BUILT_IN_SAVEREGS,
+ BUILT_IN_CLASSIFY_TYPE,
+ BUILT_IN_NEXT_ARG,
+ BUILT_IN_ARGS_INFO,
+ BUILT_IN_CONSTANT_P,
+ BUILT_IN_FRAME_ADDRESS,
+ BUILT_IN_RETURN_ADDRESS,
+ BUILT_IN_CALLER_RETURN_ADDRESS,
+ BUILT_IN_APPLY_ARGS,
+ BUILT_IN_APPLY,
+ BUILT_IN_RETURN,
+
+ /* C++ extensions */
+ BUILT_IN_NEW,
+ BUILT_IN_VEC_NEW,
+ BUILT_IN_DELETE,
+ BUILT_IN_VEC_DELETE,
+
+ /* Upper bound on non-language-specific builtins. */
+ END_BUILTINS
+};
+
+/* The definition of tree nodes fills the next several pages. */
+
+/* A tree node can represent a data type, a variable, an expression
+ or a statement. Each node has a TREE_CODE which says what kind of
+ thing it represents. Some common codes are:
+ INTEGER_TYPE -- represents a type of integers.
+   ARRAY_TYPE -- represents a type of arrays.
+ VAR_DECL -- represents a declared variable.
+ INTEGER_CST -- represents a constant integer value.
+ PLUS_EXPR -- represents a sum (an expression).
+
+ As for the contents of a tree node: there are some fields
+ that all nodes share. Each TREE_CODE has various special-purpose
+ fields as well. The fields of a node are never accessed directly,
+ always through accessor macros. */
+
+/* This type is used everywhere to refer to a tree node. */
+
+typedef union tree_node *tree;
+
+/* Every kind of tree node starts with this structure,
+ so all nodes have these fields.
+
+ See the accessor macros, defined below, for documentation of the fields. */
+
+struct tree_common
+{
+ union tree_node *chain;
+ union tree_node *type;
+#ifdef ONLY_INT_FIELDS
+ unsigned int code : 8;
+#else
+ enum tree_code code : 8;
+#endif
+
+ unsigned side_effects_flag : 1;
+ unsigned constant_flag : 1;
+ unsigned permanent_flag : 1;
+ unsigned addressable_flag : 1;
+ unsigned volatile_flag : 1;
+ unsigned readonly_flag : 1;
+ unsigned unsigned_flag : 1;
+ unsigned asm_written_flag: 1;
+
+ unsigned used_flag : 1;
+ unsigned raises_flag : 1;
+ unsigned static_flag : 1;
+ unsigned public_flag : 1;
+ unsigned private_flag : 1;
+ unsigned protected_flag : 1;
+
+ unsigned lang_flag_0 : 1;
+ unsigned lang_flag_1 : 1;
+ unsigned lang_flag_2 : 1;
+ unsigned lang_flag_3 : 1;
+ unsigned lang_flag_4 : 1;
+ unsigned lang_flag_5 : 1;
+ unsigned lang_flag_6 : 1;
+ /* There is room for two more flags. */
+};
+
+/* Define accessors for the fields that all tree nodes have
+ (though some fields are not used for all kinds of nodes). */
+
+/* The tree-code says what kind of node it is.
+ Codes are defined in tree.def. */
+#define TREE_CODE(NODE) ((enum tree_code) (NODE)->common.code)
+#define TREE_SET_CODE(NODE, VALUE) ((NODE)->common.code = (int) (VALUE))
+
+/* In all nodes that are expressions, this is the data type of the expression.
+ In POINTER_TYPE nodes, this is the type that the pointer points to.
+ In ARRAY_TYPE nodes, this is the type of the elements. */
+#define TREE_TYPE(NODE) ((NODE)->common.type)
+
+/* Nodes are chained together for many purposes.
+ Types are chained together to record them for being output to the debugger
+ (see the function `chain_type').
+ Decls in the same scope are chained together to record the contents
+ of the scope.
+ Statement nodes for successive statements used to be chained together.
+ Often lists of things are represented by TREE_LIST nodes that
+ are chained together. */
+
+#define TREE_CHAIN(NODE) ((NODE)->common.chain)
+
+/* Given an expression as a tree, strip any NON_LVALUE_EXPRs, NOP_EXPRs
+   and CONVERT_EXPRs that don't change the machine mode. */
+
+#define STRIP_NOPS(EXP) \
+ while ((TREE_CODE (EXP) == NOP_EXPR \
+ || TREE_CODE (EXP) == CONVERT_EXPR \
+ || TREE_CODE (EXP) == NON_LVALUE_EXPR) \
+ && (TYPE_MODE (TREE_TYPE (EXP)) \
+ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (EXP, 0))))) \
+ (EXP) = TREE_OPERAND (EXP, 0);
+
+/* Like STRIP_NOPS, but don't alter the TREE_TYPE either. */
+
+#define STRIP_TYPE_NOPS(EXP) \
+ while ((TREE_CODE (EXP) == NOP_EXPR \
+ || TREE_CODE (EXP) == CONVERT_EXPR \
+ || TREE_CODE (EXP) == NON_LVALUE_EXPR) \
+ && (TREE_TYPE (EXP) \
+ == TREE_TYPE (TREE_OPERAND (EXP, 0)))) \
+ (EXP) = TREE_OPERAND (EXP, 0);
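+
+/* Editor's usage sketch (not part of the original header): since these
+   macros assign to their argument, they are applied to a local copy:
+
+       tree inner = exp;
+       STRIP_NOPS (inner);
+       if (TREE_CODE (inner) == INTEGER_CST)
+         ...
+
+   This is how fold and friends look through no-op conversions. */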
+
+/* Nonzero if TYPE represents an integral type. Note that we do not
+ include COMPLEX types here. */
+
+#define INTEGRAL_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == INTEGER_TYPE || TREE_CODE (TYPE) == ENUMERAL_TYPE \
+ || TREE_CODE (TYPE) == BOOLEAN_TYPE || TREE_CODE (TYPE) == CHAR_TYPE)
+
+/* Nonzero if TYPE represents a floating-point type, including complex
+ floating-point types. */
+
+#define FLOAT_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == REAL_TYPE \
+ || (TREE_CODE (TYPE) == COMPLEX_TYPE \
+ && TREE_CODE (TREE_TYPE (TYPE)) == REAL_TYPE))
+
+/* Nonzero if TYPE represents an aggregate (multi-component) type. */
+
+#define AGGREGATE_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE || TREE_CODE (TYPE) == RECORD_TYPE \
+ || TREE_CODE (TYPE) == UNION_TYPE || TREE_CODE (TYPE) == QUAL_UNION_TYPE \
+ || TREE_CODE (TYPE) == SET_TYPE)
+
+/* Define many boolean fields that all tree nodes have. */
+
+/* In VAR_DECL nodes, nonzero means address of this is needed.
+ So it cannot be in a register.
+ In a FUNCTION_DECL, nonzero means its address is needed.
+ So it must be compiled even if it is an inline function.
+ In CONSTRUCTOR nodes, it means object constructed must be in memory.
+ In LABEL_DECL nodes, it means a goto for this label has been seen
+ from a place outside all binding contours that restore stack levels.
+ In ..._TYPE nodes, it means that objects of this type must
+ be fully addressable. This means that pieces of this
+ object cannot go into register parameters, for example.
+ In IDENTIFIER_NODEs, this means that some extern decl for this name
+ had its address taken. That matters for inline functions. */
+#define TREE_ADDRESSABLE(NODE) ((NODE)->common.addressable_flag)
+
+/* In a VAR_DECL, nonzero means allocate static storage.
+ In a FUNCTION_DECL, nonzero if function has been defined.
+ In a CONSTRUCTOR, nonzero means allocate static storage. */
+#define TREE_STATIC(NODE) ((NODE)->common.static_flag)
+
+/* In a CONVERT_EXPR, NOP_EXPR or COMPOUND_EXPR, this means the node was
+ made implicitly and should not lead to an "unused value" warning. */
+#define TREE_NO_UNUSED_WARNING(NODE) ((NODE)->common.static_flag)
+
+/* Nonzero for a TREE_LIST or TREE_VEC node means that the derivation
+ chain is via a `virtual' declaration. */
+#define TREE_VIA_VIRTUAL(NODE) ((NODE)->common.static_flag)
+
+/* In an INTEGER_CST, REAL_CST, or COMPLEX_CST, this means there was an
+ overflow in folding. This is distinct from TREE_OVERFLOW because ANSI C
+ requires a diagnostic when overflows occur in constant expressions. */
+#define TREE_CONSTANT_OVERFLOW(NODE) ((NODE)->common.static_flag)
+
+/* In an IDENTIFIER_NODE, this means that assemble_name was called with
+ this string as an argument. */
+#define TREE_SYMBOL_REFERENCED(NODE) ((NODE)->common.static_flag)
+
+/* In an INTEGER_CST, REAL_CST, or COMPLEX_CST, this means there was an
+ overflow in folding, and no warning has been issued for this subexpression.
+ TREE_OVERFLOW implies TREE_CONSTANT_OVERFLOW, but not vice versa. */
+#define TREE_OVERFLOW(NODE) ((NODE)->common.public_flag)
+
+/* In a VAR_DECL or FUNCTION_DECL,
+ nonzero means name is to be accessible from outside this module.
+ In an identifier node, nonzero means an external declaration
+ accessible from outside this module was previously seen
+ for this name in an inner scope. */
+#define TREE_PUBLIC(NODE) ((NODE)->common.public_flag)
+
+/* Nonzero for TREE_LIST or TREE_VEC node means that the path to the
+ base class is via a `public' declaration, which preserves public
+ fields from the base class as public. */
+#define TREE_VIA_PUBLIC(NODE) ((NODE)->common.public_flag)
+
+/* Ditto, for `private' declarations. */
+#define TREE_VIA_PRIVATE(NODE) ((NODE)->common.private_flag)
+
+/* Nonzero for TREE_LIST node means that the path to the
+ base class is via a `protected' declaration, which preserves
+ protected fields from the base class as protected.
+ OVERLOADED. */
+#define TREE_VIA_PROTECTED(NODE) ((NODE)->common.protected_flag)
+
+/* In any expression, nonzero means it has side effects or reevaluation
+ of the whole expression could produce a different value.
+ This is set if any subexpression is a function call, a side effect
+ or a reference to a volatile variable.
+ In a ..._DECL, this is set only if the declaration said `volatile'. */
+#define TREE_SIDE_EFFECTS(NODE) ((NODE)->common.side_effects_flag)
+
+/* Nonzero means this expression is volatile in the C sense:
+ its address should be of type `volatile WHATEVER *'.
+ In other words, the declared item is volatile qualified.
+ This is used in _DECL nodes and _REF nodes.
+
+ In a ..._TYPE node, means this type is volatile-qualified.
+ But use TYPE_VOLATILE instead of this macro when the node is a type,
+ because eventually we may make that a different bit.
+
+ If this bit is set in an expression, so is TREE_SIDE_EFFECTS. */
+#define TREE_THIS_VOLATILE(NODE) ((NODE)->common.volatile_flag)
+
+/* In a VAR_DECL, PARM_DECL or FIELD_DECL, or any kind of ..._REF node,
+ nonzero means it may not be the lhs of an assignment.
+ In a ..._TYPE node, means this type is const-qualified
+ (but the macro TYPE_READONLY should be used instead of this macro
+ when the node is a type). */
+#define TREE_READONLY(NODE) ((NODE)->common.readonly_flag)
+
+/* Value of expression is constant.
+ Always appears in all ..._CST nodes.
+ May also appear in an arithmetic expression, an ADDR_EXPR or a CONSTRUCTOR
+ if the value is constant. */
+#define TREE_CONSTANT(NODE) ((NODE)->common.constant_flag)
+
+/* Nonzero means permanent node;
+ node will continue to exist for the entire compiler run.
+ Otherwise it will be recycled at the end of the function. */
+#define TREE_PERMANENT(NODE) ((NODE)->common.permanent_flag)
+
+/* In INTEGER_TYPE or ENUMERAL_TYPE nodes, means an unsigned type.
+ In FIELD_DECL nodes, means an unsigned bit field.
+ The same bit is used in functions as DECL_BUILT_IN_NONANSI. */
+#define TREE_UNSIGNED(NODE) ((NODE)->common.unsigned_flag)
+
+/* Nonzero in a VAR_DECL means assembler code has been written.
+ Nonzero in a FUNCTION_DECL means that the function has been compiled.
+ This is interesting in an inline function, since it might not need
+ to be compiled separately.
+ Nonzero in a RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE or ENUMERAL_TYPE
+ if the sdb debugging info for the type has been written.
+ In a BLOCK node, nonzero if reorder_blocks has already seen this block. */
+#define TREE_ASM_WRITTEN(NODE) ((NODE)->common.asm_written_flag)
+
+/* Nonzero in a _DECL if the name is used in its scope.
+ Nonzero in an expr node means inhibit warning if value is unused.
+ In IDENTIFIER_NODEs, this means that some extern decl for this name
+ was used. */
+#define TREE_USED(NODE) ((NODE)->common.used_flag)
+
+/* Nonzero for a tree node whose evaluation could result
+ in the raising of an exception. Not implemented yet. */
+#define TREE_RAISES(NODE) ((NODE)->common.raises_flag)
+
+/* Used in classes in C++. */
+#define TREE_PRIVATE(NODE) ((NODE)->common.private_flag)
+/* Used in classes in C++.
+ In a BLOCK node, this is BLOCK_HANDLER_BLOCK. */
+#define TREE_PROTECTED(NODE) ((NODE)->common.protected_flag)
+
+/* These flags are available for each language front end to use internally. */
+#define TREE_LANG_FLAG_0(NODE) ((NODE)->common.lang_flag_0)
+#define TREE_LANG_FLAG_1(NODE) ((NODE)->common.lang_flag_1)
+#define TREE_LANG_FLAG_2(NODE) ((NODE)->common.lang_flag_2)
+#define TREE_LANG_FLAG_3(NODE) ((NODE)->common.lang_flag_3)
+#define TREE_LANG_FLAG_4(NODE) ((NODE)->common.lang_flag_4)
+#define TREE_LANG_FLAG_5(NODE) ((NODE)->common.lang_flag_5)
+#define TREE_LANG_FLAG_6(NODE) ((NODE)->common.lang_flag_6)
+
+/* Define additional fields and accessors for nodes representing constants. */
+
+/* In an INTEGER_CST node. These two together make a 2-word integer.
+ If the data type is signed, the value is sign-extended to 2 words
+ even though not all of them may really be in use.
+ In an unsigned constant shorter than 2 words, the extra bits are 0. */
+#define TREE_INT_CST_LOW(NODE) ((NODE)->int_cst.int_cst_low)
+#define TREE_INT_CST_HIGH(NODE) ((NODE)->int_cst.int_cst_high)
+
+#define INT_CST_LT(A, B) \
+(TREE_INT_CST_HIGH (A) < TREE_INT_CST_HIGH (B) \
+ || (TREE_INT_CST_HIGH (A) == TREE_INT_CST_HIGH (B) \
+ && ((unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (A) \
+ < (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (B))))
+
+#define INT_CST_LT_UNSIGNED(A, B) \
+(((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (A) \
+ < (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (B)) \
+ || (((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (A) \
+       == (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (B))	\
+ && (((unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (A) \
+ < (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (B)))))
+
+struct tree_int_cst
+{
+ char common[sizeof (struct tree_common)];
+ HOST_WIDE_INT int_cst_low;
+ HOST_WIDE_INT int_cst_high;
+};
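+
+/* Editor's example (illustrative, not original text): with a 32-bit
+   HOST_WIDE_INT, the signed constant -1 is stored sign-extended as
+   int_cst_low == 0xffffffff and int_cst_high == 0xffffffff, while 5 is
+   stored as low == 5, high == 0.  INT_CST_LT accordingly compares the
+   high words as signed and the low words as unsigned. */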
+
+/* In REAL_CST, STRING_CST, COMPLEX_CST nodes, and CONSTRUCTOR nodes,
+ and generally in all kinds of constants that could
+ be given labels (rather than being immediate). */
+
+#define TREE_CST_RTL(NODE) ((NODE)->real_cst.rtl)
+
+/* In a REAL_CST node. */
+/* We can represent a real value as either a `double' or a string.
+ Strings don't allow for any optimization, but they do allow
+ for cross-compilation. */
+
+#define TREE_REAL_CST(NODE) ((NODE)->real_cst.real_cst)
+
+#include "real.h"
+
+struct tree_real_cst
+{
+ char common[sizeof (struct tree_common)];
+ struct rtx_def *rtl; /* acts as link to register transfer language
+ (rtl) info */
+ REAL_VALUE_TYPE real_cst;
+};
+
+/* In a STRING_CST */
+#define TREE_STRING_LENGTH(NODE) ((NODE)->string.length)
+#define TREE_STRING_POINTER(NODE) ((NODE)->string.pointer)
+
+struct tree_string
+{
+ char common[sizeof (struct tree_common)];
+ struct rtx_def *rtl; /* acts as link to register transfer language
+ (rtl) info */
+ int length;
+ char *pointer;
+};
+
+/* In a COMPLEX_CST node. */
+#define TREE_REALPART(NODE) ((NODE)->complex.real)
+#define TREE_IMAGPART(NODE) ((NODE)->complex.imag)
+
+struct tree_complex
+{
+ char common[sizeof (struct tree_common)];
+ struct rtx_def *rtl; /* acts as link to register transfer language
+ (rtl) info */
+ union tree_node *real;
+ union tree_node *imag;
+};
+
+/* Define fields and accessors for some special-purpose tree nodes. */
+
+#define IDENTIFIER_LENGTH(NODE) ((NODE)->identifier.length)
+#define IDENTIFIER_POINTER(NODE) ((NODE)->identifier.pointer)
+
+struct tree_identifier
+{
+ char common[sizeof (struct tree_common)];
+ int length;
+ char *pointer;
+};
+
+/* In a TREE_LIST node. */
+#define TREE_PURPOSE(NODE) ((NODE)->list.purpose)
+#define TREE_VALUE(NODE) ((NODE)->list.value)
+
+struct tree_list
+{
+ char common[sizeof (struct tree_common)];
+ union tree_node *purpose;
+ union tree_node *value;
+};
+
+/* In a TREE_VEC node. */
+#define TREE_VEC_LENGTH(NODE) ((NODE)->vec.length)
+#define TREE_VEC_ELT(NODE,I) ((NODE)->vec.a[I])
+#define TREE_VEC_END(NODE) (&((NODE)->vec.a[(NODE)->vec.length]))
+
+struct tree_vec
+{
+ char common[sizeof (struct tree_common)];
+ int length;
+ union tree_node *a[1];
+};
+
+/* Define fields and accessors for some nodes that represent expressions. */
+
+/* In a SAVE_EXPR node. */
+#define SAVE_EXPR_CONTEXT(NODE) TREE_OPERAND(NODE, 1)
+#define SAVE_EXPR_RTL(NODE) (*(struct rtx_def **) &(NODE)->exp.operands[2])
+
+/* In a RTL_EXPR node. */
+#define RTL_EXPR_SEQUENCE(NODE) (*(struct rtx_def **) &(NODE)->exp.operands[0])
+#define RTL_EXPR_RTL(NODE) (*(struct rtx_def **) &(NODE)->exp.operands[1])
+
+/* In a CALL_EXPR node. */
+#define CALL_EXPR_RTL(NODE) (*(struct rtx_def **) &(NODE)->exp.operands[2])
+
+/* In a CONSTRUCTOR node. */
+#define CONSTRUCTOR_ELTS(NODE) TREE_OPERAND (NODE, 1)
+
+/* In ordinary expression nodes. */
+#define TREE_OPERAND(NODE, I) ((NODE)->exp.operands[I])
+#define TREE_COMPLEXITY(NODE) ((NODE)->exp.complexity)
+
+struct tree_exp
+{
+ char common[sizeof (struct tree_common)];
+ int complexity;
+ union tree_node *operands[1];
+};
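+
+/* Editor's sketch (not in the original sources): a generic walk over
+   the operands of an expression node can use tree_code_length to bound
+   the operand array:
+
+       int i;
+       for (i = 0; i < tree_code_length[(int) TREE_CODE (node)]; i++)
+         walk (TREE_OPERAND (node, i));
+
+   where `walk' is a hypothetical recursive function. */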
+
+/* In a BLOCK node. */
+#define BLOCK_VARS(NODE) ((NODE)->block.vars)
+#define BLOCK_TYPE_TAGS(NODE) ((NODE)->block.type_tags)
+#define BLOCK_SUBBLOCKS(NODE) ((NODE)->block.subblocks)
+#define BLOCK_SUPERCONTEXT(NODE) ((NODE)->block.supercontext)
+/* Note: when changing this, make sure to find the places
+ that use chainon or nreverse. */
+#define BLOCK_CHAIN(NODE) TREE_CHAIN (NODE)
+#define BLOCK_ABSTRACT_ORIGIN(NODE) ((NODE)->block.abstract_origin)
+#define BLOCK_ABSTRACT(NODE) ((NODE)->block.abstract_flag)
+#define BLOCK_END_NOTE(NODE) ((NODE)->block.end_note)
+
+/* Nonzero means that this block is prepared to handle exceptions
+ listed in the BLOCK_VARS slot. */
+#define BLOCK_HANDLER_BLOCK(NODE) ((NODE)->block.handler_block_flag)
+
+struct tree_block
+{
+ char common[sizeof (struct tree_common)];
+
+ unsigned handler_block_flag : 1;
+ unsigned abstract_flag : 1;
+
+ union tree_node *vars;
+ union tree_node *type_tags;
+ union tree_node *subblocks;
+ union tree_node *supercontext;
+ union tree_node *abstract_origin;
+ struct rtx_def *end_note;
+};
+
+/* Define fields and accessors for nodes representing data types. */
+
+/* See tree.def for documentation of the use of these fields.
+ Look at the documentation of the various ..._TYPE tree codes. */
+
+#define TYPE_UID(NODE) ((NODE)->type.uid)
+#define TYPE_SIZE(NODE) ((NODE)->type.size)
+#define TYPE_MODE(NODE) ((NODE)->type.mode)
+#define TYPE_VALUES(NODE) ((NODE)->type.values)
+#define TYPE_DOMAIN(NODE) ((NODE)->type.values)
+#define TYPE_FIELDS(NODE) ((NODE)->type.values)
+#define TYPE_METHODS(NODE) ((NODE)->type.maxval)
+#define TYPE_VFIELD(NODE) ((NODE)->type.minval)
+#define TYPE_ARG_TYPES(NODE) ((NODE)->type.values)
+#define TYPE_METHOD_BASETYPE(NODE) ((NODE)->type.maxval)
+#define TYPE_OFFSET_BASETYPE(NODE) ((NODE)->type.maxval)
+#define TYPE_POINTER_TO(NODE) ((NODE)->type.pointer_to)
+#define TYPE_REFERENCE_TO(NODE) ((NODE)->type.reference_to)
+#define TYPE_MIN_VALUE(NODE) ((NODE)->type.minval)
+#define TYPE_MAX_VALUE(NODE) ((NODE)->type.maxval)
+#define TYPE_PRECISION(NODE) ((NODE)->type.precision)
+#define TYPE_PARSE_INFO(NODE) ((NODE)->type.parse_info)
+#define TYPE_SYMTAB_ADDRESS(NODE) ((NODE)->type.symtab.address)
+#define TYPE_SYMTAB_POINTER(NODE) ((NODE)->type.symtab.pointer)
+#define TYPE_NAME(NODE) ((NODE)->type.name)
+#define TYPE_NEXT_VARIANT(NODE) ((NODE)->type.next_variant)
+#define TYPE_MAIN_VARIANT(NODE) ((NODE)->type.main_variant)
+#define TYPE_BINFO(NODE) ((NODE)->type.binfo)
+#define TYPE_NONCOPIED_PARTS(NODE) ((NODE)->type.noncopied_parts)
+#define TYPE_CONTEXT(NODE) ((NODE)->type.context)
+#define TYPE_OBSTACK(NODE) ((NODE)->type.obstack)
+#define TYPE_LANG_SPECIFIC(NODE) ((NODE)->type.lang_specific)
+
+/* A TREE_LIST of IDENTIFIER nodes of the attributes that apply
+ to this type. */
+#define TYPE_ATTRIBUTES(NODE) ((NODE)->type.attributes)
+
+/* The alignment necessary for objects of this type.
+ The value is an int, measured in bits. */
+#define TYPE_ALIGN(NODE) ((NODE)->type.align)
+
+#define TYPE_STUB_DECL(NODE) (TREE_CHAIN (NODE))
+
+/* In a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE, it means the type
+ has BLKmode only because it lacks the alignment requirement for
+ its size. */
+#define TYPE_NO_FORCE_BLK(NODE) ((NODE)->type.no_force_blk_flag)
+
+/* Nonzero in a type considered volatile as a whole. */
+#define TYPE_VOLATILE(NODE) ((NODE)->common.volatile_flag)
+
+/* Means this type is const-qualified. */
+#define TYPE_READONLY(NODE) ((NODE)->common.readonly_flag)
+
+/* These flags are available for each language front end to use internally. */
+#define TYPE_LANG_FLAG_0(NODE) ((NODE)->type.lang_flag_0)
+#define TYPE_LANG_FLAG_1(NODE) ((NODE)->type.lang_flag_1)
+#define TYPE_LANG_FLAG_2(NODE) ((NODE)->type.lang_flag_2)
+#define TYPE_LANG_FLAG_3(NODE) ((NODE)->type.lang_flag_3)
+#define TYPE_LANG_FLAG_4(NODE) ((NODE)->type.lang_flag_4)
+#define TYPE_LANG_FLAG_5(NODE) ((NODE)->type.lang_flag_5)
+#define TYPE_LANG_FLAG_6(NODE) ((NODE)->type.lang_flag_6)
+
+/* If set in an ARRAY_TYPE, indicates a string type (for languages
+ that distinguish string from array of char).
+ If set in a SET_TYPE, indicates a bitstring type. */
+#define TYPE_STRING_FLAG(NODE) ((NODE)->type.string_flag)
+
+/* Indicates that objects of this type must be initialized by calling a
+ function when they are created. */
+#define TYPE_NEEDS_CONSTRUCTING(NODE) ((NODE)->type.needs_constructing_flag)
+
+struct tree_type
+{
+ char common[sizeof (struct tree_common)];
+ union tree_node *values;
+ union tree_node *size;
+ union tree_node *attributes;
+ unsigned uid;
+
+ unsigned char precision;
+#ifdef ONLY_INT_FIELDS
+ int mode : 8;
+#else
+ enum machine_mode mode : 8;
+#endif
+
+ unsigned string_flag : 1;
+ unsigned no_force_blk_flag : 1;
+ unsigned needs_constructing_flag : 1;
+ unsigned lang_flag_0 : 1;
+ unsigned lang_flag_1 : 1;
+ unsigned lang_flag_2 : 1;
+ unsigned lang_flag_3 : 1;
+ unsigned lang_flag_4 : 1;
+ unsigned lang_flag_5 : 1;
+ unsigned lang_flag_6 : 1;
+ /* room for 6 more bits */
+
+ unsigned int align;
+ union tree_node *pointer_to;
+ union tree_node *reference_to;
+ int parse_info;
+ union {int address; char *pointer; } symtab;
+ union tree_node *name;
+ union tree_node *minval;
+ union tree_node *maxval;
+ union tree_node *next_variant;
+ union tree_node *main_variant;
+ union tree_node *binfo;
+ union tree_node *noncopied_parts;
+ union tree_node *context;
+ struct obstack *obstack;
+ /* Points to a structure whose details depend on the language in use. */
+ struct lang_type *lang_specific;
+};
+
+/* Define accessor macros for information about type inheritance
+ and basetypes.
+
+ A "basetype" means a particular usage of a data type for inheritance
+ in another type. Each such basetype usage has its own "binfo"
+ object to describe it. The binfo object is a TREE_VEC node.
+
+ Inheritance is represented by the binfo nodes allocated for a
+ given type. For example, given types C and D, such that D is
+ inherited by C, 3 binfo nodes will be allocated: one for describing
+ the binfo properties of C, similarly one for D, and one for
+ describing the binfo properties of D as a base type for C.
+ Thus, given a pointer to class C, one can get a pointer to the binfo
+ of D acting as a basetype for C by looking at C's binfo's basetypes. */
+
+/* The actual data type node being inherited in this basetype. */
+#define BINFO_TYPE(NODE) TREE_TYPE (NODE)
+
+/* The offset where this basetype appears in its containing type.
+ BINFO_OFFSET slot holds the offset (in bytes)
+ from the base of the complete object to the base of the part of the
+ object that is allocated on behalf of this `type'.
+ This is always 0 except when there is multiple inheritance. */
+
+#define BINFO_OFFSET(NODE) TREE_VEC_ELT ((NODE), 1)
+#define TYPE_BINFO_OFFSET(NODE) BINFO_OFFSET (TYPE_BINFO (NODE))
+#define BINFO_OFFSET_ZEROP(NODE) (BINFO_OFFSET (NODE) == integer_zero_node)
+
+/* The virtual function table belonging to this basetype. Virtual
+ function tables provide a mechanism for run-time method dispatching.
+ The entries of a virtual function table are language-dependent. */
+
+#define BINFO_VTABLE(NODE) TREE_VEC_ELT ((NODE), 2)
+#define TYPE_BINFO_VTABLE(NODE) BINFO_VTABLE (TYPE_BINFO (NODE))
+
+/* The virtual functions in the virtual function table. This is
+ a TREE_LIST that is used as an initial approximation for building
+ a virtual function table for this basetype. */
+#define BINFO_VIRTUALS(NODE) TREE_VEC_ELT ((NODE), 3)
+#define TYPE_BINFO_VIRTUALS(NODE) BINFO_VIRTUALS (TYPE_BINFO (NODE))
+
+/* A vector of additional binfos for the types inherited by this basetype.
+
+ If this basetype describes type D as inherited in C,
+   and if the basetypes of D are E and F,
+ then this vector contains binfos for inheritance of E and F by C.
+
+ ??? This could probably be done by just allocating the
+ base types at the end of this TREE_VEC (instead of using
+ another TREE_VEC). This would simplify the calculation
+ of how many basetypes a given type had. */
+#define BINFO_BASETYPES(NODE) TREE_VEC_ELT ((NODE), 4)
+#define TYPE_BINFO_BASETYPES(NODE) TREE_VEC_ELT (TYPE_BINFO (NODE), 4)
+
+/* For a BINFO record describing an inheritance, this yields a pointer
+ to the artificial FIELD_DECL node which contains the "virtual base
+ class pointer" for the given inheritance. */
+
+#define BINFO_VPTR_FIELD(NODE) TREE_VEC_ELT ((NODE), 5)
+
+/* Accessor macro to get to the Nth basetype of this basetype. */
+#define BINFO_BASETYPE(NODE,N) TREE_VEC_ELT (BINFO_BASETYPES (NODE), (N))
+#define TYPE_BINFO_BASETYPE(NODE,N) BINFO_TYPE (TREE_VEC_ELT (BINFO_BASETYPES (TYPE_BINFO (NODE)), (N)))
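+
+/* Editor's illustrative sketch (not part of the original header):
+   visiting every direct basetype of a hypothetical class type C_TYPE:
+
+       tree binfos = BINFO_BASETYPES (TYPE_BINFO (c_type));
+       int i, n = binfos ? TREE_VEC_LENGTH (binfos) : 0;
+       for (i = 0; i < n; i++)
+         {
+           tree base_binfo = TREE_VEC_ELT (binfos, i);
+           tree base = BINFO_TYPE (base_binfo);
+         }
+
+   and BINFO_OFFSET (base_binfo) gives that base's position in C_TYPE. */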
+
+/* Slot used to build a chain that represents a use of inheritance.
+ For example, if X is derived from Y, and Y is derived from Z,
+ then this field can be used to link the binfo node for X to
+ the binfo node for X's Y to represent the use of inheritance
+ from X to Y. Similarly, this slot of the binfo node for X's Y
+ can point to the Z from which Y is inherited (in X's inheritance
+ hierarchy). In this fashion, one can represent and traverse specific
+ uses of inheritance using the binfo nodes themselves (instead of
+ consing new space pointing to binfo nodes).
+ It is up to the language-dependent front-ends to maintain
+ this information as necessary. */
+#define BINFO_INHERITANCE_CHAIN(NODE) TREE_VEC_ELT ((NODE), 0)
+
+/* Define fields and accessors for nodes representing declared names. */
+
+/* This is the name of the object as written by the user.
+ It is an IDENTIFIER_NODE. */
+#define DECL_NAME(NODE) ((NODE)->decl.name)
+/* This is the name of the object as the assembler will see it
+ (but before any translations made by ASM_OUTPUT_LABELREF).
+ Often this is the same as DECL_NAME.
+ It is an IDENTIFIER_NODE. */
+#define DECL_ASSEMBLER_NAME(NODE) ((NODE)->decl.assembler_name)
+/* Records the section name in a section attribute. Used to pass
+ the name from decl_attributes to make_function_rtl and make_decl_rtl. */
+#define DECL_SECTION_NAME(NODE) ((NODE)->decl.section_name)
+/* For FIELD_DECLs, this is the
+ RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE node that the field is
+ a member of. For VAR_DECL, PARM_DECL, FUNCTION_DECL, LABEL_DECL,
+ and CONST_DECL nodes, this points to the FUNCTION_DECL for the
+ containing function, or else yields NULL_TREE if the given decl has "file scope". */
+#define DECL_CONTEXT(NODE) ((NODE)->decl.context)
+#define DECL_FIELD_CONTEXT(NODE) ((NODE)->decl.context)
+/* In a FIELD_DECL, this is the field position, counting in bits,
+ of the bit closest to the beginning of the structure. */
+#define DECL_FIELD_BITPOS(NODE) ((NODE)->decl.arguments)
+/* In a FIELD_DECL, this indicates whether the field was a bit-field and
+ if so, the type that was originally specified for it.
+ TREE_TYPE may have been modified (in finish_struct). */
+#define DECL_BIT_FIELD_TYPE(NODE) ((NODE)->decl.result)
+/* In FUNCTION_DECL, a chain of ..._DECL nodes. */
+/* VAR_DECL and PARM_DECL reserve the arguments slot
+ for language-specific uses. */
+#define DECL_ARGUMENTS(NODE) ((NODE)->decl.arguments)
+/* In FUNCTION_DECL, holds the decl for the return value. */
+#define DECL_RESULT(NODE) ((NODE)->decl.result)
+/* In PARM_DECL, holds the type as written (perhaps a function or array). */
+#define DECL_ARG_TYPE_AS_WRITTEN(NODE) ((NODE)->decl.result)
+/* For a FUNCTION_DECL, holds the tree of BINDINGs.
+ For a VAR_DECL, holds the initial value.
+ For a PARM_DECL, not used--default
+ values for parameters are encoded in the type of the function,
+ not in the PARM_DECL slot. */
+#define DECL_INITIAL(NODE) ((NODE)->decl.initial)
+/* For a PARM_DECL, records the data type used to pass the argument,
+ which may be different from the type seen in the program. */
+#define DECL_ARG_TYPE(NODE) ((NODE)->decl.initial) /* In PARM_DECL. */
+/* For a FIELD_DECL in a QUAL_UNION_TYPE, records the expression which,
+   if nonzero, indicates that the field occupies the type. */
+#define DECL_QUALIFIER(NODE) ((NODE)->decl.initial)
+/* These two fields describe where in the source code the declaration was. */
+#define DECL_SOURCE_FILE(NODE) ((NODE)->decl.filename)
+#define DECL_SOURCE_LINE(NODE) ((NODE)->decl.linenum)
+/* Holds the size of the datum, as a tree expression.
+ Need not be constant. */
+#define DECL_SIZE(NODE) ((NODE)->decl.size)
+/* Holds the alignment required for the datum. */
+#define DECL_ALIGN(NODE) ((NODE)->decl.frame_size.u)
+/* Holds the machine mode corresponding to the declaration of a variable or
+ field. Always equal to TYPE_MODE (TREE_TYPE (decl)) except for a
+ FIELD_DECL. */
+#define DECL_MODE(NODE) ((NODE)->decl.mode)
+/* Holds the RTL expression for the value of a variable or function. If
+   PROMOTED_MODE is defined, the mode of this expression may not be the same
+ as DECL_MODE. In that case, DECL_MODE contains the mode corresponding
+ to the variable's data type, while the mode
+ of DECL_RTL is the mode actually used to contain the data. */
+#define DECL_RTL(NODE) ((NODE)->decl.rtl)
+/* For PARM_DECL, holds an RTL for the stack slot or register
+ where the data was actually passed. */
+#define DECL_INCOMING_RTL(NODE) ((NODE)->decl.saved_insns.r)
+/* For FUNCTION_DECL, if it is inline, holds the saved insn chain. */
+#define DECL_SAVED_INSNS(NODE) ((NODE)->decl.saved_insns.r)
+/* For FUNCTION_DECL, if it is inline,
+ holds the size of the stack frame, as an integer. */
+#define DECL_FRAME_SIZE(NODE) ((NODE)->decl.frame_size.i)
+/* For FUNCTION_DECL, if it is built-in,
+ this identifies which built-in operation it is. */
+#define DECL_FUNCTION_CODE(NODE) ((NODE)->decl.frame_size.f)
+#define DECL_SET_FUNCTION_CODE(NODE,VAL) ((NODE)->decl.frame_size.f = (VAL))
+/* For a FIELD_DECL, holds the size of the member as an integer. */
+#define DECL_FIELD_SIZE(NODE) ((NODE)->decl.saved_insns.i)
+
+/* The DECL_VINDEX is used for FUNCTION_DECLS in two different ways.
+ Before the struct containing the FUNCTION_DECL is laid out,
+ DECL_VINDEX may point to a FUNCTION_DECL in a base class which
+ is the FUNCTION_DECL which this FUNCTION_DECL will replace as a virtual
+ function. When the class is laid out, this pointer is changed
+ to an INTEGER_CST node which is suitable for use as an index
+ into the virtual function table. */
+#define DECL_VINDEX(NODE) ((NODE)->decl.vindex)
+/* For FIELD_DECLS, DECL_FCONTEXT is the *first* baseclass in
+ which this FIELD_DECL is defined. This information is needed when
+ writing debugging information about vfield and vbase decls for C++. */
+#define DECL_FCONTEXT(NODE) ((NODE)->decl.vindex)
+
+/* Every ..._DECL node gets a unique number. */
+#define DECL_UID(NODE) ((NODE)->decl.uid)
+
+/* For any sort of a ..._DECL node, this points to the original (abstract)
+ decl node which this decl is an instance of, or else it is NULL indicating
+ that this decl is not an instance of some other decl. */
+#define DECL_ABSTRACT_ORIGIN(NODE) ((NODE)->decl.abstract_origin)
+
+/* Nonzero for any sort of ..._DECL node means this decl node represents
+ an inline instance of some original (abstract) decl from an inline function;
+ suppress any warnings about shadowing some other variable. */
+#define DECL_FROM_INLINE(NODE) (DECL_ABSTRACT_ORIGIN (NODE) != (tree) 0)
+
+/* Nonzero if a _DECL means that the name of this decl should be ignored
+ for symbolic debug purposes. */
+#define DECL_IGNORED_P(NODE) ((NODE)->decl.ignored_flag)
+
+/* Nonzero for a given ..._DECL node means that this node represents an
+ "abstract instance" of the given declaration (e.g. in the original
+ declaration of an inline function). When generating symbolic debugging
+   information, we mustn't try to generate any address information for nodes
+ marked as "abstract instances" because we don't actually generate
+ any code or allocate any data space for such instances. */
+#define DECL_ABSTRACT(NODE) ((NODE)->decl.abstract_flag)
+
+/* Nonzero if a _DECL means that no warnings should be generated just
+ because this decl is unused. */
+#define DECL_IN_SYSTEM_HEADER(NODE) ((NODE)->decl.in_system_header_flag)
+
+/* Nonzero for a given ..._DECL node means that this node should be
+ put in .common, if possible. If a DECL_INITIAL is given, and it
+ is not error_mark_node, then the decl cannot be put in .common. */
+#define DECL_COMMON(NODE) ((NODE)->decl.common_flag)
+
+/* Language-specific decl information. */
+#define DECL_LANG_SPECIFIC(NODE) ((NODE)->decl.lang_specific)
+
+/* In a VAR_DECL or FUNCTION_DECL,
+ nonzero means external reference:
+ do not allocate storage, and refer to a definition elsewhere. */
+#define DECL_EXTERNAL(NODE) ((NODE)->decl.external_flag)
+
+/* In a TYPE_DECL
+ nonzero means the detail info about this type is not dumped into stabs.
+ Instead it will generate cross reference ('x') of names.
+ This uses the same flag as DECL_EXTERNAL. */
+#define TYPE_DECL_SUPPRESS_DEBUG(NODE) ((NODE)->decl.external_flag)
+
+
+/* In VAR_DECL and PARM_DECL nodes, nonzero means declared `register'.
+ In LABEL_DECL nodes, nonzero means that an error message about
+ jumping into such a binding contour has been printed for this label. */
+#define DECL_REGISTER(NODE) ((NODE)->decl.regdecl_flag)
+/* In a FIELD_DECL, indicates this field should be bit-packed. */
+#define DECL_PACKED(NODE) ((NODE)->decl.regdecl_flag)
+
+/* Nonzero in a ..._DECL means this variable is ref'd from a nested function.
+ For VAR_DECL nodes, PARM_DECL nodes, and FUNCTION_DECL nodes.
+
+ For LABEL_DECL nodes, nonzero if nonlocal gotos to the label are permitted.
+
+ Also set in some languages for variables, etc., outside the normal
+ lexical scope, such as class instance variables. */
+#define DECL_NONLOCAL(NODE) ((NODE)->decl.nonlocal_flag)
+
+/* Nonzero in a FUNCTION_DECL means this function can be substituted
+ where it is called. */
+#define DECL_INLINE(NODE) ((NODE)->decl.inline_flag)
+
+/* Nonzero in a FUNCTION_DECL means this is a built-in function
+   that is not specified by ANSI C and that users are supposed to be allowed
+ to redefine for any purpose whatever. */
+#define DECL_BUILT_IN_NONANSI(NODE) ((NODE)->common.unsigned_flag)
+
+/* Nonzero in a FIELD_DECL means it is a bit field, and must be accessed
+ specially. */
+#define DECL_BIT_FIELD(NODE) ((NODE)->decl.bit_field_flag)
+/* In a LABEL_DECL, nonzero means label was defined inside a binding
+ contour that restored a stack level and which is now exited. */
+#define DECL_TOO_LATE(NODE) ((NODE)->decl.bit_field_flag)
+/* In a FUNCTION_DECL, nonzero means a built-in function. */
+#define DECL_BUILT_IN(NODE) ((NODE)->decl.bit_field_flag)
+/* In a VAR_DECL that's static,
+ nonzero if the space is in the text section. */
+#define DECL_IN_TEXT_SECTION(NODE) ((NODE)->decl.bit_field_flag)
+
+/* Used in VAR_DECLs to indicate that the variable is a vtable.
+ It is also used in FIELD_DECLs for vtable pointers. */
+#define DECL_VIRTUAL_P(NODE) ((NODE)->decl.virtual_flag)
+
+/* Used to indicate that the linkage status of this DECL is not yet known,
+ so it should not be output now. */
+#define DECL_DEFER_OUTPUT(NODE) ((NODE)->decl.defer_output)
+
+/* Additional flags for language-specific uses. */
+#define DECL_LANG_FLAG_0(NODE) ((NODE)->decl.lang_flag_0)
+#define DECL_LANG_FLAG_1(NODE) ((NODE)->decl.lang_flag_1)
+#define DECL_LANG_FLAG_2(NODE) ((NODE)->decl.lang_flag_2)
+#define DECL_LANG_FLAG_3(NODE) ((NODE)->decl.lang_flag_3)
+#define DECL_LANG_FLAG_4(NODE) ((NODE)->decl.lang_flag_4)
+#define DECL_LANG_FLAG_5(NODE) ((NODE)->decl.lang_flag_5)
+#define DECL_LANG_FLAG_6(NODE) ((NODE)->decl.lang_flag_6)
+#define DECL_LANG_FLAG_7(NODE) ((NODE)->decl.lang_flag_7)
+
+struct tree_decl
+{
+ char common[sizeof (struct tree_common)];
+ char *filename;
+ int linenum;
+ union tree_node *size;
+ unsigned int uid;
+#ifdef ONLY_INT_FIELDS
+ int mode : 8;
+#else
+ enum machine_mode mode : 8;
+#endif
+
+ unsigned external_flag : 1;
+ unsigned nonlocal_flag : 1;
+ unsigned regdecl_flag : 1;
+ unsigned inline_flag : 1;
+ unsigned bit_field_flag : 1;
+ unsigned virtual_flag : 1;
+ unsigned ignored_flag : 1;
+ unsigned abstract_flag : 1;
+
+ unsigned in_system_header_flag : 1;
+ unsigned common_flag : 1;
+ unsigned defer_output : 1;
+ /* room for five more */
+
+ unsigned lang_flag_0 : 1;
+ unsigned lang_flag_1 : 1;
+ unsigned lang_flag_2 : 1;
+ unsigned lang_flag_3 : 1;
+ unsigned lang_flag_4 : 1;
+ unsigned lang_flag_5 : 1;
+ unsigned lang_flag_6 : 1;
+ unsigned lang_flag_7 : 1;
+
+ union tree_node *name;
+ union tree_node *context;
+ union tree_node *arguments;
+ union tree_node *result;
+ union tree_node *initial;
+ union tree_node *abstract_origin;
+ union tree_node *assembler_name;
+ union tree_node *section_name;
+ struct rtx_def *rtl; /* acts as link to register transfer language
+ (rtl) info */
+  /* For a FUNCTION_DECL, if inline, this is the size of the frame needed.
+     If built-in, this is the code identifying which built-in function it is.
+     For other kinds of decls, this is DECL_ALIGN. */
+ union {
+ int i;
+ unsigned int u;
+ enum built_in_function f;
+ } frame_size;
+ /* For FUNCTION_DECLs: points to insn that constitutes its definition
+ on the permanent obstack. For any other kind of decl, this is the
+ alignment. */
+ union {
+ struct rtx_def *r;
+ int i;
+ } saved_insns;
+ union tree_node *vindex;
+ /* Points to a structure whose details depend on the language in use. */
+ struct lang_decl *lang_specific;
+};
+
+/* Define the overall contents of a tree node.
+ It may be any of the structures declared above
+ for various types of node. */
+
+union tree_node
+{
+ struct tree_common common;
+ struct tree_int_cst int_cst;
+ struct tree_real_cst real_cst;
+ struct tree_string string;
+ struct tree_complex complex;
+ struct tree_identifier identifier;
+ struct tree_decl decl;
+ struct tree_type type;
+ struct tree_list list;
+ struct tree_vec vec;
+ struct tree_exp exp;
+ struct tree_block block;
+ };
+
+/* Add prototype support. */
+#ifndef PROTO
+#if defined (USE_PROTOTYPES) ? USE_PROTOTYPES : defined (__STDC__)
+#define PROTO(ARGS) ARGS
+#else
+#define PROTO(ARGS) ()
+#endif
+#endif
+
+#ifndef VPROTO
+#ifdef __STDC__
+#define PVPROTO(ARGS) ARGS
+#define VPROTO(ARGS) ARGS
+#define VA_START(va_list,var) va_start(va_list,var)
+#else
+#define PVPROTO(ARGS) ()
+#define VPROTO(ARGS) (va_alist) va_dcl
+#define VA_START(va_list,var) va_start(va_list)
+#endif
+#endif
+
+#ifndef STDIO_PROTO
+#ifdef BUFSIZ
+#define STDIO_PROTO(ARGS) PROTO(ARGS)
+#else
+#define STDIO_PROTO(ARGS) ()
+#endif
+#endif
+
+#define NULL_TREE (tree) NULL
+
+/* Define a generic NULL if one hasn't already been defined. */
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef GENERIC_PTR
+#if defined (USE_PROTOTYPES) ? USE_PROTOTYPES : defined (__STDC__)
+#define GENERIC_PTR void *
+#else
+#define GENERIC_PTR char *
+#endif
+#endif
+
+#ifndef NULL_PTR
+#define NULL_PTR ((GENERIC_PTR)0)
+#endif
+
+/* The following functions accept a wide integer argument. Rather than
+ having to cast on every function call, we use a macro instead, that is
+ defined here and in rtl.h. */
+
+#ifndef exact_log2
+#define exact_log2(N) exact_log2_wide ((HOST_WIDE_INT) (N))
+#define floor_log2(N) floor_log2_wide ((HOST_WIDE_INT) (N))
+#endif
+
+#if 0
+/* At present, don't prototype xrealloc, since not all of the callers
+   cast their pointers to char *, and not all of the xrealloc
+   definitions use void * yet. */
+extern char *xmalloc PROTO((size_t));
+extern char *xrealloc PROTO((void *, size_t));
+#else
+extern char *xmalloc ();
+extern char *xrealloc ();
+#endif
+
+extern char *oballoc PROTO((int));
+extern char *permalloc PROTO((int));
+extern char *savealloc PROTO((int));
+extern void free PROTO((void *));
+
+/* Lowest level primitive for allocating a node.
+ The TREE_CODE is the only argument. Contents are initialized
+ to zero except for a few of the common fields. */
+
+extern tree make_node PROTO((enum tree_code));
+
+/* Make a copy of a node, with all the same contents except
+ for TREE_PERMANENT. (The copy is permanent
+ iff nodes being made now are permanent.) */
+
+extern tree copy_node PROTO((tree));
+
+/* Make a copy of a chain of TREE_LIST nodes. */
+
+extern tree copy_list PROTO((tree));
+
+/* Make a TREE_VEC. */
+
+extern tree make_tree_vec PROTO((int));
+
+/* Return the (unique) IDENTIFIER_NODE node for a given name.
+ The name is supplied as a char *. */
+
+extern tree get_identifier PROTO((char *));
+
+/* Construct various types of nodes. */
+
+#define build_int_2(LO,HI) \
+ build_int_2_wide ((HOST_WIDE_INT) (LO), (HOST_WIDE_INT) (HI))
+
+extern tree build PVPROTO((enum tree_code, tree, ...));
+extern tree build_nt PVPROTO((enum tree_code, ...));
+extern tree build_parse_node PVPROTO((enum tree_code, ...));
+
+extern tree build_int_2_wide PROTO((HOST_WIDE_INT, HOST_WIDE_INT));
+extern tree build_real PROTO((tree, REAL_VALUE_TYPE));
+extern tree build_real_from_int_cst PROTO((tree, tree));
+extern tree build_complex PROTO((tree, tree));
+extern tree build_string PROTO((int, char *));
+extern tree build1 PROTO((enum tree_code, tree, tree));
+extern tree build_tree_list PROTO((tree, tree));
+extern tree build_decl_list PROTO((tree, tree));
+extern tree build_decl PROTO((enum tree_code, tree, tree));
+extern tree build_block PROTO((tree, tree, tree, tree, tree));
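+
+/* Editor's usage sketch (illustrative, not original): building the
+   expression `a + 1' for an int variable might look like
+
+       tree a = build_decl (VAR_DECL, get_identifier ("a"),
+                            integer_type_node);
+       tree sum = build (PLUS_EXPR, integer_type_node, a,
+                         build_int_2 (1, 0));
+
+   build takes as many operand arguments as the code's entry in tree.def
+   declares; build1 is the one-operand form. */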
+
+/* Construct various nodes representing data types. */
+
+extern tree make_signed_type PROTO((int));
+extern tree make_unsigned_type PROTO((int));
+extern tree signed_or_unsigned_type PROTO((int, tree));
+extern void fixup_unsigned_type PROTO((tree));
+extern tree build_pointer_type PROTO((tree));
+extern tree build_reference_type PROTO((tree));
+extern tree build_index_type PROTO((tree));
+extern tree build_index_2_type PROTO((tree, tree));
+extern tree build_array_type PROTO((tree, tree));
+extern tree build_function_type PROTO((tree, tree));
+extern tree build_method_type PROTO((tree, tree));
+extern tree build_offset_type PROTO((tree, tree));
+extern tree build_complex_type PROTO((tree));
+extern tree array_type_nelts PROTO((tree));
+
+extern tree value_member PROTO((tree, tree));
+extern tree purpose_member PROTO((tree, tree));
+extern tree binfo_member PROTO((tree, tree));
+extern int attribute_list_equal PROTO((tree, tree));
+extern int attribute_list_contained PROTO((tree, tree));
+extern int tree_int_cst_equal PROTO((tree, tree));
+extern int tree_int_cst_lt PROTO((tree, tree));
+extern int tree_int_cst_sgn PROTO((tree));
+extern int index_type_equal PROTO((tree, tree));
+
+/* From expmed.c. Since rtl.h is included after tree.h, we can't
+ put the prototype here. Rtl.h does declare the prototype if
+ tree.h had been included. */
+
+extern tree make_tree ();
+
+/* Return a type like TTYPE except that its TYPE_ATTRIBUTES
+ is ATTRIBUTE.
+
+ Such modified types already made are recorded so that duplicates
+ are not made. */
+
+extern tree build_type_attribute_variant PROTO((tree, tree));
+
+/* Given a type node TYPE, and CONSTP and VOLATILEP, return a type
+ for the same kind of data as TYPE describes.
+ Variants point to the "main variant" (which has neither CONST nor VOLATILE)
+ via TYPE_MAIN_VARIANT, and it points to a chain of other variants
+ so that duplicate variants are never made.
+ Only main variants should ever appear as types of expressions. */
+
+extern tree build_type_variant PROTO((tree, int, int));
+
+/* Make a copy of a type node. */
+
+extern tree build_type_copy PROTO((tree));
+
+/* Given a ..._TYPE node, calculate the TYPE_SIZE, TYPE_SIZE_UNIT,
+ TYPE_ALIGN and TYPE_MODE fields.
+ If called more than once on one node, does nothing except
+ for the first time. */
+
+extern void layout_type PROTO((tree));
+
+/* Given a hashcode and a ..._TYPE node (for which the hashcode was made),
+ return a canonicalized ..._TYPE node, so that duplicates are not made.
+ How the hash code is computed is up to the caller, as long as any two
+ callers that could hash identical-looking type nodes agree. */
+
+extern tree type_hash_canon PROTO((int, tree));
+
+/* Given a VAR_DECL, PARM_DECL, RESULT_DECL or FIELD_DECL node,
+ calculates the DECL_SIZE, DECL_SIZE_UNIT, DECL_ALIGN and DECL_MODE
+ fields. Call this only once for any given decl node.
+
+ Second argument is the boundary that this field can be assumed to
+ be starting at (in bits). Zero means it can be assumed aligned
+ on any boundary that may be needed. */
+
+extern void layout_decl PROTO((tree, unsigned));
+
+/* Return an expr equal to X but certainly not valid as an lvalue. */
+
+extern tree non_lvalue PROTO((tree));
+extern tree pedantic_non_lvalue PROTO((tree));
+
+extern tree convert PROTO((tree, tree));
+extern tree size_in_bytes PROTO((tree));
+extern int int_size_in_bytes PROTO((tree));
+extern tree size_binop PROTO((enum tree_code, tree, tree));
+extern tree size_int PROTO((unsigned));
+extern tree round_up PROTO((tree, int));
+extern tree get_pending_sizes PROTO((void));
+
+/* Type for sizes of data types. */
+
+extern tree sizetype;
+
+/* Concatenate two lists (chains of TREE_LIST nodes) X and Y
+ by making the last node in X point to Y.
+ Returns X, except if X is 0 returns Y. */
+
+extern tree chainon PROTO((tree, tree));
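
The chainon contract above can be sketched with an ordinary singly linked list; `node' and `chain' below are hypothetical stand-ins for tree nodes and the TREE_CHAIN field, not part of the imported sources:

#include <stdio.h>

/* Demo list node; `chain' plays the role of TREE_CHAIN. */
struct node { int value; struct node *chain; };

static struct node *
chainon_demo (struct node *x, struct node *y)
{
  struct node *t;

  if (x == 0)
    return y;                   /* X is 0: return Y */
  for (t = x; t->chain != 0; t = t->chain)
    ;                           /* find the last node of X */
  t->chain = y;                 /* make it point to Y */
  return x;
}

int
main (void)
{
  struct node b = { 2, 0 }, a = { 1, 0 };
  struct node *list = chainon_demo (&a, &b);
  for (; list != 0; list = list->chain)
    printf ("%d\n", list->value);       /* prints 1 then 2 */
  return 0;
}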
+
+/* Make a new TREE_LIST node from specified PURPOSE, VALUE and CHAIN. */
+
+extern tree tree_cons PROTO((tree, tree, tree));
+extern tree perm_tree_cons PROTO((tree, tree, tree));
+extern tree temp_tree_cons PROTO((tree, tree, tree));
+extern tree saveable_tree_cons PROTO((tree, tree, tree));
+extern tree decl_tree_cons PROTO((tree, tree, tree));
+
+/* Return the last tree node in a chain. */
+
+extern tree tree_last PROTO((tree));
+
+/* Reverse the order of elements in a chain, and return the new head. */
+
+extern tree nreverse PROTO((tree));
+
+/* Returns the length of a chain of nodes
+ (number of chain pointers to follow before reaching a null pointer). */
+
+extern int list_length PROTO((tree));
+
+/* integer_zerop (tree x) is nonzero if X is an integer constant of value 0 */
+
+extern int integer_zerop PROTO((tree));
+
+/* integer_onep (tree x) is nonzero if X is an integer constant of value 1 */
+
+extern int integer_onep PROTO((tree));
+
+/* integer_all_onesp (tree x) is nonzero if X is an integer constant
+ all of whose significant bits are 1. */
+
+extern int integer_all_onesp PROTO((tree));
+
+/* integer_pow2p (tree x) is nonzero if X is an integer constant with
+ exactly one bit set to 1. */
+
+extern int integer_pow2p PROTO((tree));
+
+/* staticp (tree x) is nonzero if X is a reference to data allocated
+ at a fixed address in memory. */
+
+extern int staticp PROTO((tree));
+
+/* Reports an error if argument X is not an lvalue.
+ Also returns 1 if X is an lvalue, 0 if not. */
+
+extern int lvalue_or_else PROTO((tree, char *));
+
+/* save_expr (EXP) returns an expression equivalent to EXP
+ but which can be used multiple times
+ and will evaluate EXP only once. */
+
+extern tree save_expr PROTO((tree));
+
+/* Return 1 if EXP contains a PLACEHOLDER_EXPR; i.e., if it represents a size
+ or offset that depends on a field within a record.
+
+ Note that we only allow such expressions within simple arithmetic
+ or a COND_EXPR. */
+
+extern int contains_placeholder_p PROTO((tree));
+
+/* Given a tree EXP, a FIELD_DECL F, and a replacement value R,
+ return a tree with all occurrences of references to F in a
+ PLACEHOLDER_EXPR replaced by R. Note that we assume here that EXP
+ contains only arithmetic expressions. */
+
+extern tree substitute_in_expr PROTO((tree, tree, tree));
+
+/* Given a type T, a FIELD_DECL F, and a replacement value R,
+ return a new type with all size expressions that contain F
+ updated by replacing the reference to F with R. */
+
+extern tree substitute_in_type PROTO((tree, tree, tree));
+
+/* variable_size (EXP) is like save_expr (EXP) except that it
+ is for the special case of something that is part of a
+ variable size for a data type. It makes special arrangements
+ to compute the value at the right time when the data type
+ belongs to a function parameter. */
+
+extern tree variable_size PROTO((tree));
+
+/* stabilize_reference (EXP) returns a reference equivalent to EXP
+ but which can be used multiple times
+ and will evaluate the subexpressions only once. */
+
+extern tree stabilize_reference PROTO((tree));
+
+/* Return EXP, stripped of any conversions to wider types
+ in such a way that the result of converting to type FOR_TYPE
+ is the same as if EXP were converted to FOR_TYPE.
+ If FOR_TYPE is 0, it signifies EXP's type. */
+
+extern tree get_unwidened PROTO((tree, tree));
+
+/* Return OP or a simpler expression for a narrower value
+ which can be sign-extended or zero-extended to give back OP.
+ Store in *UNSIGNEDP_PTR either 1 if the value should be zero-extended
+ or 0 if the value should be sign-extended. */
+
+extern tree get_narrower PROTO((tree, int *));
+
+/* Given MODE and UNSIGNEDP, return a suitable type-tree
+ with that mode.
+ The definition of this resides in language-specific code
+ as the repertoire of available types may vary. */
+
+extern tree type_for_mode PROTO((enum machine_mode, int));
+
+/* Given PRECISION and UNSIGNEDP, return a suitable type-tree
+ for an integer type with at least that precision.
+ The definition of this resides in language-specific code
+ as the repertoire of available types may vary. */
+
+extern tree type_for_size PROTO((unsigned, int));
+
+/* Given an integer type T, return a type like T but unsigned.
+ If T is unsigned, the value is T.
+ The definition of this resides in language-specific code
+ as the repertoire of available types may vary. */
+
+extern tree unsigned_type PROTO((tree));
+
+/* Given an integer type T, return a type like T but signed.
+ If T is signed, the value is T.
+ The definition of this resides in language-specific code
+ as the repertoire of available types may vary. */
+
+extern tree signed_type PROTO((tree));
+
+/* This function must be defined in the language-specific files.
+ expand_expr calls it to build the cleanup-expression for a TARGET_EXPR. */
+
+extern tree maybe_build_cleanup PROTO((tree));
+
+/* Given an expression EXP that may be a COMPONENT_REF or an ARRAY_REF,
+ look for nested component-refs or array-refs at constant positions
+ and find the ultimate containing object, which is returned. */
+
+extern tree get_inner_reference PROTO((tree, int *, int *, tree *, enum machine_mode *, int *, int *));
+
+/* Return the FUNCTION_DECL which provides this _DECL with its context,
+ or zero if none. */
+extern tree decl_function_context PROTO((tree));
+
+/* Return the RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE which provides
+ this _DECL with its context, or zero if none. */
+extern tree decl_type_context PROTO((tree));
+
+/* Given the FUNCTION_DECL for the current function,
+ return zero if it is ok for this function to be inline.
+ Otherwise return a warning message with a single %s
+ for the function's name. */
+
+extern char *function_cannot_inline_p PROTO((tree));
+
+/* Return 1 if EXPR is the real constant zero. */
+extern int real_zerop PROTO((tree));
+
+/* Declare commonly used variables for tree structure. */
+
+/* An integer constant with value 0 */
+extern tree integer_zero_node;
+
+/* An integer constant with value 1 */
+extern tree integer_one_node;
+
+/* An integer constant with value 0 whose type is sizetype. */
+extern tree size_zero_node;
+
+/* An integer constant with value 1 whose type is sizetype. */
+extern tree size_one_node;
+
+/* A constant of type pointer-to-int and value 0 */
+extern tree null_pointer_node;
+
+/* A node of type ERROR_MARK. */
+extern tree error_mark_node;
+
+/* The type node for the void type. */
+extern tree void_type_node;
+
+/* The type node for the ordinary (signed) integer type. */
+extern tree integer_type_node;
+
+/* The type node for the unsigned integer type. */
+extern tree unsigned_type_node;
+
+/* The type node for the ordinary character type. */
+extern tree char_type_node;
+
+/* Points to the name of the input file from which the current input
+ being parsed originally came (before it went into cpp). */
+extern char *input_filename;
+
+/* Current line number in input file. */
+extern int lineno;
+
+/* Nonzero for -pedantic switch: warn about anything
+ that standard C forbids. */
+extern int pedantic;
+
+/* Nonzero means can safely call expand_expr now;
+ otherwise layout_type puts variable sizes onto `pending_sizes' instead. */
+
+extern int immediate_size_expand;
+
+/* Points to the FUNCTION_DECL of the function whose body we are reading. */
+
+extern tree current_function_decl;
+
+/* Nonzero if function being compiled can call setjmp. */
+
+extern int current_function_calls_setjmp;
+
+/* Nonzero if function being compiled can call longjmp. */
+
+extern int current_function_calls_longjmp;
+
+/* Nonzero means all ..._TYPE nodes should be allocated permanently. */
+
+extern int all_types_permanent;
+
+/* Pointer to function to compute the name to use to print a declaration. */
+
+extern char *(*decl_printable_name) ();
+
+/* Pointer to function to finish handling an incomplete decl at the
+ end of compilation. */
+
+extern void (*incomplete_decl_finalize_hook) ();
+
+/* In tree.c */
+extern char *perm_calloc PROTO((int, long));
+
+/* In stmt.c */
+
+extern void expand_fixups PROTO((struct rtx_def *));
+extern tree expand_start_stmt_expr PROTO((void));
+extern tree expand_end_stmt_expr PROTO((tree));
+extern void expand_expr_stmt PROTO((tree));
+extern void expand_decl_init PROTO((tree));
+extern void clear_last_expr PROTO((void));
+extern void expand_label PROTO((tree));
+extern void expand_goto PROTO((tree));
+extern void expand_asm PROTO((tree));
+extern void expand_start_cond PROTO((tree, int));
+extern void expand_end_cond PROTO((void));
+extern void expand_start_else PROTO((void));
+extern void expand_start_elseif PROTO((tree));
+extern struct nesting *expand_start_loop PROTO((int));
+extern struct nesting *expand_start_loop_continue_elsewhere PROTO((int));
+extern void expand_loop_continue_here PROTO((void));
+extern void expand_end_loop PROTO((void));
+extern int expand_continue_loop PROTO((struct nesting *));
+extern int expand_exit_loop PROTO((struct nesting *));
+extern int expand_exit_loop_if_false PROTO((struct nesting *,
+ tree));
+extern int expand_exit_something PROTO((void));
+
+extern void expand_null_return PROTO((void));
+extern void expand_return PROTO((tree));
+extern void expand_start_bindings PROTO((int));
+extern void expand_end_bindings PROTO((tree, int, int));
+extern tree last_cleanup_this_contour PROTO((void));
+extern void expand_start_case PROTO((int, tree, tree,
+ char *));
+extern void expand_end_case PROTO((tree));
+extern int pushcase PROTO((tree,
+ tree (*) (tree, tree),
+ tree, tree *));
+extern int pushcase_range PROTO((tree, tree,
+ tree (*) (tree, tree),
+ tree, tree *));
+
+/* In fold-const.c */
+
+/* Fold constants as much as possible in an expression.
+ Returns the simplified expression.
+ Acts only on the top level of the expression;
+ if the argument itself cannot be simplified, its
+ subexpressions are not changed. */
+
+extern tree fold PROTO((tree));
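
A minimal illustration of "acts only on the top level": the hypothetical fold_demo below collapses the root node when both operands are already constants and otherwise returns the expression untouched, leaving subexpressions alone. All names are invented for the sketch:

#include <stdio.h>

enum op_demo { CONST_DEMO, PLUS_DEMO };

struct expr_demo
{
  enum op_demo op;
  int value;                       /* valid when op == CONST_DEMO */
  struct expr_demo *left, *right;  /* valid when op == PLUS_DEMO */
};

/* Fold only the root: if both operands are constants, produce a
   constant node; otherwise return the expression unchanged.
   (Returns a pointer to a static buffer; demo only.) */
static struct expr_demo *
fold_demo (struct expr_demo *e)
{
  static struct expr_demo result;

  if (e->op == PLUS_DEMO
      && e->left->op == CONST_DEMO && e->right->op == CONST_DEMO)
    {
      result.op = CONST_DEMO;
      result.value = e->left->value + e->right->value;
      return &result;
    }
  return e;
}

int
main (void)
{
  struct expr_demo one = { CONST_DEMO, 1, 0, 0 };
  struct expr_demo two = { CONST_DEMO, 2, 0, 0 };
  struct expr_demo sum = { PLUS_DEMO, 0, &one, &two };
  printf ("%d\n", fold_demo (&sum)->value);   /* prints 3 */
  return 0;
}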
+
+extern int force_fit_type PROTO((tree, int));
+extern int add_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT *, HOST_WIDE_INT *));
+extern int neg_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT *, HOST_WIDE_INT *));
+extern int mul_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT *, HOST_WIDE_INT *));
+extern void lshift_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int, HOST_WIDE_INT *,
+ HOST_WIDE_INT *, int));
+extern void rshift_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int,
+ HOST_WIDE_INT *, HOST_WIDE_INT *, int));
+extern void lrotate_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int, HOST_WIDE_INT *,
+ HOST_WIDE_INT *));
+extern void rrotate_double PROTO((HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT, int, HOST_WIDE_INT *,
+ HOST_WIDE_INT *));
+extern int operand_equal_p PROTO((tree, tree, int));
+extern tree invert_truthvalue PROTO((tree));
+
+/* The language front-end must define these functions. */
+
+/* Function of no arguments for initializing lexical scanning. */
+extern void init_lex PROTO((void));
+/* Function of no arguments for initializing the symbol table. */
+extern void init_decl_processing PROTO((void));
+
+/* Functions called with no arguments at the beginning and end of
+ processing the input source file. */
+extern void lang_init PROTO((void));
+extern void lang_finish PROTO((void));
+
+/* Function to identify which front-end produced the output file. */
+extern char *lang_identify PROTO((void));
+
+/* Function to replace the DECL_LANG_SPECIFIC field of a DECL with a copy. */
+extern void copy_lang_decl PROTO((tree));
+
+/* Function called with no arguments to parse and compile the input. */
+extern int yyparse PROTO((void));
+/* Function called with an option as argument
+ to decode options starting with -f or -W or +.
+ It should return nonzero if it handles the option. */
+extern int lang_decode_option PROTO((char *));
+
+/* Functions for processing symbol declarations. */
+/* Function to enter a new lexical scope.
+ Takes one argument: always zero when called from outside the front end. */
+extern void pushlevel PROTO((int));
+/* Function to exit a lexical scope. It returns a BINDING for that scope.
+ Takes three arguments:
+ KEEP -- nonzero if there were declarations in this scope.
+ REVERSE -- reverse the order of decls before returning them.
+ FUNCTIONBODY -- nonzero if this level is the body of a function. */
+extern tree poplevel PROTO((int, int, int));
+/* Set the BLOCK node for the current scope level. */
+extern void set_block PROTO((tree));
+/* Function to add a decl to the current scope level.
+ Takes one argument, a decl to add.
+ Returns that decl, or, if the same symbol is already declared, may
+ return a different decl for that name. */
+extern tree pushdecl PROTO((tree));
+/* Function to return the chain of decls so far in the current scope level. */
+extern tree getdecls PROTO((void));
+/* Function to return the chain of structure tags in the current scope level. */
+extern tree gettags PROTO((void));
+
+extern tree build_range_type PROTO((tree, tree, tree));
+
+/* Call when starting to parse a declaration:
+ make expressions in the declaration last the length of the function.
+ Returns an argument that should be passed to resume_momentary later. */
+extern int suspend_momentary PROTO((void));
+
+extern int allocation_temporary_p PROTO((void));
+
+/* Call when finished parsing a declaration:
+ restore the treatment of node-allocation that was
+ in effect before the suspension.
+ YES should be the value previously returned by suspend_momentary. */
+extern void resume_momentary PROTO((int));
+
+/* Called after finishing a record, union or enumeral type. */
+extern void rest_of_type_compilation PROTO((tree, int));
+
+/* Save the current set of obstacks, but don't change them. */
+extern void push_obstacks_nochange PROTO((void));
+
+extern void permanent_allocation PROTO((int));
+
+extern void push_momentary PROTO((void));
+
+extern void clear_momentary PROTO((void));
+
+extern void pop_momentary PROTO((void));
+
+extern void end_temporary_allocation PROTO((void));
+
+/* Pop the obstack selection stack. */
+extern void pop_obstacks PROTO((void));
diff --git a/gnu/usr.bin/cc/include/typeclass.h b/gnu/usr.bin/cc/include/typeclass.h
new file mode 100644
index 0000000..b166042
--- /dev/null
+++ b/gnu/usr.bin/cc/include/typeclass.h
@@ -0,0 +1,14 @@
+/* Values returned by __builtin_classify_type. */
+
+enum type_class
+{
+ no_type_class = -1,
+ void_type_class, integer_type_class, char_type_class,
+ enumeral_type_class, boolean_type_class,
+ pointer_type_class, reference_type_class, offset_type_class,
+ real_type_class, complex_type_class,
+ function_type_class, method_type_class,
+ record_type_class, union_type_class,
+ array_type_class, string_type_class, set_type_class, file_type_class,
+ lang_type_class
+};
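
With GCC, these values are what __builtin_classify_type reports for an expression; counting from void_type_class = 0, an int classifies as 1 (integer_type_class), a double as 8 (real_type_class), and a pointer as 5 (pointer_type_class). A small check, assuming a GCC host:

#include <stdio.h>

int
main (void)
{
  int i = 0;
  double d = 0.0;
  int *p = &i;

  /* Expected output with the enum above: 1 8 5 */
  printf ("%d %d %d\n",
          __builtin_classify_type (i),
          __builtin_classify_type (d),
          __builtin_classify_type (p));
  return 0;
}
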
diff --git a/gnu/usr.bin/cc/libgcc/Makefile b/gnu/usr.bin/cc/libgcc/Makefile
new file mode 100644
index 0000000..4b2c5a4
--- /dev/null
+++ b/gnu/usr.bin/cc/libgcc/Makefile
@@ -0,0 +1,46 @@
+#
+# $FreeBSD$
+#
+
+LIB= gcc
+INSTALL_PIC_ARCHIVE= yes
+SHLIB_MAJOR= 26
+SHLIB_MINOR= 0
+
+LIB1OBJS= _mulsi3.o _udivsi3.o _divsi3.o _umodsi3.o _modsi3.o \
+	_lshrsi3.o _lshlsi3.o _ashrsi3.o _ashlsi3.o \
+	_divdf3.o _muldf3.o _negdf2.o _adddf3.o _subdf3.o \
+	_fixdfsi.o _fixsfsi.o _floatsidf.o _floatsisf.o \
+	_truncdfsf2.o _extendsfdf2.o \
+	_addsf3.o _negsf2.o _subsf3.o _mulsf3.o _divsf3.o \
+	_eqdf2.o _nedf2.o _gtdf2.o _gedf2.o _ltdf2.o _ledf2.o \
+	_eqsf2.o _nesf2.o _gtsf2.o _gesf2.o _ltsf2.o _lesf2.o
+LIB2OBJS= _muldi3.o _divdi3.o _moddi3.o _udivdi3.o _umoddi3.o \
+	_negdi2.o _lshrdi3.o _lshldi3.o _ashldi3.o _ashrdi3.o _ffsdi2.o \
+	_udiv_w_sdiv.o _udivmoddi4.o _cmpdi2.o _ucmpdi2.o \
+	_floatdidf.o _floatdisf.o _fixunsdfsi.o _fixunssfsi.o \
+	_fixunsdfdi.o _fixdfdi.o _fixunssfdi.o _fixsfdi.o \
+	_fixxfdi.o _fixunsxfdi.o _floatdixf.o _fixunsxfsi.o \
+	_fixtfdi.o _fixunstfdi.o _floatditf.o \
+	__gcc_bcmp.o _varargs.o _eprintf.o \
+	_op_new.o _op_vnew.o _new_handler.o _op_delete.o _op_vdel.o \
+	_bb.o _shtab.o _clear_cache.o _trampoline.o __main.o _exit.o _ctors.o
+
+OBJS= ${LIB1OBJS} ${LIB2OBJS}
+LIB1SOBJS=${LIB1OBJS:.o=.so}
+LIB2SOBJS=${LIB2OBJS:.o=.so}
+P1OBJS=${LIB1OBJS:.o=.po}
+P2OBJS=${LIB2OBJS:.o=.po}
+
+${LIB1OBJS}: libgcc1.c
+ ${CC} -c ${CFLAGS} -DL${.PREFIX} -o ${.TARGET} ${.CURDIR}/libgcc1.c
+ @${LD} -x -r ${.TARGET}
+ @mv a.out ${.TARGET}
+
+${LIB2OBJS}: libgcc2.c
+ ${CC} -c ${CFLAGS} -DL${.PREFIX} -o ${.TARGET} ${.CURDIR}/libgcc2.c
+ @${LD} -x -r ${.TARGET}
+ @mv a.out ${.TARGET}
+
+.if !defined(NOPIC)
+${LIB1SOBJS}: libgcc1.c
+ ${CC} -c -fpic ${CFLAGS} -DL${.PREFIX} -o ${.TARGET} ${.CURDIR}/libgcc1.c
+
+${LIB2SOBJS}: libgcc2.c
+ ${CC} -c -fpic ${CFLAGS} -DL${.PREFIX} -o ${.TARGET} ${.CURDIR}/libgcc2.c
+.endif
+
+.if !defined(NOPROFILE)
+${P1OBJS}: libgcc1.c
+ ${CC} -c -p ${CFLAGS} -DL${.PREFIX} -o ${.TARGET} ${.CURDIR}/libgcc1.c
+
+${P2OBJS}: libgcc2.c
+ ${CC} -c -p ${CFLAGS} -DL${.PREFIX} -o ${.TARGET} ${.CURDIR}/libgcc2.c
+.endif
+
+.include <bsd.lib.mk>
+
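
The per-object rules above work because BSD make expands ${.PREFIX} to the target's filename prefix, so building _mulsi3.o compiles libgcc1.c with -DL_mulsi3 and only the matching #ifdef block survives preprocessing. A hypothetical two-function miniature of that layout (compile with cc -c -DL_mulsi3 mini_libgcc.c, or -DL_divsi3):

/* mini_libgcc.c (hypothetical): each object is built from this one
   source with a different -DL<name>, mirroring libgcc1.c below. */
#ifdef L_mulsi3
long
mulsi3_demo (long a, long b)
{
  return a * b;
}
#endif

#ifdef L_divsi3
long
divsi3_demo (long a, long b)
{
  return a / b;
}
#endif
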
diff --git a/gnu/usr.bin/cc/libgcc/libgcc1.c b/gnu/usr.bin/cc/libgcc/libgcc1.c
new file mode 100644
index 0000000..7c0e0c1
--- /dev/null
+++ b/gnu/usr.bin/cc/libgcc/libgcc1.c
@@ -0,0 +1,608 @@
+/* Subroutines needed by GCC output code on some machines. */
+/* Compile this file with the Unix C compiler! */
+/* Copyright (C) 1987, 1988, 1992, 1994 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#include "config.h"
+
+/* Don't use `fancy_abort' here even if config.h says to use it. */
+#ifdef abort
+#undef abort
+#endif
+
+/* On some machines, cc is really GCC. For these machines, we can't
+ expect these functions to be properly compiled unless GCC open codes
+ the operation (which is precisely when the function won't be used).
+ So allow tm.h to specify ways of accomplishing the operations
+ by defining the macros perform_*.
+
+ On a machine where cc is some other compiler, there is usually no
+ reason to define perform_*. The other compiler normally has other ways
+ of implementing all of these operations.
+
+ In some cases a certain machine may come with GCC installed as cc
+ or may have some other compiler. Then it may make sense for tm.h
+ to define perform_* only if __GNUC__ is defined. */
+
+#ifndef perform_mulsi3
+#define perform_mulsi3(a, b) return a * b
+#endif
+
+#ifndef perform_divsi3
+#define perform_divsi3(a, b) return a / b
+#endif
+
+#ifndef perform_udivsi3
+#define perform_udivsi3(a, b) return a / b
+#endif
+
+#ifndef perform_modsi3
+#define perform_modsi3(a, b) return a % b
+#endif
+
+#ifndef perform_umodsi3
+#define perform_umodsi3(a, b) return a % b
+#endif
+
+#ifndef perform_lshrsi3
+#define perform_lshrsi3(a, b) return a >> b
+#endif
+
+#ifndef perform_lshlsi3
+#define perform_lshlsi3(a, b) return a << b
+#endif
+
+#ifndef perform_ashrsi3
+#define perform_ashrsi3(a, b) return a >> b
+#endif
+
+#ifndef perform_ashlsi3
+#define perform_ashlsi3(a, b) return a << b
+#endif
+
+#ifndef perform_adddf3
+#define perform_adddf3(a, b) return a + b
+#endif
+
+#ifndef perform_subdf3
+#define perform_subdf3(a, b) return a - b
+#endif
+
+#ifndef perform_muldf3
+#define perform_muldf3(a, b) return a * b
+#endif
+
+#ifndef perform_divdf3
+#define perform_divdf3(a, b) return a / b
+#endif
+
+#ifndef perform_addsf3
+#define perform_addsf3(a, b) return INTIFY (a + b)
+#endif
+
+#ifndef perform_subsf3
+#define perform_subsf3(a, b) return INTIFY (a - b)
+#endif
+
+#ifndef perform_mulsf3
+#define perform_mulsf3(a, b) return INTIFY (a * b)
+#endif
+
+#ifndef perform_divsf3
+#define perform_divsf3(a, b) return INTIFY (a / b)
+#endif
+
+#ifndef perform_negdf2
+#define perform_negdf2(a) return -a
+#endif
+
+#ifndef perform_negsf2
+#define perform_negsf2(a) return INTIFY (-a)
+#endif
+
+#ifndef perform_fixdfsi
+#define perform_fixdfsi(a) return (nongcc_SI_type) a
+#endif
+
+#ifndef perform_fixsfsi
+#define perform_fixsfsi(a) return (nongcc_SI_type) a
+#endif
+
+#ifndef perform_floatsidf
+#define perform_floatsidf(a) return (double) a
+#endif
+
+#ifndef perform_floatsisf
+#define perform_floatsisf(a) return INTIFY ((float) a)
+#endif
+
+#ifndef perform_extendsfdf2
+#define perform_extendsfdf2(a) return a
+#endif
+
+#ifndef perform_truncdfsf2
+#define perform_truncdfsf2(a) return INTIFY (a)
+#endif
+
+/* Note that eqdf2 returns a value for "true" that is == 0,
+ nedf2 returns a value for "true" that is != 0,
+ gtdf2 returns a value for "true" that is > 0,
+ and so on. */
+
+#ifndef perform_eqdf2
+#define perform_eqdf2(a, b) return !(a == b)
+#endif
+
+#ifndef perform_nedf2
+#define perform_nedf2(a, b) return a != b
+#endif
+
+#ifndef perform_gtdf2
+#define perform_gtdf2(a, b) return a > b
+#endif
+
+#ifndef perform_gedf2
+#define perform_gedf2(a, b) return (a >= b) - 1
+#endif
+
+#ifndef perform_ltdf2
+#define perform_ltdf2(a, b) return -(a < b)
+#endif
+
+#ifndef perform_ledf2
+#define perform_ledf2(a, b) return 1 - (a <= b)
+#endif
+
+#ifndef perform_eqsf2
+#define perform_eqsf2(a, b) return !(a == b)
+#endif
+
+#ifndef perform_nesf2
+#define perform_nesf2(a, b) return a != b
+#endif
+
+#ifndef perform_gtsf2
+#define perform_gtsf2(a, b) return a > b
+#endif
+
+#ifndef perform_gesf2
+#define perform_gesf2(a, b) return (a >= b) - 1
+#endif
+
+#ifndef perform_ltsf2
+#define perform_ltsf2(a, b) return -(a < b)
+#endif
+
+#ifndef perform_lesf2
+#define perform_lesf2(a, b) return 1 - (a <= b)
+#endif
+
+/* Define the C data type to use for an SImode value. */
+
+#ifndef nongcc_SI_type
+#define nongcc_SI_type long int
+#endif
+
+/* Define the C data type to use for a value of word size. */
+#ifndef nongcc_word_type
+#define nongcc_word_type nongcc_SI_type
+#endif
+
+/* Define the type to be used for returning an SF mode value
+ and the method for turning a float into that type.
+ These definitions work for machines where an SF value is
+ returned in the same register as an int. */
+
+#ifndef FLOAT_VALUE_TYPE
+#define FLOAT_VALUE_TYPE int
+#endif
+
+#ifndef INTIFY
+#define INTIFY(FLOATVAL) (intify.f = (FLOATVAL), intify.i)
+#endif
+
+#ifndef FLOATIFY
+#define FLOATIFY(INTVAL) ((INTVAL).f)
+#endif
+
+#ifndef FLOAT_ARG_TYPE
+#define FLOAT_ARG_TYPE union flt_or_int
+#endif
+
+union flt_or_value { FLOAT_VALUE_TYPE i; float f; };
+
+union flt_or_int { int i; float f; };
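
INTIFY and FLOATIFY simply shuttle a float's bit pattern through an int-sized union, for machines that return SF values in an integer register. A self-contained demonstration, assuming sizeof (int) == sizeof (float); the _demo names are invented:

#include <stdio.h>

union flt_or_int_demo { int i; float f; };

static int
intify_demo (float x)            /* mimics: return INTIFY (x) */
{
  union flt_or_int_demo intify;
  intify.f = x;
  return intify.i;
}

static float
floatify_demo (int bits)         /* mimics: FLOATIFY (a) */
{
  union flt_or_int_demo u;
  u.i = bits;
  return u.f;
}

int
main (void)
{
  /* Round trip through the integer representation. */
  printf ("%f\n", floatify_demo (intify_demo (1.5f)));  /* 1.500000 */
  return 0;
}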
+
+
+#ifdef L_mulsi3
+nongcc_SI_type
+__mulsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_mulsi3 (a, b);
+}
+#endif
+
+#ifdef L_udivsi3
+nongcc_SI_type
+__udivsi3 (a, b)
+ unsigned nongcc_SI_type a, b;
+{
+ perform_udivsi3 (a, b);
+}
+#endif
+
+#ifdef L_divsi3
+nongcc_SI_type
+__divsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_divsi3 (a, b);
+}
+#endif
+
+#ifdef L_umodsi3
+nongcc_SI_type
+__umodsi3 (a, b)
+ unsigned nongcc_SI_type a, b;
+{
+ perform_umodsi3 (a, b);
+}
+#endif
+
+#ifdef L_modsi3
+nongcc_SI_type
+__modsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_modsi3 (a, b);
+}
+#endif
+
+#ifdef L_lshrsi3
+nongcc_SI_type
+__lshrsi3 (a, b)
+ unsigned nongcc_SI_type a, b;
+{
+ perform_lshrsi3 (a, b);
+}
+#endif
+
+#ifdef L_lshlsi3
+nongcc_SI_type
+__lshlsi3 (a, b)
+ unsigned nongcc_SI_type a, b;
+{
+ perform_lshlsi3 (a, b);
+}
+#endif
+
+#ifdef L_ashrsi3
+nongcc_SI_type
+__ashrsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_ashrsi3 (a, b);
+}
+#endif
+
+#ifdef L_ashlsi3
+nongcc_SI_type
+__ashlsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_ashlsi3 (a, b);
+}
+#endif
+
+#ifdef L_divdf3
+double
+__divdf3 (a, b)
+ double a, b;
+{
+ perform_divdf3 (a, b);
+}
+#endif
+
+#ifdef L_muldf3
+double
+__muldf3 (a, b)
+ double a, b;
+{
+ perform_muldf3 (a, b);
+}
+#endif
+
+#ifdef L_negdf2
+double
+__negdf2 (a)
+ double a;
+{
+ perform_negdf2 (a);
+}
+#endif
+
+#ifdef L_adddf3
+double
+__adddf3 (a, b)
+ double a, b;
+{
+ perform_adddf3 (a, b);
+}
+#endif
+
+#ifdef L_subdf3
+double
+__subdf3 (a, b)
+ double a, b;
+{
+ perform_subdf3 (a, b);
+}
+#endif
+
+/* Note that eqdf2 returns a value for "true" that is == 0,
+ nedf2 returns a value for "true" that is != 0,
+ gtdf2 returns a value for "true" that is > 0,
+ and so on. */
+
+#ifdef L_eqdf2
+nongcc_word_type
+__eqdf2 (a, b)
+ double a, b;
+{
+ /* Value == 0 iff a == b. */
+ perform_eqdf2 (a, b);
+}
+#endif
+
+#ifdef L_nedf2
+nongcc_word_type
+__nedf2 (a, b)
+ double a, b;
+{
+ /* Value != 0 iff a != b. */
+ perform_nedf2 (a, b);
+}
+#endif
+
+#ifdef L_gtdf2
+nongcc_word_type
+__gtdf2 (a, b)
+ double a, b;
+{
+ /* Value > 0 iff a > b. */
+ perform_gtdf2 (a, b);
+}
+#endif
+
+#ifdef L_gedf2
+nongcc_word_type
+__gedf2 (a, b)
+ double a, b;
+{
+ /* Value >= 0 iff a >= b. */
+ perform_gedf2 (a, b);
+}
+#endif
+
+#ifdef L_ltdf2
+nongcc_word_type
+__ltdf2 (a, b)
+ double a, b;
+{
+ /* Value < 0 iff a < b. */
+ perform_ltdf2 (a, b);
+}
+#endif
+
+#ifdef L_ledf2
+nongcc_word_type
+__ledf2 (a, b)
+ double a, b;
+{
+ /* Value <= 0 iff a <= b. */
+ perform_ledf2 (a, b);
+}
+#endif
+
+#ifdef L_fixdfsi
+nongcc_SI_type
+__fixdfsi (a)
+ double a;
+{
+ perform_fixdfsi (a);
+}
+#endif
+
+#ifdef L_fixsfsi
+nongcc_SI_type
+__fixsfsi (a)
+ FLOAT_ARG_TYPE a;
+{
+ union flt_or_value intify;
+ perform_fixsfsi (FLOATIFY (a));
+}
+#endif
+
+#ifdef L_floatsidf
+double
+__floatsidf (a)
+ nongcc_SI_type a;
+{
+ perform_floatsidf (a);
+}
+#endif
+
+#ifdef L_floatsisf
+FLOAT_VALUE_TYPE
+__floatsisf (a)
+ nongcc_SI_type a;
+{
+ union flt_or_value intify;
+ perform_floatsisf (a);
+}
+#endif
+
+#ifdef L_addsf3
+FLOAT_VALUE_TYPE
+__addsf3 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_value intify;
+ perform_addsf3 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_negsf2
+FLOAT_VALUE_TYPE
+__negsf2 (a)
+ FLOAT_ARG_TYPE a;
+{
+ union flt_or_value intify;
+ perform_negsf2 (FLOATIFY (a));
+}
+#endif
+
+#ifdef L_subsf3
+FLOAT_VALUE_TYPE
+__subsf3 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_value intify;
+ perform_subsf3 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_eqsf2
+nongcc_word_type
+__eqsf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value == 0 iff a == b. */
+ perform_eqsf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_nesf2
+nongcc_word_type
+__nesf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value != 0 iff a != b. */
+ perform_nesf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_gtsf2
+nongcc_word_type
+__gtsf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value > 0 iff a > b. */
+ perform_gtsf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_gesf2
+nongcc_word_type
+__gesf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value >= 0 iff a >= b. */
+ perform_gesf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_ltsf2
+nongcc_word_type
+__ltsf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value < 0 iff a < b. */
+ perform_ltsf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_lesf2
+nongcc_word_type
+__lesf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value <= 0 iff a <= b. */
+ perform_lesf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_mulsf3
+FLOAT_VALUE_TYPE
+__mulsf3 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_value intify;
+ perform_mulsf3 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_divsf3
+FLOAT_VALUE_TYPE
+__divsf3 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_value intify;
+ perform_divsf3 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_truncdfsf2
+FLOAT_VALUE_TYPE
+__truncdfsf2 (a)
+ double a;
+{
+ union flt_or_value intify;
+ perform_truncdfsf2 (a);
+}
+#endif
+
+#ifdef L_extendsfdf2
+double
+__extendsfdf2 (a)
+ FLOAT_ARG_TYPE a;
+{
+ union flt_or_value intify;
+ perform_extendsfdf2 (FLOATIFY (a));
+}
+#endif
diff --git a/gnu/usr.bin/cc/libgcc/libgcc2.c b/gnu/usr.bin/cc/libgcc/libgcc2.c
new file mode 100644
index 0000000..fc2e1ac
--- /dev/null
+++ b/gnu/usr.bin/cc/libgcc/libgcc2.c
@@ -0,0 +1,2151 @@
+/* More subroutines needed by GCC output code on some machines. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1989, 1992, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+/* It is incorrect to include config.h here, because this file is being
+ compiled for the target, and hence definitions concerning only the host
+ do not apply. */
+
+#include "tconfig.h"
+#include "machmode.h"
+#ifndef L_trampoline
+#include <stddef.h>
+#endif
+
+/* Don't use `fancy_abort' here even if config.h says to use it. */
+#ifdef abort
+#undef abort
+#endif
+
+/* In the first part of this file, we are interfacing to calls generated
+ by the compiler itself. These calls pass values into these routines
+ which have very specific modes (rather than very specific types), and
+ these compiler-generated calls also expect any return values to have
+ very specific modes (rather than very specific types). Thus, we need
+ to avoid using regular C language type names in this part of the file
+ because the sizes for those types can be configured to be anything.
+ Instead we use the following special type names. */
+
+typedef unsigned int UQItype __attribute__ ((mode (QI)));
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef int DItype __attribute__ ((mode (DI)));
+typedef unsigned int UDItype __attribute__ ((mode (DI)));
+typedef float SFtype __attribute__ ((mode (SF)));
+typedef float DFtype __attribute__ ((mode (DF)));
+#if LONG_DOUBLE_TYPE_SIZE == 96
+typedef float XFtype __attribute__ ((mode (XF)));
+#endif
+#if LONG_DOUBLE_TYPE_SIZE == 128
+typedef float TFtype __attribute__ ((mode (TF)));
+#endif
+
+#if BITS_PER_WORD==16
+typedef int word_type __attribute__ ((mode (HI)));
+#endif
+#if BITS_PER_WORD==32
+typedef int word_type __attribute__ ((mode (SI)));
+#endif
+#if BITS_PER_WORD==64
+typedef int word_type __attribute__ ((mode (DI)));
+#endif
+
+/* Make sure that we don't accidentally use any normal C language built-in
+ type names in the first part of this file. Instead we want to use *only*
+ the type names defined above. The following macro definitions ensure
+ that if we *do* accidentally use some normal C language built-in type name,
+ we will get a syntax error. */
+
+#define char bogus_type
+#define short bogus_type
+#define int bogus_type
+#define long bogus_type
+#define unsigned bogus_type
+#define float bogus_type
+#define double bogus_type
+
+#define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+
+/* DIstructs are pairs of SItype values in the order determined by
+ WORDS_BIG_ENDIAN. */
+
+#if WORDS_BIG_ENDIAN
+ struct DIstruct {SItype high, low;};
+#else
+ struct DIstruct {SItype low, high;};
+#endif
+
+/* We need this union to unpack/pack DImode values, since we don't have
+ any arithmetic yet. Incoming DImode parameters are stored into the
+ `ll' field, and the unpacked result is read from the struct `s'. */
+
+typedef union
+{
+ struct DIstruct s;
+ DItype ll;
+} DIunion;
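
The same pack/unpack idea can be shown stand-alone with long long in place of DImode; this sketch hard-codes the little-endian (WORDS_BIG_ENDIAN == 0) layout and is illustrative only:

#include <stdio.h>

union di_demo
{
  struct { unsigned int low, high; } s;   /* little-endian word order */
  unsigned long long ll;
};

int
main (void)
{
  union di_demo u;
  u.ll = 0x0000000100000002ULL;
  printf ("high=%u low=%u\n", u.s.high, u.s.low);  /* high=1 low=2 */
  return 0;
}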
+
+#if defined (L_udivmoddi4) || defined (L_muldi3) || defined (L_udiv_w_sdiv)
+
+#include "longlong.h"
+
+#endif /* udiv or mul */
+
+extern DItype __fixunssfdi (SFtype a);
+extern DItype __fixunsdfdi (DFtype a);
+#if LONG_DOUBLE_TYPE_SIZE == 96
+extern DItype __fixunsxfdi (XFtype a);
+#endif
+#if LONG_DOUBLE_TYPE_SIZE == 128
+extern DItype __fixunstfdi (TFtype a);
+#endif
+
+#if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
+#if defined (L_divdi3) || defined (L_moddi3)
+static inline
+#endif
+DItype
+__negdi2 (u)
+ DItype u;
+{
+ DIunion w;
+ DIunion uu;
+
+ uu.ll = u;
+
+ w.s.low = -uu.s.low;
+ w.s.high = -uu.s.high - ((USItype) w.s.low > 0);
+
+ return w.ll;
+}
+#endif
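
The borrow logic in __negdi2 can be checked in isolation: negate word by word, subtracting a borrow from the high word whenever the negated low word is nonzero. Demo only, assuming 32-bit int and 64-bit long long:

#include <stdio.h>

int
main (void)
{
  unsigned long long u = 5;
  unsigned int low = (unsigned int) u, high = (unsigned int) (u >> 32);
  unsigned int nlow = -low;
  unsigned int nhigh = -high - (nlow > 0);   /* borrow from high word */
  unsigned long long w = ((unsigned long long) nhigh << 32) | nlow;
  printf ("%lld\n", (long long) w);          /* prints -5 */
  return 0;
}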
+
+#ifdef L_lshldi3
+DItype
+__lshldi3 (u, b)
+ DItype u;
+ SItype b;
+{
+ DIunion w;
+ SItype bm;
+ DIunion uu;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+
+ bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
+ if (bm <= 0)
+ {
+ w.s.low = 0;
+ w.s.high = (USItype)uu.s.low << -bm;
+ }
+ else
+ {
+ USItype carries = (USItype)uu.s.low >> bm;
+ w.s.low = (USItype)uu.s.low << b;
+ w.s.high = ((USItype)uu.s.high << b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_lshrdi3
+DItype
+__lshrdi3 (u, b)
+ DItype u;
+ SItype b;
+{
+ DIunion w;
+ SItype bm;
+ DIunion uu;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+
+ bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
+ if (bm <= 0)
+ {
+ w.s.high = 0;
+ w.s.low = (USItype)uu.s.high >> -bm;
+ }
+ else
+ {
+ USItype carries = (USItype)uu.s.high << bm;
+ w.s.high = (USItype)uu.s.high >> b;
+ w.s.low = ((USItype)uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_ashldi3
+DItype
+__ashldi3 (u, b)
+ DItype u;
+ SItype b;
+{
+ DIunion w;
+ SItype bm;
+ DIunion uu;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+
+ bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
+ if (bm <= 0)
+ {
+ w.s.low = 0;
+ w.s.high = (USItype)uu.s.low << -bm;
+ }
+ else
+ {
+ USItype carries = (USItype)uu.s.low >> bm;
+ w.s.low = (USItype)uu.s.low << b;
+ w.s.high = ((USItype)uu.s.high << b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_ashrdi3
+DItype
+__ashrdi3 (u, b)
+ DItype u;
+ SItype b;
+{
+ DIunion w;
+ SItype bm;
+ DIunion uu;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+
+ bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
+ if (bm <= 0)
+ {
+ /* w.s.high = 1..1 or 0..0 */
+ w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1);
+ w.s.low = uu.s.high >> -bm;
+ }
+ else
+ {
+ USItype carries = (USItype)uu.s.high << bm;
+ w.s.high = uu.s.high >> b;
+ w.s.low = ((USItype)uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_ffsdi2
+DItype
+__ffsdi2 (u)
+ DItype u;
+{
+ DIunion uu, w;
+ uu.ll = u;
+ w.s.high = 0;
+ w.s.low = ffs (uu.s.low);
+ if (w.s.low != 0)
+ return w.ll;
+ w.s.low = ffs (uu.s.high);
+ if (w.s.low != 0)
+ {
+ w.s.low += BITS_PER_UNIT * sizeof (SItype);
+ return w.ll;
+ }
+ return w.ll;
+}
+#endif
+
+#ifdef L_muldi3
+DItype
+__muldi3 (u, v)
+ DItype u, v;
+{
+ DIunion w;
+ DIunion uu, vv;
+
+ uu.ll = u,
+ vv.ll = v;
+
+ w.ll = __umulsidi3 (uu.s.low, vv.s.low);
+ w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
+ + (USItype) uu.s.high * (USItype) vv.s.low);
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_udiv_w_sdiv
+USItype
+__udiv_w_sdiv (rp, a1, a0, d)
+ USItype *rp, a1, a0, d;
+{
+ USItype q, r;
+ USItype c0, c1, b1;
+
+ if ((SItype) d >= 0)
+ {
+ if (a1 < d - a1 - (a0 >> (SI_TYPE_SIZE - 1)))
+ {
+ /* dividend, divisor, and quotient are nonnegative */
+ sdiv_qrnnd (q, r, a1, a0, d);
+ }
+ else
+ {
+ /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
+ sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (SI_TYPE_SIZE - 1));
+ /* Divide (c1*2^32 + c0) by d */
+ sdiv_qrnnd (q, r, c1, c0, d);
+ /* Add 2^31 to quotient */
+ q += (USItype) 1 << (SI_TYPE_SIZE - 1);
+ }
+ }
+ else
+ {
+ b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
+ c1 = a1 >> 1; /* A/2 */
+ c0 = (a1 << (SI_TYPE_SIZE - 1)) + (a0 >> 1);
+
+ if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
+ {
+ sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+ r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
+ if ((d & 1) != 0)
+ {
+ if (r >= q)
+ r = r - q;
+ else if (q - r <= d)
+ {
+ r = r - q + d;
+ q--;
+ }
+ else
+ {
+ r = r - q + 2*d;
+ q -= 2;
+ }
+ }
+ }
+ else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
+ {
+ c1 = (b1 - 1) - c1;
+ c0 = ~c0; /* logical NOT */
+
+ sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+ q = ~q; /* (A/2)/b1 */
+ r = (b1 - 1) - r;
+
+ r = 2*r + (a0 & 1); /* A/(2*b1) */
+
+ if ((d & 1) != 0)
+ {
+ if (r >= q)
+ r = r - q;
+ else if (q - r <= d)
+ {
+ r = r - q + d;
+ q--;
+ }
+ else
+ {
+ r = r - q + 2*d;
+ q -= 2;
+ }
+ }
+ }
+ else /* Implies c1 = b1 */
+ { /* Hence a1 = d - 1 = 2*b1 - 1 */
+ if (a0 >= -d)
+ {
+ q = -1;
+ r = a0 + d;
+ }
+ else
+ {
+ q = -2;
+ r = a0 + 2*d;
+ }
+ }
+ }
+
+ *rp = r;
+ return q;
+}
+#endif
+
+#ifdef L_udivmoddi4
+static const UQItype __clz_tab[] =
+{
+ 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+};
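
__clz_tab stores the bit-length of each byte value (0 for 0, 1 for 1, 2 for 2-3, and so on); the count_leading_zeros macro from longlong.h locates the highest nonzero byte and finishes with one table lookup. A self-contained sketch of that generic fallback, assuming 32-bit unsigned int and computing the table entry on the fly:

#include <stdio.h>

/* Bit-length of a byte value, i.e. the quantity __clz_tab tabulates. */
static int
bitlen_demo (unsigned v)
{
  int n = 0;
  while (v != 0)
    {
      n++;
      v >>= 1;
    }
  return n;
}

/* Generic count_leading_zeros; undefined for x == 0, like the real macro. */
static int
clz_demo (unsigned x)
{
  int shift = (x & 0xFFFF0000u)
    ? ((x & 0xFF000000u) ? 24 : 16)
    : ((x & 0x0000FF00u) ? 8 : 0);
  return 32 - (bitlen_demo (x >> shift) + shift);
}

int
main (void)
{
  printf ("%d %d\n", clz_demo (1), clz_demo (0x80000000u));  /* 31 0 */
  return 0;
}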
+
+UDItype
+__udivmoddi4 (n, d, rp)
+ UDItype n, d;
+ UDItype *rp;
+{
+ DIunion ww;
+ DIunion nn, dd;
+ DIunion rr;
+ USItype d0, d1, n0, n1, n2;
+ USItype q0, q1;
+ USItype b, bm;
+
+ nn.ll = n;
+ dd.ll = d;
+
+ d0 = dd.s.low;
+ d1 = dd.s.high;
+ n0 = nn.s.low;
+ n1 = nn.s.high;
+
+#if !UDIV_NEEDS_NORMALIZATION
+ if (d1 == 0)
+ {
+ if (d0 > n1)
+ {
+ /* 0q = nn / 0D */
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0. */
+ }
+ else
+ {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ d0 = 1 / d0; /* Divide intentionally by zero. */
+
+ udiv_qrnnd (q1, n1, 0, n1, d0);
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+
+ /* Remainder in n0. */
+ }
+
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+ }
+
+#else /* UDIV_NEEDS_NORMALIZATION */
+
+ if (d1 == 0)
+ {
+ if (d0 > n1)
+ {
+ /* 0q = nn / 0D */
+
+ count_leading_zeros (bm, d0);
+
+ if (bm != 0)
+ {
+ /* Normalize, i.e. make the most significant bit of the
+ denominator set. */
+
+ d0 = d0 << bm;
+ n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm));
+ n0 = n0 << bm;
+ }
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0 >> bm. */
+ }
+ else
+ {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ d0 = 1 / d0; /* Divide intentionally by zero. */
+
+ count_leading_zeros (bm, d0);
+
+ if (bm == 0)
+ {
+ /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
+ conclude (the most significant bit of n1 is set) /\ (the
+ leading quotient digit q1 = 1).
+
+ This special case is necessary, not an optimization.
+ (Shift counts of SI_TYPE_SIZE are undefined.) */
+
+ n1 -= d0;
+ q1 = 1;
+ }
+ else
+ {
+ /* Normalize. */
+
+ b = SI_TYPE_SIZE - bm;
+
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd (q1, n1, n2, n1, d0);
+ }
+
+ /* n1 != d0... */
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+
+ /* Remainder in n0 >> bm. */
+ }
+
+ if (rp != 0)
+ {
+ rr.s.low = n0 >> bm;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+ }
+#endif /* UDIV_NEEDS_NORMALIZATION */
+
+ else
+ {
+ if (d1 > n1)
+ {
+ /* 00 = nn / DD */
+
+ q0 = 0;
+ q1 = 0;
+
+ /* Remainder in n1n0. */
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ }
+ else
+ {
+ /* 0q = NN / dd */
+
+ count_leading_zeros (bm, d1);
+ if (bm == 0)
+ {
+ /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
+ conclude (the most significant bit of n1 is set) /\ (the
+ quotient digit q0 = 0 or 1).
+
+ This special case is necessary, not an optimization. */
+
+ /* The condition on the next line takes advantage of the fact
+ that n1 >= d1 (true due to program flow). */
+ if (n1 > d1 || n0 >= d0)
+ {
+ q0 = 1;
+ sub_ddmmss (n1, n0, n1, n0, d1, d0);
+ }
+ else
+ q0 = 0;
+
+ q1 = 0;
+
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ }
+ else
+ {
+ USItype m1, m0;
+ /* Normalize. */
+
+ b = SI_TYPE_SIZE - bm;
+
+ d1 = (d1 << bm) | (d0 >> b);
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd (q0, n1, n2, n1, d1);
+ umul_ppmm (m1, m0, q0, d0);
+
+ if (m1 > n1 || (m1 == n1 && m0 > n0))
+ {
+ q0--;
+ sub_ddmmss (m1, m0, m1, m0, d1, d0);
+ }
+
+ q1 = 0;
+
+ /* Remainder in (n1n0 - m1m0) >> bm. */
+ if (rp != 0)
+ {
+ sub_ddmmss (n1, n0, n1, n0, m1, m0);
+ rr.s.low = (n1 << b) | (n0 >> bm);
+ rr.s.high = n1 >> bm;
+ *rp = rr.ll;
+ }
+ }
+ }
+ }
+
+ ww.s.low = q0;
+ ww.s.high = q1;
+ return ww.ll;
+}
+#endif
+
+#ifdef L_divdi3
+UDItype __udivmoddi4 ();
+
+DItype
+__divdi3 (u, v)
+ DItype u, v;
+{
+ SItype c = 0;
+ DIunion uu, vv;
+ DItype w;
+
+ uu.ll = u;
+ vv.ll = v;
+
+ if (uu.s.high < 0)
+ c = ~c,
+ uu.ll = __negdi2 (uu.ll);
+ if (vv.s.high < 0)
+ c = ~c,
+ vv.ll = __negdi2 (vv.ll);
+
+ w = __udivmoddi4 (uu.ll, vv.ll, (UDItype *) 0);
+ if (c)
+ w = __negdi2 (w);
+
+ return w;
+}
+#endif
+
+#ifdef L_moddi3
+UDItype __udivmoddi4 ();
+DItype
+__moddi3 (u, v)
+ DItype u, v;
+{
+ SItype c = 0;
+ DIunion uu, vv;
+ DItype w;
+
+ uu.ll = u;
+ vv.ll = v;
+
+ if (uu.s.high < 0)
+ c = ~c,
+ uu.ll = __negdi2 (uu.ll);
+ if (vv.s.high < 0)
+ vv.ll = __negdi2 (vv.ll);
+
+ (void) __udivmoddi4 (uu.ll, vv.ll, (UDItype *) &w);
+ if (c)
+ w = __negdi2 (w);
+
+ return w;
+}
+#endif
+
+#ifdef L_umoddi3
+UDItype __udivmoddi4 ();
+UDItype
+__umoddi3 (u, v)
+ UDItype u, v;
+{
+ UDItype w;
+
+ (void) __udivmoddi4 (u, v, &w);
+
+ return w;
+}
+#endif
+
+#ifdef L_udivdi3
+UDItype __udivmoddi4 ();
+UDItype
+__udivdi3 (n, d)
+ UDItype n, d;
+{
+ return __udivmoddi4 (n, d, (UDItype *) 0);
+}
+#endif
+
+#ifdef L_cmpdi2
+word_type
+__cmpdi2 (a, b)
+ DItype a, b;
+{
+ DIunion au, bu;
+
+ au.ll = a, bu.ll = b;
+
+ if (au.s.high < bu.s.high)
+ return 0;
+ else if (au.s.high > bu.s.high)
+ return 2;
+ if ((USItype) au.s.low < (USItype) bu.s.low)
+ return 0;
+ else if ((USItype) au.s.low > (USItype) bu.s.low)
+ return 2;
+ return 1;
+}
+#endif
+
+#ifdef L_ucmpdi2
+word_type
+__ucmpdi2 (a, b)
+ DItype a, b;
+{
+ DIunion au, bu;
+
+ au.ll = a, bu.ll = b;
+
+ if ((USItype) au.s.high < (USItype) bu.s.high)
+ return 0;
+ else if ((USItype) au.s.high > (USItype) bu.s.high)
+ return 2;
+ if ((USItype) au.s.low < (USItype) bu.s.low)
+ return 0;
+ else if ((USItype) au.s.low > (USItype) bu.s.low)
+ return 2;
+ return 1;
+}
+#endif
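
__cmpdi2 and __ucmpdi2 encode the three outcomes as 0 (less), 1 (equal), 2 (greater), so a caller can recover any ordering with a single comparison of the result against 1. A stand-alone illustration using long long in place of DImode:

#include <stdio.h>

static int
cmpdi_demo (long long a, long long b)
{
  if (a < b)
    return 0;
  if (a > b)
    return 2;
  return 1;
}

int
main (void)
{
  /* result < 1 means a < b; result > 1 means a > b. */
  printf ("%d %d %d\n",
          cmpdi_demo (-1, 1), cmpdi_demo (5, 5), cmpdi_demo (2, 1));
  /* prints: 0 1 2 */
  return 0;
}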
+
+#if defined(L_fixunstfdi) && (LONG_DOUBLE_TYPE_SIZE == 128)
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DItype
+__fixunstfdi (a)
+ TFtype a;
+{
+ TFtype b;
+ UDItype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DItype!),
+ and shift it into the high word. */
+ v = (USItype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the TFtype, leaving the low part as flonum. */
+ a -= (TFtype)v;
+ /* Convert that to fixed (but not to DItype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (USItype) (- a);
+ else
+ v += (USItype) a;
+ return v;
+}
+#endif
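
The peel-off-the-high-word conversion used by all the __fixuns*di routines, restated stand-alone with double and 64-bit types so it can be compiled and checked directly; 4294967296.0 plays the role of HIGH_WORD_COEFF, and the final fix-up handles the case where the low part comes out negative:

#include <stdio.h>

static unsigned long long
fixuns_demo (double a)   /* convert nonnegative a; demo only */
{
  double b;
  unsigned long long v;

  if (a < 0)
    return 0;
  b = a / 4294967296.0;                            /* 2^32 */
  v = (unsigned long long) (unsigned int) b << 32; /* high word */
  a -= (double) v;                                 /* low part remains */
  if (a < 0)                                       /* rounding overshot */
    v -= (unsigned int) -a;
  else
    v += (unsigned int) a;
  return v;
}

int
main (void)
{
  printf ("%llu\n", fixuns_demo (4294967298.0));   /* 4294967298 */
  return 0;
}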
+
+#if defined(L_fixtfdi) && (LONG_DOUBLE_TYPE_SIZE == 128)
+DItype
+__fixtfdi (a)
+ TFtype a;
+{
+ if (a < 0)
+ return - __fixunstfdi (-a);
+ return __fixunstfdi (a);
+}
+#endif
+
+#if defined(L_fixunsxfdi) && (LONG_DOUBLE_TYPE_SIZE == 96)
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DItype
+__fixunsxfdi (a)
+ XFtype a;
+{
+ XFtype b;
+ UDItype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DItype!),
+ and shift it into the high word. */
+ v = (USItype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the XFtype, leaving the low part as flonum. */
+ a -= (XFtype)v;
+ /* Convert that to fixed (but not to DItype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (USItype) (- a);
+ else
+ v += (USItype) a;
+ return v;
+}
+#endif
+
+#if defined(L_fixxfdi) && (LONG_DOUBLE_TYPE_SIZE == 96)
+DItype
+__fixxfdi (a)
+ XFtype a;
+{
+ if (a < 0)
+ return - __fixunsxfdi (-a);
+ return __fixunsxfdi (a);
+}
+#endif
+
+#ifdef L_fixunsdfdi
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DItype
+__fixunsdfdi (a)
+ DFtype a;
+{
+ DFtype b;
+ UDItype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DItype!),
+ and shift it into the high word. */
+ v = (USItype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the DFtype, leaving the low part as flonum. */
+ a -= (DFtype)v;
+ /* Convert that to fixed (but not to DItype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (USItype) (- a);
+ else
+ v += (USItype) a;
+ return v;
+}
+#endif
+
+#ifdef L_fixdfdi
+DItype
+__fixdfdi (a)
+ DFtype a;
+{
+ if (a < 0)
+ return - __fixunsdfdi (-a);
+ return __fixunsdfdi (a);
+}
+#endif
+
+#ifdef L_fixunssfdi
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DItype
+__fixunssfdi (SFtype original_a)
+{
+ /* Convert the SFtype to a DFtype, because that is surely not going
+ to lose any bits. Some day someone else can write a faster version
+ that avoids converting to DFtype, and verify it really works right. */
+ DFtype a = original_a;
+ DFtype b;
+ UDItype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DItype!),
+ and shift it into the high word. */
+ v = (USItype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the DFtype, leaving the low part as flonum. */
+ a -= (DFtype)v;
+ /* Convert that to fixed (but not to DItype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (USItype) (- a);
+ else
+ v += (USItype) a;
+ return v;
+}
+#endif
+
+#ifdef L_fixsfdi
+DItype
+__fixsfdi (SFtype a)
+{
+ if (a < 0)
+ return - __fixunssfdi (-a);
+ return __fixunssfdi (a);
+}
+#endif
+
+#if defined(L_floatdixf) && (LONG_DOUBLE_TYPE_SIZE == 96)
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+XFtype
+__floatdixf (u)
+ DItype u;
+{
+ XFtype d;
+ SItype negate = 0;
+
+ if (u < 0)
+ u = -u, negate = 1;
+
+ d = (USItype) (u >> WORD_SIZE);
+ d *= HIGH_HALFWORD_COEFF;
+ d *= HIGH_HALFWORD_COEFF;
+ d += (USItype) (u & (HIGH_WORD_COEFF - 1));
+
+ return (negate ? -d : d);
+}
+#endif
+
+#if defined(L_floatditf) && (LONG_DOUBLE_TYPE_SIZE == 128)
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+TFtype
+__floatditf (u)
+ DItype u;
+{
+ TFtype d;
+ SItype negate = 0;
+
+ if (u < 0)
+ u = -u, negate = 1;
+
+ d = (USItype) (u >> WORD_SIZE);
+ d *= HIGH_HALFWORD_COEFF;
+ d *= HIGH_HALFWORD_COEFF;
+ d += (USItype) (u & (HIGH_WORD_COEFF - 1));
+
+ return (negate ? -d : d);
+}
+#endif
+
+#ifdef L_floatdidf
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DFtype
+__floatdidf (u)
+ DItype u;
+{
+ DFtype d;
+ SItype negate = 0;
+
+ if (u < 0)
+ u = -u, negate = 1;
+
+ d = (USItype) (u >> WORD_SIZE);
+ d *= HIGH_HALFWORD_COEFF;
+ d *= HIGH_HALFWORD_COEFF;
+ d += (USItype) (u & (HIGH_WORD_COEFF - 1));
+
+ return (negate ? -d : d);
+}
+#endif
+
+#ifdef L_floatdisf
+#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+#define DI_SIZE (sizeof (DItype) * BITS_PER_UNIT)
+#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+#define DF_SIZE 53
+#define SF_SIZE 24
+#else
+#if TARGET_FLOAT_FORMAT == IBM_FLOAT_FORMAT
+#define DF_SIZE 56
+#define SF_SIZE 24
+#else
+#if TARGET_FLOAT_FORMAT == VAX_FLOAT_FORMAT
+#define DF_SIZE 56
+#define SF_SIZE 24
+#else
+#define DF_SIZE 0
+#define SF_SIZE 0
+#endif
+#endif
+#endif
+
+
+SFtype
+__floatdisf (u)
+ DItype u;
+{
+ /* Do the calculation in DFmode
+ so that we don't lose any of the precision of the high word
+ while multiplying it. */
+ DFtype f;
+ SItype negate = 0;
+
+ if (u < 0)
+ u = -u, negate = 1;
+
+ /* Protect against double-rounding error.
+ Represent any low-order bits that might be truncated in DFmode
+ by a bit that won't be lost. The bit can go anywhere below the
+ rounding position of the SFmode. A fixed mask and bit position
+ handles all usual configurations. It doesn't handle the case
+ of 128-bit DImode, however. */
+ if (DF_SIZE < DI_SIZE
+ && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
+ {
+#define REP_BIT ((USItype) 1 << (DI_SIZE - DF_SIZE))
+ if (u >= ((UDItype) 1 << DF_SIZE))
+ {
+ if ((USItype) u & (REP_BIT - 1))
+ u |= REP_BIT;
+ }
+ }
+ f = (USItype) (u >> WORD_SIZE);
+ f *= HIGH_HALFWORD_COEFF;
+ f *= HIGH_HALFWORD_COEFF;
+ f += (USItype) (u & (HIGH_WORD_COEFF - 1));
+
+ return (SFtype) (negate ? -f : f);
+}
+#endif
+
+#if defined(L_fixunsxfsi) && LONG_DOUBLE_TYPE_SIZE == 96
+#include "glimits.h"
+
+USItype
+__fixunsxfsi (a)
+ XFtype a;
+{
+ if (a >= - (DFtype) LONG_MIN)
+ return (SItype) (a + LONG_MIN) - LONG_MIN;
+ return (SItype) a;
+}
+#endif
+
+#ifdef L_fixunsdfsi
+#include "glimits.h"
+
+USItype
+__fixunsdfsi (a)
+ DFtype a;
+{
+ if (a >= - (DFtype) LONG_MIN)
+ return (SItype) (a + LONG_MIN) - LONG_MIN;
+ return (SItype) a;
+}
+#endif
+
+#ifdef L_fixunssfsi
+#include "glimits.h"
+
+USItype
+__fixunssfsi (SFtype a)
+{
+ if (a >= - (SFtype) LONG_MIN)
+ return (SItype) (a + LONG_MIN) - LONG_MIN;
+ return (SItype) a;
+}
+#endif
+
+/* From here on down, the routines use normal data types. */
+
+#define SItype bogus_type
+#define USItype bogus_type
+#define DItype bogus_type
+#define UDItype bogus_type
+#define SFtype bogus_type
+#define DFtype bogus_type
+
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+
+#ifdef L__gcc_bcmp
+
+/* Like bcmp except the sign is meaningful.
+ Result is negative if S1 is less than S2,
+ positive if S1 is greater, 0 if S1 and S2 are equal. */
+
+int
+__gcc_bcmp (s1, s2, size)
+ unsigned char *s1, *s2;
+ size_t size;
+{
+ while (size > 0)
+ {
+ unsigned char c1 = *s1++, c2 = *s2++;
+ if (c1 != c2)
+ return c1 - c2;
+ size--;
+ }
+ return 0;
+}
+
+#endif
+
+#ifdef L_varargs
+#ifdef __i860__
+#if defined(__svr4__) || defined(__alliant__)
+ asm (" .text");
+ asm (" .align 4");
+
+/* The Alliant needs the added underscore. */
+ asm (".globl __builtin_saveregs");
+asm ("__builtin_saveregs:");
+ asm (".globl ___builtin_saveregs");
+asm ("___builtin_saveregs:");
+
+ asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
+ asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
+ area and also for a new va_list
+ structure */
+ /* Save all argument registers in the arg reg save area. The
+ arg reg save area must have the following layout (according
+ to the svr4 ABI):
+
+ struct {
+ union {
+ float freg[8];
+ double dreg[4];
+ } float_regs;
+ long ireg[12];
+ };
+ */
+
+ asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
+ asm (" fst.q %f12,16(%sp)");
+
+ asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
+ asm (" st.l %r17,36(%sp)");
+ asm (" st.l %r18,40(%sp)");
+ asm (" st.l %r19,44(%sp)");
+ asm (" st.l %r20,48(%sp)");
+ asm (" st.l %r21,52(%sp)");
+ asm (" st.l %r22,56(%sp)");
+ asm (" st.l %r23,60(%sp)");
+ asm (" st.l %r24,64(%sp)");
+ asm (" st.l %r25,68(%sp)");
+ asm (" st.l %r26,72(%sp)");
+ asm (" st.l %r27,76(%sp)");
+
+ asm (" adds 80,%sp,%r16"); /* compute the address of the new
+ va_list structure. Put it into
+ r16 so that it will be returned
+ to the caller. */
+
+ /* Initialize all fields of the new va_list structure. This
+ structure looks like:
+
+ typedef struct {
+ unsigned long ireg_used;
+ unsigned long freg_used;
+ long *reg_base;
+ long *mem_ptr;
+ } va_list;
+ */
+
+ asm (" st.l %r0, 0(%r16)"); /* nfixed */
+ asm (" st.l %r0, 4(%r16)"); /* nfloating */
+ asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
+ asm (" bri %r1"); /* delayed return */
+ asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
+
+#else /* not __svr4__ */
+#if defined(__PARAGON__)
+ /*
+ * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
+ * and we stand a better chance of hooking into libraries
+ * compiled by PGI. [andyp@ssd.intel.com]
+ */
+ asm (" .text");
+ asm (" .align 4");
+ asm (".globl __builtin_saveregs");
+asm ("__builtin_saveregs:");
+ asm (".globl ___builtin_saveregs");
+asm ("___builtin_saveregs:");
+
+ asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
+ asm (" adds -96,sp,sp"); /* allocate stack space for reg save
+ area and also for a new va_list
+ structure */
+ /* Save all argument registers in the arg reg save area. The
+ arg reg save area must have the following layout (according
+ to the svr4 ABI):
+
+ struct {
+ union {
+ float freg[8];
+ double dreg[4];
+ } float_regs;
+ long ireg[12];
+ };
+ */
+
+ asm (" fst.q f8, 0(sp)");
+ asm (" fst.q f12,16(sp)");
+ asm (" st.l r16,32(sp)");
+ asm (" st.l r17,36(sp)");
+ asm (" st.l r18,40(sp)");
+ asm (" st.l r19,44(sp)");
+ asm (" st.l r20,48(sp)");
+ asm (" st.l r21,52(sp)");
+ asm (" st.l r22,56(sp)");
+ asm (" st.l r23,60(sp)");
+ asm (" st.l r24,64(sp)");
+ asm (" st.l r25,68(sp)");
+ asm (" st.l r26,72(sp)");
+ asm (" st.l r27,76(sp)");
+
+ asm (" adds 80,sp,r16"); /* compute the address of the new
+ va_list structure. Put it into
+ r16 so that it will be returned
+ to the caller. */
+
+ /* Initialize all fields of the new va_list structure. This
+ structure looks like:
+
+ typedef struct {
+ unsigned long ireg_used;
+ unsigned long freg_used;
+ long *reg_base;
+ long *mem_ptr;
+ } va_list;
+ */
+
+ asm (" st.l r0, 0(r16)"); /* nfixed */
+ asm (" st.l r0, 4(r16)"); /* nfloating */
+ asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
+ asm (" bri r1"); /* delayed return */
+ asm (" st.l r28,12(r16)"); /* pointer to overflow args */
+#else /* not __PARAGON__ */
+ asm (" .text");
+ asm (" .align 4");
+
+ asm (".globl ___builtin_saveregs");
+ asm ("___builtin_saveregs:");
+ asm (" mov sp,r30");
+ asm (" andnot 0x0f,sp,sp");
+ asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
+
+/* Fill in the __va_struct. */
+ asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
+ asm (" st.l r17, 4(sp)"); /* int fixed[12] */
+ asm (" st.l r18, 8(sp)");
+ asm (" st.l r19,12(sp)");
+ asm (" st.l r20,16(sp)");
+ asm (" st.l r21,20(sp)");
+ asm (" st.l r22,24(sp)");
+ asm (" st.l r23,28(sp)");
+ asm (" st.l r24,32(sp)");
+ asm (" st.l r25,36(sp)");
+ asm (" st.l r26,40(sp)");
+ asm (" st.l r27,44(sp)");
+
+ asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
+ asm (" fst.q f12,64(sp)"); /* int floating[8] */
+
+/* Fill in the __va_ctl. */
+ asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
+ asm (" st.l r28,84(sp)"); /* pointer to more args */
+ asm (" st.l r0, 88(sp)"); /* nfixed */
+ asm (" st.l r0, 92(sp)"); /* nfloating */
+
+ asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
+ asm (" bri r1");
+ asm (" mov r30,sp");
+ /* recover stack and pass address to start
+ of data. */
+#endif /* not __PARAGON__ */
+#endif /* not __svr4__ */
+#else /* not __i860__ */
+#ifdef __sparc__
+ asm (".global __builtin_saveregs");
+ asm ("__builtin_saveregs:");
+ asm (".global ___builtin_saveregs");
+ asm ("___builtin_saveregs:");
+#ifdef NEED_PROC_COMMAND
+ asm (".proc 020");
+#endif
+ asm ("st %i0,[%fp+68]");
+ asm ("st %i1,[%fp+72]");
+ asm ("st %i2,[%fp+76]");
+ asm ("st %i3,[%fp+80]");
+ asm ("st %i4,[%fp+84]");
+ asm ("retl");
+ asm ("st %i5,[%fp+88]");
+#ifdef NEED_TYPE_COMMAND
+ asm (".type __builtin_saveregs,#function");
+ asm (".size __builtin_saveregs,.-__builtin_saveregs");
+#endif
+#else /* not __sparc__ */
+#if defined(__MIPSEL__) || defined(__R3000__) || defined(__R2000__) || defined(__mips__)
+
+ asm (" .text");
+ asm (" .ent __builtin_saveregs");
+ asm (" .globl __builtin_saveregs");
+ asm ("__builtin_saveregs:");
+ asm (" sw $4,0($30)");
+ asm (" sw $5,4($30)");
+ asm (" sw $6,8($30)");
+ asm (" sw $7,12($30)");
+ asm (" j $31");
+ asm (" .end __builtin_saveregs");
+#else /* not __mips__, etc. */
+
+void *
+__builtin_saveregs ()
+{
+ abort ();
+}
+
+#endif /* not __mips__ */
+#endif /* not __sparc__ */
+#endif /* not __i860__ */
+#endif
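+
+#if 0 /* Illustrative sketch, not part of the imported source: on the targets
+	 above, the varargs macros are expected to capture the pointer
+	 returned by __builtin_saveregs; the exact expansion is target-defined.
+	 A rough, assumed shape:  */
+#define example_va_start(AP) ((AP) = (char *) __builtin_saveregs ())
+#endif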
+
+#ifdef L_eprintf
+#ifndef inhibit_libc
+
+#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
+#include <stdio.h>
+/* This is used by the `assert' macro. */
+void
+__eprintf (string, expression, line, filename)
+ const char *string;
+ const char *expression;
+ int line;
+ const char *filename;
+{
+ fprintf (stderr, string, expression, line, filename);
+ fflush (stderr);
+ abort ();
+}
+
+#endif
+#endif
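+
+#if 0 /* Illustrative sketch, not part of the imported source: a failed
+	 `assert' is expanded into a call of __eprintf whose format string
+	 consumes the remaining three arguments in order.  A representative
+	 (assumed, not verbatim) expansion:  */
+#define example_assert(EX) \
+  ((EX) ? (void) 0 \
+	: __eprintf ("Failed assertion `%s' at line %d of `%s'.\n", \
+		     #EX, __LINE__, __FILE__))
+#endif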
+
+#ifdef L_bb
+
+/* Structure emitted by -a */
+struct bb
+{
+ long zero_word;
+ const char *filename;
+ long *counts;
+ long ncounts;
+ struct bb *next;
+ const unsigned long *addresses;
+
+ /* Older GCC's did not emit these fields. */
+ long nwords;
+ const char **functions;
+ const long *line_nums;
+ const char **filenames;
+};
+
+#ifdef BLOCK_PROFILER_CODE
+BLOCK_PROFILER_CODE
+#else
+#ifndef inhibit_libc
+
+/* Simple-minded basic block profiling output dumper for
+   systems that don't provide tcov support.  At present,
+ it requires atexit and stdio. */
+
+#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
+#include <stdio.h>
+
+#ifdef HAVE_ATEXIT
+extern void atexit (void (*) (void));
+#define ON_EXIT(FUNC,ARG) atexit ((FUNC))
+#else
+#ifdef sun
+extern void on_exit (void*, void*);
+#define ON_EXIT(FUNC,ARG) on_exit ((FUNC), (ARG))
+#endif
+#endif
+
+static struct bb *bb_head = (struct bb *)0;
+
+/* Return the number of digits needed to print a value */
+/* __inline__ */ static int num_digits (long value, int base)
+{
+ int minus = (value < 0 && base != 16);
+ unsigned long v = (minus) ? -value : value;
+ int ret = minus;
+
+ do
+ {
+ v /= base;
+ ret++;
+ }
+ while (v);
+
+ return ret;
+}
+
+void
+__bb_exit_func (void)
+{
+ FILE *file = fopen ("bb.out", "a");
+ long time_value;
+
+ if (!file)
+ perror ("bb.out");
+
+ else
+ {
+ struct bb *ptr;
+
+ /* This is somewhat type incorrect, but it avoids worrying about
+ exactly where time.h is included from. It should be ok unless
+ a void * differs from other pointer formats, or if sizeof(long)
+ is < sizeof (time_t). It would be nice if we could assume the
+ use of rational standards here. */
+
+ time((void *) &time_value);
+ fprintf (file, "Basic block profiling finished on %s\n", ctime ((void *) &time_value));
+
+ /* We check the length field explicitly in order to allow compatibility
+ with older GCC's which did not provide it. */
+
+ for (ptr = bb_head; ptr != (struct bb *)0; ptr = ptr->next)
+ {
+ int i;
+ int func_p = (ptr->nwords >= sizeof (struct bb) && ptr->nwords <= 1000);
+ int line_p = (func_p && ptr->line_nums);
+ int file_p = (func_p && ptr->filenames);
+ long ncounts = ptr->ncounts;
+ long cnt_max = 0;
+ long line_max = 0;
+ long addr_max = 0;
+ int file_len = 0;
+ int func_len = 0;
+ int blk_len = num_digits (ncounts, 10);
+ int cnt_len;
+ int line_len;
+ int addr_len;
+
+ fprintf (file, "File %s, %ld basic blocks \n\n",
+ ptr->filename, ncounts);
+
+ /* Get max values for each field. */
+ for (i = 0; i < ncounts; i++)
+ {
+ const char *p;
+ int len;
+
+ if (cnt_max < ptr->counts[i])
+ cnt_max = ptr->counts[i];
+
+ if (addr_max < ptr->addresses[i])
+ addr_max = ptr->addresses[i];
+
+ if (line_p && line_max < ptr->line_nums[i])
+ line_max = ptr->line_nums[i];
+
+ if (func_p)
+ {
+ p = (ptr->functions[i]) ? (ptr->functions[i]) : "<none>";
+ len = strlen (p);
+ if (func_len < len)
+ func_len = len;
+ }
+
+ if (file_p)
+ {
+ p = (ptr->filenames[i]) ? (ptr->filenames[i]) : "<none>";
+ len = strlen (p);
+ if (file_len < len)
+ file_len = len;
+ }
+ }
+
+ addr_len = num_digits (addr_max, 16);
+ cnt_len = num_digits (cnt_max, 10);
+ line_len = num_digits (line_max, 10);
+
+ /* Now print out the basic block information. */
+ for (i = 0; i < ncounts; i++)
+ {
+ fprintf (file,
+ " Block #%*d: executed %*ld time(s) address= 0x%.*lx",
+ blk_len, i+1,
+ cnt_len, ptr->counts[i],
+ addr_len, ptr->addresses[i]);
+
+ if (func_p)
+ fprintf (file, " function= %-*s", func_len,
+ (ptr->functions[i]) ? ptr->functions[i] : "<none>");
+
+ if (line_p)
+ fprintf (file, " line= %*ld", line_len, ptr->line_nums[i]);
+
+ if (file_p)
+ fprintf (file, " file= %s",
+ (ptr->filenames[i]) ? ptr->filenames[i] : "<none>");
+
+ fprintf (file, "\n");
+ }
+
+ fprintf (file, "\n");
+ fflush (file);
+ }
+
+ fprintf (file, "\n\n");
+ fclose (file);
+ }
+}
+
+void
+__bb_init_func (struct bb *blocks)
+{
+ /* User is supposed to check whether the first word is non-0,
+ but just in case.... */
+
+ if (blocks->zero_word)
+ return;
+
+#ifdef ON_EXIT
+ /* Initialize destructor. */
+ if (!bb_head)
+ ON_EXIT (__bb_exit_func, 0);
+#endif
+
+ /* Set up linked list. */
+ blocks->zero_word = 1;
+ blocks->next = bb_head;
+ bb_head = blocks;
+}
+
+#endif /* not inhibit_libc */
+#endif /* not BLOCK_PROFILER_CODE */
+#endif /* L_bb */
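+
+#if 0 /* Illustrative sketch, not part of the imported source: with -a the
+	 compiler emits one such structure per file, plus startup code that
+	 registers it; all names below are invented for the example.  */
+static long example_counts[2];
+static const unsigned long example_addrs[2] = { 0x1000, 0x1040 };
+
+static struct bb example_blocks = {
+  0,			/* zero_word: nonzero once registered */
+  "example.c",		/* filename */
+  example_counts,	/* execution counts, bumped by inserted code */
+  2,			/* ncounts */
+  0,			/* next: filled in by __bb_init_func */
+  example_addrs,	/* block addresses */
+};
+
+static void
+example_register_blocks (void)
+{
+  __bb_init_func (&example_blocks);	/* chains it onto bb_head */
+}
+#endif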
+
+/* Default free-store management functions for C++, per sections 12.5 and
+ 17.3.3 of the Working Paper. */
+
+#ifdef L_op_new
+/* operator new (size_t), described in 17.3.3.5. This function is used by
+ C++ programs to allocate a block of memory to hold a single object. */
+
+typedef void (*vfp)(void);
+extern vfp __new_handler;
+extern void *malloc ();
+
+void *
+__builtin_new (size_t sz)
+{
+ void *p;
+
+ /* malloc (0) is unpredictable; avoid it. */
+ if (sz == 0)
+ sz = 1;
+ p = (void *) malloc (sz);
+ while (p == 0)
+ {
+ (*__new_handler) ();
+ p = (void *) malloc (sz);
+ }
+
+ return p;
+}
+#endif /* L_op_new */
+
+#ifdef L_op_vnew
+/* void * operator new [] (size_t), described in 17.3.3.6. This function
+ is used by C++ programs to allocate a block of memory for an array. */
+
+extern void * __builtin_new (size_t);
+
+void *
+__builtin_vec_new (size_t sz)
+{
+ return __builtin_new (sz);
+}
+#endif /* L_op_vnew */
+
+#ifdef L_new_handler
+/* set_new_handler (fvoid_t *) and the default new handler, described in
+ 17.3.3.2 and 17.3.3.5. These functions define the result of a failure
+ to allocate the amount of memory requested from operator new or new []. */
+
+#ifndef inhibit_libc
+/* This gets us __GNU_LIBRARY__. */
+#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
+#include <stdio.h>
+
+#ifdef __GNU_LIBRARY__
+ /* Avoid forcing the library's meaning of `write' on the user program
+ by using the "internal" name (for use within the library) */
+#define write(fd, buf, n) __write((fd), (buf), (n))
+#endif
+#endif /* inhibit_libc */
+
+typedef void (*vfp)(void);
+void __default_new_handler (void);
+
+vfp __new_handler = __default_new_handler;
+
+vfp
+set_new_handler (vfp handler)
+{
+ vfp prev_handler;
+
+ prev_handler = __new_handler;
+ if (handler == 0) handler = __default_new_handler;
+ __new_handler = handler;
+ return prev_handler;
+}
+
+#define MESSAGE "Virtual memory exceeded in `new'\n"
+
+void
+__default_new_handler ()
+{
+ /* don't use fprintf (stderr, ...) because it may need to call malloc. */
+ /* This should really print the name of the program, but that is hard to
+ do. We need a standard, clean way to get at the name. */
+  write (2, MESSAGE, sizeof (MESSAGE) - 1);	/* omit the trailing NUL */
+ /* don't call exit () because that may call global destructors which
+ may cause a loop. */
+ _exit (-1);
+}
+#endif
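+
+#if 0 /* Illustrative usage sketch, not part of the imported source: a
+	 program may install its own handler; passing 0 restores the
+	 default one.  */
+static void
+example_handler (void)
+{
+  write (2, "new failed\n", 11);
+  _exit (3);
+}
+
+static void
+example_install (void)
+{
+  vfp old = set_new_handler (example_handler);
+  /* ... __builtin_new now retries through example_handler ... */
+  set_new_handler (old);	/* restore the previous handler */
+}
+#endif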
+
+#ifdef L_op_delete
+/* operator delete (void *), described in 17.3.3.3. This function is used
+ by C++ programs to return to the free store a block of memory allocated
+ as a single object. */
+
+void
+__builtin_delete (void *ptr)
+{
+ if (ptr)
+ free (ptr);
+}
+#endif
+
+#ifdef L_op_vdel
+/* operator delete [] (void *), described in 17.3.3.4. This function is
+ used by C++ programs to return to the free store a block of memory
+ allocated as an array. */
+
+extern void __builtin_delete (void *);
+
+void
+__builtin_vec_delete (void *ptr)
+{
+ __builtin_delete (ptr);
+}
+#endif
+
+/* End of C++ free-store management functions */
+
+#ifdef L_shtab
+unsigned int __shtab[] = {
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008,
+ 0x00000010, 0x00000020, 0x00000040, 0x00000080,
+ 0x00000100, 0x00000200, 0x00000400, 0x00000800,
+ 0x00001000, 0x00002000, 0x00004000, 0x00008000,
+ 0x00010000, 0x00020000, 0x00040000, 0x00080000,
+ 0x00100000, 0x00200000, 0x00400000, 0x00800000,
+ 0x01000000, 0x02000000, 0x04000000, 0x08000000,
+ 0x10000000, 0x20000000, 0x40000000, 0x80000000
+ };
+#endif
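+
+#if 0 /* Illustrative sketch, not part of the imported source: __shtab[n]
+	 is simply 1 << n, a table for targets where an indexed load beats
+	 a variable shift.  */
+static unsigned int
+example_bit (int n)	/* valid for 0 <= n <= 31 */
+{
+  return __shtab[n];	/* == 1U << n */
+}
+#endif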
+
+#ifdef L_clear_cache
+/* Clear part of an instruction cache. */
+
+#define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
+
+void
+__clear_cache (beg, end)
+ char *beg, *end;
+{
+#ifdef CLEAR_INSN_CACHE
+ CLEAR_INSN_CACHE (beg, end);
+#else
+#ifdef INSN_CACHE_SIZE
+ static char array[INSN_CACHE_SIZE + INSN_CACHE_PLANE_SIZE + INSN_CACHE_LINE_WIDTH];
+ static int initialized = 0;
+  int offset;
+  int start_addr;	/* handled as integers: the code below masks
+			   and takes remainders of these addresses */
+  int end_addr;
+#if INSN_CACHE_DEPTH > 1
+  int plane;		/* cache plane index */
+#endif
+  typedef void (*function_ptr) ();
+
+#if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
+ /* It's cheaper to clear the whole cache.
+ Put in a series of jump instructions so that calling the beginning
+ of the cache will clear the whole thing. */
+
+ if (! initialized)
+ {
+ int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
+ & -INSN_CACHE_LINE_WIDTH);
+ int end_ptr = ptr + INSN_CACHE_SIZE;
+
+ while (ptr < end_ptr)
+ {
+ *(INSTRUCTION_TYPE *)ptr
+ = JUMP_AHEAD_INSTRUCTION + INSN_CACHE_LINE_WIDTH;
+ ptr += INSN_CACHE_LINE_WIDTH;
+ }
+ *(INSTRUCTION_TYPE *)(ptr - INSN_CACHE_LINE_WIDTH) = RETURN_INSTRUCTION;
+
+ initialized = 1;
+ }
+
+ /* Call the beginning of the sequence. */
+ (((function_ptr) (((int) array + INSN_CACHE_LINE_WIDTH - 1)
+ & -INSN_CACHE_LINE_WIDTH))
+ ());
+
+#else /* Cache is large. */
+
+ if (! initialized)
+ {
+ int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
+ & -INSN_CACHE_LINE_WIDTH);
+
+ while (ptr < (int) array + sizeof array)
+ {
+ *(INSTRUCTION_TYPE *)ptr = RETURN_INSTRUCTION;
+ ptr += INSN_CACHE_LINE_WIDTH;
+ }
+
+ initialized = 1;
+ }
+
+ /* Find the location in array that occupies the same cache line as BEG. */
+
+ offset = ((int) beg & -INSN_CACHE_LINE_WIDTH) & (INSN_CACHE_PLANE_SIZE - 1);
+ start_addr = (((int) (array + INSN_CACHE_PLANE_SIZE - 1)
+ & -INSN_CACHE_PLANE_SIZE)
+ + offset);
+
+ /* Compute the cache alignment of the place to stop clearing. */
+#if 0 /* This is not needed for gcc's purposes. */
+ /* If the block to clear is bigger than a cache plane,
+ we clear the entire cache, and OFFSET is already correct. */
+ if (end < beg + INSN_CACHE_PLANE_SIZE)
+#endif
+ offset = (((int) (end + INSN_CACHE_LINE_WIDTH - 1)
+ & -INSN_CACHE_LINE_WIDTH)
+ & (INSN_CACHE_PLANE_SIZE - 1));
+
+#if INSN_CACHE_DEPTH > 1
+ end_addr = (start_addr & -INSN_CACHE_PLANE_SIZE) + offset;
+ if (end_addr <= start_addr)
+ end_addr += INSN_CACHE_PLANE_SIZE;
+
+ for (plane = 0; plane < INSN_CACHE_DEPTH; plane++)
+ {
+ int addr = start_addr + plane * INSN_CACHE_PLANE_SIZE;
+ int stop = end_addr + plane * INSN_CACHE_PLANE_SIZE;
+
+ while (addr != stop)
+ {
+ /* Call the return instruction at ADDR. */
+ ((function_ptr) addr) ();
+
+ addr += INSN_CACHE_LINE_WIDTH;
+ }
+ }
+#else /* just one plane */
+ do
+ {
+ /* Call the return instruction at START_ADDR. */
+ ((function_ptr) start_addr) ();
+
+ start_addr += INSN_CACHE_LINE_WIDTH;
+ }
+ while ((start_addr % INSN_CACHE_SIZE) != offset);
+#endif /* just one plane */
+#endif /* Cache is large */
+#endif /* Cache exists */
+#endif /* CLEAR_INSN_CACHE */
+}
+
+#endif /* L_clear_cache */
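+
+#if 0 /* Illustrative sketch, not part of the imported source: a target with
+	 a flush primitive defines CLEAR_INSN_CACHE in its tm.h, and
+	 __clear_cache above reduces to that single call.  The primitive
+	 named here is hypothetical.  */
+#define EXAMPLE_CLEAR_INSN_CACHE(BEG, END) \
+  example_cacheflush ((BEG), (END) - (BEG))
+#endif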
+
+#ifdef L_trampoline
+
+/* Jump to a trampoline, loading the static chain address. */
+
+#ifdef TRANSFER_FROM_TRAMPOLINE
+TRANSFER_FROM_TRAMPOLINE
+#endif
+
+#if defined (NeXT) && defined (__MACH__)
+
+/* Make stack executable so we can call trampolines on stack.
+ This is called from INITIALIZE_TRAMPOLINE in next.h. */
+#ifdef NeXTStep21
+ #include <mach.h>
+#else
+ #include <mach/mach.h>
+#endif
+
+void
+__enable_execute_stack (addr)
+ char *addr;
+{
+ kern_return_t r;
+ char *eaddr = addr + TRAMPOLINE_SIZE;
+ vm_address_t a = (vm_address_t) addr;
+
+ /* turn on execute access on stack */
+ r = vm_protect (task_self (), a, TRAMPOLINE_SIZE, FALSE, VM_PROT_ALL);
+ if (r != KERN_SUCCESS)
+ {
+ mach_error("vm_protect VM_PROT_ALL", r);
+ exit(1);
+ }
+
+ /* We inline the i-cache invalidation for speed */
+
+#ifdef CLEAR_INSN_CACHE
+ CLEAR_INSN_CACHE (addr, eaddr);
+#else
+  __clear_cache (addr, eaddr);
+#endif
+}
+
+#endif /* defined (NeXT) && defined (__MACH__) */
+
+#ifdef __convex__
+
+/* Make stack executable so we can call trampolines on stack.
+ This is called from INITIALIZE_TRAMPOLINE in convex.h. */
+
+#include <sys/mman.h>
+#include <sys/vmparam.h>
+#include <machine/machparam.h>
+
+void
+__enable_execute_stack ()
+{
+ int fp;
+ static unsigned lowest = USRSTACK;
+ unsigned current = (unsigned) &fp & -NBPG;
+
+ if (lowest > current)
+ {
+ unsigned len = lowest - current;
+ mremap (current, &len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE);
+ lowest = current;
+ }
+
+ /* Clear instruction cache in case an old trampoline is in it. */
+ asm ("pich");
+}
+#endif /* __convex__ */
+
+#ifdef __DOLPHIN__
+
+/* Modified from the convex code above. */
+
+#include <sys/param.h>
+#include <errno.h>
+#include <sys/m88kbcs.h>
+
+void
+__enable_execute_stack ()
+{
+ int save_errno;
+ static unsigned long lowest = USRSTACK;
+ unsigned long current = (unsigned long) &save_errno & -NBPC;
+
+ /* Ignore errno being set. memctl sets errno to EINVAL whenever the
+ address is seen as 'negative'. That is the case with the stack. */
+
+ save_errno=errno;
+ if (lowest > current)
+ {
+ unsigned len=lowest-current;
+ memctl(current,len,MCT_TEXT);
+ lowest = current;
+ }
+ else
+ memctl(current,NBPC,MCT_TEXT);
+ errno=save_errno;
+}
+
+#endif /* __DOLPHIN__ */
+
+#ifdef __pyr__
+
+#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/vmmac.h>
+
+/* Modified from the convex code above.
+   mremap promises to clear the i-cache. */
+
+void
+__enable_execute_stack ()
+{
+ int fp;
+ if (mprotect (((unsigned int)&fp/PAGSIZ)*PAGSIZ, PAGSIZ,
+ PROT_READ|PROT_WRITE|PROT_EXEC))
+ {
+ perror ("mprotect in __enable_execute_stack");
+ fflush (stderr);
+ abort ();
+ }
+}
+#endif /* __pyr__ */
+#endif /* L_trampoline */
+
+#ifdef L__main
+
+#include "gbl-ctors.h"
+/* Some systems use __main in a way incompatible with its use in gcc; in those
+   cases, use the macro NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
+   give the same symbol without quotes, for an alternative entry point.  You
+   must define both, or neither. */
+#ifndef NAME__MAIN
+#define NAME__MAIN "__main"
+#define SYMBOL__MAIN __main
+#endif
+
+/* Run all the global destructors on exit from the program. */
+
+void
+__do_global_dtors ()
+{
+#ifdef DO_GLOBAL_DTORS_BODY
+ DO_GLOBAL_DTORS_BODY;
+#else
+ unsigned nptrs = (unsigned HOST_WIDE_INT) __DTOR_LIST__[0];
+ unsigned i;
+
+ /* Some systems place the number of pointers
+ in the first word of the table.
+ On other systems, that word is -1.
+ In all cases, the table is null-terminated. */
+
+ /* If the length is not recorded, count up to the null. */
+ if (nptrs == -1)
+ for (nptrs = 0; __DTOR_LIST__[nptrs + 1] != 0; nptrs++);
+
+ /* GNU LD format. */
+ for (i = nptrs; i >= 1; i--)
+ __DTOR_LIST__[i] ();
+#endif
+}
+
+#ifndef INIT_SECTION_ASM_OP
+/* Run all the global constructors on entry to the program. */
+
+#ifndef ON_EXIT
+#define ON_EXIT(a, b)
+#else
+/* Make sure the exit routine is pulled in to define the globals as
+ bss symbols, just in case the linker does not automatically pull
+ bss definitions from the library. */
+
+extern int _exit_dummy_decl;
+int *_exit_dummy_ref = &_exit_dummy_decl;
+#endif /* ON_EXIT */
+
+void
+__do_global_ctors ()
+{
+ DO_GLOBAL_CTORS_BODY;
+ ON_EXIT (__do_global_dtors, 0);
+}
+#endif /* no INIT_SECTION_ASM_OP */
+
+#if !defined (INIT_SECTION_ASM_OP) || defined (INVOKE__main)
+/* Subroutine called automatically by `main'.
+ Compiling a global function named `main'
+ produces an automatic call to this function at the beginning.
+
+ For many systems, this routine calls __do_global_ctors.
+ For systems which support a .init section we use the .init section
+ to run __do_global_ctors, so we need not do anything here. */
+
+void
+SYMBOL__MAIN ()
+{
+ /* Support recursive calls to `main': run initializers just once. */
+ static int initialized = 0;
+ if (! initialized)
+ {
+ initialized = 1;
+ __do_global_ctors ();
+ }
+}
+#endif /* no INIT_SECTION_ASM_OP or INVOKE__main */
+
+#endif /* L__main */
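+
+#if 0 /* Illustrative sketch, not part of the imported source: on targets
+	 without an .init section, a compiled `main' behaves roughly as if
+	 it began this way.  */
+int
+example_main (int argc, char **argv)
+{
+  __main ();		/* runs __do_global_ctors exactly once */
+  /* ... user code ... */
+  return 0;
+}
+#endif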
+
+#ifdef L_ctors
+
+#include "gbl-ctors.h"
+
+/* Provide default definitions for the lists of constructors and
+ destructors, so that we don't get linker errors. These symbols are
+ intentionally bss symbols, so that gld and/or collect will provide
+ the right values. */
+
+/* We declare the lists here with two elements each,
+ so that they are valid empty lists if no other definition is loaded. */
+#if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
+#ifdef __NeXT__
+/* After 2.3, try this definition on all systems. */
+func_ptr __CTOR_LIST__[2] = {0, 0};
+func_ptr __DTOR_LIST__[2] = {0, 0};
+#else
+func_ptr __CTOR_LIST__[2];
+func_ptr __DTOR_LIST__[2];
+#endif
+#endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
+#endif /* L_ctors */
+
+#ifdef L_exit
+
+#include "gbl-ctors.h"
+
+#ifndef ON_EXIT
+
+/* If we have no known way of registering our own __do_global_dtors
+ routine so that it will be invoked at program exit time, then we
+ have to define our own exit routine which will get this to happen. */
+
+extern void __do_global_dtors ();
+extern void _cleanup ();
+extern void _exit () __attribute__ ((noreturn));
+
+void
+exit (status)
+ int status;
+{
+ __do_global_dtors ();
+#ifdef EXIT_BODY
+ EXIT_BODY;
+#else
+ _cleanup ();
+#endif
+ _exit (status);
+}
+
+#else
+int _exit_dummy_decl = 0; /* prevent compiler & linker warnings */
+#endif
+
+#endif /* L_exit */
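+
+#if 0 /* Illustrative sketch, not part of the imported source: when ON_EXIT
+	 is available (e.g. via atexit), the destructors are instead
+	 registered at startup, roughly equivalent to:  */
+static void
+example_register_dtors (void)
+{
+  atexit (__do_global_dtors);	/* host C library runs it at exit */
+}
+#endif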
+
+/* In a.out systems, we need to have these dummy constructor and destructor
+ lists in the library.
+
+ When using `collect', the first link will resolve __CTOR_LIST__
+ and __DTOR_LIST__ to these symbols. We will then run "nm" on the
+ result, build the correct __CTOR_LIST__ and __DTOR_LIST__, and relink.
+ Since we don't do the second link if no constructors existed, these
+ dummies must be fully functional empty lists.
+
+ When using `gnu ld', these symbols will be used if there are no
+ constructors. If there are constructors, the N_SETV symbol defined
+ by the linker from the N_SETT's in input files will define __CTOR_LIST__
+ and __DTOR_LIST__ rather than its being allocated as common storage
+ by the definitions below.
+
+ When using a linker that supports constructor and destructor segments,
+ these definitions will not be used, since crtbegin.o and crtend.o
+ (from crtstuff.c) will have already defined __CTOR_LIST__ and
+ __DTOR_LIST__. The crt*.o files are passed directly to the linker
+ on its command line, by gcc. */
+
+/* The list needs two elements: one is ignored (the old count); the
+ second is the terminating zero. Since both values are zero, this
+ declaration is not initialized, and it becomes `common'. */
+
+#ifdef L_ctor_list
+#include "gbl-ctors.h"
+func_ptr __CTOR_LIST__[2];
+#endif
+
+#ifdef L_dtor_list
+#include "gbl-ctors.h"
+func_ptr __DTOR_LIST__[2];
+#endif